Diffstat (limited to 'include')
-rw-r--r--include/acpi/acpi_bus.h34
-rw-r--r--include/acpi/cppc_acpi.h5
-rw-r--r--include/acpi/video.h9
-rw-r--r--include/asm-generic/bug.h16
-rw-r--r--include/asm-generic/cacheflush.h14
-rw-r--r--include/asm-generic/compat.h9
-rw-r--r--include/asm-generic/mshyperv.h9
-rw-r--r--include/asm-generic/signal.h2
-rw-r--r--include/asm-generic/termios-base.h78
-rw-r--r--include/asm-generic/termios.h108
-rw-r--r--include/asm-generic/unaligned.h2
-rw-r--r--include/asm-generic/vmlinux.lds.h51
-rw-r--r--include/clocksource/timer-ti-dm.h112
-rw-r--r--include/crypto/aria.h17
-rw-r--r--include/crypto/internal/aead.h25
-rw-r--r--include/crypto/scatterwalk.h6
-rw-r--r--include/drm/display/drm_dp.h3
-rw-r--r--include/drm/display/drm_dp_helper.h2
-rw-r--r--include/drm/display/drm_dp_mst_helper.h237
-rw-r--r--include/drm/drm_atomic_helper.h12
-rw-r--r--include/drm/drm_bridge.h12
-rw-r--r--include/drm/drm_connector.h26
-rw-r--r--include/drm/drm_crtc.h9
-rw-r--r--include/drm/drm_encoder.h6
-rw-r--r--include/drm/drm_fb_dma_helper.h (renamed from include/drm/drm_fb_cma_helper.h)10
-rw-r--r--include/drm/drm_file.h9
-rw-r--r--include/drm/drm_format_helper.h69
-rw-r--r--include/drm/drm_fourcc.h4
-rw-r--r--include/drm/drm_framebuffer.h8
-rw-r--r--include/drm/drm_gem.h57
-rw-r--r--include/drm/drm_gem_dma_helper.h (renamed from include/drm/drm_gem_cma_helper.h)158
-rw-r--r--include/drm/drm_gem_shmem_helper.h2
-rw-r--r--include/drm/drm_mipi_dbi.h2
-rw-r--r--include/drm/drm_mipi_dsi.h4
-rw-r--r--include/drm/drm_modes.h35
-rw-r--r--include/drm/drm_panel.h7
-rw-r--r--include/drm/drm_plane.h52
-rw-r--r--include/drm/drm_plane_helper.h40
-rw-r--r--include/drm/drm_print.h78
-rw-r--r--include/drm/drm_probe_helper.h9
-rw-r--r--include/drm/gpu_scheduler.h9
-rw-r--r--include/drm/i915_pciids.h1
-rw-r--r--include/drm/ttm/ttm_bo_api.h93
-rw-r--r--include/drm/ttm/ttm_bo_driver.h2
-rw-r--r--include/drm/ttm/ttm_resource.h40
-rw-r--r--include/dt-bindings/ata/ahci.h20
-rw-r--r--include/dt-bindings/clock/aspeed-clock.h1
-rw-r--r--include/dt-bindings/clock/ast2600-clock.h1
-rw-r--r--include/dt-bindings/clock/exynos850.h136
-rw-r--r--include/dt-bindings/clock/imx8mm-clock.h1
-rw-r--r--include/dt-bindings/clock/imx93-clock.h9
-rw-r--r--include/dt-bindings/clock/lochnagar.h (renamed from include/dt-bindings/clk/lochnagar.h)0
-rw-r--r--include/dt-bindings/clock/marvell,pxa168.h6
-rw-r--r--include/dt-bindings/clock/mediatek,mt6795-clk.h275
-rw-r--r--include/dt-bindings/clock/mediatek,mt8365-clk.h373
-rw-r--r--include/dt-bindings/clock/microchip,mpfs-clock.h23
-rw-r--r--include/dt-bindings/clock/mt8195-clk.h4
-rw-r--r--include/dt-bindings/clock/qcom,gcc-msm8909.h218
-rw-r--r--include/dt-bindings/clock/qcom,gcc-sdm845.h1
-rw-r--r--include/dt-bindings/clock/qcom,gpucc-sc8280xp.h35
-rw-r--r--include/dt-bindings/clock/qcom,lcc-ipq806x.h2
-rw-r--r--include/dt-bindings/clock/qcom,lpassaudiocc-sc7280.h5
-rw-r--r--include/dt-bindings/clock/qcom,lpasscorecc-sc7280.h2
-rw-r--r--include/dt-bindings/clock/qcom,rpmcc.h1
-rw-r--r--include/dt-bindings/clock/qcom,sm6115-dispcc.h36
-rw-r--r--include/dt-bindings/clock/qcom,sm6375-gcc.h234
-rw-r--r--include/dt-bindings/clock/qcom,sm8450-dispcc.h103
-rw-r--r--include/dt-bindings/clock/rockchip,rv1126-cru.h632
-rw-r--r--include/dt-bindings/clock/samsung,exynosautov9.h128
-rw-r--r--include/dt-bindings/clock/versaclock.h (renamed from include/dt-bindings/clk/versaclock.h)0
-rw-r--r--include/dt-bindings/firmware/imx/rsrc.h7
-rw-r--r--include/dt-bindings/iio/adc/at91-sama5d2_adc.h3
-rw-r--r--include/dt-bindings/iio/adc/mediatek,mt6370_adc.h18
-rw-r--r--include/dt-bindings/interrupt-controller/irqc-rzg2l.h25
-rw-r--r--include/dt-bindings/leds/common.h7
-rw-r--r--include/dt-bindings/memory/mt6795-larb-port.h95
-rw-r--r--include/dt-bindings/memory/tegra234-mc.h10
-rw-r--r--include/dt-bindings/phy/phy.h1
-rw-r--r--include/dt-bindings/pinctrl/k3.h15
-rw-r--r--include/dt-bindings/pinctrl/mediatek,mt8188-pinfunc.h1280
-rw-r--r--include/dt-bindings/pinctrl/pinctrl-starfive-jh7100.h (renamed from include/dt-bindings/pinctrl/pinctrl-starfive.h)6
-rw-r--r--include/dt-bindings/pinctrl/samsung.h7
-rw-r--r--include/dt-bindings/power/fsl,imx93-power.h15
-rw-r--r--include/dt-bindings/power/imx8mp-power.h6
-rw-r--r--include/dt-bindings/power/qcom-rpmpd.h12
-rw-r--r--include/dt-bindings/power/rk3588-power.h69
-rw-r--r--include/dt-bindings/power/rockchip,rv1126-power.h35
-rw-r--r--include/dt-bindings/reset/bt1-ccu.h9
-rw-r--r--include/dt-bindings/reset/mediatek,mt6795-resets.h53
-rw-r--r--include/dt-bindings/reset/mt8195-resets.h3
-rw-r--r--include/dt-bindings/sound/qcom,q6dsp-lpass-ports.h18
-rw-r--r--include/kunit/assert.h28
-rw-r--r--include/kunit/resource.h16
-rw-r--r--include/kunit/test.h123
-rw-r--r--include/linux/a.out.h18
-rw-r--r--include/linux/acpi.h42
-rw-r--r--include/linux/ahci_platform.h8
-rw-r--r--include/linux/amd-pstate.h77
-rw-r--r--include/linux/arm_ffa.h36
-rw-r--r--include/linux/ata.h39
-rw-r--r--include/linux/bcma/bcma_driver_chipcommon.h1
-rw-r--r--include/linux/binfmts.h3
-rw-r--r--include/linux/bio.h2
-rw-r--r--include/linux/bitmap.h13
-rw-r--r--include/linux/bitops.h30
-rw-r--r--include/linux/blk-cgroup.h5
-rw-r--r--include/linux/blk-mq-pci.h4
-rw-r--r--include/linux/blk-mq-rdma.h2
-rw-r--r--include/linux/blk-mq-virtio.h2
-rw-r--r--include/linux/blk-mq.h36
-rw-r--r--include/linux/blk_types.h3
-rw-r--r--include/linux/blkdev.h25
-rw-r--r--include/linux/bma150.h4
-rw-r--r--include/linux/bpf-cgroup.h17
-rw-r--r--include/linux/bpf.h184
-rw-r--r--include/linux/bpf_mem_alloc.h28
-rw-r--r--include/linux/bpf_types.h1
-rw-r--r--include/linux/bpf_verifier.h40
-rw-r--r--include/linux/brcmphy.h1
-rw-r--r--include/linux/btf.h21
-rw-r--r--include/linux/buffer_head.h50
-rw-r--r--include/linux/cache.h13
-rw-r--r--include/linux/can/dev.h5
-rw-r--r--include/linux/can/skb.h57
-rw-r--r--include/linux/ceph/messenger.h4
-rw-r--r--include/linux/cfi.h59
-rw-r--r--include/linux/cfi_types.h45
-rw-r--r--include/linux/cgroup-defs.h21
-rw-r--r--include/linux/cgroup.h34
-rw-r--r--include/linux/clk-provider.h66
-rw-r--r--include/linux/clk/davinci.h8
-rw-r--r--include/linux/clk/spear.h14
-rw-r--r--include/linux/clkdev.h2
-rw-r--r--include/linux/compiler-clang.h37
-rw-r--r--include/linux/compiler-gcc.h6
-rw-r--r--include/linux/compiler.h16
-rw-r--r--include/linux/compiler_attributes.h10
-rw-r--r--include/linux/compiler_types.h21
-rw-r--r--include/linux/completion.h1
-rw-r--r--include/linux/coresight.h23
-rw-r--r--include/linux/counter.h187
-rw-r--r--include/linux/cpumask.h137
-rw-r--r--include/linux/damon.h92
-rw-r--r--include/linux/dcache.h7
-rw-r--r--include/linux/delayacct.h16
-rw-r--r--include/linux/dlm.h5
-rw-r--r--include/linux/dma-iommu.h93
-rw-r--r--include/linux/dma-resv.h16
-rw-r--r--include/linux/dma/hsu.h6
-rw-r--r--include/linux/dmar.h4
-rw-r--r--include/linux/dynamic_debug.h176
-rw-r--r--include/linux/edac.h30
-rw-r--r--include/linux/efi.h35
-rw-r--r--include/linux/entry-common.h1
-rw-r--r--include/linux/etherdevice.h22
-rw-r--r--include/linux/eventfd.h2
-rw-r--r--include/linux/export-internal.h6
-rw-r--r--include/linux/f2fs_fs.h40
-rw-r--r--include/linux/fb.h10
-rw-r--r--include/linux/filter.h16
-rw-r--r--include/linux/find.h310
-rw-r--r--include/linux/firmware/xlnx-zynqmp.h45
-rw-r--r--include/linux/fortify-string.h247
-rw-r--r--include/linux/freezer.h245
-rw-r--r--include/linux/fs.h38
-rw-r--r--include/linux/fscrypt.h39
-rw-r--r--include/linux/fsverity.h3
-rw-r--r--include/linux/ftrace.h41
-rw-r--r--include/linux/gameport.h2
-rw-r--r--include/linux/genl_magic_func.h1
-rw-r--r--include/linux/gfp.h26
-rw-r--r--include/linux/gpio/consumer.h13
-rw-r--r--include/linux/hdmi.h7
-rw-r--r--include/linux/hid.h33
-rw-r--r--include/linux/highmem.h3
-rw-r--r--include/linux/hisi_acc_qm.h63
-rw-r--r--include/linux/htcpld.h2
-rw-r--r--include/linux/huge_mm.h28
-rw-r--r--include/linux/hugetlb.h73
-rw-r--r--include/linux/hugetlb_cgroup.h19
-rw-r--r--include/linux/hw_breakpoint.h4
-rw-r--r--include/linux/hw_random.h3
-rw-r--r--include/linux/i2c.h2
-rw-r--r--include/linux/ieee80211.h14
-rw-r--r--include/linux/if_pppol2tp.h2
-rw-r--r--include/linux/if_pppox.h2
-rw-r--r--include/linux/igmp.h4
-rw-r--r--include/linux/iio/consumer.h28
-rw-r--r--include/linux/iio/iio-opaque.h2
-rw-r--r--include/linux/iio/iio.h8
-rw-r--r--include/linux/iio/types.h3
-rw-r--r--include/linux/init.h8
-rw-r--r--include/linux/input/auo-pixcir-ts.h44
-rw-r--r--include/linux/instrumented.h59
-rw-r--r--include/linux/interconnect-provider.h5
-rw-r--r--include/linux/io-pgtable.h3
-rw-r--r--include/linux/io.h2
-rw-r--r--include/linux/io_uring.h18
-rw-r--r--include/linux/io_uring_types.h9
-rw-r--r--include/linux/iommu.h47
-rw-r--r--include/linux/ioport.h8
-rw-r--r--include/linux/iosys-map.h15
-rw-r--r--include/linux/iova.h2
-rw-r--r--include/linux/iova_bitmap.h26
-rw-r--r--include/linux/ipc_namespace.h5
-rw-r--r--include/linux/irqchip.h4
-rw-r--r--include/linux/irqdesc.h1
-rw-r--r--include/linux/isa.h52
-rw-r--r--include/linux/iversion.h72
-rw-r--r--include/linux/kallsyms.h2
-rw-r--r--include/linux/kasan.h55
-rw-r--r--include/linux/kernfs.h3
-rw-r--r--include/linux/kexec.h2
-rw-r--r--include/linux/key.h6
-rw-r--r--include/linux/khugepaged.h13
-rw-r--r--include/linux/kmsan-checks.h83
-rw-r--r--include/linux/kmsan.h330
-rw-r--r--include/linux/kmsan_types.h35
-rw-r--r--include/linux/kprobes.h1
-rw-r--r--include/linux/ksm.h3
-rw-r--r--include/linux/kvm_host.h16
-rw-r--r--include/linux/libata.h6
-rw-r--r--include/linux/linear_range.h11
-rw-r--r--include/linux/lsm_hook_defs.h3
-rw-r--r--include/linux/lsm_hooks.h4
-rw-r--r--include/linux/maple_tree.h685
-rw-r--r--include/linux/mdev.h77
-rw-r--r--include/linux/mdio/mdio-i2c.h10
-rw-r--r--include/linux/mei_aux.h12
-rw-r--r--include/linux/memcontrol.h144
-rw-r--r--include/linux/memory-tiers.h102
-rw-r--r--include/linux/memory_hotplug.h30
-rw-r--r--include/linux/mempolicy.h13
-rw-r--r--include/linux/memremap.h6
-rw-r--r--include/linux/mfd/ocelot.h62
-rw-r--r--include/linux/mfd/rk808.h91
-rw-r--r--include/linux/migrate.h30
-rw-r--r--include/linux/mlx5/device.h32
-rw-r--r--include/linux/mlx5/driver.h16
-rw-r--r--include/linux/mlx5/fs.h12
-rw-r--r--include/linux/mlx5/fs_helpers.h48
-rw-r--r--include/linux/mlx5/mlx5_ifc.h293
-rw-r--r--include/linux/mlx5/mlx5_ifc_fpga.h24
-rw-r--r--include/linux/mlx5/qp.h9
-rw-r--r--include/linux/mm.h161
-rw-r--r--include/linux/mm_inline.h242
-rw-r--r--include/linux/mm_types.h178
-rw-r--r--include/linux/mm_types_task.h12
-rw-r--r--include/linux/mmc/card.h1
-rw-r--r--include/linux/mmc/host.h2
-rw-r--r--include/linux/mmc/sdio_ids.h1
-rw-r--r--include/linux/mmdebug.h6
-rw-r--r--include/linux/mmzone.h282
-rw-r--r--include/linux/module.h10
-rw-r--r--include/linux/mroute.h6
-rw-r--r--include/linux/mroute6.h4
-rw-r--r--include/linux/mtd/mtd.h8
-rw-r--r--include/linux/namei.h2
-rw-r--r--include/linux/netdevice.h65
-rw-r--r--include/linux/netfilter.h5
-rw-r--r--include/linux/netfilter_defs.h8
-rw-r--r--include/linux/netlink.h24
-rw-r--r--include/linux/node.h29
-rw-r--r--include/linux/nodemask.h17
-rw-r--r--include/linux/nvme.h4
-rw-r--r--include/linux/of.h4
-rw-r--r--include/linux/of_irq.h6
-rw-r--r--include/linux/once.h28
-rw-r--r--include/linux/oom.h11
-rw-r--r--include/linux/overflow.h72
-rw-r--r--include/linux/page-flags-layout.h16
-rw-r--r--include/linux/page-flags.h4
-rw-r--r--include/linux/page_counter.h26
-rw-r--r--include/linux/page_ext.h24
-rw-r--r--include/linux/page_idle.h34
-rw-r--r--include/linux/pageblock-flags.h4
-rw-r--r--include/linux/pagemap.h16
-rw-r--r--include/linux/pagewalk.h10
-rw-r--r--include/linux/pci.h7
-rw-r--r--include/linux/pci_ids.h3
-rw-r--r--include/linux/pcs-altera-tse.h17
-rw-r--r--include/linux/pe.h2
-rw-r--r--include/linux/percpu-rwsem.h6
-rw-r--r--include/linux/percpu_counter.h32
-rw-r--r--include/linux/perf/arm_pmu.h9
-rw-r--r--include/linux/perf/riscv_pmu.h2
-rw-r--r--include/linux/perf_event.h77
-rw-r--r--include/linux/pgtable.h26
-rw-r--r--include/linux/phy.h38
-rw-r--r--include/linux/phy/pcie.h12
-rw-r--r--include/linux/phy/tegra/xusb.h4
-rw-r--r--include/linux/phylink.h40
-rw-r--r--include/linux/platform_data/adp5588.h171
-rw-r--r--include/linux/platform_data/cros_ec_commands.h18
-rw-r--r--include/linux/platform_data/cros_ec_proto.h1
-rw-r--r--include/linux/platform_data/dma-hsu.h2
-rw-r--r--include/linux/platform_data/emc2305.h22
-rw-r--r--include/linux/platform_data/pca953x.h2
-rw-r--r--include/linux/platform_data/ssm2518.h21
-rw-r--r--include/linux/platform_data/tps68470.h7
-rw-r--r--include/linux/platform_data/usb-s3c2410_udc.h6
-rw-r--r--include/linux/platform_data/x86/asus-wmi.h11
-rw-r--r--include/linux/platform_data/x86/nvidia-wmi-ec-backlight.h76
-rw-r--r--include/linux/platform_data/x86/pmc_atom.h4
-rw-r--r--include/linux/platform_data/x86/simatic-ipc-base.h1
-rw-r--r--include/linux/platform_data/x86/simatic-ipc.h2
-rw-r--r--include/linux/pm.h37
-rw-r--r--include/linux/pm_runtime.h20
-rw-r--r--include/linux/poison.h3
-rw-r--r--include/linux/posix_acl_xattr.h12
-rw-r--r--include/linux/power_supply.h48
-rw-r--r--include/linux/prandom.h17
-rw-r--r--include/linux/preempt.h42
-rw-r--r--include/linux/printk.h7
-rw-r--r--include/linux/property.h4
-rw-r--r--include/linux/pse-pd/pse.h129
-rw-r--r--include/linux/psi.h12
-rw-r--r--include/linux/psi_types.h31
-rw-r--r--include/linux/pwm.h20
-rw-r--r--include/linux/random.h7
-rw-r--r--include/linux/rcupdate.h42
-rw-r--r--include/linux/rcutiny.h50
-rw-r--r--include/linux/rcutree.h40
-rw-r--r--include/linux/reboot.h8
-rw-r--r--include/linux/regmap.h11
-rw-r--r--include/linux/regulator/consumer.h27
-rw-r--r--include/linux/regulator/gpio-regulator.h2
-rw-r--r--include/linux/regulator/mt6331-regulator.h46
-rw-r--r--include/linux/regulator/mt6332-regulator.h27
-rw-r--r--include/linux/remoteproc.h22
-rw-r--r--include/linux/resctrl.h64
-rw-r--r--include/linux/ring_buffer.h2
-rw-r--r--include/linux/rmap.h73
-rw-r--r--include/linux/rwlock.h2
-rw-r--r--include/linux/sbitmap.h3
-rw-r--r--include/linux/sched.h71
-rw-r--r--include/linux/sched/coredump.h7
-rw-r--r--include/linux/sched/signal.h1
-rw-r--r--include/linux/sched/sysctl.h1
-rw-r--r--include/linux/sched/task.h3
-rw-r--r--include/linux/scmi_protocol.h4
-rw-r--r--include/linux/security.h12
-rw-r--r--include/linux/sed-opal.h1
-rw-r--r--include/linux/serdev.h1
-rw-r--r--include/linux/serial_8250.h5
-rw-r--r--include/linux/serial_core.h53
-rw-r--r--include/linux/sfp.h5
-rw-r--r--include/linux/shmem_fs.h16
-rw-r--r--include/linux/skbuff.h43
-rw-r--r--include/linux/slab.h196
-rw-r--r--include/linux/soc/apple/rtkit.h12
-rw-r--r--include/linux/soc/mediatek/mtk-mmsys.h2
-rw-r--r--include/linux/soc/mediatek/mtk-mutex.h2
-rw-r--r--include/linux/soc/mediatek/mtk_sip_svc.h3
-rw-r--r--include/linux/soc/mediatek/mtk_wed.h19
-rw-r--r--include/linux/soc/qcom/llcc-qcom.h30
-rw-r--r--include/linux/soc/qcom/qmi.h20
-rw-r--r--include/linux/soc/qcom/smd-rpm.h1
-rw-r--r--include/linux/soc/sunxi/sunxi_sram.h2
-rw-r--r--include/linux/sockptr.h5
-rw-r--r--include/linux/soundwire/sdw.h9
-rw-r--r--include/linux/soundwire/sdw_intel.h63
-rw-r--r--include/linux/spi/spi.h6
-rw-r--r--include/linux/spinlock.h2
-rw-r--r--include/linux/spinlock_api_smp.h2
-rw-r--r--include/linux/spinlock_api_up.h2
-rw-r--r--include/linux/spinlock_rt.h2
-rw-r--r--include/linux/spinlock_up.h2
-rw-r--r--include/linux/srcutiny.h10
-rw-r--r--include/linux/stackdepot.h8
-rw-r--r--include/linux/stat.h2
-rw-r--r--include/linux/stmmac.h1
-rw-r--r--include/linux/string.h43
-rw-r--r--include/linux/string_helpers.h7
-rw-r--r--include/linux/sunrpc/clnt.h1
-rw-r--r--include/linux/sunrpc/sched.h13
-rw-r--r--include/linux/sunrpc/svc.h20
-rw-r--r--include/linux/sunrpc/xdr.h2
-rw-r--r--include/linux/suspend.h11
-rw-r--r--include/linux/swab.h25
-rw-r--r--include/linux/swap.h41
-rw-r--r--include/linux/swap_cgroup.h4
-rw-r--r--include/linux/swapfile.h7
-rw-r--r--include/linux/swapops.h150
-rw-r--r--include/linux/syslog.h3
-rw-r--r--include/linux/tcp.h8
-rw-r--r--include/linux/termios_internal.h49
-rw-r--r--include/linux/thermal.h85
-rw-r--r--include/linux/thunderbolt.h2
-rw-r--r--include/linux/tnum.h20
-rw-r--r--include/linux/trace.h36
-rw-r--r--include/linux/trace_events.h1
-rw-r--r--include/linux/tty.h10
-rw-r--r--include/linux/tty_driver.h10
-rw-r--r--include/linux/tty_ldisc.h4
-rw-r--r--include/linux/u64_stats_sync.h145
-rw-r--r--include/linux/uaccess.h19
-rw-r--r--include/linux/ucb1400.h2
-rw-r--r--include/linux/uio.h2
-rw-r--r--include/linux/umh.h9
-rw-r--r--include/linux/units.h3
-rw-r--r--include/linux/usb/chipidea.h1
-rw-r--r--include/linux/usb/hcd.h1
-rw-r--r--include/linux/usb/serial.h4
-rw-r--r--include/linux/usb/tcpci.h22
-rw-r--r--include/linux/user_events.h15
-rw-r--r--include/linux/user_namespace.h35
-rw-r--r--include/linux/userfaultfd_k.h7
-rw-r--r--include/linux/vdpa.h1
-rw-r--r--include/linux/verification.h8
-rw-r--r--include/linux/vfio.h57
-rw-r--r--include/linux/vfio_pci_core.h149
-rw-r--r--include/linux/virtio_pci_legacy.h2
-rw-r--r--include/linux/vm_event_item.h4
-rw-r--r--include/linux/vmacache.h28
-rw-r--r--include/linux/vmstat.h6
-rw-r--r--include/linux/vt_kern.h1
-rw-r--r--include/linux/wait.h42
-rw-r--r--include/linux/wireless.h10
-rw-r--r--include/linux/writeback.h8
-rw-r--r--include/linux/xattr.h2
-rw-r--r--include/math-emu/op-common.h3
-rw-r--r--include/media/davinci/dm355_ccdc.h308
-rw-r--r--include/media/davinci/dm644x_ccdc.h171
-rw-r--r--include/media/davinci/isif.h518
-rw-r--r--include/media/davinci/vpbe_display.h6
-rw-r--r--include/media/drv-intf/saa7146.h472
-rw-r--r--include/media/drv-intf/saa7146_vv.h266
-rw-r--r--include/media/v4l2-common.h4
-rw-r--r--include/media/v4l2-ctrls.h140
-rw-r--r--include/media/v4l2-mem2mem.h12
-rw-r--r--include/media/v4l2-uvc.h359
-rw-r--r--include/media/videobuf2-core.h16
-rw-r--r--include/media/videobuf2-dvb.h2
-rw-r--r--include/media/videobuf2-v4l2.h16
-rw-r--r--include/media/vsp1.h2
-rw-r--r--include/net/9p/9p.h3
-rw-r--r--include/net/9p/transport.h5
-rw-r--r--include/net/act_api.h1
-rw-r--r--include/net/af_vsock.h2
-rw-r--r--include/net/bluetooth/bluetooth.h1
-rw-r--r--include/net/bluetooth/hci.h4
-rw-r--r--include/net/bluetooth/hci_core.h17
-rw-r--r--include/net/bluetooth/hci_sock.h2
-rw-r--r--include/net/bluetooth/hci_sync.h9
-rw-r--r--include/net/bluetooth/mgmt.h52
-rw-r--r--include/net/bond_3ad.h2
-rw-r--r--include/net/bonding.h3
-rw-r--r--include/net/cfg80211.h43
-rw-r--r--include/net/devlink.h27
-rw-r--r--include/net/dn.h231
-rw-r--r--include/net/dn_dev.h200
-rw-r--r--include/net/dn_fib.h169
-rw-r--r--include/net/dn_neigh.h32
-rw-r--r--include/net/dn_nsp.h201
-rw-r--r--include/net/dn_route.h118
-rw-r--r--include/net/dsa.h37
-rw-r--r--include/net/dst.h6
-rw-r--r--include/net/dst_metadata.h41
-rw-r--r--include/net/flow.h26
-rw-r--r--include/net/flow_dissector.h9
-rw-r--r--include/net/flow_offload.h6
-rw-r--r--include/net/genetlink.h10
-rw-r--r--include/net/gro.h33
-rw-r--r--include/net/ieee802154_netdev.h43
-rw-r--r--include/net/inet_connection_sock.h3
-rw-r--r--include/net/inet_hashtables.h99
-rw-r--r--include/net/ip.h4
-rw-r--r--include/net/ip_tunnels.h6
-rw-r--r--include/net/ipcomp.h2
-rw-r--r--include/net/ipv6.h8
-rw-r--r--include/net/ipv6_stubs.h4
-rw-r--r--include/net/mac80211.h198
-rw-r--r--include/net/macsec.h28
-rw-r--r--include/net/neighbour.h5
-rw-r--r--include/net/netfilter/nf_conntrack_bpf.h25
-rw-r--r--include/net/netfilter/nf_conntrack_core.h6
-rw-r--r--include/net/netfilter/nf_nat_helper.h1
-rw-r--r--include/net/netlink.h13
-rw-r--r--include/net/netns/generic.h2
-rw-r--r--include/net/netns/ipv4.h4
-rw-r--r--include/net/netns/netfilter.h3
-rw-r--r--include/net/netns/smc.h3
-rw-r--r--include/net/nl802154.h6
-rw-r--r--include/net/pkt_cls.h25
-rw-r--r--include/net/pkt_sched.h25
-rw-r--r--include/net/red.h1
-rw-r--r--include/net/sch_generic.h16
-rw-r--r--include/net/sock.h28
-rw-r--r--include/net/tcp.h12
-rw-r--r--include/net/tls.h10
-rw-r--r--include/net/udp.h2
-rw-r--r--include/net/udplite.h8
-rw-r--r--include/net/xdp.h4
-rw-r--r--include/net/xdp_sock_drv.h10
-rw-r--r--include/net/xfrm.h24
-rw-r--r--include/net/xsk_buff_pool.h2
-rw-r--r--include/rdma/ib_cm.h10
-rw-r--r--include/rdma/ib_sa.h3
-rw-r--r--include/rdma/rdma_cm.h13
-rw-r--r--include/rdma/rdma_vt.h2
-rw-r--r--include/rdma/uverbs_ioctl.h13
-rw-r--r--include/scsi/scsi_cmnd.h2
-rw-r--r--include/scsi/scsi_device.h1
-rw-r--r--include/scsi/scsi_host.h2
-rw-r--r--include/scsi/scsi_status.h12
-rw-r--r--include/soc/at91/pm.h16
-rw-r--r--include/soc/fsl/qman.h9
-rw-r--r--include/soc/mediatek/smi.h5
-rw-r--r--include/soc/microchip/mpfs.h8
-rw-r--r--include/soc/mscc/ocelot.h144
-rw-r--r--include/soc/sifive/sifive_ccache.h16
-rw-r--r--include/soc/sifive/sifive_l2_cache.h16
-rw-r--r--include/soc/tegra/fuse.h6
-rw-r--r--include/soc/tegra/tegra-cbb.h47
-rw-r--r--include/sound/acp62_chip_offset_byte.h444
-rw-r--r--include/sound/cs42l42.h1
-rw-r--r--include/sound/hda_codec.h3
-rw-r--r--include/sound/hda_register.h20
-rw-r--r--include/sound/hdaudio.h10
-rw-r--r--include/sound/hdaudio_ext.h14
-rw-r--r--include/sound/intel-nhlt.h7
-rw-r--r--include/sound/memalloc.h3
-rw-r--r--include/sound/pcm.h20
-rw-r--r--include/sound/simple_card_utils.h1
-rw-r--r--include/sound/soc-acpi-intel-match.h3
-rw-r--r--include/sound/soc.h36
-rw-r--r--include/sound/sof.h2
-rw-r--r--include/sound/sof/control.h6
-rw-r--r--include/sound/sof/dai.h2
-rw-r--r--include/sound/sof/ipc4/header.h5
-rw-r--r--include/trace/events/btrfs.h2
-rw-r--r--include/trace/events/dlm.h26
-rw-r--r--include/trace/events/erofs.h11
-rw-r--r--include/trace/events/f2fs.h37
-rw-r--r--include/trace/events/habanalabs.h93
-rw-r--r--include/trace/events/huge_memory.h37
-rw-r--r--include/trace/events/io_uring.h29
-rw-r--r--include/trace/events/kmem.h74
-rw-r--r--include/trace/events/maple_tree.h123
-rw-r--r--include/trace/events/mmap.h73
-rw-r--r--include/trace/events/scmi.h30
-rw-r--r--include/trace/events/sof.h121
-rw-r--r--include/trace/events/sof_intel.h148
-rw-r--r--include/uapi/asm-generic/hugetlb_encode.h26
-rw-r--r--include/uapi/asm-generic/mman-common.h2
-rw-r--r--include/uapi/drm/amdgpu_drm.h8
-rw-r--r--include/uapi/drm/drm_fourcc.h34
-rw-r--r--include/uapi/drm/drm_mode.h8
-rw-r--r--include/uapi/drm/panfrost_drm.h47
-rw-r--r--include/uapi/linux/bpf.h182
-rw-r--r--include/uapi/linux/btrfs.h6
-rw-r--r--include/uapi/linux/btrfs_tree.h4
-rw-r--r--include/uapi/linux/can.h55
-rw-r--r--include/uapi/linux/can/raw.h1
-rw-r--r--include/uapi/linux/counter.h8
-rw-r--r--include/uapi/linux/dlm.h1
-rw-r--r--include/uapi/linux/dn.h149
-rw-r--r--include/uapi/linux/dw100.h14
-rw-r--r--include/uapi/linux/ethtool.h63
-rw-r--r--include/uapi/linux/ethtool_netlink.h17
-rw-r--r--include/uapi/linux/fuse.h6
-rw-r--r--include/uapi/linux/hid.h26
-rw-r--r--include/uapi/linux/if_ether.h1
-rw-r--r--include/uapi/linux/if_link.h12
-rw-r--r--include/uapi/linux/if_macsec.h2
-rw-r--r--include/uapi/linux/if_tun.h2
-rw-r--r--include/uapi/linux/iio/types.h10
-rw-r--r--include/uapi/linux/in.h22
-rw-r--r--include/uapi/linux/input-event-codes.h1
-rw-r--r--include/uapi/linux/io_uring.h17
-rw-r--r--include/uapi/linux/kvm.h1
-rw-r--r--include/uapi/linux/l2tp.h2
-rw-r--r--include/uapi/linux/landlock.h10
-rw-r--r--include/uapi/linux/lwtunnel.h10
-rw-r--r--include/uapi/linux/netfilter.h2
-rw-r--r--include/uapi/linux/netfilter/ipset/ip_set.h4
-rw-r--r--include/uapi/linux/netfilter/xt_AUDIT.h4
-rw-r--r--include/uapi/linux/netfilter/xt_connmark.h13
-rw-r--r--include/uapi/linux/netfilter/xt_osf.h14
-rw-r--r--include/uapi/linux/netfilter_decnet.h72
-rw-r--r--include/uapi/linux/netlink.h31
-rw-r--r--include/uapi/linux/nl80211.h25
-rw-r--r--include/uapi/linux/openvswitch.h3
-rw-r--r--include/uapi/linux/perf_event.h57
-rw-r--r--include/uapi/linux/pkt_cls.h2
-rw-r--r--include/uapi/linux/pkt_sched.h11
-rw-r--r--include/uapi/linux/psci.h14
-rw-r--r--include/uapi/linux/sed-opal.h13
-rw-r--r--include/uapi/linux/seg6_local.h24
-rw-r--r--include/uapi/linux/stat.h4
-rw-r--r--include/uapi/linux/tc_act/tc_bpf.h5
-rw-r--r--include/uapi/linux/tc_act/tc_skbedit.h13
-rw-r--r--include/uapi/linux/tc_act/tc_skbmod.h7
-rw-r--r--include/uapi/linux/tc_act/tc_tunnel_key.h5
-rw-r--r--include/uapi/linux/tc_act/tc_vlan.h5
-rw-r--r--include/uapi/linux/tls.h30
-rw-r--r--include/uapi/linux/ublk_cmd.h8
-rw-r--r--include/uapi/linux/usbip.h26
-rw-r--r--include/uapi/linux/userfaultfd.h4
-rw-r--r--include/uapi/linux/v4l2-controls.h8
-rw-r--r--include/uapi/linux/vdpa.h6
-rw-r--r--include/uapi/linux/vfio.h142
-rw-r--r--include/uapi/linux/videodev2.h7
-rw-r--r--include/uapi/linux/virtio_blk.h19
-rw-r--r--include/uapi/misc/habanalabs.h137
-rw-r--r--include/uapi/misc/uacce/hisi_qm.h17
-rw-r--r--include/uapi/mtd/mtd-abi.h64
-rw-r--r--include/uapi/rdma/efa-abi.h4
-rw-r--r--include/uapi/rdma/mlx5-abi.h1
-rw-r--r--include/uapi/rdma/mlx5_user_ioctl_cmds.h1
-rw-r--r--include/uapi/rdma/rdma_user_rxe.h4
-rw-r--r--include/uapi/scsi/scsi_netlink_fc.h7
-rw-r--r--include/uapi/sound/asequencer.h16
-rw-r--r--include/uapi/sound/asoc.h4
-rw-r--r--include/uapi/sound/asound.h16
-rw-r--r--include/uapi/sound/asound_fm.h15
-rw-r--r--include/uapi/sound/compress_offload.h17
-rw-r--r--include/uapi/sound/compress_params.h38
-rw-r--r--include/uapi/sound/emu10k1.h16
-rw-r--r--include/uapi/sound/hdsp.h14
-rw-r--r--include/uapi/sound/hdspm.h15
-rw-r--r--include/uapi/sound/sb16_csp.h15
-rw-r--r--include/uapi/sound/sfnt_info.h15
-rw-r--r--include/uapi/sound/snd_sst_tokens.h10
-rw-r--r--include/uapi/sound/tlv.h11
-rw-r--r--include/uapi/sound/usb_stream.h14
-rw-r--r--include/ufs/ufshcd.h46
-rw-r--r--include/video/vga.h20
-rw-r--r--include/xen/xen-ops.h6
630 files changed, 16249 insertions, 7622 deletions
diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
index e7d27373ff71..c09d72986968 100644
--- a/include/acpi/acpi_bus.h
+++ b/include/acpi/acpi_bus.h
@@ -365,7 +365,6 @@ struct acpi_device {
int device_type;
acpi_handle handle; /* no handle for fixed hardware */
struct fwnode_handle fwnode;
- struct acpi_device *parent;
struct list_head wakeup_list;
struct list_head del_list;
struct acpi_device_status status;
@@ -458,6 +457,14 @@ static inline void *acpi_driver_data(struct acpi_device *d)
#define to_acpi_device(d) container_of(d, struct acpi_device, dev)
#define to_acpi_driver(d) container_of(d, struct acpi_driver, drv)
+static inline struct acpi_device *acpi_dev_parent(struct acpi_device *adev)
+{
+ if (adev->dev.parent)
+ return to_acpi_device(adev->dev.parent);
+
+ return NULL;
+}
+
static inline void acpi_set_device_status(struct acpi_device *adev, u32 sta)
{
*((u32 *)&adev->status) = sta;
@@ -512,7 +519,6 @@ extern int unregister_acpi_notifier(struct notifier_block *);
* External Functions
*/
-struct acpi_device *acpi_fetch_acpi_dev(acpi_handle handle);
acpi_status acpi_bus_get_status_handle(acpi_handle handle,
unsigned long long *sta);
int acpi_bus_get_status(struct acpi_device *device);
@@ -613,8 +619,7 @@ enum dev_dma_attr acpi_get_dma_attr(struct acpi_device *adev);
int acpi_iommu_fwspec_init(struct device *dev, u32 id,
struct fwnode_handle *fwnode,
const struct iommu_ops *ops);
-int acpi_dma_get_range(struct device *dev, u64 *dma_addr, u64 *offset,
- u64 *size);
+int acpi_dma_get_range(struct device *dev, const struct bus_dma_region **map);
int acpi_dma_configure_id(struct device *dev, enum dev_dma_attr attr,
const u32 *input_id);
static inline int acpi_dma_configure(struct device *dev,
@@ -733,10 +738,24 @@ static inline bool acpi_device_can_poweroff(struct acpi_device *adev)
}
bool acpi_dev_hid_uid_match(struct acpi_device *adev, const char *hid2, const char *uid2);
+int acpi_dev_uid_to_integer(struct acpi_device *adev, u64 *integer);
void acpi_dev_clear_dependencies(struct acpi_device *supplier);
bool acpi_dev_ready_for_enumeration(const struct acpi_device *device);
-struct acpi_device *acpi_dev_get_first_consumer_dev(struct acpi_device *supplier);
+struct acpi_device *acpi_dev_get_next_consumer_dev(struct acpi_device *supplier,
+ struct acpi_device *start);
+
+/**
+ * for_each_acpi_consumer_dev - iterate over the consumer ACPI devices for a
+ * given supplier
+ * @supplier: Pointer to the supplier's ACPI device
+ * @consumer: Pointer to &struct acpi_device to hold the consumer, initially NULL
+ */
+#define for_each_acpi_consumer_dev(supplier, consumer) \
+ for (consumer = acpi_dev_get_next_consumer_dev(supplier, NULL); \
+ consumer; \
+ consumer = acpi_dev_get_next_consumer_dev(supplier, consumer))
+
struct acpi_device *
acpi_dev_get_next_match_dev(struct acpi_device *adev, const char *hid, const char *uid, s64 hrv);
struct acpi_device *
@@ -767,9 +786,10 @@ static inline void acpi_dev_put(struct acpi_device *adev)
put_device(&adev->dev);
}
-struct acpi_device *acpi_bus_get_acpi_device(acpi_handle handle);
+struct acpi_device *acpi_fetch_acpi_dev(acpi_handle handle);
+struct acpi_device *acpi_get_acpi_dev(acpi_handle handle);
-static inline void acpi_bus_put_acpi_device(struct acpi_device *adev)
+static inline void acpi_put_acpi_dev(struct acpi_device *adev)
{
acpi_dev_put(adev);
}
diff --git a/include/acpi/cppc_acpi.h b/include/acpi/cppc_acpi.h
index f73d357ecdf5..c5614444031f 100644
--- a/include/acpi/cppc_acpi.h
+++ b/include/acpi/cppc_acpi.h
@@ -140,6 +140,7 @@ extern int cppc_get_perf_ctrs(int cpu, struct cppc_perf_fb_ctrs *perf_fb_ctrs);
extern int cppc_set_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls);
extern int cppc_set_enable(int cpu, bool enable);
extern int cppc_get_perf_caps(int cpu, struct cppc_perf_caps *caps);
+extern bool cppc_perf_ctrs_in_pcc(void);
extern bool acpi_cpc_valid(void);
extern bool cppc_allow_fast_switch(void);
extern int acpi_get_psd_map(unsigned int cpu, struct cppc_cpudata *cpu_data);
@@ -173,6 +174,10 @@ static inline int cppc_get_perf_caps(int cpu, struct cppc_perf_caps *caps)
{
return -ENOTSUPP;
}
+static inline bool cppc_perf_ctrs_in_pcc(void)
+{
+ return false;
+}
static inline bool acpi_cpc_valid(void)
{
return false;
diff --git a/include/acpi/video.h b/include/acpi/video.h
index db8548ff03ce..a275c35e5249 100644
--- a/include/acpi/video.h
+++ b/include/acpi/video.h
@@ -48,15 +48,18 @@ enum acpi_backlight_type {
acpi_backlight_video,
acpi_backlight_vendor,
acpi_backlight_native,
+ acpi_backlight_nvidia_wmi_ec,
+ acpi_backlight_apple_gmux,
};
#if IS_ENABLED(CONFIG_ACPI_VIDEO)
extern int acpi_video_register(void);
extern void acpi_video_unregister(void);
+extern void acpi_video_register_backlight(void);
extern int acpi_video_get_edid(struct acpi_device *device, int type,
int device_id, void **edid);
extern enum acpi_backlight_type acpi_video_get_backlight_type(void);
-extern void acpi_video_set_dmi_backlight_type(enum acpi_backlight_type type);
+extern bool acpi_video_backlight_use_native(void);
/*
* Note: The value returned by acpi_video_handles_brightness_key_presses()
* may change over time and should not be cached.
@@ -68,6 +71,7 @@ extern int acpi_video_get_levels(struct acpi_device *device,
#else
static inline int acpi_video_register(void) { return -ENODEV; }
static inline void acpi_video_unregister(void) { return; }
+static inline void acpi_video_register_backlight(void) { return; }
static inline int acpi_video_get_edid(struct acpi_device *device, int type,
int device_id, void **edid)
{
@@ -77,8 +81,9 @@ static inline enum acpi_backlight_type acpi_video_get_backlight_type(void)
{
return acpi_backlight_vendor;
}
-static inline void acpi_video_set_dmi_backlight_type(enum acpi_backlight_type type)
+static inline bool acpi_video_backlight_use_native(void)
{
+ return true;
}
static inline bool acpi_video_handles_brightness_key_presses(void)
{
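acpi_video_backlight_use_native() is intended to be queried by native GPU backlight code before it registers a backlight device, and the CONFIG_ACPI_VIDEO=n stub always reports true. A hedged sketch of that pattern; example_register_native_backlight() is a made-up placeholder, not a real helper:

static int example_backlight_setup(struct device *dev)
{
	/* Defer to the acpi_video or vendor interfaces when native control
	 * is not the chosen backlight method on this platform. */
	if (!acpi_video_backlight_use_native())
		return 0;

	return example_register_native_backlight(dev);
}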
diff --git a/include/asm-generic/bug.h b/include/asm-generic/bug.h
index ba1f860af38b..4050b191e1a9 100644
--- a/include/asm-generic/bug.h
+++ b/include/asm-generic/bug.h
@@ -220,22 +220,6 @@ extern __printf(1, 2) void __warn_printk(const char *fmt, ...);
# define WARN_ON_SMP(x) ({0;})
#endif
-/*
- * WARN_ON_FUNCTION_MISMATCH() warns if a value doesn't match a
- * function address, and can be useful for catching issues with
- * callback functions, for example.
- *
- * With CONFIG_CFI_CLANG, the warning is disabled because the
- * compiler replaces function addresses taken in C code with
- * local jump table addresses, which breaks cross-module function
- * address equality.
- */
-#if defined(CONFIG_CFI_CLANG) && defined(CONFIG_MODULES)
-# define WARN_ON_FUNCTION_MISMATCH(x, fn) ({ 0; })
-#else
-# define WARN_ON_FUNCTION_MISMATCH(x, fn) WARN_ON_ONCE((x) != (fn))
-#endif
-
#endif /* __ASSEMBLY__ */
#endif
diff --git a/include/asm-generic/cacheflush.h b/include/asm-generic/cacheflush.h
index 4f07afacbc23..f46258d1a080 100644
--- a/include/asm-generic/cacheflush.h
+++ b/include/asm-generic/cacheflush.h
@@ -2,6 +2,8 @@
#ifndef _ASM_GENERIC_CACHEFLUSH_H
#define _ASM_GENERIC_CACHEFLUSH_H
+#include <linux/instrumented.h>
+
struct mm_struct;
struct vm_area_struct;
struct page;
@@ -105,14 +107,22 @@ static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
#ifndef copy_to_user_page
#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
do { \
+ instrument_copy_to_user((void __user *)dst, src, len); \
memcpy(dst, src, len); \
flush_icache_user_page(vma, page, vaddr, len); \
} while (0)
#endif
+
#ifndef copy_from_user_page
-#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
- memcpy(dst, src, len)
+#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
+ do { \
+ instrument_copy_from_user_before(dst, (void __user *)src, \
+ len); \
+ memcpy(dst, src, len); \
+ instrument_copy_from_user_after(dst, (void __user *)src, len, \
+ 0); \
+ } while (0)
#endif
#endif /* _ASM_GENERIC_CACHEFLUSH_H */
diff --git a/include/asm-generic/compat.h b/include/asm-generic/compat.h
index d06308a2a7a8..aeb257ad3d1a 100644
--- a/include/asm-generic/compat.h
+++ b/include/asm-generic/compat.h
@@ -14,12 +14,17 @@
#define COMPAT_OFF_T_MAX 0x7fffffff
#endif
-#if !defined(compat_arg_u64) && !defined(CONFIG_CPU_BIG_ENDIAN)
+#ifndef compat_arg_u64
+#ifdef CONFIG_CPU_BIG_ENDIAN
#define compat_arg_u64(name) u32 name##_lo, u32 name##_hi
#define compat_arg_u64_dual(name) u32, name##_lo, u32, name##_hi
+#else
+#define compat_arg_u64(name) u32 name##_hi, u32 name##_lo
+#define compat_arg_u64_dual(name) u32, name##_hi, u32, name##_lo
+#endif
#define compat_arg_u64_glue(name) (((u64)name##_lo & 0xffffffffUL) | \
((u64)name##_hi << 32))
-#endif
+#endif /* compat_arg_u64 */
/* These types are common across all compat ABIs */
typedef u32 compat_size_t;
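With the endian handling above, compat_arg_u64_dual() can be used unconditionally in compat syscall wrappers and compat_arg_u64_glue() reassembles the two 32-bit halves into a u64 in whichever order the architecture passed them. A sketch loosely modeled on the kernel's fallocate() compat wrapper (the example_ name is illustrative):

COMPAT_SYSCALL_DEFINE6(example_fallocate, int, fd, int, mode,
		       compat_arg_u64_dual(offset), compat_arg_u64_dual(len))
{
	/* The _glue form rebuilds the 64-bit values regardless of whether
	 * the hi or lo half arrived in the first register slot. */
	return ksys_fallocate(fd, mode, compat_arg_u64_glue(offset),
			      compat_arg_u64_glue(len));
}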
diff --git a/include/asm-generic/mshyperv.h b/include/asm-generic/mshyperv.h
index c05d2ce9b6cd..bfb9eb9d7215 100644
--- a/include/asm-generic/mshyperv.h
+++ b/include/asm-generic/mshyperv.h
@@ -105,15 +105,12 @@ static inline u64 hv_do_rep_hypercall(u16 code, u16 rep_count, u16 varhead_size,
}
/* Generate the guest OS identifier as described in the Hyper-V TLFS */
-static inline __u64 generate_guest_id(__u64 d_info1, __u64 kernel_version,
- __u64 d_info2)
+static inline u64 hv_generate_guest_id(u64 kernel_version)
{
- __u64 guest_id = 0;
+ u64 guest_id;
- guest_id = (((__u64)HV_LINUX_VENDOR_ID) << 48);
- guest_id |= (d_info1 << 48);
+ guest_id = (((u64)HV_LINUX_VENDOR_ID) << 48);
guest_id |= (kernel_version << 16);
- guest_id |= d_info2;
return guest_id;
}
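The reworked helper packs only the Linux vendor ID and the kernel version into the guest OS identifier, leaving the former d_info1/d_info2 fields at zero. Roughly, for a caller passing LINUX_VERSION_CODE:

/* Illustrative call; for a 6.1.0 kernel LINUX_VERSION_CODE is 0x060100, so
 * guest_id = ((u64)HV_LINUX_VENDOR_ID << 48) | (0x060100ULL << 16). */
u64 guest_id = hv_generate_guest_id(LINUX_VERSION_CODE);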
diff --git a/include/asm-generic/signal.h b/include/asm-generic/signal.h
index c53984fa9761..663dd6d0795d 100644
--- a/include/asm-generic/signal.h
+++ b/include/asm-generic/signal.h
@@ -5,8 +5,6 @@
#include <uapi/asm-generic/signal.h>
#ifndef __ASSEMBLY__
-#ifdef SA_RESTORER
-#endif
#include <asm/sigcontext.h>
#undef __HAVE_ARCH_SIG_BITOPS
diff --git a/include/asm-generic/termios-base.h b/include/asm-generic/termios-base.h
deleted file mode 100644
index 59c5a3bd4a6e..000000000000
--- a/include/asm-generic/termios-base.h
+++ /dev/null
@@ -1,78 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* termios.h: generic termios/termio user copying/translation
- */
-
-#ifndef _ASM_GENERIC_TERMIOS_BASE_H
-#define _ASM_GENERIC_TERMIOS_BASE_H
-
-#include <linux/uaccess.h>
-
-#ifndef __ARCH_TERMIO_GETPUT
-
-/*
- * Translate a "termio" structure into a "termios". Ugh.
- */
-static inline int user_termio_to_kernel_termios(struct ktermios *termios,
- struct termio __user *termio)
-{
- unsigned short tmp;
-
- if (get_user(tmp, &termio->c_iflag) < 0)
- goto fault;
- termios->c_iflag = (0xffff0000 & termios->c_iflag) | tmp;
-
- if (get_user(tmp, &termio->c_oflag) < 0)
- goto fault;
- termios->c_oflag = (0xffff0000 & termios->c_oflag) | tmp;
-
- if (get_user(tmp, &termio->c_cflag) < 0)
- goto fault;
- termios->c_cflag = (0xffff0000 & termios->c_cflag) | tmp;
-
- if (get_user(tmp, &termio->c_lflag) < 0)
- goto fault;
- termios->c_lflag = (0xffff0000 & termios->c_lflag) | tmp;
-
- if (get_user(termios->c_line, &termio->c_line) < 0)
- goto fault;
-
- if (copy_from_user(termios->c_cc, termio->c_cc, NCC) != 0)
- goto fault;
-
- return 0;
-
- fault:
- return -EFAULT;
-}
-
-/*
- * Translate a "termios" structure into a "termio". Ugh.
- */
-static inline int kernel_termios_to_user_termio(struct termio __user *termio,
- struct ktermios *termios)
-{
- if (put_user(termios->c_iflag, &termio->c_iflag) < 0 ||
- put_user(termios->c_oflag, &termio->c_oflag) < 0 ||
- put_user(termios->c_cflag, &termio->c_cflag) < 0 ||
- put_user(termios->c_lflag, &termio->c_lflag) < 0 ||
- put_user(termios->c_line, &termio->c_line) < 0 ||
- copy_to_user(termio->c_cc, termios->c_cc, NCC) != 0)
- return -EFAULT;
-
- return 0;
-}
-
-#ifndef user_termios_to_kernel_termios
-#define user_termios_to_kernel_termios(k, u) copy_from_user(k, u, sizeof(struct termios))
-#endif
-
-#ifndef kernel_termios_to_user_termios
-#define kernel_termios_to_user_termios(u, k) copy_to_user(u, k, sizeof(struct termios))
-#endif
-
-#define user_termios_to_kernel_termios_1(k, u) copy_from_user(k, u, sizeof(struct termios))
-#define kernel_termios_to_user_termios_1(u, k) copy_to_user(u, k, sizeof(struct termios))
-
-#endif /* __ARCH_TERMIO_GETPUT */
-
-#endif /* _ASM_GENERIC_TERMIOS_BASE_H */
diff --git a/include/asm-generic/termios.h b/include/asm-generic/termios.h
deleted file mode 100644
index b1398d0d4a1d..000000000000
--- a/include/asm-generic/termios.h
+++ /dev/null
@@ -1,108 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_GENERIC_TERMIOS_H
-#define _ASM_GENERIC_TERMIOS_H
-
-
-#include <linux/uaccess.h>
-#include <uapi/asm-generic/termios.h>
-
-/* intr=^C quit=^\ erase=del kill=^U
- eof=^D vtime=\0 vmin=\1 sxtc=\0
- start=^Q stop=^S susp=^Z eol=\0
- reprint=^R discard=^U werase=^W lnext=^V
- eol2=\0
-*/
-#define INIT_C_CC "\003\034\177\025\004\0\1\0\021\023\032\0\022\017\027\026\0"
-
-/*
- * Translate a "termio" structure into a "termios". Ugh.
- */
-static inline int user_termio_to_kernel_termios(struct ktermios *termios,
- const struct termio __user *termio)
-{
- unsigned short tmp;
-
- if (get_user(tmp, &termio->c_iflag) < 0)
- goto fault;
- termios->c_iflag = (0xffff0000 & termios->c_iflag) | tmp;
-
- if (get_user(tmp, &termio->c_oflag) < 0)
- goto fault;
- termios->c_oflag = (0xffff0000 & termios->c_oflag) | tmp;
-
- if (get_user(tmp, &termio->c_cflag) < 0)
- goto fault;
- termios->c_cflag = (0xffff0000 & termios->c_cflag) | tmp;
-
- if (get_user(tmp, &termio->c_lflag) < 0)
- goto fault;
- termios->c_lflag = (0xffff0000 & termios->c_lflag) | tmp;
-
- if (get_user(termios->c_line, &termio->c_line) < 0)
- goto fault;
-
- if (copy_from_user(termios->c_cc, termio->c_cc, NCC) != 0)
- goto fault;
-
- return 0;
-
- fault:
- return -EFAULT;
-}
-
-/*
- * Translate a "termios" structure into a "termio". Ugh.
- */
-static inline int kernel_termios_to_user_termio(struct termio __user *termio,
- struct ktermios *termios)
-{
- if (put_user(termios->c_iflag, &termio->c_iflag) < 0 ||
- put_user(termios->c_oflag, &termio->c_oflag) < 0 ||
- put_user(termios->c_cflag, &termio->c_cflag) < 0 ||
- put_user(termios->c_lflag, &termio->c_lflag) < 0 ||
- put_user(termios->c_line, &termio->c_line) < 0 ||
- copy_to_user(termio->c_cc, termios->c_cc, NCC) != 0)
- return -EFAULT;
-
- return 0;
-}
-
-#ifdef TCGETS2
-static inline int user_termios_to_kernel_termios(struct ktermios *k,
- struct termios2 __user *u)
-{
- return copy_from_user(k, u, sizeof(struct termios2));
-}
-
-static inline int kernel_termios_to_user_termios(struct termios2 __user *u,
- struct ktermios *k)
-{
- return copy_to_user(u, k, sizeof(struct termios2));
-}
-
-static inline int user_termios_to_kernel_termios_1(struct ktermios *k,
- struct termios __user *u)
-{
- return copy_from_user(k, u, sizeof(struct termios));
-}
-
-static inline int kernel_termios_to_user_termios_1(struct termios __user *u,
- struct ktermios *k)
-{
- return copy_to_user(u, k, sizeof(struct termios));
-}
-#else /* TCGETS2 */
-static inline int user_termios_to_kernel_termios(struct ktermios *k,
- struct termios __user *u)
-{
- return copy_from_user(k, u, sizeof(struct termios));
-}
-
-static inline int kernel_termios_to_user_termios(struct termios __user *u,
- struct ktermios *k)
-{
- return copy_to_user(u, k, sizeof(struct termios));
-}
-#endif /* TCGETS2 */
-
-#endif /* _ASM_GENERIC_TERMIOS_H */
diff --git a/include/asm-generic/unaligned.h b/include/asm-generic/unaligned.h
index df30f11b4a46..699650f81970 100644
--- a/include/asm-generic/unaligned.h
+++ b/include/asm-generic/unaligned.h
@@ -126,7 +126,7 @@ static inline void put_unaligned_le24(const u32 val, void *p)
__put_unaligned_le24(val, p);
}
-static inline void __put_unaligned_be48(const u64 val, __u8 *p)
+static inline void __put_unaligned_be48(const u64 val, u8 *p)
{
*p++ = val >> 40;
*p++ = val >> 32;
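__put_unaligned_be48() stores the value most-significant byte first into a buffer with no alignment requirement. A small illustration, assuming a local 6-byte buffer:

u8 buf[6];

/* Writes 0x11 0x22 0x33 0x44 0x55 0x66, matching the shifts shown above. */
__put_unaligned_be48(0x112233445566ULL, buf);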
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index 7515a465ec03..c15de165ec8f 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -154,6 +154,14 @@
#define MEM_DISCARD(sec) *(.mem##sec)
#endif
+#ifndef CONFIG_HAVE_DYNAMIC_FTRACE_NO_PATCHABLE
+#define KEEP_PATCHABLE KEEP(*(__patchable_function_entries))
+#define PATCHABLE_DISCARDS
+#else
+#define KEEP_PATCHABLE
+#define PATCHABLE_DISCARDS *(__patchable_function_entries)
+#endif
+
#ifdef CONFIG_FTRACE_MCOUNT_RECORD
/*
* The ftrace call sites are logged to a section whose name depends on the
@@ -172,7 +180,7 @@
#define MCOUNT_REC() . = ALIGN(8); \
__start_mcount_loc = .; \
KEEP(*(__mcount_loc)) \
- KEEP(*(__patchable_function_entries)) \
+ KEEP_PATCHABLE \
__stop_mcount_loc = .; \
ftrace_stub_graph = ftrace_stub; \
ftrace_ops_list_func = arch_ftrace_ops_list_func;
@@ -345,6 +353,9 @@
*(__tracepoints) \
/* implement dynamic printk debug */ \
. = ALIGN(8); \
+ __start___dyndbg_classes = .; \
+ KEEP(*(__dyndbg_classes)) \
+ __stop___dyndbg_classes = .; \
__start___dyndbg = .; \
KEEP(*(__dyndbg)) \
__stop___dyndbg = .; \
@@ -422,6 +433,22 @@
#endif
/*
+ * .kcfi_traps contains a list KCFI trap locations.
+ */
+#ifndef KCFI_TRAPS
+#ifdef CONFIG_ARCH_USES_CFI_TRAPS
+#define KCFI_TRAPS \
+ __kcfi_traps : AT(ADDR(__kcfi_traps) - LOAD_OFFSET) { \
+ __start___kcfi_traps = .; \
+ KEEP(*(.kcfi_traps)) \
+ __stop___kcfi_traps = .; \
+ }
+#else
+#define KCFI_TRAPS
+#endif
+#endif
+
+/*
* Read only Data
*/
#define RO_DATA(align) \
@@ -529,6 +556,8 @@
__stop___modver = .; \
} \
\
+ KCFI_TRAPS \
+ \
RO_EXCEPTION_TABLE \
NOTES \
BTF \
@@ -538,21 +567,6 @@
/*
- * .text..L.cfi.jumptable.* contain Control-Flow Integrity (CFI)
- * jump table entries.
- */
-#ifdef CONFIG_CFI_CLANG
-#define TEXT_CFI_JT \
- . = ALIGN(PMD_SIZE); \
- __cfi_jt_start = .; \
- *(.text..L.cfi.jumptable .text..L.cfi.jumptable.*) \
- . = ALIGN(PMD_SIZE); \
- __cfi_jt_end = .;
-#else
-#define TEXT_CFI_JT
-#endif
-
-/*
* Non-instrumentable text section
*/
#define NOINSTR_TEXT \
@@ -579,7 +593,6 @@
*(.text..refcount) \
*(.ref.text) \
*(.text.asan.* .text.tsan.*) \
- TEXT_CFI_JT \
MEM_KEEP(init.text*) \
MEM_KEEP(exit.text*) \
@@ -1008,8 +1021,7 @@
* keep any .init_array.* sections.
* https://bugs.llvm.org/show_bug.cgi?id=46478
*/
-#if defined(CONFIG_GCOV_KERNEL) || defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KCSAN) || \
- defined(CONFIG_CFI_CLANG)
+#if defined(CONFIG_GCOV_KERNEL) || defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KCSAN)
# ifdef CONFIG_CONSTRUCTORS
# define SANITIZER_DISCARDS \
*(.eh_frame)
@@ -1024,6 +1036,7 @@
#define COMMON_DISCARDS \
SANITIZER_DISCARDS \
+ PATCHABLE_DISCARDS \
*(.discard) \
*(.discard.*) \
*(.modinfo) \
diff --git a/include/clocksource/timer-ti-dm.h b/include/clocksource/timer-ti-dm.h
index b0f80cfd2a26..77eceeae708c 100644
--- a/include/clocksource/timer-ti-dm.h
+++ b/include/clocksource/timer-ti-dm.h
@@ -52,10 +52,6 @@
#define OMAP_TIMER_TRIGGER_OVERFLOW 0x01
#define OMAP_TIMER_TRIGGER_OVERFLOW_AND_COMPARE 0x02
-/* posted mode types */
-#define OMAP_TIMER_NONPOSTED 0x00
-#define OMAP_TIMER_POSTED 0x01
-
/* timer capabilities used in hwmod database */
#define OMAP_TIMER_SECURE 0x80000000
#define OMAP_TIMER_ALWON 0x40000000
@@ -63,73 +59,13 @@
#define OMAP_TIMER_NEEDS_RESET 0x10000000
#define OMAP_TIMER_HAS_DSP_IRQ 0x08000000
-/*
- * timer errata flags
- *
- * Errata i103/i767 impacts all OMAP3/4/5 devices including AM33xx. This
- * errata prevents us from using posted mode on these devices, unless the
- * timer counter register is never read. For more details please refer to
- * the OMAP3/4/5 errata documents.
- */
-#define OMAP_TIMER_ERRATA_I103_I767 0x80000000
-
-struct timer_regs {
- u32 ocp_cfg;
- u32 tidr;
- u32 tier;
- u32 twer;
- u32 tclr;
- u32 tcrr;
- u32 tldr;
- u32 ttrg;
- u32 twps;
- u32 tmar;
- u32 tcar1;
- u32 tsicr;
- u32 tcar2;
- u32 tpir;
- u32 tnir;
- u32 tcvr;
- u32 tocr;
- u32 towr;
-};
-
struct omap_dm_timer {
- int id;
- int irq;
- struct clk *fclk;
-
- void __iomem *io_base;
- void __iomem *irq_stat; /* TISR/IRQSTATUS interrupt status */
- void __iomem *irq_ena; /* irq enable */
- void __iomem *irq_dis; /* irq disable, only on v2 ip */
- void __iomem *pend; /* write pending */
- void __iomem *func_base; /* function register base */
-
- atomic_t enabled;
- unsigned long rate;
- unsigned reserved:1;
- unsigned posted:1;
- struct timer_regs context;
- int revision;
- u32 capability;
- u32 errata;
- struct platform_device *pdev;
- struct list_head node;
- struct notifier_block nb;
};
-int omap_dm_timer_reserve_systimer(int id);
-struct omap_dm_timer *omap_dm_timer_request_by_cap(u32 cap);
-
int omap_dm_timer_get_irq(struct omap_dm_timer *timer);
u32 omap_dm_timer_modify_idlect_mask(u32 inputmask);
-int omap_dm_timer_trigger(struct omap_dm_timer *timer);
-
-int omap_dm_timers_active(void);
-
/*
* Do not use the defines below, they are not needed. They should be only
* used by dmtimer.c and sys_timer related code.
@@ -199,52 +135,4 @@ int omap_dm_timers_active(void);
#define _OMAP_TIMER_TICK_INT_MASK_SET_OFFSET 0x54 /* TOCR, 34xx only */
#define _OMAP_TIMER_TICK_INT_MASK_COUNT_OFFSET 0x58 /* TOWR, 34xx only */
-/* register offsets with the write pending bit encoded */
-#define WPSHIFT 16
-
-#define OMAP_TIMER_WAKEUP_EN_REG (_OMAP_TIMER_WAKEUP_EN_OFFSET \
- | (WP_NONE << WPSHIFT))
-
-#define OMAP_TIMER_CTRL_REG (_OMAP_TIMER_CTRL_OFFSET \
- | (WP_TCLR << WPSHIFT))
-
-#define OMAP_TIMER_COUNTER_REG (_OMAP_TIMER_COUNTER_OFFSET \
- | (WP_TCRR << WPSHIFT))
-
-#define OMAP_TIMER_LOAD_REG (_OMAP_TIMER_LOAD_OFFSET \
- | (WP_TLDR << WPSHIFT))
-
-#define OMAP_TIMER_TRIGGER_REG (_OMAP_TIMER_TRIGGER_OFFSET \
- | (WP_TTGR << WPSHIFT))
-
-#define OMAP_TIMER_WRITE_PEND_REG (_OMAP_TIMER_WRITE_PEND_OFFSET \
- | (WP_NONE << WPSHIFT))
-
-#define OMAP_TIMER_MATCH_REG (_OMAP_TIMER_MATCH_OFFSET \
- | (WP_TMAR << WPSHIFT))
-
-#define OMAP_TIMER_CAPTURE_REG (_OMAP_TIMER_CAPTURE_OFFSET \
- | (WP_NONE << WPSHIFT))
-
-#define OMAP_TIMER_IF_CTRL_REG (_OMAP_TIMER_IF_CTRL_OFFSET \
- | (WP_NONE << WPSHIFT))
-
-#define OMAP_TIMER_CAPTURE2_REG (_OMAP_TIMER_CAPTURE2_OFFSET \
- | (WP_NONE << WPSHIFT))
-
-#define OMAP_TIMER_TICK_POS_REG (_OMAP_TIMER_TICK_POS_OFFSET \
- | (WP_TPIR << WPSHIFT))
-
-#define OMAP_TIMER_TICK_NEG_REG (_OMAP_TIMER_TICK_NEG_OFFSET \
- | (WP_TNIR << WPSHIFT))
-
-#define OMAP_TIMER_TICK_COUNT_REG (_OMAP_TIMER_TICK_COUNT_OFFSET \
- | (WP_TCVR << WPSHIFT))
-
-#define OMAP_TIMER_TICK_INT_MASK_SET_REG \
- (_OMAP_TIMER_TICK_INT_MASK_SET_OFFSET | (WP_TOCR << WPSHIFT))
-
-#define OMAP_TIMER_TICK_INT_MASK_COUNT_REG \
- (_OMAP_TIMER_TICK_INT_MASK_COUNT_OFFSET | (WP_TOWR << WPSHIFT))
-
#endif /* __CLOCKSOURCE_DMTIMER_H */
diff --git a/include/crypto/aria.h b/include/crypto/aria.h
index 4a86661788e8..254da46cc385 100644
--- a/include/crypto/aria.h
+++ b/include/crypto/aria.h
@@ -32,18 +32,10 @@
#define ARIA_RD_KEY_WORDS (ARIA_BLOCK_SIZE / sizeof(u32))
struct aria_ctx {
- int key_length;
- int rounds;
u32 enc_key[ARIA_MAX_RD_KEYS][ARIA_RD_KEY_WORDS];
u32 dec_key[ARIA_MAX_RD_KEYS][ARIA_RD_KEY_WORDS];
-};
-
-static const u32 key_rc[5][4] = {
- { 0x517cc1b7, 0x27220a94, 0xfe13abe8, 0xfa9a6ee0 },
- { 0x6db14acc, 0x9e21c820, 0xff28b1d5, 0xef5de2b0 },
- { 0xdb92371d, 0x2126e970, 0x03249775, 0x04e8c90e },
- { 0x517cc1b7, 0x27220a94, 0xfe13abe8, 0xfa9a6ee0 },
- { 0x6db14acc, 0x9e21c820, 0xff28b1d5, 0xef5de2b0 }
+ int rounds;
+ int key_length;
};
static const u32 s1[256] = {
@@ -458,4 +450,9 @@ static inline void aria_gsrk(u32 *rk, u32 *x, u32 *y, u32 n)
((y[(q + 2) % 4]) << (32 - r));
}
+void aria_encrypt(void *ctx, u8 *out, const u8 *in);
+void aria_decrypt(void *ctx, u8 *out, const u8 *in);
+int aria_set_key(struct crypto_tfm *tfm, const u8 *in_key,
+ unsigned int key_len);
+
#endif
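These new prototypes export the generic ARIA primitives so other implementations can reuse them. A hedged single-block sketch, assuming a crypto_tfm already bound to the generic ARIA cipher; example_aria_encrypt_block() is illustrative:

static int example_aria_encrypt_block(struct crypto_tfm *tfm, const u8 *key,
				      unsigned int key_len,
				      const u8 in[ARIA_BLOCK_SIZE],
				      u8 out[ARIA_BLOCK_SIZE])
{
	int err = aria_set_key(tfm, key, key_len);

	if (err)
		return err;

	/* The exported helpers operate directly on the aria_ctx stored in
	 * the tfm's context area. */
	aria_encrypt(crypto_tfm_ctx(tfm), out, in);
	return 0;
}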
diff --git a/include/crypto/internal/aead.h b/include/crypto/internal/aead.h
index 27b7b0224ea6..d482017f3e20 100644
--- a/include/crypto/internal/aead.h
+++ b/include/crypto/internal/aead.h
@@ -114,31 +114,6 @@ static inline void aead_init_queue(struct aead_queue *queue,
crypto_init_queue(&queue->base, max_qlen);
}
-static inline int aead_enqueue_request(struct aead_queue *queue,
- struct aead_request *request)
-{
- return crypto_enqueue_request(&queue->base, &request->base);
-}
-
-static inline struct aead_request *aead_dequeue_request(
- struct aead_queue *queue)
-{
- struct crypto_async_request *req;
-
- req = crypto_dequeue_request(&queue->base);
-
- return req ? container_of(req, struct aead_request, base) : NULL;
-}
-
-static inline struct aead_request *aead_get_backlog(struct aead_queue *queue)
-{
- struct crypto_async_request *req;
-
- req = crypto_get_backlog(&queue->base);
-
- return req ? container_of(req, struct aead_request, base) : NULL;
-}
-
static inline unsigned int crypto_aead_alg_chunksize(struct aead_alg *alg)
{
return alg->chunksize;
diff --git a/include/crypto/scatterwalk.h b/include/crypto/scatterwalk.h
index 6407b4b61350..ccdb05f68a75 100644
--- a/include/crypto/scatterwalk.h
+++ b/include/crypto/scatterwalk.h
@@ -46,12 +46,6 @@ static inline void scatterwalk_advance(struct scatter_walk *walk,
walk->offset += nbytes;
}
-static inline unsigned int scatterwalk_aligned(struct scatter_walk *walk,
- unsigned int alignmask)
-{
- return !(walk->offset & alignmask);
-}
-
static inline struct page *scatterwalk_page(struct scatter_walk *walk)
{
return sg_page(walk->sg) + (walk->offset >> PAGE_SHIFT);
diff --git a/include/drm/display/drm_dp.h b/include/drm/display/drm_dp.h
index 9e3aff7e68bb..e934aab357be 100644
--- a/include/drm/display/drm_dp.h
+++ b/include/drm/display/drm_dp.h
@@ -239,6 +239,7 @@
#define DP_DSC_SUPPORT 0x060 /* DP 1.4 */
# define DP_DSC_DECOMPRESSION_IS_SUPPORTED (1 << 0)
+# define DP_DSC_PASSTHROUGH_IS_SUPPORTED (1 << 1)
#define DP_DSC_REV 0x061
# define DP_DSC_MAJOR_MASK (0xf << 0)
@@ -1536,6 +1537,8 @@ enum drm_dp_phy {
#define DP_SDP_VSC_EXT_CEA 0x21 /* DP 1.4 */
/* 0x80+ CEA-861 infoframe types */
+#define DP_SDP_AUDIO_INFOFRAME_HB2 0x1b
+
/**
* struct dp_sdp_header - DP secondary data packet header
* @HB0: Secondary Data Packet ID
diff --git a/include/drm/display/drm_dp_helper.h b/include/drm/display/drm_dp_helper.h
index db0fe9f8a612..ab55453f2d2c 100644
--- a/include/drm/display/drm_dp_helper.h
+++ b/include/drm/display/drm_dp_helper.h
@@ -69,6 +69,8 @@ bool drm_dp_128b132b_link_training_failed(const u8 link_status[DP_LINK_STATUS_SI
u8 drm_dp_link_rate_to_bw_code(int link_rate);
int drm_dp_bw_code_to_link_rate(u8 link_bw);
+const char *drm_dp_phy_name(enum drm_dp_phy dp_phy);
+
/**
* struct drm_dp_vsc_sdp - drm DP VSC SDP
*
diff --git a/include/drm/display/drm_dp_mst_helper.h b/include/drm/display/drm_dp_mst_helper.h
index 10adec068b7f..41fd8352ab65 100644
--- a/include/drm/display/drm_dp_mst_helper.h
+++ b/include/drm/display/drm_dp_mst_helper.h
@@ -49,20 +49,6 @@ struct drm_dp_mst_topology_ref_history {
struct drm_dp_mst_branch;
/**
- * struct drm_dp_vcpi - Virtual Channel Payload Identifier
- * @vcpi: Virtual channel ID.
- * @pbn: Payload Bandwidth Number for this channel
- * @aligned_pbn: PBN aligned with slot size
- * @num_slots: number of slots for this PBN
- */
-struct drm_dp_vcpi {
- int vcpi;
- int pbn;
- int aligned_pbn;
- int num_slots;
-};
-
-/**
* struct drm_dp_mst_port - MST port
* @port_num: port number
* @input: if this port is an input port. Protected by
@@ -86,6 +72,8 @@ struct drm_dp_vcpi {
* @next: link to next port on this branch device
* @aux: i2c aux transport to talk to device connected to this port, protected
* by &drm_dp_mst_topology_mgr.base.lock.
+ * @passthrough_aux: parent aux to which DSC pass-through requests should be
+ * sent, only set if DSC pass-through is possible.
* @parent: branch device parent of this port
* @vcpi: Virtual Channel Payload info for this port.
* @connector: DRM connector this port is connected to. Protected by
@@ -140,9 +128,9 @@ struct drm_dp_mst_port {
*/
struct drm_dp_mst_branch *mstb;
struct drm_dp_aux aux; /* i2c bus for this port? */
+ struct drm_dp_aux *passthrough_aux;
struct drm_dp_mst_branch *parent;
- struct drm_dp_vcpi vcpi;
struct drm_connector *connector;
struct drm_dp_mst_topology_mgr *mgr;
@@ -527,35 +515,104 @@ struct drm_dp_mst_topology_cbs {
void (*poll_hpd_irq)(struct drm_dp_mst_topology_mgr *mgr);
};
-#define DP_MAX_PAYLOAD (sizeof(unsigned long) * 8)
-
-#define DP_PAYLOAD_LOCAL 1
-#define DP_PAYLOAD_REMOTE 2
-#define DP_PAYLOAD_DELETE_LOCAL 3
-
-struct drm_dp_payload {
- int payload_state;
- int start_slot;
- int num_slots;
- int vcpi;
-};
-
#define to_dp_mst_topology_state(x) container_of(x, struct drm_dp_mst_topology_state, base)
-struct drm_dp_vcpi_allocation {
+/**
+ * struct drm_dp_mst_atomic_payload - Atomic state struct for an MST payload
+ *
+ * The primary atomic state structure for a given MST payload. Stores information like current
+ * bandwidth allocation, intended action for this payload, etc.
+ */
+struct drm_dp_mst_atomic_payload {
+ /** @port: The MST port assigned to this payload */
struct drm_dp_mst_port *port;
- int vcpi;
+
+ /**
+ * @vc_start_slot: The time slot that this payload starts on. Because payload start slots
+ * can't be determined ahead of time, the contents of this value are UNDEFINED at atomic
+ * check time. This shouldn't usually matter, as the start slot should never be relevant for
+ * atomic state computations.
+ *
+ * Since this value is determined at commit time instead of check time, this value is
+ * protected by the MST helpers ensuring that async commits operating on the given topology
+ * never run in parallel. In the event that a driver does need to read this value (e.g. to
+ * inform hardware of the starting timeslot for a payload), the driver may either:
+ *
+ * * Read this field during the atomic commit after
+ * drm_dp_mst_atomic_wait_for_dependencies() has been called, which will ensure the
+ * previous MST states payload start slots have been copied over to the new state. Note
+ * that a new start slot won't be assigned/removed from this payload until
+ * drm_dp_add_payload_part1()/drm_dp_remove_payload() have been called.
+ * * Acquire the MST modesetting lock, and then wait for any pending MST-related commits to
+ * get committed to hardware by calling drm_crtc_commit_wait() on each of the
+ * &drm_crtc_commit structs in &drm_dp_mst_topology_state.commit_deps.
+ *
+ * If neither of the two above solutions suffice (e.g. the driver needs to read the start
+ * slot in the middle of an atomic commit without waiting for some reason), then drivers
+ * should cache this value themselves after changing payloads.
+ */
+ s8 vc_start_slot;
+
+ /** @vcpi: The Virtual Channel Payload Identifier */
+ u8 vcpi;
+ /**
+ * @time_slots:
+ * The number of timeslots allocated to this payload from the source DP Tx to
+ * the immediate downstream DP Rx
+ */
+ int time_slots;
+ /** @pbn: The payload bandwidth for this payload */
int pbn;
- bool dsc_enabled;
+
+ /** @delete: Whether or not we intend to delete this payload during this atomic commit */
+ bool delete : 1;
+ /** @dsc_enabled: Whether or not this payload has DSC enabled */
+ bool dsc_enabled : 1;
+
+ /** @next: The list node for this payload */
struct list_head next;
};
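
A hedged sketch of the second option described for @vc_start_slot, waiting on the topology state's commit dependencies before reading the field (the surrounding locking and the dev/port variables are assumptions, not part of this header):

	/* Sketch only: run with the MST modesetting lock held, as described above. */
	for (i = 0; i < mst_state->num_commit_deps; i++) {
		ret = drm_crtc_commit_wait(mst_state->commit_deps[i]);
		if (ret)
			drm_err(dev, "MST commit wait failed: %d\n", ret);
	}
	start_slot = drm_atomic_get_mst_payload_state(mst_state, port)->vc_start_slot;
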
+/**
+ * struct drm_dp_mst_topology_state - DisplayPort MST topology atomic state
+ *
+ * This struct represents the atomic state of the toplevel DisplayPort MST manager
+ */
struct drm_dp_mst_topology_state {
+ /** @base: Base private state for atomic */
struct drm_private_state base;
- struct list_head vcpis;
+
+ /** @mgr: The topology manager */
struct drm_dp_mst_topology_mgr *mgr;
+
+ /**
+ * @pending_crtc_mask: A bitmask of all CRTCs this topology state touches, drivers may
+ * modify this to add additional dependencies if needed.
+ */
+ u32 pending_crtc_mask;
+ /**
+ * @commit_deps: A list of all CRTC commits affecting this topology, this field isn't
+ * populated until drm_dp_mst_atomic_wait_for_dependencies() is called.
+ */
+ struct drm_crtc_commit **commit_deps;
+ /** @num_commit_deps: The number of CRTC commits in @commit_deps */
+ size_t num_commit_deps;
+
+ /** @payload_mask: A bitmask of allocated VCPIs, used for VCPI assignments */
+ u32 payload_mask;
+ /** @payloads: The list of payloads being created/destroyed in this state */
+ struct list_head payloads;
+
+ /** @total_avail_slots: The total number of slots this topology can handle (63 or 64) */
u8 total_avail_slots;
+ /** @start_slot: The first usable time slot in this topology (1 or 0) */
u8 start_slot;
+
+ /**
+ * @pbn_div: The current PBN divisor for this topology. The driver is expected to fill this
+ * out itself.
+ */
+ int pbn_div;
};
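
Since @pbn_div is now driver-filled, a minimal hedged sketch of initializing it at atomic check time (the link rate and lane count are assumed to come from driver-specific link configuration):

	mst_state = drm_atomic_get_mst_topology_state(state, mgr);
	if (IS_ERR(mst_state))
		return PTR_ERR(mst_state);

	/* link_rate and lane_count are driver-provided values. */
	mst_state->pbn_div = drm_dp_get_vc_payload_bw(mgr, link_rate, lane_count);
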
#define to_dp_mst_topology_mgr(x) container_of(x, struct drm_dp_mst_topology_mgr, base)
@@ -596,14 +653,6 @@ struct drm_dp_mst_topology_mgr {
*/
int max_payloads;
/**
- * @max_lane_count: maximum number of lanes the GPU can drive.
- */
- int max_lane_count;
- /**
- * @max_link_rate: maximum link rate per lane GPU can output, in kHz.
- */
- int max_link_rate;
- /**
* @conn_base_id: DRM connector ID this mgr is connected to. Only used
* to build the MST connector path value.
*/
@@ -646,6 +695,20 @@ struct drm_dp_mst_topology_mgr {
bool payload_id_table_cleared : 1;
/**
+ * @payload_count: The number of currently active payloads in hardware. This value is only
+ * intended to be used internally by MST helpers for payload tracking, and is only safe to
+ * read/write from the atomic commit (not check) context.
+ */
+ u8 payload_count;
+
+ /**
+ * @next_start_slot: The starting timeslot to use for new VC payloads. This value is used
+ * internally by MST helpers for payload tracking, and is only safe to read/write from the
+ * atomic commit (not check) context.
+ */
+ u8 next_start_slot;
+
+ /**
* @mst_primary: Pointer to the primary/first branch device.
*/
struct drm_dp_mst_branch *mst_primary;
@@ -658,10 +721,6 @@ struct drm_dp_mst_topology_mgr {
* @sink_count: Sink count from DEVICE_SERVICE_IRQ_VECTOR_ESI0.
*/
u8 sink_count;
- /**
- * @pbn_div: PBN to slots divisor.
- */
- int pbn_div;
/**
* @funcs: Atomic helper callbacks
@@ -679,32 +738,6 @@ struct drm_dp_mst_topology_mgr {
struct list_head tx_msg_downq;
/**
- * @payload_lock: Protect payload information.
- */
- struct mutex payload_lock;
- /**
- * @proposed_vcpis: Array of pointers for the new VCPI allocation. The
- * VCPI structure itself is &drm_dp_mst_port.vcpi, and the size of
- * this array is determined by @max_payloads.
- */
- struct drm_dp_vcpi **proposed_vcpis;
- /**
- * @payloads: Array of payloads. The size of this array is determined
- * by @max_payloads.
- */
- struct drm_dp_payload *payloads;
- /**
- * @payload_mask: Elements of @payloads actually in use. Since
- * reallocation of active outputs isn't possible gaps can be created by
- * disabling outputs out of order compared to how they've been enabled.
- */
- unsigned long payload_mask;
- /**
- * @vcpi_mask: Similar to @payload_mask, but for @proposed_vcpis.
- */
- unsigned long vcpi_mask;
-
- /**
* @tx_waitq: Wait to queue stall for the tx worker.
*/
wait_queue_head_t tx_waitq;
@@ -775,9 +808,7 @@ struct drm_dp_mst_topology_mgr {
int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr,
struct drm_device *dev, struct drm_dp_aux *aux,
int max_dpcd_transaction_bytes,
- int max_payloads,
- int max_lane_count, int max_link_rate,
- int conn_base_id);
+ int max_payloads, int conn_base_id);
void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr);
@@ -800,28 +831,17 @@ int drm_dp_get_vc_payload_bw(const struct drm_dp_mst_topology_mgr *mgr,
int drm_dp_calc_pbn_mode(int clock, int bpp, bool dsc);
-bool drm_dp_mst_allocate_vcpi(struct drm_dp_mst_topology_mgr *mgr,
- struct drm_dp_mst_port *port, int pbn, int slots);
-
-int drm_dp_mst_get_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port);
-
-
-void drm_dp_mst_reset_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port);
-
void drm_dp_mst_update_slots(struct drm_dp_mst_topology_state *mst_state, uint8_t link_encoding_cap);
-void drm_dp_mst_deallocate_vcpi(struct drm_dp_mst_topology_mgr *mgr,
- struct drm_dp_mst_port *port);
-
-
-int drm_dp_find_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr,
- int pbn);
-
-
-int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr, int start_slot);
-
-
-int drm_dp_update_payload_part2(struct drm_dp_mst_topology_mgr *mgr);
+int drm_dp_add_payload_part1(struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_mst_topology_state *mst_state,
+ struct drm_dp_mst_atomic_payload *payload);
+int drm_dp_add_payload_part2(struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_atomic_state *state,
+ struct drm_dp_mst_atomic_payload *payload);
+void drm_dp_remove_payload(struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_mst_topology_state *mst_state,
+ struct drm_dp_mst_atomic_payload *payload);
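
A hedged sketch of how the new two-part payload helpers are intended to replace the old drm_dp_update_payload_part1()/part2() calls in a commit path; the example_hw_*() helpers are hypothetical driver code and the exact ordering around stream enable is driver-specific:

	/* Enabling a stream: allocate the payload table entry first. */
	drm_dp_add_payload_part1(mgr, mst_state, payload);
	example_hw_enable_stream(payload->vc_start_slot, payload->time_slots);
	drm_dp_check_act_status(mgr);
	drm_dp_add_payload_part2(mgr, state, payload);

	/* Disabling a stream: drop the payload before stopping the hardware. */
	drm_dp_remove_payload(mgr, mst_state, payload);
	example_hw_disable_stream();
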
int drm_dp_check_act_status(struct drm_dp_mst_topology_mgr *mgr);
@@ -843,36 +863,51 @@ int drm_dp_mst_connector_late_register(struct drm_connector *connector,
void drm_dp_mst_connector_early_unregister(struct drm_connector *connector,
struct drm_dp_mst_port *port);
-struct drm_dp_mst_topology_state *drm_atomic_get_mst_topology_state(struct drm_atomic_state *state,
- struct drm_dp_mst_topology_mgr *mgr);
+struct drm_dp_mst_topology_state *
+drm_atomic_get_mst_topology_state(struct drm_atomic_state *state,
+ struct drm_dp_mst_topology_mgr *mgr);
+struct drm_dp_mst_topology_state *
+drm_atomic_get_new_mst_topology_state(struct drm_atomic_state *state,
+ struct drm_dp_mst_topology_mgr *mgr);
+struct drm_dp_mst_atomic_payload *
+drm_atomic_get_mst_payload_state(struct drm_dp_mst_topology_state *state,
+ struct drm_dp_mst_port *port);
int __must_check
-drm_dp_atomic_find_vcpi_slots(struct drm_atomic_state *state,
+drm_dp_atomic_find_time_slots(struct drm_atomic_state *state,
struct drm_dp_mst_topology_mgr *mgr,
- struct drm_dp_mst_port *port, int pbn,
- int pbn_div);
+ struct drm_dp_mst_port *port, int pbn);
int drm_dp_mst_atomic_enable_dsc(struct drm_atomic_state *state,
struct drm_dp_mst_port *port,
- int pbn, int pbn_div,
- bool enable);
+ int pbn, bool enable);
int __must_check
drm_dp_mst_add_affected_dsc_crtcs(struct drm_atomic_state *state,
struct drm_dp_mst_topology_mgr *mgr);
int __must_check
-drm_dp_atomic_release_vcpi_slots(struct drm_atomic_state *state,
+drm_dp_atomic_release_time_slots(struct drm_atomic_state *state,
struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_port *port);
+void drm_dp_mst_atomic_wait_for_dependencies(struct drm_atomic_state *state);
+int __must_check drm_dp_mst_atomic_setup_commit(struct drm_atomic_state *state);
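
A hedged sketch of the check-time side with the renamed time-slot helpers (the PBN inputs and where each call sits in a driver's atomic_check are illustrative only):

	/* Size the payload for a connector being enabled. */
	pbn = drm_dp_calc_pbn_mode(mode_clock_khz, bpp, false);
	slots = drm_dp_atomic_find_time_slots(state, mgr, port, pbn);
	if (slots < 0)
		return slots;

	/* For a connector being disabled instead: */
	ret = drm_dp_atomic_release_time_slots(state, mgr, port);
	if (ret)
		return ret;

	/* Once per atomic check, after all payloads have been sized: */
	ret = drm_dp_mst_atomic_check(state);
	if (!ret)
		ret = drm_dp_mst_atomic_setup_commit(state);
	if (ret)
		return ret;

	/* And at the start of the commit tail: */
	drm_dp_mst_atomic_wait_for_dependencies(state);
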
int drm_dp_send_power_updown_phy(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_port *port, bool power_up);
int drm_dp_send_query_stream_enc_status(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_port *port,
struct drm_dp_query_stream_enc_status_ack_reply *status);
int __must_check drm_dp_mst_atomic_check(struct drm_atomic_state *state);
+int __must_check drm_dp_mst_root_conn_atomic_check(struct drm_connector_state *new_conn_state,
+ struct drm_dp_mst_topology_mgr *mgr);
void drm_dp_mst_get_port_malloc(struct drm_dp_mst_port *port);
void drm_dp_mst_put_port_malloc(struct drm_dp_mst_port *port);
struct drm_dp_aux *drm_dp_mst_dsc_aux_for_port(struct drm_dp_mst_port *port);
+static inline struct drm_dp_mst_topology_state *
+to_drm_dp_mst_topology_state(struct drm_private_state *state)
+{
+ return container_of(state, struct drm_dp_mst_topology_state, base);
+}
+
extern const struct drm_private_state_funcs drm_dp_mst_topology_state_funcs;
/**
diff --git a/include/drm/drm_atomic_helper.h b/include/drm/drm_atomic_helper.h
index 2a0b17842402..06d8902a8097 100644
--- a/include/drm/drm_atomic_helper.h
+++ b/include/drm/drm_atomic_helper.h
@@ -34,12 +34,24 @@
#include <drm/drm_atomic_state_helper.h>
#include <drm/drm_util.h>
+/*
+ * Drivers that don't allow primary plane scaling may pass this macro in place
+ * of the min/max scale parameters of the plane-state checker function.
+ *
+ * Due to src being in 16.16 fixed point and dest being in integer pixels,
+ * 1<<16 represents no scaling.
+ */
+#define DRM_PLANE_NO_SCALING (1<<16)
+
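
For example, a driver whose primary plane cannot scale or be repositioned might check its plane state like this (a sketch; the two boolean arguments follow the existing drm_atomic_helper_check_plane_state() signature):

	ret = drm_atomic_helper_check_plane_state(new_plane_state, crtc_state,
						  DRM_PLANE_NO_SCALING,
						  DRM_PLANE_NO_SCALING,
						  false, false);
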
struct drm_atomic_state;
struct drm_private_obj;
struct drm_private_state;
int drm_atomic_helper_check_modeset(struct drm_device *dev,
struct drm_atomic_state *state);
+int
+drm_atomic_helper_check_wb_encoder_state(struct drm_encoder *encoder,
+ struct drm_connector_state *conn_state);
int drm_atomic_helper_check_plane_state(struct drm_plane_state *plane_state,
const struct drm_crtc_state *crtc_state,
int min_scale,
diff --git a/include/drm/drm_bridge.h b/include/drm/drm_bridge.h
index d434ab416ad4..6b65b0dfb4fb 100644
--- a/include/drm/drm_bridge.h
+++ b/include/drm/drm_bridge.h
@@ -930,6 +930,8 @@ struct drm_bridge *devm_drm_panel_bridge_add(struct device *dev,
struct drm_bridge *devm_drm_panel_bridge_add_typed(struct device *dev,
struct drm_panel *panel,
u32 connector_type);
+struct drm_bridge *drmm_panel_bridge_add(struct drm_device *drm,
+ struct drm_panel *panel);
struct drm_connector *drm_panel_bridge_connector(struct drm_bridge *bridge);
#else
static inline bool drm_bridge_is_panel(const struct drm_bridge *bridge)
@@ -947,6 +949,8 @@ static inline int drm_panel_bridge_set_orientation(struct drm_connector *connect
#if defined(CONFIG_OF) && defined(CONFIG_DRM_PANEL_BRIDGE)
struct drm_bridge *devm_drm_of_get_bridge(struct device *dev, struct device_node *node,
u32 port, u32 endpoint);
+struct drm_bridge *drmm_of_get_bridge(struct drm_device *drm, struct device_node *node,
+ u32 port, u32 endpoint);
#else
static inline struct drm_bridge *devm_drm_of_get_bridge(struct device *dev,
struct device_node *node,
@@ -955,6 +959,14 @@ static inline struct drm_bridge *devm_drm_of_get_bridge(struct device *dev,
{
return ERR_PTR(-ENODEV);
}
+
+static inline struct drm_bridge *drmm_of_get_bridge(struct drm_device *drm,
+ struct device_node *node,
+ u32 port,
+ u32 endpoint)
+{
+ return ERR_PTR(-ENODEV);
+}
#endif
#endif
diff --git a/include/drm/drm_connector.h b/include/drm/drm_connector.h
index 7df7876b2ad5..56aee949c6fa 100644
--- a/include/drm/drm_connector.h
+++ b/include/drm/drm_connector.h
@@ -324,6 +324,22 @@ struct drm_monitor_range_info {
};
/**
+ * struct drm_luminance_range_info - Panel's luminance range for
+ * &drm_display_info. Calculated using data in EDID
+ *
+ * This struct is used to store a luminance range supported by panel
+ * as calculated using data from EDID's static hdr metadata.
+ *
+ * @min_luminance: This is the min supported luminance value
+ *
+ * @max_luminance: This is the max supported luminance value
+ */
+struct drm_luminance_range_info {
+ u32 min_luminance;
+ u32 max_luminance;
+};
+
+/**
* enum drm_privacy_screen_status - privacy screen status
*
* This enum is used to track and control the state of the integrated privacy
@@ -625,6 +641,11 @@ struct drm_display_info {
struct drm_monitor_range_info monitor_range;
/**
+ * @luminance_range: Luminance range supported by panel
+ */
+ struct drm_luminance_range_info luminance_range;
+
+ /**
* @mso_stream_count: eDP Multi-SST Operation (MSO) stream count from
* the DisplayID VESA vendor block. 0 for conventional Single-Stream
* Transport (SST), or 2 or 4 MSO streams.
@@ -1677,6 +1698,11 @@ int drm_connector_init_with_ddc(struct drm_device *dev,
const struct drm_connector_funcs *funcs,
int connector_type,
struct i2c_adapter *ddc);
+int drmm_connector_init(struct drm_device *dev,
+ struct drm_connector *connector,
+ const struct drm_connector_funcs *funcs,
+ int connector_type,
+ struct i2c_adapter *ddc);
void drm_connector_attach_edid_property(struct drm_connector *connector);
int drm_connector_register(struct drm_connector *connector);
void drm_connector_unregister(struct drm_connector *connector);
diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h
index ffc1cde331d3..8e1cbc75143e 100644
--- a/include/drm/drm_crtc.h
+++ b/include/drm/drm_crtc.h
@@ -1216,6 +1216,15 @@ int drm_crtc_init_with_planes(struct drm_device *dev,
struct drm_plane *cursor,
const struct drm_crtc_funcs *funcs,
const char *name, ...);
+
+__printf(6, 7)
+int drmm_crtc_init_with_planes(struct drm_device *dev,
+ struct drm_crtc *crtc,
+ struct drm_plane *primary,
+ struct drm_plane *cursor,
+ const struct drm_crtc_funcs *funcs,
+ const char *name, ...);
+
void drm_crtc_cleanup(struct drm_crtc *crtc);
__printf(7, 8)
diff --git a/include/drm/drm_encoder.h b/include/drm/drm_encoder.h
index 6e91a0280f31..3a09682af685 100644
--- a/include/drm/drm_encoder.h
+++ b/include/drm/drm_encoder.h
@@ -194,6 +194,12 @@ int drm_encoder_init(struct drm_device *dev,
const struct drm_encoder_funcs *funcs,
int encoder_type, const char *name, ...);
+__printf(5, 6)
+int drmm_encoder_init(struct drm_device *dev,
+ struct drm_encoder *encoder,
+ const struct drm_encoder_funcs *funcs,
+ int encoder_type, const char *name, ...);
+
__printf(6, 7)
void *__drmm_encoder_alloc(struct drm_device *dev,
size_t size, size_t offset,
diff --git a/include/drm/drm_fb_cma_helper.h b/include/drm/drm_fb_dma_helper.h
index 6447e34528f8..d5e036c57801 100644
--- a/include/drm/drm_fb_cma_helper.h
+++ b/include/drm/drm_fb_dma_helper.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __DRM_FB_CMA_HELPER_H__
-#define __DRM_FB_CMA_HELPER_H__
+#ifndef __DRM_FB_DMA_HELPER_H__
+#define __DRM_FB_DMA_HELPER_H__
#include <linux/types.h>
@@ -8,14 +8,14 @@ struct drm_device;
struct drm_framebuffer;
struct drm_plane_state;
-struct drm_gem_cma_object *drm_fb_cma_get_gem_obj(struct drm_framebuffer *fb,
+struct drm_gem_dma_object *drm_fb_dma_get_gem_obj(struct drm_framebuffer *fb,
unsigned int plane);
-dma_addr_t drm_fb_cma_get_gem_addr(struct drm_framebuffer *fb,
+dma_addr_t drm_fb_dma_get_gem_addr(struct drm_framebuffer *fb,
struct drm_plane_state *state,
unsigned int plane);
-void drm_fb_cma_sync_non_coherent(struct drm_device *drm,
+void drm_fb_dma_sync_non_coherent(struct drm_device *drm,
struct drm_plane_state *old_state,
struct drm_plane_state *state);
diff --git a/include/drm/drm_file.h b/include/drm/drm_file.h
index e0a73a1e2df7..d780fd151789 100644
--- a/include/drm/drm_file.h
+++ b/include/drm/drm_file.h
@@ -421,13 +421,4 @@ void drm_send_event_timestamp_locked(struct drm_device *dev,
struct file *mock_drm_getfile(struct drm_minor *minor, unsigned int flags);
-#ifdef CONFIG_MMU
-struct drm_vma_offset_manager;
-unsigned long drm_get_unmapped_area(struct file *file,
- unsigned long uaddr, unsigned long len,
- unsigned long pgoff, unsigned long flags,
- struct drm_vma_offset_manager *mgr);
-#endif /* CONFIG_MMU */
-
-
#endif /* _DRM_FILE_H_ */
diff --git a/include/drm/drm_format_helper.h b/include/drm/drm_format_helper.h
index 55145eca0782..eb5c98cf82b8 100644
--- a/include/drm/drm_format_helper.h
+++ b/include/drm/drm_format_helper.h
@@ -6,44 +6,51 @@
#ifndef __LINUX_DRM_FORMAT_HELPER_H
#define __LINUX_DRM_FORMAT_HELPER_H
+#include <linux/types.h>
+
+struct drm_device;
struct drm_format_info;
struct drm_framebuffer;
struct drm_rect;
+struct iosys_map;
+
unsigned int drm_fb_clip_offset(unsigned int pitch, const struct drm_format_info *format,
const struct drm_rect *clip);
-void drm_fb_memcpy(void *dst, unsigned int dst_pitch, const void *vaddr,
- const struct drm_framebuffer *fb, const struct drm_rect *clip);
-void drm_fb_memcpy_toio(void __iomem *dst, unsigned int dst_pitch, const void *vaddr,
- const struct drm_framebuffer *fb, const struct drm_rect *clip);
-void drm_fb_swab(void *dst, unsigned int dst_pitch, const void *src,
- const struct drm_framebuffer *fb, const struct drm_rect *clip,
- bool cached);
-void drm_fb_xrgb8888_to_rgb332(void *dst, unsigned int dst_pitch, const void *vaddr,
- const struct drm_framebuffer *fb, const struct drm_rect *clip);
-void drm_fb_xrgb8888_to_rgb565(void *dst, unsigned int dst_pitch, const void *vaddr,
- const struct drm_framebuffer *fb, const struct drm_rect *clip,
- bool swab);
-void drm_fb_xrgb8888_to_rgb565_toio(void __iomem *dst, unsigned int dst_pitch,
- const void *vaddr, const struct drm_framebuffer *fb,
- const struct drm_rect *clip, bool swab);
-void drm_fb_xrgb8888_to_rgb888(void *dst, unsigned int dst_pitch, const void *src,
- const struct drm_framebuffer *fb, const struct drm_rect *clip);
-void drm_fb_xrgb8888_to_rgb888_toio(void __iomem *dst, unsigned int dst_pitch,
- const void *vaddr, const struct drm_framebuffer *fb,
+void drm_fb_memcpy(struct iosys_map *dst, const unsigned int *dst_pitch,
+ const struct iosys_map *src, const struct drm_framebuffer *fb,
+ const struct drm_rect *clip);
+void drm_fb_swab(struct iosys_map *dst, const unsigned int *dst_pitch,
+ const struct iosys_map *src, const struct drm_framebuffer *fb,
+ const struct drm_rect *clip, bool cached);
+void drm_fb_xrgb8888_to_rgb332(struct iosys_map *dst, const unsigned int *dst_pitch,
+ const struct iosys_map *src, const struct drm_framebuffer *fb,
+ const struct drm_rect *clip);
+void drm_fb_xrgb8888_to_rgb565(struct iosys_map *dst, const unsigned int *dst_pitch,
+ const struct iosys_map *src, const struct drm_framebuffer *fb,
+ const struct drm_rect *clip, bool swab);
+void drm_fb_xrgb8888_to_rgb888(struct iosys_map *dst, const unsigned int *dst_pitch,
+ const struct iosys_map *src, const struct drm_framebuffer *fb,
+ const struct drm_rect *clip);
+void drm_fb_xrgb8888_to_xrgb2101010(struct iosys_map *dst, const unsigned int *dst_pitch,
+ const struct iosys_map *src, const struct drm_framebuffer *fb,
const struct drm_rect *clip);
-void drm_fb_xrgb8888_to_xrgb2101010_toio(void __iomem *dst, unsigned int dst_pitch,
- const void *vaddr, const struct drm_framebuffer *fb,
- const struct drm_rect *clip);
-void drm_fb_xrgb8888_to_gray8(void *dst, unsigned int dst_pitch, const void *vaddr,
- const struct drm_framebuffer *fb, const struct drm_rect *clip);
-
-int drm_fb_blit_toio(void __iomem *dst, unsigned int dst_pitch, uint32_t dst_format,
- const void *vmap, const struct drm_framebuffer *fb,
- const struct drm_rect *rect);
-
-void drm_fb_xrgb8888_to_mono(void *dst, unsigned int dst_pitch, const void *src,
- const struct drm_framebuffer *fb, const struct drm_rect *clip);
+void drm_fb_xrgb8888_to_gray8(struct iosys_map *dst, const unsigned int *dst_pitch,
+ const struct iosys_map *src, const struct drm_framebuffer *fb,
+ const struct drm_rect *clip);
+
+int drm_fb_blit(struct iosys_map *dst, const unsigned int *dst_pitch, uint32_t dst_format,
+ const struct iosys_map *src, const struct drm_framebuffer *fb,
+ const struct drm_rect *rect);
+
+void drm_fb_xrgb8888_to_mono(struct iosys_map *dst, const unsigned int *dst_pitch,
+ const struct iosys_map *src, const struct drm_framebuffer *fb,
+ const struct drm_rect *clip);
+
+size_t drm_fb_build_fourcc_list(struct drm_device *dev,
+ const u32 *native_fourccs, size_t native_nfourccs,
+ const u32 *extra_fourccs, size_t extra_nfourccs,
+ u32 *fourccs_out, size_t nfourccs_out);
#endif /* __LINUX_DRM_FORMAT_HELPER_H */
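
With the conversion helpers now taking &struct iosys_map for both source and destination, a hedged usage sketch for a shadow-buffered RGB565 destination (the buffer pointers and damage clip are assumptions):

	struct iosys_map dst = IOSYS_MAP_INIT_VADDR(dst_vaddr);
	struct iosys_map src = IOSYS_MAP_INIT_VADDR(shadow_vaddr);

	/* A NULL dst_pitch is assumed to select the default pitch. */
	drm_fb_xrgb8888_to_rgb565(&dst, NULL, &src, fb, &clip, false);
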
diff --git a/include/drm/drm_fourcc.h b/include/drm/drm_fourcc.h
index 22aa64d07c79..532ae78ca747 100644
--- a/include/drm/drm_fourcc.h
+++ b/include/drm/drm_fourcc.h
@@ -138,6 +138,9 @@ struct drm_format_info {
/** @is_yuv: Is it a YUV format? */
bool is_yuv;
+
+ /** @is_color_indexed: Is it a color-indexed format? */
+ bool is_color_indexed;
};
/**
@@ -313,6 +316,7 @@ unsigned int drm_format_info_block_width(const struct drm_format_info *info,
int plane);
unsigned int drm_format_info_block_height(const struct drm_format_info *info,
int plane);
+unsigned int drm_format_info_bpp(const struct drm_format_info *info, int plane);
uint64_t drm_format_info_min_pitch(const struct drm_format_info *info,
int plane, unsigned int buffer_width);
diff --git a/include/drm/drm_framebuffer.h b/include/drm/drm_framebuffer.h
index f67c5b7bcb68..0dcc07b68654 100644
--- a/include/drm/drm_framebuffer.h
+++ b/include/drm/drm_framebuffer.h
@@ -154,10 +154,10 @@ struct drm_framebuffer {
* drm_mode_fb_cmd2.
*
* Note that this is a linear offset and does not take into account
- * tiling or buffer laytou per @modifier. It meant to be used when the
- * actual pixel data for this framebuffer plane starts at an offset,
- * e.g. when multiple planes are allocated within the same backing
- * storage buffer object. For tiled layouts this generally means it
+ * tiling or buffer layout per @modifier. It is meant to be used when
+ * the actual pixel data for this framebuffer plane starts at an offset,
+ * e.g. when multiple planes are allocated within the same backing
+ * storage buffer object. For tiled layouts this generally means its
* @offsets must at least be tile-size aligned, but hardware often has
* stricter requirements.
*
diff --git a/include/drm/drm_gem.h b/include/drm/drm_gem.h
index 87cffc9efa85..bd42f25e449c 100644
--- a/include/drm/drm_gem.h
+++ b/include/drm/drm_gem.h
@@ -175,6 +175,41 @@ struct drm_gem_object_funcs {
};
/**
+ * struct drm_gem_lru - A simple LRU helper
+ *
+ * A helper for tracking GEM objects in a given state, to aid in
+ * driver's shrinker implementation. Tracks the count of pages
+ * for lockless &shrinker.count_objects, and provides
+ * &drm_gem_lru_scan for driver's &shrinker.scan_objects
+ * implementation.
+ */
+struct drm_gem_lru {
+ /**
+ * @lock:
+ *
+ * Lock protecting movement of GEM objects between LRUs. All
+ * LRUs that the object can move between should be protected
+ * by the same lock.
+ */
+ struct mutex *lock;
+
+ /**
+ * @count:
+ *
+ * The total number of backing pages of the GEM objects in
+ * this LRU.
+ */
+ long count;
+
+ /**
+ * @list:
+ *
+ * The LRU list.
+ */
+ struct list_head list;
+};
+
+/**
* struct drm_gem_object - GEM buffer object
*
* This structure defines the generic parts for GEM buffer objects, which are
@@ -217,7 +252,7 @@ struct drm_gem_object {
*
* SHMEM file node used as backing storage for swappable buffer objects.
* GEM also supports driver private objects with driver-specific backing
- * storage (contiguous CMA memory, special reserved blocks). In this
+ * storage (contiguous DMA memory, special reserved blocks). In this
* case @filp is NULL.
*/
struct file *filp;
@@ -312,6 +347,20 @@ struct drm_gem_object {
*
*/
const struct drm_gem_object_funcs *funcs;
+
+ /**
+ * @lru_node:
+ *
+ * List node in a &drm_gem_lru.
+ */
+ struct list_head lru_node;
+
+ /**
+ * @lru:
+ *
+ * The current LRU list that the GEM object is on.
+ */
+ struct drm_gem_lru *lru;
};
/**
@@ -420,4 +469,10 @@ void drm_gem_unlock_reservations(struct drm_gem_object **objs, int count,
int drm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
u32 handle, u64 *offset);
+void drm_gem_lru_init(struct drm_gem_lru *lru, struct mutex *lock);
+void drm_gem_lru_remove(struct drm_gem_object *obj);
+void drm_gem_lru_move_tail(struct drm_gem_lru *lru, struct drm_gem_object *obj);
+unsigned long drm_gem_lru_scan(struct drm_gem_lru *lru, unsigned nr_to_scan,
+ bool (*shrink)(struct drm_gem_object *obj));
+
#endif /* __DRM_GEM_H__ */
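
A hedged sketch of how a driver shrinker might build on the new LRU helper (edev, lru_lock and the purge callback are hypothetical):

	/* At init time; lru_lock is a driver-owned mutex. */
	drm_gem_lru_init(&edev->lru, &edev->lru_lock);

	/* Whenever an object becomes purgeable, move it to the LRU tail. */
	drm_gem_lru_move_tail(&edev->lru, obj);

	/* &shrinker.count_objects can report edev->lru.count directly, and
	 * &shrinker.scan_objects can forward to the helper: */
	freed = drm_gem_lru_scan(&edev->lru, sc->nr_to_scan, example_purge);
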
diff --git a/include/drm/drm_gem_cma_helper.h b/include/drm/drm_gem_dma_helper.h
index fbda4ce5d5fb..8a043235dad8 100644
--- a/include/drm/drm_gem_cma_helper.h
+++ b/include/drm/drm_gem_dma_helper.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __DRM_GEM_CMA_HELPER_H__
-#define __DRM_GEM_CMA_HELPER_H__
+#ifndef __DRM_GEM_DMA_HELPER_H__
+#define __DRM_GEM_DMA_HELPER_H__
#include <drm/drm_file.h>
#include <drm/drm_ioctl.h>
@@ -9,128 +9,128 @@
struct drm_mode_create_dumb;
/**
- * struct drm_gem_cma_object - GEM object backed by CMA memory allocations
+ * struct drm_gem_dma_object - GEM object backed by DMA memory allocations
* @base: base GEM object
- * @paddr: physical address of the backing memory
+ * @dma_addr: DMA address of the backing memory
* @sgt: scatter/gather table for imported PRIME buffers. The table can have
* more than one entry but they are guaranteed to have contiguous
* DMA addresses.
* @vaddr: kernel virtual address of the backing memory
* @map_noncoherent: if true, the GEM object is backed by non-coherent memory
*/
-struct drm_gem_cma_object {
+struct drm_gem_dma_object {
struct drm_gem_object base;
- dma_addr_t paddr;
+ dma_addr_t dma_addr;
struct sg_table *sgt;
- /* For objects with DMA memory allocated by GEM CMA */
+ /* For objects with DMA memory allocated by GEM DMA */
void *vaddr;
bool map_noncoherent;
};
-#define to_drm_gem_cma_obj(gem_obj) \
- container_of(gem_obj, struct drm_gem_cma_object, base)
+#define to_drm_gem_dma_obj(gem_obj) \
+ container_of(gem_obj, struct drm_gem_dma_object, base)
-struct drm_gem_cma_object *drm_gem_cma_create(struct drm_device *drm,
+struct drm_gem_dma_object *drm_gem_dma_create(struct drm_device *drm,
size_t size);
-void drm_gem_cma_free(struct drm_gem_cma_object *cma_obj);
-void drm_gem_cma_print_info(const struct drm_gem_cma_object *cma_obj,
+void drm_gem_dma_free(struct drm_gem_dma_object *dma_obj);
+void drm_gem_dma_print_info(const struct drm_gem_dma_object *dma_obj,
struct drm_printer *p, unsigned int indent);
-struct sg_table *drm_gem_cma_get_sg_table(struct drm_gem_cma_object *cma_obj);
-int drm_gem_cma_vmap(struct drm_gem_cma_object *cma_obj,
+struct sg_table *drm_gem_dma_get_sg_table(struct drm_gem_dma_object *dma_obj);
+int drm_gem_dma_vmap(struct drm_gem_dma_object *dma_obj,
struct iosys_map *map);
-int drm_gem_cma_mmap(struct drm_gem_cma_object *cma_obj, struct vm_area_struct *vma);
+int drm_gem_dma_mmap(struct drm_gem_dma_object *dma_obj, struct vm_area_struct *vma);
-extern const struct vm_operations_struct drm_gem_cma_vm_ops;
+extern const struct vm_operations_struct drm_gem_dma_vm_ops;
/*
* GEM object functions
*/
/**
- * drm_gem_cma_object_free - GEM object function for drm_gem_cma_free()
+ * drm_gem_dma_object_free - GEM object function for drm_gem_dma_free()
* @obj: GEM object to free
*
- * This function wraps drm_gem_cma_free_object(). Drivers that employ the CMA helpers
+ * This function wraps drm_gem_dma_free_object(). Drivers that employ the DMA helpers
* should use it as their &drm_gem_object_funcs.free handler.
*/
-static inline void drm_gem_cma_object_free(struct drm_gem_object *obj)
+static inline void drm_gem_dma_object_free(struct drm_gem_object *obj)
{
- struct drm_gem_cma_object *cma_obj = to_drm_gem_cma_obj(obj);
+ struct drm_gem_dma_object *dma_obj = to_drm_gem_dma_obj(obj);
- drm_gem_cma_free(cma_obj);
+ drm_gem_dma_free(dma_obj);
}
/**
- * drm_gem_cma_object_print_info() - Print &drm_gem_cma_object info for debugfs
+ * drm_gem_dma_object_print_info() - Print &drm_gem_dma_object info for debugfs
* @p: DRM printer
* @indent: Tab indentation level
* @obj: GEM object
*
- * This function wraps drm_gem_cma_print_info(). Drivers that employ the CMA helpers
+ * This function wraps drm_gem_dma_print_info(). Drivers that employ the DMA helpers
* should use this function as their &drm_gem_object_funcs.print_info handler.
*/
-static inline void drm_gem_cma_object_print_info(struct drm_printer *p, unsigned int indent,
+static inline void drm_gem_dma_object_print_info(struct drm_printer *p, unsigned int indent,
const struct drm_gem_object *obj)
{
- const struct drm_gem_cma_object *cma_obj = to_drm_gem_cma_obj(obj);
+ const struct drm_gem_dma_object *dma_obj = to_drm_gem_dma_obj(obj);
- drm_gem_cma_print_info(cma_obj, p, indent);
+ drm_gem_dma_print_info(dma_obj, p, indent);
}
/**
- * drm_gem_cma_object_get_sg_table - GEM object function for drm_gem_cma_get_sg_table()
+ * drm_gem_dma_object_get_sg_table - GEM object function for drm_gem_dma_get_sg_table()
* @obj: GEM object
*
- * This function wraps drm_gem_cma_get_sg_table(). Drivers that employ the CMA helpers should
+ * This function wraps drm_gem_dma_get_sg_table(). Drivers that employ the DMA helpers should
* use it as their &drm_gem_object_funcs.get_sg_table handler.
*
* Returns:
* A pointer to the scatter/gather table of pinned pages or NULL on failure.
*/
-static inline struct sg_table *drm_gem_cma_object_get_sg_table(struct drm_gem_object *obj)
+static inline struct sg_table *drm_gem_dma_object_get_sg_table(struct drm_gem_object *obj)
{
- struct drm_gem_cma_object *cma_obj = to_drm_gem_cma_obj(obj);
+ struct drm_gem_dma_object *dma_obj = to_drm_gem_dma_obj(obj);
- return drm_gem_cma_get_sg_table(cma_obj);
+ return drm_gem_dma_get_sg_table(dma_obj);
}
/*
- * drm_gem_cma_object_vmap - GEM object function for drm_gem_cma_vmap()
+ * drm_gem_dma_object_vmap - GEM object function for drm_gem_dma_vmap()
* @obj: GEM object
- * @map: Returns the kernel virtual address of the CMA GEM object's backing store.
+ * @map: Returns the kernel virtual address of the DMA GEM object's backing store.
*
- * This function wraps drm_gem_cma_vmap(). Drivers that employ the CMA helpers should
+ * This function wraps drm_gem_dma_vmap(). Drivers that employ the DMA helpers should
* use it as their &drm_gem_object_funcs.vmap handler.
*
* Returns:
* 0 on success or a negative error code on failure.
*/
-static inline int drm_gem_cma_object_vmap(struct drm_gem_object *obj,
+static inline int drm_gem_dma_object_vmap(struct drm_gem_object *obj,
struct iosys_map *map)
{
- struct drm_gem_cma_object *cma_obj = to_drm_gem_cma_obj(obj);
+ struct drm_gem_dma_object *dma_obj = to_drm_gem_dma_obj(obj);
- return drm_gem_cma_vmap(cma_obj, map);
+ return drm_gem_dma_vmap(dma_obj, map);
}
/**
- * drm_gem_cma_object_mmap - GEM object function for drm_gem_cma_mmap()
+ * drm_gem_dma_object_mmap - GEM object function for drm_gem_dma_mmap()
* @obj: GEM object
* @vma: VMA for the area to be mapped
*
- * This function wraps drm_gem_cma_mmap(). Drivers that employ the cma helpers should
+ * This function wraps drm_gem_dma_mmap(). Drivers that employ the dma helpers should
* use it as their &drm_gem_object_funcs.mmap handler.
*
* Returns:
* 0 on success or a negative error code on failure.
*/
-static inline int drm_gem_cma_object_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
+static inline int drm_gem_dma_object_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
- struct drm_gem_cma_object *cma_obj = to_drm_gem_cma_obj(obj);
+ struct drm_gem_dma_object *dma_obj = to_drm_gem_dma_obj(obj);
- return drm_gem_cma_mmap(cma_obj, vma);
+ return drm_gem_dma_mmap(dma_obj, vma);
}
/*
@@ -138,57 +138,57 @@ static inline int drm_gem_cma_object_mmap(struct drm_gem_object *obj, struct vm_
*/
/* create memory region for DRM framebuffer */
-int drm_gem_cma_dumb_create_internal(struct drm_file *file_priv,
+int drm_gem_dma_dumb_create_internal(struct drm_file *file_priv,
struct drm_device *drm,
struct drm_mode_create_dumb *args);
/* create memory region for DRM framebuffer */
-int drm_gem_cma_dumb_create(struct drm_file *file_priv,
+int drm_gem_dma_dumb_create(struct drm_file *file_priv,
struct drm_device *drm,
struct drm_mode_create_dumb *args);
struct drm_gem_object *
-drm_gem_cma_prime_import_sg_table(struct drm_device *dev,
+drm_gem_dma_prime_import_sg_table(struct drm_device *dev,
struct dma_buf_attachment *attach,
struct sg_table *sgt);
/**
- * DRM_GEM_CMA_DRIVER_OPS_WITH_DUMB_CREATE - CMA GEM driver operations
+ * DRM_GEM_DMA_DRIVER_OPS_WITH_DUMB_CREATE - DMA GEM driver operations
* @dumb_create_func: callback function for .dumb_create
*
* This macro provides a shortcut for setting the default GEM operations in the
* &drm_driver structure.
*
- * This macro is a variant of DRM_GEM_CMA_DRIVER_OPS for drivers that
+ * This macro is a variant of DRM_GEM_DMA_DRIVER_OPS for drivers that
* override the default implementation of &struct drm_driver.dumb_create. Use
- * DRM_GEM_CMA_DRIVER_OPS if possible. Drivers that require a virtual address
+ * DRM_GEM_DMA_DRIVER_OPS if possible. Drivers that require a virtual address
* on imported buffers should use
- * DRM_GEM_CMA_DRIVER_OPS_VMAP_WITH_DUMB_CREATE() instead.
+ * DRM_GEM_DMA_DRIVER_OPS_VMAP_WITH_DUMB_CREATE() instead.
*/
-#define DRM_GEM_CMA_DRIVER_OPS_WITH_DUMB_CREATE(dumb_create_func) \
+#define DRM_GEM_DMA_DRIVER_OPS_WITH_DUMB_CREATE(dumb_create_func) \
.dumb_create = (dumb_create_func), \
.prime_handle_to_fd = drm_gem_prime_handle_to_fd, \
.prime_fd_to_handle = drm_gem_prime_fd_to_handle, \
- .gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table, \
+ .gem_prime_import_sg_table = drm_gem_dma_prime_import_sg_table, \
.gem_prime_mmap = drm_gem_prime_mmap
/**
- * DRM_GEM_CMA_DRIVER_OPS - CMA GEM driver operations
+ * DRM_GEM_DMA_DRIVER_OPS - DMA GEM driver operations
*
* This macro provides a shortcut for setting the default GEM operations in the
* &drm_driver structure.
*
* Drivers that come with their own implementation of
* &struct drm_driver.dumb_create should use
- * DRM_GEM_CMA_DRIVER_OPS_WITH_DUMB_CREATE() instead. Use
- * DRM_GEM_CMA_DRIVER_OPS if possible. Drivers that require a virtual address
- * on imported buffers should use DRM_GEM_CMA_DRIVER_OPS_VMAP instead.
+ * DRM_GEM_DMA_DRIVER_OPS_WITH_DUMB_CREATE() instead. Use
+ * DRM_GEM_DMA_DRIVER_OPS if possible. Drivers that require a virtual address
+ * on imported buffers should use DRM_GEM_DMA_DRIVER_OPS_VMAP instead.
*/
-#define DRM_GEM_CMA_DRIVER_OPS \
- DRM_GEM_CMA_DRIVER_OPS_WITH_DUMB_CREATE(drm_gem_cma_dumb_create)
+#define DRM_GEM_DMA_DRIVER_OPS \
+ DRM_GEM_DMA_DRIVER_OPS_WITH_DUMB_CREATE(drm_gem_dma_dumb_create)
/**
- * DRM_GEM_CMA_DRIVER_OPS_VMAP_WITH_DUMB_CREATE - CMA GEM driver operations
+ * DRM_GEM_DMA_DRIVER_OPS_VMAP_WITH_DUMB_CREATE - DMA GEM driver operations
* ensuring a virtual address
* on the buffer
* @dumb_create_func: callback function for .dumb_create
@@ -197,21 +197,21 @@ drm_gem_cma_prime_import_sg_table(struct drm_device *dev,
* &drm_driver structure for drivers that need the virtual address also on
* imported buffers.
*
- * This macro is a variant of DRM_GEM_CMA_DRIVER_OPS_VMAP for drivers that
+ * This macro is a variant of DRM_GEM_DMA_DRIVER_OPS_VMAP for drivers that
* override the default implementation of &struct drm_driver.dumb_create. Use
- * DRM_GEM_CMA_DRIVER_OPS_VMAP if possible. Drivers that do not require a
+ * DRM_GEM_DMA_DRIVER_OPS_VMAP if possible. Drivers that do not require a
* virtual address on imported buffers should use
- * DRM_GEM_CMA_DRIVER_OPS_WITH_DUMB_CREATE() instead.
+ * DRM_GEM_DMA_DRIVER_OPS_WITH_DUMB_CREATE() instead.
*/
-#define DRM_GEM_CMA_DRIVER_OPS_VMAP_WITH_DUMB_CREATE(dumb_create_func) \
+#define DRM_GEM_DMA_DRIVER_OPS_VMAP_WITH_DUMB_CREATE(dumb_create_func) \
.dumb_create = dumb_create_func, \
.prime_handle_to_fd = drm_gem_prime_handle_to_fd, \
.prime_fd_to_handle = drm_gem_prime_fd_to_handle, \
- .gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table_vmap, \
+ .gem_prime_import_sg_table = drm_gem_dma_prime_import_sg_table_vmap, \
.gem_prime_mmap = drm_gem_prime_mmap
/**
- * DRM_GEM_CMA_DRIVER_OPS_VMAP - CMA GEM driver operations ensuring a virtual
+ * DRM_GEM_DMA_DRIVER_OPS_VMAP - DMA GEM driver operations ensuring a virtual
* address on the buffer
*
* This macro provides a shortcut for setting the default GEM operations in the
@@ -220,16 +220,16 @@ drm_gem_cma_prime_import_sg_table(struct drm_device *dev,
*
* Drivers that come with their own implementation of
* &struct drm_driver.dumb_create should use
- * DRM_GEM_CMA_DRIVER_OPS_VMAP_WITH_DUMB_CREATE() instead. Use
- * DRM_GEM_CMA_DRIVER_OPS_VMAP if possible. Drivers that do not require a
- * virtual address on imported buffers should use DRM_GEM_CMA_DRIVER_OPS
+ * DRM_GEM_DMA_DRIVER_OPS_VMAP_WITH_DUMB_CREATE() instead. Use
+ * DRM_GEM_DMA_DRIVER_OPS_VMAP if possible. Drivers that do not require a
+ * virtual address on imported buffers should use DRM_GEM_DMA_DRIVER_OPS
* instead.
*/
-#define DRM_GEM_CMA_DRIVER_OPS_VMAP \
- DRM_GEM_CMA_DRIVER_OPS_VMAP_WITH_DUMB_CREATE(drm_gem_cma_dumb_create)
+#define DRM_GEM_DMA_DRIVER_OPS_VMAP \
+ DRM_GEM_DMA_DRIVER_OPS_VMAP_WITH_DUMB_CREATE(drm_gem_dma_dumb_create)
struct drm_gem_object *
-drm_gem_cma_prime_import_sg_table_vmap(struct drm_device *drm,
+drm_gem_dma_prime_import_sg_table_vmap(struct drm_device *drm,
struct dma_buf_attachment *attach,
struct sg_table *sgt);
@@ -238,22 +238,22 @@ drm_gem_cma_prime_import_sg_table_vmap(struct drm_device *drm,
*/
#ifndef CONFIG_MMU
-unsigned long drm_gem_cma_get_unmapped_area(struct file *filp,
+unsigned long drm_gem_dma_get_unmapped_area(struct file *filp,
unsigned long addr,
unsigned long len,
unsigned long pgoff,
unsigned long flags);
-#define DRM_GEM_CMA_UNMAPPED_AREA_FOPS \
- .get_unmapped_area = drm_gem_cma_get_unmapped_area,
+#define DRM_GEM_DMA_UNMAPPED_AREA_FOPS \
+ .get_unmapped_area = drm_gem_dma_get_unmapped_area,
#else
-#define DRM_GEM_CMA_UNMAPPED_AREA_FOPS
+#define DRM_GEM_DMA_UNMAPPED_AREA_FOPS
#endif
/**
- * DEFINE_DRM_GEM_CMA_FOPS() - macro to generate file operations for CMA drivers
+ * DEFINE_DRM_GEM_DMA_FOPS() - macro to generate file operations for DMA drivers
* @name: name for the generated structure
*
- * This macro autogenerates a suitable &struct file_operations for CMA based
+ * This macro autogenerates a suitable &struct file_operations for DMA based
* drivers, which can be assigned to &drm_driver.fops. Note that this structure
* cannot be shared between drivers, because it contains a reference to the
* current module using THIS_MODULE.
@@ -262,7 +262,7 @@ unsigned long drm_gem_cma_get_unmapped_area(struct file *filp,
* non-static version of this you're probably doing it wrong and will break the
* THIS_MODULE reference by accident.
*/
-#define DEFINE_DRM_GEM_CMA_FOPS(name) \
+#define DEFINE_DRM_GEM_DMA_FOPS(name) \
static const struct file_operations name = {\
.owner = THIS_MODULE,\
.open = drm_open,\
@@ -273,7 +273,7 @@ unsigned long drm_gem_cma_get_unmapped_area(struct file *filp,
.read = drm_read,\
.llseek = noop_llseek,\
.mmap = drm_gem_mmap,\
- DRM_GEM_CMA_UNMAPPED_AREA_FOPS \
+ DRM_GEM_DMA_UNMAPPED_AREA_FOPS \
}
-#endif /* __DRM_GEM_CMA_HELPER_H__ */
+#endif /* __DRM_GEM_DMA_HELPER_H__ */
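
A hedged sketch of a minimal driver wired up with the renamed DMA helpers (all example_* names are hypothetical):

	DEFINE_DRM_GEM_DMA_FOPS(example_fops);

	static const struct drm_driver example_driver = {
		.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
		.fops = &example_fops,
		DRM_GEM_DMA_DRIVER_OPS,
		.name = "example",
		.desc = "example DMA-helper based driver",
		.date = "20220101",
		.major = 1,
		.minor = 0,
	};
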
diff --git a/include/drm/drm_gem_shmem_helper.h b/include/drm/drm_gem_shmem_helper.h
index d0a57853c188..a2201b2488c5 100644
--- a/include/drm/drm_gem_shmem_helper.h
+++ b/include/drm/drm_gem_shmem_helper.h
@@ -210,7 +210,7 @@ static inline void drm_gem_shmem_object_unpin(struct drm_gem_object *obj)
* use it as their &drm_gem_object_funcs.get_sg_table handler.
*
* Returns:
- * A pointer to the scatter/gather table of pinned pages or NULL on failure.
+ * A pointer to the scatter/gather table of pinned pages or error pointer on failure.
*/
static inline struct sg_table *drm_gem_shmem_object_get_sg_table(struct drm_gem_object *obj)
{
diff --git a/include/drm/drm_mipi_dbi.h b/include/drm/drm_mipi_dbi.h
index dad2f187b64b..14eaecb1825c 100644
--- a/include/drm/drm_mipi_dbi.h
+++ b/include/drm/drm_mipi_dbi.h
@@ -155,6 +155,8 @@ int mipi_dbi_dev_init_with_formats(struct mipi_dbi_dev *dbidev,
int mipi_dbi_dev_init(struct mipi_dbi_dev *dbidev,
const struct drm_simple_display_pipe_funcs *funcs,
const struct drm_display_mode *mode, unsigned int rotation);
+enum drm_mode_status mipi_dbi_pipe_mode_valid(struct drm_simple_display_pipe *pipe,
+ const struct drm_display_mode *mode);
void mipi_dbi_pipe_update(struct drm_simple_display_pipe *pipe,
struct drm_plane_state *old_state);
void mipi_dbi_enable_flush(struct mipi_dbi_dev *dbidev,
diff --git a/include/drm/drm_mipi_dsi.h b/include/drm/drm_mipi_dsi.h
index 91a164bdd8f3..20b21b577dea 100644
--- a/include/drm/drm_mipi_dsi.h
+++ b/include/drm/drm_mipi_dsi.h
@@ -179,6 +179,7 @@ struct mipi_dsi_device_info {
* @lp_rate: maximum lane frequency for low power mode in hertz, this should
* be set to the real limits of the hardware, zero is only accepted for
* legacy drivers
+ * @dsc: panel/bridge DSC pps payload to be sent
*/
struct mipi_dsi_device {
struct mipi_dsi_host *host;
@@ -191,6 +192,7 @@ struct mipi_dsi_device {
unsigned long mode_flags;
unsigned long hs_rate;
unsigned long lp_rate;
+ struct drm_dsc_config *dsc;
};
#define MIPI_DSI_MODULE_PREFIX "mipi-dsi:"
@@ -322,7 +324,7 @@ int mipi_dsi_dcs_get_display_brightness(struct mipi_dsi_device *dsi,
struct mipi_dsi_driver {
struct device_driver driver;
int(*probe)(struct mipi_dsi_device *dsi);
- int(*remove)(struct mipi_dsi_device *dsi);
+ void (*remove)(struct mipi_dsi_device *dsi);
void (*shutdown)(struct mipi_dsi_device *dsi);
};
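
Since &mipi_dsi_driver.remove now returns void, a hedged driver skeleton looks roughly like this (example_* names are hypothetical):

	static int example_dsi_probe(struct mipi_dsi_device *dsi)
	{
		/* set up dsi->lanes, dsi->format, dsi->mode_flags, ... */
		return mipi_dsi_attach(dsi);
	}

	static void example_dsi_remove(struct mipi_dsi_device *dsi)
	{
		mipi_dsi_detach(dsi);
		/* failures can no longer be reported from remove */
	}

	static struct mipi_dsi_driver example_dsi_driver = {
		.probe = example_dsi_probe,
		.remove = example_dsi_remove,
		.driver = { .name = "example-dsi" },
	};
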
diff --git a/include/drm/drm_modes.h b/include/drm/drm_modes.h
index a80ae9639e96..b0c680e6f670 100644
--- a/include/drm/drm_modes.h
+++ b/include/drm/drm_modes.h
@@ -139,6 +139,35 @@ enum drm_mode_status {
.vscan = (vs), .flags = (f)
/**
+ * DRM_MODE_RES_MM - Calculates the display size from resolution and DPI
+ * @res: The resolution in pixels
+ * @dpi: The number of dots per inch
+ */
+#define DRM_MODE_RES_MM(res, dpi) \
+ (((res) * 254ul) / ((dpi) * 10ul))
+
+#define __DRM_MODE_INIT(pix, hd, vd, hd_mm, vd_mm) \
+ .type = DRM_MODE_TYPE_DRIVER, .clock = (pix), \
+ .hdisplay = (hd), .hsync_start = (hd), .hsync_end = (hd), \
+ .htotal = (hd), .vdisplay = (vd), .vsync_start = (vd), \
+ .vsync_end = (vd), .vtotal = (vd), .width_mm = (hd_mm), \
+ .height_mm = (vd_mm)
+
+/**
+ * DRM_MODE_INIT - Initialize display mode
+ * @hz: Vertical refresh rate in Hertz
+ * @hd: Horizontal resolution, width
+ * @vd: Vertical resolution, height
+ * @hd_mm: Display width in millimeters
+ * @vd_mm: Display height in millimeters
+ *
+ * This macro initializes a &drm_display_mode that contains information about
+ * refresh rate, resolution and physical size.
+ */
+#define DRM_MODE_INIT(hz, hd, vd, hd_mm, vd_mm) \
+ __DRM_MODE_INIT((hd) * (vd) * (hz) / 1000 /* kHz */, hd, vd, hd_mm, vd_mm)
+
+/**
* DRM_SIMPLE_MODE - Simple display mode
* @hd: Horizontal resolution, width
* @vd: Vertical resolution, height
@@ -149,11 +178,7 @@ enum drm_mode_status {
* resolution and physical size.
*/
#define DRM_SIMPLE_MODE(hd, vd, hd_mm, vd_mm) \
- .type = DRM_MODE_TYPE_DRIVER, .clock = 1 /* pass validation */, \
- .hdisplay = (hd), .hsync_start = (hd), .hsync_end = (hd), \
- .htotal = (hd), .vdisplay = (vd), .vsync_start = (vd), \
- .vsync_end = (vd), .vtotal = (vd), .width_mm = (hd_mm), \
- .height_mm = (vd_mm)
+ __DRM_MODE_INIT(1 /* pass validation */, hd, vd, hd_mm, vd_mm)
#define CRTC_INTERLACE_HALVE_V (1 << 0) /* halve V values for interlacing */
#define CRTC_STEREO_DOUBLE (1 << 1) /* adjust timings for stereo modes */
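
A hedged example of the new macros for a fixed-mode panel; the numbers are illustrative only:

	/* 640x480 @ 60 Hz, physical size derived from an assumed 220 DPI. */
	static const struct drm_display_mode example_mode = {
		DRM_MODE_INIT(60, 640, 480,
			      DRM_MODE_RES_MM(640, 220),
			      DRM_MODE_RES_MM(480, 220)),
	};
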
diff --git a/include/drm/drm_panel.h b/include/drm/drm_panel.h
index 3a271128c078..994bfcdd84c5 100644
--- a/include/drm/drm_panel.h
+++ b/include/drm/drm_panel.h
@@ -188,13 +188,6 @@ struct drm_panel {
* Panel entry in registry.
*/
struct list_head list;
-
- /**
- * @dsc:
- *
- * Panel DSC pps payload to be sent
- */
- struct drm_dsc_config *dsc;
};
void drm_panel_init(struct drm_panel *panel, struct device *dev,
diff --git a/include/drm/drm_plane.h b/include/drm/drm_plane.h
index 89ea54652e87..447e664e49d5 100644
--- a/include/drm/drm_plane.h
+++ b/include/drm/drm_plane.h
@@ -631,7 +631,7 @@ struct drm_plane {
unsigned int format_count;
/**
* @format_default: driver hasn't supplied supported formats for the
- * plane. Used by the drm_plane_init compatibility wrapper only.
+ * plane. Used by the non-atomic driver compatibility wrapper only.
*/
bool format_default;
@@ -762,12 +762,6 @@ int drm_universal_plane_init(struct drm_device *dev,
const uint64_t *format_modifiers,
enum drm_plane_type type,
const char *name, ...);
-int drm_plane_init(struct drm_device *dev,
- struct drm_plane *plane,
- uint32_t possible_crtcs,
- const struct drm_plane_funcs *funcs,
- const uint32_t *formats, unsigned int format_count,
- bool is_primary);
void drm_plane_cleanup(struct drm_plane *plane);
__printf(10, 11)
@@ -815,6 +809,50 @@ void *__drmm_universal_plane_alloc(struct drm_device *dev,
format_count, format_modifiers, \
plane_type, name, ##__VA_ARGS__))
+__printf(10, 11)
+void *__drm_universal_plane_alloc(struct drm_device *dev,
+ size_t size, size_t offset,
+ uint32_t possible_crtcs,
+ const struct drm_plane_funcs *funcs,
+ const uint32_t *formats,
+ unsigned int format_count,
+ const uint64_t *format_modifiers,
+ enum drm_plane_type plane_type,
+ const char *name, ...);
+
+/**
+ * drm_universal_plane_alloc() - Allocate and initialize an universal plane object
+ * @dev: DRM device
+ * @type: the type of the struct which contains struct &drm_plane
+ * @member: the name of the &drm_plane within @type
+ * @possible_crtcs: bitmask of possible CRTCs
+ * @funcs: callbacks for the new plane
+ * @formats: array of supported formats (DRM_FORMAT\_\*)
+ * @format_count: number of elements in @formats
+ * @format_modifiers: array of struct drm_format modifiers terminated by
+ * DRM_FORMAT_MOD_INVALID
+ * @plane_type: type of plane (overlay, primary, cursor)
+ * @name: printf style format string for the plane name, or NULL for default name
+ *
+ * Allocates and initializes a plane object of type @type. The caller
+ * is responsible for releasing the allocated memory with kfree().
+ *
+ * Drivers are encouraged to use drmm_universal_plane_alloc() instead.
+ *
+ * Drivers that only support the DRM_FORMAT_MOD_LINEAR modifier support may set
+ * @format_modifiers to NULL. The plane will advertise the linear modifier.
+ *
+ * Returns:
+ * Pointer to new plane, or ERR_PTR on failure.
+ */
+#define drm_universal_plane_alloc(dev, type, member, possible_crtcs, funcs, formats, \
+ format_count, format_modifiers, plane_type, name, ...) \
+ ((type *)__drm_universal_plane_alloc(dev, sizeof(type), \
+ offsetof(type, member), \
+ possible_crtcs, funcs, formats, \
+ format_count, format_modifiers, \
+ plane_type, name, ##__VA_ARGS__))
+
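
A hedged usage sketch of the non-managed variant (struct example_plane and the funcs/format tables are hypothetical; drmm_universal_plane_alloc() remains the preferred interface):

	struct example_plane {
		struct drm_plane base;
		/* driver-private members */
	};

	static const uint32_t example_formats[] = { DRM_FORMAT_XRGB8888 };

	eplane = drm_universal_plane_alloc(dev, struct example_plane, base,
					   BIT(0), &example_plane_funcs,
					   example_formats,
					   ARRAY_SIZE(example_formats),
					   NULL, DRM_PLANE_TYPE_PRIMARY, NULL);
	if (IS_ERR(eplane))
		return PTR_ERR(eplane);
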
/**
* drm_plane_index - find the index of a registered plane
* @plane: plane to find index for
diff --git a/include/drm/drm_plane_helper.h b/include/drm/drm_plane_helper.h
index 331ebd60b3a3..ff83d2621687 100644
--- a/include/drm/drm_plane_helper.h
+++ b/include/drm/drm_plane_helper.h
@@ -24,21 +24,35 @@
#ifndef DRM_PLANE_HELPER_H
#define DRM_PLANE_HELPER_H
-#include <drm/drm_rect.h>
-#include <drm/drm_crtc.h>
-#include <drm/drm_modeset_helper_vtables.h>
-#include <drm/drm_modeset_helper.h>
+#include <linux/types.h>
-/*
- * Drivers that don't allow primary plane scaling may pass this macro in place
- * of the min/max scale parameters of the update checker function.
+struct drm_crtc;
+struct drm_framebuffer;
+struct drm_modeset_acquire_ctx;
+struct drm_plane;
+
+int drm_plane_helper_update_primary(struct drm_plane *plane, struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ int crtc_x, int crtc_y,
+ unsigned int crtc_w, unsigned int crtc_h,
+ uint32_t src_x, uint32_t src_y,
+ uint32_t src_w, uint32_t src_h,
+ struct drm_modeset_acquire_ctx *ctx);
+int drm_plane_helper_disable_primary(struct drm_plane *plane,
+ struct drm_modeset_acquire_ctx *ctx);
+void drm_plane_helper_destroy(struct drm_plane *plane);
+int drm_plane_helper_atomic_check(struct drm_plane *plane, struct drm_atomic_state *state);
+
+/**
+ * DRM_PLANE_NON_ATOMIC_FUNCS - Default plane functions for non-atomic drivers
*
- * Due to src being in 16.16 fixed point and dest being in integer pixels,
- * 1<<16 represents no scaling.
+ * This macro initializes plane functions for non-atomic drivers to default
+ * values. Non-atomic interfaces are deprecated and should not be used in new
+ * drivers.
*/
-#define DRM_PLANE_HELPER_NO_SCALING (1<<16)
-
-void drm_primary_helper_destroy(struct drm_plane *plane);
-extern const struct drm_plane_funcs drm_primary_helper_funcs;
+#define DRM_PLANE_NON_ATOMIC_FUNCS \
+ .update_plane = drm_plane_helper_update_primary, \
+ .disable_plane = drm_plane_helper_disable_primary, \
+ .destroy = drm_plane_helper_destroy
#endif
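
For a legacy, non-atomic driver the replacement macro fills in the default plane functions; a minimal hedged sketch:

	static const struct drm_plane_funcs example_primary_funcs = {
		DRM_PLANE_NON_ATOMIC_FUNCS,
	};
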
diff --git a/include/drm/drm_print.h b/include/drm/drm_print.h
index 22fabdeed297..a44fb7ef257f 100644
--- a/include/drm/drm_print.h
+++ b/include/drm/drm_print.h
@@ -31,11 +31,12 @@
#include <linux/seq_file.h>
#include <linux/device.h>
#include <linux/debugfs.h>
+#include <linux/dynamic_debug.h>
#include <drm/drm.h>
/* Do *not* use outside of drm_print.[ch]! */
-extern unsigned int __drm_debug;
+extern unsigned long __drm_debug;
/**
* DOC: print
@@ -275,55 +276,75 @@ static inline struct drm_printer drm_err_printer(const char *prefix)
*
*/
enum drm_debug_category {
+ /* These names must match those in DYNAMIC_DEBUG_CLASSBITS */
/**
* @DRM_UT_CORE: Used in the generic drm code: drm_ioctl.c, drm_mm.c,
* drm_memory.c, ...
*/
- DRM_UT_CORE = 0x01,
+ DRM_UT_CORE,
/**
* @DRM_UT_DRIVER: Used in the vendor specific part of the driver: i915,
* radeon, ... macro.
*/
- DRM_UT_DRIVER = 0x02,
+ DRM_UT_DRIVER,
/**
* @DRM_UT_KMS: Used in the modesetting code.
*/
- DRM_UT_KMS = 0x04,
+ DRM_UT_KMS,
/**
* @DRM_UT_PRIME: Used in the prime code.
*/
- DRM_UT_PRIME = 0x08,
+ DRM_UT_PRIME,
/**
* @DRM_UT_ATOMIC: Used in the atomic code.
*/
- DRM_UT_ATOMIC = 0x10,
+ DRM_UT_ATOMIC,
/**
* @DRM_UT_VBL: Used for verbose debug message in the vblank code.
*/
- DRM_UT_VBL = 0x20,
+ DRM_UT_VBL,
/**
* @DRM_UT_STATE: Used for verbose atomic state debugging.
*/
- DRM_UT_STATE = 0x40,
+ DRM_UT_STATE,
/**
* @DRM_UT_LEASE: Used in the lease code.
*/
- DRM_UT_LEASE = 0x80,
+ DRM_UT_LEASE,
/**
* @DRM_UT_DP: Used in the DP code.
*/
- DRM_UT_DP = 0x100,
+ DRM_UT_DP,
/**
* @DRM_UT_DRMRES: Used in the drm managed resources code.
*/
- DRM_UT_DRMRES = 0x200,
+ DRM_UT_DRMRES
};
-static inline bool drm_debug_enabled(enum drm_debug_category category)
+static inline bool drm_debug_enabled_raw(enum drm_debug_category category)
{
- return unlikely(__drm_debug & category);
+ return unlikely(__drm_debug & BIT(category));
}
+#define drm_debug_enabled_instrumented(category) \
+ ({ \
+ pr_debug("todo: is this frequent enough to optimize ?\n"); \
+ drm_debug_enabled_raw(category); \
+ })
+
+#if defined(CONFIG_DRM_USE_DYNAMIC_DEBUG)
+/*
+ * the drm.debug API uses dyndbg, so each drm_*dbg macro/callsite gets
+ * a descriptor, and only enabled callsites are reachable. They use
+ * the private macro to avoid re-testing the enable-bit.
+ */
+#define __drm_debug_enabled(category) true
+#define drm_debug_enabled(category) drm_debug_enabled_instrumented(category)
+#else
+#define __drm_debug_enabled(category) drm_debug_enabled_raw(category)
+#define drm_debug_enabled(category) drm_debug_enabled_raw(category)
+#endif
+
/*
* struct device based logging
*
@@ -333,9 +354,10 @@ static inline bool drm_debug_enabled(enum drm_debug_category category)
__printf(3, 4)
void drm_dev_printk(const struct device *dev, const char *level,
const char *format, ...);
-__printf(3, 4)
-void drm_dev_dbg(const struct device *dev, enum drm_debug_category category,
- const char *format, ...);
+struct _ddebug;
+__printf(4, 5)
+void __drm_dev_dbg(struct _ddebug *desc, const struct device *dev,
+ enum drm_debug_category category, const char *format, ...);
/**
* DRM_DEV_ERROR() - Error output.
@@ -383,6 +405,15 @@ void drm_dev_dbg(const struct device *dev, enum drm_debug_category category,
} \
})
+#if !defined(CONFIG_DRM_USE_DYNAMIC_DEBUG)
+#define drm_dev_dbg(dev, cat, fmt, ...) \
+ __drm_dev_dbg(NULL, dev, cat, fmt, ##__VA_ARGS__)
+#else
+#define drm_dev_dbg(dev, cat, fmt, ...) \
+ _dynamic_func_call_cls(cat, fmt, __drm_dev_dbg, \
+ dev, cat, fmt, ##__VA_ARGS__)
+#endif
+
/**
* DRM_DEV_DEBUG() - Debug output for generic drm code
*
@@ -457,7 +488,7 @@ void drm_dev_dbg(const struct device *dev, enum drm_debug_category category,
#define drm_dbg_core(drm, fmt, ...) \
drm_dev_dbg((drm) ? (drm)->dev : NULL, DRM_UT_CORE, fmt, ##__VA_ARGS__)
-#define drm_dbg(drm, fmt, ...) \
+#define drm_dbg_driver(drm, fmt, ...) \
drm_dev_dbg((drm) ? (drm)->dev : NULL, DRM_UT_DRIVER, fmt, ##__VA_ARGS__)
#define drm_dbg_kms(drm, fmt, ...) \
drm_dev_dbg((drm) ? (drm)->dev : NULL, DRM_UT_KMS, fmt, ##__VA_ARGS__)
@@ -476,6 +507,7 @@ void drm_dev_dbg(const struct device *dev, enum drm_debug_category category,
#define drm_dbg_drmres(drm, fmt, ...) \
drm_dev_dbg((drm) ? (drm)->dev : NULL, DRM_UT_DRMRES, fmt, ##__VA_ARGS__)
+#define drm_dbg(drm, fmt, ...) drm_dbg_driver(drm, fmt, ##__VA_ARGS__)
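
Call sites are unaffected by the rename: drm_dbg() still logs in the DRM_UT_DRIVER category, e.g. (a sketch, with drm being a &struct drm_device pointer):

	drm_dbg(drm, "configured %ux%u\n", w, h);
	drm_dbg_kms(drm, "mode %s rejected\n", mode->name);
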
/*
* printk based logging
@@ -483,11 +515,19 @@ void drm_dev_dbg(const struct device *dev, enum drm_debug_category category,
* Prefer drm_device based logging over device or prink based logging.
*/
-__printf(2, 3)
-void __drm_dbg(enum drm_debug_category category, const char *format, ...);
+__printf(3, 4)
+void ___drm_dbg(struct _ddebug *desc, enum drm_debug_category category, const char *format, ...);
__printf(1, 2)
void __drm_err(const char *format, ...);
+#if !defined(CONFIG_DRM_USE_DYNAMIC_DEBUG)
+#define __drm_dbg(fmt, ...) ___drm_dbg(NULL, fmt, ##__VA_ARGS__)
+#else
+#define __drm_dbg(cat, fmt, ...) \
+ _dynamic_func_call_cls(cat, fmt, ___drm_dbg, \
+ cat, fmt, ##__VA_ARGS__)
+#endif
+
/* Macros to make printk easier */
#define _DRM_PRINTK(once, level, fmt, ...) \
diff --git a/include/drm/drm_probe_helper.h b/include/drm/drm_probe_helper.h
index 8075e02aa865..5880daa14624 100644
--- a/include/drm/drm_probe_helper.h
+++ b/include/drm/drm_probe_helper.h
@@ -3,9 +3,10 @@
#ifndef __DRM_PROBE_HELPER_H__
#define __DRM_PROBE_HELPER_H__
-#include <linux/types.h>
+#include <drm/drm_modes.h>
struct drm_connector;
+struct drm_crtc;
struct drm_device;
struct drm_modeset_acquire_ctx;
@@ -26,7 +27,13 @@ void drm_kms_helper_poll_disable(struct drm_device *dev);
void drm_kms_helper_poll_enable(struct drm_device *dev);
bool drm_kms_helper_is_poll_worker(void);
+enum drm_mode_status drm_crtc_helper_mode_valid_fixed(struct drm_crtc *crtc,
+ const struct drm_display_mode *mode,
+ const struct drm_display_mode *fixed_mode);
+
int drm_connector_helper_get_modes_from_ddc(struct drm_connector *connector);
+int drm_connector_helper_get_modes_fixed(struct drm_connector *connector,
+ const struct drm_display_mode *fixed_mode);
int drm_connector_helper_get_modes(struct drm_connector *connector);
#endif
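
A hedged sketch of the new fixed-mode helpers for hardware with a single hardcoded mode (example_mode is hypothetical, e.g. defined with DRM_MODE_INIT()):

	static int example_connector_get_modes(struct drm_connector *connector)
	{
		return drm_connector_helper_get_modes_fixed(connector,
							    &example_mode);
	}

	static enum drm_mode_status
	example_crtc_mode_valid(struct drm_crtc *crtc,
				const struct drm_display_mode *mode)
	{
		return drm_crtc_helper_mode_valid_fixed(crtc, mode,
							&example_mode);
	}
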
diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h
index addb135eeea6..599855c6a672 100644
--- a/include/drm/gpu_scheduler.h
+++ b/include/drm/gpu_scheduler.h
@@ -329,10 +329,10 @@ enum drm_gpu_sched_stat {
};
/**
- * struct drm_sched_backend_ops
+ * struct drm_sched_backend_ops - Define the backend operations
+ * called by the scheduler
*
- * Define the backend operations called by the scheduler,
- * these functions should be implemented in driver side.
+ * These functions should be implemented on the driver side.
*/
struct drm_sched_backend_ops {
/**
@@ -409,7 +409,7 @@ struct drm_sched_backend_ops {
};
/**
- * struct drm_gpu_scheduler
+ * struct drm_gpu_scheduler - scheduler instance-specific data
*
* @ops: backend operations provided by the driver.
* @hw_submission_limit: the max size of the hardware queue.
@@ -435,6 +435,7 @@ struct drm_sched_backend_ops {
* @_score: score used when the driver doesn't provide one
* @ready: marks if the underlying HW is ready to work
* @free_guilty: A hint to the timeout handler to free the guilty job.
+ * @dev: system &struct device
*
* One scheduler is implemented for each hardware ring.
*/
diff --git a/include/drm/i915_pciids.h b/include/drm/i915_pciids.h
index 278031aa2e84..4a4c190f7698 100644
--- a/include/drm/i915_pciids.h
+++ b/include/drm/i915_pciids.h
@@ -641,6 +641,7 @@
INTEL_VGA_DEVICE(0x4682, info), \
INTEL_VGA_DEVICE(0x4688, info), \
INTEL_VGA_DEVICE(0x468A, info), \
+ INTEL_VGA_DEVICE(0x468B, info), \
INTEL_VGA_DEVICE(0x4690, info), \
INTEL_VGA_DEVICE(0x4692, info), \
INTEL_VGA_DEVICE(0x4693, info)
diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h
index 2d524f8b0802..44a538ee5e2a 100644
--- a/include/drm/ttm/ttm_bo_api.h
+++ b/include/drm/ttm/ttm_bo_api.h
@@ -317,93 +317,16 @@ void ttm_bo_unlock_delayed_workqueue(struct ttm_device *bdev, int resched);
bool ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
const struct ttm_place *place);
-/**
- * ttm_bo_init_reserved
- *
- * @bdev: Pointer to a ttm_device struct.
- * @bo: Pointer to a ttm_buffer_object to be initialized.
- * @size: Requested size of buffer object.
- * @type: Requested type of buffer object.
- * @placement: Initial placement for buffer object.
- * @page_alignment: Data alignment in pages.
- * @ctx: TTM operation context for memory allocation.
- * @sg: Scatter-gather table.
- * @resv: Pointer to a dma_resv, or NULL to let ttm allocate one.
- * @destroy: Destroy function. Use NULL for kfree().
- *
- * This function initializes a pre-allocated struct ttm_buffer_object.
- * As this object may be part of a larger structure, this function,
- * together with the @destroy function,
- * enables driver-specific objects derived from a ttm_buffer_object.
- *
- * On successful return, the caller owns an object kref to @bo. The kref and
- * list_kref are usually set to 1, but note that in some situations, other
- * tasks may already be holding references to @bo as well.
- * Furthermore, if resv == NULL, the buffer's reservation lock will be held,
- * and it is the caller's responsibility to call ttm_bo_unreserve.
- *
- * If a failure occurs, the function will call the @destroy function, or
- * kfree() if @destroy is NULL. Thus, after a failure, dereferencing @bo is
- * illegal and will likely cause memory corruption.
- *
- * Returns
- * -ENOMEM: Out of memory.
- * -EINVAL: Invalid placement flags.
- * -ERESTARTSYS: Interrupted by signal while sleeping waiting for resources.
- */
-
-int ttm_bo_init_reserved(struct ttm_device *bdev,
- struct ttm_buffer_object *bo,
- size_t size, enum ttm_bo_type type,
- struct ttm_placement *placement,
- uint32_t page_alignment,
- struct ttm_operation_ctx *ctx,
+int ttm_bo_init_reserved(struct ttm_device *bdev, struct ttm_buffer_object *bo,
+ enum ttm_bo_type type, struct ttm_placement *placement,
+ uint32_t alignment, struct ttm_operation_ctx *ctx,
+ struct sg_table *sg, struct dma_resv *resv,
+ void (*destroy) (struct ttm_buffer_object *));
+int ttm_bo_init_validate(struct ttm_device *bdev, struct ttm_buffer_object *bo,
+ enum ttm_bo_type type, struct ttm_placement *placement,
+ uint32_t alignment, bool interruptible,
struct sg_table *sg, struct dma_resv *resv,
void (*destroy) (struct ttm_buffer_object *));
-
-/**
- * ttm_bo_init
- *
- * @bdev: Pointer to a ttm_device struct.
- * @bo: Pointer to a ttm_buffer_object to be initialized.
- * @size: Requested size of buffer object.
- * @type: Requested type of buffer object.
- * @placement: Initial placement for buffer object.
- * @page_alignment: Data alignment in pages.
- * @interruptible: If needing to sleep to wait for GPU resources,
- * sleep interruptible.
- * pinned in physical memory. If this behaviour is not desired, this member
- * holds a pointer to a persistent shmem object. Typically, this would
- * point to the shmem object backing a GEM object if TTM is used to back a
- * GEM user interface.
- * @sg: Scatter-gather table.
- * @resv: Pointer to a dma_resv, or NULL to let ttm allocate one.
- * @destroy: Destroy function. Use NULL for kfree().
- *
- * This function initializes a pre-allocated struct ttm_buffer_object.
- * As this object may be part of a larger structure, this function,
- * together with the @destroy function,
- * enables driver-specific objects derived from a ttm_buffer_object.
- *
- * On successful return, the caller owns an object kref to @bo. The kref and
- * list_kref are usually set to 1, but note that in some situations, other
- * tasks may already be holding references to @bo as well.
- *
- * If a failure occurs, the function will call the @destroy function, or
- * kfree() if @destroy is NULL. Thus, after a failure, dereferencing @bo is
- * illegal and will likely cause memory corruption.
- *
- * Returns
- * -ENOMEM: Out of memory.
- * -EINVAL: Invalid placement flags.
- * -ERESTARTSYS: Interrupted by signal while sleeping waiting for resources.
- */
-int ttm_bo_init(struct ttm_device *bdev, struct ttm_buffer_object *bo,
- size_t size, enum ttm_bo_type type,
- struct ttm_placement *placement,
- uint32_t page_alignment, bool interruptible,
- struct sg_table *sg, struct dma_resv *resv,
- void (*destroy) (struct ttm_buffer_object *));
/**
* ttm_kmap_obj_virtual
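
The hunk above drops ttm_bo_init() in favour of ttm_bo_init_validate(), and both remaining initializers lose their size parameter. A sketch of a caller adapted to the new ttm_bo_init_reserved() signature is below; mydrv_bo_create() and the idea of sizing the object through the embedded GEM object first are assumptions about typical usage, only the ttm_bo_init_reserved() prototype itself is taken from the hunk above.

static int mydrv_bo_create(struct drm_device *drm, struct ttm_device *bdev,
			   struct ttm_buffer_object *bo, size_t size,
			   struct ttm_placement *placement,
			   void (*destroy)(struct ttm_buffer_object *))
{
	struct ttm_operation_ctx ctx = { .interruptible = true };
	int ret;

	/* The buffer size is now carried by the embedded GEM object. */
	ret = drm_gem_object_init(drm, &bo->base, size);
	if (ret)
		return ret;

	ret = ttm_bo_init_reserved(bdev, bo, ttm_bo_type_device, placement,
				   0, &ctx, NULL, NULL, destroy);
	if (ret)
		return ret;

	/* With resv == NULL the reservation is held on return. */
	ttm_bo_unreserve(bo);
	return 0;
}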
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
index 897b88f0bd59..1afa891f488a 100644
--- a/include/drm/ttm/ttm_bo_driver.h
+++ b/include/drm/ttm/ttm_bo_driver.h
@@ -106,7 +106,7 @@ static inline int ttm_bo_reserve(struct ttm_buffer_object *bo,
bool interruptible, bool no_wait,
struct ww_acquire_ctx *ticket)
{
- int ret = 0;
+ int ret;
if (no_wait) {
bool success;
diff --git a/include/drm/ttm/ttm_resource.h b/include/drm/ttm/ttm_resource.h
index ca89a48c2460..5afc6d664fde 100644
--- a/include/drm/ttm/ttm_resource.h
+++ b/include/drm/ttm/ttm_resource.h
@@ -89,6 +89,38 @@ struct ttm_resource_manager_func {
struct ttm_resource *res);
/**
+ * struct ttm_resource_manager_func member intersects
+ *
+ * @man: Pointer to a memory type manager.
+ * @res: Pointer to a struct ttm_resource to be checked.
+ * @place: Placement to check against.
+ * @size: Size of the check.
+ *
+ * Test if @res intersects with @place + @size. Used to judge if
+ * evictions are valuable or not.
+ */
+ bool (*intersects)(struct ttm_resource_manager *man,
+ struct ttm_resource *res,
+ const struct ttm_place *place,
+ size_t size);
+
+ /**
+ * struct ttm_resource_manager_func member compatible
+ *
+ * @man: Pointer to a memory type manager.
+ * @res: Pointer to a struct ttm_resource to be checked.
+ * @place: Placement to check against.
+ * @size: Size of the check.
+ *
+ * Test if @res is compatible with @place + @size. Used to check
+ * whether the backing store needs to be moved.
+ */
+ bool (*compatible)(struct ttm_resource_manager *man,
+ struct ttm_resource *res,
+ const struct ttm_place *place,
+ size_t size);
+
+ /**
* struct ttm_resource_manager_func member debug
*
* @man: Pointer to a memory type manager.
@@ -329,6 +361,14 @@ int ttm_resource_alloc(struct ttm_buffer_object *bo,
const struct ttm_place *place,
struct ttm_resource **res);
void ttm_resource_free(struct ttm_buffer_object *bo, struct ttm_resource **res);
+bool ttm_resource_intersects(struct ttm_device *bdev,
+ struct ttm_resource *res,
+ const struct ttm_place *place,
+ size_t size);
+bool ttm_resource_compatible(struct ttm_device *bdev,
+ struct ttm_resource *res,
+ const struct ttm_place *place,
+ size_t size);
bool ttm_resource_compat(struct ttm_resource *res,
struct ttm_placement *placement);
void ttm_resource_set_bo(struct ttm_resource *res,
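
The new intersects()/compatible() hooks let a resource manager tell TTM whether an existing allocation overlaps, or already satisfies, a requested placement. A sketch of an intersects() implementation for a manager that tracks page ranges is below; the mydrv_node layout is a made-up manager-private structure, only the callback signature comes from the hunk above.

struct mydrv_node {
	struct ttm_resource base;
	unsigned long start;		/* first page of the allocation */
	unsigned long num_pages;	/* allocation length in pages */
};

static bool mydrv_mgr_intersects(struct ttm_resource_manager *man,
				 struct ttm_resource *res,
				 const struct ttm_place *place,
				 size_t size)
{
	struct mydrv_node *node = container_of(res, struct mydrv_node, base);

	/* An unrestricted place (fpfn == lpfn == 0) intersects everything. */
	if (!place->fpfn && !place->lpfn)
		return true;

	/* Otherwise test [start, start + num_pages) against [fpfn, lpfn). */
	if (node->start + node->num_pages <= place->fpfn)
		return false;
	if (place->lpfn && node->start >= place->lpfn)
		return false;

	return true;
}

A driver would point the .intersects member of its ttm_resource_manager_func at such a helper; compatible() usually follows the same pattern but requires the range to be fully contained.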
diff --git a/include/dt-bindings/ata/ahci.h b/include/dt-bindings/ata/ahci.h
new file mode 100644
index 000000000000..77997b35612c
--- /dev/null
+++ b/include/dt-bindings/ata/ahci.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause */
+/*
+ * This header provides constants for most AHCI bindings.
+ */
+
+#ifndef _DT_BINDINGS_ATA_AHCI_H
+#define _DT_BINDINGS_ATA_AHCI_H
+
+/* Host Bus Adapter generic platform capabilities */
+#define HBA_SSS (1 << 27)
+#define HBA_SMPS (1 << 28)
+
+/* Host Bus Adapter port-specific platform capabilities */
+#define HBA_PORT_HPCP (1 << 18)
+#define HBA_PORT_MPSP (1 << 19)
+#define HBA_PORT_CPD (1 << 20)
+#define HBA_PORT_ESP (1 << 21)
+#define HBA_PORT_FBSCP (1 << 22)
+
+#endif
diff --git a/include/dt-bindings/clock/aspeed-clock.h b/include/dt-bindings/clock/aspeed-clock.h
index 9ff4f6e4558c..06d568382c77 100644
--- a/include/dt-bindings/clock/aspeed-clock.h
+++ b/include/dt-bindings/clock/aspeed-clock.h
@@ -52,5 +52,6 @@
#define ASPEED_RESET_I2C 7
#define ASPEED_RESET_AHB 8
#define ASPEED_RESET_CRT1 9
+#define ASPEED_RESET_HACE 10
#endif
diff --git a/include/dt-bindings/clock/ast2600-clock.h b/include/dt-bindings/clock/ast2600-clock.h
index 62b9520a00fd..d8b0db2f7a7d 100644
--- a/include/dt-bindings/clock/ast2600-clock.h
+++ b/include/dt-bindings/clock/ast2600-clock.h
@@ -111,6 +111,7 @@
#define ASPEED_RESET_PCIE_RC_O 19
#define ASPEED_RESET_PCIE_RC_OEN 18
#define ASPEED_RESET_PCI_DP 5
+#define ASPEED_RESET_HACE 4
#define ASPEED_RESET_AHB 1
#define ASPEED_RESET_SDRAM 0
diff --git a/include/dt-bindings/clock/exynos850.h b/include/dt-bindings/clock/exynos850.h
index 0b6a3c6a7c90..88d5289883d3 100644
--- a/include/dt-bindings/clock/exynos850.h
+++ b/include/dt-bindings/clock/exynos850.h
@@ -58,7 +58,34 @@
#define CLK_MOUT_CLKCMU_APM_BUS 46
#define CLK_DOUT_CLKCMU_APM_BUS 47
#define CLK_GOUT_CLKCMU_APM_BUS 48
-#define TOP_NR_CLK 49
+#define CLK_MOUT_AUD 49
+#define CLK_GOUT_AUD 50
+#define CLK_DOUT_AUD 51
+#define CLK_MOUT_IS_BUS 52
+#define CLK_MOUT_IS_ITP 53
+#define CLK_MOUT_IS_VRA 54
+#define CLK_MOUT_IS_GDC 55
+#define CLK_GOUT_IS_BUS 56
+#define CLK_GOUT_IS_ITP 57
+#define CLK_GOUT_IS_VRA 58
+#define CLK_GOUT_IS_GDC 59
+#define CLK_DOUT_IS_BUS 60
+#define CLK_DOUT_IS_ITP 61
+#define CLK_DOUT_IS_VRA 62
+#define CLK_DOUT_IS_GDC 63
+#define CLK_MOUT_MFCMSCL_MFC 64
+#define CLK_MOUT_MFCMSCL_M2M 65
+#define CLK_MOUT_MFCMSCL_MCSC 66
+#define CLK_MOUT_MFCMSCL_JPEG 67
+#define CLK_GOUT_MFCMSCL_MFC 68
+#define CLK_GOUT_MFCMSCL_M2M 69
+#define CLK_GOUT_MFCMSCL_MCSC 70
+#define CLK_GOUT_MFCMSCL_JPEG 71
+#define CLK_DOUT_MFCMSCL_MFC 72
+#define CLK_DOUT_MFCMSCL_M2M 73
+#define CLK_DOUT_MFCMSCL_MCSC 74
+#define CLK_DOUT_MFCMSCL_JPEG 75
+#define TOP_NR_CLK 76
/* CMU_APM */
#define CLK_RCO_I3C_PMIC 1
@@ -87,6 +114,69 @@
#define CLK_GOUT_SYSREG_APM_PCLK 24
#define APM_NR_CLK 25
+/* CMU_AUD */
+#define CLK_DOUT_AUD_AUDIF 1
+#define CLK_DOUT_AUD_BUSD 2
+#define CLK_DOUT_AUD_BUSP 3
+#define CLK_DOUT_AUD_CNT 4
+#define CLK_DOUT_AUD_CPU 5
+#define CLK_DOUT_AUD_CPU_ACLK 6
+#define CLK_DOUT_AUD_CPU_PCLKDBG 7
+#define CLK_DOUT_AUD_FM 8
+#define CLK_DOUT_AUD_FM_SPDY 9
+#define CLK_DOUT_AUD_MCLK 10
+#define CLK_DOUT_AUD_UAIF0 11
+#define CLK_DOUT_AUD_UAIF1 12
+#define CLK_DOUT_AUD_UAIF2 13
+#define CLK_DOUT_AUD_UAIF3 14
+#define CLK_DOUT_AUD_UAIF4 15
+#define CLK_DOUT_AUD_UAIF5 16
+#define CLK_DOUT_AUD_UAIF6 17
+#define CLK_FOUT_AUD_PLL 18
+#define CLK_GOUT_AUD_ABOX_ACLK 19
+#define CLK_GOUT_AUD_ASB_CCLK 20
+#define CLK_GOUT_AUD_CA32_CCLK 21
+#define CLK_GOUT_AUD_CNT_BCLK 22
+#define CLK_GOUT_AUD_CODEC_MCLK 23
+#define CLK_GOUT_AUD_DAP_CCLK 24
+#define CLK_GOUT_AUD_GPIO_PCLK 25
+#define CLK_GOUT_AUD_PPMU_ACLK 26
+#define CLK_GOUT_AUD_PPMU_PCLK 27
+#define CLK_GOUT_AUD_SPDY_BCLK 28
+#define CLK_GOUT_AUD_SYSMMU_CLK 29
+#define CLK_GOUT_AUD_SYSREG_PCLK 30
+#define CLK_GOUT_AUD_TZPC_PCLK 31
+#define CLK_GOUT_AUD_UAIF0_BCLK 32
+#define CLK_GOUT_AUD_UAIF1_BCLK 33
+#define CLK_GOUT_AUD_UAIF2_BCLK 34
+#define CLK_GOUT_AUD_UAIF3_BCLK 35
+#define CLK_GOUT_AUD_UAIF4_BCLK 36
+#define CLK_GOUT_AUD_UAIF5_BCLK 37
+#define CLK_GOUT_AUD_UAIF6_BCLK 38
+#define CLK_GOUT_AUD_WDT_PCLK 39
+#define CLK_MOUT_AUD_CPU 40
+#define CLK_MOUT_AUD_CPU_HCH 41
+#define CLK_MOUT_AUD_CPU_USER 42
+#define CLK_MOUT_AUD_FM 43
+#define CLK_MOUT_AUD_PLL 44
+#define CLK_MOUT_AUD_TICK_USB_USER 45
+#define CLK_MOUT_AUD_UAIF0 46
+#define CLK_MOUT_AUD_UAIF1 47
+#define CLK_MOUT_AUD_UAIF2 48
+#define CLK_MOUT_AUD_UAIF3 49
+#define CLK_MOUT_AUD_UAIF4 50
+#define CLK_MOUT_AUD_UAIF5 51
+#define CLK_MOUT_AUD_UAIF6 52
+#define IOCLK_AUDIOCDCLK0 53
+#define IOCLK_AUDIOCDCLK1 54
+#define IOCLK_AUDIOCDCLK2 55
+#define IOCLK_AUDIOCDCLK3 56
+#define IOCLK_AUDIOCDCLK4 57
+#define IOCLK_AUDIOCDCLK5 58
+#define IOCLK_AUDIOCDCLK6 59
+#define TICK_USB 60
+#define AUD_NR_CLK 61
+
/* CMU_CMGP */
#define CLK_RCO_CMGP 1
#define CLK_MOUT_CMGP_ADC 2
@@ -121,6 +211,50 @@
#define CLK_GOUT_SYSREG_HSI_PCLK 13
#define HSI_NR_CLK 14
+/* CMU_IS */
+#define CLK_MOUT_IS_BUS_USER 1
+#define CLK_MOUT_IS_ITP_USER 2
+#define CLK_MOUT_IS_VRA_USER 3
+#define CLK_MOUT_IS_GDC_USER 4
+#define CLK_DOUT_IS_BUSP 5
+#define CLK_GOUT_IS_CMU_IS_PCLK 6
+#define CLK_GOUT_IS_CSIS0_ACLK 7
+#define CLK_GOUT_IS_CSIS1_ACLK 8
+#define CLK_GOUT_IS_CSIS2_ACLK 9
+#define CLK_GOUT_IS_TZPC_PCLK 10
+#define CLK_GOUT_IS_CSIS_DMA_CLK 11
+#define CLK_GOUT_IS_GDC_CLK 12
+#define CLK_GOUT_IS_IPP_CLK 13
+#define CLK_GOUT_IS_ITP_CLK 14
+#define CLK_GOUT_IS_MCSC_CLK 15
+#define CLK_GOUT_IS_VRA_CLK 16
+#define CLK_GOUT_IS_PPMU_IS0_ACLK 17
+#define CLK_GOUT_IS_PPMU_IS0_PCLK 18
+#define CLK_GOUT_IS_PPMU_IS1_ACLK 19
+#define CLK_GOUT_IS_PPMU_IS1_PCLK 20
+#define CLK_GOUT_IS_SYSMMU_IS0_CLK 21
+#define CLK_GOUT_IS_SYSMMU_IS1_CLK 22
+#define CLK_GOUT_IS_SYSREG_PCLK 23
+#define IS_NR_CLK 24
+
+/* CMU_MFCMSCL */
+#define CLK_MOUT_MFCMSCL_MFC_USER 1
+#define CLK_MOUT_MFCMSCL_M2M_USER 2
+#define CLK_MOUT_MFCMSCL_MCSC_USER 3
+#define CLK_MOUT_MFCMSCL_JPEG_USER 4
+#define CLK_DOUT_MFCMSCL_BUSP 5
+#define CLK_GOUT_MFCMSCL_CMU_MFCMSCL_PCLK 6
+#define CLK_GOUT_MFCMSCL_TZPC_PCLK 7
+#define CLK_GOUT_MFCMSCL_JPEG_ACLK 8
+#define CLK_GOUT_MFCMSCL_M2M_ACLK 9
+#define CLK_GOUT_MFCMSCL_MCSC_CLK 10
+#define CLK_GOUT_MFCMSCL_MFC_ACLK 11
+#define CLK_GOUT_MFCMSCL_PPMU_ACLK 12
+#define CLK_GOUT_MFCMSCL_PPMU_PCLK 13
+#define CLK_GOUT_MFCMSCL_SYSMMU_CLK 14
+#define CLK_GOUT_MFCMSCL_SYSREG_PCLK 15
+#define MFCMSCL_NR_CLK 16
+
/* CMU_PERI */
#define CLK_MOUT_PERI_BUS_USER 1
#define CLK_MOUT_PERI_UART_USER 2
diff --git a/include/dt-bindings/clock/imx8mm-clock.h b/include/dt-bindings/clock/imx8mm-clock.h
index 47c6f7f9582c..1f768b2eeb1a 100644
--- a/include/dt-bindings/clock/imx8mm-clock.h
+++ b/include/dt-bindings/clock/imx8mm-clock.h
@@ -281,7 +281,6 @@
#define IMX8MM_CLK_CLKOUT2_DIV 256
#define IMX8MM_CLK_CLKOUT2 257
-
#define IMX8MM_CLK_END 258
#endif
diff --git a/include/dt-bindings/clock/imx93-clock.h b/include/dt-bindings/clock/imx93-clock.h
index 21fda9c5cb5e..19bc32788d81 100644
--- a/include/dt-bindings/clock/imx93-clock.h
+++ b/include/dt-bindings/clock/imx93-clock.h
@@ -196,6 +196,13 @@
#define IMX93_CLK_TMC_GATE 187
#define IMX93_CLK_PMRO_GATE 188
#define IMX93_CLK_32K 189
-#define IMX93_CLK_END 190
+#define IMX93_CLK_SAI1_IPG 190
+#define IMX93_CLK_SAI2_IPG 191
+#define IMX93_CLK_SAI3_IPG 192
+#define IMX93_CLK_MU1_A_GATE 193
+#define IMX93_CLK_MU1_B_GATE 194
+#define IMX93_CLK_MU2_A_GATE 195
+#define IMX93_CLK_MU2_B_GATE 196
+#define IMX93_CLK_END 197
#endif
diff --git a/include/dt-bindings/clk/lochnagar.h b/include/dt-bindings/clock/lochnagar.h
index 8fa20551ff17..8fa20551ff17 100644
--- a/include/dt-bindings/clk/lochnagar.h
+++ b/include/dt-bindings/clock/lochnagar.h
diff --git a/include/dt-bindings/clock/marvell,pxa168.h b/include/dt-bindings/clock/marvell,pxa168.h
index db2b41f1b127..c92d969ae941 100644
--- a/include/dt-bindings/clock/marvell,pxa168.h
+++ b/include/dt-bindings/clock/marvell,pxa168.h
@@ -20,8 +20,11 @@
#define PXA168_CLK_PLL1_2_1_5 19
#define PXA168_CLK_PLL1_3_16 20
#define PXA168_CLK_PLL1_192 21
+#define PXA168_CLK_PLL1_2_1_10 22
+#define PXA168_CLK_PLL1_2_3_16 23
#define PXA168_CLK_UART_PLL 27
#define PXA168_CLK_USB_PLL 28
+#define PXA168_CLK_CLK32_2 50
/* apb peripherals */
#define PXA168_CLK_TWSI0 60
@@ -56,6 +59,9 @@
#define PXA168_CLK_CCIC0 107
#define PXA168_CLK_CCIC0_PHY 108
#define PXA168_CLK_CCIC0_SPHY 109
+#define PXA168_CLK_SDH3 110
+#define PXA168_CLK_SDH01_AXI 111
+#define PXA168_CLK_SDH23_AXI 112
#define PXA168_NR_CLKS 200
#endif
diff --git a/include/dt-bindings/clock/mediatek,mt6795-clk.h b/include/dt-bindings/clock/mediatek,mt6795-clk.h
new file mode 100644
index 000000000000..9902906ac902
--- /dev/null
+++ b/include/dt-bindings/clock/mediatek,mt6795-clk.h
@@ -0,0 +1,275 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2022 Collabora Ltd.
+ * Author: AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
+ */
+
+#ifndef _DT_BINDINGS_CLK_MT6795_H
+#define _DT_BINDINGS_CLK_MT6795_H
+
+/* TOPCKGEN */
+#define CLK_TOP_ADSYS_26M 0
+#define CLK_TOP_CLKPH_MCK_O 1
+#define CLK_TOP_USB_SYSPLL_125M 2
+#define CLK_TOP_DSI0_DIG 3
+#define CLK_TOP_DSI1_DIG 4
+#define CLK_TOP_ARMCA53PLL_754M 5
+#define CLK_TOP_ARMCA53PLL_502M 6
+#define CLK_TOP_MAIN_H546M 7
+#define CLK_TOP_MAIN_H364M 8
+#define CLK_TOP_MAIN_H218P4M 9
+#define CLK_TOP_MAIN_H156M 10
+#define CLK_TOP_TVDPLL_445P5M 11
+#define CLK_TOP_TVDPLL_594M 12
+#define CLK_TOP_UNIV_624M 13
+#define CLK_TOP_UNIV_416M 14
+#define CLK_TOP_UNIV_249P6M 15
+#define CLK_TOP_UNIV_178P3M 16
+#define CLK_TOP_UNIV_48M 17
+#define CLK_TOP_CLKRTC_EXT 18
+#define CLK_TOP_CLKRTC_INT 19
+#define CLK_TOP_FPC 20
+#define CLK_TOP_HDMITXPLL_D2 21
+#define CLK_TOP_HDMITXPLL_D3 22
+#define CLK_TOP_ARMCA53PLL_D2 23
+#define CLK_TOP_ARMCA53PLL_D3 24
+#define CLK_TOP_APLL1 25
+#define CLK_TOP_APLL2 26
+#define CLK_TOP_DMPLL 27
+#define CLK_TOP_DMPLL_D2 28
+#define CLK_TOP_DMPLL_D4 29
+#define CLK_TOP_DMPLL_D8 30
+#define CLK_TOP_DMPLL_D16 31
+#define CLK_TOP_MMPLL 32
+#define CLK_TOP_MMPLL_D2 33
+#define CLK_TOP_MSDCPLL 34
+#define CLK_TOP_MSDCPLL_D2 35
+#define CLK_TOP_MSDCPLL_D4 36
+#define CLK_TOP_MSDCPLL2 37
+#define CLK_TOP_MSDCPLL2_D2 38
+#define CLK_TOP_MSDCPLL2_D4 39
+#define CLK_TOP_SYSPLL_D2 40
+#define CLK_TOP_SYSPLL1_D2 41
+#define CLK_TOP_SYSPLL1_D4 42
+#define CLK_TOP_SYSPLL1_D8 43
+#define CLK_TOP_SYSPLL1_D16 44
+#define CLK_TOP_SYSPLL_D3 45
+#define CLK_TOP_SYSPLL2_D2 46
+#define CLK_TOP_SYSPLL2_D4 47
+#define CLK_TOP_SYSPLL_D5 48
+#define CLK_TOP_SYSPLL3_D2 49
+#define CLK_TOP_SYSPLL3_D4 50
+#define CLK_TOP_SYSPLL_D7 51
+#define CLK_TOP_SYSPLL4_D2 52
+#define CLK_TOP_SYSPLL4_D4 53
+#define CLK_TOP_TVDPLL 54
+#define CLK_TOP_TVDPLL_D2 55
+#define CLK_TOP_TVDPLL_D4 56
+#define CLK_TOP_TVDPLL_D8 57
+#define CLK_TOP_TVDPLL_D16 58
+#define CLK_TOP_UNIVPLL_D2 59
+#define CLK_TOP_UNIVPLL1_D2 60
+#define CLK_TOP_UNIVPLL1_D4 61
+#define CLK_TOP_UNIVPLL1_D8 62
+#define CLK_TOP_UNIVPLL_D3 63
+#define CLK_TOP_UNIVPLL2_D2 64
+#define CLK_TOP_UNIVPLL2_D4 65
+#define CLK_TOP_UNIVPLL2_D8 66
+#define CLK_TOP_UNIVPLL_D5 67
+#define CLK_TOP_UNIVPLL3_D2 68
+#define CLK_TOP_UNIVPLL3_D4 69
+#define CLK_TOP_UNIVPLL3_D8 70
+#define CLK_TOP_UNIVPLL_D7 71
+#define CLK_TOP_UNIVPLL_D26 72
+#define CLK_TOP_UNIVPLL_D52 73
+#define CLK_TOP_VCODECPLL 74
+#define CLK_TOP_VCODECPLL_370P5 75
+#define CLK_TOP_VENCPLL 76
+#define CLK_TOP_VENCPLL_D2 77
+#define CLK_TOP_VENCPLL_D4 78
+#define CLK_TOP_AXI_SEL 79
+#define CLK_TOP_MEM_SEL 80
+#define CLK_TOP_DDRPHYCFG_SEL 81
+#define CLK_TOP_MM_SEL 82
+#define CLK_TOP_PWM_SEL 83
+#define CLK_TOP_VDEC_SEL 84
+#define CLK_TOP_VENC_SEL 85
+#define CLK_TOP_MFG_SEL 86
+#define CLK_TOP_CAMTG_SEL 87
+#define CLK_TOP_UART_SEL 88
+#define CLK_TOP_SPI_SEL 89
+#define CLK_TOP_USB20_SEL 90
+#define CLK_TOP_USB30_SEL 91
+#define CLK_TOP_MSDC50_0_H_SEL 92
+#define CLK_TOP_MSDC50_0_SEL 93
+#define CLK_TOP_MSDC30_1_SEL 94
+#define CLK_TOP_MSDC30_2_SEL 95
+#define CLK_TOP_MSDC30_3_SEL 96
+#define CLK_TOP_AUDIO_SEL 97
+#define CLK_TOP_AUD_INTBUS_SEL 98
+#define CLK_TOP_PMICSPI_SEL 99
+#define CLK_TOP_SCP_SEL 100
+#define CLK_TOP_MJC_SEL 101
+#define CLK_TOP_DPI0_SEL 102
+#define CLK_TOP_IRDA_SEL 103
+#define CLK_TOP_CCI400_SEL 104
+#define CLK_TOP_AUD_1_SEL 105
+#define CLK_TOP_AUD_2_SEL 106
+#define CLK_TOP_MEM_MFG_IN_SEL 107
+#define CLK_TOP_AXI_MFG_IN_SEL 108
+#define CLK_TOP_SCAM_SEL 109
+#define CLK_TOP_I2S0_M_SEL 110
+#define CLK_TOP_I2S1_M_SEL 111
+#define CLK_TOP_I2S2_M_SEL 112
+#define CLK_TOP_I2S3_M_SEL 113
+#define CLK_TOP_I2S3_B_SEL 114
+#define CLK_TOP_APLL1_DIV0 115
+#define CLK_TOP_APLL1_DIV1 116
+#define CLK_TOP_APLL1_DIV2 117
+#define CLK_TOP_APLL1_DIV3 118
+#define CLK_TOP_APLL1_DIV4 119
+#define CLK_TOP_APLL1_DIV5 120
+#define CLK_TOP_APLL2_DIV0 121
+#define CLK_TOP_APLL2_DIV1 122
+#define CLK_TOP_APLL2_DIV2 123
+#define CLK_TOP_APLL2_DIV3 124
+#define CLK_TOP_APLL2_DIV4 125
+#define CLK_TOP_APLL2_DIV5 126
+#define CLK_TOP_NR_CLK 127
+
+/* APMIXED_SYS */
+#define CLK_APMIXED_ARMCA53PLL 0
+#define CLK_APMIXED_MAINPLL 1
+#define CLK_APMIXED_UNIVPLL 2
+#define CLK_APMIXED_MMPLL 3
+#define CLK_APMIXED_MSDCPLL 4
+#define CLK_APMIXED_VENCPLL 5
+#define CLK_APMIXED_TVDPLL 6
+#define CLK_APMIXED_MPLL 7
+#define CLK_APMIXED_VCODECPLL 8
+#define CLK_APMIXED_APLL1 9
+#define CLK_APMIXED_APLL2 10
+#define CLK_APMIXED_REF2USB_TX 11
+#define CLK_APMIXED_NR_CLK 12
+
+/* INFRA_SYS */
+#define CLK_INFRA_DBGCLK 0
+#define CLK_INFRA_SMI 1
+#define CLK_INFRA_AUDIO 2
+#define CLK_INFRA_GCE 3
+#define CLK_INFRA_L2C_SRAM 4
+#define CLK_INFRA_M4U 5
+#define CLK_INFRA_MD1MCU 6
+#define CLK_INFRA_MD1BUS 7
+#define CLK_INFRA_MD1DBB 8
+#define CLK_INFRA_DEVICE_APC 9
+#define CLK_INFRA_TRNG 10
+#define CLK_INFRA_MD1LTE 11
+#define CLK_INFRA_CPUM 12
+#define CLK_INFRA_KP 13
+#define CLK_INFRA_CA53_C0_SEL 14
+#define CLK_INFRA_CA53_C1_SEL 15
+#define CLK_INFRA_NR_CLK 16
+
+/* PERI_SYS */
+#define CLK_PERI_NFI 0
+#define CLK_PERI_THERM 1
+#define CLK_PERI_PWM1 2
+#define CLK_PERI_PWM2 3
+#define CLK_PERI_PWM3 4
+#define CLK_PERI_PWM4 5
+#define CLK_PERI_PWM5 6
+#define CLK_PERI_PWM6 7
+#define CLK_PERI_PWM7 8
+#define CLK_PERI_PWM 9
+#define CLK_PERI_USB0 10
+#define CLK_PERI_USB1 11
+#define CLK_PERI_AP_DMA 12
+#define CLK_PERI_MSDC30_0 13
+#define CLK_PERI_MSDC30_1 14
+#define CLK_PERI_MSDC30_2 15
+#define CLK_PERI_MSDC30_3 16
+#define CLK_PERI_NLI_ARB 17
+#define CLK_PERI_IRDA 18
+#define CLK_PERI_UART0 19
+#define CLK_PERI_UART1 20
+#define CLK_PERI_UART2 21
+#define CLK_PERI_UART3 22
+#define CLK_PERI_I2C0 23
+#define CLK_PERI_I2C1 24
+#define CLK_PERI_I2C2 25
+#define CLK_PERI_I2C3 26
+#define CLK_PERI_I2C4 27
+#define CLK_PERI_AUXADC 28
+#define CLK_PERI_SPI0 29
+#define CLK_PERI_UART0_SEL 30
+#define CLK_PERI_UART1_SEL 31
+#define CLK_PERI_UART2_SEL 32
+#define CLK_PERI_UART3_SEL 33
+#define CLK_PERI_NR_CLK 34
+
+/* MFG */
+#define CLK_MFG_BAXI 0
+#define CLK_MFG_BMEM 1
+#define CLK_MFG_BG3D 2
+#define CLK_MFG_B26M 3
+#define CLK_MFG_NR_CLK 4
+
+/* MM_SYS */
+#define CLK_MM_SMI_COMMON 0
+#define CLK_MM_SMI_LARB0 1
+#define CLK_MM_CAM_MDP 2
+#define CLK_MM_MDP_RDMA0 3
+#define CLK_MM_MDP_RDMA1 4
+#define CLK_MM_MDP_RSZ0 5
+#define CLK_MM_MDP_RSZ1 6
+#define CLK_MM_MDP_RSZ2 7
+#define CLK_MM_MDP_TDSHP0 8
+#define CLK_MM_MDP_TDSHP1 9
+#define CLK_MM_MDP_CROP 10
+#define CLK_MM_MDP_WDMA 11
+#define CLK_MM_MDP_WROT0 12
+#define CLK_MM_MDP_WROT1 13
+#define CLK_MM_FAKE_ENG 14
+#define CLK_MM_MUTEX_32K 15
+#define CLK_MM_DISP_OVL0 16
+#define CLK_MM_DISP_OVL1 17
+#define CLK_MM_DISP_RDMA0 18
+#define CLK_MM_DISP_RDMA1 19
+#define CLK_MM_DISP_RDMA2 20
+#define CLK_MM_DISP_WDMA0 21
+#define CLK_MM_DISP_WDMA1 22
+#define CLK_MM_DISP_COLOR0 23
+#define CLK_MM_DISP_COLOR1 24
+#define CLK_MM_DISP_AAL 25
+#define CLK_MM_DISP_GAMMA 26
+#define CLK_MM_DISP_UFOE 27
+#define CLK_MM_DISP_SPLIT0 28
+#define CLK_MM_DISP_SPLIT1 29
+#define CLK_MM_DISP_MERGE 30
+#define CLK_MM_DISP_OD 31
+#define CLK_MM_DISP_PWM0MM 32
+#define CLK_MM_DISP_PWM026M 33
+#define CLK_MM_DISP_PWM1MM 34
+#define CLK_MM_DISP_PWM126M 35
+#define CLK_MM_DSI0_ENGINE 36
+#define CLK_MM_DSI0_DIGITAL 37
+#define CLK_MM_DSI1_ENGINE 38
+#define CLK_MM_DSI1_DIGITAL 39
+#define CLK_MM_DPI_PIXEL 40
+#define CLK_MM_DPI_ENGINE 41
+#define CLK_MM_NR_CLK 42
+
+/* VDEC_SYS */
+#define CLK_VDEC_CKEN 0
+#define CLK_VDEC_LARB_CKEN 1
+#define CLK_VDEC_NR_CLK 2
+
+/* VENC_SYS */
+#define CLK_VENC_LARB 0
+#define CLK_VENC_VENC 1
+#define CLK_VENC_JPGENC 2
+#define CLK_VENC_JPGDEC 3
+#define CLK_VENC_NR_CLK 4
+
+#endif /* _DT_BINDINGS_CLK_MT6795_H */
diff --git a/include/dt-bindings/clock/mediatek,mt8365-clk.h b/include/dt-bindings/clock/mediatek,mt8365-clk.h
new file mode 100644
index 000000000000..f9aff1775810
--- /dev/null
+++ b/include/dt-bindings/clock/mediatek,mt8365-clk.h
@@ -0,0 +1,373 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+ *
+ * Copyright (c) 2022 MediaTek Inc.
+ */
+
+#ifndef _DT_BINDINGS_CLK_MT8365_H
+#define _DT_BINDINGS_CLK_MT8365_H
+
+/* TOPCKGEN */
+#define CLK_TOP_CLK_NULL 0
+#define CLK_TOP_I2S0_BCK 1
+#define CLK_TOP_DSI0_LNTC_DSICK 2
+#define CLK_TOP_VPLL_DPIX 3
+#define CLK_TOP_LVDSTX_CLKDIG_CTS 4
+#define CLK_TOP_MFGPLL 5
+#define CLK_TOP_SYSPLL_D2 6
+#define CLK_TOP_SYSPLL1_D2 7
+#define CLK_TOP_SYSPLL1_D4 8
+#define CLK_TOP_SYSPLL1_D8 9
+#define CLK_TOP_SYSPLL1_D16 10
+#define CLK_TOP_SYSPLL_D3 11
+#define CLK_TOP_SYSPLL2_D2 12
+#define CLK_TOP_SYSPLL2_D4 13
+#define CLK_TOP_SYSPLL2_D8 14
+#define CLK_TOP_SYSPLL_D5 15
+#define CLK_TOP_SYSPLL3_D2 16
+#define CLK_TOP_SYSPLL3_D4 17
+#define CLK_TOP_SYSPLL_D7 18
+#define CLK_TOP_SYSPLL4_D2 19
+#define CLK_TOP_SYSPLL4_D4 20
+#define CLK_TOP_UNIVPLL 21
+#define CLK_TOP_UNIVPLL_D2 22
+#define CLK_TOP_UNIVPLL1_D2 23
+#define CLK_TOP_UNIVPLL1_D4 24
+#define CLK_TOP_UNIVPLL_D3 25
+#define CLK_TOP_UNIVPLL2_D2 26
+#define CLK_TOP_UNIVPLL2_D4 27
+#define CLK_TOP_UNIVPLL2_D8 28
+#define CLK_TOP_UNIVPLL2_D32 29
+#define CLK_TOP_UNIVPLL_D5 30
+#define CLK_TOP_UNIVPLL3_D2 31
+#define CLK_TOP_UNIVPLL3_D4 32
+#define CLK_TOP_MMPLL 33
+#define CLK_TOP_MMPLL_D2 34
+#define CLK_TOP_LVDSPLL_D2 35
+#define CLK_TOP_LVDSPLL_D4 36
+#define CLK_TOP_LVDSPLL_D8 37
+#define CLK_TOP_LVDSPLL_D16 38
+#define CLK_TOP_USB20_192M 39
+#define CLK_TOP_USB20_192M_D4 40
+#define CLK_TOP_USB20_192M_D8 41
+#define CLK_TOP_USB20_192M_D16 42
+#define CLK_TOP_USB20_192M_D32 43
+#define CLK_TOP_APLL1 44
+#define CLK_TOP_APLL1_D2 45
+#define CLK_TOP_APLL1_D4 46
+#define CLK_TOP_APLL1_D8 47
+#define CLK_TOP_APLL2 48
+#define CLK_TOP_APLL2_D2 49
+#define CLK_TOP_APLL2_D4 50
+#define CLK_TOP_APLL2_D8 51
+#define CLK_TOP_SYS_26M_D2 52
+#define CLK_TOP_MSDCPLL 53
+#define CLK_TOP_MSDCPLL_D2 54
+#define CLK_TOP_DSPPLL 55
+#define CLK_TOP_DSPPLL_D2 56
+#define CLK_TOP_DSPPLL_D4 57
+#define CLK_TOP_DSPPLL_D8 58
+#define CLK_TOP_APUPLL 59
+#define CLK_TOP_CLK26M_D52 60
+#define CLK_TOP_AXI_SEL 61
+#define CLK_TOP_MEM_SEL 62
+#define CLK_TOP_MM_SEL 63
+#define CLK_TOP_SCP_SEL 64
+#define CLK_TOP_MFG_SEL 65
+#define CLK_TOP_ATB_SEL 66
+#define CLK_TOP_CAMTG_SEL 67
+#define CLK_TOP_CAMTG1_SEL 68
+#define CLK_TOP_UART_SEL 69
+#define CLK_TOP_SPI_SEL 70
+#define CLK_TOP_MSDC50_0_HC_SEL 71
+#define CLK_TOP_MSDC2_2_HC_SEL 72
+#define CLK_TOP_MSDC50_0_SEL 73
+#define CLK_TOP_MSDC50_2_SEL 74
+#define CLK_TOP_MSDC30_1_SEL 75
+#define CLK_TOP_AUDIO_SEL 76
+#define CLK_TOP_AUD_INTBUS_SEL 77
+#define CLK_TOP_AUD_1_SEL 78
+#define CLK_TOP_AUD_2_SEL 79
+#define CLK_TOP_AUD_ENGEN1_SEL 80
+#define CLK_TOP_AUD_ENGEN2_SEL 81
+#define CLK_TOP_AUD_SPDIF_SEL 82
+#define CLK_TOP_DISP_PWM_SEL 83
+#define CLK_TOP_DXCC_SEL 84
+#define CLK_TOP_SSUSB_SYS_SEL 85
+#define CLK_TOP_SSUSB_XHCI_SEL 86
+#define CLK_TOP_SPM_SEL 87
+#define CLK_TOP_I2C_SEL 88
+#define CLK_TOP_PWM_SEL 89
+#define CLK_TOP_SENIF_SEL 90
+#define CLK_TOP_AES_FDE_SEL 91
+#define CLK_TOP_CAMTM_SEL 92
+#define CLK_TOP_DPI0_SEL 93
+#define CLK_TOP_DPI1_SEL 94
+#define CLK_TOP_DSP_SEL 95
+#define CLK_TOP_NFI2X_SEL 96
+#define CLK_TOP_NFIECC_SEL 97
+#define CLK_TOP_ECC_SEL 98
+#define CLK_TOP_ETH_SEL 99
+#define CLK_TOP_GCPU_SEL 100
+#define CLK_TOP_GCPU_CPM_SEL 101
+#define CLK_TOP_APU_SEL 102
+#define CLK_TOP_APU_IF_SEL 103
+#define CLK_TOP_MBIST_DIAG_SEL 104
+#define CLK_TOP_APLL_I2S0_SEL 105
+#define CLK_TOP_APLL_I2S1_SEL 106
+#define CLK_TOP_APLL_I2S2_SEL 107
+#define CLK_TOP_APLL_I2S3_SEL 108
+#define CLK_TOP_APLL_TDMOUT_SEL 109
+#define CLK_TOP_APLL_TDMIN_SEL 110
+#define CLK_TOP_APLL_SPDIF_SEL 111
+#define CLK_TOP_APLL12_CK_DIV0 112
+#define CLK_TOP_APLL12_CK_DIV1 113
+#define CLK_TOP_APLL12_CK_DIV2 114
+#define CLK_TOP_APLL12_CK_DIV3 115
+#define CLK_TOP_APLL12_CK_DIV4 116
+#define CLK_TOP_APLL12_CK_DIV4B 117
+#define CLK_TOP_APLL12_CK_DIV5 118
+#define CLK_TOP_APLL12_CK_DIV5B 119
+#define CLK_TOP_APLL12_CK_DIV6 120
+#define CLK_TOP_AUD_I2S0_M 121
+#define CLK_TOP_AUD_I2S1_M 122
+#define CLK_TOP_AUD_I2S2_M 123
+#define CLK_TOP_AUD_I2S3_M 124
+#define CLK_TOP_AUD_TDMOUT_M 125
+#define CLK_TOP_AUD_TDMOUT_B 126
+#define CLK_TOP_AUD_TDMIN_M 127
+#define CLK_TOP_AUD_TDMIN_B 128
+#define CLK_TOP_AUD_SPDIF_M 129
+#define CLK_TOP_USB20_48M_EN 130
+#define CLK_TOP_UNIVPLL_48M_EN 131
+#define CLK_TOP_LVDSTX_CLKDIG_EN 132
+#define CLK_TOP_VPLL_DPIX_EN 133
+#define CLK_TOP_SSUSB_TOP_CK_EN 134
+#define CLK_TOP_SSUSB_PHY_CK_EN 135
+#define CLK_TOP_CONN_32K 136
+#define CLK_TOP_CONN_26M 137
+#define CLK_TOP_DSP_32K 138
+#define CLK_TOP_DSP_26M 139
+#define CLK_TOP_NR_CLK 140
+
+/* INFRACFG */
+#define CLK_IFR_PMIC_TMR 0
+#define CLK_IFR_PMIC_AP 1
+#define CLK_IFR_PMIC_MD 2
+#define CLK_IFR_PMIC_CONN 3
+#define CLK_IFR_ICUSB 4
+#define CLK_IFR_GCE 5
+#define CLK_IFR_THERM 6
+#define CLK_IFR_PWM_HCLK 7
+#define CLK_IFR_PWM1 8
+#define CLK_IFR_PWM2 9
+#define CLK_IFR_PWM3 10
+#define CLK_IFR_PWM4 11
+#define CLK_IFR_PWM5 12
+#define CLK_IFR_PWM 13
+#define CLK_IFR_UART0 14
+#define CLK_IFR_UART1 15
+#define CLK_IFR_UART2 16
+#define CLK_IFR_DSP_UART 17
+#define CLK_IFR_GCE_26M 18
+#define CLK_IFR_CQ_DMA_FPC 19
+#define CLK_IFR_BTIF 20
+#define CLK_IFR_SPI0 21
+#define CLK_IFR_MSDC0_HCLK 22
+#define CLK_IFR_MSDC2_HCLK 23
+#define CLK_IFR_MSDC1_HCLK 24
+#define CLK_IFR_DVFSRC 25
+#define CLK_IFR_GCPU 26
+#define CLK_IFR_TRNG 27
+#define CLK_IFR_AUXADC 28
+#define CLK_IFR_CPUM 29
+#define CLK_IFR_AUXADC_MD 30
+#define CLK_IFR_AP_DMA 31
+#define CLK_IFR_DEBUGSYS 32
+#define CLK_IFR_AUDIO 33
+#define CLK_IFR_PWM_FBCLK6 34
+#define CLK_IFR_DISP_PWM 35
+#define CLK_IFR_AUD_26M_BK 36
+#define CLK_IFR_CQ_DMA 37
+#define CLK_IFR_MSDC0_SF 38
+#define CLK_IFR_MSDC1_SF 39
+#define CLK_IFR_MSDC2_SF 40
+#define CLK_IFR_AP_MSDC0 41
+#define CLK_IFR_MD_MSDC0 42
+#define CLK_IFR_MSDC0_SRC 43
+#define CLK_IFR_MSDC1_SRC 44
+#define CLK_IFR_MSDC2_SRC 45
+#define CLK_IFR_PWRAP_TMR 46
+#define CLK_IFR_PWRAP_SPI 47
+#define CLK_IFR_PWRAP_SYS 48
+#define CLK_IFR_MCU_PM_BK 49
+#define CLK_IFR_IRRX_26M 50
+#define CLK_IFR_IRRX_32K 51
+#define CLK_IFR_I2C0_AXI 52
+#define CLK_IFR_I2C1_AXI 53
+#define CLK_IFR_I2C2_AXI 54
+#define CLK_IFR_I2C3_AXI 55
+#define CLK_IFR_NIC_AXI 56
+#define CLK_IFR_NIC_SLV_AXI 57
+#define CLK_IFR_APU_AXI 58
+#define CLK_IFR_NFIECC 59
+#define CLK_IFR_NFIECC_BK 60
+#define CLK_IFR_NFI1X_BK 61
+#define CLK_IFR_NFI_BK 62
+#define CLK_IFR_MSDC2_AP_BK 63
+#define CLK_IFR_MSDC2_MD_BK 64
+#define CLK_IFR_MSDC2_BK 65
+#define CLK_IFR_SUSB_133_BK 66
+#define CLK_IFR_SUSB_66_BK 67
+#define CLK_IFR_SSUSB_SYS 68
+#define CLK_IFR_SSUSB_REF 69
+#define CLK_IFR_SSUSB_XHCI 70
+#define CLK_IFR_NR_CLK 71
+
+/* PERICFG */
+#define CLK_PERIAXI 0
+#define CLK_PERI_NR_CLK 1
+
+/* APMIXEDSYS */
+#define CLK_APMIXED_ARMPLL 0
+#define CLK_APMIXED_MAINPLL 1
+#define CLK_APMIXED_UNIVPLL 2
+#define CLK_APMIXED_MFGPLL 3
+#define CLK_APMIXED_MSDCPLL 4
+#define CLK_APMIXED_MMPLL 5
+#define CLK_APMIXED_APLL1 6
+#define CLK_APMIXED_APLL2 7
+#define CLK_APMIXED_LVDSPLL 8
+#define CLK_APMIXED_DSPPLL 9
+#define CLK_APMIXED_APUPLL 10
+#define CLK_APMIXED_UNIV_EN 11
+#define CLK_APMIXED_USB20_EN 12
+#define CLK_APMIXED_NR_CLK 13
+
+/* GCE */
+#define CLK_GCE_FAXI 0
+#define CLK_GCE_NR_CLK 1
+
+/* AUDIOTOP */
+#define CLK_AUD_AFE 0
+#define CLK_AUD_I2S 1
+#define CLK_AUD_22M 2
+#define CLK_AUD_24M 3
+#define CLK_AUD_INTDIR 4
+#define CLK_AUD_APLL2_TUNER 5
+#define CLK_AUD_APLL_TUNER 6
+#define CLK_AUD_SPDF 7
+#define CLK_AUD_HDMI 8
+#define CLK_AUD_HDMI_IN 9
+#define CLK_AUD_ADC 10
+#define CLK_AUD_DAC 11
+#define CLK_AUD_DAC_PREDIS 12
+#define CLK_AUD_TML 13
+#define CLK_AUD_I2S1_BK 14
+#define CLK_AUD_I2S2_BK 15
+#define CLK_AUD_I2S3_BK 16
+#define CLK_AUD_I2S4_BK 17
+#define CLK_AUD_NR_CLK 18
+
+/* MIPI_CSI0A */
+#define CLK_MIPI0A_CSR_CSI_EN_0A 0
+#define CLK_MIPI_RX_ANA_CSI0A_NR_CLK 1
+
+/* MIPI_CSI0B */
+#define CLK_MIPI0B_CSR_CSI_EN_0B 0
+#define CLK_MIPI_RX_ANA_CSI0B_NR_CLK 1
+
+/* MIPI_CSI1A */
+#define CLK_MIPI1A_CSR_CSI_EN_1A 0
+#define CLK_MIPI_RX_ANA_CSI1A_NR_CLK 1
+
+/* MIPI_CSI1B */
+#define CLK_MIPI1B_CSR_CSI_EN_1B 0
+#define CLK_MIPI_RX_ANA_CSI1B_NR_CLK 1
+
+/* MIPI_CSI2A */
+#define CLK_MIPI2A_CSR_CSI_EN_2A 0
+#define CLK_MIPI_RX_ANA_CSI2A_NR_CLK 1
+
+/* MIPI_CSI2B */
+#define CLK_MIPI2B_CSR_CSI_EN_2B 0
+#define CLK_MIPI_RX_ANA_CSI2B_NR_CLK 1
+
+/* MCUCFG */
+#define CLK_MCU_BUS_SEL 0
+#define CLK_MCU_NR_CLK 1
+
+/* MFGCFG */
+#define CLK_MFG_BG3D 0
+#define CLK_MFG_MBIST_DIAG 1
+#define CLK_MFG_NR_CLK 2
+
+/* MMSYS */
+#define CLK_MM_MM_MDP_RDMA0 0
+#define CLK_MM_MM_MDP_CCORR0 1
+#define CLK_MM_MM_MDP_RSZ0 2
+#define CLK_MM_MM_MDP_RSZ1 3
+#define CLK_MM_MM_MDP_TDSHP0 4
+#define CLK_MM_MM_MDP_WROT0 5
+#define CLK_MM_MM_MDP_WDMA0 6
+#define CLK_MM_MM_DISP_OVL0 7
+#define CLK_MM_MM_DISP_OVL0_2L 8
+#define CLK_MM_MM_DISP_RSZ0 9
+#define CLK_MM_MM_DISP_RDMA0 10
+#define CLK_MM_MM_DISP_WDMA0 11
+#define CLK_MM_MM_DISP_COLOR0 12
+#define CLK_MM_MM_DISP_CCORR0 13
+#define CLK_MM_MM_DISP_AAL0 14
+#define CLK_MM_MM_DISP_GAMMA0 15
+#define CLK_MM_MM_DISP_DITHER0 16
+#define CLK_MM_MM_DSI0 17
+#define CLK_MM_MM_DISP_RDMA1 18
+#define CLK_MM_MM_MDP_RDMA1 19
+#define CLK_MM_DPI0_DPI0 20
+#define CLK_MM_MM_FAKE 21
+#define CLK_MM_MM_SMI_COMMON 22
+#define CLK_MM_MM_SMI_LARB0 23
+#define CLK_MM_MM_SMI_COMM0 24
+#define CLK_MM_MM_SMI_COMM1 25
+#define CLK_MM_MM_CAM_MDP 26
+#define CLK_MM_MM_SMI_IMG 27
+#define CLK_MM_MM_SMI_CAM 28
+#define CLK_MM_IMG_IMG_DL_RELAY 29
+#define CLK_MM_IMG_IMG_DL_ASYNC_TOP 30
+#define CLK_MM_DSI0_DIG_DSI 31
+#define CLK_MM_26M_HRTWT 32
+#define CLK_MM_MM_DPI0 33
+#define CLK_MM_LVDSTX_PXL 34
+#define CLK_MM_LVDSTX_CTS 35
+#define CLK_MM_NR_CLK 36
+
+/* IMGSYS */
+#define CLK_CAM_LARB2 0
+#define CLK_CAM 1
+#define CLK_CAMTG 2
+#define CLK_CAM_SENIF 3
+#define CLK_CAMSV0 4
+#define CLK_CAMSV1 5
+#define CLK_CAM_FDVT 6
+#define CLK_CAM_WPE 7
+#define CLK_CAM_NR_CLK 8
+
+/* VDECSYS */
+#define CLK_VDEC_VDEC 0
+#define CLK_VDEC_LARB1 1
+#define CLK_VDEC_NR_CLK 2
+
+/* VENCSYS */
+#define CLK_VENC 0
+#define CLK_VENC_JPGENC 1
+#define CLK_VENC_NR_CLK 2
+
+/* APUSYS */
+#define CLK_APU_IPU_CK 0
+#define CLK_APU_AXI 1
+#define CLK_APU_JTAG 2
+#define CLK_APU_IF_CK 3
+#define CLK_APU_EDMA 4
+#define CLK_APU_AHB 5
+#define CLK_APU_NR_CLK 6
+
+#endif /* _DT_BINDINGS_CLK_MT8365_H */
diff --git a/include/dt-bindings/clock/microchip,mpfs-clock.h b/include/dt-bindings/clock/microchip,mpfs-clock.h
index 4048669bf756..79775a5134ca 100644
--- a/include/dt-bindings/clock/microchip,mpfs-clock.h
+++ b/include/dt-bindings/clock/microchip,mpfs-clock.h
@@ -45,4 +45,27 @@
#define CLK_RTCREF 33
#define CLK_MSSPLL 34
+/* Clock Conditioning Circuitry Clock IDs */
+
+#define CLK_CCC_PLL0 0
+#define CLK_CCC_PLL1 1
+#define CLK_CCC_DLL0 2
+#define CLK_CCC_DLL1 3
+
+#define CLK_CCC_PLL0_OUT0 4
+#define CLK_CCC_PLL0_OUT1 5
+#define CLK_CCC_PLL0_OUT2 6
+#define CLK_CCC_PLL0_OUT3 7
+
+#define CLK_CCC_PLL1_OUT0 8
+#define CLK_CCC_PLL1_OUT1 9
+#define CLK_CCC_PLL1_OUT2 10
+#define CLK_CCC_PLL1_OUT3 11
+
+#define CLK_CCC_DLL0_OUT0 12
+#define CLK_CCC_DLL0_OUT1 13
+
+#define CLK_CCC_DLL1_OUT0 14
+#define CLK_CCC_DLL1_OUT1 15
+
#endif /* _DT_BINDINGS_CLK_MICROCHIP_MPFS_H_ */
diff --git a/include/dt-bindings/clock/mt8195-clk.h b/include/dt-bindings/clock/mt8195-clk.h
index 95cf812a0b37..d70d017ad69c 100644
--- a/include/dt-bindings/clock/mt8195-clk.h
+++ b/include/dt-bindings/clock/mt8195-clk.h
@@ -859,6 +859,8 @@
#define CLK_VDO1_DPINTF 47
#define CLK_VDO1_DISP_MONITOR_DPINTF 48
#define CLK_VDO1_26M_SLOW 49
-#define CLK_VDO1_NR_CLK 50
+#define CLK_VDO1_DPI1_HDMI 50
+#define CLK_VDO1_NR_CLK 51
+
#endif /* _DT_BINDINGS_CLK_MT8195_H */
diff --git a/include/dt-bindings/clock/qcom,gcc-msm8909.h b/include/dt-bindings/clock/qcom,gcc-msm8909.h
new file mode 100644
index 000000000000..4394ba003425
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,gcc-msm8909.h
@@ -0,0 +1,218 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (C) 2022 Kernkonzept GmbH.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_GCC_8909_H
+#define _DT_BINDINGS_CLK_QCOM_GCC_8909_H
+
+/* PLLs */
+#define GPLL0_EARLY 0
+#define GPLL0 1
+#define GPLL1 2
+#define GPLL1_VOTE 3
+#define GPLL2_EARLY 4
+#define GPLL2 5
+#define BIMC_PLL_EARLY 6
+#define BIMC_PLL 7
+
+/* RCGs */
+#define APSS_AHB_CLK_SRC 8
+#define BIMC_DDR_CLK_SRC 9
+#define BIMC_GPU_CLK_SRC 10
+#define BLSP1_QUP1_I2C_APPS_CLK_SRC 11
+#define BLSP1_QUP1_SPI_APPS_CLK_SRC 12
+#define BLSP1_QUP2_I2C_APPS_CLK_SRC 13
+#define BLSP1_QUP2_SPI_APPS_CLK_SRC 14
+#define BLSP1_QUP3_I2C_APPS_CLK_SRC 15
+#define BLSP1_QUP3_SPI_APPS_CLK_SRC 16
+#define BLSP1_QUP4_I2C_APPS_CLK_SRC 17
+#define BLSP1_QUP4_SPI_APPS_CLK_SRC 18
+#define BLSP1_QUP5_I2C_APPS_CLK_SRC 19
+#define BLSP1_QUP5_SPI_APPS_CLK_SRC 20
+#define BLSP1_QUP6_I2C_APPS_CLK_SRC 21
+#define BLSP1_QUP6_SPI_APPS_CLK_SRC 22
+#define BLSP1_UART1_APPS_CLK_SRC 23
+#define BLSP1_UART2_APPS_CLK_SRC 24
+#define BYTE0_CLK_SRC 25
+#define CAMSS_GP0_CLK_SRC 26
+#define CAMSS_GP1_CLK_SRC 27
+#define CAMSS_TOP_AHB_CLK_SRC 28
+#define CODEC_DIGCODEC_CLK_SRC 29
+#define CRYPTO_CLK_SRC 30
+#define CSI0_CLK_SRC 31
+#define CSI0PHYTIMER_CLK_SRC 32
+#define CSI1_CLK_SRC 33
+#define ESC0_CLK_SRC 34
+#define GFX3D_CLK_SRC 35
+#define GP1_CLK_SRC 36
+#define GP2_CLK_SRC 37
+#define GP3_CLK_SRC 38
+#define MCLK0_CLK_SRC 39
+#define MCLK1_CLK_SRC 40
+#define MDP_CLK_SRC 41
+#define PCLK0_CLK_SRC 42
+#define PCNOC_BFDCD_CLK_SRC 43
+#define PDM2_CLK_SRC 44
+#define SDCC1_APPS_CLK_SRC 45
+#define SDCC2_APPS_CLK_SRC 46
+#define SYSTEM_NOC_BFDCD_CLK_SRC 47
+#define ULTAUDIO_AHBFABRIC_CLK_SRC 48
+#define ULTAUDIO_LPAIF_AUX_I2S_CLK_SRC 49
+#define ULTAUDIO_LPAIF_PRI_I2S_CLK_SRC 50
+#define ULTAUDIO_LPAIF_SEC_I2S_CLK_SRC 51
+#define ULTAUDIO_XO_CLK_SRC 52
+#define USB_HS_SYSTEM_CLK_SRC 53
+#define VCODEC0_CLK_SRC 54
+#define VFE0_CLK_SRC 55
+#define VSYNC_CLK_SRC 56
+
+/* Voteable Clocks */
+#define GCC_APSS_TCU_CLK 57
+#define GCC_BLSP1_AHB_CLK 58
+#define GCC_BLSP1_SLEEP_CLK 59
+#define GCC_BOOT_ROM_AHB_CLK 60
+#define GCC_CRYPTO_CLK 61
+#define GCC_CRYPTO_AHB_CLK 62
+#define GCC_CRYPTO_AXI_CLK 63
+#define GCC_GFX_TBU_CLK 64
+#define GCC_GFX_TCU_CLK 65
+#define GCC_GTCU_AHB_CLK 66
+#define GCC_MDP_TBU_CLK 67
+#define GCC_PRNG_AHB_CLK 68
+#define GCC_SMMU_CFG_CLK 69
+#define GCC_VENUS_TBU_CLK 70
+#define GCC_VFE_TBU_CLK 71
+
+/* Branches */
+#define GCC_BIMC_GFX_CLK 72
+#define GCC_BIMC_GPU_CLK 73
+#define GCC_BLSP1_QUP1_I2C_APPS_CLK 74
+#define GCC_BLSP1_QUP1_SPI_APPS_CLK 75
+#define GCC_BLSP1_QUP2_I2C_APPS_CLK 76
+#define GCC_BLSP1_QUP2_SPI_APPS_CLK 77
+#define GCC_BLSP1_QUP3_I2C_APPS_CLK 78
+#define GCC_BLSP1_QUP3_SPI_APPS_CLK 79
+#define GCC_BLSP1_QUP4_I2C_APPS_CLK 80
+#define GCC_BLSP1_QUP4_SPI_APPS_CLK 81
+#define GCC_BLSP1_QUP5_I2C_APPS_CLK 82
+#define GCC_BLSP1_QUP5_SPI_APPS_CLK 83
+#define GCC_BLSP1_QUP6_I2C_APPS_CLK 84
+#define GCC_BLSP1_QUP6_SPI_APPS_CLK 85
+#define GCC_BLSP1_UART1_APPS_CLK 86
+#define GCC_BLSP1_UART2_APPS_CLK 87
+#define GCC_CAMSS_AHB_CLK 88
+#define GCC_CAMSS_CSI0_CLK 89
+#define GCC_CAMSS_CSI0_AHB_CLK 90
+#define GCC_CAMSS_CSI0PHY_CLK 91
+#define GCC_CAMSS_CSI0PHYTIMER_CLK 92
+#define GCC_CAMSS_CSI0PIX_CLK 93
+#define GCC_CAMSS_CSI0RDI_CLK 94
+#define GCC_CAMSS_CSI1_CLK 95
+#define GCC_CAMSS_CSI1_AHB_CLK 96
+#define GCC_CAMSS_CSI1PHY_CLK 97
+#define GCC_CAMSS_CSI1PIX_CLK 98
+#define GCC_CAMSS_CSI1RDI_CLK 99
+#define GCC_CAMSS_CSI_VFE0_CLK 100
+#define GCC_CAMSS_GP0_CLK 101
+#define GCC_CAMSS_GP1_CLK 102
+#define GCC_CAMSS_ISPIF_AHB_CLK 103
+#define GCC_CAMSS_MCLK0_CLK 104
+#define GCC_CAMSS_MCLK1_CLK 105
+#define GCC_CAMSS_TOP_AHB_CLK 106
+#define GCC_CAMSS_VFE0_CLK 107
+#define GCC_CAMSS_VFE_AHB_CLK 108
+#define GCC_CAMSS_VFE_AXI_CLK 109
+#define GCC_CODEC_DIGCODEC_CLK 110
+#define GCC_GP1_CLK 111
+#define GCC_GP2_CLK 112
+#define GCC_GP3_CLK 113
+#define GCC_MDSS_AHB_CLK 114
+#define GCC_MDSS_AXI_CLK 115
+#define GCC_MDSS_BYTE0_CLK 116
+#define GCC_MDSS_ESC0_CLK 117
+#define GCC_MDSS_MDP_CLK 118
+#define GCC_MDSS_PCLK0_CLK 119
+#define GCC_MDSS_VSYNC_CLK 120
+#define GCC_MSS_CFG_AHB_CLK 121
+#define GCC_MSS_Q6_BIMC_AXI_CLK 122
+#define GCC_OXILI_AHB_CLK 123
+#define GCC_OXILI_GFX3D_CLK 124
+#define GCC_PDM2_CLK 125
+#define GCC_PDM_AHB_CLK 126
+#define GCC_SDCC1_AHB_CLK 127
+#define GCC_SDCC1_APPS_CLK 128
+#define GCC_SDCC2_AHB_CLK 129
+#define GCC_SDCC2_APPS_CLK 130
+#define GCC_ULTAUDIO_AHBFABRIC_IXFABRIC_CLK 131
+#define GCC_ULTAUDIO_AHBFABRIC_IXFABRIC_LPM_CLK 132
+#define GCC_ULTAUDIO_AVSYNC_XO_CLK 133
+#define GCC_ULTAUDIO_LPAIF_AUX_I2S_CLK 134
+#define GCC_ULTAUDIO_LPAIF_PRI_I2S_CLK 135
+#define GCC_ULTAUDIO_LPAIF_SEC_I2S_CLK 136
+#define GCC_ULTAUDIO_PCNOC_MPORT_CLK 137
+#define GCC_ULTAUDIO_PCNOC_SWAY_CLK 138
+#define GCC_ULTAUDIO_STC_XO_CLK 139
+#define GCC_USB2A_PHY_SLEEP_CLK 140
+#define GCC_USB_HS_AHB_CLK 141
+#define GCC_USB_HS_PHY_CFG_AHB_CLK 142
+#define GCC_USB_HS_SYSTEM_CLK 143
+#define GCC_VENUS0_AHB_CLK 144
+#define GCC_VENUS0_AXI_CLK 145
+#define GCC_VENUS0_CORE0_VCODEC0_CLK 146
+#define GCC_VENUS0_VCODEC0_CLK 147
+
+/* Resets */
+#define GCC_AUDIO_CORE_BCR 0
+#define GCC_BLSP1_BCR 1
+#define GCC_BLSP1_QUP1_BCR 2
+#define GCC_BLSP1_QUP2_BCR 3
+#define GCC_BLSP1_QUP3_BCR 4
+#define GCC_BLSP1_QUP4_BCR 5
+#define GCC_BLSP1_QUP5_BCR 6
+#define GCC_BLSP1_QUP6_BCR 7
+#define GCC_BLSP1_UART1_BCR 8
+#define GCC_BLSP1_UART2_BCR 9
+#define GCC_CAMSS_CSI0_BCR 10
+#define GCC_CAMSS_CSI0PHY_BCR 11
+#define GCC_CAMSS_CSI0PIX_BCR 12
+#define GCC_CAMSS_CSI0RDI_BCR 13
+#define GCC_CAMSS_CSI1_BCR 14
+#define GCC_CAMSS_CSI1PHY_BCR 15
+#define GCC_CAMSS_CSI1PIX_BCR 16
+#define GCC_CAMSS_CSI1RDI_BCR 17
+#define GCC_CAMSS_CSI_VFE0_BCR 18
+#define GCC_CAMSS_GP0_BCR 19
+#define GCC_CAMSS_GP1_BCR 20
+#define GCC_CAMSS_ISPIF_BCR 21
+#define GCC_CAMSS_MCLK0_BCR 22
+#define GCC_CAMSS_MCLK1_BCR 23
+#define GCC_CAMSS_PHY0_BCR 24
+#define GCC_CAMSS_TOP_BCR 25
+#define GCC_CAMSS_TOP_AHB_BCR 26
+#define GCC_CAMSS_VFE_BCR 27
+#define GCC_CRYPTO_BCR 28
+#define GCC_MDSS_BCR 29
+#define GCC_OXILI_BCR 30
+#define GCC_PDM_BCR 31
+#define GCC_PRNG_BCR 32
+#define GCC_QUSB2_PHY_BCR 33
+#define GCC_SDCC1_BCR 34
+#define GCC_SDCC2_BCR 35
+#define GCC_ULT_AUDIO_BCR 36
+#define GCC_USB2A_PHY_BCR 37
+#define GCC_USB2_HS_PHY_ONLY_BCR 38
+#define GCC_USB_HS_BCR 39
+#define GCC_VENUS0_BCR 40
+
+/* Subsystem Restart */
+#define GCC_MSS_RESTART 41
+
+/* Power Domains */
+#define MDSS_GDSC 0
+#define OXILI_GDSC 1
+#define VENUS_GDSC 2
+#define VENUS_CORE0_GDSC 3
+#define VFE_GDSC 4
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,gcc-sdm845.h b/include/dt-bindings/clock/qcom,gcc-sdm845.h
index 968fa65b9c42..d78b899263a2 100644
--- a/include/dt-bindings/clock/qcom,gcc-sdm845.h
+++ b/include/dt-bindings/clock/qcom,gcc-sdm845.h
@@ -199,6 +199,7 @@
#define GCC_QSPI_CNOC_PERIPH_AHB_CLK 189
#define GCC_LPASS_Q6_AXI_CLK 190
#define GCC_LPASS_SWAY_CLK 191
+#define GPLL6 192
/* GCC Resets */
#define GCC_MMSS_BCR 0
diff --git a/include/dt-bindings/clock/qcom,gpucc-sc8280xp.h b/include/dt-bindings/clock/qcom,gpucc-sc8280xp.h
new file mode 100644
index 000000000000..bb7da46333b0
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,gpucc-sc8280xp.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2021, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_GPU_CC_SC8280XP_H
+#define _DT_BINDINGS_CLK_QCOM_GPU_CC_SC8280XP_H
+
+/* GPU_CC clocks */
+#define GPU_CC_PLL0 0
+#define GPU_CC_PLL1 1
+#define GPU_CC_AHB_CLK 2
+#define GPU_CC_CB_CLK 3
+#define GPU_CC_CRC_AHB_CLK 4
+#define GPU_CC_CX_GMU_CLK 5
+#define GPU_CC_CX_SNOC_DVM_CLK 6
+#define GPU_CC_CXO_AON_CLK 7
+#define GPU_CC_CXO_CLK 8
+#define GPU_CC_FREQ_MEASURE_CLK 9
+#define GPU_CC_GMU_CLK_SRC 10
+#define GPU_CC_GX_GMU_CLK 11
+#define GPU_CC_GX_VSENSE_CLK 12
+#define GPU_CC_HUB_AHB_DIV_CLK_SRC 13
+#define GPU_CC_HUB_AON_CLK 14
+#define GPU_CC_HUB_CLK_SRC 15
+#define GPU_CC_HUB_CX_INT_CLK 16
+#define GPU_CC_HUB_CX_INT_DIV_CLK_SRC 17
+#define GPU_CC_SLEEP_CLK 18
+#define GPU_CC_HLOS1_VOTE_GPU_SMMU_CLK 19
+
+/* GPU_CC power domains */
+#define GPU_CC_CX_GDSC 0
+#define GPU_CC_GX_GDSC 1
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,lcc-ipq806x.h b/include/dt-bindings/clock/qcom,lcc-ipq806x.h
index 25b92bbf0ab4..e0fb4acf4ba8 100644
--- a/include/dt-bindings/clock/qcom,lcc-ipq806x.h
+++ b/include/dt-bindings/clock/qcom,lcc-ipq806x.h
@@ -19,4 +19,6 @@
#define SPDIF_CLK 10
#define AHBIX_CLK 11
+#define LCC_PCM_RESET 0
+
#endif
diff --git a/include/dt-bindings/clock/qcom,lpassaudiocc-sc7280.h b/include/dt-bindings/clock/qcom,lpassaudiocc-sc7280.h
index 20ef2ea673f3..22dcd47d4513 100644
--- a/include/dt-bindings/clock/qcom,lpassaudiocc-sc7280.h
+++ b/include/dt-bindings/clock/qcom,lpassaudiocc-sc7280.h
@@ -24,6 +24,11 @@
#define LPASS_AUDIO_CC_RX_MCLK_CLK 14
#define LPASS_AUDIO_CC_RX_MCLK_CLK_SRC 15
+/* LPASS AUDIO CC CSR */
+#define LPASS_AUDIO_SWR_RX_CGCR 0
+#define LPASS_AUDIO_SWR_TX_CGCR 1
+#define LPASS_AUDIO_SWR_WSA_CGCR 2
+
/* LPASS_AON_CC clocks */
#define LPASS_AON_CC_PLL 0
#define LPASS_AON_CC_PLL_OUT_EVEN 1
diff --git a/include/dt-bindings/clock/qcom,lpasscorecc-sc7280.h b/include/dt-bindings/clock/qcom,lpasscorecc-sc7280.h
index 28ed2a07aacc..0324c69ce968 100644
--- a/include/dt-bindings/clock/qcom,lpasscorecc-sc7280.h
+++ b/include/dt-bindings/clock/qcom,lpasscorecc-sc7280.h
@@ -19,6 +19,8 @@
#define LPASS_CORE_CC_LPM_CORE_CLK 9
#define LPASS_CORE_CC_LPM_MEM0_CORE_CLK 10
#define LPASS_CORE_CC_SYSNOC_MPORT_CORE_CLK 11
+#define LPASS_CORE_CC_EXT_MCLK0_CLK 12
+#define LPASS_CORE_CC_EXT_MCLK0_CLK_SRC 13
/* LPASS_CORE_CC power domains */
#define LPASS_CORE_CC_LPASS_CORE_HM_GDSC 0
diff --git a/include/dt-bindings/clock/qcom,rpmcc.h b/include/dt-bindings/clock/qcom,rpmcc.h
index 015db95303d1..c0ad624e930e 100644
--- a/include/dt-bindings/clock/qcom,rpmcc.h
+++ b/include/dt-bindings/clock/qcom,rpmcc.h
@@ -167,5 +167,6 @@
#define RPM_SMD_CPUSS_GNOC_A_CLK 121
#define RPM_SMD_MSS_CFG_AHB_CLK 122
#define RPM_SMD_MSS_CFG_AHB_A_CLK 123
+#define RPM_SMD_BIMC_FREQ_LOG 124
#endif
diff --git a/include/dt-bindings/clock/qcom,sm6115-dispcc.h b/include/dt-bindings/clock/qcom,sm6115-dispcc.h
new file mode 100644
index 000000000000..d1a6c45b5029
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,sm6115-dispcc.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2022, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_DISP_CC_SM6115_H
+#define _DT_BINDINGS_CLK_QCOM_DISP_CC_SM6115_H
+
+/* DISP_CC clocks */
+#define DISP_CC_PLL0 0
+#define DISP_CC_PLL0_OUT_MAIN 1
+#define DISP_CC_MDSS_AHB_CLK 2
+#define DISP_CC_MDSS_AHB_CLK_SRC 3
+#define DISP_CC_MDSS_BYTE0_CLK 4
+#define DISP_CC_MDSS_BYTE0_CLK_SRC 5
+#define DISP_CC_MDSS_BYTE0_DIV_CLK_SRC 6
+#define DISP_CC_MDSS_BYTE0_INTF_CLK 7
+#define DISP_CC_MDSS_ESC0_CLK 8
+#define DISP_CC_MDSS_ESC0_CLK_SRC 9
+#define DISP_CC_MDSS_MDP_CLK 10
+#define DISP_CC_MDSS_MDP_CLK_SRC 11
+#define DISP_CC_MDSS_MDP_LUT_CLK 12
+#define DISP_CC_MDSS_NON_GDSC_AHB_CLK 13
+#define DISP_CC_MDSS_PCLK0_CLK 14
+#define DISP_CC_MDSS_PCLK0_CLK_SRC 15
+#define DISP_CC_MDSS_ROT_CLK 16
+#define DISP_CC_MDSS_ROT_CLK_SRC 17
+#define DISP_CC_MDSS_VSYNC_CLK 18
+#define DISP_CC_MDSS_VSYNC_CLK_SRC 19
+#define DISP_CC_SLEEP_CLK 20
+#define DISP_CC_SLEEP_CLK_SRC 21
+
+/* DISP_CC GDSCR */
+#define MDSS_GDSC 0
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,sm6375-gcc.h b/include/dt-bindings/clock/qcom,sm6375-gcc.h
new file mode 100644
index 000000000000..1e9801e1cedf
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,sm6375-gcc.h
@@ -0,0 +1,234 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022, Konrad Dybcio <konrad.dybcio@somainline.org>
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_GCC_SM6375_H
+#define _DT_BINDINGS_CLK_QCOM_GCC_SM6375_H
+
+/* Clocks */
+#define GPLL0 0
+#define GPLL0_OUT_EVEN 1
+#define GPLL0_OUT_ODD 2
+#define GPLL1 3
+#define GPLL10 4
+#define GPLL11 5
+#define GPLL3 6
+#define GPLL3_OUT_EVEN 7
+#define GPLL4 8
+#define GPLL5 9
+#define GPLL6 10
+#define GPLL6_OUT_EVEN 11
+#define GPLL7 12
+#define GPLL8 13
+#define GPLL8_OUT_EVEN 14
+#define GPLL9 15
+#define GPLL9_OUT_MAIN 16
+#define GCC_AHB2PHY_CSI_CLK 17
+#define GCC_AHB2PHY_USB_CLK 18
+#define GCC_BIMC_GPU_AXI_CLK 19
+#define GCC_BOOT_ROM_AHB_CLK 20
+#define GCC_CAM_THROTTLE_NRT_CLK 21
+#define GCC_CAM_THROTTLE_RT_CLK 22
+#define GCC_CAMERA_AHB_CLK 23
+#define GCC_CAMERA_XO_CLK 24
+#define GCC_CAMSS_AXI_CLK 25
+#define GCC_CAMSS_AXI_CLK_SRC 26
+#define GCC_CAMSS_CAMNOC_ATB_CLK 27
+#define GCC_CAMSS_CAMNOC_NTS_XO_CLK 28
+#define GCC_CAMSS_CCI_0_CLK 29
+#define GCC_CAMSS_CCI_0_CLK_SRC 30
+#define GCC_CAMSS_CCI_1_CLK 31
+#define GCC_CAMSS_CCI_1_CLK_SRC 32
+#define GCC_CAMSS_CPHY_0_CLK 33
+#define GCC_CAMSS_CPHY_1_CLK 34
+#define GCC_CAMSS_CPHY_2_CLK 35
+#define GCC_CAMSS_CPHY_3_CLK 36
+#define GCC_CAMSS_CSI0PHYTIMER_CLK 37
+#define GCC_CAMSS_CSI0PHYTIMER_CLK_SRC 38
+#define GCC_CAMSS_CSI1PHYTIMER_CLK 39
+#define GCC_CAMSS_CSI1PHYTIMER_CLK_SRC 40
+#define GCC_CAMSS_CSI2PHYTIMER_CLK 41
+#define GCC_CAMSS_CSI2PHYTIMER_CLK_SRC 42
+#define GCC_CAMSS_CSI3PHYTIMER_CLK 43
+#define GCC_CAMSS_CSI3PHYTIMER_CLK_SRC 44
+#define GCC_CAMSS_MCLK0_CLK 45
+#define GCC_CAMSS_MCLK0_CLK_SRC 46
+#define GCC_CAMSS_MCLK1_CLK 47
+#define GCC_CAMSS_MCLK1_CLK_SRC 48
+#define GCC_CAMSS_MCLK2_CLK 49
+#define GCC_CAMSS_MCLK2_CLK_SRC 50
+#define GCC_CAMSS_MCLK3_CLK 51
+#define GCC_CAMSS_MCLK3_CLK_SRC 52
+#define GCC_CAMSS_MCLK4_CLK 53
+#define GCC_CAMSS_MCLK4_CLK_SRC 54
+#define GCC_CAMSS_NRT_AXI_CLK 55
+#define GCC_CAMSS_OPE_AHB_CLK 56
+#define GCC_CAMSS_OPE_AHB_CLK_SRC 57
+#define GCC_CAMSS_OPE_CLK 58
+#define GCC_CAMSS_OPE_CLK_SRC 59
+#define GCC_CAMSS_RT_AXI_CLK 60
+#define GCC_CAMSS_TFE_0_CLK 61
+#define GCC_CAMSS_TFE_0_CLK_SRC 62
+#define GCC_CAMSS_TFE_0_CPHY_RX_CLK 63
+#define GCC_CAMSS_TFE_0_CSID_CLK 64
+#define GCC_CAMSS_TFE_0_CSID_CLK_SRC 65
+#define GCC_CAMSS_TFE_1_CLK 66
+#define GCC_CAMSS_TFE_1_CLK_SRC 67
+#define GCC_CAMSS_TFE_1_CPHY_RX_CLK 68
+#define GCC_CAMSS_TFE_1_CSID_CLK 69
+#define GCC_CAMSS_TFE_1_CSID_CLK_SRC 70
+#define GCC_CAMSS_TFE_2_CLK 71
+#define GCC_CAMSS_TFE_2_CLK_SRC 72
+#define GCC_CAMSS_TFE_2_CPHY_RX_CLK 73
+#define GCC_CAMSS_TFE_2_CSID_CLK 74
+#define GCC_CAMSS_TFE_2_CSID_CLK_SRC 75
+#define GCC_CAMSS_TFE_CPHY_RX_CLK_SRC 76
+#define GCC_CAMSS_TOP_AHB_CLK 77
+#define GCC_CAMSS_TOP_AHB_CLK_SRC 78
+#define GCC_CFG_NOC_USB3_PRIM_AXI_CLK 79
+#define GCC_CPUSS_AHB_CLK_SRC 80
+#define GCC_CPUSS_AHB_POSTDIV_CLK_SRC 81
+#define GCC_CPUSS_GNOC_CLK 82
+#define GCC_DISP_AHB_CLK 83
+#define GCC_DISP_GPLL0_CLK_SRC 84
+#define GCC_DISP_GPLL0_DIV_CLK_SRC 85
+#define GCC_DISP_HF_AXI_CLK 86
+#define GCC_DISP_SLEEP_CLK 87
+#define GCC_DISP_THROTTLE_CORE_CLK 88
+#define GCC_DISP_XO_CLK 89
+#define GCC_GP1_CLK 90
+#define GCC_GP1_CLK_SRC 91
+#define GCC_GP2_CLK 92
+#define GCC_GP2_CLK_SRC 93
+#define GCC_GP3_CLK 94
+#define GCC_GP3_CLK_SRC 95
+#define GCC_GPU_CFG_AHB_CLK 96
+#define GCC_GPU_GPLL0_CLK_SRC 97
+#define GCC_GPU_GPLL0_DIV_CLK_SRC 98
+#define GCC_GPU_MEMNOC_GFX_CLK 99
+#define GCC_GPU_SNOC_DVM_GFX_CLK 100
+#define GCC_GPU_THROTTLE_CORE_CLK 101
+#define GCC_PDM2_CLK 102
+#define GCC_PDM2_CLK_SRC 103
+#define GCC_PDM_AHB_CLK 104
+#define GCC_PDM_XO4_CLK 105
+#define GCC_PRNG_AHB_CLK 106
+#define GCC_QMIP_CAMERA_NRT_AHB_CLK 107
+#define GCC_QMIP_CAMERA_RT_AHB_CLK 108
+#define GCC_QMIP_DISP_AHB_CLK 109
+#define GCC_QMIP_GPU_CFG_AHB_CLK 110
+#define GCC_QMIP_VIDEO_VCODEC_AHB_CLK 111
+#define GCC_QUPV3_WRAP0_CORE_2X_CLK 112
+#define GCC_QUPV3_WRAP0_CORE_CLK 113
+#define GCC_QUPV3_WRAP0_S0_CLK 114
+#define GCC_QUPV3_WRAP0_S0_CLK_SRC 115
+#define GCC_QUPV3_WRAP0_S1_CLK 116
+#define GCC_QUPV3_WRAP0_S1_CLK_SRC 117
+#define GCC_QUPV3_WRAP0_S2_CLK 118
+#define GCC_QUPV3_WRAP0_S2_CLK_SRC 119
+#define GCC_QUPV3_WRAP0_S3_CLK 120
+#define GCC_QUPV3_WRAP0_S3_CLK_SRC 121
+#define GCC_QUPV3_WRAP0_S4_CLK 122
+#define GCC_QUPV3_WRAP0_S4_CLK_SRC 123
+#define GCC_QUPV3_WRAP0_S5_CLK 124
+#define GCC_QUPV3_WRAP0_S5_CLK_SRC 125
+#define GCC_QUPV3_WRAP1_CORE_2X_CLK 126
+#define GCC_QUPV3_WRAP1_CORE_CLK 127
+#define GCC_QUPV3_WRAP1_S0_CLK 128
+#define GCC_QUPV3_WRAP1_S0_CLK_SRC 129
+#define GCC_QUPV3_WRAP1_S1_CLK 130
+#define GCC_QUPV3_WRAP1_S1_CLK_SRC 131
+#define GCC_QUPV3_WRAP1_S2_CLK 132
+#define GCC_QUPV3_WRAP1_S2_CLK_SRC 133
+#define GCC_QUPV3_WRAP1_S3_CLK 134
+#define GCC_QUPV3_WRAP1_S3_CLK_SRC 135
+#define GCC_QUPV3_WRAP1_S4_CLK 136
+#define GCC_QUPV3_WRAP1_S4_CLK_SRC 137
+#define GCC_QUPV3_WRAP1_S5_CLK 138
+#define GCC_QUPV3_WRAP1_S5_CLK_SRC 139
+#define GCC_QUPV3_WRAP_0_M_AHB_CLK 140
+#define GCC_QUPV3_WRAP_0_S_AHB_CLK 141
+#define GCC_QUPV3_WRAP_1_M_AHB_CLK 142
+#define GCC_QUPV3_WRAP_1_S_AHB_CLK 143
+#define GCC_RX5_PCIE_CLKREF_EN_CLK 144
+#define GCC_SDCC1_AHB_CLK 145
+#define GCC_SDCC1_APPS_CLK 146
+#define GCC_SDCC1_APPS_CLK_SRC 147
+#define GCC_SDCC1_ICE_CORE_CLK 148
+#define GCC_SDCC1_ICE_CORE_CLK_SRC 149
+#define GCC_SDCC2_AHB_CLK 150
+#define GCC_SDCC2_APPS_CLK 151
+#define GCC_SDCC2_APPS_CLK_SRC 152
+#define GCC_SYS_NOC_CPUSS_AHB_CLK 153
+#define GCC_SYS_NOC_UFS_PHY_AXI_CLK 154
+#define GCC_SYS_NOC_USB3_PRIM_AXI_CLK 155
+#define GCC_UFS_MEM_CLKREF_CLK 156
+#define GCC_UFS_PHY_AHB_CLK 157
+#define GCC_UFS_PHY_AXI_CLK 158
+#define GCC_UFS_PHY_AXI_CLK_SRC 159
+#define GCC_UFS_PHY_ICE_CORE_CLK 160
+#define GCC_UFS_PHY_ICE_CORE_CLK_SRC 161
+#define GCC_UFS_PHY_PHY_AUX_CLK 162
+#define GCC_UFS_PHY_PHY_AUX_CLK_SRC 163
+#define GCC_UFS_PHY_RX_SYMBOL_0_CLK 164
+#define GCC_UFS_PHY_TX_SYMBOL_0_CLK 165
+#define GCC_UFS_PHY_UNIPRO_CORE_CLK 166
+#define GCC_UFS_PHY_UNIPRO_CORE_CLK_SRC 167
+#define GCC_USB30_PRIM_MASTER_CLK 168
+#define GCC_USB30_PRIM_MASTER_CLK_SRC 169
+#define GCC_USB30_PRIM_MOCK_UTMI_CLK 170
+#define GCC_USB30_PRIM_MOCK_UTMI_CLK_SRC 171
+#define GCC_USB30_PRIM_MOCK_UTMI_POSTDIV_CLK_SRC 172
+#define GCC_USB30_PRIM_SLEEP_CLK 173
+#define GCC_USB3_PRIM_CLKREF_CLK 174
+#define GCC_USB3_PRIM_PHY_AUX_CLK_SRC 175
+#define GCC_USB3_PRIM_PHY_COM_AUX_CLK 176
+#define GCC_USB3_PRIM_PHY_PIPE_CLK 177
+#define GCC_VCODEC0_AXI_CLK 178
+#define GCC_VENUS_AHB_CLK 179
+#define GCC_VENUS_CTL_AXI_CLK 180
+#define GCC_VIDEO_AHB_CLK 181
+#define GCC_VIDEO_AXI0_CLK 182
+#define GCC_VIDEO_THROTTLE_CORE_CLK 183
+#define GCC_VIDEO_VCODEC0_SYS_CLK 184
+#define GCC_VIDEO_VENUS_CLK_SRC 185
+#define GCC_VIDEO_VENUS_CTL_CLK 186
+#define GCC_VIDEO_XO_CLK 187
+
+/* Resets */
+#define GCC_CAMSS_OPE_BCR 0
+#define GCC_CAMSS_TFE_BCR 1
+#define GCC_CAMSS_TOP_BCR 2
+#define GCC_GPU_BCR 3
+#define GCC_MMSS_BCR 4
+#define GCC_PDM_BCR 5
+#define GCC_PRNG_BCR 6
+#define GCC_QUPV3_WRAPPER_0_BCR 7
+#define GCC_QUPV3_WRAPPER_1_BCR 8
+#define GCC_QUSB2PHY_PRIM_BCR 9
+#define GCC_QUSB2PHY_SEC_BCR 10
+#define GCC_SDCC1_BCR 11
+#define GCC_SDCC2_BCR 12
+#define GCC_UFS_PHY_BCR 13
+#define GCC_USB30_PRIM_BCR 14
+#define GCC_USB_PHY_CFG_AHB2PHY_BCR 15
+#define GCC_VCODEC0_BCR 16
+#define GCC_VENUS_BCR 17
+#define GCC_VIDEO_INTERFACE_BCR 18
+#define GCC_USB3_DP_PHY_PRIM_BCR 19
+#define GCC_USB3_PHY_PRIM_SP0_BCR 20
+
+/* GDSCs */
+#define USB30_PRIM_GDSC 0
+#define UFS_PHY_GDSC 1
+#define CAMSS_TOP_GDSC 2
+#define VENUS_GDSC 3
+#define VCODEC0_GDSC 4
+#define HLOS1_VOTE_MM_SNOC_MMU_TBU_NRT_GDSC 5
+#define HLOS1_VOTE_MM_SNOC_MMU_TBU_RT_GDSC 6
+#define HLOS1_VOTE_TURING_MMU_TBU0_GDSC 7
+#define HLOS1_VOTE_TURING_MMU_TBU1_GDSC 8
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,sm8450-dispcc.h b/include/dt-bindings/clock/qcom,sm8450-dispcc.h
new file mode 100644
index 000000000000..fd16ca894971
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,sm8450-dispcc.h
@@ -0,0 +1,103 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2022, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_DISP_CC_SM8450_H
+#define _DT_BINDINGS_CLK_QCOM_DISP_CC_SM8450_H
+
+/* DISP_CC clocks */
+#define DISP_CC_MDSS_AHB1_CLK 0
+#define DISP_CC_MDSS_AHB_CLK 1
+#define DISP_CC_MDSS_AHB_CLK_SRC 2
+#define DISP_CC_MDSS_BYTE0_CLK 3
+#define DISP_CC_MDSS_BYTE0_CLK_SRC 4
+#define DISP_CC_MDSS_BYTE0_DIV_CLK_SRC 5
+#define DISP_CC_MDSS_BYTE0_INTF_CLK 6
+#define DISP_CC_MDSS_BYTE1_CLK 7
+#define DISP_CC_MDSS_BYTE1_CLK_SRC 8
+#define DISP_CC_MDSS_BYTE1_DIV_CLK_SRC 9
+#define DISP_CC_MDSS_BYTE1_INTF_CLK 10
+#define DISP_CC_MDSS_DPTX0_AUX_CLK 11
+#define DISP_CC_MDSS_DPTX0_AUX_CLK_SRC 12
+#define DISP_CC_MDSS_DPTX0_CRYPTO_CLK 13
+#define DISP_CC_MDSS_DPTX0_LINK_CLK 14
+#define DISP_CC_MDSS_DPTX0_LINK_CLK_SRC 15
+#define DISP_CC_MDSS_DPTX0_LINK_DIV_CLK_SRC 16
+#define DISP_CC_MDSS_DPTX0_LINK_INTF_CLK 17
+#define DISP_CC_MDSS_DPTX0_PIXEL0_CLK 18
+#define DISP_CC_MDSS_DPTX0_PIXEL0_CLK_SRC 19
+#define DISP_CC_MDSS_DPTX0_PIXEL1_CLK 20
+#define DISP_CC_MDSS_DPTX0_PIXEL1_CLK_SRC 21
+#define DISP_CC_MDSS_DPTX0_USB_ROUTER_LINK_INTF_CLK 22
+#define DISP_CC_MDSS_DPTX1_AUX_CLK 23
+#define DISP_CC_MDSS_DPTX1_AUX_CLK_SRC 24
+#define DISP_CC_MDSS_DPTX1_CRYPTO_CLK 25
+#define DISP_CC_MDSS_DPTX1_LINK_CLK 26
+#define DISP_CC_MDSS_DPTX1_LINK_CLK_SRC 27
+#define DISP_CC_MDSS_DPTX1_LINK_DIV_CLK_SRC 28
+#define DISP_CC_MDSS_DPTX1_LINK_INTF_CLK 29
+#define DISP_CC_MDSS_DPTX1_PIXEL0_CLK 30
+#define DISP_CC_MDSS_DPTX1_PIXEL0_CLK_SRC 31
+#define DISP_CC_MDSS_DPTX1_PIXEL1_CLK 32
+#define DISP_CC_MDSS_DPTX1_PIXEL1_CLK_SRC 33
+#define DISP_CC_MDSS_DPTX1_USB_ROUTER_LINK_INTF_CLK 34
+#define DISP_CC_MDSS_DPTX2_AUX_CLK 35
+#define DISP_CC_MDSS_DPTX2_AUX_CLK_SRC 36
+#define DISP_CC_MDSS_DPTX2_CRYPTO_CLK 37
+#define DISP_CC_MDSS_DPTX2_LINK_CLK 38
+#define DISP_CC_MDSS_DPTX2_LINK_CLK_SRC 39
+#define DISP_CC_MDSS_DPTX2_LINK_DIV_CLK_SRC 40
+#define DISP_CC_MDSS_DPTX2_LINK_INTF_CLK 41
+#define DISP_CC_MDSS_DPTX2_PIXEL0_CLK 42
+#define DISP_CC_MDSS_DPTX2_PIXEL0_CLK_SRC 43
+#define DISP_CC_MDSS_DPTX2_PIXEL1_CLK 44
+#define DISP_CC_MDSS_DPTX2_PIXEL1_CLK_SRC 45
+#define DISP_CC_MDSS_DPTX3_AUX_CLK 46
+#define DISP_CC_MDSS_DPTX3_AUX_CLK_SRC 47
+#define DISP_CC_MDSS_DPTX3_CRYPTO_CLK 48
+#define DISP_CC_MDSS_DPTX3_LINK_CLK 49
+#define DISP_CC_MDSS_DPTX3_LINK_CLK_SRC 50
+#define DISP_CC_MDSS_DPTX3_LINK_DIV_CLK_SRC 51
+#define DISP_CC_MDSS_DPTX3_LINK_INTF_CLK 52
+#define DISP_CC_MDSS_DPTX3_PIXEL0_CLK 53
+#define DISP_CC_MDSS_DPTX3_PIXEL0_CLK_SRC 54
+#define DISP_CC_MDSS_ESC0_CLK 55
+#define DISP_CC_MDSS_ESC0_CLK_SRC 56
+#define DISP_CC_MDSS_ESC1_CLK 57
+#define DISP_CC_MDSS_ESC1_CLK_SRC 58
+#define DISP_CC_MDSS_MDP1_CLK 59
+#define DISP_CC_MDSS_MDP_CLK 60
+#define DISP_CC_MDSS_MDP_CLK_SRC 61
+#define DISP_CC_MDSS_MDP_LUT1_CLK 62
+#define DISP_CC_MDSS_MDP_LUT_CLK 63
+#define DISP_CC_MDSS_NON_GDSC_AHB_CLK 64
+#define DISP_CC_MDSS_PCLK0_CLK 65
+#define DISP_CC_MDSS_PCLK0_CLK_SRC 66
+#define DISP_CC_MDSS_PCLK1_CLK 67
+#define DISP_CC_MDSS_PCLK1_CLK_SRC 68
+#define DISP_CC_MDSS_ROT1_CLK 69
+#define DISP_CC_MDSS_ROT_CLK 70
+#define DISP_CC_MDSS_ROT_CLK_SRC 71
+#define DISP_CC_MDSS_RSCC_AHB_CLK 72
+#define DISP_CC_MDSS_RSCC_VSYNC_CLK 73
+#define DISP_CC_MDSS_VSYNC1_CLK 74
+#define DISP_CC_MDSS_VSYNC_CLK 75
+#define DISP_CC_MDSS_VSYNC_CLK_SRC 76
+#define DISP_CC_PLL0 77
+#define DISP_CC_PLL1 78
+#define DISP_CC_SLEEP_CLK 79
+#define DISP_CC_SLEEP_CLK_SRC 80
+#define DISP_CC_XO_CLK 81
+#define DISP_CC_XO_CLK_SRC 82
+
+/* DISP_CC resets */
+#define DISP_CC_MDSS_CORE_BCR 0
+#define DISP_CC_MDSS_CORE_INT2_BCR 1
+#define DISP_CC_MDSS_RSCC_BCR 2
+
+/* DISP_CC GDSCR */
+#define MDSS_GDSC 0
+#define MDSS_INT2_GDSC 1
+
+#endif
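
(Editor's sketch, not part of the patch: a rough idea of how a board DTS might consume the SM8450 display clock controller indices above; the &dispcc and &mdss labels and the specific indices chosen are assumptions for illustration only.)

	/* Illustrative only; labels and clock choices are assumed */
	#include <dt-bindings/clock/qcom,sm8450-dispcc.h>

	&mdss {
		clocks = <&dispcc DISP_CC_MDSS_AHB_CLK>,
			 <&dispcc DISP_CC_MDSS_MDP_CLK>;
		resets = <&dispcc DISP_CC_MDSS_CORE_BCR>;
		power-domains = <&dispcc MDSS_GDSC>;
	};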
diff --git a/include/dt-bindings/clock/rockchip,rv1126-cru.h b/include/dt-bindings/clock/rockchip,rv1126-cru.h
new file mode 100644
index 000000000000..e89a3a5a4a34
--- /dev/null
+++ b/include/dt-bindings/clock/rockchip,rv1126-cru.h
@@ -0,0 +1,632 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */
+/*
+ * Copyright (c) 2019 Rockchip Electronics Co. Ltd.
+ * Author: Finley Xiao <finley.xiao@rock-chips.com>
+ */
+
+#ifndef _DT_BINDINGS_CLK_ROCKCHIP_RV1126_H
+#define _DT_BINDINGS_CLK_ROCKCHIP_RV1126_H
+
+/* pmucru-clocks indices */
+
+/* pll clocks */
+#define PLL_GPLL 1
+
+/* sclk (special clocks) */
+#define CLK_OSC0_DIV32K 2
+#define CLK_RTC32K 3
+#define CLK_WIFI_DIV 4
+#define CLK_WIFI_OSC0 5
+#define CLK_WIFI 6
+#define CLK_PMU 7
+#define SCLK_UART1_DIV 8
+#define SCLK_UART1_FRACDIV 9
+#define SCLK_UART1_MUX 10
+#define SCLK_UART1 11
+#define CLK_I2C0 12
+#define CLK_I2C2 13
+#define CLK_CAPTURE_PWM0 14
+#define CLK_PWM0 15
+#define CLK_CAPTURE_PWM1 16
+#define CLK_PWM1 17
+#define CLK_SPI0 18
+#define DBCLK_GPIO0 19
+#define CLK_PMUPVTM 20
+#define CLK_CORE_PMUPVTM 21
+#define CLK_REF12M 22
+#define CLK_USBPHY_OTG_REF 23
+#define CLK_USBPHY_HOST_REF 24
+#define CLK_REF24M 25
+#define CLK_MIPIDSIPHY_REF 26
+
+/* pclk */
+#define PCLK_PDPMU 30
+#define PCLK_PMU 31
+#define PCLK_UART1 32
+#define PCLK_I2C0 33
+#define PCLK_I2C2 34
+#define PCLK_PWM0 35
+#define PCLK_PWM1 36
+#define PCLK_SPI0 37
+#define PCLK_GPIO0 38
+#define PCLK_PMUSGRF 39
+#define PCLK_PMUGRF 40
+#define PCLK_PMUCRU 41
+#define PCLK_CHIPVEROTP 42
+#define PCLK_PDPMU_NIU 43
+#define PCLK_PMUPVTM 44
+#define PCLK_SCRKEYGEN 45
+
+#define CLKPMU_NR_CLKS (PCLK_SCRKEYGEN + 1)
+
+/* cru-clocks indices */
+
+/* pll clocks */
+#define PLL_APLL 1
+#define PLL_DPLL 2
+#define PLL_CPLL 3
+#define PLL_HPLL 4
+
+/* sclk (special clocks) */
+#define ARMCLK 5
+#define USB480M 6
+#define CLK_CORE_CPUPVTM 7
+#define CLK_CPUPVTM 8
+#define CLK_SCR1 9
+#define CLK_SCR1_CORE 10
+#define CLK_SCR1_RTC 11
+#define CLK_SCR1_JTAG 12
+#define SCLK_UART0_DIV 13
+#define SCLK_UART0_FRAC 14
+#define SCLK_UART0_MUX 15
+#define SCLK_UART0 16
+#define SCLK_UART2_DIV 17
+#define SCLK_UART2_FRAC 18
+#define SCLK_UART2_MUX 19
+#define SCLK_UART2 20
+#define SCLK_UART3_DIV 21
+#define SCLK_UART3_FRAC 22
+#define SCLK_UART3_MUX 23
+#define SCLK_UART3 24
+#define SCLK_UART4_DIV 25
+#define SCLK_UART4_FRAC 26
+#define SCLK_UART4_MUX 27
+#define SCLK_UART4 28
+#define SCLK_UART5_DIV 29
+#define SCLK_UART5_FRAC 30
+#define SCLK_UART5_MUX 31
+#define SCLK_UART5 32
+#define CLK_I2C1 33
+#define CLK_I2C3 34
+#define CLK_I2C4 35
+#define CLK_I2C5 36
+#define CLK_SPI1 37
+#define CLK_CAPTURE_PWM2 38
+#define CLK_PWM2 39
+#define DBCLK_GPIO1 40
+#define DBCLK_GPIO2 41
+#define DBCLK_GPIO3 42
+#define DBCLK_GPIO4 43
+#define CLK_SARADC 44
+#define CLK_TIMER0 45
+#define CLK_TIMER1 46
+#define CLK_TIMER2 47
+#define CLK_TIMER3 48
+#define CLK_TIMER4 49
+#define CLK_TIMER5 50
+#define CLK_CAN 51
+#define CLK_NPU_TSADC 52
+#define CLK_NPU_TSADCPHY 53
+#define CLK_CPU_TSADC 54
+#define CLK_CPU_TSADCPHY 55
+#define CLK_CRYPTO_CORE 56
+#define CLK_CRYPTO_PKA 57
+#define MCLK_I2S0_TX_DIV 58
+#define MCLK_I2S0_TX_FRACDIV 59
+#define MCLK_I2S0_TX_MUX 60
+#define MCLK_I2S0_TX 61
+#define MCLK_I2S0_RX_DIV 62
+#define MCLK_I2S0_RX_FRACDIV 63
+#define MCLK_I2S0_RX_MUX 64
+#define MCLK_I2S0_RX 65
+#define MCLK_I2S0_TX_OUT2IO 66
+#define MCLK_I2S0_RX_OUT2IO 67
+#define MCLK_I2S1_DIV 68
+#define MCLK_I2S1_FRACDIV 69
+#define MCLK_I2S1_MUX 70
+#define MCLK_I2S1 71
+#define MCLK_I2S1_OUT2IO 72
+#define MCLK_I2S2_DIV 73
+#define MCLK_I2S2_FRACDIV 74
+#define MCLK_I2S2_MUX 75
+#define MCLK_I2S2 76
+#define MCLK_I2S2_OUT2IO 77
+#define MCLK_PDM 78
+#define SCLK_ADUPWM_DIV 79
+#define SCLK_AUDPWM_FRACDIV 80
+#define SCLK_AUDPWM_MUX 81
+#define SCLK_AUDPWM 82
+#define CLK_ACDCDIG_ADC 83
+#define CLK_ACDCDIG_DAC 84
+#define CLK_ACDCDIG_I2C 85
+#define CLK_VENC_CORE 86
+#define CLK_VDEC_CORE 87
+#define CLK_VDEC_CA 88
+#define CLK_VDEC_HEVC_CA 89
+#define CLK_RGA_CORE 90
+#define CLK_IEP_CORE 91
+#define CLK_ISP_DIV 92
+#define CLK_ISP_NP5 93
+#define CLK_ISP_NUX 94
+#define CLK_ISP 95
+#define CLK_CIF_OUT_DIV 96
+#define CLK_CIF_OUT_FRACDIV 97
+#define CLK_CIF_OUT_MUX 98
+#define CLK_CIF_OUT 99
+#define CLK_MIPICSI_OUT_DIV 100
+#define CLK_MIPICSI_OUT_FRACDIV 101
+#define CLK_MIPICSI_OUT_MUX 102
+#define CLK_MIPICSI_OUT 103
+#define CLK_ISPP_DIV 104
+#define CLK_ISPP_NP5 105
+#define CLK_ISPP_NUX 106
+#define CLK_ISPP 107
+#define CLK_SDMMC 108
+#define SCLK_SDMMC_DRV 109
+#define SCLK_SDMMC_SAMPLE 110
+#define CLK_SDIO 111
+#define SCLK_SDIO_DRV 112
+#define SCLK_SDIO_SAMPLE 113
+#define CLK_EMMC 114
+#define SCLK_EMMC_DRV 115
+#define SCLK_EMMC_SAMPLE 116
+#define CLK_NANDC 117
+#define SCLK_SFC 118
+#define CLK_USBHOST_UTMI_OHCI 119
+#define CLK_USBOTG_REF 120
+#define CLK_GMAC_DIV 121
+#define CLK_GMAC_RGMII_M0 122
+#define CLK_GMAC_SRC_M0 123
+#define CLK_GMAC_RGMII_M1 124
+#define CLK_GMAC_SRC_M1 125
+#define CLK_GMAC_SRC 126
+#define CLK_GMAC_REF 127
+#define CLK_GMAC_TX_SRC 128
+#define CLK_GMAC_TX_DIV5 129
+#define CLK_GMAC_TX_DIV50 130
+#define RGMII_MODE_CLK 131
+#define CLK_GMAC_RX_SRC 132
+#define CLK_GMAC_RX_DIV2 133
+#define CLK_GMAC_RX_DIV20 134
+#define RMII_MODE_CLK 135
+#define CLK_GMAC_TX_RX 136
+#define CLK_GMAC_PTPREF 137
+#define CLK_GMAC_ETHERNET_OUT 138
+#define CLK_DDRPHY 139
+#define CLK_DDR_MON 140
+#define TMCLK_DDR_MON 141
+#define CLK_NPU_DIV 142
+#define CLK_NPU_NP5 143
+#define CLK_CORE_NPU 144
+#define CLK_CORE_NPUPVTM 145
+#define CLK_NPUPVTM 146
+#define SCLK_DDRCLK 147
+#define CLK_OTP 148
+
+/* dclk */
+#define DCLK_DECOM 150
+#define DCLK_VOP_DIV 151
+#define DCLK_VOP_FRACDIV 152
+#define DCLK_VOP_MUX 153
+#define DCLK_VOP 154
+#define DCLK_CIF 155
+#define DCLK_CIFLITE 156
+
+/* aclk */
+#define ACLK_PDBUS 160
+#define ACLK_DMAC 161
+#define ACLK_DCF 162
+#define ACLK_SPINLOCK 163
+#define ACLK_DECOM 164
+#define ACLK_PDCRYPTO 165
+#define ACLK_CRYPTO 166
+#define ACLK_PDVEPU 167
+#define ACLK_VENC 168
+#define ACLK_PDVDEC 169
+#define ACLK_PDJPEG 170
+#define ACLK_VDEC 171
+#define ACLK_JPEG 172
+#define ACLK_PDVO 173
+#define ACLK_RGA 174
+#define ACLK_VOP 175
+#define ACLK_IEP 176
+#define ACLK_PDVI_DIV 177
+#define ACLK_PDVI_NP5 178
+#define ACLK_PDVI 179
+#define ACLK_ISP 180
+#define ACLK_CIF 181
+#define ACLK_CIFLITE 182
+#define ACLK_PDISPP_DIV 183
+#define ACLK_PDISPP_NP5 184
+#define ACLK_PDISPP 185
+#define ACLK_ISPP 186
+#define ACLK_PDPHP 187
+#define ACLK_PDUSB 188
+#define ACLK_USBOTG 189
+#define ACLK_PDGMAC 190
+#define ACLK_GMAC 191
+#define ACLK_PDNPU_DIV 192
+#define ACLK_PDNPU_NP5 193
+#define ACLK_PDNPU 194
+#define ACLK_NPU 195
+
+/* hclk */
+#define HCLK_PDCORE_NIU 200
+#define HCLK_PDUSB 201
+#define HCLK_PDCRYPTO 202
+#define HCLK_CRYPTO 203
+#define HCLK_PDAUDIO 204
+#define HCLK_I2S0 205
+#define HCLK_I2S1 206
+#define HCLK_I2S2 207
+#define HCLK_PDM 208
+#define HCLK_AUDPWM 209
+#define HCLK_PDVEPU 210
+#define HCLK_VENC 211
+#define HCLK_PDVDEC 212
+#define HCLK_PDJPEG 213
+#define HCLK_VDEC 214
+#define HCLK_JPEG 215
+#define HCLK_PDVO 216
+#define HCLK_RGA 217
+#define HCLK_VOP 218
+#define HCLK_IEP 219
+#define HCLK_PDVI 220
+#define HCLK_ISP 221
+#define HCLK_CIF 222
+#define HCLK_CIFLITE 223
+#define HCLK_PDISPP 224
+#define HCLK_ISPP 225
+#define HCLK_PDPHP 226
+#define HCLK_PDSDMMC 227
+#define HCLK_SDMMC 228
+#define HCLK_PDSDIO 229
+#define HCLK_SDIO 230
+#define HCLK_PDNVM 231
+#define HCLK_EMMC 232
+#define HCLK_NANDC 233
+#define HCLK_SFC 234
+#define HCLK_SFCXIP 235
+#define HCLK_PDBUS 236
+#define HCLK_USBHOST 237
+#define HCLK_USBHOST_ARB 238
+#define HCLK_PDNPU 239
+#define HCLK_NPU 240
+
+/* pclk */
+#define PCLK_CPUPVTM 245
+#define PCLK_PDBUS 246
+#define PCLK_DCF 247
+#define PCLK_WDT 248
+#define PCLK_MAILBOX 249
+#define PCLK_UART0 250
+#define PCLK_UART2 251
+#define PCLK_UART3 252
+#define PCLK_UART4 253
+#define PCLK_UART5 254
+#define PCLK_I2C1 255
+#define PCLK_I2C3 256
+#define PCLK_I2C4 257
+#define PCLK_I2C5 258
+#define PCLK_SPI1 259
+#define PCLK_PWM2 261
+#define PCLK_GPIO1 262
+#define PCLK_GPIO2 263
+#define PCLK_GPIO3 264
+#define PCLK_GPIO4 265
+#define PCLK_SARADC 266
+#define PCLK_TIMER 267
+#define PCLK_DECOM 268
+#define PCLK_CAN 269
+#define PCLK_NPU_TSADC 270
+#define PCLK_CPU_TSADC 271
+#define PCLK_ACDCDIG 272
+#define PCLK_PDVO 273
+#define PCLK_DSIHOST 274
+#define PCLK_PDVI 275
+#define PCLK_CSIHOST 276
+#define PCLK_PDGMAC 277
+#define PCLK_GMAC 278
+#define PCLK_PDDDR 279
+#define PCLK_DDR_MON 280
+#define PCLK_PDNPU 281
+#define PCLK_NPUPVTM 282
+#define PCLK_PDTOP 283
+#define PCLK_TOPCRU 284
+#define PCLK_TOPGRF 285
+#define PCLK_CPUEMADET 286
+#define PCLK_DDRPHY 287
+#define PCLK_DSIPHY 289
+#define PCLK_CSIPHY0 290
+#define PCLK_CSIPHY1 291
+#define PCLK_USBPHY_HOST 292
+#define PCLK_USBPHY_OTG 293
+#define PCLK_OTP 294
+
+#define CLK_NR_CLKS (PCLK_OTP + 1)
+
+/* pmu soft-reset indices */
+
+/* pmu_cru_softrst_con0 */
+#define SRST_PDPMU_NIU_P 0
+#define SRST_PMU_SGRF_P 1
+#define SRST_PMU_SGRF_REMAP_P 2
+#define SRST_I2C0_P 3
+#define SRST_I2C0 4
+#define SRST_I2C2_P 7
+#define SRST_I2C2 8
+#define SRST_UART1_P 9
+#define SRST_UART1 10
+#define SRST_PWM0_P 11
+#define SRST_PWM0 12
+#define SRST_PWM1_P 13
+#define SRST_PWM1 14
+#define SRST_DDR_FAIL_SAFE 15
+
+/* pmu_cru_softrst_con1 */
+#define SRST_GPIO0_P 17
+#define SRST_GPIO0_DB 18
+#define SRST_SPI0_P 19
+#define SRST_SPI0 20
+#define SRST_PMUGRF_P 21
+#define SRST_CHIPVEROTP_P 22
+#define SRST_PMUPVTM 24
+#define SRST_PMUPVTM_P 25
+#define SRST_PMUCRU_P 30
+
+/* soft-reset indices */
+
+/* cru_softrst_con0 */
+#define SRST_CORE0_PO 0
+#define SRST_CORE1_PO 1
+#define SRST_CORE2_PO 2
+#define SRST_CORE3_PO 3
+#define SRST_CORE0 4
+#define SRST_CORE1 5
+#define SRST_CORE2 6
+#define SRST_CORE3 7
+#define SRST_CORE0_DBG 8
+#define SRST_CORE1_DBG 9
+#define SRST_CORE2_DBG 10
+#define SRST_CORE3_DBG 11
+#define SRST_NL2 12
+#define SRST_CORE_NIU_A 13
+#define SRST_DBG_DAPLITE_P 14
+#define SRST_DAPLITE_P 15
+
+/* cru_softrst_con1 */
+#define SRST_PDBUS_NIU1_A 16
+#define SRST_PDBUS_NIU1_H 17
+#define SRST_PDBUS_NIU1_P 18
+#define SRST_PDBUS_NIU2_A 19
+#define SRST_PDBUS_NIU2_H 20
+#define SRST_PDBUS_NIU3_A 21
+#define SRST_PDBUS_NIU3_H 22
+#define SRST_PDBUS_HOLD_NIU1_A 23
+#define SRST_DBG_NIU_P 24
+#define SRST_PDCORE_NIIU_H 25
+#define SRST_MUC_NIU 26
+#define SRST_DCF_A 29
+#define SRST_DCF_P 30
+#define SRST_SYSTEM_SRAM_A 31
+
+/* cru_softrst_con2 */
+#define SRST_I2C1_P 32
+#define SRST_I2C1 33
+#define SRST_I2C3_P 34
+#define SRST_I2C3 35
+#define SRST_I2C4_P 36
+#define SRST_I2C4 37
+#define SRST_I2C5_P 38
+#define SRST_I2C5 39
+#define SRST_SPI1_P 40
+#define SRST_SPI1 41
+#define SRST_MCU_CORE 42
+#define SRST_PWM2_P 44
+#define SRST_PWM2 45
+#define SRST_SPINLOCK_A 46
+
+/* cru_softrst_con3 */
+#define SRST_UART0_P 48
+#define SRST_UART0 49
+#define SRST_UART2_P 50
+#define SRST_UART2 51
+#define SRST_UART3_P 52
+#define SRST_UART3 53
+#define SRST_UART4_P 54
+#define SRST_UART4 55
+#define SRST_UART5_P 56
+#define SRST_UART5 57
+#define SRST_WDT_P 58
+#define SRST_SARADC_P 59
+#define SRST_GRF_P 61
+#define SRST_TIMER_P 62
+#define SRST_MAILBOX_P 63
+
+/* cru_softrst_con4 */
+#define SRST_TIMER0 64
+#define SRST_TIMER1 65
+#define SRST_TIMER2 66
+#define SRST_TIMER3 67
+#define SRST_TIMER4 68
+#define SRST_TIMER5 69
+#define SRST_INTMUX_P 70
+#define SRST_GPIO1_P 72
+#define SRST_GPIO1_DB 73
+#define SRST_GPIO2_P 74
+#define SRST_GPIO2_DB 75
+#define SRST_GPIO3_P 76
+#define SRST_GPIO3_DB 77
+#define SRST_GPIO4_P 78
+#define SRST_GPIO4_DB 79
+
+/* cru_softrst_con5 */
+#define SRST_CAN_P 80
+#define SRST_CAN 81
+#define SRST_DECOM_A 85
+#define SRST_DECOM_P 86
+#define SRST_DECOM_D 87
+#define SRST_PDCRYPTO_NIU_A 88
+#define SRST_PDCRYPTO_NIU_H 89
+#define SRST_CRYPTO_A 90
+#define SRST_CRYPTO_H 91
+#define SRST_CRYPTO_CORE 92
+#define SRST_CRYPTO_PKA 93
+#define SRST_SGRF_P 95
+
+/* cru_softrst_con6 */
+#define SRST_PDAUDIO_NIU_H 96
+#define SRST_PDAUDIO_NIU_P 97
+#define SRST_I2S0_H 98
+#define SRST_I2S0_TX_M 99
+#define SRST_I2S0_RX_M 100
+#define SRST_I2S1_H 101
+#define SRST_I2S1_M 102
+#define SRST_I2S2_H 103
+#define SRST_I2S2_M 104
+#define SRST_PDM_H 105
+#define SRST_PDM_M 106
+#define SRST_AUDPWM_H 107
+#define SRST_AUDPWM 108
+#define SRST_ACDCDIG_P 109
+#define SRST_ACDCDIG 110
+
+/* cru_softrst_con7 */
+#define SRST_PDVEPU_NIU_A 112
+#define SRST_PDVEPU_NIU_H 113
+#define SRST_VENC_A 114
+#define SRST_VENC_H 115
+#define SRST_VENC_CORE 116
+#define SRST_PDVDEC_NIU_A 117
+#define SRST_PDVDEC_NIU_H 118
+#define SRST_VDEC_A 119
+#define SRST_VDEC_H 120
+#define SRST_VDEC_CORE 121
+#define SRST_VDEC_CA 122
+#define SRST_VDEC_HEVC_CA 123
+#define SRST_PDJPEG_NIU_A 124
+#define SRST_PDJPEG_NIU_H 125
+#define SRST_JPEG_A 126
+#define SRST_JPEG_H 127
+
+/* cru_softrst_con8 */
+#define SRST_PDVO_NIU_A 128
+#define SRST_PDVO_NIU_H 129
+#define SRST_PDVO_NIU_P 130
+#define SRST_RGA_A 131
+#define SRST_RGA_H 132
+#define SRST_RGA_CORE 133
+#define SRST_VOP_A 134
+#define SRST_VOP_H 135
+#define SRST_VOP_D 136
+#define SRST_TXBYTEHS_DSIHOST 137
+#define SRST_DSIHOST_P 138
+#define SRST_IEP_A 139
+#define SRST_IEP_H 140
+#define SRST_IEP_CORE 141
+#define SRST_ISP_RX_P 142
+
+/* cru_softrst_con9 */
+#define SRST_PDVI_NIU_A 144
+#define SRST_PDVI_NIU_H 145
+#define SRST_PDVI_NIU_P 146
+#define SRST_ISP 147
+#define SRST_CIF_A 148
+#define SRST_CIF_H 149
+#define SRST_CIF_D 150
+#define SRST_CIF_P 151
+#define SRST_CIF_I 152
+#define SRST_CIF_RX_P 153
+#define SRST_PDISPP_NIU_A 154
+#define SRST_PDISPP_NIU_H 155
+#define SRST_ISPP_A 156
+#define SRST_ISPP_H 157
+#define SRST_ISPP 158
+#define SRST_CSIHOST_P 159
+
+/* cru_softrst_con10 */
+#define SRST_PDPHPMID_NIU_A 160
+#define SRST_PDPHPMID_NIU_H 161
+#define SRST_PDNVM_NIU_H 163
+#define SRST_SDMMC_H 164
+#define SRST_SDIO_H 165
+#define SRST_EMMC_H 166
+#define SRST_SFC_H 167
+#define SRST_SFCXIP_H 168
+#define SRST_SFC 169
+#define SRST_NANDC_H 170
+#define SRST_NANDC 171
+#define SRST_PDSDMMC_H 173
+#define SRST_PDSDIO_H 174
+
+/* cru_softrst_con11 */
+#define SRST_PDUSB_NIU_A 176
+#define SRST_PDUSB_NIU_H 177
+#define SRST_USBHOST_H 178
+#define SRST_USBHOST_ARB_H 179
+#define SRST_USBHOST_UTMI 180
+#define SRST_USBOTG_A 181
+#define SRST_USBPHY_OTG_P 182
+#define SRST_USBPHY_HOST_P 183
+#define SRST_USBPHYPOR_OTG 184
+#define SRST_USBPHYPOR_HOST 185
+#define SRST_PDGMAC_NIU_A 188
+#define SRST_PDGMAC_NIU_P 189
+#define SRST_GMAC_A 190
+
+/* cru_softrst_con12 */
+#define SRST_DDR_DFICTL_P 193
+#define SRST_DDR_MON_P 194
+#define SRST_DDR_STANDBY_P 195
+#define SRST_DDR_GRF_P 196
+#define SRST_DDR_MSCH_P 197
+#define SRST_DDR_SPLIT_A 198
+#define SRST_DDR_MSCH 199
+#define SRST_DDR_DFICTL 202
+#define SRST_DDR_STANDBY 203
+#define SRST_NPUMCU_NIU 205
+#define SRST_DDRPHY_P 206
+#define SRST_DDRPHY 207
+
+/* cru_softrst_con13 */
+#define SRST_PDNPU_NIU_A 208
+#define SRST_PDNPU_NIU_H 209
+#define SRST_PDNPU_NIU_P 210
+#define SRST_NPU_A 211
+#define SRST_NPU_H 212
+#define SRST_NPU 213
+#define SRST_NPUPVTM_P 214
+#define SRST_NPUPVTM 215
+#define SRST_NPU_TSADC_P 216
+#define SRST_NPU_TSADC 217
+#define SRST_NPU_TSADCPHY 218
+#define SRST_CIFLITE_A 220
+#define SRST_CIFLITE_H 221
+#define SRST_CIFLITE_D 222
+#define SRST_CIFLITE_RX_P 223
+
+/* cru_softrst_con14 */
+#define SRST_TOPNIU_P 224
+#define SRST_TOPCRU_P 225
+#define SRST_TOPGRF_P 226
+#define SRST_CPUEMADET_P 227
+#define SRST_CSIPHY0_P 228
+#define SRST_CSIPHY1_P 229
+#define SRST_DSIPHY_P 230
+#define SRST_CPU_TSADC_P 232
+#define SRST_CPU_TSADC 233
+#define SRST_CPU_TSADCPHY 234
+#define SRST_CPUPVTM_P 235
+#define SRST_CPUPVTM 236
+
+#endif
diff --git a/include/dt-bindings/clock/samsung,exynosautov9.h b/include/dt-bindings/clock/samsung,exynosautov9.h
index ea9f91b4eb1a..42133af6d6b9 100644
--- a/include/dt-bindings/clock/samsung,exynosautov9.h
+++ b/include/dt-bindings/clock/samsung,exynosautov9.h
@@ -185,6 +185,74 @@
#define CORE_NR_CLK 6
+/* CMU_FSYS0 */
+#define CLK_MOUT_FSYS0_BUS_USER 1
+#define CLK_MOUT_FSYS0_PCIE_USER 2
+#define CLK_GOUT_FSYS0_BUS_PCLK 3
+
+#define CLK_GOUT_FSYS0_PCIE_GEN3_2L0_X1_REFCLK 4
+#define CLK_GOUT_FSYS0_PCIE_GEN3_2L0_X2_REFCLK 5
+#define CLK_GOUT_FSYS0_PCIE_GEN3_2L0_X1_DBI_ACLK 6
+#define CLK_GOUT_FSYS0_PCIE_GEN3_2L0_X1_MSTR_ACLK 7
+#define CLK_GOUT_FSYS0_PCIE_GEN3_2L0_X1_SLV_ACLK 8
+#define CLK_GOUT_FSYS0_PCIE_GEN3_2L0_X2_DBI_ACLK 9
+#define CLK_GOUT_FSYS0_PCIE_GEN3_2L0_X2_MSTR_ACLK 10
+#define CLK_GOUT_FSYS0_PCIE_GEN3_2L0_X2_SLV_ACLK 11
+#define CLK_GOUT_FSYS0_PCIE_GEN3_2L0_X2_PIPE_CLK 12
+#define CLK_GOUT_FSYS0_PCIE_GEN3A_2L0_CLK 13
+#define CLK_GOUT_FSYS0_PCIE_GEN3B_2L0_CLK 14
+
+#define CLK_GOUT_FSYS0_PCIE_GEN3_2L1_X1_REFCLK 15
+#define CLK_GOUT_FSYS0_PCIE_GEN3_2L1_X2_REFCLK 16
+#define CLK_GOUT_FSYS0_PCIE_GEN3_2L1_X1_DBI_ACLK 17
+#define CLK_GOUT_FSYS0_PCIE_GEN3_2L1_X1_MSTR_ACLK 18
+#define CLK_GOUT_FSYS0_PCIE_GEN3_2L1_X1_SLV_ACLK 19
+#define CLK_GOUT_FSYS0_PCIE_GEN3_2L1_X2_DBI_ACLK 20
+#define CLK_GOUT_FSYS0_PCIE_GEN3_2L1_X2_MSTR_ACLK 21
+#define CLK_GOUT_FSYS0_PCIE_GEN3_2L1_X2_SLV_ACLK 22
+#define CLK_GOUT_FSYS0_PCIE_GEN3_2L1_X2_PIPE_CLK 23
+#define CLK_GOUT_FSYS0_PCIE_GEN3A_2L1_CLK 24
+#define CLK_GOUT_FSYS0_PCIE_GEN3B_2L1_CLK 25
+
+#define CLK_GOUT_FSYS0_PCIE_GEN3_4L_X2_REFCLK 26
+#define CLK_GOUT_FSYS0_PCIE_GEN3_4L_X4_REFCLK 27
+#define CLK_GOUT_FSYS0_PCIE_GEN3_4L_X2_DBI_ACLK 28
+#define CLK_GOUT_FSYS0_PCIE_GEN3_4L_X2_MSTR_ACLK 29
+#define CLK_GOUT_FSYS0_PCIE_GEN3_4L_X2_SLV_ACLK 30
+#define CLK_GOUT_FSYS0_PCIE_GEN3_4L_X4_DBI_ACLK 31
+#define CLK_GOUT_FSYS0_PCIE_GEN3_4L_X4_MSTR_ACLK 32
+#define CLK_GOUT_FSYS0_PCIE_GEN3_4L_X4_SLV_ACLK 33
+#define CLK_GOUT_FSYS0_PCIE_GEN3_4L_X4_PIPE_CLK 34
+#define CLK_GOUT_FSYS0_PCIE_GEN3A_4L_CLK 35
+#define CLK_GOUT_FSYS0_PCIE_GEN3B_4L_CLK 36
+
+#define FSYS0_NR_CLK 37
+
+/* CMU_FSYS1 */
+#define FOUT_MMC_PLL 1
+
+#define CLK_MOUT_FSYS1_BUS_USER 2
+#define CLK_MOUT_FSYS1_MMC_PLL 3
+#define CLK_MOUT_FSYS1_MMC_CARD_USER 4
+#define CLK_MOUT_FSYS1_USBDRD_USER 5
+#define CLK_MOUT_FSYS1_MMC_CARD 6
+
+#define CLK_DOUT_FSYS1_MMC_CARD 7
+
+#define CLK_GOUT_FSYS1_PCLK 8
+#define CLK_GOUT_FSYS1_MMC_CARD_SDCLKIN 9
+#define CLK_GOUT_FSYS1_MMC_CARD_ACLK 10
+#define CLK_GOUT_FSYS1_USB20DRD_0_REFCLK 11
+#define CLK_GOUT_FSYS1_USB20DRD_1_REFCLK 12
+#define CLK_GOUT_FSYS1_USB30DRD_0_REFCLK 13
+#define CLK_GOUT_FSYS1_USB30DRD_1_REFCLK 14
+#define CLK_GOUT_FSYS1_USB20_0_ACLK 15
+#define CLK_GOUT_FSYS1_USB20_1_ACLK 16
+#define CLK_GOUT_FSYS1_USB30_0_ACLK 17
+#define CLK_GOUT_FSYS1_USB30_1_ACLK 18
+
+#define FSYS1_NR_CLK 19
+
/* CMU_FSYS2 */
#define CLK_MOUT_FSYS2_BUS_USER 1
#define CLK_MOUT_FSYS2_UFS_EMBD_USER 2
@@ -226,21 +294,21 @@
#define CLK_GOUT_PERIC0_IPCLK_8 28
#define CLK_GOUT_PERIC0_IPCLK_9 29
#define CLK_GOUT_PERIC0_IPCLK_10 30
-#define CLK_GOUT_PERIC0_IPCLK_11 30
-#define CLK_GOUT_PERIC0_PCLK_0 31
-#define CLK_GOUT_PERIC0_PCLK_1 32
-#define CLK_GOUT_PERIC0_PCLK_2 33
-#define CLK_GOUT_PERIC0_PCLK_3 34
-#define CLK_GOUT_PERIC0_PCLK_4 35
-#define CLK_GOUT_PERIC0_PCLK_5 36
-#define CLK_GOUT_PERIC0_PCLK_6 37
-#define CLK_GOUT_PERIC0_PCLK_7 38
-#define CLK_GOUT_PERIC0_PCLK_8 39
-#define CLK_GOUT_PERIC0_PCLK_9 40
-#define CLK_GOUT_PERIC0_PCLK_10 41
-#define CLK_GOUT_PERIC0_PCLK_11 42
-
-#define PERIC0_NR_CLK 43
+#define CLK_GOUT_PERIC0_IPCLK_11 31
+#define CLK_GOUT_PERIC0_PCLK_0 32
+#define CLK_GOUT_PERIC0_PCLK_1 33
+#define CLK_GOUT_PERIC0_PCLK_2 34
+#define CLK_GOUT_PERIC0_PCLK_3 35
+#define CLK_GOUT_PERIC0_PCLK_4 36
+#define CLK_GOUT_PERIC0_PCLK_5 37
+#define CLK_GOUT_PERIC0_PCLK_6 38
+#define CLK_GOUT_PERIC0_PCLK_7 39
+#define CLK_GOUT_PERIC0_PCLK_8 40
+#define CLK_GOUT_PERIC0_PCLK_9 41
+#define CLK_GOUT_PERIC0_PCLK_10 42
+#define CLK_GOUT_PERIC0_PCLK_11 43
+
+#define PERIC0_NR_CLK 44
/* CMU_PERIC1 */
#define CLK_MOUT_PERIC1_BUS_USER 1
@@ -272,21 +340,21 @@
#define CLK_GOUT_PERIC1_IPCLK_8 28
#define CLK_GOUT_PERIC1_IPCLK_9 29
#define CLK_GOUT_PERIC1_IPCLK_10 30
-#define CLK_GOUT_PERIC1_IPCLK_11 30
-#define CLK_GOUT_PERIC1_PCLK_0 31
-#define CLK_GOUT_PERIC1_PCLK_1 32
-#define CLK_GOUT_PERIC1_PCLK_2 33
-#define CLK_GOUT_PERIC1_PCLK_3 34
-#define CLK_GOUT_PERIC1_PCLK_4 35
-#define CLK_GOUT_PERIC1_PCLK_5 36
-#define CLK_GOUT_PERIC1_PCLK_6 37
-#define CLK_GOUT_PERIC1_PCLK_7 38
-#define CLK_GOUT_PERIC1_PCLK_8 39
-#define CLK_GOUT_PERIC1_PCLK_9 40
-#define CLK_GOUT_PERIC1_PCLK_10 41
-#define CLK_GOUT_PERIC1_PCLK_11 42
-
-#define PERIC1_NR_CLK 43
+#define CLK_GOUT_PERIC1_IPCLK_11 31
+#define CLK_GOUT_PERIC1_PCLK_0 32
+#define CLK_GOUT_PERIC1_PCLK_1 33
+#define CLK_GOUT_PERIC1_PCLK_2 34
+#define CLK_GOUT_PERIC1_PCLK_3 35
+#define CLK_GOUT_PERIC1_PCLK_4 36
+#define CLK_GOUT_PERIC1_PCLK_5 37
+#define CLK_GOUT_PERIC1_PCLK_6 38
+#define CLK_GOUT_PERIC1_PCLK_7 39
+#define CLK_GOUT_PERIC1_PCLK_8 40
+#define CLK_GOUT_PERIC1_PCLK_9 41
+#define CLK_GOUT_PERIC1_PCLK_10 42
+#define CLK_GOUT_PERIC1_PCLK_11 43
+
+#define PERIC1_NR_CLK 44
/* CMU_PERIS */
#define CLK_MOUT_PERIS_BUS_USER 1
diff --git a/include/dt-bindings/clk/versaclock.h b/include/dt-bindings/clock/versaclock.h
index c6a6a0946564..c6a6a0946564 100644
--- a/include/dt-bindings/clk/versaclock.h
+++ b/include/dt-bindings/clock/versaclock.h
diff --git a/include/dt-bindings/firmware/imx/rsrc.h b/include/dt-bindings/firmware/imx/rsrc.h
index 43885056557c..1675de05ad33 100644
--- a/include/dt-bindings/firmware/imx/rsrc.h
+++ b/include/dt-bindings/firmware/imx/rsrc.h
@@ -37,10 +37,14 @@
#define IMX_SC_R_DC_0_BLIT2 21
#define IMX_SC_R_DC_0_BLIT_OUT 22
#define IMX_SC_R_PERF 23
+#define IMX_SC_R_USB_1_PHY 24
#define IMX_SC_R_DC_0_WARP 25
+#define IMX_SC_R_V2X_MU_0 26
+#define IMX_SC_R_V2X_MU_1 27
#define IMX_SC_R_DC_0_VIDEO0 28
#define IMX_SC_R_DC_0_VIDEO1 29
#define IMX_SC_R_DC_0_FRAC0 30
+#define IMX_SC_R_V2X_MU_2 31
#define IMX_SC_R_DC_0 32
#define IMX_SC_R_GPU_2_PID0 33
#define IMX_SC_R_DC_0_PLL_0 34
@@ -49,7 +53,10 @@
#define IMX_SC_R_DC_1_BLIT1 37
#define IMX_SC_R_DC_1_BLIT2 38
#define IMX_SC_R_DC_1_BLIT_OUT 39
+#define IMX_SC_R_V2X_MU_3 40
+#define IMX_SC_R_V2X_MU_4 41
#define IMX_SC_R_DC_1_WARP 42
+#define IMX_SC_R_SECVIO 44
#define IMX_SC_R_DC_1_VIDEO0 45
#define IMX_SC_R_DC_1_VIDEO1 46
#define IMX_SC_R_DC_1_FRAC0 47
diff --git a/include/dt-bindings/iio/adc/at91-sama5d2_adc.h b/include/dt-bindings/iio/adc/at91-sama5d2_adc.h
index 70f99dbdbb42..866d36530583 100644
--- a/include/dt-bindings/iio/adc/at91-sama5d2_adc.h
+++ b/include/dt-bindings/iio/adc/at91-sama5d2_adc.h
@@ -13,4 +13,7 @@
/* pressure channel index */
#define AT91_SAMA5D2_ADC_P_CHANNEL 26
+/* SAMA7G5 Temperature sensor channel index. */
+#define AT91_SAMA7G5_ADC_TEMP_CHANNEL 31
+
#endif
diff --git a/include/dt-bindings/iio/adc/mediatek,mt6370_adc.h b/include/dt-bindings/iio/adc/mediatek,mt6370_adc.h
new file mode 100644
index 000000000000..6ee725547763
--- /dev/null
+++ b/include/dt-bindings/iio/adc/mediatek,mt6370_adc.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause */
+
+#ifndef __DT_BINDINGS_MEDIATEK_MT6370_ADC_H__
+#define __DT_BINDINGS_MEDIATEK_MT6370_ADC_H__
+
+/* ADC Channel Index */
+#define MT6370_CHAN_VBUSDIV5 0
+#define MT6370_CHAN_VBUSDIV2 1
+#define MT6370_CHAN_VSYS 2
+#define MT6370_CHAN_VBAT 3
+#define MT6370_CHAN_TS_BAT 4
+#define MT6370_CHAN_IBUS 5
+#define MT6370_CHAN_IBAT 6
+#define MT6370_CHAN_CHG_VDDP 7
+#define MT6370_CHAN_TEMP_JC 8
+#define MT6370_CHAN_MAX 9
+
+#endif
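
(Editor's sketch, not part of the patch: the MT6370 channel indices above are meant for IIO consumers; the &mt6370_adc label and the consumer node below are assumptions for illustration only.)

	/* Illustrative only; provider label and consumer node are assumed */
	#include <dt-bindings/iio/adc/mediatek,mt6370_adc.h>

	charger {
		io-channels = <&mt6370_adc MT6370_CHAN_VBUSDIV5>,
			      <&mt6370_adc MT6370_CHAN_IBUS>;
	};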
diff --git a/include/dt-bindings/interrupt-controller/irqc-rzg2l.h b/include/dt-bindings/interrupt-controller/irqc-rzg2l.h
new file mode 100644
index 000000000000..34ce778885a1
--- /dev/null
+++ b/include/dt-bindings/interrupt-controller/irqc-rzg2l.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * This header provides constants for Renesas RZ/G2L family IRQC bindings.
+ *
+ * Copyright (C) 2022 Renesas Electronics Corp.
+ *
+ */
+
+#ifndef __DT_BINDINGS_IRQC_RZG2L_H
+#define __DT_BINDINGS_IRQC_RZG2L_H
+
+/* NMI maps to SPI0 */
+#define RZG2L_NMI 0
+
+/* IRQ0-7 map to SPI1-8 */
+#define RZG2L_IRQ0 1
+#define RZG2L_IRQ1 2
+#define RZG2L_IRQ2 3
+#define RZG2L_IRQ3 4
+#define RZG2L_IRQ4 5
+#define RZG2L_IRQ5 6
+#define RZG2L_IRQ6 7
+#define RZG2L_IRQ7 8
+
+#endif /* __DT_BINDINGS_IRQC_RZG2L_H */
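
(Editor's sketch, not part of the patch: per the comments above, RZG2L_NMI selects the NMI line and RZG2L_IRQ0..7 select the dedicated external IRQ pins; a hypothetical consumer wired to IRQ2 might look like the fragment below, with the &irqc label assumed.)

	/* Illustrative only; &irqc and the consumer node are assumed */
	#include <dt-bindings/interrupt-controller/irqc-rzg2l.h>
	#include <dt-bindings/interrupt-controller/irq.h>

	ext-device {
		interrupt-parent = <&irqc>;
		interrupts = <RZG2L_IRQ2 IRQ_TYPE_EDGE_FALLING>;
	};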
diff --git a/include/dt-bindings/leds/common.h b/include/dt-bindings/leds/common.h
index 3be89a7c20a9..9a0d33d027ff 100644
--- a/include/dt-bindings/leds/common.h
+++ b/include/dt-bindings/leds/common.h
@@ -33,7 +33,12 @@
#define LED_COLOR_ID_MULTI 8 /* For multicolor LEDs */
#define LED_COLOR_ID_RGB 9 /* For multicolor LEDs that can do arbitrary color,
so this would include RGBW and similar */
-#define LED_COLOR_ID_MAX 10
+#define LED_COLOR_ID_PURPLE 10
+#define LED_COLOR_ID_ORANGE 11
+#define LED_COLOR_ID_PINK 12
+#define LED_COLOR_ID_CYAN 13
+#define LED_COLOR_ID_LIME 14
+#define LED_COLOR_ID_MAX 15
/* Standard LED functions */
/* Keyboard LEDs, usually it would be input4::capslock etc. */
diff --git a/include/dt-bindings/memory/mt6795-larb-port.h b/include/dt-bindings/memory/mt6795-larb-port.h
new file mode 100644
index 000000000000..58cf6a6b6372
--- /dev/null
+++ b/include/dt-bindings/memory/mt6795-larb-port.h
@@ -0,0 +1,95 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2022 Collabora Ltd.
+ * Author: AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
+ */
+
+#ifndef _DT_BINDINGS_MEMORY_MT6795_LARB_PORT_H_
+#define _DT_BINDINGS_MEMORY_MT6795_LARB_PORT_H_
+
+#include <dt-bindings/memory/mtk-memory-port.h>
+
+#define M4U_LARB0_ID 0
+#define M4U_LARB1_ID 1
+#define M4U_LARB2_ID 2
+#define M4U_LARB3_ID 3
+#define M4U_LARB4_ID 4
+
+/* larb0 */
+#define M4U_PORT_DISP_OVL0 MTK_M4U_ID(M4U_LARB0_ID, 0)
+#define M4U_PORT_DISP_RDMA0 MTK_M4U_ID(M4U_LARB0_ID, 1)
+#define M4U_PORT_DISP_RDMA1 MTK_M4U_ID(M4U_LARB0_ID, 2)
+#define M4U_PORT_DISP_WDMA0 MTK_M4U_ID(M4U_LARB0_ID, 3)
+#define M4U_PORT_DISP_OVL1 MTK_M4U_ID(M4U_LARB0_ID, 4)
+#define M4U_PORT_DISP_RDMA2 MTK_M4U_ID(M4U_LARB0_ID, 5)
+#define M4U_PORT_DISP_WDMA1 MTK_M4U_ID(M4U_LARB0_ID, 6)
+#define M4U_PORT_DISP_OD_R MTK_M4U_ID(M4U_LARB0_ID, 7)
+#define M4U_PORT_DISP_OD_W MTK_M4U_ID(M4U_LARB0_ID, 8)
+#define M4U_PORT_MDP_RDMA0 MTK_M4U_ID(M4U_LARB0_ID, 9)
+#define M4U_PORT_MDP_RDMA1 MTK_M4U_ID(M4U_LARB0_ID, 10)
+#define M4U_PORT_MDP_WDMA MTK_M4U_ID(M4U_LARB0_ID, 11)
+#define M4U_PORT_MDP_WROT0 MTK_M4U_ID(M4U_LARB0_ID, 12)
+#define M4U_PORT_MDP_WROT1 MTK_M4U_ID(M4U_LARB0_ID, 13)
+
+/* larb1 */
+#define M4U_PORT_VDEC_MC MTK_M4U_ID(M4U_LARB1_ID, 0)
+#define M4U_PORT_VDEC_PP MTK_M4U_ID(M4U_LARB1_ID, 1)
+#define M4U_PORT_VDEC_UFO MTK_M4U_ID(M4U_LARB1_ID, 2)
+#define M4U_PORT_VDEC_VLD MTK_M4U_ID(M4U_LARB1_ID, 3)
+#define M4U_PORT_VDEC_VLD2 MTK_M4U_ID(M4U_LARB1_ID, 4)
+#define M4U_PORT_VDEC_AVC_MV MTK_M4U_ID(M4U_LARB1_ID, 5)
+#define M4U_PORT_VDEC_PRED_RD MTK_M4U_ID(M4U_LARB1_ID, 6)
+#define M4U_PORT_VDEC_PRED_WR MTK_M4U_ID(M4U_LARB1_ID, 7)
+#define M4U_PORT_VDEC_PPWRAP MTK_M4U_ID(M4U_LARB1_ID, 8)
+
+/* larb2 */
+#define M4U_PORT_CAM_IMGO MTK_M4U_ID(M4U_LARB2_ID, 0)
+#define M4U_PORT_CAM_RRZO MTK_M4U_ID(M4U_LARB2_ID, 1)
+#define M4U_PORT_CAM_AAO MTK_M4U_ID(M4U_LARB2_ID, 2)
+#define M4U_PORT_CAM_LCSO MTK_M4U_ID(M4U_LARB2_ID, 3)
+#define M4U_PORT_CAM_ESFKO MTK_M4U_ID(M4U_LARB2_ID, 4)
+#define M4U_PORT_CAM_IMGO_S MTK_M4U_ID(M4U_LARB2_ID, 5)
+#define M4U_PORT_CAM_LSCI MTK_M4U_ID(M4U_LARB2_ID, 6)
+#define M4U_PORT_CAM_LSCI_D MTK_M4U_ID(M4U_LARB2_ID, 7)
+#define M4U_PORT_CAM_BPCI MTK_M4U_ID(M4U_LARB2_ID, 8)
+#define M4U_PORT_CAM_BPCI_D MTK_M4U_ID(M4U_LARB2_ID, 9)
+#define M4U_PORT_CAM_UFDI MTK_M4U_ID(M4U_LARB2_ID, 10)
+#define M4U_PORT_CAM_IMGI MTK_M4U_ID(M4U_LARB2_ID, 11)
+#define M4U_PORT_CAM_IMG2O MTK_M4U_ID(M4U_LARB2_ID, 12)
+#define M4U_PORT_CAM_IMG3O MTK_M4U_ID(M4U_LARB2_ID, 13)
+#define M4U_PORT_CAM_VIPI MTK_M4U_ID(M4U_LARB2_ID, 14)
+#define M4U_PORT_CAM_VIP2I MTK_M4U_ID(M4U_LARB2_ID, 15)
+#define M4U_PORT_CAM_VIP3I MTK_M4U_ID(M4U_LARB2_ID, 16)
+#define M4U_PORT_CAM_LCEI MTK_M4U_ID(M4U_LARB2_ID, 17)
+#define M4U_PORT_CAM_RB MTK_M4U_ID(M4U_LARB2_ID, 18)
+#define M4U_PORT_CAM_RP MTK_M4U_ID(M4U_LARB2_ID, 19)
+#define M4U_PORT_CAM_WR MTK_M4U_ID(M4U_LARB2_ID, 20)
+
+/* larb3 */
+#define M4U_PORT_VENC_RCPU MTK_M4U_ID(M4U_LARB3_ID, 0)
+#define M4U_PORT_VENC_REC MTK_M4U_ID(M4U_LARB3_ID, 1)
+#define M4U_PORT_VENC_BSDMA MTK_M4U_ID(M4U_LARB3_ID, 2)
+#define M4U_PORT_VENC_SV_COMV MTK_M4U_ID(M4U_LARB3_ID, 3)
+#define M4U_PORT_VENC_RD_COMV MTK_M4U_ID(M4U_LARB3_ID, 4)
+#define M4U_PORT_JPGENC_BSDMA MTK_M4U_ID(M4U_LARB3_ID, 5)
+#define M4U_PORT_REMDC_SDMA MTK_M4U_ID(M4U_LARB3_ID, 6)
+#define M4U_PORT_REMDC_BSDMA MTK_M4U_ID(M4U_LARB3_ID, 7)
+#define M4U_PORT_JPGENC_RDMA MTK_M4U_ID(M4U_LARB3_ID, 8)
+#define M4U_PORT_JPGENC_SDMA MTK_M4U_ID(M4U_LARB3_ID, 9)
+#define M4U_PORT_JPGDEC_WDMA MTK_M4U_ID(M4U_LARB3_ID, 10)
+#define M4U_PORT_JPGDEC_BSDMA MTK_M4U_ID(M4U_LARB3_ID, 11)
+#define M4U_PORT_VENC_CUR_LUMA MTK_M4U_ID(M4U_LARB3_ID, 12)
+#define M4U_PORT_VENC_CUR_CHROMA MTK_M4U_ID(M4U_LARB3_ID, 13)
+#define M4U_PORT_VENC_REF_LUMA MTK_M4U_ID(M4U_LARB3_ID, 14)
+#define M4U_PORT_VENC_REF_CHROMA MTK_M4U_ID(M4U_LARB3_ID, 15)
+#define M4U_PORT_REMDC_WDMA MTK_M4U_ID(M4U_LARB3_ID, 16)
+#define M4U_PORT_VENC_NBM_RDMA MTK_M4U_ID(M4U_LARB3_ID, 17)
+#define M4U_PORT_VENC_NBM_WDMA MTK_M4U_ID(M4U_LARB3_ID, 18)
+
+/* larb4 */
+#define M4U_PORT_MJC_MV_RD MTK_M4U_ID(M4U_LARB4_ID, 0)
+#define M4U_PORT_MJC_MV_WR MTK_M4U_ID(M4U_LARB4_ID, 1)
+#define M4U_PORT_MJC_DMA_RD MTK_M4U_ID(M4U_LARB4_ID, 2)
+#define M4U_PORT_MJC_DMA_WR MTK_M4U_ID(M4U_LARB4_ID, 3)
+
+#endif
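
(Editor's sketch, not part of the patch: each M4U_PORT_* constant above packs a larb index and a port index into a single cell via MTK_M4U_ID(), which is what the MediaTek IOMMU expects in its iommu specifier; the &iommu and &venc labels below are assumptions for illustration only.)

	/* Illustrative only; node labels are assumed */
	#include <dt-bindings/memory/mt6795-larb-port.h>

	&venc {
		iommus = <&iommu M4U_PORT_VENC_REC>,
			 <&iommu M4U_PORT_VENC_BSDMA>,
			 <&iommu M4U_PORT_VENC_CUR_LUMA>;
	};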
diff --git a/include/dt-bindings/memory/tegra234-mc.h b/include/dt-bindings/memory/tegra234-mc.h
index 62987b47ce81..bd71cc1d7990 100644
--- a/include/dt-bindings/memory/tegra234-mc.h
+++ b/include/dt-bindings/memory/tegra234-mc.h
@@ -34,6 +34,16 @@
#define TEGRA234_SID_HOST1X 0x27
#define TEGRA234_SID_VIC 0x34
+/* Shared stream IDs */
+#define TEGRA234_SID_HOST1X_CTX0 0x35
+#define TEGRA234_SID_HOST1X_CTX1 0x36
+#define TEGRA234_SID_HOST1X_CTX2 0x37
+#define TEGRA234_SID_HOST1X_CTX3 0x38
+#define TEGRA234_SID_HOST1X_CTX4 0x39
+#define TEGRA234_SID_HOST1X_CTX5 0x3a
+#define TEGRA234_SID_HOST1X_CTX6 0x3b
+#define TEGRA234_SID_HOST1X_CTX7 0x3c
+
/*
* memory client IDs
*/
diff --git a/include/dt-bindings/phy/phy.h b/include/dt-bindings/phy/phy.h
index f48c9acf251e..6b901b342348 100644
--- a/include/dt-bindings/phy/phy.h
+++ b/include/dt-bindings/phy/phy.h
@@ -22,5 +22,6 @@
#define PHY_TYPE_QSGMII 9
#define PHY_TYPE_DPHY 10
#define PHY_TYPE_CPHY 11
+#define PHY_TYPE_USXGMII 12
#endif /* _DT_BINDINGS_PHY */
diff --git a/include/dt-bindings/pinctrl/k3.h b/include/dt-bindings/pinctrl/k3.h
index a5204ab91d3e..54df633f9bfe 100644
--- a/include/dt-bindings/pinctrl/k3.h
+++ b/include/dt-bindings/pinctrl/k3.h
@@ -29,19 +29,22 @@
#define PIN_INPUT_PULLUP (INPUT_EN | PULL_UP)
#define PIN_INPUT_PULLDOWN (INPUT_EN | PULL_DOWN)
+#define AM62AX_IOPAD(pa, val, muxmode) (((pa) & 0x1fff)) ((val) | (muxmode))
+#define AM62AX_MCU_IOPAD(pa, val, muxmode) (((pa) & 0x1fff)) ((val) | (muxmode))
+
+#define AM62X_IOPAD(pa, val, muxmode) (((pa) & 0x1fff)) ((val) | (muxmode))
+#define AM62X_MCU_IOPAD(pa, val, muxmode) (((pa) & 0x1fff)) ((val) | (muxmode))
+
+#define AM64X_IOPAD(pa, val, muxmode) (((pa) & 0x1fff)) ((val) | (muxmode))
+#define AM64X_MCU_IOPAD(pa, val, muxmode) (((pa) & 0x1fff)) ((val) | (muxmode))
+
#define AM65X_IOPAD(pa, val, muxmode) (((pa) & 0x1fff)) ((val) | (muxmode))
#define AM65X_WKUP_IOPAD(pa, val, muxmode) (((pa) & 0x1fff)) ((val) | (muxmode))
#define J721E_IOPAD(pa, val, muxmode) (((pa) & 0x1fff)) ((val) | (muxmode))
#define J721E_WKUP_IOPAD(pa, val, muxmode) (((pa) & 0x1fff)) ((val) | (muxmode))
-#define AM64X_IOPAD(pa, val, muxmode) (((pa) & 0x1fff)) ((val) | (muxmode))
-#define AM64X_MCU_IOPAD(pa, val, muxmode) (((pa) & 0x1fff)) ((val) | (muxmode))
-
#define J721S2_IOPAD(pa, val, muxmode) (((pa) & 0x1fff)) ((val) | (muxmode))
#define J721S2_WKUP_IOPAD(pa, val, muxmode) (((pa) & 0x1fff)) ((val) | (muxmode))
-#define AM62X_IOPAD(pa, val, muxmode) (((pa) & 0x1fff)) ((val) | (muxmode))
-#define AM62X_MCU_IOPAD(pa, val, muxmode) (((pa) & 0x1fff)) ((val) | (muxmode))
-
#endif
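
(Editor's sketch, not part of the patch: each *_IOPAD macro above expands to the two cells pinctrl-single expects, the padconf register offset masked to 13 bits followed by the pin configuration value ORed with the mux mode; a hedged example of the newly added AM62X_IOPAD macro in a board pinmux node, with node labels, offsets and pin names assumed for illustration.)

	/* Illustrative only; labels, offsets and pad names are assumed */
	#include <dt-bindings/pinctrl/k3.h>

	&main_pmx0 {
		main_uart0_pins_default: main-uart0-default-pins {
			pinctrl-single,pins = <
				AM62X_IOPAD(0x1c8, PIN_INPUT, 0)  /* UART0_RXD */
				AM62X_IOPAD(0x1cc, PIN_OUTPUT, 0) /* UART0_TXD */
			>;
		};
	};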
diff --git a/include/dt-bindings/pinctrl/mediatek,mt8188-pinfunc.h b/include/dt-bindings/pinctrl/mediatek,mt8188-pinfunc.h
new file mode 100644
index 000000000000..2688da2f621f
--- /dev/null
+++ b/include/dt-bindings/pinctrl/mediatek,mt8188-pinfunc.h
@@ -0,0 +1,1280 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (C) 2022 MediaTek Inc.
+ * Author: Hui Liu <hui.liu@mediatek.com>
+ */
+
+#ifndef __MEDIATEK_MT8188_PINFUNC_H
+#define __MEDIATEK_MT8188_PINFUNC_H
+
+#include "mt65xx.h"
+
+#define PINMUX_GPIO0__FUNC_B_GPIO0 (MTK_PIN_NO(0) | 0)
+#define PINMUX_GPIO0__FUNC_B0_TP_GPIO0_AO (MTK_PIN_NO(0) | 1)
+#define PINMUX_GPIO0__FUNC_O_SPIM5_CSB (MTK_PIN_NO(0) | 2)
+#define PINMUX_GPIO0__FUNC_O_UTXD1 (MTK_PIN_NO(0) | 3)
+#define PINMUX_GPIO0__FUNC_O_DMIC3_CLK (MTK_PIN_NO(0) | 4)
+#define PINMUX_GPIO0__FUNC_B0_I2SIN_MCK (MTK_PIN_NO(0) | 5)
+#define PINMUX_GPIO0__FUNC_O_I2SO2_MCK (MTK_PIN_NO(0) | 6)
+#define PINMUX_GPIO0__FUNC_B0_DBG_MON_A0 (MTK_PIN_NO(0) | 7)
+
+#define PINMUX_GPIO1__FUNC_B_GPIO1 (MTK_PIN_NO(1) | 0)
+#define PINMUX_GPIO1__FUNC_B0_TP_GPIO1_AO (MTK_PIN_NO(1) | 1)
+#define PINMUX_GPIO1__FUNC_O_SPIM5_CLK (MTK_PIN_NO(1) | 2)
+#define PINMUX_GPIO1__FUNC_I1_URXD1 (MTK_PIN_NO(1) | 3)
+#define PINMUX_GPIO1__FUNC_I0_DMIC3_DAT (MTK_PIN_NO(1) | 4)
+#define PINMUX_GPIO1__FUNC_B0_I2SIN_BCK (MTK_PIN_NO(1) | 5)
+#define PINMUX_GPIO1__FUNC_B0_I2SO2_BCK (MTK_PIN_NO(1) | 6)
+#define PINMUX_GPIO1__FUNC_B0_DBG_MON_A1 (MTK_PIN_NO(1) | 7)
+
+#define PINMUX_GPIO2__FUNC_B_GPIO2 (MTK_PIN_NO(2) | 0)
+#define PINMUX_GPIO2__FUNC_B0_TP_GPIO2_AO (MTK_PIN_NO(2) | 1)
+#define PINMUX_GPIO2__FUNC_B0_SPIM5_MOSI (MTK_PIN_NO(2) | 2)
+#define PINMUX_GPIO2__FUNC_O_URTS1 (MTK_PIN_NO(2) | 3)
+#define PINMUX_GPIO2__FUNC_I0_DMIC3_DAT_R (MTK_PIN_NO(2) | 4)
+#define PINMUX_GPIO2__FUNC_B0_I2SIN_WS (MTK_PIN_NO(2) | 5)
+#define PINMUX_GPIO2__FUNC_B0_I2SO2_WS (MTK_PIN_NO(2) | 6)
+#define PINMUX_GPIO2__FUNC_B0_DBG_MON_A2 (MTK_PIN_NO(2) | 7)
+
+#define PINMUX_GPIO3__FUNC_B_GPIO3 (MTK_PIN_NO(3) | 0)
+#define PINMUX_GPIO3__FUNC_B0_TP_GPIO3_AO (MTK_PIN_NO(3) | 1)
+#define PINMUX_GPIO3__FUNC_B0_SPIM5_MISO (MTK_PIN_NO(3) | 2)
+#define PINMUX_GPIO3__FUNC_I1_UCTS1 (MTK_PIN_NO(3) | 3)
+#define PINMUX_GPIO3__FUNC_O_DMIC4_CLK (MTK_PIN_NO(3) | 4)
+#define PINMUX_GPIO3__FUNC_I0_I2SIN_D0 (MTK_PIN_NO(3) | 5)
+#define PINMUX_GPIO3__FUNC_O_I2SO2_D0 (MTK_PIN_NO(3) | 6)
+#define PINMUX_GPIO3__FUNC_B0_DBG_MON_A3 (MTK_PIN_NO(3) | 7)
+
+#define PINMUX_GPIO4__FUNC_B_GPIO4 (MTK_PIN_NO(4) | 0)
+#define PINMUX_GPIO4__FUNC_B0_TP_GPIO4_AO (MTK_PIN_NO(4) | 1)
+#define PINMUX_GPIO4__FUNC_I0_SPDIF_IN2 (MTK_PIN_NO(4) | 2)
+#define PINMUX_GPIO4__FUNC_O_I2SO1_MCK (MTK_PIN_NO(4) | 3)
+#define PINMUX_GPIO4__FUNC_I0_DMIC4_DAT (MTK_PIN_NO(4) | 4)
+#define PINMUX_GPIO4__FUNC_I0_I2SIN_D1 (MTK_PIN_NO(4) | 5)
+#define PINMUX_GPIO4__FUNC_O_I2SO2_D1 (MTK_PIN_NO(4) | 6)
+#define PINMUX_GPIO4__FUNC_B0_DBG_MON_A4 (MTK_PIN_NO(4) | 7)
+
+#define PINMUX_GPIO5__FUNC_B_GPIO5 (MTK_PIN_NO(5) | 0)
+#define PINMUX_GPIO5__FUNC_B0_TP_GPIO5_AO (MTK_PIN_NO(5) | 1)
+#define PINMUX_GPIO5__FUNC_I0_SPDIF_IN1 (MTK_PIN_NO(5) | 2)
+#define PINMUX_GPIO5__FUNC_O_I2SO1_BCK (MTK_PIN_NO(5) | 3)
+#define PINMUX_GPIO5__FUNC_I0_DMIC4_DAT_R (MTK_PIN_NO(5) | 4)
+#define PINMUX_GPIO5__FUNC_I0_I2SIN_D2 (MTK_PIN_NO(5) | 5)
+#define PINMUX_GPIO5__FUNC_O_I2SO2_D2 (MTK_PIN_NO(5) | 6)
+#define PINMUX_GPIO5__FUNC_B0_DBG_MON_A5 (MTK_PIN_NO(5) | 7)
+
+#define PINMUX_GPIO6__FUNC_B_GPIO6 (MTK_PIN_NO(6) | 0)
+#define PINMUX_GPIO6__FUNC_B0_TP_GPIO6_AO (MTK_PIN_NO(6) | 1)
+#define PINMUX_GPIO6__FUNC_I0_SPDIF_IN0 (MTK_PIN_NO(6) | 2)
+#define PINMUX_GPIO6__FUNC_O_I2SO1_WS (MTK_PIN_NO(6) | 3)
+#define PINMUX_GPIO6__FUNC_O_DMIC1_CLK (MTK_PIN_NO(6) | 4)
+#define PINMUX_GPIO6__FUNC_I0_I2SIN_D3 (MTK_PIN_NO(6) | 5)
+#define PINMUX_GPIO6__FUNC_O_I2SO2_D3 (MTK_PIN_NO(6) | 6)
+#define PINMUX_GPIO6__FUNC_B0_MD32_0_GPIO0 (MTK_PIN_NO(6) | 7)
+
+#define PINMUX_GPIO7__FUNC_B_GPIO7 (MTK_PIN_NO(7) | 0)
+#define PINMUX_GPIO7__FUNC_B0_TP_GPIO7_AO (MTK_PIN_NO(7) | 1)
+#define PINMUX_GPIO7__FUNC_O_SPIM3_CSB (MTK_PIN_NO(7) | 2)
+#define PINMUX_GPIO7__FUNC_B0_TDMIN_MCK (MTK_PIN_NO(7) | 3)
+#define PINMUX_GPIO7__FUNC_I0_DMIC1_DAT (MTK_PIN_NO(7) | 4)
+#define PINMUX_GPIO7__FUNC_O_CMVREF0 (MTK_PIN_NO(7) | 5)
+#define PINMUX_GPIO7__FUNC_O_CLKM0 (MTK_PIN_NO(7) | 6)
+#define PINMUX_GPIO7__FUNC_B0_DBG_MON_A6 (MTK_PIN_NO(7) | 7)
+
+#define PINMUX_GPIO8__FUNC_B_GPIO8 (MTK_PIN_NO(8) | 0)
+#define PINMUX_GPIO8__FUNC_B0_TP_GPIO0_AO (MTK_PIN_NO(8) | 1)
+#define PINMUX_GPIO8__FUNC_O_SPIM3_CLK (MTK_PIN_NO(8) | 2)
+#define PINMUX_GPIO8__FUNC_B0_TDMIN_BCK (MTK_PIN_NO(8) | 3)
+#define PINMUX_GPIO8__FUNC_I0_DMIC1_DAT_R (MTK_PIN_NO(8) | 4)
+#define PINMUX_GPIO8__FUNC_O_CMVREF1 (MTK_PIN_NO(8) | 5)
+#define PINMUX_GPIO8__FUNC_O_CLKM1 (MTK_PIN_NO(8) | 6)
+#define PINMUX_GPIO8__FUNC_B0_DBG_MON_A7 (MTK_PIN_NO(8) | 7)
+
+#define PINMUX_GPIO9__FUNC_B_GPIO9 (MTK_PIN_NO(9) | 0)
+#define PINMUX_GPIO9__FUNC_B0_TP_GPIO1_AO (MTK_PIN_NO(9) | 1)
+#define PINMUX_GPIO9__FUNC_B0_SPIM3_MOSI (MTK_PIN_NO(9) | 2)
+#define PINMUX_GPIO9__FUNC_B0_TDMIN_LRCK (MTK_PIN_NO(9) | 3)
+#define PINMUX_GPIO9__FUNC_O_DMIC2_CLK (MTK_PIN_NO(9) | 4)
+#define PINMUX_GPIO9__FUNC_O_CMFLASH0 (MTK_PIN_NO(9) | 5)
+#define PINMUX_GPIO9__FUNC_O_PWM_0 (MTK_PIN_NO(9) | 6)
+#define PINMUX_GPIO9__FUNC_B0_DBG_MON_A8 (MTK_PIN_NO(9) | 7)
+
+#define PINMUX_GPIO10__FUNC_B_GPIO10 (MTK_PIN_NO(10) | 0)
+#define PINMUX_GPIO10__FUNC_B0_TP_GPIO2_AO (MTK_PIN_NO(10) | 1)
+#define PINMUX_GPIO10__FUNC_B0_SPIM3_MISO (MTK_PIN_NO(10) | 2)
+#define PINMUX_GPIO10__FUNC_I0_TDMIN_DI (MTK_PIN_NO(10) | 3)
+#define PINMUX_GPIO10__FUNC_I0_DMIC2_DAT (MTK_PIN_NO(10) | 4)
+#define PINMUX_GPIO10__FUNC_O_CMFLASH1 (MTK_PIN_NO(10) | 5)
+#define PINMUX_GPIO10__FUNC_O_PWM_1 (MTK_PIN_NO(10) | 6)
+#define PINMUX_GPIO10__FUNC_B0_DBG_MON_A9 (MTK_PIN_NO(10) | 7)
+
+#define PINMUX_GPIO11__FUNC_B_GPIO11 (MTK_PIN_NO(11) | 0)
+#define PINMUX_GPIO11__FUNC_B0_TP_GPIO3_AO (MTK_PIN_NO(11) | 1)
+#define PINMUX_GPIO11__FUNC_O_SPDIF_OUT (MTK_PIN_NO(11) | 2)
+#define PINMUX_GPIO11__FUNC_O_I2SO1_D0 (MTK_PIN_NO(11) | 3)
+#define PINMUX_GPIO11__FUNC_I0_DMIC2_DAT_R (MTK_PIN_NO(11) | 4)
+#define PINMUX_GPIO11__FUNC_I0_DVFSRC_EXT_REQ (MTK_PIN_NO(11) | 5)
+#define PINMUX_GPIO11__FUNC_O_CMVREF6 (MTK_PIN_NO(11) | 6)
+#define PINMUX_GPIO11__FUNC_B0_DBG_MON_A10 (MTK_PIN_NO(11) | 7)
+
+#define PINMUX_GPIO12__FUNC_B_GPIO12 (MTK_PIN_NO(12) | 0)
+#define PINMUX_GPIO12__FUNC_B0_TP_GPIO4_AO (MTK_PIN_NO(12) | 1)
+#define PINMUX_GPIO12__FUNC_O_SPIM4_CSB (MTK_PIN_NO(12) | 2)
+#define PINMUX_GPIO12__FUNC_B1_JTMS_SEL3 (MTK_PIN_NO(12) | 3)
+#define PINMUX_GPIO12__FUNC_B1_APU_JTAG_TMS (MTK_PIN_NO(12) | 4)
+#define PINMUX_GPIO12__FUNC_I0_VPU_UDI_TMS (MTK_PIN_NO(12) | 5)
+#define PINMUX_GPIO12__FUNC_I0_IPU_JTAG_TMS (MTK_PIN_NO(12) | 6)
+#define PINMUX_GPIO12__FUNC_I0_HDMITX20_HTPLG (MTK_PIN_NO(12) | 7)
+
+#define PINMUX_GPIO13__FUNC_B_GPIO13 (MTK_PIN_NO(13) | 0)
+#define PINMUX_GPIO13__FUNC_B0_TP_GPIO5_AO (MTK_PIN_NO(13) | 1)
+#define PINMUX_GPIO13__FUNC_O_SPIM4_CLK (MTK_PIN_NO(13) | 2)
+#define PINMUX_GPIO13__FUNC_I0_JTCK_SEL3 (MTK_PIN_NO(13) | 3)
+#define PINMUX_GPIO13__FUNC_I0_APU_JTAG_TCK (MTK_PIN_NO(13) | 4)
+#define PINMUX_GPIO13__FUNC_I0_VPU_UDI_TCK (MTK_PIN_NO(13) | 5)
+#define PINMUX_GPIO13__FUNC_I0_IPU_JTAG_TCK (MTK_PIN_NO(13) | 6)
+#define PINMUX_GPIO13__FUNC_B1_HDMITX20_CEC (MTK_PIN_NO(13) | 7)
+
+#define PINMUX_GPIO14__FUNC_B_GPIO14 (MTK_PIN_NO(14) | 0)
+#define PINMUX_GPIO14__FUNC_B0_TP_GPIO6_AO (MTK_PIN_NO(14) | 1)
+#define PINMUX_GPIO14__FUNC_B0_SPIM4_MOSI (MTK_PIN_NO(14) | 2)
+#define PINMUX_GPIO14__FUNC_I1_JTDI_SEL3 (MTK_PIN_NO(14) | 3)
+#define PINMUX_GPIO14__FUNC_I1_APU_JTAG_TDI (MTK_PIN_NO(14) | 4)
+#define PINMUX_GPIO14__FUNC_I0_VPU_UDI_TDI (MTK_PIN_NO(14) | 5)
+#define PINMUX_GPIO14__FUNC_I0_IPU_JTAG_TDI (MTK_PIN_NO(14) | 6)
+#define PINMUX_GPIO14__FUNC_B1_HDMITX20_SCL (MTK_PIN_NO(14) | 7)
+
+#define PINMUX_GPIO15__FUNC_B_GPIO15 (MTK_PIN_NO(15) | 0)
+#define PINMUX_GPIO15__FUNC_B0_TP_GPIO7_AO (MTK_PIN_NO(15) | 1)
+#define PINMUX_GPIO15__FUNC_B0_SPIM4_MISO (MTK_PIN_NO(15) | 2)
+#define PINMUX_GPIO15__FUNC_O_JTDO_SEL3 (MTK_PIN_NO(15) | 3)
+#define PINMUX_GPIO15__FUNC_O_APU_JTAG_TDO (MTK_PIN_NO(15) | 4)
+#define PINMUX_GPIO15__FUNC_O_VPU_UDI_TDO (MTK_PIN_NO(15) | 5)
+#define PINMUX_GPIO15__FUNC_O_IPU_JTAG_TDO (MTK_PIN_NO(15) | 6)
+#define PINMUX_GPIO15__FUNC_B1_HDMITX20_SDA (MTK_PIN_NO(15) | 7)
+
+#define PINMUX_GPIO16__FUNC_B_GPIO16 (MTK_PIN_NO(16) | 0)
+#define PINMUX_GPIO16__FUNC_B0_TP_GPIO0_AO (MTK_PIN_NO(16) | 1)
+#define PINMUX_GPIO16__FUNC_O_UTXD3 (MTK_PIN_NO(16) | 2)
+#define PINMUX_GPIO16__FUNC_I1_JTRSTn_SEL3 (MTK_PIN_NO(16) | 3)
+#define PINMUX_GPIO16__FUNC_I0_APU_JTAG_TRST (MTK_PIN_NO(16) | 4)
+#define PINMUX_GPIO16__FUNC_I0_VPU_UDI_NTRST (MTK_PIN_NO(16) | 5)
+#define PINMUX_GPIO16__FUNC_I0_IPU_JTAG_TRST (MTK_PIN_NO(16) | 6)
+#define PINMUX_GPIO16__FUNC_O_HDMITX20_PWR5V (MTK_PIN_NO(16) | 7)
+
+#define PINMUX_GPIO17__FUNC_B_GPIO17 (MTK_PIN_NO(17) | 0)
+#define PINMUX_GPIO17__FUNC_B0_TP_GPIO1_AO (MTK_PIN_NO(17) | 1)
+#define PINMUX_GPIO17__FUNC_I1_URXD3 (MTK_PIN_NO(17) | 2)
+#define PINMUX_GPIO17__FUNC_O_CMFLASH2 (MTK_PIN_NO(17) | 3)
+#define PINMUX_GPIO17__FUNC_I0_EDP_TX_HPD (MTK_PIN_NO(17) | 4)
+#define PINMUX_GPIO17__FUNC_I0_DVFSRC_EXT_REQ (MTK_PIN_NO(17) | 5)
+#define PINMUX_GPIO17__FUNC_O_CMVREF7 (MTK_PIN_NO(17) | 6)
+#define PINMUX_GPIO17__FUNC_B0_MD32_0_GPIO1 (MTK_PIN_NO(17) | 7)
+
+#define PINMUX_GPIO18__FUNC_B_GPIO18 (MTK_PIN_NO(18) | 0)
+#define PINMUX_GPIO18__FUNC_B0_TP_GPIO2_AO (MTK_PIN_NO(18) | 1)
+#define PINMUX_GPIO18__FUNC_O_CMFLASH0 (MTK_PIN_NO(18) | 2)
+#define PINMUX_GPIO18__FUNC_O_CMVREF4 (MTK_PIN_NO(18) | 3)
+#define PINMUX_GPIO18__FUNC_B0_TDMIN_MCK (MTK_PIN_NO(18) | 4)
+#define PINMUX_GPIO18__FUNC_O_UTXD1 (MTK_PIN_NO(18) | 5)
+#define PINMUX_GPIO18__FUNC_O_TP_UTXD1_AO (MTK_PIN_NO(18) | 6)
+#define PINMUX_GPIO18__FUNC_B0_DBG_MON_A11 (MTK_PIN_NO(18) | 7)
+
+#define PINMUX_GPIO19__FUNC_B_GPIO19 (MTK_PIN_NO(19) | 0)
+#define PINMUX_GPIO19__FUNC_B0_TP_GPIO3_AO (MTK_PIN_NO(19) | 1)
+#define PINMUX_GPIO19__FUNC_O_CMFLASH1 (MTK_PIN_NO(19) | 2)
+#define PINMUX_GPIO19__FUNC_O_CMVREF5 (MTK_PIN_NO(19) | 3)
+#define PINMUX_GPIO19__FUNC_B0_TDMIN_BCK (MTK_PIN_NO(19) | 4)
+#define PINMUX_GPIO19__FUNC_I1_URXD1 (MTK_PIN_NO(19) | 5)
+#define PINMUX_GPIO19__FUNC_I1_TP_URXD1_AO (MTK_PIN_NO(19) | 6)
+#define PINMUX_GPIO19__FUNC_B0_DBG_MON_A12 (MTK_PIN_NO(19) | 7)
+
+#define PINMUX_GPIO20__FUNC_B_GPIO20 (MTK_PIN_NO(20) | 0)
+#define PINMUX_GPIO20__FUNC_B0_TP_GPIO4_AO (MTK_PIN_NO(20) | 1)
+#define PINMUX_GPIO20__FUNC_O_CMFLASH2 (MTK_PIN_NO(20) | 2)
+#define PINMUX_GPIO20__FUNC_O_CLKM2 (MTK_PIN_NO(20) | 3)
+#define PINMUX_GPIO20__FUNC_B0_TDMIN_LRCK (MTK_PIN_NO(20) | 4)
+#define PINMUX_GPIO20__FUNC_O_URTS1 (MTK_PIN_NO(20) | 5)
+#define PINMUX_GPIO20__FUNC_O_TP_URTS1_AO (MTK_PIN_NO(20) | 6)
+#define PINMUX_GPIO20__FUNC_B0_DBG_MON_A13 (MTK_PIN_NO(20) | 7)
+
+#define PINMUX_GPIO21__FUNC_B_GPIO21 (MTK_PIN_NO(21) | 0)
+#define PINMUX_GPIO21__FUNC_B0_TP_GPIO5_AO (MTK_PIN_NO(21) | 1)
+#define PINMUX_GPIO21__FUNC_O_CMFLASH3 (MTK_PIN_NO(21) | 2)
+#define PINMUX_GPIO21__FUNC_O_CLKM3 (MTK_PIN_NO(21) | 3)
+#define PINMUX_GPIO21__FUNC_I0_TDMIN_DI (MTK_PIN_NO(21) | 4)
+#define PINMUX_GPIO21__FUNC_I1_UCTS1 (MTK_PIN_NO(21) | 5)
+#define PINMUX_GPIO21__FUNC_I1_TP_UCTS1_AO (MTK_PIN_NO(21) | 6)
+#define PINMUX_GPIO21__FUNC_B0_DBG_MON_A14 (MTK_PIN_NO(21) | 7)
+
+#define PINMUX_GPIO22__FUNC_B_GPIO22 (MTK_PIN_NO(22) | 0)
+#define PINMUX_GPIO22__FUNC_O_CMMCLK0 (MTK_PIN_NO(22) | 1)
+#define PINMUX_GPIO22__FUNC_B0_TP_GPIO6_AO (MTK_PIN_NO(22) | 5)
+#define PINMUX_GPIO22__FUNC_B0_DBG_MON_A15 (MTK_PIN_NO(22) | 7)
+
+#define PINMUX_GPIO23__FUNC_B_GPIO23 (MTK_PIN_NO(23) | 0)
+#define PINMUX_GPIO23__FUNC_O_CMMCLK1 (MTK_PIN_NO(23) | 1)
+#define PINMUX_GPIO23__FUNC_O_PWM_2 (MTK_PIN_NO(23) | 3)
+#define PINMUX_GPIO23__FUNC_B1_PCIE_PHY_I2C_SCL (MTK_PIN_NO(23) | 4)
+#define PINMUX_GPIO23__FUNC_B0_TP_GPIO7_AO (MTK_PIN_NO(23) | 5)
+#define PINMUX_GPIO23__FUNC_I0_DP_TX_HPD (MTK_PIN_NO(23) | 6)
+#define PINMUX_GPIO23__FUNC_B0_DBG_MON_A16 (MTK_PIN_NO(23) | 7)
+
+#define PINMUX_GPIO24__FUNC_B_GPIO24 (MTK_PIN_NO(24) | 0)
+#define PINMUX_GPIO24__FUNC_O_CMMCLK2 (MTK_PIN_NO(24) | 1)
+#define PINMUX_GPIO24__FUNC_O_PWM_3 (MTK_PIN_NO(24) | 3)
+#define PINMUX_GPIO24__FUNC_B1_PCIE_PHY_I2C_SDA (MTK_PIN_NO(24) | 4)
+#define PINMUX_GPIO24__FUNC_I0_DVFSRC_EXT_REQ (MTK_PIN_NO(24) | 5)
+#define PINMUX_GPIO24__FUNC_I0_EDP_TX_HPD (MTK_PIN_NO(24) | 6)
+#define PINMUX_GPIO24__FUNC_B0_MD32_0_GPIO2 (MTK_PIN_NO(24) | 7)
+
+#define PINMUX_GPIO25__FUNC_B_GPIO25 (MTK_PIN_NO(25) | 0)
+#define PINMUX_GPIO25__FUNC_O_LCM_RST (MTK_PIN_NO(25) | 1)
+#define PINMUX_GPIO25__FUNC_O_LCM1_RST (MTK_PIN_NO(25) | 2)
+#define PINMUX_GPIO25__FUNC_I0_DP_TX_HPD (MTK_PIN_NO(25) | 3)
+
+#define PINMUX_GPIO26__FUNC_B_GPIO26 (MTK_PIN_NO(26) | 0)
+#define PINMUX_GPIO26__FUNC_I0_DSI_TE (MTK_PIN_NO(26) | 1)
+#define PINMUX_GPIO26__FUNC_I0_DSI1_TE (MTK_PIN_NO(26) | 2)
+#define PINMUX_GPIO26__FUNC_I0_EDP_TX_HPD (MTK_PIN_NO(26) | 3)
+
+#define PINMUX_GPIO27__FUNC_B_GPIO27 (MTK_PIN_NO(27) | 0)
+#define PINMUX_GPIO27__FUNC_O_LCM1_RST (MTK_PIN_NO(27) | 1)
+#define PINMUX_GPIO27__FUNC_O_LCM_RST (MTK_PIN_NO(27) | 2)
+#define PINMUX_GPIO27__FUNC_I0_DP_TX_HPD (MTK_PIN_NO(27) | 3)
+#define PINMUX_GPIO27__FUNC_O_CMVREF2 (MTK_PIN_NO(27) | 4)
+#define PINMUX_GPIO27__FUNC_O_mbistwriteen_trigger (MTK_PIN_NO(27) | 5)
+#define PINMUX_GPIO27__FUNC_O_PWM_2 (MTK_PIN_NO(27) | 6)
+#define PINMUX_GPIO27__FUNC_B0_DBG_MON_A17 (MTK_PIN_NO(27) | 7)
+
+#define PINMUX_GPIO28__FUNC_B_GPIO28 (MTK_PIN_NO(28) | 0)
+#define PINMUX_GPIO28__FUNC_I0_DSI1_TE (MTK_PIN_NO(28) | 1)
+#define PINMUX_GPIO28__FUNC_I0_DSI_TE (MTK_PIN_NO(28) | 2)
+#define PINMUX_GPIO28__FUNC_I0_EDP_TX_HPD (MTK_PIN_NO(28) | 3)
+#define PINMUX_GPIO28__FUNC_O_CMVREF3 (MTK_PIN_NO(28) | 4)
+#define PINMUX_GPIO28__FUNC_O_mbistreaden_trigger (MTK_PIN_NO(28) | 5)
+#define PINMUX_GPIO28__FUNC_O_PWM_3 (MTK_PIN_NO(28) | 6)
+#define PINMUX_GPIO28__FUNC_B0_DBG_MON_A18 (MTK_PIN_NO(28) | 7)
+
+#define PINMUX_GPIO29__FUNC_B_GPIO29 (MTK_PIN_NO(29) | 0)
+#define PINMUX_GPIO29__FUNC_O_DISP_PWM0 (MTK_PIN_NO(29) | 1)
+#define PINMUX_GPIO29__FUNC_O_DISP_PWM1 (MTK_PIN_NO(29) | 2)
+
+#define PINMUX_GPIO30__FUNC_B_GPIO30 (MTK_PIN_NO(30) | 0)
+#define PINMUX_GPIO30__FUNC_O_DISP_PWM1 (MTK_PIN_NO(30) | 1)
+#define PINMUX_GPIO30__FUNC_O_DISP_PWM0 (MTK_PIN_NO(30) | 2)
+#define PINMUX_GPIO30__FUNC_O_CMFLASH3 (MTK_PIN_NO(30) | 3)
+#define PINMUX_GPIO30__FUNC_O_PWM_1 (MTK_PIN_NO(30) | 4)
+#define PINMUX_GPIO30__FUNC_B0_DBG_MON_A19 (MTK_PIN_NO(30) | 7)
+
+#define PINMUX_GPIO31__FUNC_B_GPIO31 (MTK_PIN_NO(31) | 0)
+#define PINMUX_GPIO31__FUNC_O_UTXD0 (MTK_PIN_NO(31) | 1)
+#define PINMUX_GPIO31__FUNC_O_TP_UTXD1_AO (MTK_PIN_NO(31) | 2)
+#define PINMUX_GPIO31__FUNC_O_ADSP_UTXD0 (MTK_PIN_NO(31) | 3)
+#define PINMUX_GPIO31__FUNC_O_TP_UTXD2_AO (MTK_PIN_NO(31) | 4)
+#define PINMUX_GPIO31__FUNC_O_MD32_0_TXD (MTK_PIN_NO(31) | 5)
+#define PINMUX_GPIO31__FUNC_O_MD32_1_TXD (MTK_PIN_NO(31) | 6)
+#define PINMUX_GPIO31__FUNC_O_SSPM_UTXD_AO (MTK_PIN_NO(31) | 7)
+
+#define PINMUX_GPIO32__FUNC_B_GPIO32 (MTK_PIN_NO(32) | 0)
+#define PINMUX_GPIO32__FUNC_I1_URXD0 (MTK_PIN_NO(32) | 1)
+#define PINMUX_GPIO32__FUNC_I1_TP_URXD1_AO (MTK_PIN_NO(32) | 2)
+#define PINMUX_GPIO32__FUNC_I1_ADSP_URXD0 (MTK_PIN_NO(32) | 3)
+#define PINMUX_GPIO32__FUNC_I1_TP_URXD2_AO (MTK_PIN_NO(32) | 4)
+#define PINMUX_GPIO32__FUNC_I1_MD32_0_RXD (MTK_PIN_NO(32) | 5)
+#define PINMUX_GPIO32__FUNC_I1_MD32_1_RXD (MTK_PIN_NO(32) | 6)
+#define PINMUX_GPIO32__FUNC_I1_SSPM_URXD_AO (MTK_PIN_NO(32) | 7)
+
+#define PINMUX_GPIO33__FUNC_B_GPIO33 (MTK_PIN_NO(33) | 0)
+#define PINMUX_GPIO33__FUNC_O_UTXD1 (MTK_PIN_NO(33) | 1)
+#define PINMUX_GPIO33__FUNC_O_URTS2 (MTK_PIN_NO(33) | 2)
+#define PINMUX_GPIO33__FUNC_O_ADSP_UTXD0 (MTK_PIN_NO(33) | 3)
+#define PINMUX_GPIO33__FUNC_O_TP_UTXD1_AO (MTK_PIN_NO(33) | 4)
+#define PINMUX_GPIO33__FUNC_O_mbistwriteen_trigger (MTK_PIN_NO(33) | 5)
+#define PINMUX_GPIO33__FUNC_O_MD32_0_TXD (MTK_PIN_NO(33) | 6)
+#define PINMUX_GPIO33__FUNC_O_SSPM_UTXD_AO (MTK_PIN_NO(33) | 7)
+
+#define PINMUX_GPIO34__FUNC_B_GPIO34 (MTK_PIN_NO(34) | 0)
+#define PINMUX_GPIO34__FUNC_I1_URXD1 (MTK_PIN_NO(34) | 1)
+#define PINMUX_GPIO34__FUNC_I1_UCTS2 (MTK_PIN_NO(34) | 2)
+#define PINMUX_GPIO34__FUNC_I1_ADSP_URXD0 (MTK_PIN_NO(34) | 3)
+#define PINMUX_GPIO34__FUNC_I1_TP_URXD1_AO (MTK_PIN_NO(34) | 4)
+#define PINMUX_GPIO34__FUNC_O_mbistreaden_trigger (MTK_PIN_NO(34) | 5)
+#define PINMUX_GPIO34__FUNC_I1_MD32_0_RXD (MTK_PIN_NO(34) | 6)
+#define PINMUX_GPIO34__FUNC_I1_SSPM_URXD_AO (MTK_PIN_NO(34) | 7)
+
+#define PINMUX_GPIO35__FUNC_B_GPIO35 (MTK_PIN_NO(35) | 0)
+#define PINMUX_GPIO35__FUNC_O_UTXD2 (MTK_PIN_NO(35) | 1)
+#define PINMUX_GPIO35__FUNC_O_URTS1 (MTK_PIN_NO(35) | 2)
+#define PINMUX_GPIO35__FUNC_O_ADSP_UTXD0 (MTK_PIN_NO(35) | 3)
+#define PINMUX_GPIO35__FUNC_O_TP_URTS1_AO (MTK_PIN_NO(35) | 4)
+#define PINMUX_GPIO35__FUNC_O_TP_UTXD2_AO (MTK_PIN_NO(35) | 5)
+#define PINMUX_GPIO35__FUNC_O_MD32_1_TXD (MTK_PIN_NO(35) | 6)
+#define PINMUX_GPIO35__FUNC_B0_DBG_MON_A20 (MTK_PIN_NO(35) | 7)
+
+#define PINMUX_GPIO36__FUNC_B_GPIO36 (MTK_PIN_NO(36) | 0)
+#define PINMUX_GPIO36__FUNC_I1_URXD2 (MTK_PIN_NO(36) | 1)
+#define PINMUX_GPIO36__FUNC_I1_UCTS1 (MTK_PIN_NO(36) | 2)
+#define PINMUX_GPIO36__FUNC_I1_ADSP_URXD0 (MTK_PIN_NO(36) | 3)
+#define PINMUX_GPIO36__FUNC_I1_TP_UCTS1_AO (MTK_PIN_NO(36) | 4)
+#define PINMUX_GPIO36__FUNC_I1_TP_URXD2_AO (MTK_PIN_NO(36) | 5)
+#define PINMUX_GPIO36__FUNC_I1_MD32_1_RXD (MTK_PIN_NO(36) | 6)
+#define PINMUX_GPIO36__FUNC_B0_DBG_MON_A21 (MTK_PIN_NO(36) | 7)
+
+#define PINMUX_GPIO37__FUNC_B_GPIO37 (MTK_PIN_NO(37) | 0)
+#define PINMUX_GPIO37__FUNC_B1_JTMS_SEL1 (MTK_PIN_NO(37) | 1)
+#define PINMUX_GPIO37__FUNC_I0_UDI_TMS (MTK_PIN_NO(37) | 2)
+#define PINMUX_GPIO37__FUNC_I1_SPM_JTAG_TMS (MTK_PIN_NO(37) | 3)
+#define PINMUX_GPIO37__FUNC_I1_ADSP_JTAG0_TMS (MTK_PIN_NO(37) | 4)
+#define PINMUX_GPIO37__FUNC_I1_SCP_JTAG0_TMS (MTK_PIN_NO(37) | 5)
+#define PINMUX_GPIO37__FUNC_I1_CCU0_JTAG_TMS (MTK_PIN_NO(37) | 6)
+#define PINMUX_GPIO37__FUNC_I1_MCUPM_JTAG_TMS (MTK_PIN_NO(37) | 7)
+
+#define PINMUX_GPIO38__FUNC_B_GPIO38 (MTK_PIN_NO(38) | 0)
+#define PINMUX_GPIO38__FUNC_I0_JTCK_SEL1 (MTK_PIN_NO(38) | 1)
+#define PINMUX_GPIO38__FUNC_I0_UDI_TCK (MTK_PIN_NO(38) | 2)
+#define PINMUX_GPIO38__FUNC_I1_SPM_JTAG_TCK (MTK_PIN_NO(38) | 3)
+#define PINMUX_GPIO38__FUNC_I0_ADSP_JTAG0_TCK (MTK_PIN_NO(38) | 4)
+#define PINMUX_GPIO38__FUNC_I1_SCP_JTAG0_TCK (MTK_PIN_NO(38) | 5)
+#define PINMUX_GPIO38__FUNC_I1_CCU0_JTAG_TCK (MTK_PIN_NO(38) | 6)
+#define PINMUX_GPIO38__FUNC_I1_MCUPM_JTAG_TCK (MTK_PIN_NO(38) | 7)
+
+#define PINMUX_GPIO39__FUNC_B_GPIO39 (MTK_PIN_NO(39) | 0)
+#define PINMUX_GPIO39__FUNC_I1_JTDI_SEL1 (MTK_PIN_NO(39) | 1)
+#define PINMUX_GPIO39__FUNC_I0_UDI_TDI (MTK_PIN_NO(39) | 2)
+#define PINMUX_GPIO39__FUNC_I1_SPM_JTAG_TDI (MTK_PIN_NO(39) | 3)
+#define PINMUX_GPIO39__FUNC_I1_ADSP_JTAG0_TDI (MTK_PIN_NO(39) | 4)
+#define PINMUX_GPIO39__FUNC_I1_SCP_JTAG0_TDI (MTK_PIN_NO(39) | 5)
+#define PINMUX_GPIO39__FUNC_I1_CCU0_JTAG_TDI (MTK_PIN_NO(39) | 6)
+#define PINMUX_GPIO39__FUNC_I1_MCUPM_JTAG_TDI (MTK_PIN_NO(39) | 7)
+
+#define PINMUX_GPIO40__FUNC_B_GPIO40 (MTK_PIN_NO(40) | 0)
+#define PINMUX_GPIO40__FUNC_O_JTDO_SEL1 (MTK_PIN_NO(40) | 1)
+#define PINMUX_GPIO40__FUNC_O_UDI_TDO (MTK_PIN_NO(40) | 2)
+#define PINMUX_GPIO40__FUNC_O_SPM_JTAG_TDO (MTK_PIN_NO(40) | 3)
+#define PINMUX_GPIO40__FUNC_O_ADSP_JTAG0_TDO (MTK_PIN_NO(40) | 4)
+#define PINMUX_GPIO40__FUNC_O_SCP_JTAG0_TDO (MTK_PIN_NO(40) | 5)
+#define PINMUX_GPIO40__FUNC_O_CCU0_JTAG_TDO (MTK_PIN_NO(40) | 6)
+#define PINMUX_GPIO40__FUNC_O_MCUPM_JTAG_TDO (MTK_PIN_NO(40) | 7)
+
+#define PINMUX_GPIO41__FUNC_B_GPIO41 (MTK_PIN_NO(41) | 0)
+#define PINMUX_GPIO41__FUNC_I1_JTRSTn_SEL1 (MTK_PIN_NO(41) | 1)
+#define PINMUX_GPIO41__FUNC_I0_UDI_NTRST (MTK_PIN_NO(41) | 2)
+#define PINMUX_GPIO41__FUNC_I0_SPM_JTAG_TRSTN (MTK_PIN_NO(41) | 3)
+#define PINMUX_GPIO41__FUNC_I1_ADSP_JTAG0_TRSTN (MTK_PIN_NO(41) | 4)
+#define PINMUX_GPIO41__FUNC_I0_SCP_JTAG0_TRSTN (MTK_PIN_NO(41) | 5)
+#define PINMUX_GPIO41__FUNC_I1_CCU0_JTAG_TRST (MTK_PIN_NO(41) | 6)
+#define PINMUX_GPIO41__FUNC_I0_MCUPM_JTAG_TRSTN (MTK_PIN_NO(41) | 7)
+
+#define PINMUX_GPIO42__FUNC_B_GPIO42 (MTK_PIN_NO(42) | 0)
+#define PINMUX_GPIO42__FUNC_B1_KPCOL0 (MTK_PIN_NO(42) | 1)
+
+#define PINMUX_GPIO43__FUNC_B_GPIO43 (MTK_PIN_NO(43) | 0)
+#define PINMUX_GPIO43__FUNC_B1_KPCOL1 (MTK_PIN_NO(43) | 1)
+#define PINMUX_GPIO43__FUNC_I0_DP_TX_HPD (MTK_PIN_NO(43) | 2)
+#define PINMUX_GPIO43__FUNC_O_CMFLASH2 (MTK_PIN_NO(43) | 3)
+#define PINMUX_GPIO43__FUNC_I0_DVFSRC_EXT_REQ (MTK_PIN_NO(43) | 4)
+#define PINMUX_GPIO43__FUNC_O_mbistwriteen_trigger (MTK_PIN_NO(43) | 7)
+
+#define PINMUX_GPIO44__FUNC_B_GPIO44 (MTK_PIN_NO(44) | 0)
+#define PINMUX_GPIO44__FUNC_B1_KPROW0 (MTK_PIN_NO(44) | 1)
+
+#define PINMUX_GPIO45__FUNC_B_GPIO45 (MTK_PIN_NO(45) | 0)
+#define PINMUX_GPIO45__FUNC_B1_KPROW1 (MTK_PIN_NO(45) | 1)
+#define PINMUX_GPIO45__FUNC_I0_EDP_TX_HPD (MTK_PIN_NO(45) | 2)
+#define PINMUX_GPIO45__FUNC_O_CMFLASH3 (MTK_PIN_NO(45) | 3)
+#define PINMUX_GPIO45__FUNC_B0_I2SIN_MCK (MTK_PIN_NO(45) | 4)
+#define PINMUX_GPIO45__FUNC_O_mbistreaden_trigger (MTK_PIN_NO(45) | 7)
+
+#define PINMUX_GPIO46__FUNC_B_GPIO46 (MTK_PIN_NO(46) | 0)
+#define PINMUX_GPIO46__FUNC_I0_DP_TX_HPD (MTK_PIN_NO(46) | 1)
+#define PINMUX_GPIO46__FUNC_O_PWM_0 (MTK_PIN_NO(46) | 2)
+#define PINMUX_GPIO46__FUNC_I0_VBUSVALID_2P (MTK_PIN_NO(46) | 3)
+#define PINMUX_GPIO46__FUNC_B0_DBG_MON_A22 (MTK_PIN_NO(46) | 7)
+
+#define PINMUX_GPIO47__FUNC_B_GPIO47 (MTK_PIN_NO(47) | 0)
+#define PINMUX_GPIO47__FUNC_I1_WAKEN (MTK_PIN_NO(47) | 1)
+#define PINMUX_GPIO47__FUNC_O_GDU_TROOPS_DET0 (MTK_PIN_NO(47) | 6)
+
+#define PINMUX_GPIO48__FUNC_B_GPIO48 (MTK_PIN_NO(48) | 0)
+#define PINMUX_GPIO48__FUNC_O_PERSTN (MTK_PIN_NO(48) | 1)
+#define PINMUX_GPIO48__FUNC_O_GDU_TROOPS_DET1 (MTK_PIN_NO(48) | 6)
+
+#define PINMUX_GPIO49__FUNC_B_GPIO49 (MTK_PIN_NO(49) | 0)
+#define PINMUX_GPIO49__FUNC_B1_CLKREQN (MTK_PIN_NO(49) | 1)
+#define PINMUX_GPIO49__FUNC_O_GDU_TROOPS_DET2 (MTK_PIN_NO(49) | 6)
+
+#define PINMUX_GPIO50__FUNC_B_GPIO50 (MTK_PIN_NO(50) | 0)
+#define PINMUX_GPIO50__FUNC_O_HDMITX20_PWR5V (MTK_PIN_NO(50) | 1)
+#define PINMUX_GPIO50__FUNC_I1_IDDIG_1P (MTK_PIN_NO(50) | 3)
+#define PINMUX_GPIO50__FUNC_I1_SCP_JTAG1_TMS (MTK_PIN_NO(50) | 4)
+#define PINMUX_GPIO50__FUNC_I1_SSPM_JTAG_TMS (MTK_PIN_NO(50) | 5)
+#define PINMUX_GPIO50__FUNC_I1_MD32_0_JTAG_TMS (MTK_PIN_NO(50) | 6)
+#define PINMUX_GPIO50__FUNC_I1_MD32_1_JTAG_TMS (MTK_PIN_NO(50) | 7)
+
+#define PINMUX_GPIO51__FUNC_B_GPIO51 (MTK_PIN_NO(51) | 0)
+#define PINMUX_GPIO51__FUNC_I0_HDMITX20_HTPLG (MTK_PIN_NO(51) | 1)
+#define PINMUX_GPIO51__FUNC_I0_EDP_TX_HPD (MTK_PIN_NO(51) | 2)
+#define PINMUX_GPIO51__FUNC_O_USB_DRVVBUS_1P (MTK_PIN_NO(51) | 3)
+#define PINMUX_GPIO51__FUNC_I1_SCP_JTAG1_TCK (MTK_PIN_NO(51) | 4)
+#define PINMUX_GPIO51__FUNC_I1_SSPM_JTAG_TCK (MTK_PIN_NO(51) | 5)
+#define PINMUX_GPIO51__FUNC_I1_MD32_0_JTAG_TCK (MTK_PIN_NO(51) | 6)
+#define PINMUX_GPIO51__FUNC_I1_MD32_1_JTAG_TCK (MTK_PIN_NO(51) | 7)
+
+#define PINMUX_GPIO52__FUNC_B_GPIO52 (MTK_PIN_NO(52) | 0)
+#define PINMUX_GPIO52__FUNC_B1_HDMITX20_CEC (MTK_PIN_NO(52) | 1)
+#define PINMUX_GPIO52__FUNC_I0_VBUSVALID_1P (MTK_PIN_NO(52) | 3)
+#define PINMUX_GPIO52__FUNC_I1_SCP_JTAG1_TDI (MTK_PIN_NO(52) | 4)
+#define PINMUX_GPIO52__FUNC_I1_SSPM_JTAG_TDI (MTK_PIN_NO(52) | 5)
+#define PINMUX_GPIO52__FUNC_I1_MD32_0_JTAG_TDI (MTK_PIN_NO(52) | 6)
+#define PINMUX_GPIO52__FUNC_I1_MD32_1_JTAG_TDI (MTK_PIN_NO(52) | 7)
+
+#define PINMUX_GPIO53__FUNC_B_GPIO53 (MTK_PIN_NO(53) | 0)
+#define PINMUX_GPIO53__FUNC_B1_HDMITX20_SCL (MTK_PIN_NO(53) | 1)
+#define PINMUX_GPIO53__FUNC_I1_IDDIG_2P (MTK_PIN_NO(53) | 3)
+#define PINMUX_GPIO53__FUNC_O_SCP_JTAG1_TDO (MTK_PIN_NO(53) | 4)
+#define PINMUX_GPIO53__FUNC_O_SSPM_JTAG_TDO (MTK_PIN_NO(53) | 5)
+#define PINMUX_GPIO53__FUNC_O_MD32_0_JTAG_TDO (MTK_PIN_NO(53) | 6)
+#define PINMUX_GPIO53__FUNC_O_MD32_1_JTAG_TDO (MTK_PIN_NO(53) | 7)
+
+#define PINMUX_GPIO54__FUNC_B_GPIO54 (MTK_PIN_NO(54) | 0)
+#define PINMUX_GPIO54__FUNC_B1_HDMITX20_SDA (MTK_PIN_NO(54) | 1)
+#define PINMUX_GPIO54__FUNC_O_USB_DRVVBUS_2P (MTK_PIN_NO(54) | 3)
+#define PINMUX_GPIO54__FUNC_I0_SCP_JTAG1_TRSTN (MTK_PIN_NO(54) | 4)
+#define PINMUX_GPIO54__FUNC_I0_SSPM_JTAG_TRSTN (MTK_PIN_NO(54) | 5)
+#define PINMUX_GPIO54__FUNC_I1_MD32_0_JTAG_TRST (MTK_PIN_NO(54) | 6)
+#define PINMUX_GPIO54__FUNC_I1_MD32_1_JTAG_TRST (MTK_PIN_NO(54) | 7)
+
+#define PINMUX_GPIO55__FUNC_B_GPIO55 (MTK_PIN_NO(55) | 0)
+#define PINMUX_GPIO55__FUNC_B1_SCL0 (MTK_PIN_NO(55) | 1)
+#define PINMUX_GPIO55__FUNC_B1_SCP_SCL0 (MTK_PIN_NO(55) | 2)
+#define PINMUX_GPIO55__FUNC_B1_SCP_SCL1 (MTK_PIN_NO(55) | 3)
+#define PINMUX_GPIO55__FUNC_B1_PCIE_PHY_I2C_SCL (MTK_PIN_NO(55) | 4)
+
+#define PINMUX_GPIO56__FUNC_B_GPIO56 (MTK_PIN_NO(56) | 0)
+#define PINMUX_GPIO56__FUNC_B1_SDA0 (MTK_PIN_NO(56) | 1)
+#define PINMUX_GPIO56__FUNC_B1_SCP_SDA0 (MTK_PIN_NO(56) | 2)
+#define PINMUX_GPIO56__FUNC_B1_SCP_SDA1 (MTK_PIN_NO(56) | 3)
+#define PINMUX_GPIO56__FUNC_B1_PCIE_PHY_I2C_SDA (MTK_PIN_NO(56) | 4)
+
+#define PINMUX_GPIO57__FUNC_B_GPIO57 (MTK_PIN_NO(57) | 0)
+#define PINMUX_GPIO57__FUNC_B1_SCL1 (MTK_PIN_NO(57) | 1)
+
+#define PINMUX_GPIO58__FUNC_B_GPIO58 (MTK_PIN_NO(58) | 0)
+#define PINMUX_GPIO58__FUNC_B1_SDA1 (MTK_PIN_NO(58) | 1)
+
+#define PINMUX_GPIO59__FUNC_B_GPIO59 (MTK_PIN_NO(59) | 0)
+#define PINMUX_GPIO59__FUNC_B1_SCL2 (MTK_PIN_NO(59) | 1)
+#define PINMUX_GPIO59__FUNC_B1_SCP_SCL0 (MTK_PIN_NO(59) | 2)
+#define PINMUX_GPIO59__FUNC_B1_SCP_SCL1 (MTK_PIN_NO(59) | 3)
+
+#define PINMUX_GPIO60__FUNC_B_GPIO60 (MTK_PIN_NO(60) | 0)
+#define PINMUX_GPIO60__FUNC_B1_SDA2 (MTK_PIN_NO(60) | 1)
+#define PINMUX_GPIO60__FUNC_B1_SCP_SDA0 (MTK_PIN_NO(60) | 2)
+#define PINMUX_GPIO60__FUNC_B1_SCP_SDA1 (MTK_PIN_NO(60) | 3)
+
+#define PINMUX_GPIO61__FUNC_B_GPIO61 (MTK_PIN_NO(61) | 0)
+#define PINMUX_GPIO61__FUNC_B1_SCL3 (MTK_PIN_NO(61) | 1)
+#define PINMUX_GPIO61__FUNC_B1_SCP_SCL0 (MTK_PIN_NO(61) | 2)
+#define PINMUX_GPIO61__FUNC_B1_SCP_SCL1 (MTK_PIN_NO(61) | 3)
+#define PINMUX_GPIO61__FUNC_B1_PCIE_PHY_I2C_SCL (MTK_PIN_NO(61) | 4)
+
+#define PINMUX_GPIO62__FUNC_B_GPIO62 (MTK_PIN_NO(62) | 0)
+#define PINMUX_GPIO62__FUNC_B1_SDA3 (MTK_PIN_NO(62) | 1)
+#define PINMUX_GPIO62__FUNC_B1_SCP_SDA0 (MTK_PIN_NO(62) | 2)
+#define PINMUX_GPIO62__FUNC_B1_SCP_SDA1 (MTK_PIN_NO(62) | 3)
+#define PINMUX_GPIO62__FUNC_B1_PCIE_PHY_I2C_SDA (MTK_PIN_NO(62) | 4)
+
+#define PINMUX_GPIO63__FUNC_B_GPIO63 (MTK_PIN_NO(63) | 0)
+#define PINMUX_GPIO63__FUNC_B1_SCL4 (MTK_PIN_NO(63) | 1)
+
+#define PINMUX_GPIO64__FUNC_B_GPIO64 (MTK_PIN_NO(64) | 0)
+#define PINMUX_GPIO64__FUNC_B1_SDA4 (MTK_PIN_NO(64) | 1)
+
+#define PINMUX_GPIO65__FUNC_B_GPIO65 (MTK_PIN_NO(65) | 0)
+#define PINMUX_GPIO65__FUNC_B1_SCL5 (MTK_PIN_NO(65) | 1)
+#define PINMUX_GPIO65__FUNC_B1_SCP_SCL0 (MTK_PIN_NO(65) | 2)
+#define PINMUX_GPIO65__FUNC_B1_SCP_SCL1 (MTK_PIN_NO(65) | 3)
+
+#define PINMUX_GPIO66__FUNC_B_GPIO66 (MTK_PIN_NO(66) | 0)
+#define PINMUX_GPIO66__FUNC_B1_SDA5 (MTK_PIN_NO(66) | 1)
+#define PINMUX_GPIO66__FUNC_B1_SCP_SDA0 (MTK_PIN_NO(66) | 2)
+#define PINMUX_GPIO66__FUNC_B1_SCP_SDA1 (MTK_PIN_NO(66) | 3)
+
+#define PINMUX_GPIO67__FUNC_B_GPIO67 (MTK_PIN_NO(67) | 0)
+#define PINMUX_GPIO67__FUNC_B1_SCL6 (MTK_PIN_NO(67) | 1)
+#define PINMUX_GPIO67__FUNC_B1_SCP_SCL0 (MTK_PIN_NO(67) | 2)
+#define PINMUX_GPIO67__FUNC_B1_SCP_SCL1 (MTK_PIN_NO(67) | 3)
+#define PINMUX_GPIO67__FUNC_B1_PCIE_PHY_I2C_SCL (MTK_PIN_NO(67) | 4)
+
+#define PINMUX_GPIO68__FUNC_B_GPIO68 (MTK_PIN_NO(68) | 0)
+#define PINMUX_GPIO68__FUNC_B1_SDA6 (MTK_PIN_NO(68) | 1)
+#define PINMUX_GPIO68__FUNC_B1_SCP_SDA0 (MTK_PIN_NO(68) | 2)
+#define PINMUX_GPIO68__FUNC_B1_SCP_SDA1 (MTK_PIN_NO(68) | 3)
+#define PINMUX_GPIO68__FUNC_B1_PCIE_PHY_I2C_SDA (MTK_PIN_NO(68) | 4)
+
+#define PINMUX_GPIO69__FUNC_B_GPIO69 (MTK_PIN_NO(69) | 0)
+#define PINMUX_GPIO69__FUNC_O_SPIM0_CSB (MTK_PIN_NO(69) | 1)
+#define PINMUX_GPIO69__FUNC_O_SCP_SPI0_CS (MTK_PIN_NO(69) | 2)
+#define PINMUX_GPIO69__FUNC_O_DMIC3_CLK (MTK_PIN_NO(69) | 3)
+#define PINMUX_GPIO69__FUNC_B0_MD32_1_GPIO0 (MTK_PIN_NO(69) | 4)
+#define PINMUX_GPIO69__FUNC_O_CMVREF0 (MTK_PIN_NO(69) | 5)
+#define PINMUX_GPIO69__FUNC_O_GDU_SUM_TROOP0_0 (MTK_PIN_NO(69) | 6)
+#define PINMUX_GPIO69__FUNC_B0_DBG_MON_A23 (MTK_PIN_NO(69) | 7)
+
+#define PINMUX_GPIO70__FUNC_B_GPIO70 (MTK_PIN_NO(70) | 0)
+#define PINMUX_GPIO70__FUNC_O_SPIM0_CLK (MTK_PIN_NO(70) | 1)
+#define PINMUX_GPIO70__FUNC_O_SCP_SPI0_CK (MTK_PIN_NO(70) | 2)
+#define PINMUX_GPIO70__FUNC_I0_DMIC3_DAT (MTK_PIN_NO(70) | 3)
+#define PINMUX_GPIO70__FUNC_B0_MD32_1_GPIO1 (MTK_PIN_NO(70) | 4)
+#define PINMUX_GPIO70__FUNC_O_CMVREF1 (MTK_PIN_NO(70) | 5)
+#define PINMUX_GPIO70__FUNC_O_GDU_SUM_TROOP0_1 (MTK_PIN_NO(70) | 6)
+#define PINMUX_GPIO70__FUNC_B0_DBG_MON_A24 (MTK_PIN_NO(70) | 7)
+
+#define PINMUX_GPIO71__FUNC_B_GPIO71 (MTK_PIN_NO(71) | 0)
+#define PINMUX_GPIO71__FUNC_B0_SPIM0_MOSI (MTK_PIN_NO(71) | 1)
+#define PINMUX_GPIO71__FUNC_O_SCP_SPI0_MO (MTK_PIN_NO(71) | 2)
+#define PINMUX_GPIO71__FUNC_I0_DMIC3_DAT_R (MTK_PIN_NO(71) | 3)
+#define PINMUX_GPIO71__FUNC_B0_MD32_1_GPIO2 (MTK_PIN_NO(71) | 4)
+#define PINMUX_GPIO71__FUNC_O_CMVREF2 (MTK_PIN_NO(71) | 5)
+#define PINMUX_GPIO71__FUNC_O_GDU_SUM_TROOP0_2 (MTK_PIN_NO(71) | 6)
+#define PINMUX_GPIO71__FUNC_B0_DBG_MON_A25 (MTK_PIN_NO(71) | 7)
+
+#define PINMUX_GPIO72__FUNC_B_GPIO72 (MTK_PIN_NO(72) | 0)
+#define PINMUX_GPIO72__FUNC_B0_SPIM0_MISO (MTK_PIN_NO(72) | 1)
+#define PINMUX_GPIO72__FUNC_I0_SCP_SPI0_MI (MTK_PIN_NO(72) | 2)
+#define PINMUX_GPIO72__FUNC_O_DMIC4_CLK (MTK_PIN_NO(72) | 3)
+#define PINMUX_GPIO72__FUNC_O_CMVREF3 (MTK_PIN_NO(72) | 5)
+#define PINMUX_GPIO72__FUNC_O_GDU_SUM_TROOP1_0 (MTK_PIN_NO(72) | 6)
+#define PINMUX_GPIO72__FUNC_B0_DBG_MON_A26 (MTK_PIN_NO(72) | 7)
+
+#define PINMUX_GPIO73__FUNC_B_GPIO73 (MTK_PIN_NO(73) | 0)
+#define PINMUX_GPIO73__FUNC_B0_SPIM0_MIO2 (MTK_PIN_NO(73) | 1)
+#define PINMUX_GPIO73__FUNC_O_UTXD3 (MTK_PIN_NO(73) | 2)
+#define PINMUX_GPIO73__FUNC_I0_DMIC4_DAT (MTK_PIN_NO(73) | 3)
+#define PINMUX_GPIO73__FUNC_O_CLKM0 (MTK_PIN_NO(73) | 4)
+#define PINMUX_GPIO73__FUNC_O_CMVREF4 (MTK_PIN_NO(73) | 5)
+#define PINMUX_GPIO73__FUNC_O_GDU_SUM_TROOP1_1 (MTK_PIN_NO(73) | 6)
+#define PINMUX_GPIO73__FUNC_B0_DBG_MON_A27 (MTK_PIN_NO(73) | 7)
+
+#define PINMUX_GPIO74__FUNC_B_GPIO74 (MTK_PIN_NO(74) | 0)
+#define PINMUX_GPIO74__FUNC_B0_SPIM0_MIO3 (MTK_PIN_NO(74) | 1)
+#define PINMUX_GPIO74__FUNC_I1_URXD3 (MTK_PIN_NO(74) | 2)
+#define PINMUX_GPIO74__FUNC_I0_DMIC4_DAT_R (MTK_PIN_NO(74) | 3)
+#define PINMUX_GPIO74__FUNC_O_CLKM1 (MTK_PIN_NO(74) | 4)
+#define PINMUX_GPIO74__FUNC_O_CMVREF5 (MTK_PIN_NO(74) | 5)
+#define PINMUX_GPIO74__FUNC_O_GDU_SUM_TROOP1_2 (MTK_PIN_NO(74) | 6)
+#define PINMUX_GPIO74__FUNC_B0_DBG_MON_A28 (MTK_PIN_NO(74) | 7)
+
+#define PINMUX_GPIO75__FUNC_B_GPIO75 (MTK_PIN_NO(75) | 0)
+#define PINMUX_GPIO75__FUNC_O_SPIM1_CSB (MTK_PIN_NO(75) | 1)
+#define PINMUX_GPIO75__FUNC_O_SCP_SPI1_A_CS (MTK_PIN_NO(75) | 2)
+#define PINMUX_GPIO75__FUNC_B0_TDMIN_MCK (MTK_PIN_NO(75) | 3)
+#define PINMUX_GPIO75__FUNC_B1_SCP_SCL0 (MTK_PIN_NO(75) | 4)
+#define PINMUX_GPIO75__FUNC_O_CMVREF6 (MTK_PIN_NO(75) | 5)
+#define PINMUX_GPIO75__FUNC_O_GDU_SUM_TROOP2_0 (MTK_PIN_NO(75) | 6)
+#define PINMUX_GPIO75__FUNC_B0_DBG_MON_A29 (MTK_PIN_NO(75) | 7)
+
+#define PINMUX_GPIO76__FUNC_B_GPIO76 (MTK_PIN_NO(76) | 0)
+#define PINMUX_GPIO76__FUNC_O_SPIM1_CLK (MTK_PIN_NO(76) | 1)
+#define PINMUX_GPIO76__FUNC_O_SCP_SPI1_A_CK (MTK_PIN_NO(76) | 2)
+#define PINMUX_GPIO76__FUNC_B0_TDMIN_BCK (MTK_PIN_NO(76) | 3)
+#define PINMUX_GPIO76__FUNC_B1_SCP_SDA0 (MTK_PIN_NO(76) | 4)
+#define PINMUX_GPIO76__FUNC_O_CMVREF7 (MTK_PIN_NO(76) | 5)
+#define PINMUX_GPIO76__FUNC_O_GDU_SUM_TROOP2_1 (MTK_PIN_NO(76) | 6)
+#define PINMUX_GPIO76__FUNC_B0_DBG_MON_A30 (MTK_PIN_NO(76) | 7)
+
+#define PINMUX_GPIO77__FUNC_B_GPIO77 (MTK_PIN_NO(77) | 0)
+#define PINMUX_GPIO77__FUNC_B0_SPIM1_MOSI (MTK_PIN_NO(77) | 1)
+#define PINMUX_GPIO77__FUNC_O_SCP_SPI1_A_MO (MTK_PIN_NO(77) | 2)
+#define PINMUX_GPIO77__FUNC_B0_TDMIN_LRCK (MTK_PIN_NO(77) | 3)
+#define PINMUX_GPIO77__FUNC_B1_SCP_SCL1 (MTK_PIN_NO(77) | 4)
+#define PINMUX_GPIO77__FUNC_O_GDU_SUM_TROOP2_2 (MTK_PIN_NO(77) | 6)
+#define PINMUX_GPIO77__FUNC_B0_DBG_MON_A31 (MTK_PIN_NO(77) | 7)
+
+#define PINMUX_GPIO78__FUNC_B_GPIO78 (MTK_PIN_NO(78) | 0)
+#define PINMUX_GPIO78__FUNC_B0_SPIM1_MISO (MTK_PIN_NO(78) | 1)
+#define PINMUX_GPIO78__FUNC_I0_SCP_SPI1_A_MI (MTK_PIN_NO(78) | 2)
+#define PINMUX_GPIO78__FUNC_I0_TDMIN_DI (MTK_PIN_NO(78) | 3)
+#define PINMUX_GPIO78__FUNC_B1_SCP_SDA1 (MTK_PIN_NO(78) | 4)
+#define PINMUX_GPIO78__FUNC_B0_DBG_MON_A32 (MTK_PIN_NO(78) | 7)
+
+#define PINMUX_GPIO79__FUNC_B_GPIO79 (MTK_PIN_NO(79) | 0)
+#define PINMUX_GPIO79__FUNC_O_SPIM2_CSB (MTK_PIN_NO(79) | 1)
+#define PINMUX_GPIO79__FUNC_O_SCP_SPI2_CS (MTK_PIN_NO(79) | 2)
+#define PINMUX_GPIO79__FUNC_O_I2SO1_MCK (MTK_PIN_NO(79) | 3)
+#define PINMUX_GPIO79__FUNC_O_UTXD2 (MTK_PIN_NO(79) | 4)
+#define PINMUX_GPIO79__FUNC_O_TP_UTXD2_AO (MTK_PIN_NO(79) | 5)
+#define PINMUX_GPIO79__FUNC_B0_PCM_SYNC (MTK_PIN_NO(79) | 6)
+#define PINMUX_GPIO79__FUNC_B0_DBG_MON_B0 (MTK_PIN_NO(79) | 7)
+
+#define PINMUX_GPIO80__FUNC_B_GPIO80 (MTK_PIN_NO(80) | 0)
+#define PINMUX_GPIO80__FUNC_O_SPIM2_CLK (MTK_PIN_NO(80) | 1)
+#define PINMUX_GPIO80__FUNC_O_SCP_SPI2_CK (MTK_PIN_NO(80) | 2)
+#define PINMUX_GPIO80__FUNC_O_I2SO1_BCK (MTK_PIN_NO(80) | 3)
+#define PINMUX_GPIO80__FUNC_I1_URXD2 (MTK_PIN_NO(80) | 4)
+#define PINMUX_GPIO80__FUNC_I1_TP_URXD2_AO (MTK_PIN_NO(80) | 5)
+#define PINMUX_GPIO80__FUNC_B0_PCM_CLK (MTK_PIN_NO(80) | 6)
+#define PINMUX_GPIO80__FUNC_B0_DBG_MON_B1 (MTK_PIN_NO(80) | 7)
+
+#define PINMUX_GPIO81__FUNC_B_GPIO81 (MTK_PIN_NO(81) | 0)
+#define PINMUX_GPIO81__FUNC_B0_SPIM2_MOSI (MTK_PIN_NO(81) | 1)
+#define PINMUX_GPIO81__FUNC_O_SCP_SPI2_MO (MTK_PIN_NO(81) | 2)
+#define PINMUX_GPIO81__FUNC_O_I2SO1_WS (MTK_PIN_NO(81) | 3)
+#define PINMUX_GPIO81__FUNC_O_URTS2 (MTK_PIN_NO(81) | 4)
+#define PINMUX_GPIO81__FUNC_O_TP_URTS2_AO (MTK_PIN_NO(81) | 5)
+#define PINMUX_GPIO81__FUNC_O_PCM_DO (MTK_PIN_NO(81) | 6)
+#define PINMUX_GPIO81__FUNC_B0_DBG_MON_B2 (MTK_PIN_NO(81) | 7)
+
+#define PINMUX_GPIO82__FUNC_B_GPIO82 (MTK_PIN_NO(82) | 0)
+#define PINMUX_GPIO82__FUNC_B0_SPIM2_MISO (MTK_PIN_NO(82) | 1)
+#define PINMUX_GPIO82__FUNC_I0_SCP_SPI2_MI (MTK_PIN_NO(82) | 2)
+#define PINMUX_GPIO82__FUNC_O_I2SO1_D0 (MTK_PIN_NO(82) | 3)
+#define PINMUX_GPIO82__FUNC_I1_UCTS2 (MTK_PIN_NO(82) | 4)
+#define PINMUX_GPIO82__FUNC_I1_TP_UCTS2_AO (MTK_PIN_NO(82) | 5)
+#define PINMUX_GPIO82__FUNC_I0_PCM_DI (MTK_PIN_NO(82) | 6)
+#define PINMUX_GPIO82__FUNC_B0_DBG_MON_B3 (MTK_PIN_NO(82) | 7)
+
+#define PINMUX_GPIO83__FUNC_B_GPIO83 (MTK_PIN_NO(83) | 0)
+#define PINMUX_GPIO83__FUNC_I1_IDDIG (MTK_PIN_NO(83) | 1)
+
+#define PINMUX_GPIO84__FUNC_B_GPIO84 (MTK_PIN_NO(84) | 0)
+#define PINMUX_GPIO84__FUNC_O_USB_DRVVBUS (MTK_PIN_NO(84) | 1)
+
+#define PINMUX_GPIO85__FUNC_B_GPIO85 (MTK_PIN_NO(85) | 0)
+#define PINMUX_GPIO85__FUNC_I0_VBUSVALID (MTK_PIN_NO(85) | 1)
+
+#define PINMUX_GPIO86__FUNC_B_GPIO86 (MTK_PIN_NO(86) | 0)
+#define PINMUX_GPIO86__FUNC_I1_IDDIG_1P (MTK_PIN_NO(86) | 1)
+#define PINMUX_GPIO86__FUNC_O_UTXD1 (MTK_PIN_NO(86) | 2)
+#define PINMUX_GPIO86__FUNC_O_URTS2 (MTK_PIN_NO(86) | 3)
+#define PINMUX_GPIO86__FUNC_O_PWM_2 (MTK_PIN_NO(86) | 4)
+#define PINMUX_GPIO86__FUNC_B0_TP_GPIO4_AO (MTK_PIN_NO(86) | 5)
+#define PINMUX_GPIO86__FUNC_O_AUXIF_ST0 (MTK_PIN_NO(86) | 6)
+#define PINMUX_GPIO86__FUNC_B0_DBG_MON_B4 (MTK_PIN_NO(86) | 7)
+
+#define PINMUX_GPIO87__FUNC_B_GPIO87 (MTK_PIN_NO(87) | 0)
+#define PINMUX_GPIO87__FUNC_O_USB_DRVVBUS_1P (MTK_PIN_NO(87) | 1)
+#define PINMUX_GPIO87__FUNC_I1_URXD1 (MTK_PIN_NO(87) | 2)
+#define PINMUX_GPIO87__FUNC_I1_UCTS2 (MTK_PIN_NO(87) | 3)
+#define PINMUX_GPIO87__FUNC_O_PWM_3 (MTK_PIN_NO(87) | 4)
+#define PINMUX_GPIO87__FUNC_B0_TP_GPIO5_AO (MTK_PIN_NO(87) | 5)
+#define PINMUX_GPIO87__FUNC_O_AUXIF_CLK0 (MTK_PIN_NO(87) | 6)
+#define PINMUX_GPIO87__FUNC_B0_DBG_MON_B5 (MTK_PIN_NO(87) | 7)
+
+#define PINMUX_GPIO88__FUNC_B_GPIO88 (MTK_PIN_NO(88) | 0)
+#define PINMUX_GPIO88__FUNC_I0_VBUSVALID_1P (MTK_PIN_NO(88) | 1)
+#define PINMUX_GPIO88__FUNC_O_UTXD2 (MTK_PIN_NO(88) | 2)
+#define PINMUX_GPIO88__FUNC_O_URTS1 (MTK_PIN_NO(88) | 3)
+#define PINMUX_GPIO88__FUNC_O_CLKM2 (MTK_PIN_NO(88) | 4)
+#define PINMUX_GPIO88__FUNC_B0_TP_GPIO6_AO (MTK_PIN_NO(88) | 5)
+#define PINMUX_GPIO88__FUNC_O_AUXIF_ST1 (MTK_PIN_NO(88) | 6)
+#define PINMUX_GPIO88__FUNC_B0_DBG_MON_B6 (MTK_PIN_NO(88) | 7)
+
+#define PINMUX_GPIO89__FUNC_B_GPIO89 (MTK_PIN_NO(89) | 0)
+#define PINMUX_GPIO89__FUNC_I1_IDDIG_2P (MTK_PIN_NO(89) | 1)
+#define PINMUX_GPIO89__FUNC_I1_URXD2 (MTK_PIN_NO(89) | 2)
+#define PINMUX_GPIO89__FUNC_I1_UCTS1 (MTK_PIN_NO(89) | 3)
+#define PINMUX_GPIO89__FUNC_O_CLKM3 (MTK_PIN_NO(89) | 4)
+#define PINMUX_GPIO89__FUNC_B0_TP_GPIO7_AO (MTK_PIN_NO(89) | 5)
+#define PINMUX_GPIO89__FUNC_O_AUXIF_CLK1 (MTK_PIN_NO(89) | 6)
+#define PINMUX_GPIO89__FUNC_B0_DBG_MON_B7 (MTK_PIN_NO(89) | 7)
+
+#define PINMUX_GPIO90__FUNC_B_GPIO90 (MTK_PIN_NO(90) | 0)
+#define PINMUX_GPIO90__FUNC_O_USB_DRVVBUS_2P (MTK_PIN_NO(90) | 1)
+#define PINMUX_GPIO90__FUNC_O_UTXD3 (MTK_PIN_NO(90) | 2)
+#define PINMUX_GPIO90__FUNC_O_ADSP_UTXD0 (MTK_PIN_NO(90) | 3)
+#define PINMUX_GPIO90__FUNC_O_SSPM_UTXD_AO (MTK_PIN_NO(90) | 4)
+#define PINMUX_GPIO90__FUNC_O_MD32_0_TXD (MTK_PIN_NO(90) | 5)
+#define PINMUX_GPIO90__FUNC_O_MD32_1_TXD (MTK_PIN_NO(90) | 6)
+#define PINMUX_GPIO90__FUNC_B0_DBG_MON_B8 (MTK_PIN_NO(90) | 7)
+
+#define PINMUX_GPIO91__FUNC_B_GPIO91 (MTK_PIN_NO(91) | 0)
+#define PINMUX_GPIO91__FUNC_I0_VBUSVALID_2P (MTK_PIN_NO(91) | 1)
+#define PINMUX_GPIO91__FUNC_I1_URXD3 (MTK_PIN_NO(91) | 2)
+#define PINMUX_GPIO91__FUNC_I1_ADSP_URXD0 (MTK_PIN_NO(91) | 3)
+#define PINMUX_GPIO91__FUNC_I1_SSPM_URXD_AO (MTK_PIN_NO(91) | 4)
+#define PINMUX_GPIO91__FUNC_I1_MD32_0_RXD (MTK_PIN_NO(91) | 5)
+#define PINMUX_GPIO91__FUNC_I1_MD32_1_RXD (MTK_PIN_NO(91) | 6)
+#define PINMUX_GPIO91__FUNC_B0_DBG_MON_B9 (MTK_PIN_NO(91) | 7)
+
+#define PINMUX_GPIO92__FUNC_B_GPIO92 (MTK_PIN_NO(92) | 0)
+#define PINMUX_GPIO92__FUNC_O_PWRAP_SPI0_CSN (MTK_PIN_NO(92) | 1)
+
+#define PINMUX_GPIO93__FUNC_B_GPIO93 (MTK_PIN_NO(93) | 0)
+#define PINMUX_GPIO93__FUNC_O_PWRAP_SPI0_CK (MTK_PIN_NO(93) | 1)
+
+#define PINMUX_GPIO94__FUNC_B_GPIO94 (MTK_PIN_NO(94) | 0)
+#define PINMUX_GPIO94__FUNC_B0_PWRAP_SPI0_MO (MTK_PIN_NO(94) | 1)
+#define PINMUX_GPIO94__FUNC_B0_PWRAP_SPI0_MI (MTK_PIN_NO(94) | 2)
+
+#define PINMUX_GPIO95__FUNC_B_GPIO95 (MTK_PIN_NO(95) | 0)
+#define PINMUX_GPIO95__FUNC_B0_PWRAP_SPI0_MI (MTK_PIN_NO(95) | 1)
+#define PINMUX_GPIO95__FUNC_B0_PWRAP_SPI0_MO (MTK_PIN_NO(95) | 2)
+
+#define PINMUX_GPIO96__FUNC_B_GPIO96 (MTK_PIN_NO(96) | 0)
+#define PINMUX_GPIO96__FUNC_O_SRCLKENA0 (MTK_PIN_NO(96) | 1)
+
+#define PINMUX_GPIO97__FUNC_B_GPIO97 (MTK_PIN_NO(97) | 0)
+#define PINMUX_GPIO97__FUNC_O_SRCLKENA1 (MTK_PIN_NO(97) | 1)
+
+#define PINMUX_GPIO98__FUNC_B_GPIO98 (MTK_PIN_NO(98) | 0)
+#define PINMUX_GPIO98__FUNC_O_SCP_VREQ_VAO (MTK_PIN_NO(98) | 1)
+#define PINMUX_GPIO98__FUNC_I0_DVFSRC_EXT_REQ (MTK_PIN_NO(98) | 2)
+
+#define PINMUX_GPIO99__FUNC_B_GPIO99 (MTK_PIN_NO(99) | 0)
+#define PINMUX_GPIO99__FUNC_I0_RTC32K_CK (MTK_PIN_NO(99) | 1)
+
+#define PINMUX_GPIO100__FUNC_B_GPIO100 (MTK_PIN_NO(100) | 0)
+#define PINMUX_GPIO100__FUNC_O_WATCHDOG (MTK_PIN_NO(100) | 1)
+
+#define PINMUX_GPIO101__FUNC_B_GPIO101 (MTK_PIN_NO(101) | 0)
+#define PINMUX_GPIO101__FUNC_O_AUD_CLK_MOSI (MTK_PIN_NO(101) | 1)
+#define PINMUX_GPIO101__FUNC_O_I2SO1_MCK (MTK_PIN_NO(101) | 2)
+#define PINMUX_GPIO101__FUNC_B0_I2SIN_BCK (MTK_PIN_NO(101) | 3)
+
+#define PINMUX_GPIO102__FUNC_B_GPIO102 (MTK_PIN_NO(102) | 0)
+#define PINMUX_GPIO102__FUNC_O_AUD_SYNC_MOSI (MTK_PIN_NO(102) | 1)
+#define PINMUX_GPIO102__FUNC_O_I2SO1_BCK (MTK_PIN_NO(102) | 2)
+#define PINMUX_GPIO102__FUNC_B0_I2SIN_WS (MTK_PIN_NO(102) | 3)
+
+#define PINMUX_GPIO103__FUNC_B_GPIO103 (MTK_PIN_NO(103) | 0)
+#define PINMUX_GPIO103__FUNC_O_AUD_DAT_MOSI0 (MTK_PIN_NO(103) | 1)
+#define PINMUX_GPIO103__FUNC_O_I2SO1_WS (MTK_PIN_NO(103) | 2)
+#define PINMUX_GPIO103__FUNC_I0_I2SIN_D0 (MTK_PIN_NO(103) | 3)
+
+#define PINMUX_GPIO104__FUNC_B_GPIO104 (MTK_PIN_NO(104) | 0)
+#define PINMUX_GPIO104__FUNC_O_AUD_DAT_MOSI1 (MTK_PIN_NO(104) | 1)
+#define PINMUX_GPIO104__FUNC_O_I2SO1_D0 (MTK_PIN_NO(104) | 2)
+#define PINMUX_GPIO104__FUNC_I0_I2SIN_D1 (MTK_PIN_NO(104) | 3)
+
+#define PINMUX_GPIO105__FUNC_B_GPIO105 (MTK_PIN_NO(105) | 0)
+#define PINMUX_GPIO105__FUNC_I0_AUD_DAT_MISO0 (MTK_PIN_NO(105) | 1)
+#define PINMUX_GPIO105__FUNC_I0_VOW_DAT_MISO (MTK_PIN_NO(105) | 2)
+#define PINMUX_GPIO105__FUNC_I0_I2SIN_D2 (MTK_PIN_NO(105) | 3)
+
+#define PINMUX_GPIO106__FUNC_B_GPIO106 (MTK_PIN_NO(106) | 0)
+#define PINMUX_GPIO106__FUNC_I0_AUD_DAT_MISO1 (MTK_PIN_NO(106) | 1)
+#define PINMUX_GPIO106__FUNC_I0_VOW_CLK_MISO (MTK_PIN_NO(106) | 2)
+#define PINMUX_GPIO106__FUNC_I0_I2SIN_D3 (MTK_PIN_NO(106) | 3)
+
+#define PINMUX_GPIO107__FUNC_B_GPIO107 (MTK_PIN_NO(107) | 0)
+#define PINMUX_GPIO107__FUNC_B0_I2SIN_MCK (MTK_PIN_NO(107) | 1)
+#define PINMUX_GPIO107__FUNC_I0_SPLIN_MCK (MTK_PIN_NO(107) | 2)
+#define PINMUX_GPIO107__FUNC_I0_SPDIF_IN0 (MTK_PIN_NO(107) | 3)
+#define PINMUX_GPIO107__FUNC_O_CMVREF4 (MTK_PIN_NO(107) | 4)
+#define PINMUX_GPIO107__FUNC_O_AUXIF_ST0 (MTK_PIN_NO(107) | 5)
+#define PINMUX_GPIO107__FUNC_O_PGD_LV_LSC_PWR0 (MTK_PIN_NO(107) | 6)
+
+#define PINMUX_GPIO108__FUNC_B_GPIO108 (MTK_PIN_NO(108) | 0)
+#define PINMUX_GPIO108__FUNC_B0_I2SIN_BCK (MTK_PIN_NO(108) | 1)
+#define PINMUX_GPIO108__FUNC_I0_SPLIN_LRCK (MTK_PIN_NO(108) | 2)
+#define PINMUX_GPIO108__FUNC_O_DMIC4_CLK (MTK_PIN_NO(108) | 3)
+#define PINMUX_GPIO108__FUNC_O_CMVREF5 (MTK_PIN_NO(108) | 4)
+#define PINMUX_GPIO108__FUNC_O_AUXIF_CLK0 (MTK_PIN_NO(108) | 5)
+#define PINMUX_GPIO108__FUNC_O_PGD_LV_LSC_PWR1 (MTK_PIN_NO(108) | 6)
+#define PINMUX_GPIO108__FUNC_B0_DBG_MON_B10 (MTK_PIN_NO(108) | 7)
+
+#define PINMUX_GPIO109__FUNC_B_GPIO109 (MTK_PIN_NO(109) | 0)
+#define PINMUX_GPIO109__FUNC_B0_I2SIN_WS (MTK_PIN_NO(109) | 1)
+#define PINMUX_GPIO109__FUNC_I0_SPLIN_BCK (MTK_PIN_NO(109) | 2)
+#define PINMUX_GPIO109__FUNC_I0_DMIC4_DAT (MTK_PIN_NO(109) | 3)
+#define PINMUX_GPIO109__FUNC_O_CMVREF6 (MTK_PIN_NO(109) | 4)
+#define PINMUX_GPIO109__FUNC_O_AUXIF_ST1 (MTK_PIN_NO(109) | 5)
+#define PINMUX_GPIO109__FUNC_O_PGD_LV_LSC_PWR2 (MTK_PIN_NO(109) | 6)
+#define PINMUX_GPIO109__FUNC_B0_DBG_MON_B11 (MTK_PIN_NO(109) | 7)
+
+#define PINMUX_GPIO110__FUNC_B_GPIO110 (MTK_PIN_NO(110) | 0)
+#define PINMUX_GPIO110__FUNC_I0_I2SIN_D0 (MTK_PIN_NO(110) | 1)
+#define PINMUX_GPIO110__FUNC_I0_SPLIN_D0 (MTK_PIN_NO(110) | 2)
+#define PINMUX_GPIO110__FUNC_I0_DMIC4_DAT_R (MTK_PIN_NO(110) | 3)
+#define PINMUX_GPIO110__FUNC_O_CMVREF7 (MTK_PIN_NO(110) | 4)
+#define PINMUX_GPIO110__FUNC_O_AUXIF_CLK1 (MTK_PIN_NO(110) | 5)
+#define PINMUX_GPIO110__FUNC_O_PGD_LV_LSC_PWR3 (MTK_PIN_NO(110) | 6)
+#define PINMUX_GPIO110__FUNC_B0_DBG_MON_B12 (MTK_PIN_NO(110) | 7)
+
+#define PINMUX_GPIO111__FUNC_B_GPIO111 (MTK_PIN_NO(111) | 0)
+#define PINMUX_GPIO111__FUNC_I0_I2SIN_D1 (MTK_PIN_NO(111) | 1)
+#define PINMUX_GPIO111__FUNC_I0_SPLIN_D1 (MTK_PIN_NO(111) | 2)
+#define PINMUX_GPIO111__FUNC_O_DMIC3_CLK (MTK_PIN_NO(111) | 3)
+#define PINMUX_GPIO111__FUNC_O_SPDIF_OUT (MTK_PIN_NO(111) | 4)
+#define PINMUX_GPIO111__FUNC_O_PGD_LV_LSC_PWR4 (MTK_PIN_NO(111) | 6)
+#define PINMUX_GPIO111__FUNC_B0_DBG_MON_B13 (MTK_PIN_NO(111) | 7)
+
+#define PINMUX_GPIO112__FUNC_B_GPIO112 (MTK_PIN_NO(112) | 0)
+#define PINMUX_GPIO112__FUNC_I0_I2SIN_D2 (MTK_PIN_NO(112) | 1)
+#define PINMUX_GPIO112__FUNC_I0_SPLIN_D2 (MTK_PIN_NO(112) | 2)
+#define PINMUX_GPIO112__FUNC_I0_DMIC3_DAT (MTK_PIN_NO(112) | 3)
+#define PINMUX_GPIO112__FUNC_B0_TDMIN_MCK (MTK_PIN_NO(112) | 4)
+#define PINMUX_GPIO112__FUNC_O_I2SO1_WS (MTK_PIN_NO(112) | 5)
+#define PINMUX_GPIO112__FUNC_O_PGD_LV_LSC_PWR5 (MTK_PIN_NO(112) | 6)
+#define PINMUX_GPIO112__FUNC_B0_DBG_MON_B14 (MTK_PIN_NO(112) | 7)
+
+#define PINMUX_GPIO113__FUNC_B_GPIO113 (MTK_PIN_NO(113) | 0)
+#define PINMUX_GPIO113__FUNC_I0_I2SIN_D3 (MTK_PIN_NO(113) | 1)
+#define PINMUX_GPIO113__FUNC_I0_SPLIN_D3 (MTK_PIN_NO(113) | 2)
+#define PINMUX_GPIO113__FUNC_I0_DMIC3_DAT_R (MTK_PIN_NO(113) | 3)
+#define PINMUX_GPIO113__FUNC_B0_TDMIN_BCK (MTK_PIN_NO(113) | 4)
+#define PINMUX_GPIO113__FUNC_O_I2SO1_D0 (MTK_PIN_NO(113) | 5)
+#define PINMUX_GPIO113__FUNC_B0_DBG_MON_B15 (MTK_PIN_NO(113) | 7)
+
+#define PINMUX_GPIO114__FUNC_B_GPIO114 (MTK_PIN_NO(114) | 0)
+#define PINMUX_GPIO114__FUNC_O_I2SO2_MCK (MTK_PIN_NO(114) | 1)
+#define PINMUX_GPIO114__FUNC_B0_I2SIN_MCK (MTK_PIN_NO(114) | 2)
+#define PINMUX_GPIO114__FUNC_I1_MCUPM_JTAG_TMS (MTK_PIN_NO(114) | 3)
+#define PINMUX_GPIO114__FUNC_B1_APU_JTAG_TMS (MTK_PIN_NO(114) | 4)
+#define PINMUX_GPIO114__FUNC_I1_SCP_JTAG1_TMS (MTK_PIN_NO(114) | 5)
+#define PINMUX_GPIO114__FUNC_I1_SPM_JTAG_TMS (MTK_PIN_NO(114) | 6)
+#define PINMUX_GPIO114__FUNC_B0_DBG_MON_B16 (MTK_PIN_NO(114) | 7)
+
+#define PINMUX_GPIO115__FUNC_B_GPIO115 (MTK_PIN_NO(115) | 0)
+#define PINMUX_GPIO115__FUNC_B0_I2SO2_BCK (MTK_PIN_NO(115) | 1)
+#define PINMUX_GPIO115__FUNC_B0_I2SIN_BCK (MTK_PIN_NO(115) | 2)
+#define PINMUX_GPIO115__FUNC_I1_MCUPM_JTAG_TCK (MTK_PIN_NO(115) | 3)
+#define PINMUX_GPIO115__FUNC_I0_APU_JTAG_TCK (MTK_PIN_NO(115) | 4)
+#define PINMUX_GPIO115__FUNC_I1_SCP_JTAG1_TCK (MTK_PIN_NO(115) | 5)
+#define PINMUX_GPIO115__FUNC_I1_SPM_JTAG_TCK (MTK_PIN_NO(115) | 6)
+#define PINMUX_GPIO115__FUNC_B0_DBG_MON_B17 (MTK_PIN_NO(115) | 7)
+
+#define PINMUX_GPIO116__FUNC_B_GPIO116 (MTK_PIN_NO(116) | 0)
+#define PINMUX_GPIO116__FUNC_B0_I2SO2_WS (MTK_PIN_NO(116) | 1)
+#define PINMUX_GPIO116__FUNC_B0_I2SIN_WS (MTK_PIN_NO(116) | 2)
+#define PINMUX_GPIO116__FUNC_I1_MCUPM_JTAG_TDI (MTK_PIN_NO(116) | 3)
+#define PINMUX_GPIO116__FUNC_I1_APU_JTAG_TDI (MTK_PIN_NO(116) | 4)
+#define PINMUX_GPIO116__FUNC_I1_SCP_JTAG1_TDI (MTK_PIN_NO(116) | 5)
+#define PINMUX_GPIO116__FUNC_I1_SPM_JTAG_TDI (MTK_PIN_NO(116) | 6)
+#define PINMUX_GPIO116__FUNC_B0_DBG_MON_B18 (MTK_PIN_NO(116) | 7)
+
+#define PINMUX_GPIO117__FUNC_B_GPIO117 (MTK_PIN_NO(117) | 0)
+#define PINMUX_GPIO117__FUNC_O_I2SO2_D0 (MTK_PIN_NO(117) | 1)
+#define PINMUX_GPIO117__FUNC_I0_I2SIN_D0 (MTK_PIN_NO(117) | 2)
+#define PINMUX_GPIO117__FUNC_O_MCUPM_JTAG_TDO (MTK_PIN_NO(117) | 3)
+#define PINMUX_GPIO117__FUNC_O_APU_JTAG_TDO (MTK_PIN_NO(117) | 4)
+#define PINMUX_GPIO117__FUNC_O_SCP_JTAG1_TDO (MTK_PIN_NO(117) | 5)
+#define PINMUX_GPIO117__FUNC_O_SPM_JTAG_TDO (MTK_PIN_NO(117) | 6)
+#define PINMUX_GPIO117__FUNC_B0_DBG_MON_B19 (MTK_PIN_NO(117) | 7)
+
+#define PINMUX_GPIO118__FUNC_B_GPIO118 (MTK_PIN_NO(118) | 0)
+#define PINMUX_GPIO118__FUNC_O_I2SO2_D1 (MTK_PIN_NO(118) | 1)
+#define PINMUX_GPIO118__FUNC_I0_I2SIN_D1 (MTK_PIN_NO(118) | 2)
+#define PINMUX_GPIO118__FUNC_I0_MCUPM_JTAG_TRSTN (MTK_PIN_NO(118) | 3)
+#define PINMUX_GPIO118__FUNC_I0_APU_JTAG_TRST (MTK_PIN_NO(118) | 4)
+#define PINMUX_GPIO118__FUNC_I0_SCP_JTAG1_TRSTN (MTK_PIN_NO(118) | 5)
+#define PINMUX_GPIO118__FUNC_I0_SPM_JTAG_TRSTN (MTK_PIN_NO(118) | 6)
+#define PINMUX_GPIO118__FUNC_B0_DBG_MON_B20 (MTK_PIN_NO(118) | 7)
+
+#define PINMUX_GPIO119__FUNC_B_GPIO119 (MTK_PIN_NO(119) | 0)
+#define PINMUX_GPIO119__FUNC_O_I2SO2_D2 (MTK_PIN_NO(119) | 1)
+#define PINMUX_GPIO119__FUNC_I0_I2SIN_D2 (MTK_PIN_NO(119) | 2)
+#define PINMUX_GPIO119__FUNC_O_UTXD3 (MTK_PIN_NO(119) | 3)
+#define PINMUX_GPIO119__FUNC_B0_TDMIN_LRCK (MTK_PIN_NO(119) | 4)
+#define PINMUX_GPIO119__FUNC_O_I2SO1_MCK (MTK_PIN_NO(119) | 5)
+#define PINMUX_GPIO119__FUNC_O_SSPM_UTXD_AO (MTK_PIN_NO(119) | 6)
+#define PINMUX_GPIO119__FUNC_B0_DBG_MON_B21 (MTK_PIN_NO(119) | 7)
+
+#define PINMUX_GPIO120__FUNC_B_GPIO120 (MTK_PIN_NO(120) | 0)
+#define PINMUX_GPIO120__FUNC_O_I2SO2_D3 (MTK_PIN_NO(120) | 1)
+#define PINMUX_GPIO120__FUNC_I0_I2SIN_D3 (MTK_PIN_NO(120) | 2)
+#define PINMUX_GPIO120__FUNC_I1_URXD3 (MTK_PIN_NO(120) | 3)
+#define PINMUX_GPIO120__FUNC_I0_TDMIN_DI (MTK_PIN_NO(120) | 4)
+#define PINMUX_GPIO120__FUNC_O_I2SO1_BCK (MTK_PIN_NO(120) | 5)
+#define PINMUX_GPIO120__FUNC_I1_SSPM_URXD_AO (MTK_PIN_NO(120) | 6)
+#define PINMUX_GPIO120__FUNC_B0_DBG_MON_B22 (MTK_PIN_NO(120) | 7)
+
+#define PINMUX_GPIO121__FUNC_B_GPIO121 (MTK_PIN_NO(121) | 0)
+#define PINMUX_GPIO121__FUNC_B0_PCM_CLK (MTK_PIN_NO(121) | 1)
+#define PINMUX_GPIO121__FUNC_O_SPIM4_CSB (MTK_PIN_NO(121) | 2)
+#define PINMUX_GPIO121__FUNC_O_SCP_SPI1_B_CS (MTK_PIN_NO(121) | 3)
+#define PINMUX_GPIO121__FUNC_O_TP_UTXD2_AO (MTK_PIN_NO(121) | 4)
+#define PINMUX_GPIO121__FUNC_O_AUXIF_ST0 (MTK_PIN_NO(121) | 5)
+#define PINMUX_GPIO121__FUNC_O_PGD_DA_EFUSE_RDY (MTK_PIN_NO(121) | 6)
+#define PINMUX_GPIO121__FUNC_B0_DBG_MON_B23 (MTK_PIN_NO(121) | 7)
+
+#define PINMUX_GPIO122__FUNC_B_GPIO122 (MTK_PIN_NO(122) | 0)
+#define PINMUX_GPIO122__FUNC_B0_PCM_SYNC (MTK_PIN_NO(122) | 1)
+#define PINMUX_GPIO122__FUNC_O_SPIM4_CLK (MTK_PIN_NO(122) | 2)
+#define PINMUX_GPIO122__FUNC_O_SCP_SPI1_B_CK (MTK_PIN_NO(122) | 3)
+#define PINMUX_GPIO122__FUNC_I1_TP_URXD2_AO (MTK_PIN_NO(122) | 4)
+#define PINMUX_GPIO122__FUNC_O_AUXIF_CLK0 (MTK_PIN_NO(122) | 5)
+#define PINMUX_GPIO122__FUNC_O_PGD_DA_EFUSE_RDY_PRE (MTK_PIN_NO(122) | 6)
+#define PINMUX_GPIO122__FUNC_B0_DBG_MON_B24 (MTK_PIN_NO(122) | 7)
+
+#define PINMUX_GPIO123__FUNC_B_GPIO123 (MTK_PIN_NO(123) | 0)
+#define PINMUX_GPIO123__FUNC_O_PCM_DO (MTK_PIN_NO(123) | 1)
+#define PINMUX_GPIO123__FUNC_B0_SPIM4_MOSI (MTK_PIN_NO(123) | 2)
+#define PINMUX_GPIO123__FUNC_O_SCP_SPI1_B_MO (MTK_PIN_NO(123) | 3)
+#define PINMUX_GPIO123__FUNC_O_TP_URTS2_AO (MTK_PIN_NO(123) | 4)
+#define PINMUX_GPIO123__FUNC_O_AUXIF_ST1 (MTK_PIN_NO(123) | 5)
+#define PINMUX_GPIO123__FUNC_O_PGD_DA_PWRGD_RESET (MTK_PIN_NO(123) | 6)
+#define PINMUX_GPIO123__FUNC_B0_DBG_MON_B25 (MTK_PIN_NO(123) | 7)
+
+#define PINMUX_GPIO124__FUNC_B_GPIO124 (MTK_PIN_NO(124) | 0)
+#define PINMUX_GPIO124__FUNC_I0_PCM_DI (MTK_PIN_NO(124) | 1)
+#define PINMUX_GPIO124__FUNC_B0_SPIM4_MISO (MTK_PIN_NO(124) | 2)
+#define PINMUX_GPIO124__FUNC_I0_SCP_SPI1_B_MI (MTK_PIN_NO(124) | 3)
+#define PINMUX_GPIO124__FUNC_I1_TP_UCTS2_AO (MTK_PIN_NO(124) | 4)
+#define PINMUX_GPIO124__FUNC_O_AUXIF_CLK1 (MTK_PIN_NO(124) | 5)
+#define PINMUX_GPIO124__FUNC_O_PGD_DA_PWRGD_ENB (MTK_PIN_NO(124) | 6)
+#define PINMUX_GPIO124__FUNC_B0_DBG_MON_B26 (MTK_PIN_NO(124) | 7)
+
+#define PINMUX_GPIO125__FUNC_B_GPIO125 (MTK_PIN_NO(125) | 0)
+#define PINMUX_GPIO125__FUNC_O_DMIC1_CLK (MTK_PIN_NO(125) | 1)
+#define PINMUX_GPIO125__FUNC_O_SPINOR_CK (MTK_PIN_NO(125) | 2)
+#define PINMUX_GPIO125__FUNC_B0_TDMIN_MCK (MTK_PIN_NO(125) | 3)
+#define PINMUX_GPIO125__FUNC_O_LVTS_FOUT (MTK_PIN_NO(125) | 6)
+#define PINMUX_GPIO125__FUNC_B0_DBG_MON_B27 (MTK_PIN_NO(125) | 7)
+
+#define PINMUX_GPIO126__FUNC_B_GPIO126 (MTK_PIN_NO(126) | 0)
+#define PINMUX_GPIO126__FUNC_I0_DMIC1_DAT (MTK_PIN_NO(126) | 1)
+#define PINMUX_GPIO126__FUNC_O_SPINOR_CS (MTK_PIN_NO(126) | 2)
+#define PINMUX_GPIO126__FUNC_B0_TDMIN_BCK (MTK_PIN_NO(126) | 3)
+#define PINMUX_GPIO126__FUNC_O_LVTS_SDO (MTK_PIN_NO(126) | 6)
+#define PINMUX_GPIO126__FUNC_B0_DBG_MON_B28 (MTK_PIN_NO(126) | 7)
+
+#define PINMUX_GPIO127__FUNC_B_GPIO127 (MTK_PIN_NO(127) | 0)
+#define PINMUX_GPIO127__FUNC_I0_DMIC1_DAT_R (MTK_PIN_NO(127) | 1)
+#define PINMUX_GPIO127__FUNC_B0_SPINOR_IO0 (MTK_PIN_NO(127) | 2)
+#define PINMUX_GPIO127__FUNC_B0_TDMIN_LRCK (MTK_PIN_NO(127) | 3)
+#define PINMUX_GPIO127__FUNC_I0_LVTS_26M (MTK_PIN_NO(127) | 6)
+#define PINMUX_GPIO127__FUNC_B0_DBG_MON_B29 (MTK_PIN_NO(127) | 7)
+
+#define PINMUX_GPIO128__FUNC_B_GPIO128 (MTK_PIN_NO(128) | 0)
+#define PINMUX_GPIO128__FUNC_O_DMIC2_CLK (MTK_PIN_NO(128) | 1)
+#define PINMUX_GPIO128__FUNC_B0_SPINOR_IO1 (MTK_PIN_NO(128) | 2)
+#define PINMUX_GPIO128__FUNC_I0_TDMIN_DI (MTK_PIN_NO(128) | 3)
+#define PINMUX_GPIO128__FUNC_I0_LVTS_SCF (MTK_PIN_NO(128) | 6)
+#define PINMUX_GPIO128__FUNC_B0_DBG_MON_B30 (MTK_PIN_NO(128) | 7)
+
+#define PINMUX_GPIO129__FUNC_B_GPIO129 (MTK_PIN_NO(129) | 0)
+#define PINMUX_GPIO129__FUNC_I0_DMIC2_DAT (MTK_PIN_NO(129) | 1)
+#define PINMUX_GPIO129__FUNC_B0_SPINOR_IO2 (MTK_PIN_NO(129) | 2)
+#define PINMUX_GPIO129__FUNC_I0_SPDIF_IN1 (MTK_PIN_NO(129) | 3)
+#define PINMUX_GPIO129__FUNC_I0_LVTS_SCK (MTK_PIN_NO(129) | 6)
+#define PINMUX_GPIO129__FUNC_B0_DBG_MON_B31 (MTK_PIN_NO(129) | 7)
+
+#define PINMUX_GPIO130__FUNC_B_GPIO130 (MTK_PIN_NO(130) | 0)
+#define PINMUX_GPIO130__FUNC_I0_DMIC2_DAT_R (MTK_PIN_NO(130) | 1)
+#define PINMUX_GPIO130__FUNC_B0_SPINOR_IO3 (MTK_PIN_NO(130) | 2)
+#define PINMUX_GPIO130__FUNC_I0_SPDIF_IN2 (MTK_PIN_NO(130) | 3)
+#define PINMUX_GPIO130__FUNC_I0_LVTS_SDI (MTK_PIN_NO(130) | 6)
+#define PINMUX_GPIO130__FUNC_B0_DBG_MON_B32 (MTK_PIN_NO(130) | 7)
+
+#define PINMUX_GPIO131__FUNC_B_GPIO131 (MTK_PIN_NO(131) | 0)
+#define PINMUX_GPIO131__FUNC_O_DPI_D0 (MTK_PIN_NO(131) | 1)
+#define PINMUX_GPIO131__FUNC_O_GBE_TXD3 (MTK_PIN_NO(131) | 2)
+#define PINMUX_GPIO131__FUNC_O_DMIC1_CLK (MTK_PIN_NO(131) | 3)
+#define PINMUX_GPIO131__FUNC_O_I2SO2_MCK (MTK_PIN_NO(131) | 4)
+#define PINMUX_GPIO131__FUNC_B0_TP_GPIO0_AO (MTK_PIN_NO(131) | 5)
+#define PINMUX_GPIO131__FUNC_O_SPIM5_CSB (MTK_PIN_NO(131) | 6)
+#define PINMUX_GPIO131__FUNC_O_PGD_LV_HSC_PWR0 (MTK_PIN_NO(131) | 7)
+
+#define PINMUX_GPIO132__FUNC_B_GPIO132 (MTK_PIN_NO(132) | 0)
+#define PINMUX_GPIO132__FUNC_O_DPI_D1 (MTK_PIN_NO(132) | 1)
+#define PINMUX_GPIO132__FUNC_O_GBE_TXD2 (MTK_PIN_NO(132) | 2)
+#define PINMUX_GPIO132__FUNC_I0_DMIC1_DAT (MTK_PIN_NO(132) | 3)
+#define PINMUX_GPIO132__FUNC_B0_I2SO2_BCK (MTK_PIN_NO(132) | 4)
+#define PINMUX_GPIO132__FUNC_B0_TP_GPIO1_AO (MTK_PIN_NO(132) | 5)
+#define PINMUX_GPIO132__FUNC_O_SPIM5_CLK (MTK_PIN_NO(132) | 6)
+#define PINMUX_GPIO132__FUNC_O_PGD_LV_HSC_PWR1 (MTK_PIN_NO(132) | 7)
+
+#define PINMUX_GPIO133__FUNC_B_GPIO133 (MTK_PIN_NO(133) | 0)
+#define PINMUX_GPIO133__FUNC_O_DPI_D2 (MTK_PIN_NO(133) | 1)
+#define PINMUX_GPIO133__FUNC_O_GBE_TXD1 (MTK_PIN_NO(133) | 2)
+#define PINMUX_GPIO133__FUNC_I0_DMIC1_DAT_R (MTK_PIN_NO(133) | 3)
+#define PINMUX_GPIO133__FUNC_B0_I2SO2_WS (MTK_PIN_NO(133) | 4)
+#define PINMUX_GPIO133__FUNC_B0_TP_GPIO2_AO (MTK_PIN_NO(133) | 5)
+#define PINMUX_GPIO133__FUNC_B0_SPIM5_MOSI (MTK_PIN_NO(133) | 6)
+#define PINMUX_GPIO133__FUNC_O_PGD_LV_HSC_PWR2 (MTK_PIN_NO(133) | 7)
+
+#define PINMUX_GPIO134__FUNC_B_GPIO134 (MTK_PIN_NO(134) | 0)
+#define PINMUX_GPIO134__FUNC_O_DPI_D3 (MTK_PIN_NO(134) | 1)
+#define PINMUX_GPIO134__FUNC_O_GBE_TXD0 (MTK_PIN_NO(134) | 2)
+#define PINMUX_GPIO134__FUNC_O_DMIC2_CLK (MTK_PIN_NO(134) | 3)
+#define PINMUX_GPIO134__FUNC_O_I2SO2_D0 (MTK_PIN_NO(134) | 4)
+#define PINMUX_GPIO134__FUNC_B0_TP_GPIO3_AO (MTK_PIN_NO(134) | 5)
+#define PINMUX_GPIO134__FUNC_B0_SPIM5_MISO (MTK_PIN_NO(134) | 6)
+#define PINMUX_GPIO134__FUNC_O_PGD_LV_HSC_PWR3 (MTK_PIN_NO(134) | 7)
+
+#define PINMUX_GPIO135__FUNC_B_GPIO135 (MTK_PIN_NO(135) | 0)
+#define PINMUX_GPIO135__FUNC_O_DPI_D4 (MTK_PIN_NO(135) | 1)
+#define PINMUX_GPIO135__FUNC_I0_GBE_RXD3 (MTK_PIN_NO(135) | 2)
+#define PINMUX_GPIO135__FUNC_I0_DMIC2_DAT (MTK_PIN_NO(135) | 3)
+#define PINMUX_GPIO135__FUNC_O_I2SO2_D1 (MTK_PIN_NO(135) | 4)
+#define PINMUX_GPIO135__FUNC_B0_TP_GPIO4_AO (MTK_PIN_NO(135) | 5)
+#define PINMUX_GPIO135__FUNC_I1_WAKEN (MTK_PIN_NO(135) | 6)
+#define PINMUX_GPIO135__FUNC_O_PGD_LV_HSC_PWR4 (MTK_PIN_NO(135) | 7)
+
+#define PINMUX_GPIO136__FUNC_B_GPIO136 (MTK_PIN_NO(136) | 0)
+#define PINMUX_GPIO136__FUNC_O_DPI_D5 (MTK_PIN_NO(136) | 1)
+#define PINMUX_GPIO136__FUNC_I0_GBE_RXD2 (MTK_PIN_NO(136) | 2)
+#define PINMUX_GPIO136__FUNC_I0_DMIC2_DAT_R (MTK_PIN_NO(136) | 3)
+#define PINMUX_GPIO136__FUNC_O_I2SO2_D2 (MTK_PIN_NO(136) | 4)
+#define PINMUX_GPIO136__FUNC_B0_TP_GPIO5_AO (MTK_PIN_NO(136) | 5)
+#define PINMUX_GPIO136__FUNC_O_PERSTN (MTK_PIN_NO(136) | 6)
+#define PINMUX_GPIO136__FUNC_O_PGD_LV_HSC_PWR5 (MTK_PIN_NO(136) | 7)
+
+#define PINMUX_GPIO137__FUNC_B_GPIO137 (MTK_PIN_NO(137) | 0)
+#define PINMUX_GPIO137__FUNC_O_DPI_D6 (MTK_PIN_NO(137) | 1)
+#define PINMUX_GPIO137__FUNC_I0_GBE_RXD1 (MTK_PIN_NO(137) | 2)
+#define PINMUX_GPIO137__FUNC_O_DMIC3_CLK (MTK_PIN_NO(137) | 3)
+#define PINMUX_GPIO137__FUNC_O_I2SO2_D3 (MTK_PIN_NO(137) | 4)
+#define PINMUX_GPIO137__FUNC_B0_TP_GPIO6_AO (MTK_PIN_NO(137) | 5)
+#define PINMUX_GPIO137__FUNC_B1_CLKREQN (MTK_PIN_NO(137) | 6)
+#define PINMUX_GPIO137__FUNC_O_PWM_0 (MTK_PIN_NO(137) | 7)
+
+#define PINMUX_GPIO138__FUNC_B_GPIO138 (MTK_PIN_NO(138) | 0)
+#define PINMUX_GPIO138__FUNC_O_DPI_D7 (MTK_PIN_NO(138) | 1)
+#define PINMUX_GPIO138__FUNC_I0_GBE_RXD0 (MTK_PIN_NO(138) | 2)
+#define PINMUX_GPIO138__FUNC_I0_DMIC3_DAT (MTK_PIN_NO(138) | 3)
+#define PINMUX_GPIO138__FUNC_O_CLKM2 (MTK_PIN_NO(138) | 4)
+#define PINMUX_GPIO138__FUNC_B0_TP_GPIO7_AO (MTK_PIN_NO(138) | 5)
+#define PINMUX_GPIO138__FUNC_B0_MD32_0_GPIO0 (MTK_PIN_NO(138) | 7)
+
+#define PINMUX_GPIO139__FUNC_B_GPIO139 (MTK_PIN_NO(139) | 0)
+#define PINMUX_GPIO139__FUNC_O_DPI_D8 (MTK_PIN_NO(139) | 1)
+#define PINMUX_GPIO139__FUNC_B0_GBE_TXC (MTK_PIN_NO(139) | 2)
+#define PINMUX_GPIO139__FUNC_I0_DMIC3_DAT_R (MTK_PIN_NO(139) | 3)
+#define PINMUX_GPIO139__FUNC_O_CLKM3 (MTK_PIN_NO(139) | 4)
+#define PINMUX_GPIO139__FUNC_O_TP_UTXD2_AO (MTK_PIN_NO(139) | 5)
+#define PINMUX_GPIO139__FUNC_O_UTXD2 (MTK_PIN_NO(139) | 6)
+#define PINMUX_GPIO139__FUNC_B0_MD32_0_GPIO1 (MTK_PIN_NO(139) | 7)
+
+#define PINMUX_GPIO140__FUNC_B_GPIO140 (MTK_PIN_NO(140) | 0)
+#define PINMUX_GPIO140__FUNC_O_DPI_D9 (MTK_PIN_NO(140) | 1)
+#define PINMUX_GPIO140__FUNC_I0_GBE_RXC (MTK_PIN_NO(140) | 2)
+#define PINMUX_GPIO140__FUNC_O_DMIC4_CLK (MTK_PIN_NO(140) | 3)
+#define PINMUX_GPIO140__FUNC_O_PWM_2 (MTK_PIN_NO(140) | 4)
+#define PINMUX_GPIO140__FUNC_I1_TP_URXD2_AO (MTK_PIN_NO(140) | 5)
+#define PINMUX_GPIO140__FUNC_I1_URXD2 (MTK_PIN_NO(140) | 6)
+#define PINMUX_GPIO140__FUNC_B0_MD32_0_GPIO2 (MTK_PIN_NO(140) | 7)
+
+#define PINMUX_GPIO141__FUNC_B_GPIO141 (MTK_PIN_NO(141) | 0)
+#define PINMUX_GPIO141__FUNC_O_DPI_D10 (MTK_PIN_NO(141) | 1)
+#define PINMUX_GPIO141__FUNC_I0_GBE_RXDV (MTK_PIN_NO(141) | 2)
+#define PINMUX_GPIO141__FUNC_I0_DMIC4_DAT (MTK_PIN_NO(141) | 3)
+#define PINMUX_GPIO141__FUNC_O_PWM_3 (MTK_PIN_NO(141) | 4)
+#define PINMUX_GPIO141__FUNC_O_TP_URTS2_AO (MTK_PIN_NO(141) | 5)
+#define PINMUX_GPIO141__FUNC_O_URTS2 (MTK_PIN_NO(141) | 6)
+#define PINMUX_GPIO141__FUNC_B0_MD32_1_GPIO0 (MTK_PIN_NO(141) | 7)
+
+#define PINMUX_GPIO142__FUNC_B_GPIO142 (MTK_PIN_NO(142) | 0)
+#define PINMUX_GPIO142__FUNC_O_DPI_D11 (MTK_PIN_NO(142) | 1)
+#define PINMUX_GPIO142__FUNC_O_GBE_TXEN (MTK_PIN_NO(142) | 2)
+#define PINMUX_GPIO142__FUNC_I0_DMIC4_DAT_R (MTK_PIN_NO(142) | 3)
+#define PINMUX_GPIO142__FUNC_O_PWM_1 (MTK_PIN_NO(142) | 4)
+#define PINMUX_GPIO142__FUNC_I1_TP_UCTS2_AO (MTK_PIN_NO(142) | 5)
+#define PINMUX_GPIO142__FUNC_I1_UCTS2 (MTK_PIN_NO(142) | 6)
+#define PINMUX_GPIO142__FUNC_B0_MD32_1_GPIO1 (MTK_PIN_NO(142) | 7)
+
+#define PINMUX_GPIO143__FUNC_B_GPIO143 (MTK_PIN_NO(143) | 0)
+#define PINMUX_GPIO143__FUNC_O_DPI_D12 (MTK_PIN_NO(143) | 1)
+#define PINMUX_GPIO143__FUNC_O_GBE_MDC (MTK_PIN_NO(143) | 2)
+#define PINMUX_GPIO143__FUNC_B0_MD32_0_GPIO0 (MTK_PIN_NO(143) | 3)
+#define PINMUX_GPIO143__FUNC_O_CLKM0 (MTK_PIN_NO(143) | 4)
+#define PINMUX_GPIO143__FUNC_O_SPIM3_CSB (MTK_PIN_NO(143) | 5)
+#define PINMUX_GPIO143__FUNC_O_UTXD1 (MTK_PIN_NO(143) | 6)
+#define PINMUX_GPIO143__FUNC_B0_MD32_1_GPIO2 (MTK_PIN_NO(143) | 7)
+
+#define PINMUX_GPIO144__FUNC_B_GPIO144 (MTK_PIN_NO(144) | 0)
+#define PINMUX_GPIO144__FUNC_O_DPI_D13 (MTK_PIN_NO(144) | 1)
+#define PINMUX_GPIO144__FUNC_B1_GBE_MDIO (MTK_PIN_NO(144) | 2)
+#define PINMUX_GPIO144__FUNC_B0_MD32_0_GPIO1 (MTK_PIN_NO(144) | 3)
+#define PINMUX_GPIO144__FUNC_O_CLKM1 (MTK_PIN_NO(144) | 4)
+#define PINMUX_GPIO144__FUNC_O_SPIM3_CLK (MTK_PIN_NO(144) | 5)
+#define PINMUX_GPIO144__FUNC_I1_URXD1 (MTK_PIN_NO(144) | 6)
+#define PINMUX_GPIO144__FUNC_O_PGD_HV_HSC_PWR0 (MTK_PIN_NO(144) | 7)
+
+#define PINMUX_GPIO145__FUNC_B_GPIO145 (MTK_PIN_NO(145) | 0)
+#define PINMUX_GPIO145__FUNC_O_DPI_D14 (MTK_PIN_NO(145) | 1)
+#define PINMUX_GPIO145__FUNC_O_GBE_TXER (MTK_PIN_NO(145) | 2)
+#define PINMUX_GPIO145__FUNC_B0_MD32_1_GPIO0 (MTK_PIN_NO(145) | 3)
+#define PINMUX_GPIO145__FUNC_O_CMFLASH0 (MTK_PIN_NO(145) | 4)
+#define PINMUX_GPIO145__FUNC_B0_SPIM3_MOSI (MTK_PIN_NO(145) | 5)
+#define PINMUX_GPIO145__FUNC_B0_GBE_AUX_PPS2 (MTK_PIN_NO(145) | 6)
+#define PINMUX_GPIO145__FUNC_O_PGD_HV_HSC_PWR1 (MTK_PIN_NO(145) | 7)
+
+#define PINMUX_GPIO146__FUNC_B_GPIO146 (MTK_PIN_NO(146) | 0)
+#define PINMUX_GPIO146__FUNC_O_DPI_D15 (MTK_PIN_NO(146) | 1)
+#define PINMUX_GPIO146__FUNC_I0_GBE_RXER (MTK_PIN_NO(146) | 2)
+#define PINMUX_GPIO146__FUNC_B0_MD32_1_GPIO1 (MTK_PIN_NO(146) | 3)
+#define PINMUX_GPIO146__FUNC_O_CMFLASH1 (MTK_PIN_NO(146) | 4)
+#define PINMUX_GPIO146__FUNC_B0_SPIM3_MISO (MTK_PIN_NO(146) | 5)
+#define PINMUX_GPIO146__FUNC_B0_GBE_AUX_PPS3 (MTK_PIN_NO(146) | 6)
+#define PINMUX_GPIO146__FUNC_O_PGD_HV_HSC_PWR2 (MTK_PIN_NO(146) | 7)
+
+#define PINMUX_GPIO147__FUNC_B_GPIO147 (MTK_PIN_NO(147) | 0)
+#define PINMUX_GPIO147__FUNC_O_DPI_HSYNC (MTK_PIN_NO(147) | 1)
+#define PINMUX_GPIO147__FUNC_I0_GBE_COL (MTK_PIN_NO(147) | 2)
+#define PINMUX_GPIO147__FUNC_O_I2SO1_MCK (MTK_PIN_NO(147) | 3)
+#define PINMUX_GPIO147__FUNC_O_CMVREF0 (MTK_PIN_NO(147) | 4)
+#define PINMUX_GPIO147__FUNC_O_SPDIF_OUT (MTK_PIN_NO(147) | 5)
+#define PINMUX_GPIO147__FUNC_O_URTS1 (MTK_PIN_NO(147) | 6)
+#define PINMUX_GPIO147__FUNC_O_PGD_HV_HSC_PWR3 (MTK_PIN_NO(147) | 7)
+
+#define PINMUX_GPIO148__FUNC_B_GPIO148 (MTK_PIN_NO(148) | 0)
+#define PINMUX_GPIO148__FUNC_O_DPI_VSYNC (MTK_PIN_NO(148) | 1)
+#define PINMUX_GPIO148__FUNC_I0_GBE_INTR (MTK_PIN_NO(148) | 2)
+#define PINMUX_GPIO148__FUNC_O_I2SO1_BCK (MTK_PIN_NO(148) | 3)
+#define PINMUX_GPIO148__FUNC_O_CMVREF1 (MTK_PIN_NO(148) | 4)
+#define PINMUX_GPIO148__FUNC_I0_SPDIF_IN0 (MTK_PIN_NO(148) | 5)
+#define PINMUX_GPIO148__FUNC_I1_UCTS1 (MTK_PIN_NO(148) | 6)
+#define PINMUX_GPIO148__FUNC_O_PGD_HV_HSC_PWR4 (MTK_PIN_NO(148) | 7)
+
+#define PINMUX_GPIO149__FUNC_B_GPIO149 (MTK_PIN_NO(149) | 0)
+#define PINMUX_GPIO149__FUNC_O_DPI_DE (MTK_PIN_NO(149) | 1)
+#define PINMUX_GPIO149__FUNC_B0_GBE_AUX_PPS0 (MTK_PIN_NO(149) | 2)
+#define PINMUX_GPIO149__FUNC_O_I2SO1_WS (MTK_PIN_NO(149) | 3)
+#define PINMUX_GPIO149__FUNC_O_CMVREF2 (MTK_PIN_NO(149) | 4)
+#define PINMUX_GPIO149__FUNC_I0_SPDIF_IN1 (MTK_PIN_NO(149) | 5)
+#define PINMUX_GPIO149__FUNC_O_UTXD3 (MTK_PIN_NO(149) | 6)
+#define PINMUX_GPIO149__FUNC_O_PGD_HV_HSC_PWR5 (MTK_PIN_NO(149) | 7)
+
+#define PINMUX_GPIO150__FUNC_B_GPIO150 (MTK_PIN_NO(150) | 0)
+#define PINMUX_GPIO150__FUNC_O_DPI_CK (MTK_PIN_NO(150) | 1)
+#define PINMUX_GPIO150__FUNC_B0_GBE_AUX_PPS1 (MTK_PIN_NO(150) | 2)
+#define PINMUX_GPIO150__FUNC_O_I2SO1_D0 (MTK_PIN_NO(150) | 3)
+#define PINMUX_GPIO150__FUNC_O_CMVREF3 (MTK_PIN_NO(150) | 4)
+#define PINMUX_GPIO150__FUNC_I0_SPDIF_IN2 (MTK_PIN_NO(150) | 5)
+#define PINMUX_GPIO150__FUNC_I1_URXD3 (MTK_PIN_NO(150) | 6)
+
+#define PINMUX_GPIO151__FUNC_B_GPIO151 (MTK_PIN_NO(151) | 0)
+#define PINMUX_GPIO151__FUNC_B1_MSDC0_DAT7 (MTK_PIN_NO(151) | 1)
+
+#define PINMUX_GPIO152__FUNC_B_GPIO152 (MTK_PIN_NO(152) | 0)
+#define PINMUX_GPIO152__FUNC_B1_MSDC0_DAT6 (MTK_PIN_NO(152) | 1)
+
+#define PINMUX_GPIO153__FUNC_B_GPIO153 (MTK_PIN_NO(153) | 0)
+#define PINMUX_GPIO153__FUNC_B1_MSDC0_DAT5 (MTK_PIN_NO(153) | 1)
+
+#define PINMUX_GPIO154__FUNC_B_GPIO154 (MTK_PIN_NO(154) | 0)
+#define PINMUX_GPIO154__FUNC_B1_MSDC0_DAT4 (MTK_PIN_NO(154) | 1)
+
+#define PINMUX_GPIO155__FUNC_B_GPIO155 (MTK_PIN_NO(155) | 0)
+#define PINMUX_GPIO155__FUNC_O_MSDC0_RSTB (MTK_PIN_NO(155) | 1)
+
+#define PINMUX_GPIO156__FUNC_B_GPIO156 (MTK_PIN_NO(156) | 0)
+#define PINMUX_GPIO156__FUNC_B1_MSDC0_CMD (MTK_PIN_NO(156) | 1)
+
+#define PINMUX_GPIO157__FUNC_B_GPIO157 (MTK_PIN_NO(157) | 0)
+#define PINMUX_GPIO157__FUNC_B1_MSDC0_CLK (MTK_PIN_NO(157) | 1)
+
+#define PINMUX_GPIO158__FUNC_B_GPIO158 (MTK_PIN_NO(158) | 0)
+#define PINMUX_GPIO158__FUNC_B1_MSDC0_DAT3 (MTK_PIN_NO(158) | 1)
+
+#define PINMUX_GPIO159__FUNC_B_GPIO159 (MTK_PIN_NO(159) | 0)
+#define PINMUX_GPIO159__FUNC_B1_MSDC0_DAT2 (MTK_PIN_NO(159) | 1)
+
+#define PINMUX_GPIO160__FUNC_B_GPIO160 (MTK_PIN_NO(160) | 0)
+#define PINMUX_GPIO160__FUNC_B1_MSDC0_DAT1 (MTK_PIN_NO(160) | 1)
+
+#define PINMUX_GPIO161__FUNC_B_GPIO161 (MTK_PIN_NO(161) | 0)
+#define PINMUX_GPIO161__FUNC_B1_MSDC0_DAT0 (MTK_PIN_NO(161) | 1)
+
+#define PINMUX_GPIO162__FUNC_B_GPIO162 (MTK_PIN_NO(162) | 0)
+#define PINMUX_GPIO162__FUNC_B0_MSDC0_DSL (MTK_PIN_NO(162) | 1)
+
+#define PINMUX_GPIO163__FUNC_B_GPIO163 (MTK_PIN_NO(163) | 0)
+#define PINMUX_GPIO163__FUNC_B1_MSDC1_CMD (MTK_PIN_NO(163) | 1)
+#define PINMUX_GPIO163__FUNC_O_SPDIF_OUT (MTK_PIN_NO(163) | 2)
+#define PINMUX_GPIO163__FUNC_I1_MD32_0_JTAG_TMS (MTK_PIN_NO(163) | 3)
+#define PINMUX_GPIO163__FUNC_I1_ADSP_JTAG0_TMS (MTK_PIN_NO(163) | 4)
+#define PINMUX_GPIO163__FUNC_I1_SCP_JTAG0_TMS (MTK_PIN_NO(163) | 5)
+#define PINMUX_GPIO163__FUNC_I1_CCU0_JTAG_TMS (MTK_PIN_NO(163) | 6)
+#define PINMUX_GPIO163__FUNC_I0_IPU_JTAG_TMS (MTK_PIN_NO(163) | 7)
+
+#define PINMUX_GPIO164__FUNC_B_GPIO164 (MTK_PIN_NO(164) | 0)
+#define PINMUX_GPIO164__FUNC_B1_MSDC1_CLK (MTK_PIN_NO(164) | 1)
+#define PINMUX_GPIO164__FUNC_I0_SPDIF_IN0 (MTK_PIN_NO(164) | 2)
+#define PINMUX_GPIO164__FUNC_I1_MD32_0_JTAG_TCK (MTK_PIN_NO(164) | 3)
+#define PINMUX_GPIO164__FUNC_I0_ADSP_JTAG0_TCK (MTK_PIN_NO(164) | 4)
+#define PINMUX_GPIO164__FUNC_I1_SCP_JTAG0_TCK (MTK_PIN_NO(164) | 5)
+#define PINMUX_GPIO164__FUNC_I1_CCU0_JTAG_TCK (MTK_PIN_NO(164) | 6)
+#define PINMUX_GPIO164__FUNC_I0_IPU_JTAG_TCK (MTK_PIN_NO(164) | 7)
+
+#define PINMUX_GPIO165__FUNC_B_GPIO165 (MTK_PIN_NO(165) | 0)
+#define PINMUX_GPIO165__FUNC_B1_MSDC1_DAT0 (MTK_PIN_NO(165) | 1)
+#define PINMUX_GPIO165__FUNC_I0_SPDIF_IN1 (MTK_PIN_NO(165) | 2)
+#define PINMUX_GPIO165__FUNC_I1_MD32_0_JTAG_TDI (MTK_PIN_NO(165) | 3)
+#define PINMUX_GPIO165__FUNC_I1_ADSP_JTAG0_TDI (MTK_PIN_NO(165) | 4)
+#define PINMUX_GPIO165__FUNC_I1_SCP_JTAG0_TDI (MTK_PIN_NO(165) | 5)
+#define PINMUX_GPIO165__FUNC_I1_CCU0_JTAG_TDI (MTK_PIN_NO(165) | 6)
+#define PINMUX_GPIO165__FUNC_I0_IPU_JTAG_TDI (MTK_PIN_NO(165) | 7)
+
+#define PINMUX_GPIO166__FUNC_B_GPIO166 (MTK_PIN_NO(166) | 0)
+#define PINMUX_GPIO166__FUNC_B1_MSDC1_DAT1 (MTK_PIN_NO(166) | 1)
+#define PINMUX_GPIO166__FUNC_I0_SPDIF_IN2 (MTK_PIN_NO(166) | 2)
+#define PINMUX_GPIO166__FUNC_O_MD32_0_JTAG_TDO (MTK_PIN_NO(166) | 3)
+#define PINMUX_GPIO166__FUNC_O_ADSP_JTAG0_TDO (MTK_PIN_NO(166) | 4)
+#define PINMUX_GPIO166__FUNC_O_SCP_JTAG0_TDO (MTK_PIN_NO(166) | 5)
+#define PINMUX_GPIO166__FUNC_O_CCU0_JTAG_TDO (MTK_PIN_NO(166) | 6)
+#define PINMUX_GPIO166__FUNC_O_IPU_JTAG_TDO (MTK_PIN_NO(166) | 7)
+
+#define PINMUX_GPIO167__FUNC_B_GPIO167 (MTK_PIN_NO(167) | 0)
+#define PINMUX_GPIO167__FUNC_B1_MSDC1_DAT2 (MTK_PIN_NO(167) | 1)
+#define PINMUX_GPIO167__FUNC_O_PWM_0 (MTK_PIN_NO(167) | 2)
+#define PINMUX_GPIO167__FUNC_I1_MD32_0_JTAG_TRST (MTK_PIN_NO(167) | 3)
+#define PINMUX_GPIO167__FUNC_I1_ADSP_JTAG0_TRSTN (MTK_PIN_NO(167) | 4)
+#define PINMUX_GPIO167__FUNC_I0_SCP_JTAG0_TRSTN (MTK_PIN_NO(167) | 5)
+#define PINMUX_GPIO167__FUNC_I1_CCU0_JTAG_TRST (MTK_PIN_NO(167) | 6)
+#define PINMUX_GPIO167__FUNC_I0_IPU_JTAG_TRST (MTK_PIN_NO(167) | 7)
+
+#define PINMUX_GPIO168__FUNC_B_GPIO168 (MTK_PIN_NO(168) | 0)
+#define PINMUX_GPIO168__FUNC_B1_MSDC1_DAT3 (MTK_PIN_NO(168) | 1)
+#define PINMUX_GPIO168__FUNC_O_PWM_1 (MTK_PIN_NO(168) | 2)
+#define PINMUX_GPIO168__FUNC_O_CLKM0 (MTK_PIN_NO(168) | 3)
+
+#define PINMUX_GPIO169__FUNC_B_GPIO169 (MTK_PIN_NO(169) | 0)
+#define PINMUX_GPIO169__FUNC_B1_MSDC2_CMD (MTK_PIN_NO(169) | 1)
+#define PINMUX_GPIO169__FUNC_O_LVTS_FOUT (MTK_PIN_NO(169) | 2)
+#define PINMUX_GPIO169__FUNC_I1_MD32_1_JTAG_TMS (MTK_PIN_NO(169) | 3)
+#define PINMUX_GPIO169__FUNC_I0_UDI_TMS (MTK_PIN_NO(169) | 4)
+#define PINMUX_GPIO169__FUNC_I0_VPU_UDI_TMS (MTK_PIN_NO(169) | 5)
+#define PINMUX_GPIO169__FUNC_B0_TDMIN_MCK (MTK_PIN_NO(169) | 6)
+#define PINMUX_GPIO169__FUNC_I1_SSPM_JTAG_TMS (MTK_PIN_NO(169) | 7)
+
+#define PINMUX_GPIO170__FUNC_B_GPIO170 (MTK_PIN_NO(170) | 0)
+#define PINMUX_GPIO170__FUNC_B1_MSDC2_CLK (MTK_PIN_NO(170) | 1)
+#define PINMUX_GPIO170__FUNC_O_LVTS_SDO (MTK_PIN_NO(170) | 2)
+#define PINMUX_GPIO170__FUNC_I1_MD32_1_JTAG_TCK (MTK_PIN_NO(170) | 3)
+#define PINMUX_GPIO170__FUNC_I0_UDI_TCK (MTK_PIN_NO(170) | 4)
+#define PINMUX_GPIO170__FUNC_I0_VPU_UDI_TCK (MTK_PIN_NO(170) | 5)
+#define PINMUX_GPIO170__FUNC_B0_TDMIN_BCK (MTK_PIN_NO(170) | 6)
+#define PINMUX_GPIO170__FUNC_I1_SSPM_JTAG_TCK (MTK_PIN_NO(170) | 7)
+
+#define PINMUX_GPIO171__FUNC_B_GPIO171 (MTK_PIN_NO(171) | 0)
+#define PINMUX_GPIO171__FUNC_B1_MSDC2_DAT0 (MTK_PIN_NO(171) | 1)
+#define PINMUX_GPIO171__FUNC_I0_LVTS_26M (MTK_PIN_NO(171) | 2)
+#define PINMUX_GPIO171__FUNC_I1_MD32_1_JTAG_TDI (MTK_PIN_NO(171) | 3)
+#define PINMUX_GPIO171__FUNC_I0_UDI_TDI (MTK_PIN_NO(171) | 4)
+#define PINMUX_GPIO171__FUNC_I0_VPU_UDI_TDI (MTK_PIN_NO(171) | 5)
+#define PINMUX_GPIO171__FUNC_B0_TDMIN_LRCK (MTK_PIN_NO(171) | 6)
+#define PINMUX_GPIO171__FUNC_I1_SSPM_JTAG_TDI (MTK_PIN_NO(171) | 7)
+
+#define PINMUX_GPIO172__FUNC_B_GPIO172 (MTK_PIN_NO(172) | 0)
+#define PINMUX_GPIO172__FUNC_B1_MSDC2_DAT1 (MTK_PIN_NO(172) | 1)
+#define PINMUX_GPIO172__FUNC_I0_LVTS_SCF (MTK_PIN_NO(172) | 2)
+#define PINMUX_GPIO172__FUNC_O_MD32_1_JTAG_TDO (MTK_PIN_NO(172) | 3)
+#define PINMUX_GPIO172__FUNC_O_UDI_TDO (MTK_PIN_NO(172) | 4)
+#define PINMUX_GPIO172__FUNC_O_VPU_UDI_TDO (MTK_PIN_NO(172) | 5)
+#define PINMUX_GPIO172__FUNC_I0_TDMIN_DI (MTK_PIN_NO(172) | 6)
+#define PINMUX_GPIO172__FUNC_O_SSPM_JTAG_TDO (MTK_PIN_NO(172) | 7)
+
+#define PINMUX_GPIO173__FUNC_B_GPIO173 (MTK_PIN_NO(173) | 0)
+#define PINMUX_GPIO173__FUNC_B1_MSDC2_DAT2 (MTK_PIN_NO(173) | 1)
+#define PINMUX_GPIO173__FUNC_I0_LVTS_SCK (MTK_PIN_NO(173) | 2)
+#define PINMUX_GPIO173__FUNC_I1_MD32_1_JTAG_TRST (MTK_PIN_NO(173) | 3)
+#define PINMUX_GPIO173__FUNC_I0_UDI_NTRST (MTK_PIN_NO(173) | 4)
+#define PINMUX_GPIO173__FUNC_I0_VPU_UDI_NTRST (MTK_PIN_NO(173) | 5)
+#define PINMUX_GPIO173__FUNC_I0_SSPM_JTAG_TRSTN (MTK_PIN_NO(173) | 7)
+
+#define PINMUX_GPIO174__FUNC_B_GPIO174 (MTK_PIN_NO(174) | 0)
+#define PINMUX_GPIO174__FUNC_B1_MSDC2_DAT3 (MTK_PIN_NO(174) | 1)
+#define PINMUX_GPIO174__FUNC_I0_LVTS_SDI (MTK_PIN_NO(174) | 2)
+
+#define PINMUX_GPIO175__FUNC_B_GPIO175 (MTK_PIN_NO(175) | 0)
+#define PINMUX_GPIO175__FUNC_B0_SPMI_M_SCL (MTK_PIN_NO(175) | 1)
+
+#define PINMUX_GPIO176__FUNC_B_GPIO176 (MTK_PIN_NO(176) | 0)
+#define PINMUX_GPIO176__FUNC_B0_SPMI_M_SDA (MTK_PIN_NO(176) | 1)
+
+#endif /* __MEDIATEK_MT8188_PINFUNC_H */
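
For orientation, a minimal sketch of how these PINMUX_* constants are normally consumed from a board device tree. The header path, the &pio label and the i2c0_pins group name are illustrative assumptions and are not taken from this patch:

	#include <dt-bindings/pinctrl/mediatek,mt8188-pinfunc.h>	/* assumed path */

	&pio {
		i2c0_pins: i2c0-pins {
			pins-bus {
				/* mode 1 on GPIO55/GPIO56: bidirectional SCL0/SDA0 */
				pinmux = <PINMUX_GPIO55__FUNC_B1_SCL0>,
					 <PINMUX_GPIO56__FUNC_B1_SDA0>;
				bias-pull-up;
			};
		};
	};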
diff --git a/include/dt-bindings/pinctrl/pinctrl-starfive.h b/include/dt-bindings/pinctrl/pinctrl-starfive-jh7100.h
index de4f75c2c9e8..a200f546d078 100644
--- a/include/dt-bindings/pinctrl/pinctrl-starfive.h
+++ b/include/dt-bindings/pinctrl/pinctrl-starfive-jh7100.h
@@ -3,8 +3,8 @@
* Copyright (C) 2021 Emil Renner Berthing <kernel@esmil.dk>
*/
-#ifndef __DT_BINDINGS_PINCTRL_STARFIVE_H__
-#define __DT_BINDINGS_PINCTRL_STARFIVE_H__
+#ifndef __DT_BINDINGS_PINCTRL_STARFIVE_JH7100_H__
+#define __DT_BINDINGS_PINCTRL_STARFIVE_JH7100_H__
#define PAD_GPIO_OFFSET 0
#define PAD_FUNC_SHARE_OFFSET 64
@@ -272,4 +272,4 @@
#define GPI_NONE 0xff
-#endif /* __DT_BINDINGS_PINCTRL_STARFIVE_H__ */
+#endif /* __DT_BINDINGS_PINCTRL_STARFIVE_JH7100_H__ */
diff --git a/include/dt-bindings/pinctrl/samsung.h b/include/dt-bindings/pinctrl/samsung.h
index 950970634dfe..d1da5ff68d0c 100644
--- a/include/dt-bindings/pinctrl/samsung.h
+++ b/include/dt-bindings/pinctrl/samsung.h
@@ -10,6 +10,13 @@
#ifndef __DT_BINDINGS_PINCTRL_SAMSUNG_H__
#define __DT_BINDINGS_PINCTRL_SAMSUNG_H__
+/*
+ * These bindings are deprecated, because they do not match the actual
+ * concept of bindings but rather contain pure register values.
+ * Instead include the header in the DTS source directory.
+ */
+#warning "These bindings are deprecated. Instead use the header in the DTS source directory."
+
#define EXYNOS_PIN_PULL_NONE 0
#define EXYNOS_PIN_PULL_DOWN 1
#define EXYNOS_PIN_PULL_UP 3
diff --git a/include/dt-bindings/power/fsl,imx93-power.h b/include/dt-bindings/power/fsl,imx93-power.h
new file mode 100644
index 000000000000..17f9f015bf7d
--- /dev/null
+++ b/include/dt-bindings/power/fsl,imx93-power.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
+/*
+ * Copyright 2022 NXP
+ */
+
+#ifndef __DT_BINDINGS_IMX93_POWER_H__
+#define __DT_BINDINGS_IMX93_POWER_H__
+
+#define IMX93_MEDIABLK_PD_MIPI_DSI 0
+#define IMX93_MEDIABLK_PD_MIPI_CSI 1
+#define IMX93_MEDIABLK_PD_PXP 2
+#define IMX93_MEDIABLK_PD_LCDIF 3
+#define IMX93_MEDIABLK_PD_ISI 4
+
+#endif
diff --git a/include/dt-bindings/power/imx8mp-power.h b/include/dt-bindings/power/imx8mp-power.h
index 7789bcca3223..2fe3c2abad13 100644
--- a/include/dt-bindings/power/imx8mp-power.h
+++ b/include/dt-bindings/power/imx8mp-power.h
@@ -49,5 +49,11 @@
#define IMX8MP_HDMIBLK_PD_TRNG 4
#define IMX8MP_HDMIBLK_PD_HDMI_TX 5
#define IMX8MP_HDMIBLK_PD_HDMI_TX_PHY 6
+#define IMX8MP_HDMIBLK_PD_HDCP 7
+#define IMX8MP_HDMIBLK_PD_HRV 8
+
+#define IMX8MP_VPUBLK_PD_G1 0
+#define IMX8MP_VPUBLK_PD_G2 1
+#define IMX8MP_VPUBLK_PD_VC8000E 2
#endif
diff --git a/include/dt-bindings/power/qcom-rpmpd.h b/include/dt-bindings/power/qcom-rpmpd.h
index d81de63ae31c..f5f82dde7399 100644
--- a/include/dt-bindings/power/qcom-rpmpd.h
+++ b/include/dt-bindings/power/qcom-rpmpd.h
@@ -36,6 +36,18 @@
#define SM6350_MSS 4
#define SM6350_MX 5
+/* SM6375 Power Domain Indexes */
+#define SM6375_VDDCX 0
+#define SM6375_VDDCX_AO 1
+#define SM6375_VDDCX_VFL 2
+#define SM6375_VDDMX 3
+#define SM6375_VDDMX_AO 4
+#define SM6375_VDDMX_VFL 5
+#define SM6375_VDDGX 6
+#define SM6375_VDDGX_AO 7
+#define SM6375_VDD_LPI_CX 8
+#define SM6375_VDD_LPI_MX 9
+
/* SM8150 Power Domain Indexes */
#define SM8150_MSS 0
#define SM8150_EBI 1
diff --git a/include/dt-bindings/power/rk3588-power.h b/include/dt-bindings/power/rk3588-power.h
new file mode 100644
index 000000000000..1b92fec013cb
--- /dev/null
+++ b/include/dt-bindings/power/rk3588-power.h
@@ -0,0 +1,69 @@
+/* SPDX-License-Identifier: (GPL-2.0 or MIT) */
+#ifndef __DT_BINDINGS_POWER_RK3588_POWER_H__
+#define __DT_BINDINGS_POWER_RK3588_POWER_H__
+
+/* VD_LITDSU */
+#define RK3588_PD_CPU_0 0
+#define RK3588_PD_CPU_1 1
+#define RK3588_PD_CPU_2 2
+#define RK3588_PD_CPU_3 3
+
+/* VD_BIGCORE0 */
+#define RK3588_PD_CPU_4 4
+#define RK3588_PD_CPU_5 5
+
+/* VD_BIGCORE1 */
+#define RK3588_PD_CPU_6 6
+#define RK3588_PD_CPU_7 7
+
+/* VD_NPU */
+#define RK3588_PD_NPU 8
+#define RK3588_PD_NPUTOP 9
+#define RK3588_PD_NPU1 10
+#define RK3588_PD_NPU2 11
+
+/* VD_GPU */
+#define RK3588_PD_GPU 12
+
+/* VD_VCODEC */
+#define RK3588_PD_VCODEC 13
+#define RK3588_PD_RKVDEC0 14
+#define RK3588_PD_RKVDEC1 15
+#define RK3588_PD_VENC0 16
+#define RK3588_PD_VENC1 17
+
+/* VD_DD01 */
+#define RK3588_PD_DDR01 18
+
+/* VD_DD23 */
+#define RK3588_PD_DDR23 19
+
+/* VD_LOGIC */
+#define RK3588_PD_CENTER 20
+#define RK3588_PD_VDPU 21
+#define RK3588_PD_RGA30 22
+#define RK3588_PD_AV1 23
+#define RK3588_PD_VOP 24
+#define RK3588_PD_VO0 25
+#define RK3588_PD_VO1 26
+#define RK3588_PD_VI 27
+#define RK3588_PD_ISP1 28
+#define RK3588_PD_FEC 29
+#define RK3588_PD_RGA31 30
+#define RK3588_PD_USB 31
+#define RK3588_PD_PHP 32
+#define RK3588_PD_GMAC 33
+#define RK3588_PD_PCIE 34
+#define RK3588_PD_NVM 35
+#define RK3588_PD_NVM0 36
+#define RK3588_PD_SDIO 37
+#define RK3588_PD_AUDIO 38
+#define RK3588_PD_SECURE 39
+#define RK3588_PD_SDMMC 40
+#define RK3588_PD_CRYPTO 41
+#define RK3588_PD_BUS 42
+
+/* VD_PMU */
+#define RK3588_PD_PMU1 43
+
+#endif
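
As a hedged usage sketch, a consumer node would reference one of these indexes through the SoC's power-domain controller; the &power label and the node shown are assumptions for illustration only:

	#include <dt-bindings/power/rk3588-power.h>

	gpu {
		/* request the GPU power island defined above (index 12) */
		power-domains = <&power RK3588_PD_GPU>;
	};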
diff --git a/include/dt-bindings/power/rockchip,rv1126-power.h b/include/dt-bindings/power/rockchip,rv1126-power.h
new file mode 100644
index 000000000000..38a68e000d38
--- /dev/null
+++ b/include/dt-bindings/power/rockchip,rv1126-power.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __DT_BINDINGS_POWER_RV1126_POWER_H__
+#define __DT_BINDINGS_POWER_RV1126_POWER_H__
+
+/* VD_CORE */
+#define RV1126_PD_CPU_0 0
+#define RV1126_PD_CPU_1 1
+#define RV1126_PD_CPU_2 2
+#define RV1126_PD_CPU_3 3
+#define RV1126_PD_CORE_ALIVE 4
+
+/* VD_PMU */
+#define RV1126_PD_PMU 5
+#define RV1126_PD_PMU_ALIVE 6
+
+/* VD_NPU */
+#define RV1126_PD_NPU 7
+
+/* VD_VEPU */
+#define RV1126_PD_VEPU 8
+
+/* VD_LOGIC */
+#define RV1126_PD_VI 9
+#define RV1126_PD_VO 10
+#define RV1126_PD_ISPP 11
+#define RV1126_PD_VDPU 12
+#define RV1126_PD_CRYPTO 13
+#define RV1126_PD_DDR 14
+#define RV1126_PD_NVM 15
+#define RV1126_PD_SDIO 16
+#define RV1126_PD_USB 17
+#define RV1126_PD_LOGIC_ALIVE 18
+
+#endif
diff --git a/include/dt-bindings/reset/bt1-ccu.h b/include/dt-bindings/reset/bt1-ccu.h
index 3578e83026bc..c691efaa678f 100644
--- a/include/dt-bindings/reset/bt1-ccu.h
+++ b/include/dt-bindings/reset/bt1-ccu.h
@@ -21,5 +21,14 @@
#define CCU_SYS_SATA_REF_RST 0
#define CCU_SYS_APB_RST 1
+#define CCU_SYS_DDR_FULL_RST 2
+#define CCU_SYS_DDR_INIT_RST 3
+#define CCU_SYS_PCIE_PCS_PHY_RST 4
+#define CCU_SYS_PCIE_PIPE0_RST 5
+#define CCU_SYS_PCIE_CORE_RST 6
+#define CCU_SYS_PCIE_PWR_RST 7
+#define CCU_SYS_PCIE_STICKY_RST 8
+#define CCU_SYS_PCIE_NSTICKY_RST 9
+#define CCU_SYS_PCIE_HOT_RST 10
#endif /* __DT_BINDINGS_RESET_BT1_CCU_H */
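
A hedged sketch of how the newly added PCIe reset lines could be wired up by a consumer; the &ccu_sys label and the reset-names strings are assumptions, not part of this patch:

	#include <dt-bindings/reset/bt1-ccu.h>

	pcie {
		/* core and power resets taken from the system CCU block */
		resets = <&ccu_sys CCU_SYS_PCIE_CORE_RST>,
			 <&ccu_sys CCU_SYS_PCIE_PWR_RST>;
		reset-names = "core", "pwr";
	};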
diff --git a/include/dt-bindings/reset/mediatek,mt6795-resets.h b/include/dt-bindings/reset/mediatek,mt6795-resets.h
new file mode 100644
index 000000000000..5464a4a79a70
--- /dev/null
+++ b/include/dt-bindings/reset/mediatek,mt6795-resets.h
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2022 Collabora Ltd.
+ * Author: AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
+ */
+
+#ifndef _DT_BINDINGS_RESET_CONTROLLER_MT6795
+#define _DT_BINDINGS_RESET_CONTROLLER_MT6795
+
+/* INFRACFG resets */
+#define MT6795_INFRA_RST0_SCPSYS_RST 0
+#define MT6795_INFRA_RST0_PMIC_WRAP_RST 1
+#define MT6795_INFRA_RST1_MIPI_DSI_RST 2
+#define MT6795_INFRA_RST1_MIPI_CSI_RST 3
+#define MT6795_INFRA_RST1_MM_IOMMU_RST 4
+
+/* MMSYS resets */
+#define MT6795_MMSYS_SW0_RST_B_SMI_COMMON 0
+#define MT6795_MMSYS_SW0_RST_B_SMI_LARB 1
+#define MT6795_MMSYS_SW0_RST_B_CAM_MDP 2
+#define MT6795_MMSYS_SW0_RST_B_MDP_RDMA0 3
+#define MT6795_MMSYS_SW0_RST_B_MDP_RDMA1 4
+#define MT6795_MMSYS_SW0_RST_B_MDP_RSZ0 5
+#define MT6795_MMSYS_SW0_RST_B_MDP_RSZ1 6
+#define MT6795_MMSYS_SW0_RST_B_MDP_RSZ2 7
+#define MT6795_MMSYS_SW0_RST_B_MDP_TDSHP0 8
+#define MT6795_MMSYS_SW0_RST_B_MDP_TDSHP1 9
+#define MT6795_MMSYS_SW0_RST_B_MDP_WDMA 10
+#define MT6795_MMSYS_SW0_RST_B_MDP_WROT0 11
+#define MT6795_MMSYS_SW0_RST_B_MDP_WROT1 12
+#define MT6795_MMSYS_SW0_RST_B_MDP_CROP 13
+
+/* PERICFG resets */
+#define MT6795_PERI_NFI_SW_RST 0
+#define MT6795_PERI_THERM_SW_RST 1
+#define MT6795_PERI_MSDC1_SW_RST 2
+
+/* TOPRGU resets */
+#define MT6795_TOPRGU_INFRA_SW_RST 0
+#define MT6795_TOPRGU_MM_SW_RST 1
+#define MT6795_TOPRGU_MFG_SW_RST 2
+#define MT6795_TOPRGU_VENC_SW_RST 3
+#define MT6795_TOPRGU_VDEC_SW_RST 4
+#define MT6795_TOPRGU_IMG_SW_RST 5
+#define MT6795_TOPRGU_DDRPHY_SW_RST 6
+#define MT6795_TOPRGU_MD_SW_RST 7
+#define MT6795_TOPRGU_INFRA_AO_SW_RST 8
+#define MT6795_TOPRGU_MD_LITE_SW_RST 9
+#define MT6795_TOPRGU_APMIXED_SW_RST 10
+#define MT6795_TOPRGU_PWRAP_SPI_CTL_RST 11
+#define MT6795_TOPRGU_SW_RST_NUM 12
+
+#endif /* _DT_BINDINGS_RESET_CONTROLLER_MT6795 */
diff --git a/include/dt-bindings/reset/mt8195-resets.h b/include/dt-bindings/reset/mt8195-resets.h
index 0b1937f14b36..24ab3631dcea 100644
--- a/include/dt-bindings/reset/mt8195-resets.h
+++ b/include/dt-bindings/reset/mt8195-resets.h
@@ -31,5 +31,8 @@
#define MT8195_INFRA_RST0_THERM_CTRL_SWRST 0
#define MT8195_INFRA_RST3_THERM_CTRL_PTP_SWRST 1
#define MT8195_INFRA_RST4_THERM_CTRL_MCU_SWRST 2
+#define MT8195_INFRA_RST2_PCIE_P0_SWRST 3
+#define MT8195_INFRA_RST2_PCIE_P1_SWRST 4
+#define MT8195_INFRA_RST2_USBSIF_P1_SWRST 5
#endif /* _DT_BINDINGS_RESET_CONTROLLER_MT8195 */
diff --git a/include/dt-bindings/sound/qcom,q6dsp-lpass-ports.h b/include/dt-bindings/sound/qcom,q6dsp-lpass-ports.h
index 0d3276c8fc11..9f7c5103bc82 100644
--- a/include/dt-bindings/sound/qcom,q6dsp-lpass-ports.h
+++ b/include/dt-bindings/sound/qcom,q6dsp-lpass-ports.h
@@ -193,6 +193,24 @@
#define LPASS_CLK_ID_RX_CORE_MCLK 59
#define LPASS_CLK_ID_RX_CORE_NPL_MCLK 60
#define LPASS_CLK_ID_VA_CORE_2X_MCLK 61
+/* Clock ID for MCLK for WSA2 core */
+#define LPASS_CLK_ID_WSA2_CORE_MCLK 62
+/* Clock ID for NPL MCLK for WSA2 core */
+#define LPASS_CLK_ID_WSA2_CORE_2X_MCLK 63
+/* Clock ID for RX Core TX MCLK */
+#define LPASS_CLK_ID_RX_CORE_TX_MCLK 64
+/* Clock ID for RX CORE TX 2X MCLK */
+#define LPASS_CLK_ID_RX_CORE_TX_2X_MCLK 65
+/* Clock ID for WSA core TX MCLK */
+#define LPASS_CLK_ID_WSA_CORE_TX_MCLK 66
+/* Clock ID for WSA core TX 2X MCLK */
+#define LPASS_CLK_ID_WSA_CORE_TX_2X_MCLK 67
+/* Clock ID for WSA2 core TX MCLK */
+#define LPASS_CLK_ID_WSA2_CORE_TX_MCLK 68
+/* Clock ID for WSA2 core TX 2X MCLK */
+#define LPASS_CLK_ID_WSA2_CORE_TX_2X_MCLK 69
+/* Clock ID for RX CORE MCLK2 2X MCLK */
+#define LPASS_CLK_ID_RX_CORE_MCLK2_2X_MCLK 70
#define LPASS_HW_AVTIMER_VOTE 101
#define LPASS_HW_MACRO_VOTE 102
diff --git a/include/kunit/assert.h b/include/kunit/assert.h
index 4b52e12c2ae8..ace3de8d1ee7 100644
--- a/include/kunit/assert.h
+++ b/include/kunit/assert.h
@@ -42,16 +42,15 @@ struct kunit_loc {
/**
* struct kunit_assert - Data for printing a failed assertion or expectation.
- * @format: a function which formats the data in this kunit_assert to a string.
*
* Represents a failed expectation/assertion. Contains all the data necessary to
* format a string to a user reporting the failure.
*/
-struct kunit_assert {
- void (*format)(const struct kunit_assert *assert,
- const struct va_format *message,
- struct string_stream *stream);
-};
+struct kunit_assert {};
+
+typedef void (*assert_format_t)(const struct kunit_assert *assert,
+ const struct va_format *message,
+ struct string_stream *stream);
void kunit_assert_prologue(const struct kunit_loc *loc,
enum kunit_assert_type type,
@@ -72,16 +71,6 @@ void kunit_fail_assert_format(const struct kunit_assert *assert,
struct string_stream *stream);
/**
- * KUNIT_INIT_FAIL_ASSERT_STRUCT - Initializer for &struct kunit_fail_assert.
- *
- * Initializes a &struct kunit_fail_assert. Intended to be used in
- * KUNIT_EXPECT_* and KUNIT_ASSERT_* macros.
- */
-#define KUNIT_INIT_FAIL_ASSERT_STRUCT { \
- .assert = { .format = kunit_fail_assert_format }, \
-}
-
-/**
* struct kunit_unary_assert - Represents a KUNIT_{EXPECT|ASSERT}_{TRUE|FALSE}
* @assert: The parent of this type.
* @condition: A string representation of a conditional expression.
@@ -110,7 +99,6 @@ void kunit_unary_assert_format(const struct kunit_assert *assert,
* KUNIT_EXPECT_* and KUNIT_ASSERT_* macros.
*/
#define KUNIT_INIT_UNARY_ASSERT_STRUCT(cond, expect_true) { \
- .assert = { .format = kunit_unary_assert_format }, \
.condition = cond, \
.expected_true = expect_true \
}
@@ -145,7 +133,6 @@ void kunit_ptr_not_err_assert_format(const struct kunit_assert *assert,
* KUNIT_EXPECT_* and KUNIT_ASSERT_* macros.
*/
#define KUNIT_INIT_PTR_NOT_ERR_STRUCT(txt, val) { \
- .assert = { .format = kunit_ptr_not_err_assert_format }, \
.text = txt, \
.value = val \
}
@@ -190,7 +177,6 @@ void kunit_binary_assert_format(const struct kunit_assert *assert,
* KUNIT_INIT_BINARY_ASSERT_STRUCT() - Initializes a binary assert like
* kunit_binary_assert, kunit_binary_ptr_assert, etc.
*
- * @format_func: a function which formats the assert to a string.
* @text_: Pointer to a kunit_binary_assert_text.
* @left_val: The actual evaluated value of the expression in the left slot.
* @right_val: The actual evaluated value of the expression in the right slot.
@@ -200,11 +186,9 @@ void kunit_binary_assert_format(const struct kunit_assert *assert,
* fields but with different types for left_val/right_val.
* This is ultimately used by binary assertion macros like KUNIT_EXPECT_EQ, etc.
*/
-#define KUNIT_INIT_BINARY_ASSERT_STRUCT(format_func, \
- text_, \
+#define KUNIT_INIT_BINARY_ASSERT_STRUCT(text_, \
left_val, \
right_val) { \
- .assert = { .format = format_func }, \
.text = text_, \
.left_value = left_val, \
.right_value = right_val \
diff --git a/include/kunit/resource.h b/include/kunit/resource.h
index 09c2b34d1c61..cf6fb8f2ac1b 100644
--- a/include/kunit/resource.h
+++ b/include/kunit/resource.h
@@ -301,22 +301,6 @@ typedef bool (*kunit_resource_match_t)(struct kunit *test,
void *match_data);
/**
- * kunit_resource_instance_match() - Match a resource with the same instance.
- * @test: Test case to which the resource belongs.
- * @res: The resource.
- * @match_data: The resource pointer to match against.
- *
- * An instance of kunit_resource_match_t that matches a resource whose
- * allocation matches @match_data.
- */
-static inline bool kunit_resource_instance_match(struct kunit *test,
- struct kunit_resource *res,
- void *match_data)
-{
- return res->data == match_data;
-}
-
-/**
* kunit_resource_name_match() - Match a resource with the same name.
* @test: Test case to which the resource belongs.
* @res: The resource.
diff --git a/include/kunit/test.h b/include/kunit/test.h
index 840a2c375065..b1ab6b32216d 100644
--- a/include/kunit/test.h
+++ b/include/kunit/test.h
@@ -228,6 +228,8 @@ static inline void kunit_set_failure(struct kunit *test)
WRITE_ONCE(test->status, KUNIT_FAILURE);
}
+bool kunit_enabled(void);
+
void kunit_init_test(struct kunit *test, const char *name, char *log);
int kunit_run_tests(struct kunit_suite *suite);
@@ -251,7 +253,6 @@ static inline int kunit_run_all_tests(void)
#endif /* IS_BUILTIN(CONFIG_KUNIT) */
#define __kunit_test_suites(unique_array, ...) \
- MODULE_INFO(test, "Y"); \
static struct kunit_suite *unique_array[] \
__aligned(sizeof(struct kunit_suite *)) \
__used __section(".kunit_test_suites") = { __VA_ARGS__ }
@@ -472,30 +473,30 @@ void kunit_do_failed_assertion(struct kunit *test,
const struct kunit_loc *loc,
enum kunit_assert_type type,
const struct kunit_assert *assert,
+ assert_format_t assert_format,
const char *fmt, ...);
-#define KUNIT_ASSERTION(test, assert_type, pass, assert_class, INITIALIZER, fmt, ...) do { \
- if (unlikely(!(pass))) { \
- static const struct kunit_loc __loc = KUNIT_CURRENT_LOC; \
- struct assert_class __assertion = INITIALIZER; \
- kunit_do_failed_assertion(test, \
- &__loc, \
- assert_type, \
- &__assertion.assert, \
- fmt, \
- ##__VA_ARGS__); \
- } \
+#define _KUNIT_FAILED(test, assert_type, assert_class, assert_format, INITIALIZER, fmt, ...) do { \
+ static const struct kunit_loc __loc = KUNIT_CURRENT_LOC; \
+ const struct assert_class __assertion = INITIALIZER; \
+ kunit_do_failed_assertion(test, \
+ &__loc, \
+ assert_type, \
+ &__assertion.assert, \
+ assert_format, \
+ fmt, \
+ ##__VA_ARGS__); \
} while (0)
#define KUNIT_FAIL_ASSERTION(test, assert_type, fmt, ...) \
- KUNIT_ASSERTION(test, \
- assert_type, \
- false, \
- kunit_fail_assert, \
- KUNIT_INIT_FAIL_ASSERT_STRUCT, \
- fmt, \
- ##__VA_ARGS__)
+ _KUNIT_FAILED(test, \
+ assert_type, \
+ kunit_fail_assert, \
+ kunit_fail_assert_format, \
+ {}, \
+ fmt, \
+ ##__VA_ARGS__)
/**
* KUNIT_FAIL() - Always causes a test to fail when evaluated.
@@ -520,14 +521,19 @@ void kunit_do_failed_assertion(struct kunit *test,
expected_true, \
fmt, \
...) \
- KUNIT_ASSERTION(test, \
- assert_type, \
- !!(condition) == !!expected_true, \
- kunit_unary_assert, \
- KUNIT_INIT_UNARY_ASSERT_STRUCT(#condition, \
- expected_true), \
- fmt, \
- ##__VA_ARGS__)
+do { \
+ if (likely(!!(condition) == !!expected_true)) \
+ break; \
+ \
+ _KUNIT_FAILED(test, \
+ assert_type, \
+ kunit_unary_assert, \
+ kunit_unary_assert_format, \
+ KUNIT_INIT_UNARY_ASSERT_STRUCT(#condition, \
+ expected_true), \
+ fmt, \
+ ##__VA_ARGS__); \
+} while (0)
#define KUNIT_TRUE_MSG_ASSERTION(test, assert_type, condition, fmt, ...) \
KUNIT_UNARY_ASSERTION(test, \
@@ -577,16 +583,18 @@ do { \
.right_text = #right, \
}; \
\
- KUNIT_ASSERTION(test, \
- assert_type, \
- __left op __right, \
- assert_class, \
- KUNIT_INIT_BINARY_ASSERT_STRUCT(format_func, \
- &__text, \
- __left, \
- __right), \
- fmt, \
- ##__VA_ARGS__); \
+ if (likely(__left op __right)) \
+ break; \
+ \
+ _KUNIT_FAILED(test, \
+ assert_type, \
+ assert_class, \
+ format_func, \
+ KUNIT_INIT_BINARY_ASSERT_STRUCT(&__text, \
+ __left, \
+ __right), \
+ fmt, \
+ ##__VA_ARGS__); \
} while (0)
#define KUNIT_BINARY_INT_ASSERTION(test, \
@@ -635,16 +643,19 @@ do { \
.right_text = #right, \
}; \
\
- KUNIT_ASSERTION(test, \
- assert_type, \
- strcmp(__left, __right) op 0, \
- kunit_binary_str_assert, \
- KUNIT_INIT_BINARY_ASSERT_STRUCT(kunit_binary_str_assert_format,\
- &__text, \
- __left, \
- __right), \
- fmt, \
- ##__VA_ARGS__); \
+ if (likely(strcmp(__left, __right) op 0)) \
+ break; \
+ \
+ \
+ _KUNIT_FAILED(test, \
+ assert_type, \
+ kunit_binary_str_assert, \
+ kunit_binary_str_assert_format, \
+ KUNIT_INIT_BINARY_ASSERT_STRUCT(&__text, \
+ __left, \
+ __right), \
+ fmt, \
+ ##__VA_ARGS__); \
} while (0)
#define KUNIT_PTR_NOT_ERR_OR_NULL_MSG_ASSERTION(test, \
@@ -655,14 +666,16 @@ do { \
do { \
const typeof(ptr) __ptr = (ptr); \
\
- KUNIT_ASSERTION(test, \
- assert_type, \
- !IS_ERR_OR_NULL(__ptr), \
- kunit_ptr_not_err_assert, \
- KUNIT_INIT_PTR_NOT_ERR_STRUCT(#ptr, \
- __ptr), \
- fmt, \
- ##__VA_ARGS__); \
+ if (!IS_ERR_OR_NULL(__ptr)) \
+ break; \
+ \
+ _KUNIT_FAILED(test, \
+ assert_type, \
+ kunit_ptr_not_err_assert, \
+ kunit_ptr_not_err_assert_format, \
+ KUNIT_INIT_PTR_NOT_ERR_STRUCT(#ptr, __ptr), \
+ fmt, \
+ ##__VA_ARGS__); \
} while (0)
/**
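The assert.h and test.h hunks above move the format callback out of struct kunit_assert: the assertion structs stay plain data, and the matching assert_format_t is handed to kunit_do_failed_assertion() only on the failure path via _KUNIT_FAILED(). A minimal sketch of a custom expectation built on the reworked internals; MY_EXPECT_PAGE_ALIGNED is hypothetical and not part of this patch:

/* Hypothetical expectation: the passing case is a plain branch, and the
 * assert struct plus its format function are only materialised on failure.
 */
#define MY_EXPECT_PAGE_ALIGNED(test, ptr) do {				\
	if (likely(IS_ALIGNED((unsigned long)(ptr), PAGE_SIZE)))	\
		break;							\
	_KUNIT_FAILED(test,						\
		      KUNIT_EXPECTATION,				\
		      kunit_unary_assert,				\
		      kunit_unary_assert_format,			\
		      KUNIT_INIT_UNARY_ASSERT_STRUCT(#ptr " is page aligned", \
						     true),		\
		      NULL);						\
} while (0)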
diff --git a/include/linux/a.out.h b/include/linux/a.out.h
deleted file mode 100644
index 600cf45645c6..000000000000
--- a/include/linux/a.out.h
+++ /dev/null
@@ -1,18 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __A_OUT_GNU_H__
-#define __A_OUT_GNU_H__
-
-#include <uapi/linux/a.out.h>
-
-#ifndef __ASSEMBLY__
-#ifdef linux
-#include <asm/page.h>
-#if defined(__i386__) || defined(__mc68000__)
-#else
-#ifndef SEGMENT_SIZE
-#define SEGMENT_SIZE PAGE_SIZE
-#endif
-#endif
-#endif
-#endif /*__ASSEMBLY__ */
-#endif /* __A_OUT_GNU_H__ */
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index 6f64b2f3dc54..3015235d65e3 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -279,14 +279,17 @@ acpi_numa_processor_affinity_init(struct acpi_srat_cpu_affinity *pa) { }
void acpi_numa_x2apic_affinity_init(struct acpi_srat_x2apic_cpu_affinity *pa);
+#if defined(CONFIG_ARM64) || defined(CONFIG_LOONGARCH)
+void acpi_arch_dma_setup(struct device *dev);
+#else
+static inline void acpi_arch_dma_setup(struct device *dev) { }
+#endif
+
#ifdef CONFIG_ARM64
void acpi_numa_gicc_affinity_init(struct acpi_srat_gicc_affinity *pa);
-void acpi_arch_dma_setup(struct device *dev, u64 *dma_addr, u64 *dma_size);
#else
static inline void
acpi_numa_gicc_affinity_init(struct acpi_srat_gicc_affinity *pa) { }
-static inline void
-acpi_arch_dma_setup(struct device *dev, u64 *dma_addr, u64 *dma_size) { }
#endif
int acpi_numa_memory_affinity_init (struct acpi_srat_mem_affinity *ma);
@@ -495,7 +498,7 @@ bool acpi_dev_resource_address_space(struct acpi_resource *ares,
struct resource_win *win);
bool acpi_dev_resource_ext_address_space(struct acpi_resource *ares,
struct resource_win *win);
-unsigned long acpi_dev_irq_flags(u8 triggering, u8 polarity, u8 shareable);
+unsigned long acpi_dev_irq_flags(u8 triggering, u8 polarity, u8 shareable, u8 wake_capable);
unsigned int acpi_dev_get_irq_type(int triggering, int polarity);
bool acpi_dev_resource_interrupt(struct acpi_resource *ares, int index,
struct resource *res);
@@ -506,6 +509,7 @@ int acpi_dev_get_resources(struct acpi_device *adev, struct list_head *list,
void *preproc_data);
int acpi_dev_get_dma_resources(struct acpi_device *adev,
struct list_head *list);
+int acpi_dev_get_memory_resources(struct acpi_device *adev, struct list_head *list);
int acpi_dev_filter_resource_type(struct acpi_resource *ares,
unsigned long types);
@@ -798,6 +802,11 @@ acpi_dev_hid_uid_match(struct acpi_device *adev, const char *hid2, const char *u
return false;
}
+static inline int acpi_dev_uid_to_integer(struct acpi_device *adev, u64 *integer)
+{
+ return -ENODEV;
+}
+
static inline struct acpi_device *
acpi_dev_get_first_match_dev(const char *hid, const char *uid, s64 hrv)
{
@@ -977,8 +986,7 @@ static inline enum dev_dma_attr acpi_get_dma_attr(struct acpi_device *adev)
return DEV_DMA_NOT_SUPPORTED;
}
-static inline int acpi_dma_get_range(struct device *dev, u64 *dma_addr,
- u64 *offset, u64 *size)
+static inline int acpi_dma_get_range(struct device *dev, const struct bus_dma_region **map)
{
return -ENODEV;
}
@@ -1075,6 +1083,7 @@ acpi_status acpi_os_prepare_extended_sleep(u8 sleep_state,
struct acpi_s2idle_dev_ops {
struct list_head list_node;
void (*prepare)(void);
+ void (*check)(void);
void (*restore)(void);
};
int acpi_register_lps0_dev(struct acpi_s2idle_dev_ops *arg);
@@ -1202,7 +1211,8 @@ bool acpi_gpio_get_irq_resource(struct acpi_resource *ares,
struct acpi_resource_gpio **agpio);
bool acpi_gpio_get_io_resource(struct acpi_resource *ares,
struct acpi_resource_gpio **agpio);
-int acpi_dev_gpio_irq_get_by(struct acpi_device *adev, const char *name, int index);
+int acpi_dev_gpio_irq_wake_get_by(struct acpi_device *adev, const char *name, int index,
+ bool *wake_capable);
#else
static inline bool acpi_gpio_get_irq_resource(struct acpi_resource *ares,
struct acpi_resource_gpio **agpio)
@@ -1214,16 +1224,28 @@ static inline bool acpi_gpio_get_io_resource(struct acpi_resource *ares,
{
return false;
}
-static inline int acpi_dev_gpio_irq_get_by(struct acpi_device *adev,
- const char *name, int index)
+static inline int acpi_dev_gpio_irq_wake_get_by(struct acpi_device *adev, const char *name,
+ int index, bool *wake_capable)
{
return -ENXIO;
}
#endif
+static inline int acpi_dev_gpio_irq_wake_get(struct acpi_device *adev, int index,
+ bool *wake_capable)
+{
+ return acpi_dev_gpio_irq_wake_get_by(adev, NULL, index, wake_capable);
+}
+
+static inline int acpi_dev_gpio_irq_get_by(struct acpi_device *adev, const char *name,
+ int index)
+{
+ return acpi_dev_gpio_irq_wake_get_by(adev, name, index, NULL);
+}
+
static inline int acpi_dev_gpio_irq_get(struct acpi_device *adev, int index)
{
- return acpi_dev_gpio_irq_get_by(adev, NULL, index);
+ return acpi_dev_gpio_irq_wake_get_by(adev, NULL, index, NULL);
}
/* Device properties */
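With acpi_dev_gpio_irq_wake_get_by() and the wrappers above, a driver can ask ACPI both for the GpioInt interrupt and for whether the firmware marked it wake capable. A hedged probe-time sketch (pdev and the wakeup policy are illustrative, error handling trimmed):

	struct acpi_device *adev = ACPI_COMPANION(&pdev->dev);
	bool wake_capable = false;
	int irq;

	irq = acpi_dev_gpio_irq_wake_get(adev, 0, &wake_capable);
	if (irq < 0)
		return irq;

	/* Only advertise wakeup when the firmware declared the GPIO wake capable. */
	if (wake_capable)
		device_init_wakeup(&pdev->dev, true);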
diff --git a/include/linux/ahci_platform.h b/include/linux/ahci_platform.h
index 49e5383d4222..17fa26215292 100644
--- a/include/linux/ahci_platform.h
+++ b/include/linux/ahci_platform.h
@@ -13,6 +13,7 @@
#include <linux/compiler.h>
+struct clk;
struct device;
struct ata_port_info;
struct ahci_host_priv;
@@ -21,8 +22,12 @@ struct scsi_host_template;
int ahci_platform_enable_phys(struct ahci_host_priv *hpriv);
void ahci_platform_disable_phys(struct ahci_host_priv *hpriv);
+struct clk *ahci_platform_find_clk(struct ahci_host_priv *hpriv,
+ const char *con_id);
int ahci_platform_enable_clks(struct ahci_host_priv *hpriv);
void ahci_platform_disable_clks(struct ahci_host_priv *hpriv);
+int ahci_platform_deassert_rsts(struct ahci_host_priv *hpriv);
+int ahci_platform_assert_rsts(struct ahci_host_priv *hpriv);
int ahci_platform_enable_regulators(struct ahci_host_priv *hpriv);
void ahci_platform_disable_regulators(struct ahci_host_priv *hpriv);
int ahci_platform_enable_resources(struct ahci_host_priv *hpriv);
@@ -41,6 +46,7 @@ int ahci_platform_resume_host(struct device *dev);
int ahci_platform_suspend(struct device *dev);
int ahci_platform_resume(struct device *dev);
-#define AHCI_PLATFORM_GET_RESETS 0x01
+#define AHCI_PLATFORM_GET_RESETS BIT(0)
+#define AHCI_PLATFORM_RST_TRIGGER BIT(1)
#endif /* _AHCI_PLATFORM_H */
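A sketch of how a platform AHCI driver could combine the reworked flags: AHCI_PLATFORM_GET_RESETS still asks the helper library to acquire the reset controls, and AHCI_PLATFORM_RST_TRIGGER appears intended to mark them as trigger-style resets for the new assert/deassert helpers. Driver-local names (pdev, hpriv, rc) are illustrative:

	hpriv = ahci_platform_get_resources(pdev, AHCI_PLATFORM_GET_RESETS |
						  AHCI_PLATFORM_RST_TRIGGER);
	if (IS_ERR(hpriv))
		return PTR_ERR(hpriv);

	/* Enables clocks, regulators, PHYs and, with the flags above, resets. */
	rc = ahci_platform_enable_resources(hpriv);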
diff --git a/include/linux/amd-pstate.h b/include/linux/amd-pstate.h
new file mode 100644
index 000000000000..1c4b8659f171
--- /dev/null
+++ b/include/linux/amd-pstate.h
@@ -0,0 +1,77 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * linux/include/linux/amd-pstate.h
+ *
+ * Copyright (C) 2022 Advanced Micro Devices, Inc.
+ *
+ * Author: Meng Li <li.meng@amd.com>
+ */
+
+#ifndef _LINUX_AMD_PSTATE_H
+#define _LINUX_AMD_PSTATE_H
+
+#include <linux/pm_qos.h>
+
+/*********************************************************************
+ * AMD P-state INTERFACE *
+ *********************************************************************/
+/**
+ * struct amd_aperf_mperf
+ * @aperf: actual performance frequency clock count
+ * @mperf: maximum performance frequency clock count
+ * @tsc: time stamp counter
+ */
+struct amd_aperf_mperf {
+ u64 aperf;
+ u64 mperf;
+ u64 tsc;
+};
+
+/**
+ * struct amd_cpudata - private CPU data for AMD P-State
+ * @cpu: CPU number
+ * @req: constraint request to apply
+ * @cppc_req_cached: cached performance request hints
+ * @highest_perf: the maximum performance an individual processor may reach,
+ * assuming ideal conditions
+ * @nominal_perf: the maximum sustained performance level of the processor,
+ * assuming ideal operating conditions
+ * @lowest_nonlinear_perf: the lowest performance level at which nonlinear power
+ * savings are achieved
+ * @lowest_perf: the absolute lowest performance level of the processor
+ * @max_freq: the frequency mapped to highest_perf
+ * @min_freq: the frequency mapped to lowest_perf
+ * @nominal_freq: the frequency mapped to nominal_perf
+ * @lowest_nonlinear_freq: the frequency mapped to lowest_nonlinear_perf
+ * @cur: Difference of Aperf/Mperf/tsc count between last and current sample
+ * @prev: Last Aperf/Mperf/tsc count value read from register
+ * @freq: current cpu frequency value
+ * @boost_supported: whether the processor or SBIOS supports boost mode
+ *
+ * The amd_cpudata structure holds the private data for each CPU thread in AMD P-State, and
+ * represents all the attributes and goals that AMD P-State requests at runtime.
+ */
+struct amd_cpudata {
+ int cpu;
+
+ struct freq_qos_request req[2];
+ u64 cppc_req_cached;
+
+ u32 highest_perf;
+ u32 nominal_perf;
+ u32 lowest_nonlinear_perf;
+ u32 lowest_perf;
+
+ u32 max_freq;
+ u32 min_freq;
+ u32 nominal_freq;
+ u32 lowest_nonlinear_freq;
+
+ struct amd_aperf_mperf cur;
+ struct amd_aperf_mperf prev;
+
+ u64 freq;
+ bool boost_supported;
+};
+
+#endif /* _LINUX_AMD_PSTATE_H */
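The cur/prev sample pairs exist so the driver can derive the frequency actually delivered between two samples from the APERF/MPERF deltas. A hypothetical helper illustrating that arithmetic (this is not the driver's own sampling code):

	/* Effective frequency = max_freq * delta_aperf / delta_mperf. */
	static u64 amd_effective_freq(const struct amd_cpudata *cpudata)
	{
		u64 d_aperf = cpudata->cur.aperf - cpudata->prev.aperf;
		u64 d_mperf = cpudata->cur.mperf - cpudata->prev.mperf;

		if (!d_mperf)
			return cpudata->freq;	/* no new sample, keep last value */

		return div64_u64((u64)cpudata->max_freq * d_aperf, d_mperf);
	}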
diff --git a/include/linux/arm_ffa.h b/include/linux/arm_ffa.h
index e5c76c1ef9ed..5f02d2e6b9d9 100644
--- a/include/linux/arm_ffa.h
+++ b/include/linux/arm_ffa.h
@@ -17,6 +17,7 @@ struct ffa_device {
bool mode_32bit;
uuid_t uuid;
struct device dev;
+ const struct ffa_ops *ops;
};
#define to_ffa_dev(d) container_of(d, struct ffa_device, dev)
@@ -47,17 +48,18 @@ static inline void *ffa_dev_get_drvdata(struct ffa_device *fdev)
}
#if IS_REACHABLE(CONFIG_ARM_FFA_TRANSPORT)
-struct ffa_device *ffa_device_register(const uuid_t *uuid, int vm_id);
+struct ffa_device *ffa_device_register(const uuid_t *uuid, int vm_id,
+ const struct ffa_ops *ops);
void ffa_device_unregister(struct ffa_device *ffa_dev);
int ffa_driver_register(struct ffa_driver *driver, struct module *owner,
const char *mod_name);
void ffa_driver_unregister(struct ffa_driver *driver);
bool ffa_device_is_valid(struct ffa_device *ffa_dev);
-const struct ffa_dev_ops *ffa_dev_ops_get(struct ffa_device *dev);
#else
static inline
-struct ffa_device *ffa_device_register(const uuid_t *uuid, int vm_id)
+struct ffa_device *ffa_device_register(const uuid_t *uuid, int vm_id,
+ const struct ffa_ops *ops)
{
return NULL;
}
@@ -76,11 +78,6 @@ static inline void ffa_driver_unregister(struct ffa_driver *driver) {}
static inline
bool ffa_device_is_valid(struct ffa_device *ffa_dev) { return false; }
-static inline
-const struct ffa_dev_ops *ffa_dev_ops_get(struct ffa_device *dev)
-{
- return NULL;
-}
#endif /* CONFIG_ARM_FFA_TRANSPORT */
#define ffa_register(driver) \
@@ -109,7 +106,10 @@ struct ffa_partition_info {
#define FFA_PARTITION_DIRECT_SEND BIT(1)
/* partition can send and receive indirect messages. */
#define FFA_PARTITION_INDIRECT_MSG BIT(2)
+/* partition runs in the AArch64 execution state. */
+#define FFA_PARTITION_AARCH64_EXEC BIT(8)
u32 properties;
+ u32 uuid[4];
};
/* For use with FFA_MSG_SEND_DIRECT_{REQ,RESP} which pass data via registers */
@@ -257,18 +257,28 @@ struct ffa_mem_ops_args {
struct ffa_mem_region_attributes *attrs;
};
-struct ffa_dev_ops {
+struct ffa_info_ops {
u32 (*api_version_get)(void);
int (*partition_info_get)(const char *uuid_str,
struct ffa_partition_info *buffer);
+};
+
+struct ffa_msg_ops {
void (*mode_32bit_set)(struct ffa_device *dev);
int (*sync_send_receive)(struct ffa_device *dev,
struct ffa_send_direct_data *data);
+};
+
+struct ffa_mem_ops {
int (*memory_reclaim)(u64 g_handle, u32 flags);
- int (*memory_share)(struct ffa_device *dev,
- struct ffa_mem_ops_args *args);
- int (*memory_lend)(struct ffa_device *dev,
- struct ffa_mem_ops_args *args);
+ int (*memory_share)(struct ffa_mem_ops_args *args);
+ int (*memory_lend)(struct ffa_mem_ops_args *args);
+};
+
+struct ffa_ops {
+ const struct ffa_info_ops *info_ops;
+ const struct ffa_msg_ops *msg_ops;
+ const struct ffa_mem_ops *mem_ops;
};
#endif /* _LINUX_ARM_FFA_H */
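After this split, a client driver no longer calls ffa_dev_ops_get(); it reaches the operation tables through the device handed to it by the bus. A minimal, illustrative probe (the partition details and payload value are placeholders):

	static int ffa_example_probe(struct ffa_device *ffa_dev)
	{
		const struct ffa_msg_ops *msg_ops = ffa_dev->ops->msg_ops;
		struct ffa_send_direct_data data = { .data0 = 0x1 };

		/* Opt in to the 32-bit direct messaging ABI for this partition. */
		msg_ops->mode_32bit_set(ffa_dev);

		return msg_ops->sync_send_receive(ffa_dev, &data);
	}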
diff --git a/include/linux/ata.h b/include/linux/ata.h
index 21292b5bbb55..e3050e153a71 100644
--- a/include/linux/ata.h
+++ b/include/linux/ata.h
@@ -566,6 +566,18 @@ struct ata_bmdma_prd {
((((id)[ATA_ID_SATA_CAPABILITY] != 0x0000) && \
((id)[ATA_ID_SATA_CAPABILITY] != 0xffff)) && \
((id)[ATA_ID_FEATURE_SUPP] & (1 << 2)))
+#define ata_id_has_devslp(id) \
+ ((((id)[ATA_ID_SATA_CAPABILITY] != 0x0000) && \
+ ((id)[ATA_ID_SATA_CAPABILITY] != 0xffff)) && \
+ ((id)[ATA_ID_FEATURE_SUPP] & (1 << 8)))
+#define ata_id_has_ncq_autosense(id) \
+ ((((id)[ATA_ID_SATA_CAPABILITY] != 0x0000) && \
+ ((id)[ATA_ID_SATA_CAPABILITY] != 0xffff)) && \
+ ((id)[ATA_ID_FEATURE_SUPP] & (1 << 7)))
+#define ata_id_has_dipm(id) \
+ ((((id)[ATA_ID_SATA_CAPABILITY] != 0x0000) && \
+ ((id)[ATA_ID_SATA_CAPABILITY] != 0xffff)) && \
+ ((id)[ATA_ID_FEATURE_SUPP] & (1 << 3)))
#define ata_id_iordy_disable(id) ((id)[ATA_ID_CAPABILITY] & (1 << 10))
#define ata_id_has_iordy(id) ((id)[ATA_ID_CAPABILITY] & (1 << 11))
#define ata_id_u32(id,n) \
@@ -578,9 +590,6 @@ struct ata_bmdma_prd {
#define ata_id_cdb_intr(id) (((id)[ATA_ID_CONFIG] & 0x60) == 0x20)
#define ata_id_has_da(id) ((id)[ATA_ID_SATA_CAPABILITY_2] & (1 << 4))
-#define ata_id_has_devslp(id) ((id)[ATA_ID_FEATURE_SUPP] & (1 << 8))
-#define ata_id_has_ncq_autosense(id) \
- ((id)[ATA_ID_FEATURE_SUPP] & (1 << 7))
static inline bool ata_id_has_hipm(const u16 *id)
{
@@ -592,17 +601,6 @@ static inline bool ata_id_has_hipm(const u16 *id)
return val & (1 << 9);
}
-static inline bool ata_id_has_dipm(const u16 *id)
-{
- u16 val = id[ATA_ID_FEATURE_SUPP];
-
- if (val == 0 || val == 0xffff)
- return false;
-
- return val & (1 << 3);
-}
-
-
static inline bool ata_id_has_fua(const u16 *id)
{
if ((id[ATA_ID_CFSSE] & 0xC000) != 0x4000)
@@ -771,16 +769,21 @@ static inline bool ata_id_has_read_log_dma_ext(const u16 *id)
static inline bool ata_id_has_sense_reporting(const u16 *id)
{
- if (!(id[ATA_ID_CFS_ENABLE_2] & (1 << 15)))
+ if (!(id[ATA_ID_CFS_ENABLE_2] & BIT(15)))
+ return false;
+ if ((id[ATA_ID_COMMAND_SET_3] & (BIT(15) | BIT(14))) != BIT(14))
return false;
- return id[ATA_ID_COMMAND_SET_3] & (1 << 6);
+ return id[ATA_ID_COMMAND_SET_3] & BIT(6);
}
static inline bool ata_id_sense_reporting_enabled(const u16 *id)
{
- if (!(id[ATA_ID_CFS_ENABLE_2] & (1 << 15)))
+ if (!ata_id_has_sense_reporting(id))
+ return false;
+ /* ata_id_has_sense_reporting() == true, word 86 must have bit 15 set */
+ if ((id[ATA_ID_COMMAND_SET_4] & (BIT(15) | BIT(14))) != BIT(14))
return false;
- return id[ATA_ID_COMMAND_SET_4] & (1 << 6);
+ return id[ATA_ID_COMMAND_SET_4] & BIT(6);
}
/**
diff --git a/include/linux/bcma/bcma_driver_chipcommon.h b/include/linux/bcma/bcma_driver_chipcommon.h
index e3314f746bfa..2d94c30ed439 100644
--- a/include/linux/bcma/bcma_driver_chipcommon.h
+++ b/include/linux/bcma/bcma_driver_chipcommon.h
@@ -271,6 +271,7 @@
#define BCMA_CC_SROM_CONTROL_OP_WRDIS 0x40000000
#define BCMA_CC_SROM_CONTROL_OP_WREN 0x60000000
#define BCMA_CC_SROM_CONTROL_OTPSEL 0x00000010
+#define BCMA_CC_SROM_CONTROL_OTP_PRESENT 0x00000020
#define BCMA_CC_SROM_CONTROL_LOCK 0x00000008
#define BCMA_CC_SROM_CONTROL_SIZE_MASK 0x00000006
#define BCMA_CC_SROM_CONTROL_SIZE_1K 0x00000000
diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
index 3dc20c4f394c..8d51f69f9f5e 100644
--- a/include/linux/binfmts.h
+++ b/include/linux/binfmts.h
@@ -43,9 +43,6 @@ struct linux_binprm {
* original userspace.
*/
point_of_no_return:1;
-#ifdef __alpha__
- unsigned int taso:1;
-#endif
struct file *executable; /* Executable to pass to the interpreter */
struct file *interpreter;
struct file *file;
diff --git a/include/linux/bio.h b/include/linux/bio.h
index ca22b06700a9..2c5806997bbf 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -509,7 +509,7 @@ static inline void bio_set_dev(struct bio *bio, struct block_device *bdev)
{
bio_clear_flag(bio, BIO_REMAPPED);
if (bio->bi_bdev != bdev)
- bio_clear_flag(bio, BIO_THROTTLED);
+ bio_clear_flag(bio, BIO_BPS_THROTTLED);
bio->bi_bdev = bdev;
bio_associate_blkg(bio);
}
diff --git a/include/linux/bitmap.h b/include/linux/bitmap.h
index f65410a49fda..7d6d73b78147 100644
--- a/include/linux/bitmap.h
+++ b/include/linux/bitmap.h
@@ -51,6 +51,7 @@ struct device;
* bitmap_empty(src, nbits) Are all bits zero in *src?
* bitmap_full(src, nbits) Are all bits set in *src?
* bitmap_weight(src, nbits) Hamming Weight: number set bits
+ * bitmap_weight_and(src1, src2, nbits) Hamming Weight of and'ed bitmap
* bitmap_set(dst, pos, nbits) Set specified bit area
* bitmap_clear(dst, pos, nbits) Clear specified bit area
* bitmap_find_next_zero_area(buf, len, pos, n, mask) Find bit free area
@@ -164,6 +165,8 @@ bool __bitmap_intersects(const unsigned long *bitmap1,
bool __bitmap_subset(const unsigned long *bitmap1,
const unsigned long *bitmap2, unsigned int nbits);
unsigned int __bitmap_weight(const unsigned long *bitmap, unsigned int nbits);
+unsigned int __bitmap_weight_and(const unsigned long *bitmap1,
+ const unsigned long *bitmap2, unsigned int nbits);
void __bitmap_set(unsigned long *map, unsigned int start, int len);
void __bitmap_clear(unsigned long *map, unsigned int start, int len);
@@ -222,7 +225,6 @@ void bitmap_copy_le(unsigned long *dst, const unsigned long *src, unsigned int n
#else
#define bitmap_copy_le bitmap_copy
#endif
-unsigned int bitmap_ord_to_pos(const unsigned long *bitmap, unsigned int ord, unsigned int nbits);
int bitmap_print_to_pagebuf(bool list, char *buf,
const unsigned long *maskp, int nmaskbits);
@@ -439,6 +441,15 @@ unsigned int bitmap_weight(const unsigned long *src, unsigned int nbits)
return __bitmap_weight(src, nbits);
}
+static __always_inline
+unsigned long bitmap_weight_and(const unsigned long *src1,
+ const unsigned long *src2, unsigned int nbits)
+{
+ if (small_const_nbits(nbits))
+ return hweight_long(*src1 & *src2 & BITMAP_LAST_WORD_MASK(nbits));
+ return __bitmap_weight_and(src1, src2, nbits);
+}
+
static __always_inline void bitmap_set(unsigned long *map, unsigned int start,
unsigned int nbits)
{
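A short illustration of the new helper: it computes the Hamming weight of the AND of two bitmaps in one pass, without building a temporary bitmap first (the sizes and values below are arbitrary):

	DECLARE_BITMAP(a, 128);
	DECLARE_BITMAP(b, 128);
	unsigned int n;

	bitmap_fill(a, 128);
	bitmap_zero(b, 128);
	bitmap_set(b, 0, 10);

	/* Same result as bitmap_and() into a scratch bitmap + bitmap_weight(). */
	n = bitmap_weight_and(a, b, 128);	/* n == 10 */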
diff --git a/include/linux/bitops.h b/include/linux/bitops.h
index 3b89c64bcfd8..2ba557e067fe 100644
--- a/include/linux/bitops.h
+++ b/include/linux/bitops.h
@@ -248,6 +248,25 @@ static inline unsigned long __ffs64(u64 word)
}
/**
+ * fns - find N'th set bit in a word
+ * @word: The word to search
+ * @n: Bit to find
+ */
+static inline unsigned long fns(unsigned long word, unsigned int n)
+{
+ unsigned int bit;
+
+ while (word) {
+ bit = __ffs(word);
+ if (n-- == 0)
+ return bit;
+ __clear_bit(bit, &word);
+ }
+
+ return BITS_PER_LONG;
+}
+
+/**
* assign_bit - Assign value to a bit in memory
* @nr: the bit to set
* @addr: the address to start counting from
@@ -328,10 +347,10 @@ static __always_inline void __assign_bit(long nr, volatile unsigned long *addr,
const typeof(*(ptr)) mask__ = (mask), bits__ = (bits); \
typeof(*(ptr)) old__, new__; \
\
+ old__ = READ_ONCE(*(ptr)); \
do { \
- old__ = READ_ONCE(*(ptr)); \
new__ = (old__ & ~mask__) | bits__; \
- } while (cmpxchg(ptr, old__, new__) != old__); \
+ } while (!try_cmpxchg(ptr, &old__, new__)); \
\
old__; \
})
@@ -343,11 +362,12 @@ static __always_inline void __assign_bit(long nr, volatile unsigned long *addr,
const typeof(*(ptr)) clear__ = (clear), test__ = (test);\
typeof(*(ptr)) old__, new__; \
\
+ old__ = READ_ONCE(*(ptr)); \
do { \
- old__ = READ_ONCE(*(ptr)); \
+ if (old__ & test__) \
+ break; \
new__ = old__ & ~clear__; \
- } while (!(old__ & test__) && \
- cmpxchg(ptr, old__, new__) != old__); \
+ } while (!try_cmpxchg(ptr, &old__, new__)); \
\
!(old__ & test__); \
})
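A worked example for the new fns() helper above; @n is zero-based, so n == 0 asks for the lowest set bit, and the sentinel BITS_PER_LONG is returned when fewer than n + 1 bits are set:

	unsigned long word = 0xb2;	/* bits 1, 4, 5 and 7 are set */

	fns(word, 0);	/* -> 1: lowest set bit */
	fns(word, 2);	/* -> 5: third set bit */
	fns(word, 4);	/* -> BITS_PER_LONG: there is no fifth set bit */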
diff --git a/include/linux/blk-cgroup.h b/include/linux/blk-cgroup.h
index 9f40dbc65f82..dd5841a42c33 100644
--- a/include/linux/blk-cgroup.h
+++ b/include/linux/blk-cgroup.h
@@ -18,14 +18,14 @@
struct bio;
struct cgroup_subsys_state;
-struct request_queue;
+struct gendisk;
#define FC_APPID_LEN 129
#ifdef CONFIG_BLK_CGROUP
extern struct cgroup_subsys_state * const blkcg_root_css;
-void blkcg_schedule_throttle(struct request_queue *q, bool use_memdelay);
+void blkcg_schedule_throttle(struct gendisk *disk, bool use_memdelay);
void blkcg_maybe_throttle_current(void);
bool blk_cgroup_congested(void);
void blkcg_pin_online(struct cgroup_subsys_state *blkcg_css);
@@ -39,7 +39,6 @@ struct cgroup_subsys_state *bio_blkcg_css(struct bio *bio);
static inline void blkcg_maybe_throttle_current(void) { }
static inline bool blk_cgroup_congested(void) { return false; }
-static inline void blkcg_schedule_throttle(struct request_queue *q, bool use_memdelay) { }
static inline struct cgroup_subsys_state *bio_blkcg_css(struct bio *bio)
{
return NULL;
diff --git a/include/linux/blk-mq-pci.h b/include/linux/blk-mq-pci.h
index 0b1f45c62623..ca544e1d3508 100644
--- a/include/linux/blk-mq-pci.h
+++ b/include/linux/blk-mq-pci.h
@@ -5,7 +5,7 @@
struct blk_mq_queue_map;
struct pci_dev;
-int blk_mq_pci_map_queues(struct blk_mq_queue_map *qmap, struct pci_dev *pdev,
- int offset);
+void blk_mq_pci_map_queues(struct blk_mq_queue_map *qmap, struct pci_dev *pdev,
+ int offset);
#endif /* _LINUX_BLK_MQ_PCI_H */
diff --git a/include/linux/blk-mq-rdma.h b/include/linux/blk-mq-rdma.h
index 5cc5f0f36218..53b58c610e76 100644
--- a/include/linux/blk-mq-rdma.h
+++ b/include/linux/blk-mq-rdma.h
@@ -5,7 +5,7 @@
struct blk_mq_tag_set;
struct ib_device;
-int blk_mq_rdma_map_queues(struct blk_mq_queue_map *map,
+void blk_mq_rdma_map_queues(struct blk_mq_queue_map *map,
struct ib_device *dev, int first_vec);
#endif /* _LINUX_BLK_MQ_RDMA_H */
diff --git a/include/linux/blk-mq-virtio.h b/include/linux/blk-mq-virtio.h
index 687ae287e1dc..13226e9b22dd 100644
--- a/include/linux/blk-mq-virtio.h
+++ b/include/linux/blk-mq-virtio.h
@@ -5,7 +5,7 @@
struct blk_mq_queue_map;
struct virtio_device;
-int blk_mq_virtio_map_queues(struct blk_mq_queue_map *qmap,
+void blk_mq_virtio_map_queues(struct blk_mq_queue_map *qmap,
struct virtio_device *vdev, int first_vec);
#endif /* _LINUX_BLK_MQ_VIRTIO_H */
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index 92294a5fb083..ba18e9bdb799 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -14,7 +14,12 @@ struct blk_flush_queue;
#define BLKDEV_MIN_RQ 4
#define BLKDEV_DEFAULT_RQ 128
-typedef void (rq_end_io_fn)(struct request *, blk_status_t);
+enum rq_end_io_ret {
+ RQ_END_IO_NONE,
+ RQ_END_IO_FREE,
+};
+
+typedef enum rq_end_io_ret (rq_end_io_fn)(struct request *, blk_status_t);
/*
* request flags */
@@ -268,9 +273,16 @@ static inline void rq_list_move(struct request **src, struct request **dst,
rq_list_add(dst, rq);
}
+/**
+ * enum blk_eh_timer_return - How the timeout handler should proceed
+ * @BLK_EH_DONE: The block driver completed the command or will complete it at
+ * a later time.
+ * @BLK_EH_RESET_TIMER: Reset the request timer and continue waiting for the
+ * request to complete.
+ */
enum blk_eh_timer_return {
- BLK_EH_DONE, /* drivers has completed the command */
- BLK_EH_RESET_TIMER, /* reset timer and try again */
+ BLK_EH_DONE,
+ BLK_EH_RESET_TIMER,
};
#define BLK_TAG_ALLOC_FIFO 0 /* allocate starting from 0 */
@@ -630,7 +642,7 @@ struct blk_mq_ops {
* @map_queues: This allows drivers specify their own queue mapping by
* overriding the setup-time function that builds the mq_map.
*/
- int (*map_queues)(struct blk_mq_tag_set *set);
+ void (*map_queues)(struct blk_mq_tag_set *set);
#ifdef CONFIG_BLK_DEBUG_FS
/**
@@ -841,8 +853,9 @@ static inline bool blk_mq_add_to_batch(struct request *req,
struct io_comp_batch *iob, int ioerror,
void (*complete)(struct io_comp_batch *))
{
- if (!iob || (req->rq_flags & RQF_ELV) || req->end_io || ioerror)
+ if (!iob || (req->rq_flags & RQF_ELV) || ioerror)
return false;
+
if (!iob->complete)
iob->complete = complete;
else if (iob->complete != complete)
@@ -880,7 +893,7 @@ void blk_mq_freeze_queue_wait(struct request_queue *q);
int blk_mq_freeze_queue_wait_timeout(struct request_queue *q,
unsigned long timeout);
-int blk_mq_map_queues(struct blk_mq_queue_map *qmap);
+void blk_mq_map_queues(struct blk_mq_queue_map *qmap);
void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues);
void blk_mq_quiesce_queue_nowait(struct request_queue *q);
@@ -963,15 +976,17 @@ blk_status_t blk_insert_cloned_request(struct request *rq);
struct rq_map_data {
struct page **pages;
- int page_order;
- int nr_entries;
unsigned long offset;
- int null_mapped;
- int from_user;
+ unsigned short page_order;
+ unsigned short nr_entries;
+ bool null_mapped;
+ bool from_user;
};
int blk_rq_map_user(struct request_queue *, struct request *,
struct rq_map_data *, void __user *, unsigned long, gfp_t);
+int blk_rq_map_user_io(struct request *, struct rq_map_data *,
+ void __user *, unsigned long, gfp_t, bool, int, bool, int);
int blk_rq_map_user_iov(struct request_queue *, struct request *,
struct rq_map_data *, const struct iov_iter *, gfp_t);
int blk_rq_unmap_user(struct bio *);
@@ -980,6 +995,7 @@ int blk_rq_map_kern(struct request_queue *, struct request *, void *,
int blk_rq_append_bio(struct request *rq, struct bio *bio);
void blk_execute_rq_nowait(struct request *rq, bool at_head);
blk_status_t blk_execute_rq(struct request *rq, bool at_head);
+bool blk_rq_is_poll(struct request *rq);
struct req_iterator {
struct bvec_iter iter;
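Under the new rq_end_io_fn contract, the completion callback tells the block layer whether it has taken ownership of the request. A hedged sketch of a driver-internal completion for a synchronously awaited passthrough request; struct example_cmd is hypothetical:

	static enum rq_end_io_ret example_end_io(struct request *rq, blk_status_t error)
	{
		struct example_cmd *cmd = blk_mq_rq_to_pdu(rq);

		cmd->status = error;
		complete(&cmd->done);

		/* The submitter still owns rq and frees it after waiting, so do
		 * not ask the core to free it here.
		 */
		return RQ_END_IO_NONE;
	}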
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index 1ef99790f6ed..e0b098089ef2 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -321,11 +321,10 @@ enum {
BIO_NO_PAGE_REF, /* don't put release vec pages */
BIO_CLONED, /* doesn't own data */
BIO_BOUNCED, /* bio is a bounce bio */
- BIO_WORKINGSET, /* contains userspace workingset pages */
BIO_QUIET, /* Make BIO Quiet */
BIO_CHAIN, /* chained bio, ->bi_remaining in effect */
BIO_REFFED, /* bio has elevated ->bi_cnt */
- BIO_THROTTLED, /* This bio has already been subjected to
+ BIO_BPS_THROTTLED, /* This bio has already been subjected to
* throttling rules. Don't do it again. */
BIO_TRACE_COMPLETION, /* bio_endio() should trace the final completion
* of this bio. */
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 84b13fdd34a7..50e358a19d98 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -580,9 +580,9 @@ struct request_queue {
#define QUEUE_FLAG_NOWAIT 29 /* device supports NOWAIT */
#define QUEUE_FLAG_SQ_SCHED 30 /* single queue style io dispatch */
-#define QUEUE_FLAG_MQ_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \
- (1 << QUEUE_FLAG_SAME_COMP) | \
- (1 << QUEUE_FLAG_NOWAIT))
+#define QUEUE_FLAG_MQ_DEFAULT ((1UL << QUEUE_FLAG_IO_STAT) | \
+ (1UL << QUEUE_FLAG_SAME_COMP) | \
+ (1UL << QUEUE_FLAG_NOWAIT))
void blk_queue_flag_set(unsigned int flag, struct request_queue *q);
void blk_queue_flag_clear(unsigned int flag, struct request_queue *q);
@@ -618,7 +618,6 @@ bool blk_queue_flag_test_and_set(unsigned int flag, struct request_queue *q);
#define blk_queue_quiesced(q) test_bit(QUEUE_FLAG_QUIESCED, &(q)->queue_flags)
#define blk_queue_pm_only(q) atomic_read(&(q)->pm_only)
#define blk_queue_registered(q) test_bit(QUEUE_FLAG_REGISTERED, &(q)->queue_flags)
-#define blk_queue_nowait(q) test_bit(QUEUE_FLAG_NOWAIT, &(q)->queue_flags)
#define blk_queue_sq_sched(q) test_bit(QUEUE_FLAG_SQ_SCHED, &(q)->queue_flags)
extern void blk_set_pm_only(struct request_queue *q);
@@ -1280,6 +1279,11 @@ static inline bool bdev_fua(struct block_device *bdev)
return test_bit(QUEUE_FLAG_FUA, &bdev_get_queue(bdev)->queue_flags);
}
+static inline bool bdev_nowait(struct block_device *bdev)
+{
+ return test_bit(QUEUE_FLAG_NOWAIT, &bdev_get_queue(bdev)->queue_flags);
+}
+
static inline enum blk_zoned_model bdev_zoned_model(struct block_device *bdev)
{
struct request_queue *q = bdev_get_queue(bdev);
@@ -1300,6 +1304,15 @@ static inline bool bdev_is_zoned(struct block_device *bdev)
return false;
}
+static inline bool bdev_op_is_zoned_write(struct block_device *bdev,
+ blk_opf_t op)
+{
+ if (!bdev_is_zoned(bdev))
+ return false;
+
+ return op == REQ_OP_WRITE || op == REQ_OP_WRITE_ZEROES;
+}
+
static inline sector_t bdev_zone_sectors(struct block_device *bdev)
{
struct request_queue *q = bdev_get_queue(bdev);
@@ -1498,6 +1511,7 @@ int sync_blockdev(struct block_device *bdev);
int sync_blockdev_range(struct block_device *bdev, loff_t lstart, loff_t lend);
int sync_blockdev_nowait(struct block_device *bdev);
void sync_bdevs(bool wait);
+void bdev_statx_dioalign(struct inode *inode, struct kstat *stat);
void printk_all_partitions(void);
#else
static inline void invalidate_bdev(struct block_device *bdev)
@@ -1514,6 +1528,9 @@ static inline int sync_blockdev_nowait(struct block_device *bdev)
static inline void sync_bdevs(bool wait)
{
}
+static inline void bdev_statx_dioalign(struct inode *inode, struct kstat *stat)
+{
+}
static inline void printk_all_partitions(void)
{
}
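bdev_nowait() replaces open-coded tests of QUEUE_FLAG_NOWAIT in stacking drivers. A sketched submission-path check (the surrounding bio handling is omitted):

	/* Fail fast rather than block when the caller asked for REQ_NOWAIT but
	 * the underlying device cannot honour it.
	 */
	if ((bio->bi_opf & REQ_NOWAIT) && !bdev_nowait(bio->bi_bdev)) {
		bio_wouldblock_error(bio);
		return;
	}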
diff --git a/include/linux/bma150.h b/include/linux/bma150.h
index 31c9e323a391..4d4a62d49341 100644
--- a/include/linux/bma150.h
+++ b/include/linux/bma150.h
@@ -33,8 +33,8 @@ struct bma150_cfg {
unsigned char lg_hyst; /* Low-G hysterisis */
unsigned char lg_dur; /* Low-G duration */
unsigned char lg_thres; /* Low-G threshold */
- unsigned char range; /* one of BMA0150_RANGE_xxx */
- unsigned char bandwidth; /* one of BMA0150_BW_xxx */
+ unsigned char range; /* one of BMA150_RANGE_xxx */
+ unsigned char bandwidth; /* one of BMA150_BW_xxx */
};
struct bma150_platform_data {
diff --git a/include/linux/bpf-cgroup.h b/include/linux/bpf-cgroup.h
index 2bd1b5f8de9b..57e9e109257e 100644
--- a/include/linux/bpf-cgroup.h
+++ b/include/linux/bpf-cgroup.h
@@ -414,6 +414,11 @@ int cgroup_bpf_prog_detach(const union bpf_attr *attr,
int cgroup_bpf_link_attach(const union bpf_attr *attr, struct bpf_prog *prog);
int cgroup_bpf_prog_query(const union bpf_attr *attr,
union bpf_attr __user *uattr);
+
+const struct bpf_func_proto *
+cgroup_common_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog);
+const struct bpf_func_proto *
+cgroup_current_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog);
#else
static inline int cgroup_bpf_inherit(struct cgroup *cgrp) { return 0; }
@@ -444,6 +449,18 @@ static inline int cgroup_bpf_prog_query(const union bpf_attr *attr,
return -EINVAL;
}
+static inline const struct bpf_func_proto *
+cgroup_common_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
+{
+ return NULL;
+}
+
+static inline const struct bpf_func_proto *
+cgroup_current_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
+{
+ return NULL;
+}
+
static inline int bpf_cgroup_storage_assign(struct bpf_prog_aux *aux,
struct bpf_map *map) { return 0; }
static inline struct bpf_cgroup_storage *bpf_cgroup_storage_alloc(
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 20c26aed7896..9e7d46d16032 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -48,6 +48,7 @@ struct mem_cgroup;
struct module;
struct bpf_func_state;
struct ftrace_ops;
+struct cgroup;
extern struct idr btf_idr;
extern spinlock_t btf_idr_lock;
@@ -279,14 +280,33 @@ static inline void check_and_init_map_value(struct bpf_map *map, void *dst)
}
}
-/* copy everything but bpf_spin_lock and bpf_timer. There could be one of each. */
-static inline void copy_map_value(struct bpf_map *map, void *dst, void *src)
+/* memcpy that is used with 8-byte aligned pointers, power-of-8 size and
+ * forced to use 'long' read/writes to try to atomically copy long counters.
+ * Best-effort only. No barriers here, since it _will_ race with concurrent
+ * updates from BPF programs. Called from bpf syscall and mostly used with
+ * size 8 or 16 bytes, so ask compiler to inline it.
+ */
+static inline void bpf_long_memcpy(void *dst, const void *src, u32 size)
+{
+ const long *lsrc = src;
+ long *ldst = dst;
+
+ size /= sizeof(long);
+ while (size--)
+ *ldst++ = *lsrc++;
+}
+
+/* copy everything but bpf_spin_lock, bpf_timer, and kptrs. There could be one of each. */
+static inline void __copy_map_value(struct bpf_map *map, void *dst, void *src, bool long_memcpy)
{
u32 curr_off = 0;
int i;
if (likely(!map->off_arr)) {
- memcpy(dst, src, map->value_size);
+ if (long_memcpy)
+ bpf_long_memcpy(dst, src, round_up(map->value_size, 8));
+ else
+ memcpy(dst, src, map->value_size);
return;
}
@@ -298,6 +318,36 @@ static inline void copy_map_value(struct bpf_map *map, void *dst, void *src)
}
memcpy(dst + curr_off, src + curr_off, map->value_size - curr_off);
}
+
+static inline void copy_map_value(struct bpf_map *map, void *dst, void *src)
+{
+ __copy_map_value(map, dst, src, false);
+}
+
+static inline void copy_map_value_long(struct bpf_map *map, void *dst, void *src)
+{
+ __copy_map_value(map, dst, src, true);
+}
+
+static inline void zero_map_value(struct bpf_map *map, void *dst)
+{
+ u32 curr_off = 0;
+ int i;
+
+ if (likely(!map->off_arr)) {
+ memset(dst, 0, map->value_size);
+ return;
+ }
+
+ for (i = 0; i < map->off_arr->cnt; i++) {
+ u32 next_off = map->off_arr->field_off[i];
+
+ memset(dst + curr_off, 0, next_off - curr_off);
+ curr_off += map->off_arr->field_sz[i];
+ }
+ memset(dst + curr_off, 0, map->value_size - curr_off);
+}
+
void copy_map_value_locked(struct bpf_map *map, void *dst, void *src,
bool lock_src);
void bpf_timer_cancel_and_free(void *timer);
@@ -401,7 +451,7 @@ enum bpf_type_flag {
/* DYNPTR points to memory local to the bpf program. */
DYNPTR_TYPE_LOCAL = BIT(8 + BPF_BASE_TYPE_BITS),
- /* DYNPTR points to a ringbuf record. */
+ /* DYNPTR points to a kernel-produced ringbuf record. */
DYNPTR_TYPE_RINGBUF = BIT(9 + BPF_BASE_TYPE_BITS),
/* Size is known at compile time. */
@@ -606,6 +656,7 @@ enum bpf_reg_type {
PTR_TO_MEM, /* reg points to valid memory region */
PTR_TO_BUF, /* reg points to a read/write buffer */
PTR_TO_FUNC, /* reg points to a bpf program function */
+ PTR_TO_DYNPTR, /* reg points to a dynptr */
__BPF_REG_TYPE_MAX,
/* Extended reg_types. */
@@ -726,10 +777,14 @@ enum bpf_cgroup_storage_type {
*/
#define MAX_BPF_FUNC_REG_ARGS 5
+/* The argument is a structure. */
+#define BTF_FMODEL_STRUCT_ARG BIT(0)
+
struct btf_func_model {
u8 ret_size;
u8 nr_args;
u8 arg_size[MAX_BPF_FUNC_ARGS];
+ u8 arg_flags[MAX_BPF_FUNC_ARGS];
};
/* Restore arguments before returning from trampoline to let original function
@@ -809,6 +864,10 @@ u64 notrace __bpf_prog_enter_lsm_cgroup(struct bpf_prog *prog,
struct bpf_tramp_run_ctx *run_ctx);
void notrace __bpf_prog_exit_lsm_cgroup(struct bpf_prog *prog, u64 start,
struct bpf_tramp_run_ctx *run_ctx);
+u64 notrace __bpf_prog_enter_struct_ops(struct bpf_prog *prog,
+ struct bpf_tramp_run_ctx *run_ctx);
+void notrace __bpf_prog_exit_struct_ops(struct bpf_prog *prog, u64 start,
+ struct bpf_tramp_run_ctx *run_ctx);
void notrace __bpf_tramp_enter(struct bpf_tramp_image *tr);
void notrace __bpf_tramp_exit(struct bpf_tramp_image *tr);
@@ -891,6 +950,7 @@ struct bpf_dispatcher {
struct bpf_dispatcher_prog progs[BPF_DISPATCHER_MAX];
int num_progs;
void *image;
+ void *rw_image;
u32 image_off;
struct bpf_ksym ksym;
};
@@ -909,7 +969,7 @@ int bpf_trampoline_unlink_prog(struct bpf_tramp_link *link, struct bpf_trampolin
struct bpf_trampoline *bpf_trampoline_get(u64 key,
struct bpf_attach_target_info *tgt_info);
void bpf_trampoline_put(struct bpf_trampoline *tr);
-int arch_prepare_bpf_dispatcher(void *image, s64 *funcs, int num_funcs);
+int arch_prepare_bpf_dispatcher(void *image, void *buf, s64 *funcs, int num_funcs);
#define BPF_DISPATCHER_INIT(_name) { \
.mutex = __MUTEX_INITIALIZER(_name.mutex), \
.func = &_name##_func, \
@@ -923,7 +983,14 @@ int arch_prepare_bpf_dispatcher(void *image, s64 *funcs, int num_funcs);
}, \
}
+#ifdef CONFIG_X86_64
+#define BPF_DISPATCHER_ATTRIBUTES __attribute__((patchable_function_entry(5)))
+#else
+#define BPF_DISPATCHER_ATTRIBUTES
+#endif
+
#define DEFINE_BPF_DISPATCHER(name) \
+ notrace BPF_DISPATCHER_ATTRIBUTES \
noinline __nocfi unsigned int bpf_dispatcher_##name##_func( \
const void *ctx, \
const struct bpf_insn *insnsi, \
@@ -945,7 +1012,6 @@ int arch_prepare_bpf_dispatcher(void *image, s64 *funcs, int num_funcs);
void bpf_dispatcher_change_prog(struct bpf_dispatcher *d, struct bpf_prog *from,
struct bpf_prog *to);
/* Called only from JIT-enabled code, so there's no need for stubs. */
-void *bpf_jit_alloc_exec_page(void);
void bpf_image_ksym_add(void *data, struct bpf_ksym *ksym);
void bpf_image_ksym_del(struct bpf_ksym *ksym);
void bpf_ksym_add(struct bpf_ksym *ksym);
@@ -1333,6 +1399,11 @@ struct bpf_array {
#define BPF_MAP_CAN_READ BIT(0)
#define BPF_MAP_CAN_WRITE BIT(1)
+/* Maximum number of user-producer ring buffer samples that can be drained in
+ * a call to bpf_user_ringbuf_drain().
+ */
+#define BPF_MAX_USER_RINGBUF_SAMPLES (128 * 1024)
+
static inline u32 bpf_map_flags_to_cap(struct bpf_map *map)
{
u32 access_flags = map->map_flags & (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG);
@@ -1729,8 +1800,40 @@ int bpf_obj_get_user(const char __user *pathname, int flags);
extern int bpf_iter_ ## target(args); \
int __init bpf_iter_ ## target(args) { return 0; }
+/*
+ * Types of BPF task iterators.
+ *
+ * A BPF task iterator can be parameterized to visit only a subset of
+ * tasks.
+ *
+ * BPF_TASK_ITER_ALL (default)
+ * Iterate over resources of every task.
+ *
+ * BPF_TASK_ITER_TID
+ * Iterate over resources of a task/tid.
+ *
+ * BPF_TASK_ITER_TGID
+ * Iterate over resources of every task of a process / task group.
+ */
+enum bpf_iter_task_type {
+ BPF_TASK_ITER_ALL = 0,
+ BPF_TASK_ITER_TID,
+ BPF_TASK_ITER_TGID,
+};
+
struct bpf_iter_aux_info {
+ /* for map_elem iter */
struct bpf_map *map;
+
+ /* for cgroup iter */
+ struct {
+ struct cgroup *start; /* starting cgroup */
+ enum bpf_cgroup_iter_order order;
+ } cgroup;
+ struct {
+ enum bpf_iter_task_type type;
+ u32 pid;
+ } task;
};
typedef int (*bpf_iter_attach_target_t)(struct bpf_prog *prog,
@@ -1815,22 +1918,6 @@ int bpf_get_file_flag(int flags);
int bpf_check_uarg_tail_zero(bpfptr_t uaddr, size_t expected_size,
size_t actual_size);
-/* memcpy that is used with 8-byte aligned pointers, power-of-8 size and
- * forced to use 'long' read/writes to try to atomically copy long counters.
- * Best-effort only. No barriers here, since it _will_ race with concurrent
- * updates from BPF programs. Called from bpf syscall and mostly used with
- * size 8 or 16 bytes, so ask compiler to inline it.
- */
-static inline void bpf_long_memcpy(void *dst, const void *src, u32 size)
-{
- const long *lsrc = src;
- long *ldst = dst;
-
- size /= sizeof(long);
- while (size--)
- *ldst++ = *lsrc++;
-}
-
/* verify correctness of eBPF program */
int bpf_check(struct bpf_prog **fp, union bpf_attr *attr, bpfptr_t uattr);
@@ -1932,13 +2019,22 @@ int btf_distill_func_proto(struct bpf_verifier_log *log,
const char *func_name,
struct btf_func_model *m);
+struct bpf_kfunc_arg_meta {
+ u64 r0_size;
+ bool r0_rdonly;
+ int ref_obj_id;
+ u32 flags;
+};
+
struct bpf_reg_state;
int btf_check_subprog_arg_match(struct bpf_verifier_env *env, int subprog,
struct bpf_reg_state *regs);
+int btf_check_subprog_call(struct bpf_verifier_env *env, int subprog,
+ struct bpf_reg_state *regs);
int btf_check_kfunc_arg_match(struct bpf_verifier_env *env,
const struct btf *btf, u32 func_id,
struct bpf_reg_state *regs,
- u32 kfunc_flags);
+ struct bpf_kfunc_arg_meta *meta);
int btf_prepare_func_args(struct bpf_verifier_env *env, int subprog,
struct bpf_reg_state *reg);
int btf_check_type_match(struct bpf_verifier_log *log, const struct bpf_prog *prog,
@@ -1966,6 +2062,17 @@ static inline bool unprivileged_ebpf_enabled(void)
return !sysctl_unprivileged_bpf_disabled;
}
+/* Not all bpf prog types have the bpf_ctx.
+ * For the prog types that have initialized the bpf_ctx,
+ * this function can be used to decide whether a kernel function
+ * is being called by a bpf program.
+ */
+static inline bool has_current_bpf_ctx(void)
+{
+ return !!current->bpf_ctx;
+}
+
+void notrace bpf_prog_inc_misses_counter(struct bpf_prog *prog);
#else /* !CONFIG_BPF_SYSCALL */
static inline struct bpf_prog *bpf_prog_get(u32 ufd)
{
@@ -2148,6 +2255,15 @@ static inline struct bpf_prog *bpf_prog_by_id(u32 id)
return ERR_PTR(-ENOTSUPP);
}
+static inline int btf_struct_access(struct bpf_verifier_log *log,
+ const struct btf *btf,
+ const struct btf_type *t, int off, int size,
+ enum bpf_access_type atype,
+ u32 *next_btf_id, enum bpf_type_flag *flag)
+{
+ return -EACCES;
+}
+
static inline const struct bpf_func_proto *
bpf_base_func_proto(enum bpf_func_id func_id)
{
@@ -2175,6 +2291,14 @@ static inline bool unprivileged_ebpf_enabled(void)
return false;
}
+static inline bool has_current_bpf_ctx(void)
+{
+ return false;
+}
+
+static inline void bpf_prog_inc_misses_counter(struct bpf_prog *prog)
+{
+}
#endif /* CONFIG_BPF_SYSCALL */
void __bpf_free_used_btfs(struct bpf_prog_aux *aux,
@@ -2349,6 +2473,7 @@ extern const struct bpf_func_proto bpf_get_numa_node_id_proto;
extern const struct bpf_func_proto bpf_tail_call_proto;
extern const struct bpf_func_proto bpf_ktime_get_ns_proto;
extern const struct bpf_func_proto bpf_ktime_get_boot_ns_proto;
+extern const struct bpf_func_proto bpf_ktime_get_tai_ns_proto;
extern const struct bpf_func_proto bpf_get_current_pid_tgid_proto;
extern const struct bpf_func_proto bpf_get_current_uid_gid_proto;
extern const struct bpf_func_proto bpf_get_current_comm_proto;
@@ -2361,6 +2486,7 @@ extern const struct bpf_func_proto bpf_sock_map_update_proto;
extern const struct bpf_func_proto bpf_sock_hash_update_proto;
extern const struct bpf_func_proto bpf_get_current_cgroup_id_proto;
extern const struct bpf_func_proto bpf_get_current_ancestor_cgroup_id_proto;
+extern const struct bpf_func_proto bpf_get_cgroup_classid_curr_proto;
extern const struct bpf_func_proto bpf_msg_redirect_hash_proto;
extern const struct bpf_func_proto bpf_msg_redirect_map_proto;
extern const struct bpf_func_proto bpf_sk_redirect_hash_proto;
@@ -2410,6 +2536,7 @@ extern const struct bpf_func_proto bpf_loop_proto;
extern const struct bpf_func_proto bpf_copy_from_user_task_proto;
extern const struct bpf_func_proto bpf_set_retval_proto;
extern const struct bpf_func_proto bpf_get_retval_proto;
+extern const struct bpf_func_proto bpf_user_ringbuf_drain_proto;
const struct bpf_func_proto *tracing_prog_func_proto(
enum bpf_func_id func_id, const struct bpf_prog *prog);
@@ -2554,7 +2681,7 @@ enum bpf_dynptr_type {
BPF_DYNPTR_TYPE_INVALID,
/* Points to memory that is local to the bpf program */
BPF_DYNPTR_TYPE_LOCAL,
- /* Underlying data is a ringbuf record */
+ /* Underlying data is a kernel-produced ringbuf record */
BPF_DYNPTR_TYPE_RINGBUF,
};
@@ -2562,6 +2689,7 @@ void bpf_dynptr_init(struct bpf_dynptr_kern *ptr, void *data,
enum bpf_dynptr_type type, u32 offset, u32 size);
void bpf_dynptr_set_null(struct bpf_dynptr_kern *ptr);
int bpf_dynptr_check_size(u32 size);
+u32 bpf_dynptr_get_size(struct bpf_dynptr_kern *ptr);
#ifdef CONFIG_BPF_LSM
void bpf_cgroup_atype_get(u32 attach_btf_id, int cgroup_atype);
@@ -2571,4 +2699,12 @@ static inline void bpf_cgroup_atype_get(u32 attach_btf_id, int cgroup_atype) {}
static inline void bpf_cgroup_atype_put(int cgroup_atype) {}
#endif /* CONFIG_BPF_LSM */
+struct key;
+
+#ifdef CONFIG_KEYS
+struct bpf_key {
+ struct key *key;
+ bool has_ref;
+};
+#endif /* CONFIG_KEYS */
#endif /* _LINUX_BPF_H */
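One intended use of has_current_bpf_ctx() is letting code shared between syscall and BPF callers detect that it is running on behalf of a BPF program and pick a non-sleeping path. All example_* names below are hypothetical:

	static int example_update(struct example_obj *obj)
	{
		if (has_current_bpf_ctx()) {
			/* Called from a BPF program: must stay atomic. */
			return example_update_atomic(obj);
		}

		/* Ordinary task/syscall context: blocking is allowed. */
		return example_update_sleepable(obj);
	}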
diff --git a/include/linux/bpf_mem_alloc.h b/include/linux/bpf_mem_alloc.h
new file mode 100644
index 000000000000..3e164b8efaa9
--- /dev/null
+++ b/include/linux/bpf_mem_alloc.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
+#ifndef _BPF_MEM_ALLOC_H
+#define _BPF_MEM_ALLOC_H
+#include <linux/compiler_types.h>
+#include <linux/workqueue.h>
+
+struct bpf_mem_cache;
+struct bpf_mem_caches;
+
+struct bpf_mem_alloc {
+ struct bpf_mem_caches __percpu *caches;
+ struct bpf_mem_cache __percpu *cache;
+ struct work_struct work;
+};
+
+int bpf_mem_alloc_init(struct bpf_mem_alloc *ma, int size, bool percpu);
+void bpf_mem_alloc_destroy(struct bpf_mem_alloc *ma);
+
+/* kmalloc/kfree equivalent: */
+void *bpf_mem_alloc(struct bpf_mem_alloc *ma, size_t size);
+void bpf_mem_free(struct bpf_mem_alloc *ma, void *ptr);
+
+/* kmem_cache_alloc/free equivalent: */
+void *bpf_mem_cache_alloc(struct bpf_mem_alloc *ma);
+void bpf_mem_cache_free(struct bpf_mem_alloc *ma, void *ptr);
+
+#endif /* _BPF_MEM_ALLOC_H */
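The expected lifecycle of the new allocator, sketched for the fixed-size (kmem_cache-like) mode selected by passing a non-zero size to bpf_mem_alloc_init(); with size == 0 the bpf_mem_alloc()/bpf_mem_free() pair is used instead. struct example_elem is hypothetical:

	struct bpf_mem_alloc ma;
	struct example_elem *e;
	int err;

	err = bpf_mem_alloc_init(&ma, sizeof(struct example_elem), false /* !percpu */);
	if (err)
		return err;

	e = bpf_mem_cache_alloc(&ma);	/* designed to be callable from any context */
	if (e) {
		/* ... use e ... */
		bpf_mem_cache_free(&ma, e);
	}

	bpf_mem_alloc_destroy(&ma);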
diff --git a/include/linux/bpf_types.h b/include/linux/bpf_types.h
index 2b9112b80171..2c6a4f2562a7 100644
--- a/include/linux/bpf_types.h
+++ b/include/linux/bpf_types.h
@@ -126,6 +126,7 @@ BPF_MAP_TYPE(BPF_MAP_TYPE_STRUCT_OPS, bpf_struct_ops_map_ops)
#endif
BPF_MAP_TYPE(BPF_MAP_TYPE_RINGBUF, ringbuf_map_ops)
BPF_MAP_TYPE(BPF_MAP_TYPE_BLOOM_FILTER, bloom_filter_map_ops)
+BPF_MAP_TYPE(BPF_MAP_TYPE_USER_RINGBUF, user_ringbuf_map_ops)
BPF_LINK_TYPE(BPF_LINK_TYPE_RAW_TRACEPOINT, raw_tracepoint)
BPF_LINK_TYPE(BPF_LINK_TYPE_TRACING, tracing)
diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
index 2e3bad8640dc..9e1e6965f407 100644
--- a/include/linux/bpf_verifier.h
+++ b/include/linux/bpf_verifier.h
@@ -212,6 +212,17 @@ struct bpf_reference_state {
* is used purely to inform the user of a reference leak.
*/
int insn_idx;
+ /* There can be a case like:
+ * main (frame 0)
+ * cb (frame 1)
+ * func (frame 2)
+ * cb (frame 3)
+ * Hence for frame 3, if callback_ref just stored boolean, it would be
+ * impossible to distinguish nested callback refs. Hence store the
+ * frameno and compare that to callback_ref in check_reference_leak when
+ * exiting a callback function.
+ */
+ int callback_ref;
};
/* state of the program:
@@ -237,6 +248,7 @@ struct bpf_func_state {
*/
u32 async_entry_cnt;
bool in_callback_fn;
+ struct tnum callback_ret_range;
bool in_async_callback_fn;
/* The following fields should be last. See copy_func_state() */
@@ -337,6 +349,27 @@ struct bpf_verifier_state {
iter < frame->allocated_stack / BPF_REG_SIZE; \
iter++, reg = bpf_get_spilled_reg(iter, frame))
+/* Invoke __expr over registers in __vst, setting __state and __reg */
+#define bpf_for_each_reg_in_vstate(__vst, __state, __reg, __expr) \
+ ({ \
+ struct bpf_verifier_state *___vstate = __vst; \
+ int ___i, ___j; \
+ for (___i = 0; ___i <= ___vstate->curframe; ___i++) { \
+ struct bpf_reg_state *___regs; \
+ __state = ___vstate->frame[___i]; \
+ ___regs = __state->regs; \
+ for (___j = 0; ___j < MAX_BPF_REG; ___j++) { \
+ __reg = &___regs[___j]; \
+ (void)(__expr); \
+ } \
+ bpf_for_each_spilled_reg(___j, __state, __reg) { \
+ if (!__reg) \
+ continue; \
+ (void)(__expr); \
+ } \
+ } \
+ })
+
/* linked list of verifier states used to prune search */
struct bpf_verifier_state_list {
struct bpf_verifier_state state;
@@ -560,6 +593,11 @@ int check_kfunc_mem_size_reg(struct bpf_verifier_env *env, struct bpf_reg_state
u32 regno);
int check_mem_reg(struct bpf_verifier_env *env, struct bpf_reg_state *reg,
u32 regno, u32 mem_size);
+bool is_dynptr_reg_valid_init(struct bpf_verifier_env *env,
+ struct bpf_reg_state *reg);
+bool is_dynptr_type_expected(struct bpf_verifier_env *env,
+ struct bpf_reg_state *reg,
+ enum bpf_arg_type arg_type);
/* this lives here instead of in bpf.h because it needs to dereference tgt_prog */
static inline u64 bpf_trampoline_compute_key(const struct bpf_prog *tgt_prog,
@@ -587,6 +625,8 @@ int bpf_check_attach_target(struct bpf_verifier_log *log,
struct bpf_attach_target_info *tgt_info);
void bpf_free_kfunc_btf_tab(struct bpf_kfunc_btf_tab *tab);
+int mark_chain_precision(struct bpf_verifier_env *env, int regno);
+
#define BPF_BASE_TYPE_MASK GENMASK(BPF_BASE_TYPE_BITS - 1, 0)
/* extract base type from bpf_{arg, return, reg}_type. */
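A usage sketch for the bpf_for_each_reg_in_vstate() iterator added above, in the style of the verifier's reference-invalidation loops; env, ref_obj_id and __mark_reg_unknown() are verifier-internal and shown only for illustration:

	struct bpf_func_state *state;
	struct bpf_reg_state *reg;

	bpf_for_each_reg_in_vstate(env->cur_state, state, reg, ({
		if (reg->ref_obj_id == ref_obj_id)
			__mark_reg_unknown(env, reg);
	}));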
diff --git a/include/linux/brcmphy.h b/include/linux/brcmphy.h
index 6ff567ece34a..9e77165f3ef6 100644
--- a/include/linux/brcmphy.h
+++ b/include/linux/brcmphy.h
@@ -293,6 +293,7 @@
#define MII_BRCM_FET_SHDW_MC_FAME 0x4000 /* Force Auto MDIX enable */
#define MII_BRCM_FET_SHDW_AUXMODE4 0x1a /* Auxiliary mode 4 */
+#define MII_BRCM_FET_SHDW_AM4_STANDBY 0x0008 /* Standby enable */
#define MII_BRCM_FET_SHDW_AM4_LED_MASK 0x0003
#define MII_BRCM_FET_SHDW_AM4_LED_MODE1 0x0001
diff --git a/include/linux/btf.h b/include/linux/btf.h
index cdb376d53238..f9aababc5d78 100644
--- a/include/linux/btf.h
+++ b/include/linux/btf.h
@@ -49,6 +49,17 @@
* for this case.
*/
#define KF_TRUSTED_ARGS (1 << 4) /* kfunc only takes trusted pointer arguments */
+#define KF_SLEEPABLE (1 << 5) /* kfunc may sleep */
+#define KF_DESTRUCTIVE (1 << 6) /* kfunc performs destructive actions */
+
+/*
+ * Return the name of the passed struct, if it exists, or halt the build if,
+ * for example, the structure gets renamed. In this way, developers have to
+ * revisit the code using that structure name and update it accordingly.
+ */
+#define stringify_struct(x) \
+ ({ BUILD_BUG_ON(sizeof(struct x) < 0); \
+ __stringify(x); })
struct btf;
struct btf_member;
@@ -439,4 +450,14 @@ static inline int register_btf_id_dtor_kfuncs(const struct btf_id_dtor_kfunc *dt
}
#endif
+static inline bool btf_type_is_struct_ptr(struct btf *btf, const struct btf_type *t)
+{
+ if (!btf_type_is_ptr(t))
+ return false;
+
+ t = btf_type_skip_modifiers(btf, t->type, NULL);
+
+ return btf_type_is_struct(t);
+}
+
#endif
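Two usage sketches, not part of the patch (bpf_example_kfunc is a hypothetical kfunc): stringify_struct() keeps a printed struct name in sync with the real type, and the new KF_* flags annotate kfuncs in a BTF id set:

	static void example_report(void)
	{
		/* prints "task_struct"; renaming the struct breaks the build */
		pr_info("watching %s\n", stringify_struct(task_struct));
	}

	BTF_SET8_START(example_kfunc_ids)
	/* hypothetical kfunc that may sleep and performs destructive actions */
	BTF_ID_FLAGS(func, bpf_example_kfunc, KF_SLEEPABLE | KF_DESTRUCTIVE)
	BTF_SET8_END(example_kfunc_ids)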
diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h
index df518c429667..33fa5e94aa80 100644
--- a/include/linux/buffer_head.h
+++ b/include/linux/buffer_head.h
@@ -225,8 +225,6 @@ struct buffer_head *__getblk_gfp(struct block_device *bdev, sector_t block,
void __brelse(struct buffer_head *);
void __bforget(struct buffer_head *);
void __breadahead(struct block_device *, sector_t block, unsigned int size);
-void __breadahead_gfp(struct block_device *, sector_t block, unsigned int size,
- gfp_t gfp);
struct buffer_head *__bread_gfp(struct block_device *,
sector_t block, unsigned size, gfp_t gfp);
void invalidate_bh_lrus(void);
@@ -236,15 +234,16 @@ struct buffer_head *alloc_buffer_head(gfp_t gfp_flags);
void free_buffer_head(struct buffer_head * bh);
void unlock_buffer(struct buffer_head *bh);
void __lock_buffer(struct buffer_head *bh);
-void ll_rw_block(blk_opf_t, int, struct buffer_head * bh[]);
int sync_dirty_buffer(struct buffer_head *bh);
int __sync_dirty_buffer(struct buffer_head *bh, blk_opf_t op_flags);
void write_dirty_buffer(struct buffer_head *bh, blk_opf_t op_flags);
-int submit_bh(blk_opf_t, struct buffer_head *);
+void submit_bh(blk_opf_t, struct buffer_head *);
void write_boundary_block(struct block_device *bdev,
sector_t bblock, unsigned blocksize);
int bh_uptodate_or_lock(struct buffer_head *bh);
-int bh_submit_read(struct buffer_head *bh);
+int __bh_read(struct buffer_head *bh, blk_opf_t op_flags, bool wait);
+void __bh_read_batch(int nr, struct buffer_head *bhs[],
+ blk_opf_t op_flags, bool force_lock);
extern int buffer_heads_over_limit;
@@ -351,12 +350,6 @@ sb_breadahead(struct super_block *sb, sector_t block)
__breadahead(sb->s_bdev, block, sb->s_blocksize);
}
-static inline void
-sb_breadahead_unmovable(struct super_block *sb, sector_t block)
-{
- __breadahead_gfp(sb->s_bdev, block, sb->s_blocksize, 0);
-}
-
static inline struct buffer_head *
sb_getblk(struct super_block *sb, sector_t block)
{
@@ -418,6 +411,41 @@ static inline struct buffer_head *__getblk(struct block_device *bdev,
return __getblk_gfp(bdev, block, size, __GFP_MOVABLE);
}
+static inline void bh_readahead(struct buffer_head *bh, blk_opf_t op_flags)
+{
+ if (!buffer_uptodate(bh) && trylock_buffer(bh)) {
+ if (!buffer_uptodate(bh))
+ __bh_read(bh, op_flags, false);
+ else
+ unlock_buffer(bh);
+ }
+}
+
+static inline void bh_read_nowait(struct buffer_head *bh, blk_opf_t op_flags)
+{
+ if (!bh_uptodate_or_lock(bh))
+ __bh_read(bh, op_flags, false);
+}
+
+/* Returns 1 if the buffer was already uptodate, 0 on success, and -EIO on error. */
+static inline int bh_read(struct buffer_head *bh, blk_opf_t op_flags)
+{
+ if (bh_uptodate_or_lock(bh))
+ return 1;
+ return __bh_read(bh, op_flags, true);
+}
+
+static inline void bh_read_batch(int nr, struct buffer_head *bhs[])
+{
+ __bh_read_batch(nr, bhs, 0, true);
+}
+
+static inline void bh_readahead_batch(int nr, struct buffer_head *bhs[],
+ blk_opf_t op_flags)
+{
+ __bh_read_batch(nr, bhs, op_flags, false);
+}
+
/**
* __bread() - reads a specified block and returns the bh
* @bdev: the block_device to read from
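A filesystem-side sketch, not part of the patch (example_read_block() is hypothetical), of the helpers that replace the removed ll_rw_block()/bh_submit_read() style submission:

	static int example_read_block(struct super_block *sb, sector_t block)
	{
		struct buffer_head *bh, *ra;

		/* opportunistic, non-blocking readahead of the next block */
		ra = sb_getblk(sb, block + 1);
		if (ra) {
			bh_readahead(ra, REQ_RAHEAD);
			brelse(ra);
		}

		bh = sb_getblk(sb, block);
		if (!bh)
			return -ENOMEM;

		/* synchronous: 1 if already uptodate, 0 on success, <0 on I/O error */
		if (bh_read(bh, 0) < 0) {
			brelse(bh);
			return -EIO;
		}

		brelse(bh);
		return 0;
	}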
diff --git a/include/linux/cache.h b/include/linux/cache.h
index d742c57eaee5..5da1bbd96154 100644
--- a/include/linux/cache.h
+++ b/include/linux/cache.h
@@ -85,4 +85,17 @@
#define cache_line_size() L1_CACHE_BYTES
#endif
+/*
+ * Helper to add padding within a struct to ensure data fall into separate
+ * cachelines.
+ */
+#if defined(CONFIG_SMP)
+struct cacheline_padding {
+ char x[0];
+} ____cacheline_internodealigned_in_smp;
+#define CACHELINE_PADDING(name) struct cacheline_padding name
+#else
+#define CACHELINE_PADDING(name)
+#endif
+
#endif /* __LINUX_CACHE_H */
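A sketch, not part of the patch, of the intended use of CACHELINE_PADDING(): keeping fields written by different CPUs on separate cache lines on SMP builds while adding no size on UP builds:

	struct example_stats {
		/* written by producers */
		atomic_long_t	enqueued;
		CACHELINE_PADDING(_pad1_);
		/* written by consumers */
		atomic_long_t	dequeued;
	};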
diff --git a/include/linux/can/dev.h b/include/linux/can/dev.h
index c3e50e537e39..58f5431a5559 100644
--- a/include/linux/can/dev.h
+++ b/include/linux/can/dev.h
@@ -147,6 +147,11 @@ static inline u32 can_get_static_ctrlmode(struct can_priv *priv)
return priv->ctrlmode & ~priv->ctrlmode_supported;
}
+static inline bool can_is_canxl_dev_mtu(unsigned int mtu)
+{
+ return (mtu >= CANXL_MIN_MTU && mtu <= CANXL_MAX_MTU);
+}
+
void can_setup(struct net_device *dev);
struct net_device *alloc_candev_mqs(int sizeof_priv, unsigned int echo_skb_max,
diff --git a/include/linux/can/skb.h b/include/linux/can/skb.h
index 182749e858b3..1abc25a8d144 100644
--- a/include/linux/can/skb.h
+++ b/include/linux/can/skb.h
@@ -20,7 +20,8 @@ void can_flush_echo_skb(struct net_device *dev);
int can_put_echo_skb(struct sk_buff *skb, struct net_device *dev,
unsigned int idx, unsigned int frame_len);
struct sk_buff *__can_get_echo_skb(struct net_device *dev, unsigned int idx,
- u8 *len_ptr, unsigned int *frame_len_ptr);
+ unsigned int *len_ptr,
+ unsigned int *frame_len_ptr);
unsigned int __must_check can_get_echo_skb(struct net_device *dev,
unsigned int idx,
unsigned int *frame_len_ptr);
@@ -29,6 +30,9 @@ void can_free_echo_skb(struct net_device *dev, unsigned int idx,
struct sk_buff *alloc_can_skb(struct net_device *dev, struct can_frame **cf);
struct sk_buff *alloc_canfd_skb(struct net_device *dev,
struct canfd_frame **cfd);
+struct sk_buff *alloc_canxl_skb(struct net_device *dev,
+ struct canxl_frame **cxl,
+ unsigned int data_len);
struct sk_buff *alloc_can_err_skb(struct net_device *dev,
struct can_frame **cf);
bool can_dropped_invalid_skb(struct net_device *dev, struct sk_buff *skb);
@@ -97,10 +101,59 @@ static inline struct sk_buff *can_create_echo_skb(struct sk_buff *skb)
return nskb;
}
+static inline bool can_is_can_skb(const struct sk_buff *skb)
+{
+ struct can_frame *cf = (struct can_frame *)skb->data;
+
+ /* the CAN specific type of skb is identified by its data length */
+ return (skb->len == CAN_MTU && cf->len <= CAN_MAX_DLEN);
+}
+
static inline bool can_is_canfd_skb(const struct sk_buff *skb)
{
+ struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
+
/* the CAN specific type of skb is identified by its data length */
- return skb->len == CANFD_MTU;
+ return (skb->len == CANFD_MTU && cfd->len <= CANFD_MAX_DLEN);
+}
+
+static inline bool can_is_canxl_skb(const struct sk_buff *skb)
+{
+ const struct canxl_frame *cxl = (struct canxl_frame *)skb->data;
+
+ if (skb->len < CANXL_HDR_SIZE + CANXL_MIN_DLEN || skb->len > CANXL_MTU)
+ return false;
+
+ /* this also checks valid CAN XL data length boundaries */
+ if (skb->len != CANXL_HDR_SIZE + cxl->len)
+ return false;
+
+ return cxl->flags & CANXL_XLF;
+}
+
+/* get length element value from can[|fd|xl]_frame structure */
+static inline unsigned int can_skb_get_len_val(struct sk_buff *skb)
+{
+ const struct canxl_frame *cxl = (struct canxl_frame *)skb->data;
+ const struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
+
+ if (can_is_canxl_skb(skb))
+ return cxl->len;
+
+ return cfd->len;
+}
+
+/* get needed data length inside CAN frame for all frame types (RTR aware) */
+static inline unsigned int can_skb_get_data_len(struct sk_buff *skb)
+{
+ unsigned int len = can_skb_get_len_val(skb);
+ const struct can_frame *cf = (struct can_frame *)skb->data;
+
+ /* RTR frames have an actual length of zero */
+ if (can_is_can_skb(skb) && cf->can_id & CAN_RTR_FLAG)
+ return 0;
+
+ return len;
}
#endif /* !_CAN_SKB_H */
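A receive-path sketch, not part of the patch (example_can_rx_stats() is illustrative), using the new frame-type and length helpers:

	static void example_can_rx_stats(struct net_device *dev, struct sk_buff *skb)
	{
		if (can_is_canxl_skb(skb))
			netdev_dbg(dev, "CAN XL frame, %u data bytes\n",
				   can_skb_get_len_val(skb));

		/* RTR-aware byte accounting for Classical CAN, CAN FD and CAN XL */
		dev->stats.rx_bytes += can_skb_get_data_len(skb);
		dev->stats.rx_packets++;
	}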
diff --git a/include/linux/ceph/messenger.h b/include/linux/ceph/messenger.h
index e7f2fb2fc207..99c1726be6ee 100644
--- a/include/linux/ceph/messenger.h
+++ b/include/linux/ceph/messenger.h
@@ -207,7 +207,6 @@ struct ceph_msg_data_cursor {
struct ceph_msg_data *data; /* current data item */
size_t resid; /* bytes not yet consumed */
- bool last_piece; /* current is last piece */
bool need_crc; /* crc update needed */
union {
#ifdef CONFIG_BLOCK
@@ -498,8 +497,7 @@ void ceph_con_discard_requeued(struct ceph_connection *con, u64 reconnect_seq);
void ceph_msg_data_cursor_init(struct ceph_msg_data_cursor *cursor,
struct ceph_msg *msg, size_t length);
struct page *ceph_msg_data_next(struct ceph_msg_data_cursor *cursor,
- size_t *page_offset, size_t *length,
- bool *last_piece);
+ size_t *page_offset, size_t *length);
void ceph_msg_data_advance(struct ceph_msg_data_cursor *cursor, size_t bytes);
u32 ceph_crc32c_page(u32 crc, struct page *page, unsigned int page_offset,
diff --git a/include/linux/cfi.h b/include/linux/cfi.h
index c6dfc1ed0626..5e134f4ce8b7 100644
--- a/include/linux/cfi.h
+++ b/include/linux/cfi.h
@@ -2,49 +2,38 @@
/*
* Clang Control Flow Integrity (CFI) support.
*
- * Copyright (C) 2021 Google LLC
+ * Copyright (C) 2022 Google LLC
*/
#ifndef _LINUX_CFI_H
#define _LINUX_CFI_H
-#ifdef CONFIG_CFI_CLANG
-typedef void (*cfi_check_fn)(uint64_t id, void *ptr, void *diag);
-
-/* Compiler-generated function in each module, and the kernel */
-extern void __cfi_check(uint64_t id, void *ptr, void *diag);
-
-/*
- * Force the compiler to generate a CFI jump table entry for a function
- * and store the jump table address to __cfi_jt_<function>.
- */
-#define __CFI_ADDRESSABLE(fn, __attr) \
- const void *__cfi_jt_ ## fn __visible __attr = (void *)&fn
-
-#ifdef CONFIG_CFI_CLANG_SHADOW
-
-extern void cfi_module_add(struct module *mod, unsigned long base_addr);
-extern void cfi_module_remove(struct module *mod, unsigned long base_addr);
-
-#else
+#include <linux/bug.h>
+#include <linux/module.h>
-static inline void cfi_module_add(struct module *mod, unsigned long base_addr) {}
-static inline void cfi_module_remove(struct module *mod, unsigned long base_addr) {}
-
-#endif /* CONFIG_CFI_CLANG_SHADOW */
-
-#else /* !CONFIG_CFI_CLANG */
-
-#ifdef CONFIG_X86_KERNEL_IBT
-
-#define __CFI_ADDRESSABLE(fn, __attr) \
- const void *__cfi_jt_ ## fn __visible __attr = (void *)&fn
+#ifdef CONFIG_CFI_CLANG
+enum bug_trap_type report_cfi_failure(struct pt_regs *regs, unsigned long addr,
+ unsigned long *target, u32 type);
-#endif /* CONFIG_X86_KERNEL_IBT */
+static inline enum bug_trap_type report_cfi_failure_noaddr(struct pt_regs *regs,
+ unsigned long addr)
+{
+ return report_cfi_failure(regs, addr, NULL, 0);
+}
+#ifdef CONFIG_ARCH_USES_CFI_TRAPS
+bool is_cfi_trap(unsigned long addr);
+#endif
#endif /* CONFIG_CFI_CLANG */
-#ifndef __CFI_ADDRESSABLE
-#define __CFI_ADDRESSABLE(fn, __attr)
-#endif
+#ifdef CONFIG_MODULES
+#ifdef CONFIG_ARCH_USES_CFI_TRAPS
+void module_cfi_finalize(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs,
+ struct module *mod);
+#else
+static inline void module_cfi_finalize(const Elf_Ehdr *hdr,
+ const Elf_Shdr *sechdrs,
+ struct module *mod) {}
+#endif /* CONFIG_ARCH_USES_CFI_TRAPS */
+#endif /* CONFIG_MODULES */
#endif /* _LINUX_CFI_H */
diff --git a/include/linux/cfi_types.h b/include/linux/cfi_types.h
new file mode 100644
index 000000000000..6b8713675765
--- /dev/null
+++ b/include/linux/cfi_types.h
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Clang Control Flow Integrity (CFI) type definitions.
+ */
+#ifndef _LINUX_CFI_TYPES_H
+#define _LINUX_CFI_TYPES_H
+
+#ifdef __ASSEMBLY__
+#include <linux/linkage.h>
+
+#ifdef CONFIG_CFI_CLANG
+/*
+ * Use the __kcfi_typeid_<function> type identifier symbol to
+ * annotate indirectly called assembly functions. The compiler emits
+ * these symbols for all address-taken function declarations in C
+ * code.
+ */
+#ifndef __CFI_TYPE
+#define __CFI_TYPE(name) \
+ .4byte __kcfi_typeid_##name
+#endif
+
+#define SYM_TYPED_ENTRY(name, linkage, align...) \
+ linkage(name) ASM_NL \
+ align ASM_NL \
+ __CFI_TYPE(name) ASM_NL \
+ name:
+
+#define SYM_TYPED_START(name, linkage, align...) \
+ SYM_TYPED_ENTRY(name, linkage, align)
+
+#else /* CONFIG_CFI_CLANG */
+
+#define SYM_TYPED_START(name, linkage, align...) \
+ SYM_START(name, linkage, align)
+
+#endif /* CONFIG_CFI_CLANG */
+
+#ifndef SYM_TYPED_FUNC_START
+#define SYM_TYPED_FUNC_START(name) \
+ SYM_TYPED_START(name, SYM_L_GLOBAL, SYM_A_ALIGN)
+#endif
+
+#endif /* __ASSEMBLY__ */
+#endif /* _LINUX_CFI_TYPES_H */
diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
index 4bcf56b3491c..6e01f10f0d88 100644
--- a/include/linux/cgroup-defs.h
+++ b/include/linux/cgroup-defs.h
@@ -126,11 +126,11 @@ enum {
CFTYPE_NO_PREFIX = (1 << 3), /* (DON'T USE FOR NEW FILES) no subsys prefix */
CFTYPE_WORLD_WRITABLE = (1 << 4), /* (DON'T USE FOR NEW FILES) S_IWUGO */
CFTYPE_DEBUG = (1 << 5), /* create when cgroup_debug */
- CFTYPE_PRESSURE = (1 << 6), /* only if pressure feature is enabled */
/* internal flags, do not use outside cgroup core proper */
__CFTYPE_ONLY_ON_DFL = (1 << 16), /* only on default hierarchy */
__CFTYPE_NOT_ON_DFL = (1 << 17), /* not on default hierarchy */
+ __CFTYPE_ADDED = (1 << 18),
};
/*
@@ -384,7 +384,7 @@ struct cgroup {
/*
* The depth this cgroup is at. The root is at depth zero and each
* step down the hierarchy increments the level. This along with
- * ancestor_ids[] can determine whether a given cgroup is a
+ * ancestors[] can determine whether a given cgroup is a
* descendant of another without traversing the hierarchy.
*/
int level;
@@ -428,6 +428,9 @@ struct cgroup {
struct cgroup_file procs_file; /* handle for "cgroup.procs" */
struct cgroup_file events_file; /* handle for "cgroup.events" */
+ /* handles for "{cpu,memory,io,irq}.pressure" */
+ struct cgroup_file psi_files[NR_PSI_RESOURCES];
+
/*
* The bitmask of subsystems enabled on the child cgroups.
* ->subtree_control is the one configured through
@@ -504,8 +507,8 @@ struct cgroup {
/* Used to store internal freezer state */
struct cgroup_freezer_state freezer;
- /* ids of the ancestors at each level including self */
- u64 ancestor_ids[];
+ /* All ancestors including self */
+ struct cgroup *ancestors[];
};
/*
@@ -522,11 +525,15 @@ struct cgroup_root {
/* Unique id for this hierarchy. */
int hierarchy_id;
- /* The root cgroup. Root is destroyed on its release. */
+ /*
+ * The root cgroup. The containing cgroup_root will be destroyed on its
+ * release. cgrp->ancestors[0] overflows into the following field, so
+ * cgrp_ancestor_storage must immediately follow.
+ */
struct cgroup cgrp;
- /* for cgrp->ancestor_ids[0] */
- u64 cgrp_ancestor_id_storage;
+ /* must follow cgrp for cgrp->ancestors[0], see above */
+ struct cgroup *cgrp_ancestor_storage;
/* Number of cgroups in the hierarchy, used only for /proc/cgroups */
atomic_t nr_cgrps;
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index ac5d0515680e..f2a9f2274c3b 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -114,6 +114,7 @@ int cgroup_add_dfl_cftypes(struct cgroup_subsys *ss, struct cftype *cfts);
int cgroup_add_legacy_cftypes(struct cgroup_subsys *ss, struct cftype *cfts);
int cgroup_rm_cftypes(struct cftype *cfts);
void cgroup_file_notify(struct cgroup_file *cfile);
+void cgroup_file_show(struct cgroup_file *cfile, bool show);
int task_cgroup_path(struct task_struct *task, char *buf, size_t buflen);
int cgroupstats_build(struct cgroupstats *stats, struct dentry *dentry);
@@ -432,6 +433,18 @@ static inline void cgroup_put(struct cgroup *cgrp)
css_put(&cgrp->self);
}
+extern struct mutex cgroup_mutex;
+
+static inline void cgroup_lock(void)
+{
+ mutex_lock(&cgroup_mutex);
+}
+
+static inline void cgroup_unlock(void)
+{
+ mutex_unlock(&cgroup_mutex);
+}
+
/**
* task_css_set_check - obtain a task's css_set with extra access conditions
* @task: the task to obtain css_set for
@@ -446,7 +459,6 @@ static inline void cgroup_put(struct cgroup *cgrp)
* as locks used during the cgroup_subsys::attach() methods.
*/
#ifdef CONFIG_PROVE_RCU
-extern struct mutex cgroup_mutex;
extern spinlock_t css_set_lock;
#define task_css_set_check(task, __c) \
rcu_dereference_check((task)->cgroups, \
@@ -574,7 +586,7 @@ static inline bool cgroup_is_descendant(struct cgroup *cgrp,
{
if (cgrp->root != ancestor->root || cgrp->level < ancestor->level)
return false;
- return cgrp->ancestor_ids[ancestor->level] == cgroup_id(ancestor);
+ return cgrp->ancestors[ancestor->level] == ancestor;
}
/**
@@ -591,11 +603,9 @@ static inline bool cgroup_is_descendant(struct cgroup *cgrp,
static inline struct cgroup *cgroup_ancestor(struct cgroup *cgrp,
int ancestor_level)
{
- if (cgrp->level < ancestor_level)
+ if (ancestor_level < 0 || ancestor_level > cgrp->level)
return NULL;
- while (cgrp && cgrp->level > ancestor_level)
- cgrp = cgroup_parent(cgrp);
- return cgrp;
+ return cgrp->ancestors[ancestor_level];
}
/**
@@ -672,11 +682,6 @@ static inline void pr_cont_cgroup_path(struct cgroup *cgrp)
pr_cont_kernfs_path(cgrp->kn);
}
-static inline struct psi_group *cgroup_psi(struct cgroup *cgrp)
-{
- return cgrp->psi;
-}
-
bool cgroup_psi_enabled(void);
static inline void cgroup_init_kthreadd(void)
@@ -708,6 +713,8 @@ struct cgroup;
static inline u64 cgroup_id(const struct cgroup *cgrp) { return 1; }
static inline void css_get(struct cgroup_subsys_state *css) {}
static inline void css_put(struct cgroup_subsys_state *css) {}
+static inline void cgroup_lock(void) {}
+static inline void cgroup_unlock(void) {}
static inline int cgroup_attach_task_all(struct task_struct *from,
struct task_struct *t) { return 0; }
static inline int cgroupstats_build(struct cgroupstats *stats,
@@ -747,11 +754,6 @@ static inline bool task_under_cgroup_hierarchy(struct task_struct *task,
static inline void cgroup_path_from_kernfs_id(u64 id, char *buf, size_t buflen)
{}
-
-static inline struct cgroup *cgroup_get_from_id(u64 id)
-{
- return NULL;
-}
#endif /* !CONFIG_CGROUPS */
#ifdef CONFIG_CGROUPS
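A sketch, not part of the patch, of what the ancestors[] array buys callers: ancestry checks and lookups become a pointer compare and an array index instead of a walk up the hierarchy:

	static struct cgroup *example_level_one_if_related(struct cgroup *cgrp,
							   struct cgroup *ancestor)
	{
		if (!cgroup_is_descendant(cgrp, ancestor))	/* pointer compare */
			return NULL;
		return cgroup_ancestor(cgrp, 1);	/* direct index, may be NULL */
	}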
diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h
index 1615010aa0ec..2108b5695327 100644
--- a/include/linux/clk-provider.h
+++ b/include/linux/clk-provider.h
@@ -350,7 +350,7 @@ struct clk_hw *__clk_hw_register_fixed_rate(struct device *dev,
const char *parent_name, const struct clk_hw *parent_hw,
const struct clk_parent_data *parent_data, unsigned long flags,
unsigned long fixed_rate, unsigned long fixed_accuracy,
- unsigned long clk_fixed_flags);
+ unsigned long clk_fixed_flags, bool devm);
struct clk *clk_register_fixed_rate(struct device *dev, const char *name,
const char *parent_name, unsigned long flags,
unsigned long fixed_rate);
@@ -365,7 +365,20 @@ struct clk *clk_register_fixed_rate(struct device *dev, const char *name,
*/
#define clk_hw_register_fixed_rate(dev, name, parent_name, flags, fixed_rate) \
__clk_hw_register_fixed_rate((dev), NULL, (name), (parent_name), NULL, \
- NULL, (flags), (fixed_rate), 0, 0)
+ NULL, (flags), (fixed_rate), 0, 0, false)
+
+/**
+ * devm_clk_hw_register_fixed_rate - register fixed-rate clock with the clock
+ * framework
+ * @dev: device that is registering this clock
+ * @name: name of this clock
+ * @parent_name: name of clock's parent
+ * @flags: framework-specific flags
+ * @fixed_rate: non-adjustable clock rate
+ */
+#define devm_clk_hw_register_fixed_rate(dev, name, parent_name, flags, fixed_rate) \
+ __clk_hw_register_fixed_rate((dev), NULL, (name), (parent_name), NULL, \
+ NULL, (flags), (fixed_rate), 0, 0, true)
/**
* clk_hw_register_fixed_rate_parent_hw - register fixed-rate clock with
* the clock framework
@@ -378,7 +391,7 @@ struct clk *clk_register_fixed_rate(struct device *dev, const char *name,
#define clk_hw_register_fixed_rate_parent_hw(dev, name, parent_hw, flags, \
fixed_rate) \
__clk_hw_register_fixed_rate((dev), NULL, (name), NULL, (parent_hw), \
- NULL, (flags), (fixed_rate), 0, 0)
+ NULL, (flags), (fixed_rate), 0, 0, false)
/**
* clk_hw_register_fixed_rate_parent_data - register fixed-rate clock with
* the clock framework
@@ -392,7 +405,7 @@ struct clk *clk_register_fixed_rate(struct device *dev, const char *name,
fixed_rate) \
__clk_hw_register_fixed_rate((dev), NULL, (name), NULL, NULL, \
(parent_data), (flags), (fixed_rate), 0, \
- 0)
+ 0, false)
/**
* clk_hw_register_fixed_rate_with_accuracy - register fixed-rate clock with
* the clock framework
@@ -408,7 +421,7 @@ struct clk *clk_register_fixed_rate(struct device *dev, const char *name,
fixed_accuracy) \
__clk_hw_register_fixed_rate((dev), NULL, (name), (parent_name), \
NULL, NULL, (flags), (fixed_rate), \
- (fixed_accuracy), 0)
+ (fixed_accuracy), 0, false)
/**
* clk_hw_register_fixed_rate_with_accuracy_parent_hw - register fixed-rate
* clock with the clock framework
@@ -423,7 +436,7 @@ struct clk *clk_register_fixed_rate(struct device *dev, const char *name,
parent_hw, flags, fixed_rate, fixed_accuracy) \
__clk_hw_register_fixed_rate((dev), NULL, (name), NULL, (parent_hw) \
NULL, NULL, (flags), (fixed_rate), \
- (fixed_accuracy), 0)
+ (fixed_accuracy), 0, false)
/**
* clk_hw_register_fixed_rate_with_accuracy_parent_data - register fixed-rate
* clock with the clock framework
@@ -438,7 +451,21 @@ struct clk *clk_register_fixed_rate(struct device *dev, const char *name,
parent_data, flags, fixed_rate, fixed_accuracy) \
__clk_hw_register_fixed_rate((dev), NULL, (name), NULL, NULL, \
(parent_data), NULL, (flags), \
- (fixed_rate), (fixed_accuracy), 0)
+ (fixed_rate), (fixed_accuracy), 0, false)
+/**
+ * clk_hw_register_fixed_rate_parent_accuracy - register fixed-rate clock with
+ * the clock framework
+ * @dev: device that is registering this clock
+ * @name: name of this clock
+ * @parent_data: parent clock data, used in place of a parent name
+ * @flags: framework-specific flags
+ * @fixed_rate: non-adjustable clock rate
+ */
+#define clk_hw_register_fixed_rate_parent_accuracy(dev, name, parent_data, \
+ flags, fixed_rate) \
+ __clk_hw_register_fixed_rate((dev), NULL, (name), NULL, NULL, \
+ (parent_data), (flags), (fixed_rate), 0, \
+ CLK_FIXED_RATE_PARENT_ACCURACY, false)
void clk_unregister_fixed_rate(struct clk *clk);
void clk_hw_unregister_fixed_rate(struct clk_hw *hw);
@@ -957,6 +984,13 @@ struct clk *clk_register_mux_table(struct device *dev, const char *name,
(parent_names), NULL, NULL, (flags), (reg), \
(shift), (mask), (clk_mux_flags), (table), \
(lock))
+#define clk_hw_register_mux_table_parent_data(dev, name, parent_data, \
+ num_parents, flags, reg, shift, mask, \
+ clk_mux_flags, table, lock) \
+ __clk_hw_register_mux((dev), NULL, (name), (num_parents), \
+ NULL, NULL, (parent_data), (flags), (reg), \
+ (shift), (mask), (clk_mux_flags), (table), \
+ (lock))
#define clk_hw_register_mux(dev, name, parent_names, num_parents, flags, reg, \
shift, width, clk_mux_flags, lock) \
__clk_hw_register_mux((dev), NULL, (name), (num_parents), \
@@ -974,6 +1008,13 @@ struct clk *clk_register_mux_table(struct device *dev, const char *name,
__clk_hw_register_mux((dev), NULL, (name), (num_parents), NULL, NULL, \
(parent_data), (flags), (reg), (shift), \
BIT((width)) - 1, (clk_mux_flags), NULL, (lock))
+#define clk_hw_register_mux_parent_data_table(dev, name, parent_data, \
+ num_parents, flags, reg, shift, \
+ width, clk_mux_flags, table, \
+ lock) \
+ __clk_hw_register_mux((dev), NULL, (name), (num_parents), NULL, NULL, \
+ (parent_data), (flags), (reg), (shift), \
+ BIT((width)) - 1, (clk_mux_flags), table, (lock))
#define devm_clk_hw_register_mux(dev, name, parent_names, num_parents, flags, reg, \
shift, width, clk_mux_flags, lock) \
__devm_clk_hw_register_mux((dev), NULL, (name), (num_parents), \
@@ -987,6 +1028,13 @@ struct clk *clk_register_mux_table(struct device *dev, const char *name,
(parent_hws), NULL, (flags), (reg), \
(shift), BIT((width)) - 1, \
(clk_mux_flags), NULL, (lock))
+#define devm_clk_hw_register_mux_parent_data_table(dev, name, parent_data, \
+ num_parents, flags, reg, shift, \
+ width, clk_mux_flags, table, \
+ lock) \
+ __devm_clk_hw_register_mux((dev), NULL, (name), (num_parents), NULL, \
+ NULL, (parent_data), (flags), (reg), (shift), \
+ BIT((width)) - 1, (clk_mux_flags), table, (lock))
int clk_mux_val_to_index(struct clk_hw *hw, const u32 *table, unsigned int flags,
unsigned int val);
@@ -1454,7 +1502,7 @@ int devm_of_clk_add_hw_provider(struct device *dev,
void *data),
void *data);
void of_clk_del_provider(struct device_node *np);
-void devm_of_clk_del_provider(struct device *dev);
+
struct clk *of_clk_src_simple_get(struct of_phandle_args *clkspec,
void *data);
struct clk_hw *of_clk_hw_simple_get(struct of_phandle_args *clkspec,
@@ -1491,7 +1539,7 @@ static inline int devm_of_clk_add_hw_provider(struct device *dev,
return 0;
}
static inline void of_clk_del_provider(struct device_node *np) {}
-static inline void devm_of_clk_del_provider(struct device *dev) {}
+
static inline struct clk *of_clk_src_simple_get(
struct of_phandle_args *clkspec, void *data)
{
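A probe-time sketch, not part of the patch (driver, device and clock names are illustrative), of the new devm_ fixed-rate helper; the managed registration removes the need for an explicit unregister on the error and remove paths:

	static int example_clk_probe(struct platform_device *pdev)
	{
		struct clk_hw *hw;

		hw = devm_clk_hw_register_fixed_rate(&pdev->dev, "osc24m", NULL, 0,
						     24000000);
		if (IS_ERR(hw))
			return PTR_ERR(hw);

		return devm_of_clk_add_hw_provider(&pdev->dev, of_clk_hw_simple_get,
						   hw);
	}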
diff --git a/include/linux/clk/davinci.h b/include/linux/clk/davinci.h
index 8a7b5cd7eac0..f6ebab6228c2 100644
--- a/include/linux/clk/davinci.h
+++ b/include/linux/clk/davinci.h
@@ -28,13 +28,5 @@ int dm365_pll1_init(struct device *dev, void __iomem *base, struct regmap *cfgch
int dm365_pll2_init(struct device *dev, void __iomem *base, struct regmap *cfgchip);
int dm365_psc_init(struct device *dev, void __iomem *base);
#endif
-#ifdef CONFIG_ARCH_DAVINCI_DM644x
-int dm644x_pll1_init(struct device *dev, void __iomem *base, struct regmap *cfgchip);
-int dm644x_psc_init(struct device *dev, void __iomem *base);
-#endif
-#ifdef CONFIG_ARCH_DAVINCI_DM646x
-int dm646x_pll1_init(struct device *dev, void __iomem *base, struct regmap *cfgchip);
-int dm646x_psc_init(struct device *dev, void __iomem *base);
-#endif
#endif /* __LINUX_CLK_DAVINCI_PLL_H___ */
diff --git a/include/linux/clk/spear.h b/include/linux/clk/spear.h
index a64d034ceddd..eaf95ca656f8 100644
--- a/include/linux/clk/spear.h
+++ b/include/linux/clk/spear.h
@@ -8,6 +8,20 @@
#ifndef __LINUX_CLK_SPEAR_H
#define __LINUX_CLK_SPEAR_H
+#ifdef CONFIG_ARCH_SPEAR3XX
+void __init spear3xx_clk_init(void __iomem *misc_base,
+ void __iomem *soc_config_base);
+#else
+static inline void __init spear3xx_clk_init(void __iomem *misc_base,
+ void __iomem *soc_config_base) {}
+#endif
+
+#ifdef CONFIG_ARCH_SPEAR6XX
+void __init spear6xx_clk_init(void __iomem *misc_base);
+#else
+static inline void __init spear6xx_clk_init(void __iomem *misc_base) {}
+#endif
+
#ifdef CONFIG_MACH_SPEAR1310
void __init spear1310_clk_init(void __iomem *misc_base, void __iomem *ras_base);
#else
diff --git a/include/linux/clkdev.h b/include/linux/clkdev.h
index 8a8423eb8e9a..45570bc21a43 100644
--- a/include/linux/clkdev.h
+++ b/include/linux/clkdev.h
@@ -46,6 +46,4 @@ int clk_hw_register_clkdev(struct clk_hw *, const char *, const char *);
int devm_clk_hw_register_clkdev(struct device *dev, struct clk_hw *hw,
const char *con_id, const char *dev_id);
-void devm_clk_release_clkdev(struct device *dev, const char *con_id,
- const char *dev_id);
#endif
diff --git a/include/linux/compiler-clang.h b/include/linux/compiler-clang.h
index c84fec767445..6cfd6902bd5b 100644
--- a/include/linux/compiler-clang.h
+++ b/include/linux/compiler-clang.h
@@ -51,6 +51,29 @@
#define __no_sanitize_undefined
#endif
+#if __has_feature(memory_sanitizer)
+#define __SANITIZE_MEMORY__
+/*
+ * Unlike other sanitizers, KMSAN still inserts code into functions marked with
+ * no_sanitize("kernel-memory"). Using disable_sanitizer_instrumentation
+ * provides the behavior consistent with other __no_sanitize_ attributes,
+ * guaranteeing that __no_sanitize_memory functions remain uninstrumented.
+ */
+#define __no_sanitize_memory __disable_sanitizer_instrumentation
+
+/*
+ * The __no_kmsan_checks attribute ensures that a function does not produce
+ * false positive reports by:
+ * - initializing all local variables and memory stores in this function;
+ * - skipping all shadow checks;
+ * - passing initialized arguments to this function's callees.
+ */
+#define __no_kmsan_checks __attribute__((no_sanitize("kernel-memory")))
+#else
+#define __no_sanitize_memory
+#define __no_kmsan_checks
+#endif
+
/*
* Support for __has_feature(coverage_sanitizer) was added in Clang 13 together
* with no_sanitize("coverage"). Prior versions of Clang support coverage
@@ -66,17 +89,9 @@
# define __noscs __attribute__((__no_sanitize__("shadow-call-stack")))
#endif
-#define __nocfi __attribute__((__no_sanitize__("cfi")))
-#define __cficanonical __attribute__((__cfi_canonical_jump_table__))
-
-#if defined(CONFIG_CFI_CLANG)
-/*
- * With CONFIG_CFI_CLANG, the compiler replaces function address
- * references with the address of the function's CFI jump table
- * entry. The function_nocfi macro always returns the address of the
- * actual function instead.
- */
-#define function_nocfi(x) __builtin_function_start(x)
+#if __has_feature(kcfi)
+/* Disable CFI checking inside a function. */
+#define __nocfi __attribute__((__no_sanitize__("kcfi")))
#endif
/*
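A sketch, not part of the patch (both functions are hypothetical), of where the two annotations differ: __no_sanitize_memory keeps KMSAN instrumentation out of a function entirely, while __no_kmsan_checks only suppresses its false-positive reports:

	/* runs before the KMSAN runtime is ready, so keep it uninstrumented */
	static __no_sanitize_memory void example_early_setup(void)
	{
		/* early-boot work that must not call into KMSAN */
	}

	/* reads device memory that KMSAN cannot track as initialized */
	static __no_kmsan_checks u32 example_read_reg(const void __iomem *reg)
	{
		return readl(reg);
	}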
diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
index 9b157b71036f..f55a37efdb97 100644
--- a/include/linux/compiler-gcc.h
+++ b/include/linux/compiler-gcc.h
@@ -115,6 +115,12 @@
#endif
/*
+ * GCC does not support KMSAN.
+ */
+#define __no_sanitize_memory
+#define __no_kmsan_checks
+
+/*
* Turn individual warnings and errors on and off locally, depending
* on version.
*/
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index 7713d7bcdaea..973a1bfd7ef5 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -203,16 +203,6 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
__v; \
})
-/*
- * With CONFIG_CFI_CLANG, the compiler replaces function addresses in
- * instrumented C code with jump table addresses. Architectures that
- * support CFI can define this macro to return the actual function address
- * when needed.
- */
-#ifndef function_nocfi
-#define function_nocfi(x) (x)
-#endif
-
#endif /* __KERNEL__ */
/*
@@ -221,9 +211,11 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
* otherwise, or eliminated entirely due to lack of references that are
* visible to the compiler.
*/
-#define __ADDRESSABLE(sym) \
- static void * __section(".discard.addressable") __used \
+#define ___ADDRESSABLE(sym, __attrs) \
+ static void * __used __attrs \
__UNIQUE_ID(__PASTE(__addressable_,sym)) = (void *)&sym;
+#define __ADDRESSABLE(sym) \
+ ___ADDRESSABLE(sym, __section(".discard.addressable"))
/**
* offset_to_ptr - convert a relative memory offset to an absolute pointer
diff --git a/include/linux/compiler_attributes.h b/include/linux/compiler_attributes.h
index 445e80517cab..898b3458b24a 100644
--- a/include/linux/compiler_attributes.h
+++ b/include/linux/compiler_attributes.h
@@ -35,7 +35,8 @@
/*
* Note: do not use this directly. Instead, use __alloc_size() since it is conditionally
- * available and includes other attributes.
+ * available and includes other attributes. For GCC < 9.1, __alloc_size__ gets undefined
+ * in compiler-gcc.h because those compiler versions misbehave with it.
*
* gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-alloc_005fsize-function-attribute
* clang: https://clang.llvm.org/docs/AttributeReference.html#alloc-size
@@ -371,4 +372,11 @@
*/
#define __weak __attribute__((__weak__))
+/*
+ * Used by functions that use '__builtin_return_address'. These functions
+ * must not be split or inlined, since that can make
+ * '__builtin_return_address' return an unexpected address.
+ */
+#define __fix_address noinline __noclone
+
#endif /* __LINUX_COMPILER_ATTRIBUTES_H */
diff --git a/include/linux/compiler_types.h b/include/linux/compiler_types.h
index 4f2a819fd60a..eb0466236661 100644
--- a/include/linux/compiler_types.h
+++ b/include/linux/compiler_types.h
@@ -4,8 +4,12 @@
#ifndef __ASSEMBLY__
+/*
+ * Skipped when running bindgen due to a libclang issue;
+ * see https://github.com/rust-lang/rust-bindgen/issues/2244.
+ */
#if defined(CONFIG_DEBUG_INFO_BTF) && defined(CONFIG_PAHOLE_HAS_BTF_TAG) && \
- __has_attribute(btf_type_tag)
+ __has_attribute(btf_type_tag) && !defined(__BINDGEN__)
# define BTF_TYPE_TAG(value) __attribute__((btf_type_tag(#value)))
#else
# define BTF_TYPE_TAG(value) /* nothing */
@@ -229,7 +233,8 @@ struct ftrace_likely_data {
/* Section for code which can't be instrumented at all */
#define noinstr \
noinline notrace __attribute((__section__(".noinstr.text"))) \
- __no_kcsan __no_sanitize_address __no_profile __no_sanitize_coverage
+ __no_kcsan __no_sanitize_address __no_profile __no_sanitize_coverage \
+ __no_sanitize_memory
#endif /* __KERNEL__ */
@@ -265,20 +270,18 @@ struct ftrace_likely_data {
# define __nocfi
#endif
-#ifndef __cficanonical
-# define __cficanonical
-#endif
-
/*
* Any place that could be marked with the "alloc_size" attribute is also
- * a place to be marked with the "malloc" attribute. Do this as part of the
- * __alloc_size macro to avoid redundant attributes and to avoid missing a
- * __malloc marking.
+ * a place to be marked with the "malloc" attribute, except those that may
+ * be performing a _reallocation_, as that may alias the existing pointer.
+ * For these, use __realloc_size().
*/
#ifdef __alloc_size__
# define __alloc_size(x, ...) __alloc_size__(x, ## __VA_ARGS__) __malloc
+# define __realloc_size(x, ...) __alloc_size__(x, ## __VA_ARGS__)
#else
# define __alloc_size(x, ...) __malloc
+# define __realloc_size(x, ...)
#endif
#ifndef asm_volatile_goto
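A sketch, not part of the patch (example_resize() is hypothetical), of where __realloc_size() is the right annotation: a resizing helper may return an alias of its input pointer, so it must not get the __malloc attribute that __alloc_size() implies:

	/* result may alias @p, hence __realloc_size() rather than __alloc_size() */
	void *example_resize(void *p, size_t new_size) __realloc_size(2);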
diff --git a/include/linux/completion.h b/include/linux/completion.h
index 51d9ab079629..62b32b19e0a8 100644
--- a/include/linux/completion.h
+++ b/include/linux/completion.h
@@ -103,6 +103,7 @@ extern void wait_for_completion(struct completion *);
extern void wait_for_completion_io(struct completion *);
extern int wait_for_completion_interruptible(struct completion *x);
extern int wait_for_completion_killable(struct completion *x);
+extern int wait_for_completion_state(struct completion *x, unsigned int state);
extern unsigned long wait_for_completion_timeout(struct completion *x,
unsigned long timeout);
extern unsigned long wait_for_completion_io_timeout(struct completion *x,
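A sketch, not part of the patch (assumes a struct completion named done declared by the caller), of the new interface: the caller chooses the task state explicitly, for instance to make a killable wait also freezable:

	/* returns -ERESTARTSYS if a fatal signal arrives first, 0 on completion */
	err = wait_for_completion_state(&done, TASK_KILLABLE | TASK_FREEZABLE);
	if (err)
		return err;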
diff --git a/include/linux/coresight.h b/include/linux/coresight.h
index 9f445f09fcfe..1554021231f9 100644
--- a/include/linux/coresight.h
+++ b/include/linux/coresight.h
@@ -372,6 +372,29 @@ static inline u32 csdev_access_relaxed_read32(struct csdev_access *csa,
return csa->read(offset, true, false);
}
+static inline u64 csdev_access_relaxed_read_pair(struct csdev_access *csa,
+ u32 lo_offset, u32 hi_offset)
+{
+ if (likely(csa->io_mem)) {
+ return readl_relaxed(csa->base + lo_offset) |
+ ((u64)readl_relaxed(csa->base + hi_offset) << 32);
+ }
+
+ return csa->read(lo_offset, true, false) | (csa->read(hi_offset, true, false) << 32);
+}
+
+static inline void csdev_access_relaxed_write_pair(struct csdev_access *csa, u64 val,
+ u32 lo_offset, u32 hi_offset)
+{
+ if (likely(csa->io_mem)) {
+ writel_relaxed((u32)val, csa->base + lo_offset);
+ writel_relaxed((u32)(val >> 32), csa->base + hi_offset);
+ } else {
+ csa->write((u32)val, lo_offset, true, false);
+ csa->write((u32)(val >> 32), hi_offset, true, false);
+ }
+}
+
static inline u32 csdev_access_read32(struct csdev_access *csa, u32 offset)
{
if (likely(csa->io_mem))
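A sketch, not part of the patch (the register offsets are illustrative, not taken from a real TRM), of accessing a 64-bit value split across a LO/HI register pair:

	u64 val;

	val = csdev_access_relaxed_read_pair(csa, 0x030 /* LO */, 0x034 /* HI */);
	csdev_access_relaxed_write_pair(csa, val, 0x030, 0x034);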
diff --git a/include/linux/counter.h b/include/linux/counter.h
index 1fe17f5adb09..c41fa602ed28 100644
--- a/include/linux/counter.h
+++ b/include/linux/counter.h
@@ -31,6 +31,8 @@ enum counter_comp_type {
COUNTER_COMP_ENUM,
COUNTER_COMP_COUNT_DIRECTION,
COUNTER_COMP_COUNT_MODE,
+ COUNTER_COMP_SIGNAL_POLARITY,
+ COUNTER_COMP_ARRAY,
};
/**
@@ -38,66 +40,114 @@ enum counter_comp_type {
* @type: Counter component data type
* @name: device-specific component name
* @priv: component-relevant data
- * @action_read: Synapse action mode read callback. The read value of the
+ * @action_read: Synapse action mode read callback. The read value of the
* respective Synapse action mode should be passed back via
* the action parameter.
- * @device_u8_read: Device u8 component read callback. The read value of the
+ * @device_u8_read: Device u8 component read callback. The read value of the
* respective Device u8 component should be passed back via
* the val parameter.
- * @count_u8_read: Count u8 component read callback. The read value of the
+ * @count_u8_read: Count u8 component read callback. The read value of the
* respective Count u8 component should be passed back via
* the val parameter.
- * @signal_u8_read: Signal u8 component read callback. The read value of the
+ * @signal_u8_read: Signal u8 component read callback. The read value of the
* respective Signal u8 component should be passed back via
* the val parameter.
- * @device_u32_read: Device u32 component read callback. The read value of
+ * @device_u32_read: Device u32 component read callback. The read value of
* the respective Device u32 component should be passed
* back via the val parameter.
- * @count_u32_read: Count u32 component read callback. The read value of the
+ * @count_u32_read: Count u32 component read callback. The read value of the
* respective Count u32 component should be passed back via
* the val parameter.
- * @signal_u32_read: Signal u32 component read callback. The read value of
+ * @signal_u32_read: Signal u32 component read callback. The read value of
* the respective Signal u32 component should be passed
* back via the val parameter.
- * @device_u64_read: Device u64 component read callback. The read value of
+ * @device_u64_read: Device u64 component read callback. The read value of
* the respective Device u64 component should be passed
* back via the val parameter.
- * @count_u64_read: Count u64 component read callback. The read value of the
+ * @count_u64_read: Count u64 component read callback. The read value of the
* respective Count u64 component should be passed back via
* the val parameter.
- * @signal_u64_read: Signal u64 component read callback. The read value of
+ * @signal_u64_read: Signal u64 component read callback. The read value of
* the respective Signal u64 component should be passed
* back via the val parameter.
- * @action_write: Synapse action mode write callback. The write value of
+ * @signal_array_u32_read: Signal u32 array component read callback. The
+ * index of the respective Signal u32 array
+ * component element is passed via the idx
+ * parameter. The read value of the respective
+ * Signal u32 array component element should be
+ * passed back via the val parameter.
+ * @device_array_u64_read: Device u64 array component read callback. The
+ * index of the respective Device u64 array
+ * component element is passed via the idx
+ * parameter. The read value of the respective
+ * Device u64 array component element should be
+ * passed back via the val parameter.
+ * @count_array_u64_read: Count u64 array component read callback. The
+ * index of the respective Count u64 array
+ * component element is passed via the idx
+ * parameter. The read value of the respective
+ * Count u64 array component element should be
+ * passed back via the val parameter.
+ * @signal_array_u64_read: Signal u64 array component read callback. The
+ * index of the respective Signal u64 array
+ * component element is passed via the idx
+ * parameter. The read value of the respective
+ * Signal u64 array component element should be
+ * passed back via the val parameter.
+ * @action_write: Synapse action mode write callback. The write value of
* the respective Synapse action mode is passed via the
* action parameter.
- * @device_u8_write: Device u8 component write callback. The write value of
+ * @device_u8_write: Device u8 component write callback. The write value of
* the respective Device u8 component is passed via the val
* parameter.
- * @count_u8_write: Count u8 component write callback. The write value of
+ * @count_u8_write: Count u8 component write callback. The write value of
* the respective Count u8 component is passed via the val
* parameter.
- * @signal_u8_write: Signal u8 component write callback. The write value of
+ * @signal_u8_write: Signal u8 component write callback. The write value of
* the respective Signal u8 component is passed via the val
* parameter.
- * @device_u32_write: Device u32 component write callback. The write value of
+ * @device_u32_write: Device u32 component write callback. The write value of
* the respective Device u32 component is passed via the
* val parameter.
- * @count_u32_write: Count u32 component write callback. The write value of
+ * @count_u32_write: Count u32 component write callback. The write value of
* the respective Count u32 component is passed via the val
* parameter.
- * @signal_u32_write: Signal u32 component write callback. The write value of
+ * @signal_u32_write: Signal u32 component write callback. The write value of
* the respective Signal u32 component is passed via the
* val parameter.
- * @device_u64_write: Device u64 component write callback. The write value of
+ * @device_u64_write: Device u64 component write callback. The write value of
* the respective Device u64 component is passed via the
* val parameter.
- * @count_u64_write: Count u64 component write callback. The write value of
+ * @count_u64_write: Count u64 component write callback. The write value of
* the respective Count u64 component is passed via the val
* parameter.
- * @signal_u64_write: Signal u64 component write callback. The write value of
+ * @signal_u64_write: Signal u64 component write callback. The write value of
* the respective Signal u64 component is passed via the
* val parameter.
+ * @signal_array_u32_write: Signal u32 array component write callback. The
+ * index of the respective Signal u32 array
+ * component element is passed via the idx
+ * parameter. The write value of the respective
+ * Signal u32 array component element is passed via
+ * the val parameter.
+ * @device_array_u64_write: Device u64 array component write callback. The
+ * index of the respective Device u64 array
+ * component element is passed via the idx
+ * parameter. The write value of the respective
+ * Device u64 array component element is passed via
+ * the val parameter.
+ * @count_array_u64_write: Count u64 array component write callback. The
+ * index of the respective Count u64 array
+ * component element is passed via the idx
+ * parameter. The write value of the respective
+ * Count u64 array component element is passed via
+ * the val parameter.
+ * @signal_array_u64_write: Signal u64 array component write callback. The
+ * index of the respective Signal u64 array
+ * component element is passed via the idx
+ * parameter. The write value of the respective
+ * Signal u64 array component element is passed via
+ * the val parameter.
*/
struct counter_comp {
enum counter_comp_type type;
@@ -125,6 +175,17 @@ struct counter_comp {
struct counter_count *count, u64 *val);
int (*signal_u64_read)(struct counter_device *counter,
struct counter_signal *signal, u64 *val);
+ int (*signal_array_u32_read)(struct counter_device *counter,
+ struct counter_signal *signal,
+ size_t idx, u32 *val);
+ int (*device_array_u64_read)(struct counter_device *counter,
+ size_t idx, u64 *val);
+ int (*count_array_u64_read)(struct counter_device *counter,
+ struct counter_count *count,
+ size_t idx, u64 *val);
+ int (*signal_array_u64_read)(struct counter_device *counter,
+ struct counter_signal *signal,
+ size_t idx, u64 *val);
};
union {
int (*action_write)(struct counter_device *counter,
@@ -148,6 +209,17 @@ struct counter_comp {
struct counter_count *count, u64 val);
int (*signal_u64_write)(struct counter_device *counter,
struct counter_signal *signal, u64 val);
+ int (*signal_array_u32_write)(struct counter_device *counter,
+ struct counter_signal *signal,
+ size_t idx, u32 val);
+ int (*device_array_u64_write)(struct counter_device *counter,
+ size_t idx, u64 val);
+ int (*count_array_u64_write)(struct counter_device *counter,
+ struct counter_count *count,
+ size_t idx, u64 val);
+ int (*signal_array_u64_write)(struct counter_device *counter,
+ struct counter_signal *signal,
+ size_t idx, u64 val);
};
};
@@ -452,6 +524,60 @@ struct counter_available {
.priv = &(_available), \
}
+struct counter_array {
+ enum counter_comp_type type;
+ const struct counter_available *avail;
+ union {
+ size_t length;
+ size_t idx;
+ };
+};
+
+#define DEFINE_COUNTER_ARRAY_U64(_name, _length) \
+ struct counter_array _name = { \
+ .type = COUNTER_COMP_U64, \
+ .length = (_length), \
+ }
+
+#define DEFINE_COUNTER_ARRAY_CAPTURE(_name, _length) \
+ DEFINE_COUNTER_ARRAY_U64(_name, _length)
+
+#define DEFINE_COUNTER_ARRAY_POLARITY(_name, _enums, _length) \
+ DEFINE_COUNTER_AVAILABLE(_name##_available, _enums); \
+ struct counter_array _name = { \
+ .type = COUNTER_COMP_SIGNAL_POLARITY, \
+ .avail = &(_name##_available), \
+ .length = (_length), \
+ }
+
+#define COUNTER_COMP_DEVICE_ARRAY_U64(_name, _read, _write, _array) \
+{ \
+ .type = COUNTER_COMP_ARRAY, \
+ .name = (_name), \
+ .device_array_u64_read = (_read), \
+ .device_array_u64_write = (_write), \
+ .priv = &(_array), \
+}
+#define COUNTER_COMP_COUNT_ARRAY_U64(_name, _read, _write, _array) \
+{ \
+ .type = COUNTER_COMP_ARRAY, \
+ .name = (_name), \
+ .count_array_u64_read = (_read), \
+ .count_array_u64_write = (_write), \
+ .priv = &(_array), \
+}
+#define COUNTER_COMP_SIGNAL_ARRAY_U64(_name, _read, _write, _array) \
+{ \
+ .type = COUNTER_COMP_ARRAY, \
+ .name = (_name), \
+ .signal_array_u64_read = (_read), \
+ .signal_array_u64_write = (_write), \
+ .priv = &(_array), \
+}
+
+#define COUNTER_COMP_CAPTURE(_read, _write) \
+ COUNTER_COMP_COUNT_U64("capture", _read, _write)
+
#define COUNTER_COMP_CEILING(_read, _write) \
COUNTER_COMP_COUNT_U64("ceiling", _read, _write)
@@ -477,10 +603,31 @@ struct counter_available {
#define COUNTER_COMP_FLOOR(_read, _write) \
COUNTER_COMP_COUNT_U64("floor", _read, _write)
+#define COUNTER_COMP_POLARITY(_read, _write, _available) \
+{ \
+ .type = COUNTER_COMP_SIGNAL_POLARITY, \
+ .name = "polarity", \
+ .signal_u32_read = (_read), \
+ .signal_u32_write = (_write), \
+ .priv = &(_available), \
+}
+
#define COUNTER_COMP_PRESET(_read, _write) \
COUNTER_COMP_COUNT_U64("preset", _read, _write)
#define COUNTER_COMP_PRESET_ENABLE(_read, _write) \
COUNTER_COMP_COUNT_BOOL("preset_enable", _read, _write)
+#define COUNTER_COMP_ARRAY_CAPTURE(_read, _write, _array) \
+ COUNTER_COMP_COUNT_ARRAY_U64("capture", _read, _write, _array)
+
+#define COUNTER_COMP_ARRAY_POLARITY(_read, _write, _array) \
+{ \
+ .type = COUNTER_COMP_ARRAY, \
+ .name = "polarity", \
+ .signal_array_u32_read = (_read), \
+ .signal_array_u32_write = (_write), \
+ .priv = &(_array), \
+}
+
#endif /* _COUNTER_H_ */
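A driver-side sketch, not part of the patch (the example_* names are hypothetical), declaring a four-element capture array as a Count extension with the new macros:

	static int example_capture_read(struct counter_device *counter,
					struct counter_count *count,
					size_t idx, u64 *val);
	static int example_capture_write(struct counter_device *counter,
					 struct counter_count *count,
					 size_t idx, u64 val);

	static DEFINE_COUNTER_ARRAY_CAPTURE(example_capture_array, 4);

	static struct counter_comp example_count_ext[] = {
		COUNTER_COMP_ARRAY_CAPTURE(example_capture_read,
					   example_capture_write,
					   example_capture_array),
	};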
diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
index bd047864c7ac..2f065ad97541 100644
--- a/include/linux/cpumask.h
+++ b/include/linux/cpumask.h
@@ -35,19 +35,23 @@ typedef struct cpumask { DECLARE_BITMAP(bits, NR_CPUS); } cpumask_t;
*/
#define cpumask_pr_args(maskp) nr_cpu_ids, cpumask_bits(maskp)
-#if NR_CPUS == 1
-#define nr_cpu_ids 1U
+#if (NR_CPUS == 1) || defined(CONFIG_FORCE_NR_CPUS)
+#define nr_cpu_ids ((unsigned int)NR_CPUS)
#else
extern unsigned int nr_cpu_ids;
#endif
-#ifdef CONFIG_CPUMASK_OFFSTACK
-/* Assuming NR_CPUS is huge, a runtime limit is more efficient. Also,
- * not all bits may be allocated. */
-#define nr_cpumask_bits nr_cpu_ids
+static inline void set_nr_cpu_ids(unsigned int nr)
+{
+#if (NR_CPUS == 1) || defined(CONFIG_FORCE_NR_CPUS)
+ WARN_ON(nr != nr_cpu_ids);
#else
-#define nr_cpumask_bits ((unsigned int)NR_CPUS)
+ nr_cpu_ids = nr;
#endif
+}
+
+/* Deprecated. Always use nr_cpu_ids. */
+#define nr_cpumask_bits nr_cpu_ids
/*
* The following particular system cpumasks and operations manage
@@ -67,10 +71,6 @@ extern unsigned int nr_cpu_ids;
* cpu_online_mask is the dynamic subset of cpu_present_mask,
* indicating those CPUs available for scheduling.
*
- * If HOTPLUG is enabled, then cpu_possible_mask is forced to have
- * all NR_CPUS bits set, otherwise it is just the set of CPUs that
- * ACPI reports present at boot.
- *
* If HOTPLUG is enabled, then cpu_present_mask varies dynamically,
* depending on what ACPI reports as currently plugged in, otherwise
* cpu_present_mask is just a copy of cpu_possible_mask.
@@ -174,9 +174,8 @@ static inline unsigned int cpumask_last(const struct cpumask *srcp)
static inline
unsigned int cpumask_next(int n, const struct cpumask *srcp)
{
- /* -1 is a legal arg here. */
- if (n != -1)
- cpumask_check(n);
+ /* n is a prior cpu */
+ cpumask_check(n + 1);
return find_next_bit(cpumask_bits(srcp), nr_cpumask_bits, n + 1);
}
@@ -189,9 +188,8 @@ unsigned int cpumask_next(int n, const struct cpumask *srcp)
*/
static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
{
- /* -1 is a legal arg here. */
- if (n != -1)
- cpumask_check(n);
+ /* n is a prior cpu */
+ cpumask_check(n + 1);
return find_next_zero_bit(cpumask_bits(srcp), nr_cpumask_bits, n+1);
}
@@ -231,9 +229,8 @@ static inline
unsigned int cpumask_next_and(int n, const struct cpumask *src1p,
const struct cpumask *src2p)
{
- /* -1 is a legal arg here. */
- if (n != -1)
- cpumask_check(n);
+ /* n is a prior cpu */
+ cpumask_check(n + 1);
return find_next_and_bit(cpumask_bits(src1p), cpumask_bits(src2p),
nr_cpumask_bits, n + 1);
}
@@ -246,9 +243,7 @@ unsigned int cpumask_next_and(int n, const struct cpumask *src1p,
* After the loop, cpu is >= nr_cpu_ids.
*/
#define for_each_cpu(cpu, mask) \
- for ((cpu) = -1; \
- (cpu) = cpumask_next((cpu), (mask)), \
- (cpu) < nr_cpu_ids;)
+ for_each_set_bit(cpu, cpumask_bits(mask), nr_cpumask_bits)
/**
* for_each_cpu_not - iterate over every cpu in a complemented mask
@@ -258,17 +253,15 @@ unsigned int cpumask_next_and(int n, const struct cpumask *src1p,
* After the loop, cpu is >= nr_cpu_ids.
*/
#define for_each_cpu_not(cpu, mask) \
- for ((cpu) = -1; \
- (cpu) = cpumask_next_zero((cpu), (mask)), \
- (cpu) < nr_cpu_ids;)
+ for_each_clear_bit(cpu, cpumask_bits(mask), nr_cpumask_bits)
#if NR_CPUS == 1
static inline
unsigned int cpumask_next_wrap(int n, const struct cpumask *mask, int start, bool wrap)
{
cpumask_check(start);
- if (n != -1)
- cpumask_check(n);
+ /* n is a prior cpu */
+ cpumask_check(n + 1);
/*
* Return the first available CPU when wrapping, or when starting before cpu0,
@@ -293,10 +286,8 @@ unsigned int __pure cpumask_next_wrap(int n, const struct cpumask *mask, int sta
*
* After the loop, cpu is >= nr_cpu_ids.
*/
-#define for_each_cpu_wrap(cpu, mask, start) \
- for ((cpu) = cpumask_next_wrap((start)-1, (mask), (start), false); \
- (cpu) < nr_cpumask_bits; \
- (cpu) = cpumask_next_wrap((cpu), (mask), (start), true))
+#define for_each_cpu_wrap(cpu, mask, start) \
+ for_each_set_bit_wrap(cpu, cpumask_bits(mask), nr_cpumask_bits, start)
/**
* for_each_cpu_and - iterate over every cpu in both masks
@@ -313,9 +304,25 @@ unsigned int __pure cpumask_next_wrap(int n, const struct cpumask *mask, int sta
* After the loop, cpu is >= nr_cpu_ids.
*/
#define for_each_cpu_and(cpu, mask1, mask2) \
- for ((cpu) = -1; \
- (cpu) = cpumask_next_and((cpu), (mask1), (mask2)), \
- (cpu) < nr_cpu_ids;)
+ for_each_and_bit(cpu, cpumask_bits(mask1), cpumask_bits(mask2), nr_cpumask_bits)
+
+/**
+ * for_each_cpu_andnot - iterate over every cpu present in one mask, excluding
+ * those present in another.
+ * @cpu: the (optionally unsigned) integer iterator
+ * @mask1: the first cpumask pointer
+ * @mask2: the second cpumask pointer
+ *
+ * This saves a temporary CPU mask in many places. It is equivalent to:
+ * struct cpumask tmp;
+ * cpumask_andnot(&tmp, &mask1, &mask2);
+ * for_each_cpu(cpu, &tmp)
+ * ...
+ *
+ * After the loop, cpu is >= nr_cpu_ids.
+ */
+#define for_each_cpu_andnot(cpu, mask1, mask2) \
+ for_each_andnot_bit(cpu, cpumask_bits(mask1), cpumask_bits(mask2), nr_cpumask_bits)
/**
* cpumask_any_but - return a "random" in a cpumask, but not this one.
@@ -337,6 +344,50 @@ unsigned int cpumask_any_but(const struct cpumask *mask, unsigned int cpu)
return i;
}
+/**
+ * cpumask_nth - get the Nth cpu in a cpumask
+ * @srcp: the cpumask pointer
+ * @cpu: the N'th cpu to find, starting from 0
+ *
+ * Returns >= nr_cpu_ids if such cpu doesn't exist.
+ */
+static inline unsigned int cpumask_nth(unsigned int cpu, const struct cpumask *srcp)
+{
+ return find_nth_bit(cpumask_bits(srcp), nr_cpumask_bits, cpumask_check(cpu));
+}
+
+/**
+ * cpumask_nth_and - get the Nth cpu set in both cpumasks
+ * @srcp1: the cpumask pointer
+ * @srcp2: the cpumask pointer
+ * @cpu: the N'th cpu to find, starting from 0
+ *
+ * Returns >= nr_cpu_ids if such cpu doesn't exist.
+ */
+static inline
+unsigned int cpumask_nth_and(unsigned int cpu, const struct cpumask *srcp1,
+ const struct cpumask *srcp2)
+{
+ return find_nth_and_bit(cpumask_bits(srcp1), cpumask_bits(srcp2),
+ nr_cpumask_bits, cpumask_check(cpu));
+}
+
+/**
+ * cpumask_nth_andnot - get the Nth cpu set in the 1st cpumask but clear in the 2nd
+ * @srcp1: the cpumask pointer
+ * @srcp2: the cpumask pointer
+ * @cpu: the N'th cpu to find, starting from 0
+ *
+ * Returns >= nr_cpu_ids if such cpu doesn't exist.
+ */
+static inline
+unsigned int cpumask_nth_andnot(unsigned int cpu, const struct cpumask *srcp1,
+ const struct cpumask *srcp2)
+{
+ return find_nth_andnot_bit(cpumask_bits(srcp1), cpumask_bits(srcp2),
+ nr_cpumask_bits, cpumask_check(cpu));
+}
+
#define CPU_BITS_NONE \
{ \
[0 ... BITS_TO_LONGS(NR_CPUS)-1] = 0UL \
@@ -587,6 +638,17 @@ static inline unsigned int cpumask_weight(const struct cpumask *srcp)
}
/**
+ * cpumask_weight_and - Count of bits in (*srcp1 & *srcp2)
+ * @srcp1: the cpumask to count bits (< nr_cpu_ids) in.
+ * @srcp2: the cpumask to count bits (< nr_cpu_ids) in.
+ */
+static inline unsigned int cpumask_weight_and(const struct cpumask *srcp1,
+ const struct cpumask *srcp2)
+{
+ return bitmap_weight_and(cpumask_bits(srcp1), cpumask_bits(srcp2), nr_cpumask_bits);
+}
+
+/**
* cpumask_shift_right - *dstp = *srcp >> n
* @dstp: the cpumask result
* @srcp: the input to shift
@@ -1127,9 +1189,10 @@ cpumap_print_list_to_buf(char *buf, const struct cpumask *mask,
* cover a worst-case of every other cpu being on one of two nodes for a
* very large NR_CPUS.
*
- * Use PAGE_SIZE as a minimum for smaller configurations.
+ * Use PAGE_SIZE as a minimum for smaller configurations while avoiding
+ * unsigned comparison to -1.
*/
-#define CPUMAP_FILE_MAX_BYTES ((((NR_CPUS * 9)/32 - 1) > PAGE_SIZE) \
+#define CPUMAP_FILE_MAX_BYTES (((NR_CPUS * 9)/32 > PAGE_SIZE) \
? (NR_CPUS * 9)/32 - 1 : PAGE_SIZE)
#define CPULIST_FILE_MAX_BYTES (((NR_CPUS * 7)/2 > PAGE_SIZE) ? (NR_CPUS * 7)/2 : PAGE_SIZE)
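A sketch, not part of the patch (isolated_mask, allowed_mask and do_per_cpu_work() are hypothetical), of the new iteration and query helpers:

	unsigned int cpu, nr;

	/* online CPUs that are not reserved for isolation */
	for_each_cpu_andnot(cpu, cpu_online_mask, isolated_mask)
		do_per_cpu_work(cpu);

	/* how many online CPUs are also in the allowed set */
	nr = cpumask_weight_and(cpu_online_mask, allowed_mask);

	/* the 3rd (0-based index 2) allowed online CPU; >= nr_cpu_ids if none */
	cpu = cpumask_nth_and(2, cpu_online_mask, allowed_mask);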
diff --git a/include/linux/damon.h b/include/linux/damon.h
index 7b1f4a488230..620ada094c3b 100644
--- a/include/linux/damon.h
+++ b/include/linux/damon.h
@@ -216,13 +216,26 @@ struct damos_stat {
};
/**
- * struct damos - Represents a Data Access Monitoring-based Operation Scheme.
+ * struct damos_access_pattern - Target access pattern of the given scheme.
* @min_sz_region: Minimum size of target regions.
* @max_sz_region: Maximum size of target regions.
* @min_nr_accesses: Minimum ``->nr_accesses`` of target regions.
* @max_nr_accesses: Maximum ``->nr_accesses`` of target regions.
* @min_age_region: Minimum age of target regions.
* @max_age_region: Maximum age of target regions.
+ */
+struct damos_access_pattern {
+ unsigned long min_sz_region;
+ unsigned long max_sz_region;
+ unsigned int min_nr_accesses;
+ unsigned int max_nr_accesses;
+ unsigned int min_age_region;
+ unsigned int max_age_region;
+};
+
+/**
+ * struct damos - Represents a Data Access Monitoring-based Operation Scheme.
+ * @pattern: Access pattern of target regions.
* @action: &damo_action to be applied to the target regions.
* @quota: Control the aggressiveness of this scheme.
* @wmarks: Watermarks for automated (in)activation of this scheme.
@@ -230,10 +243,8 @@ struct damos_stat {
* @list: List head for siblings.
*
* For each aggregation interval, DAMON finds regions which fit in the
- * condition (&min_sz_region, &max_sz_region, &min_nr_accesses,
- * &max_nr_accesses, &min_age_region, &max_age_region) and applies &action to
- * those. To avoid consuming too much CPU time or IO resources for the
- * &action, &quota is used.
+ * &pattern and applies &action to those. To avoid consuming too much
+ * CPU time or IO resources for the &action, &quota is used.
*
* To do the work only when needed, schemes can be activated for specific
* system situations using &wmarks. If all schemes that registered to the
@@ -248,12 +259,7 @@ struct damos_stat {
* &action is applied.
*/
struct damos {
- unsigned long min_sz_region;
- unsigned long max_sz_region;
- unsigned int min_nr_accesses;
- unsigned int max_nr_accesses;
- unsigned int min_age_region;
- unsigned int max_age_region;
+ struct damos_access_pattern pattern;
enum damos_action action;
struct damos_quota quota;
struct damos_watermarks wmarks;
@@ -340,7 +346,7 @@ struct damon_operations {
unsigned long (*apply_scheme)(struct damon_ctx *context,
struct damon_target *t, struct damon_region *r,
struct damos *scheme);
- bool (*target_valid)(void *target);
+ bool (*target_valid)(struct damon_target *t);
void (*cleanup)(struct damon_ctx *context);
};
@@ -383,13 +389,15 @@ struct damon_callback {
};
/**
- * struct damon_ctx - Represents a context for each monitoring. This is the
- * main interface that allows users to set the attributes and get the results
- * of the monitoring.
+ * struct damon_attrs - Monitoring attributes for accuracy/overhead control.
*
* @sample_interval: The time between access samplings.
* @aggr_interval: The time between monitor results aggregations.
* @ops_update_interval: The time between monitoring operations updates.
+ * @min_nr_regions: The minimum number of adaptive monitoring
+ * regions.
+ * @max_nr_regions: The maximum number of adaptive monitoring
+ * regions.
*
* For each @sample_interval, DAMON checks whether each region is accessed or
* not. It aggregates and keeps the access information (number of accesses to
@@ -399,7 +407,21 @@ struct damon_callback {
* @ops_update_interval. All time intervals are in micro-seconds.
* Please refer to &struct damon_operations and &struct damon_callback for more
* detail.
+ */
+struct damon_attrs {
+ unsigned long sample_interval;
+ unsigned long aggr_interval;
+ unsigned long ops_update_interval;
+ unsigned long min_nr_regions;
+ unsigned long max_nr_regions;
+};
+
+/**
+ * struct damon_ctx - Represents a context for each monitoring. This is the
+ * main interface that allows users to set the attributes and get the results
+ * of the monitoring.
*
+ * @attrs: Monitoring attributes for accuracy/overhead control.
* @kdamond: Kernel thread who does the monitoring.
* @kdamond_lock: Mutex for the synchronizations with @kdamond.
*
@@ -421,15 +443,11 @@ struct damon_callback {
* @ops: Set of monitoring operations for given use cases.
* @callback: Set of callbacks for monitoring events notifications.
*
- * @min_nr_regions: The minimum number of adaptive monitoring regions.
- * @max_nr_regions: The maximum number of adaptive monitoring regions.
* @adaptive_targets: Head of monitoring targets (&damon_target) list.
* @schemes: Head of schemes (&damos) list.
*/
struct damon_ctx {
- unsigned long sample_interval;
- unsigned long aggr_interval;
- unsigned long ops_update_interval;
+ struct damon_attrs attrs;
/* private: internal use only */
struct timespec64 last_aggregation;
@@ -442,8 +460,6 @@ struct damon_ctx {
struct damon_operations ops;
struct damon_callback callback;
- unsigned long min_nr_regions;
- unsigned long max_nr_regions;
struct list_head adaptive_targets;
struct list_head schemes;
};
@@ -463,9 +479,23 @@ static inline struct damon_region *damon_last_region(struct damon_target *t)
return list_last_entry(&t->regions_list, struct damon_region, list);
}
+static inline struct damon_region *damon_first_region(struct damon_target *t)
+{
+ return list_first_entry(&t->regions_list, struct damon_region, list);
+}
+
+static inline unsigned long damon_sz_region(struct damon_region *r)
+{
+ return r->ar.end - r->ar.start;
+}
+
+
#define damon_for_each_region(r, t) \
list_for_each_entry(r, &t->regions_list, list)
+#define damon_for_each_region_from(r, t) \
+ list_for_each_entry_from(r, &t->regions_list, list)
+
#define damon_for_each_region_safe(r, next, t) \
list_for_each_entry_safe(r, next, &t->regions_list, list)
@@ -501,12 +531,9 @@ void damon_destroy_region(struct damon_region *r, struct damon_target *t);
int damon_set_regions(struct damon_target *t, struct damon_addr_range *ranges,
unsigned int nr_ranges);
-struct damos *damon_new_scheme(
- unsigned long min_sz_region, unsigned long max_sz_region,
- unsigned int min_nr_accesses, unsigned int max_nr_accesses,
- unsigned int min_age_region, unsigned int max_age_region,
- enum damos_action action, struct damos_quota *quota,
- struct damos_watermarks *wmarks);
+struct damos *damon_new_scheme(struct damos_access_pattern *pattern,
+ enum damos_action action, struct damos_quota *quota,
+ struct damos_watermarks *wmarks);
void damon_add_scheme(struct damon_ctx *ctx, struct damos *s);
void damon_destroy_scheme(struct damos *s);
@@ -519,10 +546,8 @@ unsigned int damon_nr_regions(struct damon_target *t);
struct damon_ctx *damon_new_ctx(void);
void damon_destroy_ctx(struct damon_ctx *ctx);
-int damon_set_attrs(struct damon_ctx *ctx, unsigned long sample_int,
- unsigned long aggr_int, unsigned long ops_upd_int,
- unsigned long min_nr_reg, unsigned long max_nr_reg);
-int damon_set_schemes(struct damon_ctx *ctx,
+int damon_set_attrs(struct damon_ctx *ctx, struct damon_attrs *attrs);
+void damon_set_schemes(struct damon_ctx *ctx,
struct damos **schemes, ssize_t nr_schemes);
int damon_nr_running_ctxs(void);
bool damon_is_registered_ops(enum damon_ops_id id);
@@ -538,6 +563,9 @@ static inline bool damon_target_has_pid(const struct damon_ctx *ctx)
int damon_start(struct damon_ctx **ctxs, int nr_ctxs, bool exclusive);
int damon_stop(struct damon_ctx **ctxs, int nr_ctxs);
+int damon_set_region_biggest_system_ram_default(struct damon_target *t,
+ unsigned long *start, unsigned long *end);
+
#endif /* CONFIG_DAMON */
#endif /* _DAMON_H */
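To make the reshuffled DAMON API above concrete, here is a minimal sketch of a
caller building a scheme and setting monitoring attributes through the new
struct-based interfaces. Only the structure layouts and prototypes come from this
hunk; the numeric values and the ctx/quota/wmarks variables are hypothetical:

    #include <linux/damon.h>

    struct damos_access_pattern pattern = {
        .min_sz_region   = PAGE_SIZE,
        .max_sz_region   = ULONG_MAX,
        .min_nr_accesses = 0,
        .max_nr_accesses = 0,          /* "cold": never observed accessed */
        .min_age_region  = 10,
        .max_age_region  = UINT_MAX,
    };
    struct damos *scheme = damon_new_scheme(&pattern, DAMOS_PAGEOUT,
                                            &quota, &wmarks);

    struct damon_attrs attrs = {
        .sample_interval     = 5000,      /* 5 ms; all intervals in usecs */
        .aggr_interval       = 100000,    /* 100 ms */
        .ops_update_interval = 1000000,   /* 1 s */
        .min_nr_regions      = 10,
        .max_nr_regions      = 1000,
    };
    int err = damon_set_attrs(ctx, &attrs);
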
diff --git a/include/linux/dcache.h b/include/linux/dcache.h
index 92c78ed02b54..6b351e009f59 100644
--- a/include/linux/dcache.h
+++ b/include/linux/dcache.h
@@ -16,6 +16,7 @@
#include <linux/wait.h>
struct path;
+struct file;
struct vfsmount;
/*
@@ -250,7 +251,7 @@ extern struct dentry * d_make_root(struct inode *);
/* <clickety>-<click> the ramfs-type tree */
extern void d_genocide(struct dentry *);
-extern void d_tmpfile(struct dentry *, struct inode *);
+extern void d_tmpfile(struct file *, struct inode *);
extern struct dentry *d_find_alias(struct inode *);
extern void d_prune_aliases(struct inode *);
@@ -287,8 +288,8 @@ static inline unsigned d_count(const struct dentry *dentry)
/*
* helper function for dentry_operations.d_dname() members
*/
-extern __printf(4, 5)
-char *dynamic_dname(struct dentry *, char *, int, const char *, ...);
+extern __printf(3, 4)
+char *dynamic_dname(char *, int, const char *, ...);
extern char *__d_path(const struct path *, const struct path *, char *, int);
extern char *d_absolute_path(const struct path *, char *, int);
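With the dentry argument dropped from dynamic_dname(), a d_dname() implementation
now passes everything through the format string. A hedged sketch modeled on
typical users such as pipefs; the myfs name is hypothetical:

    static char *myfs_dname(struct dentry *dentry, char *buffer, int buflen)
    {
        /* No dentry descriptor argument anymore; only buffer, length and format. */
        return dynamic_dname(buffer, buflen, "myfs:[%lu]",
                             d_inode(dentry)->i_ino);
    }
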
diff --git a/include/linux/delayacct.h b/include/linux/delayacct.h
index 58aea2d7385c..0da97dba9ef8 100644
--- a/include/linux/delayacct.h
+++ b/include/linux/delayacct.h
@@ -73,8 +73,8 @@ extern int delayacct_add_tsk(struct taskstats *, struct task_struct *);
extern __u64 __delayacct_blkio_ticks(struct task_struct *);
extern void __delayacct_freepages_start(void);
extern void __delayacct_freepages_end(void);
-extern void __delayacct_thrashing_start(void);
-extern void __delayacct_thrashing_end(void);
+extern void __delayacct_thrashing_start(bool *in_thrashing);
+extern void __delayacct_thrashing_end(bool *in_thrashing);
extern void __delayacct_swapin_start(void);
extern void __delayacct_swapin_end(void);
extern void __delayacct_compact_start(void);
@@ -143,22 +143,22 @@ static inline void delayacct_freepages_end(void)
__delayacct_freepages_end();
}
-static inline void delayacct_thrashing_start(void)
+static inline void delayacct_thrashing_start(bool *in_thrashing)
{
if (!static_branch_unlikely(&delayacct_key))
return;
if (current->delays)
- __delayacct_thrashing_start();
+ __delayacct_thrashing_start(in_thrashing);
}
-static inline void delayacct_thrashing_end(void)
+static inline void delayacct_thrashing_end(bool *in_thrashing)
{
if (!static_branch_unlikely(&delayacct_key))
return;
if (current->delays)
- __delayacct_thrashing_end();
+ __delayacct_thrashing_end(in_thrashing);
}
static inline void delayacct_swapin_start(void)
@@ -237,9 +237,9 @@ static inline void delayacct_freepages_start(void)
{}
static inline void delayacct_freepages_end(void)
{}
-static inline void delayacct_thrashing_start(void)
+static inline void delayacct_thrashing_start(bool *in_thrashing)
{}
-static inline void delayacct_thrashing_end(void)
+static inline void delayacct_thrashing_end(bool *in_thrashing)
{}
static inline void delayacct_swapin_start(void)
{}
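Callers of the thrashing accounting hooks now provide a local flag that is handed
back to the matching *_end() call. A minimal sketch of the calling pattern implied
by the new prototypes; the waiting code in between is illustrative only:

    bool in_thrashing;

    delayacct_thrashing_start(&in_thrashing);
    /* ... block on the page that is being thrashed (illustrative) ... */
    delayacct_thrashing_end(&in_thrashing);
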
diff --git a/include/linux/dlm.h b/include/linux/dlm.h
index ff951e9f6f20..c6bc2b5ee7e6 100644
--- a/include/linux/dlm.h
+++ b/include/linux/dlm.h
@@ -56,9 +56,6 @@ struct dlm_lockspace_ops {
* DLM_LSFL_TIMEWARN
* The dlm should emit netlink messages if locks have been waiting
* for a configurable amount of time. (Unused.)
- * DLM_LSFL_FS
- * The lockspace user is in the kernel (i.e. filesystem). Enables
- * direct bast/cast callbacks.
* DLM_LSFL_NEWEXCL
* dlm_new_lockspace() should return -EEXIST if the lockspace exists.
*
@@ -134,7 +131,7 @@ int dlm_lock(dlm_lockspace_t *lockspace,
int mode,
struct dlm_lksb *lksb,
uint32_t flags,
- void *name,
+ const void *name,
unsigned int namelen,
uint32_t parent_lkid,
void (*lockast) (void *astarg),
diff --git a/include/linux/dma-iommu.h b/include/linux/dma-iommu.h
deleted file mode 100644
index 24607dc3c2ac..000000000000
--- a/include/linux/dma-iommu.h
+++ /dev/null
@@ -1,93 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) 2014-2015 ARM Ltd.
- */
-#ifndef __DMA_IOMMU_H
-#define __DMA_IOMMU_H
-
-#include <linux/errno.h>
-#include <linux/types.h>
-
-#ifdef CONFIG_IOMMU_DMA
-#include <linux/dma-mapping.h>
-#include <linux/iommu.h>
-#include <linux/msi.h>
-
-/* Domain management interface for IOMMU drivers */
-int iommu_get_dma_cookie(struct iommu_domain *domain);
-int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base);
-void iommu_put_dma_cookie(struct iommu_domain *domain);
-
-/* Setup call for arch DMA mapping code */
-void iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 dma_limit);
-int iommu_dma_init_fq(struct iommu_domain *domain);
-
-/* The DMA API isn't _quite_ the whole story, though... */
-/*
- * iommu_dma_prepare_msi() - Map the MSI page in the IOMMU device
- *
- * The MSI page will be stored in @desc.
- *
- * Return: 0 on success otherwise an error describing the failure.
- */
-int iommu_dma_prepare_msi(struct msi_desc *desc, phys_addr_t msi_addr);
-
-/* Update the MSI message if required. */
-void iommu_dma_compose_msi_msg(struct msi_desc *desc,
- struct msi_msg *msg);
-
-void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list);
-
-void iommu_dma_free_cpu_cached_iovas(unsigned int cpu,
- struct iommu_domain *domain);
-
-extern bool iommu_dma_forcedac;
-
-#else /* CONFIG_IOMMU_DMA */
-
-struct iommu_domain;
-struct msi_desc;
-struct msi_msg;
-struct device;
-
-static inline void iommu_setup_dma_ops(struct device *dev, u64 dma_base,
- u64 dma_limit)
-{
-}
-
-static inline int iommu_dma_init_fq(struct iommu_domain *domain)
-{
- return -EINVAL;
-}
-
-static inline int iommu_get_dma_cookie(struct iommu_domain *domain)
-{
- return -ENODEV;
-}
-
-static inline int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base)
-{
- return -ENODEV;
-}
-
-static inline void iommu_put_dma_cookie(struct iommu_domain *domain)
-{
-}
-
-static inline int iommu_dma_prepare_msi(struct msi_desc *desc,
- phys_addr_t msi_addr)
-{
- return 0;
-}
-
-static inline void iommu_dma_compose_msi_msg(struct msi_desc *desc,
- struct msi_msg *msg)
-{
-}
-
-static inline void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list)
-{
-}
-
-#endif /* CONFIG_IOMMU_DMA */
-#endif /* __DMA_IOMMU_H */
diff --git a/include/linux/dma-resv.h b/include/linux/dma-resv.h
index c8ccbc94d5d2..0637659a702c 100644
--- a/include/linux/dma-resv.h
+++ b/include/linux/dma-resv.h
@@ -62,6 +62,11 @@ struct dma_resv_list;
* For example when asking for WRITE fences then the KERNEL fences are returned
* as well. Similar when asked for READ fences then both WRITE and KERNEL
* fences are returned as well.
+ *
+ * Already used fences can be promoted in the sense that a fence with
+ * DMA_RESV_USAGE_BOOKKEEP could become DMA_RESV_USAGE_READ by adding it again
+ * with this usage. But fences can never be degraded in the sense that a fence
+ * with DMA_RESV_USAGE_WRITE could become DMA_RESV_USAGE_READ.
*/
enum dma_resv_usage {
/**
@@ -98,10 +103,15 @@ enum dma_resv_usage {
* @DMA_RESV_USAGE_BOOKKEEP: No implicit sync.
*
* This should be used by submissions which don't want to participate in
- * implicit synchronization.
+ * any implicit synchronization.
+ *
+ * The most common cases are preemption fences, page table updates, TLB
+ * flushes and explicitly synced user submissions.
*
- * The most common case are preemption fences as well as page table
- * updates and their TLB flushes.
+ * Explicitly synced user submissions can be promoted to
+ * DMA_RESV_USAGE_READ or DMA_RESV_USAGE_WRITE as needed using
+ * dma_buf_import_sync_file() when implicit synchronization should
+ * become necessary after the fence was initially added.
*/
DMA_RESV_USAGE_BOOKKEEP
};
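The clarified kernel-doc above boils down to: bookkeeping fences never take part in
implicit sync, and a fence may later be re-added with a stronger usage but never a
weaker one. A hedged sketch of adding a bookkeeping-only fence; obj and fence are
hypothetical, and the reservation must be locked as usual:

    #include <linux/dma-resv.h>

    dma_resv_lock(obj->resv, NULL);
    if (!dma_resv_reserve_fences(obj->resv, 1))
        dma_resv_add_fence(obj->resv, fence, DMA_RESV_USAGE_BOOKKEEP);
    dma_resv_unlock(obj->resv);
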
diff --git a/include/linux/dma/hsu.h b/include/linux/dma/hsu.h
index a6b7bc707356..77ea602c287c 100644
--- a/include/linux/dma/hsu.h
+++ b/include/linux/dma/hsu.h
@@ -8,11 +8,13 @@
#ifndef _DMA_HSU_H
#define _DMA_HSU_H
-#include <linux/device.h>
-#include <linux/interrupt.h>
+#include <linux/errno.h>
+#include <linux/kconfig.h>
+#include <linux/types.h>
#include <linux/platform_data/dma-hsu.h>
+struct device;
struct hsu_dma;
/**
diff --git a/include/linux/dmar.h b/include/linux/dmar.h
index 8917a32173c4..d81a51978d01 100644
--- a/include/linux/dmar.h
+++ b/include/linux/dmar.h
@@ -65,7 +65,6 @@ struct dmar_pci_notify_info {
extern struct rw_semaphore dmar_global_lock;
extern struct list_head dmar_drhd_units;
-extern int intel_iommu_enabled;
#define for_each_drhd_unit(drhd) \
list_for_each_entry_rcu(drhd, &dmar_drhd_units, list, \
@@ -89,8 +88,7 @@ extern int intel_iommu_enabled;
static inline bool dmar_rcu_check(void)
{
return rwsem_is_locked(&dmar_global_lock) ||
- system_state == SYSTEM_BOOTING ||
- (IS_ENABLED(CONFIG_INTEL_IOMMU) && !intel_iommu_enabled);
+ system_state == SYSTEM_BOOTING;
}
#define dmar_rcu_dereference(p) rcu_dereference_check((p), dmar_rcu_check())
diff --git a/include/linux/dynamic_debug.h b/include/linux/dynamic_debug.h
index dce631e678dd..41682278d2e8 100644
--- a/include/linux/dynamic_debug.h
+++ b/include/linux/dynamic_debug.h
@@ -6,6 +6,8 @@
#include <linux/jump_label.h>
#endif
+#include <linux/build_bug.h>
+
/*
* An instance of this structure is created in a special
* ELF section at every dynamic debug callsite. At runtime,
@@ -21,6 +23,9 @@ struct _ddebug {
const char *filename;
const char *format;
unsigned int lineno:18;
+#define CLS_BITS 6
+ unsigned int class_id:CLS_BITS;
+#define _DPRINTK_CLASS_DFLT ((1 << CLS_BITS) - 1)
/*
* The flags field controls the behaviour at the callsite.
* The bits here are changed dynamically when the user
@@ -51,15 +56,82 @@ struct _ddebug {
#endif
} __attribute__((aligned(8)));
-
+enum class_map_type {
+ DD_CLASS_TYPE_DISJOINT_BITS,
+ /**
+ * DD_CLASS_TYPE_DISJOINT_BITS: classes are independent, one per bit,
+ * expecting hex input. Built for drm.debug, basis for other types.
+ */
+ DD_CLASS_TYPE_LEVEL_NUM,
+ /**
+ * DD_CLASS_TYPE_LEVEL_NUM: input is numeric level, 0-N.
+ * N turns on just bits N-1 .. 0, so N=0 turns all bits off.
+ */
+ DD_CLASS_TYPE_DISJOINT_NAMES,
+ /**
+ * DD_CLASS_TYPE_DISJOINT_NAMES: input is a CSV of [+-]CLASS_NAMES,
+ * classes are independent, like _DISJOINT_BITS.
+ */
+ DD_CLASS_TYPE_LEVEL_NAMES,
+ /**
+ * DD_CLASS_TYPE_LEVEL_NAMES: input is a CSV of [+-]CLASS_NAMES,
+ * intended for names like: INFO,DEBUG,TRACE, with a module prefix;
+ * avoid EMERG,ALERT,CRIT,ERR,WARNING: they're not debug levels.
+ */
+};
+
+struct ddebug_class_map {
+ struct list_head link;
+ struct module *mod;
+ const char *mod_name; /* needed for builtins */
+ const char **class_names;
+ const int length;
+ const int base; /* index of 1st .class_id, allows split/shared space */
+ enum class_map_type map_type;
+};
+
+/**
+ * DECLARE_DYNDBG_CLASSMAP - declare classnames known by a module
+ * @_var: a struct ddebug_class_map, passed to module_param_cb
+ * @_maptype: enum class_map_type, chooses bits/verbose, numeric/symbolic
+ * @_base: offset of 1st class-name. splits .class_id space
+ * @classes: class-names used to control class'd prdbgs
+ */
+#define DECLARE_DYNDBG_CLASSMAP(_var, _maptype, _base, ...) \
+ static const char *_var##_classnames[] = { __VA_ARGS__ }; \
+ static struct ddebug_class_map __aligned(8) __used \
+ __section("__dyndbg_classes") _var = { \
+ .mod = THIS_MODULE, \
+ .mod_name = KBUILD_MODNAME, \
+ .base = _base, \
+ .map_type = _maptype, \
+ .length = NUM_TYPE_ARGS(char*, __VA_ARGS__), \
+ .class_names = _var##_classnames, \
+ }
+#define NUM_TYPE_ARGS(eltype, ...) \
+ (sizeof((eltype[]){__VA_ARGS__}) / sizeof(eltype))
+
+/* encapsulate linker provided built-in (or module) dyndbg data */
+struct _ddebug_info {
+ struct _ddebug *descs;
+ struct ddebug_class_map *classes;
+ unsigned int num_descs;
+ unsigned int num_classes;
+};
+
+struct ddebug_class_param {
+ union {
+ unsigned long *bits;
+ unsigned int *lvl;
+ };
+ char flags[8];
+ const struct ddebug_class_map *map;
+};
#if defined(CONFIG_DYNAMIC_DEBUG_CORE)
-/* exported for module authors to exercise >control */
-int dynamic_debug_exec_queries(const char *query, const char *modname);
+int ddebug_add_module(struct _ddebug_info *dyndbg, const char *modname);
-int ddebug_add_module(struct _ddebug *tab, unsigned int n,
- const char *modname);
extern int ddebug_remove_module(const char *mod_name);
extern __printf(2, 3)
void __dynamic_pr_debug(struct _ddebug *descriptor, const char *fmt, ...);
@@ -87,7 +159,7 @@ void __dynamic_ibdev_dbg(struct _ddebug *descriptor,
const struct ib_device *ibdev,
const char *fmt, ...);
-#define DEFINE_DYNAMIC_DEBUG_METADATA(name, fmt) \
+#define DEFINE_DYNAMIC_DEBUG_METADATA_CLS(name, cls, fmt) \
static struct _ddebug __aligned(8) \
__section("__dyndbg") name = { \
.modname = KBUILD_MODNAME, \
@@ -96,8 +168,14 @@ void __dynamic_ibdev_dbg(struct _ddebug *descriptor,
.format = (fmt), \
.lineno = __LINE__, \
.flags = _DPRINTK_FLAGS_DEFAULT, \
+ .class_id = cls, \
_DPRINTK_KEY_INIT \
- }
+ }; \
+ BUILD_BUG_ON_MSG(cls > _DPRINTK_CLASS_DFLT, \
+ "classid value overflow")
+
+#define DEFINE_DYNAMIC_DEBUG_METADATA(name, fmt) \
+ DEFINE_DYNAMIC_DEBUG_METADATA_CLS(name, _DPRINTK_CLASS_DFLT, fmt)
#ifdef CONFIG_JUMP_LABEL
@@ -128,17 +206,34 @@ void __dynamic_ibdev_dbg(struct _ddebug *descriptor,
#endif /* CONFIG_JUMP_LABEL */
-#define __dynamic_func_call(id, fmt, func, ...) do { \
- DEFINE_DYNAMIC_DEBUG_METADATA(id, fmt); \
- if (DYNAMIC_DEBUG_BRANCH(id)) \
- func(&id, ##__VA_ARGS__); \
-} while (0)
-
-#define __dynamic_func_call_no_desc(id, fmt, func, ...) do { \
- DEFINE_DYNAMIC_DEBUG_METADATA(id, fmt); \
+/*
+ * Factory macros: ($prefix)dynamic_func_call($suffix)
+ *
+ * Lower layer (with __ prefix) gets the callsite metadata, and wraps
+ * the func inside a debug-branch/static-key construct. Upper layer
+ * (with _ prefix) does the UNIQUE_ID once, so that lower can ref the
+ * name/label multiple times, and tie the elements together.
+ * Multiple flavors:
+ * (|_cls): adds in _DPRINTK_CLASS_DFLT as needed
+ * (|_no_desc): former gets callsite descriptor as 1st arg (for prdbgs)
+ */
+#define __dynamic_func_call_cls(id, cls, fmt, func, ...) do { \
+ DEFINE_DYNAMIC_DEBUG_METADATA_CLS(id, cls, fmt); \
if (DYNAMIC_DEBUG_BRANCH(id)) \
- func(__VA_ARGS__); \
+ func(&id, ##__VA_ARGS__); \
+} while (0)
+#define __dynamic_func_call(id, fmt, func, ...) \
+ __dynamic_func_call_cls(id, _DPRINTK_CLASS_DFLT, fmt, \
+ func, ##__VA_ARGS__)
+
+#define __dynamic_func_call_cls_no_desc(id, cls, fmt, func, ...) do { \
+ DEFINE_DYNAMIC_DEBUG_METADATA_CLS(id, cls, fmt); \
+ if (DYNAMIC_DEBUG_BRANCH(id)) \
+ func(__VA_ARGS__); \
} while (0)
+#define __dynamic_func_call_no_desc(id, fmt, func, ...) \
+ __dynamic_func_call_cls_no_desc(id, _DPRINTK_CLASS_DFLT, \
+ fmt, func, ##__VA_ARGS__)
/*
* "Factory macro" for generating a call to func, guarded by a
@@ -148,22 +243,33 @@ void __dynamic_ibdev_dbg(struct _ddebug *descriptor,
* the varargs. Note that fmt is repeated in invocations of this
* macro.
*/
+#define _dynamic_func_call_cls(cls, fmt, func, ...) \
+ __dynamic_func_call_cls(__UNIQUE_ID(ddebug), cls, fmt, func, ##__VA_ARGS__)
#define _dynamic_func_call(fmt, func, ...) \
- __dynamic_func_call(__UNIQUE_ID(ddebug), fmt, func, ##__VA_ARGS__)
+ _dynamic_func_call_cls(_DPRINTK_CLASS_DFLT, fmt, func, ##__VA_ARGS__)
+
/*
* A variant that does the same, except that the descriptor is not
* passed as the first argument to the function; it is only called
* with precisely the macro's varargs.
*/
-#define _dynamic_func_call_no_desc(fmt, func, ...) \
- __dynamic_func_call_no_desc(__UNIQUE_ID(ddebug), fmt, func, ##__VA_ARGS__)
+#define _dynamic_func_call_cls_no_desc(cls, fmt, func, ...) \
+ __dynamic_func_call_cls_no_desc(__UNIQUE_ID(ddebug), cls, fmt, \
+ func, ##__VA_ARGS__)
+#define _dynamic_func_call_no_desc(fmt, func, ...) \
+ _dynamic_func_call_cls_no_desc(_DPRINTK_CLASS_DFLT, fmt, \
+ func, ##__VA_ARGS__)
+
+#define dynamic_pr_debug_cls(cls, fmt, ...) \
+ _dynamic_func_call_cls(cls, fmt, __dynamic_pr_debug, \
+ pr_fmt(fmt), ##__VA_ARGS__)
#define dynamic_pr_debug(fmt, ...) \
- _dynamic_func_call(fmt, __dynamic_pr_debug, \
+ _dynamic_func_call(fmt, __dynamic_pr_debug, \
pr_fmt(fmt), ##__VA_ARGS__)
#define dynamic_dev_dbg(dev, fmt, ...) \
- _dynamic_func_call(fmt,__dynamic_dev_dbg, \
+ _dynamic_func_call(fmt, __dynamic_dev_dbg, \
dev, fmt, ##__VA_ARGS__)
#define dynamic_netdev_dbg(dev, fmt, ...) \
@@ -181,14 +287,24 @@ void __dynamic_ibdev_dbg(struct _ddebug *descriptor,
KERN_DEBUG, prefix_str, prefix_type, \
rowsize, groupsize, buf, len, ascii)
+struct kernel_param;
+int param_set_dyndbg_classes(const char *instr, const struct kernel_param *kp);
+int param_get_dyndbg_classes(char *buffer, const struct kernel_param *kp);
+
+/* for test only, generally expect drm.debug style macro wrappers */
+#define __pr_debug_cls(cls, fmt, ...) do { \
+ BUILD_BUG_ON_MSG(!__builtin_constant_p(cls), \
+ "expecting constant class int/enum"); \
+ dynamic_pr_debug_cls(cls, fmt, ##__VA_ARGS__); \
+ } while (0)
+
#else /* !CONFIG_DYNAMIC_DEBUG_CORE */
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/printk.h>
-static inline int ddebug_add_module(struct _ddebug *tab, unsigned int n,
- const char *modname)
+static inline int ddebug_add_module(struct _ddebug_info *dinfo, const char *modname)
{
return 0;
}
@@ -201,7 +317,7 @@ static inline int ddebug_remove_module(const char *mod)
static inline int ddebug_dyndbg_module_param_cb(char *param, char *val,
const char *modname)
{
- if (strstr(param, "dyndbg")) {
+ if (!strcmp(param, "dyndbg")) {
/* avoid pr_warn(), which wants pr_fmt() fully defined */
printk(KERN_WARNING "dyndbg param is supported only in "
"CONFIG_DYNAMIC_DEBUG builds\n");
@@ -221,12 +337,14 @@ static inline int ddebug_dyndbg_module_param_cb(char *param, char *val,
rowsize, groupsize, buf, len, ascii); \
} while (0)
-static inline int dynamic_debug_exec_queries(const char *query, const char *modname)
-{
- pr_warn("kernel not built with CONFIG_DYNAMIC_DEBUG_CORE\n");
- return 0;
-}
+struct kernel_param;
+static inline int param_set_dyndbg_classes(const char *instr, const struct kernel_param *kp)
+{ return 0; }
+static inline int param_get_dyndbg_classes(char *buffer, const struct kernel_param *kp)
+{ return 0; }
#endif /* !CONFIG_DYNAMIC_DEBUG_CORE */
+extern const struct kernel_param_ops param_ops_dyndbg_classes;
+
#endif
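Putting the new class-map pieces together, a module would declare its class names
with DECLARE_DYNDBG_CLASSMAP(), expose them through a module parameter backed by
param_ops_dyndbg_classes, and emit classed callsites. A hedged sketch; the mydrv
names, the enum and the parameter wiring are hypothetical, not part of this patch:

    #include <linux/dynamic_debug.h>
    #include <linux/module.h>

    enum { MYDRV_CORE, MYDRV_IO };   /* indices start at .base (0 here) */

    DECLARE_DYNDBG_CLASSMAP(mydrv_classes, DD_CLASS_TYPE_DISJOINT_BITS, 0,
                            "MYDRV_CORE", "MYDRV_IO");

    static unsigned long mydrv_debug_bits;
    static struct ddebug_class_param mydrv_debug = {
        .bits  = &mydrv_debug_bits,
        .flags = "p",                /* toggle the 'p' (printk) flag per class */
        .map   = &mydrv_classes,
    };
    module_param_cb(debug, &param_ops_dyndbg_classes, &mydrv_debug, 0600);

    static void mydrv_probe_one(void)
    {
        /* the class argument must be a compile-time constant */
        __pr_debug_cls(MYDRV_IO, "probing device\n");
    }
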
diff --git a/include/linux/edac.h b/include/linux/edac.h
index e730b3468719..fa4bda2a70f6 100644
--- a/include/linux/edac.h
+++ b/include/linux/edac.h
@@ -231,21 +231,21 @@ enum mem_type {
#define MEM_FLAG_DDR BIT(MEM_DDR)
#define MEM_FLAG_RDDR BIT(MEM_RDDR)
#define MEM_FLAG_RMBS BIT(MEM_RMBS)
-#define MEM_FLAG_DDR2 BIT(MEM_DDR2)
-#define MEM_FLAG_FB_DDR2 BIT(MEM_FB_DDR2)
-#define MEM_FLAG_RDDR2 BIT(MEM_RDDR2)
-#define MEM_FLAG_XDR BIT(MEM_XDR)
-#define MEM_FLAG_DDR3 BIT(MEM_DDR3)
-#define MEM_FLAG_RDDR3 BIT(MEM_RDDR3)
-#define MEM_FLAG_LPDDR3 BIT(MEM_LPDDR3)
-#define MEM_FLAG_DDR4 BIT(MEM_DDR4)
-#define MEM_FLAG_RDDR4 BIT(MEM_RDDR4)
-#define MEM_FLAG_LRDDR4 BIT(MEM_LRDDR4)
-#define MEM_FLAG_LPDDR4 BIT(MEM_LPDDR4)
-#define MEM_FLAG_DDR5 BIT(MEM_DDR5)
-#define MEM_FLAG_RDDR5 BIT(MEM_RDDR5)
-#define MEM_FLAG_LRDDR5 BIT(MEM_LRDDR5)
-#define MEM_FLAG_NVDIMM BIT(MEM_NVDIMM)
+#define MEM_FLAG_DDR2 BIT(MEM_DDR2)
+#define MEM_FLAG_FB_DDR2 BIT(MEM_FB_DDR2)
+#define MEM_FLAG_RDDR2 BIT(MEM_RDDR2)
+#define MEM_FLAG_XDR BIT(MEM_XDR)
+#define MEM_FLAG_DDR3 BIT(MEM_DDR3)
+#define MEM_FLAG_RDDR3 BIT(MEM_RDDR3)
+#define MEM_FLAG_LPDDR3 BIT(MEM_LPDDR3)
+#define MEM_FLAG_DDR4 BIT(MEM_DDR4)
+#define MEM_FLAG_RDDR4 BIT(MEM_RDDR4)
+#define MEM_FLAG_LRDDR4 BIT(MEM_LRDDR4)
+#define MEM_FLAG_LPDDR4 BIT(MEM_LPDDR4)
+#define MEM_FLAG_DDR5 BIT(MEM_DDR5)
+#define MEM_FLAG_RDDR5 BIT(MEM_RDDR5)
+#define MEM_FLAG_LRDDR5 BIT(MEM_LRDDR5)
+#define MEM_FLAG_NVDIMM BIT(MEM_NVDIMM)
#define MEM_FLAG_WIO2 BIT(MEM_WIO2)
#define MEM_FLAG_HBM2 BIT(MEM_HBM2)
diff --git a/include/linux/efi.h b/include/linux/efi.h
index d2b84c2fec39..da3974bf05d3 100644
--- a/include/linux/efi.h
+++ b/include/linux/efi.h
@@ -368,6 +368,9 @@ void efi_native_runtime_setup(void);
#define UV_SYSTEM_TABLE_GUID EFI_GUID(0x3b13a7d4, 0x633e, 0x11dd, 0x93, 0xec, 0xda, 0x25, 0x56, 0xd8, 0x95, 0x93)
#define LINUX_EFI_CRASH_GUID EFI_GUID(0xcfc8fc79, 0xbe2e, 0x4ddc, 0x97, 0xf0, 0x9f, 0x98, 0xbf, 0xe2, 0x98, 0xa0)
#define LOADED_IMAGE_PROTOCOL_GUID EFI_GUID(0x5b1b31a1, 0x9562, 0x11d2, 0x8e, 0x3f, 0x00, 0xa0, 0xc9, 0x69, 0x72, 0x3b)
+#define LOADED_IMAGE_DEVICE_PATH_PROTOCOL_GUID EFI_GUID(0xbc62157e, 0x3e33, 0x4fec, 0x99, 0x20, 0x2d, 0x3b, 0x36, 0xd7, 0x50, 0xdf)
+#define EFI_DEVICE_PATH_PROTOCOL_GUID EFI_GUID(0x09576e91, 0x6d3f, 0x11d2, 0x8e, 0x39, 0x00, 0xa0, 0xc9, 0x69, 0x72, 0x3b)
+#define EFI_DEVICE_PATH_TO_TEXT_PROTOCOL_GUID EFI_GUID(0x8b843e20, 0x8132, 0x4852, 0x90, 0xcc, 0x55, 0x1a, 0x4e, 0x4a, 0x7f, 0x1c)
#define EFI_GRAPHICS_OUTPUT_PROTOCOL_GUID EFI_GUID(0x9042a9de, 0x23dc, 0x4a38, 0x96, 0xfb, 0x7a, 0xde, 0xd0, 0x80, 0x51, 0x6a)
#define EFI_UGA_PROTOCOL_GUID EFI_GUID(0x982c298b, 0xf4fa, 0x41cb, 0xb8, 0x38, 0x77, 0xaa, 0x68, 0x8f, 0xb8, 0x39)
#define EFI_PCI_IO_PROTOCOL_GUID EFI_GUID(0x4cf5b200, 0x68b8, 0x4ca5, 0x9e, 0xec, 0xb2, 0x3e, 0x3f, 0x50, 0x02, 0x9a)
@@ -408,8 +411,10 @@ void efi_native_runtime_setup(void);
#define LINUX_EFI_TPM_FINAL_LOG_GUID EFI_GUID(0x1e2ed096, 0x30e2, 0x4254, 0xbd, 0x89, 0x86, 0x3b, 0xbe, 0xf8, 0x23, 0x25)
#define LINUX_EFI_MEMRESERVE_TABLE_GUID EFI_GUID(0x888eb0c6, 0x8ede, 0x4ff5, 0xa8, 0xf0, 0x9a, 0xee, 0x5c, 0xb9, 0x77, 0xc2)
#define LINUX_EFI_INITRD_MEDIA_GUID EFI_GUID(0x5568e427, 0x68fc, 0x4f3d, 0xac, 0x74, 0xca, 0x55, 0x52, 0x31, 0xcc, 0x68)
+#define LINUX_EFI_ZBOOT_MEDIA_GUID EFI_GUID(0xe565a30d, 0x47da, 0x4dbd, 0xb3, 0x54, 0x9b, 0xb5, 0xc8, 0x4f, 0x8b, 0xe2)
#define LINUX_EFI_MOK_VARIABLE_TABLE_GUID EFI_GUID(0xc451ed2b, 0x9694, 0x45d3, 0xba, 0xba, 0xed, 0x9f, 0x89, 0x88, 0xa3, 0x89)
#define LINUX_EFI_COCO_SECRET_AREA_GUID EFI_GUID(0xadf956ad, 0xe98c, 0x484c, 0xae, 0x11, 0xb5, 0x1c, 0x7d, 0x33, 0x64, 0x47)
+#define LINUX_EFI_BOOT_MEMMAP_GUID EFI_GUID(0x800f683f, 0xd08b, 0x423a, 0xa2, 0x93, 0x96, 0x5c, 0x3c, 0x6f, 0xe2, 0xb4)
#define RISCV_EFI_BOOT_PROTOCOL_GUID EFI_GUID(0xccd15fec, 0x6f73, 0x4eec, 0x83, 0x95, 0x3e, 0x69, 0xe4, 0xb9, 0x40, 0xbf)
@@ -518,6 +523,15 @@ typedef union {
efi_system_table_32_t mixed_mode;
} efi_system_table_t;
+struct efi_boot_memmap {
+ unsigned long map_size;
+ unsigned long desc_size;
+ u32 desc_ver;
+ unsigned long map_key;
+ unsigned long buff_size;
+ efi_memory_desc_t map[];
+};
+
/*
* Architecture independent structure for describing a memory map for the
* benefit of efi_memmap_init_early(), and for passing context between
@@ -952,6 +966,7 @@ extern int efi_status_to_err(efi_status_t status);
#define EFI_DEV_MEDIA_VENDOR 3
#define EFI_DEV_MEDIA_FILE 4
#define EFI_DEV_MEDIA_PROTOCOL 5
+#define EFI_DEV_MEDIA_REL_OFFSET 8
#define EFI_DEV_BIOS_BOOT 0x05
#define EFI_DEV_END_PATH 0x7F
#define EFI_DEV_END_PATH2 0xFF
@@ -982,12 +997,27 @@ struct efi_vendor_dev_path {
u8 vendordata[];
} __packed;
+struct efi_rel_offset_dev_path {
+ struct efi_generic_dev_path header;
+ u32 reserved;
+ u64 starting_offset;
+ u64 ending_offset;
+} __packed;
+
+struct efi_mem_mapped_dev_path {
+ struct efi_generic_dev_path header;
+ u32 memory_type;
+ u64 starting_addr;
+ u64 ending_addr;
+} __packed;
+
struct efi_dev_path {
union {
struct efi_generic_dev_path header;
struct efi_acpi_dev_path acpi;
struct efi_pci_dev_path pci;
struct efi_vendor_dev_path vendor;
+ struct efi_rel_offset_dev_path rel_offset;
};
} __packed;
@@ -1321,6 +1351,11 @@ struct linux_efi_coco_secret_area {
u64 size;
};
+struct linux_efi_initrd {
+ unsigned long base;
+ unsigned long size;
+};
+
/* Header of a populated EFI secret area */
#define EFI_SECRET_TABLE_HEADER_GUID EFI_GUID(0x1e74f542, 0x71dd, 0x4d66, 0x96, 0x3e, 0xef, 0x42, 0x87, 0xff, 0x17, 0x3b)
diff --git a/include/linux/entry-common.h b/include/linux/entry-common.h
index 84a466b176cf..d95ab85f96ba 100644
--- a/include/linux/entry-common.h
+++ b/include/linux/entry-common.h
@@ -253,7 +253,6 @@ static __always_inline void arch_exit_to_user_mode(void) { }
/**
* arch_do_signal_or_restart - Architecture specific signal delivery function
* @regs: Pointer to currents pt_regs
- * @has_signal: actual signal to handle
*
* Invoked from exit_to_user_mode_loop().
*/
diff --git a/include/linux/etherdevice.h b/include/linux/etherdevice.h
index 92b10e67d5f8..a541f0c4f146 100644
--- a/include/linux/etherdevice.h
+++ b/include/linux/etherdevice.h
@@ -428,6 +428,28 @@ static inline bool ether_addr_equal_masked(const u8 *addr1, const u8 *addr2,
return true;
}
+static inline bool ether_addr_is_ipv4_mcast(const u8 *addr)
+{
+ u8 base[ETH_ALEN] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x00 };
+ u8 mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0x80, 0x00, 0x00 };
+
+ return ether_addr_equal_masked(addr, base, mask);
+}
+
+static inline bool ether_addr_is_ipv6_mcast(const u8 *addr)
+{
+ u8 base[ETH_ALEN] = { 0x33, 0x33, 0x00, 0x00, 0x00, 0x00 };
+ u8 mask[ETH_ALEN] = { 0xff, 0xff, 0x00, 0x00, 0x00, 0x00 };
+
+ return ether_addr_equal_masked(addr, base, mask);
+}
+
+static inline bool ether_addr_is_ip_mcast(const u8 *addr)
+{
+ return ether_addr_is_ipv4_mcast(addr) ||
+ ether_addr_is_ipv6_mcast(addr);
+}
+
/**
* ether_addr_to_u64 - Convert an Ethernet address into a u64 value.
* @addr: Pointer to a six-byte array containing the Ethernet address
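The three new helpers above classify an Ethernet destination address as an
IPv4-mapped, IPv6-mapped, or any IP multicast MAC. A small sketch with
illustrative addresses:

    #include <linux/etherdevice.h>

    static const u8 v4_grp[ETH_ALEN] = { 0x01, 0x00, 0x5e, 0x01, 0x02, 0x03 };
    static const u8 v6_grp[ETH_ALEN] = { 0x33, 0x33, 0x00, 0x00, 0x00, 0x01 };

    bool a = ether_addr_is_ipv4_mcast(v4_grp); /* true: 01:00:5e, top bit of byte 3 clear */
    bool b = ether_addr_is_ipv6_mcast(v6_grp); /* true: 33:33:xx:xx:xx:xx */
    bool c = ether_addr_is_ip_mcast(v6_grp);   /* true for either family */
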
diff --git a/include/linux/eventfd.h b/include/linux/eventfd.h
index 305d5f19093b..30eb30d6909b 100644
--- a/include/linux/eventfd.h
+++ b/include/linux/eventfd.h
@@ -46,7 +46,7 @@ void eventfd_ctx_do_read(struct eventfd_ctx *ctx, __u64 *cnt);
static inline bool eventfd_signal_allowed(void)
{
- return !current->in_eventfd_signal;
+ return !current->in_eventfd;
}
#else /* CONFIG_EVENTFD */
diff --git a/include/linux/export-internal.h b/include/linux/export-internal.h
index c2b1d4fd5987..fe7e6ba918f1 100644
--- a/include/linux/export-internal.h
+++ b/include/linux/export-internal.h
@@ -10,8 +10,10 @@
#include <linux/compiler.h>
#include <linux/types.h>
-/* __used is needed to keep __crc_* for LTO */
#define SYMBOL_CRC(sym, crc, sec) \
- u32 __section("___kcrctab" sec "+" #sym) __used __crc_##sym = crc
+ asm(".section \"___kcrctab" sec "+" #sym "\",\"a\"" "\n" \
+ "__crc_" #sym ":" "\n" \
+ ".long " #crc "\n" \
+ ".previous" "\n")
#endif /* __LINUX_EXPORT_INTERNAL_H__ */
diff --git a/include/linux/f2fs_fs.h b/include/linux/f2fs_fs.h
index d445150c5350..ee0d75d9a302 100644
--- a/include/linux/f2fs_fs.h
+++ b/include/linux/f2fs_fs.h
@@ -73,6 +73,42 @@ struct f2fs_device {
__le32 total_segments;
} __packed;
+/* reason of stop_checkpoint */
+enum stop_cp_reason {
+ STOP_CP_REASON_SHUTDOWN,
+ STOP_CP_REASON_FAULT_INJECT,
+ STOP_CP_REASON_META_PAGE,
+ STOP_CP_REASON_WRITE_FAIL,
+ STOP_CP_REASON_CORRUPTED_SUMMARY,
+ STOP_CP_REASON_UPDATE_INODE,
+ STOP_CP_REASON_FLUSH_FAIL,
+ STOP_CP_REASON_MAX,
+};
+
+#define MAX_STOP_REASON 32
+
+/* detail reason for EFSCORRUPTED */
+enum f2fs_error {
+ ERROR_CORRUPTED_CLUSTER,
+ ERROR_FAIL_DECOMPRESSION,
+ ERROR_INVALID_BLKADDR,
+ ERROR_CORRUPTED_DIRENT,
+ ERROR_CORRUPTED_INODE,
+ ERROR_INCONSISTENT_SUMMARY,
+ ERROR_INCONSISTENT_FOOTER,
+ ERROR_INCONSISTENT_SUM_TYPE,
+ ERROR_CORRUPTED_JOURNAL,
+ ERROR_INCONSISTENT_NODE_COUNT,
+ ERROR_INCONSISTENT_BLOCK_COUNT,
+ ERROR_INVALID_CURSEG,
+ ERROR_INCONSISTENT_SIT,
+ ERROR_CORRUPTED_VERITY_XATTR,
+ ERROR_CORRUPTED_XATTR,
+ ERROR_MAX,
+};
+
+#define MAX_F2FS_ERRORS 16
+
struct f2fs_super_block {
__le32 magic; /* Magic Number */
__le16 major_ver; /* Major Version */
@@ -116,7 +152,9 @@ struct f2fs_super_block {
__u8 hot_ext_count; /* # of hot file extension */
__le16 s_encoding; /* Filename charset encoding */
__le16 s_encoding_flags; /* Filename charset encoding flags */
- __u8 reserved[306]; /* valid reserved region */
+ __u8 s_stop_reason[MAX_STOP_REASON]; /* stop checkpoint reason */
+ __u8 s_errors[MAX_F2FS_ERRORS]; /* reason of image corrupts */
+ __u8 reserved[258]; /* valid reserved region */
__le32 crc; /* checksum of superblock */
} __packed;
diff --git a/include/linux/fb.h b/include/linux/fb.h
index 07fcd0e56682..0aff76bcbb00 100644
--- a/include/linux/fb.h
+++ b/include/linux/fb.h
@@ -615,10 +615,6 @@ extern ssize_t fb_sys_write(struct fb_info *info, const char __user *buf,
/* drivers/video/fbmem.c */
extern int register_framebuffer(struct fb_info *fb_info);
extern void unregister_framebuffer(struct fb_info *fb_info);
-extern int remove_conflicting_pci_framebuffers(struct pci_dev *pdev,
- const char *name);
-extern int remove_conflicting_framebuffers(struct apertures_struct *a,
- const char *name, bool primary);
extern int fb_prepare_logo(struct fb_info *fb_info, int rotate);
extern int fb_show_logo(struct fb_info *fb_info, int rotate);
extern char* fb_get_buffer_offset(struct fb_info *info, struct fb_pixmap *buf, u32 size);
@@ -631,16 +627,10 @@ extern int fb_get_color_depth(struct fb_var_screeninfo *var,
extern int fb_get_options(const char *name, char **option);
extern int fb_new_modelist(struct fb_info *info);
-extern struct fb_info *registered_fb[FB_MAX];
-extern int num_registered_fb;
extern bool fb_center_logo;
extern int fb_logo_count;
extern struct class *fb_class;
-#define for_each_registered_fb(i) \
- for (i = 0; i < FB_MAX; i++) \
- if (!registered_fb[i]) {} else
-
static inline void lock_fb_info(struct fb_info *info)
{
mutex_lock(&info->lock);
diff --git a/include/linux/filter.h b/include/linux/filter.h
index a5f21dc3c432..efc42a6e3aed 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -567,6 +567,12 @@ struct sk_filter {
DECLARE_STATIC_KEY_FALSE(bpf_stats_enabled_key);
+extern struct mutex nf_conn_btf_access_lock;
+extern int (*nfct_btf_struct_access)(struct bpf_verifier_log *log, const struct btf *btf,
+ const struct btf_type *t, int off, int size,
+ enum bpf_access_type atype, u32 *next_btf_id,
+ enum bpf_type_flag *flag);
+
typedef unsigned int (*bpf_dispatcher_fn)(const void *ctx,
const struct bpf_insn *insnsi,
unsigned int (*bpf_func)(const void *,
@@ -900,8 +906,7 @@ int sk_reuseport_attach_filter(struct sock_fprog *fprog, struct sock *sk);
int sk_reuseport_attach_bpf(u32 ufd, struct sock *sk);
void sk_reuseport_prog_free(struct bpf_prog *prog);
int sk_detach_filter(struct sock *sk);
-int sk_get_filter(struct sock *sk, struct sock_filter __user *filter,
- unsigned int len);
+int sk_get_filter(struct sock *sk, sockptr_t optval, unsigned int len);
bool sk_filter_charge(struct sock *sk, struct sk_filter *fp);
void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp);
@@ -1018,6 +1023,8 @@ extern long bpf_jit_limit_max;
typedef void (*bpf_jit_fill_hole_t)(void *area, unsigned int size);
+void bpf_jit_fill_hole_with_zero(void *area, unsigned int size);
+
struct bpf_binary_header *
bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
unsigned int alignment,
@@ -1030,6 +1037,9 @@ void bpf_jit_free(struct bpf_prog *fp);
struct bpf_binary_header *
bpf_jit_binary_pack_hdr(const struct bpf_prog *fp);
+void *bpf_prog_pack_alloc(u32 size, bpf_jit_fill_hole_t bpf_fill_ill_insns);
+void bpf_prog_pack_free(struct bpf_binary_header *hdr);
+
static inline bool bpf_prog_kallsyms_verify_off(const struct bpf_prog *fp)
{
return list_empty(&fp->aux->ksym.lnode) ||
@@ -1100,7 +1110,7 @@ static inline bool bpf_jit_blinding_enabled(struct bpf_prog *prog)
return false;
if (!bpf_jit_harden)
return false;
- if (bpf_jit_harden == 1 && capable(CAP_SYS_ADMIN))
+ if (bpf_jit_harden == 1 && bpf_capable())
return false;
return true;
diff --git a/include/linux/find.h b/include/linux/find.h
index 424ef67d4a42..ccaf61a0f5fd 100644
--- a/include/linux/find.h
+++ b/include/linux/find.h
@@ -8,15 +8,33 @@
#include <linux/bitops.h>
-extern unsigned long _find_next_bit(const unsigned long *addr1,
- const unsigned long *addr2, unsigned long nbits,
- unsigned long start, unsigned long invert, unsigned long le);
+unsigned long _find_next_bit(const unsigned long *addr1, unsigned long nbits,
+ unsigned long start);
+unsigned long _find_next_and_bit(const unsigned long *addr1, const unsigned long *addr2,
+ unsigned long nbits, unsigned long start);
+unsigned long _find_next_andnot_bit(const unsigned long *addr1, const unsigned long *addr2,
+ unsigned long nbits, unsigned long start);
+unsigned long _find_next_zero_bit(const unsigned long *addr, unsigned long nbits,
+ unsigned long start);
extern unsigned long _find_first_bit(const unsigned long *addr, unsigned long size);
+unsigned long __find_nth_bit(const unsigned long *addr, unsigned long size, unsigned long n);
+unsigned long __find_nth_and_bit(const unsigned long *addr1, const unsigned long *addr2,
+ unsigned long size, unsigned long n);
+unsigned long __find_nth_andnot_bit(const unsigned long *addr1, const unsigned long *addr2,
+ unsigned long size, unsigned long n);
extern unsigned long _find_first_and_bit(const unsigned long *addr1,
const unsigned long *addr2, unsigned long size);
extern unsigned long _find_first_zero_bit(const unsigned long *addr, unsigned long size);
extern unsigned long _find_last_bit(const unsigned long *addr, unsigned long size);
+#ifdef __BIG_ENDIAN
+unsigned long _find_first_zero_bit_le(const unsigned long *addr, unsigned long size);
+unsigned long _find_next_zero_bit_le(const unsigned long *addr, unsigned
+ long size, unsigned long offset);
+unsigned long _find_next_bit_le(const unsigned long *addr, unsigned
+ long size, unsigned long offset);
+#endif
+
#ifndef find_next_bit
/**
* find_next_bit - find the next set bit in a memory region
@@ -41,7 +59,7 @@ unsigned long find_next_bit(const unsigned long *addr, unsigned long size,
return val ? __ffs(val) : size;
}
- return _find_next_bit(addr, NULL, size, offset, 0UL, 0);
+ return _find_next_bit(addr, size, offset);
}
#endif
@@ -71,7 +89,38 @@ unsigned long find_next_and_bit(const unsigned long *addr1,
return val ? __ffs(val) : size;
}
- return _find_next_bit(addr1, addr2, size, offset, 0UL, 0);
+ return _find_next_and_bit(addr1, addr2, size, offset);
+}
+#endif
+
+#ifndef find_next_andnot_bit
+/**
+ * find_next_andnot_bit - find the next set bit in *addr1 excluding all the bits
+ * in *addr2
+ * @addr1: The first address to base the search on
+ * @addr2: The second address to base the search on
+ * @size: The bitmap size in bits
+ * @offset: The bitnumber to start searching at
+ *
+ * Returns the bit number for the next set bit
+ * If no bits are set, returns @size.
+ */
+static inline
+unsigned long find_next_andnot_bit(const unsigned long *addr1,
+ const unsigned long *addr2, unsigned long size,
+ unsigned long offset)
+{
+ if (small_const_nbits(size)) {
+ unsigned long val;
+
+ if (unlikely(offset >= size))
+ return size;
+
+ val = *addr1 & ~*addr2 & GENMASK(size - 1, offset);
+ return val ? __ffs(val) : size;
+ }
+
+ return _find_next_andnot_bit(addr1, addr2, size, offset);
}
#endif
@@ -99,7 +148,7 @@ unsigned long find_next_zero_bit(const unsigned long *addr, unsigned long size,
return val == ~0UL ? size : ffz(val);
}
- return _find_next_bit(addr, NULL, size, offset, ~0UL, 0);
+ return _find_next_zero_bit(addr, size, offset);
}
#endif
@@ -125,6 +174,87 @@ unsigned long find_first_bit(const unsigned long *addr, unsigned long size)
}
#endif
+/**
+ * find_nth_bit - find N'th set bit in a memory region
+ * @addr: The address to start the search at
+ * @size: The maximum number of bits to search
+ * @n: Index of the set bit to find, counting from 0
+ *
+ * The following are semantically equivalent:
+ * idx = find_nth_bit(addr, size, 0);
+ * idx = find_first_bit(addr, size);
+ *
+ * Returns the bit number of the N'th set bit.
+ * If no such bit exists, returns @size.
+ */
+static inline
+unsigned long find_nth_bit(const unsigned long *addr, unsigned long size, unsigned long n)
+{
+ if (n >= size)
+ return size;
+
+ if (small_const_nbits(size)) {
+ unsigned long val = *addr & GENMASK(size - 1, 0);
+
+ return val ? fns(val, n) : size;
+ }
+
+ return __find_nth_bit(addr, size, n);
+}
+
+/**
+ * find_nth_and_bit - find N'th set bit in 2 memory regions
+ * @addr1: The 1st address to start the search at
+ * @addr2: The 2nd address to start the search at
+ * @size: The maximum number of bits to search
+ * @n: Index of the set bit to find, counting from 0
+ *
+ * Returns the bit number of the N'th set bit.
+ * If no such bit exists, returns @size.
+ */
+static inline
+unsigned long find_nth_and_bit(const unsigned long *addr1, const unsigned long *addr2,
+ unsigned long size, unsigned long n)
+{
+ if (n >= size)
+ return size;
+
+ if (small_const_nbits(size)) {
+ unsigned long val = *addr1 & *addr2 & GENMASK(size - 1, 0);
+
+ return val ? fns(val, n) : size;
+ }
+
+ return __find_nth_and_bit(addr1, addr2, size, n);
+}
+
+/**
+ * find_nth_andnot_bit - find N'th set bit in 2 memory regions,
+ * flipping bits in 2nd region
+ * @addr1: The 1st address to start the search at
+ * @addr2: The 2nd address to start the search at
+ * @size: The maximum number of bits to search
+ * @n: Index of the set bit to find, counting from 0
+ *
+ * Returns the bit number of the N'th set bit.
+ * If no such bit exists, returns @size.
+ */
+static inline
+unsigned long find_nth_andnot_bit(const unsigned long *addr1, const unsigned long *addr2,
+ unsigned long size, unsigned long n)
+{
+ if (n >= size)
+ return size;
+
+ if (small_const_nbits(size)) {
+ unsigned long val = *addr1 & (~*addr2) & GENMASK(size - 1, 0);
+
+ return val ? fns(val, n) : size;
+ }
+
+ return __find_nth_andnot_bit(addr1, addr2, size, n);
+}
+
#ifndef find_first_and_bit
/**
* find_first_and_bit - find the first set bit in both memory regions
@@ -194,6 +324,78 @@ unsigned long find_last_bit(const unsigned long *addr, unsigned long size)
#endif
/**
+ * find_next_and_bit_wrap - find the next set bit in both memory regions
+ * @addr1: The first address to base the search on
+ * @addr2: The second address to base the search on
+ * @size: The bitmap size in bits
+ * @offset: The bitnumber to start searching at
+ *
+ * Returns the bit number for the next set bit, or the first set bit up to @offset.
+ * If no bits are set, returns @size.
+ */
+static inline
+unsigned long find_next_and_bit_wrap(const unsigned long *addr1,
+ const unsigned long *addr2,
+ unsigned long size, unsigned long offset)
+{
+ unsigned long bit = find_next_and_bit(addr1, addr2, size, offset);
+
+ if (bit < size)
+ return bit;
+
+ bit = find_first_and_bit(addr1, addr2, offset);
+ return bit < offset ? bit : size;
+}
+
+/**
+ * find_next_bit_wrap - find the next set bit in a memory region
+ * @addr: The address to base the search on
+ * @size: The bitmap size in bits
+ * @offset: The bitnumber to start searching at
+ *
+ * Returns the bit number for the next set bit, or the first set bit up to @offset.
+ * If no bits are set, returns @size.
+ */
+static inline
+unsigned long find_next_bit_wrap(const unsigned long *addr,
+ unsigned long size, unsigned long offset)
+{
+ unsigned long bit = find_next_bit(addr, size, offset);
+
+ if (bit < size)
+ return bit;
+
+ bit = find_first_bit(addr, offset);
+ return bit < offset ? bit : size;
+}
+
+/*
+ * Helper for for_each_set_bit_wrap(). Make sure you're doing the right thing
+ * before using it alone.
+ */
+static inline
+unsigned long __for_each_wrap(const unsigned long *bitmap, unsigned long size,
+ unsigned long start, unsigned long n)
+{
+ unsigned long bit;
+
+ /* If not wrapped around */
+ if (n > start) {
+ /* and have a bit, just return it. */
+ bit = find_next_bit(bitmap, size, n);
+ if (bit < size)
+ return bit;
+
+ /* Otherwise, wrap around and ... */
+ n = 0;
+ }
+
+ /* Search the other part. */
+ bit = find_next_bit(bitmap, start, n);
+ return bit < start ? bit : size;
+}
+
+/**
* find_next_clump8 - find next 8-bit clump with set bits in a memory region
* @clump: location to store copy of found clump
* @addr: address to base the search on
@@ -247,7 +449,21 @@ unsigned long find_next_zero_bit_le(const void *addr, unsigned
return val == ~0UL ? size : ffz(val);
}
- return _find_next_bit(addr, NULL, size, offset, ~0UL, 1);
+ return _find_next_zero_bit_le(addr, size, offset);
+}
+#endif
+
+#ifndef find_first_zero_bit_le
+static inline
+unsigned long find_first_zero_bit_le(const void *addr, unsigned long size)
+{
+ if (small_const_nbits(size)) {
+ unsigned long val = swab(*(const unsigned long *)addr) | ~GENMASK(size - 1, 0);
+
+ return val == ~0UL ? size : ffz(val);
+ }
+
+ return _find_first_zero_bit_le(addr, size);
}
#endif
@@ -266,40 +482,39 @@ unsigned long find_next_bit_le(const void *addr, unsigned
return val ? __ffs(val) : size;
}
- return _find_next_bit(addr, NULL, size, offset, 0UL, 1);
+ return _find_next_bit_le(addr, size, offset);
}
#endif
-#ifndef find_first_zero_bit_le
-#define find_first_zero_bit_le(addr, size) \
- find_next_zero_bit_le((addr), (size), 0)
-#endif
-
#else
#error "Please fix <asm/byteorder.h>"
#endif
#define for_each_set_bit(bit, addr, size) \
- for ((bit) = find_next_bit((addr), (size), 0); \
- (bit) < (size); \
- (bit) = find_next_bit((addr), (size), (bit) + 1))
+ for ((bit) = 0; (bit) = find_next_bit((addr), (size), (bit)), (bit) < (size); (bit)++)
+
+#define for_each_and_bit(bit, addr1, addr2, size) \
+ for ((bit) = 0; \
+ (bit) = find_next_and_bit((addr1), (addr2), (size), (bit)), (bit) < (size);\
+ (bit)++)
+
+#define for_each_andnot_bit(bit, addr1, addr2, size) \
+ for ((bit) = 0; \
+ (bit) = find_next_andnot_bit((addr1), (addr2), (size), (bit)), (bit) < (size);\
+ (bit)++)
/* same as for_each_set_bit() but use bit as value to start with */
#define for_each_set_bit_from(bit, addr, size) \
- for ((bit) = find_next_bit((addr), (size), (bit)); \
- (bit) < (size); \
- (bit) = find_next_bit((addr), (size), (bit) + 1))
+ for (; (bit) = find_next_bit((addr), (size), (bit)), (bit) < (size); (bit)++)
#define for_each_clear_bit(bit, addr, size) \
- for ((bit) = find_next_zero_bit((addr), (size), 0); \
- (bit) < (size); \
- (bit) = find_next_zero_bit((addr), (size), (bit) + 1))
+ for ((bit) = 0; \
+ (bit) = find_next_zero_bit((addr), (size), (bit)), (bit) < (size); \
+ (bit)++)
/* same as for_each_clear_bit() but use bit as value to start with */
#define for_each_clear_bit_from(bit, addr, size) \
- for ((bit) = find_next_zero_bit((addr), (size), (bit)); \
- (bit) < (size); \
- (bit) = find_next_zero_bit((addr), (size), (bit) + 1))
+ for (; (bit) = find_next_zero_bit((addr), (size), (bit)), (bit) < (size); (bit)++)
/**
* for_each_set_bitrange - iterate over all set bit ranges [b; e)
@@ -309,11 +524,11 @@ unsigned long find_next_bit_le(const void *addr, unsigned
* @size: bitmap size in number of bits
*/
#define for_each_set_bitrange(b, e, addr, size) \
- for ((b) = find_next_bit((addr), (size), 0), \
- (e) = find_next_zero_bit((addr), (size), (b) + 1); \
+ for ((b) = 0; \
+ (b) = find_next_bit((addr), (size), b), \
+ (e) = find_next_zero_bit((addr), (size), (b) + 1), \
(b) < (size); \
- (b) = find_next_bit((addr), (size), (e) + 1), \
- (e) = find_next_zero_bit((addr), (size), (b) + 1))
+ (b) = (e) + 1)
/**
* for_each_set_bitrange_from - iterate over all set bit ranges [b; e)
@@ -323,11 +538,11 @@ unsigned long find_next_bit_le(const void *addr, unsigned
* @size: bitmap size in number of bits
*/
#define for_each_set_bitrange_from(b, e, addr, size) \
- for ((b) = find_next_bit((addr), (size), (b)), \
- (e) = find_next_zero_bit((addr), (size), (b) + 1); \
+ for (; \
+ (b) = find_next_bit((addr), (size), (b)), \
+ (e) = find_next_zero_bit((addr), (size), (b) + 1), \
(b) < (size); \
- (b) = find_next_bit((addr), (size), (e) + 1), \
- (e) = find_next_zero_bit((addr), (size), (b) + 1))
+ (b) = (e) + 1)
/**
* for_each_clear_bitrange - iterate over all unset bit ranges [b; e)
@@ -337,11 +552,11 @@ unsigned long find_next_bit_le(const void *addr, unsigned
* @size: bitmap size in number of bits
*/
#define for_each_clear_bitrange(b, e, addr, size) \
- for ((b) = find_next_zero_bit((addr), (size), 0), \
- (e) = find_next_bit((addr), (size), (b) + 1); \
+ for ((b) = 0; \
+ (b) = find_next_zero_bit((addr), (size), (b)), \
+ (e) = find_next_bit((addr), (size), (b) + 1), \
(b) < (size); \
- (b) = find_next_zero_bit((addr), (size), (e) + 1), \
- (e) = find_next_bit((addr), (size), (b) + 1))
+ (b) = (e) + 1)
/**
* for_each_clear_bitrange_from - iterate over all unset bit ranges [b; e)
@@ -351,11 +566,24 @@ unsigned long find_next_bit_le(const void *addr, unsigned
* @size: bitmap size in number of bits
*/
#define for_each_clear_bitrange_from(b, e, addr, size) \
- for ((b) = find_next_zero_bit((addr), (size), (b)), \
- (e) = find_next_bit((addr), (size), (b) + 1); \
+ for (; \
+ (b) = find_next_zero_bit((addr), (size), (b)), \
+ (e) = find_next_bit((addr), (size), (b) + 1), \
(b) < (size); \
- (b) = find_next_zero_bit((addr), (size), (e) + 1), \
- (e) = find_next_bit((addr), (size), (b) + 1))
+ (b) = (e) + 1)
+
+/**
+ * for_each_set_bit_wrap - iterate over all set bits starting from @start, and
+ * wrapping around the end of bitmap.
+ * @bit: offset for current iteration
+ * @addr: bitmap address to base the search on
+ * @size: bitmap size in number of bits
+ * @start: Starting bit for bitmap traversing, wrapping around the bitmap end
+ */
+#define for_each_set_bit_wrap(bit, addr, size, start) \
+ for ((bit) = find_next_bit_wrap((addr), (size), (start)); \
+ (bit) < (size); \
+ (bit) = __for_each_wrap((addr), (size), (start), (bit) + 1))
/**
* for_each_set_clump8 - iterate over bitmap for each 8-bit clump with set bits
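The reworked iterators above can be exercised directly. A minimal sketch of
for_each_set_bit_wrap() and find_nth_bit() with an illustrative 16-bit map:

    #include <linux/bitmap.h>
    #include <linux/find.h>
    #include <linux/printk.h>

    DECLARE_BITMAP(map, 16);
    unsigned long bit;

    bitmap_zero(map, 16);
    __set_bit(2, map);
    __set_bit(9, map);
    __set_bit(13, map);

    /* Starting at 12: visits 13, then wraps around to 2 and 9. */
    for_each_set_bit_wrap(bit, map, 16, 12)
        pr_info("bit %lu\n", bit);

    /* Second (n = 1) set bit is 9; returns 16 if fewer than two bits are set. */
    bit = find_nth_bit(map, 16, 1);
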
diff --git a/include/linux/firmware/xlnx-zynqmp.h b/include/linux/firmware/xlnx-zynqmp.h
index 9f50dacbf7d6..76d2b3ebad84 100644
--- a/include/linux/firmware/xlnx-zynqmp.h
+++ b/include/linux/firmware/xlnx-zynqmp.h
@@ -153,6 +153,9 @@ enum pm_ioctl_id {
/* Runtime feature configuration */
IOCTL_SET_FEATURE_CONFIG = 26,
IOCTL_GET_FEATURE_CONFIG = 27,
+ /* Dynamic SD/GEM configuration */
+ IOCTL_SET_SD_CONFIG = 30,
+ IOCTL_SET_GEM_CONFIG = 31,
};
enum pm_query_id {
@@ -400,6 +403,30 @@ enum pm_feature_config_id {
};
/**
+ * enum pm_sd_config_type - PM SD configuration.
+ * @SD_CONFIG_EMMC_SEL: To set SD_EMMC_SEL in CTRL_REG_SD and SD_SLOTTYPE
+ * @SD_CONFIG_BASECLK: To set SD_BASECLK in SD_CONFIG_REG1
+ * @SD_CONFIG_8BIT: To set SD_8BIT in SD_CONFIG_REG2
+ * @SD_CONFIG_FIXED: To set fixed config registers
+ */
+enum pm_sd_config_type {
+ SD_CONFIG_EMMC_SEL = 1,
+ SD_CONFIG_BASECLK = 2,
+ SD_CONFIG_8BIT = 3,
+ SD_CONFIG_FIXED = 4,
+};
+
+/**
+ * enum pm_gem_config_type - PM GEM configuration.
+ * @GEM_CONFIG_SGMII_MODE: To set GEM_SGMII_MODE in GEM_CLK_CTRL register
+ * @GEM_CONFIG_FIXED: To set fixed config registers
+ */
+enum pm_gem_config_type {
+ GEM_CONFIG_SGMII_MODE = 1,
+ GEM_CONFIG_FIXED = 2,
+};
+
+/**
* struct zynqmp_pm_query_data - PM query data
* @qid: query ID
* @arg1: Argument 1 of query data
@@ -475,6 +502,9 @@ int zynqmp_pm_is_function_supported(const u32 api_id, const u32 id);
int zynqmp_pm_set_feature_config(enum pm_feature_config_id id, u32 value);
int zynqmp_pm_get_feature_config(enum pm_feature_config_id id, u32 *payload);
int zynqmp_pm_register_sgi(u32 sgi_num, u32 reset);
+int zynqmp_pm_set_sd_config(u32 node, enum pm_sd_config_type config, u32 value);
+int zynqmp_pm_set_gem_config(u32 node, enum pm_gem_config_type config,
+ u32 value);
#else
static inline int zynqmp_pm_get_api_version(u32 *version)
{
@@ -745,6 +775,21 @@ static inline int zynqmp_pm_register_sgi(u32 sgi_num, u32 reset)
{
return -ENODEV;
}
+
+static inline int zynqmp_pm_set_sd_config(u32 node,
+ enum pm_sd_config_type config,
+ u32 value)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_set_gem_config(u32 node,
+ enum pm_gem_config_type config,
+ u32 value)
+{
+ return -ENODEV;
+}
+
#endif
#endif /* __FIRMWARE_ZYNQMP_H__ */
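The new SD/GEM configuration wrappers follow the usual zynqmp_pm_* calling
convention. A hedged sketch; sd_node and gem_node are hypothetical PM node ids,
and the chosen values are illustrative only:

    #include <linux/firmware/xlnx-zynqmp.h>

    int ret;

    ret = zynqmp_pm_set_sd_config(sd_node, SD_CONFIG_EMMC_SEL, 1);
    if (!ret)
        ret = zynqmp_pm_set_sd_config(sd_node, SD_CONFIG_FIXED, 0);
    if (!ret)
        ret = zynqmp_pm_set_gem_config(gem_node, GEM_CONFIG_SGMII_MODE, 1);
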
diff --git a/include/linux/fortify-string.h b/include/linux/fortify-string.h
index 3b401fa0f374..4029fe368a4f 100644
--- a/include/linux/fortify-string.h
+++ b/include/linux/fortify-string.h
@@ -2,7 +2,9 @@
#ifndef _LINUX_FORTIFY_STRING_H_
#define _LINUX_FORTIFY_STRING_H_
+#include <linux/bug.h>
#include <linux/const.h>
+#include <linux/limits.h>
#define __FORTIFY_INLINE extern __always_inline __gnu_inline __overloadable
#define __RENAME(x) __asm__(#x)
@@ -17,9 +19,10 @@ void __write_overflow_field(size_t avail, size_t wanted) __compiletime_warning("
#define __compiletime_strlen(p) \
({ \
unsigned char *__p = (unsigned char *)(p); \
- size_t __ret = (size_t)-1; \
- size_t __p_size = __builtin_object_size(p, 1); \
- if (__p_size != (size_t)-1) { \
+ size_t __ret = SIZE_MAX; \
+ size_t __p_size = __member_size(p); \
+ if (__p_size != SIZE_MAX && \
+ __builtin_constant_p(*__p)) { \
size_t __p_len = __p_size - 1; \
if (__builtin_constant_p(__p[__p_len]) && \
__p[__p_len] == '\0') \
@@ -69,20 +72,59 @@ extern char *__underlying_strncpy(char *p, const char *q, __kernel_size_t size)
__underlying_memcpy(dst, src, bytes)
/*
- * Clang's use of __builtin_object_size() within inlines needs hinting via
- * __pass_object_size(). The preference is to only ever use type 1 (member
+ * Clang's use of __builtin_*object_size() within inlines needs hinting via
+ * __pass_*object_size(). The preference is to only ever use type 1 (member
* size, rather than struct size), but there remain some stragglers using
* type 0 that will be converted in the future.
*/
-#define POS __pass_object_size(1)
-#define POS0 __pass_object_size(0)
+#define POS __pass_object_size(1)
+#define POS0 __pass_object_size(0)
+#define __struct_size(p) __builtin_object_size(p, 0)
+#define __member_size(p) __builtin_object_size(p, 1)
+#define __compiletime_lessthan(bounds, length) ( \
+ __builtin_constant_p((bounds) < (length)) && \
+ (bounds) < (length) \
+)
+
+/**
+ * strncpy - Copy a string to memory with non-guaranteed NUL padding
+ *
+ * @p: pointer to destination of copy
+ * @q: pointer to NUL-terminated source string to copy
+ * @size: bytes to write at @p
+ *
+ * If strlen(@q) >= @size, the copy of @q will stop after @size bytes,
+ * and @p will NOT be NUL-terminated
+ *
+ * If strlen(@q) < @size, following the copy of @q, trailing NUL bytes
+ * will be written to @p until @size total bytes have been written.
+ *
+ * Do not use this function. While FORTIFY_SOURCE tries to avoid
+ * over-reads of @q, it cannot defend against writing unterminated
+ * results to @p. Using strncpy() remains ambiguous and fragile.
+ * Instead, please choose an alternative, so that the expectation
+ * of @p's contents is unambiguous:
+ *
+ * +--------------------+-----------------+------------+
+ * | @p needs to be: | padded to @size | not padded |
+ * +====================+=================+============+
+ * | NUL-terminated | strscpy_pad() | strscpy() |
+ * +--------------------+-----------------+------------+
+ * | not NUL-terminated | strtomem_pad() | strtomem() |
+ * +--------------------+-----------------+------------+
+ *
+ * Note strscpy*()'s differing return values for detecting truncation,
+ * and strtomem*()'s expectation that the destination is marked with
+ * __nonstring when it is a character array.
+ *
+ */
__FORTIFY_INLINE __diagnose_as(__builtin_strncpy, 1, 2, 3)
char *strncpy(char * const POS p, const char *q, __kernel_size_t size)
{
- size_t p_size = __builtin_object_size(p, 1);
+ size_t p_size = __member_size(p);
- if (__builtin_constant_p(size) && p_size < size)
+ if (__compiletime_lessthan(p_size, size))
__write_overflow();
if (p_size < size)
fortify_panic(__func__);
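
For illustration only (not part of the patch): a minimal sketch of how a caller might follow the table above when moving off strncpy(). The struct and field names are made up; strscpy_pad() is the existing string helper, and strtomem_pad() is the fixed-width helper the table refers to (assumed here to take (dest, src, pad)).

	struct label {
		char name[32];			/* must stay NUL-terminated */
		char tag[8] __nonstring;	/* fixed width, never NUL-terminated */
	};

	static int label_set(struct label *l, const char *src)
	{
		ssize_t ret;

		/* NUL-terminated destination, remaining bytes zeroed */
		ret = strscpy_pad(l->name, src, sizeof(l->name));
		if (ret < 0)
			return ret;	/* -E2BIG reports truncation, unlike strncpy() */

		/* fixed-width field, padded with zero bytes, no terminator */
		strtomem_pad(l->tag, src, 0);
		return 0;
	}
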
@@ -92,9 +134,9 @@ char *strncpy(char * const POS p, const char *q, __kernel_size_t size)
__FORTIFY_INLINE __diagnose_as(__builtin_strcat, 1, 2)
char *strcat(char * const POS p, const char *q)
{
- size_t p_size = __builtin_object_size(p, 1);
+ size_t p_size = __member_size(p);
- if (p_size == (size_t)-1)
+ if (p_size == SIZE_MAX)
return __underlying_strcat(p, q);
if (strlcat(p, q, p_size) >= p_size)
fortify_panic(__func__);
@@ -104,12 +146,12 @@ char *strcat(char * const POS p, const char *q)
extern __kernel_size_t __real_strnlen(const char *, __kernel_size_t) __RENAME(strnlen);
__FORTIFY_INLINE __kernel_size_t strnlen(const char * const POS p, __kernel_size_t maxlen)
{
- size_t p_size = __builtin_object_size(p, 1);
+ size_t p_size = __member_size(p);
size_t p_len = __compiletime_strlen(p);
size_t ret;
/* We can take compile-time actions when maxlen is const. */
- if (__builtin_constant_p(maxlen) && p_len != (size_t)-1) {
+ if (__builtin_constant_p(maxlen) && p_len != SIZE_MAX) {
/* If p is const, we can use its compile-time-known len. */
if (maxlen >= p_size)
return p_len;
@@ -134,10 +176,10 @@ __FORTIFY_INLINE __diagnose_as(__builtin_strlen, 1)
__kernel_size_t __fortify_strlen(const char * const POS p)
{
__kernel_size_t ret;
- size_t p_size = __builtin_object_size(p, 1);
+ size_t p_size = __member_size(p);
/* Give up if we don't know how large p is. */
- if (p_size == (size_t)-1)
+ if (p_size == SIZE_MAX)
return __underlying_strlen(p);
ret = strnlen(p, p_size);
if (p_size <= ret)
@@ -149,12 +191,12 @@ __kernel_size_t __fortify_strlen(const char * const POS p)
extern size_t __real_strlcpy(char *, const char *, size_t) __RENAME(strlcpy);
__FORTIFY_INLINE size_t strlcpy(char * const POS p, const char * const POS q, size_t size)
{
- size_t p_size = __builtin_object_size(p, 1);
- size_t q_size = __builtin_object_size(q, 1);
+ size_t p_size = __member_size(p);
+ size_t q_size = __member_size(q);
size_t q_len; /* Full count of source string length. */
size_t len; /* Count of characters going into destination. */
- if (p_size == (size_t)-1 && q_size == (size_t)-1)
+ if (p_size == SIZE_MAX && q_size == SIZE_MAX)
return __real_strlcpy(p, q, size);
q_len = strlen(q);
len = (q_len >= size) ? size - 1 : q_len;
@@ -178,18 +220,18 @@ __FORTIFY_INLINE ssize_t strscpy(char * const POS p, const char * const POS q, s
{
size_t len;
/* Use string size rather than possible enclosing struct size. */
- size_t p_size = __builtin_object_size(p, 1);
- size_t q_size = __builtin_object_size(q, 1);
+ size_t p_size = __member_size(p);
+ size_t q_size = __member_size(q);
	/* If we cannot get the size of p and q, default to calling strscpy. */
- if (p_size == (size_t) -1 && q_size == (size_t) -1)
+ if (p_size == SIZE_MAX && q_size == SIZE_MAX)
return __real_strscpy(p, q, size);
/*
* If size can be known at compile time and is greater than
* p_size, generate a compile time write overflow error.
*/
- if (__builtin_constant_p(size) && size > p_size)
+ if (__compiletime_lessthan(p_size, size))
__write_overflow();
/*
@@ -224,10 +266,10 @@ __FORTIFY_INLINE __diagnose_as(__builtin_strncat, 1, 2, 3)
char *strncat(char * const POS p, const char * const POS q, __kernel_size_t count)
{
size_t p_len, copy_len;
- size_t p_size = __builtin_object_size(p, 1);
- size_t q_size = __builtin_object_size(q, 1);
+ size_t p_size = __member_size(p);
+ size_t q_size = __member_size(q);
- if (p_size == (size_t)-1 && q_size == (size_t)-1)
+ if (p_size == SIZE_MAX && q_size == SIZE_MAX)
return __underlying_strncat(p, q, count);
p_len = strlen(p);
copy_len = strnlen(q, count);
@@ -246,15 +288,16 @@ __FORTIFY_INLINE void fortify_memset_chk(__kernel_size_t size,
/*
* Length argument is a constant expression, so we
* can perform compile-time bounds checking where
- * buffer sizes are known.
+ * buffer sizes are also known at compile time.
*/
/* Error when size is larger than enclosing struct. */
- if (p_size > p_size_field && p_size < size)
+ if (__compiletime_lessthan(p_size_field, p_size) &&
+ __compiletime_lessthan(p_size, size))
__write_overflow();
/* Warn when write size is larger than dest field. */
- if (p_size_field < size)
+ if (__compiletime_lessthan(p_size_field, size))
__write_overflow_field(p_size_field, size);
}
/*
@@ -268,10 +311,10 @@ __FORTIFY_INLINE void fortify_memset_chk(__kernel_size_t size,
/*
* Always stop accesses beyond the struct that contains the
* field, when the buffer's remaining size is known.
- * (The -1 test is to optimize away checks where the buffer
+ * (The SIZE_MAX test is to optimize away checks where the buffer
* lengths are unknown.)
*/
- if (p_size != (size_t)(-1) && p_size < size)
+ if (p_size != SIZE_MAX && p_size < size)
fortify_panic("memset");
}
@@ -282,11 +325,13 @@ __FORTIFY_INLINE void fortify_memset_chk(__kernel_size_t size,
})
/*
- * __builtin_object_size() must be captured here to avoid evaluating argument
- * side-effects further into the macro layers.
+ * __struct_size() vs __member_size() must be captured here to avoid
+ * evaluating argument side-effects further into the macro layers.
*/
+#ifndef CONFIG_KMSAN
#define memset(p, c, s) __fortify_memset_chk(p, c, s, \
- __builtin_object_size(p, 0), __builtin_object_size(p, 1))
+ __struct_size(p), __member_size(p))
+#endif
/*
* To make sure the compiler can enforce protection against buffer overflows,
@@ -319,7 +364,7 @@ __FORTIFY_INLINE void fortify_memset_chk(__kernel_size_t size,
* V = vulnerable to run-time overflow (will need refactoring to solve)
*
*/
-__FORTIFY_INLINE void fortify_memcpy_chk(__kernel_size_t size,
+__FORTIFY_INLINE bool fortify_memcpy_chk(__kernel_size_t size,
const size_t p_size,
const size_t q_size,
const size_t p_size_field,
@@ -330,25 +375,28 @@ __FORTIFY_INLINE void fortify_memcpy_chk(__kernel_size_t size,
/*
* Length argument is a constant expression, so we
* can perform compile-time bounds checking where
- * buffer sizes are known.
+ * buffer sizes are also known at compile time.
*/
/* Error when size is larger than enclosing struct. */
- if (p_size > p_size_field && p_size < size)
+ if (__compiletime_lessthan(p_size_field, p_size) &&
+ __compiletime_lessthan(p_size, size))
__write_overflow();
- if (q_size > q_size_field && q_size < size)
+ if (__compiletime_lessthan(q_size_field, q_size) &&
+ __compiletime_lessthan(q_size, size))
__read_overflow2();
/* Warn when write size argument larger than dest field. */
- if (p_size_field < size)
+ if (__compiletime_lessthan(p_size_field, size))
__write_overflow_field(p_size_field, size);
/*
* Warn for source field over-read when building with W=1
* or when an over-write happened, so both can be fixed at
* the same time.
*/
- if ((IS_ENABLED(KBUILD_EXTRA_WARN1) || p_size_field < size) &&
- q_size_field < size)
+ if ((IS_ENABLED(KBUILD_EXTRA_WARN1) ||
+ __compiletime_lessthan(p_size_field, size)) &&
+ __compiletime_lessthan(q_size_field, size))
__read_overflow2_field(q_size_field, size);
}
/*
@@ -362,41 +410,104 @@ __FORTIFY_INLINE void fortify_memcpy_chk(__kernel_size_t size,
/*
* Always stop accesses beyond the struct that contains the
* field, when the buffer's remaining size is known.
- * (The -1 test is to optimize away checks where the buffer
+ * (The SIZE_MAX test is to optimize away checks where the buffer
* lengths are unknown.)
*/
- if ((p_size != (size_t)(-1) && p_size < size) ||
- (q_size != (size_t)(-1) && q_size < size))
+ if ((p_size != SIZE_MAX && p_size < size) ||
+ (q_size != SIZE_MAX && q_size < size))
fortify_panic(func);
+
+ /*
+ * Warn when writing beyond destination field size.
+ *
+ * We must ignore p_size_field == 0 for existing 0-element
+ * fake flexible arrays, until they are all converted to
+ * proper flexible arrays.
+ *
+ * The implementation of __builtin_*object_size() behaves
+ * like sizeof() when not directly referencing a flexible
+ * array member, which means there will be many bounds checks
+ * that will appear at run-time, without a way for them to be
+ * detected at compile-time (as can be done when the destination
+ * is specifically the flexible array member).
+ * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=101832
+ */
+ if (p_size_field != 0 && p_size_field != SIZE_MAX &&
+ p_size != p_size_field && p_size_field < size)
+ return true;
+
+ return false;
}
#define __fortify_memcpy_chk(p, q, size, p_size, q_size, \
p_size_field, q_size_field, op) ({ \
size_t __fortify_size = (size_t)(size); \
- fortify_memcpy_chk(__fortify_size, p_size, q_size, \
- p_size_field, q_size_field, #op); \
+ WARN_ONCE(fortify_memcpy_chk(__fortify_size, p_size, q_size, \
+ p_size_field, q_size_field, #op), \
+ #op ": detected field-spanning write (size %zu) of single %s (size %zu)\n", \
+ __fortify_size, \
+ "field \"" #p "\" at " __FILE__ ":" __stringify(__LINE__), \
+ p_size_field); \
__underlying_##op(p, q, __fortify_size); \
})
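
For illustration only (not part of the patch): the run-time branch above is what produces the "detected field-spanning write" warning. With made-up structs, a memcpy() that overruns a fixed-size member into the rest of its struct now warns once, while a proper flexible-array destination does not trip the field check, since __member_size() cannot see its bound.

	struct legacy {
		u32 len;
		u8 data[4];	/* fixed-size member used as a fake flexible array */
		u8 more[60];
	};

	struct modern {
		u32 len;
		u8 data[];	/* proper flexible array member */
	};

	static void copy_in(struct legacy *l, struct modern *m,
			    const void *src, size_t len)
	{
		memcpy(l->data, src, len);	/* warns once at run time when len > 4 */
		memcpy(m->data, src, len);	/* no field warning: member size is SIZE_MAX */
	}
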
/*
- * __builtin_object_size() must be captured here to avoid evaluating argument
- * side-effects further into the macro layers.
+ * Notes about compile-time buffer size detection:
+ *
+ * With these types...
+ *
+ * struct middle {
+ * u16 a;
+ * u8 middle_buf[16];
+ * int b;
+ * };
+ * struct end {
+ * u16 a;
+ * u8 end_buf[16];
+ * };
+ * struct flex {
+ * int a;
+ * u8 flex_buf[];
+ * };
+ *
+ * void func(TYPE *ptr) { ... }
+ *
+ * Cases where destination size cannot be currently detected:
+ * - the size of ptr's object (seemingly by design, gcc & clang fail):
+ * __builtin_object_size(ptr, 1) == SIZE_MAX
+ * - the size of flexible arrays in ptr's obj (by design, dynamic size):
+ * __builtin_object_size(ptr->flex_buf, 1) == SIZE_MAX
+ * - the size of ANY array at the end of ptr's obj (gcc and clang bug):
+ * __builtin_object_size(ptr->end_buf, 1) == SIZE_MAX
+ * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=101836
+ *
+ * Cases where destination size is currently detected:
+ * - the size of non-array members within ptr's object:
+ * __builtin_object_size(ptr->a, 1) == 2
+ * - the size of non-flexible-array in the middle of ptr's obj:
+ * __builtin_object_size(ptr->middle_buf, 1) == 16
+ *
+ */
+
+/*
+ * __struct_size() vs __member_size() must be captured here to avoid
+ * evaluating argument side-effects further into the macro layers.
*/
#define memcpy(p, q, s) __fortify_memcpy_chk(p, q, s, \
- __builtin_object_size(p, 0), __builtin_object_size(q, 0), \
- __builtin_object_size(p, 1), __builtin_object_size(q, 1), \
+ __struct_size(p), __struct_size(q), \
+ __member_size(p), __member_size(q), \
memcpy)
#define memmove(p, q, s) __fortify_memcpy_chk(p, q, s, \
- __builtin_object_size(p, 0), __builtin_object_size(q, 0), \
- __builtin_object_size(p, 1), __builtin_object_size(q, 1), \
+ __struct_size(p), __struct_size(q), \
+ __member_size(p), __member_size(q), \
memmove)
extern void *__real_memscan(void *, int, __kernel_size_t) __RENAME(memscan);
__FORTIFY_INLINE void *memscan(void * const POS0 p, int c, __kernel_size_t size)
{
- size_t p_size = __builtin_object_size(p, 0);
+ size_t p_size = __struct_size(p);
- if (__builtin_constant_p(size) && p_size < size)
+ if (__compiletime_lessthan(p_size, size))
__read_overflow();
if (p_size < size)
fortify_panic(__func__);
@@ -406,13 +517,13 @@ __FORTIFY_INLINE void *memscan(void * const POS0 p, int c, __kernel_size_t size)
__FORTIFY_INLINE __diagnose_as(__builtin_memcmp, 1, 2, 3)
int memcmp(const void * const POS0 p, const void * const POS0 q, __kernel_size_t size)
{
- size_t p_size = __builtin_object_size(p, 0);
- size_t q_size = __builtin_object_size(q, 0);
+ size_t p_size = __struct_size(p);
+ size_t q_size = __struct_size(q);
if (__builtin_constant_p(size)) {
- if (p_size < size)
+ if (__compiletime_lessthan(p_size, size))
__read_overflow();
- if (q_size < size)
+ if (__compiletime_lessthan(q_size, size))
__read_overflow2();
}
if (p_size < size || q_size < size)
@@ -423,9 +534,9 @@ int memcmp(const void * const POS0 p, const void * const POS0 q, __kernel_size_t
__FORTIFY_INLINE __diagnose_as(__builtin_memchr, 1, 2, 3)
void *memchr(const void * const POS0 p, int c, __kernel_size_t size)
{
- size_t p_size = __builtin_object_size(p, 0);
+ size_t p_size = __struct_size(p);
- if (__builtin_constant_p(size) && p_size < size)
+ if (__compiletime_lessthan(p_size, size))
__read_overflow();
if (p_size < size)
fortify_panic(__func__);
@@ -435,9 +546,9 @@ void *memchr(const void * const POS0 p, int c, __kernel_size_t size)
void *__real_memchr_inv(const void *s, int c, size_t n) __RENAME(memchr_inv);
__FORTIFY_INLINE void *memchr_inv(const void * const POS0 p, int c, size_t size)
{
- size_t p_size = __builtin_object_size(p, 0);
+ size_t p_size = __struct_size(p);
- if (__builtin_constant_p(size) && p_size < size)
+ if (__compiletime_lessthan(p_size, size))
__read_overflow();
if (p_size < size)
fortify_panic(__func__);
@@ -447,9 +558,9 @@ __FORTIFY_INLINE void *memchr_inv(const void * const POS0 p, int c, size_t size)
extern void *__real_kmemdup(const void *src, size_t len, gfp_t gfp) __RENAME(kmemdup);
__FORTIFY_INLINE void *kmemdup(const void * const POS0 p, size_t size, gfp_t gfp)
{
- size_t p_size = __builtin_object_size(p, 0);
+ size_t p_size = __struct_size(p);
- if (__builtin_constant_p(size) && p_size < size)
+ if (__compiletime_lessthan(p_size, size))
__read_overflow();
if (p_size < size)
fortify_panic(__func__);
@@ -460,16 +571,18 @@ __FORTIFY_INLINE void *kmemdup(const void * const POS0 p, size_t size, gfp_t gfp
__FORTIFY_INLINE __diagnose_as(__builtin_strcpy, 1, 2)
char *strcpy(char * const POS p, const char * const POS q)
{
- size_t p_size = __builtin_object_size(p, 1);
- size_t q_size = __builtin_object_size(q, 1);
+ size_t p_size = __member_size(p);
+ size_t q_size = __member_size(q);
size_t size;
/* If neither buffer size is known, immediately give up. */
- if (p_size == (size_t)-1 && q_size == (size_t)-1)
+ if (__builtin_constant_p(p_size) &&
+ __builtin_constant_p(q_size) &&
+ p_size == SIZE_MAX && q_size == SIZE_MAX)
return __underlying_strcpy(p, q);
size = strlen(q) + 1;
/* Compile-time check for const size overflow. */
- if (__builtin_constant_p(size) && p_size < size)
+ if (__compiletime_lessthan(p_size, size))
__write_overflow();
/* Run-time check for dynamic size overflow. */
if (p_size < size)
diff --git a/include/linux/freezer.h b/include/linux/freezer.h
index 0621c5f86c39..b303472255be 100644
--- a/include/linux/freezer.h
+++ b/include/linux/freezer.h
@@ -8,9 +8,11 @@
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/atomic.h>
+#include <linux/jump_label.h>
#ifdef CONFIG_FREEZER
-extern atomic_t system_freezing_cnt; /* nr of freezing conds in effect */
+DECLARE_STATIC_KEY_FALSE(freezer_active);
+
extern bool pm_freezing; /* PM freezing in effect */
extern bool pm_nosig_freezing; /* PM nosig freezing in effect */
@@ -22,10 +24,7 @@ extern unsigned int freeze_timeout_msecs;
/*
* Check if a process has been frozen
*/
-static inline bool frozen(struct task_struct *p)
-{
- return p->flags & PF_FROZEN;
-}
+extern bool frozen(struct task_struct *p);
extern bool freezing_slow_path(struct task_struct *p);
@@ -34,9 +33,10 @@ extern bool freezing_slow_path(struct task_struct *p);
*/
static inline bool freezing(struct task_struct *p)
{
- if (likely(!atomic_read(&system_freezing_cnt)))
- return false;
- return freezing_slow_path(p);
+ if (static_branch_unlikely(&freezer_active))
+ return freezing_slow_path(p);
+
+ return false;
}
/* Takes and releases task alloc lock using task_lock() */
@@ -48,23 +48,14 @@ extern int freeze_kernel_threads(void);
extern void thaw_processes(void);
extern void thaw_kernel_threads(void);
-/*
- * DO NOT ADD ANY NEW CALLERS OF THIS FUNCTION
- * If try_to_freeze causes a lockdep warning it means the caller may deadlock
- */
-static inline bool try_to_freeze_unsafe(void)
+static inline bool try_to_freeze(void)
{
might_sleep();
if (likely(!freezing(current)))
return false;
- return __refrigerator(false);
-}
-
-static inline bool try_to_freeze(void)
-{
if (!(current->flags & PF_NOFREEZE))
debug_check_no_locks_held();
- return try_to_freeze_unsafe();
+ return __refrigerator(false);
}
extern bool freeze_task(struct task_struct *p);
@@ -79,195 +70,6 @@ static inline bool cgroup_freezing(struct task_struct *task)
}
#endif /* !CONFIG_CGROUP_FREEZER */
-/*
- * The PF_FREEZER_SKIP flag should be set by a vfork parent right before it
- * calls wait_for_completion(&vfork) and reset right after it returns from this
- * function. Next, the parent should call try_to_freeze() to freeze itself
- * appropriately in case the child has exited before the freezing of tasks is
- * complete. However, we don't want kernel threads to be frozen in unexpected
- * places, so we allow them to block freeze_processes() instead or to set
- * PF_NOFREEZE if needed. Fortunately, in the ____call_usermodehelper() case the
- * parent won't really block freeze_processes(), since ____call_usermodehelper()
- * (the child) does a little before exec/exit and it can't be frozen before
- * waking up the parent.
- */
-
-
-/**
- * freezer_do_not_count - tell freezer to ignore %current
- *
- * Tell freezers to ignore the current task when determining whether the
- * target frozen state is reached. IOW, the current task will be
- * considered frozen enough by freezers.
- *
- * The caller shouldn't do anything which isn't allowed for a frozen task
- * until freezer_count() is called. Usually, freezer[_do_not]_count() pair
- * wrap a scheduling operation and nothing much else.
- */
-static inline void freezer_do_not_count(void)
-{
- current->flags |= PF_FREEZER_SKIP;
-}
-
-/**
- * freezer_count - tell freezer to stop ignoring %current
- *
- * Undo freezer_do_not_count(). It tells freezers that %current should be
- * considered again and tries to freeze if freezing condition is already in
- * effect.
- */
-static inline void freezer_count(void)
-{
- current->flags &= ~PF_FREEZER_SKIP;
- /*
- * If freezing is in progress, the following paired with smp_mb()
- * in freezer_should_skip() ensures that either we see %true
- * freezing() or freezer_should_skip() sees !PF_FREEZER_SKIP.
- */
- smp_mb();
- try_to_freeze();
-}
-
-/* DO NOT ADD ANY NEW CALLERS OF THIS FUNCTION */
-static inline void freezer_count_unsafe(void)
-{
- current->flags &= ~PF_FREEZER_SKIP;
- smp_mb();
- try_to_freeze_unsafe();
-}
-
-/**
- * freezer_should_skip - whether to skip a task when determining frozen
- * state is reached
- * @p: task in question
- *
- * This function is used by freezers after establishing %true freezing() to
- * test whether a task should be skipped when determining the target frozen
- * state is reached. IOW, if this function returns %true, @p is considered
- * frozen enough.
- */
-static inline bool freezer_should_skip(struct task_struct *p)
-{
- /*
- * The following smp_mb() paired with the one in freezer_count()
- * ensures that either freezer_count() sees %true freezing() or we
- * see cleared %PF_FREEZER_SKIP and return %false. This makes it
- * impossible for a task to slip frozen state testing after
- * clearing %PF_FREEZER_SKIP.
- */
- smp_mb();
- return p->flags & PF_FREEZER_SKIP;
-}
-
-/*
- * These functions are intended to be used whenever you want to allow a sleeping
- * task to be frozen. Note that neither return any clear indication of
- * whether a freeze event happened while in this function.
- */
-
-/* Like schedule(), but should not block the freezer. */
-static inline void freezable_schedule(void)
-{
- freezer_do_not_count();
- schedule();
- freezer_count();
-}
-
-/* DO NOT ADD ANY NEW CALLERS OF THIS FUNCTION */
-static inline void freezable_schedule_unsafe(void)
-{
- freezer_do_not_count();
- schedule();
- freezer_count_unsafe();
-}
-
-/*
- * Like schedule_timeout(), but should not block the freezer. Do not
- * call this with locks held.
- */
-static inline long freezable_schedule_timeout(long timeout)
-{
- long __retval;
- freezer_do_not_count();
- __retval = schedule_timeout(timeout);
- freezer_count();
- return __retval;
-}
-
-/*
- * Like schedule_timeout_interruptible(), but should not block the freezer. Do not
- * call this with locks held.
- */
-static inline long freezable_schedule_timeout_interruptible(long timeout)
-{
- long __retval;
- freezer_do_not_count();
- __retval = schedule_timeout_interruptible(timeout);
- freezer_count();
- return __retval;
-}
-
-/* DO NOT ADD ANY NEW CALLERS OF THIS FUNCTION */
-static inline long freezable_schedule_timeout_interruptible_unsafe(long timeout)
-{
- long __retval;
-
- freezer_do_not_count();
- __retval = schedule_timeout_interruptible(timeout);
- freezer_count_unsafe();
- return __retval;
-}
-
-/* Like schedule_timeout_killable(), but should not block the freezer. */
-static inline long freezable_schedule_timeout_killable(long timeout)
-{
- long __retval;
- freezer_do_not_count();
- __retval = schedule_timeout_killable(timeout);
- freezer_count();
- return __retval;
-}
-
-/* DO NOT ADD ANY NEW CALLERS OF THIS FUNCTION */
-static inline long freezable_schedule_timeout_killable_unsafe(long timeout)
-{
- long __retval;
- freezer_do_not_count();
- __retval = schedule_timeout_killable(timeout);
- freezer_count_unsafe();
- return __retval;
-}
-
-/*
- * Like schedule_hrtimeout_range(), but should not block the freezer. Do not
- * call this with locks held.
- */
-static inline int freezable_schedule_hrtimeout_range(ktime_t *expires,
- u64 delta, const enum hrtimer_mode mode)
-{
- int __retval;
- freezer_do_not_count();
- __retval = schedule_hrtimeout_range(expires, delta, mode);
- freezer_count();
- return __retval;
-}
-
-/*
- * Freezer-friendly wrappers around wait_event_interruptible(),
- * wait_event_killable() and wait_event_interruptible_timeout(), originally
- * defined in <linux/wait.h>
- */
-
-/* DO NOT ADD ANY NEW CALLERS OF THIS FUNCTION */
-#define wait_event_freezekillable_unsafe(wq, condition) \
-({ \
- int __retval; \
- freezer_do_not_count(); \
- __retval = wait_event_killable(wq, (condition)); \
- freezer_count_unsafe(); \
- __retval; \
-})
-
#else /* !CONFIG_FREEZER */
static inline bool frozen(struct task_struct *p) { return false; }
static inline bool freezing(struct task_struct *p) { return false; }
@@ -281,35 +83,8 @@ static inline void thaw_kernel_threads(void) {}
static inline bool try_to_freeze(void) { return false; }
-static inline void freezer_do_not_count(void) {}
-static inline void freezer_count(void) {}
-static inline int freezer_should_skip(struct task_struct *p) { return 0; }
static inline void set_freezable(void) {}
-#define freezable_schedule() schedule()
-
-#define freezable_schedule_unsafe() schedule()
-
-#define freezable_schedule_timeout(timeout) schedule_timeout(timeout)
-
-#define freezable_schedule_timeout_interruptible(timeout) \
- schedule_timeout_interruptible(timeout)
-
-#define freezable_schedule_timeout_interruptible_unsafe(timeout) \
- schedule_timeout_interruptible(timeout)
-
-#define freezable_schedule_timeout_killable(timeout) \
- schedule_timeout_killable(timeout)
-
-#define freezable_schedule_timeout_killable_unsafe(timeout) \
- schedule_timeout_killable(timeout)
-
-#define freezable_schedule_hrtimeout_range(expires, delta, mode) \
- schedule_hrtimeout_range(expires, delta, mode)
-
-#define wait_event_freezekillable_unsafe(wq, condition) \
- wait_event_killable(wq, condition)
-
#endif /* !CONFIG_FREEZER */
#endif /* FREEZER_H_INCLUDED */
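
For illustration only (not part of the patch): with the freezer_do_not_count()/freezable_schedule*() family gone, the remaining pattern for a freezable kernel thread is set_freezable() once and try_to_freeze() in its loop; freezing() is now a static-key test, so the common fast path costs a single patched branch. do_work() below is a hypothetical placeholder.

	#include <linux/freezer.h>
	#include <linux/kthread.h>

	static int my_worker(void *data)
	{
		set_freezable();	/* opt this kthread in to freezing */

		while (!kthread_should_stop()) {
			try_to_freeze();	/* parks here while the system suspends */
			do_work(data);		/* hypothetical workload */
			schedule_timeout_interruptible(HZ);
		}
		return 0;
	}
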
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 9eced4cc286e..e654435f1651 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -1472,7 +1472,7 @@ struct super_block {
const struct xattr_handler **s_xattr;
#ifdef CONFIG_FS_ENCRYPTION
const struct fscrypt_operations *s_cop;
- struct key *s_master_keys; /* master crypto keys in use */
+ struct fscrypt_keyring *s_master_keys; /* master crypto keys in use */
#endif
#ifdef CONFIG_FS_VERITY
const struct fsverity_operations *s_vop;
@@ -2004,8 +2004,9 @@ static inline int vfs_whiteout(struct user_namespace *mnt_userns,
WHITEOUT_DEV);
}
-struct dentry *vfs_tmpfile(struct user_namespace *mnt_userns,
- struct dentry *dentry, umode_t mode, int open_flag);
+struct file *vfs_tmpfile_open(struct user_namespace *mnt_userns,
+ const struct path *parentpath,
+ umode_t mode, int open_flag, const struct cred *cred);
int vfs_mkobj(struct dentry *, umode_t,
int (*f)(struct dentry *, umode_t, void *),
@@ -2038,9 +2039,10 @@ umode_t mode_strip_sgid(struct user_namespace *mnt_userns,
* the kernel specify what kind of dirent layout it wants to have.
* This allows the kernel to read directories into kernel space or
* to have different dirent layouts depending on the binary type.
+ * Return 'true' to keep going and 'false' if there are no more entries.
*/
struct dir_context;
-typedef int (*filldir_t)(struct dir_context *, const char *, int, loff_t, u64,
+typedef bool (*filldir_t)(struct dir_context *, const char *, int, loff_t, u64,
unsigned);
struct dir_context {
@@ -2132,6 +2134,8 @@ struct file_operations {
loff_t len, unsigned int remap_flags);
int (*fadvise)(struct file *, loff_t, loff_t, int);
int (*uring_cmd)(struct io_uring_cmd *ioucmd, unsigned int issue_flags);
+ int (*uring_cmd_iopoll)(struct io_uring_cmd *, struct io_comp_batch *,
+ unsigned int poll_flags);
} __randomize_layout;
struct inode_operations {
@@ -2167,7 +2171,7 @@ struct inode_operations {
struct file *, unsigned open_flag,
umode_t create_mode);
int (*tmpfile) (struct user_namespace *, struct inode *,
- struct dentry *, umode_t);
+ struct file *, umode_t);
int (*set_acl)(struct user_namespace *, struct inode *,
struct posix_acl *, int);
int (*fileattr_set)(struct user_namespace *mnt_userns,
@@ -2371,13 +2375,14 @@ static inline void kiocb_clone(struct kiocb *kiocb, struct kiocb *kiocb_src,
* don't have to write inode on fdatasync() when only
* e.g. the timestamps have changed.
* I_DIRTY_PAGES Inode has dirty pages. Inode itself may be clean.
- * I_DIRTY_TIME The inode itself only has dirty timestamps, and the
+ * I_DIRTY_TIME The inode itself has dirty timestamps, and the
* lazytime mount option is enabled. We keep track of this
* separately from I_DIRTY_SYNC in order to implement
* lazytime. This gets cleared if I_DIRTY_INODE
- * (I_DIRTY_SYNC and/or I_DIRTY_DATASYNC) gets set. I.e.
- * either I_DIRTY_TIME *or* I_DIRTY_INODE can be set in
- * i_state, but not both. I_DIRTY_PAGES may still be set.
+ * (I_DIRTY_SYNC and/or I_DIRTY_DATASYNC) gets set. But
+ * I_DIRTY_TIME can still be set if I_DIRTY_SYNC is already
+ * in place because writeback might already be in progress
+ * and we don't want to lose the time update.
* I_NEW Serves as both a mutex and completion notification.
* New inodes set I_NEW. If two processes both create
* the same inode, one of them will release its inode and
@@ -2779,6 +2784,15 @@ extern int finish_open(struct file *file, struct dentry *dentry,
int (*open)(struct inode *, struct file *));
extern int finish_no_open(struct file *file, struct dentry *dentry);
+/* Helper for the simple case when original dentry is used */
+static inline int finish_open_simple(struct file *file, int error)
+{
+ if (error)
+ return error;
+
+ return finish_open(file, file->f_path.dentry, NULL);
+}
+
/* fs/dcache.c */
extern void __init vfs_caches_init_early(void);
extern void __init vfs_caches_init(void);
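
For illustration only (not part of the patch): with ->tmpfile() now receiving the open file and vfs_tmpfile_open() returning one, a filesystem implementation typically ends with the new finish_open_simple() helper. myfs_new_inode() is a hypothetical allocation helper, and real filesystems would do their usual tmpfile dentry wiring where d_instantiate() stands in below.

	static int myfs_tmpfile(struct user_namespace *mnt_userns, struct inode *dir,
				struct file *file, umode_t mode)
	{
		struct inode *inode;

		inode = myfs_new_inode(mnt_userns, dir, mode);	/* hypothetical */
		if (IS_ERR(inode))
			return PTR_ERR(inode);

		d_instantiate(file->f_path.dentry, inode);
		return finish_open_simple(file, 0);
	}
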
@@ -3540,17 +3554,17 @@ static inline bool dir_emit(struct dir_context *ctx,
const char *name, int namelen,
u64 ino, unsigned type)
{
- return ctx->actor(ctx, name, namelen, ctx->pos, ino, type) == 0;
+ return ctx->actor(ctx, name, namelen, ctx->pos, ino, type);
}
static inline bool dir_emit_dot(struct file *file, struct dir_context *ctx)
{
return ctx->actor(ctx, ".", 1, ctx->pos,
- file->f_path.dentry->d_inode->i_ino, DT_DIR) == 0;
+ file->f_path.dentry->d_inode->i_ino, DT_DIR);
}
static inline bool dir_emit_dotdot(struct file *file, struct dir_context *ctx)
{
return ctx->actor(ctx, "..", 2, ctx->pos,
- parent_ino(file->f_path.dentry), DT_DIR) == 0;
+ parent_ino(file->f_path.dentry), DT_DIR);
}
static inline bool dir_emit_dots(struct file *file, struct dir_context *ctx)
{
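
For illustration only (not part of the patch): filldir_t actors now return bool, where true means "keep going" and false means "no more entries wanted". A hedged sketch of an in-kernel actor that records the first entry and then stops:

	struct first_name {
		struct dir_context ctx;
		char name[NAME_MAX + 1];
	};

	static bool record_first(struct dir_context *ctx, const char *name,
				 int namelen, loff_t offset, u64 ino,
				 unsigned int d_type)
	{
		struct first_name *f = container_of(ctx, struct first_name, ctx);

		if (namelen > NAME_MAX)
			namelen = NAME_MAX;
		memcpy(f->name, name, namelen);
		f->name[namelen] = '\0';
		return false;	/* stop after the first entry */
	}
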
diff --git a/include/linux/fscrypt.h b/include/linux/fscrypt.h
index 7d2f1e0f23b1..cad78b569c7e 100644
--- a/include/linux/fscrypt.h
+++ b/include/linux/fscrypt.h
@@ -161,24 +161,21 @@ struct fscrypt_operations {
int *ino_bits_ret, int *lblk_bits_ret);
/*
- * Return the number of block devices to which the filesystem may write
- * encrypted file contents.
+ * Return an array of pointers to the block devices to which the
+ * filesystem may write encrypted file contents, NULL if the filesystem
+ * only has a single such block device, or an ERR_PTR() on error.
+ *
+ * On successful non-NULL return, *num_devs is set to the number of
+ * devices in the returned array. The caller must free the returned
+ * array using kfree().
*
* If the filesystem can use multiple block devices (other than block
* devices that aren't used for encrypted file contents, such as
* external journal devices), and wants to support inline encryption,
* then it must implement this function. Otherwise it's not needed.
*/
- int (*get_num_devices)(struct super_block *sb);
-
- /*
- * If ->get_num_devices() returns a value greater than 1, then this
- * function is called to get the array of request_queues that the
- * filesystem is using -- one per block device. (There may be duplicate
- * entries in this array, as block devices can share a request_queue.)
- */
- void (*get_devices)(struct super_block *sb,
- struct request_queue **devs);
+ struct block_device **(*get_devices)(struct super_block *sb,
+ unsigned int *num_devs);
};
static inline struct fscrypt_info *fscrypt_get_info(const struct inode *inode)
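
For illustration only (not part of the patch): a hedged sketch of how a multi-device filesystem might satisfy the new ->get_devices() contract. The per-superblock layout (myfs_info, devs[]) is made up; the kcalloc()'d array, the *num_devs assignment and the NULL-for-single-device case follow the comment above.

	static struct block_device **myfs_get_devices(struct super_block *sb,
						      unsigned int *num_devs)
	{
		struct myfs_info *sbi = sb->s_fs_info;	/* hypothetical per-sb data */
		struct block_device **devs;
		unsigned int i;

		if (sbi->nr_devs == 1)
			return NULL;	/* single device: fscrypt falls back to sb->s_bdev */

		devs = kcalloc(sbi->nr_devs, sizeof(*devs), GFP_KERNEL);
		if (!devs)
			return ERR_PTR(-ENOMEM);

		for (i = 0; i < sbi->nr_devs; i++)
			devs[i] = sbi->devs[i];		/* hypothetical layout */

		*num_devs = sbi->nr_devs;
		return devs;	/* caller kfree()s the array */
	}
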
@@ -295,8 +292,6 @@ int fscrypt_parse_test_dummy_encryption(const struct fs_parameter *param,
struct fscrypt_dummy_policy *dummy_policy);
bool fscrypt_dummy_policies_equal(const struct fscrypt_dummy_policy *p1,
const struct fscrypt_dummy_policy *p2);
-int fscrypt_set_test_dummy_encryption(struct super_block *sb, const char *arg,
- struct fscrypt_dummy_policy *dummy_policy);
void fscrypt_show_test_dummy_encryption(struct seq_file *seq, char sep,
struct super_block *sb);
static inline bool
@@ -312,7 +307,7 @@ fscrypt_free_dummy_policy(struct fscrypt_dummy_policy *dummy_policy)
}
/* keyring.c */
-void fscrypt_sb_free(struct super_block *sb);
+void fscrypt_sb_delete(struct super_block *sb);
int fscrypt_ioctl_add_key(struct file *filp, void __user *arg);
int fscrypt_add_test_dummy_key(struct super_block *sb,
const struct fscrypt_dummy_policy *dummy_policy);
@@ -353,7 +348,7 @@ u64 fscrypt_fname_siphash(const struct inode *dir, const struct qstr *name);
int fscrypt_d_revalidate(struct dentry *dentry, unsigned int flags);
/* bio.c */
-void fscrypt_decrypt_bio(struct bio *bio);
+bool fscrypt_decrypt_bio(struct bio *bio);
int fscrypt_zeroout_range(const struct inode *inode, pgoff_t lblk,
sector_t pblk, unsigned int len);
@@ -526,7 +521,7 @@ fscrypt_free_dummy_policy(struct fscrypt_dummy_policy *dummy_policy)
}
/* keyring.c */
-static inline void fscrypt_sb_free(struct super_block *sb)
+static inline void fscrypt_sb_delete(struct super_block *sb)
{
}
@@ -646,8 +641,9 @@ static inline int fscrypt_d_revalidate(struct dentry *dentry,
}
/* bio.c */
-static inline void fscrypt_decrypt_bio(struct bio *bio)
+static inline bool fscrypt_decrypt_bio(struct bio *bio)
{
+ return true;
}
static inline int fscrypt_zeroout_range(const struct inode *inode, pgoff_t lblk,
@@ -768,7 +764,7 @@ bool fscrypt_mergeable_bio(struct bio *bio, const struct inode *inode,
bool fscrypt_mergeable_bio_bh(struct bio *bio,
const struct buffer_head *next_bh);
-bool fscrypt_dio_supported(struct kiocb *iocb, struct iov_iter *iter);
+bool fscrypt_dio_supported(struct inode *inode);
u64 fscrypt_limit_io_blocks(const struct inode *inode, u64 lblk, u64 nr_blocks);
@@ -801,11 +797,8 @@ static inline bool fscrypt_mergeable_bio_bh(struct bio *bio,
return true;
}
-static inline bool fscrypt_dio_supported(struct kiocb *iocb,
- struct iov_iter *iter)
+static inline bool fscrypt_dio_supported(struct inode *inode)
{
- const struct inode *inode = file_inode(iocb->ki_filp);
-
return !fscrypt_needs_contents_encryption(inode);
}
diff --git a/include/linux/fsverity.h b/include/linux/fsverity.h
index 7af030fa3c36..40f14e5fed9d 100644
--- a/include/linux/fsverity.h
+++ b/include/linux/fsverity.h
@@ -22,6 +22,9 @@
*/
#define FS_VERITY_MAX_DIGEST_SIZE SHA512_DIGEST_SIZE
+/* Arbitrary limit to bound the kmalloc() size. Can be changed. */
+#define FS_VERITY_MAX_DESCRIPTOR_SIZE 16384
+
/* Verity operations for filesystems */
struct fsverity_operations {
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index 0b61371e287b..62557d4bffc2 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -1122,47 +1122,6 @@ static inline void unpause_graph_tracing(void) { }
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
#ifdef CONFIG_TRACING
-
-/* flags for current->trace */
-enum {
- TSK_TRACE_FL_TRACE_BIT = 0,
- TSK_TRACE_FL_GRAPH_BIT = 1,
-};
-enum {
- TSK_TRACE_FL_TRACE = 1 << TSK_TRACE_FL_TRACE_BIT,
- TSK_TRACE_FL_GRAPH = 1 << TSK_TRACE_FL_GRAPH_BIT,
-};
-
-static inline void set_tsk_trace_trace(struct task_struct *tsk)
-{
- set_bit(TSK_TRACE_FL_TRACE_BIT, &tsk->trace);
-}
-
-static inline void clear_tsk_trace_trace(struct task_struct *tsk)
-{
- clear_bit(TSK_TRACE_FL_TRACE_BIT, &tsk->trace);
-}
-
-static inline int test_tsk_trace_trace(struct task_struct *tsk)
-{
- return tsk->trace & TSK_TRACE_FL_TRACE;
-}
-
-static inline void set_tsk_trace_graph(struct task_struct *tsk)
-{
- set_bit(TSK_TRACE_FL_GRAPH_BIT, &tsk->trace);
-}
-
-static inline void clear_tsk_trace_graph(struct task_struct *tsk)
-{
- clear_bit(TSK_TRACE_FL_GRAPH_BIT, &tsk->trace);
-}
-
-static inline int test_tsk_trace_graph(struct task_struct *tsk)
-{
- return tsk->trace & TSK_TRACE_FL_GRAPH;
-}
-
enum ftrace_dump_mode;
extern enum ftrace_dump_mode ftrace_dump_on_oops;
diff --git a/include/linux/gameport.h b/include/linux/gameport.h
index 69081d899492..8c2f00018e89 100644
--- a/include/linux/gameport.h
+++ b/include/linux/gameport.h
@@ -110,7 +110,7 @@ static inline void gameport_free_port(struct gameport *gameport)
static inline void gameport_set_name(struct gameport *gameport, const char *name)
{
- strlcpy(gameport->name, name, sizeof(gameport->name));
+ strscpy(gameport->name, name, sizeof(gameport->name));
}
/*
diff --git a/include/linux/genl_magic_func.h b/include/linux/genl_magic_func.h
index 939b1a8f571b..4a4b387181ad 100644
--- a/include/linux/genl_magic_func.h
+++ b/include/linux/genl_magic_func.h
@@ -294,6 +294,7 @@ static struct genl_family ZZZ_genl_family __ro_after_init = {
.ops = ZZZ_genl_ops,
.n_ops = ARRAY_SIZE(ZZZ_genl_ops),
.mcgrps = ZZZ_genl_mcgrps,
+ .resv_start_op = 42, /* drbd is currently the only user */
.n_mcgrps = ARRAY_SIZE(ZZZ_genl_mcgrps),
.module = THIS_MODULE,
};
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index f314be58fa77..ef4aea3b356e 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -18,6 +18,9 @@ static inline int gfp_migratetype(const gfp_t gfp_flags)
VM_WARN_ON((gfp_flags & GFP_MOVABLE_MASK) == GFP_MOVABLE_MASK);
BUILD_BUG_ON((1UL << GFP_MOVABLE_SHIFT) != ___GFP_MOVABLE);
BUILD_BUG_ON((___GFP_MOVABLE >> GFP_MOVABLE_SHIFT) != MIGRATE_MOVABLE);
+ BUILD_BUG_ON((___GFP_RECLAIMABLE >> GFP_MOVABLE_SHIFT) != MIGRATE_RECLAIMABLE);
+ BUILD_BUG_ON(((___GFP_MOVABLE | ___GFP_RECLAIMABLE) >>
+ GFP_MOVABLE_SHIFT) != MIGRATE_HIGHATOMIC);
if (unlikely(page_group_by_mobility_disabled))
return MIGRATE_UNMOVABLE;
@@ -33,29 +36,6 @@ static inline bool gfpflags_allow_blocking(const gfp_t gfp_flags)
return !!(gfp_flags & __GFP_DIRECT_RECLAIM);
}
-/**
- * gfpflags_normal_context - is gfp_flags a normal sleepable context?
- * @gfp_flags: gfp_flags to test
- *
- * Test whether @gfp_flags indicates that the allocation is from the
- * %current context and allowed to sleep.
- *
- * An allocation being allowed to block doesn't mean it owns the %current
- * context. When direct reclaim path tries to allocate memory, the
- * allocation context is nested inside whatever %current was doing at the
- * time of the original allocation. The nested allocation may be allowed
- * to block but modifying anything %current owns can corrupt the outer
- * context's expectations.
- *
- * %true result from this function indicates that the allocation context
- * can sleep and use anything that's associated with %current.
- */
-static inline bool gfpflags_normal_context(const gfp_t gfp_flags)
-{
- return (gfp_flags & (__GFP_DIRECT_RECLAIM | __GFP_MEMALLOC)) ==
- __GFP_DIRECT_RECLAIM;
-}
-
#ifdef CONFIG_HIGHMEM
#define OPT_ZONE_HIGHMEM ZONE_HIGHMEM
#else
diff --git a/include/linux/gpio/consumer.h b/include/linux/gpio/consumer.h
index fe0f460d9a3b..36460ced060b 100644
--- a/include/linux/gpio/consumer.h
+++ b/include/linux/gpio/consumer.h
@@ -174,10 +174,6 @@ int desc_to_gpio(const struct gpio_desc *desc);
/* Child properties interface */
struct fwnode_handle;
-struct gpio_desc *fwnode_get_named_gpiod(struct fwnode_handle *fwnode,
- const char *propname, int index,
- enum gpiod_flags dflags,
- const char *label);
struct gpio_desc *fwnode_gpiod_get_index(struct fwnode_handle *fwnode,
const char *con_id, int index,
enum gpiod_flags flags,
@@ -554,15 +550,6 @@ static inline int desc_to_gpio(const struct gpio_desc *desc)
struct fwnode_handle;
static inline
-struct gpio_desc *fwnode_get_named_gpiod(struct fwnode_handle *fwnode,
- const char *propname, int index,
- enum gpiod_flags dflags,
- const char *label)
-{
- return ERR_PTR(-ENOSYS);
-}
-
-static inline
struct gpio_desc *fwnode_gpiod_get_index(struct fwnode_handle *fwnode,
const char *con_id, int index,
enum gpiod_flags flags,
diff --git a/include/linux/hdmi.h b/include/linux/hdmi.h
index c8ec982ff498..2f4dcc8d060e 100644
--- a/include/linux/hdmi.h
+++ b/include/linux/hdmi.h
@@ -336,7 +336,12 @@ ssize_t hdmi_audio_infoframe_pack(struct hdmi_audio_infoframe *frame,
void *buffer, size_t size);
ssize_t hdmi_audio_infoframe_pack_only(const struct hdmi_audio_infoframe *frame,
void *buffer, size_t size);
-int hdmi_audio_infoframe_check(struct hdmi_audio_infoframe *frame);
+int hdmi_audio_infoframe_check(const struct hdmi_audio_infoframe *frame);
+
+struct dp_sdp;
+ssize_t
+hdmi_audio_infoframe_pack_for_dp(const struct hdmi_audio_infoframe *frame,
+ struct dp_sdp *sdp, u8 dp_version);
enum hdmi_3d_structure {
HDMI_3D_STRUCTURE_INVALID = -1,
diff --git a/include/linux/hid.h b/include/linux/hid.h
index 4363a63b9775..8677ae38599e 100644
--- a/include/linux/hid.h
+++ b/include/linux/hid.h
@@ -314,15 +314,6 @@ struct hid_item {
#define HID_BAT_ABSOLUTESTATEOFCHARGE 0x00850065
#define HID_VD_ASUS_CUSTOM_MEDIA_KEYS 0xff310076
-/*
- * HID report types --- Ouch! HID spec says 1 2 3!
- */
-
-#define HID_INPUT_REPORT 0
-#define HID_OUTPUT_REPORT 1
-#define HID_FEATURE_REPORT 2
-
-#define HID_REPORT_TYPES 3
/*
* HID connect requests
@@ -509,7 +500,7 @@ struct hid_report {
struct list_head hidinput_list;
struct list_head field_entry_list; /* ordered list of input fields */
unsigned int id; /* id of this report */
- unsigned int type; /* report type */
+ enum hid_report_type type; /* report type */
unsigned int application; /* application usage for this report */
struct hid_field *field[HID_MAX_FIELDS]; /* fields of the report */
struct hid_field_entry *field_entries; /* allocated memory of input field_entry */
@@ -658,6 +649,8 @@ struct hid_device { /* device report descriptor */
struct list_head debug_list;
spinlock_t debug_list_lock;
wait_queue_head_t debug_wait;
+
+ unsigned int id; /* system unique id */
};
#define to_hid_device(pdev) \
@@ -924,20 +917,21 @@ extern int hidinput_connect(struct hid_device *hid, unsigned int force);
extern void hidinput_disconnect(struct hid_device *);
int hid_set_field(struct hid_field *, unsigned, __s32);
-int hid_input_report(struct hid_device *, int type, u8 *, u32, int);
+int hid_input_report(struct hid_device *hid, enum hid_report_type type, u8 *data, u32 size,
+ int interrupt);
struct hid_field *hidinput_get_led_field(struct hid_device *hid);
unsigned int hidinput_count_leds(struct hid_device *hid);
__s32 hidinput_calc_abs_res(const struct hid_field *field, __u16 code);
void hid_output_report(struct hid_report *report, __u8 *data);
-int __hid_request(struct hid_device *hid, struct hid_report *rep, int reqtype);
+int __hid_request(struct hid_device *hid, struct hid_report *rep, enum hid_class_request reqtype);
u8 *hid_alloc_report_buf(struct hid_report *report, gfp_t flags);
struct hid_device *hid_allocate_device(void);
struct hid_report *hid_register_report(struct hid_device *device,
- unsigned int type, unsigned int id,
+ enum hid_report_type type, unsigned int id,
unsigned int application);
int hid_parse_report(struct hid_device *hid, __u8 *start, unsigned size);
struct hid_report *hid_validate_values(struct hid_device *hid,
- unsigned int type, unsigned int id,
+ enum hid_report_type type, unsigned int id,
unsigned int field_index,
unsigned int report_counts);
@@ -1106,10 +1100,11 @@ void hid_hw_stop(struct hid_device *hdev);
int __must_check hid_hw_open(struct hid_device *hdev);
void hid_hw_close(struct hid_device *hdev);
void hid_hw_request(struct hid_device *hdev,
- struct hid_report *report, int reqtype);
+ struct hid_report *report, enum hid_class_request reqtype);
int hid_hw_raw_request(struct hid_device *hdev,
unsigned char reportnum, __u8 *buf,
- size_t len, unsigned char rtype, int reqtype);
+ size_t len, enum hid_report_type rtype,
+ enum hid_class_request reqtype);
int hid_hw_output_report(struct hid_device *hdev, __u8 *buf, size_t len);
/**
@@ -1137,7 +1132,7 @@ static inline int hid_hw_power(struct hid_device *hdev, int level)
* @reqtype: hid request type
*/
static inline int hid_hw_idle(struct hid_device *hdev, int report, int idle,
- int reqtype)
+ enum hid_class_request reqtype)
{
if (hdev->ll_driver->idle)
return hdev->ll_driver->idle(hdev, report, idle, reqtype);
@@ -1182,8 +1177,8 @@ static inline u32 hid_report_len(struct hid_report *report)
return DIV_ROUND_UP(report->size, 8) + (report->id > 0);
}
-int hid_report_raw_event(struct hid_device *hid, int type, u8 *data, u32 size,
- int interrupt);
+int hid_report_raw_event(struct hid_device *hid, enum hid_report_type type, u8 *data, u32 size,
+ int interrupt);
/* HID quirks API */
unsigned long hid_lookup_quirk(const struct hid_device *hdev);
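
For illustration only (not part of the patch): the report and raw-request helpers now take enum hid_report_type and enum hid_class_request instead of bare ints, so mismatched or swapped arguments are easier for the compiler and reviewers to spot. A hedged sketch of fetching a feature report:

	static int my_get_feature(struct hid_device *hdev, u8 report_id,
				  u8 *buf, size_t len)
	{
		int ret;

		ret = hid_hw_raw_request(hdev, report_id, buf, len,
					 HID_FEATURE_REPORT, HID_REQ_GET_REPORT);
		if (ret < 0)
			hid_err(hdev, "feature report %u failed: %d\n",
				report_id, ret);
		return ret;
	}
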
diff --git a/include/linux/highmem.h b/include/linux/highmem.h
index 25679035ca28..e9912da5441b 100644
--- a/include/linux/highmem.h
+++ b/include/linux/highmem.h
@@ -6,6 +6,7 @@
#include <linux/kernel.h>
#include <linux/bug.h>
#include <linux/cacheflush.h>
+#include <linux/kmsan.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/hardirq.h>
@@ -311,6 +312,7 @@ static inline void copy_user_highpage(struct page *to, struct page *from,
vfrom = kmap_local_page(from);
vto = kmap_local_page(to);
copy_user_page(vto, vfrom, vaddr, to);
+ kmsan_unpoison_memory(page_address(to), PAGE_SIZE);
kunmap_local(vto);
kunmap_local(vfrom);
}
@@ -326,6 +328,7 @@ static inline void copy_highpage(struct page *to, struct page *from)
vfrom = kmap_local_page(from);
vto = kmap_local_page(to);
copy_page(vto, vfrom);
+ kmsan_copy_page_meta(to, from);
kunmap_local(vto);
kunmap_local(vfrom);
}
diff --git a/include/linux/hisi_acc_qm.h b/include/linux/hisi_acc_qm.h
index 116e8bd68c99..e230c7c46110 100644
--- a/include/linux/hisi_acc_qm.h
+++ b/include/linux/hisi_acc_qm.h
@@ -87,29 +87,6 @@
#define PEH_AXUSER_CFG 0x401001
#define PEH_AXUSER_CFG_ENABLE 0xffffffff
-#define QM_AXI_RRESP BIT(0)
-#define QM_AXI_BRESP BIT(1)
-#define QM_ECC_MBIT BIT(2)
-#define QM_ECC_1BIT BIT(3)
-#define QM_ACC_GET_TASK_TIMEOUT BIT(4)
-#define QM_ACC_DO_TASK_TIMEOUT BIT(5)
-#define QM_ACC_WB_NOT_READY_TIMEOUT BIT(6)
-#define QM_SQ_CQ_VF_INVALID BIT(7)
-#define QM_CQ_VF_INVALID BIT(8)
-#define QM_SQ_VF_INVALID BIT(9)
-#define QM_DB_TIMEOUT BIT(10)
-#define QM_OF_FIFO_OF BIT(11)
-#define QM_DB_RANDOM_INVALID BIT(12)
-#define QM_MAILBOX_TIMEOUT BIT(13)
-#define QM_FLR_TIMEOUT BIT(14)
-
-#define QM_BASE_NFE (QM_AXI_RRESP | QM_AXI_BRESP | QM_ECC_MBIT | \
- QM_ACC_GET_TASK_TIMEOUT | QM_DB_TIMEOUT | \
- QM_OF_FIFO_OF | QM_DB_RANDOM_INVALID | \
- QM_MAILBOX_TIMEOUT | QM_FLR_TIMEOUT)
-#define QM_BASE_CE QM_ECC_1BIT
-
-#define QM_Q_DEPTH 1024
#define QM_MIN_QNUM 2
#define HISI_ACC_SGL_SGE_NR_MAX 255
#define QM_SHAPER_CFG 0x100164
@@ -168,6 +145,15 @@ enum qm_vf_state {
QM_NOT_READY,
};
+enum qm_cap_bits {
+ QM_SUPPORT_DB_ISOLATION = 0x0,
+ QM_SUPPORT_FUNC_QOS,
+ QM_SUPPORT_STOP_QP,
+ QM_SUPPORT_MB_COMMAND,
+ QM_SUPPORT_SVA_PREFETCH,
+ QM_SUPPORT_RPM,
+};
+
struct dfx_diff_registers {
u32 *regs;
u32 reg_offset;
@@ -232,7 +218,10 @@ struct hisi_qm_err_info {
char *acpi_rst;
u32 msi_wr_port;
u32 ecc_2bits_mask;
- u32 dev_ce_mask;
+ u32 qm_shutdown_mask;
+ u32 dev_shutdown_mask;
+ u32 qm_reset_mask;
+ u32 dev_reset_mask;
u32 ce;
u32 nfe;
u32 fe;
@@ -258,6 +247,18 @@ struct hisi_qm_err_ini {
void (*err_info_init)(struct hisi_qm *qm);
};
+struct hisi_qm_cap_info {
+ u32 type;
+ /* Register offset */
+ u32 offset;
+ /* Bit offset in register */
+ u32 shift;
+ u32 mask;
+ u32 v1_val;
+ u32 v2_val;
+ u32 v3_val;
+};
+
struct hisi_qm_list {
struct mutex lock;
struct list_head list;
@@ -278,6 +279,9 @@ struct hisi_qm {
struct pci_dev *pdev;
void __iomem *io_base;
void __iomem *db_io_base;
+
+ /* Capability version, 0: not supported */
+ u32 cap_ver;
u32 sqe_size;
u32 qp_base;
u32 qp_num;
@@ -286,6 +290,8 @@ struct hisi_qm {
u32 max_qp_num;
u32 vfs_num;
u32 db_interval;
+ u16 eq_depth;
+ u16 aeq_depth;
struct list_head list;
struct hisi_qm_list *qm_list;
@@ -304,6 +310,8 @@ struct hisi_qm {
struct hisi_qm_err_info err_info;
struct hisi_qm_err_status err_status;
unsigned long misc_ctl; /* driver removing and reset sched */
+ /* Device capability bit */
+ unsigned long caps;
struct rw_semaphore qps_lock;
struct idr qp_idr;
@@ -326,8 +334,6 @@ struct hisi_qm {
bool use_sva;
bool is_frozen;
- /* doorbell isolation enable */
- bool use_db_isolation;
resource_size_t phys_base;
resource_size_t db_phys_base;
struct uacce_device *uacce;
@@ -351,6 +357,8 @@ struct hisi_qp_ops {
struct hisi_qp {
u32 qp_id;
+ u16 sq_depth;
+ u16 cq_depth;
u8 alg_type;
u8 req_type;
@@ -501,6 +509,9 @@ void hisi_qm_pm_init(struct hisi_qm *qm);
int hisi_qm_get_dfx_access(struct hisi_qm *qm);
void hisi_qm_put_dfx_access(struct hisi_qm *qm);
void hisi_qm_regs_dump(struct seq_file *s, struct debugfs_regset32 *regset);
+u32 hisi_qm_get_hw_info(struct hisi_qm *qm,
+ const struct hisi_qm_cap_info *info_table,
+ u32 index, bool is_read);
/* Used by VFIO ACC live migration driver */
struct pci_driver *hisi_sec_get_pf_driver(void);
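
For illustration only (not part of the patch): drivers are expected to describe their features in a hisi_qm_cap_info table and probe them with hisi_qm_get_hw_info(). The register offset, shift and per-version values below are placeholders, not real hardware fields; only the struct layout, the qm_cap_bits values and the call signature come from this header.

	static const struct hisi_qm_cap_info myacc_cap_info[] = {
		/* type,		offset, shift, mask, v1, v2, v3 (placeholders) */
		{ QM_SUPPORT_STOP_QP,	0x3100, 9,     0x1,  0,  0,  1 },
		{ QM_SUPPORT_RPM,	0x3100, 8,     0x1,  0,  0,  1 },
	};

	static void myacc_init_caps(struct hisi_qm *qm)
	{
		u32 i, val;

		for (i = 0; i < ARRAY_SIZE(myacc_cap_info); i++) {
			val = hisi_qm_get_hw_info(qm, myacc_cap_info, i,
						  qm->cap_ver);
			if (val)
				set_bit(myacc_cap_info[i].type, &qm->caps);
		}
	}
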
diff --git a/include/linux/htcpld.h b/include/linux/htcpld.h
index 842fce69ac06..5f8ac9b1d724 100644
--- a/include/linux/htcpld.h
+++ b/include/linux/htcpld.h
@@ -13,8 +13,6 @@ struct htcpld_chip_platform_data {
};
struct htcpld_core_platform_data {
- unsigned int int_reset_gpio_hi;
- unsigned int int_reset_gpio_lo;
unsigned int i2c_adapter_id;
struct htcpld_chip_platform_data *chip;
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index 768e5261fdae..a1341fdcf666 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -168,9 +168,8 @@ static inline bool file_thp_enabled(struct vm_area_struct *vma)
!inode_is_open_for_write(inode) && S_ISREG(inode->i_mode);
}
-bool hugepage_vma_check(struct vm_area_struct *vma,
- unsigned long vm_flags,
- bool smaps, bool in_pf);
+bool hugepage_vma_check(struct vm_area_struct *vma, unsigned long vm_flags,
+ bool smaps, bool in_pf, bool enforce_sysfs);
#define transparent_hugepage_use_zero_page() \
(transparent_hugepage_flags & \
@@ -219,6 +218,9 @@ void __split_huge_pud(struct vm_area_struct *vma, pud_t *pud,
int hugepage_madvise(struct vm_area_struct *vma, unsigned long *vm_flags,
int advice);
+int madvise_collapse(struct vm_area_struct *vma,
+ struct vm_area_struct **prev,
+ unsigned long start, unsigned long end);
void vma_adjust_trans_huge(struct vm_area_struct *vma, unsigned long start,
unsigned long end, long adjust_next);
spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma);
@@ -321,8 +323,8 @@ static inline bool transhuge_vma_suitable(struct vm_area_struct *vma,
}
static inline bool hugepage_vma_check(struct vm_area_struct *vma,
- unsigned long vm_flags,
- bool smaps, bool in_pf)
+ unsigned long vm_flags, bool smaps,
+ bool in_pf, bool enforce_sysfs)
{
return false;
}
@@ -362,9 +364,16 @@ static inline void split_huge_pmd_address(struct vm_area_struct *vma,
static inline int hugepage_madvise(struct vm_area_struct *vma,
unsigned long *vm_flags, int advice)
{
- BUG();
- return 0;
+ return -EINVAL;
+}
+
+static inline int madvise_collapse(struct vm_area_struct *vma,
+ struct vm_area_struct **prev,
+ unsigned long start, unsigned long end)
+{
+ return -EINVAL;
}
+
static inline void vma_adjust_trans_huge(struct vm_area_struct *vma,
unsigned long start,
unsigned long end,
@@ -435,6 +444,11 @@ static inline int split_folio_to_list(struct folio *folio,
return split_huge_page_to_list(&folio->page, list);
}
+static inline int split_folio(struct folio *folio)
+{
+ return split_folio_to_list(folio, NULL);
+}
+
/*
* archs that select ARCH_WANTS_THP_SWAP but don't support THP_SWP due to
* limitations in the implementation like arm64 MTE can override this to
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 3ec981a0d8b3..8b4f93e84868 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -16,8 +16,9 @@
struct ctl_table;
struct user_struct;
struct mmu_gather;
+struct node;
-#ifndef is_hugepd
+#ifndef CONFIG_ARCH_HAS_HUGEPD
typedef struct { unsigned long pd; } hugepd_t;
#define is_hugepd(hugepd) (0)
#define __hugepd(x) ((hugepd_t) { (x) })
@@ -114,6 +115,12 @@ struct file_region {
#endif
};
+struct hugetlb_vma_lock {
+ struct kref refs;
+ struct rw_semaphore rw_sema;
+ struct vm_area_struct *vma;
+};
+
extern struct resv_map *resv_map_alloc(void);
void resv_map_release(struct kref *ref);
@@ -126,7 +133,7 @@ struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages,
long min_hpages);
void hugepage_put_subpool(struct hugepage_subpool *spool);
-void reset_vma_resv_huge_pages(struct vm_area_struct *vma);
+void hugetlb_dup_vma_private(struct vm_area_struct *vma);
void clear_vma_resv_huge_pages(struct vm_area_struct *vma);
int hugetlb_sysctl_handler(struct ctl_table *, int, void *, size_t *, loff_t *);
int hugetlb_overcommit_handler(struct ctl_table *, int, void *, size_t *,
@@ -207,13 +214,21 @@ struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
struct page *follow_huge_pd(struct vm_area_struct *vma,
unsigned long address, hugepd_t hpd,
int flags, int pdshift);
-struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
- pmd_t *pmd, int flags);
+struct page *follow_huge_pmd_pte(struct vm_area_struct *vma, unsigned long address,
+ int flags);
struct page *follow_huge_pud(struct mm_struct *mm, unsigned long address,
pud_t *pud, int flags);
struct page *follow_huge_pgd(struct mm_struct *mm, unsigned long address,
pgd_t *pgd, int flags);
+void hugetlb_vma_lock_read(struct vm_area_struct *vma);
+void hugetlb_vma_unlock_read(struct vm_area_struct *vma);
+void hugetlb_vma_lock_write(struct vm_area_struct *vma);
+void hugetlb_vma_unlock_write(struct vm_area_struct *vma);
+int hugetlb_vma_trylock_write(struct vm_area_struct *vma);
+void hugetlb_vma_assert_locked(struct vm_area_struct *vma);
+void hugetlb_vma_lock_release(struct kref *kref);
+
int pmd_huge(pmd_t pmd);
int pud_huge(pud_t pud);
unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
@@ -225,7 +240,7 @@ void hugetlb_unshare_all_pmds(struct vm_area_struct *vma);
#else /* !CONFIG_HUGETLB_PAGE */
-static inline void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
+static inline void hugetlb_dup_vma_private(struct vm_area_struct *vma)
{
}
@@ -312,8 +327,8 @@ static inline struct page *follow_huge_pd(struct vm_area_struct *vma,
return NULL;
}
-static inline struct page *follow_huge_pmd(struct mm_struct *mm,
- unsigned long address, pmd_t *pmd, int flags)
+static inline struct page *follow_huge_pmd_pte(struct vm_area_struct *vma,
+ unsigned long address, int flags)
{
return NULL;
}
@@ -336,6 +351,31 @@ static inline int prepare_hugepage_range(struct file *file,
return -EINVAL;
}
+static inline void hugetlb_vma_lock_read(struct vm_area_struct *vma)
+{
+}
+
+static inline void hugetlb_vma_unlock_read(struct vm_area_struct *vma)
+{
+}
+
+static inline void hugetlb_vma_lock_write(struct vm_area_struct *vma)
+{
+}
+
+static inline void hugetlb_vma_unlock_write(struct vm_area_struct *vma)
+{
+}
+
+static inline int hugetlb_vma_trylock_write(struct vm_area_struct *vma)
+{
+ return 1;
+}
+
+static inline void hugetlb_vma_assert_locked(struct vm_area_struct *vma)
+{
+}
+
static inline int pmd_huge(pmd_t pmd)
{
return 0;
@@ -665,7 +705,7 @@ struct page *alloc_huge_page_nodemask(struct hstate *h, int preferred_nid,
nodemask_t *nmask, gfp_t gfp_mask);
struct page *alloc_huge_page_vma(struct hstate *h, struct vm_area_struct *vma,
unsigned long address);
-int huge_add_to_page_cache(struct page *page, struct address_space *mapping,
+int hugetlb_add_to_page_cache(struct page *page, struct address_space *mapping,
pgoff_t idx);
void restore_reserve_on_error(struct hstate *h, struct vm_area_struct *vma,
unsigned long address, struct page *page);
@@ -935,6 +975,11 @@ static inline void huge_ptep_modify_prot_commit(struct vm_area_struct *vma,
}
#endif
+#ifdef CONFIG_NUMA
+void hugetlb_register_node(struct node *node);
+void hugetlb_unregister_node(struct node *node);
+#endif
+
#else /* CONFIG_HUGETLB_PAGE */
struct hstate {};
@@ -1109,6 +1154,14 @@ static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, pte_t pte)
{
}
+
+static inline void hugetlb_register_node(struct node *node)
+{
+}
+
+static inline void hugetlb_unregister_node(struct node *node)
+{
+}
#endif /* CONFIG_HUGETLB_PAGE */
static inline spinlock_t *huge_pte_lock(struct hstate *h,
@@ -1123,14 +1176,10 @@ static inline spinlock_t *huge_pte_lock(struct hstate *h,
#if defined(CONFIG_HUGETLB_PAGE) && defined(CONFIG_CMA)
extern void __init hugetlb_cma_reserve(int order);
-extern void __init hugetlb_cma_check(void);
#else
static inline __init void hugetlb_cma_reserve(int order)
{
}
-static inline __init void hugetlb_cma_check(void)
-{
-}
#endif
bool want_pmd_share(struct vm_area_struct *vma, unsigned long addr);
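
For illustration only (not part of the patch): the new per-VMA lock exports give hugetlb code a way to serialise page-table walks against shared-PMD teardown. A hedged sketch of the read-side usage:

	static void inspect_hugetlb_vma(struct vm_area_struct *vma)
	{
		hugetlb_vma_lock_read(vma);	/* hold off pmd unsharing while we look */
		/* ... walk or inspect the mapping's page tables here ... */
		hugetlb_vma_unlock_read(vma);
	}
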
diff --git a/include/linux/hugetlb_cgroup.h b/include/linux/hugetlb_cgroup.h
index 379344828e78..630cd255d0cf 100644
--- a/include/linux/hugetlb_cgroup.h
+++ b/include/linux/hugetlb_cgroup.h
@@ -90,32 +90,31 @@ hugetlb_cgroup_from_page_rsvd(struct page *page)
return __hugetlb_cgroup_from_page(page, true);
}
-static inline int __set_hugetlb_cgroup(struct page *page,
+static inline void __set_hugetlb_cgroup(struct page *page,
struct hugetlb_cgroup *h_cg, bool rsvd)
{
VM_BUG_ON_PAGE(!PageHuge(page), page);
if (compound_order(page) < HUGETLB_CGROUP_MIN_ORDER)
- return -1;
+ return;
if (rsvd)
set_page_private(page + SUBPAGE_INDEX_CGROUP_RSVD,
(unsigned long)h_cg);
else
set_page_private(page + SUBPAGE_INDEX_CGROUP,
(unsigned long)h_cg);
- return 0;
}
-static inline int set_hugetlb_cgroup(struct page *page,
+static inline void set_hugetlb_cgroup(struct page *page,
struct hugetlb_cgroup *h_cg)
{
- return __set_hugetlb_cgroup(page, h_cg, false);
+ __set_hugetlb_cgroup(page, h_cg, false);
}
-static inline int set_hugetlb_cgroup_rsvd(struct page *page,
+static inline void set_hugetlb_cgroup_rsvd(struct page *page,
struct hugetlb_cgroup *h_cg)
{
- return __set_hugetlb_cgroup(page, h_cg, true);
+ __set_hugetlb_cgroup(page, h_cg, true);
}
static inline bool hugetlb_cgroup_disabled(void)
@@ -199,16 +198,14 @@ hugetlb_cgroup_from_page_rsvd(struct page *page)
return NULL;
}
-static inline int set_hugetlb_cgroup(struct page *page,
+static inline void set_hugetlb_cgroup(struct page *page,
struct hugetlb_cgroup *h_cg)
{
- return 0;
}
-static inline int set_hugetlb_cgroup_rsvd(struct page *page,
+static inline void set_hugetlb_cgroup_rsvd(struct page *page,
struct hugetlb_cgroup *h_cg)
{
- return 0;
}
static inline bool hugetlb_cgroup_disabled(void)
diff --git a/include/linux/hw_breakpoint.h b/include/linux/hw_breakpoint.h
index 78dd7035d1e5..f319bd26b030 100644
--- a/include/linux/hw_breakpoint.h
+++ b/include/linux/hw_breakpoint.h
@@ -74,12 +74,12 @@ register_wide_hw_breakpoint(struct perf_event_attr *attr,
extern int register_perf_hw_breakpoint(struct perf_event *bp);
extern void unregister_hw_breakpoint(struct perf_event *bp);
extern void unregister_wide_hw_breakpoint(struct perf_event * __percpu *cpu_events);
+extern bool hw_breakpoint_is_used(void);
extern int dbg_reserve_bp_slot(struct perf_event *bp);
extern int dbg_release_bp_slot(struct perf_event *bp);
extern int reserve_bp_slot(struct perf_event *bp);
extern void release_bp_slot(struct perf_event *bp);
-int hw_breakpoint_weight(struct perf_event *bp);
int arch_reserve_bp_slot(struct perf_event *bp);
void arch_release_bp_slot(struct perf_event *bp);
void arch_unregister_hw_breakpoint(struct perf_event *bp);
@@ -121,6 +121,8 @@ register_perf_hw_breakpoint(struct perf_event *bp) { return -ENOSYS; }
static inline void unregister_hw_breakpoint(struct perf_event *bp) { }
static inline void
unregister_wide_hw_breakpoint(struct perf_event * __percpu *cpu_events) { }
+static inline bool hw_breakpoint_is_used(void) { return false; }
+
static inline int
reserve_bp_slot(struct perf_event *bp) {return -ENOSYS; }
static inline void release_bp_slot(struct perf_event *bp) { }
diff --git a/include/linux/hw_random.h b/include/linux/hw_random.h
index aa1d4da03538..77c2885c4c13 100644
--- a/include/linux/hw_random.h
+++ b/include/linux/hw_random.h
@@ -50,6 +50,7 @@ struct hwrng {
struct list_head list;
struct kref ref;
struct completion cleanup_done;
+ struct completion dying;
};
struct device;
@@ -61,4 +62,6 @@ extern int devm_hwrng_register(struct device *dev, struct hwrng *rng);
extern void hwrng_unregister(struct hwrng *rng);
extern void devm_hwrng_unregister(struct device *dve, struct hwrng *rng);
+extern long hwrng_msleep(struct hwrng *rng, unsigned int msecs);
+
#endif /* LINUX_HWRANDOM_H_ */
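[Editorial note] The new hwrng_msleep() helper lets a driver sleep in its read path while staying responsive to unregistration: the new "dying" completion is completed by the core so the sleep can be cut short. A minimal sketch (not part of the patch) of a polling ->read() callback; my_rng_data_ready() and my_rng_read_word() are hypothetical helpers.

/* Hedged sketch: poll a data-ready flag, sleeping via hwrng_msleep() so
 * hwrng_unregister() can wake the driver early instead of blocking on it. */
static int my_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
{
	int retries = 20;

	while (!my_rng_data_ready(rng)) {
		if (!wait || --retries == 0)
			return 0;
		hwrng_msleep(rng, 10);	/* returns early if the rng is dying */
	}

	return my_rng_read_word(rng, buf, max);
}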
diff --git a/include/linux/i2c.h b/include/linux/i2c.h
index 8eab5017bff3..f7c49bbdb8a1 100644
--- a/include/linux/i2c.h
+++ b/include/linux/i2c.h
@@ -273,7 +273,7 @@ struct i2c_driver {
/* Standard driver model interfaces */
int (*probe)(struct i2c_client *client, const struct i2c_device_id *id);
- int (*remove)(struct i2c_client *client);
+ void (*remove)(struct i2c_client *client);
/* New driver model interface to aid the seamless removal of the
* current probe()'s, more commonly unused than used second parameter.
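[Editorial note] With ->remove() now returning void, an I2C driver can no longer report a removal error; the core ignored that value anyway, so cleanup simply runs to completion. A hedged sketch of a converted driver; the my_chip_* names are hypothetical.

/* Before: static int my_chip_remove(struct i2c_client *client) { ...; return 0; } */
static void my_chip_remove(struct i2c_client *client)
{
	struct my_chip *chip = i2c_get_clientdata(client);

	/* No error can be returned any more; just undo what probe() set up. */
	my_chip_power_off(chip);	/* hypothetical helper */
}

static struct i2c_driver my_chip_driver = {
	.driver = { .name = "my-chip" },
	.probe  = my_chip_probe,	/* hypothetical */
	.remove = my_chip_remove,
};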
diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h
index b6e6d5b40774..79690938d9a2 100644
--- a/include/linux/ieee80211.h
+++ b/include/linux/ieee80211.h
@@ -2888,7 +2888,8 @@ ieee80211_he_spr_size(const u8 *he_spr_ie)
/* Calculate 802.11be EHT capabilities IE Tx/Rx EHT MCS NSS Support Field size */
static inline u8
ieee80211_eht_mcs_nss_size(const struct ieee80211_he_cap_elem *he_cap,
- const struct ieee80211_eht_cap_elem_fixed *eht_cap)
+ const struct ieee80211_eht_cap_elem_fixed *eht_cap,
+ bool from_ap)
{
u8 count = 0;
@@ -2909,7 +2910,10 @@ ieee80211_eht_mcs_nss_size(const struct ieee80211_he_cap_elem *he_cap,
if (eht_cap->phy_cap_info[0] & IEEE80211_EHT_PHY_CAP0_320MHZ_IN_6GHZ)
count += 3;
- return count ? count : 4;
+ if (count)
+ return count;
+
+ return from_ap ? 3 : 4;
}
/* 802.11be EHT PPE Thresholds */
@@ -2945,7 +2949,8 @@ ieee80211_eht_ppe_size(u16 ppe_thres_hdr, const u8 *phy_cap_info)
}
static inline bool
-ieee80211_eht_capa_size_ok(const u8 *he_capa, const u8 *data, u8 len)
+ieee80211_eht_capa_size_ok(const u8 *he_capa, const u8 *data, u8 len,
+ bool from_ap)
{
const struct ieee80211_eht_cap_elem_fixed *elem = (const void *)data;
u8 needed = sizeof(struct ieee80211_eht_cap_elem_fixed);
@@ -2954,7 +2959,8 @@ ieee80211_eht_capa_size_ok(const u8 *he_capa, const u8 *data, u8 len)
return false;
needed += ieee80211_eht_mcs_nss_size((const void *)he_capa,
- (const void *)data);
+ (const void *)data,
+ from_ap);
if (len < needed)
return false;
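[Editorial note] Callers must now say whether the EHT capabilities came from an AP: when no bandwidth-specific MCS/NSS sets are present, an AP carries a 3-byte 20 MHz-only map while a non-AP STA carries 4 bytes, so the minimum element length differs. A hedged sketch of a validation helper; the names and parsed-element parameters are assumptions.

/* Hedged sketch: length-check a received EHT capabilities element,
 * letting the helper pick the 3- or 4-byte MCS/NSS size based on from_ap. */
static bool my_validate_eht(const u8 *he_capa, const u8 *eht_capa, u8 eht_len,
			    bool received_from_ap)
{
	return ieee80211_eht_capa_size_ok(he_capa, eht_capa, eht_len,
					  received_from_ap);
}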
diff --git a/include/linux/if_pppol2tp.h b/include/linux/if_pppol2tp.h
index 96d40942e5a3..c87efd333faa 100644
--- a/include/linux/if_pppol2tp.h
+++ b/include/linux/if_pppol2tp.h
@@ -4,8 +4,6 @@
*
* This file supplies definitions required by the PPP over L2TP driver
* (l2tp_ppp.c). All version information wrt this file is located in l2tp_ppp.c
- *
- * License:
*/
#ifndef __LINUX_IF_PPPOL2TP_H
#define __LINUX_IF_PPPOL2TP_H
diff --git a/include/linux/if_pppox.h b/include/linux/if_pppox.h
index 69e813bcb947..ff3beda1312c 100644
--- a/include/linux/if_pppox.h
+++ b/include/linux/if_pppox.h
@@ -5,8 +5,6 @@
*
* This file supplies definitions required by the PPP over Ethernet driver
* (pppox.c). All version information wrt this file is located in pppox.c
- *
- * License:
*/
#ifndef __LINUX_IF_PPPOX_H
#define __LINUX_IF_PPPOX_H
diff --git a/include/linux/igmp.h b/include/linux/igmp.h
index 93c262ecbdc9..78890143f079 100644
--- a/include/linux/igmp.h
+++ b/include/linux/igmp.h
@@ -118,9 +118,9 @@ extern int ip_mc_source(int add, int omode, struct sock *sk,
struct ip_mreq_source *mreqs, int ifindex);
extern int ip_mc_msfilter(struct sock *sk, struct ip_msfilter *msf,int ifindex);
extern int ip_mc_msfget(struct sock *sk, struct ip_msfilter *msf,
- struct ip_msfilter __user *optval, int __user *optlen);
+ sockptr_t optval, sockptr_t optlen);
extern int ip_mc_gsfget(struct sock *sk, struct group_filter *gsf,
- struct sockaddr_storage __user *p);
+ sockptr_t optval, size_t offset);
extern int ip_mc_sf_allow(struct sock *sk, __be32 local, __be32 rmt,
int dif, int sdif);
extern void ip_mc_init_dev(struct in_device *);
diff --git a/include/linux/iio/consumer.h b/include/linux/iio/consumer.h
index 5fa5957586cf..6802596b017c 100644
--- a/include/linux/iio/consumer.h
+++ b/include/linux/iio/consumer.h
@@ -13,7 +13,7 @@
struct iio_dev;
struct iio_chan_spec;
struct device;
-struct device_node;
+struct fwnode_handle;
/**
* struct iio_channel - everything needed for a consumer to use a channel
@@ -99,26 +99,20 @@ void iio_channel_release_all(struct iio_channel *chan);
struct iio_channel *devm_iio_channel_get_all(struct device *dev);
/**
- * of_iio_channel_get_by_name() - get description of all that is needed to access channel.
- * @np: Pointer to consumer device tree node
+ * fwnode_iio_channel_get_by_name() - get description of all that is needed to access channel.
+ * @fwnode: Pointer to consumer Firmware node
* @consumer_channel: Unique name to identify the channel on the consumer
* side. This typically describes the channels use within
* the consumer. E.g. 'battery_voltage'
*/
-#ifdef CONFIG_OF
-struct iio_channel *of_iio_channel_get_by_name(struct device_node *np, const char *name);
-#else
-static inline struct iio_channel *
-of_iio_channel_get_by_name(struct device_node *np, const char *name)
-{
- return NULL;
-}
-#endif
+struct iio_channel *fwnode_iio_channel_get_by_name(struct fwnode_handle *fwnode,
+ const char *name);
/**
- * devm_of_iio_channel_get_by_name() - Resource managed version of of_iio_channel_get_by_name().
+ * devm_fwnode_iio_channel_get_by_name() - Resource managed version of
+ * fwnode_iio_channel_get_by_name().
* @dev: Pointer to consumer device.
- * @np: Pointer to consumer device tree node
+ * @fwnode: Pointer to consumer Firmware node
* @consumer_channel: Unique name to identify the channel on the consumer
* side. This typically describes the channels use within
* the consumer. E.g. 'battery_voltage'
@@ -129,9 +123,9 @@ of_iio_channel_get_by_name(struct device_node *np, const char *name)
* The allocated iio channel is automatically released when the device is
* unbound.
*/
-struct iio_channel *devm_of_iio_channel_get_by_name(struct device *dev,
- struct device_node *np,
- const char *consumer_channel);
+struct iio_channel *devm_fwnode_iio_channel_get_by_name(struct device *dev,
+ struct fwnode_handle *fwnode,
+ const char *consumer_channel);
struct iio_cb_buffer;
/**
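[Editorial note] Consumers now look up channels through fwnode handles instead of OF nodes, so the same lookup works for DT and ACPI descriptions. A hedged sketch of a consumer probe using the managed variant; "battery_voltage" follows the example channel name from the kernel-doc above, and my_consumer_probe() is hypothetical.

/* Hedged sketch: fetch a named ADC channel from the consumer's own fwnode. */
static int my_consumer_probe(struct platform_device *pdev)
{
	struct iio_channel *chan;
	int val, ret;

	chan = devm_fwnode_iio_channel_get_by_name(&pdev->dev,
						   dev_fwnode(&pdev->dev),
						   "battery_voltage");
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	ret = iio_read_channel_processed(chan, &val);
	if (ret)
		return ret;

	dev_info(&pdev->dev, "battery voltage: %d\n", val);
	return 0;
}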
diff --git a/include/linux/iio/iio-opaque.h b/include/linux/iio/iio-opaque.h
index 6b3586b3f952..d1f8b30a7c8b 100644
--- a/include/linux/iio/iio-opaque.h
+++ b/include/linux/iio/iio-opaque.h
@@ -11,6 +11,7 @@
* checked by device drivers but should be considered
* read-only as this is a core internal bit
* @driver_module: used to make it harder to undercut users
+ * @mlock_key: lockdep class for iio_dev lock
* @info_exist_lock: lock to prevent use during removal
* @trig_readonly: mark the current trigger immutable
* @event_interface: event chrdevs associated with interrupt lines
@@ -42,6 +43,7 @@ struct iio_dev_opaque {
int currentmode;
int id;
struct module *driver_module;
+ struct lock_class_key mlock_key;
struct mutex info_exist_lock;
bool trig_readonly;
struct iio_event_interface *event_interface;
diff --git a/include/linux/iio/iio.h b/include/linux/iio/iio.h
index 5dfbfc991c69..f0ec8a5e5a7a 100644
--- a/include/linux/iio/iio.h
+++ b/include/linux/iio/iio.h
@@ -17,7 +17,7 @@
* Currently assumes nano seconds.
*/
-struct of_phandle_args;
+struct fwnode_reference_args;
enum iio_shared_by {
IIO_SEPARATE,
@@ -429,6 +429,8 @@ struct iio_trigger; /* forward declaration */
* provide a custom of_xlate function that reads the
* *args* and returns the appropriate index in registered
* IIO channels array.
+ * @fwnode_xlate: fwnode based function pointer to obtain channel specifier index.
+ * Functionally the same as @of_xlate.
* @hwfifo_set_watermark: function pointer to set the current hardware
* fifo watermark level; see hwfifo_* entries in
* Documentation/ABI/testing/sysfs-bus-iio for details on
@@ -508,8 +510,8 @@ struct iio_info {
int (*debugfs_reg_access)(struct iio_dev *indio_dev,
unsigned reg, unsigned writeval,
unsigned *readval);
- int (*of_xlate)(struct iio_dev *indio_dev,
- const struct of_phandle_args *iiospec);
+ int (*fwnode_xlate)(struct iio_dev *indio_dev,
+ const struct fwnode_reference_args *iiospec);
int (*hwfifo_set_watermark)(struct iio_dev *indio_dev, unsigned val);
int (*hwfifo_flush_to_buffer)(struct iio_dev *indio_dev,
unsigned count);
diff --git a/include/linux/iio/types.h b/include/linux/iio/types.h
index a7aa91f3a8dc..82faa98c719a 100644
--- a/include/linux/iio/types.h
+++ b/include/linux/iio/types.h
@@ -17,6 +17,8 @@ enum iio_event_info {
IIO_EV_INFO_HIGH_PASS_FILTER_3DB,
IIO_EV_INFO_LOW_PASS_FILTER_3DB,
IIO_EV_INFO_TIMEOUT,
+ IIO_EV_INFO_RESET_TIMEOUT,
+ IIO_EV_INFO_TAP2_MIN_DELAY,
};
#define IIO_VAL_INT 1
@@ -63,6 +65,7 @@ enum iio_chan_info_enum {
IIO_CHAN_INFO_OVERSAMPLING_RATIO,
IIO_CHAN_INFO_THERMOCOUPLE_TYPE,
IIO_CHAN_INFO_CALIBAMBIENT,
+ IIO_CHAN_INFO_ZEROPOINT,
};
#endif /* _IIO_TYPES_H_ */
diff --git a/include/linux/init.h b/include/linux/init.h
index baf0b29a7010..077d7f93b402 100644
--- a/include/linux/init.h
+++ b/include/linux/init.h
@@ -47,7 +47,7 @@
/* These are for everybody (although not all archs will actually
discard it in modules) */
-#define __init __section(".init.text") __cold __latent_entropy __noinitretpoline __nocfi
+#define __init __section(".init.text") __cold __latent_entropy __noinitretpoline
#define __initdata __section(".init.data")
#define __initconst __section(".init.rodata")
#define __exitdata __section(".exit.data")
@@ -134,7 +134,7 @@ static inline initcall_t initcall_from_entry(initcall_entry_t *entry)
extern initcall_entry_t __con_initcall_start[], __con_initcall_end[];
-/* Used for contructor calls. */
+/* Used for constructor calls. */
typedef void (*ctor_fn_t)(void);
struct file_system_type;
@@ -220,8 +220,8 @@ extern bool initcall_debug;
__initcall_name(initstub, __iid, id)
#define __define_initcall_stub(__stub, fn) \
- int __init __cficanonical __stub(void); \
- int __init __cficanonical __stub(void) \
+ int __init __stub(void); \
+ int __init __stub(void) \
{ \
return fn(); \
} \
diff --git a/include/linux/input/auo-pixcir-ts.h b/include/linux/input/auo-pixcir-ts.h
deleted file mode 100644
index ed0776997a7a..000000000000
--- a/include/linux/input/auo-pixcir-ts.h
+++ /dev/null
@@ -1,44 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Driver for AUO in-cell touchscreens
- *
- * Copyright (c) 2011 Heiko Stuebner <heiko@sntech.de>
- *
- * based on auo_touch.h from Dell Streak kernel
- *
- * Copyright (c) 2008 QUALCOMM Incorporated.
- * Copyright (c) 2008 QUALCOMM USA, INC.
- */
-
-#ifndef __AUO_PIXCIR_TS_H__
-#define __AUO_PIXCIR_TS_H__
-
-/*
- * Interrupt modes:
- * periodical: interrupt is asserted periodicaly
- * compare coordinates: interrupt is asserted when coordinates change
- * indicate touch: interrupt is asserted during touch
- */
-#define AUO_PIXCIR_INT_PERIODICAL 0x00
-#define AUO_PIXCIR_INT_COMP_COORD 0x01
-#define AUO_PIXCIR_INT_TOUCH_IND 0x02
-
-/*
- * @gpio_int interrupt gpio
- * @int_setting one of AUO_PIXCIR_INT_*
- * @init_hw hardwarespecific init
- * @exit_hw hardwarespecific shutdown
- * @x_max x-resolution
- * @y_max y-resolution
- */
-struct auo_pixcir_ts_platdata {
- int gpio_int;
- int gpio_rst;
-
- int int_setting;
-
- unsigned int x_max;
- unsigned int y_max;
-};
-
-#endif
diff --git a/include/linux/instrumented.h b/include/linux/instrumented.h
index 42faebbaa202..501fa8486749 100644
--- a/include/linux/instrumented.h
+++ b/include/linux/instrumented.h
@@ -2,7 +2,7 @@
/*
* This header provides generic wrappers for memory access instrumentation that
- * the compiler cannot emit for: KASAN, KCSAN.
+ * the compiler cannot emit for: KASAN, KCSAN, KMSAN.
*/
#ifndef _LINUX_INSTRUMENTED_H
#define _LINUX_INSTRUMENTED_H
@@ -10,6 +10,7 @@
#include <linux/compiler.h>
#include <linux/kasan-checks.h>
#include <linux/kcsan-checks.h>
+#include <linux/kmsan-checks.h>
#include <linux/types.h>
/**
@@ -117,10 +118,11 @@ instrument_copy_to_user(void __user *to, const void *from, unsigned long n)
{
kasan_check_read(from, n);
kcsan_check_read(from, n);
+ kmsan_copy_to_user(to, from, n, 0);
}
/**
- * instrument_copy_from_user - instrument writes of copy_from_user
+ * instrument_copy_from_user_before - add instrumentation before copy_from_user
*
* Instrument writes to kernel memory, that are due to copy_from_user (and
* variants). The instrumentation should be inserted before the accesses.
@@ -130,10 +132,61 @@ instrument_copy_to_user(void __user *to, const void *from, unsigned long n)
* @n number of bytes to copy
*/
static __always_inline void
-instrument_copy_from_user(const void *to, const void __user *from, unsigned long n)
+instrument_copy_from_user_before(const void *to, const void __user *from, unsigned long n)
{
kasan_check_write(to, n);
kcsan_check_write(to, n);
}
+/**
+ * instrument_copy_from_user_after - add instrumentation after copy_from_user
+ *
+ * Instrument writes to kernel memory, that are due to copy_from_user (and
+ * variants). The instrumentation should be inserted after the accesses.
+ *
+ * @to destination address
+ * @from source address
+ * @n number of bytes to copy
+ * @left number of bytes not copied (as returned by copy_from_user)
+ */
+static __always_inline void
+instrument_copy_from_user_after(const void *to, const void __user *from,
+ unsigned long n, unsigned long left)
+{
+ kmsan_unpoison_memory(to, n - left);
+}
+
+/**
+ * instrument_get_user() - add instrumentation to get_user()-like macros
+ *
+ * get_user() and friends are fragile, so it may depend on the implementation
+ * whether the instrumentation happens before or after the data is copied from
+ * the userspace.
+ *
+ * @to destination variable, may not be address-taken
+ */
+#define instrument_get_user(to) \
+({ \
+ u64 __tmp = (u64)(to); \
+ kmsan_unpoison_memory(&__tmp, sizeof(__tmp)); \
+ to = __tmp; \
+})
+
+
+/**
+ * instrument_put_user() - add instrumentation to put_user()-like macros
+ *
+ * put_user() and friends are fragile, so it may depend on the implementation
+ * whether the instrumentation happens before or after the data is copied from
+ * the userspace.
+ *
+ * @from source address
+ * @ptr userspace pointer to copy to
+ * @size number of bytes to copy
+ */
+#define instrument_put_user(from, ptr, size) \
+({ \
+ kmsan_copy_to_user(ptr, &from, sizeof(from), 0); \
+})
+
#endif /* _LINUX_INSTRUMENTED_H */
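[Editorial note] The split into _before/_after exists because KASAN/KCSAN want to check the destination before the copy, while KMSAN can only unpoison the bytes that were actually copied, which is known only afterwards. A hedged sketch of the intended call order around a user copy, assuming a raw_copy_from_user() primitive; the real wrappers live in linux/uaccess.h and arch code.

/* Hedged sketch of the call order, not the actual uaccess implementation. */
static inline unsigned long
my_copy_from_user(void *to, const void __user *from, unsigned long n)
{
	unsigned long left;

	instrument_copy_from_user_before(to, from, n);	/* KASAN/KCSAN checks */
	left = raw_copy_from_user(to, from, n);
	instrument_copy_from_user_after(to, from, n, left); /* KMSAN unpoisons n - left */

	return left;
}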
diff --git a/include/linux/interconnect-provider.h b/include/linux/interconnect-provider.h
index 6bd01f7159c6..cd5c5a27557f 100644
--- a/include/linux/interconnect-provider.h
+++ b/include/linux/interconnect-provider.h
@@ -123,7 +123,7 @@ void icc_node_add(struct icc_node *node, struct icc_provider *provider);
void icc_node_del(struct icc_node *node);
int icc_nodes_remove(struct icc_provider *provider);
int icc_provider_add(struct icc_provider *provider);
-int icc_provider_del(struct icc_provider *provider);
+void icc_provider_del(struct icc_provider *provider);
struct icc_node_data *of_icc_get_from_provider(struct of_phandle_args *spec);
void icc_sync_state(struct device *dev);
@@ -172,9 +172,8 @@ static inline int icc_provider_add(struct icc_provider *provider)
return -ENOTSUPP;
}
-static inline int icc_provider_del(struct icc_provider *provider)
+static inline void icc_provider_del(struct icc_provider *provider)
{
- return -ENOTSUPP;
}
static inline struct icc_node_data *of_icc_get_from_provider(struct of_phandle_args *spec)
diff --git a/include/linux/io-pgtable.h b/include/linux/io-pgtable.h
index ca98aeadcc80..1f068dfdb140 100644
--- a/include/linux/io-pgtable.h
+++ b/include/linux/io-pgtable.h
@@ -16,7 +16,9 @@ enum io_pgtable_fmt {
ARM_V7S,
ARM_MALI_LPAE,
AMD_IOMMU_V1,
+ AMD_IOMMU_V2,
APPLE_DART,
+ APPLE_DART2,
IO_PGTABLE_NUM_FMTS,
};
@@ -260,6 +262,7 @@ extern struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s2_init_fns;
extern struct io_pgtable_init_fns io_pgtable_arm_v7s_init_fns;
extern struct io_pgtable_init_fns io_pgtable_arm_mali_lpae_init_fns;
extern struct io_pgtable_init_fns io_pgtable_amd_iommu_v1_init_fns;
+extern struct io_pgtable_init_fns io_pgtable_amd_iommu_v2_init_fns;
extern struct io_pgtable_init_fns io_pgtable_apple_dart_init_fns;
#endif /* __IO_PGTABLE_H */
diff --git a/include/linux/io.h b/include/linux/io.h
index 5fc800390fe4..308f4f0cfb93 100644
--- a/include/linux/io.h
+++ b/include/linux/io.h
@@ -59,8 +59,6 @@ void __iomem *devm_ioremap_uc(struct device *dev, resource_size_t offset,
resource_size_t size);
void __iomem *devm_ioremap_wc(struct device *dev, resource_size_t offset,
resource_size_t size);
-void __iomem *devm_ioremap_np(struct device *dev, resource_size_t offset,
- resource_size_t size);
void devm_iounmap(struct device *dev, void __iomem *addr);
int check_signature(const volatile void __iomem *io_addr,
const unsigned char *signature, int length);
diff --git a/include/linux/io_uring.h b/include/linux/io_uring.h
index 4a2f6cc5a492..43bc8a2edccf 100644
--- a/include/linux/io_uring.h
+++ b/include/linux/io_uring.h
@@ -4,6 +4,7 @@
#include <linux/sched.h>
#include <linux/xarray.h>
+#include <uapi/linux/io_uring.h>
enum io_uring_cmd_flags {
IO_URING_F_COMPLETE_DEFER = 1,
@@ -20,14 +21,20 @@ enum io_uring_cmd_flags {
struct io_uring_cmd {
struct file *file;
const void *cmd;
- /* callback to defer completions to task context */
- void (*task_work_cb)(struct io_uring_cmd *cmd);
+ union {
+ /* callback to defer completions to task context */
+ void (*task_work_cb)(struct io_uring_cmd *cmd);
+ /* used for polled completion */
+ void *cookie;
+ };
u32 cmd_op;
- u32 pad;
+ u32 flags;
u8 pdu[32]; /* available inline for free use */
};
#if defined(CONFIG_IO_URING)
+int io_uring_cmd_import_fixed(u64 ubuf, unsigned long len, int rw,
+ struct iov_iter *iter, void *ioucmd);
void io_uring_cmd_done(struct io_uring_cmd *cmd, ssize_t ret, ssize_t res2);
void io_uring_cmd_complete_in_task(struct io_uring_cmd *ioucmd,
void (*task_work_cb)(struct io_uring_cmd *));
@@ -55,6 +62,11 @@ static inline void io_uring_free(struct task_struct *tsk)
__io_uring_free(tsk);
}
#else
+static inline int io_uring_cmd_import_fixed(u64 ubuf, unsigned long len, int rw,
+ struct iov_iter *iter, void *ioucmd)
+{
+ return -EOPNOTSUPP;
+}
static inline void io_uring_cmd_done(struct io_uring_cmd *cmd, ssize_t ret,
ssize_t ret2)
{
diff --git a/include/linux/io_uring_types.h b/include/linux/io_uring_types.h
index 677a25d44d7f..f5b687a787a3 100644
--- a/include/linux/io_uring_types.h
+++ b/include/linux/io_uring_types.h
@@ -34,9 +34,6 @@ struct io_file_table {
unsigned int alloc_hint;
};
-struct io_notif;
-struct io_notif_slot;
-
struct io_hash_bucket {
spinlock_t lock;
struct hlist_head list;
@@ -184,6 +181,8 @@ struct io_ev_fd {
struct eventfd_ctx *cq_ev_fd;
unsigned int eventfd_async: 1;
struct rcu_head rcu;
+ atomic_t refs;
+ atomic_t ops;
};
struct io_alloc_cache {
@@ -240,8 +239,6 @@ struct io_ring_ctx {
unsigned nr_user_files;
unsigned nr_user_bufs;
struct io_mapped_ubuf **user_bufs;
- struct io_notif_slot *notif_slots;
- unsigned nr_notif_slots;
struct io_submit_state submit_state;
@@ -301,6 +298,8 @@ struct io_ring_ctx {
struct io_hash_table cancel_table;
bool poll_multi_queue;
+ struct llist_head work_llist;
+
struct list_head io_buffers_comp;
} ____cacheline_aligned_in_smp;
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index ea30f00dc145..a325532aeab5 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -212,7 +212,7 @@ struct iommu_iotlb_gather {
* @of_xlate: add OF master IDs to iommu grouping
* @is_attach_deferred: Check if domain attach should be deferred from iommu
* driver init to device driver init (default no)
- * @dev_has/enable/disable_feat: per device entries to check/enable/disable
+ * @dev_enable/disable_feat: per device entries to enable/disable
* iommu specific features.
* @sva_bind: Bind process address space to device
* @sva_unbind: Unbind process address space from device
@@ -227,7 +227,7 @@ struct iommu_iotlb_gather {
* @owner: Driver module providing these ops
*/
struct iommu_ops {
- bool (*capable)(enum iommu_cap);
+ bool (*capable)(struct device *dev, enum iommu_cap);
/* Domain allocation and freeing by the iommu driver */
struct iommu_domain *(*domain_alloc)(unsigned iommu_domain_type);
@@ -416,11 +416,9 @@ static inline const struct iommu_ops *dev_iommu_ops(struct device *dev)
return dev->iommu->iommu_dev->ops;
}
-extern int bus_set_iommu(struct bus_type *bus, const struct iommu_ops *ops);
extern int bus_iommu_probe(struct bus_type *bus);
extern bool iommu_present(struct bus_type *bus);
extern bool device_iommu_capable(struct device *dev, enum iommu_cap cap);
-extern bool iommu_capable(struct bus_type *bus, enum iommu_cap cap);
extern struct iommu_domain *iommu_domain_alloc(struct bus_type *bus);
extern struct iommu_group *iommu_group_get_by_id(int id);
extern void iommu_domain_free(struct iommu_domain *domain);
@@ -697,11 +695,6 @@ static inline bool device_iommu_capable(struct device *dev, enum iommu_cap cap)
return false;
}
-static inline bool iommu_capable(struct bus_type *bus, enum iommu_cap cap)
-{
- return false;
-}
-
static inline struct iommu_domain *iommu_domain_alloc(struct bus_type *bus)
{
return NULL;
@@ -1070,4 +1063,40 @@ void iommu_debugfs_setup(void);
static inline void iommu_debugfs_setup(void) {}
#endif
+#ifdef CONFIG_IOMMU_DMA
+#include <linux/msi.h>
+
+/* Setup call for arch DMA mapping code */
+void iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 dma_limit);
+
+int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base);
+
+int iommu_dma_prepare_msi(struct msi_desc *desc, phys_addr_t msi_addr);
+void iommu_dma_compose_msi_msg(struct msi_desc *desc, struct msi_msg *msg);
+
+#else /* CONFIG_IOMMU_DMA */
+
+struct msi_desc;
+struct msi_msg;
+
+static inline void iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 dma_limit)
+{
+}
+
+static inline int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base)
+{
+ return -ENODEV;
+}
+
+static inline int iommu_dma_prepare_msi(struct msi_desc *desc, phys_addr_t msi_addr)
+{
+ return 0;
+}
+
+static inline void iommu_dma_compose_msi_msg(struct msi_desc *desc, struct msi_msg *msg)
+{
+}
+
+#endif /* CONFIG_IOMMU_DMA */
+
#endif /* __LINUX_IOMMU_H */
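[Editorial note] The newly exposed MSI helpers are typically paired by an MSI irqchip driver: prepare_msi reserves an IOVA for the doorbell when the device sits behind an IOMMU, and compose_msi_msg rewrites the programmed address accordingly. A hedged sketch under those assumptions; my_doorbell_phys and the my_* callbacks are hypothetical.

static phys_addr_t my_doorbell_phys;	/* hypothetical MSI doorbell address */

static int my_msi_prepare(struct irq_domain *domain, struct device *dev,
			  int nvec, msi_alloc_info_t *info)
{
	/* Reserve an IOVA mapping for the doorbell, if an IOMMU manages dev. */
	return iommu_dma_prepare_msi(info->desc, my_doorbell_phys);
}

static void my_irq_compose_msi_msg(struct irq_data *d, struct msi_msg *msg)
{
	msg->address_lo = lower_32_bits(my_doorbell_phys);
	msg->address_hi = upper_32_bits(my_doorbell_phys);
	msg->data = d->hwirq;

	/* Substitute the IOVA reserved above, if any, for the physical address. */
	iommu_dma_compose_msi_msg(irq_data_get_msi_desc(d), msg);
}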
diff --git a/include/linux/ioport.h b/include/linux/ioport.h
index 616b683563a9..27642ca15d93 100644
--- a/include/linux/ioport.h
+++ b/include/linux/ioport.h
@@ -79,7 +79,8 @@ struct resource {
#define IORESOURCE_IRQ_HIGHLEVEL (1<<2)
#define IORESOURCE_IRQ_LOWLEVEL (1<<3)
#define IORESOURCE_IRQ_SHAREABLE (1<<4)
-#define IORESOURCE_IRQ_OPTIONAL (1<<5)
+#define IORESOURCE_IRQ_OPTIONAL (1<<5)
+#define IORESOURCE_IRQ_WAKECAPABLE (1<<6)
/* PnP DMA specific bits (IORESOURCE_BITS) */
#define IORESOURCE_DMA_TYPE_MASK (3<<0)
@@ -172,6 +173,11 @@ enum {
#define DEFINE_RES_MEM(_start, _size) \
DEFINE_RES_MEM_NAMED((_start), (_size), NULL)
+#define DEFINE_RES_REG_NAMED(_start, _size, _name) \
+ DEFINE_RES_NAMED((_start), (_size), (_name), IORESOURCE_REG)
+#define DEFINE_RES_REG(_start, _size) \
+ DEFINE_RES_REG_NAMED((_start), (_size), NULL)
+
#define DEFINE_RES_IRQ_NAMED(_irq, _name) \
DEFINE_RES_NAMED((_irq), 1, (_name), IORESOURCE_IRQ)
#define DEFINE_RES_IRQ(_irq) \
diff --git a/include/linux/iosys-map.h b/include/linux/iosys-map.h
index a533cae189d7..cb71aa616bd3 100644
--- a/include/linux/iosys-map.h
+++ b/include/linux/iosys-map.h
@@ -46,10 +46,13 @@
*
* iosys_map_set_vaddr(&map, 0xdeadbeaf);
*
- * To set an address in I/O memory, use iosys_map_set_vaddr_iomem().
+ * To set an address in I/O memory, use IOSYS_MAP_INIT_VADDR_IOMEM() or
+ * iosys_map_set_vaddr_iomem().
*
* .. code-block:: c
*
+ * struct iosys_map map = IOSYS_MAP_INIT_VADDR_IOMEM(0xdeadbeaf);
+ *
* iosys_map_set_vaddr_iomem(&map, 0xdeadbeaf);
*
* Instances of struct iosys_map do not have to be cleaned up, but
@@ -122,6 +125,16 @@ struct iosys_map {
}
/**
+ * IOSYS_MAP_INIT_VADDR_IOMEM - Initializes struct iosys_map to an address in I/O memory
+ * @vaddr_iomem_: An I/O-memory address
+ */
+#define IOSYS_MAP_INIT_VADDR_IOMEM(vaddr_iomem_) \
+ { \
+ .vaddr_iomem = (vaddr_iomem_), \
+ .is_iomem = true, \
+ }
+
+/**
* IOSYS_MAP_INIT_OFFSET - Initializes struct iosys_map from another iosys_map
* @map_: The dma-buf mapping structure to copy from
* @offset_: Offset to add to the other mapping
diff --git a/include/linux/iova.h b/include/linux/iova.h
index c6ba6d95d79c..83c00fac2acb 100644
--- a/include/linux/iova.h
+++ b/include/linux/iova.h
@@ -75,7 +75,7 @@ static inline unsigned long iova_pfn(struct iova_domain *iovad, dma_addr_t iova)
return iova >> iova_shift(iovad);
}
-#if IS_ENABLED(CONFIG_IOMMU_IOVA)
+#if IS_REACHABLE(CONFIG_IOMMU_IOVA)
int iova_cache_get(void);
void iova_cache_put(void);
diff --git a/include/linux/iova_bitmap.h b/include/linux/iova_bitmap.h
new file mode 100644
index 000000000000..c006cf0a25f3
--- /dev/null
+++ b/include/linux/iova_bitmap.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2022, Oracle and/or its affiliates.
+ * Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved
+ */
+#ifndef _IOVA_BITMAP_H_
+#define _IOVA_BITMAP_H_
+
+#include <linux/types.h>
+
+struct iova_bitmap;
+
+typedef int (*iova_bitmap_fn_t)(struct iova_bitmap *bitmap,
+ unsigned long iova, size_t length,
+ void *opaque);
+
+struct iova_bitmap *iova_bitmap_alloc(unsigned long iova, size_t length,
+ unsigned long page_size,
+ u64 __user *data);
+void iova_bitmap_free(struct iova_bitmap *bitmap);
+int iova_bitmap_for_each(struct iova_bitmap *bitmap, void *opaque,
+ iova_bitmap_fn_t fn);
+void iova_bitmap_set(struct iova_bitmap *bitmap,
+ unsigned long iova, size_t length);
+
+#endif
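[Editorial note] The new IOVA bitmap API is meant for reporting per-page dirty state back to a userspace-supplied bitmap. A hedged sketch of how a caller might drive it; struct my_device and the my_device_* functions are hypothetical, and the callback pessimistically marks the whole range.

/* Callback invoked by iova_bitmap_for_each() for each mapped chunk. */
static int my_device_fetch_dirty(struct iova_bitmap *bitmap,
				 unsigned long iova, size_t length,
				 void *opaque)
{
	/* Query the hardware for writes in [iova, iova + length); here we
	 * simply mark the whole range as dirty. */
	iova_bitmap_set(bitmap, iova, length);
	return 0;
}

static int my_device_report_dirty(struct my_device *mdev, unsigned long iova,
				  size_t length, u64 __user *data)
{
	struct iova_bitmap *bitmap;
	int ret;

	bitmap = iova_bitmap_alloc(iova, length, PAGE_SIZE, data);
	if (IS_ERR(bitmap))
		return PTR_ERR(bitmap);

	ret = iova_bitmap_for_each(bitmap, mdev, my_device_fetch_dirty);

	iova_bitmap_free(bitmap);
	return ret;
}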
diff --git a/include/linux/ipc_namespace.h b/include/linux/ipc_namespace.h
index e3e8c8662b49..e8240cf2611a 100644
--- a/include/linux/ipc_namespace.h
+++ b/include/linux/ipc_namespace.h
@@ -11,6 +11,7 @@
#include <linux/refcount.h>
#include <linux/rhashtable-types.h>
#include <linux/sysctl.h>
+#include <linux/percpu_counter.h>
struct user_namespace;
@@ -36,8 +37,8 @@ struct ipc_namespace {
unsigned int msg_ctlmax;
unsigned int msg_ctlmnb;
unsigned int msg_ctlmni;
- atomic_t msg_bytes;
- atomic_t msg_hdrs;
+ struct percpu_counter percpu_msg_bytes;
+ struct percpu_counter percpu_msg_hdrs;
size_t shm_ctlmax;
size_t shm_ctlall;
diff --git a/include/linux/irqchip.h b/include/linux/irqchip.h
index 3a091d0710ae..d5e6024cb2a8 100644
--- a/include/linux/irqchip.h
+++ b/include/linux/irqchip.h
@@ -44,7 +44,8 @@ static const struct of_device_id drv_name##_irqchip_match_table[] = {
#define IRQCHIP_MATCH(compat, fn) { .compatible = compat, \
.data = typecheck_irq_init_cb(fn), },
-#define IRQCHIP_PLATFORM_DRIVER_END(drv_name) \
+
+#define IRQCHIP_PLATFORM_DRIVER_END(drv_name, ...) \
{}, \
}; \
MODULE_DEVICE_TABLE(of, drv_name##_irqchip_match_table); \
@@ -56,6 +57,7 @@ static struct platform_driver drv_name##_driver = { \
.owner = THIS_MODULE, \
.of_match_table = drv_name##_irqchip_match_table, \
.suppress_bind_attrs = true, \
+ __VA_ARGS__ \
}, \
}; \
builtin_platform_driver(drv_name##_driver)
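[Editorial note] IRQCHIP_PLATFORM_DRIVER_END() now forwards extra designated initializers into the embedded struct device_driver, which is how an irqchip module can attach things like PM ops. A hedged sketch; my_intc_init and my_intc_pm_ops are hypothetical.

IRQCHIP_PLATFORM_DRIVER_BEGIN(my_intc)
IRQCHIP_MATCH("vendor,my-intc", my_intc_init)
IRQCHIP_PLATFORM_DRIVER_END(my_intc, .pm = &my_intc_pm_ops)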
diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h
index 1cd4e36890fb..844a8e30e6de 100644
--- a/include/linux/irqdesc.h
+++ b/include/linux/irqdesc.h
@@ -169,6 +169,7 @@ int generic_handle_irq_safe(unsigned int irq);
* conversion failed.
*/
int generic_handle_domain_irq(struct irq_domain *domain, unsigned int hwirq);
+int generic_handle_domain_irq_safe(struct irq_domain *domain, unsigned int hwirq);
int generic_handle_domain_nmi(struct irq_domain *domain, unsigned int hwirq);
#endif
diff --git a/include/linux/isa.h b/include/linux/isa.h
index e30963190968..4fbbf5e36e08 100644
--- a/include/linux/isa.h
+++ b/include/linux/isa.h
@@ -38,6 +38,32 @@ static inline void isa_unregister_driver(struct isa_driver *d)
}
#endif
+#define module_isa_driver_init(__isa_driver, __num_isa_dev) \
+static int __init __isa_driver##_init(void) \
+{ \
+ return isa_register_driver(&(__isa_driver), __num_isa_dev); \
+} \
+module_init(__isa_driver##_init)
+
+#define module_isa_driver_with_irq_init(__isa_driver, __num_isa_dev, __num_irq) \
+static int __init __isa_driver##_init(void) \
+{ \
+ if (__num_irq != __num_isa_dev) { \
+ pr_err("%s: Number of irq (%u) does not match number of base (%u)\n", \
+ __isa_driver.driver.name, __num_irq, __num_isa_dev); \
+ return -EINVAL; \
+ } \
+ return isa_register_driver(&(__isa_driver), __num_isa_dev); \
+} \
+module_init(__isa_driver##_init)
+
+#define module_isa_driver_exit(__isa_driver) \
+static void __exit __isa_driver##_exit(void) \
+{ \
+ isa_unregister_driver(&(__isa_driver)); \
+} \
+module_exit(__isa_driver##_exit)
+
/**
* module_isa_driver() - Helper macro for registering a ISA driver
* @__isa_driver: isa_driver struct
@@ -48,16 +74,22 @@ static inline void isa_unregister_driver(struct isa_driver *d)
* use this macro once, and calling it replaces module_init and module_exit.
*/
#define module_isa_driver(__isa_driver, __num_isa_dev) \
-static int __init __isa_driver##_init(void) \
-{ \
- return isa_register_driver(&(__isa_driver), __num_isa_dev); \
-} \
-module_init(__isa_driver##_init); \
-static void __exit __isa_driver##_exit(void) \
-{ \
- isa_unregister_driver(&(__isa_driver)); \
-} \
-module_exit(__isa_driver##_exit);
+module_isa_driver_init(__isa_driver, __num_isa_dev); \
+module_isa_driver_exit(__isa_driver)
+
+/**
+ * module_isa_driver_with_irq() - Helper macro for registering an ISA driver with irq
+ * @__isa_driver: isa_driver struct
+ * @__num_isa_dev: number of devices to register
+ * @__num_irq: number of IRQ to register
+ *
+ * Helper macro for ISA drivers with irq that do not do anything special in
+ * module init/exit. Each module may only use this macro once, and calling it
+ * replaces module_init and module_exit.
+ */
+#define module_isa_driver_with_irq(__isa_driver, __num_isa_dev, __num_irq) \
+module_isa_driver_with_irq_init(__isa_driver, __num_isa_dev, __num_irq); \
+module_isa_driver_exit(__isa_driver)
/**
* max_num_isa_dev() - Maximum possible number registered of an ISA device
diff --git a/include/linux/iversion.h b/include/linux/iversion.h
index 3bfebde5a1a6..e27bd4f55d84 100644
--- a/include/linux/iversion.h
+++ b/include/linux/iversion.h
@@ -123,17 +123,12 @@ inode_peek_iversion_raw(const struct inode *inode)
static inline void
inode_set_max_iversion_raw(struct inode *inode, u64 val)
{
- u64 cur, old;
+ u64 cur = inode_peek_iversion_raw(inode);
- cur = inode_peek_iversion_raw(inode);
- for (;;) {
+ do {
if (cur > val)
break;
- old = atomic64_cmpxchg(&inode->i_version, cur, val);
- if (likely(old == cur))
- break;
- cur = old;
- }
+ } while (!atomic64_try_cmpxchg(&inode->i_version, &cur, val));
}
/**
@@ -177,56 +172,7 @@ inode_set_iversion_queried(struct inode *inode, u64 val)
I_VERSION_QUERIED);
}
-/**
- * inode_maybe_inc_iversion - increments i_version
- * @inode: inode with the i_version that should be updated
- * @force: increment the counter even if it's not necessary?
- *
- * Every time the inode is modified, the i_version field must be seen to have
- * changed by any observer.
- *
- * If "force" is set or the QUERIED flag is set, then ensure that we increment
- * the value, and clear the queried flag.
- *
- * In the common case where neither is set, then we can return "false" without
- * updating i_version.
- *
- * If this function returns false, and no other metadata has changed, then we
- * can avoid logging the metadata.
- */
-static inline bool
-inode_maybe_inc_iversion(struct inode *inode, bool force)
-{
- u64 cur, old, new;
-
- /*
- * The i_version field is not strictly ordered with any other inode
- * information, but the legacy inode_inc_iversion code used a spinlock
- * to serialize increments.
- *
- * Here, we add full memory barriers to ensure that any de-facto
- * ordering with other info is preserved.
- *
- * This barrier pairs with the barrier in inode_query_iversion()
- */
- smp_mb();
- cur = inode_peek_iversion_raw(inode);
- for (;;) {
- /* If flag is clear then we needn't do anything */
- if (!force && !(cur & I_VERSION_QUERIED))
- return false;
-
- /* Since lowest bit is flag, add 2 to avoid it */
- new = (cur & ~I_VERSION_QUERIED) + I_VERSION_INCREMENT;
-
- old = atomic64_cmpxchg(&inode->i_version, cur, new);
- if (likely(old == cur))
- break;
- cur = old;
- }
- return true;
-}
-
+bool inode_maybe_inc_iversion(struct inode *inode, bool force);
/**
* inode_inc_iversion - forcibly increment i_version
@@ -304,10 +250,10 @@ inode_peek_iversion(const struct inode *inode)
static inline u64
inode_query_iversion(struct inode *inode)
{
- u64 cur, old, new;
+ u64 cur, new;
cur = inode_peek_iversion_raw(inode);
- for (;;) {
+ do {
/* If flag is already set, then no need to swap */
if (cur & I_VERSION_QUERIED) {
/*
@@ -320,11 +266,7 @@ inode_query_iversion(struct inode *inode)
}
new = cur | I_VERSION_QUERIED;
- old = atomic64_cmpxchg(&inode->i_version, cur, new);
- if (likely(old == cur))
- break;
- cur = old;
- }
+ } while (!atomic64_try_cmpxchg(&inode->i_version, &cur, new));
return cur >> I_VERSION_QUERIED_SHIFT;
}
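[Editorial note] Both loops above now rely on atomic64_try_cmpxchg(), which writes the observed value back into "cur" on failure, so the explicit reload of "old" disappears and the loop condition doubles as the retry. A small standalone illustration of that idiom (set_max_example() is made up for illustration):

/* Hedged illustration of the try_cmpxchg loop shape used in iversion.h. */
static inline void set_max_example(atomic64_t *v, s64 val)
{
	s64 cur = atomic64_read(v);

	do {
		if (cur >= val)
			return;		/* nothing to do */
	} while (!atomic64_try_cmpxchg(v, &cur, val));
}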
diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h
index ad39636e0c3f..649faac31ddb 100644
--- a/include/linux/kallsyms.h
+++ b/include/linux/kallsyms.h
@@ -15,7 +15,7 @@
#include <asm/sections.h>
-#define KSYM_NAME_LEN 128
+#define KSYM_NAME_LEN 512
#define KSYM_SYMBOL_LEN (sizeof("%s+%#lx/%#lx [%s %s]") + \
(KSYM_NAME_LEN - 1) + \
2*(BITS_PER_LONG*3/10) + (MODULE_NAME_LEN - 1) + \
diff --git a/include/linux/kasan.h b/include/linux/kasan.h
index b092277bf48d..d811b3d7d2a1 100644
--- a/include/linux/kasan.h
+++ b/include/linux/kasan.h
@@ -98,19 +98,13 @@ static inline bool kasan_has_integrated_init(void)
#ifdef CONFIG_KASAN
struct kasan_cache {
+#ifdef CONFIG_KASAN_GENERIC
int alloc_meta_offset;
int free_meta_offset;
+#endif
bool is_kmalloc;
};
-slab_flags_t __kasan_never_merge(void);
-static __always_inline slab_flags_t kasan_never_merge(void)
-{
- if (kasan_enabled())
- return __kasan_never_merge();
- return 0;
-}
-
void __kasan_unpoison_range(const void *addr, size_t size);
static __always_inline void kasan_unpoison_range(const void *addr, size_t size)
{
@@ -134,15 +128,6 @@ static __always_inline void kasan_unpoison_pages(struct page *page,
__kasan_unpoison_pages(page, order, init);
}
-void __kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
- slab_flags_t *flags);
-static __always_inline void kasan_cache_create(struct kmem_cache *cache,
- unsigned int *size, slab_flags_t *flags)
-{
- if (kasan_enabled())
- __kasan_cache_create(cache, size, flags);
-}
-
void __kasan_cache_create_kmalloc(struct kmem_cache *cache);
static __always_inline void kasan_cache_create_kmalloc(struct kmem_cache *cache)
{
@@ -150,14 +135,6 @@ static __always_inline void kasan_cache_create_kmalloc(struct kmem_cache *cache)
__kasan_cache_create_kmalloc(cache);
}
-size_t __kasan_metadata_size(struct kmem_cache *cache);
-static __always_inline size_t kasan_metadata_size(struct kmem_cache *cache)
-{
- if (kasan_enabled())
- return __kasan_metadata_size(cache);
- return 0;
-}
-
void __kasan_poison_slab(struct slab *slab);
static __always_inline void kasan_poison_slab(struct slab *slab)
{
@@ -269,20 +246,12 @@ static __always_inline bool kasan_check_byte(const void *addr)
#else /* CONFIG_KASAN */
-static inline slab_flags_t kasan_never_merge(void)
-{
- return 0;
-}
static inline void kasan_unpoison_range(const void *address, size_t size) {}
static inline void kasan_poison_pages(struct page *page, unsigned int order,
bool init) {}
static inline void kasan_unpoison_pages(struct page *page, unsigned int order,
bool init) {}
-static inline void kasan_cache_create(struct kmem_cache *cache,
- unsigned int *size,
- slab_flags_t *flags) {}
static inline void kasan_cache_create_kmalloc(struct kmem_cache *cache) {}
-static inline size_t kasan_metadata_size(struct kmem_cache *cache) { return 0; }
static inline void kasan_poison_slab(struct slab *slab) {}
static inline void kasan_unpoison_object_data(struct kmem_cache *cache,
void *object) {}
@@ -333,6 +302,11 @@ static inline void kasan_unpoison_task_stack(struct task_struct *task) {}
#ifdef CONFIG_KASAN_GENERIC
+size_t kasan_metadata_size(struct kmem_cache *cache);
+slab_flags_t kasan_never_merge(void);
+void kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
+ slab_flags_t *flags);
+
void kasan_cache_shrink(struct kmem_cache *cache);
void kasan_cache_shutdown(struct kmem_cache *cache);
void kasan_record_aux_stack(void *ptr);
@@ -340,6 +314,21 @@ void kasan_record_aux_stack_noalloc(void *ptr);
#else /* CONFIG_KASAN_GENERIC */
+/* Tag-based KASAN modes do not use per-object metadata. */
+static inline size_t kasan_metadata_size(struct kmem_cache *cache)
+{
+ return 0;
+}
+/* And thus nothing prevents cache merging. */
+static inline slab_flags_t kasan_never_merge(void)
+{
+ return 0;
+}
+/* And no cache-related metadata initialization is required. */
+static inline void kasan_cache_create(struct kmem_cache *cache,
+ unsigned int *size,
+ slab_flags_t *flags) {}
+
static inline void kasan_cache_shrink(struct kmem_cache *cache) {}
static inline void kasan_cache_shutdown(struct kmem_cache *cache) {}
static inline void kasan_record_aux_stack(void *ptr) {}
diff --git a/include/linux/kernfs.h b/include/linux/kernfs.h
index 367044d7708c..73f5c120def8 100644
--- a/include/linux/kernfs.h
+++ b/include/linux/kernfs.h
@@ -108,10 +108,12 @@ enum kernfs_node_flag {
KERNFS_HAS_SEQ_SHOW = 0x0040,
KERNFS_HAS_MMAP = 0x0080,
KERNFS_LOCKDEP = 0x0100,
+ KERNFS_HIDDEN = 0x0200,
KERNFS_SUICIDAL = 0x0400,
KERNFS_SUICIDED = 0x0800,
KERNFS_EMPTY_DIR = 0x1000,
KERNFS_HAS_RELEASE = 0x2000,
+ KERNFS_REMOVING = 0x4000,
};
/* @flags for kernfs_create_root() */
@@ -429,6 +431,7 @@ struct kernfs_node *kernfs_create_link(struct kernfs_node *parent,
const char *name,
struct kernfs_node *target);
void kernfs_activate(struct kernfs_node *kn);
+void kernfs_show(struct kernfs_node *kn, bool show);
void kernfs_remove(struct kernfs_node *kn);
void kernfs_break_active_protection(struct kernfs_node *kn);
void kernfs_unbreak_active_protection(struct kernfs_node *kn);
diff --git a/include/linux/kexec.h b/include/linux/kexec.h
index 13e6c4b58f07..41a686996aaa 100644
--- a/include/linux/kexec.h
+++ b/include/linux/kexec.h
@@ -427,7 +427,7 @@ extern int kexec_load_disabled;
extern bool kexec_in_progress;
int crash_shrink_memory(unsigned long new_size);
-size_t crash_get_memory_size(void);
+ssize_t crash_get_memory_size(void);
#ifndef arch_kexec_protect_crashkres
/*
diff --git a/include/linux/key.h b/include/linux/key.h
index 7febc4881363..d27477faf00d 100644
--- a/include/linux/key.h
+++ b/include/linux/key.h
@@ -88,6 +88,12 @@ enum key_need_perm {
KEY_DEFER_PERM_CHECK, /* Special: permission check is deferred */
};
+enum key_lookup_flag {
+ KEY_LOOKUP_CREATE = 0x01,
+ KEY_LOOKUP_PARTIAL = 0x02,
+ KEY_LOOKUP_ALL = (KEY_LOOKUP_CREATE | KEY_LOOKUP_PARTIAL),
+};
+
struct seq_file;
struct user_struct;
struct signal_struct;
diff --git a/include/linux/khugepaged.h b/include/linux/khugepaged.h
index 384f034ae947..70162d707caf 100644
--- a/include/linux/khugepaged.h
+++ b/include/linux/khugepaged.h
@@ -16,11 +16,13 @@ extern void khugepaged_enter_vma(struct vm_area_struct *vma,
unsigned long vm_flags);
extern void khugepaged_min_free_kbytes_update(void);
#ifdef CONFIG_SHMEM
-extern void collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr);
+extern int collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr,
+ bool install_pmd);
#else
-static inline void collapse_pte_mapped_thp(struct mm_struct *mm,
- unsigned long addr)
+static inline int collapse_pte_mapped_thp(struct mm_struct *mm,
+ unsigned long addr, bool install_pmd)
{
+ return 0;
}
#endif
@@ -46,9 +48,10 @@ static inline void khugepaged_enter_vma(struct vm_area_struct *vma,
unsigned long vm_flags)
{
}
-static inline void collapse_pte_mapped_thp(struct mm_struct *mm,
- unsigned long addr)
+static inline int collapse_pte_mapped_thp(struct mm_struct *mm,
+ unsigned long addr, bool install_pmd)
{
+ return 0;
}
static inline void khugepaged_min_free_kbytes_update(void)
diff --git a/include/linux/kmsan-checks.h b/include/linux/kmsan-checks.h
new file mode 100644
index 000000000000..c4cae333deec
--- /dev/null
+++ b/include/linux/kmsan-checks.h
@@ -0,0 +1,83 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * KMSAN checks to be used for one-off annotations in subsystems.
+ *
+ * Copyright (C) 2017-2022 Google LLC
+ * Author: Alexander Potapenko <glider@google.com>
+ *
+ */
+
+#ifndef _LINUX_KMSAN_CHECKS_H
+#define _LINUX_KMSAN_CHECKS_H
+
+#include <linux/types.h>
+
+#ifdef CONFIG_KMSAN
+
+/**
+ * kmsan_poison_memory() - Mark the memory range as uninitialized.
+ * @address: address to start with.
+ * @size: size of buffer to poison.
+ * @flags: GFP flags for allocations done by this function.
+ *
+ * Until other data is written to this range, KMSAN will treat it as
+ * uninitialized. Error reports for this memory will reference the call site of
+ * kmsan_poison_memory() as origin.
+ */
+void kmsan_poison_memory(const void *address, size_t size, gfp_t flags);
+
+/**
+ * kmsan_unpoison_memory() - Mark the memory range as initialized.
+ * @address: address to start with.
+ * @size: size of buffer to unpoison.
+ *
+ * Until other data is written to this range, KMSAN will treat it as
+ * initialized.
+ */
+void kmsan_unpoison_memory(const void *address, size_t size);
+
+/**
+ * kmsan_check_memory() - Check the memory range for being initialized.
+ * @address: address to start with.
+ * @size: size of buffer to check.
+ *
+ * If any piece of the given range is marked as uninitialized, KMSAN will report
+ * an error.
+ */
+void kmsan_check_memory(const void *address, size_t size);
+
+/**
+ * kmsan_copy_to_user() - Notify KMSAN about a data transfer to userspace.
+ * @to: destination address in the userspace.
+ * @from: source address in the kernel.
+ * @to_copy: number of bytes to copy.
+ * @left: number of bytes not copied.
+ *
+ * If this is a real userspace data transfer, KMSAN checks the bytes that were
+ * actually copied to ensure there was no information leak. If @to belongs to
+ * the kernel space (which is possible for compat syscalls), KMSAN just copies
+ * the metadata.
+ */
+void kmsan_copy_to_user(void __user *to, const void *from, size_t to_copy,
+ size_t left);
+
+#else
+
+static inline void kmsan_poison_memory(const void *address, size_t size,
+ gfp_t flags)
+{
+}
+static inline void kmsan_unpoison_memory(const void *address, size_t size)
+{
+}
+static inline void kmsan_check_memory(const void *address, size_t size)
+{
+}
+static inline void kmsan_copy_to_user(void __user *to, const void *from,
+ size_t to_copy, size_t left)
+{
+}
+
+#endif
+
+#endif /* _LINUX_KMSAN_CHECKS_H */
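[Editorial note] These checks are intended as one-off annotations in places the compiler cannot see, e.g. buffers filled by hardware or data about to leave the kernel. A hedged sketch of both directions; my_hw_read_block(), my_hw_write_block() and struct my_cmd are hypothetical.

static void my_fill_from_hw(void *buf, size_t len)
{
	my_hw_read_block(buf, len);
	/* The write came from hardware, invisible to the compiler: tell KMSAN
	 * the buffer is initialized to avoid false positives downstream. */
	kmsan_unpoison_memory(buf, len);
}

static void my_submit_cmd(const struct my_cmd *cmd)
{
	/* Report an error if any byte of *cmd is still uninitialized before
	 * it is handed to the device. */
	kmsan_check_memory(cmd, sizeof(*cmd));
	my_hw_write_block(cmd, sizeof(*cmd));
}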
diff --git a/include/linux/kmsan.h b/include/linux/kmsan.h
new file mode 100644
index 000000000000..e38ae3c34618
--- /dev/null
+++ b/include/linux/kmsan.h
@@ -0,0 +1,330 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * KMSAN API for subsystems.
+ *
+ * Copyright (C) 2017-2022 Google LLC
+ * Author: Alexander Potapenko <glider@google.com>
+ *
+ */
+#ifndef _LINUX_KMSAN_H
+#define _LINUX_KMSAN_H
+
+#include <linux/dma-direction.h>
+#include <linux/gfp.h>
+#include <linux/kmsan-checks.h>
+#include <linux/types.h>
+
+struct page;
+struct kmem_cache;
+struct task_struct;
+struct scatterlist;
+struct urb;
+
+#ifdef CONFIG_KMSAN
+
+/**
+ * kmsan_task_create() - Initialize KMSAN state for the task.
+ * @task: task to initialize.
+ */
+void kmsan_task_create(struct task_struct *task);
+
+/**
+ * kmsan_task_exit() - Notify KMSAN that a task has exited.
+ * @task: task about to finish.
+ */
+void kmsan_task_exit(struct task_struct *task);
+
+/**
+ * kmsan_init_shadow() - Initialize KMSAN shadow at boot time.
+ *
+ * Allocate and initialize KMSAN metadata for early allocations.
+ */
+void __init kmsan_init_shadow(void);
+
+/**
+ * kmsan_init_runtime() - Initialize KMSAN state and enable KMSAN.
+ */
+void __init kmsan_init_runtime(void);
+
+/**
+ * kmsan_memblock_free_pages() - handle freeing of memblock pages.
+ * @page: struct page to free.
+ * @order: order of @page.
+ *
+ * Freed pages are either returned to buddy allocator or held back to be used
+ * as metadata pages.
+ */
+bool __init kmsan_memblock_free_pages(struct page *page, unsigned int order);
+
+/**
+ * kmsan_alloc_page() - Notify KMSAN about an alloc_pages() call.
+ * @page: struct page pointer returned by alloc_pages().
+ * @order: order of allocated struct page.
+ * @flags: GFP flags used by alloc_pages()
+ *
+ * KMSAN marks 1<<@order pages starting at @page as uninitialized, unless
+ * @flags contain __GFP_ZERO.
+ */
+void kmsan_alloc_page(struct page *page, unsigned int order, gfp_t flags);
+
+/**
+ * kmsan_free_page() - Notify KMSAN about a free_pages() call.
+ * @page: struct page pointer passed to free_pages().
+ * @order: order of deallocated struct page.
+ *
+ * KMSAN marks freed memory as uninitialized.
+ */
+void kmsan_free_page(struct page *page, unsigned int order);
+
+/**
+ * kmsan_copy_page_meta() - Copy KMSAN metadata between two pages.
+ * @dst: destination page.
+ * @src: source page.
+ *
+ * KMSAN copies the contents of metadata pages for @src into the metadata pages
+ * for @dst. If @dst has no associated metadata pages, nothing happens.
+ * If @src has no associated metadata pages, @dst metadata pages are unpoisoned.
+ */
+void kmsan_copy_page_meta(struct page *dst, struct page *src);
+
+/**
+ * kmsan_slab_alloc() - Notify KMSAN about a slab allocation.
+ * @s: slab cache the object belongs to.
+ * @object: object pointer.
+ * @flags: GFP flags passed to the allocator.
+ *
+ * Depending on cache flags and GFP flags, KMSAN sets up the metadata of the
+ * newly created object, marking it as initialized or uninitialized.
+ */
+void kmsan_slab_alloc(struct kmem_cache *s, void *object, gfp_t flags);
+
+/**
+ * kmsan_slab_free() - Notify KMSAN about a slab deallocation.
+ * @s: slab cache the object belongs to.
+ * @object: object pointer.
+ *
+ * KMSAN marks the freed object as uninitialized.
+ */
+void kmsan_slab_free(struct kmem_cache *s, void *object);
+
+/**
+ * kmsan_kmalloc_large() - Notify KMSAN about a large slab allocation.
+ * @ptr: object pointer.
+ * @size: object size.
+ * @flags: GFP flags passed to the allocator.
+ *
+ * Similar to kmsan_slab_alloc(), but for large allocations.
+ */
+void kmsan_kmalloc_large(const void *ptr, size_t size, gfp_t flags);
+
+/**
+ * kmsan_kfree_large() - Notify KMSAN about a large slab deallocation.
+ * @ptr: object pointer.
+ *
+ * Similar to kmsan_slab_free(), but for large allocations.
+ */
+void kmsan_kfree_large(const void *ptr);
+
+/**
+ * kmsan_map_kernel_range_noflush() - Notify KMSAN about a vmap.
+ * @start: start of vmapped range.
+ * @end: end of vmapped range.
+ * @prot: page protection flags used for vmap.
+ * @pages: array of pages.
+ * @page_shift: page_shift passed to vmap_range_noflush().
+ *
+ * KMSAN maps shadow and origin pages of @pages into contiguous ranges in
+ * vmalloc metadata address range.
+ */
+void kmsan_vmap_pages_range_noflush(unsigned long start, unsigned long end,
+ pgprot_t prot, struct page **pages,
+ unsigned int page_shift);
+
+/**
+ * kmsan_vunmap_kernel_range_noflush() - Notify KMSAN about a vunmap.
+ * @start: start of vunmapped range.
+ * @end: end of vunmapped range.
+ *
+ * KMSAN unmaps the contiguous metadata ranges created by
+ * kmsan_map_kernel_range_noflush().
+ */
+void kmsan_vunmap_range_noflush(unsigned long start, unsigned long end);
+
+/**
+ * kmsan_ioremap_page_range() - Notify KMSAN about a ioremap_page_range() call.
+ * @addr: range start.
+ * @end: range end.
+ * @phys_addr: physical range start.
+ * @prot: page protection flags used for ioremap_page_range().
+ * @page_shift: page_shift argument passed to vmap_range_noflush().
+ *
+ * KMSAN creates new metadata pages for the physical pages mapped into the
+ * virtual memory.
+ */
+void kmsan_ioremap_page_range(unsigned long addr, unsigned long end,
+ phys_addr_t phys_addr, pgprot_t prot,
+ unsigned int page_shift);
+
+/**
+ * kmsan_iounmap_page_range() - Notify KMSAN about a iounmap_page_range() call.
+ * @start: range start.
+ * @end: range end.
+ *
+ * KMSAN unmaps the metadata pages for the given range and, unlike for
+ * vunmap_page_range(), also deallocates them.
+ */
+void kmsan_iounmap_page_range(unsigned long start, unsigned long end);
+
+/**
+ * kmsan_handle_dma() - Handle a DMA data transfer.
+ * @page: first page of the buffer.
+ * @offset: offset of the buffer within the first page.
+ * @size: buffer size.
+ * @dir: one of possible dma_data_direction values.
+ *
+ * Depending on @direction, KMSAN:
+ * * checks the buffer, if it is copied to device;
+ * * initializes the buffer, if it is copied from device;
+ * * does both, if this is a DMA_BIDIRECTIONAL transfer.
+ */
+void kmsan_handle_dma(struct page *page, size_t offset, size_t size,
+ enum dma_data_direction dir);
+
+/**
+ * kmsan_handle_dma_sg() - Handle a DMA transfer using scatterlist.
+ * @sg: scatterlist holding DMA buffers.
+ * @nents: number of scatterlist entries.
+ * @dir: one of possible dma_data_direction values.
+ *
+ * Depending on @direction, KMSAN:
+ * * checks the buffers in the scatterlist, if they are copied to device;
+ * * initializes the buffers, if they are copied from device;
+ * * does both, if this is a DMA_BIDIRECTIONAL transfer.
+ */
+void kmsan_handle_dma_sg(struct scatterlist *sg, int nents,
+ enum dma_data_direction dir);
+
+/**
+ * kmsan_handle_urb() - Handle a USB data transfer.
+ * @urb: struct urb pointer.
+ * @is_out: data transfer direction (true means output to hardware).
+ *
+ * If @is_out is true, KMSAN checks the transfer buffer of @urb. Otherwise,
+ * KMSAN initializes the transfer buffer.
+ */
+void kmsan_handle_urb(const struct urb *urb, bool is_out);
+
+/**
+ * kmsan_unpoison_entry_regs() - Handle pt_regs in low-level entry code.
+ * @regs: struct pt_regs pointer received from assembly code.
+ *
+ * KMSAN unpoisons the contents of the passed pt_regs, preventing potential
+ * false positive reports. Unlike kmsan_unpoison_memory(),
+ * kmsan_unpoison_entry_regs() can be called from the regions where
+ * kmsan_in_runtime() returns true, which is the case in early entry code.
+ */
+void kmsan_unpoison_entry_regs(const struct pt_regs *regs);
+
+#else
+
+static inline void kmsan_init_shadow(void)
+{
+}
+
+static inline void kmsan_init_runtime(void)
+{
+}
+
+static inline bool kmsan_memblock_free_pages(struct page *page,
+ unsigned int order)
+{
+ return true;
+}
+
+static inline void kmsan_task_create(struct task_struct *task)
+{
+}
+
+static inline void kmsan_task_exit(struct task_struct *task)
+{
+}
+
+static inline int kmsan_alloc_page(struct page *page, unsigned int order,
+ gfp_t flags)
+{
+ return 0;
+}
+
+static inline void kmsan_free_page(struct page *page, unsigned int order)
+{
+}
+
+static inline void kmsan_copy_page_meta(struct page *dst, struct page *src)
+{
+}
+
+static inline void kmsan_slab_alloc(struct kmem_cache *s, void *object,
+ gfp_t flags)
+{
+}
+
+static inline void kmsan_slab_free(struct kmem_cache *s, void *object)
+{
+}
+
+static inline void kmsan_kmalloc_large(const void *ptr, size_t size,
+ gfp_t flags)
+{
+}
+
+static inline void kmsan_kfree_large(const void *ptr)
+{
+}
+
+static inline void kmsan_vmap_pages_range_noflush(unsigned long start,
+ unsigned long end,
+ pgprot_t prot,
+ struct page **pages,
+ unsigned int page_shift)
+{
+}
+
+static inline void kmsan_vunmap_range_noflush(unsigned long start,
+ unsigned long end)
+{
+}
+
+static inline void kmsan_ioremap_page_range(unsigned long start,
+ unsigned long end,
+ phys_addr_t phys_addr,
+ pgprot_t prot,
+ unsigned int page_shift)
+{
+}
+
+static inline void kmsan_iounmap_page_range(unsigned long start,
+ unsigned long end)
+{
+}
+
+static inline void kmsan_handle_dma(struct page *page, size_t offset,
+ size_t size, enum dma_data_direction dir)
+{
+}
+
+static inline void kmsan_handle_dma_sg(struct scatterlist *sg, int nents,
+ enum dma_data_direction dir)
+{
+}
+
+static inline void kmsan_handle_urb(const struct urb *urb, bool is_out)
+{
+}
+
+static inline void kmsan_unpoison_entry_regs(const struct pt_regs *regs)
+{
+}
+
+#endif
+
+#endif /* _LINUX_KMSAN_H */
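[Editorial note] Most of these hooks are called from core code rather than drivers. As one example, the DMA hook is meant to sit in the mapping path so buffers headed to the device get checked and buffers coming back get marked initialized, depending on the direction. A hedged, simplified sketch; example_arch_map() is a hypothetical stand-in for the real arch/IOMMU mapping step.

static dma_addr_t example_dma_map_page(struct device *dev, struct page *page,
					size_t offset, size_t size,
					enum dma_data_direction dir)
{
	dma_addr_t addr;

	addr = example_arch_map(dev, page, offset, size, dir);	/* hypothetical */
	if (addr != DMA_MAPPING_ERROR)
		kmsan_handle_dma(page, offset, size, dir);

	return addr;
}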
diff --git a/include/linux/kmsan_types.h b/include/linux/kmsan_types.h
new file mode 100644
index 000000000000..8bfa6c98176d
--- /dev/null
+++ b/include/linux/kmsan_types.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * A minimal header declaring types added by KMSAN to existing kernel structs.
+ *
+ * Copyright (C) 2017-2022 Google LLC
+ * Author: Alexander Potapenko <glider@google.com>
+ *
+ */
+#ifndef _LINUX_KMSAN_TYPES_H
+#define _LINUX_KMSAN_TYPES_H
+
+/* These constants are defined in the MSan LLVM instrumentation pass. */
+#define KMSAN_RETVAL_SIZE 800
+#define KMSAN_PARAM_SIZE 800
+
+struct kmsan_context_state {
+ char param_tls[KMSAN_PARAM_SIZE];
+ char retval_tls[KMSAN_RETVAL_SIZE];
+ char va_arg_tls[KMSAN_PARAM_SIZE];
+ char va_arg_origin_tls[KMSAN_PARAM_SIZE];
+ u64 va_arg_overflow_size_tls;
+ char param_origin_tls[KMSAN_PARAM_SIZE];
+ u32 retval_origin_tls;
+};
+
+#undef KMSAN_PARAM_SIZE
+#undef KMSAN_RETVAL_SIZE
+
+struct kmsan_ctx {
+ struct kmsan_context_state cstate;
+ int kmsan_in_runtime;
+ bool allow_reporting;
+};
+
+#endif /* _LINUX_KMSAN_TYPES_H */
diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h
index 55041d2f884d..a0b92be98984 100644
--- a/include/linux/kprobes.h
+++ b/include/linux/kprobes.h
@@ -103,6 +103,7 @@ struct kprobe {
* this flag is only for optimized_kprobe.
*/
#define KPROBE_FLAG_FTRACE 8 /* probe is using ftrace */
+#define KPROBE_FLAG_ON_FUNC_ENTRY 16 /* probe is on the function entry */
/* Has this kprobe gone ? */
static inline bool kprobe_gone(struct kprobe *p)
diff --git a/include/linux/ksm.h b/include/linux/ksm.h
index 0b4f17418f64..7e232ba59b86 100644
--- a/include/linux/ksm.h
+++ b/include/linux/ksm.h
@@ -15,9 +15,6 @@
#include <linux/sched.h>
#include <linux/sched/coredump.h>
-struct stable_node;
-struct mem_cgroup;
-
#ifdef CONFIG_KSM
int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
unsigned long end, int advice, unsigned long *vm_flags);
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index f4519d3689e1..32f259fa5801 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -151,12 +151,11 @@ static inline bool is_error_page(struct page *page)
#define KVM_REQUEST_NO_ACTION BIT(10)
/*
* Architecture-independent vcpu->requests bit members
- * Bits 4-7 are reserved for more arch-independent bits.
+ * Bits 3-7 are reserved for more arch-independent bits.
*/
#define KVM_REQ_TLB_FLUSH (0 | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_VM_DEAD (1 | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_UNBLOCK 2
-#define KVM_REQ_UNHALT 3
#define KVM_REQUEST_ARCH_BASE 8
/*
@@ -2248,6 +2247,19 @@ static inline void kvm_handle_signal_exit(struct kvm_vcpu *vcpu)
#endif /* CONFIG_KVM_XFER_TO_GUEST_WORK */
/*
+ * If more than one page is being (un)accounted, @virt must be the address of
+ * the first page of a block of pages that were allocated together (i.e.
+ * the first page of a block of pages that were allocated together (i.e.
+ * accounted together).
+ *
+ * kvm_account_pgtable_pages() is thread-safe because mod_lruvec_page_state()
+ * is thread-safe.
+ */
+static inline void kvm_account_pgtable_pages(void *virt, int nr)
+{
+ mod_lruvec_page_state(virt_to_page(virt), NR_SECONDARY_PAGETABLE, nr);
+}
+
+/*
* This defines how many reserved entries we want to keep before we
* kick the vcpu to the userspace to avoid dirty ring full. This
* value can be tuned to higher if e.g. PML is enabled on the host.
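(Not part of the patch.) A minimal sketch of how an architecture MMU could pair kvm_account_pgtable_pages() with page-table page allocation; the helper names alloc_pgtable_page()/free_pgtable_page() are invented for illustration, only the accounting call comes from the hunk above:

static void *alloc_pgtable_page(void)
{
	/* one zeroed, memcg-accounted page used as a secondary page table */
	void *virt = (void *)__get_free_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO);

	if (virt)
		kvm_account_pgtable_pages(virt, 1);	/* +1 NR_SECONDARY_PAGETABLE */
	return virt;
}

static void free_pgtable_page(void *virt)
{
	kvm_account_pgtable_pages(virt, -1);		/* undo the accounting first */
	free_page((unsigned long)virt);
}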
diff --git a/include/linux/libata.h b/include/linux/libata.h
index 698032e5ef2d..fe990176e6ee 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -101,7 +101,7 @@ enum {
ATA_DFLAG_UNLOCK_HPA = (1 << 18), /* unlock HPA */
ATA_DFLAG_NCQ_SEND_RECV = (1 << 19), /* device supports NCQ SEND and RECV */
ATA_DFLAG_NCQ_PRIO = (1 << 20), /* device supports NCQ priority */
- ATA_DFLAG_NCQ_PRIO_ENABLE = (1 << 21), /* Priority cmds sent to dev */
+ ATA_DFLAG_NCQ_PRIO_ENABLED = (1 << 21), /* Priority cmds sent to dev */
ATA_DFLAG_INIT_MASK = (1 << 24) - 1,
ATA_DFLAG_DETACH = (1 << 24),
@@ -1136,8 +1136,8 @@ extern int ata_scsi_slave_config(struct scsi_device *sdev);
extern void ata_scsi_slave_destroy(struct scsi_device *sdev);
extern int ata_scsi_change_queue_depth(struct scsi_device *sdev,
int queue_depth);
-extern int __ata_change_queue_depth(struct ata_port *ap, struct scsi_device *sdev,
- int queue_depth);
+extern int ata_change_queue_depth(struct ata_port *ap, struct ata_device *dev,
+ struct scsi_device *sdev, int queue_depth);
extern struct ata_device *ata_dev_pair(struct ata_device *adev);
extern int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev);
extern void ata_scsi_port_error_handler(struct Scsi_Host *host, struct ata_port *ap);
diff --git a/include/linux/linear_range.h b/include/linux/linear_range.h
index fd3d0b358f22..2e4f4c3539c0 100644
--- a/include/linux/linear_range.h
+++ b/include/linux/linear_range.h
@@ -26,6 +26,17 @@ struct linear_range {
unsigned int step;
};
+#define LINEAR_RANGE(_min, _min_sel, _max_sel, _step) \
+ { \
+ .min = _min, \
+ .min_sel = _min_sel, \
+ .max_sel = _max_sel, \
+ .step = _step, \
+ }
+
+#define LINEAR_RANGE_IDX(_idx, _min, _min_sel, _max_sel, _step) \
+ [_idx] = LINEAR_RANGE(_min, _min_sel, _max_sel, _step)
+
unsigned int linear_range_values_in_range(const struct linear_range *r);
unsigned int linear_range_values_in_range_array(const struct linear_range *r,
int ranges);
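(Not part of the patch.) A hedged example of what the new initializers make possible; the selector table below is invented, not taken from any real regulator:

static const struct linear_range example_volt_ranges[] = {
	/* 800 mV at selector 0x00, +12.5 mV per step up to selector 0x3b */
	LINEAR_RANGE(800000, 0x00, 0x3b, 12500),
	/* flat 1.55 V for the remaining selectors */
	LINEAR_RANGE(1550000, 0x3c, 0x3f, 0),
};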
diff --git a/include/linux/lsm_hook_defs.h b/include/linux/lsm_hook_defs.h
index 60fff133c0b1..ec119da1d89b 100644
--- a/include/linux/lsm_hook_defs.h
+++ b/include/linux/lsm_hook_defs.h
@@ -224,6 +224,7 @@ LSM_HOOK(int, -ENOSYS, task_prctl, int option, unsigned long arg2,
unsigned long arg3, unsigned long arg4, unsigned long arg5)
LSM_HOOK(void, LSM_RET_VOID, task_to_inode, struct task_struct *p,
struct inode *inode)
+LSM_HOOK(int, 0, userns_create, const struct cred *cred)
LSM_HOOK(int, 0, ipc_permission, struct kern_ipc_perm *ipcp, short flag)
LSM_HOOK(void, LSM_RET_VOID, ipc_getsecid, struct kern_ipc_perm *ipcp,
u32 *secid)
@@ -253,7 +254,7 @@ LSM_HOOK(int, 0, sem_semop, struct kern_ipc_perm *perm, struct sembuf *sops,
LSM_HOOK(int, 0, netlink_send, struct sock *sk, struct sk_buff *skb)
LSM_HOOK(void, LSM_RET_VOID, d_instantiate, struct dentry *dentry,
struct inode *inode)
-LSM_HOOK(int, -EINVAL, getprocattr, struct task_struct *p, char *name,
+LSM_HOOK(int, -EINVAL, getprocattr, struct task_struct *p, const char *name,
char **value)
LSM_HOOK(int, -EINVAL, setprocattr, const char *name, void *value, size_t size)
LSM_HOOK(int, 0, ismaclabel, const char *name)
diff --git a/include/linux/lsm_hooks.h b/include/linux/lsm_hooks.h
index 3aa6030302f5..4ec80b96c22e 100644
--- a/include/linux/lsm_hooks.h
+++ b/include/linux/lsm_hooks.h
@@ -806,6 +806,10 @@
* security attributes, e.g. for /proc/pid inodes.
* @p contains the task_struct for the task.
* @inode contains the inode structure for the inode.
+ * @userns_create:
+ * Check permission prior to creating a new user namespace.
+ * @cred points to prepared creds.
+ * Return 0 if successful, otherwise < 0 error code.
*
* Security hooks for Netlink messaging.
*
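(Not part of the patch.) A sketch of an LSM implementing the new userns_create hook; the capability-based policy is purely illustrative, not what any in-tree LSM does:

static int example_userns_create(const struct cred *cred)
{
	/* illustrative policy: only CAP_SYS_ADMIN may create user namespaces */
	if (!cap_raised(cred->cap_effective, CAP_SYS_ADMIN))
		return -EPERM;
	return 0;
}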
diff --git a/include/linux/maple_tree.h b/include/linux/maple_tree.h
new file mode 100644
index 000000000000..2effab72add1
--- /dev/null
+++ b/include/linux/maple_tree.h
@@ -0,0 +1,685 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+#ifndef _LINUX_MAPLE_TREE_H
+#define _LINUX_MAPLE_TREE_H
+/*
+ * Maple Tree - An RCU-safe adaptive tree for storing ranges
+ * Copyright (c) 2018-2022 Oracle
+ * Authors: Liam R. Howlett <Liam.Howlett@Oracle.com>
+ * Matthew Wilcox <willy@infradead.org>
+ */
+
+#include <linux/kernel.h>
+#include <linux/rcupdate.h>
+#include <linux/spinlock.h>
+/* #define CONFIG_MAPLE_RCU_DISABLED */
+/* #define CONFIG_DEBUG_MAPLE_TREE_VERBOSE */
+
+/*
+ * Allocated nodes are mutable until they have been inserted into the tree,
+ * at which time they cannot change their type until they have been removed
+ * from the tree and an RCU grace period has passed.
+ *
+ * Removed nodes have their ->parent set to point to themselves. RCU readers
+ * check ->parent before relying on the value that they loaded from the
+ * slots array. This lets us reuse the slots array for the RCU head.
+ *
+ * Nodes in the tree point to their parent unless bit 0 is set.
+ */
+#if defined(CONFIG_64BIT) || defined(BUILD_VDSO32_64)
+/* 64bit sizes */
+#define MAPLE_NODE_SLOTS 31 /* 256 bytes including ->parent */
+#define MAPLE_RANGE64_SLOTS 16 /* 256 bytes */
+#define MAPLE_ARANGE64_SLOTS 10 /* 240 bytes */
+#define MAPLE_ARANGE64_META_MAX 15 /* Out of range for metadata */
+#define MAPLE_ALLOC_SLOTS (MAPLE_NODE_SLOTS - 1)
+#else
+/* 32bit sizes */
+#define MAPLE_NODE_SLOTS 63 /* 256 bytes including ->parent */
+#define MAPLE_RANGE64_SLOTS 32 /* 256 bytes */
+#define MAPLE_ARANGE64_SLOTS 21 /* 240 bytes */
+#define MAPLE_ARANGE64_META_MAX 31 /* Out of range for metadata */
+#define MAPLE_ALLOC_SLOTS (MAPLE_NODE_SLOTS - 2)
+#endif /* defined(CONFIG_64BIT) || defined(BUILD_VDSO32_64) */
+
+#define MAPLE_NODE_MASK 255UL
+
+/*
+ * The node->parent of the root node has bit 0 set and the rest of the pointer
+ * is a pointer to the tree itself. No more bits are available in this pointer
+ * (on m68k, the data structure may only be 2-byte aligned).
+ *
+ * Internal non-root nodes can only have maple_range_* nodes as parents. The
+ * parent pointer is 256B aligned like all other tree nodes. When storing 32 or
+ * 64 bit values, the offset can fit into 4 bits. The 16 bit values need an
+ * extra bit to store the offset. This extra bit comes from a reuse of the last
+ * bit in the node type. This is possible by using bit 1 to indicate if bit 2
+ * is part of the type or the slot.
+ *
+ * Once the type is decided, the choice between an allocation range type and a
+ * plain range type is made by examining the immutable tree flags for the
+ * MAPLE_ALLOC_RANGE flag.
+ *
+ * Node types:
+ * 0x??1 = Root
+ * 0x?00 = 16 bit nodes
+ * 0x010 = 32 bit nodes
+ * 0x110 = 64 bit nodes
+ *
+ * Slot size and location in the parent pointer:
+ * type : slot location
+ * 0x??1 : Root
+ * 0x?00 : 16 bit values, type in 0-1, slot in 2-6
+ * 0x010 : 32 bit values, type in 0-2, slot in 3-6
+ * 0x110 : 64 bit values, type in 0-2, slot in 3-6
+ */
+
+/*
+ * This metadata is used to optimize the gap updating code and in reverse
+ * searching for gaps or any other code that needs to find the end of the data.
+ */
+struct maple_metadata {
+ unsigned char end;
+ unsigned char gap;
+};
+
+/*
+ * Leaf nodes do not store pointers to nodes, they store user data. Users may
+ * store almost any bit pattern. As noted above, the optimisation of storing an
+ * entry at 0 in the root pointer cannot be done for data which have the bottom
+ * two bits set to '10'. We also reserve values with the bottom two bits set to
+ * '10' which are below 4096 (i.e. 2, 6, 10 .. 4094) for internal use. Some APIs
+ * return errnos as a negative errno shifted right by two bits and the bottom
+ * two bits set to '10', and while choosing to store these values in the array
+ * is not an error, it may lead to confusion if you're testing for an error with
+ * mas_is_err().
+ *
+ * Non-leaf nodes store the type of the node pointed to (enum maple_type in bits
+ * 3-6), bit 2 is reserved. That leaves bits 0-1 unused for now.
+ *
+ * In regular B-Tree terms, pivots are called keys. The term pivot is used to
+ * indicate that the tree is specifying ranges. Pivots may appear in the
+ * subtree with an entry attached to the value, whereas keys are unique to a
+ * specific position of a B-tree. Pivot values are inclusive of the slot with
+ * the same index.
+ */
+
+struct maple_range_64 {
+ struct maple_pnode *parent;
+ unsigned long pivot[MAPLE_RANGE64_SLOTS - 1];
+ union {
+ void __rcu *slot[MAPLE_RANGE64_SLOTS];
+ struct {
+ void __rcu *pad[MAPLE_RANGE64_SLOTS - 1];
+ struct maple_metadata meta;
+ };
+ };
+};
+
+/*
+ * At tree creation time, the user can specify that they're willing to trade off
+ * storing fewer entries in a tree in return for storing more information in
+ * each node.
+ *
+ * The maple tree supports recording the largest range of NULL entries available
+ * in this node, also called gaps. This optimises the tree for allocating a
+ * range.
+ */
+struct maple_arange_64 {
+ struct maple_pnode *parent;
+ unsigned long pivot[MAPLE_ARANGE64_SLOTS - 1];
+ void __rcu *slot[MAPLE_ARANGE64_SLOTS];
+ unsigned long gap[MAPLE_ARANGE64_SLOTS];
+ struct maple_metadata meta;
+};
+
+struct maple_alloc {
+ unsigned long total;
+ unsigned char node_count;
+ unsigned int request_count;
+ struct maple_alloc *slot[MAPLE_ALLOC_SLOTS];
+};
+
+struct maple_topiary {
+ struct maple_pnode *parent;
+ struct maple_enode *next; /* Overlaps the pivot */
+};
+
+enum maple_type {
+ maple_dense,
+ maple_leaf_64,
+ maple_range_64,
+ maple_arange_64,
+};
+
+
+/**
+ * DOC: Maple tree flags
+ *
+ * * MT_FLAGS_ALLOC_RANGE - Track gaps in this tree
+ * * MT_FLAGS_USE_RCU - Operate in RCU mode
+ * * MT_FLAGS_HEIGHT_OFFSET - The position of the tree height in the flags
+ * * MT_FLAGS_HEIGHT_MASK - The mask for the maple tree height value
+ * * MT_FLAGS_LOCK_MASK - How the mt_lock is used
+ * * MT_FLAGS_LOCK_IRQ - Acquired irq-safe
+ * * MT_FLAGS_LOCK_BH - Acquired bh-safe
+ * * MT_FLAGS_LOCK_EXTERN - mt_lock is not used
+ *
+ * * MAPLE_HEIGHT_MAX - The largest height that can be stored
+ */
+#define MT_FLAGS_ALLOC_RANGE 0x01
+#define MT_FLAGS_USE_RCU 0x02
+#define MT_FLAGS_HEIGHT_OFFSET 0x02
+#define MT_FLAGS_HEIGHT_MASK 0x7C
+#define MT_FLAGS_LOCK_MASK 0x300
+#define MT_FLAGS_LOCK_IRQ 0x100
+#define MT_FLAGS_LOCK_BH 0x200
+#define MT_FLAGS_LOCK_EXTERN 0x300
+
+#define MAPLE_HEIGHT_MAX 31
+
+
+#define MAPLE_NODE_TYPE_MASK 0x0F
+#define MAPLE_NODE_TYPE_SHIFT 0x03
+
+#define MAPLE_RESERVED_RANGE 4096
+
+#ifdef CONFIG_LOCKDEP
+typedef struct lockdep_map *lockdep_map_p;
+#define mt_lock_is_held(mt) lock_is_held(mt->ma_external_lock)
+#define mt_set_external_lock(mt, lock) \
+ (mt)->ma_external_lock = &(lock)->dep_map
+#else
+typedef struct { /* nothing */ } lockdep_map_p;
+#define mt_lock_is_held(mt) 1
+#define mt_set_external_lock(mt, lock) do { } while (0)
+#endif
+
+/*
+ * If the tree contains a single entry at index 0, it is usually stored in
+ * tree->ma_root. To optimise for the page cache, an entry which ends in '00',
+ * '01' or '11' is stored in the root, but an entry which ends in '10' will be
+ * stored in a node. Bits 3-6 are used to store enum maple_type.
+ *
+ * The flags are used both to store some immutable information about this tree
+ * (set at tree creation time) and dynamic information set under the spinlock.
+ *
+ * Another use of the flags is to indicate global states of the tree. This is the
+ * case with the MAPLE_USE_RCU flag, which indicates the tree is currently in
+ * RCU mode. This mode was added to allow the tree to reuse nodes instead of
+ * re-allocating and RCU freeing nodes when there is a single user.
+ */
+struct maple_tree {
+ union {
+ spinlock_t ma_lock;
+ lockdep_map_p ma_external_lock;
+ };
+ void __rcu *ma_root;
+ unsigned int ma_flags;
+};
+
+/**
+ * MTREE_INIT() - Initialize a maple tree
+ * @name: The maple tree name
+ * @__flags: The maple tree flags
+ *
+ */
+#define MTREE_INIT(name, __flags) { \
+ .ma_lock = __SPIN_LOCK_UNLOCKED((name).ma_lock), \
+ .ma_flags = __flags, \
+ .ma_root = NULL, \
+}
+
+/**
+ * MTREE_INIT_EXT() - Initialize a maple tree with an external lock.
+ * @name: The tree name
+ * @__flags: The maple tree flags
+ * @__lock: The external lock
+ */
+#ifdef CONFIG_LOCKDEP
+#define MTREE_INIT_EXT(name, __flags, __lock) { \
+ .ma_external_lock = &(__lock).dep_map, \
+ .ma_flags = (__flags), \
+ .ma_root = NULL, \
+}
+#else
+#define MTREE_INIT_EXT(name, __flags, __lock) MTREE_INIT(name, __flags)
+#endif
+
+#define DEFINE_MTREE(name) \
+ struct maple_tree name = MTREE_INIT(name, 0)
+
+#define mtree_lock(mt) spin_lock((&(mt)->ma_lock))
+#define mtree_unlock(mt) spin_unlock((&(mt)->ma_lock))
+
+/*
+ * The Maple Tree squeezes various bits in at various points which aren't
+ * necessarily obvious. Usually, this is done by observing that pointers are
+ * N-byte aligned and thus the bottom log_2(N) bits are available for use. We
+ * don't use the high bits of pointers to store additional information because
+ * we don't know what bits are unused on any given architecture.
+ *
+ * Nodes are 256 bytes in size and are also aligned to 256 bytes, giving us 8
+ * low bits for our own purposes. Nodes are currently of 4 types:
+ * 1. Single pointer (Range is 0-0)
+ * 2. Non-leaf Allocation Range nodes
+ * 3. Non-leaf Range nodes
+ * 4. Leaf Range nodes
+ *
+ * All nodes consist of a number of node slots, pivots, and a parent pointer.
+ */
+
+struct maple_node {
+ union {
+ struct {
+ struct maple_pnode *parent;
+ void __rcu *slot[MAPLE_NODE_SLOTS];
+ };
+ struct {
+ void *pad;
+ struct rcu_head rcu;
+ struct maple_enode *piv_parent;
+ unsigned char parent_slot;
+ enum maple_type type;
+ unsigned char slot_len;
+ unsigned int ma_flags;
+ };
+ struct maple_range_64 mr64;
+ struct maple_arange_64 ma64;
+ struct maple_alloc alloc;
+ };
+};
+
+/*
+ * More complicated stores can cause two nodes to become one or three and
+ * potentially alter the height of the tree. Either half of the tree may need
+ * to be rebalanced against the other. The ma_topiary struct is used to track
+ * which nodes have been 'cut' from the tree so that the change can be done
+ * safely at a later date. This is done to support RCU.
+ */
+struct ma_topiary {
+ struct maple_enode *head;
+ struct maple_enode *tail;
+ struct maple_tree *mtree;
+};
+
+void *mtree_load(struct maple_tree *mt, unsigned long index);
+
+int mtree_insert(struct maple_tree *mt, unsigned long index,
+ void *entry, gfp_t gfp);
+int mtree_insert_range(struct maple_tree *mt, unsigned long first,
+ unsigned long last, void *entry, gfp_t gfp);
+int mtree_alloc_range(struct maple_tree *mt, unsigned long *startp,
+ void *entry, unsigned long size, unsigned long min,
+ unsigned long max, gfp_t gfp);
+int mtree_alloc_rrange(struct maple_tree *mt, unsigned long *startp,
+ void *entry, unsigned long size, unsigned long min,
+ unsigned long max, gfp_t gfp);
+
+int mtree_store_range(struct maple_tree *mt, unsigned long first,
+ unsigned long last, void *entry, gfp_t gfp);
+int mtree_store(struct maple_tree *mt, unsigned long index,
+ void *entry, gfp_t gfp);
+void *mtree_erase(struct maple_tree *mt, unsigned long index);
+
+void mtree_destroy(struct maple_tree *mt);
+void __mt_destroy(struct maple_tree *mt);
+
+/**
+ * mtree_empty() - Determine if a tree has any present entries.
+ * @mt: Maple Tree.
+ *
+ * Context: Any context.
+ * Return: %true if the tree contains only NULL pointers.
+ */
+static inline bool mtree_empty(const struct maple_tree *mt)
+{
+ return mt->ma_root == NULL;
+}
+
+/* Advanced API */
+
+/*
+ * The maple state is defined in the struct ma_state and is used to keep track
+ * of information during operations, and even between operations when using the
+ * advanced API.
+ *
+ * If state->node has bit 0 set then it references a tree location which is not
+ * a node (eg the root). If bit 1 is set, the rest of the bits are a negative
+ * errno. Bit 2 (the 'unallocated slots' bit) is clear. Bits 3-6 indicate the
+ * node type.
+ *
+ * state->alloc either has a requested number of nodes or an allocated node. If
+ * state->alloc has a requested number of nodes, the first bit will be set (0x1)
+ * and the remaining bits are the value. If state->alloc is a node, then the
+ * node will be of type maple_alloc. maple_alloc has MAPLE_NODE_SLOTS - 1 for
+ * storing more allocated nodes, a total number of nodes allocated, and the
+ * node_count in this node. node_count is the number of allocated nodes in this
+ * node. The scaling beyond MAPLE_NODE_SLOTS - 1 is handled by storing further
+ * nodes into state->alloc->slot[0]'s node. Nodes are taken from state->alloc
+ * by removing a node from the state->alloc node until state->alloc->node_count
+ * is 1, when state->alloc is returned and the state->alloc->slot[0] is promoted
+ * to state->alloc. Nodes are pushed onto state->alloc by putting the current
+ * state->alloc into the pushed node's slot[0].
+ *
+ * The state also contains the implied min/max of the state->node, the depth of
+ * this search, and the offset. The implied min/max are either from the parent
+ * node or are 0-oo for the root node. The depth is incremented or decremented
+ * every time a node is walked down or up. The offset is the slot/pivot of
+ * interest in the node - either for reading or writing.
+ *
+ * When returning a value the maple state index and last respectively contain
+ * the start and end of the range for the entry. Ranges are inclusive in the
+ * Maple Tree.
+ */
+struct ma_state {
+ struct maple_tree *tree; /* The tree we're operating in */
+ unsigned long index; /* The index we're operating on - range start */
+ unsigned long last; /* The last index we're operating on - range end */
+ struct maple_enode *node; /* The node containing this entry */
+ unsigned long min; /* The minimum index of this node - implied pivot min */
+ unsigned long max; /* The maximum index of this node - implied pivot max */
+ struct maple_alloc *alloc; /* Allocated nodes for this operation */
+ unsigned char depth; /* depth of tree descent during write */
+ unsigned char offset;
+ unsigned char mas_flags;
+};
+
+struct ma_wr_state {
+ struct ma_state *mas;
+ struct maple_node *node; /* Decoded mas->node */
+ unsigned long r_min; /* range min */
+ unsigned long r_max; /* range max */
+ enum maple_type type; /* mas->node type */
+ unsigned char offset_end; /* The offset where the write ends */
+ unsigned char node_end; /* mas->node end */
+ unsigned long *pivots; /* mas->node->pivots pointer */
+ unsigned long end_piv; /* The pivot at the offset end */
+ void __rcu **slots; /* mas->node->slots pointer */
+ void *entry; /* The entry to write */
+ void *content; /* The existing entry that is being overwritten */
+};
+
+#define mas_lock(mas) spin_lock(&((mas)->tree->ma_lock))
+#define mas_unlock(mas) spin_unlock(&((mas)->tree->ma_lock))
+
+
+/*
+ * Special values for ma_state.node.
+ * MAS_START means we have not searched the tree.
+ * MAS_ROOT means we have searched the tree and the entry we found lives in
+ * the root of the tree (ie it has index 0, length 1 and is the only entry in
+ * the tree).
+ * MAS_NONE means we have searched the tree and there is no node in the
+ * tree for this entry. For example, we searched for index 1 in an empty
+ * tree. Or we have a tree which points to a full leaf node and we
+ * searched for an entry which is larger than can be contained in that
+ * leaf node.
+ * MA_ERROR represents an errno. After dropping the lock and attempting
+ * to resolve the error, the walk would have to be restarted from the
+ * top of the tree as the tree may have been modified.
+ */
+#define MAS_START ((struct maple_enode *)1UL)
+#define MAS_ROOT ((struct maple_enode *)5UL)
+#define MAS_NONE ((struct maple_enode *)9UL)
+#define MAS_PAUSE ((struct maple_enode *)17UL)
+#define MA_ERROR(err) \
+ ((struct maple_enode *)(((unsigned long)err << 2) | 2UL))
+
+#define MA_STATE(name, mt, first, end) \
+ struct ma_state name = { \
+ .tree = mt, \
+ .index = first, \
+ .last = end, \
+ .node = MAS_START, \
+ .min = 0, \
+ .max = ULONG_MAX, \
+ .alloc = NULL, \
+ }
+
+#define MA_WR_STATE(name, ma_state, wr_entry) \
+ struct ma_wr_state name = { \
+ .mas = ma_state, \
+ .content = NULL, \
+ .entry = wr_entry, \
+ }
+
+#define MA_TOPIARY(name, tree) \
+ struct ma_topiary name = { \
+ .head = NULL, \
+ .tail = NULL, \
+ .mtree = tree, \
+ }
+
+void *mas_walk(struct ma_state *mas);
+void *mas_store(struct ma_state *mas, void *entry);
+void *mas_erase(struct ma_state *mas);
+int mas_store_gfp(struct ma_state *mas, void *entry, gfp_t gfp);
+void mas_store_prealloc(struct ma_state *mas, void *entry);
+void *mas_find(struct ma_state *mas, unsigned long max);
+void *mas_find_rev(struct ma_state *mas, unsigned long min);
+int mas_preallocate(struct ma_state *mas, void *entry, gfp_t gfp);
+bool mas_is_err(struct ma_state *mas);
+
+bool mas_nomem(struct ma_state *mas, gfp_t gfp);
+void mas_pause(struct ma_state *mas);
+void maple_tree_init(void);
+void mas_destroy(struct ma_state *mas);
+int mas_expected_entries(struct ma_state *mas, unsigned long nr_entries);
+
+void *mas_prev(struct ma_state *mas, unsigned long min);
+void *mas_next(struct ma_state *mas, unsigned long max);
+
+int mas_empty_area(struct ma_state *mas, unsigned long min, unsigned long max,
+ unsigned long size);
+
+/* Checks if a mas has not found anything */
+static inline bool mas_is_none(struct ma_state *mas)
+{
+ return mas->node == MAS_NONE;
+}
+
+/* Checks if a mas has been paused */
+static inline bool mas_is_paused(struct ma_state *mas)
+{
+ return mas->node == MAS_PAUSE;
+}
+
+void mas_dup_tree(struct ma_state *oldmas, struct ma_state *mas);
+void mas_dup_store(struct ma_state *mas, void *entry);
+
+/*
+ * This finds an empty area from the highest address to the lowest.
+ * AKA the "Topdown" version.
+ */
+int mas_empty_area_rev(struct ma_state *mas, unsigned long min,
+ unsigned long max, unsigned long size);
+/**
+ * mas_reset() - Reset a Maple Tree operation state.
+ * @mas: Maple Tree operation state.
+ *
+ * Resets the error or walk state of the @mas so future walks of the
+ * array will start from the root. Use this if you have dropped the
+ * lock and want to reuse the ma_state.
+ *
+ * Context: Any context.
+ */
+static inline void mas_reset(struct ma_state *mas)
+{
+ mas->node = MAS_START;
+}
+
+/**
+ * mas_for_each() - Iterate over a range of the maple tree.
+ * @__mas: Maple Tree operation state (maple_state)
+ * @__entry: Entry retrieved from the tree
+ * @__max: maximum index to retrieve from the tree
+ *
+ * When returned, mas->index and mas->last will hold the entire range for the
+ * entry.
+ *
+ * Note: may return the zero entry.
+ *
+ */
+#define mas_for_each(__mas, __entry, __max) \
+ while (((__entry) = mas_find((__mas), (__max))) != NULL)
+
+
+/**
+ * mas_set_range() - Set up Maple Tree operation state for a different index.
+ * @mas: Maple Tree operation state.
+ * @start: New start of range in the Maple Tree.
+ * @last: New end of range in the Maple Tree.
+ *
+ * Move the operation state to refer to a different range. This will
+ * have the effect of starting a walk from the top; see mas_next()
+ * to move to an adjacent index.
+ */
+static inline
+void mas_set_range(struct ma_state *mas, unsigned long start, unsigned long last)
+{
+ mas->index = start;
+ mas->last = last;
+ mas->node = MAS_START;
+}
+
+/**
+ * mas_set() - Set up Maple Tree operation state for a different index.
+ * @mas: Maple Tree operation state.
+ * @index: New index into the Maple Tree.
+ *
+ * Move the operation state to refer to a different index. This will
+ * have the effect of starting a walk from the top; see mas_next()
+ * to move to an adjacent index.
+ */
+static inline void mas_set(struct ma_state *mas, unsigned long index)
+{
+
+ mas_set_range(mas, index, index);
+}
+
+static inline bool mt_external_lock(const struct maple_tree *mt)
+{
+ return (mt->ma_flags & MT_FLAGS_LOCK_MASK) == MT_FLAGS_LOCK_EXTERN;
+}
+
+/**
+ * mt_init_flags() - Initialise an empty maple tree with flags.
+ * @mt: Maple Tree
+ * @flags: maple tree flags.
+ *
+ * If you need to initialise a Maple Tree with special flags (eg, an
+ * allocation tree), use this function.
+ *
+ * Context: Any context.
+ */
+static inline void mt_init_flags(struct maple_tree *mt, unsigned int flags)
+{
+ mt->ma_flags = flags;
+ if (!mt_external_lock(mt))
+ spin_lock_init(&mt->ma_lock);
+ rcu_assign_pointer(mt->ma_root, NULL);
+}
+
+/**
+ * mt_init() - Initialise an empty maple tree.
+ * @mt: Maple Tree
+ *
+ * An empty Maple Tree.
+ *
+ * Context: Any context.
+ */
+static inline void mt_init(struct maple_tree *mt)
+{
+ mt_init_flags(mt, 0);
+}
+
+static inline bool mt_in_rcu(struct maple_tree *mt)
+{
+#ifdef CONFIG_MAPLE_RCU_DISABLED
+ return false;
+#endif
+ return mt->ma_flags & MT_FLAGS_USE_RCU;
+}
+
+/**
+ * mt_clear_in_rcu() - Switch the tree to non-RCU mode.
+ * @mt: The Maple Tree
+ */
+static inline void mt_clear_in_rcu(struct maple_tree *mt)
+{
+ if (!mt_in_rcu(mt))
+ return;
+
+ if (mt_external_lock(mt)) {
+ BUG_ON(!mt_lock_is_held(mt));
+ mt->ma_flags &= ~MT_FLAGS_USE_RCU;
+ } else {
+ mtree_lock(mt);
+ mt->ma_flags &= ~MT_FLAGS_USE_RCU;
+ mtree_unlock(mt);
+ }
+}
+
+/**
+ * mt_set_in_rcu() - Switch the tree to RCU safe mode.
+ * @mt: The Maple Tree
+ */
+static inline void mt_set_in_rcu(struct maple_tree *mt)
+{
+ if (mt_in_rcu(mt))
+ return;
+
+ if (mt_external_lock(mt)) {
+ BUG_ON(!mt_lock_is_held(mt));
+ mt->ma_flags |= MT_FLAGS_USE_RCU;
+ } else {
+ mtree_lock(mt);
+ mt->ma_flags |= MT_FLAGS_USE_RCU;
+ mtree_unlock(mt);
+ }
+}
+
+void *mt_find(struct maple_tree *mt, unsigned long *index, unsigned long max);
+void *mt_find_after(struct maple_tree *mt, unsigned long *index,
+ unsigned long max);
+void *mt_prev(struct maple_tree *mt, unsigned long index, unsigned long min);
+void *mt_next(struct maple_tree *mt, unsigned long index, unsigned long max);
+
+/**
+ * mt_for_each - Iterate over each entry starting at index until max.
+ * @__tree: The Maple Tree
+ * @__entry: The current entry
+ * @__index: The index to update to track the location in the tree
+ * @__max: The maximum limit for @index
+ *
+ * Note: Will not return the zero entry.
+ */
+#define mt_for_each(__tree, __entry, __index, __max) \
+ for (__entry = mt_find(__tree, &(__index), __max); \
+ __entry; __entry = mt_find_after(__tree, &(__index), __max))
+
+
+#ifdef CONFIG_DEBUG_MAPLE_TREE
+extern atomic_t maple_tree_tests_run;
+extern atomic_t maple_tree_tests_passed;
+
+void mt_dump(const struct maple_tree *mt);
+void mt_validate(struct maple_tree *mt);
+#define MT_BUG_ON(__tree, __x) do { \
+ atomic_inc(&maple_tree_tests_run); \
+ if (__x) { \
+ pr_info("BUG at %s:%d (%u)\n", \
+ __func__, __LINE__, __x); \
+ mt_dump(__tree); \
+ pr_info("Pass: %u Run:%u\n", \
+ atomic_read(&maple_tree_tests_passed), \
+ atomic_read(&maple_tree_tests_run)); \
+ dump_stack(); \
+ } else { \
+ atomic_inc(&maple_tree_tests_passed); \
+ } \
+} while (0)
+#else
+#define MT_BUG_ON(__tree, __x) BUG_ON(__x)
+#endif /* CONFIG_DEBUG_MAPLE_TREE */
+
+#endif /*_LINUX_MAPLE_TREE_H */
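(Not part of the patch.) For orientation, a small sketch of the normal and advanced APIs declared above; the tree name, the stored pointer and the ranges are arbitrary:

static DEFINE_MTREE(example_tree);

static int example_maple_usage(void)
{
	void *entry;
	int ret;

	/* Normal API: associate the range [10, 19] with an arbitrary pointer. */
	ret = mtree_store_range(&example_tree, 10, 19, (void *)0x1000, GFP_KERNEL);
	if (ret)
		return ret;

	entry = mtree_load(&example_tree, 15);	/* returns (void *)0x1000 */

	/* Advanced API: walk every entry up to index 100 under the tree lock. */
	{
		MA_STATE(mas, &example_tree, 0, 0);

		mas_lock(&mas);
		mas_for_each(&mas, entry, 100) {
			/* mas.index and mas.last hold this entry's range */
		}
		mas_unlock(&mas);
	}

	mtree_destroy(&example_tree);
	return 0;
}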
diff --git a/include/linux/mdev.h b/include/linux/mdev.h
index 47ad3b104d9e..139d05b26f82 100644
--- a/include/linux/mdev.h
+++ b/include/linux/mdev.h
@@ -10,6 +10,9 @@
#ifndef MDEV_H
#define MDEV_H
+#include <linux/device.h>
+#include <linux/uuid.h>
+
struct mdev_type;
struct mdev_device {
@@ -20,67 +23,67 @@ struct mdev_device {
bool active;
};
-static inline struct mdev_device *to_mdev_device(struct device *dev)
-{
- return container_of(dev, struct mdev_device, dev);
-}
+struct mdev_type {
+ /* set by the driver before calling mdev_register_parent(): */
+ const char *sysfs_name;
+ const char *pretty_name;
-unsigned int mdev_get_type_group_id(struct mdev_device *mdev);
-unsigned int mtype_get_type_group_id(struct mdev_type *mtype);
-struct device *mtype_get_parent_dev(struct mdev_type *mtype);
+ /* set by the core, can be used by drivers */
+ struct mdev_parent *parent;
-/* interface for exporting mdev supported type attributes */
-struct mdev_type_attribute {
- struct attribute attr;
- ssize_t (*show)(struct mdev_type *mtype,
- struct mdev_type_attribute *attr, char *buf);
- ssize_t (*store)(struct mdev_type *mtype,
- struct mdev_type_attribute *attr, const char *buf,
- size_t count);
+ /* internal only */
+ struct kobject kobj;
+ struct kobject *devices_kobj;
};
-#define MDEV_TYPE_ATTR(_name, _mode, _show, _store) \
-struct mdev_type_attribute mdev_type_attr_##_name = \
- __ATTR(_name, _mode, _show, _store)
-#define MDEV_TYPE_ATTR_RW(_name) \
- struct mdev_type_attribute mdev_type_attr_##_name = __ATTR_RW(_name)
-#define MDEV_TYPE_ATTR_RO(_name) \
- struct mdev_type_attribute mdev_type_attr_##_name = __ATTR_RO(_name)
-#define MDEV_TYPE_ATTR_WO(_name) \
- struct mdev_type_attribute mdev_type_attr_##_name = __ATTR_WO(_name)
+/* embedded into the struct device that the mdev devices hang off */
+struct mdev_parent {
+ struct device *dev;
+ struct mdev_driver *mdev_driver;
+ struct kset *mdev_types_kset;
+ /* Synchronize device creation/removal with parent unregistration */
+ struct rw_semaphore unreg_sem;
+ struct mdev_type **types;
+ unsigned int nr_types;
+ atomic_t available_instances;
+};
+
+static inline struct mdev_device *to_mdev_device(struct device *dev)
+{
+ return container_of(dev, struct mdev_device, dev);
+}
/**
* struct mdev_driver - Mediated device driver
+ * @device_api: string to return for the device_api sysfs
+ * @max_instances: maximum number of instances supported (optional)
* @probe: called when new device created
* @remove: called when device removed
- * @supported_type_groups: Attributes to define supported types. It is mandatory
- * to provide supported types.
+ * @get_available: Return the max number of instances that can be created
+ * @show_description: Print a description of the mtype
* @driver: device driver structure
- *
**/
struct mdev_driver {
+ const char *device_api;
+ unsigned int max_instances;
int (*probe)(struct mdev_device *dev);
void (*remove)(struct mdev_device *dev);
- struct attribute_group **supported_type_groups;
+ unsigned int (*get_available)(struct mdev_type *mtype);
+ ssize_t (*show_description)(struct mdev_type *mtype, char *buf);
struct device_driver driver;
};
-extern struct bus_type mdev_bus_type;
-
-int mdev_register_device(struct device *dev, struct mdev_driver *mdev_driver);
-void mdev_unregister_device(struct device *dev);
+int mdev_register_parent(struct mdev_parent *parent, struct device *dev,
+ struct mdev_driver *mdev_driver, struct mdev_type **types,
+ unsigned int nr_types);
+void mdev_unregister_parent(struct mdev_parent *parent);
int mdev_register_driver(struct mdev_driver *drv);
void mdev_unregister_driver(struct mdev_driver *drv);
-struct device *mdev_parent_dev(struct mdev_device *mdev);
static inline struct device *mdev_dev(struct mdev_device *mdev)
{
return &mdev->dev;
}
-static inline struct mdev_device *mdev_from_dev(struct device *dev)
-{
- return dev->bus == &mdev_bus_type ? to_mdev_device(dev) : NULL;
-}
#endif /* MDEV_H */
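(Not part of the patch.) A sketch of the new registration flow that replaces the removed supported_type_groups attributes; the type, parent and surrounding driver are hypothetical:

static struct mdev_type example_type = {
	.sysfs_name  = "example-1",
	.pretty_name = "Example mediated device, one instance",
};

static struct mdev_type *example_types[] = { &example_type };
static struct mdev_parent example_parent;

static int example_register(struct device *dev, struct mdev_driver *drv)
{
	/* the core fills in example_type.parent and creates the sysfs nodes */
	return mdev_register_parent(&example_parent, dev, drv,
				    example_types, ARRAY_SIZE(example_types));
}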
diff --git a/include/linux/mdio/mdio-i2c.h b/include/linux/mdio/mdio-i2c.h
index b1d27f7cd23f..65b550a6fc32 100644
--- a/include/linux/mdio/mdio-i2c.h
+++ b/include/linux/mdio/mdio-i2c.h
@@ -11,6 +11,14 @@ struct device;
struct i2c_adapter;
struct mii_bus;
-struct mii_bus *mdio_i2c_alloc(struct device *parent, struct i2c_adapter *i2c);
+enum mdio_i2c_proto {
+ MDIO_I2C_NONE,
+ MDIO_I2C_MARVELL_C22,
+ MDIO_I2C_C45,
+ MDIO_I2C_ROLLBALL,
+};
+
+struct mii_bus *mdio_i2c_alloc(struct device *parent, struct i2c_adapter *i2c,
+ enum mdio_i2c_proto protocol);
#endif
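(Not part of the patch.) A minimal sketch of how a caller now selects the access protocol explicitly; the wrapper function and the mdiobus registration step are assumptions for illustration:

static int example_attach_i2c_mdio(struct device *dev, struct i2c_adapter *i2c)
{
	struct mii_bus *bus;

	/* RollBall modules need the new protocol value; plain C22 was the old default */
	bus = mdio_i2c_alloc(dev, i2c, MDIO_I2C_ROLLBALL);
	if (IS_ERR(bus))
		return PTR_ERR(bus);

	return mdiobus_register(bus);
}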
diff --git a/include/linux/mei_aux.h b/include/linux/mei_aux.h
index 587f25128848..506912ad363b 100644
--- a/include/linux/mei_aux.h
+++ b/include/linux/mei_aux.h
@@ -7,10 +7,22 @@
#include <linux/auxiliary_bus.h>
+/**
+ * struct mei_aux_device - mei auxiliary device
+ * @aux_dev: auxiliary device object
+ * @irq: interrupt driving the mei auxiliary device
+ * @bar: mmio resource bar reserved to mei auxiliary device
+ * @ext_op_mem: resource for extended operational memory
+ * used in graphics PXP mode.
+ * @slow_firmware: The device has slow underlying firmware.
+ * Such firmware requires the use of larger operation timeouts.
+ */
struct mei_aux_device {
struct auxiliary_device aux_dev;
int irq;
struct resource bar;
+ struct resource ext_op_mem;
+ bool slow_firmware;
};
#define auxiliary_dev_to_mei_aux_dev(auxiliary_dev) \
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 6257867fbf95..e1644a24009c 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -80,29 +80,8 @@ enum mem_cgroup_events_target {
MEM_CGROUP_NTARGETS,
};
-struct memcg_vmstats_percpu {
- /* Local (CPU and cgroup) page state & events */
- long state[MEMCG_NR_STAT];
- unsigned long events[NR_VM_EVENT_ITEMS];
-
- /* Delta calculation for lockless upward propagation */
- long state_prev[MEMCG_NR_STAT];
- unsigned long events_prev[NR_VM_EVENT_ITEMS];
-
- /* Cgroup1: threshold notifications & softlimit tree updates */
- unsigned long nr_page_events;
- unsigned long targets[MEM_CGROUP_NTARGETS];
-};
-
-struct memcg_vmstats {
- /* Aggregated (CPU and subtree) page state & events */
- long state[MEMCG_NR_STAT];
- unsigned long events[NR_VM_EVENT_ITEMS];
-
- /* Pending child counts during tree propagation */
- long state_pending[MEMCG_NR_STAT];
- unsigned long events_pending[NR_VM_EVENT_ITEMS];
-};
+struct memcg_vmstats_percpu;
+struct memcg_vmstats;
struct mem_cgroup_reclaim_iter {
struct mem_cgroup *position;
@@ -185,15 +164,6 @@ struct mem_cgroup_thresholds {
struct mem_cgroup_threshold_ary *spare;
};
-#if defined(CONFIG_SMP)
-struct memcg_padding {
- char x[0];
-} ____cacheline_internodealigned_in_smp;
-#define MEMCG_PADDING(name) struct memcg_padding name
-#else
-#define MEMCG_PADDING(name)
-#endif
-
/*
* Remember four most recent foreign writebacks with dirty pages in this
* cgroup. Inode sharing is expected to be uncommon and, even if we miss
@@ -304,10 +274,10 @@ struct mem_cgroup {
spinlock_t move_lock;
unsigned long move_lock_flags;
- MEMCG_PADDING(_pad1_);
+ CACHELINE_PADDING(_pad1_);
/* memory.stat */
- struct memcg_vmstats vmstats;
+ struct memcg_vmstats *vmstats;
/* memory.events */
atomic_long_t memory_events[MEMCG_NR_MEMORY_EVENTS];
@@ -326,7 +296,7 @@ struct mem_cgroup {
struct list_head objcg_list;
#endif
- MEMCG_PADDING(_pad2_);
+ CACHELINE_PADDING(_pad2_);
/*
* set > 0 if pages under this cgroup are moving to other cgroup.
@@ -350,14 +320,20 @@ struct mem_cgroup {
struct deferred_split deferred_split_queue;
#endif
+#ifdef CONFIG_LRU_GEN
+ /* per-memcg mm_struct list */
+ struct lru_gen_mm_list mm_list;
+#endif
+
struct mem_cgroup_per_node *nodeinfo[];
};
/*
- * size of first charge trial. "32" comes from vmscan.c's magic value.
- * TODO: maybe necessary to use big numbers in big irons.
+ * size of first charge trial.
+ * TODO: maybe necessary to use big numbers in big irons or dynamic based on the
+ * workload.
*/
-#define MEMCG_CHARGE_BATCH 32U
+#define MEMCG_CHARGE_BATCH 64U
extern struct mem_cgroup *root_mem_cgroup;
@@ -444,6 +420,7 @@ static inline struct obj_cgroup *__folio_objcg(struct folio *folio)
* - LRU isolation
* - lock_page_memcg()
* - exclusive reference
+ * - mem_cgroup_trylock_pages()
*
* For a kmem folio a caller should hold an rcu read lock to protect memcg
* associated with a kmem folio from being released.
@@ -505,6 +482,7 @@ static inline struct mem_cgroup *folio_memcg_rcu(struct folio *folio)
* - LRU isolation
* - lock_page_memcg()
* - exclusive reference
+ * - mem_cgroup_trylock_pages()
*
* For a kmem page a caller should hold an rcu read lock to protect memcg
* associated with a kmem page from being released.
@@ -689,7 +667,7 @@ static inline int mem_cgroup_charge(struct folio *folio, struct mm_struct *mm,
return __mem_cgroup_charge(folio, mm, gfp);
}
-int mem_cgroup_swapin_charge_page(struct page *page, struct mm_struct *mm,
+int mem_cgroup_swapin_charge_folio(struct folio *folio, struct mm_struct *mm,
gfp_t gfp, swp_entry_t entry);
void mem_cgroup_swapin_uncharge_swap(swp_entry_t entry);
@@ -959,6 +937,23 @@ void unlock_page_memcg(struct page *page);
void __mod_memcg_state(struct mem_cgroup *memcg, int idx, int val);
+/* try to stabilize folio_memcg() for all the pages in a memcg */
+static inline bool mem_cgroup_trylock_pages(struct mem_cgroup *memcg)
+{
+ rcu_read_lock();
+
+ if (mem_cgroup_disabled() || !atomic_read(&memcg->moving_account))
+ return true;
+
+ rcu_read_unlock();
+ return false;
+}
+
+static inline void mem_cgroup_unlock_pages(void)
+{
+ rcu_read_unlock();
+}
+
/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void mod_memcg_state(struct mem_cgroup *memcg,
int idx, int val)
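(Not part of the patch.) A sketch of the intended pairing for the helpers added above; note that on failure mem_cgroup_trylock_pages() has already dropped the RCU read lock, so only the success path calls the unlock helper. The scanning function is hypothetical:

static void example_scan_folio(struct mem_cgroup *memcg, struct folio *folio)
{
	if (!mem_cgroup_trylock_pages(memcg))
		return;			/* rcu_read_unlock() already done */

	/* folio_memcg(folio) is stable for pages of @memcg in this window */

	mem_cgroup_unlock_pages();	/* drops the RCU read lock */
}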
@@ -985,15 +980,7 @@ static inline void mod_memcg_page_state(struct page *page,
rcu_read_unlock();
}
-static inline unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx)
-{
- long x = READ_ONCE(memcg->vmstats.state[idx]);
-#ifdef CONFIG_SMP
- if (x < 0)
- x = 0;
-#endif
- return x;
-}
+unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx);
static inline unsigned long lruvec_page_state(struct lruvec *lruvec,
enum node_stat_item idx)
@@ -1238,7 +1225,7 @@ static inline int mem_cgroup_charge(struct folio *folio,
return 0;
}
-static inline int mem_cgroup_swapin_charge_page(struct page *page,
+static inline int mem_cgroup_swapin_charge_folio(struct folio *folio,
struct mm_struct *mm, gfp_t gfp, swp_entry_t entry)
{
return 0;
@@ -1433,6 +1420,18 @@ static inline void folio_memcg_unlock(struct folio *folio)
{
}
+static inline bool mem_cgroup_trylock_pages(struct mem_cgroup *memcg)
+{
+ /* to match folio_memcg_rcu() */
+ rcu_read_lock();
+ return true;
+}
+
+static inline void mem_cgroup_unlock_pages(void)
+{
+ rcu_read_unlock();
+}
+
static inline void mem_cgroup_handle_over_high(void)
{
}
@@ -1779,7 +1778,7 @@ static inline void count_objcg_event(struct obj_cgroup *objcg,
{
struct mem_cgroup *memcg;
- if (mem_cgroup_kmem_disabled())
+ if (!memcg_kmem_enabled())
return;
rcu_read_lock();
@@ -1788,42 +1787,6 @@ static inline void count_objcg_event(struct obj_cgroup *objcg,
rcu_read_unlock();
}
-/**
- * get_mem_cgroup_from_obj - get a memcg associated with passed kernel object.
- * @p: pointer to object from which memcg should be extracted. It can be NULL.
- *
- * Retrieves the memory group into which the memory of the pointed kernel
- * object is accounted. If memcg is found, its reference is taken.
- * If a passed kernel object is uncharged, or if proper memcg cannot be found,
- * as well as if mem_cgroup is disabled, NULL is returned.
- *
- * Return: valid memcg pointer with taken reference or NULL.
- */
-static inline struct mem_cgroup *get_mem_cgroup_from_obj(void *p)
-{
- struct mem_cgroup *memcg;
-
- rcu_read_lock();
- do {
- memcg = mem_cgroup_from_obj(p);
- } while (memcg && !css_tryget(&memcg->css));
- rcu_read_unlock();
- return memcg;
-}
-
-/**
- * mem_cgroup_or_root - always returns a pointer to a valid memory cgroup.
- * @memcg: pointer to a valid memory cgroup or NULL.
- *
- * If passed argument is not NULL, returns it without any additional checks
- * and changes. Otherwise, root_mem_cgroup is returned.
- *
- * NOTE: root_mem_cgroup can be NULL during early boot.
- */
-static inline struct mem_cgroup *mem_cgroup_or_root(struct mem_cgroup *memcg)
-{
- return memcg ? memcg : root_mem_cgroup;
-}
#else
static inline bool mem_cgroup_kmem_disabled(void)
{
@@ -1880,15 +1843,6 @@ static inline void count_objcg_event(struct obj_cgroup *objcg,
{
}
-static inline struct mem_cgroup *get_mem_cgroup_from_obj(void *p)
-{
- return NULL;
-}
-
-static inline struct mem_cgroup *mem_cgroup_or_root(struct mem_cgroup *memcg)
-{
- return NULL;
-}
#endif /* CONFIG_MEMCG_KMEM */
#if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_ZSWAP)
diff --git a/include/linux/memory-tiers.h b/include/linux/memory-tiers.h
new file mode 100644
index 000000000000..965009aa01d7
--- /dev/null
+++ b/include/linux/memory-tiers.h
@@ -0,0 +1,102 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_MEMORY_TIERS_H
+#define _LINUX_MEMORY_TIERS_H
+
+#include <linux/types.h>
+#include <linux/nodemask.h>
+#include <linux/kref.h>
+#include <linux/mmzone.h>
+/*
+ * Each tier covers an abstract distance chunk of size 128.
+ */
+#define MEMTIER_CHUNK_BITS 7
+#define MEMTIER_CHUNK_SIZE (1 << MEMTIER_CHUNK_BITS)
+/*
+ * Smaller abstract distance values imply faster (higher) memory tiers. Offset
+ * the DRAM adistance so that we can accommodate devices with a slightly lower
+ * adistance value (slightly faster) than default DRAM adistance to be part of
+ * the same memory tier.
+ */
+#define MEMTIER_ADISTANCE_DRAM ((4 * MEMTIER_CHUNK_SIZE) + (MEMTIER_CHUNK_SIZE >> 1))
+#define MEMTIER_HOTPLUG_PRIO 100
+
+struct memory_tier;
+struct memory_dev_type {
+ /* list of memory types that are part of the same tier as this type */
+ struct list_head tier_sibiling;
+ /* abstract distance for this specific memory type */
+ int adistance;
+ /* Nodes of same abstract distance */
+ nodemask_t nodes;
+ struct kref kref;
+};
+
+#ifdef CONFIG_NUMA
+extern bool numa_demotion_enabled;
+struct memory_dev_type *alloc_memory_type(int adistance);
+void destroy_memory_type(struct memory_dev_type *memtype);
+void init_node_memory_type(int node, struct memory_dev_type *default_type);
+void clear_node_memory_type(int node, struct memory_dev_type *memtype);
+#ifdef CONFIG_MIGRATION
+int next_demotion_node(int node);
+void node_get_allowed_targets(pg_data_t *pgdat, nodemask_t *targets);
+bool node_is_toptier(int node);
+#else
+static inline int next_demotion_node(int node)
+{
+ return NUMA_NO_NODE;
+}
+
+static inline void node_get_allowed_targets(pg_data_t *pgdat, nodemask_t *targets)
+{
+ *targets = NODE_MASK_NONE;
+}
+
+static inline bool node_is_toptier(int node)
+{
+ return true;
+}
+#endif
+
+#else
+
+#define numa_demotion_enabled false
+/*
+ * The CONFIG_NUMA implementation returns a non-NULL error pointer on failure.
+ */
+static inline struct memory_dev_type *alloc_memory_type(int adistance)
+{
+ return NULL;
+}
+
+static inline void destroy_memory_type(struct memory_dev_type *memtype)
+{
+
+}
+
+static inline void init_node_memory_type(int node, struct memory_dev_type *default_type)
+{
+
+}
+
+static inline void clear_node_memory_type(int node, struct memory_dev_type *memtype)
+{
+
+}
+
+static inline int next_demotion_node(int node)
+{
+ return NUMA_NO_NODE;
+}
+
+static inline void node_get_allowed_targets(pg_data_t *pgdat, nodemask_t *targets)
+{
+ *targets = NODE_MASK_NONE;
+}
+
+static inline bool node_is_toptier(int node)
+{
+ return true;
+}
+#endif /* CONFIG_NUMA */
+#endif /* _LINUX_MEMORY_TIERS_H */
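(Not part of the patch.) For illustration only, a driver-side sketch registering a slower-than-DRAM memory type for a node; the one-chunk offset and the error handling (assuming the CONFIG_NUMA implementation returns an ERR_PTR on failure) are assumptions:

static struct memory_dev_type *example_memtype;

static int example_register_slow_node(int node)
{
	/* one abstract-distance chunk slower than default DRAM */
	example_memtype = alloc_memory_type(MEMTIER_ADISTANCE_DRAM + MEMTIER_CHUNK_SIZE);
	if (IS_ERR(example_memtype))
		return PTR_ERR(example_memtype);

	init_node_memory_type(node, example_memtype);
	return 0;
}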
diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h
index e0b2209ab71c..9fcbf5706595 100644
--- a/include/linux/memory_hotplug.h
+++ b/include/linux/memory_hotplug.h
@@ -11,7 +11,6 @@ struct page;
struct zone;
struct pglist_data;
struct mem_section;
-struct memory_block;
struct memory_group;
struct resource;
struct vmem_altmap;
@@ -44,11 +43,6 @@ extern void arch_refresh_nodedata(int nid, pg_data_t *pgdat);
({ \
memblock_alloc(sizeof(*pgdat), SMP_CACHE_BYTES); \
})
-/*
- * This definition is just for error path in node hotadd.
- * For node hotremove, we have to replace this.
- */
-#define generic_free_nodedata(pgdat) kfree(pgdat)
extern pg_data_t *node_data[];
static inline void arch_refresh_nodedata(int nid, pg_data_t *pgdat)
@@ -64,9 +58,6 @@ static inline pg_data_t *generic_alloc_nodedata(int nid)
BUG();
return NULL;
}
-static inline void generic_free_nodedata(pg_data_t *pgdat)
-{
-}
static inline void arch_refresh_nodedata(int nid, pg_data_t *pgdat)
{
}
@@ -216,6 +207,22 @@ void put_online_mems(void);
void mem_hotplug_begin(void);
void mem_hotplug_done(void);
+/* See kswapd_is_running() */
+static inline void pgdat_kswapd_lock(pg_data_t *pgdat)
+{
+ mutex_lock(&pgdat->kswapd_lock);
+}
+
+static inline void pgdat_kswapd_unlock(pg_data_t *pgdat)
+{
+ mutex_unlock(&pgdat->kswapd_lock);
+}
+
+static inline void pgdat_kswapd_lock_init(pg_data_t *pgdat)
+{
+ mutex_init(&pgdat->kswapd_lock);
+}
+
#else /* ! CONFIG_MEMORY_HOTPLUG */
#define pfn_to_online_page(pfn) \
({ \
@@ -252,6 +259,10 @@ static inline bool movable_node_is_enabled(void)
{
return false;
}
+
+static inline void pgdat_kswapd_lock(pg_data_t *pgdat) {}
+static inline void pgdat_kswapd_unlock(pg_data_t *pgdat) {}
+static inline void pgdat_kswapd_lock_init(pg_data_t *pgdat) {}
#endif /* ! CONFIG_MEMORY_HOTPLUG */
/*
@@ -333,7 +344,6 @@ extern void move_pfn_range_to_zone(struct zone *zone, unsigned long start_pfn,
extern void remove_pfn_range_from_zone(struct zone *zone,
unsigned long start_pfn,
unsigned long nr_pages);
-extern bool is_memblock_offlined(struct memory_block *mem);
extern int sparse_add_section(int nid, unsigned long pfn,
unsigned long nr_pages, struct vmem_altmap *altmap,
struct dev_pagemap *pgmap);
diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h
index 668389b4b53d..d232de7cdc56 100644
--- a/include/linux/mempolicy.h
+++ b/include/linux/mempolicy.h
@@ -151,13 +151,6 @@ extern bool mempolicy_in_oom_domain(struct task_struct *tsk,
const nodemask_t *mask);
extern nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy);
-static inline nodemask_t *policy_nodemask_current(gfp_t gfp)
-{
- struct mempolicy *mpol = get_task_policy(current);
-
- return policy_nodemask(gfp, mpol);
-}
-
extern unsigned int mempolicy_slab_node(void);
extern enum zone_type policy_zone;
@@ -189,6 +182,7 @@ static inline bool mpol_is_preferred_many(struct mempolicy *pol)
return (pol->mode == MPOL_PREFERRED_MANY);
}
+extern bool apply_policy_zone(struct mempolicy *policy, enum zone_type zone);
#else
@@ -294,11 +288,6 @@ static inline void mpol_put_task_policy(struct task_struct *task)
{
}
-static inline nodemask_t *policy_nodemask_current(gfp_t gfp)
-{
- return NULL;
-}
-
static inline bool mpol_is_preferred_many(struct mempolicy *pol)
{
return false;
diff --git a/include/linux/memremap.h b/include/linux/memremap.h
index 19010491a603..7fcaf3180a5b 100644
--- a/include/linux/memremap.h
+++ b/include/linux/memremap.h
@@ -139,6 +139,11 @@ struct dev_pagemap {
};
};
+static inline bool pgmap_has_memory_failure(struct dev_pagemap *pgmap)
+{
+ return pgmap->ops && pgmap->ops->memory_failure;
+}
+
static inline struct vmem_altmap *pgmap_altmap(struct dev_pagemap *pgmap)
{
if (pgmap->flags & PGMAP_ALTMAP_VALID)
@@ -182,6 +187,7 @@ static inline bool folio_is_device_coherent(const struct folio *folio)
}
#ifdef CONFIG_ZONE_DEVICE
+void zone_device_page_init(struct page *page);
void *memremap_pages(struct dev_pagemap *pgmap, int nid);
void memunmap_pages(struct dev_pagemap *pgmap);
void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap);
diff --git a/include/linux/mfd/ocelot.h b/include/linux/mfd/ocelot.h
new file mode 100644
index 000000000000..dd72073d2d4f
--- /dev/null
+++ b/include/linux/mfd/ocelot.h
@@ -0,0 +1,62 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/* Copyright 2022 Innovative Advantage Inc. */
+
+#ifndef _LINUX_MFD_OCELOT_H
+#define _LINUX_MFD_OCELOT_H
+
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/types.h>
+
+struct resource;
+
+static inline struct regmap *
+ocelot_regmap_from_resource_optional(struct platform_device *pdev,
+ unsigned int index,
+ const struct regmap_config *config)
+{
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ void __iomem *regs;
+
+ /*
+ * Don't use _get_and_ioremap_resource() here, since that would print
+ * "invalid resource" messages and simply add confusion.
+ */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, index);
+ if (res) {
+ regs = devm_ioremap_resource(dev, res);
+ if (IS_ERR(regs))
+ return ERR_CAST(regs);
+ return devm_regmap_init_mmio(dev, regs, config);
+ }
+
+ /*
+ * Fall back to using REG and getting the resource from the parent
+ * device, which is possible in an MFD configuration
+ */
+ if (dev->parent) {
+ res = platform_get_resource(pdev, IORESOURCE_REG, index);
+ if (!res)
+ return NULL;
+
+ return dev_get_regmap(dev->parent, res->name);
+ }
+
+ return NULL;
+}
+
+static inline struct regmap *
+ocelot_regmap_from_resource(struct platform_device *pdev, unsigned int index,
+ const struct regmap_config *config)
+{
+ struct regmap *map;
+
+ map = ocelot_regmap_from_resource_optional(pdev, index, config);
+ return map ?: ERR_PTR(-ENOENT);
+}
+
+#endif
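(Not part of the patch.) A brief sketch of the intended use from a sub-driver probe; the regmap_config values and the probe function are placeholders:

static const struct regmap_config example_target_config = {
	.reg_bits   = 32,
	.val_bits   = 32,
	.reg_stride = 4,
};

static int example_probe(struct platform_device *pdev)
{
	struct regmap *map;

	/* works standalone (MMIO resource) and as an MFD child (REG resource) */
	map = ocelot_regmap_from_resource(pdev, 0, &example_target_config);
	if (IS_ERR(map))
		return PTR_ERR(map);

	return 0;
}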
diff --git a/include/linux/mfd/rk808.h b/include/linux/mfd/rk808.h
index 58602032e642..9af1f3105f80 100644
--- a/include/linux/mfd/rk808.h
+++ b/include/linux/mfd/rk808.h
@@ -519,6 +519,77 @@ enum rk809_reg_id {
#define MIC_DIFF_DIS (0x0 << 7)
#define MIC_DIFF_EN (0x1 << 7)
+/* RK817 Battery Registers */
+#define RK817_GAS_GAUGE_ADC_CONFIG0 0x50
+#define RK817_GG_EN (0x1 << 7)
+#define RK817_SYS_VOL_ADC_EN (0x1 << 6)
+#define RK817_TS_ADC_EN (0x1 << 5)
+#define RK817_USB_VOL_ADC_EN (0x1 << 4)
+#define RK817_BAT_VOL_ADC_EN (0x1 << 3)
+#define RK817_BAT_CUR_ADC_EN (0x1 << 2)
+
+#define RK817_GAS_GAUGE_ADC_CONFIG1 0x55
+
+#define RK817_VOL_CUR_CALIB_UPD BIT(7)
+
+#define RK817_GAS_GAUGE_GG_CON 0x56
+#define RK817_GAS_GAUGE_GG_STS 0x57
+
+#define RK817_BAT_CON (0x1 << 4)
+#define RK817_RELAX_VOL_UPD (0x3 << 2)
+#define RK817_RELAX_STS (0x1 << 1)
+
+#define RK817_GAS_GAUGE_RELAX_THRE_H 0x58
+#define RK817_GAS_GAUGE_RELAX_THRE_L 0x59
+#define RK817_GAS_GAUGE_OCV_THRE_VOL 0x62
+#define RK817_GAS_GAUGE_OCV_VOL_H 0x63
+#define RK817_GAS_GAUGE_OCV_VOL_L 0x64
+#define RK817_GAS_GAUGE_PWRON_VOL_H 0x6b
+#define RK817_GAS_GAUGE_PWRON_VOL_L 0x6c
+#define RK817_GAS_GAUGE_PWRON_CUR_H 0x6d
+#define RK817_GAS_GAUGE_PWRON_CUR_L 0x6e
+#define RK817_GAS_GAUGE_OFF_CNT 0x6f
+#define RK817_GAS_GAUGE_Q_INIT_H3 0x70
+#define RK817_GAS_GAUGE_Q_INIT_H2 0x71
+#define RK817_GAS_GAUGE_Q_INIT_L1 0x72
+#define RK817_GAS_GAUGE_Q_INIT_L0 0x73
+#define RK817_GAS_GAUGE_Q_PRES_H3 0x74
+#define RK817_GAS_GAUGE_Q_PRES_H2 0x75
+#define RK817_GAS_GAUGE_Q_PRES_L1 0x76
+#define RK817_GAS_GAUGE_Q_PRES_L0 0x77
+#define RK817_GAS_GAUGE_BAT_VOL_H 0x78
+#define RK817_GAS_GAUGE_BAT_VOL_L 0x79
+#define RK817_GAS_GAUGE_BAT_CUR_H 0x7a
+#define RK817_GAS_GAUGE_BAT_CUR_L 0x7b
+#define RK817_GAS_GAUGE_USB_VOL_H 0x7e
+#define RK817_GAS_GAUGE_USB_VOL_L 0x7f
+#define RK817_GAS_GAUGE_SYS_VOL_H 0x80
+#define RK817_GAS_GAUGE_SYS_VOL_L 0x81
+#define RK817_GAS_GAUGE_Q_MAX_H3 0x82
+#define RK817_GAS_GAUGE_Q_MAX_H2 0x83
+#define RK817_GAS_GAUGE_Q_MAX_L1 0x84
+#define RK817_GAS_GAUGE_Q_MAX_L0 0x85
+#define RK817_GAS_GAUGE_SLEEP_CON_SAMP_CUR_H 0x8f
+#define RK817_GAS_GAUGE_SLEEP_CON_SAMP_CUR_L 0x90
+#define RK817_GAS_GAUGE_CAL_OFFSET_H 0x91
+#define RK817_GAS_GAUGE_CAL_OFFSET_L 0x92
+#define RK817_GAS_GAUGE_VCALIB0_H 0x93
+#define RK817_GAS_GAUGE_VCALIB0_L 0x94
+#define RK817_GAS_GAUGE_VCALIB1_H 0x95
+#define RK817_GAS_GAUGE_VCALIB1_L 0x96
+#define RK817_GAS_GAUGE_IOFFSET_H 0x97
+#define RK817_GAS_GAUGE_IOFFSET_L 0x98
+#define RK817_GAS_GAUGE_BAT_R1 0x9a
+#define RK817_GAS_GAUGE_BAT_R2 0x9b
+#define RK817_GAS_GAUGE_BAT_R3 0x9c
+#define RK817_GAS_GAUGE_DATA0 0x9d
+#define RK817_GAS_GAUGE_DATA1 0x9e
+#define RK817_GAS_GAUGE_DATA2 0x9f
+#define RK817_GAS_GAUGE_DATA3 0xa0
+#define RK817_GAS_GAUGE_DATA4 0xa1
+#define RK817_GAS_GAUGE_DATA5 0xa2
+#define RK817_GAS_GAUGE_CUR_ADC_K0 0xb0
+
#define RK817_POWER_EN_REG(i) (0xb1 + (i))
#define RK817_POWER_SLP_EN_REG(i) (0xb5 + (i))
@@ -544,10 +615,30 @@ enum rk809_reg_id {
#define RK817_LDO_ON_VSEL_REG(idx) (0xcc + (idx) * 2)
#define RK817_BOOST_OTG_CFG (0xde)
+#define RK817_PMIC_CHRG_OUT 0xe4
+#define RK817_CHRG_VOL_SEL (0x07 << 4)
+#define RK817_CHRG_CUR_SEL (0x07 << 0)
+
+#define RK817_PMIC_CHRG_IN 0xe5
+#define RK817_USB_VLIM_EN (0x01 << 7)
+#define RK817_USB_VLIM_SEL (0x07 << 4)
+#define RK817_USB_ILIM_EN (0x01 << 3)
+#define RK817_USB_ILIM_SEL (0x07 << 0)
+#define RK817_PMIC_CHRG_TERM 0xe6
+#define RK817_CHRG_TERM_ANA_DIG (0x01 << 2)
+#define RK817_CHRG_TERM_ANA_SEL (0x03 << 0)
+#define RK817_CHRG_EN (0x01 << 6)
+
+#define RK817_PMIC_CHRG_STS 0xeb
+#define RK817_BAT_EXS BIT(7)
+#define RK817_CHG_STS (0x07 << 4)
+
#define RK817_ID_MSB 0xed
#define RK817_ID_LSB 0xee
#define RK817_SYS_STS 0xf0
+#define RK817_PLUG_IN_STS (0x1 << 6)
+
#define RK817_SYS_CFG(i) (0xf1 + (i))
#define RK817_ON_SOURCE_REG 0xf5
diff --git a/include/linux/migrate.h b/include/linux/migrate.h
index 22c0a0cf5e0c..3ef77f52a4f0 100644
--- a/include/linux/migrate.h
+++ b/include/linux/migrate.h
@@ -62,6 +62,8 @@ extern const char *migrate_reason_names[MR_TYPES];
#ifdef CONFIG_MIGRATION
extern void putback_movable_pages(struct list_head *l);
+int migrate_folio_extra(struct address_space *mapping, struct folio *dst,
+ struct folio *src, enum migrate_mode mode, int extra_count);
int migrate_folio(struct address_space *mapping, struct folio *dst,
struct folio *src, enum migrate_mode mode);
extern int migrate_pages(struct list_head *l, new_page_t new, free_page_t free,
@@ -100,21 +102,6 @@ static inline int migrate_huge_page_move_mapping(struct address_space *mapping,
#endif /* CONFIG_MIGRATION */
-#if defined(CONFIG_MIGRATION) && defined(CONFIG_NUMA)
-extern void set_migration_target_nodes(void);
-extern void migrate_on_reclaim_init(void);
-extern bool numa_demotion_enabled;
-extern int next_demotion_node(int node);
-#else
-static inline void set_migration_target_nodes(void) {}
-static inline void migrate_on_reclaim_init(void) {}
-static inline int next_demotion_node(int node)
-{
- return NUMA_NO_NODE;
-}
-#define numa_demotion_enabled false
-#endif
-
#ifdef CONFIG_COMPACTION
bool PageMovable(struct page *page);
void __SetPageMovable(struct page *page, const struct movable_operations *ops);
@@ -212,11 +199,24 @@ struct migrate_vma {
*/
void *pgmap_owner;
unsigned long flags;
+
+ /*
+ * Set to vmf->page if this is being called to migrate a page as part of
+ * a migrate_to_ram() callback.
+ */
+ struct page *fault_page;
};
int migrate_vma_setup(struct migrate_vma *args);
void migrate_vma_pages(struct migrate_vma *migrate);
void migrate_vma_finalize(struct migrate_vma *migrate);
+int migrate_device_range(unsigned long *src_pfns, unsigned long start,
+ unsigned long npages);
+void migrate_device_pages(unsigned long *src_pfns, unsigned long *dst_pfns,
+ unsigned long npages);
+void migrate_device_finalize(unsigned long *src_pfns,
+ unsigned long *dst_pfns, unsigned long npages);
+
#endif /* CONFIG_MIGRATION */
#endif /* _LINUX_MIGRATE_H */
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
index b5f58fd37a0f..1ff91cb79ded 100644
--- a/include/linux/mlx5/device.h
+++ b/include/linux/mlx5/device.h
@@ -325,6 +325,7 @@ enum mlx5_event {
MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR = 0x10,
MLX5_EVENT_TYPE_WQ_ACCESS_ERROR = 0x11,
MLX5_EVENT_TYPE_SRQ_CATAS_ERROR = 0x12,
+ MLX5_EVENT_TYPE_OBJECT_CHANGE = 0x27,
MLX5_EVENT_TYPE_INTERNAL_ERROR = 0x08,
MLX5_EVENT_TYPE_PORT_CHANGE = 0x09,
@@ -699,6 +700,12 @@ struct mlx5_eqe_temp_warning {
__be64 sensor_warning_lsb;
} __packed;
+struct mlx5_eqe_obj_change {
+ u8 rsvd0[2];
+ __be16 obj_type;
+ __be32 obj_id;
+} __packed;
+
#define SYNC_RST_STATE_MASK 0xf
enum sync_rst_state_type {
@@ -737,6 +744,7 @@ union ev_data {
struct mlx5_eqe_xrq_err xrq_err;
struct mlx5_eqe_sync_fw_update sync_fw_update;
struct mlx5_eqe_vhca_state vhca_state;
+ struct mlx5_eqe_obj_change obj_change;
} __packed;
struct mlx5_eqe {
@@ -874,12 +882,6 @@ static inline u8 get_cqe_opcode(struct mlx5_cqe64 *cqe)
return cqe->op_own >> 4;
}
-static inline u8 get_cqe_enhanced_num_mini_cqes(struct mlx5_cqe64 *cqe)
-{
- /* num_of_mini_cqes is zero based */
- return get_cqe_opcode(cqe) + 1;
-}
-
static inline u8 get_cqe_lro_tcppsh(struct mlx5_cqe64 *cqe)
{
return (cqe->lro.tcppsh_abort_dupack >> 6) & 1;
@@ -890,11 +892,6 @@ static inline u8 get_cqe_l4_hdr_type(struct mlx5_cqe64 *cqe)
return (cqe->l4_l3_hdr_type >> 4) & 0x7;
}
-static inline u8 get_cqe_l3_hdr_type(struct mlx5_cqe64 *cqe)
-{
- return (cqe->l4_l3_hdr_type >> 2) & 0x3;
-}
-
static inline bool cqe_is_tunneled(struct mlx5_cqe64 *cqe)
{
return cqe->tls_outer_l3_tunneled & 0x1;
@@ -1198,8 +1195,10 @@ enum mlx5_cap_type {
MLX5_CAP_DEV_EVENT = 0x14,
MLX5_CAP_IPSEC,
MLX5_CAP_DEV_SHAMPO = 0x1d,
+ MLX5_CAP_MACSEC = 0x1f,
MLX5_CAP_GENERAL_2 = 0x20,
MLX5_CAP_PORT_SELECTION = 0x25,
+ MLX5_CAP_ADV_VIRTUALIZATION = 0x26,
/* NUM OF CAP Types */
MLX5_CAP_NUM
};
@@ -1365,6 +1364,14 @@ enum mlx5_qcam_feature_groups {
MLX5_GET(port_selection_cap, \
mdev->caps.hca[MLX5_CAP_PORT_SELECTION]->max, cap)
+#define MLX5_CAP_ADV_VIRTUALIZATION(mdev, cap) \
+ MLX5_GET(adv_virtualization_cap, \
+ mdev->caps.hca[MLX5_CAP_ADV_VIRTUALIZATION]->cur, cap)
+
+#define MLX5_CAP_ADV_VIRTUALIZATION_MAX(mdev, cap) \
+ MLX5_GET(adv_virtualization_cap, \
+ mdev->caps.hca[MLX5_CAP_ADV_VIRTUALIZATION]->max, cap)
+
#define MLX5_CAP_FLOWTABLE_PORT_SELECTION(mdev, cap) \
MLX5_CAP_PORT_SELECTION(mdev, flow_table_properties_port_selection.cap)
@@ -1446,6 +1453,9 @@ enum mlx5_qcam_feature_groups {
#define MLX5_CAP_DEV_SHAMPO(mdev, cap)\
MLX5_GET(shampo_cap, mdev->caps.hca_cur[MLX5_CAP_DEV_SHAMPO], cap)
+#define MLX5_CAP_MACSEC(mdev, cap)\
+ MLX5_GET(macsec_cap, (mdev)->caps.hca[MLX5_CAP_MACSEC]->cur, cap)
+
enum {
MLX5_CMD_STAT_OK = 0x0,
MLX5_CMD_STAT_INT_ERR = 0x1,
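
The new MLX5_CAP_MACSEC()/MLX5_CAP_ADV_VIRTUALIZATION() accessors follow the existing pattern of reading cached HCA capability pages with MLX5_GET. A hedged sketch of a probe-time check, assuming the MACSEC capability page has already been queried (e.g. via mlx5_core_get_caps()) so the cached caps.hca[] entry is valid:

/* Sketch: gate MACsec offload setup on the newly added capability bits. */
static bool example_macsec_offload_supported(struct mlx5_core_dev *mdev)
{
	if (!(MLX5_CAP_GEN_64(mdev, general_obj_types) &
	      MLX5_GENERAL_OBJ_TYPES_CAP_MACSEC_OFFLOAD))
		return false;

	/* 128-bit AES-GCM encrypt/decrypt is the minimum assumed here. */
	return MLX5_CAP_MACSEC(mdev, macsec_crypto_esp_aes_gcm_128_encrypt) &&
	       MLX5_CAP_MACSEC(mdev, macsec_crypto_esp_aes_gcm_128_decrypt);
}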
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index c32de987fa71..a12929bc31b2 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -698,6 +698,8 @@ struct mlx5_pps {
struct work_struct out_work;
u64 start[MAX_PIN_NUM];
u8 enabled;
+ u64 min_npps_period;
+ u64 min_out_pulse_duration_ns;
};
struct mlx5_timer {
@@ -855,11 +857,6 @@ struct mlx5_cmd_work_ent {
refcount_t refcnt;
};
-struct mlx5_pas {
- u64 pa;
- u8 log_sz;
-};
-
enum phy_port_state {
MLX5_AAA_111
};
@@ -1016,11 +1013,11 @@ int mlx5_cmd_exec_polling(struct mlx5_core_dev *dev, void *in, int in_size,
bool mlx5_cmd_is_down(struct mlx5_core_dev *dev);
int mlx5_core_get_caps(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type);
-void mlx5_health_flush(struct mlx5_core_dev *dev);
void mlx5_health_cleanup(struct mlx5_core_dev *dev);
int mlx5_health_init(struct mlx5_core_dev *dev);
void mlx5_start_health_poll(struct mlx5_core_dev *dev);
void mlx5_stop_health_poll(struct mlx5_core_dev *dev, bool disable_health);
+void mlx5_start_health_fw_log_up(struct mlx5_core_dev *dev);
void mlx5_drain_health_wq(struct mlx5_core_dev *dev);
void mlx5_trigger_health_work(struct mlx5_core_dev *dev);
int mlx5_frag_buf_alloc_node(struct mlx5_core_dev *dev, int size,
@@ -1085,8 +1082,6 @@ int mlx5_core_destroy_psv(struct mlx5_core_dev *dev, int psv_num);
void mlx5_core_put_rsc(struct mlx5_core_rsc_common *common);
int mlx5_query_odp_caps(struct mlx5_core_dev *dev,
struct mlx5_odp_caps *odp_caps);
-int mlx5_core_query_ib_ppcnt(struct mlx5_core_dev *dev,
- u8 port_num, void *out, size_t sz);
int mlx5_init_rl_table(struct mlx5_core_dev *dev);
void mlx5_cleanup_rl_table(struct mlx5_core_dev *dev);
@@ -1153,6 +1148,7 @@ int mlx5_cmd_destroy_vport_lag(struct mlx5_core_dev *dev);
bool mlx5_lag_is_roce(struct mlx5_core_dev *dev);
bool mlx5_lag_is_sriov(struct mlx5_core_dev *dev);
bool mlx5_lag_is_active(struct mlx5_core_dev *dev);
+bool mlx5_lag_mode_is_hash(struct mlx5_core_dev *dev);
bool mlx5_lag_is_master(struct mlx5_core_dev *dev);
bool mlx5_lag_is_shared_fdb(struct mlx5_core_dev *dev);
struct net_device *mlx5_lag_get_roce_netdev(struct mlx5_core_dev *dev);
@@ -1293,4 +1289,8 @@ static inline bool mlx5_get_roce_state(struct mlx5_core_dev *dev)
return mlx5_is_roce_on(dev);
}
+enum {
+ MLX5_OCTWORD = 16,
+};
+
#endif /* MLX5_DRIVER_H */
diff --git a/include/linux/mlx5/fs.h b/include/linux/mlx5/fs.h
index 8e73c377da2c..c7a91981cd5a 100644
--- a/include/linux/mlx5/fs.h
+++ b/include/linux/mlx5/fs.h
@@ -79,6 +79,7 @@ static inline void build_leftovers_ft_param(int *priority,
enum mlx5_flow_namespace_type {
MLX5_FLOW_NAMESPACE_BYPASS,
+ MLX5_FLOW_NAMESPACE_KERNEL_RX_MACSEC,
MLX5_FLOW_NAMESPACE_LAG,
MLX5_FLOW_NAMESPACE_OFFLOADS,
MLX5_FLOW_NAMESPACE_ETHTOOL,
@@ -92,7 +93,8 @@ enum mlx5_flow_namespace_type {
MLX5_FLOW_NAMESPACE_SNIFFER_RX,
MLX5_FLOW_NAMESPACE_SNIFFER_TX,
MLX5_FLOW_NAMESPACE_EGRESS,
- MLX5_FLOW_NAMESPACE_EGRESS_KERNEL,
+ MLX5_FLOW_NAMESPACE_EGRESS_IPSEC,
+ MLX5_FLOW_NAMESPACE_EGRESS_MACSEC,
MLX5_FLOW_NAMESPACE_RDMA_RX,
MLX5_FLOW_NAMESPACE_RDMA_RX_KERNEL,
MLX5_FLOW_NAMESPACE_RDMA_TX,
@@ -243,10 +245,10 @@ struct mlx5_flow_act {
u32 action;
struct mlx5_modify_hdr *modify_hdr;
struct mlx5_pkt_reformat *pkt_reformat;
- union {
- u32 ipsec_obj_id;
- uintptr_t esp_id;
- };
+ struct mlx5_flow_act_crypto_params {
+ u8 type;
+ u32 obj_id;
+ } crypto;
u32 flags;
struct mlx5_fs_vlan vlan[MLX5_FS_VLAN_DEPTH];
struct ib_counters *counters;
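
struct mlx5_flow_act now carries a generic crypto descriptor (type plus object id) instead of the IPsec-only union, so MACsec and IPsec rules share the same plumbing. A small sketch of how a caller might fill it for an encrypting MACsec rule; macsec_obj_id is a hypothetical, previously created offload object, and the enum values come from mlx5_ifc.h later in this series:

/* Sketch: request MACsec encryption plus forwarding on a flow rule. */
static void example_fill_macsec_flow_act(struct mlx5_flow_act *flow_act,
					 u32 macsec_obj_id)
{
	flow_act->crypto.type = MLX5_FLOW_CONTEXT_ENCRYPT_DECRYPT_TYPE_MACSEC;
	flow_act->crypto.obj_id = macsec_obj_id;
	flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_CRYPTO_ENCRYPT |
			    MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
}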
diff --git a/include/linux/mlx5/fs_helpers.h b/include/linux/mlx5/fs_helpers.h
index 9db21cd0e92c..bc5125bc0561 100644
--- a/include/linux/mlx5/fs_helpers.h
+++ b/include/linux/mlx5/fs_helpers.h
@@ -38,46 +38,6 @@
#define MLX5_FS_IPV4_VERSION 4
#define MLX5_FS_IPV6_VERSION 6
-static inline bool mlx5_fs_is_ipsec_flow(const u32 *match_c)
-{
- void *misc_params_c = MLX5_ADDR_OF(fte_match_param, match_c,
- misc_parameters);
-
- return MLX5_GET(fte_match_set_misc, misc_params_c, outer_esp_spi);
-}
-
-static inline bool _mlx5_fs_is_outer_ipproto_flow(const u32 *match_c,
- const u32 *match_v, u8 match)
-{
- const void *headers_c = MLX5_ADDR_OF(fte_match_param, match_c,
- outer_headers);
- const void *headers_v = MLX5_ADDR_OF(fte_match_param, match_v,
- outer_headers);
-
- return MLX5_GET(fte_match_set_lyr_2_4, headers_c, ip_protocol) == 0xff &&
- MLX5_GET(fte_match_set_lyr_2_4, headers_v, ip_protocol) == match;
-}
-
-static inline bool mlx5_fs_is_outer_tcp_flow(const u32 *match_c,
- const u32 *match_v)
-{
- return _mlx5_fs_is_outer_ipproto_flow(match_c, match_v, IPPROTO_TCP);
-}
-
-static inline bool mlx5_fs_is_outer_udp_flow(const u32 *match_c,
- const u32 *match_v)
-{
- return _mlx5_fs_is_outer_ipproto_flow(match_c, match_v, IPPROTO_UDP);
-}
-
-static inline bool mlx5_fs_is_vxlan_flow(const u32 *match_c)
-{
- void *misc_params_c = MLX5_ADDR_OF(fte_match_param, match_c,
- misc_parameters);
-
- return MLX5_GET(fte_match_set_misc, misc_params_c, vxlan_vni);
-}
-
static inline bool _mlx5_fs_is_outer_ipv_flow(struct mlx5_core_dev *mdev,
const u32 *match_c,
const u32 *match_v, int version)
@@ -131,12 +91,4 @@ mlx5_fs_is_outer_ipv6_flow(struct mlx5_core_dev *mdev, const u32 *match_c,
MLX5_FS_IPV6_VERSION);
}
-static inline bool mlx5_fs_is_outer_ipsec_flow(const u32 *match_c)
-{
- void *misc_params_c =
- MLX5_ADDR_OF(fte_match_param, match_c, misc_parameters);
-
- return MLX5_GET(fte_match_set_misc, misc_params_c, outer_esp_spi);
-}
-
#endif
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index 4acd5610e96b..5a4e914e2a6f 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -68,6 +68,7 @@ enum {
MLX5_SET_HCA_CAP_OP_MOD_ODP = 0x2,
MLX5_SET_HCA_CAP_OP_MOD_ATOMIC = 0x3,
MLX5_SET_HCA_CAP_OP_MOD_ROCE = 0x4,
+ MLX5_SET_HCA_CAP_OP_MODE_PORT_SELECTION = 0x25,
};
enum {
@@ -82,6 +83,7 @@ enum {
MLX5_GENERAL_OBJ_TYPES_CAP_SW_ICM = (1ULL << MLX5_OBJ_TYPE_SW_ICM),
MLX5_GENERAL_OBJ_TYPES_CAP_GENEVE_TLV_OPT = (1ULL << 11),
MLX5_GENERAL_OBJ_TYPES_CAP_VIRTIO_NET_Q = (1ULL << 13),
+ MLX5_GENERAL_OBJ_TYPES_CAP_MACSEC_OFFLOAD = (1ULL << 39),
};
enum {
@@ -89,6 +91,7 @@ enum {
MLX5_OBJ_TYPE_VIRTIO_NET_Q = 0x000d,
MLX5_OBJ_TYPE_VIRTIO_Q_COUNTERS = 0x001c,
MLX5_OBJ_TYPE_MATCH_DEFINER = 0x0018,
+ MLX5_OBJ_TYPE_PAGE_TRACK = 0x46,
MLX5_OBJ_TYPE_MKEY = 0xff01,
MLX5_OBJ_TYPE_QP = 0xff02,
MLX5_OBJ_TYPE_PSV = 0xff03,
@@ -449,7 +452,12 @@ struct mlx5_ifc_flow_table_prop_layout_bits {
u8 reserved_at_60[0x2];
u8 reformat_insert[0x1];
u8 reformat_remove[0x1];
- u8 reserver_at_64[0x14];
+ u8 macsec_encrypt[0x1];
+ u8 macsec_decrypt[0x1];
+ u8 reserved_at_66[0x2];
+ u8 reformat_add_macsec[0x1];
+ u8 reformat_remove_macsec[0x1];
+ u8 reserved_at_6a[0xe];
u8 log_max_ft_num[0x8];
u8 reserved_at_80[0x10];
@@ -476,6 +484,22 @@ struct mlx5_ifc_odp_per_transport_service_cap_bits {
u8 reserved_at_6[0x1a];
};
+struct mlx5_ifc_ipv4_layout_bits {
+ u8 reserved_at_0[0x60];
+
+ u8 ipv4[0x20];
+};
+
+struct mlx5_ifc_ipv6_layout_bits {
+ u8 ipv6[16][0x8];
+};
+
+union mlx5_ifc_ipv6_layout_ipv4_layout_auto_bits {
+ struct mlx5_ifc_ipv6_layout_bits ipv6_layout;
+ struct mlx5_ifc_ipv4_layout_bits ipv4_layout;
+ u8 reserved_at_0[0x80];
+};
+
struct mlx5_ifc_fte_match_set_lyr_2_4_bits {
u8 smac_47_16[0x20];
@@ -611,7 +635,11 @@ struct mlx5_ifc_fte_match_set_misc2_bits {
u8 metadata_reg_a[0x20];
- u8 reserved_at_1a0[0x60];
+ u8 reserved_at_1a0[0x8];
+
+ u8 macsec_syndrome[0x8];
+
+ u8 reserved_at_1b0[0x50];
};
struct mlx5_ifc_fte_match_set_misc3_bits {
@@ -813,7 +841,9 @@ struct mlx5_ifc_flow_table_nic_cap_bits {
struct mlx5_ifc_port_selection_cap_bits {
u8 reserved_at_0[0x10];
u8 port_select_flow_table[0x1];
- u8 reserved_at_11[0xf];
+ u8 reserved_at_11[0x1];
+ u8 port_select_flow_table_bypass[0x1];
+ u8 reserved_at_13[0xd];
u8 reserved_at_20[0x1e0];
@@ -1276,6 +1306,24 @@ struct mlx5_ifc_ipsec_cap_bits {
u8 reserved_at_30[0x7d0];
};
+struct mlx5_ifc_macsec_cap_bits {
+ u8 macsec_epn[0x1];
+ u8 reserved_at_1[0x2];
+ u8 macsec_crypto_esp_aes_gcm_256_encrypt[0x1];
+ u8 macsec_crypto_esp_aes_gcm_128_encrypt[0x1];
+ u8 macsec_crypto_esp_aes_gcm_256_decrypt[0x1];
+ u8 macsec_crypto_esp_aes_gcm_128_decrypt[0x1];
+ u8 reserved_at_7[0x4];
+ u8 log_max_macsec_offload[0x5];
+ u8 reserved_at_10[0x10];
+
+ u8 min_log_macsec_full_replay_window[0x8];
+ u8 max_log_macsec_full_replay_window[0x8];
+ u8 reserved_at_30[0x10];
+
+ u8 reserved_at_40[0x7c0];
+};
+
enum {
MLX5_WQ_TYPE_LINKED_LIST = 0x0,
MLX5_WQ_TYPE_CYCLIC = 0x1,
@@ -1443,7 +1491,9 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 reserved_at_120[0xa];
u8 log_max_ra_req_dc[0x6];
- u8 reserved_at_130[0x9];
+ u8 reserved_at_130[0x2];
+ u8 eth_wqe_too_small[0x1];
+ u8 reserved_at_133[0x6];
u8 vnic_env_cq_overrun[0x1];
u8 log_max_ra_res_dc[0x6];
@@ -1707,7 +1757,9 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 steering_format_version[0x4];
u8 create_qp_start_hint[0x18];
- u8 reserved_at_460[0x3];
+ u8 reserved_at_460[0x1];
+ u8 ats[0x1];
+ u8 reserved_at_462[0x1];
u8 log_max_uctx[0x5];
u8 reserved_at_468[0x2];
u8 ipsec_offload[0x1];
@@ -1733,7 +1785,9 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 max_geneve_tlv_options[0x8];
u8 reserved_at_568[0x3];
u8 max_geneve_tlv_option_data_len[0x5];
- u8 reserved_at_570[0x10];
+ u8 reserved_at_570[0x9];
+ u8 adv_virtualization[0x1];
+ u8 reserved_at_57a[0x6];
u8 reserved_at_580[0xb];
u8 log_max_dci_stream_channels[0x5];
@@ -1828,7 +1882,13 @@ struct mlx5_ifc_cmd_hca_cap_2_bits {
u8 max_reformat_remove_size[0x8];
u8 max_reformat_remove_offset[0x8];
- u8 reserved_at_c0[0x160];
+ u8 reserved_at_c0[0xe0];
+
+ u8 reserved_at_1a0[0xb];
+ u8 log_min_mkey_entity_size[0x5];
+ u8 reserved_at_1b0[0x10];
+
+ u8 reserved_at_1c0[0x60];
u8 reserved_at_220[0x1];
u8 sw_vhca_id_valid[0x1];
@@ -3295,6 +3355,7 @@ union mlx5_ifc_hca_cap_union_bits {
struct mlx5_ifc_device_mem_cap_bits device_mem_cap;
struct mlx5_ifc_virtio_emulation_cap_bits virtio_emulation_cap;
struct mlx5_ifc_shampo_cap_bits shampo_cap;
+ struct mlx5_ifc_macsec_cap_bits macsec_cap;
u8 reserved_at_0[0x8000];
};
@@ -3310,8 +3371,8 @@ enum {
MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH = 0x100,
MLX5_FLOW_CONTEXT_ACTION_VLAN_POP_2 = 0x400,
MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2 = 0x800,
- MLX5_FLOW_CONTEXT_ACTION_IPSEC_DECRYPT = 0x1000,
- MLX5_FLOW_CONTEXT_ACTION_IPSEC_ENCRYPT = 0x2000,
+ MLX5_FLOW_CONTEXT_ACTION_CRYPTO_DECRYPT = 0x1000,
+ MLX5_FLOW_CONTEXT_ACTION_CRYPTO_ENCRYPT = 0x2000,
MLX5_FLOW_CONTEXT_ACTION_EXECUTE_ASO = 0x4000,
};
@@ -3321,6 +3382,11 @@ enum {
MLX5_FLOW_CONTEXT_FLOW_SOURCE_LOCAL_VPORT = 0x2,
};
+enum {
+ MLX5_FLOW_CONTEXT_ENCRYPT_DECRYPT_TYPE_IPSEC = 0x0,
+ MLX5_FLOW_CONTEXT_ENCRYPT_DECRYPT_TYPE_MACSEC = 0x1,
+};
+
struct mlx5_ifc_vlan_bits {
u8 ethtype[0x10];
u8 prio[0x3];
@@ -3374,7 +3440,7 @@ struct mlx5_ifc_flow_context_bits {
u8 extended_destination[0x1];
u8 reserved_at_81[0x1];
u8 flow_source[0x2];
- u8 reserved_at_84[0x4];
+ u8 encrypt_decrypt_type[0x4];
u8 destination_list_size[0x18];
u8 reserved_at_a0[0x8];
@@ -3386,7 +3452,7 @@ struct mlx5_ifc_flow_context_bits {
struct mlx5_ifc_vlan_bits push_vlan_2;
- u8 ipsec_obj_id[0x20];
+ u8 encrypt_decrypt_obj_id[0x20];
u8 reserved_at_140[0xc0];
struct mlx5_ifc_fte_match_param_bits match_value;
@@ -3475,7 +3541,9 @@ struct mlx5_ifc_vnic_diagnostic_statistics_bits {
u8 cq_overrun[0x20];
- u8 reserved_at_220[0xde0];
+ u8 eth_wqe_too_small[0x20];
+
+ u8 reserved_at_220[0xdc0];
};
struct mlx5_ifc_traffic_counter_bits {
@@ -3873,7 +3941,9 @@ struct mlx5_ifc_mkc_bits {
u8 lw[0x1];
u8 lr[0x1];
u8 access_mode_1_0[0x2];
- u8 reserved_at_18[0x8];
+ u8 reserved_at_18[0x2];
+ u8 ma_translation_mode[0x2];
+ u8 reserved_at_1c[0x4];
u8 qpn[0x18];
u8 mkey_7_0[0x8];
@@ -6316,6 +6386,8 @@ enum mlx5_reformat_ctx_type {
MLX5_REFORMAT_TYPE_L2_TO_L3_TUNNEL = 0x4,
MLX5_REFORMAT_TYPE_INSERT_HDR = 0xf,
MLX5_REFORMAT_TYPE_REMOVE_HDR = 0x10,
+ MLX5_REFORMAT_TYPE_ADD_MACSEC = 0x11,
+ MLX5_REFORMAT_TYPE_DEL_MACSEC = 0x12,
};
struct mlx5_ifc_alloc_packet_reformat_context_in_bits {
@@ -9789,7 +9861,9 @@ struct mlx5_ifc_pcam_reg_bits {
struct mlx5_ifc_mcam_enhanced_features_bits {
u8 reserved_at_0[0x5d];
u8 mcia_32dwords[0x1];
- u8 reserved_at_5e[0xc];
+ u8 out_pulse_duration_ns[0x1];
+ u8 npps_period[0x1];
+ u8 reserved_at_60[0xa];
u8 reset_state[0x1];
u8 ptpcyc2realtime_modify[0x1];
u8 reserved_at_6c[0x2];
@@ -10289,7 +10363,12 @@ struct mlx5_ifc_mtpps_reg_bits {
u8 reserved_at_18[0x4];
u8 cap_max_num_of_pps_out_pins[0x4];
- u8 reserved_at_20[0x24];
+ u8 reserved_at_20[0x13];
+ u8 cap_log_min_npps_period[0x5];
+ u8 reserved_at_38[0x3];
+ u8 cap_log_min_out_pulse_duration_ns[0x5];
+
+ u8 reserved_at_40[0x4];
u8 cap_pin_3_mode[0x4];
u8 reserved_at_48[0x4];
u8 cap_pin_2_mode[0x4];
@@ -10308,7 +10387,9 @@ struct mlx5_ifc_mtpps_reg_bits {
u8 cap_pin_4_mode[0x4];
u8 field_select[0x20];
- u8 reserved_at_a0[0x60];
+ u8 reserved_at_a0[0x20];
+
+ u8 npps_period[0x40];
u8 enable[0x1];
u8 reserved_at_101[0xb];
@@ -10317,7 +10398,8 @@ struct mlx5_ifc_mtpps_reg_bits {
u8 pin_mode[0x4];
u8 pin[0x8];
- u8 reserved_at_120[0x20];
+ u8 reserved_at_120[0x2];
+ u8 out_pulse_duration_ns[0x1e];
u8 time_stamp[0x40];
@@ -10920,7 +11002,9 @@ struct mlx5_ifc_lagc_bits {
u8 reserved_at_18[0x5];
u8 lag_state[0x3];
- u8 reserved_at_20[0x14];
+ u8 reserved_at_20[0xc];
+ u8 active_port[0x4];
+ u8 reserved_at_30[0x4];
u8 tx_remap_affinity_2[0x4];
u8 reserved_at_38[0x4];
u8 tx_remap_affinity_1[0x4];
@@ -11134,7 +11218,8 @@ struct mlx5_ifc_dealloc_memic_out_bits {
struct mlx5_ifc_umem_bits {
u8 reserved_at_0[0x80];
- u8 reserved_at_80[0x1b];
+ u8 ats[0x1];
+ u8 reserved_at_81[0x1a];
u8 log_page_size[0x5];
u8 page_offset[0x20];
@@ -11471,6 +11556,7 @@ enum {
MLX5_GENERAL_OBJECT_TYPES_IPSEC = 0x13,
MLX5_GENERAL_OBJECT_TYPES_SAMPLER = 0x20,
MLX5_GENERAL_OBJECT_TYPES_FLOW_METER_ASO = 0x24,
+ MLX5_GENERAL_OBJECT_TYPES_MACSEC = 0x27,
};
enum {
@@ -11521,6 +11607,96 @@ struct mlx5_ifc_modify_ipsec_obj_in_bits {
struct mlx5_ifc_ipsec_obj_bits ipsec_object;
};
+enum {
+ MLX5_MACSEC_ASO_REPLAY_PROTECTION = 0x1,
+};
+
+enum {
+ MLX5_MACSEC_ASO_REPLAY_WIN_32BIT = 0x0,
+ MLX5_MACSEC_ASO_REPLAY_WIN_64BIT = 0x1,
+ MLX5_MACSEC_ASO_REPLAY_WIN_128BIT = 0x2,
+ MLX5_MACSEC_ASO_REPLAY_WIN_256BIT = 0x3,
+};
+
+#define MLX5_MACSEC_ASO_INC_SN 0x2
+#define MLX5_MACSEC_ASO_REG_C_4_5 0x2
+
+struct mlx5_ifc_macsec_aso_bits {
+ u8 valid[0x1];
+ u8 reserved_at_1[0x1];
+ u8 mode[0x2];
+ u8 window_size[0x2];
+ u8 soft_lifetime_arm[0x1];
+ u8 hard_lifetime_arm[0x1];
+ u8 remove_flow_enable[0x1];
+ u8 epn_event_arm[0x1];
+ u8 reserved_at_a[0x16];
+
+ u8 remove_flow_packet_count[0x20];
+
+ u8 remove_flow_soft_lifetime[0x20];
+
+ u8 reserved_at_60[0x80];
+
+ u8 mode_parameter[0x20];
+
+ u8 replay_protection_window[8][0x20];
+};
+
+struct mlx5_ifc_macsec_offload_obj_bits {
+ u8 modify_field_select[0x40];
+
+ u8 confidentiality_en[0x1];
+ u8 reserved_at_41[0x1];
+ u8 epn_en[0x1];
+ u8 epn_overlap[0x1];
+ u8 reserved_at_44[0x2];
+ u8 confidentiality_offset[0x2];
+ u8 reserved_at_48[0x4];
+ u8 aso_return_reg[0x4];
+ u8 reserved_at_50[0x10];
+
+ u8 epn_msb[0x20];
+
+ u8 reserved_at_80[0x8];
+ u8 dekn[0x18];
+
+ u8 reserved_at_a0[0x20];
+
+ u8 sci[0x40];
+
+ u8 reserved_at_100[0x8];
+ u8 macsec_aso_access_pd[0x18];
+
+ u8 reserved_at_120[0x60];
+
+ u8 salt[3][0x20];
+
+ u8 reserved_at_1e0[0x20];
+
+ struct mlx5_ifc_macsec_aso_bits macsec_aso;
+};
+
+struct mlx5_ifc_create_macsec_obj_in_bits {
+ struct mlx5_ifc_general_obj_in_cmd_hdr_bits general_obj_in_cmd_hdr;
+ struct mlx5_ifc_macsec_offload_obj_bits macsec_object;
+};
+
+struct mlx5_ifc_modify_macsec_obj_in_bits {
+ struct mlx5_ifc_general_obj_in_cmd_hdr_bits general_obj_in_cmd_hdr;
+ struct mlx5_ifc_macsec_offload_obj_bits macsec_object;
+};
+
+enum {
+ MLX5_MODIFY_MACSEC_BITMASK_EPN_OVERLAP = BIT(0),
+ MLX5_MODIFY_MACSEC_BITMASK_EPN_MSB = BIT(1),
+};
+
+struct mlx5_ifc_query_macsec_obj_out_bits {
+ struct mlx5_ifc_general_obj_out_cmd_hdr_bits general_obj_out_cmd_hdr;
+ struct mlx5_ifc_macsec_offload_obj_bits macsec_object;
+};
+
struct mlx5_ifc_encryption_key_obj_bits {
u8 modify_field_select[0x40];
@@ -11638,6 +11814,7 @@ enum {
enum {
MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_TYPE_TLS = 0x1,
MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_TYPE_IPSEC = 0x2,
+ MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_TYPE_MACSEC = 0x4,
};
struct mlx5_ifc_tls_static_params_bits {
@@ -11818,4 +11995,82 @@ struct mlx5_ifc_load_vhca_state_out_bits {
u8 reserved_at_40[0x40];
};
+struct mlx5_ifc_adv_virtualization_cap_bits {
+ u8 reserved_at_0[0x3];
+ u8 pg_track_log_max_num[0x5];
+ u8 pg_track_max_num_range[0x8];
+ u8 pg_track_log_min_addr_space[0x8];
+ u8 pg_track_log_max_addr_space[0x8];
+
+ u8 reserved_at_20[0x3];
+ u8 pg_track_log_min_msg_size[0x5];
+ u8 reserved_at_28[0x3];
+ u8 pg_track_log_max_msg_size[0x5];
+ u8 reserved_at_30[0x3];
+ u8 pg_track_log_min_page_size[0x5];
+ u8 reserved_at_38[0x3];
+ u8 pg_track_log_max_page_size[0x5];
+
+ u8 reserved_at_40[0x7c0];
+};
+
+struct mlx5_ifc_page_track_report_entry_bits {
+ u8 dirty_address_high[0x20];
+
+ u8 dirty_address_low[0x20];
+};
+
+enum {
+ MLX5_PAGE_TRACK_STATE_TRACKING,
+ MLX5_PAGE_TRACK_STATE_REPORTING,
+ MLX5_PAGE_TRACK_STATE_ERROR,
+};
+
+struct mlx5_ifc_page_track_range_bits {
+ u8 start_address[0x40];
+
+ u8 length[0x40];
+};
+
+struct mlx5_ifc_page_track_bits {
+ u8 modify_field_select[0x40];
+
+ u8 reserved_at_40[0x10];
+ u8 vhca_id[0x10];
+
+ u8 reserved_at_60[0x20];
+
+ u8 state[0x4];
+ u8 track_type[0x4];
+ u8 log_addr_space_size[0x8];
+ u8 reserved_at_90[0x3];
+ u8 log_page_size[0x5];
+ u8 reserved_at_98[0x3];
+ u8 log_msg_size[0x5];
+
+ u8 reserved_at_a0[0x8];
+ u8 reporting_qpn[0x18];
+
+ u8 reserved_at_c0[0x18];
+ u8 num_ranges[0x8];
+
+ u8 reserved_at_e0[0x20];
+
+ u8 range_start_address[0x40];
+
+ u8 length[0x40];
+
+ struct mlx5_ifc_page_track_range_bits track_range[0];
+};
+
+struct mlx5_ifc_create_page_track_obj_in_bits {
+ struct mlx5_ifc_general_obj_in_cmd_hdr_bits general_obj_in_cmd_hdr;
+ struct mlx5_ifc_page_track_bits obj_context;
+};
+
+struct mlx5_ifc_modify_page_track_obj_in_bits {
+ struct mlx5_ifc_general_obj_in_cmd_hdr_bits general_obj_in_cmd_hdr;
+ struct mlx5_ifc_page_track_bits obj_context;
+};
+
#endif /* MLX5_IFC_H */
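
MLX5_GENERAL_OBJECT_TYPES_MACSEC plugs the MACsec offload object into the usual general-object command flow. A hedged sketch of creating one with the structures defined above; only a couple of fields are programmed, and real code would also set the salt, EPN and ASO fields:

/* Sketch: create a MACsec offload object via the general-object interface.
 * key_obj_id is assumed to be an encryption key object created beforehand.
 */
static int example_create_macsec_obj(struct mlx5_core_dev *mdev, u64 sci,
				     u32 key_obj_id, u32 *macsec_obj_id)
{
	u32 in[MLX5_ST_SZ_DW(create_macsec_obj_in)] = {};
	u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {};
	void *obj = MLX5_ADDR_OF(create_macsec_obj_in, in, macsec_object);
	int err;

	MLX5_SET(general_obj_in_cmd_hdr, in, opcode,
		 MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
	MLX5_SET(general_obj_in_cmd_hdr, in, obj_type,
		 MLX5_GENERAL_OBJECT_TYPES_MACSEC);

	MLX5_SET(macsec_offload_obj, obj, confidentiality_en, 1);
	MLX5_SET(macsec_offload_obj, obj, dekn, key_obj_id);
	MLX5_SET64(macsec_offload_obj, obj, sci, sci);

	err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
	if (!err)
		*macsec_obj_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
	return err;
}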
diff --git a/include/linux/mlx5/mlx5_ifc_fpga.h b/include/linux/mlx5/mlx5_ifc_fpga.h
index 45c7c0d67635..0596472923ad 100644
--- a/include/linux/mlx5/mlx5_ifc_fpga.h
+++ b/include/linux/mlx5/mlx5_ifc_fpga.h
@@ -32,30 +32,6 @@
#ifndef MLX5_IFC_FPGA_H
#define MLX5_IFC_FPGA_H
-struct mlx5_ifc_ipv4_layout_bits {
- u8 reserved_at_0[0x60];
-
- u8 ipv4[0x20];
-};
-
-struct mlx5_ifc_ipv6_layout_bits {
- u8 ipv6[16][0x8];
-};
-
-union mlx5_ifc_ipv6_layout_ipv4_layout_auto_bits {
- struct mlx5_ifc_ipv6_layout_bits ipv6_layout;
- struct mlx5_ifc_ipv4_layout_bits ipv4_layout;
- u8 reserved_at_0[0x80];
-};
-
-enum {
- MLX5_FPGA_CAP_SANDBOX_VENDOR_ID_MLNX = 0x2c9,
-};
-
-enum {
- MLX5_FPGA_CAP_SANDBOX_PRODUCT_ID_IPSEC = 0x2,
-};
-
struct mlx5_ifc_fpga_shell_caps_bits {
u8 max_num_qps[0x10];
u8 reserved_at_10[0x8];
diff --git a/include/linux/mlx5/qp.h b/include/linux/mlx5/qp.h
index 8bda3ba5b109..4657d5c54abe 100644
--- a/include/linux/mlx5/qp.h
+++ b/include/linux/mlx5/qp.h
@@ -162,6 +162,8 @@ enum {
MLX5_SEND_WQE_MAX_WQEBBS = 16,
};
+#define MLX5_SEND_WQE_MAX_SIZE (MLX5_SEND_WQE_MAX_WQEBBS * MLX5_SEND_WQE_BB)
+
enum {
MLX5_WQE_FMR_PERM_LOCAL_READ = 1 << 27,
MLX5_WQE_FMR_PERM_LOCAL_WRITE = 1 << 28,
@@ -252,6 +254,7 @@ enum {
enum {
MLX5_ETH_WQE_FT_META_IPSEC = BIT(0),
+ MLX5_ETH_WQE_FT_META_MACSEC = BIT(1),
};
struct mlx5_wqe_eth_seg {
@@ -475,6 +478,12 @@ struct mlx5_klm {
__be64 va;
};
+struct mlx5_ksm {
+ __be32 reserved;
+ __be32 key;
+ __be64 va;
+};
+
struct mlx5_stride_block_entry {
__be16 stride;
__be16 bcount;
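
MLX5_SEND_WQE_MAX_SIZE restates the existing WQEBB limit in bytes: with MLX5_SEND_WQE_BB at its usual 64 bytes, 16 WQEBBs give a 1024-byte ceiling per send WQE. The new struct mlx5_ksm is the fixed-block counterpart of struct mlx5_klm (no per-entry byte count, since every KSM entry covers an equally sized block). A trivial, hypothetical fill helper:

/* Sketch: fill one KSM entry; the block size is implied by the MKEY setup. */
static void example_fill_ksm(struct mlx5_ksm *ksm, u32 lkey, u64 addr)
{
	ksm->key = cpu_to_be32(lkey);
	ksm->va = cpu_to_be64(addr);
}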
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 21f8b27bd9fd..8bbcccbc5565 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -661,6 +661,38 @@ static inline bool vma_is_accessible(struct vm_area_struct *vma)
return vma->vm_flags & VM_ACCESS_FLAGS;
}
+static inline
+struct vm_area_struct *vma_find(struct vma_iterator *vmi, unsigned long max)
+{
+ return mas_find(&vmi->mas, max);
+}
+
+static inline struct vm_area_struct *vma_next(struct vma_iterator *vmi)
+{
+ /*
+ * Uses vma_find() to get the first VMA when the iterator starts.
+ * Calling mas_next() could skip the first entry.
+ */
+ return vma_find(vmi, ULONG_MAX);
+}
+
+static inline struct vm_area_struct *vma_prev(struct vma_iterator *vmi)
+{
+ return mas_prev(&vmi->mas, 0);
+}
+
+static inline unsigned long vma_iter_addr(struct vma_iterator *vmi)
+{
+ return vmi->mas.index;
+}
+
+#define for_each_vma(__vmi, __vma) \
+ while (((__vma) = vma_next(&(__vmi))) != NULL)
+
+/* The MM code likes to work with exclusive end addresses */
+#define for_each_vma_range(__vmi, __vma, __end) \
+ while (((__vma) = vma_find(&(__vmi), (__end) - 1)) != NULL)
+
#ifdef CONFIG_SHMEM
/*
* The vma_is_shmem is not inline because it is used only by slow
@@ -697,7 +729,9 @@ static inline unsigned int compound_order(struct page *page)
*/
static inline unsigned int folio_order(struct folio *folio)
{
- return compound_order(&folio->page);
+ if (!folio_test_large(folio))
+ return 0;
+ return folio->_folio_order;
}
#include <linux/huge_mm.h>
@@ -1255,6 +1289,18 @@ static inline int folio_nid(const struct folio *folio)
}
#ifdef CONFIG_NUMA_BALANCING
+/* page access time bits needs to hold at least 4 seconds */
+#define PAGE_ACCESS_TIME_MIN_BITS 12
+#if LAST_CPUPID_SHIFT < PAGE_ACCESS_TIME_MIN_BITS
+#define PAGE_ACCESS_TIME_BUCKETS \
+ (PAGE_ACCESS_TIME_MIN_BITS - LAST_CPUPID_SHIFT)
+#else
+#define PAGE_ACCESS_TIME_BUCKETS 0
+#endif
+
+#define PAGE_ACCESS_TIME_MASK \
+ (LAST_CPUPID_MASK << PAGE_ACCESS_TIME_BUCKETS)
+
static inline int cpu_pid_to_cpupid(int cpu, int pid)
{
return ((cpu & LAST__CPU_MASK) << LAST__PID_SHIFT) | (pid & LAST__PID_MASK);
@@ -1318,12 +1364,25 @@ static inline void page_cpupid_reset_last(struct page *page)
page->flags |= LAST_CPUPID_MASK << LAST_CPUPID_PGSHIFT;
}
#endif /* LAST_CPUPID_NOT_IN_PAGE_FLAGS */
+
+static inline int xchg_page_access_time(struct page *page, int time)
+{
+ int last_time;
+
+ last_time = page_cpupid_xchg_last(page, time >> PAGE_ACCESS_TIME_BUCKETS);
+ return last_time << PAGE_ACCESS_TIME_BUCKETS;
+}
#else /* !CONFIG_NUMA_BALANCING */
static inline int page_cpupid_xchg_last(struct page *page, int cpupid)
{
return page_to_nid(page); /* XXX */
}
+static inline int xchg_page_access_time(struct page *page, int time)
+{
+ return 0;
+}
+
static inline int page_cpupid_last(struct page *page)
{
return page_to_nid(page); /* XXX */
@@ -1465,6 +1524,11 @@ static inline unsigned long folio_pfn(struct folio *folio)
return page_to_pfn(&folio->page);
}
+static inline struct folio *pfn_folio(unsigned long pfn)
+{
+ return page_folio(pfn_to_page(pfn));
+}
+
static inline atomic_t *folio_pincount_ptr(struct folio *folio)
{
return &folio_page(folio, 1)->compound_pincount;
@@ -1597,7 +1661,13 @@ static inline void set_page_links(struct page *page, enum zone_type zone,
*/
static inline long folio_nr_pages(struct folio *folio)
{
- return compound_nr(&folio->page);
+ if (!folio_test_large(folio))
+ return 1;
+#ifdef CONFIG_64BIT
+ return folio->_folio_nr_pages;
+#else
+ return 1L << folio->_folio_order;
+#endif
}
/**
@@ -1776,7 +1846,11 @@ extern void pagefault_out_of_memory(void);
*/
#define SHOW_MEM_FILTER_NODES (0x0001u) /* disallowed nodes */
-extern void show_free_areas(unsigned int flags, nodemask_t *nodemask);
+extern void __show_free_areas(unsigned int flags, nodemask_t *nodemask, int max_zone_idx);
+static void __maybe_unused show_free_areas(unsigned int flags, nodemask_t *nodemask)
+{
+ __show_free_areas(flags, nodemask, MAX_NR_ZONES - 1);
+}
#ifdef CONFIG_MMU
extern bool can_do_mlock(void);
@@ -1795,8 +1869,9 @@ void zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
unsigned long size);
void zap_page_range(struct vm_area_struct *vma, unsigned long address,
unsigned long size);
-void unmap_vmas(struct mmu_gather *tlb, struct vm_area_struct *start_vma,
- unsigned long start, unsigned long end);
+void unmap_vmas(struct mmu_gather *tlb, struct maple_tree *mt,
+ struct vm_area_struct *start_vma, unsigned long start,
+ unsigned long end);
struct mmu_notifier_range;
@@ -2495,7 +2570,6 @@ extern unsigned long absent_pages_in_range(unsigned long start_pfn,
unsigned long end_pfn);
extern void get_pfn_range_for_nid(unsigned int nid,
unsigned long *start_pfn, unsigned long *end_pfn);
-extern unsigned long find_min_pfn_with_active_regions(void);
#ifndef CONFIG_NUMA
static inline int early_pfn_to_nid(unsigned long pfn)
@@ -2516,7 +2590,12 @@ extern void calculate_min_free_kbytes(void);
extern int __meminit init_per_zone_wmark_min(void);
extern void mem_init(void);
extern void __init mmap_init(void);
-extern void show_mem(unsigned int flags, nodemask_t *nodemask);
+
+extern void __show_mem(unsigned int flags, nodemask_t *nodemask, int max_zone_idx);
+static inline void show_mem(unsigned int flags, nodemask_t *nodemask)
+{
+ __show_mem(flags, nodemask, MAX_NR_ZONES - 1);
+}
extern long si_mem_available(void);
extern void si_meminfo(struct sysinfo * val);
extern void si_meminfo_node(struct sysinfo *val, int nid);
@@ -2593,14 +2672,15 @@ extern int __split_vma(struct mm_struct *, struct vm_area_struct *,
extern int split_vma(struct mm_struct *, struct vm_area_struct *,
unsigned long addr, int new_below);
extern int insert_vm_struct(struct mm_struct *, struct vm_area_struct *);
-extern void __vma_link_rb(struct mm_struct *, struct vm_area_struct *,
- struct rb_node **, struct rb_node *);
extern void unlink_file_vma(struct vm_area_struct *);
extern struct vm_area_struct *copy_vma(struct vm_area_struct **,
unsigned long addr, unsigned long len, pgoff_t pgoff,
bool *need_rmap_locks);
extern void exit_mmap(struct mm_struct *);
+void vma_mas_store(struct vm_area_struct *vma, struct ma_state *mas);
+void vma_mas_remove(struct vm_area_struct *vma, struct ma_state *mas);
+
static inline int check_data_rlimit(unsigned long rlim,
unsigned long new,
unsigned long start,
@@ -2648,8 +2728,9 @@ extern unsigned long mmap_region(struct file *file, unsigned long addr,
extern unsigned long do_mmap(struct file *file, unsigned long addr,
unsigned long len, unsigned long prot, unsigned long flags,
unsigned long pgoff, unsigned long *populate, struct list_head *uf);
-extern int __do_munmap(struct mm_struct *, unsigned long, size_t,
- struct list_head *uf, bool downgrade);
+extern int do_mas_munmap(struct ma_state *mas, struct mm_struct *mm,
+ unsigned long start, size_t len, struct list_head *uf,
+ bool downgrade);
extern int do_munmap(struct mm_struct *, unsigned long, size_t,
struct list_head *uf);
extern int do_madvise(struct mm_struct *mm, unsigned long start, size_t len_in, int behavior);
@@ -2716,26 +2797,12 @@ extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long add
extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
struct vm_area_struct **pprev);
-/**
- * find_vma_intersection() - Look up the first VMA which intersects the interval
- * @mm: The process address space.
- * @start_addr: The inclusive start user address.
- * @end_addr: The exclusive end user address.
- *
- * Returns: The first VMA within the provided range, %NULL otherwise. Assumes
- * start_addr < end_addr.
+/*
+ * Look up the first VMA which intersects the interval [start_addr, end_addr)
+ * NULL if none. Assume start_addr < end_addr.
*/
-static inline
struct vm_area_struct *find_vma_intersection(struct mm_struct *mm,
- unsigned long start_addr,
- unsigned long end_addr)
-{
- struct vm_area_struct *vma = find_vma(mm, start_addr);
-
- if (vma && end_addr <= vma->vm_start)
- vma = NULL;
- return vma;
-}
+ unsigned long start_addr, unsigned long end_addr);
/**
* vma_lookup() - Find a VMA at a specific address
@@ -2747,12 +2814,7 @@ struct vm_area_struct *find_vma_intersection(struct mm_struct *mm,
static inline
struct vm_area_struct *vma_lookup(struct mm_struct *mm, unsigned long addr)
{
- struct vm_area_struct *vma = find_vma(mm, addr);
-
- if (vma && addr < vma->vm_start)
- vma = NULL;
-
- return vma;
+ return mtree_load(&mm->mm_mt, addr);
}
static inline unsigned long vm_start_gap(struct vm_area_struct *vma)
@@ -2788,7 +2850,7 @@ static inline unsigned long vma_pages(struct vm_area_struct *vma)
static inline struct vm_area_struct *find_exact_vma(struct mm_struct *mm,
unsigned long vm_start, unsigned long vm_end)
{
- struct vm_area_struct *vma = find_vma(mm, vm_start);
+ struct vm_area_struct *vma = vma_lookup(mm, vm_start);
if (vma && (vma->vm_start != vm_start || vma->vm_end != vm_end))
vma = NULL;
@@ -2888,7 +2950,6 @@ struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
* and return without waiting upon it */
#define FOLL_NOFAULT 0x80 /* do not fault in pages */
#define FOLL_HWPOISON 0x100 /* check page is hwpoisoned */
-#define FOLL_NUMA 0x200 /* force NUMA hinting page fault */
#define FOLL_MIGRATION 0x400 /* wait for page to replace migration entry */
#define FOLL_TRIED 0x800 /* a retry, previous pass started an IO */
#define FOLL_REMOTE 0x2000 /* we are working on non-current tsk/mm */
@@ -2975,8 +3036,8 @@ static inline int vm_fault_to_errno(vm_fault_t vm_fault, int foll_flags)
* PageAnonExclusive() has to protect against concurrent GUP:
* * Ordinary GUP: Using the PT lock
* * GUP-fast and fork(): mm->write_protect_seq
- * * GUP-fast and KSM or temporary unmapping (swap, migration):
- * clear/invalidate+flush of the page table entry
+ * * GUP-fast and KSM or temporary unmapping (swap, migration): see
+ * page_try_share_anon_rmap()
*
* Must be called with the (sub)page that's actually referenced via the
* page table entry, which might not necessarily be the head page for a
@@ -2997,6 +3058,11 @@ static inline bool gup_must_unshare(unsigned int flags, struct page *page)
*/
if (!PageAnon(page))
return false;
+
+ /* Paired with a memory barrier in page_try_share_anon_rmap(). */
+ if (IS_ENABLED(CONFIG_HAVE_FAST_GUP))
+ smp_rmb();
+
/*
* Note that PageKsm() pages cannot be exclusive, and consequently,
* cannot get pinned.
@@ -3004,6 +3070,21 @@ static inline bool gup_must_unshare(unsigned int flags, struct page *page)
return !PageAnonExclusive(page);
}
+/*
+ * Indicates whether GUP can follow a PROT_NONE mapped page, or whether
+ * a (NUMA hinting) fault is required.
+ */
+static inline bool gup_can_follow_protnone(unsigned int flags)
+{
+ /*
+ * FOLL_FORCE has to be able to make progress even if the VMA is
+ * inaccessible. Further, FOLL_FORCE access usually does not represent
+ * application behaviour and we should avoid triggering NUMA hinting
+ * faults.
+ */
+ return flags & FOLL_FORCE;
+}
+
typedef int (*pte_fn_t)(pte_t *pte, unsigned long addr, void *data);
extern int apply_to_page_range(struct mm_struct *mm, unsigned long address,
unsigned long size, pte_fn_t fn, void *data);
@@ -3011,7 +3092,7 @@ extern int apply_to_existing_page_range(struct mm_struct *mm,
unsigned long address, unsigned long size,
pte_fn_t fn, void *data);
-extern void init_mem_debugging_and_hardening(void);
+extern void __init init_mem_debugging_and_hardening(void);
#ifdef CONFIG_PAGE_POISONING
extern void __kernel_poison_pages(struct page *page, int numpages);
extern void __kernel_unpoison_pages(struct page *page, int numpages);
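
The vma_find()/vma_next()/vma_prev() helpers and the for_each_vma*() macros are the maple-tree replacement for the old vma->vm_next list walk. A minimal sketch of a full address-space walk; VMA_ITERATOR() itself is added to mm_types.h later in this series:

/* Sketch: count every VMA in an address space with the new iterator. */
static unsigned long example_count_vmas(struct mm_struct *mm)
{
	VMA_ITERATOR(vmi, mm, 0);
	struct vm_area_struct *vma;
	unsigned long nr = 0;

	mmap_read_lock(mm);
	for_each_vma(vmi, vma)
		nr++;
	mmap_read_unlock(mm);

	return nr;
}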
diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h
index 7b25b53c474a..e8ed225d8f7c 100644
--- a/include/linux/mm_inline.h
+++ b/include/linux/mm_inline.h
@@ -34,15 +34,25 @@ static inline int page_is_file_lru(struct page *page)
return folio_is_file_lru(page_folio(page));
}
-static __always_inline void update_lru_size(struct lruvec *lruvec,
+static __always_inline void __update_lru_size(struct lruvec *lruvec,
enum lru_list lru, enum zone_type zid,
long nr_pages)
{
struct pglist_data *pgdat = lruvec_pgdat(lruvec);
+ lockdep_assert_held(&lruvec->lru_lock);
+ WARN_ON_ONCE(nr_pages != (int)nr_pages);
+
__mod_lruvec_state(lruvec, NR_LRU_BASE + lru, nr_pages);
__mod_zone_page_state(&pgdat->node_zones[zid],
NR_ZONE_LRU_BASE + lru, nr_pages);
+}
+
+static __always_inline void update_lru_size(struct lruvec *lruvec,
+ enum lru_list lru, enum zone_type zid,
+ long nr_pages)
+{
+ __update_lru_size(lruvec, lru, zid, nr_pages);
#ifdef CONFIG_MEMCG
mem_cgroup_update_lru_size(lruvec, lru, zid, nr_pages);
#endif
@@ -66,11 +76,6 @@ static __always_inline void __folio_clear_lru_flags(struct folio *folio)
__folio_clear_unevictable(folio);
}
-static __always_inline void __clear_page_lru_flags(struct page *page)
-{
- __folio_clear_lru_flags(page_folio(page));
-}
-
/**
* folio_lru_list - Which LRU list should a folio be on?
* @folio: The folio to test.
@@ -94,11 +99,224 @@ static __always_inline enum lru_list folio_lru_list(struct folio *folio)
return lru;
}
+#ifdef CONFIG_LRU_GEN
+
+#ifdef CONFIG_LRU_GEN_ENABLED
+static inline bool lru_gen_enabled(void)
+{
+ DECLARE_STATIC_KEY_TRUE(lru_gen_caps[NR_LRU_GEN_CAPS]);
+
+ return static_branch_likely(&lru_gen_caps[LRU_GEN_CORE]);
+}
+#else
+static inline bool lru_gen_enabled(void)
+{
+ DECLARE_STATIC_KEY_FALSE(lru_gen_caps[NR_LRU_GEN_CAPS]);
+
+ return static_branch_unlikely(&lru_gen_caps[LRU_GEN_CORE]);
+}
+#endif
+
+static inline bool lru_gen_in_fault(void)
+{
+ return current->in_lru_fault;
+}
+
+static inline int lru_gen_from_seq(unsigned long seq)
+{
+ return seq % MAX_NR_GENS;
+}
+
+static inline int lru_hist_from_seq(unsigned long seq)
+{
+ return seq % NR_HIST_GENS;
+}
+
+static inline int lru_tier_from_refs(int refs)
+{
+ VM_WARN_ON_ONCE(refs > BIT(LRU_REFS_WIDTH));
+
+ /* see the comment in folio_lru_refs() */
+ return order_base_2(refs + 1);
+}
+
+static inline int folio_lru_refs(struct folio *folio)
+{
+ unsigned long flags = READ_ONCE(folio->flags);
+ bool workingset = flags & BIT(PG_workingset);
+
+ /*
+ * Return the number of accesses beyond PG_referenced, i.e., N-1 if the
+ * total number of accesses is N>1, since N=0,1 both map to the first
+ * tier. lru_tier_from_refs() will account for this off-by-one. Also see
+ * the comment on MAX_NR_TIERS.
+ */
+ return ((flags & LRU_REFS_MASK) >> LRU_REFS_PGOFF) + workingset;
+}
+
+static inline int folio_lru_gen(struct folio *folio)
+{
+ unsigned long flags = READ_ONCE(folio->flags);
+
+ return ((flags & LRU_GEN_MASK) >> LRU_GEN_PGOFF) - 1;
+}
+
+static inline bool lru_gen_is_active(struct lruvec *lruvec, int gen)
+{
+ unsigned long max_seq = lruvec->lrugen.max_seq;
+
+ VM_WARN_ON_ONCE(gen >= MAX_NR_GENS);
+
+ /* see the comment on MIN_NR_GENS */
+ return gen == lru_gen_from_seq(max_seq) || gen == lru_gen_from_seq(max_seq - 1);
+}
+
+static inline void lru_gen_update_size(struct lruvec *lruvec, struct folio *folio,
+ int old_gen, int new_gen)
+{
+ int type = folio_is_file_lru(folio);
+ int zone = folio_zonenum(folio);
+ int delta = folio_nr_pages(folio);
+ enum lru_list lru = type * LRU_INACTIVE_FILE;
+ struct lru_gen_struct *lrugen = &lruvec->lrugen;
+
+ VM_WARN_ON_ONCE(old_gen != -1 && old_gen >= MAX_NR_GENS);
+ VM_WARN_ON_ONCE(new_gen != -1 && new_gen >= MAX_NR_GENS);
+ VM_WARN_ON_ONCE(old_gen == -1 && new_gen == -1);
+
+ if (old_gen >= 0)
+ WRITE_ONCE(lrugen->nr_pages[old_gen][type][zone],
+ lrugen->nr_pages[old_gen][type][zone] - delta);
+ if (new_gen >= 0)
+ WRITE_ONCE(lrugen->nr_pages[new_gen][type][zone],
+ lrugen->nr_pages[new_gen][type][zone] + delta);
+
+ /* addition */
+ if (old_gen < 0) {
+ if (lru_gen_is_active(lruvec, new_gen))
+ lru += LRU_ACTIVE;
+ __update_lru_size(lruvec, lru, zone, delta);
+ return;
+ }
+
+ /* deletion */
+ if (new_gen < 0) {
+ if (lru_gen_is_active(lruvec, old_gen))
+ lru += LRU_ACTIVE;
+ __update_lru_size(lruvec, lru, zone, -delta);
+ return;
+ }
+
+ /* promotion */
+ if (!lru_gen_is_active(lruvec, old_gen) && lru_gen_is_active(lruvec, new_gen)) {
+ __update_lru_size(lruvec, lru, zone, -delta);
+ __update_lru_size(lruvec, lru + LRU_ACTIVE, zone, delta);
+ }
+
+ /* demotion requires isolation, e.g., lru_deactivate_fn() */
+ VM_WARN_ON_ONCE(lru_gen_is_active(lruvec, old_gen) && !lru_gen_is_active(lruvec, new_gen));
+}
+
+static inline bool lru_gen_add_folio(struct lruvec *lruvec, struct folio *folio, bool reclaiming)
+{
+ unsigned long seq;
+ unsigned long flags;
+ int gen = folio_lru_gen(folio);
+ int type = folio_is_file_lru(folio);
+ int zone = folio_zonenum(folio);
+ struct lru_gen_struct *lrugen = &lruvec->lrugen;
+
+ VM_WARN_ON_ONCE_FOLIO(gen != -1, folio);
+
+ if (folio_test_unevictable(folio) || !lrugen->enabled)
+ return false;
+ /*
+ * There are three common cases for this page:
+ * 1. If it's hot, e.g., freshly faulted in or previously hot and
+ * migrated, add it to the youngest generation.
+ * 2. If it's cold but can't be evicted immediately, i.e., an anon page
+ * not in swapcache or a dirty page pending writeback, add it to the
+ * second oldest generation.
+ * 3. Everything else (clean, cold) is added to the oldest generation.
+ */
+ if (folio_test_active(folio))
+ seq = lrugen->max_seq;
+ else if ((type == LRU_GEN_ANON && !folio_test_swapcache(folio)) ||
+ (folio_test_reclaim(folio) &&
+ (folio_test_dirty(folio) || folio_test_writeback(folio))))
+ seq = lrugen->min_seq[type] + 1;
+ else
+ seq = lrugen->min_seq[type];
+
+ gen = lru_gen_from_seq(seq);
+ flags = (gen + 1UL) << LRU_GEN_PGOFF;
+ /* see the comment on MIN_NR_GENS about PG_active */
+ set_mask_bits(&folio->flags, LRU_GEN_MASK | BIT(PG_active), flags);
+
+ lru_gen_update_size(lruvec, folio, -1, gen);
+ /* for folio_rotate_reclaimable() */
+ if (reclaiming)
+ list_add_tail(&folio->lru, &lrugen->lists[gen][type][zone]);
+ else
+ list_add(&folio->lru, &lrugen->lists[gen][type][zone]);
+
+ return true;
+}
+
+static inline bool lru_gen_del_folio(struct lruvec *lruvec, struct folio *folio, bool reclaiming)
+{
+ unsigned long flags;
+ int gen = folio_lru_gen(folio);
+
+ if (gen < 0)
+ return false;
+
+ VM_WARN_ON_ONCE_FOLIO(folio_test_active(folio), folio);
+ VM_WARN_ON_ONCE_FOLIO(folio_test_unevictable(folio), folio);
+
+ /* for folio_migrate_flags() */
+ flags = !reclaiming && lru_gen_is_active(lruvec, gen) ? BIT(PG_active) : 0;
+ flags = set_mask_bits(&folio->flags, LRU_GEN_MASK, flags);
+ gen = ((flags & LRU_GEN_MASK) >> LRU_GEN_PGOFF) - 1;
+
+ lru_gen_update_size(lruvec, folio, gen, -1);
+ list_del(&folio->lru);
+
+ return true;
+}
+
+#else /* !CONFIG_LRU_GEN */
+
+static inline bool lru_gen_enabled(void)
+{
+ return false;
+}
+
+static inline bool lru_gen_in_fault(void)
+{
+ return false;
+}
+
+static inline bool lru_gen_add_folio(struct lruvec *lruvec, struct folio *folio, bool reclaiming)
+{
+ return false;
+}
+
+static inline bool lru_gen_del_folio(struct lruvec *lruvec, struct folio *folio, bool reclaiming)
+{
+ return false;
+}
+
+#endif /* CONFIG_LRU_GEN */
+
static __always_inline
void lruvec_add_folio(struct lruvec *lruvec, struct folio *folio)
{
enum lru_list lru = folio_lru_list(folio);
+ if (lru_gen_add_folio(lruvec, folio, false))
+ return;
+
update_lru_size(lruvec, lru, folio_zonenum(folio),
folio_nr_pages(folio));
if (lru != LRU_UNEVICTABLE)
@@ -116,23 +334,23 @@ void lruvec_add_folio_tail(struct lruvec *lruvec, struct folio *folio)
{
enum lru_list lru = folio_lru_list(folio);
+ if (lru_gen_add_folio(lruvec, folio, true))
+ return;
+
update_lru_size(lruvec, lru, folio_zonenum(folio),
folio_nr_pages(folio));
/* This is not expected to be used on LRU_UNEVICTABLE */
list_add_tail(&folio->lru, &lruvec->lists[lru]);
}
-static __always_inline void add_page_to_lru_list_tail(struct page *page,
- struct lruvec *lruvec)
-{
- lruvec_add_folio_tail(lruvec, page_folio(page));
-}
-
static __always_inline
void lruvec_del_folio(struct lruvec *lruvec, struct folio *folio)
{
enum lru_list lru = folio_lru_list(folio);
+ if (lru_gen_del_folio(lruvec, folio, false))
+ return;
+
if (lru != LRU_UNEVICTABLE)
list_del(&folio->lru);
update_lru_size(lruvec, lru, folio_zonenum(folio),
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index cf97f3884fda..500e536796ca 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -9,6 +9,7 @@
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/rbtree.h>
+#include <linux/maple_tree.h>
#include <linux/rwsem.h>
#include <linux/completion.h>
#include <linux/cpumask.h>
@@ -223,6 +224,18 @@ struct page {
not kmapped, ie. highmem) */
#endif /* WANT_PAGE_VIRTUAL */
+#ifdef CONFIG_KMSAN
+ /*
+ * KMSAN metadata for this page:
+ * - shadow page: every bit indicates whether the corresponding
+ * bit of the original page is initialized (0) or not (1);
+ * - origin page: every 4 bytes contain an id of the stack trace
+ * where the uninitialized value was created.
+ */
+ struct page *kmsan_shadow;
+ struct page *kmsan_origin;
+#endif
+
#ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
int _last_cpupid;
#endif
@@ -244,6 +257,13 @@ struct page {
* @_refcount: Do not access this member directly. Use folio_ref_count()
* to find how many references there are to this folio.
* @memcg_data: Memory Control Group data.
+ * @_flags_1: For large folios, additional page flags.
+ * @__head: Points to the folio. Do not use.
+ * @_folio_dtor: Which destructor to use for this folio.
+ * @_folio_order: Do not use directly, call folio_order().
+ * @_total_mapcount: Do not use directly, call folio_entire_mapcount().
+ * @_pincount: Do not use directly, call folio_maybe_dma_pinned().
+ * @_folio_nr_pages: Do not use directly, call folio_nr_pages().
*
* A folio is a physically, virtually and logically contiguous set
* of bytes. It is a power-of-two in size, and it is aligned to that
@@ -282,9 +302,17 @@ struct folio {
};
struct page page;
};
+ unsigned long _flags_1;
+ unsigned long __head;
+ unsigned char _folio_dtor;
+ unsigned char _folio_order;
+ atomic_t _total_mapcount;
+ atomic_t _pincount;
+#ifdef CONFIG_64BIT
+ unsigned int _folio_nr_pages;
+#endif
};
-static_assert(sizeof(struct page) == sizeof(struct folio));
#define FOLIO_MATCH(pg, fl) \
static_assert(offsetof(struct page, pg) == offsetof(struct folio, fl))
FOLIO_MATCH(flags, flags);
@@ -299,6 +327,19 @@ FOLIO_MATCH(_refcount, _refcount);
FOLIO_MATCH(memcg_data, memcg_data);
#endif
#undef FOLIO_MATCH
+#define FOLIO_MATCH(pg, fl) \
+ static_assert(offsetof(struct folio, fl) == \
+ offsetof(struct page, pg) + sizeof(struct page))
+FOLIO_MATCH(flags, _flags_1);
+FOLIO_MATCH(compound_head, __head);
+FOLIO_MATCH(compound_dtor, _folio_dtor);
+FOLIO_MATCH(compound_order, _folio_order);
+FOLIO_MATCH(compound_mapcount, _total_mapcount);
+FOLIO_MATCH(compound_pincount, _pincount);
+#ifdef CONFIG_64BIT
+FOLIO_MATCH(compound_nr, _folio_nr_pages);
+#endif
+#undef FOLIO_MATCH
static inline atomic_t *folio_mapcount_ptr(struct folio *folio)
{
@@ -407,21 +448,6 @@ struct vm_area_struct {
unsigned long vm_end; /* The first byte after our end address
within vm_mm. */
- /* linked list of VM areas per task, sorted by address */
- struct vm_area_struct *vm_next, *vm_prev;
-
- struct rb_node vm_rb;
-
- /*
- * Largest free memory gap in bytes to the left of this VMA.
- * Either between this VMA and vma->vm_prev, or between one of the
- * VMAs below us in the VMA rbtree and its ->vm_prev. This helps
- * get_unmapped_area find a free area of the right size.
- */
- unsigned long rb_subtree_gap;
-
- /* Second cache line starts here. */
-
struct mm_struct *vm_mm; /* The address space we belong to. */
/*
@@ -485,9 +511,7 @@ struct vm_area_struct {
struct kioctx_table;
struct mm_struct {
struct {
- struct vm_area_struct *mmap; /* list of VMAs */
- struct rb_root mm_rb;
- u64 vmacache_seqnum; /* per-thread vmacache */
+ struct maple_tree mm_mt;
#ifdef CONFIG_MMU
unsigned long (*get_unmapped_area) (struct file *filp,
unsigned long addr, unsigned long len,
@@ -501,7 +525,6 @@ struct mm_struct {
unsigned long mmap_compat_legacy_base;
#endif
unsigned long task_size; /* size of task vm space */
- unsigned long highest_vm_end; /* highest vma end address */
pgd_t * pgd;
#ifdef CONFIG_MEMBARRIER
@@ -631,22 +654,22 @@ struct mm_struct {
#endif
#ifdef CONFIG_NUMA_BALANCING
/*
- * numa_next_scan is the next time that the PTEs will be marked
- * pte_numa. NUMA hinting faults will gather statistics and
- * migrate pages to new nodes if necessary.
+ * numa_next_scan is the next time that PTEs will be remapped
+ * PROT_NONE to trigger NUMA hinting faults; such faults gather
+ * statistics and migrate pages to new nodes if necessary.
*/
unsigned long numa_next_scan;
- /* Restart point for scanning and setting pte_numa */
+ /* Restart point for scanning and remapping PTEs. */
unsigned long numa_scan_offset;
- /* numa_scan_seq prevents two threads setting pte_numa */
+ /* numa_scan_seq prevents two threads remapping PTEs. */
int numa_scan_seq;
#endif
/*
* An operation with batched TLB flushing is going on. Anything
* that can move process memory needs to flush the TLB when
- * moving a PROT_NONE or PROT_NUMA mapped page.
+ * moving a PROT_NONE mapped page.
*/
atomic_t tlb_flush_pending;
#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
@@ -671,7 +694,28 @@ struct mm_struct {
* merging.
*/
unsigned long ksm_merging_pages;
+ /*
+ * Represent how many pages are checked for ksm merging
+ * including merged and not merged.
+ */
+ unsigned long ksm_rmap_items;
+#endif
+#ifdef CONFIG_LRU_GEN
+ struct {
+ /* this mm_struct is on lru_gen_mm_list */
+ struct list_head list;
+ /*
+ * Set when switching to this mm_struct, as a hint of
+ * whether it has been used since the last time per-node
+ * page table walkers cleared the corresponding bits.
+ */
+ unsigned long bitmap;
+#ifdef CONFIG_MEMCG
+ /* points to the memcg of "owner" above */
+ struct mem_cgroup *memcg;
#endif
+ } lru_gen;
+#endif /* CONFIG_LRU_GEN */
} __randomize_layout;
/*
@@ -681,6 +725,7 @@ struct mm_struct {
unsigned long cpu_bitmap[];
};
+#define MM_MT_FLAGS (MT_FLAGS_ALLOC_RANGE | MT_FLAGS_LOCK_EXTERN)
extern struct mm_struct init_mm;
/* Pointer magic because the dynamic array size confuses some compilers. */
@@ -698,6 +743,87 @@ static inline cpumask_t *mm_cpumask(struct mm_struct *mm)
return (struct cpumask *)&mm->cpu_bitmap;
}
+#ifdef CONFIG_LRU_GEN
+
+struct lru_gen_mm_list {
+ /* mm_struct list for page table walkers */
+ struct list_head fifo;
+ /* protects the list above */
+ spinlock_t lock;
+};
+
+void lru_gen_add_mm(struct mm_struct *mm);
+void lru_gen_del_mm(struct mm_struct *mm);
+#ifdef CONFIG_MEMCG
+void lru_gen_migrate_mm(struct mm_struct *mm);
+#endif
+
+static inline void lru_gen_init_mm(struct mm_struct *mm)
+{
+ INIT_LIST_HEAD(&mm->lru_gen.list);
+ mm->lru_gen.bitmap = 0;
+#ifdef CONFIG_MEMCG
+ mm->lru_gen.memcg = NULL;
+#endif
+}
+
+static inline void lru_gen_use_mm(struct mm_struct *mm)
+{
+ /*
+ * When the bitmap is set, page reclaim knows this mm_struct has been
+ * used since the last time it cleared the bitmap. So it might be worth
+ * walking the page tables of this mm_struct to clear the accessed bit.
+ */
+ WRITE_ONCE(mm->lru_gen.bitmap, -1);
+}
+
+#else /* !CONFIG_LRU_GEN */
+
+static inline void lru_gen_add_mm(struct mm_struct *mm)
+{
+}
+
+static inline void lru_gen_del_mm(struct mm_struct *mm)
+{
+}
+
+#ifdef CONFIG_MEMCG
+static inline void lru_gen_migrate_mm(struct mm_struct *mm)
+{
+}
+#endif
+
+static inline void lru_gen_init_mm(struct mm_struct *mm)
+{
+}
+
+static inline void lru_gen_use_mm(struct mm_struct *mm)
+{
+}
+
+#endif /* CONFIG_LRU_GEN */
+
+struct vma_iterator {
+ struct ma_state mas;
+};
+
+#define VMA_ITERATOR(name, __mm, __addr) \
+ struct vma_iterator name = { \
+ .mas = { \
+ .tree = &(__mm)->mm_mt, \
+ .index = __addr, \
+ .node = MAS_START, \
+ }, \
+ }
+
+static inline void vma_iter_init(struct vma_iterator *vmi,
+ struct mm_struct *mm, unsigned long addr)
+{
+ vmi->mas.tree = &mm->mm_mt;
+ vmi->mas.index = addr;
+ vmi->mas.node = MAS_START;
+}
+
struct mmu_gather;
extern void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm);
extern void tlb_gather_mmu_fullmm(struct mmu_gather *tlb, struct mm_struct *mm);
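
A rough sketch of the pieces an mm_struct now needs at setup time, and of the "recently used" hint described above, under the assumption that the maple tree is initialised with mt_init_flags() and that lru_gen_use_mm() is invoked from the context-switch path; the helper names are purely illustrative:

/* Sketch, not the kernel's actual init path. */
static void example_mm_setup(struct mm_struct *mm)
{
	mt_init_flags(&mm->mm_mt, MM_MT_FLAGS);	/* replaces the rbtree/list setup */
	lru_gen_init_mm(mm);			/* no-op without CONFIG_LRU_GEN */
}

static void example_mm_switched_to(struct mm_struct *mm)
{
	/* hint that this mm may hold fresh accessed bits since the last walk */
	lru_gen_use_mm(mm);
}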
diff --git a/include/linux/mm_types_task.h b/include/linux/mm_types_task.h
index c1bc6731125c..0bb4b6da9993 100644
--- a/include/linux/mm_types_task.h
+++ b/include/linux/mm_types_task.h
@@ -25,18 +25,6 @@
#define ALLOC_SPLIT_PTLOCKS (SPINLOCK_SIZE > BITS_PER_LONG/8)
/*
- * The per task VMA cache array:
- */
-#define VMACACHE_BITS 2
-#define VMACACHE_SIZE (1U << VMACACHE_BITS)
-#define VMACACHE_MASK (VMACACHE_SIZE - 1)
-
-struct vmacache {
- u64 seqnum;
- struct vm_area_struct *vmas[VMACACHE_SIZE];
-};
-
-/*
* When updating this, please also update struct resident_page_types[] in
* kernel/fork.c
*/
diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h
index 8a30de08e913..c726ea781255 100644
--- a/include/linux/mmc/card.h
+++ b/include/linux/mmc/card.h
@@ -293,6 +293,7 @@ struct mmc_card {
#define MMC_QUIRK_BROKEN_IRQ_POLLING (1<<11) /* Polling SDIO_CCCR_INTx could create a fake interrupt */
#define MMC_QUIRK_TRIM_BROKEN (1<<12) /* Skip trim */
#define MMC_QUIRK_BROKEN_HPI (1<<13) /* Disable broken HPI support */
+#define MMC_QUIRK_BROKEN_SD_DISCARD (1<<14) /* Disable broken SD discard support */
bool reenable_cmdq; /* Re-enable Command Queue */
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
index eb8bc5b9b0b7..8fdd3cf971a3 100644
--- a/include/linux/mmc/host.h
+++ b/include/linux/mmc/host.h
@@ -476,7 +476,7 @@ struct mmc_host {
unsigned int sdio_irqs;
struct task_struct *sdio_irq_thread;
- struct delayed_work sdio_irq_work;
+ struct work_struct sdio_irq_work;
bool sdio_irq_pending;
atomic_t sdio_irq_thread_abort;
diff --git a/include/linux/mmc/sdio_ids.h b/include/linux/mmc/sdio_ids.h
index 53f0efa0bccf..74f9d9a6d330 100644
--- a/include/linux/mmc/sdio_ids.h
+++ b/include/linux/mmc/sdio_ids.h
@@ -74,6 +74,7 @@
#define SDIO_DEVICE_ID_BROADCOM_43362 0xa962
#define SDIO_DEVICE_ID_BROADCOM_43364 0xa9a4
#define SDIO_DEVICE_ID_BROADCOM_43430 0xa9a6
+#define SDIO_DEVICE_ID_BROADCOM_CYPRESS_43439 0xa9af
#define SDIO_DEVICE_ID_BROADCOM_43455 0xa9bf
#define SDIO_DEVICE_ID_BROADCOM_CYPRESS_43752 0xaae8
diff --git a/include/linux/mmdebug.h b/include/linux/mmdebug.h
index 15ae78cd2853..b8728d11c949 100644
--- a/include/linux/mmdebug.h
+++ b/include/linux/mmdebug.h
@@ -94,6 +94,12 @@ void dump_mm(const struct mm_struct *mm);
#define VM_WARN(cond, format...) BUILD_BUG_ON_INVALID(cond)
#endif
+#ifdef CONFIG_DEBUG_VM_IRQSOFF
+#define VM_WARN_ON_IRQS_ENABLED() WARN_ON_ONCE(!irqs_disabled())
+#else
+#define VM_WARN_ON_IRQS_ENABLED() do { } while (0)
+#endif
+
#ifdef CONFIG_DEBUG_VIRTUAL
#define VIRTUAL_BUG_ON(cond) BUG_ON(cond)
#else
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index e24b40c52468..5f74891556f3 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -24,10 +24,10 @@
#include <asm/page.h>
/* Free memory management - zoned buddy allocator. */
-#ifndef CONFIG_FORCE_MAX_ZONEORDER
+#ifndef CONFIG_ARCH_FORCE_MAX_ORDER
#define MAX_ORDER 11
#else
-#define MAX_ORDER CONFIG_FORCE_MAX_ZONEORDER
+#define MAX_ORDER CONFIG_ARCH_FORCE_MAX_ORDER
#endif
#define MAX_ORDER_NR_PAGES (1 << (MAX_ORDER - 1))
@@ -121,20 +121,6 @@ static inline bool free_area_empty(struct free_area *area, int migratetype)
struct pglist_data;
-/*
- * Add a wild amount of padding here to ensure data fall into separate
- * cachelines. There are very few zone structures in the machine, so space
- * consumption is not a concern here.
- */
-#if defined(CONFIG_SMP)
-struct zone_padding {
- char x[0];
-} ____cacheline_internodealigned_in_smp;
-#define ZONE_PADDING(name) struct zone_padding name;
-#else
-#define ZONE_PADDING(name)
-#endif
-
#ifdef CONFIG_NUMA
enum numa_stat_item {
NUMA_HIT, /* allocated in intended node */
@@ -216,11 +202,13 @@ enum node_stat_item {
NR_KERNEL_SCS_KB, /* measured in KiB */
#endif
NR_PAGETABLE, /* used for pagetables */
+ NR_SECONDARY_PAGETABLE, /* secondary pagetables, e.g. KVM pagetables */
#ifdef CONFIG_SWAP
NR_SWAPCACHE,
#endif
#ifdef CONFIG_NUMA_BALANCING
PGPROMOTE_SUCCESS, /* promote successfully */
+ PGPROMOTE_CANDIDATE, /* candidate pages to promote */
#endif
NR_VM_NODE_STAT_ITEMS
};
@@ -306,6 +294,8 @@ static inline bool is_active_lru(enum lru_list lru)
return (lru == LRU_ACTIVE_ANON || lru == LRU_ACTIVE_FILE);
}
+#define WORKINGSET_ANON 0
+#define WORKINGSET_FILE 1
#define ANON_AND_FILE 2
enum lruvec_flags {
@@ -314,6 +304,207 @@ enum lruvec_flags {
*/
};
+#endif /* !__GENERATING_BOUNDS_H */
+
+/*
+ * Evictable pages are divided into multiple generations. The youngest and the
+ * oldest generation numbers, max_seq and min_seq, are monotonically increasing.
+ * They form a sliding window of a variable size [MIN_NR_GENS, MAX_NR_GENS]. An
+ * offset within MAX_NR_GENS, i.e., gen, indexes the LRU list of the
+ * corresponding generation. The gen counter in folio->flags stores gen+1 while
+ * a page is on one of lrugen->lists[]. Otherwise it stores 0.
+ *
+ * A page is added to the youngest generation on faulting. The aging needs to
+ * check the accessed bit at least twice before handing this page over to the
+ * eviction. The first check takes care of the accessed bit set on the initial
+ * fault; the second check makes sure this page hasn't been used since then.
+ * This process, AKA second chance, requires a minimum of two generations,
+ * hence MIN_NR_GENS. And to maintain ABI compatibility with the active/inactive
+ * LRU, e.g., /proc/vmstat, these two generations are considered active; the
+ * rest of generations, if they exist, are considered inactive. See
+ * lru_gen_is_active().
+ *
+ * PG_active is always cleared while a page is on one of lrugen->lists[] so that
+ * the aging needs not to worry about it. And it's set again when a page
+ * considered active is isolated for non-reclaiming purposes, e.g., migration.
+ * See lru_gen_add_folio() and lru_gen_del_folio().
+ *
+ * MAX_NR_GENS is set to 4 so that the multi-gen LRU can support twice the
+ * number of categories of the active/inactive LRU when keeping track of
+ * accesses through page tables. This requires order_base_2(MAX_NR_GENS+1) bits
+ * in folio->flags.
+ */
+#define MIN_NR_GENS 2U
+#define MAX_NR_GENS 4U
+
+/*
+ * Each generation is divided into multiple tiers. A page accessed N times
+ * through file descriptors is in tier order_base_2(N). A page in the first tier
+ * (N=0,1) is marked by PG_referenced unless it was faulted in through page
+ * tables or read ahead. A page in any other tier (N>1) is marked by
+ * PG_referenced and PG_workingset. This implies a minimum of two tiers is
+ * supported without using additional bits in folio->flags.
+ *
+ * In contrast to moving across generations which requires the LRU lock, moving
+ * across tiers only involves atomic operations on folio->flags and therefore
+ * has a negligible cost in the buffered access path. In the eviction path,
+ * comparisons of refaulted/(evicted+protected) from the first tier and the
+ * rest infer whether pages accessed multiple times through file descriptors
+ * are statistically hot and thus worth protecting.
+ *
+ * MAX_NR_TIERS is set to 4 so that the multi-gen LRU can support twice the
+ * number of categories of the active/inactive LRU when keeping track of
+ * accesses through file descriptors. This uses MAX_NR_TIERS-2 spare bits in
+ * folio->flags.
+ */
+#define MAX_NR_TIERS 4U
+
+#ifndef __GENERATING_BOUNDS_H
+
+struct lruvec;
+struct page_vma_mapped_walk;
+
+#define LRU_GEN_MASK ((BIT(LRU_GEN_WIDTH) - 1) << LRU_GEN_PGOFF)
+#define LRU_REFS_MASK ((BIT(LRU_REFS_WIDTH) - 1) << LRU_REFS_PGOFF)
+
+#ifdef CONFIG_LRU_GEN
+
+enum {
+ LRU_GEN_ANON,
+ LRU_GEN_FILE,
+};
+
+enum {
+ LRU_GEN_CORE,
+ LRU_GEN_MM_WALK,
+ LRU_GEN_NONLEAF_YOUNG,
+ NR_LRU_GEN_CAPS
+};
+
+#define MIN_LRU_BATCH BITS_PER_LONG
+#define MAX_LRU_BATCH (MIN_LRU_BATCH * 64)
+
+/* whether to keep historical stats from evicted generations */
+#ifdef CONFIG_LRU_GEN_STATS
+#define NR_HIST_GENS MAX_NR_GENS
+#else
+#define NR_HIST_GENS 1U
+#endif
+
+/*
+ * The youngest generation number is stored in max_seq for both anon and file
+ * types as they are aged on an equal footing. The oldest generation numbers are
+ * stored in min_seq[] separately for anon and file types as clean file pages
+ * can be evicted regardless of swap constraints.
+ *
+ * Normally anon and file min_seq are in sync. But if swapping is constrained,
+ * e.g., out of swap space, file min_seq is allowed to advance and leave anon
+ * min_seq behind.
+ *
+ * The number of pages in each generation is eventually consistent and therefore
+ * can be transiently negative when reset_batch_size() is pending.
+ */
+struct lru_gen_struct {
+ /* the aging increments the youngest generation number */
+ unsigned long max_seq;
+ /* the eviction increments the oldest generation numbers */
+ unsigned long min_seq[ANON_AND_FILE];
+ /* the birth time of each generation in jiffies */
+ unsigned long timestamps[MAX_NR_GENS];
+ /* the multi-gen LRU lists, lazily sorted on eviction */
+ struct list_head lists[MAX_NR_GENS][ANON_AND_FILE][MAX_NR_ZONES];
+ /* the multi-gen LRU sizes, eventually consistent */
+ long nr_pages[MAX_NR_GENS][ANON_AND_FILE][MAX_NR_ZONES];
+ /* the exponential moving average of refaulted */
+ unsigned long avg_refaulted[ANON_AND_FILE][MAX_NR_TIERS];
+ /* the exponential moving average of evicted+protected */
+ unsigned long avg_total[ANON_AND_FILE][MAX_NR_TIERS];
+ /* the first tier doesn't need protection, hence the minus one */
+ unsigned long protected[NR_HIST_GENS][ANON_AND_FILE][MAX_NR_TIERS - 1];
+ /* can be modified without holding the LRU lock */
+ atomic_long_t evicted[NR_HIST_GENS][ANON_AND_FILE][MAX_NR_TIERS];
+ atomic_long_t refaulted[NR_HIST_GENS][ANON_AND_FILE][MAX_NR_TIERS];
+ /* whether the multi-gen LRU is enabled */
+ bool enabled;
+};
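To make the max_seq/min_seq relationship concrete, a minimal sketch (the helper name is hypothetical):

	/*
	 * Number of generations currently tracked for a type; always within
	 * MIN_NR_GENS..MAX_NR_GENS.
	 */
	static inline unsigned long nr_gens(const struct lru_gen_struct *lrugen, int type)
	{
		return lrugen->max_seq - lrugen->min_seq[type] + 1;
	}
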
+
+enum {
+ MM_LEAF_TOTAL, /* total leaf entries */
+ MM_LEAF_OLD, /* old leaf entries */
+ MM_LEAF_YOUNG, /* young leaf entries */
+ MM_NONLEAF_TOTAL, /* total non-leaf entries */
+ MM_NONLEAF_FOUND, /* non-leaf entries found in Bloom filters */
+ MM_NONLEAF_ADDED, /* non-leaf entries added to Bloom filters */
+ NR_MM_STATS
+};
+
+/* double-buffering Bloom filters */
+#define NR_BLOOM_FILTERS 2
+
+struct lru_gen_mm_state {
+ /* set to max_seq after each iteration */
+ unsigned long seq;
+ /* where the current iteration continues (inclusive) */
+ struct list_head *head;
+ /* where the last iteration ended (exclusive) */
+ struct list_head *tail;
+ /* to wait for the last page table walker to finish */
+ struct wait_queue_head wait;
+ /* Bloom filters flip after each iteration */
+ unsigned long *filters[NR_BLOOM_FILTERS];
+ /* the mm stats for debugging */
+ unsigned long stats[NR_HIST_GENS][NR_MM_STATS];
+ /* the number of concurrent page table walkers */
+ int nr_walkers;
+};
+
+struct lru_gen_mm_walk {
+ /* the lruvec under reclaim */
+ struct lruvec *lruvec;
+ /* unstable max_seq from lru_gen_struct */
+ unsigned long max_seq;
+ /* the next address within an mm to scan */
+ unsigned long next_addr;
+ /* to batch promoted pages */
+ int nr_pages[MAX_NR_GENS][ANON_AND_FILE][MAX_NR_ZONES];
+ /* to batch the mm stats */
+ int mm_stats[NR_MM_STATS];
+ /* total batched items */
+ int batched;
+ bool can_swap;
+ bool force_scan;
+};
+
+void lru_gen_init_lruvec(struct lruvec *lruvec);
+void lru_gen_look_around(struct page_vma_mapped_walk *pvmw);
+
+#ifdef CONFIG_MEMCG
+void lru_gen_init_memcg(struct mem_cgroup *memcg);
+void lru_gen_exit_memcg(struct mem_cgroup *memcg);
+#endif
+
+#else /* !CONFIG_LRU_GEN */
+
+static inline void lru_gen_init_lruvec(struct lruvec *lruvec)
+{
+}
+
+static inline void lru_gen_look_around(struct page_vma_mapped_walk *pvmw)
+{
+}
+
+#ifdef CONFIG_MEMCG
+static inline void lru_gen_init_memcg(struct mem_cgroup *memcg)
+{
+}
+
+static inline void lru_gen_exit_memcg(struct mem_cgroup *memcg)
+{
+}
+#endif
+
+#endif /* CONFIG_LRU_GEN */
+
struct lruvec {
struct list_head lists[NR_LRU_LISTS];
/* per lruvec lru_lock for memcg */
@@ -331,6 +522,12 @@ struct lruvec {
unsigned long refaults[ANON_AND_FILE];
/* Various lruvec state flags (enum lruvec_flags) */
unsigned long flags;
+#ifdef CONFIG_LRU_GEN
+ /* evictable pages divided into generations */
+ struct lru_gen_struct lrugen;
+ /* to concurrently iterate lru_gen_mm_list */
+ struct lru_gen_mm_state mm_state;
+#endif
#ifdef CONFIG_MEMCG
struct pglist_data *pgdat;
#endif
@@ -368,13 +565,6 @@ enum zone_watermarks {
#define NR_LOWORDER_PCP_LISTS (MIGRATE_PCPTYPES * (PAGE_ALLOC_COSTLY_ORDER + 1))
#define NR_PCP_LISTS (NR_LOWORDER_PCP_LISTS + NR_PCP_THP)
-/*
- * Shift to encode migratetype and order in the same integer, with order
- * in the least significant bits.
- */
-#define NR_PCP_ORDER_WIDTH 8
-#define NR_PCP_ORDER_MASK ((1<<NR_PCP_ORDER_WIDTH) - 1)
-
#define min_wmark_pages(z) (z->_watermark[WMARK_MIN] + z->watermark_boost)
#define low_wmark_pages(z) (z->_watermark[WMARK_LOW] + z->watermark_boost)
#define high_wmark_pages(z) (z->_watermark[WMARK_HIGH] + z->watermark_boost)
@@ -627,7 +817,7 @@ struct zone {
int initialized;
/* Write-intensive fields used from the page allocator */
- ZONE_PADDING(_pad1_)
+ CACHELINE_PADDING(_pad1_);
/* free areas of different sizes */
struct free_area free_area[MAX_ORDER];
@@ -639,7 +829,7 @@ struct zone {
spinlock_t lock;
/* Write-intensive fields used by compaction and vmstats. */
- ZONE_PADDING(_pad2_)
+ CACHELINE_PADDING(_pad2_);
/*
* When free pages are below this point, additional steps are taken
@@ -676,7 +866,7 @@ struct zone {
bool contiguous;
- ZONE_PADDING(_pad3_)
+ CACHELINE_PADDING(_pad3_);
/* Zone statistics */
atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
atomic_long_t vm_numa_event[NR_VM_NUMA_EVENT_ITEMS];
@@ -746,6 +936,8 @@ static inline bool zone_is_empty(struct zone *zone)
#define ZONES_PGOFF (NODES_PGOFF - ZONES_WIDTH)
#define LAST_CPUPID_PGOFF (ZONES_PGOFF - LAST_CPUPID_WIDTH)
#define KASAN_TAG_PGOFF (LAST_CPUPID_PGOFF - KASAN_TAG_WIDTH)
+#define LRU_GEN_PGOFF (KASAN_TAG_PGOFF - LRU_GEN_WIDTH)
+#define LRU_REFS_PGOFF (LRU_GEN_PGOFF - LRU_REFS_WIDTH)
/*
* Define the bit shifts to access each section. For non-existent
@@ -953,8 +1145,10 @@ typedef struct pglist_data {
atomic_t nr_writeback_throttled;/* nr of writeback-throttled tasks */
unsigned long nr_reclaim_start; /* nr pages written while throttled
* when throttling started. */
- struct task_struct *kswapd; /* Protected by
- mem_hotplug_begin/done() */
+#ifdef CONFIG_MEMORY_HOTPLUG
+ struct mutex kswapd_lock;
+#endif
+ struct task_struct *kswapd; /* Protected by kswapd_lock */
int kswapd_order;
enum zone_type kswapd_highest_zoneidx;
@@ -982,7 +1176,7 @@ typedef struct pglist_data {
#endif /* CONFIG_NUMA */
/* Write-intensive fields used by page reclaim */
- ZONE_PADDING(_pad1_)
+ CACHELINE_PADDING(_pad1_);
#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
/*
@@ -996,6 +1190,21 @@ typedef struct pglist_data {
struct deferred_split deferred_split_queue;
#endif
+#ifdef CONFIG_NUMA_BALANCING
+ /* start time in ms of current promote rate limit period */
+ unsigned int nbp_rl_start;
+ /* number of promote candidate pages at start time of current rate limit period */
+ unsigned long nbp_rl_nr_cand;
+ /* promote threshold in ms */
+ unsigned int nbp_threshold;
+ /* start time in ms of current promote threshold adjustment period */
+ unsigned int nbp_th_start;
+ /*
+ * number of promote candidate pages at start time of current promote
+ * threshold adjustment period
+ */
+ unsigned long nbp_th_nr_cand;
+#endif
/* Fields commonly accessed by the page reclaim scanner */
/*
@@ -1007,11 +1216,19 @@ typedef struct pglist_data {
unsigned long flags;
- ZONE_PADDING(_pad2_)
+#ifdef CONFIG_LRU_GEN
+ /* kswap mm walk data */
+ struct lru_gen_mm_walk mm_walk;
+#endif
+
+ CACHELINE_PADDING(_pad2_);
/* Per-node vmstats */
struct per_cpu_nodestat __percpu *per_cpu_nodestats;
atomic_long_t vm_stat[NR_VM_NODE_STAT_ITEMS];
+#ifdef CONFIG_NUMA
+ struct memory_tier __rcu *memtier;
+#endif
} pg_data_t;
#define node_present_pages(nid) (NODE_DATA(nid)->node_present_pages)
@@ -1025,11 +1242,6 @@ static inline unsigned long pgdat_end_pfn(pg_data_t *pgdat)
return pgdat->node_start_pfn + pgdat->node_spanned_pages;
}
-static inline bool pgdat_is_empty(pg_data_t *pgdat)
-{
- return !pgdat->node_start_pfn && !pgdat->node_spanned_pages;
-}
-
#include <linux/memory_hotplug.h>
void build_all_zonelists(pg_data_t *pgdat);
diff --git a/include/linux/module.h b/include/linux/module.h
index 518296ea7f73..ec61fb53979a 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -27,7 +27,6 @@
#include <linux/tracepoint-defs.h>
#include <linux/srcu.h>
#include <linux/static_call_types.h>
-#include <linux/cfi.h>
#include <linux/percpu.h>
#include <asm/module.h>
@@ -132,7 +131,7 @@ extern void cleanup_module(void);
{ return initfn; } \
int init_module(void) __copy(initfn) \
__attribute__((alias(#initfn))); \
- __CFI_ADDRESSABLE(init_module, __initdata);
+ ___ADDRESSABLE(init_module, __initdata);
/* This is only required if you want to be unloadable. */
#define module_exit(exitfn) \
@@ -140,7 +139,7 @@ extern void cleanup_module(void);
{ return exitfn; } \
void cleanup_module(void) __copy(exitfn) \
__attribute__((alias(#exitfn))); \
- __CFI_ADDRESSABLE(cleanup_module, __exitdata);
+ ___ADDRESSABLE(cleanup_module, __exitdata);
#endif
@@ -387,8 +386,9 @@ struct module {
const s32 *crcs;
unsigned int num_syms;
-#ifdef CONFIG_CFI_CLANG
- cfi_check_fn cfi_check;
+#ifdef CONFIG_ARCH_USES_CFI_TRAPS
+ s32 *kcfi_traps;
+ s32 *kcfi_traps_end;
#endif
/* Kernel parameters. */
diff --git a/include/linux/mroute.h b/include/linux/mroute.h
index 6cbbfe94348c..80b8400ab8b2 100644
--- a/include/linux/mroute.h
+++ b/include/linux/mroute.h
@@ -17,7 +17,7 @@ static inline int ip_mroute_opt(int opt)
}
int ip_mroute_setsockopt(struct sock *, int, sockptr_t, unsigned int);
-int ip_mroute_getsockopt(struct sock *, int, char __user *, int __user *);
+int ip_mroute_getsockopt(struct sock *, int, sockptr_t, sockptr_t);
int ipmr_ioctl(struct sock *sk, int cmd, void __user *arg);
int ipmr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg);
int ip_mr_init(void);
@@ -29,8 +29,8 @@ static inline int ip_mroute_setsockopt(struct sock *sock, int optname,
return -ENOPROTOOPT;
}
-static inline int ip_mroute_getsockopt(struct sock *sock, int optname,
- char __user *optval, int __user *optlen)
+static inline int ip_mroute_getsockopt(struct sock *sk, int optname,
+ sockptr_t optval, sockptr_t optlen)
{
return -ENOPROTOOPT;
}
diff --git a/include/linux/mroute6.h b/include/linux/mroute6.h
index bc351a85ce9b..8f2b307fb124 100644
--- a/include/linux/mroute6.h
+++ b/include/linux/mroute6.h
@@ -27,7 +27,7 @@ struct sock;
#ifdef CONFIG_IPV6_MROUTE
extern int ip6_mroute_setsockopt(struct sock *, int, sockptr_t, unsigned int);
-extern int ip6_mroute_getsockopt(struct sock *, int, char __user *, int __user *);
+extern int ip6_mroute_getsockopt(struct sock *, int, sockptr_t, sockptr_t);
extern int ip6_mr_input(struct sk_buff *skb);
extern int ip6mr_ioctl(struct sock *sk, int cmd, void __user *arg);
extern int ip6mr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg);
@@ -42,7 +42,7 @@ static inline int ip6_mroute_setsockopt(struct sock *sock, int optname,
static inline
int ip6_mroute_getsockopt(struct sock *sock,
- int optname, char __user *optval, int __user *optlen)
+ int optname, sockptr_t optval, sockptr_t optlen)
{
return -ENOPROTOOPT;
}
diff --git a/include/linux/mtd/mtd.h b/include/linux/mtd/mtd.h
index 955aee14b0f7..7c58c44662b8 100644
--- a/include/linux/mtd/mtd.h
+++ b/include/linux/mtd/mtd.h
@@ -40,6 +40,12 @@ struct mtd_erase_region_info {
unsigned long *lockmap; /* If keeping bitmap of locks */
};
+struct mtd_req_stats {
+ unsigned int uncorrectable_errors;
+ unsigned int corrected_bitflips;
+ unsigned int max_bitflips;
+};
+
/**
* struct mtd_oob_ops - oob operation operands
* @mode: operation mode
@@ -70,6 +76,7 @@ struct mtd_oob_ops {
uint32_t ooboffs;
uint8_t *datbuf;
uint8_t *oobbuf;
+ struct mtd_req_stats *stats;
};
/**
@@ -677,6 +684,7 @@ extern int mtd_device_unregister(struct mtd_info *master);
extern struct mtd_info *get_mtd_device(struct mtd_info *mtd, int num);
extern int __get_mtd_device(struct mtd_info *mtd);
extern void __put_mtd_device(struct mtd_info *mtd);
+extern struct mtd_info *of_get_mtd_device_by_node(struct device_node *np);
extern struct mtd_info *get_mtd_device_nm(const char *name);
extern void put_mtd_device(struct mtd_info *mtd);
diff --git a/include/linux/namei.h b/include/linux/namei.h
index caeb08a98536..00fee52df842 100644
--- a/include/linux/namei.h
+++ b/include/linux/namei.h
@@ -83,7 +83,7 @@ extern int follow_up(struct path *);
extern struct dentry *lock_rename(struct dentry *, struct dentry *);
extern void unlock_rename(struct dentry *, struct dentry *);
-extern int __must_check nd_jump_link(struct path *path);
+extern int __must_check nd_jump_link(const struct path *path);
static inline void nd_terminate_link(void *name, size_t len, size_t maxlen)
{
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 05d6f3facd5a..a36edb0ec199 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -253,11 +253,17 @@ struct netdev_hw_addr_list {
#define netdev_uc_empty(dev) netdev_hw_addr_list_empty(&(dev)->uc)
#define netdev_for_each_uc_addr(ha, dev) \
netdev_hw_addr_list_for_each(ha, &(dev)->uc)
+#define netdev_for_each_synced_uc_addr(_ha, _dev) \
+ netdev_for_each_uc_addr((_ha), (_dev)) \
+ if ((_ha)->sync_cnt)
#define netdev_mc_count(dev) netdev_hw_addr_list_count(&(dev)->mc)
#define netdev_mc_empty(dev) netdev_hw_addr_list_empty(&(dev)->mc)
#define netdev_for_each_mc_addr(ha, dev) \
netdev_hw_addr_list_for_each(ha, &(dev)->mc)
+#define netdev_for_each_synced_mc_addr(_ha, _dev) \
+ netdev_for_each_mc_addr((_ha), (_dev)) \
+ if ((_ha)->sync_cnt)
struct hh_cache {
unsigned int hh_len;
@@ -546,8 +552,8 @@ static inline bool napi_if_scheduled_mark_missed(struct napi_struct *n)
{
unsigned long val, new;
+ val = READ_ONCE(n->state);
do {
- val = READ_ONCE(n->state);
if (val & NAPIF_STATE_DISABLE)
return true;
@@ -555,7 +561,7 @@ static inline bool napi_if_scheduled_mark_missed(struct napi_struct *n)
return false;
new = val | NAPIF_STATE_MISSED;
- } while (cmpxchg(&n->state, val, new) != val);
+ } while (!try_cmpxchg(&n->state, &val, new));
return true;
}
@@ -934,6 +940,7 @@ struct net_device_path_ctx {
};
enum tc_setup_type {
+ TC_QUERY_CAPS,
TC_SETUP_QDISC_MQPRIO,
TC_SETUP_CLSU32,
TC_SETUP_CLSFLOWER,
@@ -1851,7 +1858,6 @@ enum netdev_ml_priv_type {
* @tipc_ptr: TIPC specific data
* @atalk_ptr: AppleTalk link
* @ip_ptr: IPv4 specific data
- * @dn_ptr: DECnet specific data
* @ip6_ptr: IPv6 specific data
* @ax25_ptr: AX.25 specific data
* @ieee80211_ptr: IEEE 802.11 specific data, assign before registering
@@ -2147,9 +2153,6 @@ struct net_device {
#if IS_ENABLED(CONFIG_ATALK)
void *atalk_ptr;
#endif
-#if IS_ENABLED(CONFIG_DECNET)
- struct dn_dev __rcu *dn_ptr;
-#endif
#if IS_ENABLED(CONFIG_AX25)
void *ax25_ptr;
#endif
@@ -2551,16 +2554,15 @@ void netif_napi_add_weight(struct net_device *dev, struct napi_struct *napi,
* @dev: network device
* @napi: NAPI context
* @poll: polling function
- * @weight: default weight
*
* netif_napi_add() must be used to initialize a NAPI context prior to calling
* *any* of the other NAPI-related functions.
*/
static inline void
netif_napi_add(struct net_device *dev, struct napi_struct *napi,
- int (*poll)(struct napi_struct *, int), int weight)
+ int (*poll)(struct napi_struct *, int))
{
- netif_napi_add_weight(dev, napi, poll, weight);
+ netif_napi_add_weight(dev, napi, poll, NAPI_POLL_WEIGHT);
}
static inline void
@@ -2573,8 +2575,6 @@ netif_napi_add_tx_weight(struct net_device *dev,
netif_napi_add_weight(dev, napi, poll, weight);
}
-#define netif_tx_napi_add netif_napi_add_tx_weight
-
/**
* netif_napi_add_tx() - initialize a NAPI context to be used for Tx only
* @dev: network device
@@ -3357,6 +3357,16 @@ static inline void netdev_txq_bql_complete_prefetchw(struct netdev_queue *dev_qu
#endif
}
+/**
+ * netdev_tx_sent_queue - report the number of bytes queued to a given tx queue
+ * @dev_queue: network device queue
+ * @bytes: number of bytes queued to the device queue
+ *
+ * Report the number of bytes queued for sending/completion to the network
+ * device hardware queue. @bytes should be a good approximation and should
+ * exactly match netdev_completed_queue() @bytes.
+ * This is typically called once per packet, from ndo_start_xmit().
+ */
static inline void netdev_tx_sent_queue(struct netdev_queue *dev_queue,
unsigned int bytes)
{
@@ -3402,13 +3412,14 @@ static inline bool __netdev_tx_sent_queue(struct netdev_queue *dev_queue,
}
/**
- * netdev_sent_queue - report the number of bytes queued to hardware
- * @dev: network device
- * @bytes: number of bytes queued to the hardware device queue
+ * netdev_sent_queue - report the number of bytes queued to hardware
+ * @dev: network device
+ * @bytes: number of bytes queued to the hardware device queue
*
- * Report the number of bytes queued for sending/completion to the network
- * device hardware queue. @bytes should be a good approximation and should
- * exactly match netdev_completed_queue() @bytes
+ * Report the number of bytes queued for sending/completion to the network
+ * device hardware queue #0. @bytes should be a good approximation and should
+ * exactly match netdev_completed_queue() @bytes.
+ * This is typically called once per packet, from ndo_start_xmit().
*/
static inline void netdev_sent_queue(struct net_device *dev, unsigned int bytes)
{
@@ -3423,6 +3434,15 @@ static inline bool __netdev_sent_queue(struct net_device *dev,
xmit_more);
}
+/**
+ * netdev_tx_completed_queue - report number of packets/bytes at TX completion.
+ * @dev_queue: network device queue
+ * @pkts: number of packets (currently ignored)
+ * @bytes: number of bytes dequeued from the device queue
+ *
+ * Must be called at most once per TX completion round (and not per
+ * individual packet), so that BQL can adjust its limits appropriately.
+ */
static inline void netdev_tx_completed_queue(struct netdev_queue *dev_queue,
unsigned int pkts, unsigned int bytes)
{
@@ -3643,9 +3663,8 @@ static inline bool netif_attr_test_online(unsigned long j,
static inline unsigned int netif_attrmask_next(int n, const unsigned long *srcp,
unsigned int nr_bits)
{
- /* -1 is a legal arg here. */
- if (n != -1)
- cpu_max_bits_warn(n, nr_bits);
+ /* n is a prior cpu */
+ cpu_max_bits_warn(n + 1, nr_bits);
if (srcp)
return find_next_bit(srcp, nr_bits, n + 1);
@@ -3666,9 +3685,8 @@ static inline int netif_attrmask_next_and(int n, const unsigned long *src1p,
const unsigned long *src2p,
unsigned int nr_bits)
{
- /* -1 is a legal arg here. */
- if (n != -1)
- cpu_max_bits_warn(n, nr_bits);
+ /* n is a prior cpu */
+ cpu_max_bits_warn(n + 1, nr_bits);
if (src1p && src2p)
return find_next_and_bit(src1p, src2p, nr_bits, n + 1);
@@ -3802,6 +3820,7 @@ void netif_receive_skb_list(struct list_head *head);
gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb);
void napi_gro_flush(struct napi_struct *napi, bool flush_old);
struct sk_buff *napi_get_frags(struct napi_struct *napi);
+void napi_get_frags_check(struct napi_struct *napi);
gro_result_t napi_gro_frags(struct napi_struct *napi);
struct packet_offload *gro_find_receive_by_type(__be16 type);
struct packet_offload *gro_find_complete_by_type(__be16 type);
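The kernel-doc added above pairs netdev_tx_sent_queue() with netdev_tx_completed_queue(); a minimal driver-style sketch of that pairing (driver names are hypothetical):

	#include <linux/netdevice.h>

	/* Called once per packet from the driver's ndo_start_xmit(). */
	static netdev_tx_t foo_start_xmit(struct sk_buff *skb, struct net_device *dev)
	{
		struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);

		/* ... hand the skb to hardware ... */
		netdev_tx_sent_queue(txq, skb->len);
		return NETDEV_TX_OK;
	}

	/* Called at most once per TX completion round, not per packet. */
	static void foo_tx_complete(struct net_device *dev, unsigned int pkts,
				    unsigned int bytes)
	{
		struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);

		netdev_tx_completed_queue(txq, pkts, bytes);
	}
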
diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h
index c2c6f332fb90..d8817d381c14 100644
--- a/include/linux/netfilter.h
+++ b/include/linux/netfilter.h
@@ -243,11 +243,6 @@ static inline int nf_hook(u_int8_t pf, unsigned int hook, struct net *net,
hook_head = rcu_dereference(net->nf.hooks_bridge[hook]);
#endif
break;
-#if IS_ENABLED(CONFIG_DECNET)
- case NFPROTO_DECNET:
- hook_head = rcu_dereference(net->nf.hooks_decnet[hook]);
- break;
-#endif
default:
WARN_ON_ONCE(1);
break;
diff --git a/include/linux/netfilter_defs.h b/include/linux/netfilter_defs.h
index 8dddfb151f00..a5f7bef1b3a4 100644
--- a/include/linux/netfilter_defs.h
+++ b/include/linux/netfilter_defs.h
@@ -7,14 +7,6 @@
/* in/out/forward only */
#define NF_ARP_NUMHOOKS 3
-/* max hook is NF_DN_ROUTE (6), also see uapi/linux/netfilter_decnet.h */
-#define NF_DN_NUMHOOKS 7
-
-#if IS_ENABLED(CONFIG_DECNET)
-/* Largest hook number + 1, see uapi/linux/netfilter_decnet.h */
-#define NF_MAX_HOOKS NF_DN_NUMHOOKS
-#else
#define NF_MAX_HOOKS NF_INET_NUMHOOKS
-#endif
#endif
diff --git a/include/linux/netlink.h b/include/linux/netlink.h
index bda1c385cffb..d51e041d2242 100644
--- a/include/linux/netlink.h
+++ b/include/linux/netlink.h
@@ -71,6 +71,8 @@ netlink_kernel_create(struct net *net, int unit, struct netlink_kernel_cfg *cfg)
* %NL_SET_ERR_MSG
* @bad_attr: attribute with error
* @policy: policy for a bad attribute
+ * @miss_type: attribute type which was missing
+ * @miss_nest: nest missing an attribute (%NULL if missing top level attr)
* @cookie: cookie data to return to userspace (for success)
* @cookie_len: actual cookie data length
*/
@@ -78,6 +80,8 @@ struct netlink_ext_ack {
const char *_msg;
const struct nlattr *bad_attr;
const struct nla_policy *policy;
+ const struct nlattr *miss_nest;
+ u16 miss_type;
u8 cookie[NETLINK_MAX_COOKIE_LEN];
u8 cookie_len;
};
@@ -126,6 +130,26 @@ struct netlink_ext_ack {
#define NL_SET_ERR_MSG_ATTR(extack, attr, msg) \
NL_SET_ERR_MSG_ATTR_POL(extack, attr, NULL, msg)
+#define NL_SET_ERR_ATTR_MISS(extack, nest, type) do { \
+ struct netlink_ext_ack *__extack = (extack); \
+ \
+ if (__extack) { \
+ __extack->miss_nest = (nest); \
+ __extack->miss_type = (type); \
+ } \
+} while (0)
+
+#define NL_REQ_ATTR_CHECK(extack, nest, tb, type) ({ \
+ struct nlattr **__tb = (tb); \
+ u32 __attr = (type); \
+ int __retval; \
+ \
+ __retval = !__tb[__attr]; \
+ if (__retval) \
+ NL_SET_ERR_ATTR_MISS((extack), (nest), __attr); \
+ __retval; \
+})
+
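A hypothetical sketch of how the new NL_REQ_ATTR_CHECK() helper might be used in a netlink doit handler (FOO_ATTR_BAR and the handler are made up for illustration):

	static int foo_doit(struct sk_buff *skb, struct genl_info *info)
	{
		struct nlattr **tb = info->attrs;

		/* On failure, the missing attribute type is reported via extack. */
		if (NL_REQ_ATTR_CHECK(info->extack, NULL, tb, FOO_ATTR_BAR))
			return -EINVAL;

		/* ... use tb[FOO_ATTR_BAR] ... */
		return 0;
	}
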
static inline void nl_set_extack_cookie_u64(struct netlink_ext_ack *extack,
u64 cookie)
{
diff --git a/include/linux/node.h b/include/linux/node.h
index 40d641a8bfb0..427a5975cf40 100644
--- a/include/linux/node.h
+++ b/include/linux/node.h
@@ -2,15 +2,15 @@
/*
* include/linux/node.h - generic node definition
*
- * This is mainly for topological representation. We define the
- * basic 'struct node' here, which can be embedded in per-arch
+ * This is mainly for topological representation. We define the
+ * basic 'struct node' here, which can be embedded in per-arch
* definitions of processors.
*
* Basic handling of the devices is done in drivers/base/node.c
- * and system devices are handled in drivers/base/sys.c.
+ * and system devices are handled in drivers/base/sys.c.
*
* Nodes are exported via driverfs in the class/node/devices/
- * directory.
+ * directory.
*/
#ifndef _LINUX_NODE_H_
#define _LINUX_NODE_H_
@@ -18,7 +18,6 @@
#include <linux/device.h>
#include <linux/cpumask.h>
#include <linux/list.h>
-#include <linux/workqueue.h>
/**
* struct node_hmem_attrs - heterogeneous memory performance attributes
@@ -84,10 +83,6 @@ static inline void node_set_perf_attrs(unsigned int nid,
struct node {
struct device dev;
struct list_head access_list;
-
-#if defined(CONFIG_MEMORY_HOTPLUG) && defined(CONFIG_HUGETLBFS)
- struct work_struct node_work;
-#endif
#ifdef CONFIG_HMEM_REPORTING
struct list_head cache_attrs;
struct device *cache_dev;
@@ -96,7 +91,6 @@ struct node {
struct memory_block;
extern struct node *node_devices[];
-typedef void (*node_registration_func_t)(struct node *);
#if defined(CONFIG_MEMORY_HOTPLUG) && defined(CONFIG_NUMA)
void register_memory_blocks_under_node(int nid, unsigned long start_pfn,
@@ -144,11 +138,6 @@ extern void unregister_memory_block_under_nodes(struct memory_block *mem_blk);
extern int register_memory_node_under_compute_node(unsigned int mem_nid,
unsigned int cpu_nid,
unsigned access);
-
-#ifdef CONFIG_HUGETLBFS
-extern void register_hugetlbfs_with_node(node_registration_func_t doregister,
- node_registration_func_t unregister);
-#endif
#else
static inline void node_dev_init(void)
{
@@ -176,18 +165,8 @@ static inline int unregister_cpu_under_node(unsigned int cpu, unsigned int nid)
static inline void unregister_memory_block_under_nodes(struct memory_block *mem_blk)
{
}
-
-static inline void register_hugetlbfs_with_node(node_registration_func_t reg,
- node_registration_func_t unreg)
-{
-}
#endif
#define to_node(device) container_of(device, struct node, dev)
-static inline bool node_is_toptier(int node)
-{
- return node_state(node, N_CPU);
-}
-
#endif /* _LINUX_NODE_H_ */
diff --git a/include/linux/nodemask.h b/include/linux/nodemask.h
index 4b71a96190a8..378956c93c94 100644
--- a/include/linux/nodemask.h
+++ b/include/linux/nodemask.h
@@ -493,6 +493,7 @@ static inline int num_node_state(enum node_states state)
#define first_online_node 0
#define first_memory_node 0
#define next_online_node(nid) (MAX_NUMNODES)
+#define next_memory_node(nid) (MAX_NUMNODES)
#define nr_node_ids 1U
#define nr_online_nodes 1U
@@ -504,12 +505,20 @@ static inline int num_node_state(enum node_states state)
static inline int node_random(const nodemask_t *maskp)
{
#if defined(CONFIG_NUMA) && (MAX_NUMNODES > 1)
- int w, bit = NUMA_NO_NODE;
+ int w, bit;
w = nodes_weight(*maskp);
- if (w)
- bit = bitmap_ord_to_pos(maskp->bits,
- get_random_int() % w, MAX_NUMNODES);
+ switch (w) {
+ case 0:
+ bit = NUMA_NO_NODE;
+ break;
+ case 1:
+ bit = first_node(*maskp);
+ break;
+ default:
+ bit = find_nth_bit(maskp->bits, MAX_NUMNODES, get_random_int() % w);
+ break;
+ }
return bit;
#else
return 0;
diff --git a/include/linux/nvme.h b/include/linux/nvme.h
index ae53d74f3696..050d7d0cd81b 100644
--- a/include/linux/nvme.h
+++ b/include/linux/nvme.h
@@ -1482,8 +1482,8 @@ struct nvmf_connect_command {
};
enum {
- NVME_CONNECT_AUTHREQ_ASCR = (1 << 2),
- NVME_CONNECT_AUTHREQ_ATR = (1 << 1),
+ NVME_CONNECT_AUTHREQ_ASCR = (1U << 18),
+ NVME_CONNECT_AUTHREQ_ATR = (1U << 17),
};
struct nvmf_connect_data {
diff --git a/include/linux/of.h b/include/linux/of.h
index 766d002bddb9..6b79ef9a6541 100644
--- a/include/linux/of.h
+++ b/include/linux/of.h
@@ -342,7 +342,7 @@ extern int of_property_read_string_helper(const struct device_node *np,
const char **out_strs, size_t sz, int index);
extern int of_device_is_compatible(const struct device_node *device,
const char *);
-extern int of_device_compatible_match(struct device_node *device,
+extern int of_device_compatible_match(const struct device_node *device,
const char *const *compat);
extern bool of_device_is_available(const struct device_node *device);
extern bool of_device_is_big_endian(const struct device_node *device);
@@ -562,7 +562,7 @@ static inline int of_device_is_compatible(const struct device_node *device,
return 0;
}
-static inline int of_device_compatible_match(struct device_node *device,
+static inline int of_device_compatible_match(const struct device_node *device,
const char *const *compat)
{
return 0;
diff --git a/include/linux/of_irq.h b/include/linux/of_irq.h
index 83fccd0c9bba..d6d3eae2f145 100644
--- a/include/linux/of_irq.h
+++ b/include/linux/of_irq.h
@@ -37,9 +37,8 @@ extern unsigned int irq_create_of_mapping(struct of_phandle_args *irq_data);
extern int of_irq_to_resource(struct device_node *dev, int index,
struct resource *r);
-extern void of_irq_init(const struct of_device_id *matches);
-
#ifdef CONFIG_OF_IRQ
+extern void of_irq_init(const struct of_device_id *matches);
extern int of_irq_parse_one(struct device_node *device, int index,
struct of_phandle_args *out_irq);
extern int of_irq_count(struct device_node *dev);
@@ -57,6 +56,9 @@ extern struct irq_domain *of_msi_map_get_device_domain(struct device *dev,
extern void of_msi_configure(struct device *dev, struct device_node *np);
u32 of_msi_map_id(struct device *dev, struct device_node *msi_np, u32 id_in);
#else
+static inline void of_irq_init(const struct of_device_id *matches)
+{
+}
static inline int of_irq_parse_one(struct device_node *device, int index,
struct of_phandle_args *out_irq)
{
diff --git a/include/linux/once.h b/include/linux/once.h
index b14d8b309d52..bc714d414448 100644
--- a/include/linux/once.h
+++ b/include/linux/once.h
@@ -5,10 +5,18 @@
#include <linux/types.h>
#include <linux/jump_label.h>
+/* Helpers used from arbitrary contexts.
+ * Hard irqs are blocked, be cautious.
+ */
bool __do_once_start(bool *done, unsigned long *flags);
void __do_once_done(bool *done, struct static_key_true *once_key,
unsigned long *flags, struct module *mod);
+/* Variant for process contexts only. */
+bool __do_once_sleepable_start(bool *done);
+void __do_once_sleepable_done(bool *done, struct static_key_true *once_key,
+ struct module *mod);
+
/* Call a function exactly once. The idea of DO_ONCE() is to perform
* a function call such as initialization of random seeds, etc, only
* once, where DO_ONCE() can live in the fast-path. After @func has
@@ -52,7 +60,27 @@ void __do_once_done(bool *done, struct static_key_true *once_key,
___ret; \
})
+/* Variant of DO_ONCE() for process/sleepable contexts. */
+#define DO_ONCE_SLEEPABLE(func, ...) \
+ ({ \
+ bool ___ret = false; \
+ static bool __section(".data.once") ___done = false; \
+ static DEFINE_STATIC_KEY_TRUE(___once_key); \
+ if (static_branch_unlikely(&___once_key)) { \
+ ___ret = __do_once_sleepable_start(&___done); \
+ if (unlikely(___ret)) { \
+ func(__VA_ARGS__); \
+ __do_once_sleepable_done(&___done, &___once_key,\
+ THIS_MODULE); \
+ } \
+ } \
+ ___ret; \
+ })
+
#define get_random_once(buf, nbytes) \
DO_ONCE(get_random_bytes, (buf), (nbytes))
+#define get_random_sleepable_once(buf, nbytes) \
+ DO_ONCE_SLEEPABLE(get_random_bytes, (buf), (nbytes))
+
#endif /* _LINUX_ONCE_H */
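A minimal usage sketch of the sleepable variant added above (the caller is hypothetical):

	/* Process context only; the first call may sleep while initializing. */
	static void foo_init_secret(void)
	{
		static u8 secret[16];

		get_random_sleepable_once(secret, sizeof(secret));
		/* secret is now initialized exactly once system-wide */
	}
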
diff --git a/include/linux/oom.h b/include/linux/oom.h
index 02d1e7bbd8cd..7d0c9c48a0c5 100644
--- a/include/linux/oom.h
+++ b/include/linux/oom.h
@@ -78,15 +78,6 @@ static inline bool tsk_is_oom_victim(struct task_struct * tsk)
}
/*
- * Use this helper if tsk->mm != mm and the victim mm needs a special
- * handling. This is guaranteed to stay true after once set.
- */
-static inline bool mm_is_oom_victim(struct mm_struct *mm)
-{
- return test_bit(MMF_OOM_VICTIM, &mm->flags);
-}
-
-/*
* Checks whether a page fault on the given mm is still reliable.
* This is no longer true if the oom reaper started to reap the
* address space which is reflected by MMF_UNSTABLE flag set in
@@ -106,8 +97,6 @@ static inline vm_fault_t check_stable_address_space(struct mm_struct *mm)
return 0;
}
-bool __oom_reap_task_mm(struct mm_struct *mm);
-
long oom_badness(struct task_struct *p,
unsigned long totalpages);
diff --git a/include/linux/overflow.h b/include/linux/overflow.h
index 0eb3b192f07a..19dfdd74835e 100644
--- a/include/linux/overflow.h
+++ b/include/linux/overflow.h
@@ -51,40 +51,50 @@ static inline bool __must_check __must_check_overflow(bool overflow)
return unlikely(overflow);
}
-/*
- * For simplicity and code hygiene, the fallback code below insists on
- * a, b and *d having the same type (similar to the min() and max()
- * macros), whereas gcc's type-generic overflow checkers accept
- * different types. Hence we don't just make check_add_overflow an
- * alias for __builtin_add_overflow, but add type checks similar to
- * below.
+/** check_add_overflow() - Calculate addition with overflow checking
+ *
+ * @a: first addend
+ * @b: second addend
+ * @d: pointer to store sum
+ *
+ * Returns 0 on success.
+ *
+ * *@d holds the results of the attempted addition, but is not considered
+ * "safe for use" on a non-zero return value, which indicates that the
+ * sum has overflowed or been truncated.
*/
-#define check_add_overflow(a, b, d) __must_check_overflow(({ \
- typeof(a) __a = (a); \
- typeof(b) __b = (b); \
- typeof(d) __d = (d); \
- (void) (&__a == &__b); \
- (void) (&__a == __d); \
- __builtin_add_overflow(__a, __b, __d); \
-}))
+#define check_add_overflow(a, b, d) \
+ __must_check_overflow(__builtin_add_overflow(a, b, d))
-#define check_sub_overflow(a, b, d) __must_check_overflow(({ \
- typeof(a) __a = (a); \
- typeof(b) __b = (b); \
- typeof(d) __d = (d); \
- (void) (&__a == &__b); \
- (void) (&__a == __d); \
- __builtin_sub_overflow(__a, __b, __d); \
-}))
+/** check_sub_overflow() - Calculate subtraction with overflow checking
+ *
+ * @a: minuend; value to subtract from
+ * @b: subtrahend; value to subtract from @a
+ * @d: pointer to store difference
+ *
+ * Returns 0 on success.
+ *
+ * *@d holds the results of the attempted subtraction, but is not considered
+ * "safe for use" on a non-zero return value, which indicates that the
+ * difference has underflowed or been truncated.
+ */
+#define check_sub_overflow(a, b, d) \
+ __must_check_overflow(__builtin_sub_overflow(a, b, d))
-#define check_mul_overflow(a, b, d) __must_check_overflow(({ \
- typeof(a) __a = (a); \
- typeof(b) __b = (b); \
- typeof(d) __d = (d); \
- (void) (&__a == &__b); \
- (void) (&__a == __d); \
- __builtin_mul_overflow(__a, __b, __d); \
-}))
+/** check_mul_overflow() - Calculate multiplication with overflow checking
+ *
+ * @a: first factor
+ * @b: second factor
+ * @d: pointer to store product
+ *
+ * Returns 0 on success.
+ *
+ * *@d holds the results of the attempted multiplication, but is not
+ * considered "safe for use" on a non-zero return value, which indicates
+ * that the product has overflowed or been truncated.
+ */
+#define check_mul_overflow(a, b, d) \
+ __must_check_overflow(__builtin_mul_overflow(a, b, d))
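A short usage sketch of the helpers documented above (hypothetical allocation path; needs <linux/overflow.h> and <linux/slab.h>):

	static void *foo_alloc_array(size_t nmemb, size_t size)
	{
		size_t bytes;

		if (check_mul_overflow(nmemb, size, &bytes))
			return NULL;	/* bytes is not safe to use after overflow */

		return kmalloc(bytes, GFP_KERNEL);
	}
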
/** check_shl_overflow() - Calculate a left-shifted value and check overflow
*
diff --git a/include/linux/page-flags-layout.h b/include/linux/page-flags-layout.h
index ef1e3e736e14..7d79818dc065 100644
--- a/include/linux/page-flags-layout.h
+++ b/include/linux/page-flags-layout.h
@@ -55,7 +55,8 @@
#define SECTIONS_WIDTH 0
#endif
-#if ZONES_WIDTH + SECTIONS_WIDTH + NODES_SHIFT <= BITS_PER_LONG - NR_PAGEFLAGS
+#if ZONES_WIDTH + LRU_GEN_WIDTH + SECTIONS_WIDTH + NODES_SHIFT \
+ <= BITS_PER_LONG - NR_PAGEFLAGS
#define NODES_WIDTH NODES_SHIFT
#elif defined(CONFIG_SPARSEMEM_VMEMMAP)
#error "Vmemmap: No space for nodes field in page flags"
@@ -89,8 +90,8 @@
#define LAST_CPUPID_SHIFT 0
#endif
-#if ZONES_WIDTH + SECTIONS_WIDTH + NODES_WIDTH + KASAN_TAG_WIDTH + LAST_CPUPID_SHIFT \
- <= BITS_PER_LONG - NR_PAGEFLAGS
+#if ZONES_WIDTH + LRU_GEN_WIDTH + SECTIONS_WIDTH + NODES_WIDTH + \
+ KASAN_TAG_WIDTH + LAST_CPUPID_SHIFT <= BITS_PER_LONG - NR_PAGEFLAGS
#define LAST_CPUPID_WIDTH LAST_CPUPID_SHIFT
#else
#define LAST_CPUPID_WIDTH 0
@@ -100,10 +101,15 @@
#define LAST_CPUPID_NOT_IN_PAGE_FLAGS
#endif
-#if ZONES_WIDTH + SECTIONS_WIDTH + NODES_WIDTH + KASAN_TAG_WIDTH + LAST_CPUPID_WIDTH \
- > BITS_PER_LONG - NR_PAGEFLAGS
+#if ZONES_WIDTH + LRU_GEN_WIDTH + SECTIONS_WIDTH + NODES_WIDTH + \
+ KASAN_TAG_WIDTH + LAST_CPUPID_WIDTH > BITS_PER_LONG - NR_PAGEFLAGS
#error "Not enough bits in page flags"
#endif
+/* see the comment on MAX_NR_TIERS */
+#define LRU_REFS_WIDTH min(__LRU_REFS_WIDTH, BITS_PER_LONG - NR_PAGEFLAGS - \
+ ZONES_WIDTH - LRU_GEN_WIDTH - SECTIONS_WIDTH - \
+ NODES_WIDTH - KASAN_TAG_WIDTH - LAST_CPUPID_WIDTH)
+
#endif
#endif /* _LINUX_PAGE_FLAGS_LAYOUT */
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index 465ff35a8c00..0b0ae5084e60 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -1058,7 +1058,7 @@ static __always_inline void __ClearPageAnonExclusive(struct page *page)
1UL << PG_private | 1UL << PG_private_2 | \
1UL << PG_writeback | 1UL << PG_reserved | \
1UL << PG_slab | 1UL << PG_active | \
- 1UL << PG_unevictable | __PG_MLOCKED)
+ 1UL << PG_unevictable | __PG_MLOCKED | LRU_GEN_MASK)
/*
* Flags checked when a page is prepped for return by the page allocator.
@@ -1069,7 +1069,7 @@ static __always_inline void __ClearPageAnonExclusive(struct page *page)
* alloc-free cycle to prevent from reusing the page.
*/
#define PAGE_FLAGS_CHECK_AT_PREP \
- (PAGEFLAGS_MASK & ~__PG_HWPOISON)
+ ((PAGEFLAGS_MASK & ~__PG_HWPOISON) | LRU_GEN_MASK | LRU_REFS_MASK)
#define PAGE_FLAGS_PRIVATE \
(1UL << PG_private | 1UL << PG_private_2)
diff --git a/include/linux/page_counter.h b/include/linux/page_counter.h
index 679591301994..c141ea9a95ef 100644
--- a/include/linux/page_counter.h
+++ b/include/linux/page_counter.h
@@ -3,15 +3,17 @@
#define _LINUX_PAGE_COUNTER_H
#include <linux/atomic.h>
+#include <linux/cache.h>
#include <linux/kernel.h>
#include <asm/page.h>
struct page_counter {
+ /*
+ * Make sure 'usage' does not share a cacheline with any other field;
+ * memcg->memory.usage is a hot member of struct mem_cgroup.
+ */
atomic_long_t usage;
- unsigned long min;
- unsigned long low;
- unsigned long high;
- unsigned long max;
+ CACHELINE_PADDING(_pad1_);
/* effective memory.min and memory.min usage tracking */
unsigned long emin;
@@ -23,18 +25,18 @@ struct page_counter {
atomic_long_t low_usage;
atomic_long_t children_low_usage;
- /* legacy */
unsigned long watermark;
unsigned long failcnt;
- /*
- * 'parent' is placed here to be far from 'usage' to reduce
- * cache false sharing, as 'usage' is written mostly while
- * parent is frequently read for cgroup's hierarchical
- * counting nature.
- */
+ /* Keep all the read-mostly fields in a separate cacheline. */
+ CACHELINE_PADDING(_pad2_);
+
+ unsigned long min;
+ unsigned long low;
+ unsigned long high;
+ unsigned long max;
struct page_counter *parent;
-};
+} ____cacheline_internodealigned_in_smp;
#if BITS_PER_LONG == 32
#define PAGE_COUNTER_MAX LONG_MAX
diff --git a/include/linux/page_ext.h b/include/linux/page_ext.h
index fabb2e1e087f..22be4582faae 100644
--- a/include/linux/page_ext.h
+++ b/include/linux/page_ext.h
@@ -36,9 +36,15 @@ struct page_ext {
unsigned long flags;
};
+extern bool early_page_ext;
extern unsigned long page_ext_size;
extern void pgdat_page_ext_init(struct pglist_data *pgdat);
+static inline bool early_page_ext_enabled(void)
+{
+ return early_page_ext;
+}
+
#ifdef CONFIG_SPARSEMEM
static inline void page_ext_init_flatmem(void)
{
@@ -55,7 +61,8 @@ static inline void page_ext_init(void)
}
#endif
-struct page_ext *lookup_page_ext(const struct page *page);
+extern struct page_ext *page_ext_get(struct page *page);
+extern void page_ext_put(struct page_ext *page_ext);
static inline struct page_ext *page_ext_next(struct page_ext *curr)
{
@@ -67,13 +74,13 @@ static inline struct page_ext *page_ext_next(struct page_ext *curr)
#else /* !CONFIG_PAGE_EXTENSION */
struct page_ext;
-static inline void pgdat_page_ext_init(struct pglist_data *pgdat)
+static inline bool early_page_ext_enabled(void)
{
+ return false;
}
-static inline struct page_ext *lookup_page_ext(const struct page *page)
+static inline void pgdat_page_ext_init(struct pglist_data *pgdat)
{
- return NULL;
}
static inline void page_ext_init(void)
@@ -87,5 +94,14 @@ static inline void page_ext_init_flatmem_late(void)
static inline void page_ext_init_flatmem(void)
{
}
+
+static inline struct page_ext *page_ext_get(struct page *page)
+{
+ return NULL;
+}
+
+static inline void page_ext_put(struct page_ext *page_ext)
+{
+}
#endif /* CONFIG_PAGE_EXTENSION */
#endif /* __LINUX_PAGE_EXT_H */
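The replacement of lookup_page_ext() with a get/put pair means every caller must now drop its reference; a minimal sketch of the pattern (the helper is hypothetical):

	static bool foo_test_flag(struct page *page, int flag_bit)
	{
		struct page_ext *page_ext = page_ext_get(page);
		bool ret = false;

		if (page_ext) {
			ret = test_bit(flag_bit, &page_ext->flags);
			page_ext_put(page_ext);	/* must pair with page_ext_get() */
		}
		return ret;
	}
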
diff --git a/include/linux/page_idle.h b/include/linux/page_idle.h
index 4663dfed1293..5cb7bd2078ec 100644
--- a/include/linux/page_idle.h
+++ b/include/linux/page_idle.h
@@ -13,65 +13,79 @@
* If there is not enough space to store Idle and Young bits in page flags, use
* page ext flags instead.
*/
-
static inline bool folio_test_young(struct folio *folio)
{
- struct page_ext *page_ext = lookup_page_ext(&folio->page);
+ struct page_ext *page_ext = page_ext_get(&folio->page);
+ bool page_young;
if (unlikely(!page_ext))
return false;
- return test_bit(PAGE_EXT_YOUNG, &page_ext->flags);
+ page_young = test_bit(PAGE_EXT_YOUNG, &page_ext->flags);
+ page_ext_put(page_ext);
+
+ return page_young;
}
static inline void folio_set_young(struct folio *folio)
{
- struct page_ext *page_ext = lookup_page_ext(&folio->page);
+ struct page_ext *page_ext = page_ext_get(&folio->page);
if (unlikely(!page_ext))
return;
set_bit(PAGE_EXT_YOUNG, &page_ext->flags);
+ page_ext_put(page_ext);
}
static inline bool folio_test_clear_young(struct folio *folio)
{
- struct page_ext *page_ext = lookup_page_ext(&folio->page);
+ struct page_ext *page_ext = page_ext_get(&folio->page);
+ bool page_young;
if (unlikely(!page_ext))
return false;
- return test_and_clear_bit(PAGE_EXT_YOUNG, &page_ext->flags);
+ page_young = test_and_clear_bit(PAGE_EXT_YOUNG, &page_ext->flags);
+ page_ext_put(page_ext);
+
+ return page_young;
}
static inline bool folio_test_idle(struct folio *folio)
{
- struct page_ext *page_ext = lookup_page_ext(&folio->page);
+ struct page_ext *page_ext = page_ext_get(&folio->page);
+ bool page_idle;
if (unlikely(!page_ext))
return false;
- return test_bit(PAGE_EXT_IDLE, &page_ext->flags);
+ page_idle = test_bit(PAGE_EXT_IDLE, &page_ext->flags);
+ page_ext_put(page_ext);
+
+ return page_idle;
}
static inline void folio_set_idle(struct folio *folio)
{
- struct page_ext *page_ext = lookup_page_ext(&folio->page);
+ struct page_ext *page_ext = page_ext_get(&folio->page);
if (unlikely(!page_ext))
return;
set_bit(PAGE_EXT_IDLE, &page_ext->flags);
+ page_ext_put(page_ext);
}
static inline void folio_clear_idle(struct folio *folio)
{
- struct page_ext *page_ext = lookup_page_ext(&folio->page);
+ struct page_ext *page_ext = page_ext_get(&folio->page);
if (unlikely(!page_ext))
return;
clear_bit(PAGE_EXT_IDLE, &page_ext->flags);
+ page_ext_put(page_ext);
}
#endif /* !CONFIG_64BIT */
diff --git a/include/linux/pageblock-flags.h b/include/linux/pageblock-flags.h
index 83c7248053a1..5f1ae07d724b 100644
--- a/include/linux/pageblock-flags.h
+++ b/include/linux/pageblock-flags.h
@@ -53,6 +53,10 @@ extern unsigned int pageblock_order;
#endif /* CONFIG_HUGETLB_PAGE */
#define pageblock_nr_pages (1UL << pageblock_order)
+#define pageblock_align(pfn) ALIGN((pfn), pageblock_nr_pages)
+#define pageblock_aligned(pfn) IS_ALIGNED((pfn), pageblock_nr_pages)
+#define pageblock_start_pfn(pfn) ALIGN_DOWN((pfn), pageblock_nr_pages)
+#define pageblock_end_pfn(pfn) ALIGN((pfn) + 1, pageblock_nr_pages)
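A small sketch of the new pageblock helpers (illustrative walk only; the function is hypothetical):

	/* Visit every pfn of the pageblock containing @pfn. */
	static void foo_scan_pageblock(unsigned long pfn)
	{
		unsigned long cur = pageblock_start_pfn(pfn);
		unsigned long end = pageblock_end_pfn(pfn);

		for (; cur < end; cur++) {
			/* ... operate on pfn_to_page(cur) ... */
		}
	}
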
/* Forward declaration */
struct page;
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 0178b2040ea3..bbccb4044222 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -718,8 +718,8 @@ static inline struct page *find_subpage(struct page *head, pgoff_t index)
unsigned filemap_get_folios(struct address_space *mapping, pgoff_t *start,
pgoff_t end, struct folio_batch *fbatch);
-unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t start,
- unsigned int nr_pages, struct page **pages);
+unsigned filemap_get_folios_contig(struct address_space *mapping,
+ pgoff_t *start, pgoff_t end, struct folio_batch *fbatch);
unsigned find_get_pages_range_tag(struct address_space *mapping, pgoff_t *index,
pgoff_t end, xa_mark_t tag, unsigned int nr_pages,
struct page **pages);
@@ -989,19 +989,16 @@ static inline int lock_page_killable(struct page *page)
}
/*
- * lock_page_or_retry - Lock the page, unless this would block and the
+ * folio_lock_or_retry - Lock the folio, unless this would block and the
* caller indicated that it can handle a retry.
*
* Return value and mmap_lock implications depend on flags; see
* __folio_lock_or_retry().
*/
-static inline bool lock_page_or_retry(struct page *page, struct mm_struct *mm,
- unsigned int flags)
+static inline bool folio_lock_or_retry(struct folio *folio,
+ struct mm_struct *mm, unsigned int flags)
{
- struct folio *folio;
might_sleep();
-
- folio = page_folio(page);
return folio_trylock(folio) || __folio_lock_or_retry(folio, mm, flags);
}
@@ -1042,7 +1039,6 @@ static inline int wait_on_page_locked_killable(struct page *page)
return folio_wait_locked_killable(page_folio(page));
}
-int folio_put_wait_locked(struct folio *folio, int state);
void wait_on_page_writeback(struct page *page);
void folio_wait_writeback(struct folio *folio);
int folio_wait_writeback_killable(struct folio *folio);
@@ -1173,6 +1169,8 @@ struct readahead_control {
pgoff_t _index;
unsigned int _nr_pages;
unsigned int _batch_count;
+ bool _workingset;
+ unsigned long _pflags;
};
#define DEFINE_READAHEAD(ractl, f, r, m, i) \
diff --git a/include/linux/pagewalk.h b/include/linux/pagewalk.h
index ac7b38ad5903..f3fafb731ffd 100644
--- a/include/linux/pagewalk.h
+++ b/include/linux/pagewalk.h
@@ -15,12 +15,12 @@ struct mm_walk;
* this handler is required to be able to handle
* pmd_trans_huge() pmds. They may simply choose to
* split_huge_page() instead of handling it explicitly.
- * @pte_entry: if set, called for each non-empty PTE (lowest-level)
- * entry
+ * @pte_entry: if set, called for each PTE (lowest-level) entry,
+ * including empty ones
* @pte_hole: if set, called for each hole at all levels,
- * depth is -1 if not known, 0:PGD, 1:P4D, 2:PUD, 3:PMD
- * 4:PTE. Any folded depths (where PTRS_PER_P?D is equal
- * to 1) are skipped.
+ * depth is -1 if not known, 0:PGD, 1:P4D, 2:PUD, 3:PMD.
+ * Any folded depths (where PTRS_PER_P?D is equal to 1)
+ * are skipped.
* @hugetlb_entry: if set, called for each hugetlb entry
* @test_walk: caller specific callback function to determine whether
* we walk over the current vma or not. Returning 0 means
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 060af91bafcd..2bda4a4e47e8 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -475,6 +475,7 @@ struct pci_dev {
unsigned int broken_cmd_compl:1; /* No compl for some cmds */
#endif
#ifdef CONFIG_PCIE_PTM
+ u16 ptm_cap; /* PTM Capability */
unsigned int ptm_root:1;
unsigned int ptm_enabled:1;
u8 ptm_granularity;
@@ -1677,10 +1678,12 @@ bool pci_ats_disabled(void);
#ifdef CONFIG_PCIE_PTM
int pci_enable_ptm(struct pci_dev *dev, u8 *granularity);
+void pci_disable_ptm(struct pci_dev *dev);
bool pcie_ptm_enabled(struct pci_dev *dev);
#else
static inline int pci_enable_ptm(struct pci_dev *dev, u8 *granularity)
{ return -EINVAL; }
+static inline void pci_disable_ptm(struct pci_dev *dev) { }
static inline bool pcie_ptm_enabled(struct pci_dev *dev)
{ return false; }
#endif
@@ -2019,8 +2022,8 @@ enum pci_fixup_pass {
#ifdef CONFIG_LTO_CLANG
#define __DECLARE_PCI_FIXUP_SECTION(sec, name, vendor, device, class, \
class_shift, hook, stub) \
- void __cficanonical stub(struct pci_dev *dev); \
- void __cficanonical stub(struct pci_dev *dev) \
+ void stub(struct pci_dev *dev); \
+ void stub(struct pci_dev *dev) \
{ \
hook(dev); \
} \
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index 15b49e655ce3..b362d90eb9b0 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -75,6 +75,9 @@
#define PCI_CLASS_COMMUNICATION_MODEM 0x0703
#define PCI_CLASS_COMMUNICATION_OTHER 0x0780
+/* Interface for SERIAL/MODEM */
+#define PCI_SERIAL_16550_COMPATIBLE 0x02
+
#define PCI_BASE_CLASS_SYSTEM 0x08
#define PCI_CLASS_SYSTEM_PIC 0x0800
#define PCI_CLASS_SYSTEM_PIC_IOAPIC 0x080010
diff --git a/include/linux/pcs-altera-tse.h b/include/linux/pcs-altera-tse.h
new file mode 100644
index 000000000000..92ab9f08e835
--- /dev/null
+++ b/include/linux/pcs-altera-tse.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2022 Bootlin
+ *
+ * Maxime Chevallier <maxime.chevallier@bootlin.com>
+ */
+
+#ifndef __LINUX_PCS_ALTERA_TSE_H
+#define __LINUX_PCS_ALTERA_TSE_H
+
+struct phylink_pcs;
+struct net_device;
+
+struct phylink_pcs *alt_tse_pcs_create(struct net_device *ndev,
+ void __iomem *pcs_base, int reg_width);
+
+#endif /* __LINUX_PCS_ALTERA_TSE_H */
diff --git a/include/linux/pe.h b/include/linux/pe.h
index daf09ffffe38..1d3836ef9d92 100644
--- a/include/linux/pe.h
+++ b/include/linux/pe.h
@@ -65,6 +65,8 @@
#define IMAGE_FILE_MACHINE_SH5 0x01a8
#define IMAGE_FILE_MACHINE_THUMB 0x01c2
#define IMAGE_FILE_MACHINE_WCEMIPSV2 0x0169
+#define IMAGE_FILE_MACHINE_LOONGARCH32 0x6232
+#define IMAGE_FILE_MACHINE_LOONGARCH64 0x6264
/* flags */
#define IMAGE_FILE_RELOCS_STRIPPED 0x0001
diff --git a/include/linux/percpu-rwsem.h b/include/linux/percpu-rwsem.h
index 5fda40f97fe9..36b942b67b7d 100644
--- a/include/linux/percpu-rwsem.h
+++ b/include/linux/percpu-rwsem.h
@@ -121,9 +121,15 @@ static inline void percpu_up_read(struct percpu_rw_semaphore *sem)
preempt_enable();
}
+extern bool percpu_is_read_locked(struct percpu_rw_semaphore *);
extern void percpu_down_write(struct percpu_rw_semaphore *);
extern void percpu_up_write(struct percpu_rw_semaphore *);
+static inline bool percpu_is_write_locked(struct percpu_rw_semaphore *sem)
+{
+ return atomic_read(&sem->block);
+}
+
extern int __percpu_init_rwsem(struct percpu_rw_semaphore *,
const char *, struct lock_class_key *);
diff --git a/include/linux/percpu_counter.h b/include/linux/percpu_counter.h
index 01861eebed79..8ed5fba6d156 100644
--- a/include/linux/percpu_counter.h
+++ b/include/linux/percpu_counter.h
@@ -15,6 +15,9 @@
#include <linux/types.h>
#include <linux/gfp.h>
+/* percpu_counter batch for local add or sub */
+#define PERCPU_COUNTER_LOCAL_BATCH INT_MAX
+
#ifdef CONFIG_SMP
struct percpu_counter {
@@ -56,6 +59,22 @@ static inline void percpu_counter_add(struct percpu_counter *fbc, s64 amount)
percpu_counter_add_batch(fbc, amount, percpu_counter_batch);
}
+/*
+ * With percpu_counter_add_local() and percpu_counter_sub_local(), counts
+ * are accumulated in the local per-CPU counter and not in fbc->count until
+ * the local count overflows PERCPU_COUNTER_LOCAL_BATCH. This makes counter
+ * writes efficient.
+ * But percpu_counter_sum(), instead of percpu_counter_read(), needs to be
+ * used to add up the counts from each CPU to account for all the local
+ * counts. So percpu_counter_add_local() and percpu_counter_sub_local()
+ * should be used when a counter is updated frequently and read rarely.
+ */
+static inline void
+percpu_counter_add_local(struct percpu_counter *fbc, s64 amount)
+{
+ percpu_counter_add_batch(fbc, amount, PERCPU_COUNTER_LOCAL_BATCH);
+}
+
static inline s64 percpu_counter_sum_positive(struct percpu_counter *fbc)
{
s64 ret = __percpu_counter_sum(fbc);
@@ -138,6 +157,13 @@ percpu_counter_add(struct percpu_counter *fbc, s64 amount)
preempt_enable();
}
+/* non-SMP percpu_counter_add_local() is the same as percpu_counter_add() */
+static inline void
+percpu_counter_add_local(struct percpu_counter *fbc, s64 amount)
+{
+ percpu_counter_add(fbc, amount);
+}
+
static inline void
percpu_counter_add_batch(struct percpu_counter *fbc, s64 amount, s32 batch)
{
@@ -193,4 +219,10 @@ static inline void percpu_counter_sub(struct percpu_counter *fbc, s64 amount)
percpu_counter_add(fbc, -amount);
}
+static inline void
+percpu_counter_sub_local(struct percpu_counter *fbc, s64 amount)
+{
+ percpu_counter_add_local(fbc, -amount);
+}
+
#endif /* _LINUX_PERCPU_COUNTER_H */
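A sketch of the write-mostly/read-rarely pattern described above (counter and helpers are hypothetical):

	/* Assume percpu_counter_init(&nr_foo, 0, GFP_KERNEL) ran at init time. */
	static struct percpu_counter nr_foo;

	static void foo_account_alloc(void)
	{
		/* Cheap: stays in the per-CPU count until it overflows the batch. */
		percpu_counter_add_local(&nr_foo, 1);
	}

	static s64 foo_report(void)
	{
		/* Must fold in per-CPU deltas; percpu_counter_read() would miss them. */
		return percpu_counter_sum(&nr_foo);
	}
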
diff --git a/include/linux/perf/arm_pmu.h b/include/linux/perf/arm_pmu.h
index 0407a38b470a..0356cb6a215d 100644
--- a/include/linux/perf/arm_pmu.h
+++ b/include/linux/perf/arm_pmu.h
@@ -24,10 +24,11 @@
/*
* ARM PMU hw_event flags
*/
-/* Event uses a 64bit counter */
-#define ARMPMU_EVT_64BIT 1
-/* Event uses a 47bit counter */
-#define ARMPMU_EVT_47BIT 2
+#define ARMPMU_EVT_64BIT 0x00001 /* Event uses a 64bit counter */
+#define ARMPMU_EVT_47BIT 0x00002 /* Event uses a 47bit counter */
+
+static_assert((PERF_EVENT_FLAG_ARCH & ARMPMU_EVT_64BIT) == ARMPMU_EVT_64BIT);
+static_assert((PERF_EVENT_FLAG_ARCH & ARMPMU_EVT_47BIT) == ARMPMU_EVT_47BIT);
#define HW_OP_UNSUPPORTED 0xFFFF
#define C(_x) PERF_COUNT_HW_CACHE_##_x
diff --git a/include/linux/perf/riscv_pmu.h b/include/linux/perf/riscv_pmu.h
index bf66fe011fa8..e17e86ad6f3a 100644
--- a/include/linux/perf/riscv_pmu.h
+++ b/include/linux/perf/riscv_pmu.h
@@ -45,7 +45,7 @@ struct riscv_pmu {
irqreturn_t (*handle_irq)(int irq_num, void *dev);
- int num_counters;
+ unsigned long cmask;
u64 (*ctr_read)(struct perf_event *event);
int (*ctr_get_idx)(struct perf_event *event);
int (*ctr_get_width)(int idx);
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index ee8b9ecdc03b..853f64b6c8c2 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -36,6 +36,7 @@ struct perf_guest_info_callbacks {
};
#ifdef CONFIG_HAVE_HW_BREAKPOINT
+#include <linux/rhashtable-types.h>
#include <asm/hw_breakpoint.h>
#endif
@@ -60,6 +61,7 @@ struct perf_guest_info_callbacks {
#include <linux/refcount.h>
#include <linux/security.h>
#include <linux/static_call.h>
+#include <linux/lockdep.h>
#include <asm/local.h>
struct perf_callchain_entry {
@@ -137,9 +139,11 @@ struct hw_perf_event_extra {
* PERF_EVENT_FLAG_ARCH bits are reserved for architecture-specific
* usage.
*/
-#define PERF_EVENT_FLAG_ARCH 0x0000ffff
+#define PERF_EVENT_FLAG_ARCH 0x000fffff
#define PERF_EVENT_FLAG_USER_READ_CNT 0x80000000
+static_assert((PERF_EVENT_FLAG_USER_READ_CNT & PERF_EVENT_FLAG_ARCH) == 0);
+
/**
* struct hw_perf_event - performance event hardware details:
*/
@@ -178,7 +182,7 @@ struct hw_perf_event {
* creation and event initalization.
*/
struct arch_hw_breakpoint info;
- struct list_head bp_list;
+ struct rhlist_head bp_list;
};
#endif
struct { /* amd_iommu */
@@ -631,7 +635,23 @@ struct pmu_event_list {
struct list_head list;
};
+/*
+ * event->sibling_list is modified while holding both ctx->lock and ctx->mutex;
+ * as such, iteration must hold either lock. However, since ctx->lock is an
+ * IRQ-safe lock and is only held by the CPU doing the modification, having
+ * IRQs disabled is sufficient since it will hold off the IPIs.
+ */
+#ifdef CONFIG_PROVE_LOCKING
+#define lockdep_assert_event_ctx(event) \
+ WARN_ON_ONCE(__lockdep_enabled && \
+ (this_cpu_read(hardirqs_enabled) && \
+ lockdep_is_held(&(event)->ctx->mutex) != LOCK_STATE_HELD))
+#else
+#define lockdep_assert_event_ctx(event)
+#endif
+
#define for_each_sibling_event(sibling, event) \
+ lockdep_assert_event_ctx(event); \
if ((event)->group_leader == (event)) \
list_for_each_entry((sibling), &(event)->sibling_list, sibling_list)
@@ -1007,18 +1027,20 @@ struct perf_sample_data {
* Fields set by perf_sample_data_init(), group so as to
* minimize the cachelines touched.
*/
- u64 addr;
- struct perf_raw_record *raw;
- struct perf_branch_stack *br_stack;
+ u64 sample_flags;
u64 period;
- union perf_sample_weight weight;
- u64 txn;
- union perf_mem_data_src data_src;
/*
* The other fields, optionally {set,used} by
* perf_{prepare,output}_sample().
*/
+ struct perf_branch_stack *br_stack;
+ union perf_sample_weight weight;
+ union perf_mem_data_src data_src;
+ u64 txn;
+ u64 addr;
+ struct perf_raw_record *raw;
+
u64 type;
u64 ip;
struct {
@@ -1056,13 +1078,13 @@ static inline void perf_sample_data_init(struct perf_sample_data *data,
u64 addr, u64 period)
{
/* remaining struct members initialized in perf_prepare_sample() */
- data->addr = addr;
- data->raw = NULL;
- data->br_stack = NULL;
+ data->sample_flags = PERF_SAMPLE_PERIOD;
data->period = period;
- data->weight.full = 0;
- data->data_src.val = PERF_MEM_NA;
- data->txn = 0;
+
+ if (addr) {
+ data->addr = addr;
+ data->sample_flags |= PERF_SAMPLE_ADDR;
+ }
}
/*
@@ -1078,6 +1100,7 @@ static inline void perf_clear_branch_entry_bitfields(struct perf_branch_entry *b
br->abort = 0;
br->cycles = 0;
br->type = 0;
+ br->spec = PERF_BR_SPEC_NA;
br->reserved = 0;
}
@@ -1684,4 +1707,30 @@ static inline void perf_lopwr_cb(bool mode)
}
#endif
+#ifdef CONFIG_PERF_EVENTS
+static inline bool branch_sample_no_flags(const struct perf_event *event)
+{
+ return event->attr.branch_sample_type & PERF_SAMPLE_BRANCH_NO_FLAGS;
+}
+
+static inline bool branch_sample_no_cycles(const struct perf_event *event)
+{
+ return event->attr.branch_sample_type & PERF_SAMPLE_BRANCH_NO_CYCLES;
+}
+
+static inline bool branch_sample_type(const struct perf_event *event)
+{
+ return event->attr.branch_sample_type & PERF_SAMPLE_BRANCH_TYPE_SAVE;
+}
+
+static inline bool branch_sample_hw_index(const struct perf_event *event)
+{
+ return event->attr.branch_sample_type & PERF_SAMPLE_BRANCH_HW_INDEX;
+}
+
+static inline bool branch_sample_priv(const struct perf_event *event)
+{
+ return event->attr.branch_sample_type & PERF_SAMPLE_BRANCH_PRIV_SAVE;
+}
+#endif /* CONFIG_PERF_EVENTS */
#endif /* _LINUX_PERF_EVENT_H */
diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h
index 014ee8f0fbaa..a108b60a6962 100644
--- a/include/linux/pgtable.h
+++ b/include/linux/pgtable.h
@@ -213,7 +213,7 @@ static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
#endif
#ifndef __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_ARCH_HAS_NONLEAF_PMD_YOUNG)
static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
unsigned long address,
pmd_t *pmdp)
@@ -234,7 +234,7 @@ static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
BUILD_BUG();
return 0;
}
-#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_ARCH_HAS_NONLEAF_PMD_YOUNG */
#endif
#ifndef __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
@@ -260,6 +260,19 @@ static inline int pmdp_clear_flush_young(struct vm_area_struct *vma,
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif
+#ifndef arch_has_hw_pte_young
+/*
+ * Return whether the accessed bit is supported on the local CPU.
+ *
+ * This stub assumes accessing through an old PTE triggers a page fault.
+ * Architectures that automatically set the access bit should overwrite it.
+ */
+static inline bool arch_has_hw_pte_young(void)
+{
+ return false;
+}
+#endif
+
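Roughly how an architecture whose MMU sets the accessed bit in hardware would override the stub (modeled on the x86 approach; shown here only as a sketch, not copied from any arch header):

#define arch_has_hw_pte_young arch_has_hw_pte_young
static inline bool arch_has_hw_pte_young(void)
{
	/* The hardware sets the accessed bit; no fault round-trip needed. */
	return true;
}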
#ifndef __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
unsigned long address,
@@ -1276,8 +1289,7 @@ static inline int pgd_devmap(pgd_t pgd)
#endif
#if !defined(CONFIG_TRANSPARENT_HUGEPAGE) || \
- (defined(CONFIG_TRANSPARENT_HUGEPAGE) && \
- !defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD))
+ !defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
static inline int pud_trans_huge(pud_t pud)
{
return 0;
@@ -1598,11 +1610,7 @@ typedef unsigned int pgtbl_mod_mask;
#endif
#ifndef has_transparent_hugepage
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-#define has_transparent_hugepage() 1
-#else
-#define has_transparent_hugepage() 0
-#endif
+#define has_transparent_hugepage() IS_BUILTIN(CONFIG_TRANSPARENT_HUGEPAGE)
#endif
/*
diff --git a/include/linux/phy.h b/include/linux/phy.h
index 87638c55d844..ddf66198f751 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -115,6 +115,8 @@ extern const int phy_10gbit_features_array[1];
* @PHY_INTERFACE_MODE_25GBASER: 25G BaseR
* @PHY_INTERFACE_MODE_USXGMII: Universal Serial 10GE MII
* @PHY_INTERFACE_MODE_10GKR: 10GBASE-KR - with Clause 73 AN
+ * @PHY_INTERFACE_MODE_QUSGMII: Quad Universal SGMII
+ * @PHY_INTERFACE_MODE_1000BASEKX: 1000Base-KX - with Clause 73 AN
* @PHY_INTERFACE_MODE_MAX: Book keeping
*
* Describes the interface between the MAC and PHY.
@@ -152,6 +154,8 @@ typedef enum {
PHY_INTERFACE_MODE_USXGMII,
/* 10GBASE-KR - with Clause 73 AN */
PHY_INTERFACE_MODE_10GKR,
+ PHY_INTERFACE_MODE_QUSGMII,
+ PHY_INTERFACE_MODE_1000BASEKX,
PHY_INTERFACE_MODE_MAX,
} phy_interface_t;
@@ -249,6 +253,8 @@ static inline const char *phy_modes(phy_interface_t interface)
return "trgmii";
case PHY_INTERFACE_MODE_1000BASEX:
return "1000base-x";
+ case PHY_INTERFACE_MODE_1000BASEKX:
+ return "1000base-kx";
case PHY_INTERFACE_MODE_2500BASEX:
return "2500base-x";
case PHY_INTERFACE_MODE_5GBASER:
@@ -267,12 +273,13 @@ static inline const char *phy_modes(phy_interface_t interface)
return "10gbase-kr";
case PHY_INTERFACE_MODE_100BASEX:
return "100base-x";
+ case PHY_INTERFACE_MODE_QUSGMII:
+ return "qusgmii";
default:
return "unknown";
}
}
-
#define PHY_INIT_TIMEOUT 100000
#define PHY_FORCE_TIMEOUT 10
@@ -564,8 +571,10 @@ struct macsec_ops;
* @advertising: Currently advertised linkmodes
* @adv_old: Saved advertised while power saving for WoL
* @lp_advertising: Current link partner advertised linkmodes
+ * @host_interfaces: PHY interface modes supported by host
* @eee_broken_modes: Energy efficient ethernet modes which should be prohibited
* @autoneg: Flag autoneg being used
+ * @rate_matching: Current rate matching mode
* @link: Current link state
* @autoneg_complete: Flag auto negotiation of the link has completed
* @mdix: Current crossover
@@ -588,6 +597,7 @@ struct macsec_ops;
* @master_slave_get: Current master/slave advertisement
* @master_slave_state: Current master/slave configuration
* @mii_ts: Pointer to time stamper callbacks
+ * @psec: Pointer to Power Sourcing Equipment control struct
* @lock: Mutex for serialization access to PHY
* @state_queue: Work queue for state machine
* @shared: Pointer to private data shared by phys in one package
@@ -633,6 +643,8 @@ struct phy_device {
unsigned irq_suspended:1;
unsigned irq_rerun:1;
+ int rate_matching;
+
enum phy_state state;
u32 dev_flags;
@@ -660,6 +672,9 @@ struct phy_device {
/* used with phy_speed_down */
__ETHTOOL_DECLARE_LINK_MODE_MASK(adv_old);
+ /* Host supported PHY interface types. Should be ignored if empty. */
+ DECLARE_PHY_INTERFACE_MASK(host_interfaces);
+
/* Energy efficient ethernet modes which should be prohibited */
u32 eee_broken_modes;
@@ -701,6 +716,7 @@ struct phy_device {
struct phylink *phylink;
struct net_device *attached_dev;
struct mii_timestamper *mii_ts;
+ struct pse_control *psec;
u8 mdix;
u8 mdix_ctrl;
@@ -797,6 +813,21 @@ struct phy_driver {
*/
int (*get_features)(struct phy_device *phydev);
+ /**
+ * @get_rate_matching: Get the supported type of rate matching for a
+ * particular phy interface. This is used by phy consumers to determine
+ * whether to advertise lower-speed modes for that interface. It is
+ * assumed that if a rate matching mode is supported on an interface,
+ * then that interface's rate can be adapted to all slower link speeds
+ * supported by the phy. If iface is %PHY_INTERFACE_MODE_NA, and the phy
+ * supports any kind of rate matching for any interface, then it must
+ * return that rate matching mode (preferring %RATE_MATCH_PAUSE to
+ * %RATE_MATCH_CRS). If the interface is not supported, this should
+ * return %RATE_MATCH_NONE.
+ */
+ int (*get_rate_matching)(struct phy_device *phydev,
+ phy_interface_t iface);
+
/* PHY Power Management */
/** @suspend: Suspend the hardware, saving state if needed */
int (*suspend)(struct phy_device *phydev);
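A sketch of a PHY driver implementing the new callback, under the assumption that the device can do pause-based rate matching only on its 10GBASE-R host interface; the RATE_MATCH_* values are the ones the kerneldoc above refers to.

static int example_get_rate_matching(struct phy_device *phydev,
				     phy_interface_t iface)
{
	/* Report pause-based rate matching for the supported interface,
	 * or for PHY_INTERFACE_MODE_NA as the kerneldoc above requires. */
	if (iface == PHY_INTERFACE_MODE_10GBASER ||
	    iface == PHY_INTERFACE_MODE_NA)
		return RATE_MATCH_PAUSE;

	return RATE_MATCH_NONE;
}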
@@ -963,6 +994,9 @@ struct phy_fixup {
const char *phy_speed_to_str(int speed);
const char *phy_duplex_to_str(unsigned int duplex);
+const char *phy_rate_matching_to_str(int rate_matching);
+
+int phy_interface_num_ports(phy_interface_t interface);
/* A structure for mapping a particular speed and duplex
* combination to a particular SUPPORTED and ADVERTISED value
@@ -1677,6 +1711,8 @@ int phy_disable_interrupts(struct phy_device *phydev);
void phy_request_interrupt(struct phy_device *phydev);
void phy_free_interrupt(struct phy_device *phydev);
void phy_print_status(struct phy_device *phydev);
+int phy_get_rate_matching(struct phy_device *phydev,
+ phy_interface_t iface);
void phy_set_max_speed(struct phy_device *phydev, u32 max_speed);
void phy_remove_link_mode(struct phy_device *phydev, u32 link_mode);
void phy_advertise_supported(struct phy_device *phydev);
diff --git a/include/linux/phy/pcie.h b/include/linux/phy/pcie.h
new file mode 100644
index 000000000000..e7ac81764576
--- /dev/null
+++ b/include/linux/phy/pcie.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2022 Rockchip Electronics Co., Ltd.
+ */
+#ifndef __PHY_PCIE_H
+#define __PHY_PCIE_H
+
+#define PHY_MODE_PCIE_RC 20
+#define PHY_MODE_PCIE_EP 21
+#define PHY_MODE_PCIE_BIFURCATION 22
+
+#endif
diff --git a/include/linux/phy/tegra/xusb.h b/include/linux/phy/tegra/xusb.h
index 3a35e74cdc61..70998e6dd6fd 100644
--- a/include/linux/phy/tegra/xusb.h
+++ b/include/linux/phy/tegra/xusb.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Copyright (c) 2016-2020, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2016-2022, NVIDIA CORPORATION. All rights reserved.
*/
#ifndef PHY_TEGRA_XUSB_H
@@ -21,6 +21,8 @@ int tegra_xusb_padctl_usb3_set_lfps_detect(struct tegra_xusb_padctl *padctl,
unsigned int port, bool enable);
int tegra_xusb_padctl_set_vbus_override(struct tegra_xusb_padctl *padctl,
bool val);
+void tegra_phy_xusb_utmi_pad_power_on(struct phy *phy);
+void tegra_phy_xusb_utmi_pad_power_down(struct phy *phy);
int tegra_phy_xusb_utmi_port_reset(struct phy *phy);
int tegra_xusb_padctl_get_usb3_companion(struct tegra_xusb_padctl *padctl,
unsigned int port);
diff --git a/include/linux/phylink.h b/include/linux/phylink.h
index 6d06896fc20d..664dd409feb9 100644
--- a/include/linux/phylink.h
+++ b/include/linux/phylink.h
@@ -21,6 +21,35 @@ enum {
MLO_AN_FIXED, /* Fixed-link mode */
MLO_AN_INBAND, /* In-band protocol */
+ /* MAC_SYM_PAUSE and MAC_ASYM_PAUSE are used when configuring our
+ * autonegotiation advertisement. They correspond to the PAUSE and
+ * ASM_DIR bits defined by 802.3, respectively.
+ *
+ * The following table lists the values of tx_pause and rx_pause which
+ * might be requested in mac_link_up. The exact values depend on either
+ * the results of autonegotiation (if MLO_PAUSE_AN is set) or user
+ * configuration (if MLO_PAUSE_AN is not set).
+ *
+ * MAC_SYM_PAUSE MAC_ASYM_PAUSE MLO_PAUSE_AN tx_pause/rx_pause
+ * ============= ============== ============ ==================
+ * 0 0 0 0/0
+ * 0 0 1 0/0
+ * 0 1 0 0/0, 0/1, 1/0, 1/1
+ * 0 1 1 0/0, 1/0
+ * 1 0 0 0/0, 1/1
+ * 1 0 1 0/0, 1/1
+ * 1 1 0 0/0, 0/1, 1/0, 1/1
+ * 1 1 1 0/0, 0/1, 1/1
+ *
+ * If you set MAC_ASYM_PAUSE, the user may request any combination of
+ * tx_pause and rx_pause. You do not have to support these
+ * combinations.
+ *
+ * However, you should support combinations of tx_pause and rx_pause
+ * which might be the result of autonegotiation. For example, don't set
+ * MAC_SYM_PAUSE unless your device can support tx_pause and rx_pause
+ * at the same time.
+ */
MAC_SYM_PAUSE = BIT(0),
MAC_ASYM_PAUSE = BIT(1),
MAC_10HD = BIT(2),
@@ -59,6 +88,10 @@ static inline bool phylink_autoneg_inband(unsigned int mode)
* @speed: link speed, one of the SPEED_* constants.
* @duplex: link duplex mode, one of DUPLEX_* constants.
* @pause: link pause state, described by MLO_PAUSE_* constants.
+ * @rate_matching: rate matching being performed, one of the RATE_MATCH_*
+ * constants. If rate matching is taking place, then the speed/duplex of
+ * the medium link mode (@speed and @duplex) and the speed/duplex of the phy
+ * interface mode (@interface) are different.
* @link: true if the link is up.
* @an_enabled: true if autonegotiation is enabled/desired.
* @an_complete: true if autonegotiation has completed.
@@ -70,6 +103,7 @@ struct phylink_link_state {
int speed;
int duplex;
int pause;
+ int rate_matching;
unsigned int link:1;
unsigned int an_enabled:1;
unsigned int an_complete:1;
@@ -518,8 +552,10 @@ void pcs_link_up(struct phylink_pcs *pcs, unsigned int mode,
phy_interface_t interface, int speed, int duplex);
#endif
-void phylink_get_linkmodes(unsigned long *linkmodes, phy_interface_t interface,
- unsigned long mac_capabilities);
+void phylink_caps_to_linkmodes(unsigned long *linkmodes, unsigned long caps);
+unsigned long phylink_get_capabilities(phy_interface_t interface,
+ unsigned long mac_capabilities,
+ int rate_matching);
void phylink_generic_validate(struct phylink_config *config,
unsigned long *supported,
struct phylink_link_state *state);
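A sketch of how a MAC driver's validate step would use the reworked helpers: capabilities are computed per interface, now factoring in the negotiated rate matching, and then expanded into link modes. MAC_1000FD is assumed to be one of the capability bits alongside the pause bits listed above; the capability set is illustrative.

static void example_validate(struct phylink_config *config,
			     unsigned long *supported,
			     struct phylink_link_state *state)
{
	unsigned long caps;

	/* Compute what this interface can carry, given rate matching. */
	caps = phylink_get_capabilities(state->interface,
					MAC_SYM_PAUSE | MAC_ASYM_PAUSE |
					MAC_1000FD,
					state->rate_matching);

	/* Turn the capability bits into ethtool link modes. */
	phylink_caps_to_linkmodes(supported, caps);
}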
diff --git a/include/linux/platform_data/adp5588.h b/include/linux/platform_data/adp5588.h
deleted file mode 100644
index 6d3f7d911a92..000000000000
--- a/include/linux/platform_data/adp5588.h
+++ /dev/null
@@ -1,171 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Analog Devices ADP5588 I/O Expander and QWERTY Keypad Controller
- *
- * Copyright 2009-2010 Analog Devices Inc.
- */
-
-#ifndef _ADP5588_H
-#define _ADP5588_H
-
-#define DEV_ID 0x00 /* Device ID */
-#define CFG 0x01 /* Configuration Register1 */
-#define INT_STAT 0x02 /* Interrupt Status Register */
-#define KEY_LCK_EC_STAT 0x03 /* Key Lock and Event Counter Register */
-#define Key_EVENTA 0x04 /* Key Event Register A */
-#define Key_EVENTB 0x05 /* Key Event Register B */
-#define Key_EVENTC 0x06 /* Key Event Register C */
-#define Key_EVENTD 0x07 /* Key Event Register D */
-#define Key_EVENTE 0x08 /* Key Event Register E */
-#define Key_EVENTF 0x09 /* Key Event Register F */
-#define Key_EVENTG 0x0A /* Key Event Register G */
-#define Key_EVENTH 0x0B /* Key Event Register H */
-#define Key_EVENTI 0x0C /* Key Event Register I */
-#define Key_EVENTJ 0x0D /* Key Event Register J */
-#define KP_LCK_TMR 0x0E /* Keypad Lock1 to Lock2 Timer */
-#define UNLOCK1 0x0F /* Unlock Key1 */
-#define UNLOCK2 0x10 /* Unlock Key2 */
-#define GPIO_INT_STAT1 0x11 /* GPIO Interrupt Status */
-#define GPIO_INT_STAT2 0x12 /* GPIO Interrupt Status */
-#define GPIO_INT_STAT3 0x13 /* GPIO Interrupt Status */
-#define GPIO_DAT_STAT1 0x14 /* GPIO Data Status, Read twice to clear */
-#define GPIO_DAT_STAT2 0x15 /* GPIO Data Status, Read twice to clear */
-#define GPIO_DAT_STAT3 0x16 /* GPIO Data Status, Read twice to clear */
-#define GPIO_DAT_OUT1 0x17 /* GPIO DATA OUT */
-#define GPIO_DAT_OUT2 0x18 /* GPIO DATA OUT */
-#define GPIO_DAT_OUT3 0x19 /* GPIO DATA OUT */
-#define GPIO_INT_EN1 0x1A /* GPIO Interrupt Enable */
-#define GPIO_INT_EN2 0x1B /* GPIO Interrupt Enable */
-#define GPIO_INT_EN3 0x1C /* GPIO Interrupt Enable */
-#define KP_GPIO1 0x1D /* Keypad or GPIO Selection */
-#define KP_GPIO2 0x1E /* Keypad or GPIO Selection */
-#define KP_GPIO3 0x1F /* Keypad or GPIO Selection */
-#define GPI_EM1 0x20 /* GPI Event Mode 1 */
-#define GPI_EM2 0x21 /* GPI Event Mode 2 */
-#define GPI_EM3 0x22 /* GPI Event Mode 3 */
-#define GPIO_DIR1 0x23 /* GPIO Data Direction */
-#define GPIO_DIR2 0x24 /* GPIO Data Direction */
-#define GPIO_DIR3 0x25 /* GPIO Data Direction */
-#define GPIO_INT_LVL1 0x26 /* GPIO Edge/Level Detect */
-#define GPIO_INT_LVL2 0x27 /* GPIO Edge/Level Detect */
-#define GPIO_INT_LVL3 0x28 /* GPIO Edge/Level Detect */
-#define Debounce_DIS1 0x29 /* Debounce Disable */
-#define Debounce_DIS2 0x2A /* Debounce Disable */
-#define Debounce_DIS3 0x2B /* Debounce Disable */
-#define GPIO_PULL1 0x2C /* GPIO Pull Disable */
-#define GPIO_PULL2 0x2D /* GPIO Pull Disable */
-#define GPIO_PULL3 0x2E /* GPIO Pull Disable */
-#define CMP_CFG_STAT 0x30 /* Comparator Configuration and Status Register */
-#define CMP_CONFG_SENS1 0x31 /* Sensor1 Comparator Configuration Register */
-#define CMP_CONFG_SENS2 0x32 /* L2 Light Sensor Reference Level, Output Falling for Sensor 1 */
-#define CMP1_LVL2_TRIP 0x33 /* L2 Light Sensor Hysteresis (Active when Output Rising) for Sensor 1 */
-#define CMP1_LVL2_HYS 0x34 /* L3 Light Sensor Reference Level, Output Falling For Sensor 1 */
-#define CMP1_LVL3_TRIP 0x35 /* L3 Light Sensor Hysteresis (Active when Output Rising) For Sensor 1 */
-#define CMP1_LVL3_HYS 0x36 /* Sensor 2 Comparator Configuration Register */
-#define CMP2_LVL2_TRIP 0x37 /* L2 Light Sensor Reference Level, Output Falling for Sensor 2 */
-#define CMP2_LVL2_HYS 0x38 /* L2 Light Sensor Hysteresis (Active when Output Rising) for Sensor 2 */
-#define CMP2_LVL3_TRIP 0x39 /* L3 Light Sensor Reference Level, Output Falling For Sensor 2 */
-#define CMP2_LVL3_HYS 0x3A /* L3 Light Sensor Hysteresis (Active when Output Rising) For Sensor 2 */
-#define CMP1_ADC_DAT_R1 0x3B /* Comparator 1 ADC data Register1 */
-#define CMP1_ADC_DAT_R2 0x3C /* Comparator 1 ADC data Register2 */
-#define CMP2_ADC_DAT_R1 0x3D /* Comparator 2 ADC data Register1 */
-#define CMP2_ADC_DAT_R2 0x3E /* Comparator 2 ADC data Register2 */
-
-#define ADP5588_DEVICE_ID_MASK 0xF
-
- /* Configuration Register1 */
-#define ADP5588_AUTO_INC (1 << 7)
-#define ADP5588_GPIEM_CFG (1 << 6)
-#define ADP5588_OVR_FLOW_M (1 << 5)
-#define ADP5588_INT_CFG (1 << 4)
-#define ADP5588_OVR_FLOW_IEN (1 << 3)
-#define ADP5588_K_LCK_IM (1 << 2)
-#define ADP5588_GPI_IEN (1 << 1)
-#define ADP5588_KE_IEN (1 << 0)
-
-/* Interrupt Status Register */
-#define ADP5588_CMP2_INT (1 << 5)
-#define ADP5588_CMP1_INT (1 << 4)
-#define ADP5588_OVR_FLOW_INT (1 << 3)
-#define ADP5588_K_LCK_INT (1 << 2)
-#define ADP5588_GPI_INT (1 << 1)
-#define ADP5588_KE_INT (1 << 0)
-
-/* Key Lock and Event Counter Register */
-#define ADP5588_K_LCK_EN (1 << 6)
-#define ADP5588_LCK21 0x30
-#define ADP5588_KEC 0xF
-
-#define ADP5588_MAXGPIO 18
-#define ADP5588_BANK(offs) ((offs) >> 3)
-#define ADP5588_BIT(offs) (1u << ((offs) & 0x7))
-
-/* Put one of these structures in i2c_board_info platform_data */
-
-#define ADP5588_KEYMAPSIZE 80
-
-#define GPI_PIN_ROW0 97
-#define GPI_PIN_ROW1 98
-#define GPI_PIN_ROW2 99
-#define GPI_PIN_ROW3 100
-#define GPI_PIN_ROW4 101
-#define GPI_PIN_ROW5 102
-#define GPI_PIN_ROW6 103
-#define GPI_PIN_ROW7 104
-#define GPI_PIN_COL0 105
-#define GPI_PIN_COL1 106
-#define GPI_PIN_COL2 107
-#define GPI_PIN_COL3 108
-#define GPI_PIN_COL4 109
-#define GPI_PIN_COL5 110
-#define GPI_PIN_COL6 111
-#define GPI_PIN_COL7 112
-#define GPI_PIN_COL8 113
-#define GPI_PIN_COL9 114
-
-#define GPI_PIN_ROW_BASE GPI_PIN_ROW0
-#define GPI_PIN_ROW_END GPI_PIN_ROW7
-#define GPI_PIN_COL_BASE GPI_PIN_COL0
-#define GPI_PIN_COL_END GPI_PIN_COL9
-
-#define GPI_PIN_BASE GPI_PIN_ROW_BASE
-#define GPI_PIN_END GPI_PIN_COL_END
-
-#define ADP5588_GPIMAPSIZE_MAX (GPI_PIN_END - GPI_PIN_BASE + 1)
-
-struct adp5588_gpi_map {
- unsigned short pin;
- unsigned short sw_evt;
-};
-
-struct adp5588_kpad_platform_data {
- int rows; /* Number of rows */
- int cols; /* Number of columns */
- const unsigned short *keymap; /* Pointer to keymap */
- unsigned short keymapsize; /* Keymap size */
- unsigned repeat:1; /* Enable key repeat */
- unsigned en_keylock:1; /* Enable Key Lock feature */
- unsigned short unlock_key1; /* Unlock Key 1 */
- unsigned short unlock_key2; /* Unlock Key 2 */
- const struct adp5588_gpi_map *gpimap;
- unsigned short gpimapsize;
- const struct adp5588_gpio_platform_data *gpio_data;
-};
-
-struct i2c_client; /* forward declaration */
-
-struct adp5588_gpio_platform_data {
- int gpio_start; /* GPIO Chip base # */
- const char *const *names;
- unsigned irq_base; /* interrupt base # */
- unsigned pullup_dis_mask; /* Pull-Up Disable Mask */
- int (*setup)(struct i2c_client *client,
- unsigned gpio, unsigned ngpio,
- void *context);
- int (*teardown)(struct i2c_client *client,
- unsigned gpio, unsigned ngpio,
- void *context);
- void *context;
-};
-
-#endif
diff --git a/include/linux/platform_data/cros_ec_commands.h b/include/linux/platform_data/cros_ec_commands.h
index 8b1b795867a1..5744a2d746aa 100644
--- a/include/linux/platform_data/cros_ec_commands.h
+++ b/include/linux/platform_data/cros_ec_commands.h
@@ -5724,8 +5724,21 @@ enum typec_control_command {
TYPEC_CONTROL_COMMAND_EXIT_MODES,
TYPEC_CONTROL_COMMAND_CLEAR_EVENTS,
TYPEC_CONTROL_COMMAND_ENTER_MODE,
+ TYPEC_CONTROL_COMMAND_TBT_UFP_REPLY,
+ TYPEC_CONTROL_COMMAND_USB_MUX_SET,
};
+/* Replies the AP may specify to the TBT EnterMode command as a UFP */
+enum typec_tbt_ufp_reply {
+ TYPEC_TBT_UFP_REPLY_NAK,
+ TYPEC_TBT_UFP_REPLY_ACK,
+};
+
+struct typec_usb_mux_set {
+ uint8_t mux_index; /* Index of the mux to set in the chain */
+ uint8_t mux_flags; /* USB_PD_MUX_*-encoded USB mux state to set */
+} __ec_align1;
+
struct ec_params_typec_control {
uint8_t port;
uint8_t command; /* enum typec_control_command */
@@ -5739,6 +5752,8 @@ struct ec_params_typec_control {
union {
uint32_t clear_events_mask;
uint8_t mode_to_enter; /* enum typec_mode */
+ uint8_t tbt_ufp_reply; /* enum typec_tbt_ufp_reply */
+ struct typec_usb_mux_set mux_params;
uint8_t placeholder[128];
};
} __ec_align1;
@@ -5817,6 +5832,9 @@ enum tcpc_cc_polarity {
#define PD_STATUS_EVENT_SOP_DISC_DONE BIT(0)
#define PD_STATUS_EVENT_SOP_PRIME_DISC_DONE BIT(1)
#define PD_STATUS_EVENT_HARD_RESET BIT(2)
+#define PD_STATUS_EVENT_DISCONNECTED BIT(3)
+#define PD_STATUS_EVENT_MUX_0_SET_DONE BIT(4)
+#define PD_STATUS_EVENT_MUX_1_SET_DONE BIT(5)
struct ec_params_typec_status {
uint8_t port;
diff --git a/include/linux/platform_data/cros_ec_proto.h b/include/linux/platform_data/cros_ec_proto.h
index 408b29ca4004..e43107e0bee1 100644
--- a/include/linux/platform_data/cros_ec_proto.h
+++ b/include/linux/platform_data/cros_ec_proto.h
@@ -169,6 +169,7 @@ struct cros_ec_device {
int event_size;
u32 host_event_wake_mask;
u32 last_resume_result;
+ u16 suspend_timeout_ms;
ktime_t last_event_time;
struct notifier_block notifier_ready;
diff --git a/include/linux/platform_data/dma-hsu.h b/include/linux/platform_data/dma-hsu.h
index c65b412b2b33..611bae193c1c 100644
--- a/include/linux/platform_data/dma-hsu.h
+++ b/include/linux/platform_data/dma-hsu.h
@@ -8,7 +8,7 @@
#ifndef _PLATFORM_DATA_DMA_HSU_H
#define _PLATFORM_DATA_DMA_HSU_H
-#include <linux/device.h>
+struct device;
struct hsu_dma_slave {
struct device *dma_dev;
diff --git a/include/linux/platform_data/emc2305.h b/include/linux/platform_data/emc2305.h
new file mode 100644
index 000000000000..54d672dd6f7d
--- /dev/null
+++ b/include/linux/platform_data/emc2305.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __LINUX_PLATFORM_DATA_EMC2305__
+#define __LINUX_PLATFORM_DATA_EMC2305__
+
+#define EMC2305_PWM_MAX 5
+
+/**
+ * struct emc2305_platform_data - EMC2305 driver platform data
+ * @max_state: maximum cooling state of the cooling device;
+ * @pwm_num: number of active channels;
+ * @pwm_separate: separate PWM settings for every channel;
+ * @pwm_min: array of minimum PWM per channel;
+ */
+struct emc2305_platform_data {
+ u8 max_state;
+ u8 pwm_num;
+ bool pwm_separate;
+ u8 pwm_min[EMC2305_PWM_MAX];
+};
+
+#endif
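Illustrative (made-up) board data for the new header; a system with three controlled fans, ten cooling states and a per-channel PWM floor might describe itself like this:

static struct emc2305_platform_data example_emc2305_pdata = {
	.max_state    = 10,			/* cooling states 0..10 */
	.pwm_num      = 3,			/* three fans wired up */
	.pwm_separate = false,			/* one setting for all channels */
	.pwm_min      = { 77, 77, 77, 0, 0 },	/* ~30% duty floor, placeholders */
};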
diff --git a/include/linux/platform_data/pca953x.h b/include/linux/platform_data/pca953x.h
index 4eb53e023997..96c1a14ab365 100644
--- a/include/linux/platform_data/pca953x.h
+++ b/include/linux/platform_data/pca953x.h
@@ -22,7 +22,7 @@ struct pca953x_platform_data {
int (*setup)(struct i2c_client *client,
unsigned gpio, unsigned ngpio,
void *context);
- int (*teardown)(struct i2c_client *client,
+ void (*teardown)(struct i2c_client *client,
unsigned gpio, unsigned ngpio,
void *context);
const char *const *names;
diff --git a/include/linux/platform_data/ssm2518.h b/include/linux/platform_data/ssm2518.h
deleted file mode 100644
index 3f9e632d6f63..000000000000
--- a/include/linux/platform_data/ssm2518.h
+++ /dev/null
@@ -1,21 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * SSM2518 amplifier audio driver
- *
- * Copyright 2013 Analog Devices Inc.
- * Author: Lars-Peter Clausen <lars@metafoo.de>
- */
-
-#ifndef __LINUX_PLATFORM_DATA_SSM2518_H__
-#define __LINUX_PLATFORM_DATA_SSM2518_H__
-
-/**
- * struct ssm2518_platform_data - Platform data for the ssm2518 driver
- * @enable_gpio: GPIO connected to the nSD pin. Set to -1 if the nSD pin is
- * hardwired.
- */
-struct ssm2518_platform_data {
- int enable_gpio;
-};
-
-#endif
diff --git a/include/linux/platform_data/tps68470.h b/include/linux/platform_data/tps68470.h
index 126d082c3f2e..e605a2cab07f 100644
--- a/include/linux/platform_data/tps68470.h
+++ b/include/linux/platform_data/tps68470.h
@@ -27,9 +27,14 @@ struct tps68470_regulator_platform_data {
const struct regulator_init_data *reg_init_data[TPS68470_NUM_REGULATORS];
};
-struct tps68470_clk_platform_data {
+struct tps68470_clk_consumer {
const char *consumer_dev_name;
const char *consumer_con_id;
};
+struct tps68470_clk_platform_data {
+ unsigned int n_consumers;
+ struct tps68470_clk_consumer consumers[];
+};
+
#endif
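Since the clock platform data now ends in a flexible array, a consumer has to size the allocation with struct_size() when building it at run time. A hedged sketch for two clock consumers (device names and con_ids are made up):

static int example_build_tps68470_clk_pdata(struct device *dev)
{
	struct tps68470_clk_platform_data *pdata;

	pdata = devm_kzalloc(dev, struct_size(pdata, consumers, 2),
			     GFP_KERNEL);
	if (!pdata)
		return -ENOMEM;

	pdata->n_consumers = 2;
	pdata->consumers[0].consumer_dev_name = "i2c-EXAMPLE:00";
	pdata->consumers[0].consumer_con_id   = "example-xvclk";
	pdata->consumers[1].consumer_dev_name = "i2c-EXAMPLE:01";
	pdata->consumers[1].consumer_con_id   = "example-xvclk";

	return 0;
}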
diff --git a/include/linux/platform_data/usb-s3c2410_udc.h b/include/linux/platform_data/usb-s3c2410_udc.h
index 07394819d03b..c0fbe1fe3426 100644
--- a/include/linux/platform_data/usb-s3c2410_udc.h
+++ b/include/linux/platform_data/usb-s3c2410_udc.h
@@ -22,12 +22,6 @@ enum s3c2410_udc_cmd_e {
struct s3c2410_udc_mach_info {
void (*udc_command)(enum s3c2410_udc_cmd_e);
void (*vbus_draw)(unsigned int ma);
-
- unsigned int pullup_pin;
- unsigned int pullup_pin_inverted;
-
- unsigned int vbus_pin;
- unsigned char vbus_pin_inverted;
};
extern void __init s3c24xx_udc_set_platdata(struct s3c2410_udc_mach_info *);
diff --git a/include/linux/platform_data/x86/asus-wmi.h b/include/linux/platform_data/x86/asus-wmi.h
index 98f2b2f20f3e..28234dc9fa6a 100644
--- a/include/linux/platform_data/x86/asus-wmi.h
+++ b/include/linux/platform_data/x86/asus-wmi.h
@@ -65,6 +65,7 @@
#define ASUS_WMI_DEVID_PANEL_OD 0x00050019
#define ASUS_WMI_DEVID_CAMERA 0x00060013
#define ASUS_WMI_DEVID_LID_FLIP 0x00060062
+#define ASUS_WMI_DEVID_LID_FLIP_ROG 0x00060077
/* Storage */
#define ASUS_WMI_DEVID_CARDREADER 0x00080013
@@ -78,6 +79,7 @@
#define ASUS_WMI_DEVID_THERMAL_CTRL 0x00110011
#define ASUS_WMI_DEVID_FAN_CTRL 0x00110012 /* deprecated */
#define ASUS_WMI_DEVID_CPU_FAN_CTRL 0x00110013
+#define ASUS_WMI_DEVID_GPU_FAN_CTRL 0x00110014
#define ASUS_WMI_DEVID_CPU_FAN_CURVE 0x00110024
#define ASUS_WMI_DEVID_GPU_FAN_CURVE 0x00110025
@@ -99,6 +101,15 @@
/* dgpu on/off */
#define ASUS_WMI_DEVID_DGPU 0x00090020
+/* gpu mux switch, 0 = dGPU, 1 = Optimus */
+#define ASUS_WMI_DEVID_GPU_MUX 0x00090016
+
+/* TUF laptop RGB modes/colours */
+#define ASUS_WMI_DEVID_TUF_RGB_MODE 0x00100056
+
+/* TUF laptop RGB power/state */
+#define ASUS_WMI_DEVID_TUF_RGB_STATE 0x00100057
+
/* DSTS masks */
#define ASUS_WMI_DSTS_STATUS_BIT 0x00000001
#define ASUS_WMI_DSTS_UNKNOWN_BIT 0x00000002
diff --git a/include/linux/platform_data/x86/nvidia-wmi-ec-backlight.h b/include/linux/platform_data/x86/nvidia-wmi-ec-backlight.h
new file mode 100644
index 000000000000..23d60130272c
--- /dev/null
+++ b/include/linux/platform_data/x86/nvidia-wmi-ec-backlight.h
@@ -0,0 +1,76 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
+ */
+
+#ifndef __PLATFORM_DATA_X86_NVIDIA_WMI_EC_BACKLIGHT_H
+#define __PLATFORM_DATA_X86_NVIDIA_WMI_EC_BACKLIGHT_H
+
+#define WMI_BRIGHTNESS_GUID "603E9613-EF25-4338-A3D0-C46177516DB7"
+
+/**
+ * enum wmi_brightness_method - WMI method IDs
+ * @WMI_BRIGHTNESS_METHOD_LEVEL: Get/Set EC brightness level status
+ * @WMI_BRIGHTNESS_METHOD_SOURCE: Get/Set EC Brightness Source
+ */
+enum wmi_brightness_method {
+ WMI_BRIGHTNESS_METHOD_LEVEL = 1,
+ WMI_BRIGHTNESS_METHOD_SOURCE = 2,
+ WMI_BRIGHTNESS_METHOD_MAX
+};
+
+/**
+ * enum wmi_brightness_mode - Operation mode for WMI-wrapped method
+ * @WMI_BRIGHTNESS_MODE_GET: Get the current brightness level/source.
+ * @WMI_BRIGHTNESS_MODE_SET: Set the brightness level.
+ * @WMI_BRIGHTNESS_MODE_GET_MAX_LEVEL: Get the maximum brightness level. This
+ * is only valid when the WMI method is
+ * %WMI_BRIGHTNESS_METHOD_LEVEL.
+ */
+enum wmi_brightness_mode {
+ WMI_BRIGHTNESS_MODE_GET = 0,
+ WMI_BRIGHTNESS_MODE_SET = 1,
+ WMI_BRIGHTNESS_MODE_GET_MAX_LEVEL = 2,
+ WMI_BRIGHTNESS_MODE_MAX
+};
+
+/**
+ * enum wmi_brightness_source - Backlight brightness control source selection
+ * @WMI_BRIGHTNESS_SOURCE_GPU: Backlight brightness is controlled by the GPU.
+ * @WMI_BRIGHTNESS_SOURCE_EC: Backlight brightness is controlled by the
+ * system's Embedded Controller (EC).
+ * @WMI_BRIGHTNESS_SOURCE_AUX: Backlight brightness is controlled over the
+ * DisplayPort AUX channel.
+ */
+enum wmi_brightness_source {
+ WMI_BRIGHTNESS_SOURCE_GPU = 1,
+ WMI_BRIGHTNESS_SOURCE_EC = 2,
+ WMI_BRIGHTNESS_SOURCE_AUX = 3,
+ WMI_BRIGHTNESS_SOURCE_MAX
+};
+
+/**
+ * struct wmi_brightness_args - arguments for the WMI-wrapped ACPI method
+ * @mode: Pass in an &enum wmi_brightness_mode value to select between
+ * getting or setting a value.
+ * @val: In parameter for value to set when using %WMI_BRIGHTNESS_MODE_SET
+ * mode. Not used in conjunction with %WMI_BRIGHTNESS_MODE_GET or
+ * %WMI_BRIGHTNESS_MODE_GET_MAX_LEVEL mode.
+ * @ret: Out parameter returning retrieved value when operating in
+ * %WMI_BRIGHTNESS_MODE_GET or %WMI_BRIGHTNESS_MODE_GET_MAX_LEVEL
+ * mode. Not used in %WMI_BRIGHTNESS_MODE_SET mode.
+ * @ignored: Padding; not used. The ACPI method expects a 24 byte params struct.
+ *
+ * This is the parameters structure for the WmiBrightnessNotify ACPI method as
+ * wrapped by WMI. The value passed in to @val or returned by @ret will be a
+ * brightness value when the WMI method ID is %WMI_BRIGHTNESS_METHOD_LEVEL, or
+ * an &enum wmi_brightness_source value with %WMI_BRIGHTNESS_METHOD_SOURCE.
+ */
+struct wmi_brightness_args {
+ u32 mode;
+ u32 val;
+ u32 ret;
+ u32 ignored[3];
+};
+
+#endif
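A sketch of how the consuming backlight driver would fill the parameter block for a "get maximum brightness level" query before handing it to the WMI method; buffer handling and the surrounding WMI call are assumed, not shown by this header.

struct wmi_brightness_args args = {
	.mode = WMI_BRIGHTNESS_MODE_GET_MAX_LEVEL,
	.val  = 0,
	.ret  = 0,
	/* .ignored[] stays zeroed; it only pads the 24-byte ACPI buffer. */
};

/* Pass &args as both input and output buffer when evaluating
 * WMI_BRIGHTNESS_METHOD_LEVEL; args.ret then holds the maximum level. */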
diff --git a/include/linux/platform_data/x86/pmc_atom.h b/include/linux/platform_data/x86/pmc_atom.h
index dd81f510e4cf..b8a701c77fd0 100644
--- a/include/linux/platform_data/x86/pmc_atom.h
+++ b/include/linux/platform_data/x86/pmc_atom.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Intel Atom SOC Power Management Controller Header File
- * Copyright (c) 2014, Intel Corporation.
+ * Intel Atom SoC Power Management Controller Header File
+ * Copyright (c) 2014-2015,2022 Intel Corporation.
*/
#ifndef PMC_ATOM_H
diff --git a/include/linux/platform_data/x86/simatic-ipc-base.h b/include/linux/platform_data/x86/simatic-ipc-base.h
index 39fefd48cf4d..57d6a10dfc9e 100644
--- a/include/linux/platform_data/x86/simatic-ipc-base.h
+++ b/include/linux/platform_data/x86/simatic-ipc-base.h
@@ -19,6 +19,7 @@
#define SIMATIC_IPC_DEVICE_427E 2
#define SIMATIC_IPC_DEVICE_127E 3
#define SIMATIC_IPC_DEVICE_227E 4
+#define SIMATIC_IPC_DEVICE_227G 5
struct simatic_ipc_platform {
u8 devmode;
diff --git a/include/linux/platform_data/x86/simatic-ipc.h b/include/linux/platform_data/x86/simatic-ipc.h
index f3b76b39776b..632320ec8f08 100644
--- a/include/linux/platform_data/x86/simatic-ipc.h
+++ b/include/linux/platform_data/x86/simatic-ipc.h
@@ -31,6 +31,8 @@ enum simatic_ipc_station_ids {
SIMATIC_IPC_IPC427E = 0x00000A01,
SIMATIC_IPC_IPC477E = 0x00000A02,
SIMATIC_IPC_IPC127E = 0x00000D01,
+ SIMATIC_IPC_IPC227G = 0x00000F01,
+ SIMATIC_IPC_IPC427G = 0x00001001,
};
static inline u32 simatic_ipc_get_station_id(u8 *data, int max_len)
diff --git a/include/linux/pm.h b/include/linux/pm.h
index 871c9c49ec9d..93cd34f00822 100644
--- a/include/linux/pm.h
+++ b/include/linux/pm.h
@@ -375,19 +375,20 @@ const struct dev_pm_ops name = { \
}
#ifdef CONFIG_PM
-#define _EXPORT_DEV_PM_OPS(name, suspend_fn, resume_fn, runtime_suspend_fn, \
- runtime_resume_fn, idle_fn, sec, ns) \
- _DEFINE_DEV_PM_OPS(name, suspend_fn, resume_fn, runtime_suspend_fn, \
- runtime_resume_fn, idle_fn); \
- __EXPORT_SYMBOL(name, sec, ns)
+#define _EXPORT_DEV_PM_OPS(name, sec, ns) \
+ const struct dev_pm_ops name; \
+ __EXPORT_SYMBOL(name, sec, ns); \
+ const struct dev_pm_ops name
#else
-#define _EXPORT_DEV_PM_OPS(name, suspend_fn, resume_fn, runtime_suspend_fn, \
- runtime_resume_fn, idle_fn, sec, ns) \
-static __maybe_unused _DEFINE_DEV_PM_OPS(__static_##name, suspend_fn, \
- resume_fn, runtime_suspend_fn, \
- runtime_resume_fn, idle_fn)
+#define _EXPORT_DEV_PM_OPS(name, sec, ns) \
+ static __maybe_unused const struct dev_pm_ops __static_##name
#endif
+#define EXPORT_DEV_PM_OPS(name) _EXPORT_DEV_PM_OPS(name, "", "")
+#define EXPORT_GPL_DEV_PM_OPS(name) _EXPORT_DEV_PM_OPS(name, "_gpl", "")
+#define EXPORT_NS_DEV_PM_OPS(name, ns) _EXPORT_DEV_PM_OPS(name, "", #ns)
+#define EXPORT_NS_GPL_DEV_PM_OPS(name, ns) _EXPORT_DEV_PM_OPS(name, "_gpl", #ns)
+
/*
* Use this if you want to use the same suspend and resume callbacks for suspend
* to RAM and hibernation.
@@ -399,13 +400,21 @@ static __maybe_unused _DEFINE_DEV_PM_OPS(__static_##name, suspend_fn, \
_DEFINE_DEV_PM_OPS(name, suspend_fn, resume_fn, NULL, NULL, NULL)
#define EXPORT_SIMPLE_DEV_PM_OPS(name, suspend_fn, resume_fn) \
- _EXPORT_DEV_PM_OPS(name, suspend_fn, resume_fn, NULL, NULL, NULL, "", "")
+ EXPORT_DEV_PM_OPS(name) = { \
+ SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \
+ }
#define EXPORT_GPL_SIMPLE_DEV_PM_OPS(name, suspend_fn, resume_fn) \
- _EXPORT_DEV_PM_OPS(name, suspend_fn, resume_fn, NULL, NULL, NULL, "_gpl", "")
+ EXPORT_GPL_DEV_PM_OPS(name) = { \
+ SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \
+ }
#define EXPORT_NS_SIMPLE_DEV_PM_OPS(name, suspend_fn, resume_fn, ns) \
- _EXPORT_DEV_PM_OPS(name, suspend_fn, resume_fn, NULL, NULL, NULL, "", #ns)
+ EXPORT_NS_DEV_PM_OPS(name, ns) = { \
+ SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \
+ }
#define EXPORT_NS_GPL_SIMPLE_DEV_PM_OPS(name, suspend_fn, resume_fn, ns) \
- _EXPORT_DEV_PM_OPS(name, suspend_fn, resume_fn, NULL, NULL, NULL, "_gpl", #ns)
+ EXPORT_NS_GPL_DEV_PM_OPS(name, ns) = { \
+ SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \
+ }
/* Deprecated. Use DEFINE_SIMPLE_DEV_PM_OPS() instead. */
#define SIMPLE_DEV_PM_OPS(name, suspend_fn, resume_fn) \
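With the reworked macros a driver now supplies the dev_pm_ops body directly at the export site, and the symbol is only emitted and exported when CONFIG_PM is set. A minimal sketch, with placeholder callbacks:

static int foo_suspend(struct device *dev)
{
	/* save device state */
	return 0;
}

static int foo_resume(struct device *dev)
{
	/* restore device state */
	return 0;
}

EXPORT_SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);

/* The driver then references it via pm_sleep_ptr(&foo_pm_ops). */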
diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h
index 0a41b2dcccad..9a8151a2bdea 100644
--- a/include/linux/pm_runtime.h
+++ b/include/linux/pm_runtime.h
@@ -40,17 +40,21 @@
resume_fn, idle_fn)
#define EXPORT_RUNTIME_DEV_PM_OPS(name, suspend_fn, resume_fn, idle_fn) \
- _EXPORT_DEV_PM_OPS(name, pm_runtime_force_suspend, pm_runtime_force_resume, \
- suspend_fn, resume_fn, idle_fn, "", "")
+ EXPORT_DEV_PM_OPS(name) = { \
+ RUNTIME_PM_OPS(suspend_fn, resume_fn, idle_fn) \
+ }
#define EXPORT_GPL_RUNTIME_DEV_PM_OPS(name, suspend_fn, resume_fn, idle_fn) \
- _EXPORT_DEV_PM_OPS(name, pm_runtime_force_suspend, pm_runtime_force_resume, \
- suspend_fn, resume_fn, idle_fn, "_gpl", "")
+ EXPORT_GPL_DEV_PM_OPS(name) = { \
+ RUNTIME_PM_OPS(suspend_fn, resume_fn, idle_fn) \
+ }
#define EXPORT_NS_RUNTIME_DEV_PM_OPS(name, suspend_fn, resume_fn, idle_fn, ns) \
- _EXPORT_DEV_PM_OPS(name, pm_runtime_force_suspend, pm_runtime_force_resume, \
- suspend_fn, resume_fn, idle_fn, "", #ns)
+ EXPORT_NS_DEV_PM_OPS(name, ns) = { \
+ RUNTIME_PM_OPS(suspend_fn, resume_fn, idle_fn) \
+ }
#define EXPORT_NS_GPL_RUNTIME_DEV_PM_OPS(name, suspend_fn, resume_fn, idle_fn, ns) \
- _EXPORT_DEV_PM_OPS(name, pm_runtime_force_suspend, pm_runtime_force_resume, \
- suspend_fn, resume_fn, idle_fn, "_gpl", #ns)
+ EXPORT_NS_GPL_DEV_PM_OPS(name, ns) = { \
+ RUNTIME_PM_OPS(suspend_fn, resume_fn, idle_fn) \
+ }
#ifdef CONFIG_PM
extern struct workqueue_struct *pm_wq;
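The runtime-PM export variants follow the same pattern; a hedged sketch with placeholder callbacks, referenced from the driver via pm_ptr():

static int foo_runtime_suspend(struct device *dev)
{
	/* gate clocks, drop power */
	return 0;
}

static int foo_runtime_resume(struct device *dev)
{
	/* restore power, ungate clocks */
	return 0;
}

EXPORT_GPL_RUNTIME_DEV_PM_OPS(foo_pm_ops, foo_runtime_suspend,
			      foo_runtime_resume, NULL);

/* e.g. .driver.pm = pm_ptr(&foo_pm_ops) in the driver definition. */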
diff --git a/include/linux/poison.h b/include/linux/poison.h
index d62ef5a6b4e9..2d3249eb0e62 100644
--- a/include/linux/poison.h
+++ b/include/linux/poison.h
@@ -81,4 +81,7 @@
/********** net/core/page_pool.c **********/
#define PP_SIGNATURE (0x40 + POISON_POINTER_DELTA)
+/********** kernel/bpf/ **********/
+#define BPF_PTR_POISON ((void *)(0xeB9FUL + POISON_POINTER_DELTA))
+
#endif
diff --git a/include/linux/posix_acl_xattr.h b/include/linux/posix_acl_xattr.h
index b6bd3eac2bcc..8163dd48c430 100644
--- a/include/linux/posix_acl_xattr.h
+++ b/include/linux/posix_acl_xattr.h
@@ -38,9 +38,6 @@ void posix_acl_fix_xattr_to_user(void *value, size_t size);
void posix_acl_getxattr_idmapped_mnt(struct user_namespace *mnt_userns,
const struct inode *inode,
void *value, size_t size);
-void posix_acl_setxattr_idmapped_mnt(struct user_namespace *mnt_userns,
- const struct inode *inode,
- void *value, size_t size);
#else
static inline void posix_acl_fix_xattr_from_user(void *value, size_t size)
{
@@ -54,18 +51,15 @@ posix_acl_getxattr_idmapped_mnt(struct user_namespace *mnt_userns,
size_t size)
{
}
-static inline void
-posix_acl_setxattr_idmapped_mnt(struct user_namespace *mnt_userns,
- const struct inode *inode, void *value,
- size_t size)
-{
-}
#endif
struct posix_acl *posix_acl_from_xattr(struct user_namespace *user_ns,
const void *value, size_t size);
int posix_acl_to_xattr(struct user_namespace *user_ns,
const struct posix_acl *acl, void *buffer, size_t size);
+struct posix_acl *vfs_set_acl_prepare(struct user_namespace *mnt_userns,
+ struct user_namespace *fs_userns,
+ const void *value, size_t size);
extern const struct xattr_handler posix_acl_access_xattr_handler;
extern const struct xattr_handler posix_acl_default_xattr_handler;
diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h
index cb380c1d9459..aa2c4a7c4826 100644
--- a/include/linux/power_supply.h
+++ b/include/linux/power_supply.h
@@ -374,9 +374,37 @@ struct power_supply_vbat_ri_table {
* These timers should be chosen to align with the typical discharge curve
* for the battery.
*
- * When the main CC/CV charging is complete the battery can optionally be
- * maintenance charged at the voltages from this table: a table of settings is
- * traversed using a slightly lower current and voltage than what is used for
+ * Ordinary CC/CV charging will stop charging when the charge current goes
+ * below charge_term_current_ua, and then restart it (if the device is still
+ * plugged into the charger) at charge_restart_voltage_uv. This happens in most
+ * consumer products because the power usage while connected to a charger is
+ * not zero, and devices are not manufactured to draw power directly from the
+ * charger: instead they will at all times drain the battery a little, like
+ * the power used in standby mode. This will over time give a charge graph
+ * such as this:
+ *
+ * Energy
+ * ^ ... ... ... ... ... ... ...
+ * | . . . . . . . . . . . . .
+ * | .. . .. . .. . .. . .. . .. . ..
+ * |. .. .. .. .. .. ..
+ * +-------------------------------------------------------------------> t
+ *
+ * Practically this means that the Li-ions are wandering back and forth in the
+ * battery and this causes degradation of the battery anode and cathode.
+ * To prolong the life of the battery, maintenance charging is applied after
+ * reaching charge_term_current_ua to hold up the charge in the battery while
+ * consuming power, thus lowering the wear on the battery:
+ *
+ * Energy
+ * ^ .......................................
+ * | . ......................
+ * | ..
+ * |.
+ * +-------------------------------------------------------------------> t
+ *
+ * Maintenance charging uses the voltages from this table: a table of settings
+ * is traversed using a slightly lower current and voltage than what is used for
* CC/CV charging. The maintenance charging will for safety reasons not go on
 * indefinitely: we lower the current and voltage with successive maintenance
* settings, then disable charging completely after we reach the last one,
@@ -385,14 +413,22 @@ struct power_supply_vbat_ri_table {
* ordinary CC/CV charging from there.
*
* As an example, a Samsung EB425161LA Lithium-Ion battery is CC/CV charged
- * at 900mA to 4340mV, then maintenance charged at 600mA and 4150mV for
- * 60 hours, then maintenance charged at 600mA and 4100mV for 200 hours.
+ * at 900mA to 4340mV, then maintenance charged at 600mA and 4150mV for up to
+ * 60 hours, then maintenance charged at 600mA and 4100mV for up to 200 hours.
* After this the charge cycle is restarted waiting for
* charge_restart_voltage_uv.
*
* For most mobile electronics this type of maintenance charging is enough for
* the user to disconnect the device and make use of it before both maintenance
- * charging cycles are complete.
+ * charging cycles are complete, if the current and voltage have been chosen
+ * appropriately. These need to be determined from battery discharge curves
+ * and expected standby current.
+ *
+ * If the voltage anyway drops to charge_restart_voltage_uv during maintenance
+ * charging, ordinary CC/CV charging is restarted. This can happen if the
+ * device is e.g. actively used during charging, so more current is drawn than
+ * the expected standby current. Overvoltage protection will also be applied
+ * as usual.
*/
struct power_supply_maintenance_charge_table {
int charge_current_max_ua;
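As an illustration of the table described above, the two maintenance stages from the EB425161LA example could be encoded roughly as follows; the member names other than charge_current_max_ua are assumed from the rest of this header (charge_voltage_max_uv, charge_safety_timer_minutes):

static const struct power_supply_maintenance_charge_table example_maint[] = {
	{
		.charge_current_max_ua       = 600000,	 /* 600 mA */
		.charge_voltage_max_uv       = 4150000,	 /* 4150 mV */
		.charge_safety_timer_minutes = 60 * 60,	 /* up to 60 hours */
	},
	{
		.charge_current_max_ua       = 600000,	 /* 600 mA */
		.charge_voltage_max_uv       = 4100000,	 /* 4100 mV */
		.charge_safety_timer_minutes = 200 * 60, /* up to 200 hours */
	},
};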
diff --git a/include/linux/prandom.h b/include/linux/prandom.h
index deace5fb4e62..78db003bc290 100644
--- a/include/linux/prandom.h
+++ b/include/linux/prandom.h
@@ -12,11 +12,13 @@
#include <linux/percpu.h>
#include <linux/random.h>
+/* Deprecated: use get_random_u32 instead. */
static inline u32 prandom_u32(void)
{
return get_random_u32();
}
+/* Deprecated: use get_random_bytes instead. */
static inline void prandom_bytes(void *buf, size_t nbytes)
{
return get_random_bytes(buf, nbytes);
@@ -37,17 +39,20 @@ void prandom_seed_full_state(struct rnd_state __percpu *pcpu_state);
* prandom_u32_max - returns a pseudo-random number in interval [0, ep_ro)
* @ep_ro: right open interval endpoint
*
- * Returns a pseudo-random number that is in interval [0, ep_ro). Note
- * that the result depends on PRNG being well distributed in [0, ~0U]
- * u32 space. Here we use maximally equidistributed combined Tausworthe
- * generator, that is, prandom_u32(). This is useful when requesting a
- * random index of an array containing ep_ro elements, for example.
+ * Returns a pseudo-random number that is in interval [0, ep_ro). This is
+ * useful when requesting a random index of an array containing ep_ro elements,
+ * for example. The result is somewhat biased when ep_ro is not a power of 2,
+ * so do not use this for cryptographic purposes.
*
* Returns: pseudo-random number in interval [0, ep_ro)
*/
static inline u32 prandom_u32_max(u32 ep_ro)
{
- return (u32)(((u64) prandom_u32() * ep_ro) >> 32);
+ if (__builtin_constant_p(ep_ro <= 1U << 8) && ep_ro <= 1U << 8)
+ return (get_random_u8() * ep_ro) >> 8;
+ if (__builtin_constant_p(ep_ro <= 1U << 16) && ep_ro <= 1U << 16)
+ return (get_random_u16() * ep_ro) >> 16;
+ return ((u64)get_random_u32() * ep_ro) >> 32;
}
/*
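The replacement maps a full-width random value onto [0, ep_ro) with a fixed-point multiply and shift rather than a modulo. A worked example, assuming ep_ro == 10 and a random byte of 200:

/*
 *	(200 * 10) >> 8  ==  2000 / 256  ==  7		always within [0, 10)
 *
 * Each of the 256 possible bytes lands in exactly one bucket, but since
 * 256 is not a multiple of 10, six buckets receive 26 inputs and four
 * receive 25 -- that is the "somewhat biased" caveat in the kerneldoc above.
 */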
diff --git a/include/linux/preempt.h b/include/linux/preempt.h
index b4381f255a5c..0df425bf9bd7 100644
--- a/include/linux/preempt.h
+++ b/include/linux/preempt.h
@@ -421,4 +421,46 @@ static inline void migrate_enable(void) { }
#endif /* CONFIG_SMP */
+/**
+ * preempt_disable_nested - Disable preemption inside a normally preempt disabled section
+ *
+ * Use for code which requires preemption protection inside a critical
+ * section which has preemption disabled implicitly on non-PREEMPT_RT
+ * enabled kernels, by e.g.:
+ * - holding a spinlock/rwlock
+ * - soft interrupt context
+ * - regular interrupt handlers
+ *
+ * On PREEMPT_RT enabled kernels spinlock/rwlock held sections, soft
+ * interrupt context and regular interrupt handlers are preemptible and
+ * only prevent migration. preempt_disable_nested() ensures that preemption
+ * is disabled for cases which require CPU local serialization even on
+ * PREEMPT_RT. For non-PREEMPT_RT kernels this is a NOP.
+ *
+ * The use cases are code sequences which are not serialized by a
+ * particular lock instance, e.g.:
+ * - seqcount write side critical sections where the seqcount is not
+ * associated to a particular lock and therefore the automatic
+ * protection mechanism does not work. This prevents a live lock
+ * against a preempting high priority reader.
+ * - RMW per CPU variable updates like vmstat.
+ */
+/* Macro to avoid header recursion hell vs. lockdep */
+#define preempt_disable_nested() \
+do { \
+ if (IS_ENABLED(CONFIG_PREEMPT_RT)) \
+ preempt_disable(); \
+ else \
+ lockdep_assert_preemption_disabled(); \
+} while (0)
+
+/**
+ * preempt_enable_nested - Undo the effect of preempt_disable_nested()
+ */
+static __always_inline void preempt_enable_nested(void)
+{
+ if (IS_ENABLED(CONFIG_PREEMPT_RT))
+ preempt_enable();
+}
+
#endif /* __LINUX_PREEMPT_H */
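A sketch of the intended usage pattern: a CPU-local read-modify-write that is implicitly protected by a spinlock on !PREEMPT_RT but needs explicit preemption protection on PREEMPT_RT (the counter is illustrative):

static void example_pcpu_add(long *pcp_counter, long delta)
{
	preempt_disable_nested();	/* lockdep check only on !PREEMPT_RT */
	*pcp_counter += delta;		/* CPU-local read-modify-write */
	preempt_enable_nested();
}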
diff --git a/include/linux/printk.h b/include/linux/printk.h
index cf7d666ab1f8..8c81806c2e99 100644
--- a/include/linux/printk.h
+++ b/include/linux/printk.h
@@ -169,8 +169,6 @@ extern void __printk_safe_exit(void);
#define printk_deferred_enter __printk_safe_enter
#define printk_deferred_exit __printk_safe_exit
-extern bool pr_flush(int timeout_ms, bool reset_on_progress);
-
/*
* Please don't use printk_ratelimit(), because it shares ratelimiting state
* with all other unrelated printk_ratelimit() callsites. Instead use
@@ -221,11 +219,6 @@ static inline void printk_deferred_exit(void)
{
}
-static inline bool pr_flush(int timeout_ms, bool reset_on_progress)
-{
- return true;
-}
-
static inline int printk_ratelimit(void)
{
return 0;
diff --git a/include/linux/property.h b/include/linux/property.h
index a5b429d623f6..117cc200c656 100644
--- a/include/linux/property.h
+++ b/include/linux/property.h
@@ -32,7 +32,7 @@ enum dev_dma_attr {
DEV_DMA_COHERENT,
};
-struct fwnode_handle *dev_fwnode(struct device *dev);
+struct fwnode_handle *dev_fwnode(const struct device *dev);
bool device_property_present(struct device *dev, const char *propname);
int device_property_read_u8_array(struct device *dev, const char *propname,
@@ -387,7 +387,7 @@ bool device_dma_supported(struct device *dev);
enum dev_dma_attr device_get_dma_attr(struct device *dev);
-const void *device_get_match_data(struct device *dev);
+const void *device_get_match_data(const struct device *dev);
int device_get_phy_mode(struct device *dev);
int fwnode_get_phy_mode(struct fwnode_handle *fwnode);
diff --git a/include/linux/pse-pd/pse.h b/include/linux/pse-pd/pse.h
new file mode 100644
index 000000000000..fb724c65c77b
--- /dev/null
+++ b/include/linux/pse-pd/pse.h
@@ -0,0 +1,129 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+// Copyright (c) 2022 Pengutronix, Oleksij Rempel <kernel@pengutronix.de>
+ */
+#ifndef _LINUX_PSE_CONTROLLER_H
+#define _LINUX_PSE_CONTROLLER_H
+
+#include <linux/ethtool.h>
+#include <linux/list.h>
+#include <uapi/linux/ethtool.h>
+
+struct phy_device;
+struct pse_controller_dev;
+
+/**
+ * struct pse_control_config - PSE control/channel configuration.
+ *
+ * @admin_cotrol: set PoDL PSE admin control as described in
+ * IEEE 802.3-2018 30.15.1.2.1 acPoDLPSEAdminControl
+ */
+struct pse_control_config {
+ enum ethtool_podl_pse_admin_state admin_cotrol;
+};
+
+/**
+ * struct pse_control_status - PSE control/channel status.
+ *
+ * @podl_admin_state: operational state of the PoDL PSE
+ * functions. IEEE 802.3-2018 30.15.1.1.2 aPoDLPSEAdminState
+ * @podl_pw_status: power detection status of the PoDL PSE.
+ * IEEE 802.3-2018 30.15.1.1.3 aPoDLPSEPowerDetectionStatus:
+ */
+struct pse_control_status {
+ enum ethtool_podl_pse_admin_state podl_admin_state;
+ enum ethtool_podl_pse_pw_d_status podl_pw_status;
+};
+
+/**
+ * struct pse_controller_ops - PSE controller driver callbacks
+ *
+ * @ethtool_get_status: get PSE control status for ethtool interface
+ * @ethtool_set_config: set PSE control configuration over ethtool interface
+ */
+struct pse_controller_ops {
+ int (*ethtool_get_status)(struct pse_controller_dev *pcdev,
+ unsigned long id, struct netlink_ext_ack *extack,
+ struct pse_control_status *status);
+ int (*ethtool_set_config)(struct pse_controller_dev *pcdev,
+ unsigned long id, struct netlink_ext_ack *extack,
+ const struct pse_control_config *config);
+};
+
+struct module;
+struct device_node;
+struct of_phandle_args;
+struct pse_control;
+
+/**
+ * struct pse_controller_dev - PSE controller entity that might
+ * provide multiple PSE controls
+ * @ops: a pointer to device specific struct pse_controller_ops
+ * @owner: kernel module of the PSE controller driver
+ * @list: internal list of PSE controller devices
+ * @pse_control_head: head of internal list of requested PSE controls
+ * @dev: corresponding driver model device struct
+ * @of_pse_n_cells: number of cells in PSE line specifiers
+ * @of_xlate: translation function to translate from specifier as found in the
+ * device tree to id as given to the PSE control ops
+ * @nr_lines: number of PSE controls in this controller device
+ * @lock: Mutex for serialization access to the PSE controller
+ */
+struct pse_controller_dev {
+ const struct pse_controller_ops *ops;
+ struct module *owner;
+ struct list_head list;
+ struct list_head pse_control_head;
+ struct device *dev;
+ int of_pse_n_cells;
+ int (*of_xlate)(struct pse_controller_dev *pcdev,
+ const struct of_phandle_args *pse_spec);
+ unsigned int nr_lines;
+ struct mutex lock;
+};
+
+#if IS_ENABLED(CONFIG_PSE_CONTROLLER)
+int pse_controller_register(struct pse_controller_dev *pcdev);
+void pse_controller_unregister(struct pse_controller_dev *pcdev);
+struct device;
+int devm_pse_controller_register(struct device *dev,
+ struct pse_controller_dev *pcdev);
+
+struct pse_control *of_pse_control_get(struct device_node *node);
+void pse_control_put(struct pse_control *psec);
+
+int pse_ethtool_get_status(struct pse_control *psec,
+ struct netlink_ext_ack *extack,
+ struct pse_control_status *status);
+int pse_ethtool_set_config(struct pse_control *psec,
+ struct netlink_ext_ack *extack,
+ const struct pse_control_config *config);
+
+#else
+
+static inline struct pse_control *of_pse_control_get(struct device_node *node)
+{
+ return ERR_PTR(-ENOENT);
+}
+
+static inline void pse_control_put(struct pse_control *psec)
+{
+}
+
+static inline int pse_ethtool_get_status(struct pse_control *psec,
+ struct netlink_ext_ack *extack,
+ struct pse_control_status *status)
+{
+ return -ENOTSUPP;
+}
+
+static inline int pse_ethtool_set_config(struct pse_control *psec,
+ struct netlink_ext_ack *extack,
+ const struct pse_control_config *config)
+{
+ return -ENOTSUPP;
+}
+
+#endif
+
+#endif
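A hedged sketch of a PSE controller driver wiring up these ops; the status values are assumed to come from the PoDL PSE ethtool UAPI this header builds on, and everything device-specific is a placeholder:

static int foo_pse_get_status(struct pse_controller_dev *pcdev,
			      unsigned long id,
			      struct netlink_ext_ack *extack,
			      struct pse_control_status *status)
{
	/* Pretend the single line is enabled and delivering power. */
	status->podl_admin_state = ETHTOOL_PODL_PSE_ADMIN_STATE_ENABLED;
	status->podl_pw_status = ETHTOOL_PODL_PSE_PW_D_STATUS_DELIVERING;
	return 0;
}

static const struct pse_controller_ops foo_pse_ops = {
	.ethtool_get_status = foo_pse_get_status,
};

/* In probe(): pcdev->ops = &foo_pse_ops; pcdev->owner = THIS_MODULE;
 * pcdev->nr_lines = 1; pcdev->dev = dev;
 * then devm_pse_controller_register(dev, pcdev). */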
diff --git a/include/linux/psi.h b/include/linux/psi.h
index dd74411ac21d..b029a847def1 100644
--- a/include/linux/psi.h
+++ b/include/linux/psi.h
@@ -7,6 +7,7 @@
#include <linux/sched.h>
#include <linux/poll.h>
#include <linux/cgroup-defs.h>
+#include <linux/cgroup.h>
struct seq_file;
struct css_set;
@@ -18,10 +19,6 @@ extern struct psi_group psi_system;
void psi_init(void);
-void psi_task_change(struct task_struct *task, int clear, int set);
-void psi_task_switch(struct task_struct *prev, struct task_struct *next,
- bool sleep);
-
void psi_memstall_enter(unsigned long *flags);
void psi_memstall_leave(unsigned long *flags);
@@ -34,9 +31,15 @@ __poll_t psi_trigger_poll(void **trigger_ptr, struct file *file,
poll_table *wait);
#ifdef CONFIG_CGROUPS
+static inline struct psi_group *cgroup_psi(struct cgroup *cgrp)
+{
+ return cgroup_ino(cgrp) == 1 ? &psi_system : cgrp->psi;
+}
+
int psi_cgroup_alloc(struct cgroup *cgrp);
void psi_cgroup_free(struct cgroup *cgrp);
void cgroup_move_task(struct task_struct *p, struct css_set *to);
+void psi_cgroup_restart(struct psi_group *group);
#endif
#else /* CONFIG_PSI */
@@ -58,6 +61,7 @@ static inline void cgroup_move_task(struct task_struct *p, struct css_set *to)
{
rcu_assign_pointer(p->cgroups, to);
}
+static inline void psi_cgroup_restart(struct psi_group *group) {}
#endif
#endif /* CONFIG_PSI */
diff --git a/include/linux/psi_types.h b/include/linux/psi_types.h
index c7fe7c089718..6e4372735068 100644
--- a/include/linux/psi_types.h
+++ b/include/linux/psi_types.h
@@ -16,13 +16,6 @@ enum psi_task_count {
NR_MEMSTALL,
NR_RUNNING,
/*
- * This can't have values other than 0 or 1 and could be
- * implemented as a bit flag. But for now we still have room
- * in the first cacheline of psi_group_cpu, and this way we
- * don't have to special case any state tracking for it.
- */
- NR_ONCPU,
- /*
* For IO and CPU stalls the presence of running/oncpu tasks
* in the domain means a partial rather than a full stall.
* For memory it's not so simple because of page reclaimers:
@@ -32,22 +25,27 @@ enum psi_task_count {
* threads and memstall ones.
*/
NR_MEMSTALL_RUNNING,
- NR_PSI_TASK_COUNTS = 5,
+ NR_PSI_TASK_COUNTS = 4,
};
/* Task state bitmasks */
#define TSK_IOWAIT (1 << NR_IOWAIT)
#define TSK_MEMSTALL (1 << NR_MEMSTALL)
#define TSK_RUNNING (1 << NR_RUNNING)
-#define TSK_ONCPU (1 << NR_ONCPU)
#define TSK_MEMSTALL_RUNNING (1 << NR_MEMSTALL_RUNNING)
+/* Only one task can be scheduled, no corresponding task count */
+#define TSK_ONCPU (1 << NR_PSI_TASK_COUNTS)
+
/* Resources that workloads could be stalled on */
enum psi_res {
PSI_IO,
PSI_MEM,
PSI_CPU,
- NR_PSI_RESOURCES = 3,
+#ifdef CONFIG_IRQ_TIME_ACCOUNTING
+ PSI_IRQ,
+#endif
+ NR_PSI_RESOURCES,
};
/*
@@ -63,11 +61,17 @@ enum psi_states {
PSI_MEM_FULL,
PSI_CPU_SOME,
PSI_CPU_FULL,
+#ifdef CONFIG_IRQ_TIME_ACCOUNTING
+ PSI_IRQ_FULL,
+#endif
/* Only per-CPU, to weigh the CPU in the global average: */
PSI_NONIDLE,
- NR_PSI_STATES = 7,
+ NR_PSI_STATES,
};
+/* Use one bit in the state mask to track TSK_ONCPU */
+#define PSI_ONCPU (1 << NR_PSI_STATES)
+
enum psi_aggregators {
PSI_AVGS = 0,
PSI_POLL,
@@ -147,6 +151,9 @@ struct psi_trigger {
};
struct psi_group {
+ struct psi_group *parent;
+ bool enabled;
+
/* Protects data used by the aggregator */
struct mutex avgs_lock;
@@ -188,6 +195,8 @@ struct psi_group {
#else /* CONFIG_PSI */
+#define NR_PSI_RESOURCES 0
+
struct psi_group { };
#endif /* CONFIG_PSI */
diff --git a/include/linux/pwm.h b/include/linux/pwm.h
index 9429930c5566..d70c6e5a839d 100644
--- a/include/linux/pwm.h
+++ b/include/linux/pwm.h
@@ -403,13 +403,9 @@ struct pwm_device *of_pwm_single_xlate(struct pwm_chip *pc,
const struct of_phandle_args *args);
struct pwm_device *pwm_get(struct device *dev, const char *con_id);
-struct pwm_device *of_pwm_get(struct device *dev, struct device_node *np,
- const char *con_id);
void pwm_put(struct pwm_device *pwm);
struct pwm_device *devm_pwm_get(struct device *dev, const char *con_id);
-struct pwm_device *devm_of_pwm_get(struct device *dev, struct device_node *np,
- const char *con_id);
struct pwm_device *devm_fwnode_pwm_get(struct device *dev,
struct fwnode_handle *fwnode,
const char *con_id);
@@ -497,14 +493,6 @@ static inline struct pwm_device *pwm_get(struct device *dev,
return ERR_PTR(-ENODEV);
}
-static inline struct pwm_device *of_pwm_get(struct device *dev,
- struct device_node *np,
- const char *con_id)
-{
- might_sleep();
- return ERR_PTR(-ENODEV);
-}
-
static inline void pwm_put(struct pwm_device *pwm)
{
might_sleep();
@@ -517,14 +505,6 @@ static inline struct pwm_device *devm_pwm_get(struct device *dev,
return ERR_PTR(-ENODEV);
}
-static inline struct pwm_device *devm_of_pwm_get(struct device *dev,
- struct device_node *np,
- const char *con_id)
-{
- might_sleep();
- return ERR_PTR(-ENODEV);
-}
-
static inline struct pwm_device *
devm_fwnode_pwm_get(struct device *dev, struct fwnode_handle *fwnode,
const char *con_id)
diff --git a/include/linux/random.h b/include/linux/random.h
index 3fec206487f6..08322f700cdc 100644
--- a/include/linux/random.h
+++ b/include/linux/random.h
@@ -38,6 +38,8 @@ static inline int unregister_random_vmfork_notifier(struct notifier_block *nb) {
#endif
void get_random_bytes(void *buf, size_t len);
+u8 get_random_u8(void);
+u16 get_random_u16(void);
u32 get_random_u32(void);
u64 get_random_u64(void);
static inline unsigned int get_random_int(void)
@@ -72,7 +74,8 @@ static inline unsigned long get_random_canary(void)
return get_random_long() & CANARY_MASK;
}
-int __init random_init(const char *command_line);
+void __init random_init_early(const char *command_line);
+void __init random_init(void);
bool rng_is_initialized(void);
int wait_for_random_bytes(void);
@@ -93,6 +96,8 @@ static inline int get_random_bytes_wait(void *buf, size_t nbytes)
*out = get_random_ ## name(); \
return 0; \
}
+declare_get_random_var_wait(u8, u8)
+declare_get_random_var_wait(u16, u16)
declare_get_random_var_wait(u32, u32)
declare_get_random_var_wait(u64, u32)
declare_get_random_var_wait(int, unsigned int)
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index f527f27e6438..08605ce7379d 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -42,7 +42,31 @@ void call_rcu(struct rcu_head *head, rcu_callback_t func);
void rcu_barrier_tasks(void);
void rcu_barrier_tasks_rude(void);
void synchronize_rcu(void);
+
+struct rcu_gp_oldstate;
unsigned long get_completed_synchronize_rcu(void);
+void get_completed_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp);
+
+// Maximum number of unsigned long values corresponding to
+// not-yet-completed RCU grace periods.
+#define NUM_ACTIVE_RCU_POLL_OLDSTATE 2
+
+/**
+ * same_state_synchronize_rcu - Are two old-state values identical?
+ * @oldstate1: First old-state value.
+ * @oldstate2: Second old-state value.
+ *
+ * The two old-state values must have been obtained from either
+ * get_state_synchronize_rcu(), start_poll_synchronize_rcu(), or
+ * get_completed_synchronize_rcu(). Returns @true if the two values are
+ * identical and @false otherwise. This allows structures whose lifetimes
+ * are tracked by old-state values to push these values to a list header,
+ * allowing those structures to be slightly smaller.
+ */
+static inline bool same_state_synchronize_rcu(unsigned long oldstate1, unsigned long oldstate2)
+{
+ return oldstate1 == oldstate2;
+}
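A sketch of how the polled grace-period API is used together with the new comparison helper; the structure and call sites are illustrative:

struct example_obj {
	unsigned long rcu_cookie;	/* grace-period cookie */
	/* ... payload ... */
};

static void example_retire(struct example_obj *obj)
{
	/* Remember which grace period must elapse before reuse. */
	obj->rcu_cookie = start_poll_synchronize_rcu();
}

static bool example_can_reuse(struct example_obj *obj)
{
	/* Cheap check; no blocking, no grace-period forcing. */
	return poll_state_synchronize_rcu(obj->rcu_cookie);
}

/* Objects whose cookies satisfy same_state_synchronize_rcu() can share a
 * single stored cookie, which is the space saving the kerneldoc mentions. */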
#ifdef CONFIG_PREEMPT_RCU
@@ -496,13 +520,21 @@ do { \
* against NULL. Although rcu_access_pointer() may also be used in cases
* where update-side locks prevent the value of the pointer from changing,
* you should instead use rcu_dereference_protected() for this use case.
+ * Within an RCU read-side critical section, there is little reason to
+ * use rcu_access_pointer().
+ *
+ * It is usually best to test the rcu_access_pointer() return value
+ * directly in order to avoid accidental dereferences being introduced
+ * by later inattentive changes. In other words, assigning the
+ * rcu_access_pointer() return value to a local variable results in an
+ * accident waiting to happen.
*
* It is also permissible to use rcu_access_pointer() when read-side
- * access to the pointer was removed at least one grace period ago, as
- * is the case in the context of the RCU callback that is freeing up
- * the data, or after a synchronize_rcu() returns. This can be useful
- * when tearing down multi-linked structures after a grace period
- * has elapsed.
+ * access to the pointer was removed at least one grace period ago, as is
+ * the case in the context of the RCU callback that is freeing up the data,
+ * or after a synchronize_rcu() returns. This can be useful when tearing
+ * down multi-linked structures after a grace period has elapsed. However,
+ * rcu_dereference_protected() is normally preferred for this use case.
*/
#define rcu_access_pointer(p) __rcu_access_pointer((p), __UNIQUE_ID(rcu), __rcu)
diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h
index 62815c0a2dce..768196a5f39d 100644
--- a/include/linux/rcutiny.h
+++ b/include/linux/rcutiny.h
@@ -14,25 +14,75 @@
#include <asm/param.h> /* for HZ */
+struct rcu_gp_oldstate {
+ unsigned long rgos_norm;
+};
+
+// Maximum number of rcu_gp_oldstate values corresponding to
+// not-yet-completed RCU grace periods.
+#define NUM_ACTIVE_RCU_POLL_FULL_OLDSTATE 2
+
+/*
+ * Are the two oldstate values the same? See the Tree RCU version for
+ * docbook header.
+ */
+static inline bool same_state_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp1,
+ struct rcu_gp_oldstate *rgosp2)
+{
+ return rgosp1->rgos_norm == rgosp2->rgos_norm;
+}
+
unsigned long get_state_synchronize_rcu(void);
+
+static inline void get_state_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp)
+{
+ rgosp->rgos_norm = get_state_synchronize_rcu();
+}
+
unsigned long start_poll_synchronize_rcu(void);
+
+static inline void start_poll_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp)
+{
+ rgosp->rgos_norm = start_poll_synchronize_rcu();
+}
+
bool poll_state_synchronize_rcu(unsigned long oldstate);
+static inline bool poll_state_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp)
+{
+ return poll_state_synchronize_rcu(rgosp->rgos_norm);
+}
+
static inline void cond_synchronize_rcu(unsigned long oldstate)
{
might_sleep();
}
+static inline void cond_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp)
+{
+ cond_synchronize_rcu(rgosp->rgos_norm);
+}
+
static inline unsigned long start_poll_synchronize_rcu_expedited(void)
{
return start_poll_synchronize_rcu();
}
+static inline void start_poll_synchronize_rcu_expedited_full(struct rcu_gp_oldstate *rgosp)
+{
+ rgosp->rgos_norm = start_poll_synchronize_rcu_expedited();
+}
+
static inline void cond_synchronize_rcu_expedited(unsigned long oldstate)
{
cond_synchronize_rcu(oldstate);
}
+static inline void cond_synchronize_rcu_expedited_full(struct rcu_gp_oldstate *rgosp)
+{
+ cond_synchronize_rcu_expedited(rgosp->rgos_norm);
+}
+
extern void rcu_barrier(void);
static inline void synchronize_rcu_expedited(void)
diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h
index 47eaa4cb0df7..5efb51486e8a 100644
--- a/include/linux/rcutree.h
+++ b/include/linux/rcutree.h
@@ -40,12 +40,52 @@ bool rcu_eqs_special_set(int cpu);
void rcu_momentary_dyntick_idle(void);
void kfree_rcu_scheduler_running(void);
bool rcu_gp_might_be_stalled(void);
+
+struct rcu_gp_oldstate {
+ unsigned long rgos_norm;
+ unsigned long rgos_exp;
+};
+
+// Maximum number of rcu_gp_oldstate values corresponding to
+// not-yet-completed RCU grace periods.
+#define NUM_ACTIVE_RCU_POLL_FULL_OLDSTATE 4
+
+/**
+ * same_state_synchronize_rcu_full - Are two old-state values identical?
+ * @rgosp1: First old-state value.
+ * @rgosp2: Second old-state value.
+ *
+ * The two old-state values must have been obtained from either
+ * get_state_synchronize_rcu_full(), start_poll_synchronize_rcu_full(),
+ * or get_completed_synchronize_rcu_full(). Returns @true if the two
+ * values are identical and @false otherwise. This allows structures
+ * whose lifetimes are tracked by old-state values to push these values
+ * to a list header, allowing those structures to be slightly smaller.
+ *
+ * Note that equality is judged on a bitwise basis, so that an
+ * @rcu_gp_oldstate structure with an already-completed state in one field
+ * will compare not-equal to a structure with an already-completed state
+ * in the other field. After all, the @rcu_gp_oldstate structure is opaque
+ * so how did such a situation come to pass in the first place?
+ */
+static inline bool same_state_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp1,
+ struct rcu_gp_oldstate *rgosp2)
+{
+ return rgosp1->rgos_norm == rgosp2->rgos_norm && rgosp1->rgos_exp == rgosp2->rgos_exp;
+}
+
unsigned long start_poll_synchronize_rcu_expedited(void);
+void start_poll_synchronize_rcu_expedited_full(struct rcu_gp_oldstate *rgosp);
void cond_synchronize_rcu_expedited(unsigned long oldstate);
+void cond_synchronize_rcu_expedited_full(struct rcu_gp_oldstate *rgosp);
unsigned long get_state_synchronize_rcu(void);
+void get_state_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp);
unsigned long start_poll_synchronize_rcu(void);
+void start_poll_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp);
bool poll_state_synchronize_rcu(unsigned long oldstate);
+bool poll_state_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp);
void cond_synchronize_rcu(unsigned long oldstate);
+void cond_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp);
bool rcu_is_idle_cpu(int cpu);
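
For orientation, a minimal consumer of the new full-state polled grace-period API might look like the sketch below; struct my_obj, its gp_snap field and the surrounding helpers are hypothetical, only the *_full() calls come from the hunks above.

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct my_obj {
	struct rcu_gp_oldstate gp_snap;	/* hypothetical field */
	/* ... payload ... */
};

/* Record the grace-period state when the object is logically retired. */
static void my_obj_retire(struct my_obj *p)
{
	get_state_synchronize_rcu_full(&p->gp_snap);
}

/* Sleep only if the recorded grace period has not yet completed. */
static void my_obj_free(struct my_obj *p)
{
	cond_synchronize_rcu_full(&p->gp_snap);
	kfree(p);
}
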
diff --git a/include/linux/reboot.h b/include/linux/reboot.h
index e5d9ef886179..2b6bb593be5b 100644
--- a/include/linux/reboot.h
+++ b/include/linux/reboot.h
@@ -106,6 +106,14 @@ enum sys_off_mode {
SYS_OFF_MODE_POWER_OFF,
/**
+ * @SYS_OFF_MODE_RESTART_PREPARE:
+ *
+ * Handlers prepare the system to be restarted. Handlers are
+ * allowed to sleep.
+ */
+ SYS_OFF_MODE_RESTART_PREPARE,
+
+ /**
* @SYS_OFF_MODE_RESTART:
*
* Handlers restart system. Handlers are disallowed to sleep.
diff --git a/include/linux/regmap.h b/include/linux/regmap.h
index 7cf2157134ac..ca3434dca3a0 100644
--- a/include/linux/regmap.h
+++ b/include/linux/regmap.h
@@ -311,6 +311,8 @@ typedef void (*regmap_unlock)(void *);
* This field is a duplicate of a similar file in
* 'struct regmap_bus' and serves exact same purpose.
* Use it only for "no-bus" cases.
+ * @io_port: Support IO port accessors. Makes sense only when MMIO vs. IO port
+ * access can be distinguished.
* @max_register: Optional, specifies the maximum valid register address.
* @wr_table: Optional, points to a struct regmap_access_table specifying
* valid ranges for write access.
@@ -399,6 +401,7 @@ struct regmap_config {
size_t max_raw_write;
bool fast_io;
+ bool io_port;
unsigned int max_register;
const struct regmap_access_table *wr_table;
@@ -489,8 +492,12 @@ typedef int (*regmap_hw_read)(void *context,
void *val_buf, size_t val_size);
typedef int (*regmap_hw_reg_read)(void *context, unsigned int reg,
unsigned int *val);
+typedef int (*regmap_hw_reg_noinc_read)(void *context, unsigned int reg,
+ void *val, size_t val_count);
typedef int (*regmap_hw_reg_write)(void *context, unsigned int reg,
unsigned int val);
+typedef int (*regmap_hw_reg_noinc_write)(void *context, unsigned int reg,
+ const void *val, size_t val_count);
typedef int (*regmap_hw_reg_update_bits)(void *context, unsigned int reg,
unsigned int mask, unsigned int val);
typedef struct regmap_async *(*regmap_hw_async_alloc)(void);
@@ -511,6 +518,8 @@ typedef void (*regmap_hw_free_context)(void *context);
* must serialise with respect to non-async I/O.
* @reg_write: Write a single register value to the given register address. This
* write operation has to complete when returning from the function.
+ * @reg_noinc_write: Write multiple register values to the same register. This
+ * write operation has to complete when returning from the function.
* @reg_update_bits: Update bits operation to be used against volatile
* registers, intended for devices supporting some mechanism
* for setting clearing bits without having to
@@ -538,9 +547,11 @@ struct regmap_bus {
regmap_hw_gather_write gather_write;
regmap_hw_async_write async_write;
regmap_hw_reg_write reg_write;
+ regmap_hw_reg_noinc_write reg_noinc_write;
regmap_hw_reg_update_bits reg_update_bits;
regmap_hw_read read;
regmap_hw_reg_read reg_read;
+ regmap_hw_reg_noinc_read reg_noinc_read;
regmap_hw_free_context free_context;
regmap_hw_async_alloc async_alloc;
u8 read_flag_mask;
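
As a rough sketch of how a bus might wire up the new no-increment accessors, the callbacks below simply forward to device-specific FIFO helpers; the foo_* names are hypothetical and only the reg_noinc_read/reg_noinc_write fields come from the patch.

#include <linux/regmap.h>

static int foo_reg_noinc_write(void *context, unsigned int reg,
			       const void *val, size_t val_count)
{
	/* foo_fifo_write() is a hypothetical device helper. */
	return foo_fifo_write(context, reg, val, val_count);
}

static int foo_reg_noinc_read(void *context, unsigned int reg,
			      void *val, size_t val_count)
{
	/* foo_fifo_read() is a hypothetical device helper. */
	return foo_fifo_read(context, reg, val, val_count);
}

static const struct regmap_bus foo_regmap_bus = {
	.reg_noinc_write = foo_reg_noinc_write,
	.reg_noinc_read = foo_reg_noinc_read,
	/* .reg_read, .reg_write and the rest omitted for brevity */
};
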
diff --git a/include/linux/regulator/consumer.h b/include/linux/regulator/consumer.h
index bc6cda706d1f..ee3b4a014611 100644
--- a/include/linux/regulator/consumer.h
+++ b/include/linux/regulator/consumer.h
@@ -207,6 +207,8 @@ struct regulator *__must_check regulator_get_optional(struct device *dev,
const char *id);
struct regulator *__must_check devm_regulator_get_optional(struct device *dev,
const char *id);
+int devm_regulator_get_enable(struct device *dev, const char *id);
+int devm_regulator_get_enable_optional(struct device *dev, const char *id);
void regulator_put(struct regulator *regulator);
void devm_regulator_put(struct regulator *regulator);
@@ -244,12 +246,15 @@ int __must_check regulator_bulk_get(struct device *dev, int num_consumers,
struct regulator_bulk_data *consumers);
int __must_check devm_regulator_bulk_get(struct device *dev, int num_consumers,
struct regulator_bulk_data *consumers);
+void devm_regulator_bulk_put(struct regulator_bulk_data *consumers);
int __must_check devm_regulator_bulk_get_const(
struct device *dev, int num_consumers,
const struct regulator_bulk_data *in_consumers,
struct regulator_bulk_data **out_consumers);
int __must_check regulator_bulk_enable(int num_consumers,
struct regulator_bulk_data *consumers);
+int devm_regulator_bulk_get_enable(struct device *dev, int num_consumers,
+ const char * const *id);
int regulator_bulk_disable(int num_consumers,
struct regulator_bulk_data *consumers);
int regulator_bulk_force_disable(int num_consumers,
@@ -354,6 +359,17 @@ devm_regulator_get_exclusive(struct device *dev, const char *id)
return ERR_PTR(-ENODEV);
}
+static inline int devm_regulator_get_enable(struct device *dev, const char *id)
+{
+ return -ENODEV;
+}
+
+static inline int devm_regulator_get_enable_optional(struct device *dev,
+ const char *id)
+{
+ return -ENODEV;
+}
+
static inline struct regulator *__must_check
regulator_get_optional(struct device *dev, const char *id)
{
@@ -375,6 +391,10 @@ static inline void devm_regulator_put(struct regulator *regulator)
{
}
+static inline void devm_regulator_bulk_put(struct regulator_bulk_data *consumers)
+{
+}
+
static inline int regulator_register_supply_alias(struct device *dev,
const char *id,
struct device *alias_dev,
@@ -465,6 +485,13 @@ static inline int regulator_bulk_enable(int num_consumers,
return 0;
}
+static inline int devm_regulator_bulk_get_enable(struct device *dev,
+ int num_consumers,
+ const char * const *id)
+{
+ return 0;
+}
+
static inline int regulator_bulk_disable(int num_consumers,
struct regulator_bulk_data *consumers)
{
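
The devres-managed get-and-enable helpers are meant for supplies that stay on for the whole lifetime of the driver; a probe() using them might look roughly like this (the foo_* names and supply names are hypothetical):

#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>

static const char * const foo_supplies[] = { "vdd", "vddio" };

static int foo_probe(struct platform_device *pdev)
{
	int ret;

	/* Single always-on supply; devres disables and puts it on unbind. */
	ret = devm_regulator_get_enable(&pdev->dev, "vcc");
	if (ret)
		return ret;

	/* Same pattern for a set of supplies that are only ever enabled. */
	return devm_regulator_bulk_get_enable(&pdev->dev,
					      ARRAY_SIZE(foo_supplies),
					      foo_supplies);
}
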
diff --git a/include/linux/regulator/gpio-regulator.h b/include/linux/regulator/gpio-regulator.h
index fdeb312cdabd..c223e50ff9f7 100644
--- a/include/linux/regulator/gpio-regulator.h
+++ b/include/linux/regulator/gpio-regulator.h
@@ -42,6 +42,7 @@ struct gpio_regulator_state {
/**
* struct gpio_regulator_config - config structure
* @supply_name: Name of the regulator supply
+ * @input_supply: Name of the input regulator supply
* @enabled_at_boot: Whether regulator has been enabled at
* boot or not. 1 = Yes, 0 = No
* This is used to keep the regulator at
@@ -62,6 +63,7 @@ struct gpio_regulator_state {
*/
struct gpio_regulator_config {
const char *supply_name;
+ const char *input_supply;
unsigned enabled_at_boot:1;
unsigned startup_delay;
diff --git a/include/linux/regulator/mt6331-regulator.h b/include/linux/regulator/mt6331-regulator.h
new file mode 100644
index 000000000000..2801a9879c14
--- /dev/null
+++ b/include/linux/regulator/mt6331-regulator.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022 Collabora Ltd.
+ * Author: AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
+ */
+
+#ifndef __LINUX_REGULATOR_MT6331_H
+#define __LINUX_REGULATOR_MT6331_H
+
+enum {
+ /* BUCK */
+ MT6331_ID_VDVFS11 = 0,
+ MT6331_ID_VDVFS12,
+ MT6331_ID_VDVFS13,
+ MT6331_ID_VDVFS14,
+ MT6331_ID_VCORE2,
+ MT6331_ID_VIO18,
+ /* LDO */
+ MT6331_ID_VTCXO1,
+ MT6331_ID_VTCXO2,
+ MT6331_ID_AVDD32_AUD,
+ MT6331_ID_VAUXA32,
+ MT6331_ID_VCAMA,
+ MT6331_ID_VIO28,
+ MT6331_ID_VCAM_AF,
+ MT6331_ID_VMC,
+ MT6331_ID_VMCH,
+ MT6331_ID_VEMC33,
+ MT6331_ID_VGP1,
+ MT6331_ID_VSIM1,
+ MT6331_ID_VSIM2,
+ MT6331_ID_VMIPI,
+ MT6331_ID_VIBR,
+ MT6331_ID_VGP4,
+ MT6331_ID_VCAMD,
+ MT6331_ID_VUSB10,
+ MT6331_ID_VCAM_IO,
+ MT6331_ID_VSRAM_DVFS1,
+ MT6331_ID_VGP2,
+ MT6331_ID_VGP3,
+ MT6331_ID_VRTC,
+ MT6331_ID_VDIG18,
+ MT6331_ID_VREG_MAX
+};
+
+#endif /* __LINUX_REGULATOR_MT6331_H */
diff --git a/include/linux/regulator/mt6332-regulator.h b/include/linux/regulator/mt6332-regulator.h
new file mode 100644
index 000000000000..af5e3ed31029
--- /dev/null
+++ b/include/linux/regulator/mt6332-regulator.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022 Collabora Ltd.
+ * Author: AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
+ */
+
+#ifndef __LINUX_REGULATOR_MT6332_H
+#define __LINUX_REGULATOR_MT6332_H
+
+enum {
+ /* BUCK */
+ MT6332_ID_VDRAM = 0,
+ MT6332_ID_VDVFS2,
+ MT6332_ID_VPA,
+ MT6332_ID_VRF1,
+ MT6332_ID_VRF2,
+ MT6332_ID_VSBST,
+ /* LDO */
+ MT6332_ID_VAUXB32,
+ MT6332_ID_VBIF28,
+ MT6332_ID_VDIG18,
+ MT6332_ID_VSRAM_DVFS2,
+ MT6332_ID_VUSB33,
+ MT6332_ID_VREG_MAX
+};
+
+#endif /* __LINUX_REGULATOR_MT6332_H */
diff --git a/include/linux/remoteproc.h b/include/linux/remoteproc.h
index aea79c77db0f..fe8978eb69f1 100644
--- a/include/linux/remoteproc.h
+++ b/include/linux/remoteproc.h
@@ -490,6 +490,20 @@ struct rproc_dump_segment {
};
/**
+ * enum rproc_features - features supported
+ *
+ * @RPROC_FEAT_ATTACH_ON_RECOVERY: The remote processor does not need help
+ * from Linux to recover, such as firmware
+ * loading. Linux just needs to attach after
+ * recovery.
+ */
+
+enum rproc_features {
+ RPROC_FEAT_ATTACH_ON_RECOVERY,
+ RPROC_MAX_FEATURES,
+};
+
+/**
* struct rproc - represents a physical remote processor device
* @node: list node of this rproc object
* @domain: iommu domain
@@ -530,6 +544,7 @@ struct rproc_dump_segment {
* @elf_machine: firmware ELF machine
* @cdev: character device of the rproc
* @cdev_put_on_release: flag to indicate if remoteproc should be shutdown on @char_dev release
+ * @features: indicate remoteproc features
*/
struct rproc {
struct list_head node;
@@ -570,6 +585,7 @@ struct rproc {
u16 elf_machine;
struct cdev cdev;
bool cdev_put_on_release;
+ DECLARE_BITMAP(features, RPROC_MAX_FEATURES);
};
/**
@@ -616,9 +632,8 @@ struct rproc_vring {
/**
* struct rproc_vdev - remoteproc state for a supported virtio device
- * @refcount: reference counter for the vdev and vring allocations
* @subdev: handle for registering the vdev as a rproc subdevice
- * @dev: device struct used for reference count semantics
+ * @pdev: remoteproc virtio platform device
* @id: virtio device id (as in virtio_ids.h)
* @node: list node
* @rproc: the rproc handle
@@ -627,10 +642,9 @@ struct rproc_vring {
* @index: vdev position versus other vdev declared in resource table
*/
struct rproc_vdev {
- struct kref refcount;
struct rproc_subdev subdev;
- struct device dev;
+ struct platform_device *pdev;
unsigned int id;
struct list_head node;
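
The new @features bitmap can be queried and set with ordinary bit operations; a hedged sketch follows (the foo_* wrappers are hypothetical, and platform drivers may well use dedicated helpers instead):

#include <linux/bitops.h>
#include <linux/remoteproc.h>

static void foo_rproc_mark_attach_on_recovery(struct rproc *rproc)
{
	/* Advertise that the remote side recovers without firmware reload. */
	set_bit(RPROC_FEAT_ATTACH_ON_RECOVERY, rproc->features);
}

static bool foo_rproc_can_attach_on_recovery(struct rproc *rproc)
{
	return test_bit(RPROC_FEAT_ATTACH_ON_RECOVERY, rproc->features);
}
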
diff --git a/include/linux/resctrl.h b/include/linux/resctrl.h
index 21deb5212bbd..0cf5b20c6ddf 100644
--- a/include/linux/resctrl.h
+++ b/include/linux/resctrl.h
@@ -15,6 +15,9 @@ int proc_resctrl_show(struct seq_file *m,
#endif
+/* max value for struct rdt_domain's mbps_val */
+#define MBA_MAX_MBPS U32_MAX
+
/**
* enum resctrl_conf_type - The type of configuration.
* @CDP_NONE: No prioritisation, both code and data are controlled or monitored.
@@ -29,6 +32,16 @@ enum resctrl_conf_type {
#define CDP_NUM_TYPES (CDP_DATA + 1)
+/*
+ * Event IDs, the values match those used to program IA32_QM_EVTSEL before
+ * reading IA32_QM_CTR on RDT systems.
+ */
+enum resctrl_event_id {
+ QOS_L3_OCCUP_EVENT_ID = 0x01,
+ QOS_L3_MBM_TOTAL_EVENT_ID = 0x02,
+ QOS_L3_MBM_LOCAL_EVENT_ID = 0x03,
+};
+
/**
* struct resctrl_staged_config - parsed configuration to be applied
* @new_ctrl: new ctrl value to be loaded
@@ -53,6 +66,9 @@ struct resctrl_staged_config {
* @cqm_work_cpu: worker CPU for CQM h/w counters
* @plr: pseudo-locked region (if any) associated with domain
* @staged_config: parsed configuration to be applied
+ * @mbps_val: When mba_sc is enabled, this holds the array of user
+ * specified control values for mba_sc in MBps, indexed
+ * by closid
*/
struct rdt_domain {
struct list_head list;
@@ -67,6 +83,7 @@ struct rdt_domain {
int cqm_work_cpu;
struct pseudo_lock_region *plr;
struct resctrl_staged_config staged_config[CDP_NUM_TYPES];
+ u32 *mbps_val;
};
/**
@@ -130,8 +147,6 @@ struct resctrl_schema;
/**
* struct rdt_resource - attributes of a resctrl resource
* @rid: The index of the resource
- * @alloc_enabled: Is allocation enabled on this machine
- * @mon_enabled: Is monitoring enabled for this feature
* @alloc_capable: Is allocation available on this machine
* @mon_capable: Is monitor feature available on this machine
* @num_rmid: Number of RMIDs available
@@ -150,8 +165,6 @@ struct resctrl_schema;
*/
struct rdt_resource {
int rid;
- bool alloc_enabled;
- bool mon_enabled;
bool alloc_capable;
bool mon_capable;
int num_rmid;
@@ -194,7 +207,50 @@ struct resctrl_schema {
/* The number of closid supported by this resource regardless of CDP */
u32 resctrl_arch_get_num_closid(struct rdt_resource *r);
int resctrl_arch_update_domains(struct rdt_resource *r, u32 closid);
+
+/*
+ * Update the ctrl_val and apply this config right now.
+ * Must be called on one of the domain's CPUs.
+ */
+int resctrl_arch_update_one(struct rdt_resource *r, struct rdt_domain *d,
+ u32 closid, enum resctrl_conf_type t, u32 cfg_val);
+
u32 resctrl_arch_get_config(struct rdt_resource *r, struct rdt_domain *d,
u32 closid, enum resctrl_conf_type type);
+int resctrl_online_domain(struct rdt_resource *r, struct rdt_domain *d);
+void resctrl_offline_domain(struct rdt_resource *r, struct rdt_domain *d);
+
+/**
+ * resctrl_arch_rmid_read() - Read the eventid counter corresponding to rmid
+ * for this resource and domain.
+ * @r: resource that the counter should be read from.
+ * @d: domain that the counter should be read from.
+ * @rmid: rmid of the counter to read.
+ * @eventid: eventid to read, e.g. L3 occupancy.
+ * @val: result of the counter read in bytes.
+ *
+ * Call from process context on a CPU that belongs to domain @d.
+ *
+ * Return:
+ * 0 on success, or -EIO, -EINVAL etc on error.
+ */
+int resctrl_arch_rmid_read(struct rdt_resource *r, struct rdt_domain *d,
+ u32 rmid, enum resctrl_event_id eventid, u64 *val);
+
+/**
+ * resctrl_arch_reset_rmid() - Reset any private state associated with rmid
+ * and eventid.
+ * @r: The domain's resource.
+ * @d: The rmid's domain.
+ * @rmid: The rmid whose counter values should be reset.
+ * @eventid: The eventid whose counter values should be reset.
+ *
+ * This can be called from any CPU.
+ */
+void resctrl_arch_reset_rmid(struct rdt_resource *r, struct rdt_domain *d,
+ u32 rmid, enum resctrl_event_id eventid);
+
+extern unsigned int resctrl_rmid_realloc_threshold;
+extern unsigned int resctrl_rmid_realloc_limit;
#endif /* _RESCTRL_H */
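
Putting the new monitoring hooks together, a caller that wants the L3 occupancy for an RMID would run something like the following on a CPU in the target domain (the foo_* wrapper is hypothetical):

#include <linux/resctrl.h>

static int foo_read_l3_occupancy(struct rdt_resource *r, struct rdt_domain *d,
				 u32 rmid, u64 *bytes)
{
	/* Must run in process context on one of @d's CPUs. */
	return resctrl_arch_rmid_read(r, d, rmid, QOS_L3_OCCUP_EVENT_ID, bytes);
}
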
diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h
index dac53fd3afea..2504df9a0453 100644
--- a/include/linux/ring_buffer.h
+++ b/include/linux/ring_buffer.h
@@ -101,7 +101,7 @@ __ring_buffer_alloc(unsigned long size, unsigned flags, struct lock_class_key *k
int ring_buffer_wait(struct trace_buffer *buffer, int cpu, int full);
__poll_t ring_buffer_poll_wait(struct trace_buffer *buffer, int cpu,
struct file *filp, poll_table *poll_table);
-
+void ring_buffer_wake_waiters(struct trace_buffer *buffer, int cpu);
#define RING_BUFFER_ALL_CPUS -1
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index b89b4b86951f..bd3504d11b15 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -166,7 +166,7 @@ static inline void anon_vma_merge(struct vm_area_struct *vma,
unlink_anon_vmas(next);
}
-struct anon_vma *page_get_anon_vma(struct page *page);
+struct anon_vma *folio_get_anon_vma(struct folio *folio);
/* RMAP flags, currently only relevant for some anon rmap operations. */
typedef int __bitwise rmap_t;
@@ -270,7 +270,7 @@ dup:
* @page: the exclusive anonymous page to try marking possibly shared
*
* The caller needs to hold the PT lock and has to have the page table entry
- * cleared/invalidated+flushed, to properly sync against GUP-fast.
+ * cleared/invalidated.
*
* This is similar to page_try_dup_anon_rmap(), however, not used during fork()
* to duplicate a mapping, but instead to prepare for KSM or temporarily
@@ -286,12 +286,68 @@ static inline int page_try_share_anon_rmap(struct page *page)
{
VM_BUG_ON_PAGE(!PageAnon(page) || !PageAnonExclusive(page), page);
- /* See page_try_dup_anon_rmap(). */
- if (likely(!is_device_private_page(page) &&
- unlikely(page_maybe_dma_pinned(page))))
- return -EBUSY;
+ /* device private pages cannot get pinned via GUP. */
+ if (unlikely(is_device_private_page(page))) {
+ ClearPageAnonExclusive(page);
+ return 0;
+ }
+
+ /*
+ * We have to make sure that when we clear PageAnonExclusive, that
+ * the page is not pinned and that concurrent GUP-fast won't succeed in
+ * concurrently pinning the page.
+ *
+ * Conceptually, PageAnonExclusive clearing consists of:
+ * (A1) Clear PTE
+ * (A2) Check if the page is pinned; back off if so.
+ * (A3) Clear PageAnonExclusive
+ * (A4) Restore PTE (optional, but certainly not writable)
+ *
+ * When clearing PageAnonExclusive, we cannot possibly map the page
+ * writable again, because anon pages that may be shared must never
+ * be writable. So in any case, if the PTE was writable it cannot
+ * be writable anymore afterwards and there would be a PTE change. Only
+ * if the PTE wasn't writable, there might not be a PTE change.
+ *
+ * Conceptually, GUP-fast pinning of an anon page consists of:
+ * (B1) Read the PTE
+ * (B2) FOLL_WRITE: check if the PTE is not writable; back off if so.
+ * (B3) Pin the mapped page
+ * (B4) Check if the PTE changed by re-reading it; back off if so.
+ * (B5) If the original PTE is not writable, check if
+ * PageAnonExclusive is not set; back off if so.
+ *
+ * If the PTE was writable, we only have to make sure that GUP-fast
+ * observes a PTE change and properly backs off.
+ *
+ * If the PTE was not writable, we have to make sure that GUP-fast either
+ * detects a (temporary) PTE change or that PageAnonExclusive is cleared
+ * and properly backs off.
+ *
+ * Consequently, when clearing PageAnonExclusive(), we have to make
+ * sure that (A1), (A2)/(A3) and (A4) happen in the right memory
+ * order. In GUP-fast pinning code, we have to make sure that (B3),(B4)
+ * and (B5) happen in the right memory order.
+ *
+ * We assume that there might not be a memory barrier after
+ * clearing/invalidating the PTE (A1) and before restoring the PTE (A4),
+ * so we use explicit ones here.
+ */
+ /* Paired with the memory barrier in try_grab_folio(). */
+ if (IS_ENABLED(CONFIG_HAVE_FAST_GUP))
+ smp_mb();
+
+ if (unlikely(page_maybe_dma_pinned(page)))
+ return -EBUSY;
ClearPageAnonExclusive(page);
+
+ /*
+ * This is conceptually a smp_wmb() paired with the smp_rmb() in
+ * gup_must_unshare().
+ */
+ if (IS_ENABLED(CONFIG_HAVE_FAST_GUP))
+ smp_mb__after_atomic();
return 0;
}
@@ -405,13 +461,8 @@ struct rmap_walk_control {
void rmap_walk(struct folio *folio, struct rmap_walk_control *rwc);
void rmap_walk_locked(struct folio *folio, struct rmap_walk_control *rwc);
-
-/*
- * Called by memory-failure.c to kill processes.
- */
struct anon_vma *folio_lock_anon_vma_read(struct folio *folio,
struct rmap_walk_control *rwc);
-void page_unlock_anon_vma_read(struct anon_vma *anon_vma);
#else /* !CONFIG_MMU */
diff --git a/include/linux/rwlock.h b/include/linux/rwlock.h
index 8f416c5e929e..c0ef596f340b 100644
--- a/include/linux/rwlock.h
+++ b/include/linux/rwlock.h
@@ -1,7 +1,7 @@
#ifndef __LINUX_RWLOCK_H
#define __LINUX_RWLOCK_H
-#ifndef __LINUX_SPINLOCK_H
+#ifndef __LINUX_INSIDE_SPINLOCK_H
# error "please don't include this file directly"
#endif
diff --git a/include/linux/sbitmap.h b/include/linux/sbitmap.h
index 8f5a86e210b9..4d2d5205ab58 100644
--- a/include/linux/sbitmap.h
+++ b/include/linux/sbitmap.h
@@ -575,8 +575,9 @@ void sbitmap_queue_wake_all(struct sbitmap_queue *sbq);
* sbitmap_queue_wake_up() - Wake up some of waiters in one waitqueue
* on a &struct sbitmap_queue.
* @sbq: Bitmap queue to wake up.
+ * @nr: Number of bits cleared.
*/
-void sbitmap_queue_wake_up(struct sbitmap_queue *sbq);
+void sbitmap_queue_wake_up(struct sbitmap_queue *sbq, int nr);
/**
* sbitmap_queue_show() - Dump &struct sbitmap_queue information to a &struct
diff --git a/include/linux/sched.h b/include/linux/sched.h
index e7b2f8a5c711..ffb6eb55cd13 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -14,6 +14,7 @@
#include <linux/pid.h>
#include <linux/sem.h>
#include <linux/shm.h>
+#include <linux/kmsan_types.h>
#include <linux/mutex.h>
#include <linux/plist.h>
#include <linux/hrtimer.h>
@@ -81,25 +82,34 @@ struct task_group;
*/
/* Used in tsk->state: */
-#define TASK_RUNNING 0x0000
-#define TASK_INTERRUPTIBLE 0x0001
-#define TASK_UNINTERRUPTIBLE 0x0002
-#define __TASK_STOPPED 0x0004
-#define __TASK_TRACED 0x0008
+#define TASK_RUNNING 0x00000000
+#define TASK_INTERRUPTIBLE 0x00000001
+#define TASK_UNINTERRUPTIBLE 0x00000002
+#define __TASK_STOPPED 0x00000004
+#define __TASK_TRACED 0x00000008
/* Used in tsk->exit_state: */
-#define EXIT_DEAD 0x0010
-#define EXIT_ZOMBIE 0x0020
+#define EXIT_DEAD 0x00000010
+#define EXIT_ZOMBIE 0x00000020
#define EXIT_TRACE (EXIT_ZOMBIE | EXIT_DEAD)
/* Used in tsk->state again: */
-#define TASK_PARKED 0x0040
-#define TASK_DEAD 0x0080
-#define TASK_WAKEKILL 0x0100
-#define TASK_WAKING 0x0200
-#define TASK_NOLOAD 0x0400
-#define TASK_NEW 0x0800
-/* RT specific auxilliary flag to mark RT lock waiters */
-#define TASK_RTLOCK_WAIT 0x1000
-#define TASK_STATE_MAX 0x2000
+#define TASK_PARKED 0x00000040
+#define TASK_DEAD 0x00000080
+#define TASK_WAKEKILL 0x00000100
+#define TASK_WAKING 0x00000200
+#define TASK_NOLOAD 0x00000400
+#define TASK_NEW 0x00000800
+#define TASK_RTLOCK_WAIT 0x00001000
+#define TASK_FREEZABLE 0x00002000
+#define __TASK_FREEZABLE_UNSAFE (0x00004000 * IS_ENABLED(CONFIG_LOCKDEP))
+#define TASK_FROZEN 0x00008000
+#define TASK_STATE_MAX 0x00010000
+
+#define TASK_ANY (TASK_STATE_MAX-1)
+
+/*
+ * DO NOT ADD ANY NEW USERS !
+ */
+#define TASK_FREEZABLE_UNSAFE (TASK_FREEZABLE | __TASK_FREEZABLE_UNSAFE)
/* Convenience macros for the sake of set_current_state: */
#define TASK_KILLABLE (TASK_WAKEKILL | TASK_UNINTERRUPTIBLE)
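
With the reworked freezer, a sleeper opts in to being frozen by OR-ing TASK_FREEZABLE into its sleep state; a hedged kthread sketch (foo_kthread() is hypothetical):

#include <linux/kthread.h>
#include <linux/sched.h>

static int foo_kthread(void *unused)
{
	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE | TASK_FREEZABLE);
		if (kthread_should_stop()) {
			__set_current_state(TASK_RUNNING);
			break;
		}
		schedule();		/* may be frozen here */
		/* ... handle whatever woke us, then loop ... */
	}
	return 0;
}
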
@@ -860,9 +870,6 @@ struct task_struct {
struct mm_struct *mm;
struct mm_struct *active_mm;
- /* Per-thread vma caching: */
- struct vmacache vmacache;
-
#ifdef SPLIT_RSS_COUNTING
struct task_rss_stat rss_stat;
#endif
@@ -914,6 +921,10 @@ struct task_struct {
#ifdef CONFIG_MEMCG
unsigned in_user_fault:1;
#endif
+#ifdef CONFIG_LRU_GEN
+ /* whether the LRU algorithm may apply to this access */
+ unsigned in_lru_fault:1;
+#endif
#ifdef CONFIG_COMPAT_BRK
unsigned brk_randomized:1;
#endif
@@ -936,7 +947,7 @@ struct task_struct {
#endif
#ifdef CONFIG_EVENTFD
/* Recursion prevention for eventfd_signal() */
- unsigned in_eventfd_signal:1;
+ unsigned in_eventfd:1;
#endif
#ifdef CONFIG_IOMMU_SVA
unsigned pasid_activated:1;
@@ -944,6 +955,10 @@ struct task_struct {
#ifdef CONFIG_CPU_SUP_INTEL
unsigned reported_split_lock:1;
#endif
+#ifdef CONFIG_TASK_DELAY_ACCT
+ /* delay due to memory thrashing */
+ unsigned in_thrashing:1;
+#endif
unsigned long atomic_flags; /* Flags requiring atomic access. */
@@ -1355,6 +1370,10 @@ struct task_struct {
#endif
#endif
+#ifdef CONFIG_KMSAN
+ struct kmsan_ctx kmsan_ctx;
+#endif
+
#if IS_ENABLED(CONFIG_KUNIT)
struct kunit *kunit_test;
#endif
@@ -1381,9 +1400,6 @@ struct task_struct {
#endif
#ifdef CONFIG_TRACING
- /* State flags for use by tracers: */
- unsigned long trace;
-
/* Bitmask and counter of trace recursion: */
unsigned long trace_recursion;
#endif /* CONFIG_TRACING */
@@ -1713,8 +1729,9 @@ extern struct pid *cad_pid;
#define PF_MEMALLOC 0x00000800 /* Allocating memory */
#define PF_NPROC_EXCEEDED 0x00001000 /* set_user() noticed that RLIMIT_NPROC was exceeded */
#define PF_USED_MATH 0x00002000 /* If unset the fpu must be initialized before use */
+#define PF__HOLE__00004000 0x00004000
#define PF_NOFREEZE 0x00008000 /* This thread should not be frozen */
-#define PF_FROZEN 0x00010000 /* Frozen for system suspend */
+#define PF__HOLE__00010000 0x00010000
#define PF_KSWAPD 0x00020000 /* I am kswapd */
#define PF_MEMALLOC_NOFS 0x00040000 /* All allocation requests will inherit GFP_NOFS */
#define PF_MEMALLOC_NOIO 0x00080000 /* All allocation requests will inherit GFP_NOIO */
@@ -1722,10 +1739,14 @@ extern struct pid *cad_pid;
* I am cleaning dirty pages from some other bdi. */
#define PF_KTHREAD 0x00200000 /* I am a kernel thread */
#define PF_RANDOMIZE 0x00400000 /* Randomize virtual address space */
+#define PF__HOLE__00800000 0x00800000
+#define PF__HOLE__01000000 0x01000000
+#define PF__HOLE__02000000 0x02000000
#define PF_NO_SETAFFINITY 0x04000000 /* Userland is not allowed to meddle with cpus_mask */
#define PF_MCE_EARLY 0x08000000 /* Early kill for mce process policy */
#define PF_MEMALLOC_PIN 0x10000000 /* Allocation context constrained to zones which allow long term pinning. */
-#define PF_FREEZER_SKIP 0x40000000 /* Freezer should not count it as freezable */
+#define PF__HOLE__20000000 0x20000000
+#define PF__HOLE__40000000 0x40000000
#define PF_SUSPEND_TASK 0x80000000 /* This thread called freeze_processes() and should not be frozen */
/*
diff --git a/include/linux/sched/coredump.h b/include/linux/sched/coredump.h
index 4d0a5be28b70..8270ad7ae14c 100644
--- a/include/linux/sched/coredump.h
+++ b/include/linux/sched/coredump.h
@@ -71,9 +71,8 @@ static inline int get_dumpable(struct mm_struct *mm)
#define MMF_UNSTABLE 22 /* mm is unstable for copy_from_user */
#define MMF_HUGE_ZERO_PAGE 23 /* mm has ever used the global huge zero page */
#define MMF_DISABLE_THP 24 /* disable THP for all VMAs */
-#define MMF_OOM_VICTIM 25 /* mm is the oom victim */
-#define MMF_OOM_REAP_QUEUED 26 /* mm was queued for oom_reaper */
-#define MMF_MULTIPROCESS 27 /* mm is shared between processes */
+#define MMF_OOM_REAP_QUEUED 25 /* mm was queued for oom_reaper */
+#define MMF_MULTIPROCESS 26 /* mm is shared between processes */
/*
* MMF_HAS_PINNED: Whether this mm has pinned any pages. This can be either
* replaced in the future by mm.pinned_vm when it becomes stable, or grow into
@@ -81,7 +80,7 @@ static inline int get_dumpable(struct mm_struct *mm)
* pinned pages were unpinned later on, we'll still keep this bit set for the
* lifecycle of this mm, just for simplicity.
*/
-#define MMF_HAS_PINNED 28 /* FOLL_PIN has run, never cleared */
+#define MMF_HAS_PINNED 27 /* FOLL_PIN has run, never cleared */
#define MMF_DISABLE_THP_MASK (1 << MMF_DISABLE_THP)
#define MMF_INIT_MASK (MMF_DUMPABLE_MASK | MMF_DUMP_FILTER_MASK |\
diff --git a/include/linux/sched/signal.h b/include/linux/sched/signal.h
index cafbe03eed01..20099268fa25 100644
--- a/include/linux/sched/signal.h
+++ b/include/linux/sched/signal.h
@@ -94,6 +94,7 @@ struct signal_struct {
refcount_t sigcnt;
atomic_t live;
int nr_threads;
+ int quick_threads;
struct list_head thread_head;
wait_queue_head_t wait_chldexit; /* for wait4() */
diff --git a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h
index e650946816d0..303ee7dd0c7e 100644
--- a/include/linux/sched/sysctl.h
+++ b/include/linux/sched/sysctl.h
@@ -27,6 +27,7 @@ enum sched_tunable_scaling {
#ifdef CONFIG_NUMA_BALANCING
extern int sysctl_numa_balancing_mode;
+extern unsigned int sysctl_numa_balancing_promote_rate_limit;
#else
#define sysctl_numa_balancing_mode 0
#endif
diff --git a/include/linux/sched/task.h b/include/linux/sched/task.h
index 81cab4b01edc..d6c48163c6de 100644
--- a/include/linux/sched/task.h
+++ b/include/linux/sched/task.h
@@ -127,6 +127,9 @@ static inline void put_task_struct_many(struct task_struct *t, int nr)
void put_task_struct_rcu_user(struct task_struct *task);
+/* Free all architecture-specific resources held by a thread. */
+void release_thread(struct task_struct *dead_task);
+
#ifdef CONFIG_ARCH_WANTS_DYNAMIC_TASK_STRUCT
extern int arch_task_struct_size __read_mostly;
#else
diff --git a/include/linux/scmi_protocol.h b/include/linux/scmi_protocol.h
index a193884ecf2b..4f765bc788ff 100644
--- a/include/linux/scmi_protocol.h
+++ b/include/linux/scmi_protocol.h
@@ -84,7 +84,7 @@ struct scmi_protocol_handle;
struct scmi_clk_proto_ops {
int (*count_get)(const struct scmi_protocol_handle *ph);
- const struct scmi_clock_info *(*info_get)
+ const struct scmi_clock_info __must_check *(*info_get)
(const struct scmi_protocol_handle *ph, u32 clk_id);
int (*rate_get)(const struct scmi_protocol_handle *ph, u32 clk_id,
u64 *rate);
@@ -466,7 +466,7 @@ enum scmi_sensor_class {
*/
struct scmi_sensor_proto_ops {
int (*count_get)(const struct scmi_protocol_handle *ph);
- const struct scmi_sensor_info *(*info_get)
+ const struct scmi_sensor_info __must_check *(*info_get)
(const struct scmi_protocol_handle *ph, u32 sensor_id);
int (*trip_point_config)(const struct scmi_protocol_handle *ph,
u32 sensor_id, u8 trip_id, u64 trip_value);
diff --git a/include/linux/security.h b/include/linux/security.h
index 7bd0c490703d..ca1b7109c0db 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -114,6 +114,7 @@ enum lockdown_reason {
LOCKDOWN_IOPORT,
LOCKDOWN_MSR,
LOCKDOWN_ACPI_TABLES,
+ LOCKDOWN_DEVICE_TREE,
LOCKDOWN_PCMCIA_CIS,
LOCKDOWN_TIOCSSERIAL,
LOCKDOWN_MODULE_PARAMETERS,
@@ -122,6 +123,7 @@ enum lockdown_reason {
LOCKDOWN_XMON_WR,
LOCKDOWN_BPF_WRITE_USER,
LOCKDOWN_DBG_WRITE_KERNEL,
+ LOCKDOWN_RTAS_ERROR_INJECTION,
LOCKDOWN_INTEGRITY_MAX,
LOCKDOWN_KCORE,
LOCKDOWN_KPROBES,
@@ -437,6 +439,7 @@ int security_task_kill(struct task_struct *p, struct kernel_siginfo *info,
int security_task_prctl(int option, unsigned long arg2, unsigned long arg3,
unsigned long arg4, unsigned long arg5);
void security_task_to_inode(struct task_struct *p, struct inode *inode);
+int security_create_user_ns(const struct cred *cred);
int security_ipc_permission(struct kern_ipc_perm *ipcp, short flag);
void security_ipc_getsecid(struct kern_ipc_perm *ipcp, u32 *secid);
int security_msg_msg_alloc(struct msg_msg *msg);
@@ -461,7 +464,7 @@ int security_sem_semctl(struct kern_ipc_perm *sma, int cmd);
int security_sem_semop(struct kern_ipc_perm *sma, struct sembuf *sops,
unsigned nsops, int alter);
void security_d_instantiate(struct dentry *dentry, struct inode *inode);
-int security_getprocattr(struct task_struct *p, const char *lsm, char *name,
+int security_getprocattr(struct task_struct *p, const char *lsm, const char *name,
char **value);
int security_setprocattr(const char *lsm, const char *name, void *value,
size_t size);
@@ -1194,6 +1197,11 @@ static inline int security_task_prctl(int option, unsigned long arg2,
static inline void security_task_to_inode(struct task_struct *p, struct inode *inode)
{ }
+static inline int security_create_user_ns(const struct cred *cred)
+{
+ return 0;
+}
+
static inline int security_ipc_permission(struct kern_ipc_perm *ipcp,
short flag)
{
@@ -1301,7 +1309,7 @@ static inline void security_d_instantiate(struct dentry *dentry,
{ }
static inline int security_getprocattr(struct task_struct *p, const char *lsm,
- char *name, char **value)
+ const char *name, char **value)
{
return -EINVAL;
}
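
An LSM can now gate user namespace creation; assuming the corresponding hook is named userns_create (an assumption here, only security_create_user_ns() appears in the patch), a minimal module-side sketch with an example policy might be:

#include <linux/capability.h>
#include <linux/cred.h>
#include <linux/lsm_hooks.h>

/* Example policy only: restrict user namespace creation to CAP_SYS_ADMIN. */
static int foo_userns_create(const struct cred *cred)
{
	if (!ns_capable(current_user_ns(), CAP_SYS_ADMIN))
		return -EPERM;
	return 0;
}

static struct security_hook_list foo_hooks[] __lsm_ro_after_init = {
	LSM_HOOK_INIT(userns_create, foo_userns_create),	/* hook name assumed */
};
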
diff --git a/include/linux/sed-opal.h b/include/linux/sed-opal.h
index 1ac0d712a9c3..6f837bb6c715 100644
--- a/include/linux/sed-opal.h
+++ b/include/linux/sed-opal.h
@@ -43,6 +43,7 @@ static inline bool is_sed_ioctl(unsigned int cmd)
case IOC_OPAL_MBR_DONE:
case IOC_OPAL_WRITE_SHADOW_MBR:
case IOC_OPAL_GENERIC_TABLE_RW:
+ case IOC_OPAL_GET_STATUS:
return true;
}
return false;
diff --git a/include/linux/serdev.h b/include/linux/serdev.h
index 3368c261ab62..66f624fc618c 100644
--- a/include/linux/serdev.h
+++ b/include/linux/serdev.h
@@ -7,6 +7,7 @@
#include <linux/types.h>
#include <linux/device.h>
+#include <linux/uaccess.h>
#include <linux/termios.h>
#include <linux/delay.h>
diff --git a/include/linux/serial_8250.h b/include/linux/serial_8250.h
index 8c7b793aa4d7..19376bee9667 100644
--- a/include/linux/serial_8250.h
+++ b/include/linux/serial_8250.h
@@ -32,7 +32,7 @@ struct plat_serial8250_port {
void (*serial_out)(struct uart_port *, int, int);
void (*set_termios)(struct uart_port *,
struct ktermios *new,
- struct ktermios *old);
+ const struct ktermios *old);
void (*set_ldisc)(struct uart_port *,
struct ktermios *);
unsigned int (*get_mctrl)(struct uart_port *);
@@ -74,6 +74,7 @@ struct uart_8250_port;
struct uart_8250_ops {
int (*setup_irq)(struct uart_8250_port *);
void (*release_irq)(struct uart_8250_port *);
+ void (*setup_timer)(struct uart_8250_port *);
};
struct uart_8250_em485 {
@@ -157,7 +158,7 @@ extern int early_serial8250_setup(struct earlycon_device *device,
extern void serial8250_update_uartclk(struct uart_port *port,
unsigned int uartclk);
extern void serial8250_do_set_termios(struct uart_port *port,
- struct ktermios *termios, struct ktermios *old);
+ struct ktermios *termios, const struct ktermios *old);
extern void serial8250_do_set_ldisc(struct uart_port *port,
struct ktermios *termios);
extern unsigned int serial8250_do_get_mctrl(struct uart_port *port);
diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h
index 6e4f4765d209..d657f2a42a7b 100644
--- a/include/linux/serial_core.h
+++ b/include/linux/serial_core.h
@@ -387,7 +387,7 @@ struct uart_ops {
void (*shutdown)(struct uart_port *);
void (*flush_buffer)(struct uart_port *);
void (*set_termios)(struct uart_port *, struct ktermios *new,
- struct ktermios *old);
+ const struct ktermios *old);
void (*set_ldisc)(struct uart_port *, struct ktermios *);
void (*pm)(struct uart_port *, unsigned int state,
unsigned int oldstate);
@@ -422,7 +422,7 @@ struct uart_icount {
__u32 buf_overrun;
};
-typedef unsigned int __bitwise upf_t;
+typedef u64 __bitwise upf_t;
typedef unsigned int __bitwise upstat_t;
struct uart_port {
@@ -433,7 +433,7 @@ struct uart_port {
void (*serial_out)(struct uart_port *, int, int);
void (*set_termios)(struct uart_port *,
struct ktermios *new,
- struct ktermios *old);
+ const struct ktermios *old);
void (*set_ldisc)(struct uart_port *,
struct ktermios *);
unsigned int (*get_mctrl)(struct uart_port *);
@@ -513,23 +513,24 @@ struct uart_port {
#define UPF_BUGGY_UART ((__force upf_t) ASYNC_BUGGY_UART /* 14 */ )
#define UPF_MAGIC_MULTIPLIER ((__force upf_t) ASYNC_MAGIC_MULTIPLIER /* 16 */ )
-#define UPF_NO_THRE_TEST ((__force upf_t) (1 << 19))
+#define UPF_NO_THRE_TEST ((__force upf_t) BIT_ULL(19))
/* Port has hardware-assisted h/w flow control */
-#define UPF_AUTO_CTS ((__force upf_t) (1 << 20))
-#define UPF_AUTO_RTS ((__force upf_t) (1 << 21))
+#define UPF_AUTO_CTS ((__force upf_t) BIT_ULL(20))
+#define UPF_AUTO_RTS ((__force upf_t) BIT_ULL(21))
#define UPF_HARD_FLOW ((__force upf_t) (UPF_AUTO_CTS | UPF_AUTO_RTS))
/* Port has hardware-assisted s/w flow control */
-#define UPF_SOFT_FLOW ((__force upf_t) (1 << 22))
-#define UPF_CONS_FLOW ((__force upf_t) (1 << 23))
-#define UPF_SHARE_IRQ ((__force upf_t) (1 << 24))
-#define UPF_EXAR_EFR ((__force upf_t) (1 << 25))
-#define UPF_BUG_THRE ((__force upf_t) (1 << 26))
+#define UPF_SOFT_FLOW ((__force upf_t) BIT_ULL(22))
+#define UPF_CONS_FLOW ((__force upf_t) BIT_ULL(23))
+#define UPF_SHARE_IRQ ((__force upf_t) BIT_ULL(24))
+#define UPF_EXAR_EFR ((__force upf_t) BIT_ULL(25))
+#define UPF_BUG_THRE ((__force upf_t) BIT_ULL(26))
/* The exact UART type is known and should not be probed. */
-#define UPF_FIXED_TYPE ((__force upf_t) (1 << 27))
-#define UPF_BOOT_AUTOCONF ((__force upf_t) (1 << 28))
-#define UPF_FIXED_PORT ((__force upf_t) (1 << 29))
-#define UPF_DEAD ((__force upf_t) (1 << 30))
-#define UPF_IOREMAP ((__force upf_t) (1 << 31))
+#define UPF_FIXED_TYPE ((__force upf_t) BIT_ULL(27))
+#define UPF_BOOT_AUTOCONF ((__force upf_t) BIT_ULL(28))
+#define UPF_FIXED_PORT ((__force upf_t) BIT_ULL(29))
+#define UPF_DEAD ((__force upf_t) BIT_ULL(30))
+#define UPF_IOREMAP ((__force upf_t) BIT_ULL(31))
+#define UPF_FULL_PROBE ((__force upf_t) BIT_ULL(32))
#define __UPF_CHANGE_MASK 0x17fff
#define UPF_CHANGE_MASK ((__force upf_t) __UPF_CHANGE_MASK)
@@ -624,6 +625,23 @@ struct uart_state {
/* number of characters left in xmit buffer before we ask for more */
#define WAKEUP_CHARS 256
+/**
+ * uart_xmit_advance - Advance xmit buffer and account Tx'ed chars
+ * @up: uart_port structure describing the port
+ * @chars: number of characters sent
+ *
+ * This function advances the tail of circular xmit buffer by the number of
+ * @chars transmitted and handles accounting of transmitted bytes (into
+ * @up's icount.tx).
+ */
+static inline void uart_xmit_advance(struct uart_port *up, unsigned int chars)
+{
+ struct circ_buf *xmit = &up->state->xmit;
+
+ xmit->tail = (xmit->tail + chars) & (UART_XMIT_SIZE - 1);
+ up->icount.tx += chars;
+}
+
struct module;
struct tty_driver;
@@ -652,7 +670,7 @@ void uart_write_wakeup(struct uart_port *port);
void uart_update_timeout(struct uart_port *port, unsigned int cflag,
unsigned int baud);
unsigned int uart_get_baud_rate(struct uart_port *port, struct ktermios *termios,
- struct ktermios *old, unsigned int min,
+ const struct ktermios *old, unsigned int min,
unsigned int max);
unsigned int uart_get_divisor(struct uart_port *port, unsigned int baud);
@@ -933,5 +951,4 @@ static inline int uart_handle_break(struct uart_port *port)
!((cflag) & CLOCAL))
int uart_get_rs485_mode(struct uart_port *port);
-int uart_rs485_config(struct uart_port *port);
#endif /* LINUX_SERIAL_CORE_H */
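
A typical transmit path then becomes a loop that pushes bytes to the hardware FIFO and lets uart_xmit_advance() handle both the tail update and the Tx accounting; the foo_tx_fifo_* helpers below are hypothetical:

#include <linux/serial_core.h>

static void foo_uart_tx_chars(struct uart_port *port)
{
	struct circ_buf *xmit = &port->state->xmit;

	while (!uart_circ_empty(xmit) && foo_tx_fifo_has_room(port)) {
		foo_tx_fifo_put(port, xmit->buf[xmit->tail]);
		uart_xmit_advance(port, 1);	/* moves tail, bumps icount.tx */
	}

	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
		uart_write_wakeup(port);
}
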
diff --git a/include/linux/sfp.h b/include/linux/sfp.h
index 302094b855fb..d1f343853b6c 100644
--- a/include/linux/sfp.h
+++ b/include/linux/sfp.h
@@ -535,7 +535,7 @@ int sfp_parse_port(struct sfp_bus *bus, const struct sfp_eeprom_id *id,
unsigned long *support);
bool sfp_may_have_phy(struct sfp_bus *bus, const struct sfp_eeprom_id *id);
void sfp_parse_support(struct sfp_bus *bus, const struct sfp_eeprom_id *id,
- unsigned long *support);
+ unsigned long *support, unsigned long *interfaces);
phy_interface_t sfp_select_interface(struct sfp_bus *bus,
unsigned long *link_modes);
@@ -568,7 +568,8 @@ static inline bool sfp_may_have_phy(struct sfp_bus *bus,
static inline void sfp_parse_support(struct sfp_bus *bus,
const struct sfp_eeprom_id *id,
- unsigned long *support)
+ unsigned long *support,
+ unsigned long *interfaces)
{
}
diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h
index ff0b990de83d..d500ea967dc7 100644
--- a/include/linux/shmem_fs.h
+++ b/include/linux/shmem_fs.h
@@ -92,17 +92,19 @@ extern struct page *shmem_read_mapping_page_gfp(struct address_space *mapping,
extern void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end);
int shmem_unuse(unsigned int type);
-extern bool shmem_is_huge(struct vm_area_struct *vma,
- struct inode *inode, pgoff_t index);
-static inline bool shmem_huge_enabled(struct vm_area_struct *vma)
+extern bool shmem_is_huge(struct vm_area_struct *vma, struct inode *inode,
+ pgoff_t index, bool shmem_huge_force);
+static inline bool shmem_huge_enabled(struct vm_area_struct *vma,
+ bool shmem_huge_force)
{
- return shmem_is_huge(vma, file_inode(vma->vm_file), vma->vm_pgoff);
+ return shmem_is_huge(vma, file_inode(vma->vm_file), vma->vm_pgoff,
+ shmem_huge_force);
}
extern unsigned long shmem_swap_usage(struct vm_area_struct *vma);
extern unsigned long shmem_partial_swap_usage(struct address_space *mapping,
pgoff_t start, pgoff_t end);
-/* Flag allocation requirements to shmem_getpage */
+/* Flag allocation requirements to shmem_get_folio */
enum sgp_type {
SGP_READ, /* don't exceed i_size, don't allocate page */
SGP_NOALLOC, /* similar, but fail on hole or use fallocated page */
@@ -111,8 +113,8 @@ enum sgp_type {
SGP_FALLOC, /* like SGP_WRITE, but make existing page Uptodate */
};
-extern int shmem_getpage(struct inode *inode, pgoff_t index,
- struct page **pagep, enum sgp_type sgp);
+int shmem_get_folio(struct inode *inode, pgoff_t index, struct folio **foliop,
+ enum sgp_type sgp);
static inline struct page *shmem_read_mapping_page(
struct address_space *mapping, pgoff_t index)
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 18e163a3460d..7be5bb4c94b6 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -533,6 +533,13 @@ enum {
struct ubuf_info {
void (*callback)(struct sk_buff *, struct ubuf_info *,
bool zerocopy_success);
+ refcount_t refcnt;
+ u8 flags;
+};
+
+struct ubuf_info_msgzc {
+ struct ubuf_info ubuf;
+
union {
struct {
unsigned long desc;
@@ -545,8 +552,6 @@ struct ubuf_info {
u32 bytelen;
};
};
- refcount_t refcnt;
- u8 flags;
struct mmpin {
struct user_struct *user;
@@ -555,6 +560,8 @@ struct ubuf_info {
};
#define skb_uarg(SKB) ((struct ubuf_info *)(skb_shinfo(SKB)->destructor_arg))
+#define uarg_to_msgzc(ubuf_ptr) container_of((ubuf_ptr), struct ubuf_info_msgzc, \
+ ubuf)
int mm_account_pinned_pages(struct mmpin *mmp, size_t size);
void mm_unaccount_pinned_pages(struct mmpin *mmp);
@@ -796,6 +803,7 @@ typedef unsigned char *sk_buff_data_t;
* @csum_level: indicates the number of consecutive checksums found in
* the packet minus one that have been verified as
* CHECKSUM_UNNECESSARY (max 3)
+ * @scm_io_uring: SKB holds io_uring registered files
* @dst_pending_confirm: need to confirm neighbour
* @decrypted: Decrypted SKB
* @slow_gro: state present at GRO time, slower prepare step required
@@ -975,6 +983,7 @@ struct sk_buff {
#endif
__u8 slow_gro:1;
__u8 csum_not_inet:1;
+ __u8 scm_io_uring:1;
#ifdef CONFIG_NET_SCHED
__u16 tc_index; /* traffic control index */
@@ -1195,7 +1204,8 @@ static inline bool skb_unref(struct sk_buff *skb)
return true;
}
-void kfree_skb_reason(struct sk_buff *skb, enum skb_drop_reason reason);
+void __fix_address
+kfree_skb_reason(struct sk_buff *skb, enum skb_drop_reason reason);
/**
* kfree_skb - free an sk_buff with 'NOT_SPECIFIED' reason
@@ -1460,8 +1470,8 @@ void skb_flow_dissector_init(struct flow_dissector *flow_dissector,
unsigned int key_count);
struct bpf_flow_dissector;
-bool bpf_flow_dissect(struct bpf_prog *prog, struct bpf_flow_dissector *ctx,
- __be16 proto, int nhoff, int hlen, unsigned int flags);
+u32 bpf_flow_dissect(struct bpf_prog *prog, struct bpf_flow_dissector *ctx,
+ __be16 proto, int nhoff, int hlen, unsigned int flags);
bool __skb_flow_dissect(const struct net *net,
const struct sk_buff *skb,
@@ -2608,20 +2618,6 @@ void *skb_pull_data(struct sk_buff *skb, size_t len);
void *__pskb_pull_tail(struct sk_buff *skb, int delta);
-static inline void *__pskb_pull(struct sk_buff *skb, unsigned int len)
-{
- if (len > skb_headlen(skb) &&
- !__pskb_pull_tail(skb, len - skb_headlen(skb)))
- return NULL;
- skb->len -= len;
- return skb->data += len;
-}
-
-static inline void *pskb_pull(struct sk_buff *skb, unsigned int len)
-{
- return unlikely(len > skb->len) ? NULL : __pskb_pull(skb, len);
-}
-
static inline bool pskb_may_pull(struct sk_buff *skb, unsigned int len)
{
if (likely(len <= skb_headlen(skb)))
@@ -2631,6 +2627,15 @@ static inline bool pskb_may_pull(struct sk_buff *skb, unsigned int len)
return __pskb_pull_tail(skb, len - skb_headlen(skb)) != NULL;
}
+static inline void *pskb_pull(struct sk_buff *skb, unsigned int len)
+{
+ if (!pskb_may_pull(skb, len))
+ return NULL;
+
+ skb->len -= len;
+ return skb->data += len;
+}
+
void skb_condense(struct sk_buff *skb);
/**
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 0fefdf528e0d..90877fcde70b 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -29,6 +29,8 @@
#define SLAB_RED_ZONE ((slab_flags_t __force)0x00000400U)
/* DEBUG: Poison objects */
#define SLAB_POISON ((slab_flags_t __force)0x00000800U)
+/* Indicate a kmalloc slab */
+#define SLAB_KMALLOC ((slab_flags_t __force)0x00001000U)
/* Align objs on cache lines */
#define SLAB_HWCACHE_ALIGN ((slab_flags_t __force)0x00002000U)
/* Use GFP_DMA memory */
@@ -106,7 +108,7 @@
# define SLAB_ACCOUNT 0
#endif
-#ifdef CONFIG_KASAN
+#ifdef CONFIG_KASAN_GENERIC
#define SLAB_KASAN ((slab_flags_t __force)0x08000000U)
#else
#define SLAB_KASAN 0
@@ -119,6 +121,12 @@
*/
#define SLAB_NO_USER_FLAGS ((slab_flags_t __force)0x10000000U)
+#ifdef CONFIG_KFENCE
+#define SLAB_SKIP_KFENCE ((slab_flags_t __force)0x20000000U)
+#else
+#define SLAB_SKIP_KFENCE 0
+#endif
+
/* The following flags affect the page allocator grouping pages by mobility */
/* Objects are reclaimable */
#define SLAB_RECLAIM_ACCOUNT ((slab_flags_t __force)0x00020000U)
@@ -184,11 +192,25 @@ int kmem_cache_shrink(struct kmem_cache *s);
/*
* Common kmalloc functions provided by all allocators
*/
-void * __must_check krealloc(const void *objp, size_t new_size, gfp_t flags) __alloc_size(2);
+void * __must_check krealloc(const void *objp, size_t new_size, gfp_t flags) __realloc_size(2);
void kfree(const void *objp);
void kfree_sensitive(const void *objp);
size_t __ksize(const void *objp);
+
+/**
+ * ksize - Report actual allocation size of associated object
+ *
+ * @objp: Pointer returned from a prior kmalloc()-family allocation.
+ *
+ * This should not be used for writing beyond the originally requested
+ * allocation size. Either use krealloc() or round up the allocation size
+ * with kmalloc_size_roundup() prior to allocation. If this is used to
+ * access beyond the originally requested allocation size, UBSAN_BOUNDS
+ * and/or FORTIFY_SOURCE may trip, since they only know about the
+ * originally allocated size via the __alloc_size attribute.
+ */
size_t ksize(const void *objp);
+
#ifdef CONFIG_PRINTK
bool kmem_valid_obj(void *object);
void kmem_dump_obj(void *object);
@@ -243,27 +265,17 @@ static inline unsigned int arch_slab_minalign(void)
#ifdef CONFIG_SLAB
/*
- * The largest kmalloc size supported by the SLAB allocators is
- * 32 megabyte (2^25) or the maximum allocatable page order if that is
- * less than 32 MB.
- *
- * WARNING: Its not easy to increase this value since the allocators have
- * to do various tricks to work around compiler limitations in order to
- * ensure proper constant folding.
+ * SLAB and SLUB directly allocate requests fitting into an order-1 page
+ * (PAGE_SIZE*2). Larger requests are passed to the page allocator.
*/
-#define KMALLOC_SHIFT_HIGH ((MAX_ORDER + PAGE_SHIFT - 1) <= 25 ? \
- (MAX_ORDER + PAGE_SHIFT - 1) : 25)
-#define KMALLOC_SHIFT_MAX KMALLOC_SHIFT_HIGH
+#define KMALLOC_SHIFT_HIGH (PAGE_SHIFT + 1)
+#define KMALLOC_SHIFT_MAX (MAX_ORDER + PAGE_SHIFT - 1)
#ifndef KMALLOC_SHIFT_LOW
#define KMALLOC_SHIFT_LOW 5
#endif
#endif
#ifdef CONFIG_SLUB
-/*
- * SLUB directly allocates requests fitting in to an order-1 page
- * (PAGE_SIZE*2). Larger requests are passed to the page allocator.
- */
#define KMALLOC_SHIFT_HIGH (PAGE_SHIFT + 1)
#define KMALLOC_SHIFT_MAX (MAX_ORDER + PAGE_SHIFT - 1)
#ifndef KMALLOC_SHIFT_LOW
@@ -415,10 +427,6 @@ static __always_inline unsigned int __kmalloc_index(size_t size,
if (size <= 512 * 1024) return 19;
if (size <= 1024 * 1024) return 20;
if (size <= 2 * 1024 * 1024) return 21;
- if (size <= 4 * 1024 * 1024) return 22;
- if (size <= 8 * 1024 * 1024) return 23;
- if (size <= 16 * 1024 * 1024) return 24;
- if (size <= 32 * 1024 * 1024) return 25;
if (!IS_ENABLED(CONFIG_PROFILE_ALL_BRANCHES) && size_is_constant)
BUILD_BUG_ON_MSG(1, "unexpected size in kmalloc_index()");
@@ -428,6 +436,7 @@ static __always_inline unsigned int __kmalloc_index(size_t size,
/* Will never be reached. Needed because the compiler may complain */
return -1;
}
+static_assert(PAGE_SHIFT <= 20);
#define kmalloc_index(s) __kmalloc_index(s, true)
#endif /* !CONFIG_SLOB */
@@ -456,42 +465,22 @@ static __always_inline void kfree_bulk(size_t size, void **p)
kmem_cache_free_bulk(NULL, size, p);
}
-#ifdef CONFIG_NUMA
void *__kmalloc_node(size_t size, gfp_t flags, int node) __assume_kmalloc_alignment
__alloc_size(1);
void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t flags, int node) __assume_slab_alignment
__malloc;
-#else
-static __always_inline __alloc_size(1) void *__kmalloc_node(size_t size, gfp_t flags, int node)
-{
- return __kmalloc(size, flags);
-}
-
-static __always_inline void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t flags, int node)
-{
- return kmem_cache_alloc(s, flags);
-}
-#endif
#ifdef CONFIG_TRACING
-extern void *kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t flags, size_t size)
- __assume_slab_alignment __alloc_size(3);
-
-#ifdef CONFIG_NUMA
-extern void *kmem_cache_alloc_node_trace(struct kmem_cache *s, gfp_t gfpflags,
- int node, size_t size) __assume_slab_alignment
- __alloc_size(4);
-#else
-static __always_inline __alloc_size(4) void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
- gfp_t gfpflags, int node, size_t size)
-{
- return kmem_cache_alloc_trace(s, gfpflags, size);
-}
-#endif /* CONFIG_NUMA */
+void *kmalloc_trace(struct kmem_cache *s, gfp_t flags, size_t size)
+ __assume_kmalloc_alignment __alloc_size(3);
+void *kmalloc_node_trace(struct kmem_cache *s, gfp_t gfpflags,
+ int node, size_t size) __assume_kmalloc_alignment
+ __alloc_size(4);
#else /* CONFIG_TRACING */
-static __always_inline __alloc_size(3) void *kmem_cache_alloc_trace(struct kmem_cache *s,
- gfp_t flags, size_t size)
+/* Save a function call when CONFIG_TRACING=n */
+static __always_inline __alloc_size(3)
+void *kmalloc_trace(struct kmem_cache *s, gfp_t flags, size_t size)
{
void *ret = kmem_cache_alloc(s, flags);
@@ -499,8 +488,9 @@ static __always_inline __alloc_size(3) void *kmem_cache_alloc_trace(struct kmem_
return ret;
}
-static __always_inline void *kmem_cache_alloc_node_trace(struct kmem_cache *s, gfp_t gfpflags,
- int node, size_t size)
+static __always_inline __alloc_size(4)
+void *kmalloc_node_trace(struct kmem_cache *s, gfp_t gfpflags,
+ int node, size_t size)
{
void *ret = kmem_cache_alloc_node(s, gfpflags, node);
@@ -509,25 +499,11 @@ static __always_inline void *kmem_cache_alloc_node_trace(struct kmem_cache *s, g
}
#endif /* CONFIG_TRACING */
-extern void *kmalloc_order(size_t size, gfp_t flags, unsigned int order) __assume_page_alignment
- __alloc_size(1);
+void *kmalloc_large(size_t size, gfp_t flags) __assume_page_alignment
+ __alloc_size(1);
-#ifdef CONFIG_TRACING
-extern void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
- __assume_page_alignment __alloc_size(1);
-#else
-static __always_inline __alloc_size(1) void *kmalloc_order_trace(size_t size, gfp_t flags,
- unsigned int order)
-{
- return kmalloc_order(size, flags, order);
-}
-#endif
-
-static __always_inline __alloc_size(1) void *kmalloc_large(size_t size, gfp_t flags)
-{
- unsigned int order = get_order(size);
- return kmalloc_order_trace(size, flags, order);
-}
+void *kmalloc_large_node(size_t size, gfp_t flags, int node) __assume_page_alignment
+ __alloc_size(1);
/**
* kmalloc - allocate memory
@@ -597,7 +573,7 @@ static __always_inline __alloc_size(1) void *kmalloc(size_t size, gfp_t flags)
if (!index)
return ZERO_SIZE_PTR;
- return kmem_cache_alloc_trace(
+ return kmalloc_trace(
kmalloc_caches[kmalloc_type(flags)][index],
flags, size);
#endif
@@ -605,23 +581,35 @@ static __always_inline __alloc_size(1) void *kmalloc(size_t size, gfp_t flags)
return __kmalloc(size, flags);
}
+#ifndef CONFIG_SLOB
static __always_inline __alloc_size(1) void *kmalloc_node(size_t size, gfp_t flags, int node)
{
-#ifndef CONFIG_SLOB
- if (__builtin_constant_p(size) &&
- size <= KMALLOC_MAX_CACHE_SIZE) {
- unsigned int i = kmalloc_index(size);
+ if (__builtin_constant_p(size)) {
+ unsigned int index;
- if (!i)
+ if (size > KMALLOC_MAX_CACHE_SIZE)
+ return kmalloc_large_node(size, flags, node);
+
+ index = kmalloc_index(size);
+
+ if (!index)
return ZERO_SIZE_PTR;
- return kmem_cache_alloc_node_trace(
- kmalloc_caches[kmalloc_type(flags)][i],
- flags, node, size);
+ return kmalloc_node_trace(
+ kmalloc_caches[kmalloc_type(flags)][index],
+ flags, node, size);
}
-#endif
return __kmalloc_node(size, flags, node);
}
+#else
+static __always_inline __alloc_size(1) void *kmalloc_node(size_t size, gfp_t flags, int node)
+{
+ if (__builtin_constant_p(size) && size > KMALLOC_MAX_CACHE_SIZE)
+ return kmalloc_large_node(size, flags, node);
+
+ return __kmalloc_node(size, flags, node);
+}
+#endif
/**
* kmalloc_array - allocate memory for an array.
@@ -647,10 +635,10 @@ static inline __alloc_size(1, 2) void *kmalloc_array(size_t n, size_t size, gfp_
* @new_size: new size of a single member of the array
* @flags: the type of memory to allocate (see kmalloc)
*/
-static inline __alloc_size(2, 3) void * __must_check krealloc_array(void *p,
- size_t new_n,
- size_t new_size,
- gfp_t flags)
+static inline __realloc_size(2, 3) void * __must_check krealloc_array(void *p,
+ size_t new_n,
+ size_t new_size,
+ gfp_t flags)
{
size_t bytes;
@@ -671,6 +659,12 @@ static inline __alloc_size(1, 2) void *kcalloc(size_t n, size_t size, gfp_t flag
return kmalloc_array(n, size, flags | __GFP_ZERO);
}
+void *__kmalloc_node_track_caller(size_t size, gfp_t flags, int node,
+ unsigned long caller) __alloc_size(1);
+#define kmalloc_node_track_caller(size, flags, node) \
+ __kmalloc_node_track_caller(size, flags, node, \
+ _RET_IP_)
+
/*
* kmalloc_track_caller is a special version of kmalloc that records the
* calling function of the routine calling it for slab leak tracking instead
@@ -679,9 +673,9 @@ static inline __alloc_size(1, 2) void *kcalloc(size_t n, size_t size, gfp_t flag
* allocator where we care about the real place the memory allocation
* request comes from.
*/
-extern void *__kmalloc_track_caller(size_t size, gfp_t flags, unsigned long caller);
#define kmalloc_track_caller(size, flags) \
- __kmalloc_track_caller(size, flags, _RET_IP_)
+ __kmalloc_node_track_caller(size, flags, \
+ NUMA_NO_NODE, _RET_IP_)
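With kmalloc_track_caller() now routed through __kmalloc_node_track_caller() and NUMA_NO_NODE, a wrapper helper still gets leak tracking attributed to its own caller. A hedged sketch; the my_dup_buffer() helper is made up for illustration:

#include <linux/slab.h>
#include <linux/string.h>

static void *my_dup_buffer(const void *src, size_t len, gfp_t gfp)
{
	/* The allocation is accounted to my_dup_buffer()'s caller. */
	void *p = kmalloc_track_caller(len, gfp);

	if (p)
		memcpy(p, src, len);
	return p;
}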
static inline __alloc_size(1, 2) void *kmalloc_array_node(size_t n, size_t size, gfp_t flags,
int node)
@@ -700,21 +694,6 @@ static inline __alloc_size(1, 2) void *kcalloc_node(size_t n, size_t size, gfp_t
return kmalloc_array_node(n, size, flags | __GFP_ZERO, node);
}
-
-#ifdef CONFIG_NUMA
-extern void *__kmalloc_node_track_caller(size_t size, gfp_t flags, int node,
- unsigned long caller) __alloc_size(1);
-#define kmalloc_node_track_caller(size, flags, node) \
- __kmalloc_node_track_caller(size, flags, node, \
- _RET_IP_)
-
-#else /* CONFIG_NUMA */
-
-#define kmalloc_node_track_caller(size, flags, node) \
- kmalloc_track_caller(size, flags)
-
-#endif /* CONFIG_NUMA */
-
/*
* Shortcuts
*/
@@ -774,11 +753,28 @@ static inline __alloc_size(1, 2) void *kvcalloc(size_t n, size_t size, gfp_t fla
}
extern void *kvrealloc(const void *p, size_t oldsize, size_t newsize, gfp_t flags)
- __alloc_size(3);
+ __realloc_size(3);
extern void kvfree(const void *addr);
extern void kvfree_sensitive(const void *addr, size_t len);
unsigned int kmem_cache_size(struct kmem_cache *s);
+
+/**
+ * kmalloc_size_roundup - Report allocation bucket size for the given size
+ *
+ * @size: Number of bytes to round up from.
+ *
+ * This returns the number of bytes that would be available in a kmalloc()
+ * allocation of @size bytes. For example, a 126 byte request would be
+ * rounded up to the next sized kmalloc bucket, 128 bytes. (This is strictly
+ * for the general-purpose kmalloc()-based allocations, and is not for the
+ * pre-sized kmem_cache_alloc()-based allocations.)
+ *
+ * Use this to kmalloc() the full bucket size ahead of time instead of using
+ * ksize() to query the size after an allocation.
+ */
+size_t kmalloc_size_roundup(size_t size);
+
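A minimal sketch of the usage pattern the kerneldoc above recommends, rounding the request up front rather than calling ksize() afterwards; the helper name and parameters are illustrative:

#include <linux/slab.h>

static void *alloc_full_bucket(size_t needed, size_t *avail, gfp_t gfp)
{
	size_t bucket = kmalloc_size_roundup(needed);	/* e.g. 126 -> 128 */
	void *p = kmalloc(bucket, gfp);

	if (p)
		*avail = bucket;	/* caller may use the whole bucket */
	return p;
}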
void __init kmem_cache_init_late(void);
#if defined(CONFIG_SMP) && defined(CONFIG_SLAB)
diff --git a/include/linux/soc/apple/rtkit.h b/include/linux/soc/apple/rtkit.h
index 88eb832eac7b..c9cabb679cd1 100644
--- a/include/linux/soc/apple/rtkit.h
+++ b/include/linux/soc/apple/rtkit.h
@@ -152,4 +152,16 @@ int apple_rtkit_send_message(struct apple_rtkit *rtk, u8 ep, u64 message,
int apple_rtkit_send_message_wait(struct apple_rtkit *rtk, u8 ep, u64 message,
unsigned long timeout, bool atomic);
+/*
+ * Process incoming messages in atomic context.
+ * This only guarantees that messages arrive as far as the recv_message_early
+ * callback; drivers that want to handle incoming messages synchronously
+ * through this function must do so from that callback.
+ * Returns 1 if some data was processed, 0 if none was, or a
+ * negative error code on failure.
+ *
+ * @rtk: RTKit reference
+ */
+int apple_rtkit_poll(struct apple_rtkit *rtk);
+
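Based on the comment above, a driver wanting synchronous delivery might spin on apple_rtkit_poll() roughly as follows. This is only a sketch; the iteration limit, delay, and example_wait_for_message() name are assumptions, not part of the API:

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/soc/apple/rtkit.h>

static int example_wait_for_message(struct apple_rtkit *rtk)
{
	int i, ret;

	for (i = 0; i < 1000; i++) {
		ret = apple_rtkit_poll(rtk);
		if (ret < 0)
			return ret;	/* error */
		if (ret > 0)
			return 0;	/* some data was processed */
		udelay(10);		/* nothing pending yet */
	}
	return -ETIMEDOUT;
}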
#endif /* _LINUX_APPLE_RTKIT_H_ */
diff --git a/include/linux/soc/mediatek/mtk-mmsys.h b/include/linux/soc/mediatek/mtk-mmsys.h
index 59117d970daf..d2b02bb43768 100644
--- a/include/linux/soc/mediatek/mtk-mmsys.h
+++ b/include/linux/soc/mediatek/mtk-mmsys.h
@@ -65,4 +65,6 @@ void mtk_mmsys_ddp_disconnect(struct device *dev,
enum mtk_ddp_comp_id cur,
enum mtk_ddp_comp_id next);
+void mtk_mmsys_ddp_dpi_fmt_config(struct device *dev, u32 val);
+
#endif /* __MTK_MMSYS_H */
diff --git a/include/linux/soc/mediatek/mtk-mutex.h b/include/linux/soc/mediatek/mtk-mutex.h
index a0f4f51a3b45..b335c2837cd8 100644
--- a/include/linux/soc/mediatek/mtk-mutex.h
+++ b/include/linux/soc/mediatek/mtk-mutex.h
@@ -20,6 +20,8 @@ enum mtk_mutex_mod_index {
MUTEX_MOD_IDX_MDP_WDMA,
MUTEX_MOD_IDX_MDP_AAL0,
MUTEX_MOD_IDX_MDP_CCORR0,
+ MUTEX_MOD_IDX_MDP_HDR0,
+ MUTEX_MOD_IDX_MDP_COLOR0,
MUTEX_MOD_IDX_MAX /* ALWAYS keep at the end */
};
diff --git a/include/linux/soc/mediatek/mtk_sip_svc.h b/include/linux/soc/mediatek/mtk_sip_svc.h
index 082398e0cfb1..0761128b4354 100644
--- a/include/linux/soc/mediatek/mtk_sip_svc.h
+++ b/include/linux/soc/mediatek/mtk_sip_svc.h
@@ -22,4 +22,7 @@
ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, MTK_SIP_SMC_CONVENTION, \
ARM_SMCCC_OWNER_SIP, fn_id)
+/* IOMMU related SMC call */
+#define MTK_SIP_KERNEL_IOMMU_CONTROL MTK_SIP_SMC_CMD(0x514)
+
#endif
diff --git a/include/linux/soc/mediatek/mtk_wed.h b/include/linux/soc/mediatek/mtk_wed.h
index 7e00cca06709..4450c8b7a1cb 100644
--- a/include/linux/soc/mediatek/mtk_wed.h
+++ b/include/linux/soc/mediatek/mtk_wed.h
@@ -11,9 +11,15 @@
struct mtk_wed_hw;
struct mtk_wdma_desc;
+enum mtk_wed_bus_tye {
+ MTK_WED_BUS_PCIE,
+ MTK_WED_BUS_AXI,
+};
+
struct mtk_wed_ring {
struct mtk_wdma_desc *desc;
dma_addr_t desc_phys;
+ u32 desc_size;
int size;
u32 reg_base;
@@ -42,13 +48,24 @@ struct mtk_wed_device {
/* filled by driver: */
struct {
- struct pci_dev *pci_dev;
+ union {
+ struct platform_device *platform_dev;
+ struct pci_dev *pci_dev;
+ };
+ enum mtk_wed_bus_tye bus_type;
u32 wpdma_phys;
+ u32 wpdma_int;
+ u32 wpdma_mask;
+ u32 wpdma_tx;
+ u32 wpdma_txfree;
u16 token_start;
unsigned int nbuf;
+ u8 tx_tbit[MTK_WED_TX_QUEUES];
+ u8 txfree_tbit;
+
u32 (*init_buf)(void *ptr, dma_addr_t phys, int token_id);
int (*offload_enable)(struct mtk_wed_device *wed);
void (*offload_disable)(struct mtk_wed_device *wed);
diff --git a/include/linux/soc/qcom/llcc-qcom.h b/include/linux/soc/qcom/llcc-qcom.h
index 9ed5384c5ca1..bc2fb8343a94 100644
--- a/include/linux/soc/qcom/llcc-qcom.h
+++ b/include/linux/soc/qcom/llcc-qcom.h
@@ -78,11 +78,40 @@ struct llcc_edac_reg_data {
u8 ways_shift;
};
+struct llcc_edac_reg_offset {
+ /* LLCC TRP registers */
+ u32 trp_ecc_error_status0;
+ u32 trp_ecc_error_status1;
+ u32 trp_ecc_sb_err_syn0;
+ u32 trp_ecc_db_err_syn0;
+ u32 trp_ecc_error_cntr_clear;
+ u32 trp_interrupt_0_status;
+ u32 trp_interrupt_0_clear;
+ u32 trp_interrupt_0_enable;
+
+ /* LLCC Common registers */
+ u32 cmn_status0;
+ u32 cmn_interrupt_0_enable;
+ u32 cmn_interrupt_2_enable;
+
+ /* LLCC DRP registers */
+ u32 drp_ecc_error_cfg;
+ u32 drp_ecc_error_cntr_clear;
+ u32 drp_interrupt_status;
+ u32 drp_interrupt_clear;
+ u32 drp_interrupt_enable;
+ u32 drp_ecc_error_status0;
+ u32 drp_ecc_error_status1;
+ u32 drp_ecc_sb_err_syn0;
+ u32 drp_ecc_db_err_syn0;
+};
+
/**
* struct llcc_drv_data - Data associated with the llcc driver
* @regmap: regmap associated with the llcc device
* @bcast_regmap: regmap associated with llcc broadcast offset
* @cfg: pointer to the data structure for slice configuration
+ * @edac_reg_offset: Offset of the LLCC EDAC registers
* @lock: mutex associated with each slice
* @cfg_size: size of the config data table
* @max_slices: max slices as read from device tree
@@ -96,6 +125,7 @@ struct llcc_drv_data {
struct regmap *regmap;
struct regmap *bcast_regmap;
const struct llcc_slice_config *cfg;
+ const struct llcc_edac_reg_offset *edac_reg_offset;
struct mutex lock;
u32 cfg_size;
u32 max_slices;
diff --git a/include/linux/soc/qcom/qmi.h b/include/linux/soc/qcom/qmi.h
index b1f80e756d2a..469e02d2aa0d 100644
--- a/include/linux/soc/qcom/qmi.h
+++ b/include/linux/soc/qcom/qmi.h
@@ -75,7 +75,7 @@ struct qmi_elem_info {
enum qmi_array_type array_type;
u8 tlv_type;
u32 offset;
- struct qmi_elem_info *ei_array;
+ const struct qmi_elem_info *ei_array;
};
#define QMI_RESULT_SUCCESS_V01 0
@@ -102,7 +102,7 @@ struct qmi_response_type_v01 {
u16 error;
};
-extern struct qmi_elem_info qmi_response_type_v01_ei[];
+extern const struct qmi_elem_info qmi_response_type_v01_ei[];
/**
* struct qmi_service - context to track lookup-results
@@ -173,7 +173,7 @@ struct qmi_txn {
struct completion completion;
int result;
- struct qmi_elem_info *ei;
+ const struct qmi_elem_info *ei;
void *dest;
};
@@ -189,7 +189,7 @@ struct qmi_msg_handler {
unsigned int type;
unsigned int msg_id;
- struct qmi_elem_info *ei;
+ const struct qmi_elem_info *ei;
size_t decoded_size;
void (*fn)(struct qmi_handle *qmi, struct sockaddr_qrtr *sq,
@@ -249,23 +249,23 @@ void qmi_handle_release(struct qmi_handle *qmi);
ssize_t qmi_send_request(struct qmi_handle *qmi, struct sockaddr_qrtr *sq,
struct qmi_txn *txn, int msg_id, size_t len,
- struct qmi_elem_info *ei, const void *c_struct);
+ const struct qmi_elem_info *ei, const void *c_struct);
ssize_t qmi_send_response(struct qmi_handle *qmi, struct sockaddr_qrtr *sq,
struct qmi_txn *txn, int msg_id, size_t len,
- struct qmi_elem_info *ei, const void *c_struct);
+ const struct qmi_elem_info *ei, const void *c_struct);
ssize_t qmi_send_indication(struct qmi_handle *qmi, struct sockaddr_qrtr *sq,
- int msg_id, size_t len, struct qmi_elem_info *ei,
+ int msg_id, size_t len, const struct qmi_elem_info *ei,
const void *c_struct);
void *qmi_encode_message(int type, unsigned int msg_id, size_t *len,
- unsigned int txn_id, struct qmi_elem_info *ei,
+ unsigned int txn_id, const struct qmi_elem_info *ei,
const void *c_struct);
int qmi_decode_message(const void *buf, size_t len,
- struct qmi_elem_info *ei, void *c_struct);
+ const struct qmi_elem_info *ei, void *c_struct);
int qmi_txn_init(struct qmi_handle *qmi, struct qmi_txn *txn,
- struct qmi_elem_info *ei, void *c_struct);
+ const struct qmi_elem_info *ei, void *c_struct);
int qmi_txn_wait(struct qmi_txn *txn, unsigned long timeout);
void qmi_txn_cancel(struct qmi_txn *txn);
diff --git a/include/linux/soc/qcom/smd-rpm.h b/include/linux/soc/qcom/smd-rpm.h
index 82c9d489833a..3ab8c07f71c0 100644
--- a/include/linux/soc/qcom/smd-rpm.h
+++ b/include/linux/soc/qcom/smd-rpm.h
@@ -41,6 +41,7 @@ struct qcom_smd_rpm;
#define QCOM_SMD_RPM_HWKM_CLK 0x6d6b7768
#define QCOM_SMD_RPM_PKA_CLK 0x616b70
#define QCOM_SMD_RPM_MCFG_CLK 0x6766636d
+#define QCOM_SMD_RPM_MMXI_CLK 0x69786d6d
int qcom_rpm_smd_write(struct qcom_smd_rpm *rpm,
int state,
diff --git a/include/linux/soc/sunxi/sunxi_sram.h b/include/linux/soc/sunxi/sunxi_sram.h
index c5f663bba9c2..60e274d1b821 100644
--- a/include/linux/soc/sunxi/sunxi_sram.h
+++ b/include/linux/soc/sunxi/sunxi_sram.h
@@ -14,6 +14,6 @@
#define _SUNXI_SRAM_H_
int sunxi_sram_claim(struct device *dev);
-int sunxi_sram_release(struct device *dev);
+void sunxi_sram_release(struct device *dev);
#endif /* _SUNXI_SRAM_H_ */
diff --git a/include/linux/sockptr.h b/include/linux/sockptr.h
index d45902fb4cad..bae5e2369b4f 100644
--- a/include/linux/sockptr.h
+++ b/include/linux/sockptr.h
@@ -64,6 +64,11 @@ static inline int copy_to_sockptr_offset(sockptr_t dst, size_t offset,
return 0;
}
+static inline int copy_to_sockptr(sockptr_t dst, const void *src, size_t size)
+{
+ return copy_to_sockptr_offset(dst, 0, src, size);
+}
+
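copy_to_sockptr() is the mirror of copy_from_sockptr() for the common offset-0 case, hiding whether the destination is a kernel or a __user pointer. A hedged sketch of a getsockopt-style handler; the option value and function name are invented for illustration:

#include <linux/errno.h>
#include <linux/sockptr.h>

static int example_getsockopt_val(sockptr_t optval, sockptr_t optlen)
{
	int val = 42;
	int len = sizeof(val);

	if (copy_to_sockptr(optval, &val, sizeof(val)))
		return -EFAULT;
	if (copy_to_sockptr(optlen, &len, sizeof(len)))
		return -EFAULT;
	return 0;
}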
static inline void *memdup_sockptr(sockptr_t src, size_t len)
{
void *p = kmalloc_track_caller(len, GFP_USER | __GFP_NOWARN);
diff --git a/include/linux/soundwire/sdw.h b/include/linux/soundwire/sdw.h
index 39058c841469..9e4537f409c2 100644
--- a/include/linux/soundwire/sdw.h
+++ b/include/linux/soundwire/sdw.h
@@ -839,6 +839,8 @@ struct sdw_defer {
* @set_bus_conf: Set the bus configuration
* @pre_bank_switch: Callback for pre bank switch
* @post_bank_switch: Callback for post bank switch
+ * @read_ping_status: Read status from PING frames, reported with two bits per Device.
+ * Bits 31:24 are reserved.
*/
struct sdw_master_ops {
int (*read_prop)(struct sdw_bus *bus);
@@ -855,6 +857,7 @@ struct sdw_master_ops {
struct sdw_bus_params *params);
int (*pre_bank_switch)(struct sdw_bus *bus);
int (*post_bank_switch)(struct sdw_bus *bus);
+ u32 (*read_ping_status)(struct sdw_bus *bus);
};
@@ -889,6 +892,9 @@ struct sdw_master_ops {
* meaningful if multi_link is set. If set to 1, hardware-based
* synchronization will be used even if a stream only uses a single
* SoundWire segment.
+ * @dev_num_ida_min: if set, defines the minimum value for the IDA
+ * used to allocate system-unique device numbers. This value needs to be
+ * identical across all SoundWire buses in the system.
*/
struct sdw_bus {
struct device *dev;
@@ -913,12 +919,15 @@ struct sdw_bus {
u32 bank_switch_timeout;
bool multi_link;
int hw_sync_min_links;
+ int dev_num_ida_min;
};
int sdw_bus_master_add(struct sdw_bus *bus, struct device *parent,
struct fwnode_handle *fwnode);
void sdw_bus_master_delete(struct sdw_bus *bus);
+void sdw_show_ping_status(struct sdw_bus *bus, bool sync_delay);
+
/**
* sdw_port_config: Master or Slave Port configuration
*
diff --git a/include/linux/soundwire/sdw_intel.h b/include/linux/soundwire/sdw_intel.h
index ec16ae49e6a4..2e9fd91572d4 100644
--- a/include/linux/soundwire/sdw_intel.h
+++ b/include/linux/soundwire/sdw_intel.h
@@ -15,32 +15,21 @@
#define SDW_LINK_SIZE 0x10000
/* Intel SHIM Registers Definition */
+/* LCAP */
#define SDW_SHIM_LCAP 0x0
-#define SDW_SHIM_LCTL 0x4
-#define SDW_SHIM_IPPTR 0x8
-#define SDW_SHIM_SYNC 0xC
-
-#define SDW_SHIM_CTLSCAP(x) (0x010 + 0x60 * (x))
-#define SDW_SHIM_CTLS0CM(x) (0x012 + 0x60 * (x))
-#define SDW_SHIM_CTLS1CM(x) (0x014 + 0x60 * (x))
-#define SDW_SHIM_CTLS2CM(x) (0x016 + 0x60 * (x))
-#define SDW_SHIM_CTLS3CM(x) (0x018 + 0x60 * (x))
-#define SDW_SHIM_PCMSCAP(x) (0x020 + 0x60 * (x))
+#define SDW_SHIM_LCAP_LCOUNT_MASK GENMASK(2, 0)
-#define SDW_SHIM_PCMSYCHM(x, y) (0x022 + (0x60 * (x)) + (0x2 * (y)))
-#define SDW_SHIM_PCMSYCHC(x, y) (0x042 + (0x60 * (x)) + (0x2 * (y)))
-#define SDW_SHIM_PDMSCAP(x) (0x062 + 0x60 * (x))
-#define SDW_SHIM_IOCTL(x) (0x06C + 0x60 * (x))
-#define SDW_SHIM_CTMCTL(x) (0x06E + 0x60 * (x))
-
-#define SDW_SHIM_WAKEEN 0x190
-#define SDW_SHIM_WAKESTS 0x192
+/* LCTL */
+#define SDW_SHIM_LCTL 0x4
#define SDW_SHIM_LCTL_SPA BIT(0)
#define SDW_SHIM_LCTL_SPA_MASK GENMASK(3, 0)
#define SDW_SHIM_LCTL_CPA BIT(8)
#define SDW_SHIM_LCTL_CPA_MASK GENMASK(11, 8)
+/* SYNC */
+#define SDW_SHIM_SYNC 0xC
+
#define SDW_SHIM_SYNC_SYNCPRD_VAL_24 (24000 / SDW_CADENCE_GSYNC_KHZ - 1)
#define SDW_SHIM_SYNC_SYNCPRD_VAL_38_4 (38400 / SDW_CADENCE_GSYNC_KHZ - 1)
#define SDW_SHIM_SYNC_SYNCPRD GENMASK(14, 0)
@@ -49,19 +38,33 @@
#define SDW_SHIM_SYNC_CMDSYNC BIT(16)
#define SDW_SHIM_SYNC_SYNCGO BIT(24)
+/* Control stream capabilities and channel mask */
+#define SDW_SHIM_CTLSCAP(x) (0x010 + 0x60 * (x))
+#define SDW_SHIM_CTLS0CM(x) (0x012 + 0x60 * (x))
+#define SDW_SHIM_CTLS1CM(x) (0x014 + 0x60 * (x))
+#define SDW_SHIM_CTLS2CM(x) (0x016 + 0x60 * (x))
+#define SDW_SHIM_CTLS3CM(x) (0x018 + 0x60 * (x))
+
+/* PCM Stream capabilities */
+#define SDW_SHIM_PCMSCAP(x) (0x020 + 0x60 * (x))
+
#define SDW_SHIM_PCMSCAP_ISS GENMASK(3, 0)
#define SDW_SHIM_PCMSCAP_OSS GENMASK(7, 4)
#define SDW_SHIM_PCMSCAP_BSS GENMASK(12, 8)
+/* PCM Stream Channel Map */
+#define SDW_SHIM_PCMSYCHM(x, y) (0x022 + (0x60 * (x)) + (0x2 * (y)))
+
+/* PCM Stream Channel Count */
+#define SDW_SHIM_PCMSYCHC(x, y) (0x042 + (0x60 * (x)) + (0x2 * (y)))
+
#define SDW_SHIM_PCMSYCM_LCHN GENMASK(3, 0)
#define SDW_SHIM_PCMSYCM_HCHN GENMASK(7, 4)
#define SDW_SHIM_PCMSYCM_STREAM GENMASK(13, 8)
#define SDW_SHIM_PCMSYCM_DIR BIT(15)
-#define SDW_SHIM_PDMSCAP_ISS GENMASK(3, 0)
-#define SDW_SHIM_PDMSCAP_OSS GENMASK(7, 4)
-#define SDW_SHIM_PDMSCAP_BSS GENMASK(12, 8)
-#define SDW_SHIM_PDMSCAP_CPSS GENMASK(15, 13)
+/* IO control */
+#define SDW_SHIM_IOCTL(x) (0x06C + 0x60 * (x))
#define SDW_SHIM_IOCTL_MIF BIT(0)
#define SDW_SHIM_IOCTL_CO BIT(1)
@@ -73,13 +76,23 @@
#define SDW_SHIM_IOCTL_CIBD BIT(8)
#define SDW_SHIM_IOCTL_DIBD BIT(9)
-#define SDW_SHIM_CTMCTL_DACTQE BIT(0)
-#define SDW_SHIM_CTMCTL_DODS BIT(1)
-#define SDW_SHIM_CTMCTL_DOAIS GENMASK(4, 3)
+/* Wake Enable */
+#define SDW_SHIM_WAKEEN 0x190
#define SDW_SHIM_WAKEEN_ENABLE BIT(0)
+
+/* Wake Status */
+#define SDW_SHIM_WAKESTS 0x192
+
#define SDW_SHIM_WAKESTS_STATUS BIT(0)
+/* AC Timing control */
+#define SDW_SHIM_CTMCTL(x) (0x06E + 0x60 * (x))
+
+#define SDW_SHIM_CTMCTL_DACTQE BIT(0)
+#define SDW_SHIM_CTMCTL_DODS BIT(1)
+#define SDW_SHIM_CTMCTL_DOAIS GENMASK(4, 3)
+
/* Intel ALH Register definitions */
#define SDW_ALH_STRMZCFG(x) (0x000 + (0x4 * (x)))
#define SDW_ALH_NUM_STREAMS 64
diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h
index f089ee1ead58..fbf8c0d95968 100644
--- a/include/linux/spi/spi.h
+++ b/include/linux/spi/spi.h
@@ -378,6 +378,8 @@ extern struct spi_device *spi_new_ancillary_device(struct spi_device *spi, u8 ch
* @cleanup: frees controller-specific state
* @can_dma: determine whether this controller supports DMA
* @dma_map_dev: device which can be used for DMA mapping
+ * @cur_rx_dma_dev: device which is currently used for RX DMA mapping
+ * @cur_tx_dma_dev: device which is currently used for TX DMA mapping
* @queued: whether this controller is providing an internal message queue
* @kworker: pointer to thread struct for message pump
* @pump_messages: work struct for scheduling work to the message pump
@@ -610,6 +612,8 @@ struct spi_controller {
struct spi_device *spi,
struct spi_transfer *xfer);
struct device *dma_map_dev;
+ struct device *cur_rx_dma_dev;
+ struct device *cur_tx_dma_dev;
/*
* These hooks are for drivers that want to use the generic
@@ -848,6 +852,7 @@ struct spi_res {
* @bits_per_word: select a bits_per_word other than the device default
* for this transfer. If 0 the default (from @spi_device) is used.
* @dummy_data: indicates transfer is dummy bytes transfer.
+ * @cs_off: performs the transfer with chipselect off.
* @cs_change: affects chipselect after this transfer completes
* @cs_change_delay: delay between cs deassert and assert when
* @cs_change is set and @spi_transfer is not the last in @spi_message
@@ -958,6 +963,7 @@ struct spi_transfer {
struct sg_table rx_sg;
unsigned dummy_data:1;
+ unsigned cs_off:1;
unsigned cs_change:1;
unsigned tx_nbits:3;
unsigned rx_nbits:3;
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index 5c0c5174155d..1341f7d62da4 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -1,6 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_SPINLOCK_H
#define __LINUX_SPINLOCK_H
+#define __LINUX_INSIDE_SPINLOCK_H
/*
* include/linux/spinlock.h - generic spinlock/rwlock declarations
@@ -492,4 +493,5 @@ int __alloc_bucket_spinlocks(spinlock_t **locks, unsigned int *lock_mask,
void free_bucket_spinlocks(spinlock_t *locks);
+#undef __LINUX_INSIDE_SPINLOCK_H
#endif /* __LINUX_SPINLOCK_H */
diff --git a/include/linux/spinlock_api_smp.h b/include/linux/spinlock_api_smp.h
index 51fa0dab68c4..89eb6f4c659c 100644
--- a/include/linux/spinlock_api_smp.h
+++ b/include/linux/spinlock_api_smp.h
@@ -1,7 +1,7 @@
#ifndef __LINUX_SPINLOCK_API_SMP_H
#define __LINUX_SPINLOCK_API_SMP_H
-#ifndef __LINUX_SPINLOCK_H
+#ifndef __LINUX_INSIDE_SPINLOCK_H
# error "please don't include this file directly"
#endif
diff --git a/include/linux/spinlock_api_up.h b/include/linux/spinlock_api_up.h
index b8ba00ccccde..819aeba1c87e 100644
--- a/include/linux/spinlock_api_up.h
+++ b/include/linux/spinlock_api_up.h
@@ -1,7 +1,7 @@
#ifndef __LINUX_SPINLOCK_API_UP_H
#define __LINUX_SPINLOCK_API_UP_H
-#ifndef __LINUX_SPINLOCK_H
+#ifndef __LINUX_INSIDE_SPINLOCK_H
# error "please don't include this file directly"
#endif
diff --git a/include/linux/spinlock_rt.h b/include/linux/spinlock_rt.h
index 835aedaf68ac..61c49b16f69a 100644
--- a/include/linux/spinlock_rt.h
+++ b/include/linux/spinlock_rt.h
@@ -2,7 +2,7 @@
#ifndef __LINUX_SPINLOCK_RT_H
#define __LINUX_SPINLOCK_RT_H
-#ifndef __LINUX_SPINLOCK_H
+#ifndef __LINUX_INSIDE_SPINLOCK_H
#error Do not include directly. Use spinlock.h
#endif
diff --git a/include/linux/spinlock_up.h b/include/linux/spinlock_up.h
index 16521074b6f7..c87204247592 100644
--- a/include/linux/spinlock_up.h
+++ b/include/linux/spinlock_up.h
@@ -1,7 +1,7 @@
#ifndef __LINUX_SPINLOCK_UP_H
#define __LINUX_SPINLOCK_UP_H
-#ifndef __LINUX_SPINLOCK_H
+#ifndef __LINUX_INSIDE_SPINLOCK_H
# error "please don't include this file directly"
#endif
diff --git a/include/linux/srcutiny.h b/include/linux/srcutiny.h
index 6cfaa0a9a9b9..5aa5e0faf6a1 100644
--- a/include/linux/srcutiny.h
+++ b/include/linux/srcutiny.h
@@ -15,10 +15,10 @@
struct srcu_struct {
short srcu_lock_nesting[2]; /* srcu_read_lock() nesting depth. */
- unsigned short srcu_idx; /* Current reader array element in bit 0x2. */
- unsigned short srcu_idx_max; /* Furthest future srcu_idx request. */
u8 srcu_gp_running; /* GP workqueue running? */
u8 srcu_gp_waiting; /* GP waiting for readers? */
+ unsigned long srcu_idx; /* Current reader array element in bit 0x2. */
+ unsigned long srcu_idx_max; /* Furthest future srcu_idx request. */
struct swait_queue_head srcu_wq;
/* Last srcu_read_unlock() wakes GP. */
struct rcu_head *srcu_cb_head; /* Pending callbacks: Head. */
@@ -82,10 +82,12 @@ static inline void srcu_torture_stats_print(struct srcu_struct *ssp,
int idx;
idx = ((data_race(READ_ONCE(ssp->srcu_idx)) + 1) & 0x2) >> 1;
- pr_alert("%s%s Tiny SRCU per-CPU(idx=%d): (%hd,%hd)\n",
+ pr_alert("%s%s Tiny SRCU per-CPU(idx=%d): (%hd,%hd) gp: %lu->%lu\n",
tt, tf, idx,
data_race(READ_ONCE(ssp->srcu_lock_nesting[!idx])),
- data_race(READ_ONCE(ssp->srcu_lock_nesting[idx])));
+ data_race(READ_ONCE(ssp->srcu_lock_nesting[idx])),
+ data_race(READ_ONCE(ssp->srcu_idx)),
+ data_race(READ_ONCE(ssp->srcu_idx_max)));
}
#endif
diff --git a/include/linux/stackdepot.h b/include/linux/stackdepot.h
index bc2797955de9..9ca7798d7a31 100644
--- a/include/linux/stackdepot.h
+++ b/include/linux/stackdepot.h
@@ -14,9 +14,15 @@
#include <linux/gfp.h>
typedef u32 depot_stack_handle_t;
+/*
+ * Number of bits in the handle that stack depot doesn't use. Users may store
+ * information in them.
+ */
+#define STACK_DEPOT_EXTRA_BITS 5
depot_stack_handle_t __stack_depot_save(unsigned long *entries,
unsigned int nr_entries,
+ unsigned int extra_bits,
gfp_t gfp_flags, bool can_alloc);
/*
@@ -59,6 +65,8 @@ depot_stack_handle_t stack_depot_save(unsigned long *entries,
unsigned int stack_depot_fetch(depot_stack_handle_t handle,
unsigned long **entries);
+unsigned int stack_depot_get_extra_bits(depot_stack_handle_t handle);
+
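The new extra_bits argument lets a caller pack a small tag into the handle bits that stack depot leaves unused, and read it back later with stack_depot_get_extra_bits(). A sketch under the assumption that the tag fits in STACK_DEPOT_EXTRA_BITS; the helper names are hypothetical:

#include <linux/kernel.h>
#include <linux/stackdepot.h>
#include <linux/stacktrace.h>

static depot_stack_handle_t save_with_tag(unsigned int tag, gfp_t gfp)
{
	unsigned long entries[16];
	unsigned int nr = stack_trace_save(entries, ARRAY_SIZE(entries), 1);

	/* tag must fit in the STACK_DEPOT_EXTRA_BITS spare bits */
	return __stack_depot_save(entries, nr, tag, gfp, true);
}

static unsigned int tag_of(depot_stack_handle_t handle)
{
	return stack_depot_get_extra_bits(handle);
}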
int stack_depot_snprint(depot_stack_handle_t handle, char *buf, size_t size,
int spaces);
diff --git a/include/linux/stat.h b/include/linux/stat.h
index 7df06931f25d..ff277ced50e9 100644
--- a/include/linux/stat.h
+++ b/include/linux/stat.h
@@ -50,6 +50,8 @@ struct kstat {
struct timespec64 btime; /* File creation time */
u64 blocks;
u64 mnt_id;
+ u32 dio_mem_align;
+ u32 dio_offset_align;
};
#endif
diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h
index 8df475db88c0..fb2e88614f5d 100644
--- a/include/linux/stmmac.h
+++ b/include/linux/stmmac.h
@@ -257,7 +257,6 @@ struct plat_stmmacenet_data {
u8 vlan_fail_q;
unsigned int eee_usecs_rate;
struct pci_dev *pdev;
- bool has_crossts;
int int_snapshot_num;
int ext_snapshot_num;
bool int_snapshot_en;
diff --git a/include/linux/string.h b/include/linux/string.h
index 61ec7e4f6311..cf7607b32102 100644
--- a/include/linux/string.h
+++ b/include/linux/string.h
@@ -261,6 +261,49 @@ void memcpy_and_pad(void *dest, size_t dest_len, const void *src, size_t count,
int pad);
/**
+ * strtomem_pad - Copy NUL-terminated string to non-NUL-terminated buffer
+ *
+ * @dest: Pointer of destination character array (marked as __nonstring)
+ * @src: Pointer to NUL-terminated string
+ * @pad: Padding character to fill any remaining bytes of @dest after copy
+ *
+ * This is a replacement for strncpy() uses where the destination is not
+ * a NUL-terminated string, but with bounds checking on the source size, and
+ * an explicit padding character. If padding is not required, use strtomem().
+ *
+ * Note that the size of @dest is not an argument, as the length of @dest
+ * must be discoverable by the compiler.
+ */
+#define strtomem_pad(dest, src, pad) do { \
+ const size_t _dest_len = __builtin_object_size(dest, 1); \
+ \
+ BUILD_BUG_ON(!__builtin_constant_p(_dest_len) || \
+ _dest_len == (size_t)-1); \
+ memcpy_and_pad(dest, _dest_len, src, strnlen(src, _dest_len), pad); \
+} while (0)
+
+/**
+ * strtomem - Copy NUL-terminated string to non-NUL-terminated buffer
+ *
+ * @dest: Pointer of destination character array (marked as __nonstring)
+ * @src: Pointer to NUL-terminated string
+ *
+ * This is a replacement for strncpy() uses where the destination is not
+ * a NUL-terminated string, but with bounds checking on the source size, and
+ * without trailing padding. If padding is required, use strtomem_pad().
+ *
+ * Note that the size of @dest is not an argument, as the length of @dest
+ * must be discoverable by the compiler.
+ */
+#define strtomem(dest, src) do { \
+ const size_t _dest_len = __builtin_object_size(dest, 1); \
+ \
+ BUILD_BUG_ON(!__builtin_constant_p(_dest_len) || \
+ _dest_len == (size_t)-1); \
+ memcpy(dest, src, min(_dest_len, strnlen(src, _dest_len))); \
+} while (0)
+
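A short sketch of how the two macros above would typically be used to fill a fixed-size, non-NUL-terminated field from a C string; the struct layout and names are illustrative only:

#include <linux/string.h>
#include <linux/types.h>

struct fw_header {
	char magic[8] __nonstring;	/* fixed-size, not NUL-terminated */
	u32 len;
};

static void fill_header(struct fw_header *hdr, const char *name)
{
	strtomem_pad(hdr->magic, name, 0);	/* copy and zero-pad */
	/* strtomem(hdr->magic, name); would copy without padding */
}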
+/**
* memset_after - Set a value after a struct member to the end of a struct
*
* @obj: Address of target struct instance
diff --git a/include/linux/string_helpers.h b/include/linux/string_helpers.h
index 4d72258d42fd..8530c7328269 100644
--- a/include/linux/string_helpers.h
+++ b/include/linux/string_helpers.h
@@ -21,6 +21,8 @@ enum string_size_units {
void string_get_size(u64 size, u64 blk_size, enum string_size_units units,
char *buf, int len);
+int parse_int_array_user(const char __user *from, size_t count, int **array);
+
#define UNESCAPE_SPACE BIT(0)
#define UNESCAPE_OCTAL BIT(1)
#define UNESCAPE_HEX BIT(2)
@@ -126,4 +128,9 @@ static inline const char *str_enabled_disabled(bool v)
return v ? "enabled" : "disabled";
}
+static inline const char *str_read_write(bool v)
+{
+ return v ? "read" : "write";
+}
+
#endif
diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
index 75eea5ebb179..770ef2cb5775 100644
--- a/include/linux/sunrpc/clnt.h
+++ b/include/linux/sunrpc/clnt.h
@@ -246,6 +246,7 @@ void rpc_clnt_xprt_switch_remove_xprt(struct rpc_clnt *, struct rpc_xprt *);
bool rpc_clnt_xprt_switch_has_addr(struct rpc_clnt *clnt,
const struct sockaddr *sap);
void rpc_clnt_xprt_set_online(struct rpc_clnt *clnt, struct rpc_xprt *xprt);
+void rpc_clnt_disconnect(struct rpc_clnt *clnt);
void rpc_cleanup_clids(void);
static inline int rpc_reply_expected(struct rpc_task *task)
diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h
index acc62647317c..b8ca3ecaf8d7 100644
--- a/include/linux/sunrpc/sched.h
+++ b/include/linux/sunrpc/sched.h
@@ -209,11 +209,17 @@ struct rpc_task *rpc_run_task(const struct rpc_task_setup *);
struct rpc_task *rpc_run_bc_task(struct rpc_rqst *req);
void rpc_put_task(struct rpc_task *);
void rpc_put_task_async(struct rpc_task *);
+bool rpc_task_set_rpc_status(struct rpc_task *task, int rpc_status);
+void rpc_task_try_cancel(struct rpc_task *task, int error);
void rpc_signal_task(struct rpc_task *);
void rpc_exit_task(struct rpc_task *);
void rpc_exit(struct rpc_task *, int);
void rpc_release_calldata(const struct rpc_call_ops *, void *);
void rpc_killall_tasks(struct rpc_clnt *);
+unsigned long rpc_cancel_tasks(struct rpc_clnt *clnt, int error,
+ bool (*fnmatch)(const struct rpc_task *,
+ const void *),
+ const void *data);
void rpc_execute(struct rpc_task *);
void rpc_init_priority_wait_queue(struct rpc_wait_queue *, const char *);
void rpc_init_wait_queue(struct rpc_wait_queue *, const char *);
@@ -252,7 +258,7 @@ int rpc_malloc(struct rpc_task *);
void rpc_free(struct rpc_task *);
int rpciod_up(void);
void rpciod_down(void);
-int __rpc_wait_for_completion_task(struct rpc_task *task, wait_bit_action_f *);
+int rpc_wait_for_completion_task(struct rpc_task *task);
#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
struct net;
void rpc_show_tasks(struct net *);
@@ -264,11 +270,6 @@ extern struct workqueue_struct *xprtiod_workqueue;
void rpc_prepare_task(struct rpc_task *task);
gfp_t rpc_task_gfp_mask(void);
-static inline int rpc_wait_for_completion_task(struct rpc_task *task)
-{
- return __rpc_wait_for_completion_task(task, NULL);
-}
-
#if IS_ENABLED(CONFIG_SUNRPC_DEBUG) || IS_ENABLED(CONFIG_TRACEPOINTS)
static inline const char * rpc_qname(const struct rpc_wait_queue *q)
{
diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
index daecb009c05b..88de45491376 100644
--- a/include/linux/sunrpc/svc.h
+++ b/include/linux/sunrpc/svc.h
@@ -472,6 +472,7 @@ struct svc_procedure {
/* XDR free result: */
void (*pc_release)(struct svc_rqst *);
unsigned int pc_argsize; /* argument struct size */
+ unsigned int pc_argzero; /* how much of argument to clear */
unsigned int pc_ressize; /* result struct size */
unsigned int pc_cachetype; /* cache info (NFS) */
unsigned int pc_xdrressize; /* maximum size of XDR reply */
@@ -544,16 +545,27 @@ static inline void svc_reserve_auth(struct svc_rqst *rqstp, int space)
}
/**
- * svcxdr_init_decode - Prepare an xdr_stream for svc Call decoding
+ * svcxdr_init_decode - Prepare an xdr_stream for Call decoding
* @rqstp: controlling server RPC transaction context
*
+ * This function currently assumes the RPC header in rq_arg has
+ * already been decoded. Upon return, xdr->p points to the
+ * location of the upper layer header.
*/
static inline void svcxdr_init_decode(struct svc_rqst *rqstp)
{
struct xdr_stream *xdr = &rqstp->rq_arg_stream;
- struct kvec *argv = rqstp->rq_arg.head;
+ struct xdr_buf *buf = &rqstp->rq_arg;
+ struct kvec *argv = buf->head;
- xdr_init_decode(xdr, &rqstp->rq_arg, argv->iov_base, NULL);
+ /*
+ * svc_getnl() and friends do not keep the xdr_buf's ::len
+ * field up to date. Refresh that field before initializing
+ * the argument decoding stream.
+ */
+ buf->len = buf->head->iov_len + buf->page_len + buf->tail->iov_len;
+
+ xdr_init_decode(xdr, buf, argv->iov_base, NULL);
xdr_set_scratch_page(xdr, rqstp->rq_scratch_page);
}
@@ -576,7 +588,7 @@ static inline void svcxdr_init_encode(struct svc_rqst *rqstp)
xdr->end = resv->iov_base + PAGE_SIZE - rqstp->rq_auth_slack;
buf->len = resv->iov_len;
xdr->page_ptr = buf->pages - 1;
- buf->buflen = PAGE_SIZE * (1 + rqstp->rq_page_end - buf->pages);
+ buf->buflen = PAGE_SIZE * (rqstp->rq_page_end - buf->pages);
buf->buflen -= rqstp->rq_auth_slack;
xdr->rqst = NULL;
}
diff --git a/include/linux/sunrpc/xdr.h b/include/linux/sunrpc/xdr.h
index 69175029abbb..f84e2a1358e1 100644
--- a/include/linux/sunrpc/xdr.h
+++ b/include/linux/sunrpc/xdr.h
@@ -240,6 +240,8 @@ typedef int (*kxdrdproc_t)(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
extern void xdr_init_encode(struct xdr_stream *xdr, struct xdr_buf *buf,
__be32 *p, struct rpc_rqst *rqst);
+extern void xdr_init_encode_pages(struct xdr_stream *xdr, struct xdr_buf *buf,
+ struct page **pages, struct rpc_rqst *rqst);
extern __be32 *xdr_reserve_space(struct xdr_stream *xdr, size_t nbytes);
extern int xdr_reserve_space_vec(struct xdr_stream *xdr, struct kvec *vec,
size_t nbytes);
diff --git a/include/linux/suspend.h b/include/linux/suspend.h
index 70f2921e2e70..cfe19a028918 100644
--- a/include/linux/suspend.h
+++ b/include/linux/suspend.h
@@ -75,7 +75,7 @@ extern struct suspend_stats suspend_stats;
static inline void dpm_save_failed_dev(const char *name)
{
- strlcpy(suspend_stats.failed_devs[suspend_stats.last_failed_dev],
+ strscpy(suspend_stats.failed_devs[suspend_stats.last_failed_dev],
name,
sizeof(suspend_stats.failed_devs[0]));
suspend_stats.last_failed_dev++;
@@ -191,6 +191,7 @@ struct platform_s2idle_ops {
int (*begin)(void);
int (*prepare)(void);
int (*prepare_late)(void);
+ void (*check)(void);
bool (*wake)(void);
void (*restore_early)(void);
void (*restore)(void);
@@ -510,8 +511,8 @@ extern bool pm_save_wakeup_count(unsigned int count);
extern void pm_wakep_autosleep_enabled(bool set);
extern void pm_print_active_wakeup_sources(void);
-extern void lock_system_sleep(void);
-extern void unlock_system_sleep(void);
+extern unsigned int lock_system_sleep(void);
+extern void unlock_system_sleep(unsigned int);
#else /* !CONFIG_PM_SLEEP */
@@ -534,8 +535,8 @@ static inline void pm_system_wakeup(void) {}
static inline void pm_wakeup_clear(bool reset) {}
static inline void pm_system_irq_wakeup(unsigned int irq_number) {}
-static inline void lock_system_sleep(void) {}
-static inline void unlock_system_sleep(void) {}
+static inline unsigned int lock_system_sleep(void) { return 0; }
+static inline void unlock_system_sleep(unsigned int flags) {}
#endif /* !CONFIG_PM_SLEEP */
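The prototype change above means lock_system_sleep() now returns a cookie that must be handed back to unlock_system_sleep(). The updated calling pattern, sketched with an illustrative function name:

#include <linux/suspend.h>

static void example_sleep_locked_section(void)
{
	unsigned int flags;

	flags = lock_system_sleep();
	/* ... work that must not race with system suspend ... */
	unlock_system_sleep(flags);
}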
diff --git a/include/linux/swab.h b/include/linux/swab.h
index bcff5149861a..9b804dbb0d79 100644
--- a/include/linux/swab.h
+++ b/include/linux/swab.h
@@ -20,4 +20,29 @@
# define swab64s __swab64s
# define swahw32s __swahw32s
# define swahb32s __swahb32s
+
+static inline void swab16_array(u16 *buf, unsigned int words)
+{
+ while (words--) {
+ swab16s(buf);
+ buf++;
+ }
+}
+
+static inline void swab32_array(u32 *buf, unsigned int words)
+{
+ while (words--) {
+ swab32s(buf);
+ buf++;
+ }
+}
+
+static inline void swab64_array(u64 *buf, unsigned int words)
+{
+ while (words--) {
+ swab64s(buf);
+ buf++;
+ }
+}
+
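A small, hedged example of the new array helpers: byte-swapping a block of 32-bit words in place, for instance a buffer captured from a peripheral with the opposite endianness (the helper name is made up):

#include <linux/swab.h>
#include <linux/types.h>

static void fixup_endianness(u32 *buf, unsigned int nwords)
{
	/* Swaps every word unconditionally; callers guard on endianness. */
	swab32_array(buf, nwords);
}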
#endif /* _LINUX_SWAB_H */
diff --git a/include/linux/swap.h b/include/linux/swap.h
index 43150b9bbc5c..a18cf4b7c724 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -162,6 +162,10 @@ union swap_header {
*/
struct reclaim_state {
unsigned long reclaimed_slab;
+#ifdef CONFIG_LRU_GEN
+ /* per-thread mm walk data */
+ struct lru_gen_mm_walk *mm_walk;
+#endif
};
#ifdef __KERNEL__
@@ -351,6 +355,11 @@ static inline swp_entry_t folio_swap_entry(struct folio *folio)
return entry;
}
+static inline void folio_set_swap_entry(struct folio *folio, swp_entry_t entry)
+{
+ folio->private = (void *)entry.val;
+}
+
/* linux/mm/workingset.c */
void workingset_age_nonresident(struct lruvec *lruvec, unsigned long nr_pages);
void *workingset_eviction(struct folio *folio, struct mem_cgroup *target_memcg);
@@ -375,11 +384,11 @@ extern unsigned long totalreserve_pages;
/* linux/mm/swap.c */
-extern void lru_note_cost(struct lruvec *lruvec, bool file,
- unsigned int nr_pages);
-extern void lru_note_cost_folio(struct folio *);
-extern void folio_add_lru(struct folio *);
-extern void lru_cache_add(struct page *);
+void lru_note_cost(struct lruvec *lruvec, bool file, unsigned int nr_pages);
+void lru_note_cost_folio(struct folio *);
+void folio_add_lru(struct folio *);
+void folio_add_lru_vma(struct folio *, struct vm_area_struct *);
+void lru_cache_add(struct page *);
void mark_page_accessed(struct page *);
void folio_mark_accessed(struct folio *);
@@ -481,7 +490,8 @@ static inline long get_nr_swap_pages(void)
extern void si_swapinfo(struct sysinfo *);
swp_entry_t folio_alloc_swap(struct folio *folio);
-extern void put_swap_page(struct page *page, swp_entry_t entry);
+bool folio_free_swap(struct folio *folio);
+void put_swap_folio(struct folio *folio, swp_entry_t entry);
extern swp_entry_t get_swap_page_of_type(int);
extern int get_swap_pages(int n, swp_entry_t swp_entries[], int entry_size);
extern int add_swap_count_continuation(swp_entry_t, gfp_t);
@@ -500,7 +510,6 @@ extern int __swp_swapcount(swp_entry_t entry);
extern int swp_swapcount(swp_entry_t entry);
extern struct swap_info_struct *page_swap_info(struct page *);
extern struct swap_info_struct *swp_swap_info(swp_entry_t entry);
-extern int try_to_free_swap(struct page *);
struct backing_dev_info;
extern int init_swap_address_space(unsigned int type, unsigned long nr_pages);
extern void exit_swap_address_space(unsigned int type);
@@ -566,7 +575,7 @@ static inline void swap_free(swp_entry_t swp)
{
}
-static inline void put_swap_page(struct page *page, swp_entry_t swp)
+static inline void put_swap_folio(struct folio *folio, swp_entry_t swp)
{
}
@@ -585,11 +594,6 @@ static inline int swp_swapcount(swp_entry_t entry)
return 0;
}
-static inline int try_to_free_swap(struct page *page)
-{
- return 0;
-}
-
static inline swp_entry_t folio_alloc_swap(struct folio *folio)
{
swp_entry_t entry;
@@ -597,6 +601,11 @@ static inline swp_entry_t folio_alloc_swap(struct folio *folio)
return entry;
}
+static inline bool folio_free_swap(struct folio *folio)
+{
+ return false;
+}
+
static inline int add_swap_extent(struct swap_info_struct *sis,
unsigned long start_page,
unsigned long nr_pages, sector_t start_block)
@@ -657,7 +666,7 @@ static inline void folio_throttle_swaprate(struct folio *folio, gfp_t gfp)
cgroup_throttle_swaprate(&folio->page, gfp);
}
-#ifdef CONFIG_MEMCG_SWAP
+#if defined(CONFIG_MEMCG) && defined(CONFIG_SWAP)
void mem_cgroup_swapout(struct folio *folio, swp_entry_t entry);
int __mem_cgroup_try_charge_swap(struct folio *folio, swp_entry_t entry);
static inline int mem_cgroup_try_charge_swap(struct folio *folio,
@@ -677,7 +686,7 @@ static inline void mem_cgroup_uncharge_swap(swp_entry_t entry, unsigned int nr_p
}
extern long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg);
-extern bool mem_cgroup_swap_full(struct page *page);
+extern bool mem_cgroup_swap_full(struct folio *folio);
#else
static inline void mem_cgroup_swapout(struct folio *folio, swp_entry_t entry)
{
@@ -699,7 +708,7 @@ static inline long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg)
return get_nr_swap_pages();
}
-static inline bool mem_cgroup_swap_full(struct page *page)
+static inline bool mem_cgroup_swap_full(struct folio *folio)
{
return vm_swap_full();
}
diff --git a/include/linux/swap_cgroup.h b/include/linux/swap_cgroup.h
index a12dd1c3966c..ae73a87775b3 100644
--- a/include/linux/swap_cgroup.h
+++ b/include/linux/swap_cgroup.h
@@ -4,7 +4,7 @@
#include <linux/swap.h>
-#ifdef CONFIG_MEMCG_SWAP
+#if defined(CONFIG_MEMCG) && defined(CONFIG_SWAP)
extern unsigned short swap_cgroup_cmpxchg(swp_entry_t ent,
unsigned short old, unsigned short new);
@@ -40,6 +40,6 @@ static inline void swap_cgroup_swapoff(int type)
return;
}
-#endif /* CONFIG_MEMCG_SWAP */
+#endif
#endif /* __LINUX_SWAP_CGROUP_H */
diff --git a/include/linux/swapfile.h b/include/linux/swapfile.h
index 54078542134c..7ed529a77c5b 100644
--- a/include/linux/swapfile.h
+++ b/include/linux/swapfile.h
@@ -8,6 +8,11 @@
*/
extern struct swap_info_struct *swap_info[];
extern unsigned long generic_max_swapfile_size(void);
-extern unsigned long max_swapfile_size(void);
+unsigned long arch_max_swapfile_size(void);
+
+/* Maximum swapfile size supported for the arch (not inclusive). */
+extern unsigned long swapfile_maximum_size;
+/* Whether swap migration entry supports storing A/D bits for the arch */
+extern bool swap_migration_ad_supported;
#endif /* _LINUX_SWAPFILE_H */
diff --git a/include/linux/swapops.h b/include/linux/swapops.h
index a3d435bf9f97..86b95ccb81bb 100644
--- a/include/linux/swapops.h
+++ b/include/linux/swapops.h
@@ -8,6 +8,10 @@
#ifdef CONFIG_MMU
+#ifdef CONFIG_SWAP
+#include <linux/swapfile.h>
+#endif /* CONFIG_SWAP */
+
/*
* swapcache pages are stored in the swapper_space radix tree. We want to
* get good packing density in that tree, so the index should be dense in
@@ -23,6 +27,45 @@
#define SWP_TYPE_SHIFT (BITS_PER_XA_VALUE - MAX_SWAPFILES_SHIFT)
#define SWP_OFFSET_MASK ((1UL << SWP_TYPE_SHIFT) - 1)
+/*
+ * Definitions only for PFN swap entries (see is_pfn_swap_entry()). To
+ * store PFN, we only need SWP_PFN_BITS bits. Each of the pfn swap entries
+ * can use the extra bits to store other information besides PFN.
+ */
+#ifdef MAX_PHYSMEM_BITS
+#define SWP_PFN_BITS (MAX_PHYSMEM_BITS - PAGE_SHIFT)
+#else /* MAX_PHYSMEM_BITS */
+#define SWP_PFN_BITS (BITS_PER_LONG - PAGE_SHIFT)
+#endif /* MAX_PHYSMEM_BITS */
+#define SWP_PFN_MASK (BIT(SWP_PFN_BITS) - 1)
+
+/**
+ * Migration swap entry specific bitfield definitions. Layout:
+ *
+ * |----------+--------------------|
+ * | swp_type | swp_offset |
+ * |----------+--------+-+-+-------|
+ * | | resv |D|A| PFN |
+ * |----------+--------+-+-+-------|
+ *
+ * @SWP_MIG_YOUNG_BIT: Whether the page used to have young bit set (bit A)
+ * @SWP_MIG_DIRTY_BIT: Whether the page used to have dirty bit set (bit D)
+ *
+ * Note: A/D bits are only stored in migration entries if there are enough
+ * free bits in the arch-specific swp offset. By default we ignore A/D bits
+ * when migrating a page. Please refer to migration_entry_supports_ad()
+ * for more information. If there are more bits besides the PFN and A/D bits,
+ * they should be reserved and always be zero.
+ */
+#define SWP_MIG_YOUNG_BIT (SWP_PFN_BITS)
+#define SWP_MIG_DIRTY_BIT (SWP_PFN_BITS + 1)
+#define SWP_MIG_TOTAL_BITS (SWP_PFN_BITS + 2)
+
+#define SWP_MIG_YOUNG BIT(SWP_MIG_YOUNG_BIT)
+#define SWP_MIG_DIRTY BIT(SWP_MIG_DIRTY_BIT)
+
+static inline bool is_pfn_swap_entry(swp_entry_t entry);
+
/* Clear all flags but only keep swp_entry_t related information */
static inline pte_t pte_swp_clear_flags(pte_t pte)
{
@@ -64,6 +107,17 @@ static inline pgoff_t swp_offset(swp_entry_t entry)
return entry.val & SWP_OFFSET_MASK;
}
+/*
+ * This should only be called upon a pfn swap entry to get the PFN stored
+ * in the swap entry. Please refer to is_pfn_swap_entry() for the
+ * definition of a pfn swap entry.
+ */
+static inline unsigned long swp_offset_pfn(swp_entry_t entry)
+{
+ VM_BUG_ON(!is_pfn_swap_entry(entry));
+ return swp_offset(entry) & SWP_PFN_MASK;
+}
+
/* check whether a pte points to a swap entry */
static inline int is_swap_pte(pte_t pte)
{
@@ -240,6 +294,52 @@ static inline swp_entry_t make_writable_migration_entry(pgoff_t offset)
return swp_entry(SWP_MIGRATION_WRITE, offset);
}
+/*
+ * Returns whether the host has large enough swap offset field to support
+ * carrying over pgtable A/D bits for page migrations. The result is
+ * pretty much arch specific.
+ */
+static inline bool migration_entry_supports_ad(void)
+{
+#ifdef CONFIG_SWAP
+ return swap_migration_ad_supported;
+#else /* CONFIG_SWAP */
+ return false;
+#endif /* CONFIG_SWAP */
+}
+
+static inline swp_entry_t make_migration_entry_young(swp_entry_t entry)
+{
+ if (migration_entry_supports_ad())
+ return swp_entry(swp_type(entry),
+ swp_offset(entry) | SWP_MIG_YOUNG);
+ return entry;
+}
+
+static inline bool is_migration_entry_young(swp_entry_t entry)
+{
+ if (migration_entry_supports_ad())
+ return swp_offset(entry) & SWP_MIG_YOUNG;
+ /* Keep the old behavior of aging page after migration */
+ return false;
+}
+
+static inline swp_entry_t make_migration_entry_dirty(swp_entry_t entry)
+{
+ if (migration_entry_supports_ad())
+ return swp_entry(swp_type(entry),
+ swp_offset(entry) | SWP_MIG_DIRTY);
+ return entry;
+}
+
+static inline bool is_migration_entry_dirty(swp_entry_t entry)
+{
+ if (migration_entry_supports_ad())
+ return swp_offset(entry) & SWP_MIG_DIRTY;
+ /* Keep the old behavior of clean page after migration */
+ return false;
+}
+
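Putting the helpers above together, migration code can carry a pte's accessed/dirty state into the entry it installs. This is a simplified sketch of that pattern, not the actual mm/migrate.c code:

#include <linux/mm.h>
#include <linux/swapops.h>

static swp_entry_t example_make_entry(struct page *page, pte_t pte, bool writable)
{
	swp_entry_t entry;

	if (writable)
		entry = make_writable_migration_entry(page_to_pfn(page));
	else
		entry = make_readable_migration_entry(page_to_pfn(page));

	/* Preserve A/D bits when the arch has room for them. */
	if (pte_young(pte))
		entry = make_migration_entry_young(entry);
	if (pte_dirty(pte))
		entry = make_migration_entry_dirty(entry);

	return entry;
}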
extern void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
spinlock_t *ptl);
extern void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
@@ -247,8 +347,8 @@ extern void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
#ifdef CONFIG_HUGETLB_PAGE
extern void __migration_entry_wait_huge(pte_t *ptep, spinlock_t *ptl);
extern void migration_entry_wait_huge(struct vm_area_struct *vma, pte_t *pte);
-#endif
-#else
+#endif /* CONFIG_HUGETLB_PAGE */
+#else /* CONFIG_MIGRATION */
static inline swp_entry_t make_readable_migration_entry(pgoff_t offset)
{
return swp_entry(0, 0);
@@ -276,7 +376,7 @@ static inline void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
#ifdef CONFIG_HUGETLB_PAGE
static inline void __migration_entry_wait_huge(pte_t *ptep, spinlock_t *ptl) { }
static inline void migration_entry_wait_huge(struct vm_area_struct *vma, pte_t *pte) { }
-#endif
+#endif /* CONFIG_HUGETLB_PAGE */
static inline int is_writable_migration_entry(swp_entry_t entry)
{
return 0;
@@ -286,7 +386,26 @@ static inline int is_readable_migration_entry(swp_entry_t entry)
return 0;
}
-#endif
+static inline swp_entry_t make_migration_entry_young(swp_entry_t entry)
+{
+ return entry;
+}
+
+static inline bool is_migration_entry_young(swp_entry_t entry)
+{
+ return false;
+}
+
+static inline swp_entry_t make_migration_entry_dirty(swp_entry_t entry)
+{
+ return entry;
+}
+
+static inline bool is_migration_entry_dirty(swp_entry_t entry)
+{
+ return false;
+}
+#endif /* CONFIG_MIGRATION */
typedef unsigned long pte_marker;
@@ -369,7 +488,7 @@ static inline int pte_none_mostly(pte_t pte)
static inline struct page *pfn_swap_entry_to_page(swp_entry_t entry)
{
- struct page *p = pfn_to_page(swp_offset(entry));
+ struct page *p = pfn_to_page(swp_offset_pfn(entry));
/*
* Any use of migration entries may only occur while the
@@ -387,6 +506,9 @@ static inline struct page *pfn_swap_entry_to_page(swp_entry_t entry)
*/
static inline bool is_pfn_swap_entry(swp_entry_t entry)
{
+ /* Make sure the swp offset can always store the needed fields */
+ BUILD_BUG_ON(SWP_TYPE_SHIFT < SWP_PFN_BITS);
+
return is_migration_entry(entry) || is_device_private_entry(entry) ||
is_device_exclusive_entry(entry);
}
@@ -426,7 +548,7 @@ static inline int is_pmd_migration_entry(pmd_t pmd)
{
return is_swap_pmd(pmd) && is_migration_entry(pmd_to_swp_entry(pmd));
}
-#else
+#else /* CONFIG_ARCH_ENABLE_THP_MIGRATION */
static inline int set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
struct page *page)
{
@@ -455,7 +577,7 @@ static inline int is_pmd_migration_entry(pmd_t pmd)
{
return 0;
}
-#endif
+#endif /* CONFIG_ARCH_ENABLE_THP_MIGRATION */
#ifdef CONFIG_MEMORY_FAILURE
@@ -475,27 +597,17 @@ static inline int is_hwpoison_entry(swp_entry_t entry)
return swp_type(entry) == SWP_HWPOISON;
}
-static inline unsigned long hwpoison_entry_to_pfn(swp_entry_t entry)
-{
- return swp_offset(entry);
-}
-
static inline void num_poisoned_pages_inc(void)
{
atomic_long_inc(&num_poisoned_pages);
}
-static inline void num_poisoned_pages_dec(void)
-{
- atomic_long_dec(&num_poisoned_pages);
-}
-
static inline void num_poisoned_pages_sub(long i)
{
atomic_long_sub(i, &num_poisoned_pages);
}
-#else
+#else /* CONFIG_MEMORY_FAILURE */
static inline swp_entry_t make_hwpoison_entry(struct page *page)
{
@@ -514,7 +626,7 @@ static inline void num_poisoned_pages_inc(void)
static inline void num_poisoned_pages_sub(long i)
{
}
-#endif
+#endif /* CONFIG_MEMORY_FAILURE */
static inline int non_swap_entry(swp_entry_t entry)
{
diff --git a/include/linux/syslog.h b/include/linux/syslog.h
index 86af908e2663..955f80e34d4f 100644
--- a/include/linux/syslog.h
+++ b/include/linux/syslog.h
@@ -8,6 +8,8 @@
#ifndef _LINUX_SYSLOG_H
#define _LINUX_SYSLOG_H
+#include <linux/wait.h>
+
/* Close the log. Currently a NOP. */
#define SYSLOG_ACTION_CLOSE 0
/* Open the log. Currently a NOP. */
@@ -35,5 +37,6 @@
#define SYSLOG_FROM_PROC 1
int do_syslog(int type, char __user *buf, int count, int source);
+extern wait_queue_head_t log_wait;
#endif /* _LINUX_SYSLOG_H */
diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index a9fbe22732c3..41b1da621a45 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -295,7 +295,7 @@ struct tcp_sock {
u32 packets_out; /* Packets which are "in flight" */
u32 retrans_out; /* Retransmitted packets out */
u32 max_packets_out; /* max packets_out in last window */
- u32 max_packets_seq; /* right edge of max_packets_out flight */
+ u32 cwnd_usage_seq; /* right edge of cwnd usage tracking flight */
u16 urg_data; /* Saved octet of OOB data and control flags */
u8 ecn_flags; /* ECN status bits. */
@@ -388,6 +388,12 @@ struct tcp_sock {
u8 bpf_sock_ops_cb_flags; /* Control calling BPF programs
* values defined in uapi/linux/tcp.h
*/
+ u8 bpf_chg_cc_inprogress:1; /* In the middle of
+ * bpf_setsockopt(TCP_CONGESTION);
+ * used to keep bpf_tcp_cc->init()
+ * from recursing by calling
+ * bpf_setsockopt(TCP_CONGESTION, "itself").
+ */
#define BPF_SOCK_OPS_TEST_FLAG(TP, ARG) (TP->bpf_sock_ops_cb_flags & ARG)
#else
#define BPF_SOCK_OPS_TEST_FLAG(TP, ARG) 0
diff --git a/include/linux/termios_internal.h b/include/linux/termios_internal.h
new file mode 100644
index 000000000000..d77f29e5e2b7
--- /dev/null
+++ b/include/linux/termios_internal.h
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_TERMIOS_CONV_H
+#define _LINUX_TERMIOS_CONV_H
+
+#include <linux/uaccess.h>
+#include <asm/termios.h>
+
+/* intr=^C quit=^\ erase=del kill=^U
+ eof=^D vtime=\0 vmin=\1 sxtc=\0
+ start=^Q stop=^S susp=^Z eol=\0
+ reprint=^R discard=^O werase=^W lnext=^V
+ eol2=\0
+*/
+
+#ifdef VDSUSP
+#define INIT_C_CC_VDSUSP_EXTRA [VDSUSP] = 'Y'-0x40,
+#else
+#define INIT_C_CC_VDSUSP_EXTRA
+#endif
+
+#define INIT_C_CC { \
+ [VINTR] = 'C'-0x40, \
+ [VQUIT] = '\\'-0x40, \
+ [VERASE] = '\177', \
+ [VKILL] = 'U'-0x40, \
+ [VEOF] = 'D'-0x40, \
+ [VSTART] = 'Q'-0x40, \
+ [VSTOP] = 'S'-0x40, \
+ [VSUSP] = 'Z'-0x40, \
+ [VREPRINT] = 'R'-0x40, \
+ [VDISCARD] = 'O'-0x40, \
+ [VWERASE] = 'W'-0x40, \
+ [VLNEXT] = 'V'-0x40, \
+ INIT_C_CC_VDSUSP_EXTRA \
+ [VMIN] = 1 }
+
+int user_termio_to_kernel_termios(struct ktermios *, struct termio __user *);
+int kernel_termios_to_user_termio(struct termio __user *, struct ktermios *);
+#ifdef TCGETS2
+int user_termios_to_kernel_termios(struct ktermios *, struct termios2 __user *);
+int kernel_termios_to_user_termios(struct termios2 __user *, struct ktermios *);
+int user_termios_to_kernel_termios_1(struct ktermios *, struct termios __user *);
+int kernel_termios_to_user_termios_1(struct termios __user *, struct ktermios *);
+#else /* TCGETS2 */
+int user_termios_to_kernel_termios(struct ktermios *, struct termios __user *);
+int kernel_termios_to_user_termios(struct termios __user *, struct ktermios *);
+#endif /* TCGETS2 */
+
+#endif /* _LINUX_TERMIOS_CONV_H */
diff --git a/include/linux/thermal.h b/include/linux/thermal.h
index 1386c713885d..9ecc128944a1 100644
--- a/include/linux/thermal.h
+++ b/include/linux/thermal.h
@@ -17,8 +17,6 @@
#include <linux/workqueue.h>
#include <uapi/linux/thermal.h>
-#define THERMAL_MAX_TRIPS 12
-
/* invalid cooling state */
#define THERMAL_CSTATE_INVALID -1UL
@@ -296,82 +294,43 @@ struct thermal_zone_params {
int offset;
};
-/**
- * struct thermal_zone_of_device_ops - callbacks for handling DT based zones
- *
- * Mandatory:
- * @get_temp: a pointer to a function that reads the sensor temperature.
- *
- * Optional:
- * @get_trend: a pointer to a function that reads the sensor temperature trend.
- * @set_trips: a pointer to a function that sets a temperature window. When
- * this window is left the driver must inform the thermal core via
- * thermal_zone_device_update.
- * @set_emul_temp: a pointer to a function that sets sensor emulated
- * temperature.
- * @set_trip_temp: a pointer to a function that sets the trip temperature on
- * hardware.
- * @change_mode: a pointer to a function that notifies the thermal zone
- * mode change.
- */
-struct thermal_zone_of_device_ops {
- int (*get_temp)(void *, int *);
- int (*get_trend)(void *, int, enum thermal_trend *);
- int (*set_trips)(void *, int, int);
- int (*set_emul_temp)(void *, int);
- int (*set_trip_temp)(void *, int, int);
- int (*change_mode) (void *, enum thermal_device_mode);
-};
-
/* Function declarations */
#ifdef CONFIG_THERMAL_OF
-int thermal_zone_of_get_sensor_id(struct device_node *tz_np,
- struct device_node *sensor_np,
- u32 *id);
-struct thermal_zone_device *
-thermal_zone_of_sensor_register(struct device *dev, int id, void *data,
- const struct thermal_zone_of_device_ops *ops);
-void thermal_zone_of_sensor_unregister(struct device *dev,
- struct thermal_zone_device *tz);
-struct thermal_zone_device *devm_thermal_zone_of_sensor_register(
- struct device *dev, int id, void *data,
- const struct thermal_zone_of_device_ops *ops);
-void devm_thermal_zone_of_sensor_unregister(struct device *dev,
- struct thermal_zone_device *tz);
-#else
+struct thermal_zone_device *thermal_of_zone_register(struct device_node *sensor, int id, void *data,
+ const struct thermal_zone_device_ops *ops);
-static inline int thermal_zone_of_get_sensor_id(struct device_node *tz_np,
- struct device_node *sensor_np,
- u32 *id)
-{
- return -ENOENT;
-}
-static inline struct thermal_zone_device *
-thermal_zone_of_sensor_register(struct device *dev, int id, void *data,
- const struct thermal_zone_of_device_ops *ops)
+struct thermal_zone_device *devm_thermal_of_zone_register(struct device *dev, int id, void *data,
+ const struct thermal_zone_device_ops *ops);
+
+void thermal_of_zone_unregister(struct thermal_zone_device *tz);
+
+void devm_thermal_of_zone_unregister(struct device *dev, struct thermal_zone_device *tz);
+
+#else
+static inline
+struct thermal_zone_device *thermal_of_zone_register(struct device_node *sensor, int id, void *data,
+ const struct thermal_zone_device_ops *ops)
{
- return ERR_PTR(-ENODEV);
+ return ERR_PTR(-ENOTSUPP);
}
static inline
-void thermal_zone_of_sensor_unregister(struct device *dev,
- struct thermal_zone_device *tz)
+struct thermal_zone_device *devm_thermal_of_zone_register(struct device *dev, int id, void *data,
+ const struct thermal_zone_device_ops *ops)
{
+ return ERR_PTR(-ENOTSUPP);
}
-static inline struct thermal_zone_device *devm_thermal_zone_of_sensor_register(
- struct device *dev, int id, void *data,
- const struct thermal_zone_of_device_ops *ops)
+static inline void thermal_of_zone_unregister(struct thermal_zone_device *tz)
{
- return ERR_PTR(-ENODEV);
}
-static inline
-void devm_thermal_zone_of_sensor_unregister(struct device *dev,
- struct thermal_zone_device *tz)
+static inline void devm_thermal_of_zone_unregister(struct device *dev,
+ struct thermal_zone_device *tz)
{
}
-
#endif
#ifdef CONFIG_THERMAL
diff --git a/include/linux/thunderbolt.h b/include/linux/thunderbolt.h
index 9f442d73f3df..90cd08ab2f5d 100644
--- a/include/linux/thunderbolt.h
+++ b/include/linux/thunderbolt.h
@@ -187,6 +187,7 @@ void tb_unregister_property_dir(const char *key, struct tb_property_dir *dir);
* @device_name: Name of the device (or %NULL if not known)
* @link_speed: Speed of the link in Gb/s
* @link_width: Width of the link (1 or 2)
+ * @link_usb4: Downstream link is USB4
* @is_unplugged: The XDomain is unplugged
* @needs_uuid: If the XDomain does not have @remote_uuid it will be
* queried first
@@ -234,6 +235,7 @@ struct tb_xdomain {
const char *device_name;
unsigned int link_speed;
unsigned int link_width;
+ bool link_usb4;
bool is_unplugged;
bool needs_uuid;
struct ida service_ids;
diff --git a/include/linux/tnum.h b/include/linux/tnum.h
index 498dbcedb451..1c3948a1d6ad 100644
--- a/include/linux/tnum.h
+++ b/include/linux/tnum.h
@@ -21,7 +21,12 @@ struct tnum {
struct tnum tnum_const(u64 value);
/* A completely unknown value */
extern const struct tnum tnum_unknown;
-/* A value that's unknown except that @min <= value <= @max */
+/* An unknown value that is a superset of @min <= value <= @max.
+ *
+ * Could include values outside the range of [@min, @max].
+ * For example tnum_range(0, 2) is represented by {0, 1, 2, *3*},
+ * rather than the intended set of {0, 1, 2}.
+ */
struct tnum tnum_range(u64 min, u64 max);
/* Arithmetic and logical ops */
@@ -73,7 +78,18 @@ static inline bool tnum_is_unknown(struct tnum a)
*/
bool tnum_is_aligned(struct tnum a, u64 size);
-/* Returns true if @b represents a subset of @a. */
+/* Returns true if @b represents a subset of @a.
+ *
+ * Note that using tnum_range() as @a requires extra caution, as tnum_in() may
+ * return true unexpectedly due to tnum's limited ability to represent a tight
+ * range, e.g.
+ *
+ * tnum_in(tnum_range(0, 2), tnum_const(3)) == true
+ *
+ * As a rule of thumb, if @a is explicitly coded rather than coming from
+ * reg->var_off, it should be in the form of tnum_const(), tnum_range(0, 2**n - 1),
+ * or tnum_range(2**n, 2**(n+1) - 1).
+ */
bool tnum_in(struct tnum a, struct tnum b);
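To make the caveat above concrete: tnum_range(0, 2) must cover 0b00, 0b01 and 0b10, so both low bits become unknown and the result is {value = 0, mask = 0x3}, whose members are {0, 1, 2, 3}. Hence the surprising containment; a tiny illustrative check:

#include <linux/tnum.h>

static bool demo_tnum_range_caveat(void)
{
	struct tnum r = tnum_range(0, 2);	/* value = 0, mask = 0x3 */

	return tnum_in(r, tnum_const(3));	/* true: 3 is in the superset */
}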
/* Formatting functions. These have snprintf-like semantics: they will write
diff --git a/include/linux/trace.h b/include/linux/trace.h
index bf169612ffe1..b5e16e438448 100644
--- a/include/linux/trace.h
+++ b/include/linux/trace.h
@@ -2,8 +2,6 @@
#ifndef _LINUX_TRACE_H
#define _LINUX_TRACE_H
-#ifdef CONFIG_TRACING
-
#define TRACE_EXPORT_FUNCTION BIT(0)
#define TRACE_EXPORT_EVENT BIT(1)
#define TRACE_EXPORT_MARKER BIT(2)
@@ -28,6 +26,8 @@ struct trace_export {
int flags;
};
+#ifdef CONFIG_TRACING
+
int register_ftrace_export(struct trace_export *export);
int unregister_ftrace_export(struct trace_export *export);
@@ -48,6 +48,38 @@ void osnoise_arch_unregister(void);
void osnoise_trace_irq_entry(int id);
void osnoise_trace_irq_exit(int id, const char *desc);
+#else /* CONFIG_TRACING */
+static inline int register_ftrace_export(struct trace_export *export)
+{
+ return -EINVAL;
+}
+static inline int unregister_ftrace_export(struct trace_export *export)
+{
+ return 0;
+}
+static inline void trace_printk_init_buffers(void)
+{
+}
+static inline int trace_array_printk(struct trace_array *tr, unsigned long ip,
+ const char *fmt, ...)
+{
+ return 0;
+}
+static inline int trace_array_init_printk(struct trace_array *tr)
+{
+ return -EINVAL;
+}
+static inline void trace_array_put(struct trace_array *tr)
+{
+}
+static inline struct trace_array *trace_array_get_by_name(const char *name)
+{
+ return NULL;
+}
+static inline int trace_array_destroy(struct trace_array *tr)
+{
+ return 0;
+}
#endif /* CONFIG_TRACING */
#endif /* _LINUX_TRACE_H */
diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h
index 8401dec93c15..20749bd9db71 100644
--- a/include/linux/trace_events.h
+++ b/include/linux/trace_events.h
@@ -92,6 +92,7 @@ struct trace_iterator {
unsigned int temp_size;
char *fmt; /* modified format holder */
unsigned int fmt_size;
+ long wait_index;
/* trace_seq for __print_flags() and __print_symbolic() etc. */
struct trace_seq tmp_seq;
diff --git a/include/linux/tty.h b/include/linux/tty.h
index 7b0a5d478ef6..730c3301d710 100644
--- a/include/linux/tty.h
+++ b/include/linux/tty.h
@@ -122,8 +122,6 @@ struct tty_operations;
/**
* struct tty_struct - state associated with a tty while open
*
- * @magic: magic value set early in @alloc_tty_struct to %TTY_MAGIC, for
- * debugging purposes
* @kref: reference counting by tty_kref_get() and tty_kref_put(), reaching zero
* frees the structure
* @dev: class device or %NULL (e.g. ptys, serdev)
@@ -193,7 +191,6 @@ struct tty_operations;
* &struct tty_port.
*/
struct tty_struct {
- int magic;
struct kref kref;
struct device *dev;
struct tty_driver *driver;
@@ -260,9 +257,6 @@ struct tty_file_private {
struct list_head list;
};
-/* tty magic number */
-#define TTY_MAGIC 0x5401
-
/**
* DOC: TTY Struct Flags
*
@@ -434,7 +428,7 @@ int tty_hung_up_p(struct file *filp);
void do_SAK(struct tty_struct *tty);
void __do_SAK(struct tty_struct *tty);
void no_tty(void);
-speed_t tty_termios_baud_rate(struct ktermios *termios);
+speed_t tty_termios_baud_rate(const struct ktermios *termios);
void tty_termios_encode_baud_rate(struct ktermios *termios, speed_t ibaud,
speed_t obaud);
void tty_encode_baud_rate(struct tty_struct *tty, speed_t ibaud,
@@ -458,7 +452,7 @@ static inline speed_t tty_get_baud_rate(struct tty_struct *tty)
unsigned char tty_get_char_size(unsigned int cflag);
unsigned char tty_get_frame_size(unsigned int cflag);
-void tty_termios_copy_hw(struct ktermios *new, struct ktermios *old);
+void tty_termios_copy_hw(struct ktermios *new, const struct ktermios *old);
int tty_termios_hw_change(const struct ktermios *a, const struct ktermios *b);
int tty_set_termios(struct tty_struct *tty, struct ktermios *kt);
diff --git a/include/linux/tty_driver.h b/include/linux/tty_driver.h
index 4841d8069c07..e00034118c7b 100644
--- a/include/linux/tty_driver.h
+++ b/include/linux/tty_driver.h
@@ -7,6 +7,7 @@
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/cdev.h>
+#include <linux/uaccess.h>
#include <linux/termios.h>
#include <linux/seq_file.h>
@@ -141,7 +142,7 @@ struct serial_struct;
*
* Optional.
*
- * @set_termios: ``void ()(struct tty_struct *tty, struct ktermios *old)``
+ * @set_termios: ``void ()(struct tty_struct *tty, const struct ktermios *old)``
*
* This routine allows the @tty driver to be notified when device's
* termios settings have changed. New settings are in @tty->termios.
@@ -365,7 +366,7 @@ struct tty_operations {
unsigned int cmd, unsigned long arg);
long (*compat_ioctl)(struct tty_struct *tty,
unsigned int cmd, unsigned long arg);
- void (*set_termios)(struct tty_struct *tty, struct ktermios * old);
+ void (*set_termios)(struct tty_struct *tty, const struct ktermios *old);
void (*throttle)(struct tty_struct * tty);
void (*unthrottle)(struct tty_struct * tty);
void (*stop)(struct tty_struct *tty);
@@ -396,7 +397,6 @@ struct tty_operations {
/**
* struct tty_driver -- driver for TTY devices
*
- * @magic: set to %TTY_DRIVER_MAGIC in __tty_alloc_driver()
* @kref: reference counting. Reaching zero frees all the internals and the
* driver.
* @cdevs: allocated/registered character /dev devices
@@ -432,7 +432,6 @@ struct tty_operations {
* @driver_name, @name, @type, @subtype, @init_termios, and @ops.
*/
struct tty_driver {
- int magic;
struct kref kref;
struct cdev **cdevs;
struct module *owner;
@@ -489,9 +488,6 @@ static inline void tty_set_operations(struct tty_driver *driver,
driver->ops = op;
}
-/* tty driver magic number */
-#define TTY_DRIVER_MAGIC 0x5402
-
/**
* DOC: TTY Driver Flags
*
diff --git a/include/linux/tty_ldisc.h b/include/linux/tty_ldisc.h
index ede6f2157f32..dcb61ec11424 100644
--- a/include/linux/tty_ldisc.h
+++ b/include/linux/tty_ldisc.h
@@ -130,7 +130,7 @@ int ldsem_down_write_nested(struct ld_semaphore *sem, int subclass,
* a pointer to wordsize-sensitive structure belongs here, but most of
* ldiscs will happily leave it %NULL.
*
- * @set_termios: [TTY] ``void ()(struct tty_struct *tty, struct ktermios *old)``
+ * @set_termios: [TTY] ``void ()(struct tty_struct *tty, const struct ktermios *old)``
*
* This function notifies the line discipline that a change has been made
* to the termios structure.
@@ -227,7 +227,7 @@ struct tty_ldisc_ops {
unsigned long arg);
int (*compat_ioctl)(struct tty_struct *tty, unsigned int cmd,
unsigned long arg);
- void (*set_termios)(struct tty_struct *tty, struct ktermios *old);
+ void (*set_termios)(struct tty_struct *tty, const struct ktermios *old);
__poll_t (*poll)(struct tty_struct *tty, struct file *file,
struct poll_table_struct *wait);
void (*hangup)(struct tty_struct *tty);
diff --git a/include/linux/u64_stats_sync.h b/include/linux/u64_stats_sync.h
index 6ad4e9032d53..46040d66334a 100644
--- a/include/linux/u64_stats_sync.h
+++ b/include/linux/u64_stats_sync.h
@@ -8,7 +8,7 @@
*
* Key points :
*
- * - Use a seqcount on 32-bit SMP, only disable preemption for 32-bit UP.
+ * - Use a seqcount on 32-bit
* - The whole thing is a no-op on 64-bit architectures.
*
* Usage constraints:
@@ -20,7 +20,8 @@
* writer and also spin forever.
*
* 3) Write side must use the _irqsave() variant if other writers, or a reader,
- * can be invoked from an IRQ context.
+ * can be invoked from an IRQ context. On 64-bit systems this variant does not
+ * disable interrupts.
*
* 4) If reader fetches several counters, there is no guarantee the whole values
* are consistent w.r.t. each other (remember point #2: seqcounts are not
@@ -29,11 +30,6 @@
* 5) Readers are allowed to sleep or be preempted/interrupted: they perform
* pure reads.
*
- * 6) Readers must use both u64_stats_fetch_{begin,retry}_irq() if the stats
- * might be updated from a hardirq or softirq context (remember point #1:
- * seqcounts are not used for UP kernels). 32-bit UP stat readers could read
- * corrupted 64-bit values otherwise.
- *
* Usage :
*
* Stats producer (writer) should use following template granted it already got
@@ -66,7 +62,7 @@
#include <linux/seqlock.h>
struct u64_stats_sync {
-#if BITS_PER_LONG == 32 && (defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT))
+#if BITS_PER_LONG == 32
seqcount_t seq;
#endif
};
@@ -98,7 +94,22 @@ static inline void u64_stats_inc(u64_stats_t *p)
local64_inc(&p->v);
}
-#else
+static inline void u64_stats_init(struct u64_stats_sync *syncp) { }
+static inline void __u64_stats_update_begin(struct u64_stats_sync *syncp) { }
+static inline void __u64_stats_update_end(struct u64_stats_sync *syncp) { }
+static inline unsigned long __u64_stats_irqsave(void) { return 0; }
+static inline void __u64_stats_irqrestore(unsigned long flags) { }
+static inline unsigned int __u64_stats_fetch_begin(const struct u64_stats_sync *syncp)
+{
+ return 0;
+}
+static inline bool __u64_stats_fetch_retry(const struct u64_stats_sync *syncp,
+ unsigned int start)
+{
+ return false;
+}
+
+#else /* 64 bit */
typedef struct {
u64 v;
@@ -123,123 +134,95 @@ static inline void u64_stats_inc(u64_stats_t *p)
{
p->v++;
}
-#endif
-#if BITS_PER_LONG == 32 && (defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT))
-#define u64_stats_init(syncp) seqcount_init(&(syncp)->seq)
-#else
static inline void u64_stats_init(struct u64_stats_sync *syncp)
{
+ seqcount_init(&syncp->seq);
}
-#endif
-static inline void u64_stats_update_begin(struct u64_stats_sync *syncp)
+static inline void __u64_stats_update_begin(struct u64_stats_sync *syncp)
{
-#if BITS_PER_LONG == 32 && (defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT))
- if (IS_ENABLED(CONFIG_PREEMPT_RT))
- preempt_disable();
+ preempt_disable_nested();
write_seqcount_begin(&syncp->seq);
-#endif
}
-static inline void u64_stats_update_end(struct u64_stats_sync *syncp)
+static inline void __u64_stats_update_end(struct u64_stats_sync *syncp)
{
-#if BITS_PER_LONG == 32 && (defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT))
write_seqcount_end(&syncp->seq);
- if (IS_ENABLED(CONFIG_PREEMPT_RT))
- preempt_enable();
-#endif
+ preempt_enable_nested();
}
-static inline unsigned long
-u64_stats_update_begin_irqsave(struct u64_stats_sync *syncp)
+static inline unsigned long __u64_stats_irqsave(void)
{
- unsigned long flags = 0;
+ unsigned long flags;
-#if BITS_PER_LONG == 32 && (defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT))
- if (IS_ENABLED(CONFIG_PREEMPT_RT))
- preempt_disable();
- else
- local_irq_save(flags);
- write_seqcount_begin(&syncp->seq);
-#endif
+ local_irq_save(flags);
return flags;
}
-static inline void
-u64_stats_update_end_irqrestore(struct u64_stats_sync *syncp,
- unsigned long flags)
+static inline void __u64_stats_irqrestore(unsigned long flags)
{
-#if BITS_PER_LONG == 32 && (defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT))
- write_seqcount_end(&syncp->seq);
- if (IS_ENABLED(CONFIG_PREEMPT_RT))
- preempt_enable();
- else
- local_irq_restore(flags);
-#endif
+ local_irq_restore(flags);
}
static inline unsigned int __u64_stats_fetch_begin(const struct u64_stats_sync *syncp)
{
-#if BITS_PER_LONG == 32 && (defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT))
return read_seqcount_begin(&syncp->seq);
-#else
- return 0;
-#endif
}
-static inline unsigned int u64_stats_fetch_begin(const struct u64_stats_sync *syncp)
+static inline bool __u64_stats_fetch_retry(const struct u64_stats_sync *syncp,
+ unsigned int start)
{
-#if BITS_PER_LONG == 32 && (!defined(CONFIG_SMP) && !defined(CONFIG_PREEMPT_RT))
- preempt_disable();
-#endif
- return __u64_stats_fetch_begin(syncp);
+ return read_seqcount_retry(&syncp->seq, start);
}
+#endif /* !64 bit */
-static inline bool __u64_stats_fetch_retry(const struct u64_stats_sync *syncp,
- unsigned int start)
+static inline void u64_stats_update_begin(struct u64_stats_sync *syncp)
{
-#if BITS_PER_LONG == 32 && (defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT))
- return read_seqcount_retry(&syncp->seq, start);
-#else
- return false;
-#endif
+ __u64_stats_update_begin(syncp);
+}
+
+static inline void u64_stats_update_end(struct u64_stats_sync *syncp)
+{
+ __u64_stats_update_end(syncp);
+}
+
+static inline unsigned long u64_stats_update_begin_irqsave(struct u64_stats_sync *syncp)
+{
+ unsigned long flags = __u64_stats_irqsave();
+
+ __u64_stats_update_begin(syncp);
+ return flags;
+}
+
+static inline void u64_stats_update_end_irqrestore(struct u64_stats_sync *syncp,
+ unsigned long flags)
+{
+ __u64_stats_update_end(syncp);
+ __u64_stats_irqrestore(flags);
+}
+
+static inline unsigned int u64_stats_fetch_begin(const struct u64_stats_sync *syncp)
+{
+ return __u64_stats_fetch_begin(syncp);
}
static inline bool u64_stats_fetch_retry(const struct u64_stats_sync *syncp,
unsigned int start)
{
-#if BITS_PER_LONG == 32 && (!defined(CONFIG_SMP) && !defined(CONFIG_PREEMPT_RT))
- preempt_enable();
-#endif
return __u64_stats_fetch_retry(syncp, start);
}
-/*
- * In case irq handlers can update u64 counters, readers can use following helpers
- * - SMP 32bit arches use seqcount protection, irq safe.
- * - UP 32bit must disable irqs.
- * - 64bit have no problem atomically reading u64 values, irq safe.
- */
+/* Obsolete interfaces */
static inline unsigned int u64_stats_fetch_begin_irq(const struct u64_stats_sync *syncp)
{
-#if BITS_PER_LONG == 32 && defined(CONFIG_PREEMPT_RT)
- preempt_disable();
-#elif BITS_PER_LONG == 32 && !defined(CONFIG_SMP)
- local_irq_disable();
-#endif
- return __u64_stats_fetch_begin(syncp);
+ return u64_stats_fetch_begin(syncp);
}
static inline bool u64_stats_fetch_retry_irq(const struct u64_stats_sync *syncp,
unsigned int start)
{
-#if BITS_PER_LONG == 32 && defined(CONFIG_PREEMPT_RT)
- preempt_enable();
-#elif BITS_PER_LONG == 32 && !defined(CONFIG_SMP)
- local_irq_enable();
-#endif
- return __u64_stats_fetch_retry(syncp, start);
+ return u64_stats_fetch_retry(syncp, start);
}
#endif /* _LINUX_U64_STATS_SYNC_H */
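After this rework the writer/reader pattern is identical on 32-bit and 64-bit kernels; only the internals differ. A minimal sketch of the intended usage (my_stats and its field names are illustrative):

struct my_stats {
	u64_stats_t		packets;
	u64_stats_t		bytes;
	struct u64_stats_sync	syncp;
};

static void my_stats_add(struct my_stats *s, unsigned int len)
{
	u64_stats_update_begin(&s->syncp);
	u64_stats_inc(&s->packets);
	u64_stats_add(&s->bytes, len);
	u64_stats_update_end(&s->syncp);
}

static void my_stats_read(struct my_stats *s, u64 *packets, u64 *bytes)
{
	unsigned int start;

	do {
		start = u64_stats_fetch_begin(&s->syncp);
		*packets = u64_stats_read(&s->packets);
		*bytes = u64_stats_read(&s->bytes);
	} while (u64_stats_fetch_retry(&s->syncp, start));
}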
diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
index 47e5d374c7eb..afb18f198843 100644
--- a/include/linux/uaccess.h
+++ b/include/linux/uaccess.h
@@ -58,20 +58,28 @@
static __always_inline __must_check unsigned long
__copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
{
- instrument_copy_from_user(to, from, n);
+ unsigned long res;
+
+ instrument_copy_from_user_before(to, from, n);
check_object_size(to, n, false);
- return raw_copy_from_user(to, from, n);
+ res = raw_copy_from_user(to, from, n);
+ instrument_copy_from_user_after(to, from, n, res);
+ return res;
}
static __always_inline __must_check unsigned long
__copy_from_user(void *to, const void __user *from, unsigned long n)
{
+ unsigned long res;
+
might_fault();
+ instrument_copy_from_user_before(to, from, n);
if (should_fail_usercopy())
return n;
- instrument_copy_from_user(to, from, n);
check_object_size(to, n, false);
- return raw_copy_from_user(to, from, n);
+ res = raw_copy_from_user(to, from, n);
+ instrument_copy_from_user_after(to, from, n, res);
+ return res;
}
/**
@@ -115,8 +123,9 @@ _copy_from_user(void *to, const void __user *from, unsigned long n)
unsigned long res = n;
might_fault();
if (!should_fail_usercopy() && likely(access_ok(from, n))) {
- instrument_copy_from_user(to, from, n);
+ instrument_copy_from_user_before(to, from, n);
res = raw_copy_from_user(to, from, n);
+ instrument_copy_from_user_after(to, from, n, res);
}
if (unlikely(res))
memset(to + (n - res), 0, res);
diff --git a/include/linux/ucb1400.h b/include/linux/ucb1400.h
index 22345391350b..2516082cd3a9 100644
--- a/include/linux/ucb1400.h
+++ b/include/linux/ucb1400.h
@@ -23,7 +23,7 @@
#include <sound/ac97_codec.h>
#include <linux/mutex.h>
#include <linux/platform_device.h>
-#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
/*
* UCB1400 AC-link registers
diff --git a/include/linux/uio.h b/include/linux/uio.h
index 5896af36199c..2e3134b14ffd 100644
--- a/include/linux/uio.h
+++ b/include/linux/uio.h
@@ -298,7 +298,7 @@ iov_iter_npages_cap(struct iov_iter *i, int maxpages, size_t max_bytes)
shorted = iov_iter_count(i) - max_bytes;
iov_iter_truncate(i, max_bytes);
}
- npages = iov_iter_npages(i, INT_MAX);
+ npages = iov_iter_npages(i, maxpages);
if (shorted)
iov_iter_reexpand(i, iov_iter_count(i) + shorted);
diff --git a/include/linux/umh.h b/include/linux/umh.h
index 244aff638220..5d1f6129b847 100644
--- a/include/linux/umh.h
+++ b/include/linux/umh.h
@@ -11,10 +11,11 @@
struct cred;
struct file;
-#define UMH_NO_WAIT 0 /* don't wait at all */
-#define UMH_WAIT_EXEC 1 /* wait for the exec, but not the process */
-#define UMH_WAIT_PROC 2 /* wait for the process to complete */
-#define UMH_KILLABLE 4 /* wait for EXEC/PROC killable */
+#define UMH_NO_WAIT 0x00 /* don't wait at all */
+#define UMH_WAIT_EXEC 0x01 /* wait for the exec, but not the process */
+#define UMH_WAIT_PROC 0x02 /* wait for the process to complete */
+#define UMH_KILLABLE 0x04 /* wait for EXEC/PROC killable */
+#define UMH_FREEZABLE 0x08 /* wait for EXEC/PROC freezable */
struct subprocess_info {
struct work_struct work;
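The UMH_* values are now an explicit bitmask, so the wait mode can be combined with modifiers such as UMH_KILLABLE or the new UMH_FREEZABLE. For illustration only (the helper path and arguments are made up):

static int run_my_helper(void)
{
	char *argv[] = { "/sbin/my-helper", "--oneshot", NULL };
	char *envp[] = { "HOME=/",
			 "PATH=/sbin:/bin:/usr/sbin:/usr/bin", NULL };

	/* wait for the process, but allow the wait to be killed or frozen */
	return call_usermodehelper(argv[0], argv, envp,
				   UMH_WAIT_PROC | UMH_KILLABLE | UMH_FREEZABLE);
}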
diff --git a/include/linux/units.h b/include/linux/units.h
index 681fc652e3d7..2793a41e73a2 100644
--- a/include/linux/units.h
+++ b/include/linux/units.h
@@ -20,6 +20,9 @@
#define PICO 1000000000000ULL
#define FEMTO 1000000000000000ULL
+#define NANOHZ_PER_HZ 1000000000UL
+#define MICROHZ_PER_HZ 1000000UL
+#define MILLIHZ_PER_HZ 1000UL
#define HZ_PER_KHZ 1000UL
#define KHZ_PER_MHZ 1000UL
#define HZ_PER_MHZ 1000000UL
diff --git a/include/linux/usb/chipidea.h b/include/linux/usb/chipidea.h
index edf3342507f1..ee38835ed77c 100644
--- a/include/linux/usb/chipidea.h
+++ b/include/linux/usb/chipidea.h
@@ -62,6 +62,7 @@ struct ci_hdrc_platform_data {
#define CI_HDRC_REQUIRES_ALIGNED_DMA BIT(13)
#define CI_HDRC_IMX_IS_HSIC BIT(14)
#define CI_HDRC_PMQOS BIT(15)
+#define CI_HDRC_PHY_VBUS_CONTROL BIT(16)
enum usb_dr_mode dr_mode;
#define CI_HDRC_CONTROLLER_RESET_EVENT 0
#define CI_HDRC_CONTROLLER_STOPPED_EVENT 1
diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h
index 67f8713d3fa3..78cd566ee238 100644
--- a/include/linux/usb/hcd.h
+++ b/include/linux/usb/hcd.h
@@ -479,7 +479,6 @@ static inline int ehset_single_step_set_feature(struct usb_hcd *hcd, int port)
struct pci_dev;
struct pci_device_id;
extern int usb_hcd_pci_probe(struct pci_dev *dev,
- const struct pci_device_id *id,
const struct hc_driver *driver);
extern void usb_hcd_pci_remove(struct pci_dev *dev);
extern void usb_hcd_pci_shutdown(struct pci_dev *dev);
diff --git a/include/linux/usb/serial.h b/include/linux/usb/serial.h
index 8ea319f89e1f..f7bfedb740f5 100644
--- a/include/linux/usb/serial.h
+++ b/include/linux/usb/serial.h
@@ -276,8 +276,8 @@ struct usb_serial_driver {
unsigned int cmd, unsigned long arg);
void (*get_serial)(struct tty_struct *tty, struct serial_struct *ss);
int (*set_serial)(struct tty_struct *tty, struct serial_struct *ss);
- void (*set_termios)(struct tty_struct *tty,
- struct usb_serial_port *port, struct ktermios *old);
+ void (*set_termios)(struct tty_struct *tty, struct usb_serial_port *port,
+ const struct ktermios *old);
void (*break_ctl)(struct tty_struct *tty, int break_state);
unsigned int (*chars_in_buffer)(struct tty_struct *tty);
void (*wait_until_sent)(struct tty_struct *tty, long timeout);
diff --git a/include/linux/usb/tcpci.h b/include/linux/usb/tcpci.h
index 20c0bedb8ec8..17657451c762 100644
--- a/include/linux/usb/tcpci.h
+++ b/include/linux/usb/tcpci.h
@@ -167,6 +167,11 @@
/* I2C_WRITE_BYTE_COUNT + 1 when TX_BUF_BYTE_x is only accessible I2C_WRITE_BYTE_COUNT */
#define TCPC_TRANSMIT_BUFFER_MAX_LEN 31
+#define tcpc_presenting_rd(reg, cc) \
+ (!(TCPC_ROLE_CTRL_DRP & (reg)) && \
+ (((reg) & (TCPC_ROLE_CTRL_## cc ##_MASK << TCPC_ROLE_CTRL_## cc ##_SHIFT)) == \
+ (TCPC_ROLE_CTRL_CC_RD << TCPC_ROLE_CTRL_## cc ##_SHIFT)))
+
struct tcpci;
/*
@@ -207,4 +212,21 @@ irqreturn_t tcpci_irq(struct tcpci *tcpci);
struct tcpm_port;
struct tcpm_port *tcpci_get_tcpm_port(struct tcpci *tcpci);
+
+static inline enum typec_cc_status tcpci_to_typec_cc(unsigned int cc, bool sink)
+{
+ switch (cc) {
+ case 0x1:
+ return sink ? TYPEC_CC_RP_DEF : TYPEC_CC_RA;
+ case 0x2:
+ return sink ? TYPEC_CC_RP_1_5 : TYPEC_CC_RD;
+ case 0x3:
+ if (sink)
+ return TYPEC_CC_RP_3_0;
+ fallthrough;
+ case 0x0:
+ default:
+ return TYPEC_CC_OPEN;
+ }
+}
#endif /* __LINUX_USB_TCPCI_H */
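tcpci_to_typec_cc() converts the 2-bit CC status field into a typec_cc_status once the role (sourcing or sinking) is known. A hedged sketch of decoding both CC pins from a raw CC_STATUS read; the *_SHIFT/_MASK names are assumed to match the register definitions earlier in this header:

static void decode_cc_status(unsigned int reg, bool sink,
			     enum typec_cc_status *cc1,
			     enum typec_cc_status *cc2)
{
	*cc1 = tcpci_to_typec_cc((reg >> TCPC_CC_STATUS_CC1_SHIFT) &
				 TCPC_CC_STATUS_CC1_MASK, sink);
	*cc2 = tcpci_to_typec_cc((reg >> TCPC_CC_STATUS_CC2_SHIFT) &
				 TCPC_CC_STATUS_CC2_MASK, sink);
}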
diff --git a/include/linux/user_events.h b/include/linux/user_events.h
index 736e05603463..592a3fbed98e 100644
--- a/include/linux/user_events.h
+++ b/include/linux/user_events.h
@@ -20,15 +20,6 @@
#define USER_EVENTS_SYSTEM "user_events"
#define USER_EVENTS_PREFIX "u:"
-/* Bits 0-6 are for known probe types, Bit 7 is for unknown probes */
-#define EVENT_BIT_FTRACE 0
-#define EVENT_BIT_PERF 1
-#define EVENT_BIT_OTHER 7
-
-#define EVENT_STATUS_FTRACE (1 << EVENT_BIT_FTRACE)
-#define EVENT_STATUS_PERF (1 << EVENT_BIT_PERF)
-#define EVENT_STATUS_OTHER (1 << EVENT_BIT_OTHER)
-
/* Create dynamic location entry within a 32-bit value */
#define DYN_LOC(offset, size) ((size) << 16 | (offset))
@@ -45,12 +36,12 @@ struct user_reg {
/* Input: Pointer to string with event name, description and flags */
__u64 name_args;
- /* Output: Byte index of the event within the status page */
- __u32 status_index;
+ /* Output: Bit index of the event within the status page */
+ __u32 status_bit;
/* Output: Index of the event to use when writing data */
__u32 write_index;
-};
+} __attribute__((__packed__));
#define DIAG_IOC_MAGIC '*'
diff --git a/include/linux/user_namespace.h b/include/linux/user_namespace.h
index 33a4240e6a6f..45f09bec02c4 100644
--- a/include/linux/user_namespace.h
+++ b/include/linux/user_namespace.h
@@ -54,15 +54,17 @@ enum ucount_type {
UCOUNT_FANOTIFY_GROUPS,
UCOUNT_FANOTIFY_MARKS,
#endif
+ UCOUNT_COUNTS,
+};
+
+enum rlimit_type {
UCOUNT_RLIMIT_NPROC,
UCOUNT_RLIMIT_MSGQUEUE,
UCOUNT_RLIMIT_SIGPENDING,
UCOUNT_RLIMIT_MEMLOCK,
- UCOUNT_COUNTS,
+ UCOUNT_RLIMIT_COUNTS,
};
-#define MAX_PER_NAMESPACE_UCOUNTS UCOUNT_RLIMIT_NPROC
-
struct user_namespace {
struct uid_gid_map uid_map;
struct uid_gid_map gid_map;
@@ -99,6 +101,7 @@ struct user_namespace {
#endif
struct ucounts *ucounts;
long ucount_max[UCOUNT_COUNTS];
+ long rlimit_max[UCOUNT_RLIMIT_COUNTS];
} __randomize_layout;
struct ucounts {
@@ -107,6 +110,7 @@ struct ucounts {
kuid_t uid;
atomic_t count;
atomic_long_t ucount[UCOUNT_COUNTS];
+ atomic_long_t rlimit[UCOUNT_RLIMIT_COUNTS];
};
extern struct user_namespace init_user_ns;
@@ -120,21 +124,26 @@ struct ucounts *alloc_ucounts(struct user_namespace *ns, kuid_t uid);
struct ucounts * __must_check get_ucounts(struct ucounts *ucounts);
void put_ucounts(struct ucounts *ucounts);
-static inline long get_ucounts_value(struct ucounts *ucounts, enum ucount_type type)
+static inline long get_rlimit_value(struct ucounts *ucounts, enum rlimit_type type)
{
- return atomic_long_read(&ucounts->ucount[type]);
+ return atomic_long_read(&ucounts->rlimit[type]);
}
-long inc_rlimit_ucounts(struct ucounts *ucounts, enum ucount_type type, long v);
-bool dec_rlimit_ucounts(struct ucounts *ucounts, enum ucount_type type, long v);
-long inc_rlimit_get_ucounts(struct ucounts *ucounts, enum ucount_type type);
-void dec_rlimit_put_ucounts(struct ucounts *ucounts, enum ucount_type type);
-bool is_ucounts_overlimit(struct ucounts *ucounts, enum ucount_type type, unsigned long max);
+long inc_rlimit_ucounts(struct ucounts *ucounts, enum rlimit_type type, long v);
+bool dec_rlimit_ucounts(struct ucounts *ucounts, enum rlimit_type type, long v);
+long inc_rlimit_get_ucounts(struct ucounts *ucounts, enum rlimit_type type);
+void dec_rlimit_put_ucounts(struct ucounts *ucounts, enum rlimit_type type);
+bool is_rlimit_overlimit(struct ucounts *ucounts, enum rlimit_type type, unsigned long max);
+
+static inline long get_userns_rlimit_max(struct user_namespace *ns, enum rlimit_type type)
+{
+ return READ_ONCE(ns->rlimit_max[type]);
+}
-static inline void set_rlimit_ucount_max(struct user_namespace *ns,
- enum ucount_type type, unsigned long max)
+static inline void set_userns_rlimit_max(struct user_namespace *ns,
+ enum rlimit_type type, unsigned long max)
{
- ns->ucount_max[type] = max <= LONG_MAX ? max : LONG_MAX;
+ ns->rlimit_max[type] = max <= LONG_MAX ? max : LONG_MAX;
}
#ifdef CONFIG_USER_NS
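With ucount_type and rlimit_type split, rlimit-style counters live in the new rlimit[] array and use the *_rlimit_* helpers, while ordinary per-namespace counters keep using ucount[]. A rough sketch of a charge/uncharge pair under the new names (function name is illustrative, error handling trimmed):

static int charge_pending_signal(struct ucounts *ucounts,
				 struct user_namespace *ns)
{
	long max = get_userns_rlimit_max(ns, UCOUNT_RLIMIT_SIGPENDING);

	inc_rlimit_ucounts(ucounts, UCOUNT_RLIMIT_SIGPENDING, 1);
	if (is_rlimit_overlimit(ucounts, UCOUNT_RLIMIT_SIGPENDING, max)) {
		dec_rlimit_ucounts(ucounts, UCOUNT_RLIMIT_SIGPENDING, 1);
		return -EAGAIN;
	}
	return 0;
}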
diff --git a/include/linux/userfaultfd_k.h b/include/linux/userfaultfd_k.h
index e1b8a915e9e9..f07e6998bb68 100644
--- a/include/linux/userfaultfd_k.h
+++ b/include/linux/userfaultfd_k.h
@@ -175,9 +175,8 @@ extern bool userfaultfd_remove(struct vm_area_struct *vma,
unsigned long start,
unsigned long end);
-extern int userfaultfd_unmap_prep(struct vm_area_struct *vma,
- unsigned long start, unsigned long end,
- struct list_head *uf);
+extern int userfaultfd_unmap_prep(struct mm_struct *mm, unsigned long start,
+ unsigned long end, struct list_head *uf);
extern void userfaultfd_unmap_complete(struct mm_struct *mm,
struct list_head *uf);
@@ -258,7 +257,7 @@ static inline bool userfaultfd_remove(struct vm_area_struct *vma,
return true;
}
-static inline int userfaultfd_unmap_prep(struct vm_area_struct *vma,
+static inline int userfaultfd_unmap_prep(struct mm_struct *mm,
unsigned long start, unsigned long end,
struct list_head *uf)
{
diff --git a/include/linux/vdpa.h b/include/linux/vdpa.h
index d282f464d2f1..6d0f5e4e82c2 100644
--- a/include/linux/vdpa.h
+++ b/include/linux/vdpa.h
@@ -104,6 +104,7 @@ struct vdpa_iova_range {
};
struct vdpa_dev_set_config {
+ u64 device_features;
struct {
u8 mac[ETH_ALEN];
u16 mtu;
diff --git a/include/linux/verification.h b/include/linux/verification.h
index a655923335ae..f34e50ebcf60 100644
--- a/include/linux/verification.h
+++ b/include/linux/verification.h
@@ -17,6 +17,14 @@
#define VERIFY_USE_SECONDARY_KEYRING ((struct key *)1UL)
#define VERIFY_USE_PLATFORM_KEYRING ((struct key *)2UL)
+static inline int system_keyring_id_check(u64 id)
+{
+ if (id > (unsigned long)VERIFY_USE_PLATFORM_KEYRING)
+ return -EINVAL;
+
+ return 0;
+}
+
/*
* The use to which an asymmetric key is being put.
*/
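system_keyring_id_check() lets callers that receive a keyring id as a plain u64 (for example from an ioctl argument) validate it before casting it back to one of the special keyring pointers above. A minimal hedged sketch, assuming 0 selects the default builtin keyring and 1/2 map to VERIFY_USE_SECONDARY_KEYRING and VERIFY_USE_PLATFORM_KEYRING:

static struct key *keyring_from_id(u64 id)
{
	if (system_keyring_id_check(id))
		return ERR_PTR(-EINVAL);

	/* 0, 1 and 2 map to the builtin, secondary and platform keyrings */
	return (struct key *)(unsigned long)id;
}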
diff --git a/include/linux/vfio.h b/include/linux/vfio.h
index e05ddc6fe6a5..e7cebeb875dd 100644
--- a/include/linux/vfio.h
+++ b/include/linux/vfio.h
@@ -14,6 +14,7 @@
#include <linux/workqueue.h>
#include <linux/poll.h>
#include <uapi/linux/vfio.h>
+#include <linux/iova_bitmap.h>
struct kvm;
@@ -33,10 +34,11 @@ struct vfio_device {
struct device *dev;
const struct vfio_device_ops *ops;
/*
- * mig_ops is a static property of the vfio_device which must be set
- * prior to registering the vfio_device.
+ * mig_ops/log_ops are static properties of the vfio_device which must
+ * be set prior to registering the vfio_device.
*/
const struct vfio_migration_ops *mig_ops;
+ const struct vfio_log_ops *log_ops;
struct vfio_group *group;
struct vfio_device_set *dev_set;
struct list_head dev_set_list;
@@ -45,7 +47,9 @@ struct vfio_device {
struct kvm *kvm;
/* Members below here are private, not for driver use */
- refcount_t refcount;
+ unsigned int index;
+ struct device device; /* device.kref covers object life cycle */
+ refcount_t refcount; /* user count on registered device */
unsigned int open_count;
struct completion comp;
struct list_head group_next;
@@ -55,6 +59,8 @@ struct vfio_device {
/**
* struct vfio_device_ops - VFIO bus driver device callbacks
*
+ * @init: initialize private fields in device structure
+ * @release: reclaim private fields in device structure
* @open_device: Called when the first file descriptor is opened for this device
* @close_device: Opposite of open_device
* @read: Perform read(2) on device file descriptor
@@ -72,6 +78,8 @@ struct vfio_device {
*/
struct vfio_device_ops {
char *name;
+ int (*init)(struct vfio_device *vdev);
+ void (*release)(struct vfio_device *vdev);
int (*open_device)(struct vfio_device *vdev);
void (*close_device)(struct vfio_device *vdev);
ssize_t (*read)(struct vfio_device *vdev, char __user *buf,
@@ -109,6 +117,28 @@ struct vfio_migration_ops {
};
/**
+ * @log_start: Optional callback to ask the device to start DMA logging.
+ * @log_stop: Optional callback to ask the device to stop DMA logging.
+ * @log_read_and_clear: Optional callback to ask the device to read
+ * and clear the dirty DMAs in a given range.
+ *
+ * The vfio core implementation of the DEVICE_FEATURE_DMA_LOGGING_ set
+ * of features does not track logging state relative to the device,
+ * therefore the device implementation of vfio_log_ops must handle
+ * arbitrary user requests. This includes rejecting subsequent calls
+ * to log_start without an intervening log_stop, as well as graceful
+ * handling of log_stop and log_read_and_clear from invalid states.
+ */
+struct vfio_log_ops {
+ int (*log_start)(struct vfio_device *device,
+ struct rb_root_cached *ranges, u32 nnodes, u64 *page_size);
+ int (*log_stop)(struct vfio_device *device);
+ int (*log_read_and_clear)(struct vfio_device *device,
+ unsigned long iova, unsigned long length,
+ struct iova_bitmap *dirty);
+};
+
+/**
* vfio_check_feature - Validate user input for the VFIO_DEVICE_FEATURE ioctl
* @flags: Arg from the device_feature op
* @argsz: Arg from the device_feature op
@@ -137,9 +167,23 @@ static inline int vfio_check_feature(u32 flags, size_t argsz, u32 supported_ops,
return 1;
}
-void vfio_init_group_dev(struct vfio_device *device, struct device *dev,
- const struct vfio_device_ops *ops);
-void vfio_uninit_group_dev(struct vfio_device *device);
+struct vfio_device *_vfio_alloc_device(size_t size, struct device *dev,
+ const struct vfio_device_ops *ops);
+#define vfio_alloc_device(dev_struct, member, dev, ops) \
+ container_of(_vfio_alloc_device(sizeof(struct dev_struct) + \
+ BUILD_BUG_ON_ZERO(offsetof( \
+ struct dev_struct, member)), \
+ dev, ops), \
+ struct dev_struct, member)
+
+int vfio_init_device(struct vfio_device *device, struct device *dev,
+ const struct vfio_device_ops *ops);
+void vfio_free_device(struct vfio_device *device);
+static inline void vfio_put_device(struct vfio_device *device)
+{
+ put_device(&device->device);
+}
+
int vfio_register_group_dev(struct vfio_device *device);
int vfio_register_emulated_iommu_dev(struct vfio_device *device);
void vfio_unregister_group_dev(struct vfio_device *device);
@@ -155,6 +199,7 @@ int vfio_mig_get_next_state(struct vfio_device *device,
* External user API
*/
struct iommu_group *vfio_file_iommu_group(struct file *file);
+bool vfio_file_is_group(struct file *file);
bool vfio_file_enforced_coherent(struct file *file);
void vfio_file_set_kvm(struct file *file, struct kvm *kvm);
bool vfio_file_has_dev(struct file *file, struct vfio_device *device);
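vfio_alloc_device() allocates the driver's wrapper structure and initializes the embedded vfio_device in one step; the BUILD_BUG_ON_ZERO() forces @member to be the first field so the container_of() (and error-pointer handling) is safe. A hedged sketch of the new allocation pattern a driver might use — my_vfio_device, my_init, my_release, my_ops and my_probe are all illustrative names:

struct my_vfio_device {
	struct vfio_device vdev;	/* must be the first member */
	void __iomem *regs;
};

static int my_init(struct vfio_device *vdev)   { return 0; }
static void my_release(struct vfio_device *vdev) { }

static const struct vfio_device_ops my_ops = {
	.name		= "my-vfio",
	.init		= my_init,	/* optional per-device setup */
	.release	= my_release,	/* paired teardown */
	/* .open_device, .close_device, .read, .write, .mmap, ... */
};

static int my_probe(struct device *dev)
{
	struct my_vfio_device *mdev;
	int ret;

	mdev = vfio_alloc_device(my_vfio_device, vdev, dev, &my_ops);
	if (IS_ERR(mdev))
		return PTR_ERR(mdev);

	ret = vfio_register_group_dev(&mdev->vdev);
	if (ret)
		vfio_put_device(&mdev->vdev);
	return ret;
}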
diff --git a/include/linux/vfio_pci_core.h b/include/linux/vfio_pci_core.h
index 5579ece4347b..367fd79226a3 100644
--- a/include/linux/vfio_pci_core.h
+++ b/include/linux/vfio_pci_core.h
@@ -20,39 +20,10 @@
#define VFIO_PCI_CORE_H
#define VFIO_PCI_OFFSET_SHIFT 40
-
#define VFIO_PCI_OFFSET_TO_INDEX(off) (off >> VFIO_PCI_OFFSET_SHIFT)
#define VFIO_PCI_INDEX_TO_OFFSET(index) ((u64)(index) << VFIO_PCI_OFFSET_SHIFT)
#define VFIO_PCI_OFFSET_MASK (((u64)(1) << VFIO_PCI_OFFSET_SHIFT) - 1)
-/* Special capability IDs predefined access */
-#define PCI_CAP_ID_INVALID 0xFF /* default raw access */
-#define PCI_CAP_ID_INVALID_VIRT 0xFE /* default virt access */
-
-/* Cap maximum number of ioeventfds per device (arbitrary) */
-#define VFIO_PCI_IOEVENTFD_MAX 1000
-
-struct vfio_pci_ioeventfd {
- struct list_head next;
- struct vfio_pci_core_device *vdev;
- struct virqfd *virqfd;
- void __iomem *addr;
- uint64_t data;
- loff_t pos;
- int bar;
- int count;
- bool test_mem;
-};
-
-struct vfio_pci_irq_ctx {
- struct eventfd_ctx *trigger;
- struct virqfd *unmask;
- struct virqfd *mask;
- char *name;
- bool masked;
- struct irq_bypass_producer producer;
-};
-
struct vfio_pci_core_device;
struct vfio_pci_region;
@@ -78,23 +49,6 @@ struct vfio_pci_region {
u32 flags;
};
-struct vfio_pci_dummy_resource {
- struct resource resource;
- int index;
- struct list_head res_next;
-};
-
-struct vfio_pci_vf_token {
- struct mutex lock;
- uuid_t uuid;
- int users;
-};
-
-struct vfio_pci_mmap_vma {
- struct vm_area_struct *vma;
- struct list_head vma_next;
-};
-
struct vfio_pci_core_device {
struct vfio_device vdev;
struct pci_dev *pdev;
@@ -124,11 +78,14 @@ struct vfio_pci_core_device {
bool needs_reset;
bool nointx;
bool needs_pm_restore;
+ bool pm_intx_masked;
+ bool pm_runtime_engaged;
struct pci_saved_state *pci_saved_state;
struct pci_saved_state *pm_save;
int ioeventfds_nr;
struct eventfd_ctx *err_trigger;
struct eventfd_ctx *req_trigger;
+ struct eventfd_ctx *pm_wake_eventfd_ctx;
struct list_head dummy_resources_list;
struct mutex ioeventfds_lock;
struct list_head ioeventfds_list;
@@ -141,100 +98,17 @@ struct vfio_pci_core_device {
struct rw_semaphore memory_lock;
};
-#define is_intx(vdev) (vdev->irq_type == VFIO_PCI_INTX_IRQ_INDEX)
-#define is_msi(vdev) (vdev->irq_type == VFIO_PCI_MSI_IRQ_INDEX)
-#define is_msix(vdev) (vdev->irq_type == VFIO_PCI_MSIX_IRQ_INDEX)
-#define is_irq_none(vdev) (!(is_intx(vdev) || is_msi(vdev) || is_msix(vdev)))
-#define irq_is(vdev, type) (vdev->irq_type == type)
-
-void vfio_pci_intx_mask(struct vfio_pci_core_device *vdev);
-void vfio_pci_intx_unmask(struct vfio_pci_core_device *vdev);
-
-int vfio_pci_set_irqs_ioctl(struct vfio_pci_core_device *vdev,
- uint32_t flags, unsigned index,
- unsigned start, unsigned count, void *data);
-
-ssize_t vfio_pci_config_rw(struct vfio_pci_core_device *vdev,
- char __user *buf, size_t count,
- loff_t *ppos, bool iswrite);
-
-ssize_t vfio_pci_bar_rw(struct vfio_pci_core_device *vdev, char __user *buf,
- size_t count, loff_t *ppos, bool iswrite);
-
-#ifdef CONFIG_VFIO_PCI_VGA
-ssize_t vfio_pci_vga_rw(struct vfio_pci_core_device *vdev, char __user *buf,
- size_t count, loff_t *ppos, bool iswrite);
-#else
-static inline ssize_t vfio_pci_vga_rw(struct vfio_pci_core_device *vdev,
- char __user *buf, size_t count,
- loff_t *ppos, bool iswrite)
-{
- return -EINVAL;
-}
-#endif
-
-long vfio_pci_ioeventfd(struct vfio_pci_core_device *vdev, loff_t offset,
- uint64_t data, int count, int fd);
-
-int vfio_pci_init_perm_bits(void);
-void vfio_pci_uninit_perm_bits(void);
-
-int vfio_config_init(struct vfio_pci_core_device *vdev);
-void vfio_config_free(struct vfio_pci_core_device *vdev);
-
-int vfio_pci_register_dev_region(struct vfio_pci_core_device *vdev,
- unsigned int type, unsigned int subtype,
- const struct vfio_pci_regops *ops,
- size_t size, u32 flags, void *data);
-
-int vfio_pci_set_power_state(struct vfio_pci_core_device *vdev,
- pci_power_t state);
-
-bool __vfio_pci_memory_enabled(struct vfio_pci_core_device *vdev);
-void vfio_pci_zap_and_down_write_memory_lock(struct vfio_pci_core_device *vdev);
-u16 vfio_pci_memory_lock_and_enable(struct vfio_pci_core_device *vdev);
-void vfio_pci_memory_unlock_and_restore(struct vfio_pci_core_device *vdev,
- u16 cmd);
-
-#ifdef CONFIG_VFIO_PCI_IGD
-int vfio_pci_igd_init(struct vfio_pci_core_device *vdev);
-#else
-static inline int vfio_pci_igd_init(struct vfio_pci_core_device *vdev)
-{
- return -ENODEV;
-}
-#endif
-
-#ifdef CONFIG_VFIO_PCI_ZDEV_KVM
-int vfio_pci_info_zdev_add_caps(struct vfio_pci_core_device *vdev,
- struct vfio_info_cap *caps);
-int vfio_pci_zdev_open_device(struct vfio_pci_core_device *vdev);
-void vfio_pci_zdev_close_device(struct vfio_pci_core_device *vdev);
-#else
-static inline int vfio_pci_info_zdev_add_caps(struct vfio_pci_core_device *vdev,
- struct vfio_info_cap *caps)
-{
- return -ENODEV;
-}
-
-static inline int vfio_pci_zdev_open_device(struct vfio_pci_core_device *vdev)
-{
- return 0;
-}
-
-static inline void vfio_pci_zdev_close_device(struct vfio_pci_core_device *vdev)
-{}
-#endif
-
/* Will be exported for vfio pci drivers usage */
+int vfio_pci_core_register_dev_region(struct vfio_pci_core_device *vdev,
+ unsigned int type, unsigned int subtype,
+ const struct vfio_pci_regops *ops,
+ size_t size, u32 flags, void *data);
void vfio_pci_core_set_params(bool nointxmask, bool is_disable_vga,
bool is_disable_idle_d3);
void vfio_pci_core_close_device(struct vfio_device *core_vdev);
-void vfio_pci_core_init_device(struct vfio_pci_core_device *vdev,
- struct pci_dev *pdev,
- const struct vfio_device_ops *vfio_pci_ops);
+int vfio_pci_core_init_dev(struct vfio_device *core_vdev);
+void vfio_pci_core_release_dev(struct vfio_device *core_vdev);
int vfio_pci_core_register_device(struct vfio_pci_core_device *vdev);
-void vfio_pci_core_uninit_device(struct vfio_pci_core_device *vdev);
void vfio_pci_core_unregister_device(struct vfio_pci_core_device *vdev);
extern const struct pci_error_handlers vfio_pci_core_err_handlers;
int vfio_pci_core_sriov_configure(struct vfio_pci_core_device *vdev,
@@ -256,9 +130,4 @@ void vfio_pci_core_finish_enable(struct vfio_pci_core_device *vdev);
pci_ers_result_t vfio_pci_core_aer_err_detected(struct pci_dev *pdev,
pci_channel_state_t state);
-static inline bool vfio_pci_is_vga(struct pci_dev *pdev)
-{
- return (pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA;
-}
-
#endif /* VFIO_PCI_CORE_H */
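With vfio_pci_core_init_device()/uninit_device() replaced by init/release callbacks, a vendor vfio-pci driver can plug the core helpers straight into its vfio_device_ops and allocate through vfio_alloc_device(). A hedged sketch under those assumptions (driver and callback names are illustrative; real drivers fill in the remaining ops and any migration/logging support):

static const struct vfio_device_ops my_pci_ops = {
	.name		= "my-vfio-pci",
	.init		= vfio_pci_core_init_dev,
	.release	= vfio_pci_core_release_dev,
	/* .open_device/.close_device/.read/.write/.mmap/.ioctl typically
	 * wrap or reuse the vfio_pci_core_* helpers exported above
	 */
};

static int my_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct vfio_pci_core_device *vdev;
	int ret;

	vdev = vfio_alloc_device(vfio_pci_core_device, vdev, &pdev->dev,
				 &my_pci_ops);
	if (IS_ERR(vdev))
		return PTR_ERR(vdev);

	pci_set_drvdata(pdev, vdev);
	ret = vfio_pci_core_register_device(vdev);
	if (ret)
		vfio_put_device(&vdev->vdev);
	return ret;
}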
diff --git a/include/linux/virtio_pci_legacy.h b/include/linux/virtio_pci_legacy.h
index e5d665faf00e..a8dc757d0367 100644
--- a/include/linux/virtio_pci_legacy.h
+++ b/include/linux/virtio_pci_legacy.h
@@ -32,8 +32,6 @@ void vp_legacy_set_queue_address(struct virtio_pci_legacy_device *ldev,
u16 index, u32 queue_pfn);
bool vp_legacy_get_queue_enable(struct virtio_pci_legacy_device *ldev,
u16 idx);
-void vp_legacy_set_queue_size(struct virtio_pci_legacy_device *ldev,
- u16 idx, u16 size);
u16 vp_legacy_get_queue_size(struct virtio_pci_legacy_device *ldev,
u16 idx);
int vp_legacy_probe(struct virtio_pci_legacy_device *ldev);
diff --git a/include/linux/vm_event_item.h b/include/linux/vm_event_item.h
index f3fc36cd2276..3518dba1e02f 100644
--- a/include/linux/vm_event_item.h
+++ b/include/linux/vm_event_item.h
@@ -129,10 +129,6 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
NR_TLB_LOCAL_FLUSH_ALL,
NR_TLB_LOCAL_FLUSH_ONE,
#endif /* CONFIG_DEBUG_TLBFLUSH */
-#ifdef CONFIG_DEBUG_VM_VMACACHE
- VMACACHE_FIND_CALLS,
- VMACACHE_FIND_HITS,
-#endif
#ifdef CONFIG_SWAP
SWAP_RA,
SWAP_RA_HIT,
diff --git a/include/linux/vmacache.h b/include/linux/vmacache.h
deleted file mode 100644
index 6fce268a4588..000000000000
--- a/include/linux/vmacache.h
+++ /dev/null
@@ -1,28 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __LINUX_VMACACHE_H
-#define __LINUX_VMACACHE_H
-
-#include <linux/sched.h>
-#include <linux/mm.h>
-
-static inline void vmacache_flush(struct task_struct *tsk)
-{
- memset(tsk->vmacache.vmas, 0, sizeof(tsk->vmacache.vmas));
-}
-
-extern void vmacache_update(unsigned long addr, struct vm_area_struct *newvma);
-extern struct vm_area_struct *vmacache_find(struct mm_struct *mm,
- unsigned long addr);
-
-#ifndef CONFIG_MMU
-extern struct vm_area_struct *vmacache_find_exact(struct mm_struct *mm,
- unsigned long start,
- unsigned long end);
-#endif
-
-static inline void vmacache_invalidate(struct mm_struct *mm)
-{
- mm->vmacache_seqnum++;
-}
-
-#endif /* __LINUX_VMACACHE_H */
diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
index bfe38869498d..19cf5b6892ce 100644
--- a/include/linux/vmstat.h
+++ b/include/linux/vmstat.h
@@ -125,12 +125,6 @@ static inline void vm_events_fold_cpu(int cpu)
#define count_vm_tlb_events(x, y) do { (void)(y); } while (0)
#endif
-#ifdef CONFIG_DEBUG_VM_VMACACHE
-#define count_vm_vmacache_event(x) count_vm_event(x)
-#else
-#define count_vm_vmacache_event(x) do {} while (0)
-#endif
-
#define __count_zid_vm_events(item, zid, delta) \
__count_vm_events(item##_NORMAL - ZONE_NORMAL + zid, delta)
diff --git a/include/linux/vt_kern.h b/include/linux/vt_kern.h
index b5ab452fca5b..c1f5aebef170 100644
--- a/include/linux/vt_kern.h
+++ b/include/linux/vt_kern.h
@@ -30,7 +30,6 @@ struct vc_data *vc_deallocate(unsigned int console);
void reset_palette(struct vc_data *vc);
void do_blank_screen(int entering_gfx);
void do_unblank_screen(int leaving_gfx);
-void unblank_screen(void);
void poke_blanked_console(void);
int con_font_op(struct vc_data *vc, struct console_font_op *op);
int con_set_cmap(unsigned char __user *cmap);
diff --git a/include/linux/wait.h b/include/linux/wait.h
index 58cfbf81447c..7f5a51aae0a7 100644
--- a/include/linux/wait.h
+++ b/include/linux/wait.h
@@ -281,7 +281,7 @@ static inline void wake_up_pollfree(struct wait_queue_head *wq_head)
#define ___wait_is_interruptible(state) \
(!__builtin_constant_p(state) || \
- state == TASK_INTERRUPTIBLE || state == TASK_KILLABLE) \
+ (state & (TASK_INTERRUPTIBLE | TASK_WAKEKILL)))
extern void init_wait_entry(struct wait_queue_entry *wq_entry, int flags);
@@ -361,8 +361,8 @@ do { \
} while (0)
#define __wait_event_freezable(wq_head, condition) \
- ___wait_event(wq_head, condition, TASK_INTERRUPTIBLE, 0, 0, \
- freezable_schedule())
+ ___wait_event(wq_head, condition, (TASK_INTERRUPTIBLE|TASK_FREEZABLE), \
+ 0, 0, schedule())
/**
* wait_event_freezable - sleep (or freeze) until a condition gets true
@@ -420,8 +420,8 @@ do { \
#define __wait_event_freezable_timeout(wq_head, condition, timeout) \
___wait_event(wq_head, ___wait_cond_timeout(condition), \
- TASK_INTERRUPTIBLE, 0, timeout, \
- __ret = freezable_schedule_timeout(__ret))
+ (TASK_INTERRUPTIBLE|TASK_FREEZABLE), 0, timeout, \
+ __ret = schedule_timeout(__ret))
/*
* like wait_event_timeout() -- except it uses TASK_INTERRUPTIBLE to avoid
@@ -642,8 +642,8 @@ do { \
#define __wait_event_freezable_exclusive(wq, condition) \
- ___wait_event(wq, condition, TASK_INTERRUPTIBLE, 1, 0, \
- freezable_schedule())
+ ___wait_event(wq, condition, (TASK_INTERRUPTIBLE|TASK_FREEZABLE), 1, 0,\
+ schedule())
#define wait_event_freezable_exclusive(wq, condition) \
({ \
@@ -932,6 +932,34 @@ extern int do_wait_intr_irq(wait_queue_head_t *, wait_queue_entry_t *);
__ret; \
})
+#define __wait_event_state(wq, condition, state) \
+ ___wait_event(wq, condition, state, 0, 0, schedule())
+
+/**
+ * wait_event_state - sleep until a condition gets true
+ * @wq_head: the waitqueue to wait on
+ * @condition: a C expression for the event to wait for
+ * @state: state to sleep in
+ *
+ * The process is put to sleep (@state) until the @condition evaluates to true
+ * or a signal is received (when allowed by @state). The @condition is checked
+ * each time the waitqueue @wq_head is woken up.
+ *
+ * wake_up() has to be called after changing any variable that could
+ * change the result of the wait condition.
+ *
+ * The function will return -ERESTARTSYS if it was interrupted by a signal
+ * (when allowed by @state) and 0 if @condition evaluated to true.
+ */
+#define wait_event_state(wq_head, condition, state) \
+({ \
+ int __ret = 0; \
+ might_sleep(); \
+ if (!(condition)) \
+ __ret = __wait_event_state(wq_head, condition, state); \
+ __ret; \
+})
+
#define __wait_event_killable_timeout(wq_head, condition, timeout) \
___wait_event(wq_head, ___wait_cond_timeout(condition), \
TASK_KILLABLE, 0, timeout, \
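wait_event_state() is the generic form the reworked freezable variants above are built on: the caller picks the exact task state, so freezer support becomes a matter of OR-ing in TASK_FREEZABLE rather than calling freezable_schedule(). A small hedged usage sketch (my_wq and done are illustrative):

static DECLARE_WAIT_QUEUE_HEAD(my_wq);
static bool done;

static int wait_for_done(void)
{
	/* interruptible sleep that the freezer may also freeze;
	 * returns 0 or -ERESTARTSYS
	 */
	return wait_event_state(my_wq, READ_ONCE(done),
				TASK_INTERRUPTIBLE | TASK_FREEZABLE);
}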
diff --git a/include/linux/wireless.h b/include/linux/wireless.h
index 2d1b54556eff..e6e34d74dda0 100644
--- a/include/linux/wireless.h
+++ b/include/linux/wireless.h
@@ -26,7 +26,15 @@ struct compat_iw_point {
struct __compat_iw_event {
__u16 len; /* Real length of this stuff */
__u16 cmd; /* Wireless IOCTL */
- compat_caddr_t pointer;
+
+ union {
+ compat_caddr_t pointer;
+
+ /* we need ptr_bytes to make memcpy() run-time destination
+ * buffer bounds checking happy, nothing special
+ */
+ DECLARE_FLEX_ARRAY(__u8, ptr_bytes);
+ };
};
#define IW_EV_COMPAT_LCP_LEN offsetof(struct __compat_iw_event, pointer)
#define IW_EV_COMPAT_POINT_OFF offsetof(struct compat_iw_point, length)
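The anonymous union aliases the compat pointer with a flexible byte array so payload copies can target a properly bounded destination. A minimal hedged sketch of the copy pattern this enables (the helper name is illustrative):

static void compat_event_fill(struct __compat_iw_event *ev,
			      const void *payload, size_t len)
{
	/* copy into ptr_bytes (rather than &ev->pointer) so fortified
	 * memcpy() can bounds-check the destination at run time
	 */
	memcpy(ev->ptr_bytes, payload, len);
}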
diff --git a/include/linux/writeback.h b/include/linux/writeback.h
index 3f045f6d6c4f..06f9291b6fd5 100644
--- a/include/linux/writeback.h
+++ b/include/linux/writeback.h
@@ -17,20 +17,12 @@ struct bio;
DECLARE_PER_CPU(int, dirty_throttle_leaks);
/*
- * The 1/4 region under the global dirty thresh is for smooth dirty throttling:
- *
- * (thresh - thresh/DIRTY_FULL_SCOPE, thresh)
- *
- * Further beyond, all dirtier tasks will enter a loop waiting (possibly long
- * time) for the dirty pages to drop, unless written enough pages.
- *
* The global dirty threshold is normally equal to the global dirty limit,
* except when the system suddenly allocates a lot of anonymous memory and
* knocks down the global dirty threshold quickly, in which case the global
* dirty limit will follow down slowly to prevent livelocking all dirtier tasks.
*/
#define DIRTY_SCOPE 8
-#define DIRTY_FULL_SCOPE (DIRTY_SCOPE / 2)
struct backing_dev_info;
diff --git a/include/linux/xattr.h b/include/linux/xattr.h
index 979a9d3e5bfb..4c379d23ec6e 100644
--- a/include/linux/xattr.h
+++ b/include/linux/xattr.h
@@ -61,7 +61,7 @@ int __vfs_setxattr_locked(struct user_namespace *, struct dentry *,
const char *, const void *, size_t, int,
struct inode **);
int vfs_setxattr(struct user_namespace *, struct dentry *, const char *,
- void *, size_t, int);
+ const void *, size_t, int);
int __vfs_removexattr(struct user_namespace *, struct dentry *, const char *);
int __vfs_removexattr_locked(struct user_namespace *, struct dentry *,
const char *, struct inode **);
diff --git a/include/math-emu/op-common.h b/include/math-emu/op-common.h
index 4b57bbba588a..8ce066c035cf 100644
--- a/include/math-emu/op-common.h
+++ b/include/math-emu/op-common.h
@@ -662,12 +662,14 @@ do { \
if (X##_e < 0) \
{ \
FP_SET_EXCEPTION(FP_EX_INEXACT); \
+ fallthrough; \
case FP_CLS_ZERO: \
r = 0; \
} \
else if (X##_e >= rsize - (rsigned > 0 || X##_s) \
|| (!rsigned && X##_s)) \
{ /* overflow */ \
+ fallthrough; \
case FP_CLS_NAN: \
case FP_CLS_INF: \
if (rsigned == 2) \
@@ -767,6 +769,7 @@ do { \
if (X##_e >= rsize - (rsigned > 0 || X##_s) \
|| (!rsigned && X##_s)) \
{ /* overflow */ \
+ fallthrough; \
case FP_CLS_NAN: \
case FP_CLS_INF: \
if (!rsigned) \
diff --git a/include/media/davinci/dm355_ccdc.h b/include/media/davinci/dm355_ccdc.h
deleted file mode 100644
index 1f3d00aa46d1..000000000000
--- a/include/media/davinci/dm355_ccdc.h
+++ /dev/null
@@ -1,308 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Copyright (C) 2005-2009 Texas Instruments Inc
- */
-#ifndef _DM355_CCDC_H
-#define _DM355_CCDC_H
-#include <media/davinci/ccdc_types.h>
-#include <media/davinci/vpfe_types.h>
-
-/* enum for No of pixel per line to be avg. in Black Clamping */
-enum ccdc_sample_length {
- CCDC_SAMPLE_1PIXELS,
- CCDC_SAMPLE_2PIXELS,
- CCDC_SAMPLE_4PIXELS,
- CCDC_SAMPLE_8PIXELS,
- CCDC_SAMPLE_16PIXELS
-};
-
-/* enum for No of lines in Black Clamping */
-enum ccdc_sample_line {
- CCDC_SAMPLE_1LINES,
- CCDC_SAMPLE_2LINES,
- CCDC_SAMPLE_4LINES,
- CCDC_SAMPLE_8LINES,
- CCDC_SAMPLE_16LINES
-};
-
-/* enum for Alaw gamma width */
-enum ccdc_gamma_width {
- CCDC_GAMMA_BITS_13_4,
- CCDC_GAMMA_BITS_12_3,
- CCDC_GAMMA_BITS_11_2,
- CCDC_GAMMA_BITS_10_1,
- CCDC_GAMMA_BITS_09_0
-};
-
-enum ccdc_colpats {
- CCDC_RED,
- CCDC_GREEN_RED,
- CCDC_GREEN_BLUE,
- CCDC_BLUE
-};
-
-struct ccdc_col_pat {
- enum ccdc_colpats olop;
- enum ccdc_colpats olep;
- enum ccdc_colpats elop;
- enum ccdc_colpats elep;
-};
-
-enum ccdc_datasft {
- CCDC_DATA_NO_SHIFT,
- CCDC_DATA_SHIFT_1BIT,
- CCDC_DATA_SHIFT_2BIT,
- CCDC_DATA_SHIFT_3BIT,
- CCDC_DATA_SHIFT_4BIT,
- CCDC_DATA_SHIFT_5BIT,
- CCDC_DATA_SHIFT_6BIT
-};
-
-enum ccdc_data_size {
- CCDC_DATA_16BITS,
- CCDC_DATA_15BITS,
- CCDC_DATA_14BITS,
- CCDC_DATA_13BITS,
- CCDC_DATA_12BITS,
- CCDC_DATA_11BITS,
- CCDC_DATA_10BITS,
- CCDC_DATA_8BITS
-};
-enum ccdc_mfilt1 {
- CCDC_NO_MEDIAN_FILTER1,
- CCDC_AVERAGE_FILTER1,
- CCDC_MEDIAN_FILTER1
-};
-
-enum ccdc_mfilt2 {
- CCDC_NO_MEDIAN_FILTER2,
- CCDC_AVERAGE_FILTER2,
- CCDC_MEDIAN_FILTER2
-};
-
-/* structure for ALaw */
-struct ccdc_a_law {
- /* Enable/disable A-Law */
- unsigned char enable;
- /* Gamma Width Input */
- enum ccdc_gamma_width gamma_wd;
-};
-
-/* structure for Black Clamping */
-struct ccdc_black_clamp {
- /* only if bClampEnable is TRUE */
- unsigned char b_clamp_enable;
- /* only if bClampEnable is TRUE */
- enum ccdc_sample_length sample_pixel;
- /* only if bClampEnable is TRUE */
- enum ccdc_sample_line sample_ln;
- /* only if bClampEnable is TRUE */
- unsigned short start_pixel;
- /* only if bClampEnable is FALSE */
- unsigned short sgain;
- unsigned short dc_sub;
-};
-
-/* structure for Black Level Compensation */
-struct ccdc_black_compensation {
- /* Constant value to subtract from Red component */
- unsigned char r;
- /* Constant value to subtract from Gr component */
- unsigned char gr;
- /* Constant value to subtract from Blue component */
- unsigned char b;
- /* Constant value to subtract from Gb component */
- unsigned char gb;
-};
-
-struct ccdc_float {
- int integer;
- unsigned int decimal;
-};
-
-#define CCDC_CSC_COEFF_TABLE_SIZE 16
-/* structure for color space converter */
-struct ccdc_csc {
- unsigned char enable;
- /*
- * S8Q5. Use 2 decimal precision, user values range from -3.00 to 3.99.
- * example - to use 1.03, set integer part as 1, and decimal part as 3
- * to use -1.03, set integer part as -1 and decimal part as 3
- */
- struct ccdc_float coeff[CCDC_CSC_COEFF_TABLE_SIZE];
-};
-
-/* Structures for Vertical Defect Correction*/
-enum ccdc_vdf_csl {
- CCDC_VDF_NORMAL,
- CCDC_VDF_HORZ_INTERPOL_SAT,
- CCDC_VDF_HORZ_INTERPOL
-};
-
-enum ccdc_vdf_cuda {
- CCDC_VDF_WHOLE_LINE_CORRECT,
- CCDC_VDF_UPPER_DISABLE
-};
-
-enum ccdc_dfc_mwr {
- CCDC_DFC_MWR_WRITE_COMPLETE,
- CCDC_DFC_WRITE_REG
-};
-
-enum ccdc_dfc_mrd {
- CCDC_DFC_READ_COMPLETE,
- CCDC_DFC_READ_REG
-};
-
-enum ccdc_dfc_ma_rst {
- CCDC_DFC_INCR_ADDR,
- CCDC_DFC_CLR_ADDR
-};
-
-enum ccdc_dfc_mclr {
- CCDC_DFC_CLEAR_COMPLETE,
- CCDC_DFC_CLEAR
-};
-
-struct ccdc_dft_corr_ctl {
- enum ccdc_vdf_csl vdfcsl;
- enum ccdc_vdf_cuda vdfcuda;
- unsigned int vdflsft;
-};
-
-struct ccdc_dft_corr_mem_ctl {
- enum ccdc_dfc_mwr dfcmwr;
- enum ccdc_dfc_mrd dfcmrd;
- enum ccdc_dfc_ma_rst dfcmarst;
- enum ccdc_dfc_mclr dfcmclr;
-};
-
-#define CCDC_DFT_TABLE_SIZE 16
-/*
- * Main Structure for vertical defect correction. Vertical defect
- * correction can correct up to 16 defects if defects less than 16
- * then pad the rest with 0
- */
-struct ccdc_vertical_dft {
- unsigned char ver_dft_en;
- unsigned char gen_dft_en;
- unsigned int saturation_ctl;
- struct ccdc_dft_corr_ctl dft_corr_ctl;
- struct ccdc_dft_corr_mem_ctl dft_corr_mem_ctl;
- int table_size;
- unsigned int dft_corr_horz[CCDC_DFT_TABLE_SIZE];
- unsigned int dft_corr_vert[CCDC_DFT_TABLE_SIZE];
- unsigned int dft_corr_sub1[CCDC_DFT_TABLE_SIZE];
- unsigned int dft_corr_sub2[CCDC_DFT_TABLE_SIZE];
- unsigned int dft_corr_sub3[CCDC_DFT_TABLE_SIZE];
-};
-
-struct ccdc_data_offset {
- unsigned char horz_offset;
- unsigned char vert_offset;
-};
-
-/*
- * Structure for CCDC configuration parameters for raw capture mode passed
- * by application
- */
-struct ccdc_config_params_raw {
- /* data shift to be applied before storing */
- enum ccdc_datasft datasft;
- /* data size value from 8 to 16 bits */
- enum ccdc_data_size data_sz;
- /* median filter for sdram */
- enum ccdc_mfilt1 mfilt1;
- enum ccdc_mfilt2 mfilt2;
- /* low pass filter enable/disable */
- unsigned char lpf_enable;
- /* Threshold of median filter */
- int med_filt_thres;
- /*
- * horz and vertical data offset. Applicable for defect correction
- * and lsc
- */
- struct ccdc_data_offset data_offset;
- /* Structure for Optional A-Law */
- struct ccdc_a_law alaw;
- /* Structure for Optical Black Clamp */
- struct ccdc_black_clamp blk_clamp;
- /* Structure for Black Compensation */
- struct ccdc_black_compensation blk_comp;
- /* structure for vertical Defect Correction Module Configuration */
- struct ccdc_vertical_dft vertical_dft;
- /* structure for color space converter Module Configuration */
- struct ccdc_csc csc;
- /* color patters for bayer capture */
- struct ccdc_col_pat col_pat_field0;
- struct ccdc_col_pat col_pat_field1;
-};
-
-#ifdef __KERNEL__
-#include <linux/io.h>
-
-#define CCDC_WIN_PAL {0, 0, 720, 576}
-#define CCDC_WIN_VGA {0, 0, 640, 480}
-
-struct ccdc_params_ycbcr {
- /* pixel format */
- enum ccdc_pixfmt pix_fmt;
- /* progressive or interlaced frame */
- enum ccdc_frmfmt frm_fmt;
- /* video window */
- struct v4l2_rect win;
- /* field id polarity */
- enum vpfe_pin_pol fid_pol;
- /* vertical sync polarity */
- enum vpfe_pin_pol vd_pol;
- /* horizontal sync polarity */
- enum vpfe_pin_pol hd_pol;
- /* enable BT.656 embedded sync mode */
- int bt656_enable;
- /* cb:y:cr:y or y:cb:y:cr in memory */
- enum ccdc_pixorder pix_order;
- /* interleaved or separated fields */
- enum ccdc_buftype buf_type;
-};
-
-/* Gain applied to Raw Bayer data */
-struct ccdc_gain {
- unsigned short r_ye;
- unsigned short gr_cy;
- unsigned short gb_g;
- unsigned short b_mg;
-};
-
-/* Structure for CCDC configuration parameters for raw capture mode */
-struct ccdc_params_raw {
- /* pixel format */
- enum ccdc_pixfmt pix_fmt;
- /* progressive or interlaced frame */
- enum ccdc_frmfmt frm_fmt;
- /* video window */
- struct v4l2_rect win;
- /* field id polarity */
- enum vpfe_pin_pol fid_pol;
- /* vertical sync polarity */
- enum vpfe_pin_pol vd_pol;
- /* horizontal sync polarity */
- enum vpfe_pin_pol hd_pol;
- /* interleaved or separated fields */
- enum ccdc_buftype buf_type;
- /* Gain values */
- struct ccdc_gain gain;
- /* offset */
- unsigned int ccdc_offset;
- /* horizontal flip enable */
- unsigned char horz_flip_enable;
- /*
- * enable to store the image in inverse order in memory
- * (bottom to top)
- */
- unsigned char image_invert_enable;
- /* Configurable part of raw data */
- struct ccdc_config_params_raw config_params;
-};
-
-#endif
-#endif /* DM355_CCDC_H */
diff --git a/include/media/davinci/dm644x_ccdc.h b/include/media/davinci/dm644x_ccdc.h
deleted file mode 100644
index c20dba3d76d6..000000000000
--- a/include/media/davinci/dm644x_ccdc.h
+++ /dev/null
@@ -1,171 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Copyright (C) 2006-2009 Texas Instruments Inc
- */
-#ifndef _DM644X_CCDC_H
-#define _DM644X_CCDC_H
-#include <media/davinci/ccdc_types.h>
-#include <media/davinci/vpfe_types.h>
-
-/* enum for No of pixel per line to be avg. in Black Clamping*/
-enum ccdc_sample_length {
- CCDC_SAMPLE_1PIXELS,
- CCDC_SAMPLE_2PIXELS,
- CCDC_SAMPLE_4PIXELS,
- CCDC_SAMPLE_8PIXELS,
- CCDC_SAMPLE_16PIXELS
-};
-
-/* enum for No of lines in Black Clamping */
-enum ccdc_sample_line {
- CCDC_SAMPLE_1LINES,
- CCDC_SAMPLE_2LINES,
- CCDC_SAMPLE_4LINES,
- CCDC_SAMPLE_8LINES,
- CCDC_SAMPLE_16LINES
-};
-
-/* enum for Alaw gamma width */
-enum ccdc_gamma_width {
- CCDC_GAMMA_BITS_15_6, /* use bits 15-6 for gamma */
- CCDC_GAMMA_BITS_14_5,
- CCDC_GAMMA_BITS_13_4,
- CCDC_GAMMA_BITS_12_3,
- CCDC_GAMMA_BITS_11_2,
- CCDC_GAMMA_BITS_10_1,
- CCDC_GAMMA_BITS_09_0 /* use bits 9-0 for gamma */
-};
-
-/* returns the highest bit used for the gamma */
-static inline u8 ccdc_gamma_width_max_bit(enum ccdc_gamma_width width)
-{
- return 15 - width;
-}
-
-enum ccdc_data_size {
- CCDC_DATA_16BITS,
- CCDC_DATA_15BITS,
- CCDC_DATA_14BITS,
- CCDC_DATA_13BITS,
- CCDC_DATA_12BITS,
- CCDC_DATA_11BITS,
- CCDC_DATA_10BITS,
- CCDC_DATA_8BITS
-};
-
-/* returns the highest bit used for this data size */
-static inline u8 ccdc_data_size_max_bit(enum ccdc_data_size sz)
-{
- return sz == CCDC_DATA_8BITS ? 7 : 15 - sz;
-}
-
-/* structure for ALaw */
-struct ccdc_a_law {
- /* Enable/disable A-Law */
- unsigned char enable;
- /* Gamma Width Input */
- enum ccdc_gamma_width gamma_wd;
-};
-
-/* structure for Black Clamping */
-struct ccdc_black_clamp {
- unsigned char enable;
- /* only if bClampEnable is TRUE */
- enum ccdc_sample_length sample_pixel;
- /* only if bClampEnable is TRUE */
- enum ccdc_sample_line sample_ln;
- /* only if bClampEnable is TRUE */
- unsigned short start_pixel;
- /* only if bClampEnable is TRUE */
- unsigned short sgain;
- /* only if bClampEnable is FALSE */
- unsigned short dc_sub;
-};
-
-/* structure for Black Level Compensation */
-struct ccdc_black_compensation {
- /* Constant value to subtract from Red component */
- char r;
- /* Constant value to subtract from Gr component */
- char gr;
- /* Constant value to subtract from Blue component */
- char b;
- /* Constant value to subtract from Gb component */
- char gb;
-};
-
-/* Structure for CCDC configuration parameters for raw capture mode passed
- * by application
- */
-struct ccdc_config_params_raw {
- /* data size value from 8 to 16 bits */
- enum ccdc_data_size data_sz;
- /* Structure for Optional A-Law */
- struct ccdc_a_law alaw;
- /* Structure for Optical Black Clamp */
- struct ccdc_black_clamp blk_clamp;
- /* Structure for Black Compensation */
- struct ccdc_black_compensation blk_comp;
-};
-
-
-#ifdef __KERNEL__
-#include <linux/io.h>
-/* Define to enable/disable video port */
-#define FP_NUM_BYTES 4
-/* Define for extra pixel/line and extra lines/frame */
-#define NUM_EXTRAPIXELS 8
-#define NUM_EXTRALINES 8
-
-/* settings for commonly used video formats */
-#define CCDC_WIN_PAL {0, 0, 720, 576}
-/* ntsc square pixel */
-#define CCDC_WIN_VGA {0, 0, (640 + NUM_EXTRAPIXELS), (480 + NUM_EXTRALINES)}
-
-/* Structure for CCDC configuration parameters for raw capture mode */
-struct ccdc_params_raw {
- /* pixel format */
- enum ccdc_pixfmt pix_fmt;
- /* progressive or interlaced frame */
- enum ccdc_frmfmt frm_fmt;
- /* video window */
- struct v4l2_rect win;
- /* field id polarity */
- enum vpfe_pin_pol fid_pol;
- /* vertical sync polarity */
- enum vpfe_pin_pol vd_pol;
- /* horizontal sync polarity */
- enum vpfe_pin_pol hd_pol;
- /* interleaved or separated fields */
- enum ccdc_buftype buf_type;
- /*
- * enable to store the image in inverse
- * order in memory(bottom to top)
- */
- unsigned char image_invert_enable;
- /* configurable parameters */
- struct ccdc_config_params_raw config_params;
-};
-
-struct ccdc_params_ycbcr {
- /* pixel format */
- enum ccdc_pixfmt pix_fmt;
- /* progressive or interlaced frame */
- enum ccdc_frmfmt frm_fmt;
- /* video window */
- struct v4l2_rect win;
- /* field id polarity */
- enum vpfe_pin_pol fid_pol;
- /* vertical sync polarity */
- enum vpfe_pin_pol vd_pol;
- /* horizontal sync polarity */
- enum vpfe_pin_pol hd_pol;
- /* enable BT.656 embedded sync mode */
- int bt656_enable;
- /* cb:y:cr:y or y:cb:y:cr in memory */
- enum ccdc_pixorder pix_order;
- /* interleaved or separated fields */
- enum ccdc_buftype buf_type;
-};
-#endif
-#endif /* _DM644X_CCDC_H */
diff --git a/include/media/davinci/isif.h b/include/media/davinci/isif.h
deleted file mode 100644
index 8369acd26e7e..000000000000
--- a/include/media/davinci/isif.h
+++ /dev/null
@@ -1,518 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Copyright (C) 2008-2009 Texas Instruments Inc
- *
- * isif header file
- */
-#ifndef _ISIF_H
-#define _ISIF_H
-
-#include <media/davinci/ccdc_types.h>
-#include <media/davinci/vpfe_types.h>
-
-/* isif float type S8Q8/U8Q8 */
-struct isif_float_8 {
- /* 8 bit integer part */
- __u8 integer;
- /* 8 bit decimal part */
- __u8 decimal;
-};
-
-/* isif float type U16Q16/S16Q16 */
-struct isif_float_16 {
- /* 16 bit integer part */
- __u16 integer;
- /* 16 bit decimal part */
- __u16 decimal;
-};
-
-/************************************************************************
- * Vertical Defect Correction parameters
- ***********************************************************************/
-/* Defect Correction (DFC) table entry */
-struct isif_vdfc_entry {
- /* vertical position of defect */
- __u16 pos_vert;
- /* horizontal position of defect */
- __u16 pos_horz;
- /*
- * Defect level of Vertical line defect position. This is subtracted
- * from the data at the defect position
- */
- __u8 level_at_pos;
- /*
- * Defect level of the pixels upper than the vertical line defect.
- * This is subtracted from the data
- */
- __u8 level_up_pixels;
- /*
- * Defect level of the pixels lower than the vertical line defect.
- * This is subtracted from the data
- */
- __u8 level_low_pixels;
-};
-
-#define ISIF_VDFC_TABLE_SIZE 8
-struct isif_dfc {
- /* enable vertical defect correction */
- __u8 en;
- /* Defect level subtraction. Just fed through if saturating */
-#define ISIF_VDFC_NORMAL 0
- /*
- * Defect level subtraction. Horizontal interpolation ((i-2)+(i+2))/2
- * if data saturating
- */
-#define ISIF_VDFC_HORZ_INTERPOL_IF_SAT 1
- /* Horizontal interpolation (((i-2)+(i+2))/2) */
-#define ISIF_VDFC_HORZ_INTERPOL 2
- /* one of the vertical defect correction modes above */
- __u8 corr_mode;
- /* 0 - whole line corrected, 1 - not pixels upper than the defect */
- __u8 corr_whole_line;
-#define ISIF_VDFC_NO_SHIFT 0
-#define ISIF_VDFC_SHIFT_1 1
-#define ISIF_VDFC_SHIFT_2 2
-#define ISIF_VDFC_SHIFT_3 3
-#define ISIF_VDFC_SHIFT_4 4
- /*
- * defect level shift value. level_at_pos, level_upper_pos,
- * and level_lower_pos can be shifted up by this value. Choose
- * one of the values above
- */
- __u8 def_level_shift;
- /* defect saturation level */
- __u16 def_sat_level;
- /* number of vertical defects. Max is ISIF_VDFC_TABLE_SIZE */
- __u16 num_vdefects;
- /* VDFC table ptr */
- struct isif_vdfc_entry table[ISIF_VDFC_TABLE_SIZE];
-};
-
-struct isif_horz_bclamp {
-
- /* Horizontal clamp disabled. Only vertical clamp value is subtracted */
-#define ISIF_HORZ_BC_DISABLE 0
- /*
- * Horizontal clamp value is calculated and subtracted from image data
- * along with vertical clamp value
- */
-#define ISIF_HORZ_BC_CLAMP_CALC_ENABLED 1
- /*
- * Horizontal clamp value calculated from previous image is subtracted
- * from image data along with vertical clamp value.
- */
-#define ISIF_HORZ_BC_CLAMP_NOT_UPDATED 2
- /* horizontal clamp mode. One of the values above */
- __u8 mode;
- /*
- * pixel value limit enable.
- * 0 - limit disabled
- * 1 - pixel value limited to 1023
- */
- __u8 clamp_pix_limit;
- /* Select Most left window for bc calculation */
-#define ISIF_SEL_MOST_LEFT_WIN 0
- /* Select Most right window for bc calculation */
-#define ISIF_SEL_MOST_RIGHT_WIN 1
- /* Select most left or right window for clamp val calculation */
- __u8 base_win_sel_calc;
- /* Window count per color for calculation. range 1-32 */
- __u8 win_count_calc;
- /* Window start position - horizontal for calculation. 0 - 8191 */
- __u16 win_start_h_calc;
- /* Window start position - vertical for calculation 0 - 8191 */
- __u16 win_start_v_calc;
-#define ISIF_HORZ_BC_SZ_H_2PIXELS 0
-#define ISIF_HORZ_BC_SZ_H_4PIXELS 1
-#define ISIF_HORZ_BC_SZ_H_8PIXELS 2
-#define ISIF_HORZ_BC_SZ_H_16PIXELS 3
- /* Width of the sample window in pixels for calculation */
- __u8 win_h_sz_calc;
-#define ISIF_HORZ_BC_SZ_V_32PIXELS 0
-#define ISIF_HORZ_BC_SZ_V_64PIXELS 1
-#define ISIF_HORZ_BC_SZ_V_128PIXELS 2
-#define ISIF_HORZ_BC_SZ_V_256PIXELS 3
- /* Height of the sample window in pixels for calculation */
- __u8 win_v_sz_calc;
-};
-
-/************************************************************************
- * Black Clamp parameters
- ***********************************************************************/
-struct isif_vert_bclamp {
- /* Reset value used is the clamp value calculated */
-#define ISIF_VERT_BC_USE_HORZ_CLAMP_VAL 0
- /* Reset value used is reset_clamp_val configured */
-#define ISIF_VERT_BC_USE_CONFIG_CLAMP_VAL 1
- /* No update, previous image value is used */
-#define ISIF_VERT_BC_NO_UPDATE 2
- /*
- * Reset value selector for vertical clamp calculation. Use one of
- * the above values
- */
- __u8 reset_val_sel;
- /* U8Q8. Line average coefficient used in vertical clamp calculation */
- __u8 line_ave_coef;
- /* Height of the optical black region for calculation */
- __u16 ob_v_sz_calc;
- /* Optical black region start position - horizontal. 0 - 8191 */
- __u16 ob_start_h;
- /* Optical black region start position - vertical 0 - 8191 */
- __u16 ob_start_v;
-};
-
-struct isif_black_clamp {
- /*
- * This offset value is added irrespective of the clamp enable status.
- * S13
- */
- __u16 dc_offset;
- /*
- * Enable black/digital clamp value to be subtracted from the image data
- */
- __u8 en;
- /*
- * black clamp mode. same/separate clamp for 4 colors
- * 0 - disable - same clamp value for all colors
- * 1 - clamp value calculated separately for all colors
- */
- __u8 bc_mode_color;
- /* Vertical start position for bc subtraction */
- __u16 vert_start_sub;
- /* Black clamp for horizontal direction */
- struct isif_horz_bclamp horz;
- /* Black clamp for vertical direction */
- struct isif_vert_bclamp vert;
-};
-
-/*************************************************************************
-** Color Space Conversion (CSC)
-*************************************************************************/
-#define ISIF_CSC_NUM_COEFF 16
-struct isif_color_space_conv {
- /* Enable color space conversion */
- __u8 en;
- /*
- * csc coefficient table. S8Q5, M00 at index 0, M01 at index 1, and
- * so forth
- */
- struct isif_float_8 coeff[ISIF_CSC_NUM_COEFF];
-};
-
-
-/*************************************************************************
-** Black Compensation parameters
-*************************************************************************/
-struct isif_black_comp {
- /* Comp for Red */
- __s8 r_comp;
- /* Comp for Gr */
- __s8 gr_comp;
- /* Comp for Blue */
- __s8 b_comp;
- /* Comp for Gb */
- __s8 gb_comp;
-};
-
-/*************************************************************************
-** Gain parameters
-*************************************************************************/
-struct isif_gain {
- /* Gain for Red or ye */
- struct isif_float_16 r_ye;
- /* Gain for Gr or cy */
- struct isif_float_16 gr_cy;
- /* Gain for Gb or g */
- struct isif_float_16 gb_g;
- /* Gain for Blue or mg */
- struct isif_float_16 b_mg;
-};
-
-#define ISIF_LINEAR_TAB_SIZE 192
-/*************************************************************************
-** Linearization parameters
-*************************************************************************/
-struct isif_linearize {
- /* Enable or Disable linearization of data */
- __u8 en;
- /* Shift value applied */
- __u8 corr_shft;
- /* scale factor applied U11Q10 */
- struct isif_float_16 scale_fact;
- /* Size of the linear table */
- __u16 table[ISIF_LINEAR_TAB_SIZE];
-};
-
-/* Color patterns */
-#define ISIF_RED 0
-#define ISIF_GREEN_RED 1
-#define ISIF_GREEN_BLUE 2
-#define ISIF_BLUE 3
-struct isif_col_pat {
- __u8 olop;
- __u8 olep;
- __u8 elop;
- __u8 elep;
-};
-
-/*************************************************************************
-** Data formatter parameters
-*************************************************************************/
-struct isif_fmtplen {
- /*
- * number of program entries for SET0, range 1 - 16
- * when fmtmode is ISIF_SPLIT, 1 - 8 when fmtmode is
- * ISIF_COMBINE
- */
- __u16 plen0;
- /*
- * number of program entries for SET1, range 1 - 16
- * when fmtmode is ISIF_SPLIT, 1 - 8 when fmtmode is
- * ISIF_COMBINE
- */
- __u16 plen1;
- /**
- * number of program entries for SET2, range 1 - 16
- * when fmtmode is ISIF_SPLIT, 1 - 8 when fmtmode is
- * ISIF_COMBINE
- */
- __u16 plen2;
- /**
- * number of program entries for SET3, range 1 - 16
- * when fmtmode is ISIF_SPLIT, 1 - 8 when fmtmode is
- * ISIF_COMBINE
- */
- __u16 plen3;
-};
-
-struct isif_fmt_cfg {
-#define ISIF_SPLIT 0
-#define ISIF_COMBINE 1
- /* Split or combine or line alternate */
- __u8 fmtmode;
- /* enable or disable line alternating mode */
- __u8 ln_alter_en;
-#define ISIF_1LINE 0
-#define ISIF_2LINES 1
-#define ISIF_3LINES 2
-#define ISIF_4LINES 3
- /* Split/combine line number */
- __u8 lnum;
- /* Address increment Range 1 - 16 */
- __u8 addrinc;
-};
-
-struct isif_fmt_addr_ptr {
- /* Initial address */
- __u32 init_addr;
- /* output line number */
-#define ISIF_1STLINE 0
-#define ISIF_2NDLINE 1
-#define ISIF_3RDLINE 2
-#define ISIF_4THLINE 3
- __u8 out_line;
-};
-
-struct isif_fmtpgm_ap {
- /* program address pointer */
- __u8 pgm_aptr;
- /* program address increment or decrement */
- __u8 pgmupdt;
-};
-
-struct isif_data_formatter {
- /* Enable/Disable data formatter */
- __u8 en;
- /* data formatter configuration */
- struct isif_fmt_cfg cfg;
- /* Formatter program entries length */
- struct isif_fmtplen plen;
- /* first pixel in a line fed to formatter */
- __u16 fmtrlen;
- /* HD interval for output line. Only valid when split line */
- __u16 fmthcnt;
- /* formatter address pointers */
- struct isif_fmt_addr_ptr fmtaddr_ptr[16];
- /* program enable/disable */
- __u8 pgm_en[32];
- /* program address pointers */
- struct isif_fmtpgm_ap fmtpgm_ap[32];
-};
-
-struct isif_df_csc {
- /* Color Space Conversion configuration, 0 - csc, 1 - df */
- __u8 df_or_csc;
- /* csc configuration valid if df_or_csc is 0 */
- struct isif_color_space_conv csc;
- /* data formatter configuration valid if df_or_csc is 1 */
- struct isif_data_formatter df;
- /* start pixel in a line at the input */
- __u32 start_pix;
- /* number of pixels in input line */
- __u32 num_pixels;
- /* start line at the input */
- __u32 start_line;
- /* number of lines at the input */
- __u32 num_lines;
-};
-
-struct isif_gain_offsets_adj {
- /* Gain adjustment per color */
- struct isif_gain gain;
- /* Offset adjustment */
- __u16 offset;
- /* Enable or Disable Gain adjustment for SDRAM data */
- __u8 gain_sdram_en;
- /* Enable or Disable Gain adjustment for IPIPE data */
- __u8 gain_ipipe_en;
- /* Enable or Disable Gain adjustment for H3A data */
- __u8 gain_h3a_en;
- /* Enable or Disable Gain adjustment for SDRAM data */
- __u8 offset_sdram_en;
- /* Enable or Disable Gain adjustment for IPIPE data */
- __u8 offset_ipipe_en;
- /* Enable or Disable Gain adjustment for H3A data */
- __u8 offset_h3a_en;
-};
-
-struct isif_cul {
- /* Horizontal Cull pattern for odd lines */
- __u8 hcpat_odd;
- /* Horizontal Cull pattern for even lines */
- __u8 hcpat_even;
- /* Vertical Cull pattern */
- __u8 vcpat;
- /* Enable or disable lpf. Apply when cull is enabled */
- __u8 en_lpf;
-};
-
-struct isif_compress {
-#define ISIF_ALAW 0
-#define ISIF_DPCM 1
-#define ISIF_NO_COMPRESSION 2
- /* Compression Algorithm used */
- __u8 alg;
- /* Choose Predictor1 for DPCM compression */
-#define ISIF_DPCM_PRED1 0
- /* Choose Predictor2 for DPCM compression */
-#define ISIF_DPCM_PRED2 1
- /* Predictor for DPCM compression */
- __u8 pred;
-};
-
-/* all the stuff in this struct will be provided by userland */
-struct isif_config_params_raw {
- /* Linearization parameters for image sensor data input */
- struct isif_linearize linearize;
- /* Data formatter or CSC */
- struct isif_df_csc df_csc;
- /* Defect Pixel Correction (DFC) configuration */
- struct isif_dfc dfc;
- /* Black/Digital Clamp configuration */
- struct isif_black_clamp bclamp;
- /* Gain, offset adjustments */
- struct isif_gain_offsets_adj gain_offset;
- /* Culling */
- struct isif_cul culling;
- /* A-Law and DPCM compression options */
- struct isif_compress compress;
- /* horizontal offset for Gain/LSC/DFC */
- __u16 horz_offset;
- /* vertical offset for Gain/LSC/DFC */
- __u16 vert_offset;
- /* color pattern for field 0 */
- struct isif_col_pat col_pat_field0;
- /* color pattern for field 1 */
- struct isif_col_pat col_pat_field1;
-#define ISIF_NO_SHIFT 0
-#define ISIF_1BIT_SHIFT 1
-#define ISIF_2BIT_SHIFT 2
-#define ISIF_3BIT_SHIFT 3
-#define ISIF_4BIT_SHIFT 4
-#define ISIF_5BIT_SHIFT 5
-#define ISIF_6BIT_SHIFT 6
- /* Data shift applied before storing to SDRAM */
- __u8 data_shift;
- /* enable input test pattern generation */
- __u8 test_pat_gen;
-};
-
-#ifdef __KERNEL__
-struct isif_ycbcr_config {
- /* isif pixel format */
- enum ccdc_pixfmt pix_fmt;
- /* isif frame format */
- enum ccdc_frmfmt frm_fmt;
- /* ISIF crop window */
- struct v4l2_rect win;
- /* field polarity */
- enum vpfe_pin_pol fid_pol;
- /* interface VD polarity */
- enum vpfe_pin_pol vd_pol;
- /* interface HD polarity */
- enum vpfe_pin_pol hd_pol;
- /* isif pix order. Only used for ycbcr capture */
- enum ccdc_pixorder pix_order;
- /* isif buffer type. Only used for ycbcr capture */
- enum ccdc_buftype buf_type;
-};
-
-/* MSB of image data connected to sensor port */
-enum isif_data_msb {
- ISIF_BIT_MSB_15,
- ISIF_BIT_MSB_14,
- ISIF_BIT_MSB_13,
- ISIF_BIT_MSB_12,
- ISIF_BIT_MSB_11,
- ISIF_BIT_MSB_10,
- ISIF_BIT_MSB_9,
- ISIF_BIT_MSB_8,
- ISIF_BIT_MSB_7
-};
-
-enum isif_cfa_pattern {
- ISIF_CFA_PAT_MOSAIC,
- ISIF_CFA_PAT_STRIPE
-};
-
-struct isif_params_raw {
- /* isif pixel format */
- enum ccdc_pixfmt pix_fmt;
- /* isif frame format */
- enum ccdc_frmfmt frm_fmt;
- /* video window */
- struct v4l2_rect win;
- /* field polarity */
- enum vpfe_pin_pol fid_pol;
- /* interface VD polarity */
- enum vpfe_pin_pol vd_pol;
- /* interface HD polarity */
- enum vpfe_pin_pol hd_pol;
- /* buffer type. Applicable for interlaced mode */
- enum ccdc_buftype buf_type;
- /* Gain values */
- struct isif_gain gain;
- /* cfa pattern */
- enum isif_cfa_pattern cfa_pat;
- /* Data MSB position */
- enum isif_data_msb data_msb;
- /* Enable horizontal flip */
- unsigned char horz_flip_en;
- /* Enable image invert vertically */
- unsigned char image_invert_en;
-
- /* all the userland defined stuff*/
- struct isif_config_params_raw config_params;
-};
-
-enum isif_data_pack {
- ISIF_PACK_16BIT,
- ISIF_PACK_12BIT,
- ISIF_PACK_8BIT
-};
-
-#define ISIF_WIN_NTSC {0, 0, 720, 480}
-#define ISIF_WIN_VGA {0, 0, 640, 480}
-
-#endif
-#endif
diff --git a/include/media/davinci/vpbe_display.h b/include/media/davinci/vpbe_display.h
index 6d2a93740130..d8751ea926a2 100644
--- a/include/media/davinci/vpbe_display.h
+++ b/include/media/davinci/vpbe_display.h
@@ -69,13 +69,13 @@ struct vpbe_layer {
struct vpbe_disp_buffer *cur_frm;
/* Pointer pointing to next v4l2_buffer */
struct vpbe_disp_buffer *next_frm;
- /* videobuf specific parameters
- * Buffer queue used in video-buf
+ /* vb2 specific parameters
+ * Buffer queue used in vb2
*/
struct vb2_queue buffer_queue;
/* Queue of filled frames */
struct list_head dma_queue;
- /* Used in video-buf */
+ /* Used for video buffer handling */
spinlock_t irqlock;
/* V4l2 specific parameters */
/* Identifies video device for this layer */
diff --git a/include/media/drv-intf/saa7146.h b/include/media/drv-intf/saa7146.h
deleted file mode 100644
index 71ce63c99cb4..000000000000
--- a/include/media/drv-intf/saa7146.h
+++ /dev/null
@@ -1,472 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __SAA7146__
-#define __SAA7146__
-
-#include <linux/delay.h> /* for delay-stuff */
-#include <linux/slab.h> /* for kmalloc/kfree */
-#include <linux/pci.h> /* for pci-config-stuff, vendor ids etc. */
-#include <linux/init.h> /* for "__init" */
-#include <linux/interrupt.h> /* for IMMEDIATE_BH */
-#include <linux/kmod.h> /* for kernel module loader */
-#include <linux/i2c.h> /* for i2c subsystem */
-#include <asm/io.h> /* for accessing devices */
-#include <linux/stringify.h>
-#include <linux/mutex.h>
-#include <linux/scatterlist.h>
-#include <media/v4l2-device.h>
-#include <media/v4l2-ctrls.h>
-
-#include <linux/vmalloc.h> /* for vmalloc() */
-#include <linux/mm.h> /* for vmalloc_to_page() */
-
-#define saa7146_write(sxy,adr,dat) writel((dat),(sxy->mem+(adr)))
-#define saa7146_read(sxy,adr) readl(sxy->mem+(adr))
-
-extern unsigned int saa7146_debug;
-
-#ifndef DEBUG_VARIABLE
- #define DEBUG_VARIABLE saa7146_debug
-#endif
-
-#define ERR(fmt, ...) pr_err("%s: " fmt, __func__, ##__VA_ARGS__)
-
-#define _DBG(mask, fmt, ...) \
-do { \
- if (DEBUG_VARIABLE & mask) \
- pr_debug("%s(): " fmt, __func__, ##__VA_ARGS__); \
-} while (0)
-
-/* simple debug messages */
-#define DEB_S(fmt, ...) _DBG(0x01, fmt, ##__VA_ARGS__)
-/* more detailed debug messages */
-#define DEB_D(fmt, ...) _DBG(0x02, fmt, ##__VA_ARGS__)
-/* print enter and exit of functions */
-#define DEB_EE(fmt, ...) _DBG(0x04, fmt, ##__VA_ARGS__)
-/* i2c debug messages */
-#define DEB_I2C(fmt, ...) _DBG(0x08, fmt, ##__VA_ARGS__)
-/* vbi debug messages */
-#define DEB_VBI(fmt, ...) _DBG(0x10, fmt, ##__VA_ARGS__)
-/* interrupt debug messages */
-#define DEB_INT(fmt, ...) _DBG(0x20, fmt, ##__VA_ARGS__)
-/* capture debug messages */
-#define DEB_CAP(fmt, ...) _DBG(0x40, fmt, ##__VA_ARGS__)
-
-#define SAA7146_ISR_CLEAR(x,y) \
- saa7146_write(x, ISR, (y));
-
-struct module;
-
-struct saa7146_dev;
-struct saa7146_extension;
-struct saa7146_vv;
-
-/* saa7146 page table */
-struct saa7146_pgtable {
- unsigned int size;
- __le32 *cpu;
- dma_addr_t dma;
- /* used for offsets for u,v planes for planar capture modes */
- unsigned long offset;
- /* used for custom pagetables (used for example by budget dvb cards) */
- struct scatterlist *slist;
- int nents;
-};
-
-struct saa7146_pci_extension_data {
- struct saa7146_extension *ext;
- void *ext_priv; /* most likely a name string */
-};
-
-#define MAKE_EXTENSION_PCI(x_var, x_vendor, x_device) \
- { \
- .vendor = PCI_VENDOR_ID_PHILIPS, \
- .device = PCI_DEVICE_ID_PHILIPS_SAA7146, \
- .subvendor = x_vendor, \
- .subdevice = x_device, \
- .driver_data = (unsigned long)& x_var, \
- }
-
-struct saa7146_extension
-{
- char name[32]; /* name of the device */
-#define SAA7146_USE_I2C_IRQ 0x1
-#define SAA7146_I2C_SHORT_DELAY 0x2
- int flags;
-
- /* pairs of subvendor and subdevice ids for
- supported devices, last entry 0xffff, 0xffff */
- struct module *module;
- struct pci_driver driver;
- const struct pci_device_id *pci_tbl;
-
- /* extension functions */
- int (*probe)(struct saa7146_dev *);
- int (*attach)(struct saa7146_dev *, struct saa7146_pci_extension_data *);
- int (*detach)(struct saa7146_dev*);
-
- u32 irq_mask; /* mask to indicate, which irq-events are handled by the extension */
- void (*irq_func)(struct saa7146_dev*, u32* irq_mask);
-};
-
-struct saa7146_dma
-{
- dma_addr_t dma_handle;
- __le32 *cpu_addr;
-};
-
-struct saa7146_dev
-{
- struct module *module;
-
- struct v4l2_device v4l2_dev;
- struct v4l2_ctrl_handler ctrl_handler;
-
- /* different device locks */
- spinlock_t slock;
- struct mutex v4l2_lock;
-
- unsigned char __iomem *mem; /* pointer to mapped IO memory */
- u32 revision; /* chip revision; needed for bug-workarounds*/
-
- /* pci-device & irq stuff*/
- char name[32];
- struct pci_dev *pci;
- u32 int_todo;
- spinlock_t int_slock;
-
- /* extension handling */
- struct saa7146_extension *ext; /* indicates if handled by extension */
- void *ext_priv; /* pointer for extension private use (most likely some private data) */
- struct saa7146_ext_vv *ext_vv_data;
-
- /* per device video/vbi information (if available) */
- struct saa7146_vv *vv_data;
- void (*vv_callback)(struct saa7146_dev *dev, unsigned long status);
-
- /* i2c-stuff */
- struct mutex i2c_lock;
-
- u32 i2c_bitrate;
- struct saa7146_dma d_i2c; /* pointer to i2c memory */
- wait_queue_head_t i2c_wq;
- int i2c_op;
-
- /* memories */
- struct saa7146_dma d_rps0;
- struct saa7146_dma d_rps1;
-};
-
-static inline struct saa7146_dev *to_saa7146_dev(struct v4l2_device *v4l2_dev)
-{
- return container_of(v4l2_dev, struct saa7146_dev, v4l2_dev);
-}
-
-/* from saa7146_i2c.c */
-int saa7146_i2c_adapter_prepare(struct saa7146_dev *dev, struct i2c_adapter *i2c_adapter, u32 bitrate);
-
-/* from saa7146_core.c */
-int saa7146_register_extension(struct saa7146_extension*);
-int saa7146_unregister_extension(struct saa7146_extension*);
-struct saa7146_format* saa7146_format_by_fourcc(struct saa7146_dev *dev, int fourcc);
-int saa7146_pgtable_alloc(struct pci_dev *pci, struct saa7146_pgtable *pt);
-void saa7146_pgtable_free(struct pci_dev *pci, struct saa7146_pgtable *pt);
-int saa7146_pgtable_build_single(struct pci_dev *pci, struct saa7146_pgtable *pt, struct scatterlist *list, int length );
-void *saa7146_vmalloc_build_pgtable(struct pci_dev *pci, long length, struct saa7146_pgtable *pt);
-void saa7146_vfree_destroy_pgtable(struct pci_dev *pci, void *mem, struct saa7146_pgtable *pt);
-void saa7146_setgpio(struct saa7146_dev *dev, int port, u32 data);
-int saa7146_wait_for_debi_done(struct saa7146_dev *dev, int nobusyloop);
-
-/* some memory sizes */
-#define SAA7146_I2C_MEM ( 1*PAGE_SIZE)
-#define SAA7146_RPS_MEM ( 1*PAGE_SIZE)
-
-/* some i2c constants */
-#define SAA7146_I2C_TIMEOUT 100 /* i2c-timeout-value in ms */
-#define SAA7146_I2C_RETRIES 3 /* how many times shall we retry an i2c-operation? */
-#define SAA7146_I2C_DELAY 5 /* time we wait after certain i2c-operations */
-
-/* unsorted defines */
-#define ME1 0x0000000800
-#define PV1 0x0000000008
-
-/* gpio defines */
-#define SAA7146_GPIO_INPUT 0x00
-#define SAA7146_GPIO_IRQHI 0x10
-#define SAA7146_GPIO_IRQLO 0x20
-#define SAA7146_GPIO_IRQHL 0x30
-#define SAA7146_GPIO_OUTLO 0x40
-#define SAA7146_GPIO_OUTHI 0x50
-
-/* debi defines */
-#define DEBINOSWAP 0x000e0000
-
-/* define for the register programming sequencer (rps) */
-#define CMD_NOP 0x00000000 /* No operation */
-#define CMD_CLR_EVENT 0x00000000 /* Clear event */
-#define CMD_SET_EVENT 0x10000000 /* Set signal event */
-#define CMD_PAUSE 0x20000000 /* Pause */
-#define CMD_CHECK_LATE 0x30000000 /* Check late */
-#define CMD_UPLOAD 0x40000000 /* Upload */
-#define CMD_STOP 0x50000000 /* Stop */
-#define CMD_INTERRUPT 0x60000000 /* Interrupt */
-#define CMD_JUMP 0x80000000 /* Jump */
-#define CMD_WR_REG 0x90000000 /* Write (load) register */
-#define CMD_RD_REG 0xa0000000 /* Read (store) register */
-#define CMD_WR_REG_MASK 0xc0000000 /* Write register with mask */
-
-#define CMD_OAN MASK_27
-#define CMD_INV MASK_26
-#define CMD_SIG4 MASK_25
-#define CMD_SIG3 MASK_24
-#define CMD_SIG2 MASK_23
-#define CMD_SIG1 MASK_22
-#define CMD_SIG0 MASK_21
-#define CMD_O_FID_B MASK_14
-#define CMD_E_FID_B MASK_13
-#define CMD_O_FID_A MASK_12
-#define CMD_E_FID_A MASK_11
-
-/* some events and command modifiers for rps1 squarewave generator */
-#define EVT_HS (1<<15) // Source Line Threshold reached
-#define EVT_VBI_B (1<<9) // VSYNC Event
-#define RPS_OAN (1<<27) // 1: OR events, 0: AND events
-#define RPS_INV (1<<26) // Invert (compound) event
-#define GPIO3_MSK 0xFF000000 // GPIO #3 control bits
-
-/* Bit mask constants */
-#define MASK_00 0x00000001 /* Mask value for bit 0 */
-#define MASK_01 0x00000002 /* Mask value for bit 1 */
-#define MASK_02 0x00000004 /* Mask value for bit 2 */
-#define MASK_03 0x00000008 /* Mask value for bit 3 */
-#define MASK_04 0x00000010 /* Mask value for bit 4 */
-#define MASK_05 0x00000020 /* Mask value for bit 5 */
-#define MASK_06 0x00000040 /* Mask value for bit 6 */
-#define MASK_07 0x00000080 /* Mask value for bit 7 */
-#define MASK_08 0x00000100 /* Mask value for bit 8 */
-#define MASK_09 0x00000200 /* Mask value for bit 9 */
-#define MASK_10 0x00000400 /* Mask value for bit 10 */
-#define MASK_11 0x00000800 /* Mask value for bit 11 */
-#define MASK_12 0x00001000 /* Mask value for bit 12 */
-#define MASK_13 0x00002000 /* Mask value for bit 13 */
-#define MASK_14 0x00004000 /* Mask value for bit 14 */
-#define MASK_15 0x00008000 /* Mask value for bit 15 */
-#define MASK_16 0x00010000 /* Mask value for bit 16 */
-#define MASK_17 0x00020000 /* Mask value for bit 17 */
-#define MASK_18 0x00040000 /* Mask value for bit 18 */
-#define MASK_19 0x00080000 /* Mask value for bit 19 */
-#define MASK_20 0x00100000 /* Mask value for bit 20 */
-#define MASK_21 0x00200000 /* Mask value for bit 21 */
-#define MASK_22 0x00400000 /* Mask value for bit 22 */
-#define MASK_23 0x00800000 /* Mask value for bit 23 */
-#define MASK_24 0x01000000 /* Mask value for bit 24 */
-#define MASK_25 0x02000000 /* Mask value for bit 25 */
-#define MASK_26 0x04000000 /* Mask value for bit 26 */
-#define MASK_27 0x08000000 /* Mask value for bit 27 */
-#define MASK_28 0x10000000 /* Mask value for bit 28 */
-#define MASK_29 0x20000000 /* Mask value for bit 29 */
-#define MASK_30 0x40000000 /* Mask value for bit 30 */
-#define MASK_31 0x80000000 /* Mask value for bit 31 */
-
-#define MASK_B0 0x000000ff /* Mask value for byte 0 */
-#define MASK_B1 0x0000ff00 /* Mask value for byte 1 */
-#define MASK_B2 0x00ff0000 /* Mask value for byte 2 */
-#define MASK_B3 0xff000000 /* Mask value for byte 3 */
-
-#define MASK_W0 0x0000ffff /* Mask value for word 0 */
-#define MASK_W1 0xffff0000 /* Mask value for word 1 */
-
-#define MASK_PA 0xfffffffc /* Mask value for physical address */
-#define MASK_PR 0xfffffffe /* Mask value for protection register */
-#define MASK_ER 0xffffffff /* Mask value for the entire register */
-
-#define MASK_NONE 0x00000000 /* No mask */
-
-/* register aliases */
-#define BASE_ODD1 0x00 /* Video DMA 1 registers */
-#define BASE_EVEN1 0x04
-#define PROT_ADDR1 0x08
-#define PITCH1 0x0C
-#define BASE_PAGE1 0x10 /* Video DMA 1 base page */
-#define NUM_LINE_BYTE1 0x14
-
-#define BASE_ODD2 0x18 /* Video DMA 2 registers */
-#define BASE_EVEN2 0x1C
-#define PROT_ADDR2 0x20
-#define PITCH2 0x24
-#define BASE_PAGE2 0x28 /* Video DMA 2 base page */
-#define NUM_LINE_BYTE2 0x2C
-
-#define BASE_ODD3 0x30 /* Video DMA 3 registers */
-#define BASE_EVEN3 0x34
-#define PROT_ADDR3 0x38
-#define PITCH3 0x3C
-#define BASE_PAGE3 0x40 /* Video DMA 3 base page */
-#define NUM_LINE_BYTE3 0x44
-
-#define PCI_BT_V1 0x48 /* Video/FIFO 1 */
-#define PCI_BT_V2 0x49 /* Video/FIFO 2 */
-#define PCI_BT_V3 0x4A /* Video/FIFO 3 */
-#define PCI_BT_DEBI 0x4B /* DEBI */
-#define PCI_BT_A 0x4C /* Audio */
-
-#define DD1_INIT 0x50 /* Init setting of DD1 interface */
-
-#define DD1_STREAM_B 0x54 /* DD1 B video data stream handling */
-#define DD1_STREAM_A 0x56 /* DD1 A video data stream handling */
-
-#define BRS_CTRL 0x58 /* BRS control register */
-#define HPS_CTRL 0x5C /* HPS control register */
-#define HPS_V_SCALE 0x60 /* HPS vertical scale */
-#define HPS_V_GAIN 0x64 /* HPS vertical ACL and gain */
-#define HPS_H_PRESCALE 0x68 /* HPS horizontal prescale */
-#define HPS_H_SCALE 0x6C /* HPS horizontal scale */
-#define BCS_CTRL 0x70 /* BCS control */
-#define CHROMA_KEY_RANGE 0x74
-#define CLIP_FORMAT_CTRL 0x78 /* HPS outputs formats & clipping */
-
-#define DEBI_CONFIG 0x7C
-#define DEBI_COMMAND 0x80
-#define DEBI_PAGE 0x84
-#define DEBI_AD 0x88
-
-#define I2C_TRANSFER 0x8C
-#define I2C_STATUS 0x90
-
-#define BASE_A1_IN 0x94 /* Audio 1 input DMA */
-#define PROT_A1_IN 0x98
-#define PAGE_A1_IN 0x9C
-
-#define BASE_A1_OUT 0xA0 /* Audio 1 output DMA */
-#define PROT_A1_OUT 0xA4
-#define PAGE_A1_OUT 0xA8
-
-#define BASE_A2_IN 0xAC /* Audio 2 input DMA */
-#define PROT_A2_IN 0xB0
-#define PAGE_A2_IN 0xB4
-
-#define BASE_A2_OUT 0xB8 /* Audio 2 output DMA */
-#define PROT_A2_OUT 0xBC
-#define PAGE_A2_OUT 0xC0
-
-#define RPS_PAGE0 0xC4 /* RPS task 0 page register */
-#define RPS_PAGE1 0xC8 /* RPS task 1 page register */
-
-#define RPS_THRESH0 0xCC /* HBI threshold for task 0 */
-#define RPS_THRESH1 0xD0 /* HBI threshold for task 1 */
-
-#define RPS_TOV0 0xD4 /* RPS timeout for task 0 */
-#define RPS_TOV1 0xD8 /* RPS timeout for task 1 */
-
-#define IER 0xDC /* Interrupt enable register */
-
-#define GPIO_CTRL 0xE0 /* GPIO 0-3 register */
-
-#define EC1SSR 0xE4 /* Event cnt set 1 source select */
-#define EC2SSR 0xE8 /* Event cnt set 2 source select */
-#define ECT1R 0xEC /* Event cnt set 1 thresholds */
-#define ECT2R 0xF0 /* Event cnt set 2 thresholds */
-
-#define ACON1 0xF4
-#define ACON2 0xF8
-
-#define MC1 0xFC /* Main control register 1 */
-#define MC2 0x100 /* Main control register 2 */
-
-#define RPS_ADDR0 0x104 /* RPS task 0 address register */
-#define RPS_ADDR1 0x108 /* RPS task 1 address register */
-
-#define ISR 0x10C /* Interrupt status register */
-#define PSR 0x110 /* Primary status register */
-#define SSR 0x114 /* Secondary status register */
-
-#define EC1R 0x118 /* Event counter set 1 register */
-#define EC2R 0x11C /* Event counter set 2 register */
-
-#define PCI_VDP1 0x120 /* Video DMA pointer of FIFO 1 */
-#define PCI_VDP2 0x124 /* Video DMA pointer of FIFO 2 */
-#define PCI_VDP3 0x128 /* Video DMA pointer of FIFO 3 */
-#define PCI_ADP1 0x12C /* Audio DMA pointer of audio out 1 */
-#define PCI_ADP2 0x130 /* Audio DMA pointer of audio in 1 */
-#define PCI_ADP3 0x134 /* Audio DMA pointer of audio out 2 */
-#define PCI_ADP4 0x138 /* Audio DMA pointer of audio in 2 */
-#define PCI_DMA_DDP 0x13C /* DEBI DMA pointer */
-
-#define LEVEL_REP 0x140,
-#define A_TIME_SLOT1 0x180, /* from 180 - 1BC */
-#define A_TIME_SLOT2 0x1C0, /* from 1C0 - 1FC */
-
-/* isr masks */
-#define SPCI_PPEF 0x80000000 /* PCI parity error */
-#define SPCI_PABO 0x40000000 /* PCI access error (target or master abort) */
-#define SPCI_PPED 0x20000000 /* PCI parity error on 'real time data' */
-#define SPCI_RPS_I1 0x10000000 /* Interrupt issued by RPS1 */
-#define SPCI_RPS_I0 0x08000000 /* Interrupt issued by RPS0 */
-#define SPCI_RPS_LATE1 0x04000000 /* RPS task 1 is late */
-#define SPCI_RPS_LATE0 0x02000000 /* RPS task 0 is late */
-#define SPCI_RPS_E1 0x01000000 /* RPS error from task 1 */
-#define SPCI_RPS_E0 0x00800000 /* RPS error from task 0 */
-#define SPCI_RPS_TO1 0x00400000 /* RPS timeout task 1 */
-#define SPCI_RPS_TO0 0x00200000 /* RPS timeout task 0 */
-#define SPCI_UPLD 0x00100000 /* RPS in upload */
-#define SPCI_DEBI_S 0x00080000 /* DEBI status */
-#define SPCI_DEBI_E 0x00040000 /* DEBI error */
-#define SPCI_IIC_S 0x00020000 /* I2C status */
-#define SPCI_IIC_E 0x00010000 /* I2C error */
-#define SPCI_A2_IN 0x00008000 /* Audio 2 input DMA protection / limit */
-#define SPCI_A2_OUT 0x00004000 /* Audio 2 output DMA protection / limit */
-#define SPCI_A1_IN 0x00002000 /* Audio 1 input DMA protection / limit */
-#define SPCI_A1_OUT 0x00001000 /* Audio 1 output DMA protection / limit */
-#define SPCI_AFOU 0x00000800 /* Audio FIFO over- / underflow */
-#define SPCI_V_PE 0x00000400 /* Video protection address */
-#define SPCI_VFOU 0x00000200 /* Video FIFO over- / underflow */
-#define SPCI_FIDA 0x00000100 /* Field ID video port A */
-#define SPCI_FIDB 0x00000080 /* Field ID video port B */
-#define SPCI_PIN3 0x00000040 /* GPIO pin 3 */
-#define SPCI_PIN2 0x00000020 /* GPIO pin 2 */
-#define SPCI_PIN1 0x00000010 /* GPIO pin 1 */
-#define SPCI_PIN0 0x00000008 /* GPIO pin 0 */
-#define SPCI_ECS 0x00000004 /* Event counter 1, 2, 4, 5 */
-#define SPCI_EC3S 0x00000002 /* Event counter 3 */
-#define SPCI_EC0S 0x00000001 /* Event counter 0 */
-
-/* i2c */
-#define SAA7146_I2C_ABORT (1<<7)
-#define SAA7146_I2C_SPERR (1<<6)
-#define SAA7146_I2C_APERR (1<<5)
-#define SAA7146_I2C_DTERR (1<<4)
-#define SAA7146_I2C_DRERR (1<<3)
-#define SAA7146_I2C_AL (1<<2)
-#define SAA7146_I2C_ERR (1<<1)
-#define SAA7146_I2C_BUSY (1<<0)
-
-#define SAA7146_I2C_START (0x3)
-#define SAA7146_I2C_CONT (0x2)
-#define SAA7146_I2C_STOP (0x1)
-#define SAA7146_I2C_NOP (0x0)
-
-#define SAA7146_I2C_BUS_BIT_RATE_6400 (0x500)
-#define SAA7146_I2C_BUS_BIT_RATE_3200 (0x100)
-#define SAA7146_I2C_BUS_BIT_RATE_480 (0x400)
-#define SAA7146_I2C_BUS_BIT_RATE_320 (0x600)
-#define SAA7146_I2C_BUS_BIT_RATE_240 (0x700)
-#define SAA7146_I2C_BUS_BIT_RATE_120 (0x000)
-#define SAA7146_I2C_BUS_BIT_RATE_80 (0x200)
-#define SAA7146_I2C_BUS_BIT_RATE_60 (0x300)
-
-static inline void SAA7146_IER_DISABLE(struct saa7146_dev *x, unsigned y)
-{
- unsigned long flags;
- spin_lock_irqsave(&x->int_slock, flags);
- saa7146_write(x, IER, saa7146_read(x, IER) & ~y);
- spin_unlock_irqrestore(&x->int_slock, flags);
-}
-
-static inline void SAA7146_IER_ENABLE(struct saa7146_dev *x, unsigned y)
-{
- unsigned long flags;
- spin_lock_irqsave(&x->int_slock, flags);
- saa7146_write(x, IER, saa7146_read(x, IER) | y);
- spin_unlock_irqrestore(&x->int_slock, flags);
-}
-
-#endif
diff --git a/include/media/drv-intf/saa7146_vv.h b/include/media/drv-intf/saa7146_vv.h
deleted file mode 100644
index 635805fb35e8..000000000000
--- a/include/media/drv-intf/saa7146_vv.h
+++ /dev/null
@@ -1,266 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __SAA7146_VV__
-#define __SAA7146_VV__
-
-#include <media/v4l2-common.h>
-#include <media/v4l2-ioctl.h>
-#include <media/v4l2-fh.h>
-#include <media/drv-intf/saa7146.h>
-#include <media/videobuf-dma-sg.h>
-
-#define MAX_SAA7146_CAPTURE_BUFFERS 32 /* arbitrary */
-#define BUFFER_TIMEOUT (HZ/2) /* 0.5 seconds */
-
-#define WRITE_RPS0(x) do { \
- dev->d_rps0.cpu_addr[ count++ ] = cpu_to_le32(x); \
- } while (0);
-
-#define WRITE_RPS1(x) do { \
- dev->d_rps1.cpu_addr[ count++ ] = cpu_to_le32(x); \
- } while (0);
-
-struct saa7146_video_dma {
- u32 base_odd;
- u32 base_even;
- u32 prot_addr;
- u32 pitch;
- u32 base_page;
- u32 num_line_byte;
-};
-
-#define FORMAT_BYTE_SWAP 0x1
-#define FORMAT_IS_PLANAR 0x2
-
-struct saa7146_format {
- u32 pixelformat;
- u32 trans;
- u8 depth;
- u8 flags;
- u8 swap;
-};
-
-struct saa7146_standard
-{
- char *name;
- v4l2_std_id id;
-
- int v_offset; /* number of lines of vertical offset before processing */
- int v_field; /* number of lines in a field for HPS to process */
-
- int h_offset; /* horizontal offset of processing window */
- int h_pixels; /* number of horizontal pixels to process */
-
- int v_max_out;
- int h_max_out;
-};
-
-/* buffer for one video/vbi frame */
-struct saa7146_buf {
- /* common v4l buffer stuff -- must be first */
- struct videobuf_buffer vb;
-
- /* saa7146 specific */
- struct v4l2_pix_format *fmt;
- int (*activate)(struct saa7146_dev *dev,
- struct saa7146_buf *buf,
- struct saa7146_buf *next);
-
- /* page tables */
- struct saa7146_pgtable pt[3];
-};
-
-struct saa7146_dmaqueue {
- struct saa7146_dev *dev;
- struct saa7146_buf *curr;
- struct list_head queue;
- struct timer_list timeout;
-};
-
-struct saa7146_overlay {
- struct saa7146_fh *fh;
- struct v4l2_window win;
- struct v4l2_clip clips[16];
- int nclips;
-};
-
-/* per open data */
-struct saa7146_fh {
- /* Must be the first field! */
- struct v4l2_fh fh;
- struct saa7146_dev *dev;
-
- /* video capture */
- struct videobuf_queue video_q;
-
- /* vbi capture */
- struct videobuf_queue vbi_q;
-
- unsigned int resources; /* resource management for device open */
-};
-
-#define STATUS_OVERLAY 0x01
-#define STATUS_CAPTURE 0x02
-
-struct saa7146_vv
-{
- /* vbi capture */
- struct saa7146_dmaqueue vbi_dmaq;
- struct v4l2_vbi_format vbi_fmt;
- struct timer_list vbi_read_timeout;
- struct file *vbi_read_timeout_file;
- /* vbi workaround interrupt queue */
- wait_queue_head_t vbi_wq;
- int vbi_fieldcount;
- struct saa7146_fh *vbi_streaming;
-
- int video_status;
- struct saa7146_fh *video_fh;
-
- /* video overlay */
- struct saa7146_overlay ov;
- struct v4l2_framebuffer ov_fb;
- struct saa7146_format *ov_fmt;
- struct saa7146_fh *ov_suspend;
-
- /* video capture */
- struct saa7146_dmaqueue video_dmaq;
- struct v4l2_pix_format video_fmt;
- enum v4l2_field last_field;
-
- /* common: fixme? shouldn't this be in saa7146_fh?
- (this leads to a more complicated question: shall the driver
- store the different settings (for example S_INPUT) for every open
- and restore it appropriately, or should all settings be common for
- all opens? currently, we do the latter, like all other
- drivers do... */
- struct saa7146_standard *standard;
-
- int vflip;
- int hflip;
- int current_hps_source;
- int current_hps_sync;
-
- struct saa7146_dma d_clipping; /* pointer to clipping memory */
-
- unsigned int resources; /* resource management for device */
-};
-
-/* flags */
-#define SAA7146_USE_PORT_B_FOR_VBI 0x2 /* use input port b for vbi hardware bug workaround */
-
-struct saa7146_ext_vv
-{
- /* information about the video capabilities of the device */
- int inputs;
- int audios;
- u32 capabilities;
- int flags;
-
- /* additionally supported transmission standards */
- struct saa7146_standard *stds;
- int num_stds;
- int (*std_callback)(struct saa7146_dev*, struct saa7146_standard *);
-
- /* the extension can override this */
- struct v4l2_ioctl_ops vid_ops;
- struct v4l2_ioctl_ops vbi_ops;
- /* pointer to the saa7146 core ops */
- const struct v4l2_ioctl_ops *core_ops;
-
- struct v4l2_file_operations vbi_fops;
-};
-
-struct saa7146_use_ops {
- void (*init)(struct saa7146_dev *, struct saa7146_vv *);
- int(*open)(struct saa7146_dev *, struct file *);
- void (*release)(struct saa7146_dev *, struct file *);
- void (*irq_done)(struct saa7146_dev *, unsigned long status);
- ssize_t (*read)(struct file *, char __user *, size_t, loff_t *);
-};
-
-/* from saa7146_fops.c */
-int saa7146_register_device(struct video_device *vid, struct saa7146_dev *dev, char *name, int type);
-int saa7146_unregister_device(struct video_device *vid, struct saa7146_dev *dev);
-void saa7146_buffer_finish(struct saa7146_dev *dev, struct saa7146_dmaqueue *q, int state);
-void saa7146_buffer_next(struct saa7146_dev *dev, struct saa7146_dmaqueue *q,int vbi);
-int saa7146_buffer_queue(struct saa7146_dev *dev, struct saa7146_dmaqueue *q, struct saa7146_buf *buf);
-void saa7146_buffer_timeout(struct timer_list *t);
-void saa7146_dma_free(struct saa7146_dev* dev,struct videobuf_queue *q,
- struct saa7146_buf *buf);
-
-int saa7146_vv_init(struct saa7146_dev* dev, struct saa7146_ext_vv *ext_vv);
-int saa7146_vv_release(struct saa7146_dev* dev);
-
-/* from saa7146_hlp.c */
-int saa7146_enable_overlay(struct saa7146_fh *fh);
-void saa7146_disable_overlay(struct saa7146_fh *fh);
-
-void saa7146_set_capture(struct saa7146_dev *dev, struct saa7146_buf *buf, struct saa7146_buf *next);
-void saa7146_write_out_dma(struct saa7146_dev* dev, int which, struct saa7146_video_dma* vdma) ;
-void saa7146_set_hps_source_and_sync(struct saa7146_dev *saa, int source, int sync);
-void saa7146_set_gpio(struct saa7146_dev *saa, u8 pin, u8 data);
-
-/* from saa7146_video.c */
-extern const struct v4l2_ioctl_ops saa7146_video_ioctl_ops;
-extern const struct v4l2_ioctl_ops saa7146_vbi_ioctl_ops;
-extern const struct saa7146_use_ops saa7146_video_uops;
-int saa7146_start_preview(struct saa7146_fh *fh);
-int saa7146_stop_preview(struct saa7146_fh *fh);
-long saa7146_video_do_ioctl(struct file *file, unsigned int cmd, void *arg);
-int saa7146_s_ctrl(struct v4l2_ctrl *ctrl);
-
-/* from saa7146_vbi.c */
-extern const struct saa7146_use_ops saa7146_vbi_uops;
-
-/* resource management functions */
-int saa7146_res_get(struct saa7146_fh *fh, unsigned int bit);
-void saa7146_res_free(struct saa7146_fh *fh, unsigned int bits);
-
-#define RESOURCE_DMA1_HPS 0x1
-#define RESOURCE_DMA2_CLP 0x2
-#define RESOURCE_DMA3_BRS 0x4
-
-/* saa7146 source inputs */
-#define SAA7146_HPS_SOURCE_PORT_A 0x00
-#define SAA7146_HPS_SOURCE_PORT_B 0x01
-#define SAA7146_HPS_SOURCE_YPB_CPA 0x02
-#define SAA7146_HPS_SOURCE_YPA_CPB 0x03
-
-/* sync inputs */
-#define SAA7146_HPS_SYNC_PORT_A 0x00
-#define SAA7146_HPS_SYNC_PORT_B 0x01
-
-/* some memory sizes */
-/* max. 16 clipping rectangles */
-#define SAA7146_CLIPPING_MEM (16 * 4 * sizeof(u32))
-
-/* some defines for the various clipping-modes */
-#define SAA7146_CLIPPING_RECT 0x4
-#define SAA7146_CLIPPING_RECT_INVERTED 0x5
-#define SAA7146_CLIPPING_MASK 0x6
-#define SAA7146_CLIPPING_MASK_INVERTED 0x7
-
-/* output formats: each entry holds four information */
-#define RGB08_COMPOSED 0x0217 /* composed is used in the sense of "not-planar" */
-/* this means: planar?=0, yuv2rgb-conversion-mode=2, dither=yes(=1), format-mode = 7 */
-#define RGB15_COMPOSED 0x0213
-#define RGB16_COMPOSED 0x0210
-#define RGB24_COMPOSED 0x0201
-#define RGB32_COMPOSED 0x0202
-
-#define Y8 0x0006
-#define YUV411_COMPOSED 0x0003
-#define YUV422_COMPOSED 0x0000
-/* this means: planar?=1, yuv2rgb-conversion-mode=0, dither=no(=0), format-mode = b */
-#define YUV411_DECOMPOSED 0x100b
-#define YUV422_DECOMPOSED 0x1009
-#define YUV420_DECOMPOSED 0x100a
-
-#define IS_PLANAR(x) (x & 0xf000)
-
-/* misc defines */
-#define SAA7146_NO_SWAP (0x0)
-#define SAA7146_TWO_BYTE_SWAP (0x1)
-#define SAA7146_FOUR_BYTE_SWAP (0x2)
-
-#endif
diff --git a/include/media/v4l2-common.h b/include/media/v4l2-common.h
index b708d63995f4..725ff91b26e0 100644
--- a/include/media/v4l2-common.h
+++ b/include/media/v4l2-common.h
@@ -540,6 +540,10 @@ int v4l2_fill_pixfmt_mp(struct v4l2_pix_format_mplane *pixfmt, u32 pixelformat,
s64 v4l2_get_link_freq(struct v4l2_ctrl_handler *handler, unsigned int mul,
unsigned int div);
+void v4l2_simplify_fraction(u32 *numerator, u32 *denominator,
+ unsigned int n_terms, unsigned int threshold);
+u32 v4l2_fraction_to_interval(u32 numerator, u32 denominator);
+
static inline u64 v4l2_buffer_get_timestamp(const struct v4l2_buffer *buf)
{
/*
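The two helpers declared in the hunk above were previously private to the uvc driver and are now exported through v4l2-common. A minimal usage sketch in C follows; it assumes the helpers keep the semantics of the former uvc_*() versions (the interval is returned in 100 ns units and the fraction is reduced by continued fractions), and the function name plus the 8/333 arguments are illustrative, not taken from this patch.

#include <linux/types.h>
#include <media/v4l2-common.h>

/*
 * Sketch only: convert a ~29.97 fps frame rate into a frame interval and
 * simplify a fraction before reporting it. Assumes v4l2_fraction_to_interval()
 * returns the interval in 100 ns units, as the former uvc helper did.
 */
static void example_frame_interval(void)
{
	u32 num = 1001, den = 30000;	/* frame interval = 1001/30000 s */
	u32 interval;

	interval = v4l2_fraction_to_interval(num, den);
	/* interval is now roughly 333667 (in 100 ns units). */

	/* 8 terms / threshold 333 are the values the uvc driver has used. */
	v4l2_simplify_fraction(&num, &den, 8, 333);
}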
diff --git a/include/media/v4l2-ctrls.h b/include/media/v4l2-ctrls.h
index 00828a4f9404..b76a0714d425 100644
--- a/include/media/v4l2-ctrls.h
+++ b/include/media/v4l2-ctrls.h
@@ -128,13 +128,13 @@ struct v4l2_ctrl_ops {
* otherwise.
*/
struct v4l2_ctrl_type_ops {
- bool (*equal)(const struct v4l2_ctrl *ctrl, u32 idx,
+ bool (*equal)(const struct v4l2_ctrl *ctrl, u32 elems,
union v4l2_ctrl_ptr ptr1,
union v4l2_ctrl_ptr ptr2);
- void (*init)(const struct v4l2_ctrl *ctrl, u32 idx,
+ void (*init)(const struct v4l2_ctrl *ctrl, u32 from_idx, u32 tot_elems,
union v4l2_ctrl_ptr ptr);
void (*log)(const struct v4l2_ctrl *ctrl);
- int (*validate)(const struct v4l2_ctrl *ctrl, u32 idx,
+ int (*validate)(const struct v4l2_ctrl *ctrl, u32 elems,
union v4l2_ctrl_ptr ptr);
};
@@ -203,7 +203,7 @@ typedef void (*v4l2_ctrl_notify_fnc)(struct v4l2_ctrl *ctrl, void *priv);
* @elem_size: The size in bytes of the control.
* @new_elems: The number of elements in p_new. This is the same as @elems,
* except for dynamic arrays. In that case it is in the range of
- * 1 to @p_dyn_alloc_elems.
+ * 1 to @p_array_alloc_elems.
* @dims: The size of each dimension.
* @nr_of_dims:The number of dimensions in @dims.
* @menu_skip_mask: The control's skip mask for menu controls. This makes it
@@ -227,12 +227,11 @@ typedef void (*v4l2_ctrl_notify_fnc)(struct v4l2_ctrl *ctrl, void *priv);
* not freed when the control is deleted. Should this be needed
* then a new internal bitfield can be added to tell the framework
* to free this pointer.
- * @p_dyn: Pointer to the dynamically allocated array. Only valid if
- * @is_dyn_array is true.
- * @p_dyn_alloc_elems: The number of elements in the dynamically allocated
- * array for both the cur and new values. So @p_dyn is actually
- * sized for 2 * @p_dyn_alloc_elems * @elem_size. Only valid if
- * @is_dyn_array is true.
+ * @p_array: Pointer to the allocated array. Only valid if @is_array is true.
+ * @p_array_alloc_elems: The number of elements in the allocated
+ * array for both the cur and new values. So @p_array is actually
+ * sized for 2 * @p_array_alloc_elems * @elem_size. Only valid if
+ * @is_array is true.
* @cur: Structure to store the current value.
* @cur.val: The control's current value, if the @type is represented via
* a u32 integer (see &enum v4l2_ctrl_type).
@@ -291,8 +290,8 @@ struct v4l2_ctrl {
};
unsigned long flags;
void *priv;
- void *p_dyn;
- u32 p_dyn_alloc_elems;
+ void *p_array;
+ u32 p_array_alloc_elems;
s32 val;
struct {
s32 val;
@@ -319,15 +318,15 @@ struct v4l2_ctrl {
* from a cluster with multiple controls twice (when the first
* control of a cluster is applied, they all are).
* @p_req_valid: If set, then p_req contains the control value for the request.
- * @p_req_dyn_enomem: If set, then p_req is invalid since allocating space for
- * a dynamic array failed. Attempting to read this value shall
- * result in ENOMEM. Only valid if ctrl->is_dyn_array is true.
- * @p_req_dyn_alloc_elems: The number of elements allocated for the dynamic
- * array. Only valid if @p_req_valid and ctrl->is_dyn_array are
+ * @p_req_array_enomem: If set, then p_req is invalid since allocating space for
+ * an array failed. Attempting to read this value shall
+ * result in ENOMEM. Only valid if ctrl->is_array is true.
+ * @p_req_array_alloc_elems: The number of elements allocated for the
+ * array. Only valid if @p_req_valid and ctrl->is_array are
* true.
* @p_req_elems: The number of elements in @p_req. This is the same as
* ctrl->elems, except for dynamic arrays. In that case it is in
- * the range of 1 to @p_req_dyn_alloc_elems. Only valid if
+ * the range of 1 to @p_req_array_alloc_elems. Only valid if
* @p_req_valid is true.
* @p_req: If the control handler containing this control reference
* is bound to a media request, then this points to the
@@ -349,8 +348,8 @@ struct v4l2_ctrl_ref {
bool from_other_dev;
bool req_done;
bool p_req_valid;
- bool p_req_dyn_enomem;
- u32 p_req_dyn_alloc_elems;
+ bool p_req_array_enomem;
+ u32 p_req_array_alloc_elems;
u32 p_req_elems;
union v4l2_ctrl_ptr p_req;
};
@@ -959,6 +958,59 @@ static inline int v4l2_ctrl_modify_range(struct v4l2_ctrl *ctrl,
}
/**
+ *__v4l2_ctrl_modify_dimensions() - Unlocked variant of v4l2_ctrl_modify_dimensions()
+ *
+ * @ctrl: The control to update.
+ * @dims: The control's new dimensions.
+ *
+ * Update the dimensions of an array control on the fly. The elements of the
+ * array are reset to their default value, even if the dimensions are
+ * unchanged.
+ *
+ * An error is returned if @dims is invalid for this control.
+ *
+ * The caller is responsible for acquiring the control handler mutex on behalf
+ * of __v4l2_ctrl_modify_dimensions().
+ *
+ * Note: calling this function when the same control is used in pending requests
+ * is untested. It should work (a request with the wrong size of the control
+ * will drop that control silently), but it will be very confusing.
+ */
+int __v4l2_ctrl_modify_dimensions(struct v4l2_ctrl *ctrl,
+ u32 dims[V4L2_CTRL_MAX_DIMS]);
+
+/**
+ * v4l2_ctrl_modify_dimensions() - Update the dimensions of an array control.
+ *
+ * @ctrl: The control to update.
+ * @dims: The control's new dimensions.
+ *
+ * Update the dimensions of an array control on the fly. The elements of the
+ * array are reset to their default value, even if the dimensions are
+ * unchanged.
+ *
+ * An error is returned if @dims is invalid for this control type.
+ *
+ * This function assumes that the control handler is not locked and will
+ * take the lock itself.
+ *
+ * Note: calling this function when the same control is used in pending requests
+ * is untested. It should work (a request with the wrong size of the control
+ * will drop that control silently), but it will be very confusing.
+ */
+static inline int v4l2_ctrl_modify_dimensions(struct v4l2_ctrl *ctrl,
+ u32 dims[V4L2_CTRL_MAX_DIMS])
+{
+ int rval;
+
+ v4l2_ctrl_lock(ctrl);
+ rval = __v4l2_ctrl_modify_dimensions(ctrl, dims);
+ v4l2_ctrl_unlock(ctrl);
+
+ return rval;
+}
+
+/**
* v4l2_ctrl_notify() - Function to set a notify callback for a control.
*
* @ctrl: The control.
@@ -1486,4 +1538,52 @@ int v4l2_ctrl_subdev_log_status(struct v4l2_subdev *sd);
int v4l2_ctrl_new_fwnode_properties(struct v4l2_ctrl_handler *hdl,
const struct v4l2_ctrl_ops *ctrl_ops,
const struct v4l2_fwnode_device_properties *p);
+
+/**
+ * v4l2_ctrl_type_op_equal - Default v4l2_ctrl_type_ops equal callback.
+ *
+ * @ctrl: The v4l2_ctrl pointer.
+ * @elems: The number of elements to compare.
+ * @ptr1: A v4l2 control value.
+ * @ptr2: A v4l2 control value.
+ *
+ * Return: true if values are equal, otherwise false.
+ */
+bool v4l2_ctrl_type_op_equal(const struct v4l2_ctrl *ctrl, u32 elems,
+ union v4l2_ctrl_ptr ptr1, union v4l2_ctrl_ptr ptr2);
+
+/**
+ * v4l2_ctrl_type_op_init - Default v4l2_ctrl_type_ops init callback.
+ *
+ * @ctrl: The v4l2_ctrl pointer.
+ * @from_idx: Starting element index.
+ * @elems: The number of elements to initialize.
+ * @ptr: The v4l2 control value.
+ *
+ * Return: void
+ */
+void v4l2_ctrl_type_op_init(const struct v4l2_ctrl *ctrl, u32 from_idx,
+ u32 elems, union v4l2_ctrl_ptr ptr);
+
+/**
+ * v4l2_ctrl_type_op_log - Default v4l2_ctrl_type_ops log callback.
+ *
+ * @ctrl: The v4l2_ctrl pointer.
+ *
+ * Return: void
+ */
+void v4l2_ctrl_type_op_log(const struct v4l2_ctrl *ctrl);
+
+/**
+ * v4l2_ctrl_type_op_validate - Default v4l2_ctrl_type_ops validate callback.
+ *
+ * @ctrl: The v4l2_ctrl pointer.
+ * @elems: The number of elements in the control.
+ * @ptr: The v4l2 control value.
+ *
+ * Return: 0 on success, a negative error code on failure.
+ */
+int v4l2_ctrl_type_op_validate(const struct v4l2_ctrl *ctrl, u32 elems,
+ union v4l2_ctrl_ptr ptr);
+
#endif
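The kernel-doc added above describes the new array-resize API. The sketch below shows how a driver might call v4l2_ctrl_modify_dimensions() on an existing array control; the wrapper name and the meaning of the two dimensions are assumptions for illustration only and are not part of this patch.

#include <media/v4l2-ctrls.h>

/*
 * Sketch only: resize a two-dimensional array control at runtime, e.g.
 * after a sensor mode change.
 */
static int example_resize_array_ctrl(struct v4l2_ctrl *array_ctrl,
				     u32 rows, u32 cols)
{
	u32 dims[V4L2_CTRL_MAX_DIMS] = { rows, cols };

	/*
	 * Takes the handler lock itself; use __v4l2_ctrl_modify_dimensions()
	 * when the lock is already held. Note that all elements are reset
	 * to their default value, even if the dimensions are unchanged.
	 */
	return v4l2_ctrl_modify_dimensions(array_ctrl, dims);
}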
diff --git a/include/media/v4l2-mem2mem.h b/include/media/v4l2-mem2mem.h
index fdbd5257e020..bb9de6a899e0 100644
--- a/include/media/v4l2-mem2mem.h
+++ b/include/media/v4l2-mem2mem.h
@@ -486,10 +486,10 @@ __poll_t v4l2_m2m_poll(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
* @vma: pointer to struct &vm_area_struct
*
* Call from driver's mmap() function. Will handle mmap() for both queues
- * seamlessly for videobuffer, which will receive normal per-queue offsets and
- * proper videobuf queue pointers. The differentiation is made outside videobuf
- * by adding a predefined offset to buffers from one of the queues and
- * subtracting it before passing it back to videobuf. Only drivers (and
+ * seamlessly for the video buffer, which will receive normal per-queue offsets
+ * and proper vb2 queue pointers. The differentiation is made outside
+ * vb2 by adding a predefined offset to buffers from one of the queues
+ * and subtracting it before passing it back to vb2. Only drivers (and
* thus applications) receive modified offsets.
*/
int v4l2_m2m_mmap(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
@@ -544,7 +544,7 @@ void v4l2_m2m_release(struct v4l2_m2m_dev *m2m_dev);
* @m2m_dev: opaque pointer to the internal data to handle M2M context
* @drv_priv: driver's instance private data
* @queue_init: a callback for queue type-specific initialization function
- * to be used for initializing videobuf_queues
+ * to be used for initializing vb2_queues
*
* Usually called from driver's ``open()`` function.
*/
@@ -579,7 +579,7 @@ void v4l2_m2m_ctx_release(struct v4l2_m2m_ctx *m2m_ctx);
* @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
* @vbuf: pointer to struct &vb2_v4l2_buffer
*
- * Call from videobuf_queue_ops->ops->buf_queue, videobuf_queue_ops callback.
+ * Call from vb2_queue_ops->ops->buf_queue, vb2_queue_ops callback.
*/
void v4l2_m2m_buf_queue(struct v4l2_m2m_ctx *m2m_ctx,
struct vb2_v4l2_buffer *vbuf);
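The updated comment refers to the vb2_queue_ops buf_queue callback. A minimal sketch of such a callback forwarding buffers to the m2m framework follows; the driver context layout (my_ctx, fh.m2m_ctx) is assumed for illustration and does not come from this patch.

#include <media/v4l2-fh.h>
#include <media/v4l2-mem2mem.h>
#include <media/videobuf2-v4l2.h>

/* Illustrative driver context; only fh.m2m_ctx matters here. */
struct my_ctx {
	struct v4l2_fh fh;	/* fh.m2m_ctx is set up in the driver's open() */
};

/* The vb2_queue_ops .buf_queue callback the comment above refers to. */
static void my_buf_queue(struct vb2_buffer *vb)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct my_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);

	/* Hand the buffer over to the m2m framework's ready-buffer list. */
	v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf);
}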
diff --git a/include/media/v4l2-uvc.h b/include/media/v4l2-uvc.h
new file mode 100644
index 000000000000..f83e31661333
--- /dev/null
+++ b/include/media/v4l2-uvc.h
@@ -0,0 +1,359 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * v4l2 uvc internal API header
+ *
+ * Some commonly needed functions for uvc drivers
+ */
+
+#ifndef __LINUX_V4L2_UVC_H
+#define __LINUX_V4L2_UVC_H
+
+/* ------------------------------------------------------------------------
+ * GUIDs
+ */
+#define UVC_GUID_UVC_CAMERA \
+ {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, \
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01}
+#define UVC_GUID_UVC_OUTPUT \
+ {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, \
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02}
+#define UVC_GUID_UVC_MEDIA_TRANSPORT_INPUT \
+ {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, \
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03}
+#define UVC_GUID_UVC_PROCESSING \
+ {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, \
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x01}
+#define UVC_GUID_UVC_SELECTOR \
+ {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, \
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x02}
+#define UVC_GUID_EXT_GPIO_CONTROLLER \
+ {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, \
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x03}
+
+#define UVC_GUID_FORMAT_MJPEG \
+ { 'M', 'J', 'P', 'G', 0x00, 0x00, 0x10, 0x00, \
+ 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
+#define UVC_GUID_FORMAT_YUY2 \
+ { 'Y', 'U', 'Y', '2', 0x00, 0x00, 0x10, 0x00, \
+ 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
+#define UVC_GUID_FORMAT_YUY2_ISIGHT \
+ { 'Y', 'U', 'Y', '2', 0x00, 0x00, 0x10, 0x00, \
+ 0x80, 0x00, 0x00, 0x00, 0x00, 0x38, 0x9b, 0x71}
+#define UVC_GUID_FORMAT_NV12 \
+ { 'N', 'V', '1', '2', 0x00, 0x00, 0x10, 0x00, \
+ 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
+#define UVC_GUID_FORMAT_YV12 \
+ { 'Y', 'V', '1', '2', 0x00, 0x00, 0x10, 0x00, \
+ 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
+#define UVC_GUID_FORMAT_I420 \
+ { 'I', '4', '2', '0', 0x00, 0x00, 0x10, 0x00, \
+ 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
+#define UVC_GUID_FORMAT_UYVY \
+ { 'U', 'Y', 'V', 'Y', 0x00, 0x00, 0x10, 0x00, \
+ 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
+#define UVC_GUID_FORMAT_Y800 \
+ { 'Y', '8', '0', '0', 0x00, 0x00, 0x10, 0x00, \
+ 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
+#define UVC_GUID_FORMAT_Y8 \
+ { 'Y', '8', ' ', ' ', 0x00, 0x00, 0x10, 0x00, \
+ 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
+#define UVC_GUID_FORMAT_Y10 \
+ { 'Y', '1', '0', ' ', 0x00, 0x00, 0x10, 0x00, \
+ 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
+#define UVC_GUID_FORMAT_Y12 \
+ { 'Y', '1', '2', ' ', 0x00, 0x00, 0x10, 0x00, \
+ 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
+#define UVC_GUID_FORMAT_Y16 \
+ { 'Y', '1', '6', ' ', 0x00, 0x00, 0x10, 0x00, \
+ 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
+#define UVC_GUID_FORMAT_BY8 \
+ { 'B', 'Y', '8', ' ', 0x00, 0x00, 0x10, 0x00, \
+ 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
+#define UVC_GUID_FORMAT_BA81 \
+ { 'B', 'A', '8', '1', 0x00, 0x00, 0x10, 0x00, \
+ 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
+#define UVC_GUID_FORMAT_GBRG \
+ { 'G', 'B', 'R', 'G', 0x00, 0x00, 0x10, 0x00, \
+ 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
+#define UVC_GUID_FORMAT_GRBG \
+ { 'G', 'R', 'B', 'G', 0x00, 0x00, 0x10, 0x00, \
+ 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
+#define UVC_GUID_FORMAT_RGGB \
+ { 'R', 'G', 'G', 'B', 0x00, 0x00, 0x10, 0x00, \
+ 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
+#define UVC_GUID_FORMAT_BG16 \
+ { 'B', 'G', '1', '6', 0x00, 0x00, 0x10, 0x00, \
+ 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
+#define UVC_GUID_FORMAT_GB16 \
+ { 'G', 'B', '1', '6', 0x00, 0x00, 0x10, 0x00, \
+ 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
+#define UVC_GUID_FORMAT_RG16 \
+ { 'R', 'G', '1', '6', 0x00, 0x00, 0x10, 0x00, \
+ 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
+#define UVC_GUID_FORMAT_GR16 \
+ { 'G', 'R', '1', '6', 0x00, 0x00, 0x10, 0x00, \
+ 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
+#define UVC_GUID_FORMAT_RGBP \
+ { 'R', 'G', 'B', 'P', 0x00, 0x00, 0x10, 0x00, \
+ 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
+#define UVC_GUID_FORMAT_BGR3 \
+ { 0x7d, 0xeb, 0x36, 0xe4, 0x4f, 0x52, 0xce, 0x11, \
+ 0x9f, 0x53, 0x00, 0x20, 0xaf, 0x0b, 0xa7, 0x70}
+#define UVC_GUID_FORMAT_M420 \
+ { 'M', '4', '2', '0', 0x00, 0x00, 0x10, 0x00, \
+ 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
+
+#define UVC_GUID_FORMAT_H264 \
+ { 'H', '2', '6', '4', 0x00, 0x00, 0x10, 0x00, \
+ 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
+#define UVC_GUID_FORMAT_H265 \
+ { 'H', '2', '6', '5', 0x00, 0x00, 0x10, 0x00, \
+ 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
+#define UVC_GUID_FORMAT_Y8I \
+ { 'Y', '8', 'I', ' ', 0x00, 0x00, 0x10, 0x00, \
+ 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
+#define UVC_GUID_FORMAT_Y12I \
+ { 'Y', '1', '2', 'I', 0x00, 0x00, 0x10, 0x00, \
+ 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
+#define UVC_GUID_FORMAT_Z16 \
+ { 'Z', '1', '6', ' ', 0x00, 0x00, 0x10, 0x00, \
+ 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
+#define UVC_GUID_FORMAT_RW10 \
+ { 'R', 'W', '1', '0', 0x00, 0x00, 0x10, 0x00, \
+ 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
+#define UVC_GUID_FORMAT_INVZ \
+ { 'I', 'N', 'V', 'Z', 0x90, 0x2d, 0x58, 0x4a, \
+ 0x92, 0x0b, 0x77, 0x3f, 0x1f, 0x2c, 0x55, 0x6b}
+#define UVC_GUID_FORMAT_INZI \
+ { 'I', 'N', 'Z', 'I', 0x66, 0x1a, 0x42, 0xa2, \
+ 0x90, 0x65, 0xd0, 0x18, 0x14, 0xa8, 0xef, 0x8a}
+#define UVC_GUID_FORMAT_INVI \
+ { 'I', 'N', 'V', 'I', 0xdb, 0x57, 0x49, 0x5e, \
+ 0x8e, 0x3f, 0xf4, 0x79, 0x53, 0x2b, 0x94, 0x6f}
+#define UVC_GUID_FORMAT_CNF4 \
+ { 'C', ' ', ' ', ' ', 0x00, 0x00, 0x10, 0x00, \
+ 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
+
+#define UVC_GUID_FORMAT_D3DFMT_L8 \
+ {0x32, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x00, \
+ 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
+#define UVC_GUID_FORMAT_KSMEDIA_L8_IR \
+ {0x32, 0x00, 0x00, 0x00, 0x02, 0x00, 0x10, 0x00, \
+ 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
+
+#define UVC_GUID_FORMAT_HEVC \
+ { 'H', 'E', 'V', 'C', 0x00, 0x00, 0x10, 0x00, \
+ 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
+
+/* ------------------------------------------------------------------------
+ * Video formats
+ */
+
+struct uvc_format_desc {
+ char *name;
+ u8 guid[16];
+ u32 fcc;
+};
+
+static struct uvc_format_desc uvc_fmts[] = {
+ {
+ .name = "YUV 4:2:2 (YUYV)",
+ .guid = UVC_GUID_FORMAT_YUY2,
+ .fcc = V4L2_PIX_FMT_YUYV,
+ },
+ {
+ .name = "YUV 4:2:2 (YUYV)",
+ .guid = UVC_GUID_FORMAT_YUY2_ISIGHT,
+ .fcc = V4L2_PIX_FMT_YUYV,
+ },
+ {
+ .name = "YUV 4:2:0 (NV12)",
+ .guid = UVC_GUID_FORMAT_NV12,
+ .fcc = V4L2_PIX_FMT_NV12,
+ },
+ {
+ .name = "MJPEG",
+ .guid = UVC_GUID_FORMAT_MJPEG,
+ .fcc = V4L2_PIX_FMT_MJPEG,
+ },
+ {
+ .name = "YVU 4:2:0 (YV12)",
+ .guid = UVC_GUID_FORMAT_YV12,
+ .fcc = V4L2_PIX_FMT_YVU420,
+ },
+ {
+ .name = "YUV 4:2:0 (I420)",
+ .guid = UVC_GUID_FORMAT_I420,
+ .fcc = V4L2_PIX_FMT_YUV420,
+ },
+ {
+ .name = "YUV 4:2:0 (M420)",
+ .guid = UVC_GUID_FORMAT_M420,
+ .fcc = V4L2_PIX_FMT_M420,
+ },
+ {
+ .name = "YUV 4:2:2 (UYVY)",
+ .guid = UVC_GUID_FORMAT_UYVY,
+ .fcc = V4L2_PIX_FMT_UYVY,
+ },
+ {
+ .name = "Greyscale 8-bit (Y800)",
+ .guid = UVC_GUID_FORMAT_Y800,
+ .fcc = V4L2_PIX_FMT_GREY,
+ },
+ {
+ .name = "Greyscale 8-bit (Y8 )",
+ .guid = UVC_GUID_FORMAT_Y8,
+ .fcc = V4L2_PIX_FMT_GREY,
+ },
+ {
+ .name = "Greyscale 8-bit (D3DFMT_L8)",
+ .guid = UVC_GUID_FORMAT_D3DFMT_L8,
+ .fcc = V4L2_PIX_FMT_GREY,
+ },
+ {
+ .name = "IR 8-bit (L8_IR)",
+ .guid = UVC_GUID_FORMAT_KSMEDIA_L8_IR,
+ .fcc = V4L2_PIX_FMT_GREY,
+ },
+ {
+ .name = "Greyscale 10-bit (Y10 )",
+ .guid = UVC_GUID_FORMAT_Y10,
+ .fcc = V4L2_PIX_FMT_Y10,
+ },
+ {
+ .name = "Greyscale 12-bit (Y12 )",
+ .guid = UVC_GUID_FORMAT_Y12,
+ .fcc = V4L2_PIX_FMT_Y12,
+ },
+ {
+ .name = "Greyscale 16-bit (Y16 )",
+ .guid = UVC_GUID_FORMAT_Y16,
+ .fcc = V4L2_PIX_FMT_Y16,
+ },
+ {
+ .name = "BGGR Bayer (BY8 )",
+ .guid = UVC_GUID_FORMAT_BY8,
+ .fcc = V4L2_PIX_FMT_SBGGR8,
+ },
+ {
+ .name = "BGGR Bayer (BA81)",
+ .guid = UVC_GUID_FORMAT_BA81,
+ .fcc = V4L2_PIX_FMT_SBGGR8,
+ },
+ {
+ .name = "GBRG Bayer (GBRG)",
+ .guid = UVC_GUID_FORMAT_GBRG,
+ .fcc = V4L2_PIX_FMT_SGBRG8,
+ },
+ {
+ .name = "GRBG Bayer (GRBG)",
+ .guid = UVC_GUID_FORMAT_GRBG,
+ .fcc = V4L2_PIX_FMT_SGRBG8,
+ },
+ {
+ .name = "RGGB Bayer (RGGB)",
+ .guid = UVC_GUID_FORMAT_RGGB,
+ .fcc = V4L2_PIX_FMT_SRGGB8,
+ },
+ {
+ .name = "RGB565",
+ .guid = UVC_GUID_FORMAT_RGBP,
+ .fcc = V4L2_PIX_FMT_RGB565,
+ },
+ {
+ .name = "BGR 8:8:8 (BGR3)",
+ .guid = UVC_GUID_FORMAT_BGR3,
+ .fcc = V4L2_PIX_FMT_BGR24,
+ },
+ {
+ .name = "H.264",
+ .guid = UVC_GUID_FORMAT_H264,
+ .fcc = V4L2_PIX_FMT_H264,
+ },
+ {
+ .name = "H.265",
+ .guid = UVC_GUID_FORMAT_H265,
+ .fcc = V4L2_PIX_FMT_HEVC,
+ },
+ {
+ .name = "Greyscale 8 L/R (Y8I)",
+ .guid = UVC_GUID_FORMAT_Y8I,
+ .fcc = V4L2_PIX_FMT_Y8I,
+ },
+ {
+ .name = "Greyscale 12 L/R (Y12I)",
+ .guid = UVC_GUID_FORMAT_Y12I,
+ .fcc = V4L2_PIX_FMT_Y12I,
+ },
+ {
+ .name = "Depth data 16-bit (Z16)",
+ .guid = UVC_GUID_FORMAT_Z16,
+ .fcc = V4L2_PIX_FMT_Z16,
+ },
+ {
+ .name = "Bayer 10-bit (SRGGB10P)",
+ .guid = UVC_GUID_FORMAT_RW10,
+ .fcc = V4L2_PIX_FMT_SRGGB10P,
+ },
+ {
+ .name = "Bayer 16-bit (SBGGR16)",
+ .guid = UVC_GUID_FORMAT_BG16,
+ .fcc = V4L2_PIX_FMT_SBGGR16,
+ },
+ {
+ .name = "Bayer 16-bit (SGBRG16)",
+ .guid = UVC_GUID_FORMAT_GB16,
+ .fcc = V4L2_PIX_FMT_SGBRG16,
+ },
+ {
+ .name = "Bayer 16-bit (SRGGB16)",
+ .guid = UVC_GUID_FORMAT_RG16,
+ .fcc = V4L2_PIX_FMT_SRGGB16,
+ },
+ {
+ .name = "Bayer 16-bit (SGRBG16)",
+ .guid = UVC_GUID_FORMAT_GR16,
+ .fcc = V4L2_PIX_FMT_SGRBG16,
+ },
+ {
+ .name = "Depth data 16-bit (Z16)",
+ .guid = UVC_GUID_FORMAT_INVZ,
+ .fcc = V4L2_PIX_FMT_Z16,
+ },
+ {
+ .name = "Greyscale 10-bit (Y10 )",
+ .guid = UVC_GUID_FORMAT_INVI,
+ .fcc = V4L2_PIX_FMT_Y10,
+ },
+ {
+ .name = "IR:Depth 26-bit (INZI)",
+ .guid = UVC_GUID_FORMAT_INZI,
+ .fcc = V4L2_PIX_FMT_INZI,
+ },
+ {
+ .name = "4-bit Depth Confidence (Packed)",
+ .guid = UVC_GUID_FORMAT_CNF4,
+ .fcc = V4L2_PIX_FMT_CNF4,
+ },
+ {
+ .name = "HEVC",
+ .guid = UVC_GUID_FORMAT_HEVC,
+ .fcc = V4L2_PIX_FMT_HEVC,
+ },
+};
+
+static inline struct uvc_format_desc *uvc_format_by_guid(const u8 guid[16])
+{
+ unsigned int len = ARRAY_SIZE(uvc_fmts);
+ unsigned int i;
+
+ for (i = 0; i < len; ++i) {
+ if (memcmp(guid, uvc_fmts[i].guid, 16) == 0)
+ return &uvc_fmts[i];
+ }
+
+ return NULL;
+}
+
+#endif /* __LINUX_V4L2_UVC_H */
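
As a rough usage sketch of the table and helper above: a driver that has parsed a UVC format descriptor can map the descriptor's GUID to a V4L2 fourcc and skip formats it does not recognise. The caller context is an assumption; the exact header path this ends up under is not asserted here.

/* Illustrative only: translate a parsed format-descriptor GUID into a V4L2
 * fourcc. Assumes the header above is included by the caller. */
static u32 example_guid_to_fcc(const u8 guid[16])
{
	struct uvc_format_desc *fmt = uvc_format_by_guid(guid);

	/* Unknown GUID: return 0 so the caller can skip this format. */
	return fmt ? fmt->fcc : 0;
}
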
diff --git a/include/media/videobuf2-core.h b/include/media/videobuf2-core.h
index 5468b633b9d2..3253bd2f6fee 100644
--- a/include/media/videobuf2-core.h
+++ b/include/media/videobuf2-core.h
@@ -65,7 +65,7 @@ struct vb2_buffer;
* DMABUF memory types.
* @get_userptr: acquire userspace memory for a hardware operation; used for
* USERPTR memory types; vaddr is the address passed to the
- * videobuf layer when queuing a video buffer of USERPTR type;
+ * videobuf2 layer when queuing a video buffer of USERPTR type;
* should return an allocator private per-buffer structure
* associated with the buffer on success, ERR_PTR() on failure;
* the returned private structure will then be passed as @buf_priv
@@ -97,7 +97,7 @@ struct vb2_buffer;
* associated with the passed private structure or NULL if not
* available.
* @num_users: return the current number of users of a memory buffer;
- * return 1 if the videobuf layer (or actually the driver using
+ * return 1 if the videobuf2 layer (or actually the driver using
* it) is the only user.
* @mmap: setup a userspace mapping for a given memory buffer under
* the provided virtual memory region.
@@ -210,11 +210,11 @@ enum vb2_io_modes {
* enum vb2_buffer_state - current video buffer state.
* @VB2_BUF_STATE_DEQUEUED: buffer under userspace control.
* @VB2_BUF_STATE_IN_REQUEST: buffer is queued in media request.
- * @VB2_BUF_STATE_PREPARING: buffer is being prepared in videobuf.
- * @VB2_BUF_STATE_QUEUED: buffer queued in videobuf, but not in driver.
+ * @VB2_BUF_STATE_PREPARING: buffer is being prepared in videobuf2.
+ * @VB2_BUF_STATE_QUEUED: buffer queued in videobuf2, but not in driver.
* @VB2_BUF_STATE_ACTIVE: buffer queued in driver and possibly used
* in a hardware operation.
- * @VB2_BUF_STATE_DONE: buffer returned from driver to videobuf, but
+ * @VB2_BUF_STATE_DONE: buffer returned from driver to videobuf2, but
* not yet dequeued to userspace.
* @VB2_BUF_STATE_ERROR: same as above, but the operation on the buffer
* has ended with an error, which will be reported
@@ -466,7 +466,7 @@ struct vb2_buf_ops {
};
/**
- * struct vb2_queue - a videobuf queue.
+ * struct vb2_queue - a videobuf2 queue.
*
* @type: private buffer type whose content is defined by the vb2-core
* caller. For example, for V4L2, it should match
@@ -544,7 +544,7 @@ struct vb2_buf_ops {
* @mmap_lock: private mutex used when buffers are allocated/freed/mmapped
* @memory: current memory type used
* @dma_dir: DMA mapping direction.
- * @bufs: videobuf buffer structures
+ * @bufs: videobuf2 buffer structures
* @num_buffers: number of allocated/used buffers
* @queued_list: list of buffers currently queued from userspace
* @queued_count: number of buffers queued and ready for streaming.
@@ -683,7 +683,7 @@ void *vb2_plane_vaddr(struct vb2_buffer *vb, unsigned int plane_no);
void *vb2_plane_cookie(struct vb2_buffer *vb, unsigned int plane_no);
/**
- * vb2_buffer_done() - inform videobuf that an operation on a buffer
+ * vb2_buffer_done() - inform videobuf2 that an operation on a buffer
* is finished.
* @vb: pointer to &struct vb2_buffer to be used.
* @state: state of the buffer, as defined by &enum vb2_buffer_state.
diff --git a/include/media/videobuf2-dvb.h b/include/media/videobuf2-dvb.h
index 8605366ec87c..2d577b945637 100644
--- a/include/media/videobuf2-dvb.h
+++ b/include/media/videobuf2-dvb.h
@@ -24,7 +24,7 @@ struct vb2_dvb {
struct dvb_frontend *frontend;
struct vb2_queue dvbq;
- /* video-buf-dvb state info */
+ /* vb2-dvb state info */
struct mutex lock;
int nfeeds;
diff --git a/include/media/videobuf2-v4l2.h b/include/media/videobuf2-v4l2.h
index 76e405c0b003..5a845887850b 100644
--- a/include/media/videobuf2-v4l2.h
+++ b/include/media/videobuf2-v4l2.h
@@ -63,22 +63,6 @@ struct vb2_v4l2_buffer {
container_of(vb, struct vb2_v4l2_buffer, vb2_buf)
/**
- * vb2_find_timestamp() - Find buffer with given timestamp in the queue
- *
- * @q: pointer to &struct vb2_queue with videobuf2 queue.
- * @timestamp: the timestamp to find.
- * @start_idx: the start index (usually 0) in the buffer array to start
- * searching from. Note that there may be multiple buffers
- * with the same timestamp value, so you can restart the search
- * by setting @start_idx to the previously found index + 1.
- *
- * Returns the buffer index of the buffer with the given @timestamp, or
- * -1 if no buffer with @timestamp was found.
- */
-int vb2_find_timestamp(const struct vb2_queue *q, u64 timestamp,
- unsigned int start_idx);
-
-/**
* vb2_find_buffer() - Find a buffer with given timestamp
*
* @q: pointer to &struct vb2_queue with videobuf2 queue.
diff --git a/include/media/vsp1.h b/include/media/vsp1.h
index cc1b0d42ce95..48f4a5023d81 100644
--- a/include/media/vsp1.h
+++ b/include/media/vsp1.h
@@ -51,6 +51,7 @@ int vsp1_du_setup_lif(struct device *dev, unsigned int pipe_index,
* @dst: destination rectangle on the display (integer coordinates)
* @alpha: alpha value (0: fully transparent, 255: fully opaque)
* @zpos: Z position of the plane (from 0 to number of planes minus 1)
+ * @premult: true for premultiplied alpha
*/
struct vsp1_du_atomic_config {
u32 pixelformat;
@@ -60,6 +61,7 @@ struct vsp1_du_atomic_config {
struct v4l2_rect dst;
unsigned int alpha;
unsigned int zpos;
+ bool premult;
};
/**
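
For context on the new @premult field, a KMS caller pushing a plane to the VSP might populate the configuration roughly as below. This is a minimal sketch: only fields visible in this header are set, and the format and geometry are placeholders.

struct vsp1_du_atomic_config cfg = {
	.pixelformat = V4L2_PIX_FMT_ARGB32,
	.dst = { .left = 0, .top = 0, .width = 1920, .height = 1080 },
	.alpha = 255,
	.zpos = 0,
	.premult = true,	/* plane data uses premultiplied alpha */
};
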
diff --git a/include/net/9p/9p.h b/include/net/9p/9p.h
index 24a509f559ee..13abe013af21 100644
--- a/include/net/9p/9p.h
+++ b/include/net/9p/9p.h
@@ -331,6 +331,9 @@ enum p9_qid_t {
/* size of header for zero copy read/write */
#define P9_ZC_HDR_SZ 4096
+/* maximum length of an error string */
+#define P9_ERRMAX 128
+
/**
* struct p9_qid - file system entity information
* @type: 8-bit type &p9_qid_t
diff --git a/include/net/9p/transport.h b/include/net/9p/transport.h
index ff842f963071..766ec07c9599 100644
--- a/include/net/9p/transport.h
+++ b/include/net/9p/transport.h
@@ -19,6 +19,10 @@
* @list: used to maintain a list of currently available transports
* @name: the human-readable name of the transport
* @maxsize: transport provided maximum packet size
+ * @pooled_rbuffers: currently only set for RDMA transport which pulls the
+ * response buffers from a shared pool, and accordingly
+ * we're less flexible when choosing the response message
+ * size in this case
* @def: set if this transport should be considered the default
* @create: member function to create a new connection on this transport
* @close: member function to discard a connection on this transport
@@ -38,6 +42,7 @@ struct p9_trans_module {
struct list_head list;
char *name; /* name of transport */
int maxsize; /* max message size of transport */
+ bool pooled_rbuffers;
int def; /* this transport should be default */
struct module *owner;
int (*create)(struct p9_client *client,
diff --git a/include/net/act_api.h b/include/net/act_api.h
index 9cf6870b526e..61f2ceb3939e 100644
--- a/include/net/act_api.h
+++ b/include/net/act_api.h
@@ -111,6 +111,7 @@ struct tc_action_ops {
struct list_head head;
char kind[IFNAMSIZ];
enum tca_id id; /* identifier should match kind */
+ unsigned int net_id;
size_t size;
struct module *owner;
int (*act)(struct sk_buff *, const struct tc_action *,
diff --git a/include/net/af_vsock.h b/include/net/af_vsock.h
index 1c53c4c4d88f..568a87c5e0d0 100644
--- a/include/net/af_vsock.h
+++ b/include/net/af_vsock.h
@@ -78,6 +78,7 @@ struct vsock_sock {
s64 vsock_stream_has_data(struct vsock_sock *vsk);
s64 vsock_stream_has_space(struct vsock_sock *vsk);
struct sock *vsock_create_connected(struct sock *parent);
+void vsock_data_ready(struct sock *sk);
/**** TRANSPORT ****/
@@ -135,6 +136,7 @@ struct vsock_transport {
u64 (*stream_rcvhiwat)(struct vsock_sock *);
bool (*stream_is_active)(struct vsock_sock *);
bool (*stream_allow)(u32 cid, u32 port);
+ int (*set_rcvlowat)(struct vsock_sock *vsk, int val);
/* SEQ_PACKET. */
ssize_t (*seqpacket_dequeue)(struct vsock_sock *vsk, struct msghdr *msg,
diff --git a/include/net/bluetooth/bluetooth.h b/include/net/bluetooth/bluetooth.h
index e72f3b247b5e..bcc5a4cd2c17 100644
--- a/include/net/bluetooth/bluetooth.h
+++ b/include/net/bluetooth/bluetooth.h
@@ -627,6 +627,7 @@ static inline bool iso_enabled(void)
int mgmt_init(void);
void mgmt_exit(void);
+void mgmt_cleanup(struct sock *sk);
void bt_sock_reclassify_lock(struct sock *sk, int proto);
diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h
index cf29511b25a8..e004ba04a9ae 100644
--- a/include/net/bluetooth/hci.h
+++ b/include/net/bluetooth/hci.h
@@ -354,6 +354,10 @@ enum {
HCI_LE_SIMULTANEOUS_ROLES,
HCI_CMD_DRAIN_WORKQUEUE,
+ HCI_MESH_EXPERIMENTAL,
+ HCI_MESH,
+ HCI_MESH_SENDING,
+
__HCI_NUM_FLAGS,
};
diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
index e7862903187d..c54bc71254af 100644
--- a/include/net/bluetooth/hci_core.h
+++ b/include/net/bluetooth/hci_core.h
@@ -238,6 +238,7 @@ struct adv_info {
bool enabled;
bool pending;
bool periodic;
+ __u8 mesh;
__u8 instance;
__u32 flags;
__u16 timeout;
@@ -372,6 +373,8 @@ struct hci_dev {
__u8 le_resolv_list_size;
__u8 le_num_of_adv_sets;
__u8 le_states[8];
+ __u8 mesh_ad_types[16];
+ __u8 mesh_send_ref;
__u8 commands[64];
__u8 hci_ver;
__u16 hci_rev;
@@ -511,6 +514,7 @@ struct hci_dev {
struct list_head cmd_sync_work_list;
struct mutex cmd_sync_work_lock;
struct work_struct cmd_sync_cancel_work;
+ struct work_struct reenable_adv_work;
__u16 discov_timeout;
struct delayed_work discov_off;
@@ -561,6 +565,7 @@ struct hci_dev {
struct hci_conn_hash conn_hash;
+ struct list_head mesh_pending;
struct list_head mgmt_pending;
struct list_head reject_list;
struct list_head accept_list;
@@ -614,6 +619,8 @@ struct hci_dev {
struct delayed_work rpa_expired;
bdaddr_t rpa;
+ struct delayed_work mesh_send_done;
+
enum {
INTERLEAVE_SCAN_NONE,
INTERLEAVE_SCAN_NO_FILTER,
@@ -1576,7 +1583,8 @@ struct adv_info *hci_add_adv_instance(struct hci_dev *hdev, u8 instance,
u32 flags, u16 adv_data_len, u8 *adv_data,
u16 scan_rsp_len, u8 *scan_rsp_data,
u16 timeout, u16 duration, s8 tx_power,
- u32 min_interval, u32 max_interval);
+ u32 min_interval, u32 max_interval,
+ u8 mesh_handle);
struct adv_info *hci_add_per_instance(struct hci_dev *hdev, u8 instance,
u32 flags, u8 data_len, u8 *data,
u32 min_interval, u32 max_interval);
@@ -1997,6 +2005,9 @@ void hci_mgmt_chan_unregister(struct hci_mgmt_chan *c);
#define DISCOV_LE_FAST_ADV_INT_MAX 0x00F0 /* 150 msec */
#define DISCOV_LE_PER_ADV_INT_MIN 0x00A0 /* 200 msec */
#define DISCOV_LE_PER_ADV_INT_MAX 0x00A0 /* 200 msec */
+#define DISCOV_LE_ADV_MESH_MIN 0x00A0 /* 100 msec */
+#define DISCOV_LE_ADV_MESH_MAX 0x00A0 /* 100 msec */
+#define INTERVAL_TO_MS(x) (((x) * 10) / 0x10)
#define NAME_RESOLVE_DURATION msecs_to_jiffies(10240) /* 10.24 sec */
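
The INTERVAL_TO_MS() macro added above converts advertising-interval units of 0.625 ms into milliseconds (x * 10 / 16 = x * 0.625). A self-contained, userspace-runnable check of the arithmetic against the mesh interval defines:

#include <assert.h>

#define INTERVAL_TO_MS(x) (((x) * 10) / 0x10)

int main(void)
{
	/* 0x00A0 = 160 units of 0.625 ms -> 100 ms, as in DISCOV_LE_ADV_MESH_MIN/MAX */
	assert(INTERVAL_TO_MS(0x00A0) == 100);
	/* 0x0010 = 16 units -> 10 ms */
	assert(INTERVAL_TO_MS(0x0010) == 10);
	return 0;
}
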
@@ -2048,7 +2059,8 @@ void mgmt_start_discovery_complete(struct hci_dev *hdev, u8 status);
void mgmt_stop_discovery_complete(struct hci_dev *hdev, u8 status);
void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
- u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len);
+ u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len,
+ u64 instant);
void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
u8 addr_type, s8 rssi, u8 *name, u8 name_len);
void mgmt_discovering(struct hci_dev *hdev, u8 discovering);
@@ -2075,6 +2087,7 @@ int mgmt_phy_configuration_changed(struct hci_dev *hdev, struct sock *skip);
void mgmt_adv_monitor_device_lost(struct hci_dev *hdev, u16 handle,
bdaddr_t *bdaddr, u8 addr_type);
+int hci_abort_conn(struct hci_conn *conn, u8 reason);
u8 hci_le_conn_update(struct hci_conn *conn, u16 min, u16 max, u16 latency,
u16 to_multiplier);
void hci_le_start_enc(struct hci_conn *conn, __le16 ediv, __le64 rand,
diff --git a/include/net/bluetooth/hci_sock.h b/include/net/bluetooth/hci_sock.h
index 0520e21ab698..9949870f7d78 100644
--- a/include/net/bluetooth/hci_sock.h
+++ b/include/net/bluetooth/hci_sock.h
@@ -124,8 +124,6 @@ struct hci_dev_info {
__u16 acl_pkts;
__u16 sco_mtu;
__u16 sco_pkts;
- __u16 iso_mtu;
- __u16 iso_pkts;
struct hci_dev_stats stat;
};
diff --git a/include/net/bluetooth/hci_sync.h b/include/net/bluetooth/hci_sync.h
index 3843f5060c73..17f5a4c32f36 100644
--- a/include/net/bluetooth/hci_sync.h
+++ b/include/net/bluetooth/hci_sync.h
@@ -16,6 +16,7 @@ struct hci_cmd_sync_work_entry {
hci_cmd_sync_work_destroy_t destroy;
};
+struct adv_info;
/* Function with sync suffix shall not be called with hdev->lock held as they
* wait the command to complete and in the meantime an event could be received
* which could attempt to acquire hdev->lock causing a deadlock.
@@ -51,11 +52,16 @@ int hci_update_class_sync(struct hci_dev *hdev);
int hci_update_name_sync(struct hci_dev *hdev);
int hci_write_ssp_mode_sync(struct hci_dev *hdev, u8 mode);
+int hci_get_random_address(struct hci_dev *hdev, bool require_privacy,
+ bool use_rpa, struct adv_info *adv_instance,
+ u8 *own_addr_type, bdaddr_t *rand_addr);
+
int hci_update_random_address_sync(struct hci_dev *hdev, bool require_privacy,
bool rpa, u8 *own_addr_type);
int hci_update_scan_rsp_data_sync(struct hci_dev *hdev, u8 instance);
int hci_update_adv_data_sync(struct hci_dev *hdev, u8 instance);
+int hci_update_adv_data(struct hci_dev *hdev, u8 instance);
int hci_schedule_adv_instance_sync(struct hci_dev *hdev, u8 instance,
bool force);
@@ -72,7 +78,8 @@ int hci_start_per_adv_sync(struct hci_dev *hdev, u8 instance, u8 data_len,
int hci_remove_advertising_sync(struct hci_dev *hdev, struct sock *sk,
u8 instance, bool force);
int hci_disable_advertising_sync(struct hci_dev *hdev);
-
+int hci_clear_adv_instance_sync(struct hci_dev *hdev, struct sock *sk,
+ u8 instance, bool force);
int hci_update_passive_scan_sync(struct hci_dev *hdev);
int hci_update_passive_scan(struct hci_dev *hdev);
int hci_read_rssi_sync(struct hci_dev *hdev, __le16 handle);
diff --git a/include/net/bluetooth/mgmt.h b/include/net/bluetooth/mgmt.h
index 7c1ad0f6fcec..743f6f59dff8 100644
--- a/include/net/bluetooth/mgmt.h
+++ b/include/net/bluetooth/mgmt.h
@@ -837,6 +837,42 @@ struct mgmt_cp_add_adv_patterns_monitor_rssi {
struct mgmt_adv_pattern patterns[];
} __packed;
#define MGMT_ADD_ADV_PATTERNS_MONITOR_RSSI_SIZE 8
+#define MGMT_OP_SET_MESH_RECEIVER 0x0057
+struct mgmt_cp_set_mesh {
+ __u8 enable;
+ __le16 window;
+ __le16 period;
+ __u8 num_ad_types;
+ __u8 ad_types[];
+} __packed;
+#define MGMT_SET_MESH_RECEIVER_SIZE 6
+
+#define MGMT_OP_MESH_READ_FEATURES 0x0058
+#define MGMT_MESH_READ_FEATURES_SIZE 0
+#define MESH_HANDLES_MAX 3
+struct mgmt_rp_mesh_read_features {
+ __le16 index;
+ __u8 max_handles;
+ __u8 used_handles;
+ __u8 handles[MESH_HANDLES_MAX];
+} __packed;
+
+#define MGMT_OP_MESH_SEND 0x0059
+struct mgmt_cp_mesh_send {
+ struct mgmt_addr_info addr;
+ __le64 instant;
+ __le16 delay;
+ __u8 cnt;
+ __u8 adv_data_len;
+ __u8 adv_data[];
+} __packed;
+#define MGMT_MESH_SEND_SIZE 19
+
+#define MGMT_OP_MESH_SEND_CANCEL 0x005A
+struct mgmt_cp_mesh_send_cancel {
+ __u8 handle;
+} __packed;
+#define MGMT_MESH_SEND_CANCEL_SIZE 1
#define MGMT_EV_CMD_COMPLETE 0x0001
struct mgmt_ev_cmd_complete {
@@ -1120,3 +1156,19 @@ struct mgmt_ev_adv_monitor_device_lost {
__le16 monitor_handle;
struct mgmt_addr_info addr;
} __packed;
+
+#define MGMT_EV_MESH_DEVICE_FOUND 0x0031
+struct mgmt_ev_mesh_device_found {
+ struct mgmt_addr_info addr;
+ __s8 rssi;
+ __le64 instant;
+ __le32 flags;
+ __le16 eir_len;
+ __u8 eir[];
+} __packed;
+
+
+#define MGMT_EV_MESH_PACKET_CMPLT 0x0032
+struct mgmt_ev_mesh_pkt_cmplt {
+ __u8 handle;
+} __packed;
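
To illustrate the layout of the new MGMT_OP_MESH_SEND command: MGMT_MESH_SEND_SIZE is the fixed header (7-byte mgmt_addr_info + 8 + 2 + 1 + 1 = 19 bytes), followed directly by the AD payload in adv_data[]. The sketch below fills one such request; the address, timing and count values are placeholders, and the mgmt-socket plumbing that actually submits it is omitted.

/* Sketch only: fill a MESH_SEND request covering a single AD payload.
 * The caller provides a buffer of sizeof(*cp) + ad_len bytes. */
static void example_fill_mesh_send(struct mgmt_cp_mesh_send *cp,
				   const u8 *ad, u8 ad_len)
{
	memset(cp, 0, sizeof(*cp));
	cp->addr.type = BDADDR_LE_PUBLIC;	/* placeholder address type */
	cp->instant = cpu_to_le64(0);		/* transmit immediately */
	cp->delay = cpu_to_le16(0);
	cp->cnt = 3;				/* placeholder transmission count */
	cp->adv_data_len = ad_len;
	memcpy(cp->adv_data, ad, ad_len);
}
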
diff --git a/include/net/bond_3ad.h b/include/net/bond_3ad.h
index be2992e6de5d..a016f275cb01 100644
--- a/include/net/bond_3ad.h
+++ b/include/net/bond_3ad.h
@@ -15,8 +15,6 @@
#define PKT_TYPE_LACPDU cpu_to_be16(ETH_P_SLOW)
#define AD_TIMER_INTERVAL 100 /*msec*/
-#define MULTICAST_LACPDU_ADDR {0x01, 0x80, 0xC2, 0x00, 0x00, 0x02}
-
#define AD_LACP_SLOW 0
#define AD_LACP_FAST 1
diff --git a/include/net/bonding.h b/include/net/bonding.h
index afd606df149a..e999f851738b 100644
--- a/include/net/bonding.h
+++ b/include/net/bonding.h
@@ -786,6 +786,9 @@ extern struct rtnl_link_ops bond_link_ops;
/* exported from bond_sysfs_slave.c */
extern const struct sysfs_ops slave_sysfs_ops;
+/* exported from bond_3ad.c */
+extern const u8 lacpdu_mcast_addr[];
+
static inline netdev_tx_t bond_tx_drop(struct net_device *dev, struct sk_buff *skb)
{
dev_core_stats_tx_dropped_inc(dev);
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index 908d58393484..e09ff87146c1 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -2316,6 +2316,7 @@ struct ocb_setup {
* @cwmax: Maximum contention window [a value of the form 2^n-1 in the range
* 1..32767]
* @aifs: Arbitration interframe space [0..255]
+ * @link_id: link_id or -1 for non-MLD
*/
struct ieee80211_txq_params {
enum nl80211_ac ac;
@@ -2323,6 +2324,7 @@ struct ieee80211_txq_params {
u16 cwmin;
u16 cwmax;
u8 aifs;
+ int link_id;
};
/**
@@ -3929,22 +3931,33 @@ struct mgmt_frame_regs {
* @del_intf_link: Remove an MLO link from the given interface.
*
* @add_key: add a key with the given parameters. @mac_addr will be %NULL
- * when adding a group key.
+ * when adding a group key. @link_id will be -1 for non-MLO connection.
+ * For MLO connection, @link_id will be >= 0 for group key and -1 for
+ * pairwise key, @mac_addr will be peer's MLD address for MLO pairwise key.
*
* @get_key: get information about the key with the given parameters.
* @mac_addr will be %NULL when requesting information for a group
* key. All pointers given to the @callback function need not be valid
* after it returns. This function should return an error if it is
* not possible to retrieve the key, -ENOENT if it doesn't exist.
+ * @link_id will be -1 for non-MLO connection. For MLO connection,
+ * @link_id will be >= 0 for group key and -1 for pairwise key, @mac_addr
+ * will be peer's MLD address for MLO pairwise key.
*
* @del_key: remove a key given the @mac_addr (%NULL for a group key)
- * and @key_index, return -ENOENT if the key doesn't exist.
+ * and @key_index, return -ENOENT if the key doesn't exist. @link_id will
+ * be -1 for non-MLO connection. For MLO connection, @link_id will be >= 0
+ * for group key and -1 for pairwise key, @mac_addr will be peer's MLD
+ * address for MLO pairwise key.
*
- * @set_default_key: set the default key on an interface
+ * @set_default_key: set the default key on an interface. @link_id will be >= 0
+ * for MLO connection and -1 for non-MLO connection.
*
- * @set_default_mgmt_key: set the default management frame key on an interface
+ * @set_default_mgmt_key: set the default management frame key on an interface.
+ * @link_id will be >= 0 for MLO connection and -1 for non-MLO connection.
*
- * @set_default_beacon_key: set the default Beacon frame key on an interface
+ * @set_default_beacon_key: set the default Beacon frame key on an interface.
+ * @link_id will be >= 0 for MLO connection and -1 for non-MLO connection.
*
* @set_rekey_data: give the data necessary for GTK rekeying to the driver
*
@@ -4293,22 +4306,24 @@ struct cfg80211_ops {
unsigned int link_id);
int (*add_key)(struct wiphy *wiphy, struct net_device *netdev,
- u8 key_index, bool pairwise, const u8 *mac_addr,
- struct key_params *params);
+ int link_id, u8 key_index, bool pairwise,
+ const u8 *mac_addr, struct key_params *params);
int (*get_key)(struct wiphy *wiphy, struct net_device *netdev,
- u8 key_index, bool pairwise, const u8 *mac_addr,
- void *cookie,
+ int link_id, u8 key_index, bool pairwise,
+ const u8 *mac_addr, void *cookie,
void (*callback)(void *cookie, struct key_params*));
int (*del_key)(struct wiphy *wiphy, struct net_device *netdev,
- u8 key_index, bool pairwise, const u8 *mac_addr);
+ int link_id, u8 key_index, bool pairwise,
+ const u8 *mac_addr);
int (*set_default_key)(struct wiphy *wiphy,
- struct net_device *netdev,
+ struct net_device *netdev, int link_id,
u8 key_index, bool unicast, bool multicast);
int (*set_default_mgmt_key)(struct wiphy *wiphy,
- struct net_device *netdev,
+ struct net_device *netdev, int link_id,
u8 key_index);
int (*set_default_beacon_key)(struct wiphy *wiphy,
struct net_device *netdev,
+ int link_id,
u8 key_index);
int (*start_ap)(struct wiphy *wiphy, struct net_device *dev,
@@ -8266,6 +8281,7 @@ void cfg80211_ch_switch_notify(struct net_device *dev,
* cfg80211_ch_switch_started_notify - notify channel switch start
* @dev: the device on which the channel switch started
* @chandef: the future channel definition
+ * @link_id: the link ID for MLO, must be 0 for non-MLO
* @count: the number of TBTTs until the channel switch happens
* @quiet: whether or not immediate quiet was requested by the AP
*
@@ -8275,7 +8291,8 @@ void cfg80211_ch_switch_notify(struct net_device *dev,
*/
void cfg80211_ch_switch_started_notify(struct net_device *dev,
struct cfg80211_chan_def *chandef,
- u8 count, bool quiet);
+ unsigned int link_id, u8 count,
+ bool quiet);
/**
* ieee80211_operating_class_to_band - convert operating class to band
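
A driver-side sketch of the reworked key prototypes: the new int link_id parameter precedes key_index, with -1 meaning a non-MLO connection. An MLO-unaware driver wired into struct cfg80211_ops via .add_key could start out by rejecting per-link group keys, as below; the function name and the rejection policy are illustrative, not prescribed by this change.

static int example_add_key(struct wiphy *wiphy, struct net_device *netdev,
			   int link_id, u8 key_index, bool pairwise,
			   const u8 *mac_addr, struct key_params *params)
{
	/* link_id >= 0 only happens for MLO group keys. */
	if (link_id >= 0)
		return -EOPNOTSUPP;	/* no per-link keys in this sketch */

	/* ... program key_index/params into the hardware as before ... */
	return 0;
}
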
diff --git a/include/net/devlink.h b/include/net/devlink.h
index 119ed1ffb988..ba6b8b094943 100644
--- a/include/net/devlink.h
+++ b/include/net/devlink.h
@@ -118,7 +118,6 @@ struct devlink_rate {
struct devlink_port {
struct list_head list;
- struct list_head param_list;
struct list_head region_list;
struct devlink *devlink;
unsigned int index;
@@ -130,7 +129,9 @@ struct devlink_port {
void *type_dev;
struct devlink_port_attrs attrs;
u8 attrs_set:1,
- switch_port:1;
+ switch_port:1,
+ registered:1,
+ initialized:1;
struct delayed_work type_warn_dw;
struct list_head reporter_list;
struct mutex reporters_lock; /* Protects reporter_list */
@@ -624,8 +625,7 @@ struct devlink_flash_update_params {
u32 overwrite_mask;
};
-#define DEVLINK_SUPPORT_FLASH_UPDATE_COMPONENT BIT(0)
-#define DEVLINK_SUPPORT_FLASH_UPDATE_OVERWRITE_MASK BIT(1)
+#define DEVLINK_SUPPORT_FLASH_UPDATE_OVERWRITE_MASK BIT(0)
struct devlink_region;
struct devlink_info_req;
@@ -1564,6 +1564,9 @@ void devlink_set_features(struct devlink *devlink, u64 features);
void devlink_register(struct devlink *devlink);
void devlink_unregister(struct devlink *devlink);
void devlink_free(struct devlink *devlink);
+void devlink_port_init(struct devlink *devlink,
+ struct devlink_port *devlink_port);
+void devlink_port_fini(struct devlink_port *devlink_port);
int devl_port_register(struct devlink *devlink,
struct devlink_port *devlink_port,
unsigned int port_index);
@@ -1714,15 +1717,31 @@ int devlink_info_driver_name_put(struct devlink_info_req *req,
const char *name);
int devlink_info_board_serial_number_put(struct devlink_info_req *req,
const char *bsn);
+
+enum devlink_info_version_type {
+ DEVLINK_INFO_VERSION_TYPE_NONE,
+ DEVLINK_INFO_VERSION_TYPE_COMPONENT, /* May be used as flash update
+ * component by name.
+ */
+};
+
int devlink_info_version_fixed_put(struct devlink_info_req *req,
const char *version_name,
const char *version_value);
int devlink_info_version_stored_put(struct devlink_info_req *req,
const char *version_name,
const char *version_value);
+int devlink_info_version_stored_put_ext(struct devlink_info_req *req,
+ const char *version_name,
+ const char *version_value,
+ enum devlink_info_version_type version_type);
int devlink_info_version_running_put(struct devlink_info_req *req,
const char *version_name,
const char *version_value);
+int devlink_info_version_running_put_ext(struct devlink_info_req *req,
+ const char *version_name,
+ const char *version_value,
+ enum devlink_info_version_type version_type);
int devlink_fmsg_obj_nest_start(struct devlink_fmsg *fmsg);
int devlink_fmsg_obj_nest_end(struct devlink_fmsg *fmsg);
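
A minimal sketch of how a driver's devlink info callback could use the new *_put_ext() helpers to mark a running version as targetable by component name in flash updates. The callback shape, version name and value are illustrative assumptions; only the helpers declared above are relied on.

static int example_devlink_info_get(struct devlink *devlink,
				    struct devlink_info_req *req,
				    struct netlink_ext_ack *extack)
{
	int err;

	err = devlink_info_driver_name_put(req, "example");
	if (err)
		return err;

	/* "fw" may now be used as a flash-update component name. */
	return devlink_info_version_running_put_ext(req, "fw", "1.2.3",
					DEVLINK_INFO_VERSION_TYPE_COMPONENT);
}
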
diff --git a/include/net/dn.h b/include/net/dn.h
deleted file mode 100644
index ba9655b0098a..000000000000
--- a/include/net/dn.h
+++ /dev/null
@@ -1,231 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _NET_DN_H
-#define _NET_DN_H
-
-#include <linux/dn.h>
-#include <net/sock.h>
-#include <net/flow.h>
-#include <asm/byteorder.h>
-#include <asm/unaligned.h>
-
-struct dn_scp /* Session Control Port */
-{
- unsigned char state;
-#define DN_O 1 /* Open */
-#define DN_CR 2 /* Connect Receive */
-#define DN_DR 3 /* Disconnect Reject */
-#define DN_DRC 4 /* Discon. Rej. Complete*/
-#define DN_CC 5 /* Connect Confirm */
-#define DN_CI 6 /* Connect Initiate */
-#define DN_NR 7 /* No resources */
-#define DN_NC 8 /* No communication */
-#define DN_CD 9 /* Connect Delivery */
-#define DN_RJ 10 /* Rejected */
-#define DN_RUN 11 /* Running */
-#define DN_DI 12 /* Disconnect Initiate */
-#define DN_DIC 13 /* Disconnect Complete */
-#define DN_DN 14 /* Disconnect Notificat */
-#define DN_CL 15 /* Closed */
-#define DN_CN 16 /* Closed Notification */
-
- __le16 addrloc;
- __le16 addrrem;
- __u16 numdat;
- __u16 numoth;
- __u16 numoth_rcv;
- __u16 numdat_rcv;
- __u16 ackxmt_dat;
- __u16 ackxmt_oth;
- __u16 ackrcv_dat;
- __u16 ackrcv_oth;
- __u8 flowrem_sw;
- __u8 flowloc_sw;
-#define DN_SEND 2
-#define DN_DONTSEND 1
-#define DN_NOCHANGE 0
- __u16 flowrem_dat;
- __u16 flowrem_oth;
- __u16 flowloc_dat;
- __u16 flowloc_oth;
- __u8 services_rem;
- __u8 services_loc;
- __u8 info_rem;
- __u8 info_loc;
-
- __u16 segsize_rem;
- __u16 segsize_loc;
-
- __u8 nonagle;
- __u8 multi_ireq;
- __u8 accept_mode;
- unsigned long seg_total; /* Running total of current segment */
-
- struct optdata_dn conndata_in;
- struct optdata_dn conndata_out;
- struct optdata_dn discdata_in;
- struct optdata_dn discdata_out;
- struct accessdata_dn accessdata;
-
- struct sockaddr_dn addr; /* Local address */
- struct sockaddr_dn peer; /* Remote address */
-
- /*
- * In this case the RTT estimation is not specified in the
- * docs, nor is any back off algorithm. Here we follow well
- * known tcp algorithms with a few small variations.
- *
- * snd_window: Max number of packets we send before we wait for
- * an ack to come back. This will become part of a
- * more complicated scheme when we support flow
- * control.
- *
- * nsp_srtt: Round-Trip-Time (x8) in jiffies. This is a rolling
- * average.
- * nsp_rttvar: Round-Trip-Time-Varience (x4) in jiffies. This is the
- * varience of the smoothed average (but calculated in
- * a simpler way than for normal statistical varience
- * calculations).
- *
- * nsp_rxtshift: Backoff counter. Value is zero normally, each time
- * a packet is lost is increases by one until an ack
- * is received. Its used to index an array of backoff
- * multipliers.
- */
-#define NSP_MIN_WINDOW 1
-#define NSP_MAX_WINDOW (0x07fe)
- unsigned long max_window;
- unsigned long snd_window;
-#define NSP_INITIAL_SRTT (HZ)
- unsigned long nsp_srtt;
-#define NSP_INITIAL_RTTVAR (HZ*3)
- unsigned long nsp_rttvar;
-#define NSP_MAXRXTSHIFT 12
- unsigned long nsp_rxtshift;
-
- /*
- * Output queues, one for data, one for otherdata/linkservice
- */
- struct sk_buff_head data_xmit_queue;
- struct sk_buff_head other_xmit_queue;
-
- /*
- * Input queue for other data
- */
- struct sk_buff_head other_receive_queue;
- int other_report;
-
- /*
- * Stuff to do with the slow timer
- */
- unsigned long stamp; /* time of last transmit */
- unsigned long persist;
- int (*persist_fxn)(struct sock *sk);
- unsigned long keepalive;
- void (*keepalive_fxn)(struct sock *sk);
-
-};
-
-static inline struct dn_scp *DN_SK(struct sock *sk)
-{
- return (struct dn_scp *)(sk + 1);
-}
-
-/*
- * src,dst : Source and Destination DECnet addresses
- * hops : Number of hops through the network
- * dst_port, src_port : NSP port numbers
- * services, info : Useful data extracted from conninit messages
- * rt_flags : Routing flags byte
- * nsp_flags : NSP layer flags byte
- * segsize : Size of segment
- * segnum : Number, for data, otherdata and linkservice
- * xmit_count : Number of times we've transmitted this skb
- * stamp : Time stamp of most recent transmission, used in RTT calculations
- * iif: Input interface number
- *
- * As a general policy, this structure keeps all addresses in network
- * byte order, and all else in host byte order. Thus dst, src, dst_port
- * and src_port are in network order. All else is in host order.
- *
- */
-#define DN_SKB_CB(skb) ((struct dn_skb_cb *)(skb)->cb)
-struct dn_skb_cb {
- __le16 dst;
- __le16 src;
- __u16 hops;
- __le16 dst_port;
- __le16 src_port;
- __u8 services;
- __u8 info;
- __u8 rt_flags;
- __u8 nsp_flags;
- __u16 segsize;
- __u16 segnum;
- __u16 xmit_count;
- unsigned long stamp;
- int iif;
-};
-
-static inline __le16 dn_eth2dn(const unsigned char *ethaddr)
-{
- return get_unaligned((__le16 *)(ethaddr + 4));
-}
-
-static inline __le16 dn_saddr2dn(struct sockaddr_dn *saddr)
-{
- return *(__le16 *)saddr->sdn_nodeaddr;
-}
-
-static inline void dn_dn2eth(unsigned char *ethaddr, __le16 addr)
-{
- __u16 a = le16_to_cpu(addr);
- ethaddr[0] = 0xAA;
- ethaddr[1] = 0x00;
- ethaddr[2] = 0x04;
- ethaddr[3] = 0x00;
- ethaddr[4] = (__u8)(a & 0xff);
- ethaddr[5] = (__u8)(a >> 8);
-}
-
-static inline void dn_sk_ports_copy(struct flowidn *fld, struct dn_scp *scp)
-{
- fld->fld_sport = scp->addrloc;
- fld->fld_dport = scp->addrrem;
-}
-
-unsigned int dn_mss_from_pmtu(struct net_device *dev, int mtu);
-void dn_register_sysctl(void);
-void dn_unregister_sysctl(void);
-
-#define DN_MENUVER_ACC 0x01
-#define DN_MENUVER_USR 0x02
-#define DN_MENUVER_PRX 0x04
-#define DN_MENUVER_UIC 0x08
-
-struct sock *dn_sklist_find_listener(struct sockaddr_dn *addr);
-struct sock *dn_find_by_skb(struct sk_buff *skb);
-#define DN_ASCBUF_LEN 9
-char *dn_addr2asc(__u16, char *);
-int dn_destroy_timer(struct sock *sk);
-
-int dn_sockaddr2username(struct sockaddr_dn *addr, unsigned char *buf,
- unsigned char type);
-int dn_username2sockaddr(unsigned char *data, int len, struct sockaddr_dn *addr,
- unsigned char *type);
-
-void dn_start_slow_timer(struct sock *sk);
-void dn_stop_slow_timer(struct sock *sk);
-
-extern __le16 decnet_address;
-extern int decnet_debug_level;
-extern int decnet_time_wait;
-extern int decnet_dn_count;
-extern int decnet_di_count;
-extern int decnet_dr_count;
-extern int decnet_no_fc_max_cwnd;
-
-extern long sysctl_decnet_mem[3];
-extern int sysctl_decnet_wmem[3];
-extern int sysctl_decnet_rmem[3];
-
-#endif /* _NET_DN_H */
diff --git a/include/net/dn_dev.h b/include/net/dn_dev.h
deleted file mode 100644
index bec303ea8367..000000000000
--- a/include/net/dn_dev.h
+++ /dev/null
@@ -1,200 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _NET_DN_DEV_H
-#define _NET_DN_DEV_H
-
-#include <linux/netdevice.h>
-
-struct dn_dev;
-
-struct dn_ifaddr {
- struct dn_ifaddr __rcu *ifa_next;
- struct dn_dev *ifa_dev;
- __le16 ifa_local;
- __le16 ifa_address;
- __u32 ifa_flags;
- __u8 ifa_scope;
- char ifa_label[IFNAMSIZ];
- struct rcu_head rcu;
-};
-
-#define DN_DEV_S_RU 0 /* Run - working normally */
-#define DN_DEV_S_CR 1 /* Circuit Rejected */
-#define DN_DEV_S_DS 2 /* Data Link Start */
-#define DN_DEV_S_RI 3 /* Routing Layer Initialize */
-#define DN_DEV_S_RV 4 /* Routing Layer Verify */
-#define DN_DEV_S_RC 5 /* Routing Layer Complete */
-#define DN_DEV_S_OF 6 /* Off */
-#define DN_DEV_S_HA 7 /* Halt */
-
-
-/*
- * The dn_dev_parms structure contains the set of parameters
- * for each device (hence inclusion in the dn_dev structure)
- * and an array is used to store the default types of supported
- * device (in dn_dev.c).
- *
- * The type field matches the ARPHRD_ constants and is used in
- * searching the list for supported devices when new devices
- * come up.
- *
- * The mode field is used to find out if a device is broadcast,
- * multipoint, or pointopoint. Please note that DECnet thinks
- * different ways about devices to the rest of the kernel
- * so the normal IFF_xxx flags are invalid here. For devices
- * which can be any combination of the previously mentioned
- * attributes, you can set this on a per device basis by
- * installing an up() routine.
- *
- * The device state field, defines the initial state in which the
- * device will come up. In the dn_dev structure, it is the actual
- * state.
- *
- * Things have changed here. I've killed timer1 since it's a user space
- * issue for a user space routing deamon to sort out. The kernel does
- * not need to be bothered with it.
- *
- * Timers:
- * t2 - Rate limit timer, min time between routing and hello messages
- * t3 - Hello timer, send hello messages when it expires
- *
- * Callbacks:
- * up() - Called to initialize device, return value can veto use of
- * device with DECnet.
- * down() - Called to turn device off when it goes down
- * timer3() - Called once for each ifaddr when timer 3 goes off
- *
- * sysctl - Hook for sysctl things
- *
- */
-struct dn_dev_parms {
- int type; /* ARPHRD_xxx */
- int mode; /* Broadcast, Unicast, Mulitpoint */
-#define DN_DEV_BCAST 1
-#define DN_DEV_UCAST 2
-#define DN_DEV_MPOINT 4
- int state; /* Initial state */
- int forwarding; /* 0=EndNode, 1=L1Router, 2=L2Router */
- unsigned long t2; /* Default value of t2 */
- unsigned long t3; /* Default value of t3 */
- int priority; /* Priority to be a router */
- char *name; /* Name for sysctl */
- int (*up)(struct net_device *);
- void (*down)(struct net_device *);
- void (*timer3)(struct net_device *, struct dn_ifaddr *ifa);
- void *sysctl;
-};
-
-
-struct dn_dev {
- struct dn_ifaddr __rcu *ifa_list;
- struct net_device *dev;
- struct dn_dev_parms parms;
- char use_long;
- struct timer_list timer;
- unsigned long t3;
- struct neigh_parms *neigh_parms;
- __u8 addr[ETH_ALEN];
- struct neighbour *router; /* Default router on circuit */
- struct neighbour *peer; /* Peer on pointopoint links */
- unsigned long uptime; /* Time device went up in jiffies */
-};
-
-struct dn_short_packet {
- __u8 msgflg;
- __le16 dstnode;
- __le16 srcnode;
- __u8 forward;
-} __packed;
-
-struct dn_long_packet {
- __u8 msgflg;
- __u8 d_area;
- __u8 d_subarea;
- __u8 d_id[6];
- __u8 s_area;
- __u8 s_subarea;
- __u8 s_id[6];
- __u8 nl2;
- __u8 visit_ct;
- __u8 s_class;
- __u8 pt;
-} __packed;
-
-/*------------------------- DRP - Routing messages ---------------------*/
-
-struct endnode_hello_message {
- __u8 msgflg;
- __u8 tiver[3];
- __u8 id[6];
- __u8 iinfo;
- __le16 blksize;
- __u8 area;
- __u8 seed[8];
- __u8 neighbor[6];
- __le16 timer;
- __u8 mpd;
- __u8 datalen;
- __u8 data[2];
-} __packed;
-
-struct rtnode_hello_message {
- __u8 msgflg;
- __u8 tiver[3];
- __u8 id[6];
- __u8 iinfo;
- __le16 blksize;
- __u8 priority;
- __u8 area;
- __le16 timer;
- __u8 mpd;
-} __packed;
-
-
-void dn_dev_init(void);
-void dn_dev_cleanup(void);
-
-int dn_dev_ioctl(unsigned int cmd, void __user *arg);
-
-void dn_dev_devices_off(void);
-void dn_dev_devices_on(void);
-
-void dn_dev_init_pkt(struct sk_buff *skb);
-void dn_dev_veri_pkt(struct sk_buff *skb);
-void dn_dev_hello(struct sk_buff *skb);
-
-void dn_dev_up(struct net_device *);
-void dn_dev_down(struct net_device *);
-
-int dn_dev_set_default(struct net_device *dev, int force);
-struct net_device *dn_dev_get_default(void);
-int dn_dev_bind_default(__le16 *addr);
-
-int register_dnaddr_notifier(struct notifier_block *nb);
-int unregister_dnaddr_notifier(struct notifier_block *nb);
-
-static inline int dn_dev_islocal(struct net_device *dev, __le16 addr)
-{
- struct dn_dev *dn_db;
- struct dn_ifaddr *ifa;
- int res = 0;
-
- rcu_read_lock();
- dn_db = rcu_dereference(dev->dn_ptr);
- if (dn_db == NULL) {
- printk(KERN_DEBUG "dn_dev_islocal: Called for non DECnet device\n");
- goto out;
- }
-
- for (ifa = rcu_dereference(dn_db->ifa_list);
- ifa != NULL;
- ifa = rcu_dereference(ifa->ifa_next))
- if ((addr ^ ifa->ifa_local) == 0) {
- res = 1;
- break;
- }
-out:
- rcu_read_unlock();
- return res;
-}
-
-#endif /* _NET_DN_DEV_H */
diff --git a/include/net/dn_fib.h b/include/net/dn_fib.h
deleted file mode 100644
index 1929a3cd5ebe..000000000000
--- a/include/net/dn_fib.h
+++ /dev/null
@@ -1,169 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _NET_DN_FIB_H
-#define _NET_DN_FIB_H
-
-#include <linux/netlink.h>
-#include <linux/refcount.h>
-#include <linux/rtnetlink.h>
-#include <net/fib_rules.h>
-
-extern const struct nla_policy rtm_dn_policy[];
-
-struct dn_fib_res {
- struct fib_rule *r;
- struct dn_fib_info *fi;
- unsigned char prefixlen;
- unsigned char nh_sel;
- unsigned char type;
- unsigned char scope;
-};
-
-struct dn_fib_nh {
- struct net_device *nh_dev;
- unsigned int nh_flags;
- unsigned char nh_scope;
- int nh_weight;
- int nh_power;
- int nh_oif;
- __le16 nh_gw;
-};
-
-struct dn_fib_info {
- struct dn_fib_info *fib_next;
- struct dn_fib_info *fib_prev;
- refcount_t fib_treeref;
- refcount_t fib_clntref;
- int fib_dead;
- unsigned int fib_flags;
- int fib_protocol;
- __le16 fib_prefsrc;
- __u32 fib_priority;
- __u32 fib_metrics[RTAX_MAX];
- int fib_nhs;
- int fib_power;
- struct dn_fib_nh fib_nh[0];
-#define dn_fib_dev fib_nh[0].nh_dev
-};
-
-
-#define DN_FIB_RES_RESET(res) ((res).nh_sel = 0)
-#define DN_FIB_RES_NH(res) ((res).fi->fib_nh[(res).nh_sel])
-
-#define DN_FIB_RES_PREFSRC(res) ((res).fi->fib_prefsrc ? : __dn_fib_res_prefsrc(&res))
-#define DN_FIB_RES_GW(res) (DN_FIB_RES_NH(res).nh_gw)
-#define DN_FIB_RES_DEV(res) (DN_FIB_RES_NH(res).nh_dev)
-#define DN_FIB_RES_OIF(res) (DN_FIB_RES_NH(res).nh_oif)
-
-typedef struct {
- __le16 datum;
-} dn_fib_key_t;
-
-typedef struct {
- __le16 datum;
-} dn_fib_hash_t;
-
-typedef struct {
- __u16 datum;
-} dn_fib_idx_t;
-
-struct dn_fib_node {
- struct dn_fib_node *fn_next;
- struct dn_fib_info *fn_info;
-#define DN_FIB_INFO(f) ((f)->fn_info)
- dn_fib_key_t fn_key;
- u8 fn_type;
- u8 fn_scope;
- u8 fn_state;
-};
-
-
-struct dn_fib_table {
- struct hlist_node hlist;
- u32 n;
-
- int (*insert)(struct dn_fib_table *t, struct rtmsg *r,
- struct nlattr *attrs[], struct nlmsghdr *n,
- struct netlink_skb_parms *req);
- int (*delete)(struct dn_fib_table *t, struct rtmsg *r,
- struct nlattr *attrs[], struct nlmsghdr *n,
- struct netlink_skb_parms *req);
- int (*lookup)(struct dn_fib_table *t, const struct flowidn *fld,
- struct dn_fib_res *res);
- int (*flush)(struct dn_fib_table *t);
- int (*dump)(struct dn_fib_table *t, struct sk_buff *skb, struct netlink_callback *cb);
-
- unsigned char data[];
-};
-
-#ifdef CONFIG_DECNET_ROUTER
-/*
- * dn_fib.c
- */
-void dn_fib_init(void);
-void dn_fib_cleanup(void);
-
-int dn_fib_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg);
-struct dn_fib_info *dn_fib_create_info(const struct rtmsg *r,
- struct nlattr *attrs[],
- const struct nlmsghdr *nlh, int *errp);
-int dn_fib_semantic_match(int type, struct dn_fib_info *fi,
- const struct flowidn *fld, struct dn_fib_res *res);
-void dn_fib_release_info(struct dn_fib_info *fi);
-void dn_fib_flush(void);
-void dn_fib_select_multipath(const struct flowidn *fld, struct dn_fib_res *res);
-
-/*
- * dn_tables.c
- */
-struct dn_fib_table *dn_fib_get_table(u32 n, int creat);
-struct dn_fib_table *dn_fib_empty_table(void);
-void dn_fib_table_init(void);
-void dn_fib_table_cleanup(void);
-
-/*
- * dn_rules.c
- */
-void dn_fib_rules_init(void);
-void dn_fib_rules_cleanup(void);
-unsigned int dnet_addr_type(__le16 addr);
-int dn_fib_lookup(struct flowidn *fld, struct dn_fib_res *res);
-
-int dn_fib_dump(struct sk_buff *skb, struct netlink_callback *cb);
-
-void dn_fib_free_info(struct dn_fib_info *fi);
-
-static inline void dn_fib_info_put(struct dn_fib_info *fi)
-{
- if (refcount_dec_and_test(&fi->fib_clntref))
- dn_fib_free_info(fi);
-}
-
-static inline void dn_fib_res_put(struct dn_fib_res *res)
-{
- if (res->fi)
- dn_fib_info_put(res->fi);
- if (res->r)
- fib_rule_put(res->r);
-}
-
-#else /* Endnode */
-
-#define dn_fib_init() do { } while(0)
-#define dn_fib_cleanup() do { } while(0)
-
-#define dn_fib_lookup(fl, res) (-ESRCH)
-#define dn_fib_info_put(fi) do { } while(0)
-#define dn_fib_select_multipath(fl, res) do { } while(0)
-#define dn_fib_rules_policy(saddr,res,flags) (0)
-#define dn_fib_res_put(res) do { } while(0)
-
-#endif /* CONFIG_DECNET_ROUTER */
-
-static inline __le16 dnet_make_mask(int n)
-{
- if (n)
- return cpu_to_le16(~((1 << (16 - n)) - 1));
- return cpu_to_le16(0);
-}
-
-#endif /* _NET_DN_FIB_H */
diff --git a/include/net/dn_neigh.h b/include/net/dn_neigh.h
deleted file mode 100644
index 1f7df98bfc33..000000000000
--- a/include/net/dn_neigh.h
+++ /dev/null
@@ -1,32 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _NET_DN_NEIGH_H
-#define _NET_DN_NEIGH_H
-
-#include <net/neighbour.h>
-
-/*
- * The position of the first two fields of
- * this structure are critical - SJW
- */
-struct dn_neigh {
- struct neighbour n;
- __le16 addr;
- unsigned long flags;
-#define DN_NDFLAG_R1 0x0001 /* Router L1 */
-#define DN_NDFLAG_R2 0x0002 /* Router L2 */
-#define DN_NDFLAG_P3 0x0004 /* Phase III Node */
- unsigned long blksize;
- __u8 priority;
-};
-
-void dn_neigh_init(void);
-void dn_neigh_cleanup(void);
-int dn_neigh_router_hello(struct net *net, struct sock *sk, struct sk_buff *skb);
-int dn_neigh_endnode_hello(struct net *net, struct sock *sk, struct sk_buff *skb);
-void dn_neigh_pointopoint_hello(struct sk_buff *skb);
-int dn_neigh_elist(struct net_device *dev, unsigned char *ptr, int n);
-int dn_to_neigh_output(struct net *net, struct sock *sk, struct sk_buff *skb);
-
-extern struct neigh_table dn_neigh_table;
-
-#endif /* _NET_DN_NEIGH_H */
diff --git a/include/net/dn_nsp.h b/include/net/dn_nsp.h
deleted file mode 100644
index a4a18fee0b7c..000000000000
--- a/include/net/dn_nsp.h
+++ /dev/null
@@ -1,201 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-#ifndef _NET_DN_NSP_H
-#define _NET_DN_NSP_H
-/******************************************************************************
- (c) 1995-1998 E.M. Serrat emserrat@geocities.com
-
-*******************************************************************************/
-/* dn_nsp.c functions prototyping */
-#include <linux/atomic.h>
-#include <linux/types.h>
-#include <net/sock.h>
-
-struct sk_buff;
-struct sk_buff_head;
-
-void dn_nsp_send_data_ack(struct sock *sk);
-void dn_nsp_send_oth_ack(struct sock *sk);
-void dn_send_conn_ack(struct sock *sk);
-void dn_send_conn_conf(struct sock *sk, gfp_t gfp);
-void dn_nsp_send_disc(struct sock *sk, unsigned char type,
- unsigned short reason, gfp_t gfp);
-void dn_nsp_return_disc(struct sk_buff *skb, unsigned char type,
- unsigned short reason);
-void dn_nsp_send_link(struct sock *sk, unsigned char lsflags, char fcval);
-void dn_nsp_send_conninit(struct sock *sk, unsigned char flags);
-
-void dn_nsp_output(struct sock *sk);
-int dn_nsp_check_xmit_queue(struct sock *sk, struct sk_buff *skb,
- struct sk_buff_head *q, unsigned short acknum);
-void dn_nsp_queue_xmit(struct sock *sk, struct sk_buff *skb, gfp_t gfp,
- int oob);
-unsigned long dn_nsp_persist(struct sock *sk);
-int dn_nsp_xmit_timeout(struct sock *sk);
-
-int dn_nsp_rx(struct sk_buff *);
-int dn_nsp_backlog_rcv(struct sock *sk, struct sk_buff *skb);
-
-struct sk_buff *dn_alloc_skb(struct sock *sk, int size, gfp_t pri);
-struct sk_buff *dn_alloc_send_skb(struct sock *sk, size_t *size, int noblock,
- long timeo, int *err);
-
-#define NSP_REASON_OK 0 /* No error */
-#define NSP_REASON_NR 1 /* No resources */
-#define NSP_REASON_UN 2 /* Unrecognised node name */
-#define NSP_REASON_SD 3 /* Node shutting down */
-#define NSP_REASON_ID 4 /* Invalid destination end user */
-#define NSP_REASON_ER 5 /* End user lacks resources */
-#define NSP_REASON_OB 6 /* Object too busy */
-#define NSP_REASON_US 7 /* Unspecified error */
-#define NSP_REASON_TP 8 /* Third-Party abort */
-#define NSP_REASON_EA 9 /* End user has aborted the link */
-#define NSP_REASON_IF 10 /* Invalid node name format */
-#define NSP_REASON_LS 11 /* Local node shutdown */
-#define NSP_REASON_LL 32 /* Node lacks logical-link resources */
-#define NSP_REASON_LE 33 /* End user lacks logical-link resources */
-#define NSP_REASON_UR 34 /* Unacceptable RQSTRID or PASSWORD field */
-#define NSP_REASON_UA 36 /* Unacceptable ACCOUNT field */
-#define NSP_REASON_TM 38 /* End user timed out logical link */
-#define NSP_REASON_NU 39 /* Node unreachable */
-#define NSP_REASON_NL 41 /* No-link message */
-#define NSP_REASON_DC 42 /* Disconnect confirm */
-#define NSP_REASON_IO 43 /* Image data field overflow */
-
-#define NSP_DISCINIT 0x38
-#define NSP_DISCCONF 0x48
-
-/*------------------------- NSP - messages ------------------------------*/
-/* Data Messages */
-/*---------------*/
-
-/* Data Messages (data segment/interrupt/link service) */
-
-struct nsp_data_seg_msg {
- __u8 msgflg;
- __le16 dstaddr;
- __le16 srcaddr;
-} __packed;
-
-struct nsp_data_opt_msg {
- __le16 acknum;
- __le16 segnum;
- __le16 lsflgs;
-} __packed;
-
-struct nsp_data_opt_msg1 {
- __le16 acknum;
- __le16 segnum;
-} __packed;
-
-
-/* Acknowledgment Message (data/other data) */
-struct nsp_data_ack_msg {
- __u8 msgflg;
- __le16 dstaddr;
- __le16 srcaddr;
- __le16 acknum;
-} __packed;
-
-/* Connect Acknowledgment Message */
-struct nsp_conn_ack_msg {
- __u8 msgflg;
- __le16 dstaddr;
-} __packed;
-
-
-/* Connect Initiate/Retransmit Initiate/Connect Confirm */
-struct nsp_conn_init_msg {
- __u8 msgflg;
-#define NSP_CI 0x18 /* Connect Initiate */
-#define NSP_RCI 0x68 /* Retrans. Conn Init */
- __le16 dstaddr;
- __le16 srcaddr;
- __u8 services;
-#define NSP_FC_NONE 0x00 /* Flow Control None */
-#define NSP_FC_SRC 0x04 /* Seg Req. Count */
-#define NSP_FC_SCMC 0x08 /* Sess. Control Mess */
-#define NSP_FC_MASK 0x0c /* FC type mask */
- __u8 info;
- __le16 segsize;
-} __packed;
-
-/* Disconnect Initiate/Disconnect Confirm */
-struct nsp_disconn_init_msg {
- __u8 msgflg;
- __le16 dstaddr;
- __le16 srcaddr;
- __le16 reason;
-} __packed;
-
-
-
-struct srcobj_fmt {
- __u8 format;
- __u8 task;
- __le16 grpcode;
- __le16 usrcode;
- __u8 dlen;
-} __packed;
-
-/*
- * A collection of functions for manipulating the sequence
- * numbers used in NSP. Similar in operation to the functions
- * of the same name in TCP.
- */
-static __inline__ int dn_before(__u16 seq1, __u16 seq2)
-{
- seq1 &= 0x0fff;
- seq2 &= 0x0fff;
-
- return (int)((seq1 - seq2) & 0x0fff) > 2048;
-}
-
-
-static __inline__ int dn_after(__u16 seq1, __u16 seq2)
-{
- seq1 &= 0x0fff;
- seq2 &= 0x0fff;
-
- return (int)((seq2 - seq1) & 0x0fff) > 2048;
-}
-
-static __inline__ int dn_equal(__u16 seq1, __u16 seq2)
-{
- return ((seq1 ^ seq2) & 0x0fff) == 0;
-}
-
-static __inline__ int dn_before_or_equal(__u16 seq1, __u16 seq2)
-{
- return (dn_before(seq1, seq2) || dn_equal(seq1, seq2));
-}
-
-static __inline__ void seq_add(__u16 *seq, __u16 off)
-{
- (*seq) += off;
- (*seq) &= 0x0fff;
-}
-
-static __inline__ int seq_next(__u16 seq1, __u16 seq2)
-{
- return dn_equal(seq1 + 1, seq2);
-}
-
-/*
- * Can we delay the ack ?
- */
-static __inline__ int sendack(__u16 seq)
-{
- return (int)((seq & 0x1000) ? 0 : 1);
-}
-
-/*
- * Is socket congested ?
- */
-static __inline__ int dn_congested(struct sock *sk)
-{
- return atomic_read(&sk->sk_rmem_alloc) > (sk->sk_rcvbuf >> 1);
-}
-
-#define DN_MAX_NSP_DATA_HEADER (11)
-
-#endif /* _NET_DN_NSP_H */
diff --git a/include/net/dn_route.h b/include/net/dn_route.h
deleted file mode 100644
index 88c0300236cc..000000000000
--- a/include/net/dn_route.h
+++ /dev/null
@@ -1,118 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-#ifndef _NET_DN_ROUTE_H
-#define _NET_DN_ROUTE_H
-
-/******************************************************************************
- (c) 1995-1998 E.M. Serrat emserrat@geocities.com
-
-*******************************************************************************/
-
-#include <linux/types.h>
-#include <net/dst.h>
-
-struct sk_buff *dn_alloc_skb(struct sock *sk, int size, gfp_t pri);
-int dn_route_output_sock(struct dst_entry __rcu **pprt, struct flowidn *,
- struct sock *sk, int flags);
-int dn_cache_dump(struct sk_buff *skb, struct netlink_callback *cb);
-void dn_rt_cache_flush(int delay);
-int dn_route_rcv(struct sk_buff *skb, struct net_device *dev,
- struct packet_type *pt, struct net_device *orig_dev);
-
-/* Masks for flags field */
-#define DN_RT_F_PID 0x07 /* Mask for packet type */
-#define DN_RT_F_PF 0x80 /* Padding Follows */
-#define DN_RT_F_VER 0x40 /* Version =0 discard packet if ==1 */
-#define DN_RT_F_IE 0x20 /* Intra Ethernet, Reserved in short pkt */
-#define DN_RT_F_RTS 0x10 /* Packet is being returned to sender */
-#define DN_RT_F_RQR 0x08 /* Return packet to sender upon non-delivery */
-
-/* Mask for types of routing packets */
-#define DN_RT_PKT_MSK 0x06
-/* Types of routing packets */
-#define DN_RT_PKT_SHORT 0x02 /* Short routing packet */
-#define DN_RT_PKT_LONG 0x06 /* Long routing packet */
-
-/* Mask for control/routing selection */
-#define DN_RT_PKT_CNTL 0x01 /* Set to 1 if a control packet */
-/* Types of control packets */
-#define DN_RT_CNTL_MSK 0x0f /* Mask for control packets */
-#define DN_RT_PKT_INIT 0x01 /* Initialisation packet */
-#define DN_RT_PKT_VERI 0x03 /* Verification Message */
-#define DN_RT_PKT_HELO 0x05 /* Hello and Test Message */
-#define DN_RT_PKT_L1RT 0x07 /* Level 1 Routing Message */
-#define DN_RT_PKT_L2RT 0x09 /* Level 2 Routing Message */
-#define DN_RT_PKT_ERTH 0x0b /* Ethernet Router Hello */
-#define DN_RT_PKT_EEDH 0x0d /* Ethernet EndNode Hello */
-
-/* Values for info field in hello message */
-#define DN_RT_INFO_TYPE 0x03 /* Type mask */
-#define DN_RT_INFO_L1RT 0x02 /* L1 Router */
-#define DN_RT_INFO_L2RT 0x01 /* L2 Router */
-#define DN_RT_INFO_ENDN 0x03 /* EndNode */
-#define DN_RT_INFO_VERI 0x04 /* Verification Reqd. */
-#define DN_RT_INFO_RJCT 0x08 /* Reject Flag, Reserved */
-#define DN_RT_INFO_VFLD 0x10 /* Verification Failed, Reserved */
-#define DN_RT_INFO_NOML 0x20 /* No Multicast traffic accepted */
-#define DN_RT_INFO_BLKR 0x40 /* Blocking Requested */
-
-/*
- * The fl structure is what we used to look up the route.
- * The rt_saddr & rt_daddr entries are the same as key.saddr & key.daddr
- * except for local input routes, where the rt_saddr = fl.fld_dst and
- * rt_daddr = fl.fld_src to allow the route to be used for returning
- * packets to the originating host.
- */
-struct dn_route {
- struct dst_entry dst;
- struct dn_route __rcu *dn_next;
-
- struct neighbour *n;
-
- struct flowidn fld;
-
- __le16 rt_saddr;
- __le16 rt_daddr;
- __le16 rt_gateway;
- __le16 rt_local_src; /* Source used for forwarding packets */
- __le16 rt_src_map;
- __le16 rt_dst_map;
-
- unsigned int rt_flags;
- unsigned int rt_type;
-};
-
-static inline bool dn_is_input_route(struct dn_route *rt)
-{
- return rt->fld.flowidn_iif != 0;
-}
-
-static inline bool dn_is_output_route(struct dn_route *rt)
-{
- return rt->fld.flowidn_iif == 0;
-}
-
-void dn_route_init(void);
-void dn_route_cleanup(void);
-
-#include <net/sock.h>
-#include <linux/if_arp.h>
-
-static inline void dn_rt_send(struct sk_buff *skb)
-{
- dev_queue_xmit(skb);
-}
-
-static inline void dn_rt_finish_output(struct sk_buff *skb, char *dst, char *src)
-{
- struct net_device *dev = skb->dev;
-
- if ((dev->type != ARPHRD_ETHER) && (dev->type != ARPHRD_LOOPBACK))
- dst = NULL;
-
- if (dev_hard_header(skb, dev, ETH_P_DNA_RT, dst, src, skb->len) >= 0)
- dn_rt_send(skb);
- else
- kfree_skb(skb);
-}
-
-#endif /* _NET_DN_ROUTE_H */
diff --git a/include/net/dsa.h b/include/net/dsa.h
index b902b31bebce..ee369670e20e 100644
--- a/include/net/dsa.h
+++ b/include/net/dsa.h
@@ -294,12 +294,13 @@ struct dsa_port {
u8 lag_tx_enabled:1;
- u8 devlink_port_setup:1;
-
/* Master state bits, valid only on CPU ports */
u8 master_admin_up:1;
u8 master_oper_up:1;
+ /* Valid only on user ports */
+ u8 cpu_port_in_lag:1;
+
u8 setup:1;
struct device_node *dn;
@@ -559,6 +560,14 @@ static inline bool dsa_is_user_port(struct dsa_switch *ds, int p)
list_for_each_entry((_dp), &(_dst)->ports, list) \
if (dsa_port_is_user((_dp)))
+#define dsa_tree_for_each_user_port_continue_reverse(_dp, _dst) \
+ list_for_each_entry_continue_reverse((_dp), &(_dst)->ports, list) \
+ if (dsa_port_is_user((_dp)))
+
+#define dsa_tree_for_each_cpu_port(_dp, _dst) \
+ list_for_each_entry((_dp), &(_dst)->ports, list) \
+ if (dsa_port_is_cpu((_dp)))
+
#define dsa_switch_for_each_port(_dp, _ds) \
list_for_each_entry((_dp), &(_ds)->dst->ports, list) \
if ((_dp)->ds == (_ds))
@@ -714,6 +723,14 @@ static inline bool dsa_port_offloads_lag(struct dsa_port *dp,
return dsa_port_lag_dev_get(dp) == lag->dev;
}
+static inline struct net_device *dsa_port_to_master(const struct dsa_port *dp)
+{
+ if (dp->cpu_port_in_lag)
+ return dsa_port_lag_dev_get(dp->cpu_dp);
+
+ return dp->cpu_dp->master;
+}
+
static inline
struct net_device *dsa_port_to_bridge_port(const struct dsa_port *dp)
{
@@ -798,6 +815,12 @@ dsa_tree_offloads_bridge_dev(struct dsa_switch_tree *dst,
return false;
}
+static inline bool dsa_port_tree_same(const struct dsa_port *a,
+ const struct dsa_port *b)
+{
+ return a->ds->dst == b->ds->dst;
+}
+
typedef int dsa_fdb_dump_cb_t(const unsigned char *addr, u16 vid,
bool is_static, void *data);
struct dsa_switch_ops {
@@ -821,6 +844,10 @@ struct dsa_switch_ops {
int (*connect_tag_protocol)(struct dsa_switch *ds,
enum dsa_tag_protocol proto);
+ int (*port_change_master)(struct dsa_switch *ds, int port,
+ struct net_device *master,
+ struct netlink_ext_ack *extack);
+
/* Optional switch-wide initialization and destruction methods */
int (*setup)(struct dsa_switch *ds);
void (*teardown)(struct dsa_switch *ds);
@@ -1077,7 +1104,8 @@ struct dsa_switch_ops {
int port);
int (*crosschip_lag_join)(struct dsa_switch *ds, int sw_index,
int port, struct dsa_lag lag,
- struct netdev_lag_upper_info *info);
+ struct netdev_lag_upper_info *info,
+ struct netlink_ext_ack *extack);
int (*crosschip_lag_leave)(struct dsa_switch *ds, int sw_index,
int port, struct dsa_lag lag);
@@ -1152,7 +1180,8 @@ struct dsa_switch_ops {
int (*port_lag_change)(struct dsa_switch *ds, int port);
int (*port_lag_join)(struct dsa_switch *ds, int port,
struct dsa_lag lag,
- struct netdev_lag_upper_info *info);
+ struct netdev_lag_upper_info *info,
+ struct netlink_ext_ack *extack);
int (*port_lag_leave)(struct dsa_switch *ds, int port,
struct dsa_lag lag);
diff --git a/include/net/dst.h b/include/net/dst.h
index 6aa252c3fc55..00b479ce6b99 100644
--- a/include/net/dst.h
+++ b/include/net/dst.h
@@ -239,12 +239,6 @@ static inline void dst_use_noref(struct dst_entry *dst, unsigned long time)
}
}
-static inline void dst_hold_and_use(struct dst_entry *dst, unsigned long time)
-{
- dst_hold(dst);
- dst_use_noref(dst, time);
-}
-
static inline struct dst_entry *dst_clone(struct dst_entry *dst)
{
if (dst)
diff --git a/include/net/dst_metadata.h b/include/net/dst_metadata.h
index adab27ba1ecb..a454cf4327fe 100644
--- a/include/net/dst_metadata.h
+++ b/include/net/dst_metadata.h
@@ -4,11 +4,14 @@
#include <linux/skbuff.h>
#include <net/ip_tunnels.h>
+#include <net/macsec.h>
#include <net/dst.h>
enum metadata_type {
METADATA_IP_TUNNEL,
METADATA_HW_PORT_MUX,
+ METADATA_MACSEC,
+ METADATA_XFRM,
};
struct hw_port_info {
@@ -16,12 +19,23 @@ struct hw_port_info {
u32 port_id;
};
+struct macsec_info {
+ sci_t sci;
+};
+
+struct xfrm_md_info {
+ u32 if_id;
+ int link;
+};
+
struct metadata_dst {
struct dst_entry dst;
enum metadata_type type;
union {
struct ip_tunnel_info tun_info;
struct hw_port_info port_info;
+ struct macsec_info macsec_info;
+ struct xfrm_md_info xfrm_info;
} u;
};
@@ -53,6 +67,27 @@ skb_tunnel_info(const struct sk_buff *skb)
return NULL;
}
+static inline struct xfrm_md_info *lwt_xfrm_info(struct lwtunnel_state *lwt)
+{
+ return (struct xfrm_md_info *)lwt->data;
+}
+
+static inline struct xfrm_md_info *skb_xfrm_md_info(const struct sk_buff *skb)
+{
+ struct metadata_dst *md_dst = skb_metadata_dst(skb);
+ struct dst_entry *dst;
+
+ if (md_dst && md_dst->type == METADATA_XFRM)
+ return &md_dst->u.xfrm_info;
+
+ dst = skb_dst(skb);
+ if (dst && dst->lwtstate &&
+ dst->lwtstate->type == LWTUNNEL_ENCAP_XFRM)
+ return lwt_xfrm_info(dst->lwtstate);
+
+ return NULL;
+}
+
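Not part of the patch itself — a minimal sketch (hypothetical function name) showing how a caller could consume the new helper; skb_xfrm_md_info() prefers a METADATA_XFRM dst and falls back to an LWTUNNEL_ENCAP_XFRM lwtstate, exactly as defined above.

/* Hypothetical: pick the XFRM interface id for a packet, if any. */
static u32 example_skb_xfrm_if_id(const struct sk_buff *skb)
{
	struct xfrm_md_info *info = skb_xfrm_md_info(skb);

	return info ? info->if_id : 0;
}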
static inline bool skb_valid_dst(const struct sk_buff *skb)
{
struct dst_entry *dst = skb_dst(skb);
@@ -82,6 +117,12 @@ static inline int skb_metadata_dst_cmp(const struct sk_buff *skb_a,
return memcmp(&a->u.tun_info, &b->u.tun_info,
sizeof(a->u.tun_info) +
a->u.tun_info.options_len);
+ case METADATA_MACSEC:
+ return memcmp(&a->u.macsec_info, &b->u.macsec_info,
+ sizeof(a->u.macsec_info));
+ case METADATA_XFRM:
+ return memcmp(&a->u.xfrm_info, &b->u.xfrm_info,
+ sizeof(a->u.xfrm_info));
default:
return 1;
}
diff --git a/include/net/flow.h b/include/net/flow.h
index 987bd511d652..2f0da4f0318b 100644
--- a/include/net/flow.h
+++ b/include/net/flow.h
@@ -54,11 +54,6 @@ union flowi_uli {
__u8 code;
} icmpt;
- struct {
- __le16 dport;
- __le16 sport;
- } dnports;
-
__be32 gre_key;
struct {
@@ -156,27 +151,11 @@ struct flowi6 {
__u32 mp_hash;
} __attribute__((__aligned__(BITS_PER_LONG/8)));
-struct flowidn {
- struct flowi_common __fl_common;
-#define flowidn_oif __fl_common.flowic_oif
-#define flowidn_iif __fl_common.flowic_iif
-#define flowidn_mark __fl_common.flowic_mark
-#define flowidn_scope __fl_common.flowic_scope
-#define flowidn_proto __fl_common.flowic_proto
-#define flowidn_flags __fl_common.flowic_flags
- __le16 daddr;
- __le16 saddr;
- union flowi_uli uli;
-#define fld_sport uli.ports.sport
-#define fld_dport uli.ports.dport
-} __attribute__((__aligned__(BITS_PER_LONG/8)));
-
struct flowi {
union {
struct flowi_common __fl_common;
struct flowi4 ip4;
struct flowi6 ip6;
- struct flowidn dn;
} u;
#define flowi_oif u.__fl_common.flowic_oif
#define flowi_iif u.__fl_common.flowic_iif
@@ -211,11 +190,6 @@ static inline struct flowi_common *flowi6_to_flowi_common(struct flowi6 *fl6)
return &(fl6->__fl_common);
}
-static inline struct flowi *flowidn_to_flowi(struct flowidn *fldn)
-{
- return container_of(fldn, struct flowi, u.dn);
-}
-
__u32 __get_hash_from_flowi6(const struct flowi6 *fl6, struct flow_keys *keys);
#endif
diff --git a/include/net/flow_dissector.h b/include/net/flow_dissector.h
index 6c74812d64b2..5ccf52ef8809 100644
--- a/include/net/flow_dissector.h
+++ b/include/net/flow_dissector.h
@@ -289,6 +289,14 @@ struct flow_dissector_key_pppoe {
__be16 type;
};
+/**
+ * struct flow_dissector_key_l2tpv3:
+ * @session_id: identifier for a l2tp session
+ */
+struct flow_dissector_key_l2tpv3 {
+ __be32 session_id;
+};
+
enum flow_dissector_key_id {
FLOW_DISSECTOR_KEY_CONTROL, /* struct flow_dissector_key_control */
FLOW_DISSECTOR_KEY_BASIC, /* struct flow_dissector_key_basic */
@@ -320,6 +328,7 @@ enum flow_dissector_key_id {
FLOW_DISSECTOR_KEY_HASH, /* struct flow_dissector_key_hash */
FLOW_DISSECTOR_KEY_NUM_OF_VLANS, /* struct flow_dissector_key_num_of_vlans */
FLOW_DISSECTOR_KEY_PPPOE, /* struct flow_dissector_key_pppoe */
+ FLOW_DISSECTOR_KEY_L2TPV3, /* struct flow_dissector_key_l2tpv3 */
FLOW_DISSECTOR_KEY_MAX,
};
diff --git a/include/net/flow_offload.h b/include/net/flow_offload.h
index 2a9a9e42e7fd..e343f9f8363e 100644
--- a/include/net/flow_offload.h
+++ b/include/net/flow_offload.h
@@ -80,6 +80,10 @@ struct flow_match_pppoe {
struct flow_dissector_key_pppoe *key, *mask;
};
+struct flow_match_l2tpv3 {
+ struct flow_dissector_key_l2tpv3 *key, *mask;
+};
+
struct flow_rule;
void flow_rule_match_meta(const struct flow_rule *rule,
@@ -128,6 +132,8 @@ void flow_rule_match_ct(const struct flow_rule *rule,
struct flow_match_ct *out);
void flow_rule_match_pppoe(const struct flow_rule *rule,
struct flow_match_pppoe *out);
+void flow_rule_match_l2tpv3(const struct flow_rule *rule,
+ struct flow_match_l2tpv3 *out);
enum flow_action_id {
FLOW_ACTION_ACCEPT = 0,
diff --git a/include/net/genetlink.h b/include/net/genetlink.h
index 56a50e1c51b9..8f780170e2f8 100644
--- a/include/net/genetlink.h
+++ b/include/net/genetlink.h
@@ -39,6 +39,8 @@ struct genl_info;
* undo operations done by pre_doit, for example release locks
* @mcgrps: multicast groups used by this family
* @n_mcgrps: number of multicast groups
+ * @resv_start_op: first operation for which reserved fields of the header
+ * can be validated; new families should leave this field at zero
* @mcgrp_offset: starting number of multicast group IDs in this family
* (private)
* @ops: the operations supported by this family
@@ -58,6 +60,7 @@ struct genl_family {
u8 n_ops;
u8 n_small_ops;
u8 n_mcgrps;
+ u8 resv_start_op;
const struct nla_policy *policy;
int (*pre_doit)(const struct genl_ops *ops,
struct sk_buff *skb,
@@ -107,6 +110,13 @@ static inline void genl_info_net_set(struct genl_info *info, struct net *net)
#define GENL_SET_ERR_MSG(info, msg) NL_SET_ERR_MSG((info)->extack, msg)
+/* Report that a root attribute is missing */
+#define GENL_REQ_ATTR_CHECK(info, attr) ({ \
+ struct genl_info *__info = (info); \
+ \
+ NL_REQ_ATTR_CHECK(__info->extack, NULL, __info->attrs, (attr)); \
+})
+
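A hedged usage sketch, not from the patch: in a doit handler the macro evaluates non-zero when the attribute is absent and reports it via extack, so a typical pattern would look like the following (MYFAM_ATTR_PORT and the handler name are placeholders).

/* Hypothetical doit handler for a generic netlink family. */
static int myfam_cmd_doit(struct sk_buff *skb, struct genl_info *info)
{
	if (GENL_REQ_ATTR_CHECK(info, MYFAM_ATTR_PORT))
		return -EINVAL;

	/* ... info->attrs[MYFAM_ATTR_PORT] is now known to be present ... */
	return 0;
}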
enum genl_validate_flags {
GENL_DONT_VALIDATE_STRICT = BIT(0),
GENL_DONT_VALIDATE_DUMP = BIT(1),
diff --git a/include/net/gro.h b/include/net/gro.h
index 24003dea8fa4..a4fab706240d 100644
--- a/include/net/gro.h
+++ b/include/net/gro.h
@@ -160,6 +160,17 @@ static inline void *skb_gro_header_slow(struct sk_buff *skb, unsigned int hlen,
return skb->data + offset;
}
+static inline void *skb_gro_header(struct sk_buff *skb,
+ unsigned int hlen, unsigned int offset)
+{
+ void *ptr;
+
+ ptr = skb_gro_header_fast(skb, offset);
+ if (skb_gro_header_hard(skb, hlen))
+ ptr = skb_gro_header_slow(skb, hlen, offset);
+ return ptr;
+}
+
static inline void *skb_gro_network_header(struct sk_buff *skb)
{
return (NAPI_GRO_CB(skb)->frag0 ?: skb->data) +
@@ -301,12 +312,9 @@ static inline void *skb_gro_remcsum_process(struct sk_buff *skb, void *ptr,
return ptr;
}
- ptr = skb_gro_header_fast(skb, off);
- if (skb_gro_header_hard(skb, off + plen)) {
- ptr = skb_gro_header_slow(skb, off + plen, off);
- if (!ptr)
- return NULL;
- }
+ ptr = skb_gro_header(skb, off + plen, off);
+ if (!ptr)
+ return NULL;
delta = remcsum_adjust(ptr + hdrlen, NAPI_GRO_CB(skb)->csum,
start, offset);
@@ -329,12 +337,9 @@ static inline void skb_gro_remcsum_cleanup(struct sk_buff *skb,
if (!grc->delta)
return;
- ptr = skb_gro_header_fast(skb, grc->offset);
- if (skb_gro_header_hard(skb, grc->offset + sizeof(u16))) {
- ptr = skb_gro_header_slow(skb, plen, grc->offset);
- if (!ptr)
- return;
- }
+ ptr = skb_gro_header(skb, plen, grc->offset);
+ if (!ptr)
+ return;
remcsum_unadjust((__sum16 *)ptr, grc->delta);
}
@@ -405,9 +410,7 @@ static inline struct udphdr *udp_gro_udphdr(struct sk_buff *skb)
off = skb_gro_offset(skb);
hlen = off + sizeof(*uh);
- uh = skb_gro_header_fast(skb, off);
- if (skb_gro_header_hard(skb, hlen))
- uh = skb_gro_header_slow(skb, hlen, off);
+ uh = skb_gro_header(skb, hlen, off);
return uh;
}
diff --git a/include/net/ieee802154_netdev.h b/include/net/ieee802154_netdev.h
index d0d188c3294b..03b64bf876a4 100644
--- a/include/net/ieee802154_netdev.h
+++ b/include/net/ieee802154_netdev.h
@@ -15,6 +15,22 @@
#ifndef IEEE802154_NETDEVICE_H
#define IEEE802154_NETDEVICE_H
+#define IEEE802154_REQUIRED_SIZE(struct_type, member) \
+ (offsetof(typeof(struct_type), member) + \
+ sizeof(((typeof(struct_type) *)(NULL))->member))
+
+#define IEEE802154_ADDR_OFFSET \
+ offsetof(typeof(struct sockaddr_ieee802154), addr)
+
+#define IEEE802154_MIN_NAMELEN (IEEE802154_ADDR_OFFSET + \
+ IEEE802154_REQUIRED_SIZE(struct ieee802154_addr_sa, addr_type))
+
+#define IEEE802154_NAMELEN_SHORT (IEEE802154_ADDR_OFFSET + \
+ IEEE802154_REQUIRED_SIZE(struct ieee802154_addr_sa, short_addr))
+
+#define IEEE802154_NAMELEN_LONG (IEEE802154_ADDR_OFFSET + \
+ IEEE802154_REQUIRED_SIZE(struct ieee802154_addr_sa, hwaddr))
+
#include <net/af_ieee802154.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
@@ -165,6 +181,33 @@ static inline void ieee802154_devaddr_to_raw(void *raw, __le64 addr)
memcpy(raw, &temp, IEEE802154_ADDR_LEN);
}
+static inline int
+ieee802154_sockaddr_check_size(struct sockaddr_ieee802154 *daddr, int len)
+{
+ struct ieee802154_addr_sa *sa;
+ int ret = 0;
+
+ sa = &daddr->addr;
+ if (len < IEEE802154_MIN_NAMELEN)
+ return -EINVAL;
+ switch (sa->addr_type) {
+ case IEEE802154_ADDR_NONE:
+ break;
+ case IEEE802154_ADDR_SHORT:
+ if (len < IEEE802154_NAMELEN_SHORT)
+ ret = -EINVAL;
+ break;
+ case IEEE802154_ADDR_LONG:
+ if (len < IEEE802154_NAMELEN_LONG)
+ ret = -EINVAL;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ return ret;
+}
+
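A sketch of the intended call site (hypothetical bind implementation, not taken from the patch): validate the user-supplied sockaddr length before touching the variable-size address fields.

/* Hypothetical bind() fragment: reject short sockaddrs up front. */
static int example_dgram_bind(struct sock *sk, struct sockaddr *uaddr, int len)
{
	struct sockaddr_ieee802154 *addr = (struct sockaddr_ieee802154 *)uaddr;
	int err;

	err = ieee802154_sockaddr_check_size(addr, len);
	if (err)
		return err;

	/* ... the sockaddr is now long enough for its addr_type ... */
	return 0;
}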
static inline void ieee802154_addr_from_sa(struct ieee802154_addr *a,
const struct ieee802154_addr_sa *sa)
{
diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
index ee88f0f1350f..c2b15f7e5516 100644
--- a/include/net/inet_connection_sock.h
+++ b/include/net/inet_connection_sock.h
@@ -25,6 +25,7 @@
#undef INET_CSK_CLEAR_TIMERS
struct inet_bind_bucket;
+struct inet_bind2_bucket;
struct tcp_congestion_ops;
/*
@@ -57,6 +58,7 @@ struct inet_connection_sock_af_ops {
*
* @icsk_accept_queue: FIFO of established children
* @icsk_bind_hash: Bind node
+ * @icsk_bind2_hash: Bind node in the bhash2 table
* @icsk_timeout: Timeout
* @icsk_retransmit_timer: Resend (no ack)
* @icsk_rto: Retransmit timeout
@@ -83,6 +85,7 @@ struct inet_connection_sock {
struct inet_sock icsk_inet;
struct request_sock_queue icsk_accept_queue;
struct inet_bind_bucket *icsk_bind_hash;
+ struct inet_bind2_bucket *icsk_bind2_hash;
unsigned long icsk_timeout;
struct timer_list icsk_retransmit_timer;
struct timer_list icsk_delack_timer;
diff --git a/include/net/inet_hashtables.h b/include/net/inet_hashtables.h
index e9cf2157ed8a..3af1e927247d 100644
--- a/include/net/inet_hashtables.h
+++ b/include/net/inet_hashtables.h
@@ -23,6 +23,7 @@
#include <net/inet_connection_sock.h>
#include <net/inet_sock.h>
+#include <net/ip.h>
#include <net/sock.h>
#include <net/route.h>
#include <net/tcp_states.h>
@@ -90,7 +91,31 @@ struct inet_bind_bucket {
struct hlist_head owners;
};
-static inline struct net *ib_net(struct inet_bind_bucket *ib)
+struct inet_bind2_bucket {
+ possible_net_t ib_net;
+ int l3mdev;
+ unsigned short port;
+#if IS_ENABLED(CONFIG_IPV6)
+ unsigned short family;
+#endif
+ union {
+#if IS_ENABLED(CONFIG_IPV6)
+ struct in6_addr v6_rcv_saddr;
+#endif
+ __be32 rcv_saddr;
+ };
+ /* Node in the bhash2 inet_bind_hashbucket chain */
+ struct hlist_node node;
+ /* List of sockets hashed to this bucket */
+ struct hlist_head owners;
+};
+
+static inline struct net *ib_net(const struct inet_bind_bucket *ib)
+{
+ return read_pnet(&ib->ib_net);
+}
+
+static inline struct net *ib2_net(const struct inet_bind2_bucket *ib)
{
return read_pnet(&ib->ib_net);
}
@@ -133,14 +158,33 @@ struct inet_hashinfo {
* TCP hash as well as the others for fast bind/connect.
*/
struct kmem_cache *bind_bucket_cachep;
+ /* This bind table is hashed by local port */
struct inet_bind_hashbucket *bhash;
+ struct kmem_cache *bind2_bucket_cachep;
+ /* This bind table is hashed by local port and sk->sk_rcv_saddr (ipv4)
+ * or sk->sk_v6_rcv_saddr (ipv6). This 2nd bind table is used
+ * primarily for expediting bind conflict resolution.
+ */
+ struct inet_bind_hashbucket *bhash2;
unsigned int bhash_size;
/* The 2nd listener table hashed by local port and address */
unsigned int lhash2_mask;
struct inet_listen_hashbucket *lhash2;
+
+ bool pernet;
};
+static inline struct inet_hashinfo *tcp_or_dccp_get_hashinfo(const struct sock *sk)
+{
+#if IS_ENABLED(CONFIG_IP_DCCP)
+ return sk->sk_prot->h.hashinfo ? :
+ sock_net(sk)->ipv4.tcp_death_row.hashinfo;
+#else
+ return sock_net(sk)->ipv4.tcp_death_row.hashinfo;
+#endif
+}
+
static inline struct inet_listen_hashbucket *
inet_lhash2_bucket(struct inet_hashinfo *h, u32 hash)
{
@@ -175,6 +219,10 @@ static inline void inet_ehash_locks_free(struct inet_hashinfo *hashinfo)
hashinfo->ehash_locks = NULL;
}
+struct inet_hashinfo *inet_pernet_hashinfo_alloc(struct inet_hashinfo *hashinfo,
+ unsigned int ehash_entries);
+void inet_pernet_hashinfo_free(struct inet_hashinfo *hashinfo);
+
struct inet_bind_bucket *
inet_bind_bucket_create(struct kmem_cache *cachep, struct net *net,
struct inet_bind_hashbucket *head,
@@ -182,14 +230,61 @@ inet_bind_bucket_create(struct kmem_cache *cachep, struct net *net,
void inet_bind_bucket_destroy(struct kmem_cache *cachep,
struct inet_bind_bucket *tb);
+bool inet_bind_bucket_match(const struct inet_bind_bucket *tb,
+ const struct net *net, unsigned short port,
+ int l3mdev);
+
+struct inet_bind2_bucket *
+inet_bind2_bucket_create(struct kmem_cache *cachep, struct net *net,
+ struct inet_bind_hashbucket *head,
+ unsigned short port, int l3mdev,
+ const struct sock *sk);
+
+void inet_bind2_bucket_destroy(struct kmem_cache *cachep,
+ struct inet_bind2_bucket *tb);
+
+struct inet_bind2_bucket *
+inet_bind2_bucket_find(const struct inet_bind_hashbucket *head,
+ const struct net *net,
+ unsigned short port, int l3mdev,
+ const struct sock *sk);
+
+bool inet_bind2_bucket_match_addr_any(const struct inet_bind2_bucket *tb,
+ const struct net *net, unsigned short port,
+ int l3mdev, const struct sock *sk);
+
static inline u32 inet_bhashfn(const struct net *net, const __u16 lport,
const u32 bhash_size)
{
return (lport + net_hash_mix(net)) & (bhash_size - 1);
}
+static inline struct inet_bind_hashbucket *
+inet_bhashfn_portaddr(const struct inet_hashinfo *hinfo, const struct sock *sk,
+ const struct net *net, unsigned short port)
+{
+ u32 hash;
+
+#if IS_ENABLED(CONFIG_IPV6)
+ if (sk->sk_family == AF_INET6)
+ hash = ipv6_portaddr_hash(net, &sk->sk_v6_rcv_saddr, port);
+ else
+#endif
+ hash = ipv4_portaddr_hash(net, sk->sk_rcv_saddr, port);
+ return &hinfo->bhash2[hash & (hinfo->bhash_size - 1)];
+}
+
+struct inet_bind_hashbucket *
+inet_bhash2_addr_any_hashbucket(const struct sock *sk, const struct net *net, int port);
+
+/* This should be called whenever a socket's sk_rcv_saddr (ipv4) or
+ * sk_v6_rcv_saddr (ipv6) changes after it has been bound. The socket's
 * sk_v6_rcv_saddr (ipv6) changes after it has been bound. The socket's
+ * rcv_saddr field should already have been updated when this is called.
+ */
+int inet_bhash2_update_saddr(struct inet_bind_hashbucket *prev_saddr, struct sock *sk);
+
void inet_bind_hash(struct sock *sk, struct inet_bind_bucket *tb,
- const unsigned short snum);
+ struct inet_bind2_bucket *tb2, unsigned short port);
/* Caller must disable local BH processing. */
int __inet_inherit_port(const struct sock *sk, struct sock *child);
diff --git a/include/net/ip.h b/include/net/ip.h
index 1c979fd1904c..038097c2a152 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -743,8 +743,12 @@ void ip_cmsg_recv_offset(struct msghdr *msg, struct sock *sk,
int ip_cmsg_send(struct sock *sk, struct msghdr *msg,
struct ipcm_cookie *ipc, bool allow_ipv6);
DECLARE_STATIC_KEY_FALSE(ip4_min_ttl);
+int do_ip_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval,
+ unsigned int optlen);
int ip_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval,
unsigned int optlen);
+int do_ip_getsockopt(struct sock *sk, int level, int optname,
+ sockptr_t optval, sockptr_t optlen);
int ip_getsockopt(struct sock *sk, int level, int optname, char __user *optval,
int __user *optlen);
int ip_ra_control(struct sock *sk, unsigned char on,
diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h
index ced80e2f8b58..fca357679816 100644
--- a/include/net/ip_tunnels.h
+++ b/include/net/ip_tunnels.h
@@ -302,6 +302,12 @@ int ip_tunnel_newlink(struct net_device *dev, struct nlattr *tb[],
struct ip_tunnel_parm *p, __u32 fwmark);
void ip_tunnel_setup(struct net_device *dev, unsigned int net_id);
+bool ip_tunnel_netlink_encap_parms(struct nlattr *data[],
+ struct ip_tunnel_encap *encap);
+
+void ip_tunnel_netlink_parms(struct nlattr *data[],
+ struct ip_tunnel_parm *parms);
+
extern const struct header_ops ip_tunnel_header_ops;
__be16 ip_tunnel_parse_protocol(const struct sk_buff *skb);
diff --git a/include/net/ipcomp.h b/include/net/ipcomp.h
index c31108295079..8660a2a6d1fc 100644
--- a/include/net/ipcomp.h
+++ b/include/net/ipcomp.h
@@ -22,7 +22,7 @@ struct xfrm_state;
int ipcomp_input(struct xfrm_state *x, struct sk_buff *skb);
int ipcomp_output(struct xfrm_state *x, struct sk_buff *skb);
void ipcomp_destroy(struct xfrm_state *x);
-int ipcomp_init_state(struct xfrm_state *x);
+int ipcomp_init_state(struct xfrm_state *x, struct netlink_ext_ack *extack);
static inline struct ip_comp_hdr *ip_comp_hdr(const struct sk_buff *skb)
{
diff --git a/include/net/ipv6.h b/include/net/ipv6.h
index de9dcc5652c4..37943ba3a73c 100644
--- a/include/net/ipv6.h
+++ b/include/net/ipv6.h
@@ -1156,8 +1156,12 @@ struct in6_addr *fl6_update_dst(struct flowi6 *fl6,
*/
DECLARE_STATIC_KEY_FALSE(ip6_min_hopcount);
+int do_ipv6_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval,
+ unsigned int optlen);
int ipv6_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval,
unsigned int optlen);
+int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
+ sockptr_t optval, sockptr_t optlen);
int ipv6_getsockopt(struct sock *sk, int level, int optname,
char __user *optval, int __user *optlen);
@@ -1178,6 +1182,8 @@ void ipv6_icmp_error(struct sock *sk, struct sk_buff *skb, int err, __be16 port,
void ipv6_local_error(struct sock *sk, int err, struct flowi6 *fl6, u32 info);
void ipv6_local_rxpmtu(struct sock *sk, struct flowi6 *fl6, u32 mtu);
+void inet6_cleanup_sock(struct sock *sk);
+void inet6_sock_destruct(struct sock *sk);
int inet6_release(struct socket *sock);
int inet6_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len);
int inet6_getname(struct socket *sock, struct sockaddr *uaddr,
@@ -1207,7 +1213,7 @@ int ip6_mc_source(int add, int omode, struct sock *sk,
int ip6_mc_msfilter(struct sock *sk, struct group_filter *gsf,
struct sockaddr_storage *list);
int ip6_mc_msfget(struct sock *sk, struct group_filter *gsf,
- struct sockaddr_storage __user *p);
+ sockptr_t optval, size_t ss_offset);
#ifdef CONFIG_PROC_FS
int ac6_proc_init(struct net *net);
diff --git a/include/net/ipv6_stubs.h b/include/net/ipv6_stubs.h
index 45e0339be6fa..c48186bf4737 100644
--- a/include/net/ipv6_stubs.h
+++ b/include/net/ipv6_stubs.h
@@ -81,6 +81,10 @@ struct ipv6_bpf_stub {
const struct in6_addr *daddr, __be16 dport,
int dif, int sdif, struct udp_table *tbl,
struct sk_buff *skb);
+ int (*ipv6_setsockopt)(struct sock *sk, int level, int optname,
+ sockptr_t optval, unsigned int optlen);
+ int (*ipv6_getsockopt)(struct sock *sk, int level, int optname,
+ sockptr_t optval, sockptr_t optlen);
};
extern const struct ipv6_bpf_stub *ipv6_bpf_stub __read_mostly;
diff --git a/include/net/mac80211.h b/include/net/mac80211.h
index f198af600b5e..ac2bad57933f 100644
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -18,6 +18,7 @@
#include <linux/if_ether.h>
#include <linux/skbuff.h>
#include <linux/ieee80211.h>
+#include <linux/lockdep.h>
#include <net/cfg80211.h>
#include <net/codel.h>
#include <net/ieee80211_radiotap.h>
@@ -1480,6 +1481,10 @@ enum mac80211_rx_encoding {
* each A-MPDU but the same for each subframe within one A-MPDU
* @ampdu_delimiter_crc: A-MPDU delimiter CRC
* @zero_length_psdu_type: radiotap type of the 0-length PSDU
+ * @link_valid: whether the link identified by @link_id is valid. This flag
+ * is set only when the connection is MLO.
+ * @link_id: id of the link used to receive the packet. This is used along with
+ * @link_valid.
*/
struct ieee80211_rx_status {
u64 mactime;
@@ -1504,6 +1509,7 @@ struct ieee80211_rx_status {
s8 chain_signal[IEEE80211_MAX_CHAINS];
u8 ampdu_delimiter_crc;
u8 zero_length_psdu_type;
+ u8 link_valid:1, link_id:4;
};
static inline u32
@@ -1794,6 +1800,9 @@ struct ieee80211_vif_cfg {
* @link_conf: in case of MLD, the per-link BSS configuration,
* indexed by link ID
* @valid_links: bitmap of valid links, or 0 for non-MLO.
+ * @active_links: The bitmap of active links, or 0 for non-MLO.
+ * The driver shouldn't change this directly, but use the
+ * API calls meant for that purpose.
* @addr: address of this interface
* @p2p: indicates whether this AP or STA interface is a p2p
* interface, i.e. a GO or p2p-sta respectively
@@ -1829,7 +1838,7 @@ struct ieee80211_vif {
struct ieee80211_vif_cfg cfg;
struct ieee80211_bss_conf bss_conf;
struct ieee80211_bss_conf __rcu *link_conf[IEEE80211_MLD_MAX_NUM_LINKS];
- u16 valid_links;
+ u16 valid_links, active_links;
u8 addr[ETH_ALEN] __aligned(2);
bool p2p;
@@ -1856,12 +1865,11 @@ struct ieee80211_vif {
u8 drv_priv[] __aligned(sizeof(void *));
};
-/* FIXME: for now loop over all the available links; later will be changed
- * to loop only over the active links.
- */
-#define for_each_vif_active_link(vif, link, link_id) \
- for (link_id = 0; link_id < ARRAY_SIZE((vif)->link_conf); link_id++) \
- if ((link = rcu_dereference((vif)->link_conf[link_id])))
+#define for_each_vif_active_link(vif, link, link_id) \
+ for (link_id = 0; link_id < ARRAY_SIZE((vif)->link_conf); link_id++) \
+ if ((!(vif)->active_links || \
+ (vif)->active_links & BIT(link_id)) && \
+ (link = rcu_dereference((vif)->link_conf[link_id])))
static inline bool ieee80211_vif_is_mesh(struct ieee80211_vif *vif)
{
@@ -1895,6 +1903,19 @@ struct ieee80211_vif *wdev_to_ieee80211_vif(struct wireless_dev *wdev);
struct wireless_dev *ieee80211_vif_to_wdev(struct ieee80211_vif *vif);
/**
+ * lockdep_vif_mutex_held - for lockdep checks on link pointers
+ * @vif: the interface to check
+ */
+static inline bool lockdep_vif_mutex_held(struct ieee80211_vif *vif)
+{
+ return lockdep_is_held(&ieee80211_vif_to_wdev(vif)->mtx);
+}
+
+#define link_conf_dereference_protected(vif, link_id) \
+ rcu_dereference_protected((vif)->link_conf[link_id], \
+ lockdep_vif_mutex_held(vif))
+
+/**
* enum ieee80211_key_flags - key flags
*
* These flags are used for communication about keys between the driver
@@ -1975,6 +1996,7 @@ enum ieee80211_key_flags {
* - Temporal Authenticator Rx MIC Key (64 bits)
* @icv_len: The ICV length for this key type
* @iv_len: The IV length for this key type
+ * @link_id: the link ID for MLO, or -1 for non-MLO or pairwise keys
*/
struct ieee80211_key_conf {
atomic64_t tx_pn;
@@ -1984,6 +2006,7 @@ struct ieee80211_key_conf {
u8 hw_key_idx;
s8 keyidx;
u16 flags;
+ s8 link_id;
u8 keylen;
u8 key[];
};
@@ -2121,6 +2144,34 @@ struct ieee80211_sta_txpwr {
};
/**
+ * struct ieee80211_sta_aggregates - info that is aggregated from active links
+ *
+ * Used for any per-link data that needs to be aggregated and updated in the
+ * main &struct ieee80211_sta when updated or the active links change.
+ *
+ * @max_amsdu_len: indicates the maximal length of an A-MSDU in bytes.
+ * This field is always valid for packets with a VHT preamble.
+ * For packets with a HT preamble, additional limits apply:
+ *
+ * * If the skb is transmitted as part of a BA agreement, the
+ * A-MSDU maximal size is min(max_amsdu_len, 4065) bytes.
+ * * If the skb is not part of a BA agreement, the A-MSDU maximal
+ * size is min(max_amsdu_len, 7935) bytes.
+ *
+ * Both additional HT limits must be enforced by the low level
+ * driver. This is defined by the spec (IEEE 802.11-2012 section
+ * 8.3.2.2 NOTE 2).
+ * @max_rc_amsdu_len: Maximum A-MSDU size in bytes recommended by rate control.
+ * @max_tid_amsdu_len: Maximum A-MSDU size in bytes for this TID
+ */
+struct ieee80211_sta_aggregates {
+ u16 max_amsdu_len;
+
+ u16 max_rc_amsdu_len;
+ u16 max_tid_amsdu_len[IEEE80211_NUM_TIDS];
+};
+
+/**
* struct ieee80211_link_sta - station Link specific info
* All link specific info for a STA link for a non MLD STA(single)
* or a MLD STA(multiple entries) are stored here.
@@ -2128,6 +2179,8 @@ struct ieee80211_sta_txpwr {
* @addr: MAC address of the Link STA. For non-MLO STA this is same as the addr
* in ieee80211_sta. For MLO Link STA this addr can be same or different
* from addr in ieee80211_sta (representing MLD STA addr)
+ * @link_id: the link ID for this link STA (0 for deflink)
+ * @smps_mode: current SMPS mode (off, static or dynamic)
* @supp_rates: Bitmap of supported rates
* @ht_cap: HT capabilities of this STA; restricted to our own capabilities
* @vht_cap: VHT capabilities of this STA; restricted to our own capabilities
@@ -2144,6 +2197,8 @@ struct ieee80211_sta_txpwr {
*/
struct ieee80211_link_sta {
u8 addr[ETH_ALEN];
+ u8 link_id;
+ enum ieee80211_smps_mode smps_mode;
u32 supp_rates[NUM_NL80211_BANDS];
struct ieee80211_sta_ht_cap ht_cap;
@@ -2152,6 +2207,8 @@ struct ieee80211_link_sta {
struct ieee80211_he_6ghz_capa he_6ghz_capa;
struct ieee80211_sta_eht_cap eht_cap;
+ struct ieee80211_sta_aggregates agg;
+
u8 rx_nss;
enum ieee80211_sta_rx_bandwidth bandwidth;
struct ieee80211_sta_txpwr txpwr;
@@ -2182,7 +2239,6 @@ struct ieee80211_link_sta {
* if wme is supported. The bits order is like in
* IEEE80211_WMM_IE_STA_QOSINFO_AC_*.
* @max_sp: max Service Period. Only valid if wme is supported.
- * @smps_mode: current SMPS mode (off, static or dynamic)
* @rates: rate control selection table
* @tdls: indicates whether the STA is a TDLS peer
* @tdls_initiator: indicates the STA is an initiator of the TDLS link. Only
@@ -2192,9 +2248,10 @@ struct ieee80211_link_sta {
* @max_amsdu_subframes: indicates the maximal number of MSDUs in a single
* A-MSDU. Taken from the Extended Capabilities element. 0 means
* unlimited.
+ * @cur: currently valid data as aggregated from the active links.
+ * For a non-MLO STA it will point to the deflink data. For an MLO STA
+ * ieee80211_sta_recalc_aggregates() must be called to update it.
* @support_p2p_ps: indicates whether the STA supports P2P PS mechanism or not.
- * @max_rc_amsdu_len: Maximum A-MSDU size in bytes recommended by rate control.
- * @max_tid_amsdu_len: Maximum A-MSDU size in bytes for this TID
* @txq: per-TID data TX queues (if driver uses the TXQ abstraction); note that
* the last entry (%IEEE80211_NUM_TIDS) is used for non-data frames
* @deflink: This holds the default link STA information, for non MLO STA all link
@@ -2217,7 +2274,6 @@ struct ieee80211_sta {
bool wme;
u8 uapsd_queues;
u8 max_sp;
- enum ieee80211_smps_mode smps_mode;
struct ieee80211_sta_rates __rcu *rates;
bool tdls;
bool tdls_initiator;
@@ -2225,25 +2281,9 @@ struct ieee80211_sta {
bool mlo;
u8 max_amsdu_subframes;
- /**
- * @max_amsdu_len:
- * indicates the maximal length of an A-MSDU in bytes.
- * This field is always valid for packets with a VHT preamble.
- * For packets with a HT preamble, additional limits apply:
- *
- * * If the skb is transmitted as part of a BA agreement, the
- * A-MSDU maximal size is min(max_amsdu_len, 4065) bytes.
- * * If the skb is not part of a BA agreement, the A-MSDU maximal
- * size is min(max_amsdu_len, 7935) bytes.
- *
- * Both additional HT limits must be enforced by the low level
- * driver. This is defined by the spec (IEEE 802.11-2012 section
- * 8.3.2.2 NOTE 2).
- */
- u16 max_amsdu_len;
+ struct ieee80211_sta_aggregates *cur;
+
bool support_p2p_ps;
- u16 max_rc_amsdu_len;
- u16 max_tid_amsdu_len[IEEE80211_NUM_TIDS];
struct ieee80211_txq *txq[IEEE80211_NUM_TIDS + 1];
@@ -2255,13 +2295,24 @@ struct ieee80211_sta {
u8 drv_priv[] __aligned(sizeof(void *));
};
-/* FIXME: need to loop only over links which are active and check the actual
- * lock
- */
-#define for_each_sta_active_link(sta, link_sta, link_id) \
- for (link_id = 0; link_id < ARRAY_SIZE((sta)->link); link_id++) \
- if (((link_sta) = rcu_dereference_protected((sta)->link[link_id],\
- 1))) \
+#ifdef CONFIG_LOCKDEP
+bool lockdep_sta_mutex_held(struct ieee80211_sta *pubsta);
+#else
+static inline bool lockdep_sta_mutex_held(struct ieee80211_sta *pubsta)
+{
+ return true;
+}
+#endif
+
+#define link_sta_dereference_protected(sta, link_id) \
+ rcu_dereference_protected((sta)->link[link_id], \
+ lockdep_sta_mutex_held(sta))
+
+#define for_each_sta_active_link(vif, sta, link_sta, link_id) \
+ for (link_id = 0; link_id < ARRAY_SIZE((sta)->link); link_id++) \
+ if ((!(vif)->active_links || \
+ (vif)->active_links & BIT(link_id)) && \
+ ((link_sta) = link_sta_dereference_protected(sta, link_id)))
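
A driver-side sketch (hypothetical names, not from the patch), assuming the caller runs where the mac80211 sta mutex is held, as the lockdep annotation above requires; note the macro now takes the vif so it can honour active_links.

/* Hypothetical: walk only the active links of an (possibly MLO) station. */
static void example_drv_walk_sta_links(struct ieee80211_vif *vif,
					struct ieee80211_sta *sta)
{
	struct ieee80211_link_sta *link_sta;
	unsigned int link_id;

	for_each_sta_active_link(vif, sta, link_sta, link_id) {
		/* per-link data, e.g. link_sta->agg.max_amsdu_len */
	}
}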
/**
* enum sta_notify_cmd - sta notify command
@@ -5287,6 +5338,9 @@ struct sk_buff *ieee80211_pspoll_get(struct ieee80211_hw *hw,
* ieee80211_nullfunc_get - retrieve a nullfunc template
* @hw: pointer obtained from ieee80211_alloc_hw().
* @vif: &struct ieee80211_vif pointer from the add_interface callback.
+ * @link_id: If the vif is an MLD, get a frame with the link addresses
+ * for the given link ID. For a link_id < 0 you get a frame with
+ * MLD addresses, however useful that might be.
* @qos_ok: QoS NDP is acceptable to the caller, this should be set
* if at all possible
*
@@ -5304,7 +5358,7 @@ struct sk_buff *ieee80211_pspoll_get(struct ieee80211_hw *hw,
*/
struct sk_buff *ieee80211_nullfunc_get(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
- bool qos_ok);
+ int link_id, bool qos_ok);
/**
* ieee80211_probereq_get - retrieve a Probe Request template
@@ -5976,6 +6030,22 @@ struct ieee80211_sta *ieee80211_find_sta_by_ifaddr(struct ieee80211_hw *hw,
const u8 *localaddr);
/**
+ * ieee80211_find_sta_by_link_addrs - find STA by link addresses
+ * @hw: pointer as obtained from ieee80211_alloc_hw()
+ * @addr: remote station's link address
+ * @localaddr: local link address, use %NULL for any (but avoid that)
+ * @link_id: pointer to obtain the link ID if the STA is found,
+ * may be %NULL if the link ID is not needed
+ *
+ * Obtain the STA by link address, must use RCU protection.
+ */
+struct ieee80211_sta *
+ieee80211_find_sta_by_link_addrs(struct ieee80211_hw *hw,
+ const u8 *addr,
+ const u8 *localaddr,
+ unsigned int *link_id);
+
+/**
* ieee80211_sta_block_awake - block station from waking up
* @hw: the hardware
* @pubsta: the station
@@ -6051,6 +6121,19 @@ void ieee80211_sta_eosp(struct ieee80211_sta *pubsta);
void ieee80211_send_eosp_nullfunc(struct ieee80211_sta *pubsta, int tid);
/**
+ * ieee80211_sta_recalc_aggregates - recalculate aggregate data after a change
+ * @pubsta: the station
+ *
+ * Call this function after changing per-link aggregate data referenced in
+ * &struct ieee80211_sta_aggregates by accessing the agg field of
+ * &struct ieee80211_link_sta.
+ *
+ * With non MLO the data in deflink will be referenced directly. In that case
+ * there is no need to call this function.
+ */
+void ieee80211_sta_recalc_aggregates(struct ieee80211_sta *pubsta);
+
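A hedged sketch of the intended calling pattern (hypothetical driver hook, shown only to illustrate the sequence): update the per-link value in the agg field, then let mac80211 refresh the aggregated view reachable through sta->cur.

/* Hypothetical rate-control hook limiting A-MSDU size on one link. */
static void example_update_amsdu_limit(struct ieee80211_sta *pubsta,
					struct ieee80211_link_sta *link_sta,
					u16 limit)
{
	link_sta->agg.max_rc_amsdu_len = limit;
	ieee80211_sta_recalc_aggregates(pubsta);
}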
+/**
* ieee80211_sta_register_airtime - register airtime usage for a sta/tid
*
* Register airtime usage for a given sta on a given tid. The driver must call
@@ -7101,4 +7184,45 @@ static inline bool ieee80211_is_tx_data(struct sk_buff *skb)
ieee80211_is_data(hdr->frame_control);
}
+/**
+ * ieee80211_set_active_links - set active links in client mode
+ * @vif: interface to set active links on
+ * @active_links: the new active links bitmap
+ *
+ * This changes the active links on an interface. The interface
+ * must be in client mode (in AP mode, all links are always active),
+ * and @active_links must be a subset of the vif's valid_links.
+ *
+ * If a link is switched off and another is switched on at the same
+ * time (e.g. active_links going from 0x1 to 0x10) then you will get
+ * a sequence of calls like
+ * - change_vif_links(0x11)
+ * - unassign_vif_chanctx(link_id=0)
+ * - change_sta_links(0x11) for each affected STA (the AP)
+ * (TDLS connections on now inactive links should be torn down)
+ * - remove group keys on the old link (link_id 0)
+ * - add new group keys (GTK/IGTK/BIGTK) on the new link (link_id 4)
+ * - change_sta_links(0x10) for each affected STA (the AP)
+ * - assign_vif_chanctx(link_id=4)
+ * - change_vif_links(0x10)
+ *
+ * Note: This function acquires some mac80211 locks and must not
+ * be called with any driver locks held that could cause a
+ * lock dependency inversion. Best call it without locks.
+ */
+int ieee80211_set_active_links(struct ieee80211_vif *vif, u16 active_links);
+
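A short sketch of a driver-initiated link switch (hypothetical function and error handling, not part of the patch), called without driver locks held as the note above asks, and keeping @active_links within valid_links.

/* Hypothetical: move a client vif over to link 4 only. */
static int example_switch_to_link4(struct ieee80211_vif *vif)
{
	if (!(vif->valid_links & BIT(4)))
		return -EINVAL;

	return ieee80211_set_active_links(vif, BIT(4));
}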
+/**
+ * ieee80211_set_active_links_async - asynchronously set active links
+ * @vif: interface to set active links on
+ * @active_links: the new active links bitmap
+ *
+ * See ieee80211_set_active_links() for more information, the only
+ * difference here is that the link change is triggered async and
+ * can be called in any context, but the link switch will only be
+ * completed after it returns.
+ */
+void ieee80211_set_active_links_async(struct ieee80211_vif *vif,
+ u16 active_links);
+
#endif /* MAC80211_H */
diff --git a/include/net/macsec.h b/include/net/macsec.h
index d6fa6b97f6ef..5b9c61c4d3a6 100644
--- a/include/net/macsec.h
+++ b/include/net/macsec.h
@@ -14,12 +14,27 @@
#define MACSEC_DEFAULT_PN_LEN 4
#define MACSEC_XPN_PN_LEN 8
-#define MACSEC_SALT_LEN 12
#define MACSEC_NUM_AN 4 /* 2 bits for the association number */
+#define MACSEC_SCI_LEN 8
+#define MACSEC_PORT_ES (htons(0x0001))
+
+#define MACSEC_TCI_VERSION 0x80
+#define MACSEC_TCI_ES 0x40 /* end station */
+#define MACSEC_TCI_SC 0x20 /* SCI present */
+#define MACSEC_TCI_SCB 0x10 /* epon */
+#define MACSEC_TCI_E 0x08 /* encryption */
+#define MACSEC_TCI_C 0x04 /* changed text */
+#define MACSEC_AN_MASK 0x03 /* association number */
+#define MACSEC_TCI_CONFID (MACSEC_TCI_E | MACSEC_TCI_C)
+
+#define MACSEC_DEFAULT_ICV_LEN 16
+
typedef u64 __bitwise sci_t;
typedef u32 __bitwise ssci_t;
+struct metadata_dst;
+
typedef union salt {
struct {
u32 ssci;
@@ -183,6 +198,7 @@ struct macsec_tx_sa {
* @scb: single copy broadcast flag
* @sa: array of secure associations
* @stats: stats for this TXSC
+ * @md_dst: MACsec offload metadata dst
*/
struct macsec_tx_sc {
bool active;
@@ -193,6 +209,7 @@ struct macsec_tx_sc {
bool scb;
struct macsec_tx_sa __rcu *sa[MACSEC_NUM_AN];
struct pcpu_tx_sc_stats __percpu *stats;
+ struct metadata_dst *md_dst;
};
/**
@@ -254,8 +271,6 @@ struct macsec_context {
struct macsec_rx_sa_stats *rx_sa_stats;
struct macsec_dev_stats *dev_stats;
} stats;
-
- u8 prepare:1;
};
/**
@@ -289,5 +304,12 @@ struct macsec_ops {
};
void macsec_pn_wrapped(struct macsec_secy *secy, struct macsec_tx_sa *tx_sa);
+static inline bool macsec_send_sci(const struct macsec_secy *secy)
+{
+ const struct macsec_tx_sc *tx_sc = &secy->tx_sc;
+
+ return tx_sc->send_sci ||
+ (secy->n_rx_sc > 1 && !tx_sc->end_station && !tx_sc->scb);
+}
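A hedged sketch (assumed to mirror how a TX path would use the helper together with the new TCI defines; the function name is a placeholder): decide whether the SecTAG carries an explicit SCI and set the corresponding bit.

/* Hypothetical TX-path fragment building the TCI/AN octet. */
static u8 example_build_tci(const struct macsec_secy *secy, u8 an)
{
	u8 tci = an & MACSEC_AN_MASK;

	if (macsec_send_sci(secy))
		tci |= MACSEC_TCI_SC;

	return tci;
}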
#endif /* _NET_MACSEC_H_ */
diff --git a/include/net/neighbour.h b/include/net/neighbour.h
index 3827a6b395fd..20745cf7ae1a 100644
--- a/include/net/neighbour.h
+++ b/include/net/neighbour.h
@@ -276,11 +276,6 @@ static inline void *neighbour_priv(const struct neighbour *n)
extern const struct nla_policy nda_policy[];
-static inline bool neigh_key_eq16(const struct neighbour *n, const void *pkey)
-{
- return *(const u16 *)n->primary_key == *(const u16 *)pkey;
-}
-
static inline bool neigh_key_eq32(const struct neighbour *n, const void *pkey)
{
return *(const u32 *)n->primary_key == *(const u32 *)pkey;
diff --git a/include/net/netfilter/nf_conntrack_bpf.h b/include/net/netfilter/nf_conntrack_bpf.h
index a473b56842c5..2d0da478c8e0 100644
--- a/include/net/netfilter/nf_conntrack_bpf.h
+++ b/include/net/netfilter/nf_conntrack_bpf.h
@@ -3,13 +3,18 @@
#ifndef _NF_CONNTRACK_BPF_H
#define _NF_CONNTRACK_BPF_H
-#include <linux/btf.h>
#include <linux/kconfig.h>
+#include <net/netfilter/nf_conntrack.h>
+
+struct nf_conn___init {
+ struct nf_conn ct;
+};
#if (IS_BUILTIN(CONFIG_NF_CONNTRACK) && IS_ENABLED(CONFIG_DEBUG_INFO_BTF)) || \
(IS_MODULE(CONFIG_NF_CONNTRACK) && IS_ENABLED(CONFIG_DEBUG_INFO_BTF_MODULES))
extern int register_nf_conntrack_bpf(void);
+extern void cleanup_nf_conntrack_bpf(void);
#else
@@ -18,6 +23,24 @@ static inline int register_nf_conntrack_bpf(void)
return 0;
}
+static inline void cleanup_nf_conntrack_bpf(void)
+{
+}
+
+#endif
+
+#if (IS_BUILTIN(CONFIG_NF_NAT) && IS_ENABLED(CONFIG_DEBUG_INFO_BTF)) || \
+ (IS_MODULE(CONFIG_NF_NAT) && IS_ENABLED(CONFIG_DEBUG_INFO_BTF_MODULES))
+
+extern int register_nf_nat_bpf(void);
+
+#else
+
+static inline int register_nf_nat_bpf(void)
+{
+ return 0;
+}
+
#endif
#endif /* _NF_CONNTRACK_BPF_H */
diff --git a/include/net/netfilter/nf_conntrack_core.h b/include/net/netfilter/nf_conntrack_core.h
index 3cd3a6e631aa..b2b9de70d9f4 100644
--- a/include/net/netfilter/nf_conntrack_core.h
+++ b/include/net/netfilter/nf_conntrack_core.h
@@ -86,10 +86,6 @@ extern spinlock_t nf_conntrack_expect_lock;
/* ctnetlink code shared by both ctnetlink and nf_conntrack_bpf */
-#if (IS_BUILTIN(CONFIG_NF_CONNTRACK) && IS_ENABLED(CONFIG_DEBUG_INFO_BTF)) || \
- (IS_MODULE(CONFIG_NF_CONNTRACK) && IS_ENABLED(CONFIG_DEBUG_INFO_BTF_MODULES) || \
- IS_ENABLED(CONFIG_NF_CT_NETLINK))
-
static inline void __nf_ct_set_timeout(struct nf_conn *ct, u64 timeout)
{
if (timeout > INT_MAX)
@@ -101,6 +97,4 @@ int __nf_ct_change_timeout(struct nf_conn *ct, u64 cta_timeout);
void __nf_ct_change_status(struct nf_conn *ct, unsigned long on, unsigned long off);
int nf_ct_change_status_common(struct nf_conn *ct, unsigned int status);
-#endif
-
#endif /* _NF_CONNTRACK_CORE_H */
diff --git a/include/net/netfilter/nf_nat_helper.h b/include/net/netfilter/nf_nat_helper.h
index efae84646353..44c421b9be85 100644
--- a/include/net/netfilter/nf_nat_helper.h
+++ b/include/net/netfilter/nf_nat_helper.h
@@ -38,4 +38,5 @@ bool nf_nat_mangle_udp_packet(struct sk_buff *skb, struct nf_conn *ct,
* to port ct->master->saved_proto. */
void nf_nat_follow_master(struct nf_conn *ct, struct nf_conntrack_expect *this);
+u16 nf_nat_exp_find_port(struct nf_conntrack_expect *exp, u16 port);
#endif
diff --git a/include/net/netlink.h b/include/net/netlink.h
index 7a2a9d3144ba..4418b1981e31 100644
--- a/include/net/netlink.h
+++ b/include/net/netlink.h
@@ -325,6 +325,7 @@ struct nla_policy {
struct netlink_range_validation_signed *range_signed;
struct {
s16 min, max;
+ u8 network_byte_order:1;
};
int (*validate)(const struct nlattr *attr,
struct netlink_ext_ack *extack);
@@ -418,6 +419,14 @@ struct nla_policy {
.type = NLA_ENSURE_INT_OR_BINARY_TYPE(tp), \
.validation_type = NLA_VALIDATE_MAX, \
.max = _max, \
+ .network_byte_order = 0, \
+}
+
+#define NLA_POLICY_MAX_BE(tp, _max) { \
+ .type = NLA_ENSURE_UINT_TYPE(tp), \
+ .validation_type = NLA_VALIDATE_MAX, \
+ .max = _max, \
+ .network_byte_order = 1, \
}
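
A usage sketch, not from the patch: a policy entry for a hypothetical attribute (EXAMPLE_ATTR_LABEL and the 24-bit limit are placeholders) whose value is carried as a big-endian u32 and should be range-checked in network byte order.

enum {
	EXAMPLE_ATTR_UNSPEC,
	EXAMPLE_ATTR_LABEL,	/* hypothetical: 24-bit id in a big-endian u32 */
	__EXAMPLE_ATTR_MAX,
};
#define EXAMPLE_ATTR_MAX (__EXAMPLE_ATTR_MAX - 1)

static const struct nla_policy example_policy[EXAMPLE_ATTR_MAX + 1] = {
	[EXAMPLE_ATTR_LABEL] = NLA_POLICY_MAX_BE(NLA_U32, 0xffffff),
};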
#define NLA_POLICY_MASK(tp, _mask) { \
@@ -741,6 +750,7 @@ static inline int __nlmsg_parse(const struct nlmsghdr *nlh, int hdrlen,
* @hdrlen: length of family specific header
* @tb: destination array with maxtype+1 elements
* @maxtype: maximum attribute type to be expected
+ * @policy: validation policy
* @extack: extended ACK report struct
*
* See nla_parse()
@@ -760,6 +770,7 @@ static inline int nlmsg_parse(const struct nlmsghdr *nlh, int hdrlen,
* @hdrlen: length of family specific header
* @tb: destination array with maxtype+1 elements
* @maxtype: maximum attribute type to be expected
+ * @policy: validation policy
* @extack: extended ACK report struct
*
* See nla_parse_deprecated()
@@ -779,6 +790,7 @@ static inline int nlmsg_parse_deprecated(const struct nlmsghdr *nlh, int hdrlen,
* @hdrlen: length of family specific header
* @tb: destination array with maxtype+1 elements
* @maxtype: maximum attribute type to be expected
+ * @policy: validation policy
* @extack: extended ACK report struct
*
* See nla_parse_deprecated_strict()
@@ -814,7 +826,6 @@ static inline struct nlattr *nlmsg_find_attr(const struct nlmsghdr *nlh,
* @len: length of attribute stream
* @maxtype: maximum attribute type to be expected
* @policy: validation policy
- * @validate: validation strictness
* @extack: extended ACK report struct
*
* Validates all attributes in the specified attribute stream against the
diff --git a/include/net/netns/generic.h b/include/net/netns/generic.h
index 7ce68183f6e1..00c399edeed1 100644
--- a/include/net/netns/generic.h
+++ b/include/net/netns/generic.h
@@ -33,7 +33,7 @@ struct net_generic {
struct rcu_head rcu;
} s;
- void *ptr[0];
+ DECLARE_FLEX_ARRAY(void *, ptr);
};
};
diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
index 6320a76cefdc..1b8004679445 100644
--- a/include/net/netns/ipv4.h
+++ b/include/net/netns/ipv4.h
@@ -34,6 +34,7 @@ struct inet_hashinfo;
struct inet_timewait_death_row {
refcount_t tw_refcount;
+ /* Padding to avoid false sharing, tw_refcount can be often written */
struct inet_hashinfo *hashinfo ____cacheline_aligned_in_smp;
int sysctl_max_tw_buckets;
};
@@ -41,7 +42,7 @@ struct inet_timewait_death_row {
struct tcp_fastopen_context;
struct netns_ipv4 {
- struct inet_timewait_death_row *tcp_death_row;
+ struct inet_timewait_death_row tcp_death_row;
#ifdef CONFIG_SYSCTL
struct ctl_table_header *forw_hdr;
@@ -170,6 +171,7 @@ struct netns_ipv4 {
int sysctl_tcp_pacing_ca_ratio;
int sysctl_tcp_wmem[3];
int sysctl_tcp_rmem[3];
+ unsigned int sysctl_tcp_child_ehash_entries;
unsigned long sysctl_tcp_comp_sack_delay_ns;
unsigned long sysctl_tcp_comp_sack_slack_ns;
int sysctl_max_syn_backlog;
diff --git a/include/net/netns/netfilter.h b/include/net/netns/netfilter.h
index b593f95e9991..02bbdc577f8e 100644
--- a/include/net/netns/netfilter.h
+++ b/include/net/netns/netfilter.h
@@ -24,9 +24,6 @@ struct netns_nf {
#ifdef CONFIG_NETFILTER_FAMILY_BRIDGE
struct nf_hook_entries __rcu *hooks_bridge[NF_INET_NUMHOOKS];
#endif
-#if IS_ENABLED(CONFIG_DECNET)
- struct nf_hook_entries __rcu *hooks_decnet[NF_DN_NUMHOOKS];
-#endif
#if IS_ENABLED(CONFIG_NF_DEFRAG_IPV4)
unsigned int defrag_ipv4_users;
#endif
diff --git a/include/net/netns/smc.h b/include/net/netns/smc.h
index 2adbe2b245df..582212ada3ba 100644
--- a/include/net/netns/smc.h
+++ b/include/net/netns/smc.h
@@ -19,5 +19,8 @@ struct netns_smc {
#endif
unsigned int sysctl_autocorking_size;
unsigned int sysctl_smcr_buf_type;
+ int sysctl_smcr_testlink_time;
+ int sysctl_wmem;
+ int sysctl_rmem;
};
#endif
diff --git a/include/net/nl802154.h b/include/net/nl802154.h
index 145acb8f2509..f5850b569c52 100644
--- a/include/net/nl802154.h
+++ b/include/net/nl802154.h
@@ -58,9 +58,6 @@ enum nl802154_commands {
NL802154_CMD_SET_WPAN_PHY_NETNS,
- /* add new commands above here */
-
-#ifdef CONFIG_IEEE802154_NL802154_EXPERIMENTAL
NL802154_CMD_SET_SEC_PARAMS,
NL802154_CMD_GET_SEC_KEY, /* can dump */
NL802154_CMD_NEW_SEC_KEY,
@@ -74,7 +71,8 @@ enum nl802154_commands {
NL802154_CMD_GET_SEC_LEVEL, /* can dump */
NL802154_CMD_NEW_SEC_LEVEL,
NL802154_CMD_DEL_SEC_LEVEL,
-#endif /* CONFIG_IEEE802154_NL802154_EXPERIMENTAL */
+
+ /* add new commands above here */
/* used to define NL802154_CMD_MAX below */
__NL802154_CMD_AFTER_LAST,
diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h
index d9d90e6925e1..4cabb32a2ad9 100644
--- a/include/net/pkt_cls.h
+++ b/include/net/pkt_cls.h
@@ -81,6 +81,19 @@ int tcf_classify(struct sk_buff *skb,
const struct tcf_proto *tp, struct tcf_result *res,
bool compat_mode);
+static inline bool tc_cls_stats_dump(struct tcf_proto *tp,
+ struct tcf_walker *arg,
+ void *filter)
+{
+ if (arg->count >= arg->skip && arg->fn(tp, filter, arg) < 0) {
+ arg->stop = 1;
+ return false;
+ }
+
+ arg->count++;
+ return true;
+}
+
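A sketch of the intended call site (hypothetical classifier types, not from the patch): a classifier's .walk callback runs each filter through the helper and stops as soon as it returns false.

/* Hypothetical filter bookkeeping; only here to make the walk compile. */
struct example_filter {
	struct list_head list;
};

struct example_head {
	struct list_head filters;
};

/* Hypothetical classifier walk using the new helper. */
static void example_cls_walk(struct tcf_proto *tp, struct tcf_walker *arg,
			     bool rtnl_held)
{
	struct example_head *head = rtnl_dereference(tp->root);
	struct example_filter *f;

	list_for_each_entry(f, &head->filters, list) {
		if (!tc_cls_stats_dump(tp, arg, f))
			break;
	}
}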
#else
static inline bool tcf_block_shared(struct tcf_block *block)
{
@@ -197,6 +210,18 @@ tcf_unbind_filter(struct tcf_proto *tp, struct tcf_result *r)
__tcf_unbind_filter(q, r);
}
+static inline void tc_cls_bind_class(u32 classid, unsigned long cl,
+ void *q, struct tcf_result *res,
+ unsigned long base)
+{
+ if (res->classid == classid) {
+ if (cl)
+ __tcf_bind_filter(q, res, base);
+ else
+ __tcf_unbind_filter(q, res);
+ }
+}
+
struct tcf_exts {
#ifdef CONFIG_NET_CLS_ACT
__u32 type; /* for backward compat(TCA_OLD_COMPAT) */
diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h
index 3372a1f67cf4..38207873eda6 100644
--- a/include/net/pkt_sched.h
+++ b/include/net/pkt_sched.h
@@ -100,7 +100,7 @@ struct Qdisc *fifo_create_dflt(struct Qdisc *sch, struct Qdisc_ops *ops,
struct netlink_ext_ack *extack);
int register_qdisc(struct Qdisc_ops *qops);
-int unregister_qdisc(struct Qdisc_ops *qops);
+void unregister_qdisc(struct Qdisc_ops *qops);
void qdisc_get_default(char *id, size_t len);
int qdisc_set_default(const char *id);
@@ -141,6 +141,11 @@ static inline struct net *qdisc_net(struct Qdisc *q)
return dev_net(q->dev_queue->dev);
}
+struct tc_query_caps_base {
+ enum tc_setup_type type;
+ void *caps;
+};
+
struct tc_cbs_qopt_offload {
u8 enable;
s32 queue;
@@ -155,6 +160,10 @@ struct tc_etf_qopt_offload {
s32 queue;
};
+struct tc_taprio_caps {
+ bool supports_queue_max_sdu:1;
+};
+
struct tc_taprio_sched_entry {
u8 command; /* TC_TAPRIO_CMD_* */
@@ -168,6 +177,7 @@ struct tc_taprio_qopt_offload {
ktime_t base_time;
u64 cycle_time;
u64 cycle_time_extension;
+ u32 max_sdu[TC_MAX_QUEUE];
size_t num_entries;
struct tc_taprio_sched_entry entries[];
@@ -222,4 +232,17 @@ static inline struct tc_skb_cb *tc_skb_cb(const struct sk_buff *skb)
return cb;
}
+static inline bool tc_qdisc_stats_dump(struct Qdisc *sch,
+ unsigned long cl,
+ struct qdisc_walker *arg)
+{
+ if (arg->count >= arg->skip && arg->fn(sch, cl, arg) < 0) {
+ arg->stop = 1;
+ return false;
+ }
+
+ arg->count++;
+ return true;
+}
+
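The qdisc-side counterpart works the same way; a hedged sketch for a hypothetical qdisc with a fixed number of classes (EXAMPLE_NUM_CLASSES is a placeholder).

#define EXAMPLE_NUM_CLASSES 8	/* hypothetical class count */

/* Hypothetical Qdisc_class_ops .walk implementation. */
static void example_qdisc_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	unsigned long cl;

	for (cl = 1; cl <= EXAMPLE_NUM_CLASSES; cl++) {
		if (!tc_qdisc_stats_dump(sch, cl, arg))
			return;
	}
}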
#endif
diff --git a/include/net/red.h b/include/net/red.h
index be11dbd26492..454ac2b65d8c 100644
--- a/include/net/red.h
+++ b/include/net/red.h
@@ -122,7 +122,6 @@ struct red_stats {
u32 forced_drop; /* Forced drops, qavg > max_thresh */
u32 forced_mark; /* Forced marks, qavg > max_thresh */
u32 pdrop; /* Drops due to queue limits */
- u32 other; /* Drops due to drop() calls */
};
struct red_parms {
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index ec693fe7c553..d5517719af4e 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -326,11 +326,6 @@ struct tcf_result {
};
const struct tcf_proto *goto_tp;
- /* used in the skb_tc_reinsert function */
- struct {
- bool ingress;
- struct gnet_stats_queue *qstats;
- };
};
};
@@ -682,6 +677,9 @@ qdisc_offload_graft_helper(struct net_device *dev, struct Qdisc *sch,
{
}
#endif
+void qdisc_offload_query_caps(struct net_device *dev,
+ enum tc_setup_type type,
+ void *caps, size_t caps_len);
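
A hedged usage sketch (hypothetical helper name; assumes the existing TC_SETUP_QDISC_TAPRIO setup type): query the driver's capabilities into a zero-initialized struct before building the offload request.

/* Hypothetical: does this device support per-queue max-SDU for taprio? */
static bool example_taprio_supports_max_sdu(struct net_device *dev)
{
	struct tc_taprio_caps caps = { };

	qdisc_offload_query_caps(dev, TC_SETUP_QDISC_TAPRIO,
				 &caps, sizeof(caps));

	return caps.supports_queue_max_sdu;
}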
struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
const struct Qdisc_ops *ops,
struct netlink_ext_ack *extack);
@@ -940,13 +938,6 @@ static inline void qdisc_purge_queue(struct Qdisc *sch)
qdisc_tree_reduce_backlog(sch, qlen, backlog);
}
-static inline void qdisc_skb_head_init(struct qdisc_skb_head *qh)
-{
- qh->head = NULL;
- qh->tail = NULL;
- qh->qlen = 0;
-}
-
static inline void __qdisc_enqueue_tail(struct sk_buff *skb,
struct qdisc_skb_head *qh)
{
@@ -1137,7 +1128,6 @@ static inline void __qdisc_reset_queue(struct qdisc_skb_head *qh)
static inline void qdisc_reset_queue(struct Qdisc *sch)
{
__qdisc_reset_queue(&sch->q);
- sch->qstats.backlog = 0;
}
static inline struct Qdisc *qdisc_replace(struct Qdisc *sch, struct Qdisc *new,
diff --git a/include/net/sock.h b/include/net/sock.h
index d08cfe190a78..08038a385ef2 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -348,6 +348,7 @@ struct sk_filter;
* @sk_txtime_report_errors: set report errors mode for SO_TXTIME
* @sk_txtime_unused: unused txtime flags
* @ns_tracker: tracker for netns reference
+ * @sk_bind2_node: bind node in the bhash2 table
*/
struct sock {
/*
@@ -537,6 +538,7 @@ struct sock {
#endif
struct rcu_head sk_rcu;
netns_tracker ns_tracker;
+ struct hlist_node sk_bind2_node;
};
enum sk_pacing {
@@ -742,11 +744,6 @@ static inline void sk_node_init(struct hlist_node *node)
node->pprev = NULL;
}
-static inline void sk_nulls_node_init(struct hlist_nulls_node *node)
-{
- node->pprev = NULL;
-}
-
static inline void __sk_del_node(struct sock *sk)
{
__hlist_del(&sk->sk_node);
@@ -870,6 +867,16 @@ static inline void sk_add_bind_node(struct sock *sk,
hlist_add_head(&sk->sk_bind_node, list);
}
+static inline void __sk_del_bind2_node(struct sock *sk)
+{
+ __hlist_del(&sk->sk_bind2_node);
+}
+
+static inline void sk_add_bind2_node(struct sock *sk, struct hlist_head *list)
+{
+ hlist_add_head(&sk->sk_bind2_node, list);
+}
+
#define sk_for_each(__sk, list) \
hlist_for_each_entry(__sk, list, sk_node)
#define sk_for_each_rcu(__sk, list) \
@@ -887,6 +894,8 @@ static inline void sk_add_bind_node(struct sock *sk,
hlist_for_each_entry_safe(__sk, tmp, list, sk_node)
#define sk_for_each_bound(__sk, list) \
hlist_for_each_entry(__sk, list, sk_bind_node)
+#define sk_for_each_bound_bhash2(__sk, list) \
+ hlist_for_each_entry(__sk, list, sk_bind2_node)
/**
* sk_for_each_entry_offset_rcu - iterate over a list at a given struct offset
@@ -1774,6 +1783,11 @@ static inline void unlock_sock_fast(struct sock *sk, bool slow)
}
}
+void sockopt_lock_sock(struct sock *sk);
+void sockopt_release_sock(struct sock *sk);
+bool sockopt_ns_capable(struct user_namespace *ns, int cap);
+bool sockopt_capable(int cap);
+
/* Used by processes to "lock" a socket state, so that
* interrupts and bottom half handlers won't change it
* from under us. It essentially blocks any incoming
@@ -1848,9 +1862,13 @@ void sock_pfree(struct sk_buff *skb);
#define sock_edemux sock_efree
#endif
+int sk_setsockopt(struct sock *sk, int level, int optname,
+ sockptr_t optval, unsigned int optlen);
int sock_setsockopt(struct socket *sock, int level, int op,
sockptr_t optval, unsigned int optlen);
+int sk_getsockopt(struct sock *sk, int level, int optname,
+ sockptr_t optval, sockptr_t optlen);
int sock_getsockopt(struct socket *sock, int level, int op,
char __user *optval, int __user *optlen);
int sock_gettstamp(struct socket *sock, void __user *userstamp,
diff --git a/include/net/tcp.h b/include/net/tcp.h
index d10962b9f0d0..14d45661a84d 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -327,6 +327,8 @@ void tcp_remove_empty_skb(struct sock *sk);
int tcp_v4_tw_remember_stamp(struct inet_timewait_sock *tw);
int tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size);
int tcp_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t size);
+int tcp_sendmsg_fastopen(struct sock *sk, struct msghdr *msg, int *copied,
+ size_t size, struct ubuf_info *uarg);
int tcp_sendpage(struct sock *sk, struct page *page, int offset, size_t size,
int flags);
int tcp_sendpage_locked(struct sock *sk, struct page *page, int offset,
@@ -346,6 +348,7 @@ void tcp_rcv_established(struct sock *sk, struct sk_buff *skb);
void tcp_rcv_space_adjust(struct sock *sk);
int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp);
void tcp_twsk_destructor(struct sock *sk);
+void tcp_twsk_purge(struct list_head *net_exit_list, int family);
ssize_t tcp_splice_read(struct socket *sk, loff_t *ppos,
struct pipe_inode_info *pipe, size_t len,
unsigned int flags);
@@ -402,9 +405,13 @@ void tcp_init_sock(struct sock *sk);
void tcp_init_transfer(struct sock *sk, int bpf_op, struct sk_buff *skb);
__poll_t tcp_poll(struct file *file, struct socket *sock,
struct poll_table_struct *wait);
+int do_tcp_getsockopt(struct sock *sk, int level,
+ int optname, sockptr_t optval, sockptr_t optlen);
int tcp_getsockopt(struct sock *sk, int level, int optname,
char __user *optval, int __user *optlen);
bool tcp_bpf_bypass_getsockopt(int level, int optname);
+int do_tcp_setsockopt(struct sock *sk, int level, int optname,
+ sockptr_t optval, unsigned int optlen);
int tcp_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval,
unsigned int optlen);
void tcp_set_keepalive(struct sock *sk, int val);
@@ -1295,11 +1302,14 @@ static inline bool tcp_is_cwnd_limited(const struct sock *sk)
{
const struct tcp_sock *tp = tcp_sk(sk);
+ if (tp->is_cwnd_limited)
+ return true;
+
/* If in slow start, ensure cwnd grows to twice what was ACKed. */
if (tcp_in_slow_start(tp))
return tcp_snd_cwnd(tp) < 2 * tp->max_packets_out;
- return tp->is_cwnd_limited;
+ return false;
}
/* BBR congestion control needs pacing.
diff --git a/include/net/tls.h b/include/net/tls.h
index cb205f9d9473..154949c7b0c8 100644
--- a/include/net/tls.h
+++ b/include/net/tls.h
@@ -51,6 +51,16 @@
struct tls_rec;
+struct tls_cipher_size_desc {
+ unsigned int iv;
+ unsigned int key;
+ unsigned int salt;
+ unsigned int tag;
+ unsigned int rec_seq;
+};
+
+extern const struct tls_cipher_size_desc tls_cipher_size_desc[];
+
/* Maximum data size carried in a TLS record */
#define TLS_MAX_PAYLOAD_SIZE ((size_t)1 << 14)
diff --git a/include/net/udp.h b/include/net/udp.h
index 5ee88ddf79c3..fee053bcd17c 100644
--- a/include/net/udp.h
+++ b/include/net/udp.h
@@ -247,7 +247,7 @@ static inline bool udp_sk_bound_dev_eq(struct net *net, int bound_dev_if,
}
/* net/ipv4/udp.c */
-void udp_destruct_sock(struct sock *sk);
+void udp_destruct_common(struct sock *sk);
void skb_consume_udp(struct sock *sk, struct sk_buff *skb, int len);
int __udp_enqueue_schedule_skb(struct sock *sk, struct sk_buff *skb);
void udp_skb_destructor(struct sock *sk, struct sk_buff *skb);
diff --git a/include/net/udplite.h b/include/net/udplite.h
index 0143b373602e..299c14ce2bb9 100644
--- a/include/net/udplite.h
+++ b/include/net/udplite.h
@@ -25,14 +25,6 @@ static __inline__ int udplite_getfrag(void *from, char *to, int offset,
return copy_from_iter_full(to, len, &msg->msg_iter) ? 0 : -EFAULT;
}
-/* Designate sk as UDP-Lite socket */
-static inline int udplite_sk_init(struct sock *sk)
-{
- udp_init_sock(sk);
- udp_sk(sk)->pcflag = UDPLITE_BIT;
- return 0;
-}
-
/*
* Checksumming routines
*/
diff --git a/include/net/xdp.h b/include/net/xdp.h
index 04c852c7a77f..55dbc68bfffc 100644
--- a/include/net/xdp.h
+++ b/include/net/xdp.h
@@ -164,13 +164,13 @@ struct xdp_frame {
void *data;
u16 len;
u16 headroom;
- u32 metasize:8;
- u32 frame_sz:24;
+ u32 metasize; /* uses lower 8-bits */
/* Lifetime of xdp_rxq_info is limited to NAPI/enqueue time,
* while mem info is valid on remote CPU.
*/
struct xdp_mem_info mem;
struct net_device *dev_rx; /* used by cpumap */
+ u32 frame_sz;
u32 flags; /* supported values defined in xdp_buff_flags */
};
diff --git a/include/net/xdp_sock_drv.h b/include/net/xdp_sock_drv.h
index 0e58c38ce0c1..9c0d860609ba 100644
--- a/include/net/xdp_sock_drv.h
+++ b/include/net/xdp_sock_drv.h
@@ -9,6 +9,9 @@
#include <net/xdp_sock.h>
#include <net/xsk_buff_pool.h>
+#define XDP_UMEM_MIN_CHUNK_SHIFT 11
+#define XDP_UMEM_MIN_CHUNK_SIZE (1 << XDP_UMEM_MIN_CHUNK_SHIFT)
+
#ifdef CONFIG_XDP_SOCKETS
void xsk_tx_completed(struct xsk_buff_pool *pool, u32 nb_entries);
@@ -104,13 +107,6 @@ static inline void xsk_buff_free(struct xdp_buff *xdp)
xp_free(xskb);
}
-static inline void xsk_buff_discard(struct xdp_buff *xdp)
-{
- struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);
-
- xp_release(xskb);
-}
-
static inline void xsk_buff_set_size(struct xdp_buff *xdp, u32 size)
{
xdp->data = xdp->data_hard_start + XDP_PACKET_HEADROOM;
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index 6e8fa98f786f..dbc81f5eb553 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -312,9 +312,15 @@ struct km_event {
struct net *net;
};
+struct xfrm_if_decode_session_result {
+ struct net *net;
+ u32 if_id;
+};
+
struct xfrm_if_cb {
- struct xfrm_if *(*decode_session)(struct sk_buff *skb,
- unsigned short family);
+ bool (*decode_session)(struct sk_buff *skb,
+ unsigned short family,
+ struct xfrm_if_decode_session_result *res);
};
void xfrm_if_register_cb(const struct xfrm_if_cb *ifcb);
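Illustrative shape of the reworked decode_session callback (the lookup helper below is hypothetical, not the in-tree xfrm interface driver): on a match, the callback fills in the result structure and returns true instead of returning a struct xfrm_if pointer.

/* Hypothetical lookup, standing in for however a driver maps an skb to its
 * xfrm interface.
 */
struct xfrm_if *example_lookup(struct sk_buff *skb, unsigned short family);

static bool example_decode_session(struct sk_buff *skb, unsigned short family,
				   struct xfrm_if_decode_session_result *res)
{
	struct xfrm_if *xi = example_lookup(skb, family);

	if (!xi)
		return false;

	res->net = xi->net;
	res->if_id = xi->p.if_id;
	return true;
}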
@@ -399,7 +405,8 @@ struct xfrm_type {
#define XFRM_TYPE_LOCAL_COADDR 4
#define XFRM_TYPE_REMOTE_COADDR 8
- int (*init_state)(struct xfrm_state *x);
+ int (*init_state)(struct xfrm_state *x,
+ struct netlink_ext_ack *extack);
void (*destructor)(struct xfrm_state *);
int (*input)(struct xfrm_state *, struct sk_buff *skb);
int (*output)(struct xfrm_state *, struct sk_buff *pskb);
@@ -985,6 +992,7 @@ void xfrm_dst_ifdown(struct dst_entry *dst, struct net_device *dev);
struct xfrm_if_parms {
int link; /* ifindex of underlying L2 interface */
u32 if_id; /* interface identifier */
+ bool collect_md;
};
struct xfrm_if {
@@ -1573,9 +1581,10 @@ int xfrm_dev_state_flush(struct net *net, struct net_device *dev, bool task_vali
void xfrm_sad_getinfo(struct net *net, struct xfrmk_sadinfo *si);
void xfrm_spd_getinfo(struct net *net, struct xfrmk_spdinfo *si);
u32 xfrm_replay_seqhi(struct xfrm_state *x, __be32 net_seq);
-int xfrm_init_replay(struct xfrm_state *x);
+int xfrm_init_replay(struct xfrm_state *x, struct netlink_ext_ack *extack);
u32 xfrm_state_mtu(struct xfrm_state *x, int mtu);
-int __xfrm_init_state(struct xfrm_state *x, bool init_replay, bool offload);
+int __xfrm_init_state(struct xfrm_state *x, bool init_replay, bool offload,
+ struct netlink_ext_ack *extack);
int xfrm_init_state(struct xfrm_state *x);
int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type);
int xfrm_input_resume(struct sk_buff *skb, int nexthdr);
@@ -1879,7 +1888,8 @@ void xfrm_dev_resume(struct sk_buff *skb);
void xfrm_dev_backlog(struct softnet_data *sd);
struct sk_buff *validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t features, bool *again);
int xfrm_dev_state_add(struct net *net, struct xfrm_state *x,
- struct xfrm_user_offload *xuo);
+ struct xfrm_user_offload *xuo,
+ struct netlink_ext_ack *extack);
bool xfrm_dev_offload_ok(struct sk_buff *skb, struct xfrm_state *x);
static inline void xfrm_dev_state_advance_esn(struct xfrm_state *x)
@@ -1942,7 +1952,7 @@ static inline struct sk_buff *validate_xmit_xfrm(struct sk_buff *skb, netdev_fea
return skb;
}
-static inline int xfrm_dev_state_add(struct net *net, struct xfrm_state *x, struct xfrm_user_offload *xuo)
+static inline int xfrm_dev_state_add(struct net *net, struct xfrm_state *x, struct xfrm_user_offload *xuo, struct netlink_ext_ack *extack)
{
return 0;
}
diff --git a/include/net/xsk_buff_pool.h b/include/net/xsk_buff_pool.h
index 647722e847b4..f787c3f524b0 100644
--- a/include/net/xsk_buff_pool.h
+++ b/include/net/xsk_buff_pool.h
@@ -95,7 +95,7 @@ struct xsk_buff_pool *xp_create_and_assign_umem(struct xdp_sock *xs,
struct xdp_umem *umem);
int xp_assign_dev(struct xsk_buff_pool *pool, struct net_device *dev,
u16 queue_id, u16 flags);
-int xp_assign_dev_shared(struct xsk_buff_pool *pool, struct xdp_umem *umem,
+int xp_assign_dev_shared(struct xsk_buff_pool *pool, struct xdp_sock *umem_xs,
struct net_device *dev, u16 queue_id);
int xp_alloc_tx_descs(struct xsk_buff_pool *pool, struct xdp_sock *xs);
void xp_destroy(struct xsk_buff_pool *pool);
diff --git a/include/rdma/ib_cm.h b/include/rdma/ib_cm.h
index e23eb357b761..a2ac62b4a6cf 100644
--- a/include/rdma/ib_cm.h
+++ b/include/rdma/ib_cm.h
@@ -294,7 +294,6 @@ struct ib_cm_id {
void *context;
struct ib_device *device;
__be64 service_id;
- __be64 service_mask;
enum ib_cm_state state; /* internal CM/debug use */
enum ib_cm_lap_state lap_state; /* internal CM/debug use */
__be32 local_id;
@@ -340,13 +339,8 @@ void ib_destroy_cm_id(struct ib_cm_id *cm_id);
 * and service ID resolution requests. The service ID should be specified in
* network-byte order. If set to IB_CM_ASSIGN_SERVICE_ID, the CM will
* assign a service ID to the caller.
- * @service_mask: Mask applied to service ID used to listen across a
- * range of service IDs. If set to 0, the service ID is matched
- * exactly. This parameter is ignored if %service_id is set to
- * IB_CM_ASSIGN_SERVICE_ID.
*/
-int ib_cm_listen(struct ib_cm_id *cm_id, __be64 service_id,
- __be64 service_mask);
+int ib_cm_listen(struct ib_cm_id *cm_id, __be64 service_id);
struct ib_cm_id *ib_cm_insert_listen(struct ib_device *device,
ib_cm_handler cm_handler,
@@ -354,6 +348,8 @@ struct ib_cm_id *ib_cm_insert_listen(struct ib_device *device,
struct ib_cm_req_param {
struct sa_path_rec *primary_path;
+ struct sa_path_rec *primary_path_inbound;
+ struct sa_path_rec *primary_path_outbound;
struct sa_path_rec *alternate_path;
const struct ib_gid_attr *ppath_sgid_attr;
__be64 service_id;
diff --git a/include/rdma/ib_sa.h b/include/rdma/ib_sa.h
index 3634d4cc7a56..e930bec33b31 100644
--- a/include/rdma/ib_sa.h
+++ b/include/rdma/ib_sa.h
@@ -186,6 +186,7 @@ struct sa_path_rec {
struct sa_path_rec_opa opa;
};
enum sa_path_rec_type rec_type;
+ u32 flags;
};
static inline enum ib_gid_type
@@ -413,7 +414,7 @@ int ib_sa_path_rec_get(struct ib_sa_client *client, struct ib_device *device,
ib_sa_comp_mask comp_mask, unsigned long timeout_ms,
gfp_t gfp_mask,
void (*callback)(int status, struct sa_path_rec *resp,
- void *context),
+ int num_prs, void *context),
void *context, struct ib_sa_query **query);
struct ib_sa_multicast {
diff --git a/include/rdma/rdma_cm.h b/include/rdma/rdma_cm.h
index 5b18e2e36ee6..cdc7cafab572 100644
--- a/include/rdma/rdma_cm.h
+++ b/include/rdma/rdma_cm.h
@@ -49,10 +49,21 @@ struct rdma_addr {
struct rdma_dev_addr dev_addr;
};
+#define RDMA_PRIMARY_PATH_MAX_REC_NUM 3
struct rdma_route {
struct rdma_addr addr;
struct sa_path_rec *path_rec;
- int num_paths;
+
+ /* Optional path records of primary path */
+ struct sa_path_rec *path_rec_inbound;
+ struct sa_path_rec *path_rec_outbound;
+
+ /*
+ * 0 - Neither primary nor alternate path is available
+ * 1 - Only primary path is available
+ * 2 - Both primary and alternate paths are available
+ */
+ int num_pri_alt_paths;
};
struct rdma_conn_param {
diff --git a/include/rdma/rdma_vt.h b/include/rdma/rdma_vt.h
index 2dafd7dbe893..c429d6ddb129 100644
--- a/include/rdma/rdma_vt.h
+++ b/include/rdma/rdma_vt.h
@@ -445,7 +445,7 @@ static inline void rvt_set_ibdev_name(struct rvt_dev_info *rdi,
* to work by setting the name manually here.
*/
dev_set_name(&rdi->ibdev.dev, fmt, name, unit);
- strlcpy(rdi->ibdev.name, dev_name(&rdi->ibdev.dev), IB_DEVICE_NAME_MAX);
+ strscpy(rdi->ibdev.name, dev_name(&rdi->ibdev.dev), IB_DEVICE_NAME_MAX);
}
/**
diff --git a/include/rdma/uverbs_ioctl.h b/include/rdma/uverbs_ioctl.h
index 23bb404aba12..9d45a5b20316 100644
--- a/include/rdma/uverbs_ioctl.h
+++ b/include/rdma/uverbs_ioctl.h
@@ -24,6 +24,7 @@ enum uverbs_attr_type {
UVERBS_ATTR_TYPE_PTR_OUT,
UVERBS_ATTR_TYPE_IDR,
UVERBS_ATTR_TYPE_FD,
+ UVERBS_ATTR_TYPE_RAW_FD,
UVERBS_ATTR_TYPE_ENUM_IN,
UVERBS_ATTR_TYPE_IDRS_ARRAY,
};
@@ -521,6 +522,11 @@ struct uapi_definition {
.u.obj.access = _access, \
__VA_ARGS__ } })
+#define UVERBS_ATTR_RAW_FD(_attr_id, ...) \
+ (&(const struct uverbs_attr_def){ \
+ .id = (_attr_id), \
+ .attr = { .type = UVERBS_ATTR_TYPE_RAW_FD, __VA_ARGS__ } })
+
#define UVERBS_ATTR_PTR_IN(_attr_id, _type, ...) \
(&(const struct uverbs_attr_def){ \
.id = _attr_id, \
@@ -999,4 +1005,11 @@ _uverbs_get_const_unsigned(u64 *to,
uverbs_get_const_default_unsigned(_to, _attrs_bundle, _idx, \
_default))
+static inline int
+uverbs_get_raw_fd(int *to, const struct uverbs_attr_bundle *attrs_bundle,
+ size_t idx)
+{
+ return uverbs_get_const_signed(to, attrs_bundle, idx);
+}
+
#endif
diff --git a/include/scsi/scsi_cmnd.h b/include/scsi/scsi_cmnd.h
index bac55decf900..7d3622db38ed 100644
--- a/include/scsi/scsi_cmnd.h
+++ b/include/scsi/scsi_cmnd.h
@@ -201,7 +201,7 @@ static inline unsigned int scsi_get_resid(struct scsi_cmnd *cmd)
for_each_sg(scsi_sglist(cmd), sg, nseg, __i)
static inline int scsi_sg_copy_from_buffer(struct scsi_cmnd *cmd,
- void *buf, int buflen)
+ const void *buf, int buflen)
{
return sg_copy_from_buffer(scsi_sglist(cmd), scsi_sg_count(cmd),
buf, buflen);
diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
index 2493bd65351a..c36656d8ac6c 100644
--- a/include/scsi/scsi_device.h
+++ b/include/scsi/scsi_device.h
@@ -231,6 +231,7 @@ struct scsi_device {
atomic_t iorequest_cnt;
atomic_t iodone_cnt;
atomic_t ioerr_cnt;
+ atomic_t iotmo_cnt;
struct device sdev_gendev,
sdev_dev;
diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h
index 9b0a028bf053..fcf25f1642a3 100644
--- a/include/scsi/scsi_host.h
+++ b/include/scsi/scsi_host.h
@@ -276,7 +276,7 @@ struct scsi_host_template {
*
* Status: OPTIONAL
*/
- int (* map_queues)(struct Scsi_Host *shost);
+ void (* map_queues)(struct Scsi_Host *shost);
/*
* SCSI interface of blk_poll - poll for IO completions.
diff --git a/include/scsi/scsi_status.h b/include/scsi/scsi_status.h
index 31d30cee1869..9cb85262de64 100644
--- a/include/scsi/scsi_status.h
+++ b/include/scsi/scsi_status.h
@@ -62,12 +62,12 @@ enum scsi_host_status {
* recover the link. Transport class will
* retry or fail IO */
DID_TRANSPORT_FAILFAST = 0x0f, /* Transport class fastfailed the io */
- DID_TARGET_FAILURE = 0x10, /* Permanent target failure, do not retry on
- * other paths */
- DID_NEXUS_FAILURE = 0x11, /* Permanent nexus failure, retry on other
- * paths might yield different results */
- DID_ALLOC_FAILURE = 0x12, /* Space allocation on the device failed */
- DID_MEDIUM_ERROR = 0x13, /* Medium error */
+ /*
+ * We used to have DID_TARGET_FAILURE, DID_NEXUS_FAILURE,
+ * DID_ALLOC_FAILURE and DID_MEDIUM_ERROR at 0x10 - 0x13. For compat
+ * with userspace apps that parse the host byte for SG IO, we leave
+ * that block of codes unused and start at 0x14 below.
+ */
DID_TRANSPORT_MARGINAL = 0x14, /* Transport marginal errors */
};
diff --git a/include/soc/at91/pm.h b/include/soc/at91/pm.h
deleted file mode 100644
index 7a41e53a3ffa..000000000000
--- a/include/soc/at91/pm.h
+++ /dev/null
@@ -1,16 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Atmel Power Management
- *
- * Copyright (C) 2020 Atmel
- *
- * Author: Lee Jones <lee.jones@linaro.org>
- */
-
-#ifndef __SOC_ATMEL_PM_H
-#define __SOC_ATMEL_PM_H
-
-void at91_pinctrl_gpio_suspend(void);
-void at91_pinctrl_gpio_resume(void);
-
-#endif /* __SOC_ATMEL_PM_H */
diff --git a/include/soc/fsl/qman.h b/include/soc/fsl/qman.h
index 59eeba31c192..0d3d6beb7fdb 100644
--- a/include/soc/fsl/qman.h
+++ b/include/soc/fsl/qman.h
@@ -1172,6 +1172,15 @@ int qman_delete_cgr(struct qman_cgr *cgr);
void qman_delete_cgr_safe(struct qman_cgr *cgr);
/**
+ * qman_update_cgr_safe - Modifies a congestion group object from any CPU
+ * @cgr: the 'cgr' object to modify
+ * @opts: state of the CGR settings
+ *
+ * This will select the proper CPU and modify the CGR settings.
+ */
+int qman_update_cgr_safe(struct qman_cgr *cgr, struct qm_mcc_initcgr *opts);
+
+/**
* qman_query_cgr_congested - Queries CGR's congestion status
* @cgr: the 'cgr' object to query
* @result: returns 'cgr's congestion status, 1 (true) if congested
diff --git a/include/soc/mediatek/smi.h b/include/soc/mediatek/smi.h
index 11f7d6b59642..dfd8efca5e60 100644
--- a/include/soc/mediatek/smi.h
+++ b/include/soc/mediatek/smi.h
@@ -11,6 +11,11 @@
#if IS_ENABLED(CONFIG_MTK_SMI)
+enum iommu_atf_cmd {
+ IOMMU_ATF_CMD_CONFIG_SMI_LARB, /* For mm master to en/disable iommu */
+ IOMMU_ATF_CMD_MAX,
+};
+
#define MTK_SMI_MMU_EN(port) BIT(port)
struct mtk_smi_larb_iommu {
diff --git a/include/soc/microchip/mpfs.h b/include/soc/microchip/mpfs.h
index 6466515262bd..f916dcde457f 100644
--- a/include/soc/microchip/mpfs.h
+++ b/include/soc/microchip/mpfs.h
@@ -40,4 +40,12 @@ struct mpfs_sys_controller *mpfs_sys_controller_get(struct device *dev);
#endif /* if IS_ENABLED(CONFIG_POLARFIRE_SOC_SYS_CTRL) */
+#if IS_ENABLED(CONFIG_MCHP_CLK_MPFS)
+
+u32 mpfs_reset_read(struct device *dev);
+
+void mpfs_reset_write(struct device *dev, u32 val);
+
+#endif /* if IS_ENABLED(CONFIG_MCHP_CLK_MPFS) */
+
#endif /* __SOC_MPFS_H__ */
diff --git a/include/soc/mscc/ocelot.h b/include/soc/mscc/ocelot.h
index 2edea901bbd5..967ba30ea636 100644
--- a/include/soc/mscc/ocelot.h
+++ b/include/soc/mscc/ocelot.h
@@ -392,7 +392,7 @@ enum ocelot_reg {
SYS_COUNT_TX_GREEN_PRIO_5,
SYS_COUNT_TX_GREEN_PRIO_6,
SYS_COUNT_TX_GREEN_PRIO_7,
- SYS_COUNT_TX_AGING,
+ SYS_COUNT_TX_AGED,
SYS_COUNT_DROP_LOCAL,
SYS_COUNT_DROP_TAIL,
SYS_COUNT_DROP_YELLOW_PRIO_0,
@@ -411,6 +411,10 @@ enum ocelot_reg {
SYS_COUNT_DROP_GREEN_PRIO_5,
SYS_COUNT_DROP_GREEN_PRIO_6,
SYS_COUNT_DROP_GREEN_PRIO_7,
+ SYS_COUNT_SF_MATCHING_FRAMES,
+ SYS_COUNT_SF_NOT_PASSING_FRAMES,
+ SYS_COUNT_SF_NOT_PASSING_SDU,
+ SYS_COUNT_SF_RED_FRAMES,
SYS_RESET_CFG,
SYS_SR_ETYPE_CFG,
SYS_VLAN_ETYPE_CFG,
@@ -433,7 +437,6 @@ enum ocelot_reg {
SYS_MMGT_FAST,
SYS_EVENTS_DIF,
SYS_EVENTS_CORE,
- SYS_CNT,
SYS_PTP_STATUS,
SYS_PTP_TXSTAMP,
SYS_PTP_NXT,
@@ -695,6 +698,112 @@ struct ocelot_stat_layout {
char name[ETH_GSTRING_LEN];
};
+/* 32-bit counter checked for wraparound by ocelot_port_update_stats()
+ * and copied to ocelot->stats.
+ */
+#define OCELOT_STAT(kind) \
+ [OCELOT_STAT_ ## kind] = { .reg = SYS_COUNT_ ## kind }
+/* Same as above, except also exported to ethtool -S. Standard counters should
+ * only be exposed to more specific interfaces rather than by their string name.
+ */
+#define OCELOT_STAT_ETHTOOL(kind, ethtool_name) \
+ [OCELOT_STAT_ ## kind] = { .reg = SYS_COUNT_ ## kind, .name = ethtool_name }
+
+#define OCELOT_COMMON_STATS \
+ OCELOT_STAT_ETHTOOL(RX_OCTETS, "rx_octets"), \
+ OCELOT_STAT_ETHTOOL(RX_UNICAST, "rx_unicast"), \
+ OCELOT_STAT_ETHTOOL(RX_MULTICAST, "rx_multicast"), \
+ OCELOT_STAT_ETHTOOL(RX_BROADCAST, "rx_broadcast"), \
+ OCELOT_STAT_ETHTOOL(RX_SHORTS, "rx_shorts"), \
+ OCELOT_STAT_ETHTOOL(RX_FRAGMENTS, "rx_fragments"), \
+ OCELOT_STAT_ETHTOOL(RX_JABBERS, "rx_jabbers"), \
+ OCELOT_STAT_ETHTOOL(RX_CRC_ALIGN_ERRS, "rx_crc_align_errs"), \
+ OCELOT_STAT_ETHTOOL(RX_SYM_ERRS, "rx_sym_errs"), \
+ OCELOT_STAT_ETHTOOL(RX_64, "rx_frames_below_65_octets"), \
+ OCELOT_STAT_ETHTOOL(RX_65_127, "rx_frames_65_to_127_octets"), \
+ OCELOT_STAT_ETHTOOL(RX_128_255, "rx_frames_128_to_255_octets"), \
+ OCELOT_STAT_ETHTOOL(RX_256_511, "rx_frames_256_to_511_octets"), \
+ OCELOT_STAT_ETHTOOL(RX_512_1023, "rx_frames_512_to_1023_octets"), \
+ OCELOT_STAT_ETHTOOL(RX_1024_1526, "rx_frames_1024_to_1526_octets"), \
+ OCELOT_STAT_ETHTOOL(RX_1527_MAX, "rx_frames_over_1526_octets"), \
+ OCELOT_STAT_ETHTOOL(RX_PAUSE, "rx_pause"), \
+ OCELOT_STAT_ETHTOOL(RX_CONTROL, "rx_control"), \
+ OCELOT_STAT_ETHTOOL(RX_LONGS, "rx_longs"), \
+ OCELOT_STAT_ETHTOOL(RX_CLASSIFIED_DROPS, "rx_classified_drops"), \
+ OCELOT_STAT_ETHTOOL(RX_RED_PRIO_0, "rx_red_prio_0"), \
+ OCELOT_STAT_ETHTOOL(RX_RED_PRIO_1, "rx_red_prio_1"), \
+ OCELOT_STAT_ETHTOOL(RX_RED_PRIO_2, "rx_red_prio_2"), \
+ OCELOT_STAT_ETHTOOL(RX_RED_PRIO_3, "rx_red_prio_3"), \
+ OCELOT_STAT_ETHTOOL(RX_RED_PRIO_4, "rx_red_prio_4"), \
+ OCELOT_STAT_ETHTOOL(RX_RED_PRIO_5, "rx_red_prio_5"), \
+ OCELOT_STAT_ETHTOOL(RX_RED_PRIO_6, "rx_red_prio_6"), \
+ OCELOT_STAT_ETHTOOL(RX_RED_PRIO_7, "rx_red_prio_7"), \
+ OCELOT_STAT_ETHTOOL(RX_YELLOW_PRIO_0, "rx_yellow_prio_0"), \
+ OCELOT_STAT_ETHTOOL(RX_YELLOW_PRIO_1, "rx_yellow_prio_1"), \
+ OCELOT_STAT_ETHTOOL(RX_YELLOW_PRIO_2, "rx_yellow_prio_2"), \
+ OCELOT_STAT_ETHTOOL(RX_YELLOW_PRIO_3, "rx_yellow_prio_3"), \
+ OCELOT_STAT_ETHTOOL(RX_YELLOW_PRIO_4, "rx_yellow_prio_4"), \
+ OCELOT_STAT_ETHTOOL(RX_YELLOW_PRIO_5, "rx_yellow_prio_5"), \
+ OCELOT_STAT_ETHTOOL(RX_YELLOW_PRIO_6, "rx_yellow_prio_6"), \
+ OCELOT_STAT_ETHTOOL(RX_YELLOW_PRIO_7, "rx_yellow_prio_7"), \
+ OCELOT_STAT_ETHTOOL(RX_GREEN_PRIO_0, "rx_green_prio_0"), \
+ OCELOT_STAT_ETHTOOL(RX_GREEN_PRIO_1, "rx_green_prio_1"), \
+ OCELOT_STAT_ETHTOOL(RX_GREEN_PRIO_2, "rx_green_prio_2"), \
+ OCELOT_STAT_ETHTOOL(RX_GREEN_PRIO_3, "rx_green_prio_3"), \
+ OCELOT_STAT_ETHTOOL(RX_GREEN_PRIO_4, "rx_green_prio_4"), \
+ OCELOT_STAT_ETHTOOL(RX_GREEN_PRIO_5, "rx_green_prio_5"), \
+ OCELOT_STAT_ETHTOOL(RX_GREEN_PRIO_6, "rx_green_prio_6"), \
+ OCELOT_STAT_ETHTOOL(RX_GREEN_PRIO_7, "rx_green_prio_7"), \
+ OCELOT_STAT_ETHTOOL(TX_OCTETS, "tx_octets"), \
+ OCELOT_STAT_ETHTOOL(TX_UNICAST, "tx_unicast"), \
+ OCELOT_STAT_ETHTOOL(TX_MULTICAST, "tx_multicast"), \
+ OCELOT_STAT_ETHTOOL(TX_BROADCAST, "tx_broadcast"), \
+ OCELOT_STAT_ETHTOOL(TX_COLLISION, "tx_collision"), \
+ OCELOT_STAT_ETHTOOL(TX_DROPS, "tx_drops"), \
+ OCELOT_STAT_ETHTOOL(TX_PAUSE, "tx_pause"), \
+ OCELOT_STAT_ETHTOOL(TX_64, "tx_frames_below_65_octets"), \
+ OCELOT_STAT_ETHTOOL(TX_65_127, "tx_frames_65_to_127_octets"), \
+ OCELOT_STAT_ETHTOOL(TX_128_255, "tx_frames_128_255_octets"), \
+ OCELOT_STAT_ETHTOOL(TX_256_511, "tx_frames_256_511_octets"), \
+ OCELOT_STAT_ETHTOOL(TX_512_1023, "tx_frames_512_1023_octets"), \
+ OCELOT_STAT_ETHTOOL(TX_1024_1526, "tx_frames_1024_1526_octets"), \
+ OCELOT_STAT_ETHTOOL(TX_1527_MAX, "tx_frames_over_1526_octets"), \
+ OCELOT_STAT_ETHTOOL(TX_YELLOW_PRIO_0, "tx_yellow_prio_0"), \
+ OCELOT_STAT_ETHTOOL(TX_YELLOW_PRIO_1, "tx_yellow_prio_1"), \
+ OCELOT_STAT_ETHTOOL(TX_YELLOW_PRIO_2, "tx_yellow_prio_2"), \
+ OCELOT_STAT_ETHTOOL(TX_YELLOW_PRIO_3, "tx_yellow_prio_3"), \
+ OCELOT_STAT_ETHTOOL(TX_YELLOW_PRIO_4, "tx_yellow_prio_4"), \
+ OCELOT_STAT_ETHTOOL(TX_YELLOW_PRIO_5, "tx_yellow_prio_5"), \
+ OCELOT_STAT_ETHTOOL(TX_YELLOW_PRIO_6, "tx_yellow_prio_6"), \
+ OCELOT_STAT_ETHTOOL(TX_YELLOW_PRIO_7, "tx_yellow_prio_7"), \
+ OCELOT_STAT_ETHTOOL(TX_GREEN_PRIO_0, "tx_green_prio_0"), \
+ OCELOT_STAT_ETHTOOL(TX_GREEN_PRIO_1, "tx_green_prio_1"), \
+ OCELOT_STAT_ETHTOOL(TX_GREEN_PRIO_2, "tx_green_prio_2"), \
+ OCELOT_STAT_ETHTOOL(TX_GREEN_PRIO_3, "tx_green_prio_3"), \
+ OCELOT_STAT_ETHTOOL(TX_GREEN_PRIO_4, "tx_green_prio_4"), \
+ OCELOT_STAT_ETHTOOL(TX_GREEN_PRIO_5, "tx_green_prio_5"), \
+ OCELOT_STAT_ETHTOOL(TX_GREEN_PRIO_6, "tx_green_prio_6"), \
+ OCELOT_STAT_ETHTOOL(TX_GREEN_PRIO_7, "tx_green_prio_7"), \
+ OCELOT_STAT_ETHTOOL(TX_AGED, "tx_aged"), \
+ OCELOT_STAT_ETHTOOL(DROP_LOCAL, "drop_local"), \
+ OCELOT_STAT_ETHTOOL(DROP_TAIL, "drop_tail"), \
+ OCELOT_STAT_ETHTOOL(DROP_YELLOW_PRIO_0, "drop_yellow_prio_0"), \
+ OCELOT_STAT_ETHTOOL(DROP_YELLOW_PRIO_1, "drop_yellow_prio_1"), \
+ OCELOT_STAT_ETHTOOL(DROP_YELLOW_PRIO_2, "drop_yellow_prio_2"), \
+ OCELOT_STAT_ETHTOOL(DROP_YELLOW_PRIO_3, "drop_yellow_prio_3"), \
+ OCELOT_STAT_ETHTOOL(DROP_YELLOW_PRIO_4, "drop_yellow_prio_4"), \
+ OCELOT_STAT_ETHTOOL(DROP_YELLOW_PRIO_5, "drop_yellow_prio_5"), \
+ OCELOT_STAT_ETHTOOL(DROP_YELLOW_PRIO_6, "drop_yellow_prio_6"), \
+ OCELOT_STAT_ETHTOOL(DROP_YELLOW_PRIO_7, "drop_yellow_prio_7"), \
+ OCELOT_STAT_ETHTOOL(DROP_GREEN_PRIO_0, "drop_green_prio_0"), \
+ OCELOT_STAT_ETHTOOL(DROP_GREEN_PRIO_1, "drop_green_prio_1"), \
+ OCELOT_STAT_ETHTOOL(DROP_GREEN_PRIO_2, "drop_green_prio_2"), \
+ OCELOT_STAT_ETHTOOL(DROP_GREEN_PRIO_3, "drop_green_prio_3"), \
+ OCELOT_STAT_ETHTOOL(DROP_GREEN_PRIO_4, "drop_green_prio_4"), \
+ OCELOT_STAT_ETHTOOL(DROP_GREEN_PRIO_5, "drop_green_prio_5"), \
+ OCELOT_STAT_ETHTOOL(DROP_GREEN_PRIO_6, "drop_green_prio_6"), \
+ OCELOT_STAT_ETHTOOL(DROP_GREEN_PRIO_7, "drop_green_prio_7")
+
struct ocelot_stats_region {
struct list_head node;
u32 base;
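Illustrative only (driver-side, not part of this header): the macros above expand into a layout table keyed by the OCELOT_STAT_* enum. A hedged sketch of how a driver might build it:

/* Hypothetical driver table: OCELOT_COMMON_STATS provides the shared,
 * ethtool-visible counters; additional OCELOT_STAT(kind) entries would add
 * registers that are polled for wraparound but not named in ethtool -S.
 */
static const struct ocelot_stat_layout example_stats_layout[] = {
	OCELOT_COMMON_STATS,
};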
@@ -726,6 +835,7 @@ struct ocelot_ops {
struct flow_stats *stats);
void (*cut_through_fwd)(struct ocelot *ocelot);
void (*tas_clock_adjust)(struct ocelot *ocelot);
+ void (*update_stats)(struct ocelot *ocelot);
};
struct ocelot_vcap_policer {
@@ -763,6 +873,8 @@ struct ocelot_psfp_list {
struct list_head stream_list;
struct list_head sfi_list;
struct list_head sgi_list;
+ /* Serialize access to the lists */
+ struct mutex lock;
};
enum ocelot_sb {
@@ -898,12 +1010,15 @@ struct ocelot {
struct ocelot_psfp_list psfp;
- /* Workqueue to check statistics for overflow with its lock */
- spinlock_t stats_lock;
- u64 *stats;
+ /* Workqueue to check statistics for overflow */
struct delayed_work stats_work;
struct workqueue_struct *stats_queue;
+ /* Lock for serializing access to the statistics array */
+ spinlock_t stats_lock;
+ u64 *stats;
+ /* Lock for serializing indirect access to STAT_VIEW registers */
+ struct mutex stat_view_lock;
/* Lock for serializing access to the MAC table */
struct mutex mact_lock;
/* Lock for serializing forwarding domain changes */
@@ -1024,6 +1139,8 @@ void ocelot_deinit(struct ocelot *ocelot);
void ocelot_init_port(struct ocelot *ocelot, int port);
void ocelot_deinit_port(struct ocelot *ocelot, int port);
+void ocelot_port_setup_dsa_8021q_cpu(struct ocelot *ocelot, int cpu);
+void ocelot_port_teardown_dsa_8021q_cpu(struct ocelot *ocelot, int cpu);
void ocelot_port_assign_dsa_8021q_cpu(struct ocelot *ocelot, int port, int cpu);
void ocelot_port_unassign_dsa_8021q_cpu(struct ocelot *ocelot, int port);
u32 ocelot_port_assigned_dsa_8021q_cpu_mask(struct ocelot *ocelot, int port);
@@ -1032,6 +1149,19 @@ u32 ocelot_port_assigned_dsa_8021q_cpu_mask(struct ocelot *ocelot, int port);
void ocelot_get_strings(struct ocelot *ocelot, int port, u32 sset, u8 *data);
void ocelot_get_ethtool_stats(struct ocelot *ocelot, int port, u64 *data);
int ocelot_get_sset_count(struct ocelot *ocelot, int port, int sset);
+void ocelot_port_get_stats64(struct ocelot *ocelot, int port,
+ struct rtnl_link_stats64 *stats);
+void ocelot_port_get_pause_stats(struct ocelot *ocelot, int port,
+ struct ethtool_pause_stats *pause_stats);
+void ocelot_port_get_rmon_stats(struct ocelot *ocelot, int port,
+ struct ethtool_rmon_stats *rmon_stats,
+ const struct ethtool_rmon_hist_range **ranges);
+void ocelot_port_get_eth_ctrl_stats(struct ocelot *ocelot, int port,
+ struct ethtool_eth_ctrl_stats *ctrl_stats);
+void ocelot_port_get_eth_mac_stats(struct ocelot *ocelot, int port,
+ struct ethtool_eth_mac_stats *mac_stats);
+void ocelot_port_get_eth_phy_stats(struct ocelot *ocelot, int port,
+ struct ethtool_eth_phy_stats *phy_stats);
int ocelot_get_ts_info(struct ocelot *ocelot, int port,
struct ethtool_ts_info *info);
void ocelot_set_ageing_time(struct ocelot *ocelot, unsigned int msecs);
@@ -1099,10 +1229,12 @@ int ocelot_port_mdb_del(struct ocelot *ocelot, int port,
const struct net_device *bridge);
int ocelot_port_lag_join(struct ocelot *ocelot, int port,
struct net_device *bond,
- struct netdev_lag_upper_info *info);
+ struct netdev_lag_upper_info *info,
+ struct netlink_ext_ack *extack);
void ocelot_port_lag_leave(struct ocelot *ocelot, int port,
struct net_device *bond);
void ocelot_port_lag_change(struct ocelot *ocelot, int port, bool lag_tx_active);
+int ocelot_bond_get_id(struct ocelot *ocelot, struct net_device *bond);
int ocelot_devlink_sb_register(struct ocelot *ocelot);
void ocelot_devlink_sb_unregister(struct ocelot *ocelot);
diff --git a/include/soc/sifive/sifive_ccache.h b/include/soc/sifive/sifive_ccache.h
new file mode 100644
index 000000000000..4d4ed49388a0
--- /dev/null
+++ b/include/soc/sifive/sifive_ccache.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * SiFive Composable Cache Controller header file
+ *
+ */
+
+#ifndef __SOC_SIFIVE_CCACHE_H
+#define __SOC_SIFIVE_CCACHE_H
+
+extern int register_sifive_ccache_error_notifier(struct notifier_block *nb);
+extern int unregister_sifive_ccache_error_notifier(struct notifier_block *nb);
+
+#define SIFIVE_CCACHE_ERR_TYPE_CE 0
+#define SIFIVE_CCACHE_ERR_TYPE_UE 1
+
+#endif /* __SOC_SIFIVE_CCACHE_H */
diff --git a/include/soc/sifive/sifive_l2_cache.h b/include/soc/sifive/sifive_l2_cache.h
deleted file mode 100644
index 92ade10ed67e..000000000000
--- a/include/soc/sifive/sifive_l2_cache.h
+++ /dev/null
@@ -1,16 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * SiFive L2 Cache Controller header file
- *
- */
-
-#ifndef __SOC_SIFIVE_L2_CACHE_H
-#define __SOC_SIFIVE_L2_CACHE_H
-
-extern int register_sifive_l2_error_notifier(struct notifier_block *nb);
-extern int unregister_sifive_l2_error_notifier(struct notifier_block *nb);
-
-#define SIFIVE_L2_ERR_TYPE_CE 0
-#define SIFIVE_L2_ERR_TYPE_UE 1
-
-#endif /* __SOC_SIFIVE_L2_CACHE_H */
diff --git a/include/soc/tegra/fuse.h b/include/soc/tegra/fuse.h
index 67d2bc856fbc..977c334136e9 100644
--- a/include/soc/tegra/fuse.h
+++ b/include/soc/tegra/fuse.h
@@ -58,6 +58,7 @@ u32 tegra_read_chipid(void);
u8 tegra_get_chip_id(void);
u8 tegra_get_platform(void);
bool tegra_is_silicon(void);
+int tegra194_miscreg_mask_serror(void);
#else
static struct tegra_sku_info tegra_sku_info __maybe_unused;
@@ -95,6 +96,11 @@ static inline bool tegra_is_silicon(void)
{
return false;
}
+
+static inline int tegra194_miscreg_mask_serror(void)
+{
+ return false;
+}
#endif
struct device *tegra_soc_device_register(void);
diff --git a/include/soc/tegra/tegra-cbb.h b/include/soc/tegra/tegra-cbb.h
new file mode 100644
index 000000000000..e864c2ebe794
--- /dev/null
+++ b/include/soc/tegra/tegra-cbb.h
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved
+ */
+
+#ifndef TEGRA_CBB_H
+#define TEGRA_CBB_H
+
+#include <linux/list.h>
+
+struct tegra_cbb_error {
+ const char *code;
+ const char *source;
+ const char *desc;
+};
+
+struct tegra_cbb {
+ struct device *dev;
+ const struct tegra_cbb_ops *ops;
+ struct list_head node;
+};
+
+struct tegra_cbb_ops {
+ int (*debugfs_show)(struct tegra_cbb *cbb, struct seq_file *s, void *v);
+ int (*interrupt_enable)(struct tegra_cbb *cbb);
+ void (*error_enable)(struct tegra_cbb *cbb);
+ void (*fault_enable)(struct tegra_cbb *cbb);
+ void (*stall_enable)(struct tegra_cbb *cbb);
+ void (*error_clear)(struct tegra_cbb *cbb);
+ u32 (*get_status)(struct tegra_cbb *cbb);
+};
+
+int tegra_cbb_get_irq(struct platform_device *pdev, unsigned int *nonsec_irq,
+ unsigned int *sec_irq);
+__printf(2, 3)
+void tegra_cbb_print_err(struct seq_file *file, const char *fmt, ...);
+
+void tegra_cbb_print_cache(struct seq_file *file, u32 cache);
+void tegra_cbb_print_prot(struct seq_file *file, u32 prot);
+int tegra_cbb_register(struct tegra_cbb *cbb);
+
+void tegra_cbb_fault_enable(struct tegra_cbb *cbb);
+void tegra_cbb_stall_enable(struct tegra_cbb *cbb);
+void tegra_cbb_error_clear(struct tegra_cbb *cbb);
+u32 tegra_cbb_get_status(struct tegra_cbb *cbb);
+
+#endif /* TEGRA_CBB_H */
diff --git a/include/sound/acp62_chip_offset_byte.h b/include/sound/acp62_chip_offset_byte.h
new file mode 100644
index 000000000000..f03992f81168
--- /dev/null
+++ b/include/sound/acp62_chip_offset_byte.h
@@ -0,0 +1,444 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * AMD ACP 6.2 Register Documentation
+ *
+ * Copyright 2022 Advanced Micro Devices, Inc.
+ */
+
+#ifndef _acp_ip_OFFSET_HEADER
+#define _acp_ip_OFFSET_HEADER
+
+/* Registers from ACP_DMA block */
+#define ACP_DMA_CNTL_0 0x0000000
+#define ACP_DMA_CNTL_1 0x0000004
+#define ACP_DMA_CNTL_2 0x0000008
+#define ACP_DMA_CNTL_3 0x000000C
+#define ACP_DMA_CNTL_4 0x0000010
+#define ACP_DMA_CNTL_5 0x0000014
+#define ACP_DMA_CNTL_6 0x0000018
+#define ACP_DMA_CNTL_7 0x000001C
+#define ACP_DMA_DSCR_STRT_IDX_0 0x0000020
+#define ACP_DMA_DSCR_STRT_IDX_1 0x0000024
+#define ACP_DMA_DSCR_STRT_IDX_2 0x0000028
+#define ACP_DMA_DSCR_STRT_IDX_3 0x000002C
+#define ACP_DMA_DSCR_STRT_IDX_4 0x0000030
+#define ACP_DMA_DSCR_STRT_IDX_5 0x0000034
+#define ACP_DMA_DSCR_STRT_IDX_6 0x0000038
+#define ACP_DMA_DSCR_STRT_IDX_7 0x000003C
+#define ACP_DMA_DSCR_CNT_0 0x0000040
+#define ACP_DMA_DSCR_CNT_1 0x0000044
+#define ACP_DMA_DSCR_CNT_2 0x0000048
+#define ACP_DMA_DSCR_CNT_3 0x000004C
+#define ACP_DMA_DSCR_CNT_4 0x0000050
+#define ACP_DMA_DSCR_CNT_5 0x0000054
+#define ACP_DMA_DSCR_CNT_6 0x0000058
+#define ACP_DMA_DSCR_CNT_7 0x000005C
+#define ACP_DMA_PRIO_0 0x0000060
+#define ACP_DMA_PRIO_1 0x0000064
+#define ACP_DMA_PRIO_2 0x0000068
+#define ACP_DMA_PRIO_3 0x000006C
+#define ACP_DMA_PRIO_4 0x0000070
+#define ACP_DMA_PRIO_5 0x0000074
+#define ACP_DMA_PRIO_6 0x0000078
+#define ACP_DMA_PRIO_7 0x000007C
+#define ACP_DMA_CUR_DSCR_0 0x0000080
+#define ACP_DMA_CUR_DSCR_1 0x0000084
+#define ACP_DMA_CUR_DSCR_2 0x0000088
+#define ACP_DMA_CUR_DSCR_3 0x000008C
+#define ACP_DMA_CUR_DSCR_4 0x0000090
+#define ACP_DMA_CUR_DSCR_5 0x0000094
+#define ACP_DMA_CUR_DSCR_6 0x0000098
+#define ACP_DMA_CUR_DSCR_7 0x000009C
+#define ACP_DMA_CUR_TRANS_CNT_0 0x00000A0
+#define ACP_DMA_CUR_TRANS_CNT_1 0x00000A4
+#define ACP_DMA_CUR_TRANS_CNT_2 0x00000A8
+#define ACP_DMA_CUR_TRANS_CNT_3 0x00000AC
+#define ACP_DMA_CUR_TRANS_CNT_4 0x00000B0
+#define ACP_DMA_CUR_TRANS_CNT_5 0x00000B4
+#define ACP_DMA_CUR_TRANS_CNT_6 0x00000B8
+#define ACP_DMA_CUR_TRANS_CNT_7 0x00000BC
+#define ACP_DMA_ERR_STS_0 0x00000C0
+#define ACP_DMA_ERR_STS_1 0x00000C4
+#define ACP_DMA_ERR_STS_2 0x00000C8
+#define ACP_DMA_ERR_STS_3 0x00000CC
+#define ACP_DMA_ERR_STS_4 0x00000D0
+#define ACP_DMA_ERR_STS_5 0x00000D4
+#define ACP_DMA_ERR_STS_6 0x00000D8
+#define ACP_DMA_ERR_STS_7 0x00000DC
+#define ACP_DMA_DESC_BASE_ADDR 0x00000E0
+#define ACP_DMA_DESC_MAX_NUM_DSCR 0x00000E4
+#define ACP_DMA_CH_STS 0x00000E8
+#define ACP_DMA_CH_GROUP 0x00000EC
+#define ACP_DMA_CH_RST_STS 0x00000F0
+
+/* Registers from ACP_AXI2AXIATU block */
+#define ACPAXI2AXI_ATU_PAGE_SIZE_GRP_1 0x0000C00
+#define ACPAXI2AXI_ATU_BASE_ADDR_GRP_1 0x0000C04
+#define ACPAXI2AXI_ATU_PAGE_SIZE_GRP_2 0x0000C08
+#define ACPAXI2AXI_ATU_BASE_ADDR_GRP_2 0x0000C0C
+#define ACPAXI2AXI_ATU_PAGE_SIZE_GRP_3 0x0000C10
+#define ACPAXI2AXI_ATU_BASE_ADDR_GRP_3 0x0000C14
+#define ACPAXI2AXI_ATU_PAGE_SIZE_GRP_4 0x0000C18
+#define ACPAXI2AXI_ATU_BASE_ADDR_GRP_4 0x0000C1C
+#define ACPAXI2AXI_ATU_PAGE_SIZE_GRP_5 0x0000C20
+#define ACPAXI2AXI_ATU_BASE_ADDR_GRP_5 0x0000C24
+#define ACPAXI2AXI_ATU_PAGE_SIZE_GRP_6 0x0000C28
+#define ACPAXI2AXI_ATU_BASE_ADDR_GRP_6 0x0000C2C
+#define ACPAXI2AXI_ATU_PAGE_SIZE_GRP_7 0x0000C30
+#define ACPAXI2AXI_ATU_BASE_ADDR_GRP_7 0x0000C34
+#define ACPAXI2AXI_ATU_PAGE_SIZE_GRP_8 0x0000C38
+#define ACPAXI2AXI_ATU_BASE_ADDR_GRP_8 0x0000C3C
+#define ACPAXI2AXI_ATU_CTRL 0x0000C40
+#define ACPAXI2AXI_ATU_PAGE_SIZE_GRP_9 0x0000C44
+#define ACPAXI2AXI_ATU_BASE_ADDR_GRP_9 0x0000C48
+#define ACPAXI2AXI_ATU_PAGE_SIZE_GRP_10 0x0000C4C
+#define ACPAXI2AXI_ATU_BASE_ADDR_GRP_10 0x0000C50
+#define ACPAXI2AXI_ATU_PAGE_SIZE_GRP_11 0x0000C54
+#define ACPAXI2AXI_ATU_BASE_ADDR_GRP_11 0x0000C58
+#define ACPAXI2AXI_ATU_PAGE_SIZE_GRP_12 0x0000C5C
+#define ACPAXI2AXI_ATU_BASE_ADDR_GRP_12 0x0000C60
+#define ACPAXI2AXI_ATU_PAGE_SIZE_GRP_13 0x0000C64
+#define ACPAXI2AXI_ATU_BASE_ADDR_GRP_13 0x0000C68
+#define ACPAXI2AXI_ATU_PAGE_SIZE_GRP_14 0x0000C6C
+#define ACPAXI2AXI_ATU_BASE_ADDR_GRP_14 0x0000C70
+#define ACPAXI2AXI_ATU_PAGE_SIZE_GRP_15 0x0000C74
+#define ACPAXI2AXI_ATU_BASE_ADDR_GRP_15 0x0000C78
+#define ACPAXI2AXI_ATU_PAGE_SIZE_GRP_16 0x0000C7C
+#define ACPAXI2AXI_ATU_BASE_ADDR_GRP_16 0x0000C80
+
+/* Registers from ACP_CLKRST block */
+#define ACP_SOFT_RESET 0x0001000
+#define ACP_CONTROL 0x0001004
+#define ACP_STATUS 0x0001008
+#define ACP_DYNAMIC_CG_MASTER_CONTROL 0x0001010
+#define ACP_ZSC_DSP_CTRL 0x0001014
+#define ACP_ZSC_STS 0x0001018
+#define ACP_PGFSM_CONTROL 0x0001024
+#define ACP_PGFSM_STATUS 0x0001028
+#define ACP_CLKMUX_SEL 0x000102C
+
+/* Registers from ACP_AON block */
+#define ACP_PME_EN 0x0001400
+#define ACP_DEVICE_STATE 0x0001404
+#define AZ_DEVICE_STATE 0x0001408
+#define ACP_PIN_CONFIG 0x0001440
+#define ACP_PAD_PULLUP_CTRL 0x0001444
+#define ACP_PAD_PULLDOWN_CTRL 0x0001448
+#define ACP_PAD_DRIVE_STRENGTH_CTRL 0x000144C
+#define ACP_PAD_SCHMEN_CTRL 0x0001450
+#define ACP_SW_PAD_KEEPER_EN 0x0001454
+#define ACP_SW_WAKE_EN 0x0001458
+#define ACP_I2S_WAKE_EN 0x000145C
+#define ACP_SW1_WAKE_EN 0x0001460
+
+/* Registers from ACP_P1_MISC block */
+#define ACP_EXTERNAL_INTR_ENB 0x0001A00
+#define ACP_EXTERNAL_INTR_CNTL 0x0001A04
+#define ACP_EXTERNAL_INTR_CNTL1 0x0001A08
+#define ACP_EXTERNAL_INTR_STAT 0x0001A0C
+#define ACP_EXTERNAL_INTR_STAT1 0x0001A10
+#define ACP_ERROR_STATUS 0x0001A4C
+#define ACP_P1_SW_I2S_ERROR_REASON 0x0001A50
+#define ACP_P1_SW_POS_TRACK_I2S_TX_CTRL 0x0001A6C
+#define ACP_P1_SW_I2S_TX_DMA_POS 0x0001A70
+#define ACP_P1_SW_POS_TRACK_I2S_RX_CTRL 0x0001A74
+#define ACP_P1_SW_I2S_RX_DMA_POS 0x0001A78
+#define ACP_P1_DMIC_I2S_GPIO_INTR_CTRL 0x0001A7C
+#define ACP_P1_DMIC_I2S_GPIO_INTR_STATUS 0x0001A80
+#define ACP_SCRATCH_REG_BASE_ADDR 0x0001A84
+#define ACP_P1_SW_POS_TRACK_BT_TX_CTRL 0x0001A88
+#define ACP_P1_SW_BT_TX_DMA_POS 0x0001A8C
+#define ACP_P1_SW_POS_TRACK_HS_TX_CTRL 0x0001A90
+#define ACP_P1_SW_HS_TX_DMA_POS 0x0001A94
+#define ACP_P1_SW_POS_TRACK_BT_RX_CTRL 0x0001A98
+#define ACP_P1_SW_BT_RX_DMA_POS 0x0001A9C
+#define ACP_P1_SW_POS_TRACK_HS_RX_CTRL 0x0001AA0
+#define ACP_P1_SW_HS_RX_DMA_POS 0x0001AA4
+
+/* Registers from ACP_AUDIO_BUFFERS block */
+#define ACP_I2S_RX_RINGBUFADDR 0x0002000
+#define ACP_I2S_RX_RINGBUFSIZE 0x0002004
+#define ACP_I2S_RX_LINKPOSITIONCNTR 0x0002008
+#define ACP_I2S_RX_FIFOADDR 0x000200C
+#define ACP_I2S_RX_FIFOSIZE 0x0002010
+#define ACP_I2S_RX_DMA_SIZE 0x0002014
+#define ACP_I2S_RX_LINEARPOSITIONCNTR_HIGH 0x0002018
+#define ACP_I2S_RX_LINEARPOSITIONCNTR_LOW 0x000201C
+#define ACP_I2S_RX_INTR_WATERMARK_SIZE 0x0002020
+#define ACP_I2S_TX_RINGBUFADDR 0x0002024
+#define ACP_I2S_TX_RINGBUFSIZE 0x0002028
+#define ACP_I2S_TX_LINKPOSITIONCNTR 0x000202C
+#define ACP_I2S_TX_FIFOADDR 0x0002030
+#define ACP_I2S_TX_FIFOSIZE 0x0002034
+#define ACP_I2S_TX_DMA_SIZE 0x0002038
+#define ACP_I2S_TX_LINEARPOSITIONCNTR_HIGH 0x000203C
+#define ACP_I2S_TX_LINEARPOSITIONCNTR_LOW 0x0002040
+#define ACP_I2S_TX_INTR_WATERMARK_SIZE 0x0002044
+#define ACP_BT_RX_RINGBUFADDR 0x0002048
+#define ACP_BT_RX_RINGBUFSIZE 0x000204C
+#define ACP_BT_RX_LINKPOSITIONCNTR 0x0002050
+#define ACP_BT_RX_FIFOADDR 0x0002054
+#define ACP_BT_RX_FIFOSIZE 0x0002058
+#define ACP_BT_RX_DMA_SIZE 0x000205C
+#define ACP_BT_RX_LINEARPOSITIONCNTR_HIGH 0x0002060
+#define ACP_BT_RX_LINEARPOSITIONCNTR_LOW 0x0002064
+#define ACP_BT_RX_INTR_WATERMARK_SIZE 0x0002068
+#define ACP_BT_TX_RINGBUFADDR 0x000206C
+#define ACP_BT_TX_RINGBUFSIZE 0x0002070
+#define ACP_BT_TX_LINKPOSITIONCNTR 0x0002074
+#define ACP_BT_TX_FIFOADDR 0x0002078
+#define ACP_BT_TX_FIFOSIZE 0x000207C
+#define ACP_BT_TX_DMA_SIZE 0x0002080
+#define ACP_BT_TX_LINEARPOSITIONCNTR_HIGH 0x0002084
+#define ACP_BT_TX_LINEARPOSITIONCNTR_LOW 0x0002088
+#define ACP_BT_TX_INTR_WATERMARK_SIZE 0x000208C
+#define ACP_HS_RX_RINGBUFADDR 0x0002090
+#define ACP_HS_RX_RINGBUFSIZE 0x0002094
+#define ACP_HS_RX_LINKPOSITIONCNTR 0x0002098
+#define ACP_HS_RX_FIFOADDR 0x000209C
+#define ACP_HS_RX_FIFOSIZE 0x00020A0
+#define ACP_HS_RX_DMA_SIZE 0x00020A4
+#define ACP_HS_RX_LINEARPOSITIONCNTR_HIGH 0x00020A8
+#define ACP_HS_RX_LINEARPOSITIONCNTR_LOW 0x00020AC
+#define ACP_HS_RX_INTR_WATERMARK_SIZE 0x00020B0
+#define ACP_HS_TX_RINGBUFADDR 0x00020B4
+#define ACP_HS_TX_RINGBUFSIZE 0x00020B8
+#define ACP_HS_TX_LINKPOSITIONCNTR 0x00020BC
+#define ACP_HS_TX_FIFOADDR 0x00020C0
+#define ACP_HS_TX_FIFOSIZE 0x00020C4
+#define ACP_HS_TX_DMA_SIZE 0x00020C8
+#define ACP_HS_TX_LINEARPOSITIONCNTR_HIGH 0x00020CC
+#define ACP_HS_TX_LINEARPOSITIONCNTR_LOW 0x00020D0
+#define ACP_HS_TX_INTR_WATERMARK_SIZE 0x00020D4
+
+/* Registers from ACP_I2S_TDM block */
+#define ACP_I2STDM_IER 0x0002400
+#define ACP_I2STDM_IRER 0x0002404
+#define ACP_I2STDM_RXFRMT 0x0002408
+#define ACP_I2STDM_ITER 0x000240C
+#define ACP_I2STDM_TXFRMT 0x0002410
+#define ACP_I2STDM0_MSTRCLKGEN 0x0002414
+#define ACP_I2STDM1_MSTRCLKGEN 0x0002418
+#define ACP_I2STDM2_MSTRCLKGEN 0x000241C
+#define ACP_I2STDM_REFCLKGEN 0x0002420
+
+/* Registers from ACP_BT_TDM block */
+#define ACP_BTTDM_IER 0x0002800
+#define ACP_BTTDM_IRER 0x0002804
+#define ACP_BTTDM_RXFRMT 0x0002808
+#define ACP_BTTDM_ITER 0x000280C
+#define ACP_BTTDM_TXFRMT 0x0002810
+#define ACP_HSTDM_IER 0x0002814
+#define ACP_HSTDM_IRER 0x0002818
+#define ACP_HSTDM_RXFRMT 0x000281C
+#define ACP_HSTDM_ITER 0x0002820
+#define ACP_HSTDM_TXFRMT 0x0002824
+
+/* Registers from ACP_WOV block */
+#define ACP_WOV_PDM_ENABLE 0x0002C04
+#define ACP_WOV_PDM_DMA_ENABLE 0x0002C08
+#define ACP_WOV_RX_RINGBUFADDR 0x0002C0C
+#define ACP_WOV_RX_RINGBUFSIZE 0x0002C10
+#define ACP_WOV_RX_LINKPOSITIONCNTR 0x0002C14
+#define ACP_WOV_RX_LINEARPOSITIONCNTR_HIGH 0x0002C18
+#define ACP_WOV_RX_LINEARPOSITIONCNTR_LOW 0x0002C1C
+#define ACP_WOV_RX_INTR_WATERMARK_SIZE 0x0002C20
+#define ACP_WOV_PDM_FIFO_FLUSH 0x0002C24
+#define ACP_WOV_PDM_NO_OF_CHANNELS 0x0002C28
+#define ACP_WOV_PDM_DECIMATION_FACTOR 0x0002C2C
+#define ACP_WOV_PDM_VAD_CTRL 0x0002C30
+#define ACP_WOV_WAKE 0x0002C54
+#define ACP_WOV_BUFFER_STATUS 0x0002C58
+#define ACP_WOV_MISC_CTRL 0x0002C5C
+#define ACP_WOV_CLK_CTRL 0x0002C60
+#define ACP_PDM_VAD_DYNAMIC_CLK_GATING_EN 0x0002C64
+#define ACP_WOV_ERROR_STATUS_REGISTER 0x0002C68
+#define ACP_PDM_CLKDIV 0x0002C6C
+
+/* Registers from ACP_P1_AUDIO_BUFFERS block */
+#define ACP_P1_I2S_RX_RINGBUFADDR 0x0003A00
+#define ACP_P1_I2S_RX_RINGBUFSIZE 0x0003A04
+#define ACP_P1_I2S_RX_LINKPOSITIONCNTR 0x0003A08
+#define ACP_P1_I2S_RX_FIFOADDR 0x0003A0C
+#define ACP_P1_I2S_RX_FIFOSIZE 0x0003A10
+#define ACP_P1_I2S_RX_DMA_SIZE 0x0003A14
+#define ACP_P1_I2S_RX_LINEARPOSITIONCNTR_HIGH 0x0003A18
+#define ACP_P1_I2S_RX_LINEARPOSITIONCNTR_LOW 0x0003A1C
+#define ACP_P1_I2S_RX_INTR_WATERMARK_SIZE 0x0003A20
+#define ACP_P1_I2S_TX_RINGBUFADDR 0x0003A24
+#define ACP_P1_I2S_TX_RINGBUFSIZE 0x0003A28
+#define ACP_P1_I2S_TX_LINKPOSITIONCNTR 0x0003A2C
+#define ACP_P1_I2S_TX_FIFOADDR 0x0003A30
+#define ACP_P1_I2S_TX_FIFOSIZE 0x0003A34
+#define ACP_P1_I2S_TX_DMA_SIZE 0x0003A38
+#define ACP_P1_I2S_TX_LINEARPOSITIONCNTR_HIGH 0x0003A3C
+#define ACP_P1_I2S_TX_LINEARPOSITIONCNTR_LOW 0x0003A40
+#define ACP_P1_I2S_TX_INTR_WATERMARK_SIZE 0x0003A44
+#define ACP_P1_BT_RX_RINGBUFADDR 0x0003A48
+#define ACP_P1_BT_RX_RINGBUFSIZE 0x0003A4C
+#define ACP_P1_BT_RX_LINKPOSITIONCNTR 0x0003A50
+#define ACP_P1_BT_RX_FIFOADDR 0x0003A54
+#define ACP_P1_BT_RX_FIFOSIZE 0x0003A58
+#define ACP_P1_BT_RX_DMA_SIZE 0x0003A5C
+#define ACP_P1_BT_RX_LINEARPOSITIONCNTR_HIGH 0x0003A60
+#define ACP_P1_BT_RX_LINEARPOSITIONCNTR_LOW 0x0003A64
+#define ACP_P1_BT_RX_INTR_WATERMARK_SIZE 0x0003A68
+#define ACP_P1_BT_TX_RINGBUFADDR 0x0003A6C
+#define ACP_P1_BT_TX_RINGBUFSIZE 0x0003A70
+#define ACP_P1_BT_TX_LINKPOSITIONCNTR 0x0003A74
+#define ACP_P1_BT_TX_FIFOADDR 0x0003A78
+#define ACP_P1_BT_TX_FIFOSIZE 0x0003A7C
+#define ACP_P1_BT_TX_DMA_SIZE 0x0003A80
+#define ACP_P1_BT_TX_LINEARPOSITIONCNTR_HIGH 0x0003A84
+#define ACP_P1_BT_TX_LINEARPOSITIONCNTR_LOW 0x0003A88
+#define ACP_P1_BT_TX_INTR_WATERMARK_SIZE 0x0003A8C
+#define ACP_P1_HS_RX_RINGBUFADDR 0x0003A90
+#define ACP_P1_HS_RX_RINGBUFSIZE 0x0003A94
+#define ACP_P1_HS_RX_LINKPOSITIONCNTR 0x0003A98
+#define ACP_P1_HS_RX_FIFOADDR 0x0003A9C
+#define ACP_P1_HS_RX_FIFOSIZE 0x0003AA0
+#define ACP_P1_HS_RX_DMA_SIZE 0x0003AA4
+#define ACP_P1_HS_RX_LINEARPOSITIONCNTR_HIGH 0x0003AA8
+#define ACP_P1_HS_RX_LINEARPOSITIONCNTR_LOW 0x0003AAC
+#define ACP_P1_HS_RX_INTR_WATERMARK_SIZE 0x0003AB0
+#define ACP_P1_HS_TX_RINGBUFADDR 0x0003AB4
+#define ACP_P1_HS_TX_RINGBUFSIZE 0x0003AB8
+#define ACP_P1_HS_TX_LINKPOSITIONCNTR 0x0003ABC
+#define ACP_P1_HS_TX_FIFOADDR 0x0003AC0
+#define ACP_P1_HS_TX_FIFOSIZE 0x0003AC4
+#define ACP_P1_HS_TX_DMA_SIZE 0x0003AC8
+#define ACP_P1_HS_TX_LINEARPOSITIONCNTR_HIGH 0x0003ACC
+#define ACP_P1_HS_TX_LINEARPOSITIONCNTR_LOW 0x0003AD0
+#define ACP_P1_HS_TX_INTR_WATERMARK_SIZE 0x0003AD4
+
+/* Registers from ACP_SCRATCH block */
+#define ACP_SCRATCH_REG_0 0x0010000
+#define ACP_SCRATCH_REG_1 0x0010004
+#define ACP_SCRATCH_REG_2 0x0010008
+#define ACP_SCRATCH_REG_3 0x001000C
+#define ACP_SCRATCH_REG_4 0x0010010
+#define ACP_SCRATCH_REG_5 0x0010014
+#define ACP_SCRATCH_REG_6 0x0010018
+#define ACP_SCRATCH_REG_7 0x001001C
+#define ACP_SCRATCH_REG_8 0x0010020
+#define ACP_SCRATCH_REG_9 0x0010024
+#define ACP_SCRATCH_REG_10 0x0010028
+#define ACP_SCRATCH_REG_11 0x001002C
+#define ACP_SCRATCH_REG_12 0x0010030
+#define ACP_SCRATCH_REG_13 0x0010034
+#define ACP_SCRATCH_REG_14 0x0010038
+#define ACP_SCRATCH_REG_15 0x001003C
+#define ACP_SCRATCH_REG_16 0x0010040
+#define ACP_SCRATCH_REG_17 0x0010044
+#define ACP_SCRATCH_REG_18 0x0010048
+#define ACP_SCRATCH_REG_19 0x001004C
+#define ACP_SCRATCH_REG_20 0x0010050
+#define ACP_SCRATCH_REG_21 0x0010054
+#define ACP_SCRATCH_REG_22 0x0010058
+#define ACP_SCRATCH_REG_23 0x001005C
+#define ACP_SCRATCH_REG_24 0x0010060
+#define ACP_SCRATCH_REG_25 0x0010064
+#define ACP_SCRATCH_REG_26 0x0010068
+#define ACP_SCRATCH_REG_27 0x001006C
+#define ACP_SCRATCH_REG_28 0x0010070
+#define ACP_SCRATCH_REG_29 0x0010074
+#define ACP_SCRATCH_REG_30 0x0010078
+#define ACP_SCRATCH_REG_31 0x001007C
+#define ACP_SCRATCH_REG_32 0x0010080
+#define ACP_SCRATCH_REG_33 0x0010084
+#define ACP_SCRATCH_REG_34 0x0010088
+#define ACP_SCRATCH_REG_35 0x001008C
+#define ACP_SCRATCH_REG_36 0x0010090
+#define ACP_SCRATCH_REG_37 0x0010094
+#define ACP_SCRATCH_REG_38 0x0010098
+#define ACP_SCRATCH_REG_39 0x001009C
+#define ACP_SCRATCH_REG_40 0x00100A0
+#define ACP_SCRATCH_REG_41 0x00100A4
+#define ACP_SCRATCH_REG_42 0x00100A8
+#define ACP_SCRATCH_REG_43 0x00100AC
+#define ACP_SCRATCH_REG_44 0x00100B0
+#define ACP_SCRATCH_REG_45 0x00100B4
+#define ACP_SCRATCH_REG_46 0x00100B8
+#define ACP_SCRATCH_REG_47 0x00100BC
+#define ACP_SCRATCH_REG_48 0x00100C0
+#define ACP_SCRATCH_REG_49 0x00100C4
+#define ACP_SCRATCH_REG_50 0x00100C8
+#define ACP_SCRATCH_REG_51 0x00100CC
+#define ACP_SCRATCH_REG_52 0x00100D0
+#define ACP_SCRATCH_REG_53 0x00100D4
+#define ACP_SCRATCH_REG_54 0x00100D8
+#define ACP_SCRATCH_REG_55 0x00100DC
+#define ACP_SCRATCH_REG_56 0x00100E0
+#define ACP_SCRATCH_REG_57 0x00100E4
+#define ACP_SCRATCH_REG_58 0x00100E8
+#define ACP_SCRATCH_REG_59 0x00100EC
+#define ACP_SCRATCH_REG_60 0x00100F0
+#define ACP_SCRATCH_REG_61 0x00100F4
+#define ACP_SCRATCH_REG_62 0x00100F8
+#define ACP_SCRATCH_REG_63 0x00100FC
+#define ACP_SCRATCH_REG_64 0x0010100
+#define ACP_SCRATCH_REG_65 0x0010104
+#define ACP_SCRATCH_REG_66 0x0010108
+#define ACP_SCRATCH_REG_67 0x001010C
+#define ACP_SCRATCH_REG_68 0x0010110
+#define ACP_SCRATCH_REG_69 0x0010114
+#define ACP_SCRATCH_REG_70 0x0010118
+#define ACP_SCRATCH_REG_71 0x001011C
+#define ACP_SCRATCH_REG_72 0x0010120
+#define ACP_SCRATCH_REG_73 0x0010124
+#define ACP_SCRATCH_REG_74 0x0010128
+#define ACP_SCRATCH_REG_75 0x001012C
+#define ACP_SCRATCH_REG_76 0x0010130
+#define ACP_SCRATCH_REG_77 0x0010134
+#define ACP_SCRATCH_REG_78 0x0010138
+#define ACP_SCRATCH_REG_79 0x001013C
+#define ACP_SCRATCH_REG_80 0x0010140
+#define ACP_SCRATCH_REG_81 0x0010144
+#define ACP_SCRATCH_REG_82 0x0010148
+#define ACP_SCRATCH_REG_83 0x001014C
+#define ACP_SCRATCH_REG_84 0x0010150
+#define ACP_SCRATCH_REG_85 0x0010154
+#define ACP_SCRATCH_REG_86 0x0010158
+#define ACP_SCRATCH_REG_87 0x001015C
+#define ACP_SCRATCH_REG_88 0x0010160
+#define ACP_SCRATCH_REG_89 0x0010164
+#define ACP_SCRATCH_REG_90 0x0010168
+#define ACP_SCRATCH_REG_91 0x001016C
+#define ACP_SCRATCH_REG_92 0x0010170
+#define ACP_SCRATCH_REG_93 0x0010174
+#define ACP_SCRATCH_REG_94 0x0010178
+#define ACP_SCRATCH_REG_95 0x001017C
+#define ACP_SCRATCH_REG_96 0x0010180
+#define ACP_SCRATCH_REG_97 0x0010184
+#define ACP_SCRATCH_REG_98 0x0010188
+#define ACP_SCRATCH_REG_99 0x001018C
+#define ACP_SCRATCH_REG_100 0x0010190
+#define ACP_SCRATCH_REG_101 0x0010194
+#define ACP_SCRATCH_REG_102 0x0010198
+#define ACP_SCRATCH_REG_103 0x001019C
+#define ACP_SCRATCH_REG_104 0x00101A0
+#define ACP_SCRATCH_REG_105 0x00101A4
+#define ACP_SCRATCH_REG_106 0x00101A8
+#define ACP_SCRATCH_REG_107 0x00101AC
+#define ACP_SCRATCH_REG_108 0x00101B0
+#define ACP_SCRATCH_REG_109 0x00101B4
+#define ACP_SCRATCH_REG_110 0x00101B8
+#define ACP_SCRATCH_REG_111 0x00101BC
+#define ACP_SCRATCH_REG_112 0x00101C0
+#define ACP_SCRATCH_REG_113 0x00101C4
+#define ACP_SCRATCH_REG_114 0x00101C8
+#define ACP_SCRATCH_REG_115 0x00101CC
+#define ACP_SCRATCH_REG_116 0x00101D0
+#define ACP_SCRATCH_REG_117 0x00101D4
+#define ACP_SCRATCH_REG_118 0x00101D8
+#define ACP_SCRATCH_REG_119 0x00101DC
+#define ACP_SCRATCH_REG_120 0x00101E0
+#define ACP_SCRATCH_REG_121 0x00101E4
+#define ACP_SCRATCH_REG_122 0x00101E8
+#define ACP_SCRATCH_REG_123 0x00101EC
+#define ACP_SCRATCH_REG_124 0x00101F0
+#define ACP_SCRATCH_REG_125 0x00101F4
+#define ACP_SCRATCH_REG_126 0x00101F8
+#define ACP_SCRATCH_REG_127 0x00101FC
+#define ACP_SCRATCH_REG_128 0x0010200
+#endif
diff --git a/include/sound/cs42l42.h b/include/sound/cs42l42.h
index a55d522f1772..1d1c24fdd0ca 100644
--- a/include/sound/cs42l42.h
+++ b/include/sound/cs42l42.h
@@ -40,6 +40,7 @@
#define CS42L42_PAGE_30 0x3000
#define CS42L42_CHIP_ID 0x42A42
+#define CS42L83_CHIP_ID 0x42A83
/* Page 0x10 Global Registers */
#define CS42L42_DEVID_AB (CS42L42_PAGE_10 + 0x01)
diff --git a/include/sound/hda_codec.h b/include/sound/hda_codec.h
index 6d3c82c4b6ac..25ec8c181688 100644
--- a/include/sound/hda_codec.h
+++ b/include/sound/hda_codec.h
@@ -258,7 +258,6 @@ struct hda_codec {
unsigned int link_down_at_suspend:1; /* link down at runtime suspend */
unsigned int relaxed_resume:1; /* don't resume forcibly for jack */
unsigned int forced_resume:1; /* forced resume for jack */
- unsigned int mst_no_extra_pcms:1; /* no backup PCMs for DP-MST */
#ifdef CONFIG_PM
unsigned long power_on_acct;
@@ -293,8 +292,6 @@ struct hda_codec {
#define dev_to_hda_codec(_dev) container_of(_dev, struct hda_codec, core.dev)
#define hda_codec_dev(_dev) (&(_dev)->core.dev)
-#define hdac_to_hda_priv(_hdac) \
- container_of(_hdac, struct hdac_hda_priv, codec.core)
#define hdac_to_hda_codec(_hdac) container_of(_hdac, struct hda_codec, core)
#define list_for_each_codec(c, bus) \
diff --git a/include/sound/hda_register.h b/include/sound/hda_register.h
index ad8b71b1dbb6..d37cf43546eb 100644
--- a/include/sound/hda_register.h
+++ b/include/sound/hda_register.h
@@ -260,7 +260,18 @@ enum { SDI0, SDI1, SDI2, SDI3, SDO0, SDO1, SDO2, SDO3 };
#define AZX_REG_ML_LCAP 0x00
#define AZX_REG_ML_LCTL 0x04
+
+#define AZX_ML_LCTL_CPA BIT(23)
+#define AZX_ML_LCTL_CPA_SHIFT 23
+#define AZX_ML_LCTL_SPA BIT(16)
+#define AZX_ML_LCTL_SPA_SHIFT 16
+#define AZX_ML_LCTL_SCF GENMASK(3, 0)
+
#define AZX_REG_ML_LOSIDV 0x08
+
+/* bit0 is reserved, with BIT(1) mapping to stream1 */
+#define AZX_ML_LOSIDV_STREAM_MASK 0xFFFE
+
#define AZX_REG_ML_LSDIID 0x0C
#define AZX_REG_ML_LPSOO 0x10
#define AZX_REG_ML_LPSIO 0x12
@@ -268,15 +279,6 @@ enum { SDI0, SDI1, SDI2, SDI3, SDO0, SDO1, SDO2, SDO3 };
#define AZX_REG_ML_LOUTPAY 0x20
#define AZX_REG_ML_LINPAY 0x30
-/* bit0 is reserved, with BIT(1) mapping to stream1 */
-#define ML_LOSIDV_STREAM_MASK 0xFFFE
-
-#define ML_LCTL_SCF_MASK 0xF
-#define AZX_MLCTL_SPA (0x1 << 16)
-#define AZX_MLCTL_CPA (0x1 << 23)
-#define AZX_MLCTL_SPA_SHIFT 16
-#define AZX_MLCTL_CPA_SHIFT 23
-
/* registers for DMA Resume Capability Structure */
#define AZX_DRSM_CAP_ID 0x5
#define AZX_REG_DRSM_CTL 0x4
diff --git a/include/sound/hdaudio.h b/include/sound/hdaudio.h
index 797bf67a164d..35778f953a3f 100644
--- a/include/sound/hdaudio.h
+++ b/include/sound/hdaudio.h
@@ -10,6 +10,7 @@
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/io-64-nonatomic-lo-hi.h>
+#include <linux/iopoll.h>
#include <linux/pm_runtime.h>
#include <linux/timecounter.h>
#include <sound/core.h>
@@ -550,6 +551,7 @@ void snd_hdac_stream_init(struct hdac_bus *bus, struct hdac_stream *azx_dev,
int idx, int direction, int tag);
struct hdac_stream *snd_hdac_stream_assign(struct hdac_bus *bus,
struct snd_pcm_substream *substream);
+void snd_hdac_stream_release_locked(struct hdac_stream *azx_dev);
void snd_hdac_stream_release(struct hdac_stream *azx_dev);
struct hdac_stream *snd_hdac_get_stream(struct hdac_bus *bus,
int dir, int stream_tag);
@@ -560,8 +562,8 @@ int snd_hdac_stream_setup_periods(struct hdac_stream *azx_dev);
int snd_hdac_stream_set_params(struct hdac_stream *azx_dev,
unsigned int format_val);
void snd_hdac_stream_start(struct hdac_stream *azx_dev, bool fresh_start);
-void snd_hdac_stream_clear(struct hdac_stream *azx_dev);
void snd_hdac_stream_stop(struct hdac_stream *azx_dev);
+void snd_hdac_stop_streams(struct hdac_bus *bus);
void snd_hdac_stop_streams_and_chip(struct hdac_bus *bus);
void snd_hdac_stream_reset(struct hdac_stream *azx_dev);
void snd_hdac_stream_sync_trigger(struct hdac_stream *azx_dev, bool set,
@@ -589,6 +591,12 @@ int snd_hdac_get_stream_stripe_ctl(struct hdac_bus *bus,
snd_hdac_reg_readw((dev)->bus, (dev)->sd_addr + AZX_REG_ ## reg)
#define snd_hdac_stream_readb(dev, reg) \
snd_hdac_reg_readb((dev)->bus, (dev)->sd_addr + AZX_REG_ ## reg)
+#define snd_hdac_stream_readb_poll(dev, reg, val, cond, delay_us, timeout_us) \
+ read_poll_timeout_atomic(snd_hdac_reg_readb, val, cond, delay_us, timeout_us, \
+ false, (dev)->bus, (dev)->sd_addr + AZX_REG_ ## reg)
+#define snd_hdac_stream_readl_poll(dev, reg, val, cond, delay_us, timeout_us) \
+ read_poll_timeout_atomic(snd_hdac_reg_readl, val, cond, delay_us, timeout_us, \
+ false, (dev)->bus, (dev)->sd_addr + AZX_REG_ ## reg)
/* update a register, pass without AZX_REG_ prefix */
#define snd_hdac_stream_updatel(dev, reg, mask, val) \
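Illustrative usage (hypothetical caller): the relocated poll macros take the stream register name without the AZX_REG_ prefix and spin atomically on it, e.g. waiting for a reset bit to read back as set.

static int example_wait_for_stream_reset(struct hdac_stream *azx_dev)
{
	unsigned char val;

	/* Poll SD_CTL every 3us, give up after 300us; the delay and timeout
	 * values here are illustrative, not mandated by the API.
	 */
	return snd_hdac_stream_readb_poll(azx_dev, SD_CTL, val,
					  (val & SD_CTL_STREAM_RESET), 3, 300);
}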
diff --git a/include/sound/hdaudio_ext.h b/include/sound/hdaudio_ext.h
index d26234f9ee46..83aed26ab143 100644
--- a/include/sound/hdaudio_ext.h
+++ b/include/sound/hdaudio_ext.h
@@ -11,9 +11,6 @@ int snd_hdac_ext_bus_init(struct hdac_bus *bus, struct device *dev,
const struct hdac_ext_bus_ops *ext_ops);
void snd_hdac_ext_bus_exit(struct hdac_bus *bus);
-int snd_hdac_ext_bus_device_init(struct hdac_bus *bus, int addr,
- struct hdac_device *hdev, int type);
-void snd_hdac_ext_bus_device_exit(struct hdac_device *hdev);
void snd_hdac_ext_bus_device_remove(struct hdac_bus *bus);
#define HDA_CODEC_REV_EXT_ENTRY(_vid, _rev, _name, drv_data) \
@@ -80,12 +77,9 @@ struct hdac_ext_stream {
#define stream_to_hdac_ext_stream(s) \
container_of(s, struct hdac_ext_stream, hstream)
-void snd_hdac_ext_stream_init(struct hdac_bus *bus,
- struct hdac_ext_stream *hext_stream, int idx,
- int direction, int tag);
int snd_hdac_ext_stream_init_all(struct hdac_bus *bus, int start_idx,
int num_stream, int dir);
-void snd_hdac_stream_free_all(struct hdac_bus *bus);
+void snd_hdac_ext_stream_free_all(struct hdac_bus *bus);
void snd_hdac_link_free_all(struct hdac_bus *bus);
struct hdac_ext_stream *snd_hdac_ext_stream_assign(struct hdac_bus *bus,
struct snd_pcm_substream *substream,
@@ -188,12 +182,6 @@ void snd_hdac_ext_bus_link_power(struct hdac_device *codec, bool enable);
#define snd_hdac_adsp_readq_poll(chip, reg, val, cond, delay_us, timeout_us) \
readq_poll_timeout((chip)->dsp_ba + (reg), val, cond, \
delay_us, timeout_us)
-#define snd_hdac_stream_readb_poll(strm, reg, val, cond, delay_us, timeout_us) \
- readb_poll_timeout((strm)->sd_addr + AZX_REG_ ## reg, val, cond, \
- delay_us, timeout_us)
-#define snd_hdac_stream_readl_poll(strm, reg, val, cond, delay_us, timeout_us) \
- readl_poll_timeout((strm)->sd_addr + AZX_REG_ ## reg, val, cond, \
- delay_us, timeout_us)
struct hdac_ext_device;
diff --git a/include/sound/intel-nhlt.h b/include/sound/intel-nhlt.h
index 3d5cf201cd80..53470d6a28d6 100644
--- a/include/sound/intel-nhlt.h
+++ b/include/sound/intel-nhlt.h
@@ -136,6 +136,8 @@ bool intel_nhlt_has_endpoint_type(struct nhlt_acpi_table *nhlt, u8 link_type);
int intel_nhlt_ssp_endpoint_mask(struct nhlt_acpi_table *nhlt, u8 device_type);
+int intel_nhlt_ssp_mclk_mask(struct nhlt_acpi_table *nhlt, int ssp_num);
+
struct nhlt_specific_cfg *
intel_nhlt_get_endpoint_blob(struct device *dev, struct nhlt_acpi_table *nhlt,
u32 bus_id, u8 link_type, u8 vbps, u8 bps,
@@ -169,6 +171,11 @@ static inline int intel_nhlt_ssp_endpoint_mask(struct nhlt_acpi_table *nhlt, u8
return 0;
}
+static inline int intel_nhlt_ssp_mclk_mask(struct nhlt_acpi_table *nhlt, int ssp_num)
+{
+ return 0;
+}
+
static inline struct nhlt_specific_cfg *
intel_nhlt_get_endpoint_blob(struct device *dev, struct nhlt_acpi_table *nhlt,
u32 bus_id, u8 link_type, u8 vbps, u8 bps,
diff --git a/include/sound/memalloc.h b/include/sound/memalloc.h
index 8d79cebf95f3..43d524580bd2 100644
--- a/include/sound/memalloc.h
+++ b/include/sound/memalloc.h
@@ -26,9 +26,6 @@ struct snd_dma_device {
struct device *dev; /* generic device */
};
-#define snd_dma_continuous_data(x) ((struct device *)(__force unsigned long)(x))
-
-
/*
* buffer types
*/
diff --git a/include/sound/pcm.h b/include/sound/pcm.h
index 8c48a5bce88c..7b1a022910e8 100644
--- a/include/sound/pcm.h
+++ b/include/sound/pcm.h
@@ -346,6 +346,8 @@ static inline void snd_pcm_pack_audio_tstamp_report(__u32 *data, __u32 *accuracy
struct snd_pcm_runtime {
/* -- Status -- */
+ snd_pcm_state_t state; /* stream state */
+ snd_pcm_state_t suspended_state; /* suspended stream state */
struct snd_pcm_substream *trigger_master;
struct timespec64 trigger_tstamp; /* trigger timestamp */
bool trigger_tstamp_latched; /* trigger timestamp latched in low-level driver/hardware */
@@ -678,12 +680,26 @@ void snd_pcm_stream_unlock_irqrestore(struct snd_pcm_substream *substream,
*/
static inline int snd_pcm_running(struct snd_pcm_substream *substream)
{
- return (substream->runtime->status->state == SNDRV_PCM_STATE_RUNNING ||
- (substream->runtime->status->state == SNDRV_PCM_STATE_DRAINING &&
+ return (substream->runtime->state == SNDRV_PCM_STATE_RUNNING ||
+ (substream->runtime->state == SNDRV_PCM_STATE_DRAINING &&
substream->stream == SNDRV_PCM_STREAM_PLAYBACK));
}
/**
+ * __snd_pcm_set_state - Change the current PCM state
+ * @runtime: PCM runtime to set
+ * @state: the current state to set
+ *
+ * Call within the stream lock
+ */
+static inline void __snd_pcm_set_state(struct snd_pcm_runtime *runtime,
+ snd_pcm_state_t state)
+{
+ runtime->state = state;
+ runtime->status->state = state; /* copy for mmap */
+}
+
+/**
* bytes_to_samples - Unit conversion of the size from bytes to samples
* @runtime: PCM runtime instance
* @size: size in bytes
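A minimal sketch of a hypothetical caller of the new helper: both copies of the state (the runtime field and the mmap'ed status record) are changed together, under the stream lock as the kernel-doc above requires.

static void example_mark_setup(struct snd_pcm_substream *substream)
{
	snd_pcm_stream_lock_irq(substream);
	__snd_pcm_set_state(substream->runtime, SNDRV_PCM_STATE_SETUP);
	snd_pcm_stream_unlock_irq(substream);
}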
diff --git a/include/sound/simple_card_utils.h b/include/sound/simple_card_utils.h
index ab55f40896e0..a0b827f0c2f6 100644
--- a/include/sound/simple_card_utils.h
+++ b/include/sound/simple_card_utils.h
@@ -39,6 +39,7 @@ struct asoc_simple_dai {
struct asoc_simple_data {
u32 convert_rate;
u32 convert_channels;
+ const char *convert_sample_format;
};
struct asoc_simple_jack {
diff --git a/include/sound/soc-acpi-intel-match.h b/include/sound/soc-acpi-intel-match.h
index bc7fd46ec2bc..82a7db23db69 100644
--- a/include/sound/soc-acpi-intel-match.h
+++ b/include/sound/soc-acpi-intel-match.h
@@ -14,7 +14,6 @@
* these tables are not constants, some fields can be used for
* pdata or machine ops
*/
-extern struct snd_soc_acpi_mach snd_soc_acpi_intel_haswell_machines[];
extern struct snd_soc_acpi_mach snd_soc_acpi_intel_broadwell_machines[];
extern struct snd_soc_acpi_mach snd_soc_acpi_intel_baytrail_machines[];
extern struct snd_soc_acpi_mach snd_soc_acpi_intel_cherrytrail_machines[];
@@ -30,6 +29,7 @@ extern struct snd_soc_acpi_mach snd_soc_acpi_intel_tgl_machines[];
extern struct snd_soc_acpi_mach snd_soc_acpi_intel_ehl_machines[];
extern struct snd_soc_acpi_mach snd_soc_acpi_intel_jsl_machines[];
extern struct snd_soc_acpi_mach snd_soc_acpi_intel_adl_machines[];
+extern struct snd_soc_acpi_mach snd_soc_acpi_intel_rpl_machines[];
extern struct snd_soc_acpi_mach snd_soc_acpi_intel_mtl_machines[];
extern struct snd_soc_acpi_mach snd_soc_acpi_intel_cnl_sdw_machines[];
@@ -38,6 +38,7 @@ extern struct snd_soc_acpi_mach snd_soc_acpi_intel_cml_sdw_machines[];
extern struct snd_soc_acpi_mach snd_soc_acpi_intel_icl_sdw_machines[];
extern struct snd_soc_acpi_mach snd_soc_acpi_intel_tgl_sdw_machines[];
extern struct snd_soc_acpi_mach snd_soc_acpi_intel_adl_sdw_machines[];
+extern struct snd_soc_acpi_mach snd_soc_acpi_intel_rpl_sdw_machines[];
extern struct snd_soc_acpi_mach snd_soc_acpi_intel_mtl_sdw_machines[];
/*
diff --git a/include/sound/soc.h b/include/sound/soc.h
index aad24a1d3276..37bbfc8b45cb 100644
--- a/include/sound/soc.h
+++ b/include/sound/soc.h
@@ -31,31 +31,31 @@
#define SOC_DOUBLE_VALUE(xreg, shift_left, shift_right, xmax, xinvert, xautodisable) \
((unsigned long)&(struct soc_mixer_control) \
{.reg = xreg, .rreg = xreg, .shift = shift_left, \
- .rshift = shift_right, .max = xmax, .platform_max = xmax, \
+ .rshift = shift_right, .max = xmax, \
.invert = xinvert, .autodisable = xautodisable})
#define SOC_DOUBLE_S_VALUE(xreg, shift_left, shift_right, xmin, xmax, xsign_bit, xinvert, xautodisable) \
((unsigned long)&(struct soc_mixer_control) \
{.reg = xreg, .rreg = xreg, .shift = shift_left, \
- .rshift = shift_right, .min = xmin, .max = xmax, .platform_max = xmax, \
+ .rshift = shift_right, .min = xmin, .max = xmax, \
.sign_bit = xsign_bit, .invert = xinvert, .autodisable = xautodisable})
#define SOC_SINGLE_VALUE(xreg, xshift, xmax, xinvert, xautodisable) \
SOC_DOUBLE_VALUE(xreg, xshift, xshift, xmax, xinvert, xautodisable)
#define SOC_SINGLE_VALUE_EXT(xreg, xmax, xinvert) \
((unsigned long)&(struct soc_mixer_control) \
- {.reg = xreg, .max = xmax, .platform_max = xmax, .invert = xinvert})
+ {.reg = xreg, .max = xmax, .invert = xinvert})
#define SOC_DOUBLE_R_VALUE(xlreg, xrreg, xshift, xmax, xinvert) \
((unsigned long)&(struct soc_mixer_control) \
{.reg = xlreg, .rreg = xrreg, .shift = xshift, .rshift = xshift, \
- .max = xmax, .platform_max = xmax, .invert = xinvert})
+ .max = xmax, .invert = xinvert})
#define SOC_DOUBLE_R_S_VALUE(xlreg, xrreg, xshift, xmin, xmax, xsign_bit, xinvert) \
((unsigned long)&(struct soc_mixer_control) \
{.reg = xlreg, .rreg = xrreg, .shift = xshift, .rshift = xshift, \
- .max = xmax, .min = xmin, .platform_max = xmax, .sign_bit = xsign_bit, \
+ .max = xmax, .min = xmin, .sign_bit = xsign_bit, \
.invert = xinvert})
#define SOC_DOUBLE_R_RANGE_VALUE(xlreg, xrreg, xshift, xmin, xmax, xinvert) \
((unsigned long)&(struct soc_mixer_control) \
{.reg = xlreg, .rreg = xrreg, .shift = xshift, .rshift = xshift, \
- .min = xmin, .max = xmax, .platform_max = xmax, .invert = xinvert})
+ .min = xmin, .max = xmax, .invert = xinvert})
#define SOC_SINGLE(xname, reg, shift, max, invert) \
{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \
.info = snd_soc_info_volsw, .get = snd_soc_get_volsw,\
@@ -68,7 +68,7 @@
.private_value = (unsigned long)&(struct soc_mixer_control) \
{.reg = xreg, .rreg = xreg, .shift = xshift, \
.rshift = xshift, .min = xmin, .max = xmax, \
- .platform_max = xmax, .invert = xinvert} }
+ .invert = xinvert} }
#define SOC_SINGLE_TLV(xname, reg, shift, max, invert, tlv_array) \
{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \
.access = SNDRV_CTL_ELEM_ACCESS_TLV_READ |\
@@ -99,7 +99,7 @@
.private_value = (unsigned long)&(struct soc_mixer_control) \
{.reg = xreg, .rreg = xreg, .shift = xshift, \
.rshift = xshift, .min = xmin, .max = xmax, \
- .platform_max = xmax, .invert = xinvert} }
+ .invert = xinvert} }
#define SOC_DOUBLE(xname, reg, shift_left, shift_right, max, invert) \
{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = (xname),\
.info = snd_soc_info_volsw, .get = snd_soc_get_volsw, \
@@ -199,7 +199,7 @@
.put = snd_soc_put_volsw, \
.private_value = (unsigned long)&(struct soc_mixer_control) \
{.reg = xreg, .rreg = xreg, \
- .min = xmin, .max = xmax, .platform_max = xmax, \
+ .min = xmin, .max = xmax, \
.sign_bit = 7,} }
#define SOC_DOUBLE_S8_TLV(xname, xreg, xmin, xmax, tlv_array) \
{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = (xname), \
@@ -273,7 +273,7 @@
.private_value = (unsigned long)&(struct soc_mixer_control) \
{.reg = xreg, .rreg = xreg, .shift = xshift, \
.rshift = xshift, .min = xmin, .max = xmax, \
- .platform_max = xmax, .invert = xinvert} }
+ .invert = xinvert} }
#define SOC_DOUBLE_EXT_TLV(xname, xreg, shift_left, shift_right, xmax, xinvert,\
xhandler_get, xhandler_put, tlv_array) \
{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = (xname), \
@@ -1062,7 +1062,8 @@ struct snd_soc_pcm_runtime {
unsigned int params_select; /* currently selected param for dai link */
/* Dynamic PCM BE runtime data */
- struct snd_soc_dpcm_runtime dpcm[2];
+ struct snd_soc_dpcm_runtime dpcm[SNDRV_PCM_STREAM_LAST + 1];
+ struct snd_soc_dapm_widget *c2c_widget[SNDRV_PCM_STREAM_LAST + 1];
long pmdown_time;
@@ -1078,11 +1079,6 @@ struct snd_soc_pcm_runtime {
* asoc_rtd_to_codec()
*/
struct snd_soc_dai **dais;
- unsigned int num_codecs;
- unsigned int num_cpus;
-
- struct snd_soc_dapm_widget *playback_widget;
- struct snd_soc_dapm_widget *capture_widget;
struct delayed_work delayed_work;
void (*close_delayed_work_func)(struct snd_soc_pcm_runtime *rtd);
@@ -1108,7 +1104,7 @@ struct snd_soc_pcm_runtime {
};
/* see soc_new_pcm_runtime() */
#define asoc_rtd_to_cpu(rtd, n) (rtd)->dais[n]
-#define asoc_rtd_to_codec(rtd, n) (rtd)->dais[n + (rtd)->num_cpus]
+#define asoc_rtd_to_codec(rtd, n) (rtd)->dais[n + (rtd)->dai_link->num_cpus]
#define asoc_substream_to_rtd(substream) \
(struct snd_soc_pcm_runtime *)snd_pcm_substream_chip(substream)
@@ -1118,15 +1114,15 @@ struct snd_soc_pcm_runtime {
(i)++)
#define for_each_rtd_cpu_dais(rtd, i, dai) \
for ((i) = 0; \
- ((i) < rtd->num_cpus) && ((dai) = asoc_rtd_to_cpu(rtd, i)); \
+ ((i) < rtd->dai_link->num_cpus) && ((dai) = asoc_rtd_to_cpu(rtd, i)); \
(i)++)
#define for_each_rtd_codec_dais(rtd, i, dai) \
for ((i) = 0; \
- ((i) < rtd->num_codecs) && ((dai) = asoc_rtd_to_codec(rtd, i)); \
+ ((i) < rtd->dai_link->num_codecs) && ((dai) = asoc_rtd_to_codec(rtd, i)); \
(i)++)
#define for_each_rtd_dais(rtd, i, dai) \
for ((i) = 0; \
- ((i) < (rtd)->num_cpus + (rtd)->num_codecs) && \
+ ((i) < (rtd)->dai_link->num_cpus + (rtd)->dai_link->num_codecs) && \
((dai) = (rtd)->dais[i]); \
(i)++)
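With num_cpus/num_codecs dropped from snd_soc_pcm_runtime, the iteration macros now read the counts from rtd->dai_link, so existing callers keep the same shape. A short sketch of a machine-driver hook using them; the function name and clock values are illustrative:

#include <sound/soc.h>

static int my_hw_params(struct snd_pcm_substream *substream,
			struct snd_pcm_hw_params *params)
{
	struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
	struct snd_soc_dai *codec_dai;
	int i, ret;

	for_each_rtd_codec_dais(rtd, i, codec_dai) {
		ret = snd_soc_dai_set_sysclk(codec_dai, 0, 19200000,
					     SND_SOC_CLOCK_IN);
		if (ret < 0)
			return ret;
	}
	return 0;
}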
diff --git a/include/sound/sof.h b/include/sound/sof.h
index 367dccfea7ad..341fef19e612 100644
--- a/include/sound/sof.h
+++ b/include/sound/sof.h
@@ -89,6 +89,7 @@ struct snd_sof_pdata {
/* machine */
struct platform_device *pdev_mach;
const struct snd_soc_acpi_mach *machine;
+ const struct snd_sof_of_mach *of_machine;
void *hw_pdata;
@@ -102,6 +103,7 @@ struct snd_sof_pdata {
struct sof_dev_desc {
/* list of machines using this configuration */
struct snd_soc_acpi_mach *machines;
+ struct snd_sof_of_mach *of_machines;
/* alternate list of machines using this configuration */
struct snd_soc_acpi_mach *alt_machines;
diff --git a/include/sound/sof/control.h b/include/sound/sof/control.h
index 7379a33d7247..983d374fe511 100644
--- a/include/sound/sof/control.h
+++ b/include/sound/sof/control.h
@@ -117,11 +117,11 @@ struct sof_ipc_ctrl_data {
/* control data - add new types if needed */
union {
/* channel values can be used by volume type controls */
- struct sof_ipc_ctrl_value_chan chanv[0];
+ DECLARE_FLEX_ARRAY(struct sof_ipc_ctrl_value_chan, chanv);
/* component values used by routing controls like mux, mixer */
- struct sof_ipc_ctrl_value_comp compv[0];
+ DECLARE_FLEX_ARRAY(struct sof_ipc_ctrl_value_comp, compv);
/* data can be used by binary controls */
- struct sof_abi_hdr data[0];
+ DECLARE_FLEX_ARRAY(struct sof_abi_hdr, data);
};
} __packed;
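A flexible array member cannot sit directly inside a union (or be the sole member of a struct), and zero-length arrays are being removed tree-wide, so the header switches to DECLARE_FLEX_ARRAY(), which wraps the member in an anonymous struct with an empty padding field. A hedged sketch of the same conversion on a hypothetical message struct:

#include <linux/stddef.h>	/* DECLARE_FLEX_ARRAY() */
#include <linux/types.h>

struct my_msg {			/* hypothetical, mirrors sof_ipc_ctrl_data */
	__u32 type;
	__u32 count;
	union {
		/* was: __u32 words[0]; */
		DECLARE_FLEX_ARRAY(__u32, words);
		/* was: __u8 bytes[0]; */
		DECLARE_FLEX_ARRAY(__u8, bytes);
	};
};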
diff --git a/include/sound/sof/dai.h b/include/sound/sof/dai.h
index 21d98f31a9ca..83fd81c82e4c 100644
--- a/include/sound/sof/dai.h
+++ b/include/sound/sof/dai.h
@@ -84,6 +84,7 @@ enum sof_ipc_dai_type {
SOF_DAI_AMD_BT, /**< AMD ACP BT*/
SOF_DAI_AMD_SP, /**< AMD ACP SP */
SOF_DAI_AMD_DMIC, /**< AMD ACP DMIC */
+	SOF_DAI_AMD_HS,		/**< AMD HS */
SOF_DAI_MEDIATEK_AFE, /**< Mediatek AFE */
};
@@ -112,6 +113,7 @@ struct sof_ipc_dai_config {
struct sof_ipc_dai_acp_params acpbt;
struct sof_ipc_dai_acp_params acpsp;
struct sof_ipc_dai_acpdmic_params acpdmic;
+ struct sof_ipc_dai_acp_params acphs;
struct sof_ipc_dai_mtk_afe_params afe;
};
} __packed;
diff --git a/include/sound/sof/ipc4/header.h b/include/sound/sof/ipc4/header.h
index a795deacc2ea..99efe0ef1784 100644
--- a/include/sound/sof/ipc4/header.h
+++ b/include/sound/sof/ipc4/header.h
@@ -427,6 +427,11 @@ struct sof_ipc4_dx_state_info {
#define SOF_IPC4_NOTIFICATION_TYPE_GET(x) (((x) & SOF_IPC4_NOTIFICATION_TYPE_MASK) >> \
SOF_IPC4_NOTIFICATION_TYPE_SHIFT)
+#define SOF_IPC4_LOG_CORE_SHIFT 12
+#define SOF_IPC4_LOG_CORE_MASK GENMASK(15, 12)
+#define SOF_IPC4_LOG_CORE_GET(x) (((x) & SOF_IPC4_LOG_CORE_MASK) >> \
+ SOF_IPC4_LOG_CORE_SHIFT)
+
/* Value of notification type field - must fit into 8 bits */
enum sof_ipc4_notification_type {
/* Phrase detected (notification from WoV module) */
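The new mask carves bits 15..12 out of the notification header to identify which DSP core produced a log notification. A tiny sketch with a hypothetical helper name:

static inline u32 my_log_notification_core(u32 header)
{
	/* equivalent to (header & GENMASK(15, 12)) >> 12 */
	return SOF_IPC4_LOG_CORE_GET(header);
}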
diff --git a/include/trace/events/btrfs.h b/include/trace/events/btrfs.h
index 73df80d462dc..ed50e81174bf 100644
--- a/include/trace/events/btrfs.h
+++ b/include/trace/events/btrfs.h
@@ -84,7 +84,6 @@ struct raid56_bio_trace_info;
EM( IO_TREE_FS_EXCLUDED_EXTENTS, "EXCLUDED_EXTENTS") \
EM( IO_TREE_BTREE_INODE_IO, "BTREE_INODE_IO") \
EM( IO_TREE_INODE_IO, "INODE_IO") \
- EM( IO_TREE_INODE_IO_FAILURE, "INODE_IO_FAILURE") \
EM( IO_TREE_RELOC_BLOCKS, "RELOC_BLOCKS") \
EM( IO_TREE_TRANS_DIRTY_PAGES, "TRANS_DIRTY_PAGES") \
EM( IO_TREE_ROOT_DIRTY_LOG_PAGES, "ROOT_DIRTY_LOG_PAGES") \
@@ -154,7 +153,6 @@ FLUSH_STATES
{ EXTENT_NODATASUM, "NODATASUM"}, \
{ EXTENT_CLEAR_META_RESV, "CLEAR_META_RESV"}, \
{ EXTENT_NEED_WAIT, "NEED_WAIT"}, \
- { EXTENT_DAMAGED, "DAMAGED"}, \
{ EXTENT_NORESERVE, "NORESERVE"}, \
{ EXTENT_QGROUP_RESERVED, "QGROUP_RESERVED"}, \
{ EXTENT_CLEAR_DATA_RESV, "CLEAR_DATA_RESV"}, \
diff --git a/include/trace/events/dlm.h b/include/trace/events/dlm.h
index bad21222130e..da0eaae98fa3 100644
--- a/include/trace/events/dlm.h
+++ b/include/trace/events/dlm.h
@@ -49,7 +49,7 @@
/* note: we begin tracing dlm_lock_start() only if ls and lkb are found */
TRACE_EVENT(dlm_lock_start,
- TP_PROTO(struct dlm_ls *ls, struct dlm_lkb *lkb, void *name,
+ TP_PROTO(struct dlm_ls *ls, struct dlm_lkb *lkb, const void *name,
unsigned int namelen, int mode, __u32 flags),
TP_ARGS(ls, lkb, name, namelen, mode, flags),
@@ -91,10 +91,11 @@ TRACE_EVENT(dlm_lock_start,
TRACE_EVENT(dlm_lock_end,
- TP_PROTO(struct dlm_ls *ls, struct dlm_lkb *lkb, void *name,
- unsigned int namelen, int mode, __u32 flags, int error),
+ TP_PROTO(struct dlm_ls *ls, struct dlm_lkb *lkb, const void *name,
+ unsigned int namelen, int mode, __u32 flags, int error,
+ bool kernel_lock),
- TP_ARGS(ls, lkb, name, namelen, mode, flags, error),
+ TP_ARGS(ls, lkb, name, namelen, mode, flags, error, kernel_lock),
TP_STRUCT__entry(
__field(__u32, ls_id)
@@ -113,6 +114,7 @@ TRACE_EVENT(dlm_lock_end,
__entry->lkb_id = lkb->lkb_id;
__entry->mode = mode;
__entry->flags = flags;
+ __entry->error = error;
r = lkb->lkb_resource;
if (r)
@@ -122,14 +124,14 @@ TRACE_EVENT(dlm_lock_end,
memcpy(__get_dynamic_array(res_name), name,
__get_dynamic_array_len(res_name));
- /* return value will be zeroed in those cases by dlm_lock()
- * we do it here again to not introduce more overhead if
- * trace isn't running and error reflects the return value.
- */
- if (error == -EAGAIN || error == -EDEADLK)
- __entry->error = 0;
- else
- __entry->error = error;
+ if (kernel_lock) {
+ /* return value will be zeroed in those cases by dlm_lock()
+ * we do it here again to not introduce more overhead if
+ * trace isn't running and error reflects the return value.
+ */
+ if (error == -EAGAIN || error == -EDEADLK)
+ __entry->error = 0;
+ }
),
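The extra kernel_lock argument lets the event distinguish in-kernel dlm_lock() callers, whose -EAGAIN/-EDEADLK results are reported as 0 to match the function's return value, from other request paths where the raw error is kept. A call-site sketch (a fragment; the surrounding dlm variables are assumed to be in scope):

	/* kernel API path, e.g. from dlm_lock() */
	trace_dlm_lock_end(ls, lkb, name, namelen, mode, flags, error, true);

	/* non-kernel request path keeps the raw error */
	trace_dlm_lock_end(ls, lkb, name, namelen, mode, flags, error, false);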
diff --git a/include/trace/events/erofs.h b/include/trace/events/erofs.h
index 57de057bd503..4f4c44ea3a65 100644
--- a/include/trace/events/erofs.h
+++ b/include/trace/events/erofs.h
@@ -53,15 +53,14 @@ TRACE_EVENT(erofs_lookup,
);
TRACE_EVENT(erofs_fill_inode,
- TP_PROTO(struct inode *inode, int isdir),
- TP_ARGS(inode, isdir),
+ TP_PROTO(struct inode *inode),
+ TP_ARGS(inode),
TP_STRUCT__entry(
__field(dev_t, dev )
__field(erofs_nid_t, nid )
__field(erofs_blk_t, blkaddr )
__field(unsigned int, ofs )
- __field(int, isdir )
),
TP_fast_assign(
@@ -69,13 +68,11 @@ TRACE_EVENT(erofs_fill_inode,
__entry->nid = EROFS_I(inode)->nid;
__entry->blkaddr = erofs_blknr(iloc(EROFS_I_SB(inode), __entry->nid));
__entry->ofs = erofs_blkoff(iloc(EROFS_I_SB(inode), __entry->nid));
- __entry->isdir = isdir;
),
- TP_printk("dev = (%d,%d), nid = %llu, blkaddr %u ofs %u, isdir %d",
+ TP_printk("dev = (%d,%d), nid = %llu, blkaddr %u ofs %u",
show_dev_nid(__entry),
- __entry->blkaddr, __entry->ofs,
- __entry->isdir)
+ __entry->blkaddr, __entry->ofs)
);
TRACE_EVENT(erofs_readpage,
diff --git a/include/trace/events/f2fs.h b/include/trace/events/f2fs.h
index f1e922237736..c6b372401c27 100644
--- a/include/trace/events/f2fs.h
+++ b/include/trace/events/f2fs.h
@@ -1578,9 +1578,10 @@ TRACE_EVENT_CONDITION(f2fs_lookup_extent_tree_end,
TRACE_EVENT(f2fs_update_extent_tree_range,
TP_PROTO(struct inode *inode, unsigned int pgofs, block_t blkaddr,
- unsigned int len),
+ unsigned int len,
+ unsigned int c_len),
- TP_ARGS(inode, pgofs, blkaddr, len),
+ TP_ARGS(inode, pgofs, blkaddr, len, c_len),
TP_STRUCT__entry(
__field(dev_t, dev)
@@ -1588,6 +1589,7 @@ TRACE_EVENT(f2fs_update_extent_tree_range,
__field(unsigned int, pgofs)
__field(u32, blk)
__field(unsigned int, len)
+ __field(unsigned int, c_len)
),
TP_fast_assign(
@@ -1596,14 +1598,17 @@ TRACE_EVENT(f2fs_update_extent_tree_range,
__entry->pgofs = pgofs;
__entry->blk = blkaddr;
__entry->len = len;
+ __entry->c_len = c_len;
),
TP_printk("dev = (%d,%d), ino = %lu, pgofs = %u, "
- "blkaddr = %u, len = %u",
+ "blkaddr = %u, len = %u, "
+ "c_len = %u",
show_dev_ino(__entry),
__entry->pgofs,
__entry->blk,
- __entry->len)
+ __entry->len,
+ __entry->c_len)
);
TRACE_EVENT(f2fs_shrink_extent_tree,
@@ -1823,7 +1828,10 @@ TRACE_EVENT(f2fs_iostat,
__field(unsigned long long, app_bio)
__field(unsigned long long, app_wio)
__field(unsigned long long, app_mio)
+ __field(unsigned long long, app_bcdio)
+ __field(unsigned long long, app_mcdio)
__field(unsigned long long, fs_dio)
+ __field(unsigned long long, fs_cdio)
__field(unsigned long long, fs_nio)
__field(unsigned long long, fs_mio)
__field(unsigned long long, fs_gc_dio)
@@ -1835,6 +1843,8 @@ TRACE_EVENT(f2fs_iostat,
__field(unsigned long long, app_brio)
__field(unsigned long long, app_rio)
__field(unsigned long long, app_mrio)
+ __field(unsigned long long, app_bcrio)
+ __field(unsigned long long, app_mcrio)
__field(unsigned long long, fs_drio)
__field(unsigned long long, fs_gdrio)
__field(unsigned long long, fs_cdrio)
@@ -1849,7 +1859,10 @@ TRACE_EVENT(f2fs_iostat,
__entry->app_bio = iostat[APP_BUFFERED_IO];
__entry->app_wio = iostat[APP_WRITE_IO];
__entry->app_mio = iostat[APP_MAPPED_IO];
+ __entry->app_bcdio = iostat[APP_BUFFERED_CDATA_IO];
+ __entry->app_mcdio = iostat[APP_MAPPED_CDATA_IO];
__entry->fs_dio = iostat[FS_DATA_IO];
+ __entry->fs_cdio = iostat[FS_CDATA_IO];
__entry->fs_nio = iostat[FS_NODE_IO];
__entry->fs_mio = iostat[FS_META_IO];
__entry->fs_gc_dio = iostat[FS_GC_DATA_IO];
@@ -1861,6 +1874,8 @@ TRACE_EVENT(f2fs_iostat,
__entry->app_brio = iostat[APP_BUFFERED_READ_IO];
__entry->app_rio = iostat[APP_READ_IO];
__entry->app_mrio = iostat[APP_MAPPED_READ_IO];
+ __entry->app_bcrio = iostat[APP_BUFFERED_CDATA_READ_IO];
+ __entry->app_mcrio = iostat[APP_MAPPED_CDATA_READ_IO];
__entry->fs_drio = iostat[FS_DATA_READ_IO];
__entry->fs_gdrio = iostat[FS_GDATA_READ_IO];
__entry->fs_cdrio = iostat[FS_CDATA_READ_IO];
@@ -1870,20 +1885,24 @@ TRACE_EVENT(f2fs_iostat,
),
TP_printk("dev = (%d,%d), "
- "app [write=%llu (direct=%llu, buffered=%llu), mapped=%llu], "
- "fs [data=%llu, node=%llu, meta=%llu, discard=%llu], "
+ "app [write=%llu (direct=%llu, buffered=%llu), mapped=%llu, "
+ "compr(buffered=%llu, mapped=%llu)], "
+ "fs [data=%llu, cdata=%llu, node=%llu, meta=%llu, discard=%llu], "
"gc [data=%llu, node=%llu], "
"cp [data=%llu, node=%llu, meta=%llu], "
"app [read=%llu (direct=%llu, buffered=%llu), mapped=%llu], "
- "fs [data=%llu, (gc_data=%llu, compr_data=%llu), "
+ "compr(buffered=%llu, mapped=%llu)], "
+ "fs [data=%llu, (gc_data=%llu, cdata=%llu), "
"node=%llu, meta=%llu]",
show_dev(__entry->dev), __entry->app_wio, __entry->app_dio,
- __entry->app_bio, __entry->app_mio, __entry->fs_dio,
+ __entry->app_bio, __entry->app_mio, __entry->app_bcdio,
+ __entry->app_mcdio, __entry->fs_dio, __entry->fs_cdio,
__entry->fs_nio, __entry->fs_mio, __entry->fs_discard,
__entry->fs_gc_dio, __entry->fs_gc_nio, __entry->fs_cp_dio,
__entry->fs_cp_nio, __entry->fs_cp_mio,
__entry->app_rio, __entry->app_drio, __entry->app_brio,
- __entry->app_mrio, __entry->fs_drio, __entry->fs_gdrio,
+ __entry->app_mrio, __entry->app_bcrio, __entry->app_mcrio,
+ __entry->fs_drio, __entry->fs_gdrio,
__entry->fs_cdrio, __entry->fs_nrio, __entry->fs_mrio)
);
diff --git a/include/trace/events/habanalabs.h b/include/trace/events/habanalabs.h
new file mode 100644
index 000000000000..f05c5fa668a2
--- /dev/null
+++ b/include/trace/events/habanalabs.h
@@ -0,0 +1,93 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2021 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM habanalabs
+
+#if !defined(_TRACE_HABANALABS_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_HABANALABS_H
+
+#include <linux/tracepoint.h>
+
+DECLARE_EVENT_CLASS(habanalabs_mmu_template,
+ TP_PROTO(struct device *dev, u64 virt_addr, u64 phys_addr, u32 page_size, bool flush_pte),
+
+ TP_ARGS(dev, virt_addr, phys_addr, page_size, flush_pte),
+
+ TP_STRUCT__entry(
+ __string(dname, dev_name(dev))
+ __field(u64, virt_addr)
+ __field(u64, phys_addr)
+ __field(u32, page_size)
+ __field(u8, flush_pte)
+ ),
+
+ TP_fast_assign(
+ __assign_str(dname, dev_name(dev));
+ __entry->virt_addr = virt_addr;
+ __entry->phys_addr = phys_addr;
+ __entry->page_size = page_size;
+ __entry->flush_pte = flush_pte;
+ ),
+
+ TP_printk("%s: vaddr: %#llx, paddr: %#llx, psize: %#x, flush: %s",
+ __get_str(dname),
+ __entry->virt_addr,
+ __entry->phys_addr,
+ __entry->page_size,
+ __entry->flush_pte ? "true" : "false")
+);
+
+DEFINE_EVENT(habanalabs_mmu_template, habanalabs_mmu_map,
+ TP_PROTO(struct device *dev, u64 virt_addr, u64 phys_addr, u32 page_size, bool flush_pte),
+ TP_ARGS(dev, virt_addr, phys_addr, page_size, flush_pte));
+
+DEFINE_EVENT(habanalabs_mmu_template, habanalabs_mmu_unmap,
+ TP_PROTO(struct device *dev, u64 virt_addr, u64 phys_addr, u32 page_size, bool flush_pte),
+ TP_ARGS(dev, virt_addr, phys_addr, page_size, flush_pte));
+
+DECLARE_EVENT_CLASS(habanalabs_dma_alloc_template,
+ TP_PROTO(struct device *dev, u64 cpu_addr, u64 dma_addr, size_t size, const char *caller),
+
+ TP_ARGS(dev, cpu_addr, dma_addr, size, caller),
+
+ TP_STRUCT__entry(
+ __string(dname, dev_name(dev))
+ __field(u64, cpu_addr)
+ __field(u64, dma_addr)
+ __field(u32, size)
+ __field(const char *, caller)
+ ),
+
+ TP_fast_assign(
+ __assign_str(dname, dev_name(dev));
+ __entry->cpu_addr = cpu_addr;
+ __entry->dma_addr = dma_addr;
+ __entry->size = size;
+ __entry->caller = caller;
+ ),
+
+ TP_printk("%s: cpu_addr: %#llx, dma_addr: %#llx, size: %#x, caller: %s",
+ __get_str(dname),
+ __entry->cpu_addr,
+ __entry->dma_addr,
+ __entry->size,
+ __entry->caller)
+);
+
+DEFINE_EVENT(habanalabs_dma_alloc_template, habanalabs_dma_alloc,
+ TP_PROTO(struct device *dev, u64 cpu_addr, u64 dma_addr, size_t size, const char *caller),
+ TP_ARGS(dev, cpu_addr, dma_addr, size, caller));
+
+DEFINE_EVENT(habanalabs_dma_alloc_template, habanalabs_dma_free,
+ TP_PROTO(struct device *dev, u64 cpu_addr, u64 dma_addr, size_t size, const char *caller),
+ TP_ARGS(dev, cpu_addr, dma_addr, size, caller));
+
+#endif /* if !defined(_TRACE_HABANALABS_H) || defined(TRACE_HEADER_MULTI_READ) */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
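Usage follows the usual tracepoint pattern: exactly one compilation unit in the driver defines CREATE_TRACE_POINTS before including the header, and call sites simply invoke the generated trace_* helpers. A hedged sketch with illustrative names:

/* in one .c file of the driver only */
#define CREATE_TRACE_POINTS
#include <trace/events/habanalabs.h>

/* at a call site, e.g. after a page table entry has been written */
static void my_note_map(struct device *dev, u64 virt_addr, u64 phys_addr,
			u32 page_size, bool flush_pte)
{
	trace_habanalabs_mmu_map(dev, virt_addr, phys_addr, page_size,
				 flush_pte);
}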
diff --git a/include/trace/events/huge_memory.h b/include/trace/events/huge_memory.h
index d651f3437367..935af4947917 100644
--- a/include/trace/events/huge_memory.h
+++ b/include/trace/events/huge_memory.h
@@ -11,11 +11,14 @@
EM( SCAN_FAIL, "failed") \
EM( SCAN_SUCCEED, "succeeded") \
EM( SCAN_PMD_NULL, "pmd_null") \
+ EM( SCAN_PMD_NONE, "pmd_none") \
+ EM( SCAN_PMD_MAPPED, "page_pmd_mapped") \
EM( SCAN_EXCEED_NONE_PTE, "exceed_none_pte") \
EM( SCAN_EXCEED_SWAP_PTE, "exceed_swap_pte") \
EM( SCAN_EXCEED_SHARED_PTE, "exceed_shared_pte") \
EM( SCAN_PTE_NON_PRESENT, "pte_non_present") \
EM( SCAN_PTE_UFFD_WP, "pte_uffd_wp") \
+ EM( SCAN_PTE_MAPPED_HUGEPAGE, "pte_mapped_hugepage") \
EM( SCAN_PAGE_RO, "no_writable_page") \
EM( SCAN_LACK_REFERENCED_PAGE, "lack_referenced_page") \
EM( SCAN_PAGE_NULL, "page_null") \
@@ -166,5 +169,39 @@ TRACE_EVENT(mm_collapse_huge_page_swapin,
__entry->ret)
);
+TRACE_EVENT(mm_khugepaged_scan_file,
+
+ TP_PROTO(struct mm_struct *mm, struct page *page, const char *filename,
+ int present, int swap, int result),
+
+ TP_ARGS(mm, page, filename, present, swap, result),
+
+ TP_STRUCT__entry(
+ __field(struct mm_struct *, mm)
+ __field(unsigned long, pfn)
+ __string(filename, filename)
+ __field(int, present)
+ __field(int, swap)
+ __field(int, result)
+ ),
+
+ TP_fast_assign(
+ __entry->mm = mm;
+ __entry->pfn = page ? page_to_pfn(page) : -1;
+ __assign_str(filename, filename);
+ __entry->present = present;
+ __entry->swap = swap;
+ __entry->result = result;
+ ),
+
+ TP_printk("mm=%p, scan_pfn=0x%lx, filename=%s, present=%d, swap=%d, result=%s",
+ __entry->mm,
+ __entry->pfn,
+ __get_str(filename),
+ __entry->present,
+ __entry->swap,
+ __print_symbolic(__entry->result, SCAN_STATUS))
+);
+
#endif /* __HUGE_MEMORY_H */
#include <trace/define_trace.h>
diff --git a/include/trace/events/io_uring.h b/include/trace/events/io_uring.h
index c5b21ff0ac85..936fd41bf147 100644
--- a/include/trace/events/io_uring.h
+++ b/include/trace/events/io_uring.h
@@ -655,6 +655,35 @@ TRACE_EVENT(io_uring_short_write,
__entry->wanted, __entry->got)
);
+/*
+ * io_uring_local_work_run - ran ring local task work
+ *
+ * @ctx:		pointer to an io_uring_ctx
+ * @count: how many functions it ran
+ * @loops: how many loops it ran
+ *
+ */
+TRACE_EVENT(io_uring_local_work_run,
+
+ TP_PROTO(void *ctx, int count, unsigned int loops),
+
+ TP_ARGS(ctx, count, loops),
+
+ TP_STRUCT__entry (
+ __field(void *, ctx )
+ __field(int, count )
+ __field(unsigned int, loops )
+ ),
+
+ TP_fast_assign(
+ __entry->ctx = ctx;
+ __entry->count = count;
+ __entry->loops = loops;
+ ),
+
+ TP_printk("ring %p, count %d, loops %u", __entry->ctx, __entry->count, __entry->loops)
+);
+
#endif /* _TRACE_IO_URING_H */
/* This part must be outside protection */
diff --git a/include/trace/events/kmem.h b/include/trace/events/kmem.h
index 4cb51ace600d..243073cfc29d 100644
--- a/include/trace/events/kmem.h
+++ b/include/trace/events/kmem.h
@@ -9,16 +9,15 @@
#include <linux/tracepoint.h>
#include <trace/events/mmflags.h>
-DECLARE_EVENT_CLASS(kmem_alloc,
+TRACE_EVENT(kmem_cache_alloc,
TP_PROTO(unsigned long call_site,
const void *ptr,
struct kmem_cache *s,
- size_t bytes_req,
- size_t bytes_alloc,
- gfp_t gfp_flags),
+ gfp_t gfp_flags,
+ int node),
- TP_ARGS(call_site, ptr, s, bytes_req, bytes_alloc, gfp_flags),
+ TP_ARGS(call_site, ptr, s, gfp_flags, node),
TP_STRUCT__entry(
__field( unsigned long, call_site )
@@ -26,56 +25,42 @@ DECLARE_EVENT_CLASS(kmem_alloc,
__field( size_t, bytes_req )
__field( size_t, bytes_alloc )
__field( unsigned long, gfp_flags )
+ __field( int, node )
__field( bool, accounted )
),
TP_fast_assign(
__entry->call_site = call_site;
__entry->ptr = ptr;
- __entry->bytes_req = bytes_req;
- __entry->bytes_alloc = bytes_alloc;
+ __entry->bytes_req = s->object_size;
+ __entry->bytes_alloc = s->size;
__entry->gfp_flags = (__force unsigned long)gfp_flags;
+ __entry->node = node;
__entry->accounted = IS_ENABLED(CONFIG_MEMCG_KMEM) ?
((gfp_flags & __GFP_ACCOUNT) ||
- (s && s->flags & SLAB_ACCOUNT)) : false;
+ (s->flags & SLAB_ACCOUNT)) : false;
),
- TP_printk("call_site=%pS ptr=%p bytes_req=%zu bytes_alloc=%zu gfp_flags=%s accounted=%s",
+ TP_printk("call_site=%pS ptr=%p bytes_req=%zu bytes_alloc=%zu gfp_flags=%s node=%d accounted=%s",
(void *)__entry->call_site,
__entry->ptr,
__entry->bytes_req,
__entry->bytes_alloc,
show_gfp_flags(__entry->gfp_flags),
+ __entry->node,
__entry->accounted ? "true" : "false")
);
-DEFINE_EVENT(kmem_alloc, kmalloc,
-
- TP_PROTO(unsigned long call_site, const void *ptr, struct kmem_cache *s,
- size_t bytes_req, size_t bytes_alloc, gfp_t gfp_flags),
-
- TP_ARGS(call_site, ptr, s, bytes_req, bytes_alloc, gfp_flags)
-);
-
-DEFINE_EVENT(kmem_alloc, kmem_cache_alloc,
-
- TP_PROTO(unsigned long call_site, const void *ptr, struct kmem_cache *s,
- size_t bytes_req, size_t bytes_alloc, gfp_t gfp_flags),
-
- TP_ARGS(call_site, ptr, s, bytes_req, bytes_alloc, gfp_flags)
-);
-
-DECLARE_EVENT_CLASS(kmem_alloc_node,
+TRACE_EVENT(kmalloc,
TP_PROTO(unsigned long call_site,
const void *ptr,
- struct kmem_cache *s,
size_t bytes_req,
size_t bytes_alloc,
gfp_t gfp_flags,
int node),
- TP_ARGS(call_site, ptr, s, bytes_req, bytes_alloc, gfp_flags, node),
+ TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags, node),
TP_STRUCT__entry(
__field( unsigned long, call_site )
@@ -84,7 +69,6 @@ DECLARE_EVENT_CLASS(kmem_alloc_node,
__field( size_t, bytes_alloc )
__field( unsigned long, gfp_flags )
__field( int, node )
- __field( bool, accounted )
),
TP_fast_assign(
@@ -94,9 +78,6 @@ DECLARE_EVENT_CLASS(kmem_alloc_node,
__entry->bytes_alloc = bytes_alloc;
__entry->gfp_flags = (__force unsigned long)gfp_flags;
__entry->node = node;
- __entry->accounted = IS_ENABLED(CONFIG_MEMCG_KMEM) ?
- ((gfp_flags & __GFP_ACCOUNT) ||
- (s && s->flags & SLAB_ACCOUNT)) : false;
),
TP_printk("call_site=%pS ptr=%p bytes_req=%zu bytes_alloc=%zu gfp_flags=%s node=%d accounted=%s",
@@ -106,25 +87,8 @@ DECLARE_EVENT_CLASS(kmem_alloc_node,
__entry->bytes_alloc,
show_gfp_flags(__entry->gfp_flags),
__entry->node,
- __entry->accounted ? "true" : "false")
-);
-
-DEFINE_EVENT(kmem_alloc_node, kmalloc_node,
-
- TP_PROTO(unsigned long call_site, const void *ptr,
- struct kmem_cache *s, size_t bytes_req, size_t bytes_alloc,
- gfp_t gfp_flags, int node),
-
- TP_ARGS(call_site, ptr, s, bytes_req, bytes_alloc, gfp_flags, node)
-);
-
-DEFINE_EVENT(kmem_alloc_node, kmem_cache_alloc_node,
-
- TP_PROTO(unsigned long call_site, const void *ptr,
- struct kmem_cache *s, size_t bytes_req, size_t bytes_alloc,
- gfp_t gfp_flags, int node),
-
- TP_ARGS(call_site, ptr, s, bytes_req, bytes_alloc, gfp_flags, node)
+ (IS_ENABLED(CONFIG_MEMCG_KMEM) &&
+ (__entry->gfp_flags & (__force unsigned long)__GFP_ACCOUNT)) ? "true" : "false")
);
TRACE_EVENT(kfree,
@@ -149,20 +113,20 @@ TRACE_EVENT(kfree,
TRACE_EVENT(kmem_cache_free,
- TP_PROTO(unsigned long call_site, const void *ptr, const char *name),
+ TP_PROTO(unsigned long call_site, const void *ptr, const struct kmem_cache *s),
- TP_ARGS(call_site, ptr, name),
+ TP_ARGS(call_site, ptr, s),
TP_STRUCT__entry(
__field( unsigned long, call_site )
__field( const void *, ptr )
- __string( name, name )
+ __string( name, s->name )
),
TP_fast_assign(
__entry->call_site = call_site;
__entry->ptr = ptr;
- __assign_str(name, name);
+ __assign_str(name, s->name);
),
TP_printk("call_site=%pS ptr=%p name=%s",
diff --git a/include/trace/events/maple_tree.h b/include/trace/events/maple_tree.h
new file mode 100644
index 000000000000..2be403bdc2bd
--- /dev/null
+++ b/include/trace/events/maple_tree.h
@@ -0,0 +1,123 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM maple_tree
+
+#if !defined(_TRACE_MM_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_MM_H
+
+
+#include <linux/tracepoint.h>
+
+struct ma_state;
+
+TRACE_EVENT(ma_op,
+
+ TP_PROTO(const char *fn, struct ma_state *mas),
+
+ TP_ARGS(fn, mas),
+
+ TP_STRUCT__entry(
+ __field(const char *, fn)
+ __field(unsigned long, min)
+ __field(unsigned long, max)
+ __field(unsigned long, index)
+ __field(unsigned long, last)
+ __field(void *, node)
+ ),
+
+ TP_fast_assign(
+ __entry->fn = fn;
+ __entry->min = mas->min;
+ __entry->max = mas->max;
+ __entry->index = mas->index;
+ __entry->last = mas->last;
+ __entry->node = mas->node;
+ ),
+
+ TP_printk("%s\tNode: %p (%lu %lu) range: %lu-%lu",
+ __entry->fn,
+ (void *) __entry->node,
+ (unsigned long) __entry->min,
+ (unsigned long) __entry->max,
+ (unsigned long) __entry->index,
+ (unsigned long) __entry->last
+ )
+)
+TRACE_EVENT(ma_read,
+
+ TP_PROTO(const char *fn, struct ma_state *mas),
+
+ TP_ARGS(fn, mas),
+
+ TP_STRUCT__entry(
+ __field(const char *, fn)
+ __field(unsigned long, min)
+ __field(unsigned long, max)
+ __field(unsigned long, index)
+ __field(unsigned long, last)
+ __field(void *, node)
+ ),
+
+ TP_fast_assign(
+ __entry->fn = fn;
+ __entry->min = mas->min;
+ __entry->max = mas->max;
+ __entry->index = mas->index;
+ __entry->last = mas->last;
+ __entry->node = mas->node;
+ ),
+
+ TP_printk("%s\tNode: %p (%lu %lu) range: %lu-%lu",
+ __entry->fn,
+ (void *) __entry->node,
+ (unsigned long) __entry->min,
+ (unsigned long) __entry->max,
+ (unsigned long) __entry->index,
+ (unsigned long) __entry->last
+ )
+)
+
+TRACE_EVENT(ma_write,
+
+ TP_PROTO(const char *fn, struct ma_state *mas, unsigned long piv,
+ void *val),
+
+ TP_ARGS(fn, mas, piv, val),
+
+ TP_STRUCT__entry(
+ __field(const char *, fn)
+ __field(unsigned long, min)
+ __field(unsigned long, max)
+ __field(unsigned long, index)
+ __field(unsigned long, last)
+ __field(unsigned long, piv)
+ __field(void *, val)
+ __field(void *, node)
+ ),
+
+ TP_fast_assign(
+ __entry->fn = fn;
+ __entry->min = mas->min;
+ __entry->max = mas->max;
+ __entry->index = mas->index;
+ __entry->last = mas->last;
+ __entry->piv = piv;
+ __entry->val = val;
+ __entry->node = mas->node;
+ ),
+
+ TP_printk("%s\tNode %p (%lu %lu) range:%lu-%lu piv (%lu) val %p",
+ __entry->fn,
+ (void *) __entry->node,
+ (unsigned long) __entry->min,
+ (unsigned long) __entry->max,
+ (unsigned long) __entry->index,
+ (unsigned long) __entry->last,
+ (unsigned long) __entry->piv,
+ (void *) __entry->val
+ )
+)
+#endif /* _TRACE_MM_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
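All three events take a function name plus an ma_state and record its min/max/index/last and node, so instrumenting a walk looks like the sketch below. my_lookup() is hypothetical; MA_STATE() and mas_walk() are existing maple tree APIs:

#include <linux/maple_tree.h>
#include <linux/rcupdate.h>
#include <trace/events/maple_tree.h>

static void *my_lookup(struct maple_tree *mt, unsigned long index)
{
	MA_STATE(mas, mt, index, index);
	void *entry;

	trace_ma_read(__func__, &mas);

	rcu_read_lock();
	entry = mas_walk(&mas);
	rcu_read_unlock();

	trace_ma_op(__func__, &mas);
	return entry;
}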
diff --git a/include/trace/events/mmap.h b/include/trace/events/mmap.h
index 4661f7ba07c0..216de5f03621 100644
--- a/include/trace/events/mmap.h
+++ b/include/trace/events/mmap.h
@@ -42,6 +42,79 @@ TRACE_EVENT(vm_unmapped_area,
__entry->low_limit, __entry->high_limit, __entry->align_mask,
__entry->align_offset)
);
+
+TRACE_EVENT(vma_mas_szero,
+ TP_PROTO(struct maple_tree *mt, unsigned long start,
+ unsigned long end),
+
+ TP_ARGS(mt, start, end),
+
+ TP_STRUCT__entry(
+ __field(struct maple_tree *, mt)
+ __field(unsigned long, start)
+ __field(unsigned long, end)
+ ),
+
+ TP_fast_assign(
+ __entry->mt = mt;
+ __entry->start = start;
+ __entry->end = end;
+ ),
+
+ TP_printk("mt_mod %p, (NULL), SNULL, %lu, %lu,",
+ __entry->mt,
+ (unsigned long) __entry->start,
+ (unsigned long) __entry->end
+ )
+);
+
+TRACE_EVENT(vma_store,
+ TP_PROTO(struct maple_tree *mt, struct vm_area_struct *vma),
+
+ TP_ARGS(mt, vma),
+
+ TP_STRUCT__entry(
+ __field(struct maple_tree *, mt)
+ __field(struct vm_area_struct *, vma)
+ __field(unsigned long, vm_start)
+ __field(unsigned long, vm_end)
+ ),
+
+ TP_fast_assign(
+ __entry->mt = mt;
+ __entry->vma = vma;
+ __entry->vm_start = vma->vm_start;
+ __entry->vm_end = vma->vm_end - 1;
+ ),
+
+ TP_printk("mt_mod %p, (%p), STORE, %lu, %lu,",
+ __entry->mt, __entry->vma,
+ (unsigned long) __entry->vm_start,
+ (unsigned long) __entry->vm_end
+ )
+);
+
+
+TRACE_EVENT(exit_mmap,
+ TP_PROTO(struct mm_struct *mm),
+
+ TP_ARGS(mm),
+
+ TP_STRUCT__entry(
+ __field(struct mm_struct *, mm)
+ __field(struct maple_tree *, mt)
+ ),
+
+ TP_fast_assign(
+ __entry->mm = mm;
+ __entry->mt = &mm->mm_mt;
+ ),
+
+ TP_printk("mt_mod %p, DESTROY\n",
+ __entry->mt
+ )
+);
+
#endif
/* This part must be outside protection */
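A call-site sketch of what code storing a VMA into mm->mm_mt would emit; this is a fragment with mm and vma assumed in scope, and note that the events record vm_end - 1, the inclusive last address of the range:

	/* clearing an old range before reuse */
	trace_vma_mas_szero(&mm->mm_mt, vma->vm_start, vma->vm_end - 1);

	/* storing the VMA over its range */
	trace_vma_store(&mm->mm_mt, vma);

	/* tearing the whole tree down on exit */
	trace_exit_mmap(mm);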
diff --git a/include/trace/events/scmi.h b/include/trace/events/scmi.h
index 65016a767b7a..f160d68f961d 100644
--- a/include/trace/events/scmi.h
+++ b/include/trace/events/scmi.h
@@ -27,9 +27,9 @@ TRACE_EVENT(scmi_fc_call,
__entry->val2 = val2;
),
- TP_printk("[0x%02X]:[0x%02X]:[%08X]:%u:%u",
- __entry->protocol_id, __entry->msg_id,
- __entry->res_id, __entry->val1, __entry->val2)
+ TP_printk("pt=%02X msg_id=%02X res_id:%u vals=%u:%u",
+ __entry->protocol_id, __entry->msg_id,
+ __entry->res_id, __entry->val1, __entry->val2)
);
TRACE_EVENT(scmi_xfer_begin,
@@ -53,9 +53,9 @@ TRACE_EVENT(scmi_xfer_begin,
__entry->poll = poll;
),
- TP_printk("transfer_id=%d msg_id=%u protocol_id=%u seq=%u poll=%u",
- __entry->transfer_id, __entry->msg_id, __entry->protocol_id,
- __entry->seq, __entry->poll)
+ TP_printk("pt=%02X msg_id=%02X seq=%04X transfer_id=%X poll=%u",
+ __entry->protocol_id, __entry->msg_id, __entry->seq,
+ __entry->transfer_id, __entry->poll)
);
TRACE_EVENT(scmi_xfer_response_wait,
@@ -81,9 +81,9 @@ TRACE_EVENT(scmi_xfer_response_wait,
__entry->poll = poll;
),
- TP_printk("transfer_id=%d msg_id=%u protocol_id=%u seq=%u tmo_ms=%u poll=%u",
- __entry->transfer_id, __entry->msg_id, __entry->protocol_id,
- __entry->seq, __entry->timeout, __entry->poll)
+ TP_printk("pt=%02X msg_id=%02X seq=%04X transfer_id=%X tmo_ms=%u poll=%u",
+ __entry->protocol_id, __entry->msg_id, __entry->seq,
+ __entry->transfer_id, __entry->timeout, __entry->poll)
);
TRACE_EVENT(scmi_xfer_end,
@@ -107,9 +107,9 @@ TRACE_EVENT(scmi_xfer_end,
__entry->status = status;
),
- TP_printk("transfer_id=%d msg_id=%u protocol_id=%u seq=%u status=%d",
- __entry->transfer_id, __entry->msg_id, __entry->protocol_id,
- __entry->seq, __entry->status)
+ TP_printk("pt=%02X msg_id=%02X seq=%04X transfer_id=%X s=%d",
+ __entry->protocol_id, __entry->msg_id, __entry->seq,
+ __entry->transfer_id, __entry->status)
);
TRACE_EVENT(scmi_rx_done,
@@ -133,9 +133,9 @@ TRACE_EVENT(scmi_rx_done,
__entry->msg_type = msg_type;
),
- TP_printk("transfer_id=%d msg_id=%u protocol_id=%u seq=%u msg_type=%u",
- __entry->transfer_id, __entry->msg_id, __entry->protocol_id,
- __entry->seq, __entry->msg_type)
+ TP_printk("pt=%02X msg_id=%02X seq=%04X transfer_id=%X msg_type=%u",
+ __entry->protocol_id, __entry->msg_id, __entry->seq,
+ __entry->transfer_id, __entry->msg_type)
);
TRACE_EVENT(scmi_msg_dump,
diff --git a/include/trace/events/sof.h b/include/trace/events/sof.h
new file mode 100644
index 000000000000..21c2a1efb9f6
--- /dev/null
+++ b/include/trace/events/sof.h
@@ -0,0 +1,121 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright(c) 2022 Intel Corporation. All rights reserved.
+ *
+ * Author: Noah Klayman <noah.klayman@intel.com>
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM sof
+
+#if !defined(_TRACE_SOF_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_SOF_H
+#include <linux/tracepoint.h>
+#include <linux/types.h>
+#include <sound/sof/stream.h>
+#include "../../../sound/soc/sof/sof-audio.h"
+
+DECLARE_EVENT_CLASS(sof_widget_template,
+ TP_PROTO(struct snd_sof_widget *swidget),
+ TP_ARGS(swidget),
+ TP_STRUCT__entry(
+ __string(name, swidget->widget->name)
+ __field(int, use_count)
+ ),
+ TP_fast_assign(
+ __assign_str(name, swidget->widget->name);
+ __entry->use_count = swidget->use_count;
+ ),
+ TP_printk("name=%s use_count=%d", __get_str(name), __entry->use_count)
+);
+
+DEFINE_EVENT(sof_widget_template, sof_widget_setup,
+ TP_PROTO(struct snd_sof_widget *swidget),
+ TP_ARGS(swidget)
+);
+
+DEFINE_EVENT(sof_widget_template, sof_widget_free,
+ TP_PROTO(struct snd_sof_widget *swidget),
+ TP_ARGS(swidget)
+);
+
+TRACE_EVENT(sof_ipc3_period_elapsed_position,
+ TP_PROTO(struct snd_sof_dev *sdev, struct sof_ipc_stream_posn *posn),
+ TP_ARGS(sdev, posn),
+ TP_STRUCT__entry(
+ __string(device_name, dev_name(sdev->dev))
+ __field(u64, host_posn)
+ __field(u64, dai_posn)
+ __field(u64, wallclock)
+ ),
+ TP_fast_assign(
+ __assign_str(device_name, dev_name(sdev->dev));
+ __entry->host_posn = posn->host_posn;
+ __entry->dai_posn = posn->dai_posn;
+ __entry->wallclock = posn->wallclock;
+ ),
+ TP_printk("device_name=%s host_posn=%#llx dai_posn=%#llx wallclock=%#llx",
+ __get_str(device_name), __entry->host_posn, __entry->dai_posn,
+ __entry->wallclock)
+);
+
+TRACE_EVENT(sof_pcm_pointer_position,
+ TP_PROTO(struct snd_sof_dev *sdev,
+ struct snd_sof_pcm *spcm,
+ struct snd_pcm_substream *substream,
+ snd_pcm_uframes_t dma_posn,
+ snd_pcm_uframes_t dai_posn
+ ),
+ TP_ARGS(sdev, spcm, substream, dma_posn, dai_posn),
+ TP_STRUCT__entry(
+ __string(device_name, dev_name(sdev->dev))
+ __field(u32, pcm_id)
+ __field(int, stream)
+ __field(unsigned long, dma_posn)
+ __field(unsigned long, dai_posn)
+ ),
+ TP_fast_assign(
+ __assign_str(device_name, dev_name(sdev->dev));
+ __entry->pcm_id = le32_to_cpu(spcm->pcm.pcm_id);
+ __entry->stream = substream->stream;
+ __entry->dma_posn = dma_posn;
+ __entry->dai_posn = dai_posn;
+ ),
+ TP_printk("device_name=%s pcm_id=%d stream=%d dma_posn=%lu dai_posn=%lu",
+ __get_str(device_name), __entry->pcm_id, __entry->stream,
+ __entry->dma_posn, __entry->dai_posn)
+);
+
+TRACE_EVENT(sof_stream_position_ipc_rx,
+ TP_PROTO(struct device *dev),
+ TP_ARGS(dev),
+ TP_STRUCT__entry(
+ __string(device_name, dev_name(dev))
+ ),
+ TP_fast_assign(
+ __assign_str(device_name, dev_name(dev));
+ ),
+ TP_printk("device_name=%s", __get_str(device_name))
+);
+
+TRACE_EVENT(sof_ipc4_fw_config,
+ TP_PROTO(struct snd_sof_dev *sdev, char *key, u32 value),
+ TP_ARGS(sdev, key, value),
+ TP_STRUCT__entry(
+ __string(device_name, dev_name(sdev->dev))
+ __string(key, key)
+ __field(u32, value)
+ ),
+ TP_fast_assign(
+ __assign_str(device_name, dev_name(sdev->dev));
+ __assign_str(key, key);
+ __entry->value = value;
+ ),
+ TP_printk("device_name=%s key=%s value=%d",
+ __get_str(device_name), __get_str(key), __entry->value)
+);
+
+#endif /* _TRACE_SOF_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/include/trace/events/sof_intel.h b/include/trace/events/sof_intel.h
new file mode 100644
index 000000000000..2a77f9d26c0b
--- /dev/null
+++ b/include/trace/events/sof_intel.h
@@ -0,0 +1,148 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright(c) 2022 Intel Corporation. All rights reserved.
+ *
+ * Author: Noah Klayman <noah.klayman@intel.com>
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM sof_intel
+
+#if !defined(_TRACE_SOF_INTEL_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_SOF_INTEL_H
+#include <linux/tracepoint.h>
+#include <sound/hdaudio.h>
+#include "../../../sound/soc/sof/sof-audio.h"
+
+TRACE_EVENT(sof_intel_hda_irq,
+ TP_PROTO(struct snd_sof_dev *sdev, char *source),
+ TP_ARGS(sdev, source),
+ TP_STRUCT__entry(
+ __string(device_name, dev_name(sdev->dev))
+ __string(source, source)
+ ),
+ TP_fast_assign(
+ __assign_str(device_name, dev_name(sdev->dev));
+ __assign_str(source, source);
+ ),
+ TP_printk("device_name=%s source=%s",
+ __get_str(device_name), __get_str(source))
+);
+
+DECLARE_EVENT_CLASS(sof_intel_ipc_firmware_template,
+	TP_PROTO(struct snd_sof_dev *sdev, u32 msg, u32 msg_ext),
+	TP_ARGS(sdev, msg, msg_ext),
+ TP_STRUCT__entry(
+ __string(device_name, dev_name(sdev->dev))
+ __field(u32, msg)
+ __field(u32, msg_ext)
+ ),
+ TP_fast_assign(
+ __assign_str(device_name, dev_name(sdev->dev));
+ __entry->msg = msg;
+ __entry->msg_ext = msg_ext;
+ ),
+ TP_printk("device_name=%s msg=%#x msg_ext=%#x",
+ __get_str(device_name), __entry->msg, __entry->msg_ext)
+);
+
+DEFINE_EVENT(sof_intel_ipc_firmware_template, sof_intel_ipc_firmware_response,
+ TP_PROTO(struct snd_sof_dev *sdev, u32 msg, u32 msg_ext),
+ TP_ARGS(sdev, msg, msg_ext)
+);
+
+DEFINE_EVENT(sof_intel_ipc_firmware_template, sof_intel_ipc_firmware_initiated,
+ TP_PROTO(struct snd_sof_dev *sdev, u32 msg, u32 msg_ext),
+ TP_ARGS(sdev, msg, msg_ext)
+);
+
+TRACE_EVENT(sof_intel_D0I3C_updated,
+ TP_PROTO(struct snd_sof_dev *sdev, u8 reg),
+ TP_ARGS(sdev, reg),
+ TP_STRUCT__entry(
+ __string(device_name, dev_name(sdev->dev))
+ __field(u8, reg)
+ ),
+ TP_fast_assign(
+ __assign_str(device_name, dev_name(sdev->dev));
+ __entry->reg = reg;
+ ),
+ TP_printk("device_name=%s register=%#x",
+ __get_str(device_name), __entry->reg)
+);
+
+TRACE_EVENT(sof_intel_hda_irq_ipc_check,
+ TP_PROTO(struct snd_sof_dev *sdev, u32 irq_status),
+ TP_ARGS(sdev, irq_status),
+ TP_STRUCT__entry(
+ __string(device_name, dev_name(sdev->dev))
+ __field(u32, irq_status)
+ ),
+ TP_fast_assign(
+ __assign_str(device_name, dev_name(sdev->dev));
+ __entry->irq_status = irq_status;
+ ),
+ TP_printk("device_name=%s irq_status=%#x",
+ __get_str(device_name), __entry->irq_status)
+);
+
+TRACE_EVENT(sof_intel_hda_dsp_pcm,
+ TP_PROTO(struct snd_sof_dev *sdev,
+ struct hdac_stream *hstream,
+ struct snd_pcm_substream *substream,
+ snd_pcm_uframes_t pos
+ ),
+ TP_ARGS(sdev, hstream, substream, pos),
+ TP_STRUCT__entry(
+ __string(device_name, dev_name(sdev->dev))
+ __field(u32, hstream_index)
+ __field(u32, substream)
+ __field(unsigned long, pos)
+ ),
+ TP_fast_assign(
+ __assign_str(device_name, dev_name(sdev->dev));
+ __entry->hstream_index = hstream->index;
+ __entry->substream = substream->stream;
+ __entry->pos = pos;
+ ),
+ TP_printk("device_name=%s hstream_index=%d substream=%d pos=%lu",
+ __get_str(device_name), __entry->hstream_index,
+ __entry->substream, __entry->pos)
+);
+
+TRACE_EVENT(sof_intel_hda_dsp_stream_status,
+ TP_PROTO(struct device *dev, struct hdac_stream *s, u32 status),
+ TP_ARGS(dev, s, status),
+ TP_STRUCT__entry(
+ __string(device_name, dev_name(dev))
+ __field(u32, stream)
+ __field(u32, status)
+ ),
+ TP_fast_assign(
+ __assign_str(device_name, dev_name(dev));
+ __entry->stream = s->index;
+ __entry->status = status;
+ ),
+ TP_printk("device_name=%s stream=%d status=%#x",
+ __get_str(device_name), __entry->stream, __entry->status)
+);
+
+TRACE_EVENT(sof_intel_hda_dsp_check_stream_irq,
+ TP_PROTO(struct snd_sof_dev *sdev, u32 status),
+ TP_ARGS(sdev, status),
+ TP_STRUCT__entry(
+ __string(device_name, dev_name(sdev->dev))
+ __field(u32, status)
+ ),
+ TP_fast_assign(
+ __assign_str(device_name, dev_name(sdev->dev));
+ __entry->status = status;
+ ),
+ TP_printk("device_name=%s status=%#x",
+ __get_str(device_name), __entry->status)
+);
+
+#endif /* _TRACE_SOF_INTEL_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/include/uapi/asm-generic/hugetlb_encode.h b/include/uapi/asm-generic/hugetlb_encode.h
index 4f3d5aaa11f5..de687009bfe5 100644
--- a/include/uapi/asm-generic/hugetlb_encode.h
+++ b/include/uapi/asm-generic/hugetlb_encode.h
@@ -20,18 +20,18 @@
#define HUGETLB_FLAG_ENCODE_SHIFT 26
#define HUGETLB_FLAG_ENCODE_MASK 0x3f
-#define HUGETLB_FLAG_ENCODE_16KB (14 << HUGETLB_FLAG_ENCODE_SHIFT)
-#define HUGETLB_FLAG_ENCODE_64KB (16 << HUGETLB_FLAG_ENCODE_SHIFT)
-#define HUGETLB_FLAG_ENCODE_512KB (19 << HUGETLB_FLAG_ENCODE_SHIFT)
-#define HUGETLB_FLAG_ENCODE_1MB (20 << HUGETLB_FLAG_ENCODE_SHIFT)
-#define HUGETLB_FLAG_ENCODE_2MB (21 << HUGETLB_FLAG_ENCODE_SHIFT)
-#define HUGETLB_FLAG_ENCODE_8MB (23 << HUGETLB_FLAG_ENCODE_SHIFT)
-#define HUGETLB_FLAG_ENCODE_16MB (24 << HUGETLB_FLAG_ENCODE_SHIFT)
-#define HUGETLB_FLAG_ENCODE_32MB (25 << HUGETLB_FLAG_ENCODE_SHIFT)
-#define HUGETLB_FLAG_ENCODE_256MB (28 << HUGETLB_FLAG_ENCODE_SHIFT)
-#define HUGETLB_FLAG_ENCODE_512MB (29 << HUGETLB_FLAG_ENCODE_SHIFT)
-#define HUGETLB_FLAG_ENCODE_1GB (30 << HUGETLB_FLAG_ENCODE_SHIFT)
-#define HUGETLB_FLAG_ENCODE_2GB (31 << HUGETLB_FLAG_ENCODE_SHIFT)
-#define HUGETLB_FLAG_ENCODE_16GB (34 << HUGETLB_FLAG_ENCODE_SHIFT)
+#define HUGETLB_FLAG_ENCODE_16KB (14U << HUGETLB_FLAG_ENCODE_SHIFT)
+#define HUGETLB_FLAG_ENCODE_64KB (16U << HUGETLB_FLAG_ENCODE_SHIFT)
+#define HUGETLB_FLAG_ENCODE_512KB (19U << HUGETLB_FLAG_ENCODE_SHIFT)
+#define HUGETLB_FLAG_ENCODE_1MB (20U << HUGETLB_FLAG_ENCODE_SHIFT)
+#define HUGETLB_FLAG_ENCODE_2MB (21U << HUGETLB_FLAG_ENCODE_SHIFT)
+#define HUGETLB_FLAG_ENCODE_8MB (23U << HUGETLB_FLAG_ENCODE_SHIFT)
+#define HUGETLB_FLAG_ENCODE_16MB (24U << HUGETLB_FLAG_ENCODE_SHIFT)
+#define HUGETLB_FLAG_ENCODE_32MB (25U << HUGETLB_FLAG_ENCODE_SHIFT)
+#define HUGETLB_FLAG_ENCODE_256MB (28U << HUGETLB_FLAG_ENCODE_SHIFT)
+#define HUGETLB_FLAG_ENCODE_512MB (29U << HUGETLB_FLAG_ENCODE_SHIFT)
+#define HUGETLB_FLAG_ENCODE_1GB (30U << HUGETLB_FLAG_ENCODE_SHIFT)
+#define HUGETLB_FLAG_ENCODE_2GB (31U << HUGETLB_FLAG_ENCODE_SHIFT)
+#define HUGETLB_FLAG_ENCODE_16GB (34U << HUGETLB_FLAG_ENCODE_SHIFT)
#endif /* _ASM_GENERIC_HUGETLB_ENCODE_H_ */
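The U suffix matters because the largest encoding no longer fits in a signed 32-bit int: 34 << 26 (the 16GB encoding) overflows int and can trip undefined-behaviour checks, while 34U << 26 is well-defined unsigned arithmetic. From user space the values are consumed through the MAP_HUGE_* aliases; a hedged sketch:

#include <stddef.h>
#include <sys/mman.h>
#include <linux/mman.h>	/* MAP_HUGE_16GB is built from HUGETLB_FLAG_ENCODE_16GB */

static void *map_gigantic(size_t len)
{
	return mmap(NULL, len, PROT_READ | PROT_WRITE,
		    MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB | MAP_HUGE_16GB,
		    -1, 0);
}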
diff --git a/include/uapi/asm-generic/mman-common.h b/include/uapi/asm-generic/mman-common.h
index 6c1aa92a92e4..6ce1f1ceb432 100644
--- a/include/uapi/asm-generic/mman-common.h
+++ b/include/uapi/asm-generic/mman-common.h
@@ -77,6 +77,8 @@
#define MADV_DONTNEED_LOCKED 24 /* like DONTNEED, but drop locked pages too */
+#define MADV_COLLAPSE 25 /* Synchronous hugepage collapse */
+
/* compatibility flags */
#define MAP_FILE 0
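MADV_COLLAPSE is a synchronous, best-effort request to back an already-mapped range with huge pages, as opposed to waiting for khugepaged to collapse it in the background. A hedged user-space sketch; the fallback define covers libc headers that do not carry the new value yet:

#include <errno.h>
#include <stddef.h>
#include <stdio.h>
#include <sys/mman.h>

#ifndef MADV_COLLAPSE
#define MADV_COLLAPSE 25	/* value from mman-common.h above */
#endif

static int collapse_range(void *addr, size_t len)
{
	if (madvise(addr, len, MADV_COLLAPSE)) {
		perror("madvise(MADV_COLLAPSE)");
		return -errno;
	}
	return 0;
}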
diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h
index c2c9c674a223..7ee65c0b4f70 100644
--- a/include/uapi/drm/amdgpu_drm.h
+++ b/include/uapi/drm/amdgpu_drm.h
@@ -755,6 +755,14 @@ struct drm_amdgpu_cs_chunk_data {
#define AMDGPU_INFO_FW_TOC 0x15
/* Subquery id: Query CAP firmware version */
#define AMDGPU_INFO_FW_CAP 0x16
+ /* Subquery id: Query GFX RLCP firmware version */
+ #define AMDGPU_INFO_FW_GFX_RLCP 0x17
+ /* Subquery id: Query GFX RLCV firmware version */
+ #define AMDGPU_INFO_FW_GFX_RLCV 0x18
+ /* Subquery id: Query MES_KIQ firmware version */
+ #define AMDGPU_INFO_FW_MES_KIQ 0x19
+ /* Subquery id: Query MES firmware version */
+ #define AMDGPU_INFO_FW_MES 0x1a
/* number of bytes moved for TTM migration */
#define AMDGPU_INFO_NUM_BYTES_MOVED 0x0f
diff --git a/include/uapi/drm/drm_fourcc.h b/include/uapi/drm/drm_fourcc.h
index 0206f812c569..868d6909b718 100644
--- a/include/uapi/drm/drm_fourcc.h
+++ b/include/uapi/drm/drm_fourcc.h
@@ -99,18 +99,42 @@ extern "C" {
#define DRM_FORMAT_INVALID 0
/* color index */
+#define DRM_FORMAT_C1 fourcc_code('C', '1', ' ', ' ') /* [7:0] C0:C1:C2:C3:C4:C5:C6:C7 1:1:1:1:1:1:1:1 eight pixels/byte */
+#define DRM_FORMAT_C2 fourcc_code('C', '2', ' ', ' ') /* [7:0] C0:C1:C2:C3 2:2:2:2 four pixels/byte */
+#define DRM_FORMAT_C4 fourcc_code('C', '4', ' ', ' ') /* [7:0] C0:C1 4:4 two pixels/byte */
#define DRM_FORMAT_C8 fourcc_code('C', '8', ' ', ' ') /* [7:0] C */
-/* 8 bpp Red */
+/* 1 bpp Darkness (inverse relationship between channel value and brightness) */
+#define DRM_FORMAT_D1 fourcc_code('D', '1', ' ', ' ') /* [7:0] D0:D1:D2:D3:D4:D5:D6:D7 1:1:1:1:1:1:1:1 eight pixels/byte */
+
+/* 2 bpp Darkness (inverse relationship between channel value and brightness) */
+#define DRM_FORMAT_D2 fourcc_code('D', '2', ' ', ' ') /* [7:0] D0:D1:D2:D3 2:2:2:2 four pixels/byte */
+
+/* 4 bpp Darkness (inverse relationship between channel value and brightness) */
+#define DRM_FORMAT_D4 fourcc_code('D', '4', ' ', ' ') /* [7:0] D0:D1 4:4 two pixels/byte */
+
+/* 8 bpp Darkness (inverse relationship between channel value and brightness) */
+#define DRM_FORMAT_D8 fourcc_code('D', '8', ' ', ' ') /* [7:0] D */
+
+/* 1 bpp Red (direct relationship between channel value and brightness) */
+#define DRM_FORMAT_R1 fourcc_code('R', '1', ' ', ' ') /* [7:0] R0:R1:R2:R3:R4:R5:R6:R7 1:1:1:1:1:1:1:1 eight pixels/byte */
+
+/* 2 bpp Red (direct relationship between channel value and brightness) */
+#define DRM_FORMAT_R2 fourcc_code('R', '2', ' ', ' ') /* [7:0] R0:R1:R2:R3 2:2:2:2 four pixels/byte */
+
+/* 4 bpp Red (direct relationship between channel value and brightness) */
+#define DRM_FORMAT_R4 fourcc_code('R', '4', ' ', ' ') /* [7:0] R0:R1 4:4 two pixels/byte */
+
+/* 8 bpp Red (direct relationship between channel value and brightness) */
#define DRM_FORMAT_R8 fourcc_code('R', '8', ' ', ' ') /* [7:0] R */
-/* 10 bpp Red */
+/* 10 bpp Red (direct relationship between channel value and brightness) */
#define DRM_FORMAT_R10 fourcc_code('R', '1', '0', ' ') /* [15:0] x:R 6:10 little endian */
-/* 12 bpp Red */
+/* 12 bpp Red (direct relationship between channel value and brightness) */
#define DRM_FORMAT_R12 fourcc_code('R', '1', '2', ' ') /* [15:0] x:R 4:12 little endian */
-/* 16 bpp Red */
+/* 16 bpp Red (direct relationship between channel value and brightness) */
#define DRM_FORMAT_R16 fourcc_code('R', '1', '6', ' ') /* [15:0] R little endian */
/* 16 bpp RG */
@@ -205,7 +229,9 @@ extern "C" {
#define DRM_FORMAT_VYUY fourcc_code('V', 'Y', 'U', 'Y') /* [31:0] Y1:Cb0:Y0:Cr0 8:8:8:8 little endian */
#define DRM_FORMAT_AYUV fourcc_code('A', 'Y', 'U', 'V') /* [31:0] A:Y:Cb:Cr 8:8:8:8 little endian */
+#define DRM_FORMAT_AVUY8888 fourcc_code('A', 'V', 'U', 'Y') /* [31:0] A:Cr:Cb:Y 8:8:8:8 little endian */
#define DRM_FORMAT_XYUV8888 fourcc_code('X', 'Y', 'U', 'V') /* [31:0] X:Y:Cb:Cr 8:8:8:8 little endian */
+#define DRM_FORMAT_XVUY8888 fourcc_code('X', 'V', 'U', 'Y') /* [31:0] X:Cr:Cb:Y 8:8:8:8 little endian */
#define DRM_FORMAT_VUY888 fourcc_code('V', 'U', '2', '4') /* [23:0] Cr:Cb:Y 8:8:8 little endian */
#define DRM_FORMAT_VUY101010 fourcc_code('V', 'U', '3', '0') /* Y followed by U then V, 10:10:10. Non-linear modifier only */
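The new sub-byte formats pack several pixels per byte, with the first pixel in the most significant bits per the [7:0] annotations above: C1 holds eight 1-bit palette indices per byte, C2 four 2-bit indices, C4 two 4-bit indices, and the D*/R* variants are the darkness and greyscale counterparts. A small addressing sketch for C1, assuming an unpadded row:

#include <stdint.h>

/* minimum bytes per row for DRM_FORMAT_C1, no extra pitch alignment assumed */
static inline unsigned int c1_min_pitch(unsigned int width)
{
	return (width + 7) / 8;
}

/* palette index of pixel x within a row; pixel 0 sits in bit 7 */
static inline unsigned int c1_pixel(const uint8_t *row, unsigned int x)
{
	return (row[x / 8] >> (7 - (x % 8))) & 0x1;
}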
diff --git a/include/uapi/drm/drm_mode.h b/include/uapi/drm/drm_mode.h
index 0a0d56a6158e..fa953309d9ce 100644
--- a/include/uapi/drm/drm_mode.h
+++ b/include/uapi/drm/drm_mode.h
@@ -675,11 +675,11 @@ struct drm_mode_fb_cmd {
* fetch metadata about an existing frame-buffer.
*
* In case of planar formats, this struct allows up to 4 buffer objects with
- * offsets and pitches per plane. The pitch and offset order is dictated by the
- * format FourCC as defined by ``drm_fourcc.h``, e.g. NV12 is described as:
+ * offsets and pitches per plane. The pitch and offset order are dictated by
+ * the format FourCC as defined by ``drm_fourcc.h``, e.g. NV12 is described as:
*
- * YUV 4:2:0 image with a plane of 8 bit Y samples followed by an
- * interleaved U/V plane containing 8 bit 2x2 subsampled colour difference
+ * YUV 4:2:0 image with a plane of 8-bit Y samples followed by an
+ * interleaved U/V plane containing 8-bit 2x2 subsampled colour difference
* samples.
*
* So it would consist of a Y plane at ``offsets[0]`` and a UV plane at
diff --git a/include/uapi/drm/panfrost_drm.h b/include/uapi/drm/panfrost_drm.h
index 9e40277d8185..eac87310b348 100644
--- a/include/uapi/drm/panfrost_drm.h
+++ b/include/uapi/drm/panfrost_drm.h
@@ -224,6 +224,53 @@ struct drm_panfrost_madvise {
__u32 retained; /* out, whether backing store still exists */
};
+/* Definitions for coredump decoding in user space */
+#define PANFROSTDUMP_MAJOR 1
+#define PANFROSTDUMP_MINOR 0
+
+#define PANFROSTDUMP_MAGIC 0x464E4150 /* PANF */
+
+#define PANFROSTDUMP_BUF_REG 0
+#define PANFROSTDUMP_BUF_BOMAP (PANFROSTDUMP_BUF_REG + 1)
+#define PANFROSTDUMP_BUF_BO (PANFROSTDUMP_BUF_BOMAP + 1)
+#define PANFROSTDUMP_BUF_TRAILER (PANFROSTDUMP_BUF_BO + 1)
+
+struct panfrost_dump_object_header {
+ __le32 magic;
+ __le32 type;
+ __le32 file_size;
+ __le32 file_offset;
+
+ union {
+ struct pan_reg_hdr {
+ __le64 jc;
+ __le32 gpu_id;
+ __le32 major;
+ __le32 minor;
+ __le64 nbos;
+ } reghdr;
+
+ struct pan_bomap_hdr {
+ __le32 valid;
+ __le64 iova;
+ __le32 data[2];
+ } bomap;
+
+ /*
+ * Force same size in case we want to expand the header
+ * with new fields and also keep it 512-byte aligned
+ */
+
+ __le32 sizer[496];
+ };
+};
+
+/* Registers object, an array of these */
+struct panfrost_dump_registers {
+ __le32 reg;
+ __le32 value;
+};
+
#if defined(__cplusplus)
}
#endif
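On the user-space side, a decoder walks a sequence of these fixed-size object headers, each describing a register block, a BO mapping, a BO, or the trailer. A hedged sketch of the first validation step; the header path is as installed by the kernel uapi headers and the buffer handling is illustrative:

#include <stdint.h>
#include <string.h>
#include <endian.h>
#include <drm/panfrost_drm.h>

/* returns 0 if buf starts with a plausible dump object header */
static int check_dump_header(const void *buf, size_t buf_len)
{
	struct panfrost_dump_object_header hdr;

	if (buf_len < sizeof(hdr))
		return -1;
	memcpy(&hdr, buf, sizeof(hdr));

	if (le32toh(hdr.magic) != PANFROSTDUMP_MAGIC)
		return -1;
	if (le32toh(hdr.type) > PANFROSTDUMP_BUF_TRAILER)
		return -1;
	return 0;
}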
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index 59a217ca2dfd..51b9aa640ad2 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -87,10 +87,35 @@ struct bpf_cgroup_storage_key {
__u32 attach_type; /* program attach type (enum bpf_attach_type) */
};
+enum bpf_cgroup_iter_order {
+ BPF_CGROUP_ITER_ORDER_UNSPEC = 0,
+ BPF_CGROUP_ITER_SELF_ONLY, /* process only a single object. */
+ BPF_CGROUP_ITER_DESCENDANTS_PRE, /* walk descendants in pre-order. */
+ BPF_CGROUP_ITER_DESCENDANTS_POST, /* walk descendants in post-order. */
+ BPF_CGROUP_ITER_ANCESTORS_UP, /* walk ancestors upward. */
+};
+
union bpf_iter_link_info {
struct {
__u32 map_fd;
} map;
+ struct {
+ enum bpf_cgroup_iter_order order;
+
+ /* At most one of cgroup_fd and cgroup_id can be non-zero. If
+ * both are zero, the walk starts from the default cgroup v2
+ * root. For walking v1 hierarchy, one should always explicitly
+ * specify cgroup_fd.
+ */
+ __u32 cgroup_fd;
+ __u64 cgroup_id;
+ } cgroup;
+ /* Parameters of task iterators. */
+ struct {
+ __u32 tid;
+ __u32 pid;
+ __u32 pid_fd;
+ } task;
};
/* BPF syscall commands, see bpf(2) man-page for more details. */
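From user space the new cgroup member of bpf_iter_link_info is filled in when creating an iterator link; at most one of cgroup_fd and cgroup_id may be set, and order selects the walk. A hedged sketch using libbpf's bpf_link_create(), with prog_fd and cgroup_fd assumed to be already open:

#include <bpf/bpf.h>
#include <linux/bpf.h>

static int attach_cgroup_iter(int prog_fd, int cgroup_fd)
{
	union bpf_iter_link_info linfo = {};
	LIBBPF_OPTS(bpf_link_create_opts, opts);

	linfo.cgroup.cgroup_fd = cgroup_fd;	/* or cgroup_id, not both */
	linfo.cgroup.order = BPF_CGROUP_ITER_DESCENDANTS_PRE;

	opts.iter_info = &linfo;
	opts.iter_info_len = sizeof(linfo);

	return bpf_link_create(prog_fd, 0, BPF_TRACE_ITER, &opts);
}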
@@ -909,6 +934,7 @@ enum bpf_map_type {
BPF_MAP_TYPE_INODE_STORAGE,
BPF_MAP_TYPE_TASK_STORAGE,
BPF_MAP_TYPE_BLOOM_FILTER,
+ BPF_MAP_TYPE_USER_RINGBUF,
};
/* Note that tracing related programs such as
@@ -1233,7 +1259,7 @@ enum {
/* Query effective (directly attached + inherited from ancestor cgroups)
* programs that will be executed for events within a cgroup.
- * attach_flags with this flag are returned only for directly attached programs.
+ * attach_flags with this flag is always returned as 0.
*/
#define BPF_F_QUERY_EFFECTIVE (1U << 0)
@@ -1432,7 +1458,10 @@ union bpf_attr {
__u32 attach_flags;
__aligned_u64 prog_ids;
__u32 prog_cnt;
- __aligned_u64 prog_attach_flags; /* output: per-program attach_flags */
+ /* output: per-program attach_flags.
+ * not allowed to be set during effective query.
+ */
+ __aligned_u64 prog_attach_flags;
} query;
struct { /* anonymous struct used by BPF_RAW_TRACEPOINT_OPEN command */
@@ -2573,10 +2602,12 @@ union bpf_attr {
* There are two supported modes at this time:
*
* * **BPF_ADJ_ROOM_MAC**: Adjust room at the mac layer
- * (room space is added or removed below the layer 2 header).
+ * (room space is added or removed between the layer 2 and
+ * layer 3 headers).
*
* * **BPF_ADJ_ROOM_NET**: Adjust room at the network layer
- * (room space is added or removed below the layer 3 header).
+ * (room space is added or removed between the layer 3 and
+ * layer 4 headers).
*
* The following flags are supported at this time:
*
@@ -3008,8 +3039,18 @@ union bpf_attr {
* **BPF_F_USER_STACK**
* Collect a user space stack instead of a kernel stack.
* **BPF_F_USER_BUILD_ID**
- * Collect buildid+offset instead of ips for user stack,
- * only valid if **BPF_F_USER_STACK** is also specified.
+ * Collect (build_id, file_offset) instead of ips for user
+ * stack, only valid if **BPF_F_USER_STACK** is also
+ * specified.
+ *
+ * *file_offset* is an offset relative to the beginning
+ * of the executable or shared object file backing the vma
+ * which the *ip* falls in. It is *not* an offset relative
+ * to that object's base address. Accordingly, it must be
+ * adjusted by adding (sh_addr - sh_offset), where
+ * sh_{addr,offset} correspond to the executable section
+ * containing *file_offset* in the object, for comparisons
+ * to symbols' st_value to be valid.
*
* **bpf_get_stack**\ () can collect up to
* **PERF_MAX_STACK_DEPTH** both kernel and user frames, subject
@@ -4425,7 +4466,7 @@ union bpf_attr {
*
* **-EEXIST** if the option already exists.
*
- * **-EFAULT** on failrue to parse the existing header options.
+ * **-EFAULT** on failure to parse the existing header options.
*
* **-EPERM** if the helper cannot be used under the current
* *skops*\ **->op**.
@@ -4634,7 +4675,7 @@ union bpf_attr {
* a *map* with *task* as the **key**. From this
* perspective, the usage is not much different from
* **bpf_map_lookup_elem**\ (*map*, **&**\ *task*) except this
- * helper enforces the key must be an task_struct and the map must also
+ * helper enforces the key must be a task_struct and the map must also
* be a **BPF_MAP_TYPE_TASK_STORAGE**.
*
* Underneath, the value is stored locally at *task* instead of
@@ -4692,7 +4733,7 @@ union bpf_attr {
*
* long bpf_ima_inode_hash(struct inode *inode, void *dst, u32 size)
* Description
- * Returns the stored IMA hash of the *inode* (if it's avaialable).
+ * Returns the stored IMA hash of the *inode* (if it's available).
* If the hash is larger than *size*, then only *size*
* bytes will be copied to *dst*
* Return
@@ -4716,12 +4757,12 @@ union bpf_attr {
*
* The argument *len_diff* can be used for querying with a planned
* size change. This allows to check MTU prior to changing packet
- * ctx. Providing an *len_diff* adjustment that is larger than the
+ * ctx. Providing a *len_diff* adjustment that is larger than the
* actual packet size (resulting in negative packet size) will in
- * principle not exceed the MTU, why it is not considered a
- * failure. Other BPF-helpers are needed for performing the
- * planned size change, why the responsability for catch a negative
- * packet size belong in those helpers.
+ * principle not exceed the MTU, which is why it is not considered
+ * a failure. Other BPF helpers are needed for performing the
+ * planned size change; therefore the responsibility for catching
+ * a negative packet size belongs in those helpers.
*
* Specifying *ifindex* zero means the MTU check is performed
* against the current net device. This is practical if this isn't
@@ -4919,6 +4960,7 @@ union bpf_attr {
* Get address of the traced function (for tracing and kprobe programs).
* Return
* Address of the traced function.
+ * 0 for kprobes placed within the function (not at the entry).
*
* u64 bpf_get_attach_cookie(void *ctx)
* Description
@@ -5048,12 +5090,12 @@ union bpf_attr {
*
* long bpf_get_func_arg(void *ctx, u32 n, u64 *value)
* Description
- * Get **n**-th argument (zero based) of the traced function (for tracing programs)
+ * Get **n**-th argument register (zero based) of the traced function (for tracing programs)
* returned in **value**.
*
* Return
* 0 on success.
- * **-EINVAL** if n >= arguments count of traced function.
+ * **-EINVAL** if n >= argument register count of traced function.
*
* long bpf_get_func_ret(void *ctx, u64 *value)
* Description
@@ -5066,24 +5108,37 @@ union bpf_attr {
*
* long bpf_get_func_arg_cnt(void *ctx)
* Description
- * Get number of arguments of the traced function (for tracing programs).
+ * Get the number of argument registers of the traced function (for
+ * tracing programs); function arguments are stored in these registers.
*
* Return
- * The number of arguments of the traced function.
+ * The number of argument registers of the traced function.
*
* int bpf_get_retval(void)
* Description
- * Get the syscall's return value that will be returned to userspace.
+ * Get the BPF program's return value that will be returned to the upper layers.
*
- * This helper is currently supported by cgroup programs only.
+ * This helper is currently supported by cgroup programs and only by the hooks
+ * where the BPF program's return value is returned to userspace via errno.
* Return
- * The syscall's return value.
+ * The BPF program's return value.
*
* int bpf_set_retval(int retval)
* Description
- * Set the syscall's return value that will be returned to userspace.
+ * Set the BPF program's return value that will be returned to the upper layers.
+ *
+ * This helper is currently supported by cgroup programs and only by the hooks
+ * where the BPF program's return value is returned to userspace via errno.
+ *
+ * Note that there is the following corner case where the program exports an error
+ * via bpf_set_retval but signals success via 'return 1':
+ *
+ * bpf_set_retval(-EPERM);
+ * return 1;
+ *
+ * In this case, the BPF program's return value will use the helper's -EPERM.
+ * This still holds true for cgroup/bind{4,6}, which support the extra
+ * 'return 3' success case.
*
- * This helper is currently supported by cgroup programs only.
* Return
* 0 on success, or a negative error in case of failure.
*
@@ -5331,6 +5386,55 @@ union bpf_attr {
* **-EACCES** if the SYN cookie is not valid.
*
* **-EPROTONOSUPPORT** if CONFIG_IPV6 is not builtin.
+ *
+ * u64 bpf_ktime_get_tai_ns(void)
+ * Description
+ * A nonsettable system-wide clock derived from wall-clock time but
+ * ignoring leap seconds. This clock does not experience
+ * discontinuities and backwards jumps caused by NTP inserting leap
+ * seconds as CLOCK_REALTIME does.
+ *
+ * See: **clock_gettime**\ (**CLOCK_TAI**)
+ * Return
+ * Current *ktime*.
+ *
+ * long bpf_user_ringbuf_drain(struct bpf_map *map, void *callback_fn, void *ctx, u64 flags)
+ * Description
+ * Drain samples from the specified user ring buffer, and invoke
+ * the provided callback for each such sample:
+ *
+ * long (\*callback_fn)(struct bpf_dynptr \*dynptr, void \*ctx);
+ *
+ * If **callback_fn** returns 0, the helper will continue to try
+ * and drain the next sample, up to a maximum of
+ * BPF_MAX_USER_RINGBUF_SAMPLES samples. If the return value is 1,
+ * the helper will skip the rest of the samples and return. Other
+ * return values are not used now, and will be rejected by the
+ * verifier.
+ * Return
+ * The number of drained samples if no error was encountered while
+ * draining samples, or 0 if no samples were present in the ring
+ * buffer. If a user-space producer was epoll-waiting on this map,
+ * and at least one sample was drained, they will receive an event
+ * notification notifying them of available space in the ring
+ * buffer. If the BPF_RB_NO_WAKEUP flag is passed to this
+ * function, no wakeup notification will be sent. If the
+ * BPF_RB_FORCE_WAKEUP flag is passed, a wakeup notification will
+ * be sent even if no sample was drained.
+ *
+ * On failure, the returned value is one of the following:
+ *
+ * **-EBUSY** if the ring buffer is contended, and another calling
+ * context was concurrently draining the ring buffer.
+ *
+ * **-EINVAL** if user-space is not properly tracking the ring
+ * buffer due to the producer position not being aligned to 8
+ * bytes, a sample not being aligned to 8 bytes, or the producer
+ * position not matching the advertised length of a sample.
+ *
+ * **-E2BIG** if user-space has tried to publish a sample which is
+ * larger than the size of the ring buffer, or which cannot fit
+ * within a struct bpf_dynptr.
*/
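
A minimal, illustrative BPF-side sketch (not part of this patch) showing how several of the helpers documented above fit together: it drains a BPF_MAP_TYPE_USER_RINGBUF map with bpf_user_ringbuf_drain(), reads a TAI timestamp with bpf_ktime_get_tai_ns(), and inspects argument registers via bpf_get_func_arg_cnt()/bpf_get_func_arg(). The map size, attach point and section names are assumptions, and availability of these helpers depends on program type and kernel support.

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_USER_RINGBUF);
	__uint(max_entries, 256 * 1024);	/* illustrative size */
} user_rb SEC(".maps");

static long handle_sample(struct bpf_dynptr *dynptr, void *ctx)
{
	long *seen = ctx;

	(*seen)++;
	return 0;	/* 0 = keep draining, 1 = stop after this sample */
}

SEC("fentry/do_unlinkat")			/* attach point is an assumption */
int demo(void *ctx)
{
	long seen = 0;
	__u64 arg0 = 0;
	__u64 tai = bpf_ktime_get_tai_ns();	/* CLOCK_TAI-like timestamp */

	/* Number of argument registers of the traced function, and the first one. */
	if (bpf_get_func_arg_cnt(ctx) > 0)
		bpf_get_func_arg(ctx, 0, &arg0);

	/* Drain user-space produced samples; returns a count or a negative error. */
	bpf_user_ringbuf_drain(&user_rb, handle_sample, &seen, 0);

	bpf_printk("tai=%llu arg0=%llu drained=%ld", tai, arg0, seen);
	return 0;
}

char LICENSE[] SEC("license") = "GPL";
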
#define __BPF_FUNC_MAPPER(FN) \
FN(unspec), \
@@ -5541,6 +5645,8 @@ union bpf_attr {
FN(tcp_raw_gen_syncookie_ipv6), \
FN(tcp_raw_check_syncookie_ipv4), \
FN(tcp_raw_check_syncookie_ipv6), \
+ FN(ktime_get_tai_ns), \
+ FN(user_ringbuf_drain), \
/* */
/* integer value in 'imm' field of BPF_CALL instruction selects which helper
@@ -5603,6 +5709,11 @@ enum {
BPF_F_SEQ_NUMBER = (1ULL << 3),
};
+/* BPF_FUNC_skb_get_tunnel_key flags. */
+enum {
+ BPF_F_TUNINFO_FLAGS = (1ULL << 4),
+};
+
/* BPF_FUNC_perf_event_output, BPF_FUNC_perf_event_read and
* BPF_FUNC_perf_event_read_value flags.
*/
@@ -5792,7 +5903,10 @@ struct bpf_tunnel_key {
};
__u8 tunnel_tos;
__u8 tunnel_ttl;
- __u16 tunnel_ext; /* Padding, future use. */
+ union {
+ __u16 tunnel_ext; /* compat */
+ __be16 tunnel_flags;
+ };
__u32 tunnel_label;
union {
__u32 local_ipv4;
@@ -5836,6 +5950,11 @@ enum bpf_ret_code {
* represented by BPF_REDIRECT above).
*/
BPF_LWT_REROUTE = 128,
+ /* BPF_FLOW_DISSECTOR_CONTINUE: used by BPF_PROG_TYPE_FLOW_DISSECTOR
+ * to indicate that no custom dissection was performed, and
+ * fallback to standard dissector is requested.
+ */
+ BPF_FLOW_DISSECTOR_CONTINUE = 129,
};
struct bpf_sock {
@@ -6134,11 +6253,26 @@ struct bpf_link_info {
struct {
__aligned_u64 target_name; /* in/out: target_name buffer ptr */
__u32 target_name_len; /* in/out: target_name buffer len */
+
+ /* If the iter specific field is 32 bits, it can be put
+ * in the first or second union. Otherwise it should be
+ * put in the second union.
+ */
union {
struct {
__u32 map_id;
} map;
};
+ union {
+ struct {
+ __u64 cgroup_id;
+ __u32 order;
+ } cgroup;
+ struct {
+ __u32 tid;
+ __u32 pid;
+ } task;
+ };
} iter;
struct {
__u32 netns_ino;
diff --git a/include/uapi/linux/btrfs.h b/include/uapi/linux/btrfs.h
index 7ada84e4a3ed..5655e89b962b 100644
--- a/include/uapi/linux/btrfs.h
+++ b/include/uapi/linux/btrfs.h
@@ -290,6 +290,12 @@ struct btrfs_ioctl_fs_info_args {
#define BTRFS_FEATURE_COMPAT_RO_FREE_SPACE_TREE_VALID (1ULL << 1)
#define BTRFS_FEATURE_COMPAT_RO_VERITY (1ULL << 2)
+/*
+ * Put all block group items into a dedicated block group tree, greatly
+ * reducing mount time for large filesystem due to better locality.
+ */
+#define BTRFS_FEATURE_COMPAT_RO_BLOCK_GROUP_TREE (1ULL << 3)
+
#define BTRFS_FEATURE_INCOMPAT_MIXED_BACKREF (1ULL << 0)
#define BTRFS_FEATURE_INCOMPAT_DEFAULT_SUBVOL (1ULL << 1)
#define BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS (1ULL << 2)
diff --git a/include/uapi/linux/btrfs_tree.h b/include/uapi/linux/btrfs_tree.h
index 5f32a2a495dc..1f7a38ec6ac3 100644
--- a/include/uapi/linux/btrfs_tree.h
+++ b/include/uapi/linux/btrfs_tree.h
@@ -965,6 +965,10 @@ static inline __u16 btrfs_qgroup_level(__u64 qgroupid)
*/
#define BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT (1ULL << 2)
+#define BTRFS_QGROUP_STATUS_FLAGS_MASK (BTRFS_QGROUP_STATUS_FLAG_ON | \
+ BTRFS_QGROUP_STATUS_FLAG_RESCAN | \
+ BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT)
+
#define BTRFS_QGROUP_STATUS_VERSION 1
struct btrfs_qgroup_status_item {
diff --git a/include/uapi/linux/can.h b/include/uapi/linux/can.h
index 90801ada2bbe..dd645ea72306 100644
--- a/include/uapi/linux/can.h
+++ b/include/uapi/linux/can.h
@@ -48,6 +48,7 @@
#include <linux/types.h>
#include <linux/socket.h>
+#include <linux/stddef.h> /* for offsetof */
/* controller area network (CAN) kernel definitions */
@@ -60,6 +61,7 @@
#define CAN_SFF_MASK 0x000007FFU /* standard frame format (SFF) */
#define CAN_EFF_MASK 0x1FFFFFFFU /* extended frame format (EFF) */
#define CAN_ERR_MASK 0x1FFFFFFFU /* omit EFF, RTR, ERR flags */
+#define CANXL_PRIO_MASK CAN_SFF_MASK /* 11 bit priority mask */
/*
* Controller Area Network Identifier structure
@@ -73,6 +75,7 @@ typedef __u32 canid_t;
#define CAN_SFF_ID_BITS 11
#define CAN_EFF_ID_BITS 29
+#define CANXL_PRIO_BITS CAN_SFF_ID_BITS
/*
* Controller Area Network Error Message Frame Mask structure
@@ -91,6 +94,16 @@ typedef __u32 can_err_mask_t;
#define CANFD_MAX_DLC 15
#define CANFD_MAX_DLEN 64
+/*
+ * CAN XL payload length and DLC definitions according to ISO 11898-1
+ * CAN XL DLC ranges from 0 .. 2047 => data length from 1 .. 2048 byte
+ */
+#define CANXL_MIN_DLC 0
+#define CANXL_MAX_DLC 2047
+#define CANXL_MAX_DLC_MASK 0x07FF
+#define CANXL_MIN_DLEN 1
+#define CANXL_MAX_DLEN 2048
+
/**
* struct can_frame - Classical CAN frame structure (aka CAN 2.0B)
* @can_id: CAN ID of the frame and CAN_*_FLAG flags, see canid_t definition
@@ -141,8 +154,8 @@ struct can_frame {
* When this is done the former differentiation via CAN_MTU / CANFD_MTU gets
* lost. CANFD_FDF allows programmers to mark CAN FD frames in the case of
* using struct canfd_frame for mixed CAN / CAN FD content (dual use).
- * N.B. the Kernel APIs do NOT provide mixed CAN / CAN FD content inside of
- * struct canfd_frame therefore the CANFD_FDF flag is disregarded by Linux.
+ * Since the introduction of CAN XL the CANFD_FDF flag is set in all CAN FD
+ * frame structures provided by the CAN subsystem of the Linux kernel.
*/
#define CANFD_BRS 0x01 /* bit rate switch (second bitrate for payload data) */
#define CANFD_ESI 0x02 /* error state indicator of the transmitting node */
@@ -166,8 +179,46 @@ struct canfd_frame {
__u8 data[CANFD_MAX_DLEN] __attribute__((aligned(8)));
};
+/*
+ * defined bits for canxl_frame.flags
+ *
+ * The canxl_frame.flags element contains two bits CANXL_XLF and CANXL_SEC
+ * and shares the relative position of the struct can[fd]_frame.len element.
+ * The CANXL_XLF bit ALWAYS needs to be set to indicate a valid CAN XL frame.
+ * As a side effect setting this bit intentionally breaks the length checks
+ * for Classical CAN and CAN FD frames.
+ *
+ * Undefined bits in canxl_frame.flags are reserved and shall be set to zero.
+ */
+#define CANXL_XLF 0x80 /* mandatory CAN XL frame flag (must always be set!) */
+#define CANXL_SEC 0x01 /* Simple Extended Content (security/segmentation) */
+
+/**
+ * struct canxl_frame - CAN with e'X'tended frame 'L'ength frame structure
+ * @prio: 11 bit arbitration priority with zero'ed CAN_*_FLAG flags
+ * @flags: additional flags for CAN XL
+ * @sdt: SDU (service data unit) type
+ * @len: frame payload length in byte (CANXL_MIN_DLEN .. CANXL_MAX_DLEN)
+ * @af: acceptance field
+ * @data: CAN XL frame payload (CANXL_MIN_DLEN .. CANXL_MAX_DLEN byte)
+ *
+ * @prio shares the same position as @can_id from struct can[fd]_frame.
+ */
+struct canxl_frame {
+ canid_t prio; /* 11 bit priority for arbitration (canid_t) */
+ __u8 flags; /* additional flags for CAN XL */
+ __u8 sdt; /* SDU (service data unit) type */
+ __u16 len; /* frame payload length in byte */
+ __u32 af; /* acceptance field */
+ __u8 data[CANXL_MAX_DLEN];
+};
+
#define CAN_MTU (sizeof(struct can_frame))
#define CANFD_MTU (sizeof(struct canfd_frame))
+#define CANXL_MTU (sizeof(struct canxl_frame))
+#define CANXL_HDR_SIZE (offsetof(struct canxl_frame, data))
+#define CANXL_MIN_MTU (CANXL_HDR_SIZE + 64)
+#define CANXL_MAX_MTU CANXL_MTU
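
A minimal user-space sketch (not part of this patch) of sending one CAN XL frame on a CAN_RAW socket; it opts in via the CAN_RAW_XL_FRAMES socket option added later in this series. The interface name "can0" and the payload are assumptions, and most error handling is trimmed.

#include <linux/can.h>
#include <linux/can/raw.h>
#include <net/if.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <unistd.h>

int send_canxl(void)
{
	struct canxl_frame xl = { 0 };
	struct sockaddr_can addr = { 0 };
	struct ifreq ifr;
	int on = 1, s;

	s = socket(PF_CAN, SOCK_RAW, CAN_RAW);
	if (s < 0)
		return -1;

	/* Opt in to CAN XL frames; the interface MTU must allow them. */
	setsockopt(s, SOL_CAN_RAW, CAN_RAW_XL_FRAMES, &on, sizeof(on));

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "can0", sizeof(ifr.ifr_name) - 1);
	ioctl(s, SIOCGIFINDEX, &ifr);

	addr.can_family = AF_CAN;
	addr.can_ifindex = ifr.ifr_ifindex;
	bind(s, (struct sockaddr *)&addr, sizeof(addr));

	xl.prio = 0x123;		/* 11 bit priority, see CANXL_PRIO_MASK */
	xl.flags = CANXL_XLF;		/* mandatory for a valid CAN XL frame */
	xl.len = 3;
	memcpy(xl.data, "abc", 3);

	/* Only the header plus the used payload needs to be written. */
	if (write(s, &xl, CANXL_HDR_SIZE + xl.len) < 0) {
		close(s);
		return -1;
	}
	close(s);
	return 0;
}
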
/* particular protocols of the protocol family PF_CAN */
#define CAN_RAW 1 /* RAW sockets */
diff --git a/include/uapi/linux/can/raw.h b/include/uapi/linux/can/raw.h
index 3386aa81fdf2..ff12f525c37c 100644
--- a/include/uapi/linux/can/raw.h
+++ b/include/uapi/linux/can/raw.h
@@ -62,6 +62,7 @@ enum {
CAN_RAW_RECV_OWN_MSGS, /* receive my own msgs (default:off) */
CAN_RAW_FD_FRAMES, /* allow CAN FD frames (default:off) */
CAN_RAW_JOIN_FILTERS, /* all filters must match to trigger */
+ CAN_RAW_XL_FRAMES, /* allow CAN XL frames (default:off) */
};
#endif /* !_UAPI_CAN_RAW_H */
diff --git a/include/uapi/linux/counter.h b/include/uapi/linux/counter.h
index 96c5ffd368ad..8ab12d731e3b 100644
--- a/include/uapi/linux/counter.h
+++ b/include/uapi/linux/counter.h
@@ -63,6 +63,8 @@ enum counter_event_type {
COUNTER_EVENT_INDEX,
/* State of counter is changed */
COUNTER_EVENT_CHANGE_OF_STATE,
+ /* Count value captured */
+ COUNTER_EVENT_CAPTURE,
};
/**
@@ -153,4 +155,10 @@ enum counter_synapse_action {
COUNTER_SYNAPSE_ACTION_BOTH_EDGES,
};
+/* Signal polarity values */
+enum counter_signal_polarity {
+ COUNTER_SIGNAL_POLARITY_POSITIVE,
+ COUNTER_SIGNAL_POLARITY_NEGATIVE,
+};
+
#endif /* _UAPI_COUNTER_H_ */
diff --git a/include/uapi/linux/dlm.h b/include/uapi/linux/dlm.h
index 0d2eca287567..1923f4f3b05e 100644
--- a/include/uapi/linux/dlm.h
+++ b/include/uapi/linux/dlm.h
@@ -69,7 +69,6 @@ struct dlm_lksb {
/* dlm_new_lockspace() flags */
#define DLM_LSFL_TIMEWARN 0x00000002
-#define DLM_LSFL_FS 0x00000004
#define DLM_LSFL_NEWEXCL 0x00000008
diff --git a/include/uapi/linux/dn.h b/include/uapi/linux/dn.h
deleted file mode 100644
index 36ca71bd8bbe..000000000000
--- a/include/uapi/linux/dn.h
+++ /dev/null
@@ -1,149 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-#ifndef _LINUX_DN_H
-#define _LINUX_DN_H
-
-#include <linux/ioctl.h>
-#include <linux/types.h>
-#include <linux/if_ether.h>
-
-/*
-
- DECnet Data Structures and Constants
-
-*/
-
-/*
- * DNPROTO_NSP can't be the same as SOL_SOCKET,
- * so increment each by one (compared to ULTRIX)
- */
-#define DNPROTO_NSP 2 /* NSP protocol number */
-#define DNPROTO_ROU 3 /* Routing protocol number */
-#define DNPROTO_NML 4 /* Net mgt protocol number */
-#define DNPROTO_EVL 5 /* Evl protocol number (usr) */
-#define DNPROTO_EVR 6 /* Evl protocol number (evl) */
-#define DNPROTO_NSPT 7 /* NSP trace protocol number */
-
-
-#define DN_ADDL 2
-#define DN_MAXADDL 2 /* ULTRIX headers have 20 here, but pathworks has 2 */
-#define DN_MAXOPTL 16
-#define DN_MAXOBJL 16
-#define DN_MAXACCL 40
-#define DN_MAXALIASL 128
-#define DN_MAXNODEL 256
-#define DNBUFSIZE 65023
-
-/*
- * SET/GET Socket options - must match the DSO_ numbers below
- */
-#define SO_CONDATA 1
-#define SO_CONACCESS 2
-#define SO_PROXYUSR 3
-#define SO_LINKINFO 7
-
-#define DSO_CONDATA 1 /* Set/Get connect data */
-#define DSO_DISDATA 10 /* Set/Get disconnect data */
-#define DSO_CONACCESS 2 /* Set/Get connect access data */
-#define DSO_ACCEPTMODE 4 /* Set/Get accept mode */
-#define DSO_CONACCEPT 5 /* Accept deferred connection */
-#define DSO_CONREJECT 6 /* Reject deferred connection */
-#define DSO_LINKINFO 7 /* Set/Get link information */
-#define DSO_STREAM 8 /* Set socket type to stream */
-#define DSO_SEQPACKET 9 /* Set socket type to sequenced packet */
-#define DSO_MAXWINDOW 11 /* Maximum window size allowed */
-#define DSO_NODELAY 12 /* Turn off nagle */
-#define DSO_CORK 13 /* Wait for more data! */
-#define DSO_SERVICES 14 /* NSP Services field */
-#define DSO_INFO 15 /* NSP Info field */
-#define DSO_MAX 15 /* Maximum option number */
-
-
-/* LINK States */
-#define LL_INACTIVE 0
-#define LL_CONNECTING 1
-#define LL_RUNNING 2
-#define LL_DISCONNECTING 3
-
-#define ACC_IMMED 0
-#define ACC_DEFER 1
-
-#define SDF_WILD 1 /* Wild card object */
-#define SDF_PROXY 2 /* Addr eligible for proxy */
-#define SDF_UICPROXY 4 /* Use uic-based proxy */
-
-/* Structures */
-
-
-struct dn_naddr {
- __le16 a_len;
- __u8 a_addr[DN_MAXADDL]; /* Two bytes little endian */
-};
-
-struct sockaddr_dn {
- __u16 sdn_family;
- __u8 sdn_flags;
- __u8 sdn_objnum;
- __le16 sdn_objnamel;
- __u8 sdn_objname[DN_MAXOBJL];
- struct dn_naddr sdn_add;
-};
-#define sdn_nodeaddrl sdn_add.a_len /* Node address length */
-#define sdn_nodeaddr sdn_add.a_addr /* Node address */
-
-
-
-/*
- * DECnet set/get DSO_CONDATA, DSO_DISDATA (optional data) structure
- */
-struct optdata_dn {
- __le16 opt_status; /* Extended status return */
-#define opt_sts opt_status
- __le16 opt_optl; /* Length of user data */
- __u8 opt_data[16]; /* User data */
-};
-
-struct accessdata_dn {
- __u8 acc_accl;
- __u8 acc_acc[DN_MAXACCL];
- __u8 acc_passl;
- __u8 acc_pass[DN_MAXACCL];
- __u8 acc_userl;
- __u8 acc_user[DN_MAXACCL];
-};
-
-/*
- * DECnet logical link information structure
- */
-struct linkinfo_dn {
- __u16 idn_segsize; /* Segment size for link */
- __u8 idn_linkstate; /* Logical link state */
-};
-
-/*
- * Ethernet address format (for DECnet)
- */
-union etheraddress {
- __u8 dne_addr[ETH_ALEN]; /* Full ethernet address */
- struct {
- __u8 dne_hiord[4]; /* DECnet HIORD prefix */
- __u8 dne_nodeaddr[2]; /* DECnet node address */
- } dne_remote;
-};
-
-
-/*
- * DECnet physical socket address format
- */
-struct dn_addr {
- __le16 dna_family; /* AF_DECnet */
- union etheraddress dna_netaddr; /* DECnet ethernet address */
-};
-
-#define DECNET_IOCTL_BASE 0x89 /* PROTOPRIVATE range */
-
-#define SIOCSNETADDR _IOW(DECNET_IOCTL_BASE, 0xe0, struct dn_naddr)
-#define SIOCGNETADDR _IOR(DECNET_IOCTL_BASE, 0xe1, struct dn_naddr)
-#define OSIOCSNETADDR _IOW(DECNET_IOCTL_BASE, 0xe0, int)
-#define OSIOCGNETADDR _IOR(DECNET_IOCTL_BASE, 0xe1, int)
-
-#endif /* _LINUX_DN_H */
diff --git a/include/uapi/linux/dw100.h b/include/uapi/linux/dw100.h
new file mode 100644
index 000000000000..3356496edd6b
--- /dev/null
+++ b/include/uapi/linux/dw100.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
+/* Copyright 2022 NXP */
+
+#ifndef __UAPI_DW100_H__
+#define __UAPI_DW100_H__
+
+#include <linux/v4l2-controls.h>
+
+/*
+ * Check Documentation/userspace-api/media/drivers/dw100.rst for control details.
+ */
+#define V4L2_CID_DW100_DEWARPING_16x16_VERTEX_MAP (V4L2_CID_USER_DW100_BASE + 1)
+
+#endif
diff --git a/include/uapi/linux/ethtool.h b/include/uapi/linux/ethtool.h
index 2d5741fd44bb..dc2aa3d75b39 100644
--- a/include/uapi/linux/ethtool.h
+++ b/include/uapi/linux/ethtool.h
@@ -737,6 +737,51 @@ enum ethtool_module_power_mode {
};
/**
+ * enum ethtool_podl_pse_admin_state - operational state of the PoDL PSE
+ * functions. IEEE 802.3-2018 30.15.1.1.2 aPoDLPSEAdminState
+ * @ETHTOOL_PODL_PSE_ADMIN_STATE_UNKNOWN: state of the PoDL PSE functions is
+ *	unknown
+ * @ETHTOOL_PODL_PSE_ADMIN_STATE_DISABLED: PoDL PSE functions are disabled
+ * @ETHTOOL_PODL_PSE_ADMIN_STATE_ENABLED: PoDL PSE functions are enabled
+ */
+enum ethtool_podl_pse_admin_state {
+ ETHTOOL_PODL_PSE_ADMIN_STATE_UNKNOWN = 1,
+ ETHTOOL_PODL_PSE_ADMIN_STATE_DISABLED,
+ ETHTOOL_PODL_PSE_ADMIN_STATE_ENABLED,
+};
+
+/**
+ * enum ethtool_podl_pse_pw_d_status - power detection status of the PoDL PSE.
+ * IEEE 802.3-2018 30.15.1.1.3 aPoDLPSEPowerDetectionStatus:
+ * @ETHTOOL_PODL_PSE_PW_D_STATUS_UNKNOWN: PoDL PSE power detection status is
+ *	unknown
+ * @ETHTOOL_PODL_PSE_PW_D_STATUS_DISABLED: "The enumeration “disabled” is
+ * asserted true when the PoDL PSE state diagram variable mr_pse_enable is
+ * false"
+ * @ETHTOOL_PODL_PSE_PW_D_STATUS_SEARCHING: "The enumeration “searching” is
+ * asserted true when either of the PSE state diagram variables
+ * pi_detecting or pi_classifying is true."
+ * @ETHTOOL_PODL_PSE_PW_D_STATUS_DELIVERING: "The enumeration “deliveringPower”
+ * is asserted true when the PoDL PSE state diagram variable pi_powered is
+ * true."
+ * @ETHTOOL_PODL_PSE_PW_D_STATUS_SLEEP: "The enumeration “sleep” is asserted
+ * true when the PoDL PSE state diagram variable pi_sleeping is true."
+ * @ETHTOOL_PODL_PSE_PW_D_STATUS_IDLE: "The enumeration “idle” is asserted true
+ * when the logical combination of the PoDL PSE state diagram variables
+ * pi_prebiased*!pi_sleeping is true."
+ * @ETHTOOL_PODL_PSE_PW_D_STATUS_ERROR: "The enumeration “error” is asserted
+ * true when the PoDL PSE state diagram variable overload_held is true."
+ */
+enum ethtool_podl_pse_pw_d_status {
+ ETHTOOL_PODL_PSE_PW_D_STATUS_UNKNOWN = 1,
+ ETHTOOL_PODL_PSE_PW_D_STATUS_DISABLED,
+ ETHTOOL_PODL_PSE_PW_D_STATUS_SEARCHING,
+ ETHTOOL_PODL_PSE_PW_D_STATUS_DELIVERING,
+ ETHTOOL_PODL_PSE_PW_D_STATUS_SLEEP,
+ ETHTOOL_PODL_PSE_PW_D_STATUS_IDLE,
+ ETHTOOL_PODL_PSE_PW_D_STATUS_ERROR,
+};
+
+/**
* struct ethtool_gstrings - string set for data tagging
* @cmd: Command number = %ETHTOOL_GSTRINGS
* @string_set: String set ID; one of &enum ethtool_stringset
@@ -1840,6 +1885,20 @@ static inline int ethtool_validate_duplex(__u8 duplex)
#define MASTER_SLAVE_STATE_SLAVE 3
#define MASTER_SLAVE_STATE_ERR 4
+/* These are used to throttle the rate of data on the phy interface when the
+ * native speed of the interface is higher than the link speed. These should
+ * not be used for phy interfaces which natively support multiple speeds (e.g.
+ * MII or SGMII).
+ */
+/* No rate matching performed. */
+#define RATE_MATCH_NONE 0
+/* The phy sends pause frames to throttle the MAC. */
+#define RATE_MATCH_PAUSE 1
+/* The phy asserts CRS to prevent the MAC from transmitting. */
+#define RATE_MATCH_CRS 2
+/* The MAC is programmed with a sufficiently-large IPG. */
+#define RATE_MATCH_OPEN_LOOP 3
+
/* Which connector port. */
#define PORT_TP 0x00
#define PORT_AUI 0x01
@@ -2033,8 +2092,8 @@ enum ethtool_reset_flags {
* reported consistently by PHYLIB. Read-only.
* @master_slave_cfg: Master/slave port mode.
* @master_slave_state: Master/slave port state.
+ * @rate_matching: Rate adaptation performed by the PHY
* @reserved: Reserved for future use; see the note on reserved space.
- * @reserved1: Reserved for future use; see the note on reserved space.
* @link_mode_masks: Variable length bitmaps.
*
* If autonegotiation is disabled, the speed and @duplex represent the
@@ -2085,7 +2144,7 @@ struct ethtool_link_settings {
__u8 transceiver;
__u8 master_slave_cfg;
__u8 master_slave_state;
- __u8 reserved1[1];
+ __u8 rate_matching;
__u32 reserved[7];
__u32 link_mode_masks[];
/* layout of link_mode_masks fields:
diff --git a/include/uapi/linux/ethtool_netlink.h b/include/uapi/linux/ethtool_netlink.h
index d2fb4f7be61b..bb57084ac524 100644
--- a/include/uapi/linux/ethtool_netlink.h
+++ b/include/uapi/linux/ethtool_netlink.h
@@ -49,6 +49,8 @@ enum {
ETHTOOL_MSG_PHC_VCLOCKS_GET,
ETHTOOL_MSG_MODULE_GET,
ETHTOOL_MSG_MODULE_SET,
+ ETHTOOL_MSG_PSE_GET,
+ ETHTOOL_MSG_PSE_SET,
/* add new constants above here */
__ETHTOOL_MSG_USER_CNT,
@@ -94,6 +96,7 @@ enum {
ETHTOOL_MSG_PHC_VCLOCKS_GET_REPLY,
ETHTOOL_MSG_MODULE_GET_REPLY,
ETHTOOL_MSG_MODULE_NTF,
+ ETHTOOL_MSG_PSE_GET_REPLY,
/* add new constants above here */
__ETHTOOL_MSG_KERNEL_CNT,
@@ -242,6 +245,7 @@ enum {
ETHTOOL_A_LINKMODES_MASTER_SLAVE_CFG, /* u8 */
ETHTOOL_A_LINKMODES_MASTER_SLAVE_STATE, /* u8 */
ETHTOOL_A_LINKMODES_LANES, /* u32 */
+ ETHTOOL_A_LINKMODES_RATE_MATCHING, /* u8 */
/* add new constants above here */
__ETHTOOL_A_LINKMODES_CNT,
@@ -862,6 +866,19 @@ enum {
ETHTOOL_A_MODULE_MAX = (__ETHTOOL_A_MODULE_CNT - 1)
};
+/* Power Sourcing Equipment */
+enum {
+ ETHTOOL_A_PSE_UNSPEC,
+ ETHTOOL_A_PSE_HEADER, /* nest - _A_HEADER_* */
+ ETHTOOL_A_PODL_PSE_ADMIN_STATE, /* u32 */
+ ETHTOOL_A_PODL_PSE_ADMIN_CONTROL, /* u32 */
+ ETHTOOL_A_PODL_PSE_PW_D_STATUS, /* u32 */
+
+ /* add new constants above here */
+ __ETHTOOL_A_PSE_CNT,
+ ETHTOOL_A_PSE_MAX = (__ETHTOOL_A_PSE_CNT - 1)
+};
+
/* generic netlink info */
#define ETHTOOL_GENL_NAME "ethtool"
#define ETHTOOL_GENL_VERSION 1
diff --git a/include/uapi/linux/fuse.h b/include/uapi/linux/fuse.h
index d6ccee961891..76ee8f9e024a 100644
--- a/include/uapi/linux/fuse.h
+++ b/include/uapi/linux/fuse.h
@@ -194,6 +194,9 @@
* - add FUSE_SECURITY_CTX init flag
* - add security context to create, mkdir, symlink, and mknod requests
* - add FUSE_HAS_INODE_DAX, FUSE_ATTR_DAX
+ *
+ * 7.37
+ * - add FUSE_TMPFILE
*/
#ifndef _LINUX_FUSE_H
@@ -229,7 +232,7 @@
#define FUSE_KERNEL_VERSION 7
/** Minor version number of this interface */
-#define FUSE_KERNEL_MINOR_VERSION 36
+#define FUSE_KERNEL_MINOR_VERSION 37
/** The node ID of the root inode */
#define FUSE_ROOT_ID 1
@@ -537,6 +540,7 @@ enum fuse_opcode {
FUSE_SETUPMAPPING = 48,
FUSE_REMOVEMAPPING = 49,
FUSE_SYNCFS = 50,
+ FUSE_TMPFILE = 51,
/* CUSE specific operations */
CUSE_INIT = 4096,
diff --git a/include/uapi/linux/hid.h b/include/uapi/linux/hid.h
index b34492a87a8a..a4dcb34386e3 100644
--- a/include/uapi/linux/hid.h
+++ b/include/uapi/linux/hid.h
@@ -43,15 +43,29 @@
#define USB_INTERFACE_PROTOCOL_MOUSE 2
/*
+ * HID report types --- Ouch! HID spec says 1 2 3!
+ */
+
+enum hid_report_type {
+ HID_INPUT_REPORT = 0,
+ HID_OUTPUT_REPORT = 1,
+ HID_FEATURE_REPORT = 2,
+
+ HID_REPORT_TYPES,
+};
+
+/*
* HID class requests
*/
-#define HID_REQ_GET_REPORT 0x01
-#define HID_REQ_GET_IDLE 0x02
-#define HID_REQ_GET_PROTOCOL 0x03
-#define HID_REQ_SET_REPORT 0x09
-#define HID_REQ_SET_IDLE 0x0A
-#define HID_REQ_SET_PROTOCOL 0x0B
+enum hid_class_request {
+ HID_REQ_GET_REPORT = 0x01,
+ HID_REQ_GET_IDLE = 0x02,
+ HID_REQ_GET_PROTOCOL = 0x03,
+ HID_REQ_SET_REPORT = 0x09,
+ HID_REQ_SET_IDLE = 0x0A,
+ HID_REQ_SET_PROTOCOL = 0x0B,
+};
/*
* HID class descriptor types
diff --git a/include/uapi/linux/if_ether.h b/include/uapi/linux/if_ether.h
index d370165bc621..69e0457eb200 100644
--- a/include/uapi/linux/if_ether.h
+++ b/include/uapi/linux/if_ether.h
@@ -138,6 +138,7 @@
#define ETH_P_LOCALTALK 0x0009 /* Localtalk pseudo type */
#define ETH_P_CAN 0x000C /* CAN: Controller Area Network */
#define ETH_P_CANFD 0x000D /* CANFD: CAN flexible data rate*/
+#define ETH_P_CANXL 0x000E /* CANXL: eXtended frame Length */
#define ETH_P_PPPTALK 0x0010 /* Dummy type for Atalk over PPP*/
#define ETH_P_TR_802_2 0x0011 /* 802.2 frames */
#define ETH_P_MOBITEX 0x0015 /* Mobitex (kaz@cafe.net) */
diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h
index e36d9d2c65a7..5e7a1041df3a 100644
--- a/include/uapi/linux/if_link.h
+++ b/include/uapi/linux/if_link.h
@@ -370,6 +370,7 @@ enum {
IFLA_GRO_MAX_SIZE,
IFLA_TSO_MAX_SIZE,
IFLA_TSO_MAX_SEGS,
+ IFLA_ALLMULTI, /* Allmulti count: > 0 means acts ALLMULTI */
__IFLA_MAX
};
@@ -694,6 +695,7 @@ enum {
IFLA_XFRM_UNSPEC,
IFLA_XFRM_LINK,
IFLA_XFRM_IF_ID,
+ IFLA_XFRM_COLLECT_METADATA,
__IFLA_XFRM_MAX
};
@@ -1374,4 +1376,14 @@ enum {
#define IFLA_MCTP_MAX (__IFLA_MCTP_MAX - 1)
+/* DSA section */
+
+enum {
+ IFLA_DSA_UNSPEC,
+ IFLA_DSA_MASTER,
+ __IFLA_DSA_MAX,
+};
+
+#define IFLA_DSA_MAX (__IFLA_DSA_MAX - 1)
+
#endif /* _UAPI_LINUX_IF_LINK_H */
diff --git a/include/uapi/linux/if_macsec.h b/include/uapi/linux/if_macsec.h
index 3af2aa069a36..d5b6d1f37353 100644
--- a/include/uapi/linux/if_macsec.h
+++ b/include/uapi/linux/if_macsec.h
@@ -22,6 +22,8 @@
#define MACSEC_KEYID_LEN 16
+#define MACSEC_SALT_LEN 12
+
/* cipher IDs as per IEEE802.1AE-2018 (Table 14-1) */
#define MACSEC_CIPHER_ID_GCM_AES_128 0x0080C20001000001ULL
#define MACSEC_CIPHER_ID_GCM_AES_256 0x0080C20001000002ULL
diff --git a/include/uapi/linux/if_tun.h b/include/uapi/linux/if_tun.h
index 2ec07de1d73b..b6d7b868f290 100644
--- a/include/uapi/linux/if_tun.h
+++ b/include/uapi/linux/if_tun.h
@@ -67,6 +67,8 @@
#define IFF_TAP 0x0002
#define IFF_NAPI 0x0010
#define IFF_NAPI_FRAGS 0x0020
+/* Used in TUNSETIFF to bring up tun/tap without carrier */
+#define IFF_NO_CARRIER 0x0040
#define IFF_NO_PI 0x1000
/* This flag has no real effect */
#define IFF_ONE_QUEUE 0x2000
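
A short, illustrative sketch (not part of this patch) of creating a tap device that starts without carrier using the new IFF_NO_CARRIER flag; the device name "tap0" is an assumption, and carrier can later be enabled with the existing TUNSETCARRIER ioctl.

#include <fcntl.h>
#include <linux/if.h>
#include <linux/if_tun.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>

int open_tap_no_carrier(void)
{
	struct ifreq ifr;
	int fd = open("/dev/net/tun", O_RDWR);

	if (fd < 0)
		return -1;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "tap0", IFNAMSIZ - 1);
	ifr.ifr_flags = IFF_TAP | IFF_NO_PI | IFF_NO_CARRIER;

	/* Bring the device up without carrier; toggle it later via TUNSETCARRIER. */
	if (ioctl(fd, TUNSETIFF, &ifr) < 0) {
		close(fd);
		return -1;
	}
	return fd;
}
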
diff --git a/include/uapi/linux/iio/types.h b/include/uapi/linux/iio/types.h
index 472cead10d8d..c79f2f046a0b 100644
--- a/include/uapi/linux/iio/types.h
+++ b/include/uapi/linux/iio/types.h
@@ -95,6 +95,12 @@ enum iio_modifier {
IIO_MOD_ETHANOL,
IIO_MOD_H2,
IIO_MOD_O2,
+ IIO_MOD_LINEAR_X,
+ IIO_MOD_LINEAR_Y,
+ IIO_MOD_LINEAR_Z,
+ IIO_MOD_PITCH,
+ IIO_MOD_YAW,
+ IIO_MOD_ROLL,
};
enum iio_event_type {
@@ -105,6 +111,7 @@ enum iio_event_type {
IIO_EV_TYPE_MAG_ADAPTIVE,
IIO_EV_TYPE_CHANGE,
IIO_EV_TYPE_MAG_REFERENCED,
+ IIO_EV_TYPE_GESTURE,
};
enum iio_event_direction {
@@ -112,7 +119,8 @@ enum iio_event_direction {
IIO_EV_DIR_RISING,
IIO_EV_DIR_FALLING,
IIO_EV_DIR_NONE,
+ IIO_EV_DIR_SINGLETAP,
+ IIO_EV_DIR_DOUBLETAP,
};
#endif /* _UAPI_IIO_TYPES_H_ */
-
diff --git a/include/uapi/linux/in.h b/include/uapi/linux/in.h
index 14168225cecd..f243ce665f74 100644
--- a/include/uapi/linux/in.h
+++ b/include/uapi/linux/in.h
@@ -68,6 +68,8 @@ enum {
#define IPPROTO_PIM IPPROTO_PIM
IPPROTO_COMP = 108, /* Compression Header Protocol */
#define IPPROTO_COMP IPPROTO_COMP
+ IPPROTO_L2TP = 115, /* Layer 2 Tunnelling Protocol */
+#define IPPROTO_L2TP IPPROTO_L2TP
IPPROTO_SCTP = 132, /* Stream Control Transport Protocol */
#define IPPROTO_SCTP IPPROTO_SCTP
IPPROTO_UDPLITE = 136, /* UDP-Lite (RFC 3828) */
@@ -188,21 +190,13 @@ struct ip_mreq_source {
};
struct ip_msfilter {
+ __be32 imsf_multiaddr;
+ __be32 imsf_interface;
+ __u32 imsf_fmode;
+ __u32 imsf_numsrc;
union {
- struct {
- __be32 imsf_multiaddr_aux;
- __be32 imsf_interface_aux;
- __u32 imsf_fmode_aux;
- __u32 imsf_numsrc_aux;
- __be32 imsf_slist[1];
- };
- struct {
- __be32 imsf_multiaddr;
- __be32 imsf_interface;
- __u32 imsf_fmode;
- __u32 imsf_numsrc;
- __be32 imsf_slist_flex[];
- };
+ __be32 imsf_slist[1];
+ __DECLARE_FLEX_ARRAY(__be32, imsf_slist_flex);
};
};
diff --git a/include/uapi/linux/input-event-codes.h b/include/uapi/linux/input-event-codes.h
index dff8e7f17074..7ad931a32970 100644
--- a/include/uapi/linux/input-event-codes.h
+++ b/include/uapi/linux/input-event-codes.h
@@ -862,6 +862,7 @@
#define ABS_TOOL_WIDTH 0x1c
#define ABS_VOLUME 0x20
+#define ABS_PROFILE 0x21
#define ABS_MISC 0x28
diff --git a/include/uapi/linux/io_uring.h b/include/uapi/linux/io_uring.h
index 6b83177fd41d..ab7458033ee3 100644
--- a/include/uapi/linux/io_uring.h
+++ b/include/uapi/linux/io_uring.h
@@ -56,6 +56,7 @@ struct io_uring_sqe {
__u32 hardlink_flags;
__u32 xattr_flags;
__u32 msg_ring_flags;
+ __u32 uring_cmd_flags;
};
__u64 user_data; /* data to be passed back at completion time */
/* pack this to avoid bogus arm OABI complaints */
@@ -157,6 +158,13 @@ enum {
*/
#define IORING_SETUP_SINGLE_ISSUER (1U << 12)
+/*
+ * Defer running task work to get events.
+ * Rather than running bits of task work whenever the task transitions,
+ * try to do it just before it is needed.
+ */
+#define IORING_SETUP_DEFER_TASKRUN (1U << 13)
+
enum io_uring_op {
IORING_OP_NOP,
IORING_OP_READV,
@@ -206,12 +214,21 @@ enum io_uring_op {
IORING_OP_SOCKET,
IORING_OP_URING_CMD,
IORING_OP_SEND_ZC,
+ IORING_OP_SENDMSG_ZC,
/* this goes last, obviously */
IORING_OP_LAST,
};
/*
+ * sqe->uring_cmd_flags
+ * IORING_URING_CMD_FIXED use registered buffer; pass this flag
+ * along with setting sqe->buf_index.
+ */
+#define IORING_URING_CMD_FIXED (1U << 0)
+
+
+/*
* sqe->fsync_flags
*/
#define IORING_FSYNC_DATASYNC (1U << 0)
diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
index eed0315a77a6..0d5d4419139a 100644
--- a/include/uapi/linux/kvm.h
+++ b/include/uapi/linux/kvm.h
@@ -1177,6 +1177,7 @@ struct kvm_ppc_resize_hpt {
#define KVM_CAP_VM_DISABLE_NX_HUGE_PAGES 220
#define KVM_CAP_S390_ZPCI_OP 221
#define KVM_CAP_S390_CPU_TOPOLOGY 222
+#define KVM_CAP_DIRTY_LOG_RING_ACQ_REL 223
#ifdef KVM_CAP_IRQ_ROUTING
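
A minimal sketch (not part of this patch) of probing the new capability from user space with the standard KVM_CHECK_EXTENSION ioctl; the /dev/kvm path is the usual one and error handling is trimmed.

#include <fcntl.h>
#include <linux/kvm.h>
#include <sys/ioctl.h>
#include <unistd.h>

int has_dirty_ring_acq_rel(void)
{
	int kvm = open("/dev/kvm", O_RDWR);
	int ret;

	if (kvm < 0)
		return 0;
	/* > 0 means the kernel supports the acquire/release dirty ring variant. */
	ret = ioctl(kvm, KVM_CHECK_EXTENSION, KVM_CAP_DIRTY_LOG_RING_ACQ_REL);
	close(kvm);
	return ret > 0;
}
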
diff --git a/include/uapi/linux/l2tp.h b/include/uapi/linux/l2tp.h
index bab8c9708611..7d81c3e1ec29 100644
--- a/include/uapi/linux/l2tp.h
+++ b/include/uapi/linux/l2tp.h
@@ -13,8 +13,6 @@
#include <linux/in.h>
#include <linux/in6.h>
-#define IPPROTO_L2TP 115
-
/**
* struct sockaddr_l2tpip - the sockaddr structure for L2TP-over-IP sockets
* @l2tp_family: address family number AF_L2TPIP.
diff --git a/include/uapi/linux/landlock.h b/include/uapi/linux/landlock.h
index 23df4e0e8ace..9c4bcc37a455 100644
--- a/include/uapi/linux/landlock.h
+++ b/include/uapi/linux/landlock.h
@@ -26,7 +26,7 @@ struct landlock_ruleset_attr {
* Landlock filesystem access rights that are not part of
* handled_access_fs are allowed. This is needed for backward
* compatibility reasons. One exception is the
- * LANDLOCK_ACCESS_FS_REFER access right, which is always implicitly
+ * %LANDLOCK_ACCESS_FS_REFER access right, which is always implicitly
* handled, but must still be explicitly handled to add new rules with
* this access right.
*/
@@ -128,11 +128,11 @@ struct landlock_path_beneath_attr {
* hierarchy must also always have the same or a superset of restrictions of
* the source hierarchy. If it is not the case, or if the domain doesn't
* handle this access right, such actions are denied by default with errno
- * set to EXDEV. Linking also requires a LANDLOCK_ACCESS_FS_MAKE_* access
- * right on the destination directory, and renaming also requires a
- * LANDLOCK_ACCESS_FS_REMOVE_* access right on the source's (file or
+ * set to ``EXDEV``. Linking also requires a ``LANDLOCK_ACCESS_FS_MAKE_*``
+ * access right on the destination directory, and renaming also requires a
+ * ``LANDLOCK_ACCESS_FS_REMOVE_*`` access right on the source's (file or
* directory) parent. Otherwise, such actions are denied with errno set to
- * EACCES. The EACCES errno prevails over EXDEV to let user space
+ * ``EACCES``. The ``EACCES`` errno prevails over ``EXDEV`` to let user space
* efficiently deal with an unrecoverable error.
*
* .. warning::
diff --git a/include/uapi/linux/lwtunnel.h b/include/uapi/linux/lwtunnel.h
index 2e206919125c..229655ef792f 100644
--- a/include/uapi/linux/lwtunnel.h
+++ b/include/uapi/linux/lwtunnel.h
@@ -15,6 +15,7 @@ enum lwtunnel_encap_types {
LWTUNNEL_ENCAP_SEG6_LOCAL,
LWTUNNEL_ENCAP_RPL,
LWTUNNEL_ENCAP_IOAM6,
+ LWTUNNEL_ENCAP_XFRM,
__LWTUNNEL_ENCAP_MAX,
};
@@ -111,4 +112,13 @@ enum {
#define LWT_BPF_MAX_HEADROOM 256
+enum {
+ LWT_XFRM_UNSPEC,
+ LWT_XFRM_IF_ID,
+ LWT_XFRM_LINK,
+ __LWT_XFRM_MAX,
+};
+
+#define LWT_XFRM_MAX (__LWT_XFRM_MAX - 1)
+
#endif /* _UAPI_LWTUNNEL_H_ */
diff --git a/include/uapi/linux/netfilter.h b/include/uapi/linux/netfilter.h
index 53411ccc69db..5a79ccb76701 100644
--- a/include/uapi/linux/netfilter.h
+++ b/include/uapi/linux/netfilter.h
@@ -63,7 +63,9 @@ enum {
NFPROTO_NETDEV = 5,
NFPROTO_BRIDGE = 7,
NFPROTO_IPV6 = 10,
+#ifndef __KERNEL__ /* no longer supported by kernel */
NFPROTO_DECNET = 12,
+#endif
NFPROTO_NUMPROTO,
};
diff --git a/include/uapi/linux/netfilter/ipset/ip_set.h b/include/uapi/linux/netfilter/ipset/ip_set.h
index 6397d75899bc..79e5d68b87af 100644
--- a/include/uapi/linux/netfilter/ipset/ip_set.h
+++ b/include/uapi/linux/netfilter/ipset/ip_set.h
@@ -3,10 +3,6 @@
* Patrick Schaaf <bof@bof.de>
* Martin Josefsson <gandalf@wlug.westbo.se>
* Copyright (C) 2003-2011 Jozsef Kadlecsik <kadlec@netfilter.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef _UAPI_IP_SET_H
#define _UAPI_IP_SET_H
diff --git a/include/uapi/linux/netfilter/xt_AUDIT.h b/include/uapi/linux/netfilter/xt_AUDIT.h
index 1b314e2f84ac..56a3f6092e0c 100644
--- a/include/uapi/linux/netfilter/xt_AUDIT.h
+++ b/include/uapi/linux/netfilter/xt_AUDIT.h
@@ -4,10 +4,6 @@
*
* (C) 2010-2011 Thomas Graf <tgraf@redhat.com>
* (C) 2010-2011 Red Hat, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef _XT_AUDIT_TARGET_H
diff --git a/include/uapi/linux/netfilter/xt_connmark.h b/include/uapi/linux/netfilter/xt_connmark.h
index f01c19b83a2b..41b578ccd03b 100644
--- a/include/uapi/linux/netfilter/xt_connmark.h
+++ b/include/uapi/linux/netfilter/xt_connmark.h
@@ -1,18 +1,13 @@
/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */
+/* Copyright (C) 2002,2004 MARA Systems AB <https://www.marasystems.com>
+ * by Henrik Nordstrom <hno@marasystems.com>
+ */
+
#ifndef _XT_CONNMARK_H
#define _XT_CONNMARK_H
#include <linux/types.h>
-/* Copyright (C) 2002,2004 MARA Systems AB <https://www.marasystems.com>
- * by Henrik Nordstrom <hno@marasystems.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
enum {
XT_CONNMARK_SET = 0,
XT_CONNMARK_SAVE,
diff --git a/include/uapi/linux/netfilter/xt_osf.h b/include/uapi/linux/netfilter/xt_osf.h
index 6e466236ca4b..f1f097896bdf 100644
--- a/include/uapi/linux/netfilter/xt_osf.h
+++ b/include/uapi/linux/netfilter/xt_osf.h
@@ -1,20 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */
/*
* Copyright (c) 2003+ Evgeniy Polyakov <johnpol@2ka.mxt.ru>
- *
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#ifndef _XT_OSF_H
diff --git a/include/uapi/linux/netfilter_decnet.h b/include/uapi/linux/netfilter_decnet.h
deleted file mode 100644
index 3c77f54560f2..000000000000
--- a/include/uapi/linux/netfilter_decnet.h
+++ /dev/null
@@ -1,72 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-#ifndef __LINUX_DECNET_NETFILTER_H
-#define __LINUX_DECNET_NETFILTER_H
-
-/* DECnet-specific defines for netfilter.
- * This file (C) Steve Whitehouse 1999 derived from the
- * ipv4 netfilter header file which is
- * (C)1998 Rusty Russell -- This code is GPL.
- */
-
-#include <linux/netfilter.h>
-
-/* only for userspace compatibility */
-#ifndef __KERNEL__
-
-#include <limits.h> /* for INT_MIN, INT_MAX */
-
-/* kernel define is in netfilter_defs.h */
-#define NF_DN_NUMHOOKS 7
-#endif /* ! __KERNEL__ */
-
-/* DECnet Hooks */
-/* After promisc drops, checksum checks. */
-#define NF_DN_PRE_ROUTING 0
-/* If the packet is destined for this box. */
-#define NF_DN_LOCAL_IN 1
-/* If the packet is destined for another interface. */
-#define NF_DN_FORWARD 2
-/* Packets coming from a local process. */
-#define NF_DN_LOCAL_OUT 3
-/* Packets about to hit the wire. */
-#define NF_DN_POST_ROUTING 4
-/* Input Hello Packets */
-#define NF_DN_HELLO 5
-/* Input Routing Packets */
-#define NF_DN_ROUTE 6
-
-enum nf_dn_hook_priorities {
- NF_DN_PRI_FIRST = INT_MIN,
- NF_DN_PRI_CONNTRACK = -200,
- NF_DN_PRI_MANGLE = -150,
- NF_DN_PRI_NAT_DST = -100,
- NF_DN_PRI_FILTER = 0,
- NF_DN_PRI_NAT_SRC = 100,
- NF_DN_PRI_DNRTMSG = 200,
- NF_DN_PRI_LAST = INT_MAX,
-};
-
-struct nf_dn_rtmsg {
- int nfdn_ifindex;
-};
-
-#define NFDN_RTMSG(r) ((unsigned char *)(r) + NLMSG_ALIGN(sizeof(struct nf_dn_rtmsg)))
-
-#ifndef __KERNEL__
-/* backwards compatibility for userspace */
-#define DNRMG_L1_GROUP 0x01
-#define DNRMG_L2_GROUP 0x02
-#endif
-
-enum {
- DNRNG_NLGRP_NONE,
-#define DNRNG_NLGRP_NONE DNRNG_NLGRP_NONE
- DNRNG_NLGRP_L1,
-#define DNRNG_NLGRP_L1 DNRNG_NLGRP_L1
- DNRNG_NLGRP_L2,
-#define DNRNG_NLGRP_L2 DNRNG_NLGRP_L2
- __DNRNG_NLGRP_MAX
-};
-#define DNRNG_NLGRP_MAX (__DNRNG_NLGRP_MAX - 1)
-
-#endif /*__LINUX_DECNET_NETFILTER_H*/
diff --git a/include/uapi/linux/netlink.h b/include/uapi/linux/netlink.h
index 855dffb4c1c3..e2ae82e3f9f7 100644
--- a/include/uapi/linux/netlink.h
+++ b/include/uapi/linux/netlink.h
@@ -20,7 +20,7 @@
#define NETLINK_CONNECTOR 11
#define NETLINK_NETFILTER 12 /* netfilter subsystem */
#define NETLINK_IP6_FW 13
-#define NETLINK_DNRTMSG 14 /* DECnet routing messages */
+#define NETLINK_DNRTMSG 14 /* DECnet routing messages (obsolete) */
#define NETLINK_KOBJECT_UEVENT 15 /* Kernel messages to userspace */
#define NETLINK_GENERIC 16
/* leave room for NETLINK_DM (DM Events) */
@@ -41,12 +41,20 @@ struct sockaddr_nl {
__u32 nl_groups; /* multicast groups mask */
};
+/**
+ * struct nlmsghdr - fixed format metadata header of Netlink messages
+ * @nlmsg_len: Length of message including header
+ * @nlmsg_type: Message content type
+ * @nlmsg_flags: Additional flags
+ * @nlmsg_seq: Sequence number
+ * @nlmsg_pid: Sending process port ID
+ */
struct nlmsghdr {
- __u32 nlmsg_len; /* Length of message including header */
- __u16 nlmsg_type; /* Message content */
- __u16 nlmsg_flags; /* Additional flags */
- __u32 nlmsg_seq; /* Sequence number */
- __u32 nlmsg_pid; /* Sending process port ID */
+ __u32 nlmsg_len;
+ __u16 nlmsg_type;
+ __u16 nlmsg_flags;
+ __u32 nlmsg_seq;
+ __u32 nlmsg_pid;
};
/* Flags values */
@@ -54,7 +62,7 @@ struct nlmsghdr {
#define NLM_F_REQUEST 0x01 /* It is request message. */
#define NLM_F_MULTI 0x02 /* Multipart message, terminated by NLMSG_DONE */
#define NLM_F_ACK 0x04 /* Reply with ack, with zero or error code */
-#define NLM_F_ECHO 0x08 /* Echo this request */
+#define NLM_F_ECHO 0x08 /* Receive resulting notifications */
#define NLM_F_DUMP_INTR 0x10 /* Dump was inconsistent due to sequence change */
#define NLM_F_DUMP_FILTERED 0x20 /* Dump was filtered as requested */
@@ -132,6 +140,10 @@ struct nlmsgerr {
* be used - in the success case - to identify a created
* object or operation or similar (binary)
* @NLMSGERR_ATTR_POLICY: policy for a rejected attribute
+ * @NLMSGERR_ATTR_MISS_TYPE: type of a missing required attribute,
+ * %NLMSGERR_ATTR_MISS_NEST will not be present if the attribute was
+ * missing at the message level
+ * @NLMSGERR_ATTR_MISS_NEST: offset of the nest where attribute was missing
* @__NLMSGERR_ATTR_MAX: number of attributes
* @NLMSGERR_ATTR_MAX: highest attribute number
*/
@@ -141,6 +153,8 @@ enum nlmsgerr_attrs {
NLMSGERR_ATTR_OFFS,
NLMSGERR_ATTR_COOKIE,
NLMSGERR_ATTR_POLICY,
+ NLMSGERR_ATTR_MISS_TYPE,
+ NLMSGERR_ATTR_MISS_NEST,
__NLMSGERR_ATTR_MAX,
NLMSGERR_ATTR_MAX = __NLMSGERR_ATTR_MAX - 1
@@ -337,6 +351,9 @@ enum netlink_attribute_type {
* bitfield32 type (U32)
* @NL_POLICY_TYPE_ATTR_MASK: mask of valid bits for unsigned integers (U64)
* @NL_POLICY_TYPE_ATTR_PAD: pad attribute for 64-bit alignment
+ *
+ * @__NL_POLICY_TYPE_ATTR_MAX: number of attributes
+ * @NL_POLICY_TYPE_ATTR_MAX: highest attribute number
*/
enum netlink_policy_type_attr {
NL_POLICY_TYPE_ATTR_UNSPEC,
diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h
index ffb7c573e299..c32e7616a366 100644
--- a/include/uapi/linux/nl80211.h
+++ b/include/uapi/linux/nl80211.h
@@ -377,14 +377,22 @@
* the non-transmitting interfaces are deleted as well.
*
* @NL80211_CMD_GET_KEY: Get sequence counter information for a key specified
- * by %NL80211_ATTR_KEY_IDX and/or %NL80211_ATTR_MAC.
+ * by %NL80211_ATTR_KEY_IDX and/or %NL80211_ATTR_MAC. %NL80211_ATTR_MAC
+ * represents peer's MLD address for MLO pairwise key. For MLO group key,
+ * the link is identified by %NL80211_ATTR_MLO_LINK_ID.
* @NL80211_CMD_SET_KEY: Set key attributes %NL80211_ATTR_KEY_DEFAULT,
* %NL80211_ATTR_KEY_DEFAULT_MGMT, or %NL80211_ATTR_KEY_THRESHOLD.
+ * For MLO connection, the link to set default key is identified by
+ * %NL80211_ATTR_MLO_LINK_ID.
* @NL80211_CMD_NEW_KEY: add a key with given %NL80211_ATTR_KEY_DATA,
* %NL80211_ATTR_KEY_IDX, %NL80211_ATTR_MAC, %NL80211_ATTR_KEY_CIPHER,
- * and %NL80211_ATTR_KEY_SEQ attributes.
+ * and %NL80211_ATTR_KEY_SEQ attributes. %NL80211_ATTR_MAC represents
+ * peer's MLD address for MLO pairwise key. The link to add MLO
+ * group key is identified by %NL80211_ATTR_MLO_LINK_ID.
* @NL80211_CMD_DEL_KEY: delete a key identified by %NL80211_ATTR_KEY_IDX
- * or %NL80211_ATTR_MAC.
+ * or %NL80211_ATTR_MAC. %NL80211_ATTR_MAC represents peer's MLD address
+ * for MLO pairwise key. The link to delete group key is identified by
+ * %NL80211_ATTR_MLO_LINK_ID.
*
* @NL80211_CMD_GET_BEACON: (not used)
* @NL80211_CMD_SET_BEACON: change the beacon on an access point interface
@@ -4951,6 +4959,7 @@ enum nl80211_bss_scan_width {
* using the nesting index as the antenna number.
* @NL80211_BSS_FREQUENCY_OFFSET: frequency offset in KHz
* @NL80211_BSS_MLO_LINK_ID: MLO link ID of the BSS (u8).
+ * @NL80211_BSS_MLD_ADDR: MLD address of this BSS if connected to it.
* @__NL80211_BSS_AFTER_LAST: internal
* @NL80211_BSS_MAX: highest BSS attribute
*/
@@ -4977,6 +4986,7 @@ enum nl80211_bss {
NL80211_BSS_CHAIN_SIGNAL,
NL80211_BSS_FREQUENCY_OFFSET,
NL80211_BSS_MLO_LINK_ID,
+ NL80211_BSS_MLD_ADDR,
/* keep last */
__NL80211_BSS_AFTER_LAST,
@@ -6273,6 +6283,14 @@ enum nl80211_feature_flags {
* @NL80211_EXT_FEATURE_RADAR_BACKGROUND: Device supports background radar/CAC
* detection.
*
+ * @NL80211_EXT_FEATURE_POWERED_ADDR_CHANGE: Device can perform a MAC address
+ * change without having to bring the underlying network device down
+ * first. For example, in station mode this can be used to vary the
+ * origin MAC address prior to a connection to a new AP for privacy
+ * or other reasons. Note that certain driver specific restrictions
+ * might apply, e.g. no scans in progress, no offchannel operations
+ * in progress, and no active connections.
+ *
* @NUM_NL80211_EXT_FEATURES: number of extended features.
* @MAX_NL80211_EXT_FEATURES: highest extended feature index.
*/
@@ -6340,6 +6358,7 @@ enum nl80211_ext_feature_index {
NL80211_EXT_FEATURE_BSS_COLOR,
NL80211_EXT_FEATURE_FILS_CRYPTO_OFFLOAD,
NL80211_EXT_FEATURE_RADAR_BACKGROUND,
+ NL80211_EXT_FEATURE_POWERED_ADDR_CHANGE,
/* add new features before the definition below */
NUM_NL80211_EXT_FEATURES,
diff --git a/include/uapi/linux/openvswitch.h b/include/uapi/linux/openvswitch.h
index ce3e1738d427..94066f87e9ee 100644
--- a/include/uapi/linux/openvswitch.h
+++ b/include/uapi/linux/openvswitch.h
@@ -76,6 +76,8 @@ enum ovs_datapath_cmd {
* datapath. Always present in notifications.
* @OVS_DP_ATTR_MEGAFLOW_STATS: Statistics about mega flow masks usage for the
* datapath. Always present in notifications.
+ * @OVS_DP_ATTR_IFINDEX: Interface index for a new datapath netdev. Only
+ * valid for %OVS_DP_CMD_NEW requests.
*
* These attributes follow the &struct ovs_header within the Generic Netlink
* payload for %OVS_DP_* commands.
@@ -92,6 +94,7 @@ enum ovs_datapath_attr {
OVS_DP_ATTR_PER_CPU_PIDS, /* Netlink PIDS to receive upcalls in
* per-cpu dispatch mode
*/
+ OVS_DP_ATTR_IFINDEX,
__OVS_DP_ATTR_MAX
};
diff --git a/include/uapi/linux/perf_event.h b/include/uapi/linux/perf_event.h
index 03b370062741..85be78e0e7f6 100644
--- a/include/uapi/linux/perf_event.h
+++ b/include/uapi/linux/perf_event.h
@@ -164,8 +164,6 @@ enum perf_event_sample_format {
PERF_SAMPLE_WEIGHT_STRUCT = 1U << 24,
PERF_SAMPLE_MAX = 1U << 25, /* non-ABI */
-
- __PERF_SAMPLE_CALLCHAIN_EARLY = 1ULL << 63, /* non-ABI; internal use */
};
#define PERF_SAMPLE_WEIGHT_TYPE (PERF_SAMPLE_WEIGHT | PERF_SAMPLE_WEIGHT_STRUCT)
@@ -204,6 +202,8 @@ enum perf_branch_sample_type_shift {
PERF_SAMPLE_BRANCH_HW_INDEX_SHIFT = 17, /* save low level index of raw branch records */
+ PERF_SAMPLE_BRANCH_PRIV_SAVE_SHIFT = 18, /* save privilege mode */
+
PERF_SAMPLE_BRANCH_MAX_SHIFT /* non-ABI */
};
@@ -233,6 +233,8 @@ enum perf_branch_sample_type {
PERF_SAMPLE_BRANCH_HW_INDEX = 1U << PERF_SAMPLE_BRANCH_HW_INDEX_SHIFT,
+ PERF_SAMPLE_BRANCH_PRIV_SAVE = 1U << PERF_SAMPLE_BRANCH_PRIV_SAVE_SHIFT,
+
PERF_SAMPLE_BRANCH_MAX = 1U << PERF_SAMPLE_BRANCH_MAX_SHIFT,
};
@@ -253,9 +255,48 @@ enum {
PERF_BR_COND_RET = 10, /* conditional function return */
PERF_BR_ERET = 11, /* exception return */
PERF_BR_IRQ = 12, /* irq */
+ PERF_BR_SERROR = 13, /* system error */
+ PERF_BR_NO_TX = 14, /* not in transaction */
+ PERF_BR_EXTEND_ABI = 15, /* extend ABI */
PERF_BR_MAX,
};
+/*
+ * Common branch speculation outcome classification
+ */
+enum {
+ PERF_BR_SPEC_NA = 0, /* Not available */
+ PERF_BR_SPEC_WRONG_PATH = 1, /* Speculative but on wrong path */
+ PERF_BR_NON_SPEC_CORRECT_PATH = 2, /* Non-speculative but on correct path */
+ PERF_BR_SPEC_CORRECT_PATH = 3, /* Speculative and on correct path */
+ PERF_BR_SPEC_MAX,
+};
+
+enum {
+ PERF_BR_NEW_FAULT_ALGN = 0, /* Alignment fault */
+ PERF_BR_NEW_FAULT_DATA = 1, /* Data fault */
+ PERF_BR_NEW_FAULT_INST = 2, /* Inst fault */
+ PERF_BR_NEW_ARCH_1 = 3, /* Architecture specific */
+ PERF_BR_NEW_ARCH_2 = 4, /* Architecture specific */
+ PERF_BR_NEW_ARCH_3 = 5, /* Architecture specific */
+ PERF_BR_NEW_ARCH_4 = 6, /* Architecture specific */
+ PERF_BR_NEW_ARCH_5 = 7, /* Architecture specific */
+ PERF_BR_NEW_MAX,
+};
+
+enum {
+ PERF_BR_PRIV_UNKNOWN = 0,
+ PERF_BR_PRIV_USER = 1,
+ PERF_BR_PRIV_KERNEL = 2,
+ PERF_BR_PRIV_HV = 3,
+};
+
+#define PERF_BR_ARM64_FIQ PERF_BR_NEW_ARCH_1
+#define PERF_BR_ARM64_DEBUG_HALT PERF_BR_NEW_ARCH_2
+#define PERF_BR_ARM64_DEBUG_EXIT PERF_BR_NEW_ARCH_3
+#define PERF_BR_ARM64_DEBUG_INST PERF_BR_NEW_ARCH_4
+#define PERF_BR_ARM64_DEBUG_DATA PERF_BR_NEW_ARCH_5
+
#define PERF_SAMPLE_BRANCH_PLM_ALL \
(PERF_SAMPLE_BRANCH_USER|\
PERF_SAMPLE_BRANCH_KERNEL|\
@@ -1295,7 +1336,9 @@ union perf_mem_data_src {
#define PERF_MEM_LVLNUM_L2 0x02 /* L2 */
#define PERF_MEM_LVLNUM_L3 0x03 /* L3 */
#define PERF_MEM_LVLNUM_L4 0x04 /* L4 */
-/* 5-0xa available */
+/* 5-0x8 available */
+#define PERF_MEM_LVLNUM_EXTN_MEM 0x09 /* Extension memory */
+#define PERF_MEM_LVLNUM_IO 0x0a /* I/O */
#define PERF_MEM_LVLNUM_ANY_CACHE 0x0b /* Any cache */
#define PERF_MEM_LVLNUM_LFB 0x0c /* LFB */
#define PERF_MEM_LVLNUM_RAM 0x0d /* RAM */
@@ -1313,7 +1356,7 @@ union perf_mem_data_src {
#define PERF_MEM_SNOOP_SHIFT 19
#define PERF_MEM_SNOOPX_FWD 0x01 /* forward */
-/* 1 free */
+#define PERF_MEM_SNOOPX_PEER 0x02 /* xfer from peer */
#define PERF_MEM_SNOOPX_SHIFT 38
/* locked instruction */
@@ -1363,6 +1406,7 @@ union perf_mem_data_src {
* abort: aborting a hardware transaction
* cycles: cycles from last branch (or 0 if not supported)
* type: branch type
+ * spec: branch speculation info (or 0 if not supported)
*/
struct perf_branch_entry {
__u64 from;
@@ -1373,7 +1417,10 @@ struct perf_branch_entry {
abort:1, /* transaction abort */
cycles:16, /* cycle count to last branch */
type:4, /* branch type */
- reserved:40;
+ spec:2, /* branch speculation info */
+ new_type:4, /* additional branch type */
+ priv:3, /* privilege level */
+ reserved:31;
};
union perf_sample_weight {
diff --git a/include/uapi/linux/pkt_cls.h b/include/uapi/linux/pkt_cls.h
index 877309d6ca3c..648a82f32666 100644
--- a/include/uapi/linux/pkt_cls.h
+++ b/include/uapi/linux/pkt_cls.h
@@ -592,6 +592,8 @@ enum {
TCA_FLOWER_KEY_PPPOE_SID, /* be16 */
TCA_FLOWER_KEY_PPP_PROTO, /* be16 */
+ TCA_FLOWER_KEY_L2TPV3_SID, /* be32 */
+
__TCA_FLOWER_MAX,
};
diff --git a/include/uapi/linux/pkt_sched.h b/include/uapi/linux/pkt_sched.h
index f292b467b27f..000eec106856 100644
--- a/include/uapi/linux/pkt_sched.h
+++ b/include/uapi/linux/pkt_sched.h
@@ -1233,6 +1233,16 @@ enum {
#define TCA_TAPRIO_ATTR_FLAG_FULL_OFFLOAD _BITUL(1)
enum {
+ TCA_TAPRIO_TC_ENTRY_UNSPEC,
+ TCA_TAPRIO_TC_ENTRY_INDEX, /* u32 */
+ TCA_TAPRIO_TC_ENTRY_MAX_SDU, /* u32 */
+
+ /* add new constants above here */
+ __TCA_TAPRIO_TC_ENTRY_CNT,
+ TCA_TAPRIO_TC_ENTRY_MAX = (__TCA_TAPRIO_TC_ENTRY_CNT - 1)
+};
+
+enum {
TCA_TAPRIO_ATTR_UNSPEC,
TCA_TAPRIO_ATTR_PRIOMAP, /* struct tc_mqprio_qopt */
TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST, /* nested of entry */
@@ -1245,6 +1255,7 @@ enum {
TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME_EXTENSION, /* s64 */
TCA_TAPRIO_ATTR_FLAGS, /* u32 */
TCA_TAPRIO_ATTR_TXTIME_DELAY, /* u32 */
+ TCA_TAPRIO_ATTR_TC_ENTRY, /* nest */
__TCA_TAPRIO_ATTR_MAX,
};
diff --git a/include/uapi/linux/psci.h b/include/uapi/linux/psci.h
index 2bf93c0d6354..3511095c2702 100644
--- a/include/uapi/linux/psci.h
+++ b/include/uapi/linux/psci.h
@@ -48,12 +48,26 @@
#define PSCI_0_2_FN64_MIGRATE_INFO_UP_CPU PSCI_0_2_FN64(7)
#define PSCI_1_0_FN_PSCI_FEATURES PSCI_0_2_FN(10)
+#define PSCI_1_0_FN_CPU_FREEZE PSCI_0_2_FN(11)
+#define PSCI_1_0_FN_CPU_DEFAULT_SUSPEND PSCI_0_2_FN(12)
+#define PSCI_1_0_FN_NODE_HW_STATE PSCI_0_2_FN(13)
#define PSCI_1_0_FN_SYSTEM_SUSPEND PSCI_0_2_FN(14)
#define PSCI_1_0_FN_SET_SUSPEND_MODE PSCI_0_2_FN(15)
+#define PSCI_1_0_FN_STAT_RESIDENCY PSCI_0_2_FN(16)
+#define PSCI_1_0_FN_STAT_COUNT PSCI_0_2_FN(17)
+
#define PSCI_1_1_FN_SYSTEM_RESET2 PSCI_0_2_FN(18)
+#define PSCI_1_1_FN_MEM_PROTECT PSCI_0_2_FN(19)
+#define PSCI_1_1_FN_MEM_PROTECT_CHECK_RANGE PSCI_0_2_FN(19)
+#define PSCI_1_0_FN64_CPU_DEFAULT_SUSPEND PSCI_0_2_FN64(12)
+#define PSCI_1_0_FN64_NODE_HW_STATE PSCI_0_2_FN64(13)
#define PSCI_1_0_FN64_SYSTEM_SUSPEND PSCI_0_2_FN64(14)
+#define PSCI_1_0_FN64_STAT_RESIDENCY PSCI_0_2_FN64(16)
+#define PSCI_1_0_FN64_STAT_COUNT PSCI_0_2_FN64(17)
+
#define PSCI_1_1_FN64_SYSTEM_RESET2 PSCI_0_2_FN64(18)
+#define PSCI_1_1_FN64_MEM_PROTECT_CHECK_RANGE PSCI_0_2_FN64(19)
/* PSCI v0.2 power state encoding for CPU_SUSPEND function */
#define PSCI_0_2_POWER_STATE_ID_MASK 0xffff
diff --git a/include/uapi/linux/sed-opal.h b/include/uapi/linux/sed-opal.h
index 6f5af1a84213..2573772e2fb3 100644
--- a/include/uapi/linux/sed-opal.h
+++ b/include/uapi/linux/sed-opal.h
@@ -132,6 +132,18 @@ struct opal_read_write_table {
__u64 priv;
};
+#define OPAL_FL_SUPPORTED 0x00000001
+#define OPAL_FL_LOCKING_SUPPORTED 0x00000002
+#define OPAL_FL_LOCKING_ENABLED 0x00000004
+#define OPAL_FL_LOCKED 0x00000008
+#define OPAL_FL_MBR_ENABLED 0x00000010
+#define OPAL_FL_MBR_DONE 0x00000020
+
+struct opal_status {
+ __u32 flags;
+ __u32 reserved;
+};
+
#define IOC_OPAL_SAVE _IOW('p', 220, struct opal_lock_unlock)
#define IOC_OPAL_LOCK_UNLOCK _IOW('p', 221, struct opal_lock_unlock)
#define IOC_OPAL_TAKE_OWNERSHIP _IOW('p', 222, struct opal_key)
@@ -148,5 +160,6 @@ struct opal_read_write_table {
#define IOC_OPAL_MBR_DONE _IOW('p', 233, struct opal_mbr_done)
#define IOC_OPAL_WRITE_SHADOW_MBR _IOW('p', 234, struct opal_shadow_mbr)
#define IOC_OPAL_GENERIC_TABLE_RW _IOW('p', 235, struct opal_read_write_table)
+#define IOC_OPAL_GET_STATUS _IOR('p', 236, struct opal_status)
#endif /* _UAPI_SED_OPAL_H */
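
A brief sketch (not part of this patch) of querying the new Opal status flags via IOC_OPAL_GET_STATUS; the device path is an assumption and the ioctl requires appropriate privileges on a kernel carrying this change.

#include <fcntl.h>
#include <linux/sed-opal.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

int print_opal_status(const char *dev)
{
	struct opal_status st = { 0 };
	int fd = open(dev, O_RDONLY);

	if (fd < 0)
		return -1;

	if (ioctl(fd, IOC_OPAL_GET_STATUS, &st) == 0)
		printf("opal supported: %s, locked: %s\n",
		       (st.flags & OPAL_FL_SUPPORTED) ? "yes" : "no",
		       (st.flags & OPAL_FL_LOCKED) ? "yes" : "no");
	close(fd);
	return 0;
}
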
diff --git a/include/uapi/linux/seg6_local.h b/include/uapi/linux/seg6_local.h
index 332b18f318f8..4fdc424c9cb3 100644
--- a/include/uapi/linux/seg6_local.h
+++ b/include/uapi/linux/seg6_local.h
@@ -28,6 +28,7 @@ enum {
SEG6_LOCAL_BPF,
SEG6_LOCAL_VRFTABLE,
SEG6_LOCAL_COUNTERS,
+ SEG6_LOCAL_FLAVORS,
__SEG6_LOCAL_MAX,
};
#define SEG6_LOCAL_MAX (__SEG6_LOCAL_MAX - 1)
@@ -110,4 +111,27 @@ enum {
#define SEG6_LOCAL_CNT_MAX (__SEG6_LOCAL_CNT_MAX - 1)
+/* SRv6 End* Flavor attributes */
+enum {
+ SEG6_LOCAL_FLV_UNSPEC,
+ SEG6_LOCAL_FLV_OPERATION,
+ SEG6_LOCAL_FLV_LCBLOCK_BITS,
+ SEG6_LOCAL_FLV_LCNODE_FN_BITS,
+ __SEG6_LOCAL_FLV_MAX,
+};
+
+#define SEG6_LOCAL_FLV_MAX (__SEG6_LOCAL_FLV_MAX - 1)
+
+/* Designed flavor operations for SRv6 End* Behavior */
+enum {
+ SEG6_LOCAL_FLV_OP_UNSPEC,
+ SEG6_LOCAL_FLV_OP_PSP,
+ SEG6_LOCAL_FLV_OP_USP,
+ SEG6_LOCAL_FLV_OP_USD,
+ SEG6_LOCAL_FLV_OP_NEXT_CSID,
+ __SEG6_LOCAL_FLV_OP_MAX
+};
+
+#define SEG6_LOCAL_FLV_OP_MAX (__SEG6_LOCAL_FLV_OP_MAX - 1)
+
#endif
diff --git a/include/uapi/linux/stat.h b/include/uapi/linux/stat.h
index 1500a0f58041..7cab2c65d3d7 100644
--- a/include/uapi/linux/stat.h
+++ b/include/uapi/linux/stat.h
@@ -124,7 +124,8 @@ struct statx {
__u32 stx_dev_minor;
/* 0x90 */
__u64 stx_mnt_id;
- __u64 __spare2;
+ __u32 stx_dio_mem_align; /* Memory buffer alignment for direct I/O */
+ __u32 stx_dio_offset_align; /* File offset alignment for direct I/O */
/* 0xa0 */
__u64 __spare3[12]; /* Spare space for future expansion */
/* 0x100 */
@@ -152,6 +153,7 @@ struct statx {
#define STATX_BASIC_STATS 0x000007ffU /* The stuff in the normal stat struct */
#define STATX_BTIME 0x00000800U /* Want/got stx_btime */
#define STATX_MNT_ID 0x00001000U /* Got stx_mnt_id */
+#define STATX_DIOALIGN 0x00002000U /* Want/got direct I/O alignment info */
#define STATX__RESERVED 0x80000000U /* Reserved for future struct statx expansion */
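
A small sketch (not part of this patch) of requesting the new direct I/O alignment fields with STATX_DIOALIGN; it assumes a libc that exposes statx() and headers new enough to define the flag.

#include <fcntl.h>
#include <stdio.h>
#include <sys/stat.h>

int print_dio_alignment(const char *path)
{
	struct statx stx;

	if (statx(AT_FDCWD, path, 0, STATX_DIOALIGN, &stx) != 0)
		return -1;

	/* The kernel sets STATX_DIOALIGN in stx_mask only if it filled the fields. */
	if (stx.stx_mask & STATX_DIOALIGN)
		printf("dio mem align %u, dio offset align %u\n",
		       stx.stx_dio_mem_align, stx.stx_dio_offset_align);
	else
		printf("no direct I/O alignment info for %s\n", path);
	return 0;
}
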
diff --git a/include/uapi/linux/tc_act/tc_bpf.h b/include/uapi/linux/tc_act/tc_bpf.h
index 653c4f94f76e..fe6c8f8f3e8c 100644
--- a/include/uapi/linux/tc_act/tc_bpf.h
+++ b/include/uapi/linux/tc_act/tc_bpf.h
@@ -1,11 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */
/*
* Copyright (c) 2015 Jiri Pirko <jiri@resnulli.us>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#ifndef __LINUX_TC_BPF_H
diff --git a/include/uapi/linux/tc_act/tc_skbedit.h b/include/uapi/linux/tc_act/tc_skbedit.h
index 6cb6101208d0..64032513cc4c 100644
--- a/include/uapi/linux/tc_act/tc_skbedit.h
+++ b/include/uapi/linux/tc_act/tc_skbedit.h
@@ -2,19 +2,6 @@
/*
* Copyright (c) 2008, Intel Corporation.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
- * Place - Suite 330, Boston, MA 02111-1307 USA.
- *
* Author: Alexander Duyck <alexander.h.duyck@intel.com>
*/
diff --git a/include/uapi/linux/tc_act/tc_skbmod.h b/include/uapi/linux/tc_act/tc_skbmod.h
index af6ef2cfbf3d..ac62c9a993ea 100644
--- a/include/uapi/linux/tc_act/tc_skbmod.h
+++ b/include/uapi/linux/tc_act/tc_skbmod.h
@@ -1,12 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */
/*
* Copyright (c) 2016, Jamal Hadi Salim
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
-*/
+ */
#ifndef __LINUX_TC_SKBMOD_H
#define __LINUX_TC_SKBMOD_H
diff --git a/include/uapi/linux/tc_act/tc_tunnel_key.h b/include/uapi/linux/tc_act/tc_tunnel_key.h
index 3f10dc4e7a4b..49ad4033951b 100644
--- a/include/uapi/linux/tc_act/tc_tunnel_key.h
+++ b/include/uapi/linux/tc_act/tc_tunnel_key.h
@@ -2,11 +2,6 @@
/*
* Copyright (c) 2016, Amir Vadai <amir@vadai.me>
* Copyright (c) 2016, Mellanox Technologies. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#ifndef __LINUX_TC_TUNNEL_KEY_H
diff --git a/include/uapi/linux/tc_act/tc_vlan.h b/include/uapi/linux/tc_act/tc_vlan.h
index 5b306fe815cc..3e1f8e57cdd2 100644
--- a/include/uapi/linux/tc_act/tc_vlan.h
+++ b/include/uapi/linux/tc_act/tc_vlan.h
@@ -1,11 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */
/*
* Copyright (c) 2014 Jiri Pirko <jiri@resnulli.us>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#ifndef __LINUX_TC_VLAN_H
diff --git a/include/uapi/linux/tls.h b/include/uapi/linux/tls.h
index f1157d8f4acd..b66a800389cc 100644
--- a/include/uapi/linux/tls.h
+++ b/include/uapi/linux/tls.h
@@ -100,6 +100,20 @@
#define TLS_CIPHER_SM4_CCM_TAG_SIZE 16
#define TLS_CIPHER_SM4_CCM_REC_SEQ_SIZE 8
+#define TLS_CIPHER_ARIA_GCM_128 57
+#define TLS_CIPHER_ARIA_GCM_128_IV_SIZE 8
+#define TLS_CIPHER_ARIA_GCM_128_KEY_SIZE 16
+#define TLS_CIPHER_ARIA_GCM_128_SALT_SIZE 4
+#define TLS_CIPHER_ARIA_GCM_128_TAG_SIZE 16
+#define TLS_CIPHER_ARIA_GCM_128_REC_SEQ_SIZE 8
+
+#define TLS_CIPHER_ARIA_GCM_256 58
+#define TLS_CIPHER_ARIA_GCM_256_IV_SIZE 8
+#define TLS_CIPHER_ARIA_GCM_256_KEY_SIZE 32
+#define TLS_CIPHER_ARIA_GCM_256_SALT_SIZE 4
+#define TLS_CIPHER_ARIA_GCM_256_TAG_SIZE 16
+#define TLS_CIPHER_ARIA_GCM_256_REC_SEQ_SIZE 8
+
#define TLS_SET_RECORD_TYPE 1
#define TLS_GET_RECORD_TYPE 2
@@ -156,6 +170,22 @@ struct tls12_crypto_info_sm4_ccm {
unsigned char rec_seq[TLS_CIPHER_SM4_CCM_REC_SEQ_SIZE];
};
+struct tls12_crypto_info_aria_gcm_128 {
+ struct tls_crypto_info info;
+ unsigned char iv[TLS_CIPHER_ARIA_GCM_128_IV_SIZE];
+ unsigned char key[TLS_CIPHER_ARIA_GCM_128_KEY_SIZE];
+ unsigned char salt[TLS_CIPHER_ARIA_GCM_128_SALT_SIZE];
+ unsigned char rec_seq[TLS_CIPHER_ARIA_GCM_128_REC_SEQ_SIZE];
+};
+
+struct tls12_crypto_info_aria_gcm_256 {
+ struct tls_crypto_info info;
+ unsigned char iv[TLS_CIPHER_ARIA_GCM_256_IV_SIZE];
+ unsigned char key[TLS_CIPHER_ARIA_GCM_256_KEY_SIZE];
+ unsigned char salt[TLS_CIPHER_ARIA_GCM_256_SALT_SIZE];
+ unsigned char rec_seq[TLS_CIPHER_ARIA_GCM_256_REC_SEQ_SIZE];
+};
+
enum {
TLS_INFO_UNSPEC,
TLS_INFO_VERSION,
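
For reference, a hedged sketch of installing the new ARIA-GCM-128 cipher description on a kTLS TX socket, mirroring the existing AES-GCM flow; the key material is assumed to come from the application's TLS handshake, and SOL_TLS is defined locally in case the libc headers lack it:

/* Illustrative only: enable kernel TLS TX with ARIA-GCM-128. */
#include <string.h>
#include <sys/socket.h>
#include <netinet/tcp.h>
#include <linux/tls.h>

#ifndef SOL_TLS
#define SOL_TLS 282
#endif

static int enable_ktls_aria(int sock, const unsigned char *key,
			    const unsigned char *iv, const unsigned char *salt,
			    const unsigned char *seq)
{
	struct tls12_crypto_info_aria_gcm_128 ci = { 0 };

	ci.info.version = TLS_1_2_VERSION;
	ci.info.cipher_type = TLS_CIPHER_ARIA_GCM_128;
	memcpy(ci.key, key, TLS_CIPHER_ARIA_GCM_128_KEY_SIZE);
	memcpy(ci.iv, iv, TLS_CIPHER_ARIA_GCM_128_IV_SIZE);
	memcpy(ci.salt, salt, TLS_CIPHER_ARIA_GCM_128_SALT_SIZE);
	memcpy(ci.rec_seq, seq, TLS_CIPHER_ARIA_GCM_128_REC_SEQ_SIZE);

	/* The "tls" ULP must be attached before TLS_TX can be configured. */
	if (setsockopt(sock, SOL_TCP, TCP_ULP, "tls", sizeof("tls")))
		return -1;
	return setsockopt(sock, SOL_TLS, TLS_TX, &ci, sizeof(ci));
}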
diff --git a/include/uapi/linux/ublk_cmd.h b/include/uapi/linux/ublk_cmd.h
index 677edaab2b66..8f88e3a29998 100644
--- a/include/uapi/linux/ublk_cmd.h
+++ b/include/uapi/linux/ublk_cmd.h
@@ -17,7 +17,8 @@
#define UBLK_CMD_STOP_DEV 0x07
#define UBLK_CMD_SET_PARAMS 0x08
#define UBLK_CMD_GET_PARAMS 0x09
-
+#define UBLK_CMD_START_USER_RECOVERY 0x10
+#define UBLK_CMD_END_USER_RECOVERY 0x11
/*
* IO commands, issued by ublk server, and handled by ublk driver.
*
@@ -74,9 +75,14 @@
*/
#define UBLK_F_NEED_GET_DATA (1UL << 2)
+#define UBLK_F_USER_RECOVERY (1UL << 3)
+
+#define UBLK_F_USER_RECOVERY_REISSUE (1UL << 4)
+
/* device state */
#define UBLK_S_DEV_DEAD 0
#define UBLK_S_DEV_LIVE 1
+#define UBLK_S_DEV_QUIESCED 2
/* shipped via sqe->cmd of io_uring command */
struct ublksrv_ctrl_cmd {
diff --git a/include/uapi/linux/usbip.h b/include/uapi/linux/usbip.h
index fd393d908d8a..e4421ad55b2e 100644
--- a/include/uapi/linux/usbip.h
+++ b/include/uapi/linux/usbip.h
@@ -24,4 +24,30 @@ enum usbip_device_status {
VDEV_ST_USED,
VDEV_ST_ERROR
};
+
+/* USB URB Transfer flags:
+ *
+ * USBIP server and client (vhci) pack URBs in TCP packets. The following
+ * are the transfer flag defines used in the USBIP protocol.
+ */
+
+#define USBIP_URB_SHORT_NOT_OK 0x0001
+#define USBIP_URB_ISO_ASAP 0x0002
+#define USBIP_URB_NO_TRANSFER_DMA_MAP 0x0004
+#define USBIP_URB_ZERO_PACKET 0x0040
+#define USBIP_URB_NO_INTERRUPT 0x0080
+#define USBIP_URB_FREE_BUFFER 0x0100
+#define USBIP_URB_DIR_IN 0x0200
+#define USBIP_URB_DIR_OUT 0
+#define USBIP_URB_DIR_MASK USBIP_URB_DIR_IN
+
+#define USBIP_URB_DMA_MAP_SINGLE 0x00010000
+#define USBIP_URB_DMA_MAP_PAGE 0x00020000
+#define USBIP_URB_DMA_MAP_SG 0x00040000
+#define USBIP_URB_MAP_LOCAL 0x00080000
+#define USBIP_URB_SETUP_MAP_SINGLE 0x00100000
+#define USBIP_URB_SETUP_MAP_LOCAL 0x00200000
+#define USBIP_URB_DMA_SG_COMBINED 0x00400000
+#define USBIP_URB_ALIGNED_TEMP_BUFFER 0x00800000
+
#endif /* _UAPI_LINUX_USBIP_H */
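
These values mirror the kernel's URB transfer flags so that both ends of a usbip connection interpret packed URBs the same way; a small decoding sketch (illustrative only):

/* Illustrative only: decode a few transfer flags from a packed USBIP URB. */
#include <stdio.h>
#include <linux/usbip.h>

static void dump_urb_flags(unsigned int flags)
{
	printf("dir=%s%s%s\n",
	       (flags & USBIP_URB_DIR_MASK) == USBIP_URB_DIR_IN ? "in" : "out",
	       (flags & USBIP_URB_SHORT_NOT_OK) ? " short-not-ok" : "",
	       (flags & USBIP_URB_ZERO_PACKET) ? " zero-packet" : "");
}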
diff --git a/include/uapi/linux/userfaultfd.h b/include/uapi/linux/userfaultfd.h
index 7d32b1e797fb..005e5e306266 100644
--- a/include/uapi/linux/userfaultfd.h
+++ b/include/uapi/linux/userfaultfd.h
@@ -12,6 +12,10 @@
#include <linux/types.h>
+/* ioctls for /dev/userfaultfd */
+#define USERFAULTFD_IOC 0xAA
+#define USERFAULTFD_IOC_NEW _IO(USERFAULTFD_IOC, 0x00)
+
/*
* If the UFFDIO_API is upgraded someday, the UFFDIO_UNREGISTER and
* UFFDIO_WAKE ioctls should be defined as _IOW and not as _IOR. In
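
For reference, a hedged sketch of the new device-node creation path; the ioctl is assumed to accept the same flags argument as the userfaultfd(2) syscall and to return the new file descriptor:

/* Illustrative only: create a userfaultfd via /dev/userfaultfd. */
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/userfaultfd.h>

static int open_uffd(void)
{
	int dev = open("/dev/userfaultfd", O_RDWR | O_CLOEXEC);
	int uffd;

	if (dev < 0)
		return -1;
	uffd = ioctl(dev, USERFAULTFD_IOC_NEW, O_CLOEXEC);
	close(dev);
	return uffd;		/* negative on failure */
}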
diff --git a/include/uapi/linux/v4l2-controls.h b/include/uapi/linux/v4l2-controls.h
index 5f46bf4a570c..b5e7d082b8ad 100644
--- a/include/uapi/linux/v4l2-controls.h
+++ b/include/uapi/linux/v4l2-controls.h
@@ -225,6 +225,12 @@ enum v4l2_colorfx {
*/
#define V4L2_CID_USER_ISL7998X_BASE (V4L2_CID_USER_BASE + 0x1180)
+/*
+ * The base for DW100 driver controls.
+ * We reserve 16 controls for this driver.
+ */
+#define V4L2_CID_USER_DW100_BASE (V4L2_CID_USER_BASE + 0x1190)
+
/* MPEG-class control IDs */
/* The MPEG controls are applicable to all codec controls
* and the 'MPEG' part of the define is historical */
@@ -1730,7 +1736,7 @@ struct v4l2_vp8_segment {
* @sharpness_level: matches sharpness_level syntax element.
* @level: matches loop_filter_level syntax element.
* @padding: padding field. Should be zeroed by applications.
- * @flags: see V4L2_VP8_LF_FLAG_{}.
+ * @flags: see V4L2_VP8_LF_{}.
*
* This structure contains loop filter related parameters.
* See the 'mb_lf_adjustments()' part of the frame header syntax,
diff --git a/include/uapi/linux/vdpa.h b/include/uapi/linux/vdpa.h
index 25c55cab3d7c..9bd79235c875 100644
--- a/include/uapi/linux/vdpa.h
+++ b/include/uapi/linux/vdpa.h
@@ -46,12 +46,18 @@ enum vdpa_attr {
VDPA_ATTR_DEV_NEGOTIATED_FEATURES, /* u64 */
VDPA_ATTR_DEV_MGMTDEV_MAX_VQS, /* u32 */
+ /* virtio features that are supported by the vDPA management device */
VDPA_ATTR_DEV_SUPPORTED_FEATURES, /* u64 */
VDPA_ATTR_DEV_QUEUE_INDEX, /* u32 */
VDPA_ATTR_DEV_VENDOR_ATTR_NAME, /* string */
VDPA_ATTR_DEV_VENDOR_ATTR_VALUE, /* u64 */
+ VDPA_ATTR_DEV_FEATURES, /* u64 */
+
+ /* virtio features that are supported by the vDPA device */
+ VDPA_ATTR_VDPA_DEV_SUPPORTED_FEATURES, /* u64 */
+
/* new attributes must be added above here */
VDPA_ATTR_MAX,
};
diff --git a/include/uapi/linux/vfio.h b/include/uapi/linux/vfio.h
index 733a1cddde30..d7d8e0922376 100644
--- a/include/uapi/linux/vfio.h
+++ b/include/uapi/linux/vfio.h
@@ -986,6 +986,148 @@ enum vfio_device_mig_state {
VFIO_DEVICE_STATE_RUNNING_P2P = 5,
};
+/*
+ * Upon VFIO_DEVICE_FEATURE_SET, allow the device to be moved into a low power
+ * state with the platform-based power management. Device use of lower power
+ * states depends on factors managed by the runtime power management core,
+ * including system level support and coordinating support among dependent
+ * devices. Enabling device low power entry does not guarantee lower power
+ * usage by the device, nor is a mechanism provided through this feature to
+ * know the current power state of the device. If any device access happens
+ * (either from the host or through the vfio uAPI) when the device is in the
+ * low power state, then the host will move the device out of the low power
+ * state as necessary prior to the access. Once the access is completed, the
+ * device may re-enter the low power state. For single shot low power support
+ * with wake-up notification, see
+ * VFIO_DEVICE_FEATURE_LOW_POWER_ENTRY_WITH_WAKEUP below. Access to mmap'd
+ * device regions is disabled on LOW_POWER_ENTRY and may only be resumed after
+ * calling LOW_POWER_EXIT.
+ */
+#define VFIO_DEVICE_FEATURE_LOW_POWER_ENTRY 3
+
+/*
+ * This device feature has the same behavior as
+ * VFIO_DEVICE_FEATURE_LOW_POWER_ENTRY with the exception that the user
+ * provides an eventfd for wake-up notification. When the device moves out of
+ * the low power state for the wake-up, the host will not allow the device to
+ * re-enter a low power state without a subsequent user call to one of the low
+ * power entry device feature IOCTLs. Access to mmap'd device regions is
+ * disabled on LOW_POWER_ENTRY_WITH_WAKEUP and may only be resumed after the
+ * low power exit. The low power exit can happen either through LOW_POWER_EXIT
+ * or through any other access (where the wake-up notification has been
+ * generated). The access to mmap'd device regions will not trigger low power
+ * exit.
+ *
+ * The notification through the provided eventfd will be generated only when
+ * the device has entered and is resumed from a low power state after
+ * calling this device feature IOCTL. A device that has not entered low power
+ * state, as managed through the runtime power management core, will not
+ * generate a notification through the provided eventfd on access. Calling the
+ * LOW_POWER_EXIT feature is optional in the case where notification has been
+ * signaled on the provided eventfd that a resume from low power has occurred.
+ */
+struct vfio_device_low_power_entry_with_wakeup {
+ __s32 wakeup_eventfd;
+ __u32 reserved;
+};
+
+#define VFIO_DEVICE_FEATURE_LOW_POWER_ENTRY_WITH_WAKEUP 4
+
+/*
+ * Upon VFIO_DEVICE_FEATURE_SET, disallow use of device low power states as
+ * previously enabled via VFIO_DEVICE_FEATURE_LOW_POWER_ENTRY or
+ * VFIO_DEVICE_FEATURE_LOW_POWER_ENTRY_WITH_WAKEUP device features.
+ * This device feature IOCTL may itself generate a wakeup eventfd notification
+ * in the latter case if the device had previously entered a low power state.
+ */
+#define VFIO_DEVICE_FEATURE_LOW_POWER_EXIT 5
+
+/*
+ * Upon VFIO_DEVICE_FEATURE_SET start/stop device DMA logging.
+ * VFIO_DEVICE_FEATURE_PROBE can be used to detect if the device supports
+ * DMA logging.
+ *
+ * DMA logging allows a device to internally record what DMAs the device is
+ * initiating and report them back to userspace. It is part of the VFIO
+ * migration infrastructure that allows implementing dirty page tracking
+ * during the pre-copy phase of live migration. Only DMA WRITEs are logged,
+ * and this API is not connected to VFIO_DEVICE_FEATURE_MIG_DEVICE_STATE.
+ *
+ * When DMA logging is started a range of IOVAs to monitor is provided and the
+ * device can optimize its logging to cover only the IOVA range given. Each
+ * DMA that the device initiates inside the range will be logged by the device
+ * for later retrieval.
+ *
+ * page_size is an input that hints at the tracking granularity the device
+ * should try to achieve. If the device cannot do the hinted page size then
+ * it is the driver's choice which page size to pick, based on what it
+ * supports. On output the device will return the page size it selected.
+ *
+ * ranges is a pointer to an array of
+ * struct vfio_device_feature_dma_logging_range.
+ *
+ * The core kernel code guarantees to support, at minimum, a num_ranges that
+ * fits into a single kernel page. User space can try higher values but should
+ * give up if that cannot be achieved due to driver limitations.
+ *
+ * A single call to start device DMA logging can be issued and a matching stop
+ * should follow at the end. Another start is not allowed in the meantime.
+ */
+struct vfio_device_feature_dma_logging_control {
+ __aligned_u64 page_size;
+ __u32 num_ranges;
+ __u32 __reserved;
+ __aligned_u64 ranges;
+};
+
+struct vfio_device_feature_dma_logging_range {
+ __aligned_u64 iova;
+ __aligned_u64 length;
+};
+
+#define VFIO_DEVICE_FEATURE_DMA_LOGGING_START 6
+
+/*
+ * Upon VFIO_DEVICE_FEATURE_SET stop device DMA logging that was started
+ * by VFIO_DEVICE_FEATURE_DMA_LOGGING_START
+ */
+#define VFIO_DEVICE_FEATURE_DMA_LOGGING_STOP 7
+
+/*
+ * Upon VFIO_DEVICE_FEATURE_GET read back and clear the device DMA log
+ *
+ * Query the device's DMA log for written pages within the given IOVA range.
+ * During querying the log is cleared for the IOVA range.
+ *
+ * bitmap is a pointer to an array of u64s that will hold the output bitmap
+ * with 1 bit reporting a page_size unit of IOVA. The mapping of IOVA to bits
+ * is given by:
+ * bitmap[(addr - iova)/page_size] & (1ULL << (addr % 64))
+ *
+ * The input page_size can be any power of two value and does not have to
+ * match the value given to VFIO_DEVICE_FEATURE_DMA_LOGGING_START. The driver
+ * will format its internal logging to match the reporting page size, possibly
+ * by replicating bits if the internal page size is lower than requested.
+ *
+ * The LOGGING_REPORT will only set bits in the bitmap and never clear or
+ * perform any initialization of the user provided bitmap.
+ *
+ * If any error is returned, userspace should assume that the dirty log is
+ * corrupted. Error recovery is to consider all memory dirty and try to
+ * restart the dirty tracking, or to abort/restart the whole migration.
+ *
+ * If DMA logging is not enabled, an error will be returned.
+ *
+ */
+struct vfio_device_feature_dma_logging_report {
+ __aligned_u64 iova;
+ __aligned_u64 length;
+ __aligned_u64 page_size;
+ __aligned_u64 bitmap;
+};
+
+#define VFIO_DEVICE_FEATURE_DMA_LOGGING_REPORT 8
+
/* -------- API for Type1 VFIO IOMMU -------- */
/**
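
For reference, a hedged userspace sketch of the LOGGING_REPORT read-back: the report payload is carried after the variable-length struct vfio_device_feature header defined earlier in this file, and the caller's bitmap is interpreted with the usual one-bit-per-page_size dirty-bitmap convention:

/* Illustrative only: read back the device DMA dirty log for one IOVA range. */
#include <stdint.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <linux/vfio.h>

static int read_dirty_log(int device_fd, uint64_t iova, uint64_t length,
			  uint64_t page_size, uint64_t *bitmap)
{
	size_t argsz = sizeof(struct vfio_device_feature) +
		       sizeof(struct vfio_device_feature_dma_logging_report);
	struct vfio_device_feature *feat = calloc(1, argsz);
	struct vfio_device_feature_dma_logging_report *rep;
	int ret;

	if (!feat)
		return -1;
	rep = (void *)feat->data;
	feat->argsz = argsz;
	feat->flags = VFIO_DEVICE_FEATURE_GET |
		      VFIO_DEVICE_FEATURE_DMA_LOGGING_REPORT;
	rep->iova = iova;
	rep->length = length;
	rep->page_size = page_size;
	rep->bitmap = (uintptr_t)bitmap;	/* caller-allocated, pre-zeroed */

	ret = ioctl(device_fd, VFIO_DEVICE_FEATURE, feat);
	free(feat);
	return ret;
}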
diff --git a/include/uapi/linux/videodev2.h b/include/uapi/linux/videodev2.h
index 01e630f2ec78..86cae23cc446 100644
--- a/include/uapi/linux/videodev2.h
+++ b/include/uapi/linux/videodev2.h
@@ -502,7 +502,6 @@ struct v4l2_capability {
#define V4L2_CAP_META_CAPTURE 0x00800000 /* Is a metadata capture device */
#define V4L2_CAP_READWRITE 0x01000000 /* read/write systemcalls */
-#define V4L2_CAP_ASYNCIO 0x02000000 /* async I/O */
#define V4L2_CAP_STREAMING 0x04000000 /* streaming I/O ioctls */
#define V4L2_CAP_META_OUTPUT 0x08000000 /* Is a metadata output device */
@@ -2435,6 +2434,7 @@ struct v4l2_event_vsync {
#define V4L2_EVENT_CTRL_CH_VALUE (1 << 0)
#define V4L2_EVENT_CTRL_CH_FLAGS (1 << 1)
#define V4L2_EVENT_CTRL_CH_RANGE (1 << 2)
+#define V4L2_EVENT_CTRL_CH_DIMENSIONS (1 << 3)
struct v4l2_event_ctrl {
__u32 changes;
@@ -2682,6 +2682,11 @@ struct v4l2_create_buffers {
#ifndef __KERNEL__
#define V4L2_PIX_FMT_HM12 V4L2_PIX_FMT_NV12_16L16
#define V4L2_PIX_FMT_SUNXI_TILED_NV12 V4L2_PIX_FMT_NV12_32L32
+/*
+ * This capability was never implemented; anyone still using this cap should drop it
+ * from their code.
+ */
+#define V4L2_CAP_ASYNCIO 0x02000000
#endif
#endif /* _UAPI__LINUX_VIDEODEV2_H */
diff --git a/include/uapi/linux/virtio_blk.h b/include/uapi/linux/virtio_blk.h
index d888f013d9ff..58e70b24b504 100644
--- a/include/uapi/linux/virtio_blk.h
+++ b/include/uapi/linux/virtio_blk.h
@@ -40,6 +40,7 @@
#define VIRTIO_BLK_F_MQ 12 /* support more than one vq */
#define VIRTIO_BLK_F_DISCARD 13 /* DISCARD is supported */
#define VIRTIO_BLK_F_WRITE_ZEROES 14 /* WRITE ZEROES is supported */
+#define VIRTIO_BLK_F_SECURE_ERASE 16 /* Secure Erase is supported */
/* Legacy feature bits */
#ifndef VIRTIO_BLK_NO_LEGACY
@@ -121,6 +122,21 @@ struct virtio_blk_config {
__u8 write_zeroes_may_unmap;
__u8 unused1[3];
+
+ /* the next 3 entries are guarded by VIRTIO_BLK_F_SECURE_ERASE */
+ /*
+ * The maximum secure erase sectors (in 512-byte sectors) for
+ * one segment.
+ */
+ __virtio32 max_secure_erase_sectors;
+ /*
+ * The maximum number of secure erase segments in a
+ * secure erase command.
+ */
+ __virtio32 max_secure_erase_seg;
+ /* Secure erase commands must be aligned to this number of sectors. */
+ __virtio32 secure_erase_sector_alignment;
+
} __attribute__((packed));
/*
@@ -155,6 +171,9 @@ struct virtio_blk_config {
/* Write zeroes command */
#define VIRTIO_BLK_T_WRITE_ZEROES 13
+/* Secure erase command */
+#define VIRTIO_BLK_T_SECURE_ERASE 14
+
#ifndef VIRTIO_BLK_NO_LEGACY
/* Barrier before this op. */
#define VIRTIO_BLK_T_BARRIER 0x80000000
diff --git a/include/uapi/misc/habanalabs.h b/include/uapi/misc/habanalabs.h
index 5d06d5c74dd1..e00ebe05097d 100644
--- a/include/uapi/misc/habanalabs.h
+++ b/include/uapi/misc/habanalabs.h
@@ -707,6 +707,25 @@ enum hl_server_type {
HL_SERVER_GAUDI2_HLS2 = 5
};
+/*
+ * Notifier event values - for the notification mechanism and the HL_INFO_GET_EVENTS command
+ *
+ * HL_NOTIFIER_EVENT_TPC_ASSERT - Indicates TPC assert event
+ * HL_NOTIFIER_EVENT_UNDEFINED_OPCODE - Indicates undefined operation code
+ * HL_NOTIFIER_EVENT_DEVICE_RESET - Indicates device requires a reset
+ * HL_NOTIFIER_EVENT_CS_TIMEOUT - Indicates CS timeout error
+ * HL_NOTIFIER_EVENT_DEVICE_UNAVAILABLE - Indicates device is unavailable
+ * HL_NOTIFIER_EVENT_USER_ENGINE_ERR - Indicates device engine in error state
+ * HL_NOTIFIER_EVENT_GENERAL_HW_ERR - Indicates device HW error
+ */
+#define HL_NOTIFIER_EVENT_TPC_ASSERT (1ULL << 0)
+#define HL_NOTIFIER_EVENT_UNDEFINED_OPCODE (1ULL << 1)
+#define HL_NOTIFIER_EVENT_DEVICE_RESET (1ULL << 2)
+#define HL_NOTIFIER_EVENT_CS_TIMEOUT (1ULL << 3)
+#define HL_NOTIFIER_EVENT_DEVICE_UNAVAILABLE (1ULL << 4)
+#define HL_NOTIFIER_EVENT_USER_ENGINE_ERR (1ULL << 5)
+#define HL_NOTIFIER_EVENT_GENERAL_HW_ERR (1ULL << 6)
+
/* Opcode for management ioctl
*
* HW_IP_INFO - Receive information about different IP blocks in the
@@ -754,6 +773,7 @@ enum hl_server_type {
* Razwi initiator.
* Razwi cause, was it a page fault or MMU access error.
* HL_INFO_DEV_MEM_ALLOC_PAGE_SIZES - Retrieve valid page sizes for device memory allocation
+ * HL_INFO_SECURED_ATTESTATION - Retrieve attestation report of the boot.
* HL_INFO_REGISTER_EVENTFD - Register eventfd for event notifications.
* HL_INFO_UNREGISTER_EVENTFD - Unregister eventfd
* HL_INFO_GET_EVENTS - Retrieve the last occurred events
@@ -783,14 +803,19 @@ enum hl_server_type {
#define HL_INFO_CS_TIMEOUT_EVENT 24
#define HL_INFO_RAZWI_EVENT 25
#define HL_INFO_DEV_MEM_ALLOC_PAGE_SIZES 26
+#define HL_INFO_SECURED_ATTESTATION 27
#define HL_INFO_REGISTER_EVENTFD 28
#define HL_INFO_UNREGISTER_EVENTFD 29
#define HL_INFO_GET_EVENTS 30
#define HL_INFO_UNDEFINED_OPCODE_EVENT 31
+#define HL_INFO_ENGINE_STATUS 32
#define HL_INFO_VERSION_MAX_LEN 128
#define HL_INFO_CARD_NAME_MAX_LEN 16
+/* Maximum buffer size for retrieving engines status */
+#define HL_ENGINES_DATA_MAX_SIZE SZ_1M
+
/**
* struct hl_info_hw_ip_info - hardware information on various IPs in the ASIC
* @sram_base_address: The first SRAM physical base address that is free to be
@@ -821,6 +846,7 @@ enum hl_server_type {
* @tpc_enabled_mask: Bit-mask that represents which TPCs are enabled. Relevant
* for Goya/Gaudi only.
* @dram_enabled: Whether the DRAM is enabled.
+ * @security_enabled: Whether security is enabled on device.
* @mme_master_slave_mode: Indicate whether the MME is working in master/slave
* configuration. Relevant for Greco and later.
* @cpucp_version: The CPUCP f/w version.
@@ -852,7 +878,7 @@ struct hl_info_hw_ip_info {
__u32 psoc_pci_pll_div_factor;
__u8 tpc_enabled_mask;
__u8 dram_enabled;
- __u8 reserved;
+ __u8 security_enabled;
__u8 mme_master_slave_mode;
__u8 cpucp_version[HL_INFO_VERSION_MAX_LEN];
__u8 card_name[HL_INFO_CARD_NAME_MAX_LEN];
@@ -876,13 +902,13 @@ struct hl_info_hw_idle {
__u32 is_idle;
/*
* Bitmask of busy engines.
- * Bits definition is according to `enum <chip>_enging_id'.
+ * Bits definition is according to `enum <chip>_engine_id'.
*/
__u32 busy_engines_mask;
/*
* Extended Bitmask of busy engines.
- * Bits definition is according to `enum <chip>_enging_id'.
+ * Bits definition is according to `enum <chip>_engine_id'.
*/
__u64 busy_engines_mask_ext[HL_BUSY_ENGINES_MASK_EXT_SIZE];
};
@@ -1078,12 +1104,12 @@ struct hl_info_razwi_event {
* struct hl_info_undefined_opcode_event - info about last undefined opcode error
* @timestamp: timestamp of the undefined opcode error
 * @cb_addr_streams: CB addresses (per stream) that currently exist in the PQ
- * entiers. In case all streams array entries are
+ * entries. In case all streams array entries are
* filled with values, it means the execution was in Lower-CP.
* @cq_addr: the address of the current handled command buffer
* @cq_size: the size of the current handled command buffer
* @cb_addr_streams_len: num of streams - actual len of cb_addr_streams array.
- * should be equal to 1 incase of undefined opcode
+ * should be equal to 1 in case of undefined opcode
 * in Upper-CP (specific stream) and equal to 4 in case
* of undefined opcode in Lower-CP.
* @engine_id: engine-id that the error occurred on
@@ -1109,6 +1135,45 @@ struct hl_info_dev_memalloc_page_sizes {
__u64 page_order_bitmask;
};
+#define SEC_PCR_DATA_BUF_SZ 256
+#define SEC_PCR_QUOTE_BUF_SZ 510 /* (512 - 2) 2 bytes used for size */
+#define SEC_SIGNATURE_BUF_SZ 255 /* (256 - 1) 1 byte used for size */
+#define SEC_PUB_DATA_BUF_SZ 510 /* (512 - 2) 2 bytes used for size */
+#define SEC_CERTIFICATE_BUF_SZ 2046 /* (2048 - 2) 2 bytes used for size */
+
+/*
+ * struct hl_info_sec_attest - attestation report of the boot
+ * @nonce: number used only once; a random number provided by the host, also
+ *         passed to the quote command as qualifying data.
+ * @pcr_quote_len: length of the attestation quote data (bytes)
+ * @pub_data_len: length of the public data (bytes)
+ * @certificate_len: length of the certificate (bytes)
+ * @pcr_num_reg: number of PCR registers in the pcr_data array
+ * @pcr_reg_len: length of each PCR register in the pcr_data array (bytes)
+ * @quote_sig_len: length of the attestation report signature (bytes)
+ * @pcr_data: raw values of the PCR registers
+ * @pcr_quote: attestation report data structure
+ * @quote_sig: signature structure of the attestation report
+ * @public_data: public key for the signed attestation
+ * (outPublic + name + qualifiedName)
+ * @certificate: certificate for the attestation signing key
+ */
+struct hl_info_sec_attest {
+ __u32 nonce;
+ __u16 pcr_quote_len;
+ __u16 pub_data_len;
+ __u16 certificate_len;
+ __u8 pcr_num_reg;
+ __u8 pcr_reg_len;
+ __u8 quote_sig_len;
+ __u8 pcr_data[SEC_PCR_DATA_BUF_SZ];
+ __u8 pcr_quote[SEC_PCR_QUOTE_BUF_SZ];
+ __u8 quote_sig[SEC_SIGNATURE_BUF_SZ];
+ __u8 public_data[SEC_PUB_DATA_BUF_SZ];
+ __u8 certificate[SEC_CERTIFICATE_BUF_SZ];
+ __u8 pad0[2];
+};
+
enum gaudi_dcores {
HL_GAUDI_WS_DCORE,
HL_GAUDI_WN_DCORE,
@@ -1130,6 +1195,11 @@ enum gaudi_dcores {
* resolution. Currently not in use.
* @pll_index: Index as defined in hl_<asic type>_pll_index enumeration.
* @eventfd: event file descriptor for event notifications.
+ * @user_buffer_actual_size: Actual data size copied by the driver to the user-allocated
+ *                           buffer. The user may allocate a buffer larger than needed,
+ *                           so this field reports the exact number of bytes the kernel
+ *                           copied into it.
+ * @sec_attest_nonce: Nonce number used for attestation report.
* @pad: Padding to 64 bit.
*/
struct hl_info_args {
@@ -1143,6 +1213,8 @@ struct hl_info_args {
__u32 period_ms;
__u32 pll_index;
__u32 eventfd;
+ __u32 user_buffer_actual_size;
+ __u32 sec_attest_nonce;
};
__u32 pad;
@@ -1337,17 +1409,47 @@ struct hl_cs_chunk {
#define HL_CS_FLAGS_RESERVE_SIGNALS_ONLY 0x1000
#define HL_CS_FLAGS_UNRESERVE_SIGNALS_ONLY 0x2000
+/*
+ * The engine cores CS is merged into the existing CS ioctls.
+ * Use it to control the engine cores mode.
+ */
+#define HL_CS_FLAGS_ENGINE_CORE_COMMAND 0x4000
+
#define HL_CS_STATUS_SUCCESS 0
#define HL_MAX_JOBS_PER_CS 512
+/* HL_ENGINE_CORE_ values
+ *
+ * HL_ENGINE_CORE_HALT: engine core halt
+ * HL_ENGINE_CORE_RUN: engine core run
+ */
+#define HL_ENGINE_CORE_HALT (1 << 0)
+#define HL_ENGINE_CORE_RUN (1 << 1)
+
struct hl_cs_in {
- /* this holds address of array of hl_cs_chunk for restore phase */
- __u64 chunks_restore;
+ union {
+ struct {
+ /* this holds address of array of hl_cs_chunk for restore phase */
+ __u64 chunks_restore;
+
+ /* holds address of array of hl_cs_chunk for execution phase */
+ __u64 chunks_execute;
+ };
+
+ /* Valid only when HL_CS_FLAGS_ENGINE_CORE_COMMAND is set */
+ struct {
+ /* this holds address of array of uint32 for engine_cores */
+ __u64 engine_cores;
- /* holds address of array of hl_cs_chunk for execution phase */
- __u64 chunks_execute;
+ /* number of engine cores in engine_cores array */
+ __u32 num_engine_cores;
+
+ /* the core command to be sent towards engine cores */
+ __u32 core_command;
+ };
+ };
union {
/*
@@ -1412,7 +1514,7 @@ struct hl_cs_out {
/* Valid only when HL_CS_FLAGS_RESERVE_SIGNALS_ONLY is set */
struct {
- /* This is the resereved signal handle id */
+ /* This is the reserved signal handle id */
__u32 handle_id;
/* This is the signals count */
@@ -1875,21 +1977,6 @@ struct hl_debug_args {
};
/*
- * Notifier event values - for the notification mechanism and the HL_INFO_GET_EVENTS command
- *
- * HL_NOTIFIER_EVENT_TPC_ASSERT - Indicates TPC assert event
- * HL_NOTIFIER_EVENT_UNDEFINED_OPCODE - Indicates undefined operation code
- * HL_NOTIFIER_EVENT_DEVICE_RESET - Indicates device requires a reset
- * HL_NOTIFIER_EVENT_CS_TIMEOUT - Indicates CS timeout error
- * HL_NOTIFIER_EVENT_DEVICE_UNAVAILABLE - Indicates device is unavailable
- */
-#define HL_NOTIFIER_EVENT_TPC_ASSERT (1ULL << 0)
-#define HL_NOTIFIER_EVENT_UNDEFINED_OPCODE (1ULL << 1)
-#define HL_NOTIFIER_EVENT_DEVICE_RESET (1ULL << 2)
-#define HL_NOTIFIER_EVENT_CS_TIMEOUT (1ULL << 3)
-#define HL_NOTIFIER_EVENT_DEVICE_UNAVAILABLE (1ULL << 4)
-
-/*
* Various information operations such as:
* - H/W IP information
* - Current dram usage
diff --git a/include/uapi/misc/uacce/hisi_qm.h b/include/uapi/misc/uacce/hisi_qm.h
index 1faef5ff87ef..3e66dbc2f323 100644
--- a/include/uapi/misc/uacce/hisi_qm.h
+++ b/include/uapi/misc/uacce/hisi_qm.h
@@ -14,11 +14,26 @@ struct hisi_qp_ctx {
__u16 qc_type;
};
+/**
+ * struct hisi_qp_info - User data for hisi qp.
+ * @sqe_size: Submission queue element size
+ * @sq_depth: The number of sqe
+ * @cq_depth: The number of cqe
+ * @reserved: Reserved data
+ */
+struct hisi_qp_info {
+ __u32 sqe_size;
+ __u16 sq_depth;
+ __u16 cq_depth;
+ __u64 reserved;
+};
+
#define HISI_QM_API_VER_BASE "hisi_qm_v1"
#define HISI_QM_API_VER2_BASE "hisi_qm_v2"
#define HISI_QM_API_VER3_BASE "hisi_qm_v3"
/* UACCE_CMD_QM_SET_QP_CTX: Set qp algorithm type */
#define UACCE_CMD_QM_SET_QP_CTX _IOWR('H', 10, struct hisi_qp_ctx)
-
+/* UACCE_CMD_QM_SET_QP_INFO: Set qp depth and BD size */
+#define UACCE_CMD_QM_SET_QP_INFO _IOWR('H', 11, struct hisi_qp_info)
#endif
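
For reference, a hedged sketch of the new ioctl on an opened UACCE queue file descriptor; the depths and SQE size are illustrative, and since the command is _IOWR the driver may write back the values it actually applied:

/* Illustrative only: negotiate queue depth and BD size on a UACCE queue fd. */
#include <sys/ioctl.h>
#include <misc/uacce/hisi_qm.h>		/* installed uapi header path */

static int set_qp_info(int queue_fd)
{
	struct hisi_qp_info qp = {
		.sqe_size = 128,	/* illustrative, device specific */
		.sq_depth = 1024,
		.cq_depth = 1024,
	};

	return ioctl(queue_fd, UACCE_CMD_QM_SET_QP_INFO, &qp);
}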
diff --git a/include/uapi/mtd/mtd-abi.h b/include/uapi/mtd/mtd-abi.h
index 890d9e5b76d7..714d55b49d2a 100644
--- a/include/uapi/mtd/mtd-abi.h
+++ b/include/uapi/mtd/mtd-abi.h
@@ -55,9 +55,9 @@ struct mtd_oob_buf64 {
* @MTD_OPS_RAW: data are transferred as-is, with no error correction;
* this mode implies %MTD_OPS_PLACE_OOB
*
- * These modes can be passed to ioctl(MEMWRITE) and are also used internally.
- * See notes on "MTD file modes" for discussion on %MTD_OPS_RAW vs.
- * %MTD_FILE_MODE_RAW.
+ * These modes can be passed to ioctl(MEMWRITE) and ioctl(MEMREAD); they are
+ * also used internally. See notes on "MTD file modes" for discussion on
+ * %MTD_OPS_RAW vs. %MTD_FILE_MODE_RAW.
*/
enum {
MTD_OPS_PLACE_OOB = 0,
@@ -91,6 +91,53 @@ struct mtd_write_req {
__u8 padding[7];
};
+/**
+ * struct mtd_read_req_ecc_stats - ECC statistics for a read operation
+ *
+ * @uncorrectable_errors: the number of uncorrectable errors that happened
+ * during the read operation
+ * @corrected_bitflips: the number of bitflips corrected during the read
+ * operation
+ * @max_bitflips: the maximum number of bitflips detected in any single ECC
+ * step for the data read during the operation; this information
+ * can be used to decide whether the data stored in a specific
+ * region of the MTD device should be moved somewhere else to
+ * avoid data loss.
+ */
+struct mtd_read_req_ecc_stats {
+ __u32 uncorrectable_errors;
+ __u32 corrected_bitflips;
+ __u32 max_bitflips;
+};
+
+/**
+ * struct mtd_read_req - data structure for requesting a read operation
+ *
+ * @start: start address
+ * @len: length of data buffer (only lower 32 bits are used)
+ * @ooblen: length of OOB buffer (only lower 32 bits are used)
+ * @usr_data: user-provided data buffer
+ * @usr_oob: user-provided OOB buffer
+ * @mode: MTD mode (see "MTD operation modes")
+ * @padding: reserved, must be set to 0
+ * @ecc_stats: ECC statistics for the read operation
+ *
+ * This structure supports ioctl(MEMREAD) operations, allowing data and/or OOB
+ * reads in various modes. To read from OOB-only, set @usr_data == NULL, and to
+ * read data-only, set @usr_oob == NULL. However, setting both @usr_data and
+ * @usr_oob to NULL is not allowed.
+ */
+struct mtd_read_req {
+ __u64 start;
+ __u64 len;
+ __u64 ooblen;
+ __u64 usr_data;
+ __u64 usr_oob;
+ __u8 mode;
+ __u8 padding[7];
+ struct mtd_read_req_ecc_stats ecc_stats;
+};
+
#define MTD_ABSENT 0
#define MTD_RAM 1
#define MTD_ROM 2
@@ -207,6 +254,12 @@ struct otp_info {
#define MEMWRITE _IOWR('M', 24, struct mtd_write_req)
/* Erase a given range of user data (must be in mode %MTD_FILE_MODE_OTP_USER) */
#define OTPERASE _IOW('M', 25, struct otp_info)
+/*
+ * Most generic read interface; can read in-band and/or out-of-band in various
+ * modes (see "struct mtd_read_req"). This ioctl is not supported for flashes
+ * without OOB, e.g., NOR flash.
+ */
+#define MEMREAD _IOWR('M', 26, struct mtd_read_req)
/*
* Obsolete legacy interface. Keep it in order not to break userspace
@@ -270,8 +323,9 @@ struct mtd_ecc_stats {
* Note: %MTD_FILE_MODE_RAW provides the same functionality as %MTD_OPS_RAW -
* raw access to the flash, without error correction or autoplacement schemes.
* Wherever possible, the MTD_OPS_* mode will override the MTD_FILE_MODE_* mode
- * (e.g., when using ioctl(MEMWRITE)), but in some cases, the MTD_FILE_MODE is
- * used out of necessity (e.g., `write()', ioctl(MEMWRITEOOB64)).
+ * (e.g., when using ioctl(MEMWRITE) or ioctl(MEMREAD)), but in some cases, the
+ * MTD_FILE_MODE is used out of necessity (e.g., `write()',
+ * ioctl(MEMWRITEOOB64)).
*/
enum mtd_file_modes {
MTD_FILE_MODE_NORMAL = MTD_OTP_OFF,
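
For reference, a hedged sketch of a data-only MEMREAD that also collects the new ECC statistics; the MTD character device fd, offset, and buffer are assumed to come from the caller:

/* Illustrative only: in-band read via ioctl(MEMREAD) with ECC statistics. */
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <mtd/mtd-abi.h>

static int mtd_read_data(int fd, uint64_t offset, void *buf, uint32_t len,
			 struct mtd_read_req_ecc_stats *stats)
{
	struct mtd_read_req req;

	memset(&req, 0, sizeof(req));
	req.start = offset;
	req.len = len;
	req.usr_data = (uintptr_t)buf;	/* data-only: usr_oob stays NULL */
	req.mode = MTD_OPS_PLACE_OOB;

	if (ioctl(fd, MEMREAD, &req) < 0)
		return -1;
	if (stats)
		*stats = req.ecc_stats;
	return 0;
}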
diff --git a/include/uapi/rdma/efa-abi.h b/include/uapi/rdma/efa-abi.h
index 08035ccf1fff..163ac79556d6 100644
--- a/include/uapi/rdma/efa-abi.h
+++ b/include/uapi/rdma/efa-abi.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-2-Clause) */
/*
- * Copyright 2018-2021 Amazon.com, Inc. or its affiliates. All rights reserved.
+ * Copyright 2018-2022 Amazon.com, Inc. or its affiliates. All rights reserved.
*/
#ifndef EFA_ABI_USER_H
@@ -54,6 +54,7 @@ struct efa_ibv_alloc_pd_resp {
enum {
EFA_CREATE_CQ_WITH_COMPLETION_CHANNEL = 1 << 0,
+ EFA_CREATE_CQ_WITH_SGID = 1 << 1,
};
struct efa_ibv_create_cq {
@@ -118,6 +119,7 @@ enum {
EFA_QUERY_DEVICE_CAPS_RDMA_READ = 1 << 0,
EFA_QUERY_DEVICE_CAPS_RNR_RETRY = 1 << 1,
EFA_QUERY_DEVICE_CAPS_CQ_NOTIFICATIONS = 1 << 2,
+ EFA_QUERY_DEVICE_CAPS_CQ_WITH_SGID = 1 << 3,
};
struct efa_ibv_ex_query_device_resp {
diff --git a/include/uapi/rdma/mlx5-abi.h b/include/uapi/rdma/mlx5-abi.h
index 86be4a92b67b..a96b7d2770e1 100644
--- a/include/uapi/rdma/mlx5-abi.h
+++ b/include/uapi/rdma/mlx5-abi.h
@@ -104,6 +104,7 @@ enum mlx5_ib_alloc_ucontext_resp_mask {
MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_ECE = 1UL << 2,
MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_SQD2RTS = 1UL << 3,
MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_REAL_TIME_TS = 1UL << 4,
+ MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_MKEY_UPDATE_TAG = 1UL << 5,
};
enum mlx5_user_cmds_supp_uhw {
diff --git a/include/uapi/rdma/mlx5_user_ioctl_cmds.h b/include/uapi/rdma/mlx5_user_ioctl_cmds.h
index 3bee490eb585..595edad03dfe 100644
--- a/include/uapi/rdma/mlx5_user_ioctl_cmds.h
+++ b/include/uapi/rdma/mlx5_user_ioctl_cmds.h
@@ -174,6 +174,7 @@ enum mlx5_ib_devx_umem_reg_attrs {
MLX5_IB_ATTR_DEVX_UMEM_REG_ACCESS,
MLX5_IB_ATTR_DEVX_UMEM_REG_OUT_ID,
MLX5_IB_ATTR_DEVX_UMEM_REG_PGSZ_BITMAP,
+ MLX5_IB_ATTR_DEVX_UMEM_REG_DMABUF_FD,
};
enum mlx5_ib_devx_umem_dereg_attrs {
diff --git a/include/uapi/rdma/rdma_user_rxe.h b/include/uapi/rdma/rdma_user_rxe.h
index f09c5c9e3dd5..73f679dfd2df 100644
--- a/include/uapi/rdma/rdma_user_rxe.h
+++ b/include/uapi/rdma/rdma_user_rxe.h
@@ -74,7 +74,7 @@ struct rxe_av {
struct rxe_send_wr {
__aligned_u64 wr_id;
- __u32 num_sge;
+ __u32 reserved;
__u32 opcode;
__u32 send_flags;
union {
@@ -166,7 +166,7 @@ struct rxe_send_wqe {
struct rxe_recv_wqe {
__aligned_u64 wr_id;
- __u32 num_sge;
+ __u32 reserved;
__u32 padding;
struct rxe_dma_info dma;
};
diff --git a/include/uapi/scsi/scsi_netlink_fc.h b/include/uapi/scsi/scsi_netlink_fc.h
index 7535253f1a96..b46e9cbeb001 100644
--- a/include/uapi/scsi/scsi_netlink_fc.h
+++ b/include/uapi/scsi/scsi_netlink_fc.h
@@ -35,7 +35,7 @@
* FC Transport Broadcast Event Message :
* FC_NL_ASYNC_EVENT
*
- * Note: if Vendor Unique message, &event_data will be start of
+ * Note: if Vendor Unique message, event_data_flex will be the start of
* vendor unique payload, and the length of the payload is
* per event_datalen
*
@@ -50,7 +50,10 @@ struct fc_nl_event {
__u16 event_datalen;
__u32 event_num;
__u32 event_code;
- __u32 event_data;
+ union {
+ __u32 event_data;
+ __DECLARE_FLEX_ARRAY(__u8, event_data_flex);
+ };
} __attribute__((aligned(sizeof(__u64))));
diff --git a/include/uapi/sound/asequencer.h b/include/uapi/sound/asequencer.h
index a75e14edc957..6d4a2c60808d 100644
--- a/include/uapi/sound/asequencer.h
+++ b/include/uapi/sound/asequencer.h
@@ -3,22 +3,6 @@
* Main header file for the ALSA sequencer
* Copyright (c) 1998-1999 by Frank van de Pol <fvdpol@coil.demon.nl>
* (c) 1998-1999 by Jaroslav Kysela <perex@perex.cz>
- *
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
*/
#ifndef _UAPI__SOUND_ASEQUENCER_H
#define _UAPI__SOUND_ASEQUENCER_H
diff --git a/include/uapi/sound/asoc.h b/include/uapi/sound/asoc.h
index 053949287ce8..9f35bedafcff 100644
--- a/include/uapi/sound/asoc.h
+++ b/include/uapi/sound/asoc.h
@@ -5,10 +5,6 @@
* Copyright (C) 2012 Texas Instruments Inc.
* Copyright (C) 2015 Intel Corporation.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
* Simple file API to load FW that includes mixers, coefficients, DAPM graphs,
* algorithms, equalisers, DAIs, widgets etc.
*/
diff --git a/include/uapi/sound/asound.h b/include/uapi/sound/asound.h
index 3974a2a911cc..de6810e94abe 100644
--- a/include/uapi/sound/asound.h
+++ b/include/uapi/sound/asound.h
@@ -3,22 +3,6 @@
* Advanced Linux Sound Architecture - ALSA - Driver
* Copyright (c) 1994-2003 by Jaroslav Kysela <perex@perex.cz>,
* Abramo Bagnara <abramo@alsa-project.org>
- *
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
*/
#ifndef _UAPI__SOUND_ASOUND_H
diff --git a/include/uapi/sound/asound_fm.h b/include/uapi/sound/asound_fm.h
index 8471f404ff0b..25ec5e38af5c 100644
--- a/include/uapi/sound/asound_fm.h
+++ b/include/uapi/sound/asound_fm.h
@@ -10,21 +10,6 @@
* 4Front Technologies
*
* Direct FM control
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
*/
#define SNDRV_DM_FM_MODE_OPL2 0x00
diff --git a/include/uapi/sound/compress_offload.h b/include/uapi/sound/compress_offload.h
index 3aef123dbd7f..d185957f3fe0 100644
--- a/include/uapi/sound/compress_offload.h
+++ b/include/uapi/sound/compress_offload.h
@@ -5,23 +5,6 @@
* Copyright (C) 2011 Intel Corporation
* Authors: Vinod Koul <vinod.koul@linux.intel.com>
* Pierre-Louis Bossart <pierre-louis.bossart@linux.intel.com>
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
*/
#ifndef __COMPRESS_OFFLOAD_H
#define __COMPRESS_OFFLOAD_H
diff --git a/include/uapi/sound/compress_params.h b/include/uapi/sound/compress_params.h
index 726361716919..ddc77322d571 100644
--- a/include/uapi/sound/compress_params.h
+++ b/include/uapi/sound/compress_params.h
@@ -7,47 +7,13 @@
* Authors: Pierre-Louis Bossart <pierre-louis.bossart@linux.intel.com>
* Vinod Koul <vinod.koul@linux.intel.com>
*
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*
* The definitions in this file are derived from the OpenMAX AL version 1.1
- * and OpenMAX IL v 1.1.2 header files which contain the copyright notice below.
+ * and OpenMAX IL v 1.1.2 header files which contain the copyright notice below
+ * and are licensed under the MIT license.
*
* Copyright (c) 2007-2010 The Khronos Group Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining
- * a copy of this software and/or associated documentation files (the
- * "Materials "), to deal in the Materials without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sublicense, and/or sell copies of the Materials, and to
- * permit persons to whom the Materials are furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Materials.
- *
- * THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
- * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
- * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
- * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
- * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
- * MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS.
- *
*/
#ifndef __SND_COMPRESS_PARAMS_H
#define __SND_COMPRESS_PARAMS_H
diff --git a/include/uapi/sound/emu10k1.h b/include/uapi/sound/emu10k1.h
index 88609cc0524c..1c1f1dd44611 100644
--- a/include/uapi/sound/emu10k1.h
+++ b/include/uapi/sound/emu10k1.h
@@ -3,22 +3,6 @@
* Copyright (c) by Jaroslav Kysela <perex@perex.cz>,
* Creative Labs, Inc.
* Definitions for EMU10K1 (SB Live!) chips
- *
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
*/
#ifndef _UAPI__SOUND_EMU10K1_H
#define _UAPI__SOUND_EMU10K1_H
diff --git a/include/uapi/sound/hdsp.h b/include/uapi/sound/hdsp.h
index b8df62b60f4d..0961954658d6 100644
--- a/include/uapi/sound/hdsp.h
+++ b/include/uapi/sound/hdsp.h
@@ -4,20 +4,6 @@
/*
* Copyright (C) 2003 Thomas Charbonnel (thomas@undata.org)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifdef __linux__
diff --git a/include/uapi/sound/hdspm.h b/include/uapi/sound/hdspm.h
index 14af3d00ea3f..7043bb3d435a 100644
--- a/include/uapi/sound/hdspm.h
+++ b/include/uapi/sound/hdspm.h
@@ -4,21 +4,6 @@
/*
* Copyright (C) 2003 Winfried Ritsch (IEM)
* based on hdsp.h from Thomas Charbonnel (thomas@undata.org)
- *
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifdef __linux__
diff --git a/include/uapi/sound/sb16_csp.h b/include/uapi/sound/sb16_csp.h
index e64851481d88..5a80f5ec02ee 100644
--- a/include/uapi/sound/sb16_csp.h
+++ b/include/uapi/sound/sb16_csp.h
@@ -4,21 +4,6 @@
* Takashi Iwai <tiwai@suse.de>
*
* SB16ASP/AWE32 CSP control
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
*/
#ifndef _UAPI__SOUND_SB16_CSP_H
#define _UAPI__SOUND_SB16_CSP_H
diff --git a/include/uapi/sound/sfnt_info.h b/include/uapi/sound/sfnt_info.h
index c9a810a6ef48..f2b5e13fb5a7 100644
--- a/include/uapi/sound/sfnt_info.h
+++ b/include/uapi/sound/sfnt_info.h
@@ -6,21 +6,6 @@
* Patch record compatible with AWE driver on OSS
*
* Copyright (C) 1999-2000 Takashi Iwai
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
*/
#include <sound/asound.h>
diff --git a/include/uapi/sound/snd_sst_tokens.h b/include/uapi/sound/snd_sst_tokens.h
index ff3748e9308a..defeb0c6ed20 100644
--- a/include/uapi/sound/snd_sst_tokens.h
+++ b/include/uapi/sound/snd_sst_tokens.h
@@ -4,16 +4,6 @@
*
* Copyright (C) 2016 Intel Corp
* Author: Shreyas NC <shreyas.nc@intel.com>
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as version 2, as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
*/
#ifndef __SND_SST_TOKENS_H__
#define __SND_SST_TOKENS_H__
diff --git a/include/uapi/sound/tlv.h b/include/uapi/sound/tlv.h
index 7d6d65f60a42..b99a2414b53d 100644
--- a/include/uapi/sound/tlv.h
+++ b/include/uapi/sound/tlv.h
@@ -1,15 +1,4 @@
/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */
-/*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
#ifndef __UAPI_SOUND_TLV_H
#define __UAPI_SOUND_TLV_H
diff --git a/include/uapi/sound/usb_stream.h b/include/uapi/sound/usb_stream.h
index ffdd3ea1e31d..50609016185a 100644
--- a/include/uapi/sound/usb_stream.h
+++ b/include/uapi/sound/usb_stream.h
@@ -1,20 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */
/*
* Copyright (C) 2007, 2008 Karsten Wiese <fzu@wemgehoertderstaat.de>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
- * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software Foundation,
- * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef _UAPI__SOUND_USB_STREAM_H
diff --git a/include/ufs/ufshcd.h b/include/ufs/ufshcd.h
index 7fe1a926cd99..9f28349ebcff 100644
--- a/include/ufs/ufshcd.h
+++ b/include/ufs/ufshcd.h
@@ -160,8 +160,10 @@ struct ufs_pm_lvl_states {
* @task_tag: Task tag of the command
* @lun: LUN of the command
* @intr_cmd: Interrupt command (doesn't participate in interrupt aggregation)
- * @issue_time_stamp: time stamp for debug purposes
- * @compl_time_stamp: time stamp for statistics
+ * @issue_time_stamp: time stamp for debug purposes (CLOCK_MONOTONIC)
+ * @issue_time_stamp_local_clock: time stamp for debug purposes (local_clock)
+ * @compl_time_stamp: time stamp for statistics (CLOCK_MONOTONIC)
+ * @compl_time_stamp_local_clock: time stamp for debug purposes (local_clock)
* @crypto_key_slot: the key slot to use for inline crypto (-1 if none)
* @data_unit_num: the data unit number for the first block for inline crypto
* @req_abort_skip: skip request abort task flag
@@ -185,7 +187,9 @@ struct ufshcd_lrb {
u8 lun; /* UPIU LUN id field is only 8-bit wide */
bool intr_cmd;
ktime_t issue_time_stamp;
+ u64 issue_time_stamp_local_clock;
ktime_t compl_time_stamp;
+ u64 compl_time_stamp_local_clock;
#ifdef CONFIG_SCSI_UFS_CRYPTO
int crypto_key_slot;
u64 data_unit_num;
@@ -430,7 +434,7 @@ struct ufs_clk_scaling {
struct ufs_event_hist {
int pos;
u32 val[UFS_EVENT_HIST_LENGTH];
- ktime_t tstamp[UFS_EVENT_HIST_LENGTH];
+ u64 tstamp[UFS_EVENT_HIST_LENGTH];
unsigned long long cnt;
};
@@ -446,10 +450,10 @@ struct ufs_event_hist {
*/
struct ufs_stats {
u32 last_intr_status;
- ktime_t last_intr_ts;
+ u64 last_intr_ts;
u32 hibern8_exit_cnt;
- ktime_t last_hibern8_exit_tstamp;
+ u64 last_hibern8_exit_tstamp;
struct ufs_event_hist event[UFS_EVT_CNT];
};
@@ -660,6 +664,12 @@ enum ufshcd_caps {
* notification if it is supported by the UFS device.
*/
UFSHCD_CAP_TEMP_NOTIF = 1 << 11,
+
+ /*
+ * Enable WriteBooster when scaling up the clock and disable
+ * WriteBooster when scaling the clock down.
+ */
+ UFSHCD_CAP_WB_WITH_CLK_SCALING = 1 << 12,
};
struct ufs_hba_variant_params {
@@ -1017,6 +1027,11 @@ static inline bool ufshcd_is_wb_allowed(struct ufs_hba *hba)
return hba->caps & UFSHCD_CAP_WB_EN;
}
+static inline bool ufshcd_enable_wb_if_scaling_up(struct ufs_hba *hba)
+{
+ return hba->caps & UFSHCD_CAP_WB_WITH_CLK_SCALING;
+}
+
#define ufshcd_writel(hba, val, reg) \
writel((val), (hba)->mmio_base + (reg))
#define ufshcd_readl(hba, reg) \
@@ -1160,26 +1175,6 @@ static inline int ufshcd_disable_host_tx_lcc(struct ufs_hba *hba)
return ufshcd_dme_set(hba, UIC_ARG_MIB(PA_LOCAL_TX_LCC_ENABLE), 0);
}
-/* Expose Query-Request API */
-int ufshcd_query_descriptor_retry(struct ufs_hba *hba,
- enum query_opcode opcode,
- enum desc_idn idn, u8 index,
- u8 selector,
- u8 *desc_buf, int *buf_len);
-int ufshcd_read_desc_param(struct ufs_hba *hba,
- enum desc_idn desc_id,
- int desc_index,
- u8 param_offset,
- u8 *param_read_buf,
- u8 param_size);
-int ufshcd_query_attr_retry(struct ufs_hba *hba, enum query_opcode opcode,
- enum attr_idn idn, u8 index, u8 selector,
- u32 *attr_val);
-int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
- enum attr_idn idn, u8 index, u8 selector, u32 *attr_val);
-int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
- enum flag_idn idn, u8 index, bool *flag_res);
-
void ufshcd_auto_hibern8_enable(struct ufs_hba *hba);
void ufshcd_auto_hibern8_update(struct ufs_hba *hba, u32 ahit);
void ufshcd_fixup_dev_quirks(struct ufs_hba *hba,
@@ -1211,6 +1206,7 @@ int ufshcd_exec_raw_upiu_cmd(struct ufs_hba *hba,
enum query_opcode desc_op);
int ufshcd_wb_toggle(struct ufs_hba *hba, bool enable);
+int ufshcd_wb_toggle_buf_flush(struct ufs_hba *hba, bool enable);
int ufshcd_suspend_prepare(struct device *dev);
int __ufshcd_suspend_prepare(struct device *dev, bool rpm_ok_for_spm);
void ufshcd_resume_complete(struct device *dev);
diff --git a/include/video/vga.h b/include/video/vga.h
index d334e64c1c19..947c0abd04ef 100644
--- a/include/video/vga.h
+++ b/include/video/vga.h
@@ -2,15 +2,15 @@
* linux/include/video/vga.h -- standard VGA chipset interaction
*
* Copyright 1999 Jeff Garzik <jgarzik@pobox.com>
- *
+ *
* Copyright history from vga16fb.c:
* Copyright 1999 Ben Pfaff and Petr Vandrovec
- * Based on VGA info at http://www.osdever.net/FreeVGA/home.htm
+ * Based on VGA info at http://www.osdever.net/FreeVGA/home.htm
* Based on VESA framebuffer (c) 1998 Gerd Knorr
*
* This file is subject to the terms and conditions of the GNU General
* Public License. See the file COPYING in the main directory of this
- * archive for more details.
+ * archive for more details.
*
*/
@@ -22,6 +22,8 @@
#include <asm/vga.h>
#include <asm/byteorder.h>
+#define VGA_FB_PHYS_BASE 0xA0000 /* VGA framebuffer I/O base */
+#define VGA_FB_PHYS_SIZE 65536 /* VGA framebuffer I/O size */
/* Some of the code below is taken from SVGAlib. The original,
unmodified copyright notice for that code is below. */
@@ -190,7 +192,7 @@ struct vgastate {
__u32 num_gfx; /* number of gfx registers, 0 for default */
__u32 num_seq; /* number of seq registers, 0 for default */
void *vidstate;
-};
+};
extern int save_vga(struct vgastate *state);
extern int restore_vga(struct vgastate *state);
@@ -198,7 +200,7 @@ extern int restore_vga(struct vgastate *state);
/*
* generic VGA port read/write
*/
-
+
static inline unsigned char vga_io_r (unsigned short port)
{
return inb_p(port);
@@ -261,7 +263,7 @@ static inline void vga_w_fast (void __iomem *regbase, unsigned short port,
/*
* VGA CRTC register read/write
*/
-
+
static inline unsigned char vga_rcrt (void __iomem *regbase, unsigned char reg)
{
vga_w (regbase, VGA_CRT_IC, reg);
@@ -314,7 +316,7 @@ static inline void vga_mm_wcrt (void __iomem *regbase, unsigned char reg, unsign
/*
* VGA sequencer register read/write
*/
-
+
static inline unsigned char vga_rseq (void __iomem *regbase, unsigned char reg)
{
vga_w (regbase, VGA_SEQ_I, reg);
@@ -366,7 +368,7 @@ static inline void vga_mm_wseq (void __iomem *regbase, unsigned char reg, unsign
/*
* VGA graphics controller register read/write
*/
-
+
static inline unsigned char vga_rgfx (void __iomem *regbase, unsigned char reg)
{
vga_w (regbase, VGA_GFX_I, reg);
@@ -419,7 +421,7 @@ static inline void vga_mm_wgfx (void __iomem *regbase, unsigned char reg, unsign
/*
* VGA attribute controller register read/write
*/
-
+
static inline unsigned char vga_rattr (void __iomem *regbase, unsigned char reg)
{
vga_w (regbase, VGA_ATT_IW, reg);
diff --git a/include/xen/xen-ops.h b/include/xen/xen-ops.h
index dae0f350c678..a34f4271a2e9 100644
--- a/include/xen/xen-ops.h
+++ b/include/xen/xen-ops.h
@@ -219,6 +219,7 @@ static inline void xen_preemptible_hcall_end(void) { }
void xen_grant_setup_dma_ops(struct device *dev);
bool xen_is_grant_dma_device(struct device *dev);
bool xen_virtio_mem_acc(struct virtio_device *dev);
+bool xen_virtio_restricted_mem_acc(struct virtio_device *dev);
#else
static inline void xen_grant_setup_dma_ops(struct device *dev)
{
@@ -234,6 +235,11 @@ static inline bool xen_virtio_mem_acc(struct virtio_device *dev)
{
return false;
}
+
+static inline bool xen_virtio_restricted_mem_acc(struct virtio_device *dev)
+{
+ return false;
+}
#endif /* CONFIG_XEN_GRANT_DMA_OPS */
#endif /* INCLUDE_XEN_OPS_H */