path: root/include
Diffstat (limited to 'include')
-rw-r--r--  include/acpi/acpixf.h | 2
-rw-r--r--  include/acpi/ghes.h | 4
-rw-r--r--  include/acpi/processor.h | 3
-rw-r--r--  include/asm-generic/atomic-instrumented.h | 197
-rw-r--r--  include/asm-generic/atomic.h | 33
-rw-r--r--  include/asm-generic/atomic64.h | 15
-rw-r--r--  include/asm-generic/bitops/atomic.h | 188
-rw-r--r--  include/asm-generic/bitops/lock.h | 68
-rw-r--r--  include/asm-generic/pgtable.h | 20
-rw-r--r--  include/asm-generic/qspinlock_types.h | 2
-rw-r--r--  include/asm-generic/tlb.h | 18
-rw-r--r--  include/crypto/dh.h | 4
-rw-r--r--  include/crypto/drbg.h | 3
-rw-r--r--  include/crypto/if_alg.h | 3
-rw-r--r--  include/crypto/scatterwalk.h | 15
-rw-r--r--  include/crypto/sha.h | 4
-rw-r--r--  include/crypto/vmac.h | 63
-rw-r--r--  include/drm/drmP.h | 18
-rw-r--r--  include/drm/drm_atomic.h | 14
-rw-r--r--  include/drm/drm_atomic_helper.h | 1
-rw-r--r--  include/drm/drm_audio_component.h | 118
-rw-r--r--  include/drm/drm_bridge.h | 48
-rw-r--r--  include/drm/drm_client.h | 139
-rw-r--r--  include/drm/drm_connector.h | 279
-rw-r--r--  include/drm/drm_crtc.h | 276
-rw-r--r--  include/drm/drm_debugfs_crc.h | 3
-rw-r--r--  include/drm/drm_device.h | 21
-rw-r--r--  include/drm/drm_dp_helper.h | 56
-rw-r--r--  include/drm/drm_drv.h | 29
-rw-r--r--  include/drm/drm_encoder.h | 16
-rw-r--r--  include/drm/drm_fb_cma_helper.h | 6
-rw-r--r--  include/drm/drm_fb_helper.h | 38
-rw-r--r--  include/drm/drm_file.h | 7
-rw-r--r--  include/drm/drm_fourcc.h | 2
-rw-r--r--  include/drm/drm_mm.h | 34
-rw-r--r--  include/drm/drm_mode_config.h | 36
-rw-r--r--  include/drm/drm_modes.h | 2
-rw-r--r--  include/drm/drm_modeset_helper_vtables.h | 17
-rw-r--r--  include/drm/drm_of.h | 8
-rw-r--r--  include/drm/drm_panel.h | 3
-rw-r--r--  include/drm/drm_pci.h | 7
-rw-r--r--  include/drm/drm_plane.h | 197
-rw-r--r--  include/drm/drm_plane_helper.h | 6
-rw-r--r--  include/drm/drm_prime.h | 6
-rw-r--r--  include/drm/drm_print.h | 77
-rw-r--r--  include/drm/drm_property.h | 4
-rw-r--r--  include/drm/drm_vma_manager.h | 1
-rw-r--r--  include/drm/drm_writeback.h | 136
-rw-r--r--  include/drm/gpu_scheduler.h | 174
-rw-r--r--  include/drm/i915_component.h | 85
-rw-r--r--  include/drm/i915_drm.h | 4
-rw-r--r--  include/drm/i915_pciids.h | 37
-rw-r--r--  include/drm/tinydrm/tinydrm.h | 23
-rw-r--r--  include/drm/ttm/ttm_bo_api.h | 25
-rw-r--r--  include/drm/ttm/ttm_set_memory.h | 150
-rw-r--r--  include/dt-bindings/clock/actions,s700-cmu.h | 118
-rw-r--r--  include/dt-bindings/clock/aspeed-clock.h | 2
-rw-r--r--  include/dt-bindings/clock/axg-audio-clkc.h | 94
-rw-r--r--  include/dt-bindings/clock/axg-clkc.h | 4
-rw-r--r--  include/dt-bindings/clock/gxbb-clkc.h | 1
-rw-r--r--  include/dt-bindings/clock/imx6sll-clock.h | 9
-rw-r--r--  include/dt-bindings/clock/imx6ul-clock.h | 46
-rw-r--r--  include/dt-bindings/clock/maxim,max9485.h | 18
-rw-r--r--  include/dt-bindings/clock/px30-cru.h | 389
-rw-r--r--  include/dt-bindings/clock/pxa-clock.h | 3
-rw-r--r--  include/dt-bindings/clock/qcom,dispcc-sdm845.h | 45
-rw-r--r--  include/dt-bindings/clock/qcom,gcc-sdm845.h | 2
-rw-r--r--  include/dt-bindings/clock/r9a06g032-sysctrl.h | 148
-rw-r--r--  include/dt-bindings/clock/rk3399-ddr.h | 56
-rw-r--r--  include/dt-bindings/clock/sun8i-r40-ccu.h | 4
-rw-r--r--  include/dt-bindings/clock/sun8i-tcon-top.h | 11
-rw-r--r--  include/dt-bindings/pinctrl/at91.h | 4
-rw-r--r--  include/dt-bindings/pinctrl/samsung.h | 7
-rw-r--r--  include/dt-bindings/regulator/maxim,max77802.h | 5
-rw-r--r--  include/dt-bindings/regulator/qcom,rpmh-regulator.h | 36
-rw-r--r--  include/dt-bindings/soc/qcom,rpmh-rsc.h | 14
-rw-r--r--  include/linux/acpi.h | 28
-rw-r--r--  include/linux/ascii85.h | 38
-rw-r--r--  include/linux/atmdev.h | 15
-rw-r--r--  include/linux/atomic.h | 453
-rw-r--r--  include/linux/audit.h | 5
-rw-r--r--  include/linux/backing-dev-defs.h | 2
-rw-r--r--  include/linux/bio.h | 19
-rw-r--r--  include/linux/bitfield.h | 7
-rw-r--r--  include/linux/bitops.h | 22
-rw-r--r--  include/linux/bits.h | 26
-rw-r--r--  include/linux/blk-cgroup.h | 146
-rw-r--r--  include/linux/blk-mq.h | 18
-rw-r--r--  include/linux/blk_types.h | 27
-rw-r--r--  include/linux/blkdev.h | 70
-rw-r--r--  include/linux/bootmem.h | 17
-rw-r--r--  include/linux/bpf-cgroup.h | 81
-rw-r--r--  include/linux/bpf.h | 99
-rw-r--r--  include/linux/bpf_lirc.h | 5
-rw-r--r--  include/linux/bpf_types.h | 9
-rw-r--r--  include/linux/bpfilter.h | 6
-rw-r--r--  include/linux/brcmphy.h | 1
-rw-r--r--  include/linux/build-salt.h | 20
-rw-r--r--  include/linux/can/dev.h | 7
-rw-r--r--  include/linux/cdrom.h | 3
-rw-r--r--  include/linux/cgroup-defs.h | 3
-rw-r--r--  include/linux/cgroup.h | 30
-rw-r--r--  include/linux/clk-provider.h | 26
-rw-r--r--  include/linux/clk.h | 33
-rw-r--r--  include/linux/clocksource.h | 3
-rw-r--r--  include/linux/compat.h | 28
-rw-r--r--  include/linux/compat_time.h | 9
-rw-r--r--  include/linux/compiler-gcc.h | 54
-rw-r--r--  include/linux/compiler_types.h | 18
-rw-r--r--  include/linux/console.h | 5
-rw-r--r--  include/linux/cpu.h | 25
-rw-r--r--  include/linux/cpuhotplug.h | 1
-rw-r--r--  include/linux/cpumask.h | 18
-rw-r--r--  include/linux/crc32poly.h | 20
-rw-r--r--  include/linux/cred.h | 15
-rw-r--r--  include/linux/crypto.h | 5
-rw-r--r--  include/linux/dax.h | 2
-rw-r--r--  include/linux/dcache.h | 3
-rw-r--r--  include/linux/delayacct.h | 2
-rw-r--r--  include/linux/device.h | 26
-rw-r--r--  include/linux/dma-buf.h | 21
-rw-r--r--  include/linux/dma-contiguous.h | 2
-rw-r--r--  include/linux/dma-fence.h | 32
-rw-r--r--  include/linux/dma-mapping.h | 9
-rw-r--r--  include/linux/dma-noncoherent.h | 8
-rw-r--r--  include/linux/dma/pxa-dma.h | 9
-rw-r--r--  include/linux/efi.h | 15
-rw-r--r--  include/linux/etherdevice.h | 3
-rw-r--r--  include/linux/eventfd.h | 1
-rw-r--r--  include/linux/file.h | 8
-rw-r--r--  include/linux/filter.h | 112
-rw-r--r--  include/linux/fs.h | 42
-rw-r--r--  include/linux/fsl/guts.h | 1
-rw-r--r--  include/linux/fsl/ptp_qoriq.h | 44
-rw-r--r--  include/linux/ftrace.h | 2
-rw-r--r--  include/linux/fwnode.h | 2
-rw-r--r--  include/linux/genhd.h | 14
-rw-r--r--  include/linux/gpio.h | 2
-rw-r--r--  include/linux/gpio/aspeed.h | 15
-rw-r--r--  include/linux/gpio/consumer.h | 14
-rw-r--r--  include/linux/gpio/driver.h | 3
-rw-r--r--  include/linux/hid.h | 3
-rw-r--r--  include/linux/hwmon.h | 32
-rw-r--r--  include/linux/i2c.h | 11
-rw-r--r--  include/linux/idle_inject.h | 29
-rw-r--r--  include/linux/ieee80211.h | 437
-rw-r--r--  include/linux/if_bridge.h | 4
-rw-r--r--  include/linux/if_team.h | 18
-rw-r--r--  include/linux/igmp.h | 2
-rw-r--r--  include/linux/iio/buffer-dma.h | 2
-rw-r--r--  include/linux/ima.h | 11
-rw-r--r--  include/linux/inetdevice.h | 1
-rw-r--r--  include/linux/input/mt.h | 2
-rw-r--r--  include/linux/integrity.h | 13
-rw-r--r--  include/linux/intel-iommu.h | 1
-rw-r--r--  include/linux/iomap.h | 47
-rw-r--r--  include/linux/ipc.h | 2
-rw-r--r--  include/linux/ipc_namespace.h | 2
-rw-r--r--  include/linux/irq.h | 1
-rw-r--r--  include/linux/irqchip/arm-gic-v3.h | 3
-rw-r--r--  include/linux/irqdesc.h | 5
-rw-r--r--  include/linux/jump_label.h | 6
-rw-r--r--  include/linux/kernel.h | 2
-rw-r--r--  include/linux/kernfs.h | 28
-rw-r--r--  include/linux/kobject.h | 4
-rw-r--r--  include/linux/kprobes.h | 53
-rw-r--r--  include/linux/kthread.h | 1
-rw-r--r--  include/linux/ktime.h | 7
-rw-r--r--  include/linux/leds.h | 36
-rw-r--r--  include/linux/libata.h | 26
-rw-r--r--  include/linux/list.h | 30
-rw-r--r--  include/linux/lsm_hooks.h | 8
-rw-r--r--  include/linux/marvell_phy.h | 2
-rw-r--r--  include/linux/memblock.h | 76
-rw-r--r--  include/linux/memcontrol.h | 13
-rw-r--r--  include/linux/memory.h | 1
-rw-r--r--  include/linux/mlx4/device.h | 8
-rw-r--r--  include/linux/mlx5/device.h | 24
-rw-r--r--  include/linux/mlx5/driver.h | 30
-rw-r--r--  include/linux/mlx5/eswitch.h | 2
-rw-r--r--  include/linux/mlx5/fs.h | 6
-rw-r--r--  include/linux/mlx5/mlx5_ifc.h | 188
-rw-r--r--  include/linux/mlx5/mlx5_ifc_fpga.h | 1
-rw-r--r--  include/linux/mlx5/vport.h | 2
-rw-r--r--  include/linux/mm.h | 23
-rw-r--r--  include/linux/mm_types.h | 241
-rw-r--r--  include/linux/mod_devicetable.h | 1
-rw-r--r--  include/linux/mroute_base.h | 3
-rw-r--r--  include/linux/mtd/mtd.h | 8
-rw-r--r--  include/linux/mtd/rawnand.h | 126
-rw-r--r--  include/linux/mtd/spi-nor.h | 1
-rw-r--r--  include/linux/mtd/spinand.h | 421
-rw-r--r--  include/linux/net.h | 3
-rw-r--r--  include/linux/netdev_features.h | 2
-rw-r--r--  include/linux/netdevice.h | 235
-rw-r--r--  include/linux/netfilter.h | 37
-rw-r--r--  include/linux/netfilter/nfnetlink.h | 1
-rw-r--r--  include/linux/netfilter/nfnetlink_osf.h (renamed from include/linux/netfilter/nf_osf.h) | 23
-rw-r--r--  include/linux/netfilter_bridge.h | 11
-rw-r--r--  include/linux/netfilter_ipv4.h | 11
-rw-r--r--  include/linux/netfilter_ipv6.h | 5
-rw-r--r--  include/linux/netlink.h | 1
-rw-r--r--  include/linux/nfs_xdr.h | 2
-rw-r--r--  include/linux/nmi.h | 10
-rw-r--r--  include/linux/nvme.h | 72
-rw-r--r--  include/linux/openvswitch.h | 5
-rw-r--r--  include/linux/pci.h | 3
-rw-r--r--  include/linux/percpu_ida.h | 83
-rw-r--r--  include/linux/perf/arm_pmu.h | 11
-rw-r--r--  include/linux/perf_event.h | 3
-rw-r--r--  include/linux/phy.h | 12
-rw-r--r--  include/linux/phylink.h | 1
-rw-r--r--  include/linux/pinctrl/pinconf.h | 3
-rw-r--r--  include/linux/platform_data/bt-nokia-h4p.h | 38
-rw-r--r--  include/linux/platform_data/gpio-davinci.h | 3
-rw-r--r--  include/linux/platform_data/jz4740/jz4740_nand.h | 34
-rw-r--r--  include/linux/platform_data/media/sii9234.h | 24
-rw-r--r--  include/linux/platform_data/mmp_dma.h | 4
-rw-r--r--  include/linux/platform_data/mtd-orion_nand.h | 1
-rw-r--r--  include/linux/platform_data/txx9/ndfmc.h | 30
-rw-r--r--  include/linux/pm_domain.h | 21
-rw-r--r--  include/linux/poll.h | 12
-rw-r--r--  include/linux/posix-timers.h | 4
-rw-r--r--  include/linux/printk.h | 10
-rw-r--r--  include/linux/pti.h | 1
-rw-r--r--  include/linux/pxa2xx_ssp.h | 10
-rw-r--r--  include/linux/qcom_scm.h | 4
-rw-r--r--  include/linux/qed/qed_eth_if.h | 6
-rw-r--r--  include/linux/qed/qed_if.h | 15
-rw-r--r--  include/linux/random.h | 3
-rw-r--r--  include/linux/rculist.h | 19
-rw-r--r--  include/linux/rcupdate.h | 20
-rw-r--r--  include/linux/rcutiny.h | 2
-rw-r--r--  include/linux/reciprocal_div.h | 68
-rw-r--r--  include/linux/refcount.h | 38
-rw-r--r--  include/linux/regmap.h | 54
-rw-r--r--  include/linux/regulator/driver.h | 4
-rw-r--r--  include/linux/regulator/pfuze100.h | 11
-rw-r--r--  include/linux/rfkill.h | 20
-rw-r--r--  include/linux/rhashtable-types.h | 137
-rw-r--r--  include/linux/rhashtable.h | 164
-rw-r--r--  include/linux/ring_buffer.h | 1
-rw-r--r--  include/linux/rmi.h | 2
-rw-r--r--  include/linux/rtmutex.h | 7
-rw-r--r--  include/linux/sbitmap.h | 2
-rw-r--r--  include/linux/scatterlist.h | 18
-rw-r--r--  include/linux/sched.h | 38
-rw-r--r--  include/linux/sched/sysctl.h | 1
-rw-r--r--  include/linux/sched/task.h | 2
-rw-r--r--  include/linux/sched_clock.h | 5
-rw-r--r--  include/linux/sctp.h | 7
-rw-r--r--  include/linux/security.h | 32
-rw-r--r--  include/linux/sfp.h | 72
-rw-r--r--  include/linux/skbuff.h | 32
-rw-r--r--  include/linux/slub_def.h | 4
-rw-r--r--  include/linux/smpboot.h | 15
-rw-r--r--  include/linux/soc/qcom/llcc-qcom.h | 180
-rw-r--r--  include/linux/soc/samsung/exynos-regs-pmu.h | 8
-rw-r--r--  include/linux/spi/adi_spi3.h | 254
-rw-r--r--  include/linux/spi/spi-mem.h | 18
-rw-r--r--  include/linux/spi/spi_bitbang.h | 5
-rw-r--r--  include/linux/spinlock.h | 58
-rw-r--r--  include/linux/srcu.h | 17
-rw-r--r--  include/linux/ssb/ssb.h | 2
-rw-r--r--  include/linux/stmmac.h | 1
-rw-r--r--  include/linux/suspend.h | 2
-rw-r--r--  include/linux/swait.h | 36
-rw-r--r--  include/linux/swap.h | 11
-rw-r--r--  include/linux/swapfile.h | 2
-rw-r--r--  include/linux/syscalls.h | 26
-rw-r--r--  include/linux/sysfs.h | 14
-rw-r--r--  include/linux/t10-pi.h | 24
-rw-r--r--  include/linux/tcp.h | 18
-rw-r--r--  include/linux/time.h | 4
-rw-r--r--  include/linux/time64.h | 1
-rw-r--r--  include/linux/timekeeping.h | 20
-rw-r--r--  include/linux/torture.h | 4
-rw-r--r--  include/linux/tpm.h | 7
-rw-r--r--  include/linux/tracehook.h | 2
-rw-r--r--  include/linux/udp.h | 4
-rw-r--r--  include/linux/uio_driver.h | 2
-rw-r--r--  include/linux/usb/audio-v3.h | 19
-rw-r--r--  include/linux/vga_switcheroo.h | 8
-rw-r--r--  include/linux/virtio_config.h | 7
-rw-r--r--  include/linux/ww_mutex.h | 45
-rw-r--r--  include/media/cec-pin.h | 4
-rw-r--r--  include/media/cec.h | 12
-rw-r--r--  include/media/dvb_frontend.h | 49
-rw-r--r--  include/media/i2c/lm3560.h | 1
-rw-r--r--  include/media/v4l2-common.h | 2
-rw-r--r--  include/media/v4l2-ctrls.h | 4
-rw-r--r--  include/media/v4l2-ioctl.h | 15
-rw-r--r--  include/media/v4l2-mediabus.h | 2
-rw-r--r--  include/media/v4l2-mem2mem.h | 56
-rw-r--r--  include/media/vsp1.h | 2
-rw-r--r--  include/net/act_api.h | 31
-rw-r--r--  include/net/addrconf.h | 1
-rw-r--r--  include/net/af_ieee802154.h | 1
-rw-r--r--  include/net/af_rxrpc.h | 2
-rw-r--r--  include/net/af_vsock.h | 4
-rw-r--r--  include/net/bluetooth/bluetooth.h | 2
-rw-r--r--  include/net/bluetooth/hci.h | 224
-rw-r--r--  include/net/bluetooth/hci_core.h | 34
-rw-r--r--  include/net/bluetooth/mgmt.h | 55
-rw-r--r--  include/net/bond_3ad.h | 2
-rw-r--r--  include/net/bonding.h | 13
-rw-r--r--  include/net/busy_poll.h | 16
-rw-r--r--  include/net/cfg80211.h | 118
-rw-r--r--  include/net/dcbnl.h | 13
-rw-r--r--  include/net/devlink.h | 195
-rw-r--r--  include/net/dsa.h | 3
-rw-r--r--  include/net/dst.h | 14
-rw-r--r--  include/net/flow_dissector.h | 21
-rw-r--r--  include/net/gen_stats.h | 4
-rw-r--r--  include/net/ieee80211_radiotap.h | 123
-rw-r--r--  include/net/inet_common.h | 2
-rw-r--r--  include/net/inet_connection_sock.h | 6
-rw-r--r--  include/net/inet_frag.h | 11
-rw-r--r--  include/net/inet_sock.h | 9
-rw-r--r--  include/net/ip.h | 27
-rw-r--r--  include/net/ip6_fib.h | 10
-rw-r--r--  include/net/ip6_route.h | 6
-rw-r--r--  include/net/ip_tunnels.h | 8
-rw-r--r--  include/net/ip_vs.h | 18
-rw-r--r--  include/net/ipv6.h | 76
-rw-r--r--  include/net/ipv6_frag.h | 104
-rw-r--r--  include/net/iucv/af_iucv.h | 2
-rw-r--r--  include/net/lag.h | 17
-rw-r--r--  include/net/llc.h | 5
-rw-r--r--  include/net/mac80211.h | 64
-rw-r--r--  include/net/net_namespace.h | 11
-rw-r--r--  include/net/netevent.h | 1
-rw-r--r--  include/net/netfilter/ipv4/nf_conntrack_ipv4.h | 3
-rw-r--r--  include/net/netfilter/nf_conntrack.h | 7
-rw-r--r--  include/net/netfilter/nf_conntrack_core.h | 15
-rw-r--r--  include/net/netfilter/nf_conntrack_count.h | 37
-rw-r--r--  include/net/netfilter/nf_conntrack_helper.h | 4
-rw-r--r--  include/net/netfilter/nf_conntrack_l3proto.h | 84
-rw-r--r--  include/net/netfilter/nf_conntrack_l4proto.h | 16
-rw-r--r--  include/net/netfilter/nf_conntrack_timeout.h | 39
-rw-r--r--  include/net/netfilter/nf_flow_table.h | 2
-rw-r--r--  include/net/netfilter/nf_log.h | 3
-rw-r--r--  include/net/netfilter/nf_tables.h | 5
-rw-r--r--  include/net/netfilter/nf_tables_core.h | 13
-rw-r--r--  include/net/netfilter/nf_tproxy.h | 12
-rw-r--r--  include/net/netns/hash.h | 7
-rw-r--r--  include/net/netns/ipv4.h | 1
-rw-r--r--  include/net/netns/ipv6.h | 2
-rw-r--r--  include/net/netns/nftables.h | 1
-rw-r--r--  include/net/pkt_cls.h | 30
-rw-r--r--  include/net/pkt_sched.h | 7
-rw-r--r--  include/net/sch_generic.h | 70
-rw-r--r--  include/net/sctp/sctp.h | 3
-rw-r--r--  include/net/sctp/structs.h | 52
-rw-r--r--  include/net/seg6.h | 2
-rw-r--r--  include/net/seg6_hmac.h | 2
-rw-r--r--  include/net/seg6_local.h | 4
-rw-r--r--  include/net/smc.h | 65
-rw-r--r--  include/net/sock.h | 81
-rw-r--r--  include/net/sock_reuseport.h | 19
-rw-r--r--  include/net/tc_act/tc_csum.h | 1
-rw-r--r--  include/net/tc_act/tc_pedit.h | 1
-rw-r--r--  include/net/tc_act/tc_skbedit.h | 37
-rw-r--r--  include/net/tc_act/tc_tunnel_key.h | 1
-rw-r--r--  include/net/tcp.h | 74
-rw-r--r--  include/net/tls.h | 92
-rw-r--r--  include/net/transp_v6.h | 3
-rw-r--r--  include/net/udp.h | 6
-rw-r--r--  include/net/udp_tunnel.h | 6
-rw-r--r--  include/net/xdp.h | 20
-rw-r--r--  include/net/xdp_sock.h | 4
-rw-r--r--  include/net/xfrm.h | 61
-rw-r--r--  include/rdma/ib_verbs.h | 13
-rw-r--r--  include/scsi/libsas.h | 2
-rw-r--r--  include/scsi/scsi_cmnd.h | 13
-rw-r--r--  include/scsi/scsi_device.h | 14
-rw-r--r--  include/scsi/scsi_host.h | 1
-rw-r--r--  include/soc/qcom/rpmh.h | 51
-rw-r--r--  include/soc/qcom/tcs.h | 56
-rw-r--r--  include/sound/ac97/codec.h | 8
-rw-r--r--  include/sound/ac97/compat.h | 9
-rw-r--r--  include/sound/ac97/controller.h | 8
-rw-r--r--  include/sound/ac97/regs.h | 20
-rw-r--r--  include/sound/ac97_codec.h | 25
-rw-r--r--  include/sound/compress_driver.h | 21
-rw-r--r--  include/sound/dmaengine_pcm.h | 14
-rw-r--r--  include/sound/hda_component.h | 61
-rw-r--r--  include/sound/hda_i915.h | 37
-rw-r--r--  include/sound/hdaudio.h | 65
-rw-r--r--  include/sound/hdaudio_ext.h | 123
-rw-r--r--  include/sound/memalloc.h | 18
-rw-r--r--  include/sound/pcm.h | 7
-rw-r--r--  include/sound/pcm_params.h | 10
-rw-r--r--  include/sound/pxa2xx-lib.h | 13
-rw-r--r--  include/sound/rt5682.h | 40
-rw-r--r--  include/sound/sb16_csp.h | 2
-rw-r--r--  include/sound/seq_midi_event.h | 6
-rw-r--r--  include/sound/seq_virmidi.h | 3
-rw-r--r--  include/sound/sh_fsi.h | 13
-rw-r--r--  include/sound/simple_card.h | 7
-rw-r--r--  include/sound/simple_card_utils.h | 23
-rw-r--r--  include/sound/soc-acpi-intel-match.h | 19
-rw-r--r--  include/sound/soc-acpi.h | 13
-rw-r--r--  include/sound/soc-dai.h | 15
-rw-r--r--  include/sound/soc-dapm.h | 11
-rw-r--r--  include/sound/soc-dpcm.h | 7
-rw-r--r--  include/sound/soc-topology.h | 37
-rw-r--r--  include/sound/soc.h | 31
-rw-r--r--  include/target/iscsi/iscsi_target_core.h | 1
-rw-r--r--  include/target/target_core_backend.h | 6
-rw-r--r--  include/target/target_core_base.h | 16
-rw-r--r--  include/target/target_core_fabric.h | 10
-rw-r--r--  include/trace/events/btrfs.h | 3
-rw-r--r--  include/trace/events/clk.h | 36
-rw-r--r--  include/trace/events/fib.h | 2
-rw-r--r--  include/trace/events/filelock.h | 5
-rw-r--r--  include/trace/events/net.h | 7
-rw-r--r--  include/trace/events/power.h | 25
-rw-r--r--  include/trace/events/rcu.h | 112
-rw-r--r--  include/trace/events/rxrpc.h | 129
-rw-r--r--  include/trace/events/sock.h | 30
-rw-r--r--  include/uapi/asm-generic/socket.h | 3
-rw-r--r--  include/uapi/asm-generic/unistd.h | 4
-rw-r--r--  include/uapi/drm/amdgpu_drm.h | 27
-rw-r--r--  include/uapi/drm/drm.h | 9
-rw-r--r--  include/uapi/drm/drm_fourcc.h | 176
-rw-r--r--  include/uapi/drm/drm_mode.h | 8
-rw-r--r--  include/uapi/drm/vmwgfx_drm.h | 166
-rw-r--r--  include/uapi/linux/aio_abi.h | 6
-rw-r--r--  include/uapi/linux/audit.h | 4
-rw-r--r--  include/uapi/linux/bcache.h | 4
-rw-r--r--  include/uapi/linux/blkzoned.h | 2
-rw-r--r--  include/uapi/linux/bpf.h | 130
-rw-r--r--  include/uapi/linux/btf.h | 2
-rw-r--r--  include/uapi/linux/can.h | 2
-rw-r--r--  include/uapi/linux/cec.h | 2
-rw-r--r--  include/uapi/linux/dcbnl.h | 3
-rw-r--r--  include/uapi/linux/devlink.h | 42
-rw-r--r--  include/uapi/linux/dvb/audio.h | 37
-rw-r--r--  include/uapi/linux/dvb/video.h | 58
-rw-r--r--  include/uapi/linux/elf.h | 2
-rw-r--r--  include/uapi/linux/errqueue.h | 4
-rw-r--r--  include/uapi/linux/ethtool.h | 11
-rw-r--r--  include/uapi/linux/if_link.h | 17
-rw-r--r--  include/uapi/linux/ila.h | 1
-rw-r--r--  include/uapi/linux/ip.h | 1
-rw-r--r--  include/uapi/linux/kfd_ioctl.h | 33
-rw-r--r--  include/uapi/linux/kvm.h | 1
-rw-r--r--  include/uapi/linux/l2tp.h | 15
-rw-r--r--  include/uapi/linux/media-bus-format.h | 3
-rw-r--r--  include/uapi/linux/media.h | 46
-rw-r--r--  include/uapi/linux/mii.h | 1
-rw-r--r--  include/uapi/linux/mroute.h | 2
-rw-r--r--  include/uapi/linux/nbd.h | 3
-rw-r--r--  include/uapi/linux/net_tstamp.h | 18
-rw-r--r--  include/uapi/linux/netconf.h | 1
-rw-r--r--  include/uapi/linux/netfilter/nf_tables.h | 124
-rw-r--r--  include/uapi/linux/netfilter/nfnetlink_osf.h (renamed from include/uapi/linux/netfilter/nf_osf.h) | 32
-rw-r--r--  include/uapi/linux/netfilter/xt_osf.h | 22
-rw-r--r--  include/uapi/linux/netfilter_bridge.h | 11
-rw-r--r--  include/uapi/linux/nl80211.h | 102
-rw-r--r--  include/uapi/linux/openvswitch.h | 3
-rw-r--r--  include/uapi/linux/perf_event.h | 2
-rw-r--r--  include/uapi/linux/pkt_cls.h | 41
-rw-r--r--  include/uapi/linux/pkt_sched.h | 150
-rw-r--r--  include/uapi/linux/ppp-ioctl.h | 2
-rw-r--r--  include/uapi/linux/rds.h | 69
-rw-r--r--  include/uapi/linux/rseq.h | 102
-rw-r--r--  include/uapi/linux/rtnetlink.h | 7
-rw-r--r--  include/uapi/linux/sctp.h | 5
-rw-r--r--  include/uapi/linux/smc_diag.h | 25
-rw-r--r--  include/uapi/linux/snmp.h | 3
-rw-r--r--  include/uapi/linux/sysctl.h | 3
-rw-r--r--  include/uapi/linux/target_core_user.h | 4
-rw-r--r--  include/uapi/linux/tc_act/tc_pedit.h | 9
-rw-r--r--  include/uapi/linux/tc_act/tc_skbedit.h | 2
-rw-r--r--  include/uapi/linux/tc_act/tc_tunnel_key.h | 28
-rw-r--r--  include/uapi/linux/tcp.h | 14
-rw-r--r--  include/uapi/linux/time.h | 7
-rw-r--r--  include/uapi/linux/tipc_netlink.h | 14
-rw-r--r--  include/uapi/linux/types_32_64.h | 50
-rw-r--r--  include/uapi/linux/usb/audio.h | 49
-rw-r--r--  include/uapi/linux/uvcvideo.h | 2
-rw-r--r--  include/uapi/linux/v4l2-controls.h | 20
-rw-r--r--  include/uapi/linux/v4l2-subdev.h | 4
-rw-r--r--  include/uapi/linux/vhost.h | 18
-rw-r--r--  include/uapi/linux/videodev2.h | 8
-rw-r--r--  include/uapi/linux/xfrm.h | 5
-rw-r--r--  include/uapi/xen/gntdev.h | 106
-rw-r--r--  include/video/mipi_display.h | 3
-rw-r--r--  include/xen/grant_table.h | 21
-rw-r--r--  include/xen/mem-reservation.h | 59
-rw-r--r--  include/xen/xen.h | 6
493 files changed, 11827 insertions, 3541 deletions
diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h
index 48d84f0d9547..88072c92ace2 100644
--- a/include/acpi/acpixf.h
+++ b/include/acpi/acpixf.h
@@ -12,7 +12,7 @@
/* Current ACPICA subsystem version in YYYYMMDD format */
-#define ACPI_CA_VERSION 0x20180531
+#define ACPI_CA_VERSION 0x20180629
#include <acpi/acconfig.h>
#include <acpi/actypes.h>
diff --git a/include/acpi/ghes.h b/include/acpi/ghes.h
index 1624e2be485c..82cb4eb225a4 100644
--- a/include/acpi/ghes.h
+++ b/include/acpi/ghes.h
@@ -118,6 +118,10 @@ static inline void *acpi_hest_get_next(struct acpi_hest_generic_data *gdata)
(void *)section - (void *)(estatus + 1) < estatus->data_length; \
section = acpi_hest_get_next(section))
+#ifdef CONFIG_ACPI_APEI_SEA
int ghes_notify_sea(void);
+#else
+static inline int ghes_notify_sea(void) { return -ENOENT; }
+#endif
#endif /* GHES_H */
diff --git a/include/acpi/processor.h b/include/acpi/processor.h
index 40a916efd7c0..1194a4c78d55 100644
--- a/include/acpi/processor.h
+++ b/include/acpi/processor.h
@@ -309,7 +309,7 @@ static inline void acpi_processor_ppc_exit(void)
{
return;
}
-static inline int acpi_processor_ppc_has_changed(struct acpi_processor *pr,
+static inline void acpi_processor_ppc_has_changed(struct acpi_processor *pr,
int event_flag)
{
static unsigned int printout = 1;
@@ -320,7 +320,6 @@ static inline int acpi_processor_ppc_has_changed(struct acpi_processor *pr,
"Consider compiling CPUfreq support into your kernel.\n");
printout = 0;
}
- return 0;
}
static inline int acpi_processor_get_bios_limit(int cpu, unsigned int *limit)
{
diff --git a/include/asm-generic/atomic-instrumented.h b/include/asm-generic/atomic-instrumented.h
index ec07f23678ea..0d4b1d3dbc1e 100644
--- a/include/asm-generic/atomic-instrumented.h
+++ b/include/asm-generic/atomic-instrumented.h
@@ -84,42 +84,59 @@ static __always_inline bool atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 ne
}
#endif
-static __always_inline int __atomic_add_unless(atomic_t *v, int a, int u)
+#ifdef arch_atomic_fetch_add_unless
+#define atomic_fetch_add_unless atomic_fetch_add_unless
+static __always_inline int atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
kasan_check_write(v, sizeof(*v));
- return __arch_atomic_add_unless(v, a, u);
+ return arch_atomic_fetch_add_unless(v, a, u);
}
+#endif
-
-static __always_inline bool atomic64_add_unless(atomic64_t *v, s64 a, s64 u)
+#ifdef arch_atomic64_fetch_add_unless
+#define atomic64_fetch_add_unless atomic64_fetch_add_unless
+static __always_inline s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
{
kasan_check_write(v, sizeof(*v));
- return arch_atomic64_add_unless(v, a, u);
+ return arch_atomic64_fetch_add_unless(v, a, u);
}
+#endif
+#ifdef arch_atomic_inc
+#define atomic_inc atomic_inc
static __always_inline void atomic_inc(atomic_t *v)
{
kasan_check_write(v, sizeof(*v));
arch_atomic_inc(v);
}
+#endif
+#ifdef arch_atomic64_inc
+#define atomic64_inc atomic64_inc
static __always_inline void atomic64_inc(atomic64_t *v)
{
kasan_check_write(v, sizeof(*v));
arch_atomic64_inc(v);
}
+#endif
+#ifdef arch_atomic_dec
+#define atomic_dec atomic_dec
static __always_inline void atomic_dec(atomic_t *v)
{
kasan_check_write(v, sizeof(*v));
arch_atomic_dec(v);
}
+#endif
+#ifdef arch_atomic64_dec
+#define atomic64_dec atomic64_dec
static __always_inline void atomic64_dec(atomic64_t *v)
{
kasan_check_write(v, sizeof(*v));
arch_atomic64_dec(v);
}
+#endif
static __always_inline void atomic_add(int i, atomic_t *v)
{
@@ -181,65 +198,95 @@ static __always_inline void atomic64_xor(s64 i, atomic64_t *v)
arch_atomic64_xor(i, v);
}
+#ifdef arch_atomic_inc_return
+#define atomic_inc_return atomic_inc_return
static __always_inline int atomic_inc_return(atomic_t *v)
{
kasan_check_write(v, sizeof(*v));
return arch_atomic_inc_return(v);
}
+#endif
+#ifdef arch_atomic64_inc_return
+#define atomic64_inc_return atomic64_inc_return
static __always_inline s64 atomic64_inc_return(atomic64_t *v)
{
kasan_check_write(v, sizeof(*v));
return arch_atomic64_inc_return(v);
}
+#endif
+#ifdef arch_atomic_dec_return
+#define atomic_dec_return atomic_dec_return
static __always_inline int atomic_dec_return(atomic_t *v)
{
kasan_check_write(v, sizeof(*v));
return arch_atomic_dec_return(v);
}
+#endif
+#ifdef arch_atomic64_dec_return
+#define atomic64_dec_return atomic64_dec_return
static __always_inline s64 atomic64_dec_return(atomic64_t *v)
{
kasan_check_write(v, sizeof(*v));
return arch_atomic64_dec_return(v);
}
+#endif
-static __always_inline s64 atomic64_inc_not_zero(atomic64_t *v)
+#ifdef arch_atomic64_inc_not_zero
+#define atomic64_inc_not_zero atomic64_inc_not_zero
+static __always_inline bool atomic64_inc_not_zero(atomic64_t *v)
{
kasan_check_write(v, sizeof(*v));
return arch_atomic64_inc_not_zero(v);
}
+#endif
+#ifdef arch_atomic64_dec_if_positive
+#define atomic64_dec_if_positive atomic64_dec_if_positive
static __always_inline s64 atomic64_dec_if_positive(atomic64_t *v)
{
kasan_check_write(v, sizeof(*v));
return arch_atomic64_dec_if_positive(v);
}
+#endif
+#ifdef arch_atomic_dec_and_test
+#define atomic_dec_and_test atomic_dec_and_test
static __always_inline bool atomic_dec_and_test(atomic_t *v)
{
kasan_check_write(v, sizeof(*v));
return arch_atomic_dec_and_test(v);
}
+#endif
+#ifdef arch_atomic64_dec_and_test
+#define atomic64_dec_and_test atomic64_dec_and_test
static __always_inline bool atomic64_dec_and_test(atomic64_t *v)
{
kasan_check_write(v, sizeof(*v));
return arch_atomic64_dec_and_test(v);
}
+#endif
+#ifdef arch_atomic_inc_and_test
+#define atomic_inc_and_test atomic_inc_and_test
static __always_inline bool atomic_inc_and_test(atomic_t *v)
{
kasan_check_write(v, sizeof(*v));
return arch_atomic_inc_and_test(v);
}
+#endif
+#ifdef arch_atomic64_inc_and_test
+#define atomic64_inc_and_test atomic64_inc_and_test
static __always_inline bool atomic64_inc_and_test(atomic64_t *v)
{
kasan_check_write(v, sizeof(*v));
return arch_atomic64_inc_and_test(v);
}
+#endif
static __always_inline int atomic_add_return(int i, atomic_t *v)
{
@@ -325,152 +372,96 @@ static __always_inline s64 atomic64_fetch_xor(s64 i, atomic64_t *v)
return arch_atomic64_fetch_xor(i, v);
}
+#ifdef arch_atomic_sub_and_test
+#define atomic_sub_and_test atomic_sub_and_test
static __always_inline bool atomic_sub_and_test(int i, atomic_t *v)
{
kasan_check_write(v, sizeof(*v));
return arch_atomic_sub_and_test(i, v);
}
+#endif
+#ifdef arch_atomic64_sub_and_test
+#define atomic64_sub_and_test atomic64_sub_and_test
static __always_inline bool atomic64_sub_and_test(s64 i, atomic64_t *v)
{
kasan_check_write(v, sizeof(*v));
return arch_atomic64_sub_and_test(i, v);
}
+#endif
+#ifdef arch_atomic_add_negative
+#define atomic_add_negative atomic_add_negative
static __always_inline bool atomic_add_negative(int i, atomic_t *v)
{
kasan_check_write(v, sizeof(*v));
return arch_atomic_add_negative(i, v);
}
+#endif
+#ifdef arch_atomic64_add_negative
+#define atomic64_add_negative atomic64_add_negative
static __always_inline bool atomic64_add_negative(s64 i, atomic64_t *v)
{
kasan_check_write(v, sizeof(*v));
return arch_atomic64_add_negative(i, v);
}
+#endif
-static __always_inline unsigned long
-cmpxchg_size(volatile void *ptr, unsigned long old, unsigned long new, int size)
-{
- kasan_check_write(ptr, size);
- switch (size) {
- case 1:
- return arch_cmpxchg((u8 *)ptr, (u8)old, (u8)new);
- case 2:
- return arch_cmpxchg((u16 *)ptr, (u16)old, (u16)new);
- case 4:
- return arch_cmpxchg((u32 *)ptr, (u32)old, (u32)new);
- case 8:
- BUILD_BUG_ON(sizeof(unsigned long) != 8);
- return arch_cmpxchg((u64 *)ptr, (u64)old, (u64)new);
- }
- BUILD_BUG();
- return 0;
-}
+#define xchg(ptr, new) \
+({ \
+ typeof(ptr) __ai_ptr = (ptr); \
+ kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ arch_xchg(__ai_ptr, (new)); \
+})
#define cmpxchg(ptr, old, new) \
({ \
- ((__typeof__(*(ptr)))cmpxchg_size((ptr), (unsigned long)(old), \
- (unsigned long)(new), sizeof(*(ptr)))); \
+ typeof(ptr) __ai_ptr = (ptr); \
+ kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ arch_cmpxchg(__ai_ptr, (old), (new)); \
})
-static __always_inline unsigned long
-sync_cmpxchg_size(volatile void *ptr, unsigned long old, unsigned long new,
- int size)
-{
- kasan_check_write(ptr, size);
- switch (size) {
- case 1:
- return arch_sync_cmpxchg((u8 *)ptr, (u8)old, (u8)new);
- case 2:
- return arch_sync_cmpxchg((u16 *)ptr, (u16)old, (u16)new);
- case 4:
- return arch_sync_cmpxchg((u32 *)ptr, (u32)old, (u32)new);
- case 8:
- BUILD_BUG_ON(sizeof(unsigned long) != 8);
- return arch_sync_cmpxchg((u64 *)ptr, (u64)old, (u64)new);
- }
- BUILD_BUG();
- return 0;
-}
-
#define sync_cmpxchg(ptr, old, new) \
({ \
- ((__typeof__(*(ptr)))sync_cmpxchg_size((ptr), \
- (unsigned long)(old), (unsigned long)(new), \
- sizeof(*(ptr)))); \
+ typeof(ptr) __ai_ptr = (ptr); \
+ kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ arch_sync_cmpxchg(__ai_ptr, (old), (new)); \
})
-static __always_inline unsigned long
-cmpxchg_local_size(volatile void *ptr, unsigned long old, unsigned long new,
- int size)
-{
- kasan_check_write(ptr, size);
- switch (size) {
- case 1:
- return arch_cmpxchg_local((u8 *)ptr, (u8)old, (u8)new);
- case 2:
- return arch_cmpxchg_local((u16 *)ptr, (u16)old, (u16)new);
- case 4:
- return arch_cmpxchg_local((u32 *)ptr, (u32)old, (u32)new);
- case 8:
- BUILD_BUG_ON(sizeof(unsigned long) != 8);
- return arch_cmpxchg_local((u64 *)ptr, (u64)old, (u64)new);
- }
- BUILD_BUG();
- return 0;
-}
-
#define cmpxchg_local(ptr, old, new) \
({ \
- ((__typeof__(*(ptr)))cmpxchg_local_size((ptr), \
- (unsigned long)(old), (unsigned long)(new), \
- sizeof(*(ptr)))); \
+ typeof(ptr) __ai_ptr = (ptr); \
+ kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ arch_cmpxchg_local(__ai_ptr, (old), (new)); \
})
-static __always_inline u64
-cmpxchg64_size(volatile u64 *ptr, u64 old, u64 new)
-{
- kasan_check_write(ptr, sizeof(*ptr));
- return arch_cmpxchg64(ptr, old, new);
-}
-
#define cmpxchg64(ptr, old, new) \
({ \
- ((__typeof__(*(ptr)))cmpxchg64_size((ptr), (u64)(old), \
- (u64)(new))); \
+ typeof(ptr) __ai_ptr = (ptr); \
+ kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ arch_cmpxchg64(__ai_ptr, (old), (new)); \
})
-static __always_inline u64
-cmpxchg64_local_size(volatile u64 *ptr, u64 old, u64 new)
-{
- kasan_check_write(ptr, sizeof(*ptr));
- return arch_cmpxchg64_local(ptr, old, new);
-}
-
#define cmpxchg64_local(ptr, old, new) \
({ \
- ((__typeof__(*(ptr)))cmpxchg64_local_size((ptr), (u64)(old), \
- (u64)(new))); \
+ typeof(ptr) __ai_ptr = (ptr); \
+ kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ arch_cmpxchg64_local(__ai_ptr, (old), (new)); \
})
-/*
- * Originally we had the following code here:
- * __typeof__(p1) ____p1 = (p1);
- * kasan_check_write(____p1, 2 * sizeof(*____p1));
- * arch_cmpxchg_double(____p1, (p2), (o1), (o2), (n1), (n2));
- * But it leads to compilation failures (see gcc issue 72873).
- * So for now it's left non-instrumented.
- * There are few callers of cmpxchg_double(), so it's not critical.
- */
#define cmpxchg_double(p1, p2, o1, o2, n1, n2) \
({ \
- arch_cmpxchg_double((p1), (p2), (o1), (o2), (n1), (n2)); \
+ typeof(p1) __ai_p1 = (p1); \
+ kasan_check_write(__ai_p1, 2 * sizeof(*__ai_p1)); \
+ arch_cmpxchg_double(__ai_p1, (p2), (o1), (o2), (n1), (n2)); \
})
-#define cmpxchg_double_local(p1, p2, o1, o2, n1, n2) \
-({ \
- arch_cmpxchg_double_local((p1), (p2), (o1), (o2), (n1), (n2)); \
+#define cmpxchg_double_local(p1, p2, o1, o2, n1, n2) \
+({ \
+ typeof(p1) __ai_p1 = (p1); \
+ kasan_check_write(__ai_p1, 2 * sizeof(*__ai_p1)); \
+ arch_cmpxchg_double_local(__ai_p1, (p2), (o1), (o2), (n1), (n2)); \
})
#endif /* _LINUX_ATOMIC_INSTRUMENTED_H */
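Every optional wrapper in the hunks above follows the same shape: the public op is only defined when the architecture supplies the matching arch_* op, the #define tells linux/atomic.h not to generate its own fallback, and the body checks the target memory with KASAN before deferring to the arch implementation. A minimal sketch of that shape, with arch_atomic_foo standing in as a made-up placeholder for any architecture-provided op:

#ifdef arch_atomic_foo				/* does the arch provide it? */
#define atomic_foo atomic_foo			/* suppress the generic fallback */
static __always_inline void atomic_foo(atomic_t *v)
{
	kasan_check_write(v, sizeof(*v));	/* report bad accesses to *v */
	arch_atomic_foo(v);			/* defer to the arch op */
}
#endif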
diff --git a/include/asm-generic/atomic.h b/include/asm-generic/atomic.h
index abe6dd9ca2a8..13324aa828eb 100644
--- a/include/asm-generic/atomic.h
+++ b/include/asm-generic/atomic.h
@@ -186,11 +186,6 @@ ATOMIC_OP(xor, ^)
#include <linux/irqflags.h>
-static inline int atomic_add_negative(int i, atomic_t *v)
-{
- return atomic_add_return(i, v) < 0;
-}
-
static inline void atomic_add(int i, atomic_t *v)
{
atomic_add_return(i, v);
@@ -201,35 +196,7 @@ static inline void atomic_sub(int i, atomic_t *v)
atomic_sub_return(i, v);
}
-static inline void atomic_inc(atomic_t *v)
-{
- atomic_add_return(1, v);
-}
-
-static inline void atomic_dec(atomic_t *v)
-{
- atomic_sub_return(1, v);
-}
-
-#define atomic_dec_return(v) atomic_sub_return(1, (v))
-#define atomic_inc_return(v) atomic_add_return(1, (v))
-
-#define atomic_sub_and_test(i, v) (atomic_sub_return((i), (v)) == 0)
-#define atomic_dec_and_test(v) (atomic_dec_return(v) == 0)
-#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
-
#define atomic_xchg(ptr, v) (xchg(&(ptr)->counter, (v)))
#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), (old), (new)))
-#ifndef __atomic_add_unless
-static inline int __atomic_add_unless(atomic_t *v, int a, int u)
-{
- int c, old;
- c = atomic_read(v);
- while (c != u && (old = atomic_cmpxchg(v, c, c + a)) != c)
- c = old;
- return c;
-}
-#endif
-
#endif /* __ASM_GENERIC_ATOMIC_H */
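The removed __atomic_add_unless() keeps its return-the-old-value semantics under the new name atomic_fetch_add_unless(), which linux/atomic.h now uses as the building block for the conditional inc/dec helpers. A short caller-side sketch of that semantic (illustrative names, not code from this commit), the classic get-unless-zero reference grab:

/* Succeeds only while the count is non-zero; the op returns the value seen
 * before the (possibly skipped) increment, so 0 means "already dead". */
static inline bool example_get_unless_zero(atomic_t *count)
{
	return atomic_fetch_add_unless(count, 1, 0) != 0;
}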
diff --git a/include/asm-generic/atomic64.h b/include/asm-generic/atomic64.h
index 8d28eb010d0d..97b28b7f1f29 100644
--- a/include/asm-generic/atomic64.h
+++ b/include/asm-generic/atomic64.h
@@ -11,6 +11,7 @@
*/
#ifndef _ASM_GENERIC_ATOMIC64_H
#define _ASM_GENERIC_ATOMIC64_H
+#include <linux/types.h>
typedef struct {
long long counter;
@@ -50,18 +51,10 @@ ATOMIC64_OPS(xor)
#undef ATOMIC64_OP
extern long long atomic64_dec_if_positive(atomic64_t *v);
+#define atomic64_dec_if_positive atomic64_dec_if_positive
extern long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n);
extern long long atomic64_xchg(atomic64_t *v, long long new);
-extern int atomic64_add_unless(atomic64_t *v, long long a, long long u);
-
-#define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)
-#define atomic64_inc(v) atomic64_add(1LL, (v))
-#define atomic64_inc_return(v) atomic64_add_return(1LL, (v))
-#define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
-#define atomic64_sub_and_test(a, v) (atomic64_sub_return((a), (v)) == 0)
-#define atomic64_dec(v) atomic64_sub(1LL, (v))
-#define atomic64_dec_return(v) atomic64_sub_return(1LL, (v))
-#define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
-#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
+extern long long atomic64_fetch_add_unless(atomic64_t *v, long long a, long long u);
+#define atomic64_fetch_add_unless atomic64_fetch_add_unless
#endif /* _ASM_GENERIC_ATOMIC64_H */
diff --git a/include/asm-generic/bitops/atomic.h b/include/asm-generic/bitops/atomic.h
index 04deffaf5f7d..dd90c9792909 100644
--- a/include/asm-generic/bitops/atomic.h
+++ b/include/asm-generic/bitops/atomic.h
@@ -2,189 +2,67 @@
#ifndef _ASM_GENERIC_BITOPS_ATOMIC_H_
#define _ASM_GENERIC_BITOPS_ATOMIC_H_
-#include <asm/types.h>
-#include <linux/irqflags.h>
-
-#ifdef CONFIG_SMP
-#include <asm/spinlock.h>
-#include <asm/cache.h> /* we use L1_CACHE_BYTES */
-
-/* Use an array of spinlocks for our atomic_ts.
- * Hash function to index into a different SPINLOCK.
- * Since "a" is usually an address, use one spinlock per cacheline.
- */
-# define ATOMIC_HASH_SIZE 4
-# define ATOMIC_HASH(a) (&(__atomic_hash[ (((unsigned long) a)/L1_CACHE_BYTES) & (ATOMIC_HASH_SIZE-1) ]))
-
-extern arch_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned;
-
-/* Can't use raw_spin_lock_irq because of #include problems, so
- * this is the substitute */
-#define _atomic_spin_lock_irqsave(l,f) do { \
- arch_spinlock_t *s = ATOMIC_HASH(l); \
- local_irq_save(f); \
- arch_spin_lock(s); \
-} while(0)
-
-#define _atomic_spin_unlock_irqrestore(l,f) do { \
- arch_spinlock_t *s = ATOMIC_HASH(l); \
- arch_spin_unlock(s); \
- local_irq_restore(f); \
-} while(0)
-
-
-#else
-# define _atomic_spin_lock_irqsave(l,f) do { local_irq_save(f); } while (0)
-# define _atomic_spin_unlock_irqrestore(l,f) do { local_irq_restore(f); } while (0)
-#endif
+#include <linux/atomic.h>
+#include <linux/compiler.h>
+#include <asm/barrier.h>
/*
- * NMI events can occur at any time, including when interrupts have been
- * disabled by *_irqsave(). So you can get NMI events occurring while a
- * *_bit function is holding a spin lock. If the NMI handler also wants
- * to do bit manipulation (and they do) then you can get a deadlock
- * between the original caller of *_bit() and the NMI handler.
- *
- * by Keith Owens
+ * Implementation of atomic bitops using atomic-fetch ops.
+ * See Documentation/atomic_bitops.txt for details.
*/
-/**
- * set_bit - Atomically set a bit in memory
- * @nr: the bit to set
- * @addr: the address to start counting from
- *
- * This function is atomic and may not be reordered. See __set_bit()
- * if you do not require the atomic guarantees.
- *
- * Note: there are no guarantees that this function will not be reordered
- * on non x86 architectures, so if you are writing portable code,
- * make sure not to rely on its reordering guarantees.
- *
- * Note that @nr may be almost arbitrarily large; this function is not
- * restricted to acting on a single-word quantity.
- */
-static inline void set_bit(int nr, volatile unsigned long *addr)
+static inline void set_bit(unsigned int nr, volatile unsigned long *p)
{
- unsigned long mask = BIT_MASK(nr);
- unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
- unsigned long flags;
-
- _atomic_spin_lock_irqsave(p, flags);
- *p |= mask;
- _atomic_spin_unlock_irqrestore(p, flags);
+ p += BIT_WORD(nr);
+ atomic_long_or(BIT_MASK(nr), (atomic_long_t *)p);
}
-/**
- * clear_bit - Clears a bit in memory
- * @nr: Bit to clear
- * @addr: Address to start counting from
- *
- * clear_bit() is atomic and may not be reordered. However, it does
- * not contain a memory barrier, so if it is used for locking purposes,
- * you should call smp_mb__before_atomic() and/or smp_mb__after_atomic()
- * in order to ensure changes are visible on other processors.
- */
-static inline void clear_bit(int nr, volatile unsigned long *addr)
+static inline void clear_bit(unsigned int nr, volatile unsigned long *p)
{
- unsigned long mask = BIT_MASK(nr);
- unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
- unsigned long flags;
-
- _atomic_spin_lock_irqsave(p, flags);
- *p &= ~mask;
- _atomic_spin_unlock_irqrestore(p, flags);
+ p += BIT_WORD(nr);
+ atomic_long_andnot(BIT_MASK(nr), (atomic_long_t *)p);
}
-/**
- * change_bit - Toggle a bit in memory
- * @nr: Bit to change
- * @addr: Address to start counting from
- *
- * change_bit() is atomic and may not be reordered. It may be
- * reordered on other architectures than x86.
- * Note that @nr may be almost arbitrarily large; this function is not
- * restricted to acting on a single-word quantity.
- */
-static inline void change_bit(int nr, volatile unsigned long *addr)
+static inline void change_bit(unsigned int nr, volatile unsigned long *p)
{
- unsigned long mask = BIT_MASK(nr);
- unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
- unsigned long flags;
-
- _atomic_spin_lock_irqsave(p, flags);
- *p ^= mask;
- _atomic_spin_unlock_irqrestore(p, flags);
+ p += BIT_WORD(nr);
+ atomic_long_xor(BIT_MASK(nr), (atomic_long_t *)p);
}
-/**
- * test_and_set_bit - Set a bit and return its old value
- * @nr: Bit to set
- * @addr: Address to count from
- *
- * This operation is atomic and cannot be reordered.
- * It may be reordered on other architectures than x86.
- * It also implies a memory barrier.
- */
-static inline int test_and_set_bit(int nr, volatile unsigned long *addr)
+static inline int test_and_set_bit(unsigned int nr, volatile unsigned long *p)
{
+ long old;
unsigned long mask = BIT_MASK(nr);
- unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
- unsigned long old;
- unsigned long flags;
- _atomic_spin_lock_irqsave(p, flags);
- old = *p;
- *p = old | mask;
- _atomic_spin_unlock_irqrestore(p, flags);
+ p += BIT_WORD(nr);
+ if (READ_ONCE(*p) & mask)
+ return 1;
- return (old & mask) != 0;
+ old = atomic_long_fetch_or(mask, (atomic_long_t *)p);
+ return !!(old & mask);
}
-/**
- * test_and_clear_bit - Clear a bit and return its old value
- * @nr: Bit to clear
- * @addr: Address to count from
- *
- * This operation is atomic and cannot be reordered.
- * It can be reorderdered on other architectures other than x86.
- * It also implies a memory barrier.
- */
-static inline int test_and_clear_bit(int nr, volatile unsigned long *addr)
+static inline int test_and_clear_bit(unsigned int nr, volatile unsigned long *p)
{
+ long old;
unsigned long mask = BIT_MASK(nr);
- unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
- unsigned long old;
- unsigned long flags;
- _atomic_spin_lock_irqsave(p, flags);
- old = *p;
- *p = old & ~mask;
- _atomic_spin_unlock_irqrestore(p, flags);
+ p += BIT_WORD(nr);
+ if (!(READ_ONCE(*p) & mask))
+ return 0;
- return (old & mask) != 0;
+ old = atomic_long_fetch_andnot(mask, (atomic_long_t *)p);
+ return !!(old & mask);
}
-/**
- * test_and_change_bit - Change a bit and return its old value
- * @nr: Bit to change
- * @addr: Address to count from
- *
- * This operation is atomic and cannot be reordered.
- * It also implies a memory barrier.
- */
-static inline int test_and_change_bit(int nr, volatile unsigned long *addr)
+static inline int test_and_change_bit(unsigned int nr, volatile unsigned long *p)
{
+ long old;
unsigned long mask = BIT_MASK(nr);
- unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
- unsigned long old;
- unsigned long flags;
-
- _atomic_spin_lock_irqsave(p, flags);
- old = *p;
- *p = old ^ mask;
- _atomic_spin_unlock_irqrestore(p, flags);
- return (old & mask) != 0;
+ p += BIT_WORD(nr);
+ old = atomic_long_fetch_xor(mask, (atomic_long_t *)p);
+ return !!(old & mask);
}
#endif /* _ASM_GENERIC_BITOPS_ATOMIC_H */
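All of the rewritten helpers reduce the bit number to a word offset (BIT_WORD) plus a mask (BIT_MASK) and hand the containing word to an atomic_long_*() fetch op, which is why the old spinlock hash table disappears. A small caller-side sketch of the usual test_and_set_bit() idiom (names invented for illustration):

#define EXAMPLE_BIT_BUSY	0		/* illustrative bit index */

static unsigned long example_flags;		/* all bits live in one word */

static bool example_try_claim(void)
{
	/* returns the old bit value, so 0 means this caller set it first */
	return !test_and_set_bit(EXAMPLE_BIT_BUSY, &example_flags);
}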
diff --git a/include/asm-generic/bitops/lock.h b/include/asm-generic/bitops/lock.h
index 67ab280ad134..3ae021368f48 100644
--- a/include/asm-generic/bitops/lock.h
+++ b/include/asm-generic/bitops/lock.h
@@ -2,6 +2,10 @@
#ifndef _ASM_GENERIC_BITOPS_LOCK_H_
#define _ASM_GENERIC_BITOPS_LOCK_H_
+#include <linux/atomic.h>
+#include <linux/compiler.h>
+#include <asm/barrier.h>
+
/**
* test_and_set_bit_lock - Set a bit and return its old value, for lock
* @nr: Bit to set
@@ -11,7 +15,20 @@
* the returned value is 0.
* It can be used to implement bit locks.
*/
-#define test_and_set_bit_lock(nr, addr) test_and_set_bit(nr, addr)
+static inline int test_and_set_bit_lock(unsigned int nr,
+ volatile unsigned long *p)
+{
+ long old;
+ unsigned long mask = BIT_MASK(nr);
+
+ p += BIT_WORD(nr);
+ if (READ_ONCE(*p) & mask)
+ return 1;
+
+ old = atomic_long_fetch_or_acquire(mask, (atomic_long_t *)p);
+ return !!(old & mask);
+}
+
/**
* clear_bit_unlock - Clear a bit in memory, for unlock
@@ -20,11 +37,11 @@
*
* This operation is atomic and provides release barrier semantics.
*/
-#define clear_bit_unlock(nr, addr) \
-do { \
- smp_mb__before_atomic(); \
- clear_bit(nr, addr); \
-} while (0)
+static inline void clear_bit_unlock(unsigned int nr, volatile unsigned long *p)
+{
+ p += BIT_WORD(nr);
+ atomic_long_fetch_andnot_release(BIT_MASK(nr), (atomic_long_t *)p);
+}
/**
* __clear_bit_unlock - Clear a bit in memory, for unlock
@@ -37,11 +54,38 @@ do { \
*
* See for example x86's implementation.
*/
-#define __clear_bit_unlock(nr, addr) \
-do { \
- smp_mb__before_atomic(); \
- clear_bit(nr, addr); \
-} while (0)
+static inline void __clear_bit_unlock(unsigned int nr,
+ volatile unsigned long *p)
+{
+ unsigned long old;
-#endif /* _ASM_GENERIC_BITOPS_LOCK_H_ */
+ p += BIT_WORD(nr);
+ old = READ_ONCE(*p);
+ old &= ~BIT_MASK(nr);
+ atomic_long_set_release((atomic_long_t *)p, old);
+}
+
+/**
+ * clear_bit_unlock_is_negative_byte - Clear a bit in memory and test if bottom
+ * byte is negative, for unlock.
+ * @nr: the bit to clear
+ * @addr: the address to start counting from
+ *
+ * This is a bit of a one-trick-pony for the filemap code, which clears
+ * PG_locked and tests PG_waiters,
+ */
+#ifndef clear_bit_unlock_is_negative_byte
+static inline bool clear_bit_unlock_is_negative_byte(unsigned int nr,
+ volatile unsigned long *p)
+{
+ long old;
+ unsigned long mask = BIT_MASK(nr);
+
+ p += BIT_WORD(nr);
+ old = atomic_long_fetch_andnot_release(mask, (atomic_long_t *)p);
+ return !!(old & BIT(7));
+}
+#define clear_bit_unlock_is_negative_byte clear_bit_unlock_is_negative_byte
+#endif
+#endif /* _ASM_GENERIC_BITOPS_LOCK_H_ */
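test_and_set_bit_lock() and clear_bit_unlock() now map directly onto the acquire and release flavours of the atomic_long fetch ops, which is exactly the pair a bit-granular lock needs. A hedged sketch of that usage (helper names invented; this is essentially what bit_spin_lock() does, minus its preemption handling):

static inline void example_bit_lock(unsigned int nr, volatile unsigned long *word)
{
	while (test_and_set_bit_lock(nr, word))	/* acquire semantics on success */
		cpu_relax();			/* spin until the owner releases */
}

static inline void example_bit_unlock(unsigned int nr, volatile unsigned long *word)
{
	clear_bit_unlock(nr, word);		/* release semantics */
}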
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index f59639afaa39..a75cb371cd19 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -1019,8 +1019,8 @@ int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot);
int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot);
int pud_clear_huge(pud_t *pud);
int pmd_clear_huge(pmd_t *pmd);
-int pud_free_pmd_page(pud_t *pud);
-int pmd_free_pte_page(pmd_t *pmd);
+int pud_free_pmd_page(pud_t *pud, unsigned long addr);
+int pmd_free_pte_page(pmd_t *pmd, unsigned long addr);
#else /* !CONFIG_HAVE_ARCH_HUGE_VMAP */
static inline int p4d_set_huge(p4d_t *p4d, phys_addr_t addr, pgprot_t prot)
{
@@ -1046,11 +1046,11 @@ static inline int pmd_clear_huge(pmd_t *pmd)
{
return 0;
}
-static inline int pud_free_pmd_page(pud_t *pud)
+static inline int pud_free_pmd_page(pud_t *pud, unsigned long addr)
{
return 0;
}
-static inline int pmd_free_pte_page(pmd_t *pmd)
+static inline int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
{
return 0;
}
@@ -1083,6 +1083,18 @@ int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
static inline void init_espfix_bsp(void) { }
#endif
+#ifndef __HAVE_ARCH_PFN_MODIFY_ALLOWED
+static inline bool pfn_modify_allowed(unsigned long pfn, pgprot_t prot)
+{
+ return true;
+}
+
+static inline bool arch_has_pfn_modify_check(void)
+{
+ return false;
+}
+#endif /* !_HAVE_ARCH_PFN_MODIFY_ALLOWED */
+
#endif /* !__ASSEMBLY__ */
#ifndef io_remap_pfn_range
diff --git a/include/asm-generic/qspinlock_types.h b/include/asm-generic/qspinlock_types.h
index 0763f065b975..d10f1e7d6ba8 100644
--- a/include/asm-generic/qspinlock_types.h
+++ b/include/asm-generic/qspinlock_types.h
@@ -63,7 +63,7 @@ typedef struct qspinlock {
/*
* Initializier
*/
-#define __ARCH_SPIN_LOCK_UNLOCKED { .val = ATOMIC_INIT(0) }
+#define __ARCH_SPIN_LOCK_UNLOCKED { { .val = ATOMIC_INIT(0) } }
/*
* Bitfields in the atomic value:
diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h
index faddde44de8c..e811ef7b8350 100644
--- a/include/asm-generic/tlb.h
+++ b/include/asm-generic/tlb.h
@@ -265,34 +265,52 @@ static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
* For now w.r.t page table cache, mark the range_size as PAGE_SIZE
*/
+#ifndef pte_free_tlb
#define pte_free_tlb(tlb, ptep, address) \
do { \
__tlb_adjust_range(tlb, address, PAGE_SIZE); \
__pte_free_tlb(tlb, ptep, address); \
} while (0)
+#endif
+#ifndef pmd_free_tlb
#define pmd_free_tlb(tlb, pmdp, address) \
do { \
__tlb_adjust_range(tlb, address, PAGE_SIZE); \
__pmd_free_tlb(tlb, pmdp, address); \
} while (0)
+#endif
#ifndef __ARCH_HAS_4LEVEL_HACK
+#ifndef pud_free_tlb
#define pud_free_tlb(tlb, pudp, address) \
do { \
__tlb_adjust_range(tlb, address, PAGE_SIZE); \
__pud_free_tlb(tlb, pudp, address); \
} while (0)
#endif
+#endif
#ifndef __ARCH_HAS_5LEVEL_HACK
+#ifndef p4d_free_tlb
#define p4d_free_tlb(tlb, pudp, address) \
do { \
__tlb_adjust_range(tlb, address, PAGE_SIZE); \
__p4d_free_tlb(tlb, pudp, address); \
} while (0)
#endif
+#endif
#define tlb_migrate_finish(mm) do {} while (0)
+/*
+ * Used to flush the TLB when page tables are removed, when lazy
+ * TLB mode may cause a CPU to retain intermediate translations
+ * pointing to about-to-be-freed page table memory.
+ */
+#ifndef HAVE_TLB_FLUSH_REMOVE_TABLES
+#define tlb_flush_remove_tables(mm) do {} while (0)
+#define tlb_flush_remove_tables_local(mm) do {} while (0)
+#endif
+
#endif /* _ASM_GENERIC__TLB_H */
diff --git a/include/crypto/dh.h b/include/crypto/dh.h
index 71e1bb24d79f..7e0dad94cb2b 100644
--- a/include/crypto/dh.h
+++ b/include/crypto/dh.h
@@ -29,17 +29,21 @@
*
* @key: Private DH key
* @p: Diffie-Hellman parameter P
+ * @q: Diffie-Hellman parameter Q
* @g: Diffie-Hellman generator G
* @key_size: Size of the private DH key
* @p_size: Size of DH parameter P
+ * @q_size: Size of DH parameter Q
* @g_size: Size of DH generator G
*/
struct dh {
void *key;
void *p;
+ void *q;
void *g;
unsigned int key_size;
unsigned int p_size;
+ unsigned int q_size;
unsigned int g_size;
};
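The new q/q_size members are additive: a caller that has no subgroup order can leave them NULL/0 and the parameter set looks as it did before this change. A hedged sketch of filling the extended structure (the helper and its arguments are placeholders, not from the patch):

static void example_fill_dh(struct dh *params,
			    void *key, unsigned int key_size,
			    void *p, unsigned int p_size,
			    void *q, unsigned int q_size,	/* may be NULL/0 */
			    void *g, unsigned int g_size)
{
	params->key      = key;
	params->key_size = key_size;
	params->p        = p;
	params->p_size   = p_size;
	params->q        = q;		/* new field added here */
	params->q_size   = q_size;	/* new field added here */
	params->g        = g;
	params->g_size   = g_size;
}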
diff --git a/include/crypto/drbg.h b/include/crypto/drbg.h
index 8f941102af36..3fb581bf3b87 100644
--- a/include/crypto/drbg.h
+++ b/include/crypto/drbg.h
@@ -122,11 +122,10 @@ struct drbg_state {
struct crypto_skcipher *ctr_handle; /* CTR mode cipher handle */
struct skcipher_request *ctr_req; /* CTR mode request handle */
- __u8 *ctr_null_value_buf; /* CTR mode unaligned buffer */
- __u8 *ctr_null_value; /* CTR mode aligned zero buf */
__u8 *outscratchpadbuf; /* CTR mode output scratchpad */
__u8 *outscratchpad; /* CTR mode aligned outbuf */
struct crypto_wait ctr_wait; /* CTR mode async wait obj */
+ struct scatterlist sg_in, sg_out; /* CTR mode SGLs */
bool seeded; /* DRBG fully seeded? */
bool pr; /* Prediction resistance enabled? */
diff --git a/include/crypto/if_alg.h b/include/crypto/if_alg.h
index cc414db9da0a..482461d8931d 100644
--- a/include/crypto/if_alg.h
+++ b/include/crypto/if_alg.h
@@ -245,7 +245,8 @@ ssize_t af_alg_sendpage(struct socket *sock, struct page *page,
int offset, size_t size, int flags);
void af_alg_free_resources(struct af_alg_async_req *areq);
void af_alg_async_cb(struct crypto_async_request *_req, int err);
-__poll_t af_alg_poll_mask(struct socket *sock, __poll_t events);
+__poll_t af_alg_poll(struct file *file, struct socket *sock,
+ poll_table *wait);
struct af_alg_async_req *af_alg_alloc_areq(struct sock *sk,
unsigned int areqlen);
int af_alg_get_rsgl(struct sock *sk, struct msghdr *msg, int flags,
diff --git a/include/crypto/scatterwalk.h b/include/crypto/scatterwalk.h
index 880e6be9e95e..a66c127a20ed 100644
--- a/include/crypto/scatterwalk.h
+++ b/include/crypto/scatterwalk.h
@@ -22,27 +22,14 @@
#include <linux/scatterlist.h>
static inline void scatterwalk_crypto_chain(struct scatterlist *head,
- struct scatterlist *sg,
- int chain, int num)
+ struct scatterlist *sg, int num)
{
- if (chain) {
- head->length += sg->length;
- sg = sg_next(sg);
- }
-
if (sg)
sg_chain(head, num, sg);
else
sg_mark_end(head);
}
-static inline unsigned long scatterwalk_samebuf(struct scatter_walk *walk_in,
- struct scatter_walk *walk_out)
-{
- return !(((sg_page(walk_in->sg) - sg_page(walk_out->sg)) << PAGE_SHIFT) +
- (int)(walk_in->offset - walk_out->offset));
-}
-
static inline unsigned int scatterwalk_pagelen(struct scatter_walk *walk)
{
unsigned int len = walk->sg->offset + walk->sg->length - walk->offset;
diff --git a/include/crypto/sha.h b/include/crypto/sha.h
index 0555b571dd34..8a46202b1857 100644
--- a/include/crypto/sha.h
+++ b/include/crypto/sha.h
@@ -71,6 +71,10 @@ extern const u8 sha224_zero_message_hash[SHA224_DIGEST_SIZE];
extern const u8 sha256_zero_message_hash[SHA256_DIGEST_SIZE];
+extern const u8 sha384_zero_message_hash[SHA384_DIGEST_SIZE];
+
+extern const u8 sha512_zero_message_hash[SHA512_DIGEST_SIZE];
+
struct sha1_state {
u32 state[SHA1_DIGEST_SIZE / 4];
u64 count;
diff --git a/include/crypto/vmac.h b/include/crypto/vmac.h
deleted file mode 100644
index 6b700c7b2fe1..000000000000
--- a/include/crypto/vmac.h
+++ /dev/null
@@ -1,63 +0,0 @@
-/*
- * Modified to interface to the Linux kernel
- * Copyright (c) 2009, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
- * Place - Suite 330, Boston, MA 02111-1307 USA.
- */
-
-#ifndef __CRYPTO_VMAC_H
-#define __CRYPTO_VMAC_H
-
-/* --------------------------------------------------------------------------
- * VMAC and VHASH Implementation by Ted Krovetz (tdk@acm.org) and Wei Dai.
- * This implementation is herby placed in the public domain.
- * The authors offers no warranty. Use at your own risk.
- * Please send bug reports to the authors.
- * Last modified: 17 APR 08, 1700 PDT
- * ----------------------------------------------------------------------- */
-
-/*
- * User definable settings.
- */
-#define VMAC_TAG_LEN 64
-#define VMAC_KEY_SIZE 128/* Must be 128, 192 or 256 */
-#define VMAC_KEY_LEN (VMAC_KEY_SIZE/8)
-#define VMAC_NHBYTES 128/* Must 2^i for any 3 < i < 13 Standard = 128*/
-
-/*
- * This implementation uses u32 and u64 as names for unsigned 32-
- * and 64-bit integer types. These are defined in C99 stdint.h. The
- * following may need adaptation if you are not running a C99 or
- * Microsoft C environment.
- */
-struct vmac_ctx {
- u64 nhkey[(VMAC_NHBYTES/8)+2*(VMAC_TAG_LEN/64-1)];
- u64 polykey[2*VMAC_TAG_LEN/64];
- u64 l3key[2*VMAC_TAG_LEN/64];
- u64 polytmp[2*VMAC_TAG_LEN/64];
- u64 cached_nonce[2];
- u64 cached_aes[2];
- int first_block_processed;
-};
-
-typedef u64 vmac_t;
-
-struct vmac_ctx_t {
- struct crypto_cipher *child;
- struct vmac_ctx __vmac_ctx;
- u8 partial[VMAC_NHBYTES]; /* partial block */
- int partial_size; /* size of the partial block */
-};
-
-#endif /* __CRYPTO_VMAC_H */
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index f5099c12c6a6..f7a19c2a7a80 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -97,29 +97,11 @@ struct pci_controller;
#define DRM_IF_VERSION(maj, min) (maj << 16 | min)
-/**
- * drm_drv_uses_atomic_modeset - check if the driver implements
- * atomic_commit()
- * @dev: DRM device
- *
- * This check is useful if drivers do not have DRIVER_ATOMIC set but
- * have atomic modesetting internally implemented.
- */
-static inline bool drm_drv_uses_atomic_modeset(struct drm_device *dev)
-{
- return dev->mode_config.funcs->atomic_commit != NULL;
-}
-
#define DRM_SWITCH_POWER_ON 0
#define DRM_SWITCH_POWER_OFF 1
#define DRM_SWITCH_POWER_CHANGING 2
#define DRM_SWITCH_POWER_DYNAMIC_OFF 3
-static inline bool drm_core_check_feature(struct drm_device *dev, int feature)
-{
- return dev->driver->driver_features & feature;
-}
-
/* returns true if currently okay to sleep */
static inline bool drm_can_sleep(void)
{
diff --git a/include/drm/drm_atomic.h b/include/drm/drm_atomic.h
index a57a8aa90ffb..da9d95a19580 100644
--- a/include/drm/drm_atomic.h
+++ b/include/drm/drm_atomic.h
@@ -160,6 +160,14 @@ struct __drm_crtcs_state {
struct __drm_connnectors_state {
struct drm_connector *ptr;
struct drm_connector_state *state, *old_state, *new_state;
+ /**
+ * @out_fence_ptr:
+ *
+ * User-provided pointer which the kernel uses to return a sync_file
+ * file descriptor. Used by writeback connectors to signal completion of
+ * the writeback.
+ */
+ s32 __user *out_fence_ptr;
};
struct drm_private_obj;
@@ -594,6 +602,9 @@ void drm_atomic_set_fence_for_plane(struct drm_plane_state *plane_state,
int __must_check
drm_atomic_set_crtc_for_connector(struct drm_connector_state *conn_state,
struct drm_crtc *crtc);
+int drm_atomic_set_writeback_fb_for_connector(
+ struct drm_connector_state *conn_state,
+ struct drm_framebuffer *fb);
int __must_check
drm_atomic_add_affected_connectors(struct drm_atomic_state *state,
struct drm_crtc *crtc);
@@ -601,9 +612,6 @@ int __must_check
drm_atomic_add_affected_planes(struct drm_atomic_state *state,
struct drm_crtc *crtc);
-void
-drm_atomic_clean_old_fb(struct drm_device *dev, unsigned plane_mask, int ret);
-
int __must_check drm_atomic_check_only(struct drm_atomic_state *state);
int __must_check drm_atomic_commit(struct drm_atomic_state *state);
int __must_check drm_atomic_nonblocking_commit(struct drm_atomic_state *state);
diff --git a/include/drm/drm_atomic_helper.h b/include/drm/drm_atomic_helper.h
index 26aaba58d6ce..99e2a5297c69 100644
--- a/include/drm/drm_atomic_helper.h
+++ b/include/drm/drm_atomic_helper.h
@@ -100,6 +100,7 @@ int __must_check drm_atomic_helper_swap_state(struct drm_atomic_state *state,
int drm_atomic_helper_setup_commit(struct drm_atomic_state *state,
bool nonblock);
void drm_atomic_helper_wait_for_dependencies(struct drm_atomic_state *state);
+void drm_atomic_helper_fake_vblank(struct drm_atomic_state *state);
void drm_atomic_helper_commit_hw_done(struct drm_atomic_state *state);
void drm_atomic_helper_commit_cleanup_done(struct drm_atomic_state *state);
diff --git a/include/drm/drm_audio_component.h b/include/drm/drm_audio_component.h
new file mode 100644
index 000000000000..4923b00328c1
--- /dev/null
+++ b/include/drm/drm_audio_component.h
@@ -0,0 +1,118 @@
+// SPDX-License-Identifier: MIT
+// Copyright © 2014 Intel Corporation
+
+#ifndef _DRM_AUDIO_COMPONENT_H_
+#define _DRM_AUDIO_COMPONENT_H_
+
+struct drm_audio_component;
+
+/**
+ * struct drm_audio_component_ops - Ops implemented by DRM driver, called by hda driver
+ */
+struct drm_audio_component_ops {
+ /**
+ * @owner: drm module to pin down
+ */
+ struct module *owner;
+ /**
+ * @get_power: get the POWER_DOMAIN_AUDIO power well
+ *
+ * Request the power well to be turned on.
+ */
+ void (*get_power)(struct device *);
+ /**
+ * @put_power: put the POWER_DOMAIN_AUDIO power well
+ *
+ * Allow the power well to be turned off.
+ */
+ void (*put_power)(struct device *);
+ /**
+ * @codec_wake_override: Enable/disable codec wake signal
+ */
+ void (*codec_wake_override)(struct device *, bool enable);
+ /**
+ * @get_cdclk_freq: Get the Core Display Clock in kHz
+ */
+ int (*get_cdclk_freq)(struct device *);
+ /**
+ * @sync_audio_rate: set n/cts based on the sample rate
+ *
+	 * Called from the audio driver. After the audio driver sets the
+	 * sample rate, it calls this function to set n/cts.
+ */
+ int (*sync_audio_rate)(struct device *, int port, int pipe, int rate);
+ /**
+ * @get_eld: fill the audio state and ELD bytes for the given port
+ *
+ * Called from audio driver to get the HDMI/DP audio state of the given
+ * digital port, and also fetch ELD bytes to the given pointer.
+ *
+	 * It returns the byte size of the original ELD (not the size actually
+	 * copied), zero for an invalid ELD, or a negative error code.
+	 *
+	 * Note that the returned size may exceed @max_bytes; in that case only
+	 * part of the ELD has been copied to the buffer.
+ */
+ int (*get_eld)(struct device *, int port, int pipe, bool *enabled,
+ unsigned char *buf, int max_bytes);
+};
+
+/**
+ * struct drm_audio_component_audio_ops - Ops implemented by hda driver, called by DRM driver
+ */
+struct drm_audio_component_audio_ops {
+ /**
+ * @audio_ptr: Pointer to be used in call to pin_eld_notify
+ */
+ void *audio_ptr;
+ /**
+ * @pin_eld_notify: Notify the HDA driver that pin sense and/or ELD information has changed
+ *
+	 * Called when the DRM driver has set up the audio pipeline or has just
+	 * begun to tear it down. This allows the HDA driver to update its
+ * status accordingly (even when the HDA controller is in power save
+ * mode).
+ */
+ void (*pin_eld_notify)(void *audio_ptr, int port, int pipe);
+ /**
+ * @pin2port: Check and convert from pin node to port number
+ *
+ * Called by HDA driver to check and convert from the pin widget node
+ * number to a port number in the graphics side.
+ */
+ int (*pin2port)(void *audio_ptr, int pin);
+ /**
+ * @master_bind: (Optional) component master bind callback
+ *
+	 * Called when binding the master component, for HDA codec-specific
+ * handling of dynamic binding.
+ */
+ int (*master_bind)(struct device *dev, struct drm_audio_component *);
+ /**
+ * @master_unbind: (Optional) component master unbind callback
+ *
+	 * Called when unbinding the master component, for HDA codec-specific
+ * handling of dynamic unbinding.
+ */
+ void (*master_unbind)(struct device *dev, struct drm_audio_component *);
+};
+
+/**
+ * struct drm_audio_component - Used for direct communication between DRM and hda drivers
+ */
+struct drm_audio_component {
+ /**
+ * @dev: DRM device, used as parameter for ops
+ */
+ struct device *dev;
+ /**
+ * @ops: Ops implemented by DRM driver, called by hda driver
+ */
+ const struct drm_audio_component_ops *ops;
+ /**
+ * @audio_ops: Ops implemented by hda driver, called by DRM driver
+ */
+ const struct drm_audio_component_audio_ops *audio_ops;
+};
+
+#endif /* _DRM_AUDIO_COMPONENT_H_ */
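The two ops tables split the work between the drivers: the DRM side fills in &drm_audio_component_ops, the HDA side fills in &drm_audio_component_audio_ops, and both talk through the shared &drm_audio_component. A minimal sketch of the DRM-side table, assuming hypothetical xyz_* power helpers in the driver:

	static void xyz_audio_get_power(struct device *kdev)
	{
		/* take a reference on the audio power well / runtime PM */
	}

	static void xyz_audio_put_power(struct device *kdev)
	{
		/* drop the reference taken in xyz_audio_get_power() */
	}

	static const struct drm_audio_component_ops xyz_audio_component_ops = {
		.owner     = THIS_MODULE,
		.get_power = xyz_audio_get_power,
		.put_power = xyz_audio_put_power,
	};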
diff --git a/include/drm/drm_bridge.h b/include/drm/drm_bridge.h
index 3270fec46979..bd850747ce54 100644
--- a/include/drm/drm_bridge.h
+++ b/include/drm/drm_bridge.h
@@ -97,7 +97,7 @@ struct drm_bridge_funcs {
/**
* @mode_fixup:
*
- * This callback is used to validate and adjust a mode. The paramater
+ * This callback is used to validate and adjust a mode. The parameter
* mode is the display mode that should be fed to the next element in
* the display chain, either the final &drm_connector or the next
* &drm_bridge. The parameter adjusted_mode is the input mode the bridge
@@ -178,6 +178,22 @@ struct drm_bridge_funcs {
* then this would be &drm_encoder_helper_funcs.mode_set. The display
* pipe (i.e. clocks and timing signals) is off when this function is
* called.
+ *
+ * The adjusted_mode parameter is the mode output by the CRTC for the
+ * first bridge in the chain. It can be different from the mode
+ * parameter that contains the desired mode for the connector at the end
+ * of the bridges chain, for instance when the first bridge in the chain
+ * performs scaling. The adjusted mode is mostly useful for the first
+ * bridge in the chain and is likely irrelevant for the other bridges.
+ *
+ * For atomic drivers the adjusted_mode is the mode stored in
+ * &drm_crtc_state.adjusted_mode.
+ *
+ * NOTE:
+ *
+ * If a need arises to store and access modes adjusted for other
+ * locations than the connection between the CRTC and the first bridge,
+ * the DRM framework will have to be extended with DRM bridge states.
*/
void (*mode_set)(struct drm_bridge *bridge,
struct drm_display_mode *mode,
@@ -254,27 +270,29 @@ struct drm_bridge_timings {
/**
* struct drm_bridge - central DRM bridge control structure
- * @dev: DRM device this bridge belongs to
- * @encoder: encoder to which this bridge is connected
- * @next: the next bridge in the encoder chain
- * @of_node: device node pointer to the bridge
- * @list: to keep track of all added bridges
- * @timings: the timing specification for the bridge, if any (may
- * be NULL)
- * @funcs: control functions
- * @driver_private: pointer to the bridge driver's internal context
*/
struct drm_bridge {
+ /** @dev: DRM device this bridge belongs to */
struct drm_device *dev;
+ /** @encoder: encoder to which this bridge is connected */
struct drm_encoder *encoder;
+ /** @next: the next bridge in the encoder chain */
struct drm_bridge *next;
#ifdef CONFIG_OF
+ /** @of_node: device node pointer to the bridge */
struct device_node *of_node;
#endif
+ /** @list: to keep track of all added bridges */
struct list_head list;
+ /**
+ * @timings:
+ *
+ * the timing specification for the bridge, if any (may be NULL)
+ */
const struct drm_bridge_timings *timings;
-
+ /** @funcs: control functions */
const struct drm_bridge_funcs *funcs;
+ /** @driver_private: pointer to the bridge driver's internal context */
void *driver_private;
};
@@ -285,15 +303,15 @@ int drm_bridge_attach(struct drm_encoder *encoder, struct drm_bridge *bridge,
struct drm_bridge *previous);
bool drm_bridge_mode_fixup(struct drm_bridge *bridge,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode);
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode);
enum drm_mode_status drm_bridge_mode_valid(struct drm_bridge *bridge,
const struct drm_display_mode *mode);
void drm_bridge_disable(struct drm_bridge *bridge);
void drm_bridge_post_disable(struct drm_bridge *bridge);
void drm_bridge_mode_set(struct drm_bridge *bridge,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode);
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode);
void drm_bridge_pre_enable(struct drm_bridge *bridge);
void drm_bridge_enable(struct drm_bridge *bridge);
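A minimal sketch of an encoder driver attaching the first bridge of its chain with drm_bridge_attach(); the xyz_* name is hypothetical:

	static int xyz_encoder_init_bridge(struct drm_encoder *encoder,
					   struct drm_bridge *bridge)
	{
		/* previous == NULL: this becomes the first bridge in the chain */
		return drm_bridge_attach(encoder, bridge, NULL);
	}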
diff --git a/include/drm/drm_client.h b/include/drm/drm_client.h
new file mode 100644
index 000000000000..989f8e52864d
--- /dev/null
+++ b/include/drm/drm_client.h
@@ -0,0 +1,139 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _DRM_CLIENT_H_
+#define _DRM_CLIENT_H_
+
+#include <linux/types.h>
+
+struct drm_client_dev;
+struct drm_device;
+struct drm_file;
+struct drm_framebuffer;
+struct drm_gem_object;
+struct drm_minor;
+struct module;
+
+/**
+ * struct drm_client_funcs - DRM client callbacks
+ */
+struct drm_client_funcs {
+ /**
+ * @owner: The module owner
+ */
+ struct module *owner;
+
+ /**
+ * @unregister:
+ *
+ * Called when &drm_device is unregistered. The client should respond by
+	 * releasing its resources using drm_client_release().
+ *
+ * This callback is optional.
+ */
+ void (*unregister)(struct drm_client_dev *client);
+
+ /**
+ * @restore:
+ *
+ * Called on drm_lastclose(). The first client instance in the list that
+ * returns zero gets the privilege to restore and no more clients are
+ * called. This callback is not called after @unregister has been called.
+ *
+ * This callback is optional.
+ */
+ int (*restore)(struct drm_client_dev *client);
+
+ /**
+ * @hotplug:
+ *
+ * Called on drm_kms_helper_hotplug_event().
+ * This callback is not called after @unregister has been called.
+ *
+ * This callback is optional.
+ */
+ int (*hotplug)(struct drm_client_dev *client);
+};
+
+/**
+ * struct drm_client_dev - DRM client instance
+ */
+struct drm_client_dev {
+ /**
+ * @dev: DRM device
+ */
+ struct drm_device *dev;
+
+ /**
+ * @name: Name of the client.
+ */
+ const char *name;
+
+ /**
+ * @list:
+ *
+ * List of all clients of a DRM device, linked into
+ * &drm_device.clientlist. Protected by &drm_device.clientlist_mutex.
+ */
+ struct list_head list;
+
+ /**
+ * @funcs: DRM client functions (optional)
+ */
+ const struct drm_client_funcs *funcs;
+
+ /**
+ * @file: DRM file
+ */
+ struct drm_file *file;
+};
+
+int drm_client_new(struct drm_device *dev, struct drm_client_dev *client,
+ const char *name, const struct drm_client_funcs *funcs);
+void drm_client_release(struct drm_client_dev *client);
+
+void drm_client_dev_unregister(struct drm_device *dev);
+void drm_client_dev_hotplug(struct drm_device *dev);
+void drm_client_dev_restore(struct drm_device *dev);
+
+/**
+ * struct drm_client_buffer - DRM client buffer
+ */
+struct drm_client_buffer {
+ /**
+ * @client: DRM client
+ */
+ struct drm_client_dev *client;
+
+ /**
+ * @handle: Buffer handle
+ */
+ u32 handle;
+
+ /**
+ * @pitch: Buffer pitch
+ */
+ u32 pitch;
+
+ /**
+ * @gem: GEM object backing this buffer
+ */
+ struct drm_gem_object *gem;
+
+ /**
+ * @vaddr: Virtual address for the buffer
+ */
+ void *vaddr;
+
+ /**
+ * @fb: DRM framebuffer
+ */
+ struct drm_framebuffer *fb;
+};
+
+struct drm_client_buffer *
+drm_client_framebuffer_create(struct drm_client_dev *client, u32 width, u32 height, u32 format);
+void drm_client_framebuffer_delete(struct drm_client_buffer *buffer);
+
+int drm_client_debugfs_init(struct drm_minor *minor);
+
+#endif
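A minimal sketch of how an in-kernel client could hook into this API; the xyz_* names are hypothetical, only the drm_client_* calls come from this header:

	static int xyz_client_hotplug(struct drm_client_dev *client)
	{
		/* re-probe outputs and (re)allocate buffers as needed */
		return 0;
	}

	static const struct drm_client_funcs xyz_client_funcs = {
		.owner   = THIS_MODULE,
		.hotplug = xyz_client_hotplug,
	};

	static int xyz_client_setup(struct drm_device *dev,
				    struct drm_client_dev *client)
	{
		/* registers the client and opens an internal DRM file for it */
		return drm_client_new(dev, client, "xyz-client", &xyz_client_funcs);
	}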
diff --git a/include/drm/drm_connector.h b/include/drm/drm_connector.h
index 675cc3f8cf85..97ea41dc678f 100644
--- a/include/drm/drm_connector.h
+++ b/include/drm/drm_connector.h
@@ -290,6 +290,10 @@ struct drm_display_info {
#define DRM_BUS_FLAG_DATA_MSB_TO_LSB (1<<4)
/* data is transmitted LSB to MSB on the bus */
#define DRM_BUS_FLAG_DATA_LSB_TO_MSB (1<<5)
+/* drive sync on pos. edge */
+#define DRM_BUS_FLAG_SYNC_POSEDGE (1<<6)
+/* drive sync on neg. edge */
+#define DRM_BUS_FLAG_SYNC_NEGEDGE (1<<7)
/**
* @bus_flags: Additional information (like pixel signal polarity) for
@@ -374,12 +378,9 @@ struct drm_tv_connector_state {
/**
* struct drm_connector_state - mutable connector state
- * @connector: backpointer to the connector
- * @best_encoder: can be used by helpers and drivers to select the encoder
- * @state: backpointer to global drm_atomic_state
- * @tv: TV connector state
*/
struct drm_connector_state {
+ /** @connector: backpointer to the connector */
struct drm_connector *connector;
/**
@@ -390,6 +391,13 @@ struct drm_connector_state {
*/
struct drm_crtc *crtc;
+ /**
+ * @best_encoder:
+ *
+ * Used by the atomic helpers to select the encoder, through the
+ * &drm_connector_helper_funcs.atomic_best_encoder or
+ * &drm_connector_helper_funcs.best_encoder callbacks.
+ */
struct drm_encoder *best_encoder;
/**
@@ -398,6 +406,7 @@ struct drm_connector_state {
*/
enum drm_link_status link_status;
+ /** @state: backpointer to global drm_atomic_state */
struct drm_atomic_state *state;
/**
@@ -407,6 +416,7 @@ struct drm_connector_state {
*/
struct drm_crtc_commit *commit;
+ /** @tv: TV connector state */
struct drm_tv_connector_state tv;
/**
@@ -419,6 +429,14 @@ struct drm_connector_state {
enum hdmi_picture_aspect picture_aspect_ratio;
/**
+ * @content_type: Connector property to control the
+ * HDMI infoframe content type setting.
+	 * The stored value must be one of the
+	 * %DRM_MODE_CONTENT_TYPE_\* values.
+ */
+ unsigned int content_type;
+
+ /**
* @scaling_mode: Connector property to control the
* upscaling, mostly used for built-in panels.
*/
@@ -429,6 +447,19 @@ struct drm_connector_state {
* protection. This is most commonly used for HDCP.
*/
unsigned int content_protection;
+
+ /**
+ * @writeback_job: Writeback job for writeback connectors
+ *
+ * Holds the framebuffer and out-fence for a writeback connector. As
+ * the writeback completion may be asynchronous to the normal commit
+ * cycle, the writeback job lifetime is managed separately from the
+ * normal atomic state by this object.
+ *
+ * See also: drm_writeback_queue_job() and
+ * drm_writeback_signal_completion()
+ */
+ struct drm_writeback_job *writeback_job;
};
/**
@@ -530,8 +561,7 @@ struct drm_connector_funcs {
* received for this output connector->edid must be NULL.
*
* Drivers using the probe helpers should use
- * drm_helper_probe_single_connector_modes() or
- * drm_helper_probe_single_connector_modes_nomerge() to implement this
+ * drm_helper_probe_single_connector_modes() to implement this
* function.
*
* RETURNS:
@@ -608,6 +638,8 @@ struct drm_connector_funcs {
* cleaned up by calling the @atomic_destroy_state hook in this
* structure.
*
+ * This callback is mandatory for atomic drivers.
+ *
* Atomic drivers which don't subclass &struct drm_connector_state should use
* drm_atomic_helper_connector_duplicate_state(). Drivers that subclass the
* state structure to extend it with driver-private state should use
@@ -634,6 +666,8 @@ struct drm_connector_funcs {
*
* Destroy a state duplicated with @atomic_duplicate_state and release
* or unreference all resources it references
+ *
+ * This callback is mandatory for atomic drivers.
*/
void (*atomic_destroy_state)(struct drm_connector *connector,
struct drm_connector_state *state);
@@ -738,45 +772,6 @@ struct drm_cmdline_mode {
/**
* struct drm_connector - central DRM connector control structure
- * @dev: parent DRM device
- * @kdev: kernel device for sysfs attributes
- * @attr: sysfs attributes
- * @head: list management
- * @base: base KMS object
- * @name: human readable name, can be overwritten by the driver
- * @connector_type: one of the DRM_MODE_CONNECTOR_<foo> types from drm_mode.h
- * @connector_type_id: index into connector type enum
- * @interlace_allowed: can this connector handle interlaced modes?
- * @doublescan_allowed: can this connector handle doublescan?
- * @stereo_allowed: can this connector handle stereo modes?
- * @funcs: connector control functions
- * @edid_blob_ptr: DRM property containing EDID if present
- * @properties: property tracking for this connector
- * @dpms: current dpms state
- * @helper_private: mid-layer private data
- * @cmdline_mode: mode line parsed from the kernel cmdline for this connector
- * @force: a DRM_FORCE_<foo> state for forced mode sets
- * @override_edid: has the EDID been overwritten through debugfs for testing?
- * @encoder_ids: valid encoders for this connector
- * @eld: EDID-like data, if present
- * @latency_present: AV delay info from ELD, if found
- * @video_latency: video latency info from ELD, if found
- * @audio_latency: audio latency info from ELD, if found
- * @null_edid_counter: track sinks that give us all zeros for the EDID
- * @bad_edid_counter: track sinks that give us an EDID with invalid checksum
- * @edid_corrupt: indicates whether the last read EDID was corrupt
- * @debugfs_entry: debugfs directory for this connector
- * @has_tile: is this connector connected to a tiled monitor
- * @tile_group: tile group for the connected monitor
- * @tile_is_single_monitor: whether the tile is one monitor housing
- * @num_h_tile: number of horizontal tiles in the tile group
- * @num_v_tile: number of vertical tiles in the tile group
- * @tile_h_loc: horizontal location of this tile
- * @tile_v_loc: vertical location of this tile
- * @tile_h_size: horizontal size of this tile.
- * @tile_v_size: vertical size of this tile.
- * @scaling_mode_property: Optional atomic property to control the upscaling.
- * @content_protection_property: Optional property to control content protection
*
* Each connector may be connected to one or more CRTCs, or may be clonable by
* another connector if they can share a CRTC. Each connector also has a specific
@@ -784,13 +779,27 @@ struct drm_cmdline_mode {
* span multiple monitors).
*/
struct drm_connector {
+ /** @dev: parent DRM device */
struct drm_device *dev;
+ /** @kdev: kernel device for sysfs attributes */
struct device *kdev;
+ /** @attr: sysfs attributes */
struct device_attribute *attr;
+
+ /**
+ * @head:
+ *
+ * List of all connectors on a @dev, linked from
+ * &drm_mode_config.connector_list. Protected by
+ * &drm_mode_config.connector_list_lock, but please only use
+ * &drm_connector_list_iter to walk this list.
+ */
struct list_head head;
+ /** @base: base KMS object */
struct drm_mode_object base;
+ /** @name: human readable name, can be overwritten by the driver */
char *name;
/**
@@ -808,10 +817,30 @@ struct drm_connector {
*/
unsigned index;
+ /**
+ * @connector_type:
+ * one of the DRM_MODE_CONNECTOR_<foo> types from drm_mode.h
+ */
int connector_type;
+ /** @connector_type_id: index into connector type enum */
int connector_type_id;
+ /**
+ * @interlace_allowed:
+ * Can this connector handle interlaced modes? Only used by
+ * drm_helper_probe_single_connector_modes() for mode filtering.
+ */
bool interlace_allowed;
+ /**
+ * @doublescan_allowed:
+ * Can this connector handle doublescan? Only used by
+ * drm_helper_probe_single_connector_modes() for mode filtering.
+ */
bool doublescan_allowed;
+ /**
+ * @stereo_allowed:
+ * Can this connector handle stereo modes? Only used by
+ * drm_helper_probe_single_connector_modes() for mode filtering.
+ */
bool stereo_allowed;
/**
@@ -860,45 +889,42 @@ struct drm_connector {
* Protected by &drm_mode_config.mutex.
*/
struct drm_display_info display_info;
+
+ /** @funcs: connector control functions */
const struct drm_connector_funcs *funcs;
+ /**
+ * @edid_blob_ptr: DRM property containing EDID if present. Protected by
+ * &drm_mode_config.mutex. This should be updated only by calling
+ * drm_connector_update_edid_property().
+ */
struct drm_property_blob *edid_blob_ptr;
+
+ /** @properties: property tracking for this connector */
struct drm_object_properties properties;
+ /**
+ * @scaling_mode_property: Optional atomic property to control the
+	 * upscaling. See drm_connector_attach_scaling_mode_property().
+ */
struct drm_property *scaling_mode_property;
/**
* @content_protection_property: DRM ENUM property for content
- * protection
+ * protection. See drm_connector_attach_content_protection_property().
*/
struct drm_property *content_protection_property;
/**
* @path_blob_ptr:
*
- * DRM blob property data for the DP MST path property.
+ * DRM blob property data for the DP MST path property. This should only
+ * be updated by calling drm_connector_set_path_property().
*/
struct drm_property_blob *path_blob_ptr;
- /**
- * @tile_blob_ptr:
- *
- * DRM blob property data for the tile property (used mostly by DP MST).
- * This is meant for screens which are driven through separate display
- * pipelines represented by &drm_crtc, which might not be running with
- * genlocked clocks. For tiled panels which are genlocked, like
- * dual-link LVDS or dual-link DSI, the driver should try to not expose
- * the tiling and virtualize both &drm_crtc and &drm_plane if needed.
- */
- struct drm_property_blob *tile_blob_ptr;
-
-/* should we poll this connector for connects and disconnects */
-/* hot plug detectable */
#define DRM_CONNECTOR_POLL_HPD (1 << 0)
-/* poll for connections */
#define DRM_CONNECTOR_POLL_CONNECT (1 << 1)
-/* can cleanly poll for disconnections without flickering the screen */
-/* DACs should rarely do this without a lot of testing */
#define DRM_CONNECTOR_POLL_DISCONNECT (1 << 2)
/**
@@ -915,25 +941,40 @@ struct drm_connector {
* Periodically poll the connector for connection.
*
* DRM_CONNECTOR_POLL_DISCONNECT
- * Periodically poll the connector for disconnection.
+ * Periodically poll the connector for disconnection, without
+ * causing flickering even when the connector is in use. DACs should
+ * rarely do this without a lot of testing.
*
* Set to 0 for connectors that don't support connection status
* discovery.
*/
uint8_t polled;
- /* requested DPMS state */
+ /**
+ * @dpms: Current dpms state. For legacy drivers the
+ * &drm_connector_funcs.dpms callback must update this. For atomic
+ * drivers, this is handled by the core atomic code, and drivers must
+ * only take &drm_crtc_state.active into account.
+ */
int dpms;
+ /** @helper_private: mid-layer private data */
const struct drm_connector_helper_funcs *helper_private;
- /* forced on connector */
+ /** @cmdline_mode: mode line parsed from the kernel cmdline for this connector */
struct drm_cmdline_mode cmdline_mode;
+ /** @force: a DRM_FORCE_<foo> state for forced mode sets */
enum drm_connector_force force;
+ /** @override_edid: has the EDID been overwritten through debugfs for testing? */
bool override_edid;
#define DRM_CONNECTOR_MAX_ENCODER 3
+ /**
+ * @encoder_ids: Valid encoders for this connector. Please only use
+ * drm_connector_for_each_possible_encoder() to enumerate these.
+ */
uint32_t encoder_ids[DRM_CONNECTOR_MAX_ENCODER];
+
/**
* @encoder: Currently bound encoder driving this connector, if any.
* Only really meaningful for non-atomic drivers. Atomic drivers should
@@ -943,19 +984,37 @@ struct drm_connector {
struct drm_encoder *encoder;
#define MAX_ELD_BYTES 128
- /* EDID bits */
+ /** @eld: EDID-like data, if present */
uint8_t eld[MAX_ELD_BYTES];
+ /** @latency_present: AV delay info from ELD, if found */
bool latency_present[2];
- int video_latency[2]; /* [0]: progressive, [1]: interlaced */
+ /**
+ * @video_latency: Video latency info from ELD, if found.
+ * [0]: progressive, [1]: interlaced
+ */
+ int video_latency[2];
+ /**
+ * @audio_latency: audio latency info from ELD, if found
+ * [0]: progressive, [1]: interlaced
+ */
int audio_latency[2];
- int null_edid_counter; /* needed to workaround some HW bugs where we get all 0s */
+ /**
+ * @null_edid_counter: track sinks that give us all zeros for the EDID.
+	 * Needed to work around some HW bugs where we get all 0s.
+ */
+ int null_edid_counter;
+
+ /** @bad_edid_counter: track sinks that give us an EDID with invalid checksum */
unsigned bad_edid_counter;
- /* Flag for raw EDID header corruption - used in Displayport
- * compliance testing - * Displayport Link CTS Core 1.2 rev1.1 4.2.2.6
+ /**
+ * @edid_corrupt: Indicates whether the last read EDID was corrupt. Used
+ * in Displayport compliance testing - Displayport Link CTS Core 1.2
+ * rev1.1 4.2.2.6
*/
bool edid_corrupt;
+ /** @debugfs_entry: debugfs directory for this connector */
struct dentry *debugfs_entry;
/**
@@ -963,7 +1022,7 @@ struct drm_connector {
*
* Current atomic state for this connector.
*
- * This is protected by @drm_mode_config.connection_mutex. Note that
+ * This is protected by &drm_mode_config.connection_mutex. Note that
* nonblocking atomic commits access the current connector state without
* taking locks. Either by going through the &struct drm_atomic_state
* pointers, see for_each_oldnew_connector_in_state(),
@@ -974,19 +1033,44 @@ struct drm_connector {
*/
struct drm_connector_state *state;
- /* DisplayID bits */
+ /* DisplayID bits. FIXME: Extract into a substruct? */
+
+ /**
+ * @tile_blob_ptr:
+ *
+ * DRM blob property data for the tile property (used mostly by DP MST).
+ * This is meant for screens which are driven through separate display
+ * pipelines represented by &drm_crtc, which might not be running with
+ * genlocked clocks. For tiled panels which are genlocked, like
+ * dual-link LVDS or dual-link DSI, the driver should try to not expose
+ * the tiling and virtualize both &drm_crtc and &drm_plane if needed.
+ *
+ * This should only be updated by calling
+ * drm_connector_set_tile_property().
+ */
+ struct drm_property_blob *tile_blob_ptr;
+
+ /** @has_tile: is this connector connected to a tiled monitor */
bool has_tile;
+ /** @tile_group: tile group for the connected monitor */
struct drm_tile_group *tile_group;
+ /** @tile_is_single_monitor: whether the tile is one monitor housing */
bool tile_is_single_monitor;
+ /** @num_h_tile: number of horizontal tiles in the tile group */
+ /** @num_v_tile: number of vertical tiles in the tile group */
uint8_t num_h_tile, num_v_tile;
+ /** @tile_h_loc: horizontal location of this tile */
+ /** @tile_v_loc: vertical location of this tile */
uint8_t tile_h_loc, tile_v_loc;
+ /** @tile_h_size: horizontal size of this tile. */
+ /** @tile_v_size: vertical size of this tile. */
uint16_t tile_h_size, tile_v_size;
/**
* @free_node:
*
- * List used only by &drm_connector_iter to be able to clean up a
+ * List used only by &drm_connector_list_iter to be able to clean up a
* connector from any context, in conjunction with
* &drm_mode_config.connector_free_work.
*/
@@ -1001,15 +1085,21 @@ int drm_connector_init(struct drm_device *dev,
int connector_type);
int drm_connector_register(struct drm_connector *connector);
void drm_connector_unregister(struct drm_connector *connector);
-int drm_mode_connector_attach_encoder(struct drm_connector *connector,
+int drm_connector_attach_encoder(struct drm_connector *connector,
struct drm_encoder *encoder);
void drm_connector_cleanup(struct drm_connector *connector);
-static inline unsigned drm_connector_index(struct drm_connector *connector)
+
+static inline unsigned int drm_connector_index(const struct drm_connector *connector)
{
return connector->index;
}
+static inline u32 drm_connector_mask(const struct drm_connector *connector)
+{
+ return 1 << connector->index;
+}
+
/**
* drm_connector_lookup - lookup connector object
* @dev: DRM device
@@ -1089,20 +1179,25 @@ int drm_mode_create_tv_properties(struct drm_device *dev,
unsigned int num_modes,
const char * const modes[]);
int drm_mode_create_scaling_mode_property(struct drm_device *dev);
+int drm_connector_attach_content_type_property(struct drm_connector *dev);
int drm_connector_attach_scaling_mode_property(struct drm_connector *connector,
u32 scaling_mode_mask);
int drm_connector_attach_content_protection_property(
struct drm_connector *connector);
int drm_mode_create_aspect_ratio_property(struct drm_device *dev);
+int drm_mode_create_content_type_property(struct drm_device *dev);
+void drm_hdmi_avi_infoframe_content_type(struct hdmi_avi_infoframe *frame,
+ const struct drm_connector_state *conn_state);
+
int drm_mode_create_suggested_offset_properties(struct drm_device *dev);
-int drm_mode_connector_set_path_property(struct drm_connector *connector,
- const char *path);
-int drm_mode_connector_set_tile_property(struct drm_connector *connector);
-int drm_mode_connector_update_edid_property(struct drm_connector *connector,
- const struct edid *edid);
-void drm_mode_connector_set_link_status_property(struct drm_connector *connector,
- uint64_t link_status);
+int drm_connector_set_path_property(struct drm_connector *connector,
+ const char *path);
+int drm_connector_set_tile_property(struct drm_connector *connector);
+int drm_connector_update_edid_property(struct drm_connector *connector,
+ const struct edid *edid);
+void drm_connector_set_link_status_property(struct drm_connector *connector,
+ uint64_t link_status);
int drm_connector_init_panel_orientation_property(
struct drm_connector *connector, int width, int height);
@@ -1151,6 +1246,9 @@ struct drm_connector *
drm_connector_list_iter_next(struct drm_connector_list_iter *iter);
void drm_connector_list_iter_end(struct drm_connector_list_iter *iter);
+bool drm_connector_has_possible_encoder(struct drm_connector *connector,
+ struct drm_encoder *encoder);
+
/**
* drm_for_each_connector_iter - connector_list iterator macro
* @connector: &struct drm_connector pointer used as cursor
@@ -1163,4 +1261,17 @@ void drm_connector_list_iter_end(struct drm_connector_list_iter *iter);
#define drm_for_each_connector_iter(connector, iter) \
while ((connector = drm_connector_list_iter_next(iter)))
+/**
+ * drm_connector_for_each_possible_encoder - iterate connector's possible encoders
+ * @connector: &struct drm_connector pointer
+ * @encoder: &struct drm_encoder pointer used as cursor
+ * @__i: int iteration cursor, for macro-internal use
+ */
+#define drm_connector_for_each_possible_encoder(connector, encoder, __i) \
+ for ((__i) = 0; (__i) < ARRAY_SIZE((connector)->encoder_ids) && \
+ (connector)->encoder_ids[(__i)] != 0; (__i)++) \
+ for_each_if((encoder) = \
+ drm_encoder_find((connector)->dev, NULL, \
+ (connector)->encoder_ids[(__i)])) \
+
#endif
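A minimal sketch of the new iterator in use, e.g. to pick the first encoder a connector could be driven by (hypothetical helper name):

	static struct drm_encoder *
	xyz_first_possible_encoder(struct drm_connector *connector)
	{
		struct drm_encoder *encoder;
		int i;

		drm_connector_for_each_possible_encoder(connector, encoder, i)
			return encoder;

		return NULL;
	}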
diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h
index a2d81d2907a9..92e7fc7f05a4 100644
--- a/include/drm/drm_crtc.h
+++ b/include/drm/drm_crtc.h
@@ -77,21 +77,6 @@ struct drm_plane_helper_funcs;
/**
* struct drm_crtc_state - mutable CRTC state
- * @crtc: backpointer to the CRTC
- * @enable: whether the CRTC should be enabled, gates all other state
- * @active: whether the CRTC is actively displaying (used for DPMS)
- * @planes_changed: planes on this crtc are updated
- * @mode_changed: @mode or @enable has been changed
- * @active_changed: @active has been toggled.
- * @connectors_changed: connectors to this crtc have been updated
- * @zpos_changed: zpos values of planes on this crtc have been updated
- * @color_mgmt_changed: color management properties have changed (degamma or
- * gamma LUT or CSC matrix)
- * @plane_mask: bitmask of (1 << drm_plane_index(plane)) of attached planes
- * @connector_mask: bitmask of (1 << drm_connector_index(connector)) of attached connectors
- * @encoder_mask: bitmask of (1 << drm_encoder_index(encoder)) of attached encoders
- * @mode_blob: &drm_property_blob for @mode
- * @state: backpointer to global drm_atomic_state
*
* Note that the distinction between @enable and @active is rather subtle:
* Flipping @active while @enable is set without changing anything else may
@@ -102,31 +87,127 @@ struct drm_plane_helper_funcs;
*
* The three booleans active_changed, connectors_changed and mode_changed are
* intended to indicate whether a full modeset is needed, rather than strictly
- * describing what has changed in a commit.
- * See also: drm_atomic_crtc_needs_modeset()
+ * describing what has changed in a commit. See also:
+ * drm_atomic_crtc_needs_modeset()
+ *
+ * WARNING: Transitional helpers (like drm_helper_crtc_mode_set() or
+ * drm_helper_crtc_mode_set_base()) do not maintain much of the derived control
+ * state like @plane_mask, so drivers not converted over to atomic helpers should
+ * not rely on these being accurate!
*/
struct drm_crtc_state {
+ /** @crtc: backpointer to the CRTC */
struct drm_crtc *crtc;
+ /**
+ * @enable: Whether the CRTC should be enabled, gates all other state.
+ * This controls reservations of shared resources. Actual hardware state
+ * is controlled by @active.
+ */
bool enable;
+
+ /**
+ * @active: Whether the CRTC is actively displaying (used for DPMS).
+ * Implies that @enable is set. The driver must not release any shared
+ * resources if @active is set to false but @enable still true, because
+ * userspace expects that a DPMS ON always succeeds.
+ *
+ * Hence drivers must not consult @active in their various
+ * &drm_mode_config_funcs.atomic_check callback to reject an atomic
+ * commit. They can consult it to aid in the computation of derived
+ * hardware state, since even in the DPMS OFF state the display hardware
+ * should be as much powered down as when the CRTC is completely
+ * disabled through setting @enable to false.
+ */
bool active;
- /* computed state bits used by helpers and drivers */
+ /**
+ * @planes_changed: Planes on this crtc are updated. Used by the atomic
+ * helpers and drivers to steer the atomic commit control flow.
+ */
bool planes_changed : 1;
+
+ /**
+ * @mode_changed: @mode or @enable has been changed. Used by the atomic
+ * helpers and drivers to steer the atomic commit control flow. See also
+ * drm_atomic_crtc_needs_modeset().
+ *
+ * Drivers are supposed to set this for any CRTC state changes that
+ * require a full modeset. They can also reset it to false if e.g. a
+ * @mode change can be done without a full modeset by only changing
+ * scaler settings.
+ */
bool mode_changed : 1;
+
+ /**
+ * @active_changed: @active has been toggled. Used by the atomic
+ * helpers and drivers to steer the atomic commit control flow. See also
+ * drm_atomic_crtc_needs_modeset().
+ */
bool active_changed : 1;
+
+ /**
+ * @connectors_changed: Connectors to this crtc have been updated,
+ * either in their state or routing. Used by the atomic
+ * helpers and drivers to steer the atomic commit control flow. See also
+ * drm_atomic_crtc_needs_modeset().
+ *
+ * Drivers are supposed to set this as-needed from their own atomic
+ * check code, e.g. from &drm_encoder_helper_funcs.atomic_check
+ */
bool connectors_changed : 1;
+ /**
+ * @zpos_changed: zpos values of planes on this crtc have been updated.
+ * Used by the atomic helpers and drivers to steer the atomic commit
+ * control flow.
+ */
bool zpos_changed : 1;
+ /**
+ * @color_mgmt_changed: Color management properties have changed
+ * (@gamma_lut, @degamma_lut or @ctm). Used by the atomic helpers and
+ * drivers to steer the atomic commit control flow.
+ */
bool color_mgmt_changed : 1;
- /* attached planes bitmask:
- * WARNING: transitional helpers do not maintain plane_mask so
- * drivers not converted over to atomic helpers should not rely
- * on plane_mask being accurate!
+ /**
+ * @no_vblank:
+ *
+ * Reflects the ability of a CRTC to send VBLANK events. This state
+	 * usually depends on the pipeline configuration, and the main usage
+ * is CRTCs feeding a writeback connector operating in oneshot mode.
+ * In this case the VBLANK event is only generated when a job is queued
+ * to the writeback connector, and we want the core to fake VBLANK
+	 * events when this part of the pipeline hasn't changed but others have,
+ * or when the CRTC and connectors are being disabled.
+ *
+ * __drm_atomic_helper_crtc_duplicate_state() will not reset the value
+	 * from the current state; the CRTC driver is then responsible for
+ * updating this field when needed.
+ *
+ * Note that the combination of &drm_crtc_state.event == NULL and
+	 * &drm_crtc_state.no_vblank == true is valid and usually used when the
+ * writeback connector attached to the CRTC has a new job queued. In
+ * this case the driver will send the VBLANK event on its own when the
+ * writeback job is complete.
+ */
+ bool no_vblank : 1;
+
+ /**
+ * @plane_mask: Bitmask of drm_plane_mask(plane) of planes attached to
+ * this CRTC.
*/
u32 plane_mask;
+ /**
+ * @connector_mask: Bitmask of drm_connector_mask(connector) of
+ * connectors attached to this CRTC.
+ */
u32 connector_mask;
+
+ /**
+ * @encoder_mask: Bitmask of drm_encoder_mask(encoder) of encoders
+ * attached to this CRTC.
+ */
u32 encoder_mask;
/**
@@ -134,10 +215,13 @@ struct drm_crtc_state {
*
* Internal display timings which can be used by the driver to handle
* differences between the mode requested by userspace in @mode and what
- * is actually programmed into the hardware. It is purely driver
- * implementation defined what exactly this adjusted mode means. Usually
- * it is used to store the hardware display timings used between the
- * CRTC and encoder blocks.
+ * is actually programmed into the hardware.
+ *
+ * For drivers using &drm_bridge, this stores hardware display timings
+ * used between the CRTC and the first bridge. For other drivers, the
+ * meaning of the adjusted_mode field is purely driver implementation
+ * defined information, and will usually be used to store the hardware
+ * display timings used between the CRTC and encoder blocks.
*/
struct drm_display_mode adjusted_mode;
@@ -158,7 +242,10 @@ struct drm_crtc_state {
*/
struct drm_display_mode mode;
- /* blob property to expose current mode to atomic userspace */
+ /**
+ * @mode_blob: &drm_property_blob for @mode, for exposing the mode to
+ * atomic userspace.
+ */
struct drm_property_blob *mode_blob;
/**
@@ -262,6 +349,7 @@ struct drm_crtc_state {
*/
struct drm_crtc_commit *commit;
+ /** @state: backpointer to global drm_atomic_state */
struct drm_atomic_state *state;
};
@@ -503,6 +591,8 @@ struct drm_crtc_funcs {
* cleaned up by calling the @atomic_destroy_state hook in this
* structure.
*
+ * This callback is mandatory for atomic drivers.
+ *
* Atomic drivers which don't subclass &struct drm_crtc_state should use
* drm_atomic_helper_crtc_duplicate_state(). Drivers that subclass the
* state structure to extend it with driver-private state should use
@@ -529,6 +619,8 @@ struct drm_crtc_funcs {
*
* Destroy a state duplicated with @atomic_duplicate_state and release
* or unreference all resources it references
+ *
+ * This callback is mandatory for atomic drivers.
*/
void (*atomic_destroy_state)(struct drm_crtc *crtc,
struct drm_crtc_state *state);
@@ -717,35 +809,25 @@ struct drm_crtc_funcs {
/**
* struct drm_crtc - central CRTC control structure
- * @dev: parent DRM device
- * @port: OF node used by drm_of_find_possible_crtcs()
- * @head: list management
- * @name: human readable name, can be overwritten by the driver
- * @mutex: per-CRTC locking
- * @base: base KMS object for ID tracking etc.
- * @primary: primary plane for this CRTC
- * @cursor: cursor plane for this CRTC
- * @cursor_x: current x position of the cursor, used for universal cursor planes
- * @cursor_y: current y position of the cursor, used for universal cursor planes
- * @enabled: is this CRTC enabled?
- * @mode: current mode timings
- * @hwmode: mode timings as programmed to hw regs
- * @x: x position on screen
- * @y: y position on screen
- * @funcs: CRTC control functions
- * @gamma_size: size of gamma ramp
- * @gamma_store: gamma ramp values
- * @helper_private: mid-layer private data
- * @properties: property tracking for this CRTC
*
* Each CRTC may have one or more connectors associated with it. This structure
* allows the CRTC to be controlled.
*/
struct drm_crtc {
+ /** @dev: parent DRM device */
struct drm_device *dev;
+ /** @port: OF node used by drm_of_find_possible_crtcs(). */
struct device_node *port;
+ /**
+ * @head:
+ *
+ * List of all CRTCs on @dev, linked from &drm_mode_config.crtc_list.
+ * Invariant over the lifetime of @dev and therefore does not need
+ * locking.
+ */
struct list_head head;
+ /** @name: human readable name, can be overwritten by the driver */
char *name;
/**
@@ -760,10 +842,25 @@ struct drm_crtc {
*/
struct drm_modeset_lock mutex;
+ /** @base: base KMS object for ID tracking etc. */
struct drm_mode_object base;
- /* primary and cursor planes for CRTC */
+ /**
+ * @primary:
+ * Primary plane for this CRTC. Note that this is only
+	 * relevant for legacy IOCTLs; it specifies the plane implicitly used by
+ * the SETCRTC and PAGE_FLIP IOCTLs. It does not have any significance
+ * beyond that.
+ */
struct drm_plane *primary;
+
+ /**
+ * @cursor:
+ * Cursor plane for this CRTC. Note that this is only relevant for
+	 * legacy IOCTLs; it specifies the plane implicitly used by the SETCURSOR
+ * and SETCURSOR2 IOCTLs. It does not have any significance
+ * beyond that.
+ */
struct drm_plane *cursor;
/**
@@ -772,30 +869,94 @@ struct drm_crtc {
*/
unsigned index;
- /* position of cursor plane on crtc */
+ /**
+ * @cursor_x: Current x position of the cursor, used for universal
+ * cursor planes because the SETCURSOR IOCTL only can update the
+ * framebuffer without supplying the coordinates. Drivers should not use
+ * this directly, atomic drivers should look at &drm_plane_state.crtc_x
+ * of the cursor plane instead.
+ */
int cursor_x;
+ /**
+ * @cursor_y: Current y position of the cursor, used for universal
+ * cursor planes because the SETCURSOR IOCTL only can update the
+ * framebuffer without supplying the coordinates. Drivers should not use
+ * this directly, atomic drivers should look at &drm_plane_state.crtc_y
+ * of the cursor plane instead.
+ */
int cursor_y;
+ /**
+ * @enabled:
+ *
+ * Is this CRTC enabled? Should only be used by legacy drivers, atomic
+ * drivers should instead consult &drm_crtc_state.enable and
+ * &drm_crtc_state.active. Atomic drivers can update this by calling
+ * drm_atomic_helper_update_legacy_modeset_state().
+ */
bool enabled;
- /* Requested mode from modesetting. */
+ /**
+ * @mode:
+ *
+ * Current mode timings. Should only be used by legacy drivers, atomic
+ * drivers should instead consult &drm_crtc_state.mode. Atomic drivers
+ * can update this by calling
+ * drm_atomic_helper_update_legacy_modeset_state().
+ */
struct drm_display_mode mode;
- /* Programmed mode in hw, after adjustments for encoders,
- * crtc, panel scaling etc. Needed for timestamping etc.
+ /**
+ * @hwmode:
+ *
+ * Programmed mode in hw, after adjustments for encoders, crtc, panel
+ * scaling etc. Should only be used by legacy drivers, for high
+ * precision vblank timestamps in
+ * drm_calc_vbltimestamp_from_scanoutpos().
+ *
+ * Note that atomic drivers should not use this, but instead use
+ * &drm_crtc_state.adjusted_mode. And for high-precision timestamps
+	 * drm_calc_vbltimestamp_from_scanoutpos() uses &drm_vblank_crtc.hwmode,
+ * which is filled out by calling drm_calc_timestamping_constants().
*/
struct drm_display_mode hwmode;
- int x, y;
+ /**
+ * @x:
+ * x position on screen. Should only be used by legacy drivers, atomic
+ * drivers should look at &drm_plane_state.crtc_x of the primary plane
+ * instead. Updated by calling
+ * drm_atomic_helper_update_legacy_modeset_state().
+ */
+ int x;
+ /**
+ * @y:
+ * y position on screen. Should only be used by legacy drivers, atomic
+ * drivers should look at &drm_plane_state.crtc_y of the primary plane
+ * instead. Updated by calling
+ * drm_atomic_helper_update_legacy_modeset_state().
+ */
+ int y;
+
+ /** @funcs: CRTC control functions */
const struct drm_crtc_funcs *funcs;
- /* Legacy FB CRTC gamma size for reporting to userspace */
+ /**
+ * @gamma_size: Size of legacy gamma ramp reported to userspace. Set up
+ * by calling drm_mode_crtc_set_gamma_size().
+ */
uint32_t gamma_size;
+
+ /**
+ * @gamma_store: Gamma ramp values used by the legacy SETGAMMA and
+	 * GETGAMMA IOCTLs. Set up by calling drm_mode_crtc_set_gamma_size().
+ */
uint16_t *gamma_store;
- /* if you are using the helper */
+ /** @helper_private: mid-layer private data */
const struct drm_crtc_helper_funcs *helper_private;
+ /** @properties: property tracking for this CRTC */
struct drm_object_properties properties;
/**
@@ -865,7 +1026,6 @@ struct drm_crtc {
*
* spinlock to protect the fences in the fence_context.
*/
-
spinlock_t fence_lock;
/**
* @fence_seqno:
@@ -935,8 +1095,8 @@ static inline unsigned int drm_crtc_index(const struct drm_crtc *crtc)
* drm_crtc_mask - find the mask of a registered CRTC
* @crtc: CRTC to find mask for
*
- * Given a registered CRTC, return the mask bit of that CRTC for an
- * encoder's possible_crtcs field.
+ * Given a registered CRTC, return the mask bit of that CRTC for the
+ * &drm_encoder.possible_crtcs and &drm_plane.possible_crtcs fields.
*/
static inline uint32_t drm_crtc_mask(const struct drm_crtc *crtc)
{
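A minimal sketch of drm_crtc_mask() being used to build an encoder's possible_crtcs bitmask (hypothetical helper name):

	static void xyz_encoder_set_possible_crtcs(struct drm_device *dev,
						   struct drm_encoder *encoder)
	{
		struct drm_crtc *crtc;

		encoder->possible_crtcs = 0;
		drm_for_each_crtc(crtc, dev)
			encoder->possible_crtcs |= drm_crtc_mask(crtc);
	}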
diff --git a/include/drm/drm_debugfs_crc.h b/include/drm/drm_debugfs_crc.h
index 7d63b1d4adb9..b225eeb30d05 100644
--- a/include/drm/drm_debugfs_crc.h
+++ b/include/drm/drm_debugfs_crc.h
@@ -43,6 +43,7 @@ struct drm_crtc_crc_entry {
* @lock: protects the fields in this struct
* @source: name of the currently configured source of CRCs
* @opened: whether userspace has opened the data file for reading
+ * @overflow: whether an overflow occurred.
* @entries: array of entries, with size of %DRM_CRC_ENTRIES_NR
* @head: head of circular queue
* @tail: tail of circular queue
@@ -52,7 +53,7 @@ struct drm_crtc_crc_entry {
struct drm_crtc_crc {
spinlock_t lock;
const char *source;
- bool opened;
+ bool opened, overflow;
struct drm_crtc_crc_entry *entries;
int head, tail;
size_t values_cnt;
diff --git a/include/drm/drm_device.h b/include/drm/drm_device.h
index 858ba19a3e29..f9c6e0e3aec7 100644
--- a/include/drm/drm_device.h
+++ b/include/drm/drm_device.h
@@ -74,6 +74,27 @@ struct drm_device {
struct mutex filelist_mutex;
struct list_head filelist;
+ /**
+ * @filelist_internal:
+ *
+ * List of open DRM files for in-kernel clients. Protected by @filelist_mutex.
+ */
+ struct list_head filelist_internal;
+
+ /**
+ * @clientlist_mutex:
+ *
+ * Protects @clientlist access.
+ */
+ struct mutex clientlist_mutex;
+
+ /**
+ * @clientlist:
+ *
+ * List of in-kernel clients. Protected by @clientlist_mutex.
+ */
+ struct list_head clientlist;
+
/** \name Memory management */
/*@{ */
struct list_head maplist; /**< Linked list of regions */
diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h
index c01564991a9f..05cc31b5db16 100644
--- a/include/drm/drm_dp_helper.h
+++ b/include/drm/drm_dp_helper.h
@@ -1078,6 +1078,25 @@ struct drm_dp_aux_msg {
size_t size;
};
+struct cec_adapter;
+struct edid;
+
+/**
+ * struct drm_dp_aux_cec - DisplayPort CEC-Tunneling-over-AUX
+ * @lock: mutex protecting this struct
+ * @adap: the CEC adapter for CEC-Tunneling-over-AUX support.
+ * @name: name of the CEC adapter
+ * @parent: parent device of the CEC adapter
+ * @unregister_work: unregister the CEC adapter
+ */
+struct drm_dp_aux_cec {
+ struct mutex lock;
+ struct cec_adapter *adap;
+ const char *name;
+ struct device *parent;
+ struct delayed_work unregister_work;
+};
+
/**
* struct drm_dp_aux - DisplayPort AUX channel
* @name: user-visible name of this AUX channel and the I2C-over-AUX adapter
@@ -1136,6 +1155,10 @@ struct drm_dp_aux {
* @i2c_defer_count: Counts I2C DEFERs, used for DP validation.
*/
unsigned i2c_defer_count;
+ /**
+ * @cec: struct containing fields used for CEC-Tunneling-over-AUX.
+ */
+ struct drm_dp_aux_cec cec;
};
ssize_t drm_dp_dpcd_read(struct drm_dp_aux *aux, unsigned int offset,
@@ -1258,4 +1281,37 @@ drm_dp_has_quirk(const struct drm_dp_desc *desc, enum drm_dp_quirk quirk)
return desc->quirks & BIT(quirk);
}
+#ifdef CONFIG_DRM_DP_CEC
+void drm_dp_cec_irq(struct drm_dp_aux *aux);
+void drm_dp_cec_register_connector(struct drm_dp_aux *aux, const char *name,
+ struct device *parent);
+void drm_dp_cec_unregister_connector(struct drm_dp_aux *aux);
+void drm_dp_cec_set_edid(struct drm_dp_aux *aux, const struct edid *edid);
+void drm_dp_cec_unset_edid(struct drm_dp_aux *aux);
+#else
+static inline void drm_dp_cec_irq(struct drm_dp_aux *aux)
+{
+}
+
+static inline void drm_dp_cec_register_connector(struct drm_dp_aux *aux,
+ const char *name,
+ struct device *parent)
+{
+}
+
+static inline void drm_dp_cec_unregister_connector(struct drm_dp_aux *aux)
+{
+}
+
+static inline void drm_dp_cec_set_edid(struct drm_dp_aux *aux,
+ const struct edid *edid)
+{
+}
+
+static inline void drm_dp_cec_unset_edid(struct drm_dp_aux *aux)
+{
+}
+
+#endif
+
#endif /* _DRM_DP_HELPER_H_ */
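A minimal sketch of the expected CEC-Tunneling-over-AUX flow: register the adapter with drm_dp_cec_register_connector() once the connector is registered, forward ESI interrupts with drm_dp_cec_irq(), and update the adapter on EDID changes. The xyz_* wrapper is hypothetical:

	static void xyz_dp_update_cec(struct drm_dp_aux *aux, const struct edid *edid)
	{
		if (edid)
			drm_dp_cec_set_edid(aux, edid);	/* sink present: enable CEC */
		else
			drm_dp_cec_unset_edid(aux);	/* sink gone: disable CEC */
	}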
diff --git a/include/drm/drm_drv.h b/include/drm/drm_drv.h
index 7e545f5f94d3..46a8009784df 100644
--- a/include/drm/drm_drv.h
+++ b/include/drm/drm_drv.h
@@ -649,6 +649,35 @@ static inline bool drm_dev_is_unplugged(struct drm_device *dev)
return true;
}
+/**
+ * drm_core_check_feature - check driver feature flags
+ * @dev: DRM device to check
+ * @feature: feature flag
+ *
+ * This checks @dev for driver features, see &drm_driver.driver_features and the
+ * various DRIVER_\* flags.
+ *
+ * Returns true if the @feature is supported, false otherwise.
+ */
+static inline bool drm_core_check_feature(struct drm_device *dev, int feature)
+{
+ return dev->driver->driver_features & feature;
+}
+
+/**
+ * drm_drv_uses_atomic_modeset - check if the driver implements
+ * atomic_commit()
+ * @dev: DRM device
+ *
+ * This check is useful if drivers do not have DRIVER_ATOMIC set but
+ * have atomic modesetting internally implemented.
+ */
+static inline bool drm_drv_uses_atomic_modeset(struct drm_device *dev)
+{
+ return drm_core_check_feature(dev, DRIVER_ATOMIC) ||
+ dev->mode_config.funcs->atomic_commit != NULL;
+}
+
int drm_dev_set_unique(struct drm_device *dev, const char *name);
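A minimal sketch of how a driver or helper might use the relocated feature checks (hypothetical function name):

	static int xyz_commit_prepare(struct drm_device *dev)
	{
		if (!drm_core_check_feature(dev, DRIVER_MODESET))
			return -EINVAL;

		if (drm_drv_uses_atomic_modeset(dev)) {
			/* atomic state tracking is available on this device */
		}

		return 0;
	}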
diff --git a/include/drm/drm_encoder.h b/include/drm/drm_encoder.h
index fb299696c7c4..4f597c0730b4 100644
--- a/include/drm/drm_encoder.h
+++ b/include/drm/drm_encoder.h
@@ -191,12 +191,24 @@ int drm_encoder_init(struct drm_device *dev,
* Given a registered encoder, return the index of that encoder within a DRM
* device's list of encoders.
*/
-static inline unsigned int drm_encoder_index(struct drm_encoder *encoder)
+static inline unsigned int drm_encoder_index(const struct drm_encoder *encoder)
{
return encoder->index;
}
/**
+ * drm_encoder_mask - find the mask of a registered ENCODER
+ * @encoder: encoder to find mask for
+ *
+ * Given a registered encoder, return the mask bit of that encoder for an
+ * encoder's possible_clones field.
+ */
+static inline u32 drm_encoder_mask(const struct drm_encoder *encoder)
+{
+ return 1 << drm_encoder_index(encoder);
+}
+
+/**
* drm_encoder_crtc_ok - can a given crtc drive a given encoder?
* @encoder: encoder to test
* @crtc: crtc to test
@@ -241,7 +253,7 @@ void drm_encoder_cleanup(struct drm_encoder *encoder);
*/
#define drm_for_each_encoder_mask(encoder, dev, encoder_mask) \
list_for_each_entry((encoder), &(dev)->mode_config.encoder_list, head) \
- for_each_if ((encoder_mask) & (1 << drm_encoder_index(encoder)))
+ for_each_if ((encoder_mask) & drm_encoder_mask(encoder))
/**
* drm_for_each_encoder - iterate over all encoders
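A minimal sketch of drm_encoder_mask() feeding drm_for_each_encoder_mask() (hypothetical function name):

	static void xyz_enable_selected_encoders(struct drm_device *dev,
						 const struct drm_crtc_state *crtc_state)
	{
		struct drm_encoder *encoder;

		drm_for_each_encoder_mask(encoder, dev, crtc_state->encoder_mask) {
			/* program each encoder whose drm_encoder_mask() bit is set */
		}
	}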
diff --git a/include/drm/drm_fb_cma_helper.h b/include/drm/drm_fb_cma_helper.h
index d532f88a8d55..96e26e3b9a0c 100644
--- a/include/drm/drm_fb_cma_helper.h
+++ b/include/drm/drm_fb_cma_helper.h
@@ -16,16 +16,10 @@ struct drm_mode_fb_cmd2;
struct drm_plane;
struct drm_plane_state;
-int drm_fb_cma_fbdev_init_with_funcs(struct drm_device *dev,
- unsigned int preferred_bpp, unsigned int max_conn_count,
- const struct drm_framebuffer_funcs *funcs);
int drm_fb_cma_fbdev_init(struct drm_device *dev, unsigned int preferred_bpp,
unsigned int max_conn_count);
void drm_fb_cma_fbdev_fini(struct drm_device *dev);
-struct drm_fbdev_cma *drm_fbdev_cma_init_with_funcs(struct drm_device *dev,
- unsigned int preferred_bpp, unsigned int max_conn_count,
- const struct drm_framebuffer_funcs *funcs);
struct drm_fbdev_cma *drm_fbdev_cma_init(struct drm_device *dev,
unsigned int preferred_bpp, unsigned int max_conn_count);
void drm_fbdev_cma_fini(struct drm_fbdev_cma *fbdev_cma);
diff --git a/include/drm/drm_fb_helper.h b/include/drm/drm_fb_helper.h
index b069433e7fc1..5db08c8f1d25 100644
--- a/include/drm/drm_fb_helper.h
+++ b/include/drm/drm_fb_helper.h
@@ -32,6 +32,7 @@
struct drm_fb_helper;
+#include <drm/drm_client.h>
#include <drm/drm_crtc.h>
#include <drm/drm_device.h>
#include <linux/kgdb.h>
@@ -154,6 +155,20 @@ struct drm_fb_helper_connector {
* operations.
*/
struct drm_fb_helper {
+ /**
+ * @client:
+ *
+ * DRM client used by the generic fbdev emulation.
+ */
+ struct drm_client_dev client;
+
+ /**
+ * @buffer:
+ *
+ * Framebuffer used by the generic fbdev emulation.
+ */
+ struct drm_client_buffer *buffer;
+
struct drm_framebuffer *fb;
struct drm_device *dev;
int crtc_count;
@@ -234,6 +249,12 @@ struct drm_fb_helper {
int preferred_bpp;
};
+static inline struct drm_fb_helper *
+drm_fb_helper_from_client(struct drm_client_dev *client)
+{
+ return container_of(client, struct drm_fb_helper, client);
+}
+
/**
* define DRM_FB_HELPER_DEFAULT_OPS - helper define for drm drivers
*
@@ -330,6 +351,10 @@ void drm_fb_helper_fbdev_teardown(struct drm_device *dev);
void drm_fb_helper_lastclose(struct drm_device *dev);
void drm_fb_helper_output_poll_changed(struct drm_device *dev);
+
+int drm_fb_helper_generic_probe(struct drm_fb_helper *fb_helper,
+ struct drm_fb_helper_surface_size *sizes);
+int drm_fbdev_generic_setup(struct drm_device *dev, unsigned int preferred_bpp);
#else
static inline void drm_fb_helper_prepare(struct drm_device *dev,
struct drm_fb_helper *helper,
@@ -564,6 +589,19 @@ static inline void drm_fb_helper_output_poll_changed(struct drm_device *dev)
{
}
+static inline int
+drm_fb_helper_generic_probe(struct drm_fb_helper *fb_helper,
+ struct drm_fb_helper_surface_size *sizes)
+{
+ return 0;
+}
+
+static inline int
+drm_fbdev_generic_setup(struct drm_device *dev, unsigned int preferred_bpp)
+{
+ return 0;
+}
+
#endif
static inline int
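A minimal sketch of a driver opting into the generic fbdev emulation after registering its device; the xyz_* name is hypothetical and 32 is just an example bpp:

	static void xyz_setup_fbdev(struct drm_device *dev)
	{
		int ret = drm_fbdev_generic_setup(dev, 32);

		if (ret)
			DRM_DEV_ERROR(dev->dev, "fbdev emulation setup failed: %d\n", ret);
	}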
diff --git a/include/drm/drm_file.h b/include/drm/drm_file.h
index 027ac16da3d1..26485acc51d7 100644
--- a/include/drm/drm_file.h
+++ b/include/drm/drm_file.h
@@ -193,6 +193,13 @@ struct drm_file {
unsigned aspect_ratio_allowed:1;
/**
+ * @writeback_connectors:
+ *
+ * True if client understands writeback connectors
+ */
+ unsigned writeback_connectors:1;
+
+ /**
* @is_master:
*
* This client is the creator of @master. Protected by struct
diff --git a/include/drm/drm_fourcc.h b/include/drm/drm_fourcc.h
index 3e86408dac9f..f9c15845f465 100644
--- a/include/drm/drm_fourcc.h
+++ b/include/drm/drm_fourcc.h
@@ -39,6 +39,7 @@ struct drm_mode_fb_cmd2;
* @hsub: Horizontal chroma subsampling factor
* @vsub: Vertical chroma subsampling factor
* @has_alpha: Does the format embed an alpha component?
+ * @is_yuv: Is it a YUV format?
*/
struct drm_format_info {
u32 format;
@@ -48,6 +49,7 @@ struct drm_format_info {
u8 hsub;
u8 vsub;
bool has_alpha;
+ bool is_yuv;
};
/**
diff --git a/include/drm/drm_mm.h b/include/drm/drm_mm.h
index 101f566ae43d..2c3bbb43c7d1 100644
--- a/include/drm/drm_mm.h
+++ b/include/drm/drm_mm.h
@@ -109,6 +109,38 @@ enum drm_mm_insert_mode {
* Allocates the node from the bottom of the found hole.
*/
DRM_MM_INSERT_EVICT,
+
+ /**
+ * @DRM_MM_INSERT_ONCE:
+ *
+	 * Only check the first hole for suitability and report -ENOSPC
+ * immediately otherwise, rather than check every hole until a
+ * suitable one is found. Can only be used in conjunction with another
+ * search method such as DRM_MM_INSERT_HIGH or DRM_MM_INSERT_LOW.
+ */
+ DRM_MM_INSERT_ONCE = BIT(31),
+
+ /**
+ * @DRM_MM_INSERT_HIGHEST:
+ *
+ * Only check the highest hole (the hole with the largest address) and
+ * insert the node at the top of the hole or report -ENOSPC if
+ * unsuitable.
+ *
+ * Does not search all holes.
+ */
+ DRM_MM_INSERT_HIGHEST = DRM_MM_INSERT_HIGH | DRM_MM_INSERT_ONCE,
+
+ /**
+ * @DRM_MM_INSERT_LOWEST:
+ *
+ * Only check the lowest hole (the hole with the smallest address) and
+ * insert the node at the bottom of the hole or report -ENOSPC if
+ * unsuitable.
+ *
+ * Does not search all holes.
+ */
+ DRM_MM_INSERT_LOWEST = DRM_MM_INSERT_LOW | DRM_MM_INSERT_ONCE,
};
/**
@@ -173,7 +205,7 @@ struct drm_mm {
struct drm_mm_node head_node;
/* Keep an interval_tree for fast lookup of drm_mm_nodes by address. */
struct rb_root_cached interval_tree;
- struct rb_root holes_size;
+ struct rb_root_cached holes_size;
struct rb_root holes_addr;
unsigned long scan_active;
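A minimal sketch of the new one-shot modes, trying the single highest hole first and falling back to a bottom-up search; xyz_alloc_node() is hypothetical and the range/alignment values are placeholders:

	static int xyz_alloc_node(struct drm_mm *mm, struct drm_mm_node *node, u64 size)
	{
		int err;

		err = drm_mm_insert_node_in_range(mm, node, size, 0, 0,
						  0, U64_MAX, DRM_MM_INSERT_HIGHEST);
		if (err == -ENOSPC)
			err = drm_mm_insert_node_in_range(mm, node, size, 0, 0,
							  0, U64_MAX, DRM_MM_INSERT_LOW);
		return err;
	}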
diff --git a/include/drm/drm_mode_config.h b/include/drm/drm_mode_config.h
index 33b3a96d66d0..a0b202e1d69a 100644
--- a/include/drm/drm_mode_config.h
+++ b/include/drm/drm_mode_config.h
@@ -329,10 +329,10 @@ struct drm_mode_config_funcs {
/**
* struct drm_mode_config - Mode configuration control structure
- * @min_width: minimum pixel width on this device
- * @min_height: minimum pixel height on this device
- * @max_width: maximum pixel width on this device
- * @max_height: maximum pixel height on this device
+ * @min_width: minimum fb pixel width on this device
+ * @min_height: minimum fb pixel height on this device
+ * @max_width: maximum fb pixel width on this device
+ * @max_height: maximum fb pixel height on this device
* @funcs: core driver provided mode setting functions
* @fb_base: base address of the framebuffer
* @poll_enabled: track polling support for this device
@@ -727,6 +727,11 @@ struct drm_mode_config {
*/
struct drm_property *aspect_ratio_property;
/**
+ * @content_type_property: Optional connector property to control the
+ * HDMI infoframe content type setting.
+ */
+ struct drm_property *content_type_property;
+ /**
* @degamma_lut_property: Optional CRTC property to set the LUT used to
* convert the framebuffer's colors to linear gamma.
*/
@@ -779,6 +784,29 @@ struct drm_mode_config {
*/
struct drm_property *panel_orientation_property;
+ /**
+ * @writeback_fb_id_property: Property for writeback connectors, storing
+ * the ID of the output framebuffer.
+ * See also: drm_writeback_connector_init()
+ */
+ struct drm_property *writeback_fb_id_property;
+
+ /**
+ * @writeback_pixel_formats_property: Property for writeback connectors,
+ * storing an array of the supported pixel formats for the writeback
+ * engine (read-only).
+ * See also: drm_writeback_connector_init()
+ */
+ struct drm_property *writeback_pixel_formats_property;
+ /**
+ * @writeback_out_fence_ptr_property: Property for writeback connectors,
+ * fd pointer representing the outgoing fences for a writeback
+ * connector. Userspace should provide a pointer to a value of type s32,
+ * and then cast that pointer to u64.
+ * See also: drm_writeback_connector_init()
+ */
+ struct drm_property *writeback_out_fence_ptr_property;
+
/* dumb ioctl parameters */
uint32_t preferred_depth, prefer_shadow;
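
For context only, a hedged sketch of how userspace might drive the new writeback properties through libdrm's atomic API. The property IDs fb_id_prop and out_fence_prop are assumed to have been looked up beforehand (e.g. the "WRITEBACK_FB_ID" and "WRITEBACK_OUT_FENCE_PTR" properties via drmModeObjectGetProperties()); queue_writeback() is hypothetical.

	#include <stdint.h>
	#include <xf86drmMode.h>

	/*
	 * Sketch: attach an output framebuffer and request an out-fence on a
	 * writeback connector in one atomic request.
	 */
	static int queue_writeback(drmModeAtomicReqPtr req, uint32_t connector_id,
				   uint32_t fb_id_prop, uint32_t out_fence_prop,
				   uint32_t fb_id, int32_t *out_fence_fd)
	{
		int ret;

		ret = drmModeAtomicAddProperty(req, connector_id, fb_id_prop, fb_id);
		if (ret < 0)
			return ret;

		/*
		 * As described above, userspace passes a pointer to an s32 cast
		 * to u64; the kernel writes the fence fd into *out_fence_fd.
		 */
		ret = drmModeAtomicAddProperty(req, connector_id, out_fence_prop,
					       (uint64_t)(uintptr_t)out_fence_fd);
		return ret < 0 ? ret : 0;
	}
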
diff --git a/include/drm/drm_modes.h b/include/drm/drm_modes.h
index b159fe07fcf9..baded6514456 100644
--- a/include/drm/drm_modes.h
+++ b/include/drm/drm_modes.h
@@ -530,7 +530,7 @@ drm_mode_validate_ycbcr420(const struct drm_display_mode *mode,
void drm_mode_prune_invalid(struct drm_device *dev,
struct list_head *mode_list, bool verbose);
void drm_mode_sort(struct list_head *mode_list);
-void drm_mode_connector_list_update(struct drm_connector *connector);
+void drm_connector_list_update(struct drm_connector *connector);
/* parsing cmdline modes */
bool
diff --git a/include/drm/drm_modeset_helper_vtables.h b/include/drm/drm_modeset_helper_vtables.h
index 35e2a3a79fc5..61142aa0ab23 100644
--- a/include/drm/drm_modeset_helper_vtables.h
+++ b/include/drm/drm_modeset_helper_vtables.h
@@ -785,7 +785,7 @@ struct drm_connector_helper_funcs {
*
* This function should fill in all modes currently valid for the sink
* into the &drm_connector.probed_modes list. It should also update the
- * EDID property by calling drm_mode_connector_update_edid_property().
+ * EDID property by calling drm_connector_update_edid_property().
*
* The usual way to implement this is to cache the EDID retrieved in the
* probe callback somewhere in the driver-private connector structure.
@@ -974,6 +974,21 @@ struct drm_connector_helper_funcs {
*/
int (*atomic_check)(struct drm_connector *connector,
struct drm_connector_state *state);
+
+ /**
+ * @atomic_commit:
+ *
+ * This hook is to be used by drivers implementing writeback connectors
+	 * that need a point at which to commit the writeback job to the hardware.
+ * The writeback_job to commit is available in
+ * &drm_connector_state.writeback_job.
+ *
+ * This hook is optional.
+ *
+ * This callback is used by the atomic modeset helpers.
+ */
+ void (*atomic_commit)(struct drm_connector *connector,
+ struct drm_connector_state *state);
};
/**
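
Not part of the patch, but a minimal sketch of how a writeback driver might wire up the new @atomic_commit hook. The foo_* names are hypothetical; drm_writeback_queue_job() and drm_connector_to_writeback() come from the new drm_writeback.h added later in this series.

	#include <drm/drm_modeset_helper_vtables.h>
	#include <drm/drm_writeback.h>

	/* Assumed driver-specific hook (not part of the DRM core). */
	void foo_hw_program_writeback(struct drm_writeback_connector *wb,
				      struct drm_framebuffer *fb);

	/*
	 * Hypothetical hook: hand the queued writeback job to the hardware
	 * once the atomic commit reaches this connector.
	 */
	static void foo_wb_atomic_commit(struct drm_connector *connector,
					 struct drm_connector_state *state)
	{
		struct drm_writeback_connector *wb =
			drm_connector_to_writeback(connector);

		if (!state->writeback_job || !state->writeback_job->fb)
			return;

		foo_hw_program_writeback(wb, state->writeback_job->fb);

		drm_writeback_queue_job(wb, state->writeback_job);
		state->writeback_job = NULL;
	}

	static const struct drm_connector_helper_funcs foo_wb_helper_funcs = {
		.atomic_commit = foo_wb_atomic_commit,
	};
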
diff --git a/include/drm/drm_of.h b/include/drm/drm_of.h
index b93c239afb60..ead34ab5ca4e 100644
--- a/include/drm/drm_of.h
+++ b/include/drm/drm_of.h
@@ -17,6 +17,8 @@ struct drm_bridge;
struct device_node;
#ifdef CONFIG_OF
+uint32_t drm_of_crtc_port_mask(struct drm_device *dev,
+ struct device_node *port);
uint32_t drm_of_find_possible_crtcs(struct drm_device *dev,
struct device_node *port);
void drm_of_component_match_add(struct device *master,
@@ -34,6 +36,12 @@ int drm_of_find_panel_or_bridge(const struct device_node *np,
struct drm_panel **panel,
struct drm_bridge **bridge);
#else
+static inline uint32_t drm_of_crtc_port_mask(struct drm_device *dev,
+ struct device_node *port)
+{
+ return 0;
+}
+
static inline uint32_t drm_of_find_possible_crtcs(struct drm_device *dev,
struct device_node *port)
{
diff --git a/include/drm/drm_panel.h b/include/drm/drm_panel.h
index 14ac240a1f64..582a0ec0aa70 100644
--- a/include/drm/drm_panel.h
+++ b/include/drm/drm_panel.h
@@ -89,6 +89,7 @@ struct drm_panel {
struct drm_device *drm;
struct drm_connector *connector;
struct device *dev;
+ struct device_link *link;
const struct drm_panel_funcs *funcs;
@@ -199,7 +200,7 @@ struct drm_panel *of_drm_find_panel(const struct device_node *np);
#else
static inline struct drm_panel *of_drm_find_panel(const struct device_node *np)
{
- return NULL;
+ return ERR_PTR(-ENODEV);
}
#endif
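
Since of_drm_find_panel() now returns an error pointer instead of NULL, callers are expected to test the result with IS_ERR(). A hedged sketch of the updated caller pattern follows; foo_attach_panel() is hypothetical.

	#include <linux/err.h>
	#include <drm/drm_panel.h>

	/* Hypothetical caller, updated for the ERR_PTR() return convention. */
	static int foo_attach_panel(struct device_node *np, struct drm_panel **out)
	{
		struct drm_panel *panel = of_drm_find_panel(np);

		if (IS_ERR(panel))
			return PTR_ERR(panel);	/* e.g. -ENODEV when OF is disabled */

		*out = panel;
		return 0;
	}
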
diff --git a/include/drm/drm_pci.h b/include/drm/drm_pci.h
index 674599025d7d..8181e9e7cf1d 100644
--- a/include/drm/drm_pci.h
+++ b/include/drm/drm_pci.h
@@ -58,11 +58,4 @@ static inline int drm_get_pci_dev(struct pci_dev *pdev,
}
#endif
-#define DRM_PCIE_SPEED_25 1
-#define DRM_PCIE_SPEED_50 2
-#define DRM_PCIE_SPEED_80 4
-
-int drm_pcie_get_speed_cap_mask(struct drm_device *dev, u32 *speed_mask);
-int drm_pcie_get_max_link_width(struct drm_device *dev, u32 *mlw);
-
#endif /* _DRM_PCI_H_ */
diff --git a/include/drm/drm_plane.h b/include/drm/drm_plane.h
index 26fa50c2a50e..8a152dc16ea5 100644
--- a/include/drm/drm_plane.h
+++ b/include/drm/drm_plane.h
@@ -34,31 +34,15 @@ struct drm_modeset_acquire_ctx;
/**
* struct drm_plane_state - mutable plane state
- * @plane: backpointer to the plane
- * @crtc_w: width of visible portion of plane on crtc
- * @crtc_h: height of visible portion of plane on crtc
- * @src_x: left position of visible portion of plane within
- * plane (in 16.16)
- * @src_y: upper position of visible portion of plane within
- * plane (in 16.16)
- * @src_w: width of visible portion of plane (in 16.16)
- * @src_h: height of visible portion of plane (in 16.16)
- * @alpha: opacity of the plane
- * @rotation: rotation of the plane
- * @zpos: priority of the given plane on crtc (optional)
- * Note that multiple active planes on the same crtc can have an identical
- * zpos value. The rule to solving the conflict is to compare the plane
- * object IDs; the plane with a higher ID must be stacked on top of a
- * plane with a lower ID.
- * @normalized_zpos: normalized value of zpos: unique, range from 0 to N-1
- * where N is the number of active planes for given crtc. Note that
- * the driver must set drm_mode_config.normalize_zpos or call
- * drm_atomic_normalize_zpos() to update this before it can be trusted.
- * @src: clipped source coordinates of the plane (in 16.16)
- * @dst: clipped destination coordinates of the plane
- * @state: backpointer to global drm_atomic_state
+ *
+ * Please note that the destination coordinates @crtc_x, @crtc_y, @crtc_h and
+ * @crtc_w and the source coordinates @src_x, @src_y, @src_h and @src_w are the
+ * raw coordinates provided by userspace. Drivers should use
+ * drm_atomic_helper_check_plane_state() and only use the derived rectangles in
+ * @src and @dst to program the hardware.
*/
struct drm_plane_state {
+ /** @plane: backpointer to the plane */
struct drm_plane *plane;
/**
@@ -87,7 +71,7 @@ struct drm_plane_state {
* preserved.
*
* Drivers should store any implicit fence in this from their
- * &drm_plane_helper.prepare_fb callback. See drm_gem_fb_prepare_fb()
+ * &drm_plane_helper_funcs.prepare_fb callback. See drm_gem_fb_prepare_fb()
* and drm_gem_fb_simple_display_pipe_prepare_fb() for suitable helpers.
*/
struct dma_fence *fence;
@@ -108,20 +92,60 @@ struct drm_plane_state {
*/
int32_t crtc_y;
+ /** @crtc_w: width of visible portion of plane on crtc */
+ /** @crtc_h: height of visible portion of plane on crtc */
uint32_t crtc_w, crtc_h;
- /* Source values are 16.16 fixed point */
- uint32_t src_x, src_y;
+ /**
+ * @src_x: left position of visible portion of plane within plane (in
+ * 16.16 fixed point).
+ */
+ uint32_t src_x;
+ /**
+ * @src_y: upper position of visible portion of plane within plane (in
+ * 16.16 fixed point).
+ */
+ uint32_t src_y;
+ /** @src_w: width of visible portion of plane (in 16.16) */
+ /** @src_h: height of visible portion of plane (in 16.16) */
uint32_t src_h, src_w;
- /* Plane opacity */
+ /**
+ * @alpha:
+ * Opacity of the plane with 0 as completely transparent and 0xffff as
+ * completely opaque. See drm_plane_create_alpha_property() for more
+ * details.
+ */
u16 alpha;
- /* Plane rotation */
+ /**
+ * @rotation:
+ * Rotation of the plane. See drm_plane_create_rotation_property() for
+ * more details.
+ */
unsigned int rotation;
- /* Plane zpos */
+ /**
+ * @zpos:
+ * Priority of the given plane on crtc (optional).
+ *
+ * Note that multiple active planes on the same crtc can have an
+ * identical zpos value. The rule to solving the conflict is to compare
+ * the plane object IDs; the plane with a higher ID must be stacked on
+ * top of a plane with a lower ID.
+ *
+ * See drm_plane_create_zpos_property() and
+ * drm_plane_create_zpos_immutable_property() for more details.
+ */
unsigned int zpos;
+
+ /**
+ * @normalized_zpos:
+ * Normalized value of zpos: unique, range from 0 to N-1 where N is the
+ * number of active planes for given crtc. Note that the driver must set
+ * &drm_mode_config.normalize_zpos or call drm_atomic_normalize_zpos() to
+ * update this before it can be trusted.
+ */
unsigned int normalized_zpos;
/**
@@ -138,7 +162,8 @@ struct drm_plane_state {
*/
enum drm_color_range color_range;
- /* Clipped coordinates */
+ /** @src: clipped source coordinates of the plane (in 16.16) */
+ /** @dst: clipped destination coordinates of the plane */
struct drm_rect src, dst;
/**
@@ -157,6 +182,7 @@ struct drm_plane_state {
*/
struct drm_crtc_commit *commit;
+ /** @state: backpointer to global drm_atomic_state */
struct drm_atomic_state *state;
};
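
To illustrate the note above about using only the derived @src and @dst rectangles rather than the raw userspace coordinates, here is a hedged sketch of a plane atomic_check. The foo_* names are hypothetical; drm_atomic_helper_check_plane_state() is the existing helper.

	#include <drm/drm_atomic.h>
	#include <drm/drm_atomic_helper.h>
	#include <drm/drm_plane_helper.h>

	/*
	 * Hypothetical atomic_check: clip the raw coordinates first, then only
	 * look at state->src and state->dst.
	 */
	static int foo_plane_atomic_check(struct drm_plane *plane,
					  struct drm_plane_state *state)
	{
		struct drm_crtc_state *crtc_state;
		int ret;

		if (!state->crtc)
			return 0;

		crtc_state = drm_atomic_get_new_crtc_state(state->state, state->crtc);
		if (WARN_ON(!crtc_state))
			return -EINVAL;

		ret = drm_atomic_helper_check_plane_state(state, crtc_state,
							  DRM_PLANE_HELPER_NO_SCALING,
							  DRM_PLANE_HELPER_NO_SCALING,
							  false, true);
		if (ret)
			return ret;

		/*
		 * From here on, only state->src and state->dst should be used
		 * to program the hardware (typically in atomic_update).
		 */
		return 0;
	}
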
@@ -288,6 +314,8 @@ struct drm_plane_funcs {
* cleaned up by calling the @atomic_destroy_state hook in this
* structure.
*
+ * This callback is mandatory for atomic drivers.
+ *
* Atomic drivers which don't subclass &struct drm_plane_state should use
* drm_atomic_helper_plane_duplicate_state(). Drivers that subclass the
* state structure to extend it with driver-private state should use
@@ -314,6 +342,8 @@ struct drm_plane_funcs {
*
* Destroy a state duplicated with @atomic_duplicate_state and release
* or unreference all resources it references
+ *
+ * This callback is mandatory for atomic drivers.
*/
void (*atomic_destroy_state)(struct drm_plane *plane,
struct drm_plane_state *state);
@@ -431,7 +461,10 @@ struct drm_plane_funcs {
* This optional hook is used for the DRM to determine if the given
* format/modifier combination is valid for the plane. This allows the
* DRM to generate the correct format bitmask (which formats apply to
- * which modifier).
+	 * which modifier), and to validate modifiers at atomic_check time.
+ *
+ * If not present, then any modifier in the plane's modifier
+ * list is allowed with any of the plane's formats.
*
* Returns:
*
@@ -492,30 +525,27 @@ enum drm_plane_type {
/**
* struct drm_plane - central DRM plane control structure
- * @dev: DRM device this plane belongs to
- * @head: for list management
- * @name: human readable name, can be overwritten by the driver
- * @base: base mode object
- * @possible_crtcs: pipes this plane can be bound to
- * @format_types: array of formats supported by this plane
- * @format_count: number of formats supported
- * @format_default: driver hasn't supplied supported formats for the plane
- * @modifiers: array of modifiers supported by this plane
- * @modifier_count: number of modifiers supported
- * @old_fb: Temporary tracking of the old fb while a modeset is ongoing. Used by
- * drm_mode_set_config_internal() to implement correct refcounting.
- * @funcs: helper functions
- * @properties: property tracking for this plane
- * @type: type of plane (overlay, primary, cursor)
- * @alpha_property: alpha property for this plane
- * @zpos_property: zpos property for this plane
- * @rotation_property: rotation property for this plane
- * @helper_private: mid-layer private data
+ *
+ * Planes represent the scanout hardware of a display block. They receive their
+ * input data from a &drm_framebuffer and feed it to a &drm_crtc. Planes control
+ * how the framebuffer is composited, see `Plane Composition Properties`_ for
+ * more details, and are also involved in the color conversion of input
+ * pixels, see `Color Management Properties`_ for details on that.
*/
struct drm_plane {
+ /** @dev: DRM device this plane belongs to */
struct drm_device *dev;
+
+ /**
+ * @head:
+ *
+ * List of all planes on @dev, linked from &drm_mode_config.plane_list.
+ * Invariant over the lifetime of @dev and therefore does not need
+ * locking.
+ */
struct list_head head;
+ /** @name: human readable name, can be overwritten by the driver */
char *name;
/**
@@ -529,35 +559,62 @@ struct drm_plane {
*/
struct drm_modeset_lock mutex;
+ /** @base: base mode object */
struct drm_mode_object base;
+ /**
+ * @possible_crtcs: pipes this plane can be bound to constructed from
+ * drm_crtc_mask()
+ */
uint32_t possible_crtcs;
+ /** @format_types: array of formats supported by this plane */
uint32_t *format_types;
+ /** @format_count: Size of the array pointed at by @format_types. */
unsigned int format_count;
+ /**
+ * @format_default: driver hasn't supplied supported formats for the
+ * plane. Used by the drm_plane_init compatibility wrapper only.
+ */
bool format_default;
+ /** @modifiers: array of modifiers supported by this plane */
uint64_t *modifiers;
+	/** @modifier_count: Size of the array pointed at by @modifiers. */
unsigned int modifier_count;
/**
- * @crtc: Currently bound CRTC, only really meaningful for non-atomic
- * drivers. Atomic drivers should instead check &drm_plane_state.crtc.
+ * @crtc:
+ *
+ * Currently bound CRTC, only meaningful for non-atomic drivers. For
+ * atomic drivers this is forced to be NULL, atomic drivers should
+ * instead check &drm_plane_state.crtc.
*/
struct drm_crtc *crtc;
/**
- * @fb: Currently bound framebuffer, only really meaningful for
- * non-atomic drivers. Atomic drivers should instead check
- * &drm_plane_state.fb.
+ * @fb:
+ *
+ * Currently bound framebuffer, only meaningful for non-atomic drivers.
+ * For atomic drivers this is forced to be NULL, atomic drivers should
+ * instead check &drm_plane_state.fb.
*/
struct drm_framebuffer *fb;
+ /**
+ * @old_fb:
+ *
+ * Temporary tracking of the old fb while a modeset is ongoing. Only
+ * used by non-atomic drivers, forced to be NULL for atomic drivers.
+ */
struct drm_framebuffer *old_fb;
+ /** @funcs: plane control functions */
const struct drm_plane_funcs *funcs;
+ /** @properties: property tracking for this plane */
struct drm_object_properties properties;
+ /** @type: Type of plane, see &enum drm_plane_type for details. */
enum drm_plane_type type;
/**
@@ -566,6 +623,7 @@ struct drm_plane {
*/
unsigned index;
+ /** @helper_private: mid-layer private data */
const struct drm_plane_helper_funcs *helper_private;
/**
@@ -583,8 +641,23 @@ struct drm_plane {
*/
struct drm_plane_state *state;
+ /**
+ * @alpha_property:
+ * Optional alpha property for this plane. See
+ * drm_plane_create_alpha_property().
+ */
struct drm_property *alpha_property;
+ /**
+ * @zpos_property:
+ * Optional zpos property for this plane. See
+ * drm_plane_create_zpos_property().
+ */
struct drm_property *zpos_property;
+ /**
+ * @rotation_property:
+ * Optional rotation property for this plane. See
+ * drm_plane_create_rotation_property().
+ */
struct drm_property *rotation_property;
/**
@@ -632,10 +705,20 @@ void drm_plane_cleanup(struct drm_plane *plane);
* Given a registered plane, return the index of that plane within a DRM
* device's list of planes.
*/
-static inline unsigned int drm_plane_index(struct drm_plane *plane)
+static inline unsigned int drm_plane_index(const struct drm_plane *plane)
{
return plane->index;
}
+
+/**
+ * drm_plane_mask - find the mask of a registered plane
+ * @plane: plane to find mask for
+ */
+static inline u32 drm_plane_mask(const struct drm_plane *plane)
+{
+ return 1 << drm_plane_index(plane);
+}
+
struct drm_plane * drm_plane_from_index(struct drm_device *dev, int idx);
void drm_plane_force_disable(struct drm_plane *plane);
@@ -671,7 +754,7 @@ static inline struct drm_plane *drm_plane_find(struct drm_device *dev,
*/
#define drm_for_each_plane_mask(plane, dev, plane_mask) \
list_for_each_entry((plane), &(dev)->mode_config.plane_list, head) \
- for_each_if ((plane_mask) & (1 << drm_plane_index(plane)))
+ for_each_if ((plane_mask) & drm_plane_mask(plane))
/**
* drm_for_each_legacy_plane - iterate over all planes for legacy userspace
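
A small illustrative use of the new drm_plane_mask() helper outside the macro above, purely as a sketch; foo_collect_plane_mask() is hypothetical and ignores locking.

	#include <drm/drm_plane.h>

	/*
	 * Sketch: accumulate the mask of planes currently bound to @crtc,
	 * using drm_plane_mask() instead of open-coding
	 * 1 << drm_plane_index().
	 */
	static u32 foo_collect_plane_mask(struct drm_device *dev,
					  struct drm_crtc *crtc)
	{
		struct drm_plane *plane;
		u32 mask = 0;

		drm_for_each_plane(plane, dev)
			if (plane->state && plane->state->crtc == crtc)
				mask |= drm_plane_mask(plane);

		return mask;
	}
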
diff --git a/include/drm/drm_plane_helper.h b/include/drm/drm_plane_helper.h
index 28d7ce620729..26cee2934781 100644
--- a/include/drm/drm_plane_helper.h
+++ b/include/drm/drm_plane_helper.h
@@ -67,8 +67,10 @@ int drm_plane_helper_update(struct drm_plane *plane, struct drm_crtc *crtc,
int crtc_x, int crtc_y,
unsigned int crtc_w, unsigned int crtc_h,
uint32_t src_x, uint32_t src_y,
- uint32_t src_w, uint32_t src_h);
-int drm_plane_helper_disable(struct drm_plane *plane);
+ uint32_t src_w, uint32_t src_h,
+ struct drm_modeset_acquire_ctx *ctx);
+int drm_plane_helper_disable(struct drm_plane *plane,
+ struct drm_modeset_acquire_ctx *ctx);
/* For use by drm_crtc_helper.c */
int drm_plane_helper_commit(struct drm_plane *plane,
diff --git a/include/drm/drm_prime.h b/include/drm/drm_prime.h
index 4d5f5d6cf6a6..d716d653b096 100644
--- a/include/drm/drm_prime.h
+++ b/include/drm/drm_prime.h
@@ -82,7 +82,7 @@ int drm_gem_prime_fd_to_handle(struct drm_device *dev,
struct dma_buf *drm_gem_dmabuf_export(struct drm_device *dev,
struct dma_buf_export_info *exp_info);
void drm_gem_dmabuf_release(struct dma_buf *dma_buf);
-int drm_gem_map_attach(struct dma_buf *dma_buf, struct device *target_dev,
+int drm_gem_map_attach(struct dma_buf *dma_buf,
struct dma_buf_attachment *attach);
void drm_gem_map_detach(struct dma_buf *dma_buf,
struct dma_buf_attachment *attach);
@@ -93,10 +93,6 @@ void drm_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
enum dma_data_direction dir);
void *drm_gem_dmabuf_vmap(struct dma_buf *dma_buf);
void drm_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr);
-void *drm_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf,
- unsigned long page_num);
-void drm_gem_dmabuf_kunmap_atomic(struct dma_buf *dma_buf,
- unsigned long page_num, void *addr);
void *drm_gem_dmabuf_kmap(struct dma_buf *dma_buf, unsigned long page_num);
void drm_gem_dmabuf_kunmap(struct dma_buf *dma_buf, unsigned long page_num,
void *addr);
diff --git a/include/drm/drm_print.h b/include/drm/drm_print.h
index e1a46e9991cc..f3e6eed3e79c 100644
--- a/include/drm/drm_print.h
+++ b/include/drm/drm_print.h
@@ -69,16 +69,21 @@
struct drm_printer {
/* private: */
void (*printfn)(struct drm_printer *p, struct va_format *vaf);
+ void (*puts)(struct drm_printer *p, const char *str);
void *arg;
const char *prefix;
};
+void __drm_printfn_coredump(struct drm_printer *p, struct va_format *vaf);
+void __drm_puts_coredump(struct drm_printer *p, const char *str);
void __drm_printfn_seq_file(struct drm_printer *p, struct va_format *vaf);
+void __drm_puts_seq_file(struct drm_printer *p, const char *str);
void __drm_printfn_info(struct drm_printer *p, struct va_format *vaf);
void __drm_printfn_debug(struct drm_printer *p, struct va_format *vaf);
__printf(2, 3)
void drm_printf(struct drm_printer *p, const char *f, ...);
+void drm_puts(struct drm_printer *p, const char *str);
__printf(2, 0)
/**
@@ -105,6 +110,71 @@ drm_vprintf(struct drm_printer *p, const char *fmt, va_list *va)
drm_printf((printer), "%.*s" fmt, (indent), "\t\t\t\t\tX", ##__VA_ARGS__)
/**
+ * struct drm_print_iterator - local struct used with drm_printer_coredump
+ * @data: Pointer to the devcoredump output buffer
+ * @start: The offset within the buffer to start writing
+ * @remain: The number of bytes to write for this iteration
+ */
+struct drm_print_iterator {
+ void *data;
+ ssize_t start;
+ ssize_t remain;
+ /* private: */
+ ssize_t offset;
+};
+
+/**
+ * drm_coredump_printer - construct a &drm_printer that can output to a buffer
+ * from the read function for devcoredump
+ * @iter: A pointer to a struct drm_print_iterator for the read instance
+ *
+ * This wrapper extends drm_printf() to work with a dev_coredumpm() callback
+ * function. The passed in drm_print_iterator struct contains the buffer
+ * pointer, size and offset as passed in from devcoredump.
+ *
+ * For example::
+ *
+ * void coredump_read(char *buffer, loff_t offset, size_t count,
+ * void *data, size_t datalen)
+ * {
+ * struct drm_print_iterator iter;
+ * struct drm_printer p;
+ *
+ * iter.data = buffer;
+ * iter.start = offset;
+ * iter.remain = count;
+ *
+ * p = drm_coredump_printer(&iter);
+ *
+ *		drm_printf(&p, "foo=%d\n", foo);
+ * }
+ *
+ * void makecoredump(...)
+ * {
+ * ...
+ * dev_coredumpm(dev, THIS_MODULE, data, 0, GFP_KERNEL,
+ * coredump_read, ...)
+ * }
+ *
+ * RETURNS:
+ * The &drm_printer object
+ */
+static inline struct drm_printer
+drm_coredump_printer(struct drm_print_iterator *iter)
+{
+ struct drm_printer p = {
+ .printfn = __drm_printfn_coredump,
+ .puts = __drm_puts_coredump,
+ .arg = iter,
+ };
+
+ /* Set the internal offset of the iterator to zero */
+ iter->offset = 0;
+
+ return p;
+}
+
+/**
* drm_seq_file_printer - construct a &drm_printer that outputs to &seq_file
* @f: the &struct seq_file to output to
*
@@ -115,6 +185,7 @@ static inline struct drm_printer drm_seq_file_printer(struct seq_file *f)
{
struct drm_printer p = {
.printfn = __drm_printfn_seq_file,
+ .puts = __drm_puts_seq_file,
.arg = f,
};
return p;
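
A brief usage sketch for the seq_file printer together with the new drm_puts() fast path; foo_debugfs_show() is hypothetical.

	#include <linux/seq_file.h>
	#include <drm/drm_print.h>

	/*
	 * Hypothetical debugfs show callback: plain strings can go through the
	 * new drm_puts() path, formatted output still uses drm_printf().
	 */
	static int foo_debugfs_show(struct seq_file *m, void *data)
	{
		struct drm_printer p = drm_seq_file_printer(m);

		drm_puts(&p, "foo state:\n");
		drm_printf(&p, "\tactive: %d\n", 1);

		return 0;
	}
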
@@ -195,6 +266,7 @@ static inline struct drm_printer drm_debug_printer(const char *prefix)
#define DRM_UT_VBL 0x20
#define DRM_UT_STATE 0x40
#define DRM_UT_LEASE 0x80
+#define DRM_UT_DP 0x100
__printf(3, 4)
void drm_dev_printk(const struct device *dev, const char *level,
@@ -307,6 +379,11 @@ void drm_err(const char *format, ...);
#define DRM_DEBUG_LEASE(fmt, ...) \
drm_dbg(DRM_UT_LEASE, fmt, ##__VA_ARGS__)
+#define DRM_DEV_DEBUG_DP(dev, fmt, ...) \
+ drm_dev_dbg(dev, DRM_UT_DP, fmt, ## __VA_ARGS__)
+#define DRM_DEBUG_DP(fmt, ...)						\
+ drm_dbg(DRM_UT_DP, fmt, ## __VA_ARGS__)
+
#define _DRM_DEV_DEFINE_DEBUG_RATELIMITED(dev, category, fmt, ...) \
({ \
static DEFINE_RATELIMIT_STATE(_rs, \
diff --git a/include/drm/drm_property.h b/include/drm/drm_property.h
index 1d5c0b2a8956..c030f6ccab99 100644
--- a/include/drm/drm_property.h
+++ b/include/drm/drm_property.h
@@ -147,10 +147,10 @@ struct drm_property {
* properties are not exposed to legacy userspace.
*
* DRM_MODE_PROP_IMMUTABLE
- * Set for properties where userspace cannot be changed by
+ * Set for properties whose values cannot be changed by
* userspace. The kernel is allowed to update the value of these
* properties. This is generally used to expose probe state to
- * usersapce, e.g. the EDID, or the connector path property on DP
+ * userspace, e.g. the EDID, or the connector path property on DP
* MST sinks.
*/
uint32_t flags;
diff --git a/include/drm/drm_vma_manager.h b/include/drm/drm_vma_manager.h
index 8758df94e9a0..c7987daeaed0 100644
--- a/include/drm/drm_vma_manager.h
+++ b/include/drm/drm_vma_manager.h
@@ -41,6 +41,7 @@ struct drm_vma_offset_node {
rwlock_t vm_lock;
struct drm_mm_node vm_node;
struct rb_root vm_files;
+ bool readonly:1;
};
struct drm_vma_offset_manager {
diff --git a/include/drm/drm_writeback.h b/include/drm/drm_writeback.h
new file mode 100644
index 000000000000..23df9d463003
--- /dev/null
+++ b/include/drm/drm_writeback.h
@@ -0,0 +1,136 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * (C) COPYRIGHT 2016 ARM Limited. All rights reserved.
+ * Author: Brian Starkey <brian.starkey@arm.com>
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ */
+
+#ifndef __DRM_WRITEBACK_H__
+#define __DRM_WRITEBACK_H__
+#include <drm/drm_connector.h>
+#include <drm/drm_encoder.h>
+#include <linux/workqueue.h>
+
+struct drm_writeback_connector {
+ struct drm_connector base;
+
+ /**
+ * @encoder: Internal encoder used by the connector to fulfill
+ * the DRM framework requirements. The users of the
+ * @drm_writeback_connector control the behaviour of the @encoder
+ * by passing the @enc_funcs parameter to drm_writeback_connector_init()
+ * function.
+ */
+ struct drm_encoder encoder;
+
+ /**
+ * @pixel_formats_blob_ptr:
+ *
+ * DRM blob property data for the pixel formats list on writeback
+ * connectors
+ * See also drm_writeback_connector_init()
+ */
+ struct drm_property_blob *pixel_formats_blob_ptr;
+
+ /** @job_lock: Protects job_queue */
+ spinlock_t job_lock;
+
+ /**
+ * @job_queue:
+ *
+ * Holds a list of a connector's writeback jobs; the last item is the
+ * most recent. The first item may be either waiting for the hardware
+ * to begin writing, or currently being written.
+ *
+ * See also: drm_writeback_queue_job() and
+ * drm_writeback_signal_completion()
+ */
+ struct list_head job_queue;
+
+ /**
+ * @fence_context:
+ *
+ * timeline context used for fence operations.
+ */
+ unsigned int fence_context;
+ /**
+ * @fence_lock:
+ *
+ * spinlock to protect the fences in the fence_context.
+ */
+ spinlock_t fence_lock;
+ /**
+ * @fence_seqno:
+ *
+ * Seqno variable used as monotonic counter for the fences
+ * created on the connector's timeline.
+ */
+ unsigned long fence_seqno;
+ /**
+ * @timeline_name:
+ *
+ * The name of the connector's fence timeline.
+ */
+ char timeline_name[32];
+};
+
+struct drm_writeback_job {
+ /**
+ * @cleanup_work:
+ *
+ * Used to allow drm_writeback_signal_completion to defer dropping the
+ * framebuffer reference to a workqueue
+ */
+ struct work_struct cleanup_work;
+
+ /**
+ * @list_entry:
+ *
+ * List item for the writeback connector's @job_queue
+ */
+ struct list_head list_entry;
+
+ /**
+ * @fb:
+ *
+ * Framebuffer to be written to by the writeback connector. Do not set
+ * directly, use drm_atomic_set_writeback_fb_for_connector()
+ */
+ struct drm_framebuffer *fb;
+
+ /**
+ * @out_fence:
+ *
+ * Fence which will signal once the writeback has completed
+ */
+ struct dma_fence *out_fence;
+};
+
+static inline struct drm_writeback_connector *
+drm_connector_to_writeback(struct drm_connector *connector)
+{
+ return container_of(connector, struct drm_writeback_connector, base);
+}
+
+int drm_writeback_connector_init(struct drm_device *dev,
+ struct drm_writeback_connector *wb_connector,
+ const struct drm_connector_funcs *con_funcs,
+ const struct drm_encoder_helper_funcs *enc_helper_funcs,
+ const u32 *formats, int n_formats);
+
+void drm_writeback_queue_job(struct drm_writeback_connector *wb_connector,
+ struct drm_writeback_job *job);
+
+void drm_writeback_cleanup_job(struct drm_writeback_job *job);
+
+void
+drm_writeback_signal_completion(struct drm_writeback_connector *wb_connector,
+ int status);
+
+struct dma_fence *
+drm_writeback_get_out_fence(struct drm_writeback_connector *wb_connector);
+#endif
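
To sketch how the new API fits together (hypothetical foo_* driver, illustrative format list): the connector is initialised once, jobs are queued from the atomic_commit hook shown earlier, and the "writeback done" handler signals completion.

	#include <linux/kernel.h>
	#include <drm/drm_fourcc.h>
	#include <drm/drm_writeback.h>

	static const u32 foo_wb_formats[] = {
		DRM_FORMAT_XRGB8888,	/* illustrative only */
	};

	/*
	 * Hypothetical init path; con_funcs/enc_helper_funcs are the driver's
	 * usual connector funcs and encoder helper funcs.
	 */
	static int foo_wb_init(struct drm_device *dev,
			       struct drm_writeback_connector *wb,
			       const struct drm_connector_funcs *con_funcs,
			       const struct drm_encoder_helper_funcs *enc_helper_funcs)
	{
		return drm_writeback_connector_init(dev, wb, con_funcs,
						    enc_helper_funcs,
						    foo_wb_formats,
						    ARRAY_SIZE(foo_wb_formats));
	}

	/*
	 * Hypothetical "writeback done" handler: completes the oldest queued
	 * job and signals its out-fence.
	 */
	static void foo_wb_done_irq(struct drm_writeback_connector *wb, int status)
	{
		drm_writeback_signal_completion(wb, status);
	}
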
diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h
index dec655894d08..21c648b0b2a1 100644
--- a/include/drm/gpu_scheduler.h
+++ b/include/drm/gpu_scheduler.h
@@ -27,6 +27,8 @@
#include <drm/spsc_queue.h>
#include <linux/dma-fence.h>
+#define MAX_WAIT_SCHED_ENTITY_Q_EMPTY msecs_to_jiffies(1000)
+
struct drm_gpu_scheduler;
struct drm_sched_rq;
@@ -43,18 +45,37 @@ enum drm_sched_priority {
};
/**
- * drm_sched_entity - A wrapper around a job queue (typically attached
- * to the DRM file_priv).
+ * struct drm_sched_entity - A wrapper around a job queue (typically
+ * attached to the DRM file_priv).
+ *
+ * @list: used to append this struct to the list of entities in the
+ * runqueue.
+ * @rq: runqueue to which this entity belongs.
+ * @rq_lock: lock to modify the runqueue to which this entity belongs.
+ * @job_queue: the list of jobs of this entity.
+ * @fence_seq: a linearly increasing seqno incremented with each
+ * new &drm_sched_fence which is part of the entity.
+ * @fence_context: a unique context for all the fences which belong
+ * to this entity.
+ * The &drm_sched_fence.scheduled uses the
+ * fence_context but &drm_sched_fence.finished uses
+ * fence_context + 1.
+ * @dependency: the dependency fence of the job which is on the top
+ * of the job queue.
+ * @cb: callback for the dependency fence above.
+ * @guilty: points to ctx's guilty.
+ * @last_scheduled: points to the finished fence of the last scheduled job.
+ * @last_user: last group leader pushing a job into the entity.
*
* Entities will emit jobs in order to their corresponding hardware
* ring, and the scheduler will alternate between entities based on
* scheduling policy.
-*/
+ */
struct drm_sched_entity {
struct list_head list;
struct drm_sched_rq *rq;
spinlock_t rq_lock;
- struct drm_gpu_scheduler *sched;
struct spsc_queue job_queue;
@@ -63,47 +84,98 @@ struct drm_sched_entity {
struct dma_fence *dependency;
struct dma_fence_cb cb;
- atomic_t *guilty; /* points to ctx's guilty */
- int fini_status;
- struct dma_fence *last_scheduled;
+ atomic_t *guilty;
+ struct dma_fence *last_scheduled;
+ struct task_struct *last_user;
};
/**
+ * struct drm_sched_rq - queue of entities to be scheduled.
+ *
+ * @lock: to modify the entities list.
+ * @sched: the scheduler to which this rq belongs to.
+ * @entities: list of the entities to be scheduled.
+ * @current_entity: the entity which is to be scheduled.
+ *
* Run queue is a set of entities scheduling command submissions for
* one specific ring. It implements the scheduling policy that selects
* the next entity to emit commands from.
-*/
+ */
struct drm_sched_rq {
spinlock_t lock;
+ struct drm_gpu_scheduler *sched;
struct list_head entities;
struct drm_sched_entity *current_entity;
};
+/**
+ * struct drm_sched_fence - fences corresponding to the scheduling of a job.
+ */
struct drm_sched_fence {
+ /**
+ * @scheduled: this fence is what will be signaled by the scheduler
+ * when the job is scheduled.
+ */
struct dma_fence scheduled;
- /* This fence is what will be signaled by the scheduler when
- * the job is completed.
- *
- * When setting up an out fence for the job, you should use
- * this, since it's available immediately upon
- * drm_sched_job_init(), and the fence returned by the driver
- * from run_job() won't be created until the dependencies have
- * resolved.
- */
+ /**
+ * @finished: this fence is what will be signaled by the scheduler
+ * when the job is completed.
+ *
+ * When setting up an out fence for the job, you should use
+ * this, since it's available immediately upon
+ * drm_sched_job_init(), and the fence returned by the driver
+ * from run_job() won't be created until the dependencies have
+ * resolved.
+ */
struct dma_fence finished;
+ /**
+ * @cb: the callback for the parent fence below.
+ */
struct dma_fence_cb cb;
+ /**
+ * @parent: the fence returned by &drm_sched_backend_ops.run_job
+ * when scheduling the job on hardware. We signal the
+ * &drm_sched_fence.finished fence once parent is signalled.
+ */
struct dma_fence *parent;
+ /**
+ * @sched: the scheduler instance to which the job having this struct
+ * belongs to.
+ */
struct drm_gpu_scheduler *sched;
+ /**
+ * @lock: the lock used by the scheduled and the finished fences.
+ */
spinlock_t lock;
+ /**
+ * @owner: job owner for debugging
+ */
void *owner;
};
struct drm_sched_fence *to_drm_sched_fence(struct dma_fence *f);
/**
- * drm_sched_job - A job to be run by an entity.
+ * struct drm_sched_job - A job to be run by an entity.
+ *
+ * @queue_node: used to append this struct to the queue of jobs in an entity.
+ * @sched: the scheduler instance on which this job is scheduled.
+ * @s_fence: contains the fences for the scheduling of job.
+ * @finish_cb: the callback for the finished fence.
+ * @finish_work: schedules the function @drm_sched_job_finish once the job has
+ * finished to remove the job from the
+ * @drm_gpu_scheduler.ring_mirror_list.
+ * @node: used to append this struct to the @drm_gpu_scheduler.ring_mirror_list.
+ * @work_tdr: schedules a delayed call to @drm_sched_job_timedout after the timeout
+ * interval is over.
+ * @id: a unique id assigned to each job scheduled on the scheduler.
+ * @karma: increment on every hang caused by this job. If this exceeds the hang
+ * limit of the scheduler then the job is marked guilty and will not
+ * be scheduled further.
+ * @s_priority: the priority of the job.
+ * @entity: the entity to which this job belongs.
*
* A job is created by the driver using drm_sched_job_init(), and
* should call drm_sched_entity_push_job() once it wants the scheduler
@@ -130,38 +202,64 @@ static inline bool drm_sched_invalidate_job(struct drm_sched_job *s_job,
}
/**
+ * struct drm_sched_backend_ops
+ *
* Define the backend operations called by the scheduler,
- * these functions should be implemented in driver side
-*/
+ * these functions should be implemented in driver side.
+ */
struct drm_sched_backend_ops {
- /* Called when the scheduler is considering scheduling this
- * job next, to get another struct dma_fence for this job to
+ /**
+ * @dependency: Called when the scheduler is considering scheduling
+ * this job next, to get another struct dma_fence for this job to
* block on. Once it returns NULL, run_job() may be called.
*/
struct dma_fence *(*dependency)(struct drm_sched_job *sched_job,
struct drm_sched_entity *s_entity);
- /* Called to execute the job once all of the dependencies have
- * been resolved. This may be called multiple times, if
+ /**
+ * @run_job: Called to execute the job once all of the dependencies
+ * have been resolved. This may be called multiple times, if
* timedout_job() has happened and drm_sched_job_recovery()
* decides to try it again.
*/
struct dma_fence *(*run_job)(struct drm_sched_job *sched_job);
- /* Called when a job has taken too long to execute, to trigger
- * GPU recovery.
+ /**
+ * @timedout_job: Called when a job has taken too long to execute,
+ * to trigger GPU recovery.
*/
void (*timedout_job)(struct drm_sched_job *sched_job);
- /* Called once the job's finished fence has been signaled and
- * it's time to clean it up.
+ /**
+ * @free_job: Called once the job's finished fence has been signaled
+ * and it's time to clean it up.
*/
void (*free_job)(struct drm_sched_job *sched_job);
};
/**
- * One scheduler is implemented for each hardware ring
-*/
+ * struct drm_gpu_scheduler
+ *
+ * @ops: backend operations provided by the driver.
+ * @hw_submission_limit: the max size of the hardware queue.
+ * @timeout: the time after which a job is removed from the scheduler.
+ * @name: name of the ring for which this scheduler is being used.
+ * @sched_rq: priority wise array of run queues.
+ * @wake_up_worker: the wait queue on which the scheduler sleeps until a job
+ * is ready to be scheduled.
+ * @job_scheduled: once @drm_sched_entity_flush is called the scheduler
+ *                 waits on this wait queue until all the scheduled jobs are
+ *                 finished.
+ * @hw_rq_count: the number of jobs currently in the hardware queue.
+ * @job_id_count: used to assign a unique id to each job.
+ * @thread: the kthread on which the scheduler runs.
+ * @ring_mirror_list: the list of jobs which are currently in the job queue.
+ * @job_list_lock: lock to protect the ring_mirror_list.
+ * @hang_limit: once the hangs caused by a job cross this limit, the job is
+ *              marked guilty and will not be considered for scheduling further.
+ *
+ * One scheduler is implemented for each hardware ring.
+ */
struct drm_gpu_scheduler {
const struct drm_sched_backend_ops *ops;
uint32_t hw_submission_limit;
@@ -184,16 +282,13 @@ int drm_sched_init(struct drm_gpu_scheduler *sched,
const char *name);
void drm_sched_fini(struct drm_gpu_scheduler *sched);
-int drm_sched_entity_init(struct drm_gpu_scheduler *sched,
- struct drm_sched_entity *entity,
- struct drm_sched_rq *rq,
+int drm_sched_entity_init(struct drm_sched_entity *entity,
+ struct drm_sched_rq **rq_list,
+ unsigned int num_rq_list,
atomic_t *guilty);
-void drm_sched_entity_do_release(struct drm_gpu_scheduler *sched,
- struct drm_sched_entity *entity);
-void drm_sched_entity_cleanup(struct drm_gpu_scheduler *sched,
- struct drm_sched_entity *entity);
-void drm_sched_entity_fini(struct drm_gpu_scheduler *sched,
- struct drm_sched_entity *entity);
+long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout);
+void drm_sched_entity_fini(struct drm_sched_entity *entity);
+void drm_sched_entity_destroy(struct drm_sched_entity *entity);
void drm_sched_entity_push_job(struct drm_sched_job *sched_job,
struct drm_sched_entity *entity);
void drm_sched_entity_set_rq(struct drm_sched_entity *entity,
@@ -204,7 +299,6 @@ struct drm_sched_fence *drm_sched_fence_create(
void drm_sched_fence_scheduled(struct drm_sched_fence *fence);
void drm_sched_fence_finished(struct drm_sched_fence *fence);
int drm_sched_job_init(struct drm_sched_job *job,
- struct drm_gpu_scheduler *sched,
struct drm_sched_entity *entity,
void *owner);
void drm_sched_hw_job_reset(struct drm_gpu_scheduler *sched,
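
A hedged sketch of the updated driver flow after these signature changes: entities are now initialised with an array of run queues, and jobs no longer take the scheduler explicitly. The foo_* names are hypothetical and the normal-priority run queue is assumed.

	#include <drm/gpu_scheduler.h>

	/*
	 * Hypothetical per-context setup with the new drm_sched_entity_init():
	 * the entity derives its scheduler from the run-queue list.
	 */
	static int foo_ctx_init(struct drm_gpu_scheduler *sched,
				struct drm_sched_entity *entity)
	{
		struct drm_sched_rq *rq = &sched->sched_rq[DRM_SCHED_PRIORITY_NORMAL];

		return drm_sched_entity_init(entity, &rq, 1, NULL);
	}

	/*
	 * Hypothetical submission: drm_sched_job_init() no longer takes the
	 * scheduler; it is derived from the entity.
	 */
	static int foo_submit(struct drm_sched_job *job,
			      struct drm_sched_entity *entity, void *owner)
	{
		int ret;

		ret = drm_sched_job_init(job, entity, owner);
		if (ret)
			return ret;

		drm_sched_entity_push_job(job, entity);
		return 0;
	}
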
diff --git a/include/drm/i915_component.h b/include/drm/i915_component.h
index 346b1f5cb180..fca22d463e1b 100644
--- a/include/drm/i915_component.h
+++ b/include/drm/i915_component.h
@@ -24,101 +24,26 @@
#ifndef _I915_COMPONENT_H_
#define _I915_COMPONENT_H_
+#include "drm_audio_component.h"
+
/* MAX_PORT is the number of port
* It must be sync with I915_MAX_PORTS defined i915_drv.h
*/
#define MAX_PORTS 6
/**
- * struct i915_audio_component_ops - Ops implemented by i915 driver, called by hda driver
- */
-struct i915_audio_component_ops {
- /**
- * @owner: i915 module
- */
- struct module *owner;
- /**
- * @get_power: get the POWER_DOMAIN_AUDIO power well
- *
- * Request the power well to be turned on.
- */
- void (*get_power)(struct device *);
- /**
- * @put_power: put the POWER_DOMAIN_AUDIO power well
- *
- * Allow the power well to be turned off.
- */
- void (*put_power)(struct device *);
- /**
- * @codec_wake_override: Enable/disable codec wake signal
- */
- void (*codec_wake_override)(struct device *, bool enable);
- /**
- * @get_cdclk_freq: Get the Core Display Clock in kHz
- */
- int (*get_cdclk_freq)(struct device *);
- /**
- * @sync_audio_rate: set n/cts based on the sample rate
- *
- * Called from audio driver. After audio driver sets the
- * sample rate, it will call this function to set n/cts
- */
- int (*sync_audio_rate)(struct device *, int port, int pipe, int rate);
- /**
- * @get_eld: fill the audio state and ELD bytes for the given port
- *
- * Called from audio driver to get the HDMI/DP audio state of the given
- * digital port, and also fetch ELD bytes to the given pointer.
- *
- * It returns the byte size of the original ELD (not the actually
- * copied size), zero for an invalid ELD, or a negative error code.
- *
- * Note that the returned size may be over @max_bytes. Then it
- * implies that only a part of ELD has been copied to the buffer.
- */
- int (*get_eld)(struct device *, int port, int pipe, bool *enabled,
- unsigned char *buf, int max_bytes);
-};
-
-/**
- * struct i915_audio_component_audio_ops - Ops implemented by hda driver, called by i915 driver
- */
-struct i915_audio_component_audio_ops {
- /**
- * @audio_ptr: Pointer to be used in call to pin_eld_notify
- */
- void *audio_ptr;
- /**
- * @pin_eld_notify: Notify the HDA driver that pin sense and/or ELD information has changed
- *
- * Called when the i915 driver has set up audio pipeline or has just
- * begun to tear it down. This allows the HDA driver to update its
- * status accordingly (even when the HDA controller is in power save
- * mode).
- */
- void (*pin_eld_notify)(void *audio_ptr, int port, int pipe);
-};
-
-/**
* struct i915_audio_component - Used for direct communication between i915 and hda drivers
*/
struct i915_audio_component {
/**
- * @dev: i915 device, used as parameter for ops
+ * @base: the drm_audio_component base class
*/
- struct device *dev;
+ struct drm_audio_component base;
+
/**
* @aud_sample_rate: the array of audio sample rate per port
*/
int aud_sample_rate[MAX_PORTS];
- /**
- * @ops: Ops implemented by i915 driver, called by hda driver
- */
- const struct i915_audio_component_ops *ops;
- /**
- * @audio_ops: Ops implemented by hda driver, called by i915 driver
- */
- const struct i915_audio_component_audio_ops *audio_ops;
};
#endif /* _I915_COMPONENT_H_ */
diff --git a/include/drm/i915_drm.h b/include/drm/i915_drm.h
index c9e5a6621b95..c44703f471b3 100644
--- a/include/drm/i915_drm.h
+++ b/include/drm/i915_drm.h
@@ -95,7 +95,9 @@ extern struct resource intel_graphics_stolen_res;
#define I845_TSEG_SIZE_512K (2 << 1)
#define I845_TSEG_SIZE_1M (3 << 1)
-#define INTEL_BSM 0x5c
+#define INTEL_BSM 0x5c
+#define INTEL_GEN11_BSM_DW0 0xc0
+#define INTEL_GEN11_BSM_DW1 0xc4
#define INTEL_BSM_MASK (-(1u << 20))
#endif /* _I915_DRM_H_ */
diff --git a/include/drm/i915_pciids.h b/include/drm/i915_pciids.h
index bab70ff6e78b..fbf5cfc9b352 100644
--- a/include/drm/i915_pciids.h
+++ b/include/drm/i915_pciids.h
@@ -349,7 +349,6 @@
#define INTEL_KBL_GT2_IDS(info) \
INTEL_VGA_DEVICE(0x5916, info), /* ULT GT2 */ \
INTEL_VGA_DEVICE(0x5917, info), /* Mobile GT2 */ \
- INTEL_VGA_DEVICE(0x591C, info), /* ULX GT2 */ \
INTEL_VGA_DEVICE(0x5921, info), /* ULT GT2F */ \
INTEL_VGA_DEVICE(0x591E, info), /* ULX GT2 */ \
INTEL_VGA_DEVICE(0x5912, info), /* DT GT2 */ \
@@ -365,11 +364,17 @@
#define INTEL_KBL_GT4_IDS(info) \
INTEL_VGA_DEVICE(0x593B, info) /* Halo GT4 */
+/* AML/KBL Y GT2 */
+#define INTEL_AML_GT2_IDS(info) \
+ INTEL_VGA_DEVICE(0x591C, info), /* ULX GT2 */ \
+ INTEL_VGA_DEVICE(0x87C0, info) /* ULX GT2 */
+
#define INTEL_KBL_IDS(info) \
INTEL_KBL_GT1_IDS(info), \
INTEL_KBL_GT2_IDS(info), \
INTEL_KBL_GT3_IDS(info), \
- INTEL_KBL_GT4_IDS(info)
+ INTEL_KBL_GT4_IDS(info), \
+ INTEL_AML_GT2_IDS(info)
/* CFL S */
#define INTEL_CFL_S_GT1_IDS(info) \
@@ -388,32 +393,40 @@
INTEL_VGA_DEVICE(0x3E9B, info), /* Halo GT2 */ \
INTEL_VGA_DEVICE(0x3E94, info) /* Halo GT2 */
-/* CFL U GT1 */
-#define INTEL_CFL_U_GT1_IDS(info) \
- INTEL_VGA_DEVICE(0x3EA1, info), \
- INTEL_VGA_DEVICE(0x3EA4, info)
-
/* CFL U GT2 */
#define INTEL_CFL_U_GT2_IDS(info) \
- INTEL_VGA_DEVICE(0x3EA0, info), \
- INTEL_VGA_DEVICE(0x3EA3, info), \
INTEL_VGA_DEVICE(0x3EA9, info)
/* CFL U GT3 */
#define INTEL_CFL_U_GT3_IDS(info) \
- INTEL_VGA_DEVICE(0x3EA2, info), /* ULT GT3 */ \
INTEL_VGA_DEVICE(0x3EA5, info), /* ULT GT3 */ \
INTEL_VGA_DEVICE(0x3EA6, info), /* ULT GT3 */ \
INTEL_VGA_DEVICE(0x3EA7, info), /* ULT GT3 */ \
INTEL_VGA_DEVICE(0x3EA8, info) /* ULT GT3 */
+/* WHL/CFL U GT1 */
+#define INTEL_WHL_U_GT1_IDS(info) \
+ INTEL_VGA_DEVICE(0x3EA1, info)
+
+/* WHL/CFL U GT2 */
+#define INTEL_WHL_U_GT2_IDS(info) \
+ INTEL_VGA_DEVICE(0x3EA0, info)
+
+/* WHL/CFL U GT3 */
+#define INTEL_WHL_U_GT3_IDS(info) \
+ INTEL_VGA_DEVICE(0x3EA2, info), \
+ INTEL_VGA_DEVICE(0x3EA3, info), \
+ INTEL_VGA_DEVICE(0x3EA4, info)
+
#define INTEL_CFL_IDS(info) \
INTEL_CFL_S_GT1_IDS(info), \
INTEL_CFL_S_GT2_IDS(info), \
INTEL_CFL_H_GT2_IDS(info), \
- INTEL_CFL_U_GT1_IDS(info), \
INTEL_CFL_U_GT2_IDS(info), \
- INTEL_CFL_U_GT3_IDS(info)
+ INTEL_CFL_U_GT3_IDS(info), \
+ INTEL_WHL_U_GT1_IDS(info), \
+ INTEL_WHL_U_GT2_IDS(info), \
+ INTEL_WHL_U_GT3_IDS(info)
/* CNL */
#define INTEL_CNL_IDS(info) \
diff --git a/include/drm/tinydrm/tinydrm.h b/include/drm/tinydrm/tinydrm.h
index 56e4a916b5e8..fe9827d0ca8a 100644
--- a/include/drm/tinydrm/tinydrm.h
+++ b/include/drm/tinydrm/tinydrm.h
@@ -16,16 +16,31 @@
/**
* struct tinydrm_device - tinydrm device
- * @drm: DRM device
- * @pipe: Display pipe structure
- * @dirty_lock: Serializes framebuffer flushing
- * @fb_funcs: Framebuffer functions used when creating framebuffers
*/
struct tinydrm_device {
+ /**
+ * @drm: DRM device
+ */
struct drm_device *drm;
+
+ /**
+ * @pipe: Display pipe structure
+ */
struct drm_simple_display_pipe pipe;
+
+ /**
+ * @dirty_lock: Serializes framebuffer flushing
+ */
struct mutex dirty_lock;
+
+ /**
+ * @fb_funcs: Framebuffer functions used when creating framebuffers
+ */
const struct drm_framebuffer_funcs *fb_funcs;
+
+ /**
+ * @fb_dirty: Framebuffer dirty callback
+ */
int (*fb_dirty)(struct drm_framebuffer *framebuffer,
struct drm_file *file_priv, unsigned flags,
unsigned color, struct drm_clip_rect *clips,
diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h
index c67977aa1a0e..a01ba2032f0e 100644
--- a/include/drm/ttm/ttm_bo_api.h
+++ b/include/drm/ttm/ttm_bo_api.h
@@ -284,17 +284,29 @@ struct ttm_operation_ctx {
#define TTM_OPT_FLAG_FORCE_ALLOC 0x2
/**
+ * ttm_bo_get - reference a struct ttm_buffer_object
+ *
+ * @bo: The buffer object.
+ */
+static inline void ttm_bo_get(struct ttm_buffer_object *bo)
+{
+ kref_get(&bo->kref);
+}
+
+/**
* ttm_bo_reference - reference a struct ttm_buffer_object
*
* @bo: The buffer object.
*
* Returns a refcounted pointer to a buffer object.
+ *
+ * This function is deprecated. Use @ttm_bo_get instead.
*/
static inline struct ttm_buffer_object *
ttm_bo_reference(struct ttm_buffer_object *bo)
{
- kref_get(&bo->kref);
+ ttm_bo_get(bo);
return bo;
}
@@ -346,11 +358,22 @@ int ttm_bo_validate(struct ttm_buffer_object *bo,
struct ttm_operation_ctx *ctx);
/**
+ * ttm_bo_put
+ *
+ * @bo: The buffer object.
+ *
+ * Unreference a buffer object.
+ */
+void ttm_bo_put(struct ttm_buffer_object *bo);
+
+/**
* ttm_bo_unref
*
* @bo: The buffer object.
*
* Unreference and clear a pointer to a buffer object.
+ *
+ * This function is deprecated. Use @ttm_bo_put instead.
*/
void ttm_bo_unref(struct ttm_buffer_object **bo);
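
A short sketch of the reference-counting rename; foo_use_bo() is hypothetical.

	#include <drm/ttm/ttm_bo_api.h>

	/*
	 * Sketch: take and drop a reference with the new names instead of the
	 * deprecated ttm_bo_reference()/ttm_bo_unref() pair.
	 */
	static void foo_use_bo(struct ttm_buffer_object *bo)
	{
		ttm_bo_get(bo);

		/* ... access the buffer object ... */

		ttm_bo_put(bo);
	}
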
diff --git a/include/drm/ttm/ttm_set_memory.h b/include/drm/ttm/ttm_set_memory.h
new file mode 100644
index 000000000000..7c492b49e38c
--- /dev/null
+++ b/include/drm/ttm/ttm_set_memory.h
@@ -0,0 +1,150 @@
+/**************************************************************************
+ *
+ * Copyright (c) 2018 Advanced Micro Devices, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Authors: Huang Rui <ray.huang@amd.com>
+ */
+
+#ifndef TTM_SET_MEMORY
+#define TTM_SET_MEMORY
+
+#include <linux/mm.h>
+
+#ifdef CONFIG_X86
+
+#include <asm/set_memory.h>
+
+static inline int ttm_set_pages_array_wb(struct page **pages, int addrinarray)
+{
+ return set_pages_array_wb(pages, addrinarray);
+}
+
+static inline int ttm_set_pages_array_wc(struct page **pages, int addrinarray)
+{
+ return set_pages_array_wc(pages, addrinarray);
+}
+
+static inline int ttm_set_pages_array_uc(struct page **pages, int addrinarray)
+{
+ return set_pages_array_uc(pages, addrinarray);
+}
+
+static inline int ttm_set_pages_wb(struct page *page, int numpages)
+{
+ return set_pages_wb(page, numpages);
+}
+
+static inline int ttm_set_pages_wc(struct page *page, int numpages)
+{
+ unsigned long addr = (unsigned long)page_address(page);
+
+ return set_memory_wc(addr, numpages);
+}
+
+static inline int ttm_set_pages_uc(struct page *page, int numpages)
+{
+ return set_pages_uc(page, numpages);
+}
+
+#else /* for CONFIG_X86 */
+
+#if IS_ENABLED(CONFIG_AGP)
+
+#include <asm/agp.h>
+
+static inline int ttm_set_pages_array_wb(struct page **pages, int addrinarray)
+{
+ int i;
+
+ for (i = 0; i < addrinarray; i++)
+ unmap_page_from_agp(pages[i]);
+ return 0;
+}
+
+static inline int ttm_set_pages_array_wc(struct page **pages, int addrinarray)
+{
+ int i;
+
+ for (i = 0; i < addrinarray; i++)
+ map_page_into_agp(pages[i]);
+ return 0;
+}
+
+static inline int ttm_set_pages_array_uc(struct page **pages, int addrinarray)
+{
+ int i;
+
+ for (i = 0; i < addrinarray; i++)
+ map_page_into_agp(pages[i]);
+ return 0;
+}
+
+static inline int ttm_set_pages_wb(struct page *page, int numpages)
+{
+ int i;
+
+ for (i = 0; i < numpages; i++)
+ unmap_page_from_agp(page++);
+ return 0;
+}
+
+#else /* for CONFIG_AGP */
+
+static inline int ttm_set_pages_array_wb(struct page **pages, int addrinarray)
+{
+ return 0;
+}
+
+static inline int ttm_set_pages_array_wc(struct page **pages, int addrinarray)
+{
+ return 0;
+}
+
+static inline int ttm_set_pages_array_uc(struct page **pages, int addrinarray)
+{
+ return 0;
+}
+
+static inline int ttm_set_pages_wb(struct page *page, int numpages)
+{
+ return 0;
+}
+
+#endif /* for CONFIG_AGP */
+
+static inline int ttm_set_pages_wc(struct page *page, int numpages)
+{
+ return 0;
+}
+
+static inline int ttm_set_pages_uc(struct page *page, int numpages)
+{
+ return 0;
+}
+
+#endif /* for CONFIG_X86 */
+
+#endif
diff --git a/include/dt-bindings/clock/actions,s700-cmu.h b/include/dt-bindings/clock/actions,s700-cmu.h
new file mode 100644
index 000000000000..3e1942996724
--- /dev/null
+++ b/include/dt-bindings/clock/actions,s700-cmu.h
@@ -0,0 +1,118 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Device Tree binding constants for Actions Semi S700 Clock Management Unit
+ *
+ * Copyright (c) 2014 Actions Semi Inc.
+ * Author: David Liu <liuwei@actions-semi.com>
+ *
+ * Author: Pathiban Nallathambi <pn@denx.de>
+ * Author: Saravanan Sekar <sravanhome@gmail.com>
+ */
+
+#ifndef __DT_BINDINGS_CLOCK_S700_H
+#define __DT_BINDINGS_CLOCK_S700_H
+
+#define CLK_NONE 0
+
+/* pll clocks */
+#define CLK_CORE_PLL 1
+#define CLK_DEV_PLL 2
+#define CLK_DDR_PLL 3
+#define CLK_NAND_PLL 4
+#define CLK_DISPLAY_PLL 5
+#define CLK_TVOUT_PLL 6
+#define CLK_CVBS_PLL 7
+#define CLK_AUDIO_PLL 8
+#define CLK_ETHERNET_PLL 9
+
+/* system clock */
+#define CLK_CPU 10
+#define CLK_DEV 11
+#define CLK_AHB 12
+#define CLK_APB 13
+#define CLK_DMAC 14
+#define CLK_NOC0_CLK_MUX 15
+#define CLK_NOC1_CLK_MUX 16
+#define CLK_HP_CLK_MUX 17
+#define CLK_HP_CLK_DIV 18
+#define CLK_NOC1_CLK_DIV 19
+#define CLK_NOC0 20
+#define CLK_NOC1 21
+#define CLK_SENOR_SRC 22
+
+/* peripheral device clock */
+#define CLK_GPIO 23
+#define CLK_TIMER 24
+#define CLK_DSI 25
+#define CLK_CSI 26
+#define CLK_SI 27
+#define CLK_DE 28
+#define CLK_HDE 29
+#define CLK_VDE 30
+#define CLK_VCE 31
+#define CLK_NAND 32
+#define CLK_SD0 33
+#define CLK_SD1 34
+#define CLK_SD2 35
+
+#define CLK_UART0 36
+#define CLK_UART1 37
+#define CLK_UART2 38
+#define CLK_UART3 39
+#define CLK_UART4 40
+#define CLK_UART5 41
+#define CLK_UART6 42
+
+#define CLK_PWM0 43
+#define CLK_PWM1 44
+#define CLK_PWM2 45
+#define CLK_PWM3 46
+#define CLK_PWM4 47
+#define CLK_PWM5 48
+#define CLK_GPU3D 49
+
+#define CLK_I2C0 50
+#define CLK_I2C1 51
+#define CLK_I2C2 52
+#define CLK_I2C3 53
+
+#define CLK_SPI0 54
+#define CLK_SPI1 55
+#define CLK_SPI2 56
+#define CLK_SPI3 57
+
+#define CLK_USB3_480MPLL0 58
+#define CLK_USB3_480MPHY0 59
+#define CLK_USB3_5GPHY 60
+#define CLK_USB3_CCE 61
+#define CLK_USB3_MAC 62
+
+#define CLK_LCD 63
+#define CLK_HDMI_AUDIO 64
+#define CLK_I2SRX 65
+#define CLK_I2STX 66
+
+#define CLK_SENSOR0 67
+#define CLK_SENSOR1 68
+
+#define CLK_HDMI_DEV 69
+
+#define CLK_ETHERNET 70
+#define CLK_RMII_REF 71
+
+#define CLK_USB2H0_PLLEN 72
+#define CLK_USB2H0_PHY 73
+#define CLK_USB2H0_CCE 74
+#define CLK_USB2H1_PLLEN 75
+#define CLK_USB2H1_PHY 76
+#define CLK_USB2H1_CCE 77
+
+#define CLK_TVOUT 78
+
+#define CLK_THERMAL_SENSOR 79
+
+#define CLK_IRC_SWITCH 80
+#define CLK_PCM1 81
+#define CLK_NR_CLKS (CLK_PCM1 + 1)
+
+#endif /* __DT_BINDINGS_CLOCK_S700_H */
diff --git a/include/dt-bindings/clock/aspeed-clock.h b/include/dt-bindings/clock/aspeed-clock.h
index 44761849fcbe..f43738607d77 100644
--- a/include/dt-bindings/clock/aspeed-clock.h
+++ b/include/dt-bindings/clock/aspeed-clock.h
@@ -25,7 +25,7 @@
#define ASPEED_CLK_GATE_RSACLK 19
#define ASPEED_CLK_GATE_UART3CLK 20
#define ASPEED_CLK_GATE_UART4CLK 21
-#define ASPEED_CLK_GATE_SDCLKCLK 22
+#define ASPEED_CLK_GATE_SDCLK 22
#define ASPEED_CLK_GATE_LHCCLK 23
#define ASPEED_CLK_HPLL 24
#define ASPEED_CLK_AHB 25
diff --git a/include/dt-bindings/clock/axg-audio-clkc.h b/include/dt-bindings/clock/axg-audio-clkc.h
new file mode 100644
index 000000000000..fd9c362099d9
--- /dev/null
+++ b/include/dt-bindings/clock/axg-audio-clkc.h
@@ -0,0 +1,94 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
+/*
+ * Copyright (c) 2018 Baylibre SAS.
+ * Author: Jerome Brunet <jbrunet@baylibre.com>
+ */
+
+#ifndef __AXG_AUDIO_CLKC_BINDINGS_H
+#define __AXG_AUDIO_CLKC_BINDINGS_H
+
+#define AUD_CLKID_SLV_SCLK0 9
+#define AUD_CLKID_SLV_SCLK1 10
+#define AUD_CLKID_SLV_SCLK2 11
+#define AUD_CLKID_SLV_SCLK3 12
+#define AUD_CLKID_SLV_SCLK4 13
+#define AUD_CLKID_SLV_SCLK5 14
+#define AUD_CLKID_SLV_SCLK6 15
+#define AUD_CLKID_SLV_SCLK7 16
+#define AUD_CLKID_SLV_SCLK8 17
+#define AUD_CLKID_SLV_SCLK9 18
+#define AUD_CLKID_SLV_LRCLK0 19
+#define AUD_CLKID_SLV_LRCLK1 20
+#define AUD_CLKID_SLV_LRCLK2 21
+#define AUD_CLKID_SLV_LRCLK3 22
+#define AUD_CLKID_SLV_LRCLK4 23
+#define AUD_CLKID_SLV_LRCLK5 24
+#define AUD_CLKID_SLV_LRCLK6 25
+#define AUD_CLKID_SLV_LRCLK7 26
+#define AUD_CLKID_SLV_LRCLK8 27
+#define AUD_CLKID_SLV_LRCLK9 28
+#define AUD_CLKID_DDR_ARB 29
+#define AUD_CLKID_PDM 30
+#define AUD_CLKID_TDMIN_A 31
+#define AUD_CLKID_TDMIN_B 32
+#define AUD_CLKID_TDMIN_C 33
+#define AUD_CLKID_TDMIN_LB 34
+#define AUD_CLKID_TDMOUT_A 35
+#define AUD_CLKID_TDMOUT_B 36
+#define AUD_CLKID_TDMOUT_C 37
+#define AUD_CLKID_FRDDR_A 38
+#define AUD_CLKID_FRDDR_B 39
+#define AUD_CLKID_FRDDR_C 40
+#define AUD_CLKID_TODDR_A 41
+#define AUD_CLKID_TODDR_B 42
+#define AUD_CLKID_TODDR_C 43
+#define AUD_CLKID_LOOPBACK 44
+#define AUD_CLKID_SPDIFIN 45
+#define AUD_CLKID_SPDIFOUT 46
+#define AUD_CLKID_RESAMPLE 47
+#define AUD_CLKID_POWER_DETECT 48
+#define AUD_CLKID_MST_A_MCLK 49
+#define AUD_CLKID_MST_B_MCLK 50
+#define AUD_CLKID_MST_C_MCLK 51
+#define AUD_CLKID_MST_D_MCLK 52
+#define AUD_CLKID_MST_E_MCLK 53
+#define AUD_CLKID_MST_F_MCLK 54
+#define AUD_CLKID_SPDIFOUT_CLK 55
+#define AUD_CLKID_SPDIFIN_CLK 56
+#define AUD_CLKID_PDM_DCLK 57
+#define AUD_CLKID_PDM_SYSCLK 58
+#define AUD_CLKID_MST_A_SCLK 79
+#define AUD_CLKID_MST_B_SCLK 80
+#define AUD_CLKID_MST_C_SCLK 81
+#define AUD_CLKID_MST_D_SCLK 82
+#define AUD_CLKID_MST_E_SCLK 83
+#define AUD_CLKID_MST_F_SCLK 84
+#define AUD_CLKID_MST_A_LRCLK 86
+#define AUD_CLKID_MST_B_LRCLK 87
+#define AUD_CLKID_MST_C_LRCLK 88
+#define AUD_CLKID_MST_D_LRCLK 89
+#define AUD_CLKID_MST_E_LRCLK 90
+#define AUD_CLKID_MST_F_LRCLK 91
+#define AUD_CLKID_TDMIN_A_SCLK_SEL 116
+#define AUD_CLKID_TDMIN_B_SCLK_SEL 117
+#define AUD_CLKID_TDMIN_C_SCLK_SEL 118
+#define AUD_CLKID_TDMIN_LB_SCLK_SEL 119
+#define AUD_CLKID_TDMOUT_A_SCLK_SEL 120
+#define AUD_CLKID_TDMOUT_B_SCLK_SEL 121
+#define AUD_CLKID_TDMOUT_C_SCLK_SEL 122
+#define AUD_CLKID_TDMIN_A_SCLK 123
+#define AUD_CLKID_TDMIN_B_SCLK 124
+#define AUD_CLKID_TDMIN_C_SCLK 125
+#define AUD_CLKID_TDMIN_LB_SCLK 126
+#define AUD_CLKID_TDMOUT_A_SCLK 127
+#define AUD_CLKID_TDMOUT_B_SCLK 128
+#define AUD_CLKID_TDMOUT_C_SCLK 129
+#define AUD_CLKID_TDMIN_A_LRCLK 130
+#define AUD_CLKID_TDMIN_B_LRCLK 131
+#define AUD_CLKID_TDMIN_C_LRCLK 132
+#define AUD_CLKID_TDMIN_LB_LRCLK 133
+#define AUD_CLKID_TDMOUT_A_LRCLK 134
+#define AUD_CLKID_TDMOUT_B_LRCLK 135
+#define AUD_CLKID_TDMOUT_C_LRCLK 136
+
+#endif /* __AXG_AUDIO_CLKC_BINDINGS_H */
diff --git a/include/dt-bindings/clock/axg-clkc.h b/include/dt-bindings/clock/axg-clkc.h
index 555937a25504..fd1f938c38d1 100644
--- a/include/dt-bindings/clock/axg-clkc.h
+++ b/include/dt-bindings/clock/axg-clkc.h
@@ -68,5 +68,9 @@
#define CLKID_SD_EMMC_B_CLK0 59
#define CLKID_SD_EMMC_C_CLK0 60
#define CLKID_HIFI_PLL 69
+#define CLKID_PCIE_CML_EN0 79
+#define CLKID_PCIE_CML_EN1 80
+#define CLKID_MIPI_ENABLE 81
+#define CLKID_GEN_CLK 84
#endif /* __AXG_CLKC_H */
diff --git a/include/dt-bindings/clock/gxbb-clkc.h b/include/dt-bindings/clock/gxbb-clkc.h
index 7a892be90549..3979d48c025f 100644
--- a/include/dt-bindings/clock/gxbb-clkc.h
+++ b/include/dt-bindings/clock/gxbb-clkc.h
@@ -127,5 +127,6 @@
#define CLKID_VAPB 140
#define CLKID_VDEC_1 153
#define CLKID_VDEC_HEVC 156
+#define CLKID_GEN_CLK 159
#endif /* __GXBB_CLKC_H */
diff --git a/include/dt-bindings/clock/imx6sll-clock.h b/include/dt-bindings/clock/imx6sll-clock.h
index 151111e68f4f..1036475f997d 100644
--- a/include/dt-bindings/clock/imx6sll-clock.h
+++ b/include/dt-bindings/clock/imx6sll-clock.h
@@ -197,6 +197,13 @@
#define IMX6SLL_CLK_EXTERN_AUDIO_PODF 171
#define IMX6SLL_CLK_EXTERN_AUDIO 172
-#define IMX6SLL_CLK_END 173
+#define IMX6SLL_CLK_GPIO1 173
+#define IMX6SLL_CLK_GPIO2 174
+#define IMX6SLL_CLK_GPIO3 175
+#define IMX6SLL_CLK_GPIO4 176
+#define IMX6SLL_CLK_GPIO5 177
+#define IMX6SLL_CLK_GPIO6 178
+
+#define IMX6SLL_CLK_END 179
#endif /* __DT_BINDINGS_CLOCK_IMX6SLL_H */
diff --git a/include/dt-bindings/clock/imx6ul-clock.h b/include/dt-bindings/clock/imx6ul-clock.h
index 9564597cbfac..f8e0476a3a0e 100644
--- a/include/dt-bindings/clock/imx6ul-clock.h
+++ b/include/dt-bindings/clock/imx6ul-clock.h
@@ -235,27 +235,31 @@
#define IMX6UL_CLK_CSI_PODF 222
#define IMX6UL_CLK_PLL3_120M 223
#define IMX6UL_CLK_KPP 224
-#define IMX6UL_CLK_CKO1_SEL 225
-#define IMX6UL_CLK_CKO1_PODF 226
-#define IMX6UL_CLK_CKO1 227
-#define IMX6UL_CLK_CKO2_SEL 228
-#define IMX6UL_CLK_CKO2_PODF 229
-#define IMX6UL_CLK_CKO2 230
-#define IMX6UL_CLK_CKO 231
+#define IMX6ULL_CLK_ESAI_PRED 225
+#define IMX6ULL_CLK_ESAI_PODF 226
+#define IMX6ULL_CLK_ESAI_EXTAL 227
+#define IMX6ULL_CLK_ESAI_MEM 228
+#define IMX6ULL_CLK_ESAI_IPG 229
+#define IMX6ULL_CLK_DCP_CLK 230
+#define IMX6ULL_CLK_EPDC_PRE_SEL 231
+#define IMX6ULL_CLK_EPDC_SEL 232
+#define IMX6ULL_CLK_EPDC_PODF 233
+#define IMX6ULL_CLK_EPDC_ACLK 234
+#define IMX6ULL_CLK_EPDC_PIX 235
+#define IMX6ULL_CLK_ESAI_SEL 236
+#define IMX6UL_CLK_CKO1_SEL 237
+#define IMX6UL_CLK_CKO1_PODF 238
+#define IMX6UL_CLK_CKO1 239
+#define IMX6UL_CLK_CKO2_SEL 240
+#define IMX6UL_CLK_CKO2_PODF 241
+#define IMX6UL_CLK_CKO2 242
+#define IMX6UL_CLK_CKO 243
+#define IMX6UL_CLK_GPIO1 244
+#define IMX6UL_CLK_GPIO2 245
+#define IMX6UL_CLK_GPIO3 246
+#define IMX6UL_CLK_GPIO4 247
+#define IMX6UL_CLK_GPIO5 248
-/* For i.MX6ULL */
-#define IMX6ULL_CLK_ESAI_PRED 232
-#define IMX6ULL_CLK_ESAI_PODF 233
-#define IMX6ULL_CLK_ESAI_EXTAL 234
-#define IMX6ULL_CLK_ESAI_MEM 235
-#define IMX6ULL_CLK_ESAI_IPG 236
-#define IMX6ULL_CLK_DCP_CLK 237
-#define IMX6ULL_CLK_EPDC_PRE_SEL 238
-#define IMX6ULL_CLK_EPDC_SEL 239
-#define IMX6ULL_CLK_EPDC_PODF 240
-#define IMX6ULL_CLK_EPDC_ACLK 241
-#define IMX6ULL_CLK_EPDC_PIX 242
-#define IMX6ULL_CLK_ESAI_SEL 243
-#define IMX6UL_CLK_END 244
+#define IMX6UL_CLK_END 249
#endif /* __DT_BINDINGS_CLOCK_IMX6UL_H */
diff --git a/include/dt-bindings/clock/maxim,max9485.h b/include/dt-bindings/clock/maxim,max9485.h
new file mode 100644
index 000000000000..185b09ce1869
--- /dev/null
+++ b/include/dt-bindings/clock/maxim,max9485.h
@@ -0,0 +1,18 @@
+/*
+ * Copyright (C) 2018 Daniel Mack
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#ifndef __DT_BINDINGS_MAX9485_CLK_H
+#define __DT_BINDINGS_MAX9485_CLK_H
+
+#define MAX9485_MCLKOUT 0
+#define MAX9485_CLKOUT 1
+#define MAX9485_CLKOUT1 2
+#define MAX9485_CLKOUT2 3
+
+#endif /* __DT_BINDINGS_MAX9485_CLK_H */
diff --git a/include/dt-bindings/clock/px30-cru.h b/include/dt-bindings/clock/px30-cru.h
new file mode 100644
index 000000000000..00101479f7c4
--- /dev/null
+++ b/include/dt-bindings/clock/px30-cru.h
@@ -0,0 +1,389 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _DT_BINDINGS_CLK_ROCKCHIP_PX30_H
+#define _DT_BINDINGS_CLK_ROCKCHIP_PX30_H
+
+/* core clocks */
+#define PLL_APLL 1
+#define PLL_DPLL 2
+#define PLL_CPLL 3
+#define PLL_NPLL 4
+#define APLL_BOOST_H 5
+#define APLL_BOOST_L 6
+#define ARMCLK 7
+
+/* sclk gates (special clocks) */
+#define USB480M 14
+#define SCLK_PDM 15
+#define SCLK_I2S0_TX 16
+#define SCLK_I2S0_TX_OUT 17
+#define SCLK_I2S0_RX 18
+#define SCLK_I2S0_RX_OUT 19
+#define SCLK_I2S1 20
+#define SCLK_I2S1_OUT 21
+#define SCLK_I2S2 22
+#define SCLK_I2S2_OUT 23
+#define SCLK_UART1 24
+#define SCLK_UART2 25
+#define SCLK_UART3 26
+#define SCLK_UART4 27
+#define SCLK_UART5 28
+#define SCLK_I2C0 29
+#define SCLK_I2C1 30
+#define SCLK_I2C2 31
+#define SCLK_I2C3 32
+#define SCLK_I2C4 33
+#define SCLK_PWM0 34
+#define SCLK_PWM1 35
+#define SCLK_SPI0 36
+#define SCLK_SPI1 37
+#define SCLK_TIMER0 38
+#define SCLK_TIMER1 39
+#define SCLK_TIMER2 40
+#define SCLK_TIMER3 41
+#define SCLK_TIMER4 42
+#define SCLK_TIMER5 43
+#define SCLK_TSADC 44
+#define SCLK_SARADC 45
+#define SCLK_OTP 46
+#define SCLK_OTP_USR 47
+#define SCLK_CRYPTO 48
+#define SCLK_CRYPTO_APK 49
+#define SCLK_DDRC 50
+#define SCLK_ISP 51
+#define SCLK_CIF_OUT 52
+#define SCLK_RGA_CORE 53
+#define SCLK_VOPB_PWM 54
+#define SCLK_NANDC 55
+#define SCLK_SDIO 56
+#define SCLK_EMMC 57
+#define SCLK_SFC 58
+#define SCLK_SDMMC 59
+#define SCLK_OTG_ADP 60
+#define SCLK_GMAC_SRC 61
+#define SCLK_GMAC 62
+#define SCLK_GMAC_RX_TX 63
+#define SCLK_MAC_REF 64
+#define SCLK_MAC_REFOUT 65
+#define SCLK_MAC_OUT 66
+#define SCLK_SDMMC_DRV 67
+#define SCLK_SDMMC_SAMPLE 68
+#define SCLK_SDIO_DRV 69
+#define SCLK_SDIO_SAMPLE 70
+#define SCLK_EMMC_DRV 71
+#define SCLK_EMMC_SAMPLE 72
+#define SCLK_GPU 73
+#define SCLK_PVTM 74
+#define SCLK_CORE_VPU 75
+#define SCLK_GMAC_RMII 76
+#define SCLK_UART2_SRC 77
+#define SCLK_NANDC_DIV 78
+#define SCLK_NANDC_DIV50 79
+#define SCLK_SDIO_DIV 80
+#define SCLK_SDIO_DIV50 81
+#define SCLK_EMMC_DIV 82
+#define SCLK_EMMC_DIV50 83
+#define SCLK_DDRCLK 84
+#define SCLK_UART1_SRC 85
+
+/* dclk gates */
+#define DCLK_VOPB 150
+#define DCLK_VOPL 151
+
+/* aclk gates */
+#define ACLK_GPU 170
+#define ACLK_BUS_PRE 171
+#define ACLK_CRYPTO 172
+#define ACLK_VI_PRE 173
+#define ACLK_VO_PRE 174
+#define ACLK_VPU 175
+#define ACLK_PERI_PRE 176
+#define ACLK_GMAC 178
+#define ACLK_CIF 179
+#define ACLK_ISP 180
+#define ACLK_VOPB 181
+#define ACLK_VOPL 182
+#define ACLK_RGA 183
+#define ACLK_GIC 184
+#define ACLK_DCF 186
+#define ACLK_DMAC 187
+#define ACLK_BUS_SRC 188
+#define ACLK_PERI_SRC 189
+
+/* hclk gates */
+#define HCLK_BUS_PRE 240
+#define HCLK_CRYPTO 241
+#define HCLK_VI_PRE 242
+#define HCLK_VO_PRE 243
+#define HCLK_VPU 244
+#define HCLK_PERI_PRE 245
+#define HCLK_MMC_NAND 246
+#define HCLK_SDMMC 247
+#define HCLK_USB 248
+#define HCLK_CIF 249
+#define HCLK_ISP 250
+#define HCLK_VOPB 251
+#define HCLK_VOPL 252
+#define HCLK_RGA 253
+#define HCLK_NANDC 254
+#define HCLK_SDIO 255
+#define HCLK_EMMC 256
+#define HCLK_SFC 257
+#define HCLK_OTG 258
+#define HCLK_HOST 259
+#define HCLK_HOST_ARB 260
+#define HCLK_PDM 261
+#define HCLK_I2S0 262
+#define HCLK_I2S1 263
+#define HCLK_I2S2 264
+
+/* pclk gates */
+#define PCLK_BUS_PRE 320
+#define PCLK_DDR 321
+#define PCLK_VO_PRE 322
+#define PCLK_GMAC 323
+#define PCLK_MIPI_DSI 324
+#define PCLK_MIPIDSIPHY 325
+#define PCLK_MIPICSIPHY 326
+#define PCLK_USB_GRF 327
+#define PCLK_DCF 328
+#define PCLK_UART1 329
+#define PCLK_UART2 330
+#define PCLK_UART3 331
+#define PCLK_UART4 332
+#define PCLK_UART5 333
+#define PCLK_I2C0 334
+#define PCLK_I2C1 335
+#define PCLK_I2C2 336
+#define PCLK_I2C3 337
+#define PCLK_I2C4 338
+#define PCLK_PWM0 339
+#define PCLK_PWM1 340
+#define PCLK_SPI0 341
+#define PCLK_SPI1 342
+#define PCLK_SARADC 343
+#define PCLK_TSADC 344
+#define PCLK_TIMER 345
+#define PCLK_OTP_NS 346
+#define PCLK_WDT_NS 347
+#define PCLK_GPIO1 348
+#define PCLK_GPIO2 349
+#define PCLK_GPIO3 350
+#define PCLK_ISP 351
+#define PCLK_CIF 352
+#define PCLK_OTP_PHY 353
+
+#define CLK_NR_CLKS (PCLK_OTP_PHY + 1)
+
+/* pmu-clocks indices */
+
+#define PLL_GPLL 1
+
+#define SCLK_RTC32K_PMU 4
+#define SCLK_WIFI_PMU 5
+#define SCLK_UART0_PMU 6
+#define SCLK_PVTM_PMU 7
+#define PCLK_PMU_PRE 8
+#define SCLK_REF24M_PMU 9
+#define SCLK_USBPHY_REF 10
+#define SCLK_MIPIDSIPHY_REF 11
+
+#define XIN24M_DIV 12
+
+#define PCLK_GPIO0_PMU 20
+#define PCLK_UART0_PMU 21
+
+#define CLKPMU_NR_CLKS (PCLK_UART0_PMU + 1)
+
+/* soft-reset indices */
+#define SRST_CORE0_PO 0
+#define SRST_CORE1_PO 1
+#define SRST_CORE2_PO 2
+#define SRST_CORE3_PO 3
+#define SRST_CORE0 4
+#define SRST_CORE1 5
+#define SRST_CORE2 6
+#define SRST_CORE3 7
+#define SRST_CORE0_DBG 8
+#define SRST_CORE1_DBG 9
+#define SRST_CORE2_DBG 10
+#define SRST_CORE3_DBG 11
+#define SRST_TOPDBG 12
+#define SRST_CORE_NOC 13
+#define SRST_STRC_A 14
+#define SRST_L2C 15
+
+#define SRST_DAP 16
+#define SRST_CORE_PVTM 17
+#define SRST_GPU 18
+#define SRST_GPU_NIU 19
+#define SRST_UPCTL2 20
+#define SRST_UPCTL2_A 21
+#define SRST_UPCTL2_P 22
+#define SRST_MSCH 23
+#define SRST_MSCH_P 24
+#define SRST_DDRMON_P 25
+#define SRST_DDRSTDBY_P 26
+#define SRST_DDRSTDBY 27
+#define SRST_DDRGRF_p 28
+#define SRST_AXI_SPLIT_A 29
+#define SRST_AXI_CMD_A 30
+#define SRST_AXI_CMD_P 31
+
+#define SRST_DDRPHY 32
+#define SRST_DDRPHYDIV 33
+#define SRST_DDRPHY_P 34
+#define SRST_VPU_A 36
+#define SRST_VPU_NIU_A 37
+#define SRST_VPU_H 38
+#define SRST_VPU_NIU_H 39
+#define SRST_VI_NIU_A 40
+#define SRST_VI_NIU_H 41
+#define SRST_ISP_H 42
+#define SRST_ISP 43
+#define SRST_CIF_A 44
+#define SRST_CIF_H 45
+#define SRST_CIF_PCLKIN 46
+#define SRST_MIPICSIPHY_P 47
+
+#define SRST_VO_NIU_A 48
+#define SRST_VO_NIU_H 49
+#define SRST_VO_NIU_P 50
+#define SRST_VOPB_A 51
+#define SRST_VOPB_H 52
+#define SRST_VOPB 53
+#define SRST_PWM_VOPB 54
+#define SRST_VOPL_A 55
+#define SRST_VOPL_H 56
+#define SRST_VOPL 57
+#define SRST_RGA_A 58
+#define SRST_RGA_H 59
+#define SRST_RGA 60
+#define SRST_MIPIDSI_HOST_P 61
+#define SRST_MIPIDSIPHY_P 62
+#define SRST_VPU_CORE 63
+
+#define SRST_PERI_NIU_A 64
+#define SRST_USB_NIU_H 65
+#define SRST_USB2OTG_H 66
+#define SRST_USB2OTG 67
+#define SRST_USB2OTG_ADP 68
+#define SRST_USB2HOST_H 69
+#define SRST_USB2HOST_ARB_H 70
+#define SRST_USB2HOST_AUX_H 71
+#define SRST_USB2HOST_EHCI 72
+#define SRST_USB2HOST 73
+#define SRST_USBPHYPOR 74
+#define SRST_USBPHY_OTG_PORT 75
+#define SRST_USBPHY_HOST_PORT 76
+#define SRST_USBPHY_GRF 77
+#define SRST_CPU_BOOST_P 78
+#define SRST_CPU_BOOST 79
+
+#define SRST_MMC_NAND_NIU_H 80
+#define SRST_SDIO_H 81
+#define SRST_EMMC_H 82
+#define SRST_SFC_H 83
+#define SRST_SFC 84
+#define SRST_SDCARD_NIU_H 85
+#define SRST_SDMMC_H 86
+#define SRST_NANDC_H 89
+#define SRST_NANDC 90
+#define SRST_GMAC_NIU_A 92
+#define SRST_GMAC_NIU_P 93
+#define SRST_GMAC_A 94
+
+#define SRST_PMU_NIU_P 96
+#define SRST_PMU_SGRF_P 97
+#define SRST_PMU_GRF_P 98
+#define SRST_PMU 99
+#define SRST_PMU_MEM_P 100
+#define SRST_PMU_GPIO0_P 101
+#define SRST_PMU_UART0_P 102
+#define SRST_PMU_CRU_P 103
+#define SRST_PMU_PVTM 104
+#define SRST_PMU_UART 105
+#define SRST_PMU_NIU_H 106
+#define SRST_PMU_DDR_FAIL_SAVE 107
+#define SRST_PMU_CORE_PERF_A 108
+#define SRST_PMU_CORE_GRF_P 109
+#define SRST_PMU_GPU_PERF_A 110
+#define SRST_PMU_GPU_GRF_P 111
+
+#define SRST_CRYPTO_NIU_A 112
+#define SRST_CRYPTO_NIU_H 113
+#define SRST_CRYPTO_A 114
+#define SRST_CRYPTO_H 115
+#define SRST_CRYPTO 116
+#define SRST_CRYPTO_APK 117
+#define SRST_BUS_NIU_H 120
+#define SRST_USB_NIU_P 121
+#define SRST_BUS_TOP_NIU_P 122
+#define SRST_INTMEM_A 123
+#define SRST_GIC_A 124
+#define SRST_ROM_H 126
+#define SRST_DCF_A 127
+
+#define SRST_DCF_P 128
+#define SRST_PDM_H 129
+#define SRST_PDM 130
+#define SRST_I2S0_H 131
+#define SRST_I2S0_TX 132
+#define SRST_I2S1_H 133
+#define SRST_I2S1 134
+#define SRST_I2S2_H 135
+#define SRST_I2S2 136
+#define SRST_UART1_P 137
+#define SRST_UART1 138
+#define SRST_UART2_P 139
+#define SRST_UART2 140
+#define SRST_UART3_P 141
+#define SRST_UART3 142
+#define SRST_UART4_P 143
+
+#define SRST_UART4 144
+#define SRST_UART5_P 145
+#define SRST_UART5 146
+#define SRST_I2C0_P 147
+#define SRST_I2C0 148
+#define SRST_I2C1_P 149
+#define SRST_I2C1 150
+#define SRST_I2C2_P 151
+#define SRST_I2C2 152
+#define SRST_I2C3_P 153
+#define SRST_I2C3 154
+#define SRST_PWM0_P 157
+#define SRST_PWM0 158
+#define SRST_PWM1_P 159
+
+#define SRST_PWM1 160
+#define SRST_SPI0_P 161
+#define SRST_SPI0 162
+#define SRST_SPI1_P 163
+#define SRST_SPI1 164
+#define SRST_SARADC_P 165
+#define SRST_SARADC 166
+#define SRST_TSADC_P 167
+#define SRST_TSADC 168
+#define SRST_TIMER_P 169
+#define SRST_TIMER0 170
+#define SRST_TIMER1 171
+#define SRST_TIMER2 172
+#define SRST_TIMER3 173
+#define SRST_TIMER4 174
+#define SRST_TIMER5 175
+
+#define SRST_OTP_NS_P 176
+#define SRST_OTP_NS_SBPI 177
+#define SRST_OTP_NS_USR 178
+#define SRST_OTP_PHY_P 179
+#define SRST_OTP_PHY 180
+#define SRST_WDT_NS_P 181
+#define SRST_GPIO1_P 182
+#define SRST_GPIO2_P 183
+#define SRST_GPIO3_P 184
+#define SRST_SGRF_P 185
+#define SRST_GRF_P 186
+#define SRST_I2S0_RX 191
+
+#endif
diff --git a/include/dt-bindings/clock/pxa-clock.h b/include/dt-bindings/clock/pxa-clock.h
index e65803b1dc7e..0b0fd2b01538 100644
--- a/include/dt-bindings/clock/pxa-clock.h
+++ b/include/dt-bindings/clock/pxa-clock.h
@@ -72,6 +72,7 @@
#define CLK_USIM 58
#define CLK_USIM1 59
#define CLK_USMI0 60
-#define CLK_MAX 61
+#define CLK_OSC32k768 61
+#define CLK_MAX 62
#endif
diff --git a/include/dt-bindings/clock/qcom,dispcc-sdm845.h b/include/dt-bindings/clock/qcom,dispcc-sdm845.h
new file mode 100644
index 000000000000..11eed4bc9646
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,dispcc-sdm845.h
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_SDM_DISP_CC_SDM845_H
+#define _DT_BINDINGS_CLK_SDM_DISP_CC_SDM845_H
+
+/* DISP_CC clock registers */
+#define DISP_CC_MDSS_AHB_CLK 0
+#define DISP_CC_MDSS_AXI_CLK 1
+#define DISP_CC_MDSS_BYTE0_CLK 2
+#define DISP_CC_MDSS_BYTE0_CLK_SRC 3
+#define DISP_CC_MDSS_BYTE0_INTF_CLK 4
+#define DISP_CC_MDSS_BYTE1_CLK 5
+#define DISP_CC_MDSS_BYTE1_CLK_SRC 6
+#define DISP_CC_MDSS_BYTE1_INTF_CLK 7
+#define DISP_CC_MDSS_ESC0_CLK 8
+#define DISP_CC_MDSS_ESC0_CLK_SRC 9
+#define DISP_CC_MDSS_ESC1_CLK 10
+#define DISP_CC_MDSS_ESC1_CLK_SRC 11
+#define DISP_CC_MDSS_MDP_CLK 12
+#define DISP_CC_MDSS_MDP_CLK_SRC 13
+#define DISP_CC_MDSS_MDP_LUT_CLK 14
+#define DISP_CC_MDSS_PCLK0_CLK 15
+#define DISP_CC_MDSS_PCLK0_CLK_SRC 16
+#define DISP_CC_MDSS_PCLK1_CLK 17
+#define DISP_CC_MDSS_PCLK1_CLK_SRC 18
+#define DISP_CC_MDSS_ROT_CLK 19
+#define DISP_CC_MDSS_ROT_CLK_SRC 20
+#define DISP_CC_MDSS_RSCC_AHB_CLK 21
+#define DISP_CC_MDSS_RSCC_VSYNC_CLK 22
+#define DISP_CC_MDSS_VSYNC_CLK 23
+#define DISP_CC_MDSS_VSYNC_CLK_SRC 24
+#define DISP_CC_PLL0 25
+#define DISP_CC_MDSS_BYTE0_DIV_CLK_SRC 26
+#define DISP_CC_MDSS_BYTE1_DIV_CLK_SRC 27
+
+/* DISP_CC Reset */
+#define DISP_CC_MDSS_RSCC_BCR 0
+
+/* DISP_CC GDSCR */
+#define MDSS_GDSC 0
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,gcc-sdm845.h b/include/dt-bindings/clock/qcom,gcc-sdm845.h
index aca61264f12c..f96fc2dbf60e 100644
--- a/include/dt-bindings/clock/qcom,gcc-sdm845.h
+++ b/include/dt-bindings/clock/qcom,gcc-sdm845.h
@@ -192,6 +192,8 @@
#define GCC_VS_CTRL_CLK_SRC 182
#define GCC_VSENSOR_CLK_SRC 183
#define GPLL4 184
+#define GCC_CPUSS_DVM_BUS_CLK 185
+#define GCC_CPUSS_GNOC_CLK 186
/* GCC Resets */
#define GCC_MMSS_BCR 0
diff --git a/include/dt-bindings/clock/r9a06g032-sysctrl.h b/include/dt-bindings/clock/r9a06g032-sysctrl.h
new file mode 100644
index 000000000000..90c0f3dc1ba1
--- /dev/null
+++ b/include/dt-bindings/clock/r9a06g032-sysctrl.h
@@ -0,0 +1,148 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * R9A06G032 sysctrl IDs
+ *
+ * Copyright (C) 2018 Renesas Electronics Europe Limited
+ *
+ * Michel Pollet <michel.pollet@bp.renesas.com>, <buserror@gmail.com>
+ */
+
+#ifndef __DT_BINDINGS_R9A06G032_SYSCTRL_H__
+#define __DT_BINDINGS_R9A06G032_SYSCTRL_H__
+
+#define R9A06G032_CLK_PLL_USB 1
+#define R9A06G032_CLK_48 1 /* AKA CLK_PLL_USB */
+#define R9A06G032_MSEBIS_CLK 3 /* AKA CLKOUT_D16 */
+#define R9A06G032_MSEBIM_CLK 3 /* AKA CLKOUT_D16 */
+#define R9A06G032_CLK_DDRPHY_PLLCLK 5 /* AKA CLKOUT_D1OR2 */
+#define R9A06G032_CLK50 6 /* AKA CLKOUT_D20 */
+#define R9A06G032_CLK25 7 /* AKA CLKOUT_D40 */
+#define R9A06G032_CLK125 9 /* AKA CLKOUT_D8 */
+#define R9A06G032_CLK_P5_PG1 17 /* AKA DIV_P5_PG */
+#define R9A06G032_CLK_REF_SYNC 21 /* AKA DIV_REF_SYNC */
+#define R9A06G032_CLK_25_PG4 26
+#define R9A06G032_CLK_25_PG5 27
+#define R9A06G032_CLK_25_PG6 28
+#define R9A06G032_CLK_25_PG7 29
+#define R9A06G032_CLK_25_PG8 30
+#define R9A06G032_CLK_ADC 31
+#define R9A06G032_CLK_ECAT100 32
+#define R9A06G032_CLK_HSR100 33
+#define R9A06G032_CLK_I2C0 34
+#define R9A06G032_CLK_I2C1 35
+#define R9A06G032_CLK_MII_REF 36
+#define R9A06G032_CLK_NAND 37
+#define R9A06G032_CLK_NOUSBP2_PG6 38
+#define R9A06G032_CLK_P1_PG2 39
+#define R9A06G032_CLK_P1_PG3 40
+#define R9A06G032_CLK_P1_PG4 41
+#define R9A06G032_CLK_P4_PG3 42
+#define R9A06G032_CLK_P4_PG4 43
+#define R9A06G032_CLK_P6_PG1 44
+#define R9A06G032_CLK_P6_PG2 45
+#define R9A06G032_CLK_P6_PG3 46
+#define R9A06G032_CLK_P6_PG4 47
+#define R9A06G032_CLK_PCI_USB 48
+#define R9A06G032_CLK_QSPI0 49
+#define R9A06G032_CLK_QSPI1 50
+#define R9A06G032_CLK_RGMII_REF 51
+#define R9A06G032_CLK_RMII_REF 52
+#define R9A06G032_CLK_SDIO0 53
+#define R9A06G032_CLK_SDIO1 54
+#define R9A06G032_CLK_SERCOS100 55
+#define R9A06G032_CLK_SLCD 56
+#define R9A06G032_CLK_SPI0 57
+#define R9A06G032_CLK_SPI1 58
+#define R9A06G032_CLK_SPI2 59
+#define R9A06G032_CLK_SPI3 60
+#define R9A06G032_CLK_SPI4 61
+#define R9A06G032_CLK_SPI5 62
+#define R9A06G032_CLK_SWITCH 63
+#define R9A06G032_HCLK_ECAT125 65
+#define R9A06G032_HCLK_PINCONFIG 66
+#define R9A06G032_HCLK_SERCOS 67
+#define R9A06G032_HCLK_SGPIO2 68
+#define R9A06G032_HCLK_SGPIO3 69
+#define R9A06G032_HCLK_SGPIO4 70
+#define R9A06G032_HCLK_TIMER0 71
+#define R9A06G032_HCLK_TIMER1 72
+#define R9A06G032_HCLK_USBF 73
+#define R9A06G032_HCLK_USBH 74
+#define R9A06G032_HCLK_USBPM 75
+#define R9A06G032_CLK_48_PG_F 76
+#define R9A06G032_CLK_48_PG4 77
+#define R9A06G032_CLK_DDRPHY_PCLK 81 /* AKA CLK_REF_SYNC_D4 */
+#define R9A06G032_CLK_FW 81 /* AKA CLK_REF_SYNC_D4 */
+#define R9A06G032_CLK_CRYPTO 81 /* AKA CLK_REF_SYNC_D4 */
+#define R9A06G032_CLK_A7MP 84 /* AKA DIV_CA7 */
+#define R9A06G032_HCLK_CAN0 85
+#define R9A06G032_HCLK_CAN1 86
+#define R9A06G032_HCLK_DELTASIGMA 87
+#define R9A06G032_HCLK_PWMPTO 88
+#define R9A06G032_HCLK_RSV 89
+#define R9A06G032_HCLK_SGPIO0 90
+#define R9A06G032_HCLK_SGPIO1 91
+#define R9A06G032_RTOS_MDC 92
+#define R9A06G032_CLK_CM3 93
+#define R9A06G032_CLK_DDRC 94
+#define R9A06G032_CLK_ECAT25 95
+#define R9A06G032_CLK_HSR50 96
+#define R9A06G032_CLK_HW_RTOS 97
+#define R9A06G032_CLK_SERCOS50 98
+#define R9A06G032_HCLK_ADC 99
+#define R9A06G032_HCLK_CM3 100
+#define R9A06G032_HCLK_CRYPTO_EIP150 101
+#define R9A06G032_HCLK_CRYPTO_EIP93 102
+#define R9A06G032_HCLK_DDRC 103
+#define R9A06G032_HCLK_DMA0 104
+#define R9A06G032_HCLK_DMA1 105
+#define R9A06G032_HCLK_GMAC0 106
+#define R9A06G032_HCLK_GMAC1 107
+#define R9A06G032_HCLK_GPIO0 108
+#define R9A06G032_HCLK_GPIO1 109
+#define R9A06G032_HCLK_GPIO2 110
+#define R9A06G032_HCLK_HSR 111
+#define R9A06G032_HCLK_I2C0 112
+#define R9A06G032_HCLK_I2C1 113
+#define R9A06G032_HCLK_LCD 114
+#define R9A06G032_HCLK_MSEBI_M 115
+#define R9A06G032_HCLK_MSEBI_S 116
+#define R9A06G032_HCLK_NAND 117
+#define R9A06G032_HCLK_PG_I 118
+#define R9A06G032_HCLK_PG19 119
+#define R9A06G032_HCLK_PG20 120
+#define R9A06G032_HCLK_PG3 121
+#define R9A06G032_HCLK_PG4 122
+#define R9A06G032_HCLK_QSPI0 123
+#define R9A06G032_HCLK_QSPI1 124
+#define R9A06G032_HCLK_ROM 125
+#define R9A06G032_HCLK_RTC 126
+#define R9A06G032_HCLK_SDIO0 127
+#define R9A06G032_HCLK_SDIO1 128
+#define R9A06G032_HCLK_SEMAP 129
+#define R9A06G032_HCLK_SPI0 130
+#define R9A06G032_HCLK_SPI1 131
+#define R9A06G032_HCLK_SPI2 132
+#define R9A06G032_HCLK_SPI3 133
+#define R9A06G032_HCLK_SPI4 134
+#define R9A06G032_HCLK_SPI5 135
+#define R9A06G032_HCLK_SWITCH 136
+#define R9A06G032_HCLK_SWITCH_RG 137
+#define R9A06G032_HCLK_UART0 138
+#define R9A06G032_HCLK_UART1 139
+#define R9A06G032_HCLK_UART2 140
+#define R9A06G032_HCLK_UART3 141
+#define R9A06G032_HCLK_UART4 142
+#define R9A06G032_HCLK_UART5 143
+#define R9A06G032_HCLK_UART6 144
+#define R9A06G032_HCLK_UART7 145
+#define R9A06G032_CLK_UART0 146
+#define R9A06G032_CLK_UART1 147
+#define R9A06G032_CLK_UART2 148
+#define R9A06G032_CLK_UART3 149
+#define R9A06G032_CLK_UART4 150
+#define R9A06G032_CLK_UART5 151
+#define R9A06G032_CLK_UART6 152
+#define R9A06G032_CLK_UART7 153
+
+#endif /* __DT_BINDINGS_R9A06G032_SYSCTRL_H__ */
diff --git a/include/dt-bindings/clock/rk3399-ddr.h b/include/dt-bindings/clock/rk3399-ddr.h
new file mode 100644
index 000000000000..ed2280844963
--- /dev/null
+++ b/include/dt-bindings/clock/rk3399-ddr.h
@@ -0,0 +1,56 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */
+
+#ifndef DT_BINDINGS_DDR_H
+#define DT_BINDINGS_DDR_H
+
+/*
+ * DDR3 SDRAM Standard Speed Bins include tCK, tRCD, tRP, tRAS and tRC for
+ * each corresponding bin.
+ */
+
+/* DDR3-800 (5-5-5) */
+#define DDR3_800D 0
+/* DDR3-800 (6-6-6) */
+#define DDR3_800E 1
+/* DDR3-1066 (6-6-6) */
+#define DDR3_1066E 2
+/* DDR3-1066 (7-7-7) */
+#define DDR3_1066F 3
+/* DDR3-1066 (8-8-8) */
+#define DDR3_1066G 4
+/* DDR3-1333 (7-7-7) */
+#define DDR3_1333F 5
+/* DDR3-1333 (8-8-8) */
+#define DDR3_1333G 6
+/* DDR3-1333 (9-9-9) */
+#define DDR3_1333H 7
+/* DDR3-1333 (10-10-10) */
+#define DDR3_1333J 8
+/* DDR3-1600 (8-8-8) */
+#define DDR3_1600G 9
+/* DDR3-1600 (9-9-9) */
+#define DDR3_1600H 10
+/* DDR3-1600 (10-10-10) */
+#define DDR3_1600J 11
+/* DDR3-1600 (11-11-11) */
+#define DDR3_1600K 12
+/* DDR3-1866 (10-10-10) */
+#define DDR3_1866J 13
+/* DDR3-1866 (11-11-11) */
+#define DDR3_1866K 14
+/* DDR3-1866 (12-12-12) */
+#define DDR3_1866L 15
+/* DDR3-1866 (13-13-13) */
+#define DDR3_1866M 16
+/* DDR3-2133 (11-11-11) */
+#define DDR3_2133K 17
+/* DDR3-2133 (12-12-12) */
+#define DDR3_2133L 18
+/* DDR3-2133 (13-13-13) */
+#define DDR3_2133M 19
+/* DDR3-2133 (14-14-14) */
+#define DDR3_2133N 20
+/* DDR3 ATF default */
+#define DDR3_DEFAULT 21
+
+#endif
diff --git a/include/dt-bindings/clock/sun8i-r40-ccu.h b/include/dt-bindings/clock/sun8i-r40-ccu.h
index 4fa5f69fc297..f9e15a235626 100644
--- a/include/dt-bindings/clock/sun8i-r40-ccu.h
+++ b/include/dt-bindings/clock/sun8i-r40-ccu.h
@@ -43,6 +43,10 @@
#ifndef _DT_BINDINGS_CLK_SUN8I_R40_H_
#define _DT_BINDINGS_CLK_SUN8I_R40_H_
+#define CLK_PLL_VIDEO0 7
+
+#define CLK_PLL_VIDEO1 16
+
#define CLK_CPU 24
#define CLK_BUS_MIPI_DSI 29
diff --git a/include/dt-bindings/clock/sun8i-tcon-top.h b/include/dt-bindings/clock/sun8i-tcon-top.h
new file mode 100644
index 000000000000..25164d767835
--- /dev/null
+++ b/include/dt-bindings/clock/sun8i-tcon-top.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */
+/* Copyright (C) 2018 Jernej Skrabec <jernej.skrabec@siol.net> */
+
+#ifndef _DT_BINDINGS_CLOCK_SUN8I_TCON_TOP_H_
+#define _DT_BINDINGS_CLOCK_SUN8I_TCON_TOP_H_
+
+#define CLK_TCON_TOP_TV0 0
+#define CLK_TCON_TOP_TV1 1
+#define CLK_TCON_TOP_DSI 2
+
+#endif /* _DT_BINDINGS_CLOCK_SUN8I_TCON_TOP_H_ */
diff --git a/include/dt-bindings/pinctrl/at91.h b/include/dt-bindings/pinctrl/at91.h
index 2732d6c0fb39..eb81867eac77 100644
--- a/include/dt-bindings/pinctrl/at91.h
+++ b/include/dt-bindings/pinctrl/at91.h
@@ -39,4 +39,8 @@
#define AT91_PERIPH_C 3
#define AT91_PERIPH_D 4
+#define ATMEL_PIO_DRVSTR_LO 1
+#define ATMEL_PIO_DRVSTR_ME 2
+#define ATMEL_PIO_DRVSTR_HI 3
+
#endif /* __DT_BINDINGS_AT91_PINCTRL_H__ */
diff --git a/include/dt-bindings/pinctrl/samsung.h b/include/dt-bindings/pinctrl/samsung.h
index ceb672305f59..b1832506b923 100644
--- a/include/dt-bindings/pinctrl/samsung.h
+++ b/include/dt-bindings/pinctrl/samsung.h
@@ -1,14 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Samsung's Exynos pinctrl bindings
*
* Copyright (c) 2016 Samsung Electronics Co., Ltd.
* http://www.samsung.com
* Author: Krzysztof Kozlowski <krzk@kernel.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
+ */
#ifndef __DT_BINDINGS_PINCTRL_SAMSUNG_H__
#define __DT_BINDINGS_PINCTRL_SAMSUNG_H__
diff --git a/include/dt-bindings/regulator/maxim,max77802.h b/include/dt-bindings/regulator/maxim,max77802.h
index cf28631d7109..d0baba1973d4 100644
--- a/include/dt-bindings/regulator/maxim,max77802.h
+++ b/include/dt-bindings/regulator/maxim,max77802.h
@@ -1,10 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2014 Google, Inc
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
* Device Tree binding constants for the Maxim 77802 PMIC regulators
*/
diff --git a/include/dt-bindings/regulator/qcom,rpmh-regulator.h b/include/dt-bindings/regulator/qcom,rpmh-regulator.h
new file mode 100644
index 000000000000..86713dcf9e02
--- /dev/null
+++ b/include/dt-bindings/regulator/qcom,rpmh-regulator.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2018, The Linux Foundation. All rights reserved. */
+
+#ifndef __QCOM_RPMH_REGULATOR_H
+#define __QCOM_RPMH_REGULATOR_H
+
+/*
+ * These mode constants may be used to specify modes for various RPMh regulator
+ * device tree properties (e.g. regulator-initial-mode). Each type of regulator
+ * supports a subset of the possible modes.
+ *
+ * %RPMH_REGULATOR_MODE_RET: Retention mode in which only an extremely small
+ * load current is allowed. This mode is supported
+ * by LDO and SMPS type regulators.
+ * %RPMH_REGULATOR_MODE_LPM: Low power mode in which a small load current is
+ * allowed. This mode corresponds to PFM for SMPS
+ * and BOB type regulators. This mode is supported
+ * by LDO, HFSMPS, BOB, and PMIC4 FTSMPS type
+ * regulators.
+ * %RPMH_REGULATOR_MODE_AUTO: Auto mode in which the regulator hardware
+ * automatically switches between LPM and HPM based
+ * upon the real-time load current. This mode is
+ * supported by HFSMPS, BOB, and PMIC4 FTSMPS type
+ * regulators.
+ * %RPMH_REGULATOR_MODE_HPM: High power mode in which the full rated current
+ * of the regulator is allowed. This mode
+ * corresponds to PWM for SMPS and BOB type
+ * regulators. This mode is supported by all types
+ * of regulators.
+ */
+#define RPMH_REGULATOR_MODE_RET 0
+#define RPMH_REGULATOR_MODE_LPM 1
+#define RPMH_REGULATOR_MODE_AUTO 2
+#define RPMH_REGULATOR_MODE_HPM 3
+
+#endif
diff --git a/include/dt-bindings/soc/qcom,rpmh-rsc.h b/include/dt-bindings/soc/qcom,rpmh-rsc.h
new file mode 100644
index 000000000000..868f998ea998
--- /dev/null
+++ b/include/dt-bindings/soc/qcom,rpmh-rsc.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef __DT_QCOM_RPMH_RSC_H__
+#define __DT_QCOM_RPMH_RSC_H__
+
+#define SLEEP_TCS 0
+#define WAKE_TCS 1
+#define ACTIVE_TCS 2
+#define CONTROL_TCS 3
+
+#endif /* __DT_QCOM_RPMH_RSC_H__ */
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index 4b35a66383f9..de8d3d3fa651 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -443,6 +443,9 @@ int acpi_check_resource_conflict(const struct resource *res);
int acpi_check_region(resource_size_t start, resource_size_t n,
const char *name);
+acpi_status acpi_release_memory(acpi_handle handle, struct resource *res,
+ u32 level);
+
int acpi_resources_are_enforced(void);
#ifdef CONFIG_HIBERNATION
@@ -1055,27 +1058,20 @@ static inline int acpi_dev_gpio_irq_get(struct acpi_device *adev, int index)
/* Device properties */
-#define MAX_ACPI_REFERENCE_ARGS 8
-struct acpi_reference_args {
- struct acpi_device *adev;
- size_t nargs;
- u64 args[MAX_ACPI_REFERENCE_ARGS];
-};
-
#ifdef CONFIG_ACPI
int acpi_dev_get_property(const struct acpi_device *adev, const char *name,
acpi_object_type type, const union acpi_object **obj);
int __acpi_node_get_property_reference(const struct fwnode_handle *fwnode,
const char *name, size_t index, size_t num_args,
- struct acpi_reference_args *args);
+ struct fwnode_reference_args *args);
static inline int acpi_node_get_property_reference(
const struct fwnode_handle *fwnode,
const char *name, size_t index,
- struct acpi_reference_args *args)
+ struct fwnode_reference_args *args)
{
return __acpi_node_get_property_reference(fwnode, name, index,
- MAX_ACPI_REFERENCE_ARGS, args);
+ NR_FWNODE_REFERENCE_ARGS, args);
}
int acpi_node_prop_get(const struct fwnode_handle *fwnode, const char *propname,
@@ -1093,14 +1089,6 @@ struct fwnode_handle *acpi_get_next_subnode(const struct fwnode_handle *fwnode,
struct fwnode_handle *child);
struct fwnode_handle *acpi_node_get_parent(const struct fwnode_handle *fwnode);
-struct fwnode_handle *
-acpi_graph_get_next_endpoint(const struct fwnode_handle *fwnode,
- struct fwnode_handle *prev);
-int acpi_graph_get_remote_endpoint(const struct fwnode_handle *fwnode,
- struct fwnode_handle **remote,
- struct fwnode_handle **port,
- struct fwnode_handle **endpoint);
-
struct acpi_probe_entry;
typedef bool (*acpi_probe_entry_validate_subtbl)(struct acpi_subtable_header *,
struct acpi_probe_entry *);
@@ -1166,7 +1154,7 @@ static inline int acpi_dev_get_property(struct acpi_device *adev,
static inline int
__acpi_node_get_property_reference(const struct fwnode_handle *fwnode,
const char *name, size_t index, size_t num_args,
- struct acpi_reference_args *args)
+ struct fwnode_reference_args *args)
{
return -ENXIO;
}
@@ -1174,7 +1162,7 @@ __acpi_node_get_property_reference(const struct fwnode_handle *fwnode,
static inline int
acpi_node_get_property_reference(const struct fwnode_handle *fwnode,
const char *name, size_t index,
- struct acpi_reference_args *args)
+ struct fwnode_reference_args *args)
{
return -ENXIO;
}
diff --git a/include/linux/ascii85.h b/include/linux/ascii85.h
new file mode 100644
index 000000000000..4cc40201273e
--- /dev/null
+++ b/include/linux/ascii85.h
@@ -0,0 +1,38 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (c) 2008 Intel Corporation
+ * Copyright (c) 2018 The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _ASCII85_H_
+#define _ASCII85_H_
+
+#include <linux/kernel.h>
+
+#define ASCII85_BUFSZ 6
+
+static inline long
+ascii85_encode_len(long len)
+{
+ return DIV_ROUND_UP(len, 4);
+}
+
+static inline const char *
+ascii85_encode(u32 in, char *out)
+{
+ int i;
+
+ if (in == 0)
+ return "z";
+
+ out[5] = '\0';
+ for (i = 5; i--; ) {
+ out[i] = '!' + in % 85;
+ in /= 85;
+ }
+
+ return out;
+}
+
+#endif
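
Illustrative usage sketch (editorial, not part of the patch): streaming a word-aligned buffer as ascii85 through a seq_file. The dump_ascii85() helper, its seq_file caller, and the "len is a multiple of 4" assumption are all hypothetical.

#include <linux/ascii85.h>
#include <linux/seq_file.h>
#include <linux/types.h>

/* Hypothetical helper: emit @len bytes (assumed a multiple of 4) of @data. */
static void dump_ascii85(struct seq_file *m, const u32 *data, size_t len)
{
	char out[ASCII85_BUFSZ];
	long i;

	/* ascii85_encode_len() returns the number of 32-bit words to encode. */
	for (i = 0; i < ascii85_encode_len(len); i++)
		seq_printf(m, "%s", ascii85_encode(data[i], out));
}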
diff --git a/include/linux/atmdev.h b/include/linux/atmdev.h
index 0c27515d2cf6..8124815eb121 100644
--- a/include/linux/atmdev.h
+++ b/include/linux/atmdev.h
@@ -214,6 +214,7 @@ struct atmphy_ops {
struct atm_skb_data {
struct atm_vcc *vcc; /* ATM VCC */
unsigned long atm_options; /* ATM layer options */
+ unsigned int acct_truesize; /* truesize accounted to vcc */
};
#define VCC_HTABLE_SIZE 32
@@ -241,6 +242,20 @@ void vcc_insert_socket(struct sock *sk);
void atm_dev_release_vccs(struct atm_dev *dev);
+static inline void atm_account_tx(struct atm_vcc *vcc, struct sk_buff *skb)
+{
+ /*
+ * Because ATM skbs may not belong to a sock (and we don't
+ * necessarily want to), skb->truesize may be adjusted,
+ * escaping the hack in pskb_expand_head() which avoids
+ * doing so for some cases. So stash the value of truesize
+ * at the time we accounted it, and atm_pop_raw() can use
+ * that value later, in case it changes.
+ */
+ refcount_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
+ ATM_SKB(skb)->acct_truesize = skb->truesize;
+ ATM_SKB(skb)->atm_options = vcc->atm_options;
+}
static inline void atm_force_charge(struct atm_vcc *vcc,int truesize)
{
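
Hedged sketch of how a driver transmit path would use the new helper: charge the skb to the VCC before queueing it, so the pop path can later release the same stashed acct_truesize. The example_send() function and its hardware-queue step are invented for illustration.

#include <linux/atmdev.h>
#include <linux/skbuff.h>

/* Hypothetical vcc->send() implementation. */
static int example_send(struct atm_vcc *vcc, struct sk_buff *skb)
{
	/* Charge skb->truesize to the VCC and stash it in acct_truesize. */
	atm_account_tx(vcc, skb);

	/* ... hand the skb to the hardware queue here ... */
	return 0;
}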
diff --git a/include/linux/atomic.h b/include/linux/atomic.h
index 01ce3997cb42..1e8e88bdaf09 100644
--- a/include/linux/atomic.h
+++ b/include/linux/atomic.h
@@ -2,6 +2,8 @@
/* Atomic operations usable in machine independent code */
#ifndef _LINUX_ATOMIC_H
#define _LINUX_ATOMIC_H
+#include <linux/types.h>
+
#include <asm/atomic.h>
#include <asm/barrier.h>
@@ -36,40 +38,46 @@
* barriers on top of the relaxed variant. In the case where the relaxed
* variant is already fully ordered, no additional barriers are needed.
*
- * Besides, if an arch has a special barrier for acquire/release, it could
- * implement its own __atomic_op_* and use the same framework for building
- * variants
- *
- * If an architecture overrides __atomic_op_acquire() it will probably want
- * to define smp_mb__after_spinlock().
+ * If an architecture overrides __atomic_acquire_fence() it will probably
+ * want to define smp_mb__after_spinlock().
*/
-#ifndef __atomic_op_acquire
+#ifndef __atomic_acquire_fence
+#define __atomic_acquire_fence smp_mb__after_atomic
+#endif
+
+#ifndef __atomic_release_fence
+#define __atomic_release_fence smp_mb__before_atomic
+#endif
+
+#ifndef __atomic_pre_full_fence
+#define __atomic_pre_full_fence smp_mb__before_atomic
+#endif
+
+#ifndef __atomic_post_full_fence
+#define __atomic_post_full_fence smp_mb__after_atomic
+#endif
+
#define __atomic_op_acquire(op, args...) \
({ \
typeof(op##_relaxed(args)) __ret = op##_relaxed(args); \
- smp_mb__after_atomic(); \
+ __atomic_acquire_fence(); \
__ret; \
})
-#endif
-#ifndef __atomic_op_release
#define __atomic_op_release(op, args...) \
({ \
- smp_mb__before_atomic(); \
+ __atomic_release_fence(); \
op##_relaxed(args); \
})
-#endif
-#ifndef __atomic_op_fence
#define __atomic_op_fence(op, args...) \
({ \
typeof(op##_relaxed(args)) __ret; \
- smp_mb__before_atomic(); \
+ __atomic_pre_full_fence(); \
__ret = op##_relaxed(args); \
- smp_mb__after_atomic(); \
+ __atomic_post_full_fence(); \
__ret; \
})
-#endif
/* atomic_add_return_relaxed */
#ifndef atomic_add_return_relaxed
@@ -95,11 +103,23 @@
#endif
#endif /* atomic_add_return_relaxed */
+#ifndef atomic_inc
+#define atomic_inc(v) atomic_add(1, (v))
+#endif
+
/* atomic_inc_return_relaxed */
#ifndef atomic_inc_return_relaxed
+
+#ifndef atomic_inc_return
+#define atomic_inc_return(v) atomic_add_return(1, (v))
+#define atomic_inc_return_relaxed(v) atomic_add_return_relaxed(1, (v))
+#define atomic_inc_return_acquire(v) atomic_add_return_acquire(1, (v))
+#define atomic_inc_return_release(v) atomic_add_return_release(1, (v))
+#else /* atomic_inc_return */
#define atomic_inc_return_relaxed atomic_inc_return
#define atomic_inc_return_acquire atomic_inc_return
#define atomic_inc_return_release atomic_inc_return
+#endif /* atomic_inc_return */
#else /* atomic_inc_return_relaxed */
@@ -143,11 +163,23 @@
#endif
#endif /* atomic_sub_return_relaxed */
+#ifndef atomic_dec
+#define atomic_dec(v) atomic_sub(1, (v))
+#endif
+
/* atomic_dec_return_relaxed */
#ifndef atomic_dec_return_relaxed
+
+#ifndef atomic_dec_return
+#define atomic_dec_return(v) atomic_sub_return(1, (v))
+#define atomic_dec_return_relaxed(v) atomic_sub_return_relaxed(1, (v))
+#define atomic_dec_return_acquire(v) atomic_sub_return_acquire(1, (v))
+#define atomic_dec_return_release(v) atomic_sub_return_release(1, (v))
+#else /* atomic_dec_return */
#define atomic_dec_return_relaxed atomic_dec_return
#define atomic_dec_return_acquire atomic_dec_return
#define atomic_dec_return_release atomic_dec_return
+#endif /* atomic_dec_return */
#else /* atomic_dec_return_relaxed */
@@ -328,12 +360,22 @@
#endif
#endif /* atomic_fetch_and_relaxed */
-#ifdef atomic_andnot
-/* atomic_fetch_andnot_relaxed */
+#ifndef atomic_andnot
+#define atomic_andnot(i, v) atomic_and(~(int)(i), (v))
+#endif
+
#ifndef atomic_fetch_andnot_relaxed
-#define atomic_fetch_andnot_relaxed atomic_fetch_andnot
-#define atomic_fetch_andnot_acquire atomic_fetch_andnot
-#define atomic_fetch_andnot_release atomic_fetch_andnot
+
+#ifndef atomic_fetch_andnot
+#define atomic_fetch_andnot(i, v) atomic_fetch_and(~(int)(i), (v))
+#define atomic_fetch_andnot_relaxed(i, v) atomic_fetch_and_relaxed(~(int)(i), (v))
+#define atomic_fetch_andnot_acquire(i, v) atomic_fetch_and_acquire(~(int)(i), (v))
+#define atomic_fetch_andnot_release(i, v) atomic_fetch_and_release(~(int)(i), (v))
+#else /* atomic_fetch_andnot */
+#define atomic_fetch_andnot_relaxed atomic_fetch_andnot
+#define atomic_fetch_andnot_acquire atomic_fetch_andnot
+#define atomic_fetch_andnot_release atomic_fetch_andnot
+#endif /* atomic_fetch_andnot */
#else /* atomic_fetch_andnot_relaxed */
@@ -352,7 +394,6 @@
__atomic_op_fence(atomic_fetch_andnot, __VA_ARGS__)
#endif
#endif /* atomic_fetch_andnot_relaxed */
-#endif /* atomic_andnot */
/* atomic_fetch_xor_relaxed */
#ifndef atomic_fetch_xor_relaxed
@@ -520,112 +561,140 @@
#endif /* xchg_relaxed */
/**
+ * atomic_fetch_add_unless - add unless the number is already a given value
+ * @v: pointer of type atomic_t
+ * @a: the amount to add to v...
+ * @u: ...unless v is equal to u.
+ *
+ * Atomically adds @a to @v, if @v was not already @u.
+ * Returns the original value of @v.
+ */
+#ifndef atomic_fetch_add_unless
+static inline int atomic_fetch_add_unless(atomic_t *v, int a, int u)
+{
+ int c = atomic_read(v);
+
+ do {
+ if (unlikely(c == u))
+ break;
+ } while (!atomic_try_cmpxchg(v, &c, c + a));
+
+ return c;
+}
+#endif
+
+/**
* atomic_add_unless - add unless the number is already a given value
* @v: pointer of type atomic_t
* @a: the amount to add to v...
* @u: ...unless v is equal to u.
*
- * Atomically adds @a to @v, so long as @v was not already @u.
- * Returns non-zero if @v was not @u, and zero otherwise.
+ * Atomically adds @a to @v, if @v was not already @u.
+ * Returns true if the addition was done.
*/
-static inline int atomic_add_unless(atomic_t *v, int a, int u)
+static inline bool atomic_add_unless(atomic_t *v, int a, int u)
{
- return __atomic_add_unless(v, a, u) != u;
+ return atomic_fetch_add_unless(v, a, u) != u;
}
/**
* atomic_inc_not_zero - increment unless the number is zero
* @v: pointer of type atomic_t
*
- * Atomically increments @v by 1, so long as @v is non-zero.
- * Returns non-zero if @v was non-zero, and zero otherwise.
+ * Atomically increments @v by 1, if @v is non-zero.
+ * Returns true if the increment was done.
*/
#ifndef atomic_inc_not_zero
#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
#endif
-#ifndef atomic_andnot
-static inline void atomic_andnot(int i, atomic_t *v)
-{
- atomic_and(~i, v);
-}
-
-static inline int atomic_fetch_andnot(int i, atomic_t *v)
-{
- return atomic_fetch_and(~i, v);
-}
-
-static inline int atomic_fetch_andnot_relaxed(int i, atomic_t *v)
+/**
+ * atomic_inc_and_test - increment and test
+ * @v: pointer of type atomic_t
+ *
+ * Atomically increments @v by 1
+ * and returns true if the result is zero, or false for all
+ * other cases.
+ */
+#ifndef atomic_inc_and_test
+static inline bool atomic_inc_and_test(atomic_t *v)
{
- return atomic_fetch_and_relaxed(~i, v);
+ return atomic_inc_return(v) == 0;
}
+#endif
-static inline int atomic_fetch_andnot_acquire(int i, atomic_t *v)
+/**
+ * atomic_dec_and_test - decrement and test
+ * @v: pointer of type atomic_t
+ *
+ * Atomically decrements @v by 1 and
+ * returns true if the result is 0, or false for all other
+ * cases.
+ */
+#ifndef atomic_dec_and_test
+static inline bool atomic_dec_and_test(atomic_t *v)
{
- return atomic_fetch_and_acquire(~i, v);
+ return atomic_dec_return(v) == 0;
}
+#endif
-static inline int atomic_fetch_andnot_release(int i, atomic_t *v)
+/**
+ * atomic_sub_and_test - subtract value from variable and test result
+ * @i: integer value to subtract
+ * @v: pointer of type atomic_t
+ *
+ * Atomically subtracts @i from @v and returns
+ * true if the result is zero, or false for all
+ * other cases.
+ */
+#ifndef atomic_sub_and_test
+static inline bool atomic_sub_and_test(int i, atomic_t *v)
{
- return atomic_fetch_and_release(~i, v);
+ return atomic_sub_return(i, v) == 0;
}
#endif
/**
- * atomic_inc_not_zero_hint - increment if not null
+ * atomic_add_negative - add and test if negative
+ * @i: integer value to add
* @v: pointer of type atomic_t
- * @hint: probable value of the atomic before the increment
- *
- * This version of atomic_inc_not_zero() gives a hint of probable
- * value of the atomic. This helps processor to not read the memory
- * before doing the atomic read/modify/write cycle, lowering
- * number of bus transactions on some arches.
*
- * Returns: 0 if increment was not done, 1 otherwise.
+ * Atomically adds @i to @v and returns true
+ * if the result is negative, or false when
+ * result is greater than or equal to zero.
*/
-#ifndef atomic_inc_not_zero_hint
-static inline int atomic_inc_not_zero_hint(atomic_t *v, int hint)
+#ifndef atomic_add_negative
+static inline bool atomic_add_negative(int i, atomic_t *v)
{
- int val, c = hint;
-
- /* sanity test, should be removed by compiler if hint is a constant */
- if (!hint)
- return atomic_inc_not_zero(v);
-
- do {
- val = atomic_cmpxchg(v, c, c + 1);
- if (val == c)
- return 1;
- c = val;
- } while (c);
-
- return 0;
+ return atomic_add_return(i, v) < 0;
}
#endif
#ifndef atomic_inc_unless_negative
-static inline int atomic_inc_unless_negative(atomic_t *p)
+static inline bool atomic_inc_unless_negative(atomic_t *v)
{
- int v, v1;
- for (v = 0; v >= 0; v = v1) {
- v1 = atomic_cmpxchg(p, v, v + 1);
- if (likely(v1 == v))
- return 1;
- }
- return 0;
+ int c = atomic_read(v);
+
+ do {
+ if (unlikely(c < 0))
+ return false;
+ } while (!atomic_try_cmpxchg(v, &c, c + 1));
+
+ return true;
}
#endif
#ifndef atomic_dec_unless_positive
-static inline int atomic_dec_unless_positive(atomic_t *p)
+static inline bool atomic_dec_unless_positive(atomic_t *v)
{
- int v, v1;
- for (v = 0; v <= 0; v = v1) {
- v1 = atomic_cmpxchg(p, v, v - 1);
- if (likely(v1 == v))
- return 1;
- }
- return 0;
+ int c = atomic_read(v);
+
+ do {
+ if (unlikely(c > 0))
+ return false;
+ } while (!atomic_try_cmpxchg(v, &c, c - 1));
+
+ return true;
}
#endif
@@ -639,17 +708,14 @@ static inline int atomic_dec_unless_positive(atomic_t *p)
#ifndef atomic_dec_if_positive
static inline int atomic_dec_if_positive(atomic_t *v)
{
- int c, old, dec;
- c = atomic_read(v);
- for (;;) {
+ int dec, c = atomic_read(v);
+
+ do {
dec = c - 1;
if (unlikely(dec < 0))
break;
- old = atomic_cmpxchg((v), c, dec);
- if (likely(old == c))
- break;
- c = old;
- }
+ } while (!atomic_try_cmpxchg(v, &c, dec));
+
return dec;
}
#endif
@@ -693,11 +759,23 @@ static inline int atomic_dec_if_positive(atomic_t *v)
#endif
#endif /* atomic64_add_return_relaxed */
+#ifndef atomic64_inc
+#define atomic64_inc(v) atomic64_add(1, (v))
+#endif
+
/* atomic64_inc_return_relaxed */
#ifndef atomic64_inc_return_relaxed
+
+#ifndef atomic64_inc_return
+#define atomic64_inc_return(v) atomic64_add_return(1, (v))
+#define atomic64_inc_return_relaxed(v) atomic64_add_return_relaxed(1, (v))
+#define atomic64_inc_return_acquire(v) atomic64_add_return_acquire(1, (v))
+#define atomic64_inc_return_release(v) atomic64_add_return_release(1, (v))
+#else /* atomic64_inc_return */
#define atomic64_inc_return_relaxed atomic64_inc_return
#define atomic64_inc_return_acquire atomic64_inc_return
#define atomic64_inc_return_release atomic64_inc_return
+#endif /* atomic64_inc_return */
#else /* atomic64_inc_return_relaxed */
@@ -742,11 +820,23 @@ static inline int atomic_dec_if_positive(atomic_t *v)
#endif
#endif /* atomic64_sub_return_relaxed */
+#ifndef atomic64_dec
+#define atomic64_dec(v) atomic64_sub(1, (v))
+#endif
+
/* atomic64_dec_return_relaxed */
#ifndef atomic64_dec_return_relaxed
+
+#ifndef atomic64_dec_return
+#define atomic64_dec_return(v) atomic64_sub_return(1, (v))
+#define atomic64_dec_return_relaxed(v) atomic64_sub_return_relaxed(1, (v))
+#define atomic64_dec_return_acquire(v) atomic64_sub_return_acquire(1, (v))
+#define atomic64_dec_return_release(v) atomic64_sub_return_release(1, (v))
+#else /* atomic64_dec_return */
#define atomic64_dec_return_relaxed atomic64_dec_return
#define atomic64_dec_return_acquire atomic64_dec_return
#define atomic64_dec_return_release atomic64_dec_return
+#endif /* atomic64_dec_return */
#else /* atomic64_dec_return_relaxed */
@@ -927,12 +1017,22 @@ static inline int atomic_dec_if_positive(atomic_t *v)
#endif
#endif /* atomic64_fetch_and_relaxed */
-#ifdef atomic64_andnot
-/* atomic64_fetch_andnot_relaxed */
+#ifndef atomic64_andnot
+#define atomic64_andnot(i, v) atomic64_and(~(long long)(i), (v))
+#endif
+
#ifndef atomic64_fetch_andnot_relaxed
-#define atomic64_fetch_andnot_relaxed atomic64_fetch_andnot
-#define atomic64_fetch_andnot_acquire atomic64_fetch_andnot
-#define atomic64_fetch_andnot_release atomic64_fetch_andnot
+
+#ifndef atomic64_fetch_andnot
+#define atomic64_fetch_andnot(i, v) atomic64_fetch_and(~(long long)(i), (v))
+#define atomic64_fetch_andnot_relaxed(i, v) atomic64_fetch_and_relaxed(~(long long)(i), (v))
+#define atomic64_fetch_andnot_acquire(i, v) atomic64_fetch_and_acquire(~(long long)(i), (v))
+#define atomic64_fetch_andnot_release(i, v) atomic64_fetch_and_release(~(long long)(i), (v))
+#else /* atomic64_fetch_andnot */
+#define atomic64_fetch_andnot_relaxed atomic64_fetch_andnot
+#define atomic64_fetch_andnot_acquire atomic64_fetch_andnot
+#define atomic64_fetch_andnot_release atomic64_fetch_andnot
+#endif /* atomic64_fetch_andnot */
#else /* atomic64_fetch_andnot_relaxed */
@@ -951,7 +1051,6 @@ static inline int atomic_dec_if_positive(atomic_t *v)
__atomic_op_fence(atomic64_fetch_andnot, __VA_ARGS__)
#endif
#endif /* atomic64_fetch_andnot_relaxed */
-#endif /* atomic64_andnot */
/* atomic64_fetch_xor_relaxed */
#ifndef atomic64_fetch_xor_relaxed
@@ -1049,30 +1148,164 @@ static inline int atomic_dec_if_positive(atomic_t *v)
#define atomic64_try_cmpxchg_release atomic64_try_cmpxchg
#endif /* atomic64_try_cmpxchg */
-#ifndef atomic64_andnot
-static inline void atomic64_andnot(long long i, atomic64_t *v)
+/**
+ * atomic64_fetch_add_unless - add unless the number is already a given value
+ * @v: pointer of type atomic64_t
+ * @a: the amount to add to v...
+ * @u: ...unless v is equal to u.
+ *
+ * Atomically adds @a to @v, if @v was not already @u.
+ * Returns the original value of @v.
+ */
+#ifndef atomic64_fetch_add_unless
+static inline long long atomic64_fetch_add_unless(atomic64_t *v, long long a,
+ long long u)
{
- atomic64_and(~i, v);
+ long long c = atomic64_read(v);
+
+ do {
+ if (unlikely(c == u))
+ break;
+ } while (!atomic64_try_cmpxchg(v, &c, c + a));
+
+ return c;
}
+#endif
-static inline long long atomic64_fetch_andnot(long long i, atomic64_t *v)
+/**
+ * atomic64_add_unless - add unless the number is already a given value
+ * @v: pointer of type atomic_t
+ * @a: the amount to add to v...
+ * @u: ...unless v is equal to u.
+ *
+ * Atomically adds @a to @v, if @v was not already @u.
+ * Returns true if the addition was done.
+ */
+static inline bool atomic64_add_unless(atomic64_t *v, long long a, long long u)
{
- return atomic64_fetch_and(~i, v);
+ return atomic64_fetch_add_unless(v, a, u) != u;
}
-static inline long long atomic64_fetch_andnot_relaxed(long long i, atomic64_t *v)
+/**
+ * atomic64_inc_not_zero - increment unless the number is zero
+ * @v: pointer of type atomic64_t
+ *
+ * Atomically increments @v by 1, if @v is non-zero.
+ * Returns true if the increment was done.
+ */
+#ifndef atomic64_inc_not_zero
+#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
+#endif
+
+/**
+ * atomic64_inc_and_test - increment and test
+ * @v: pointer of type atomic64_t
+ *
+ * Atomically increments @v by 1
+ * and returns true if the result is zero, or false for all
+ * other cases.
+ */
+#ifndef atomic64_inc_and_test
+static inline bool atomic64_inc_and_test(atomic64_t *v)
{
- return atomic64_fetch_and_relaxed(~i, v);
+ return atomic64_inc_return(v) == 0;
}
+#endif
-static inline long long atomic64_fetch_andnot_acquire(long long i, atomic64_t *v)
+/**
+ * atomic64_dec_and_test - decrement and test
+ * @v: pointer of type atomic64_t
+ *
+ * Atomically decrements @v by 1 and
+ * returns true if the result is 0, or false for all other
+ * cases.
+ */
+#ifndef atomic64_dec_and_test
+static inline bool atomic64_dec_and_test(atomic64_t *v)
{
- return atomic64_fetch_and_acquire(~i, v);
+ return atomic64_dec_return(v) == 0;
}
+#endif
-static inline long long atomic64_fetch_andnot_release(long long i, atomic64_t *v)
+/**
+ * atomic64_sub_and_test - subtract value from variable and test result
+ * @i: integer value to subtract
+ * @v: pointer of type atomic64_t
+ *
+ * Atomically subtracts @i from @v and returns
+ * true if the result is zero, or false for all
+ * other cases.
+ */
+#ifndef atomic64_sub_and_test
+static inline bool atomic64_sub_and_test(long long i, atomic64_t *v)
+{
+ return atomic64_sub_return(i, v) == 0;
+}
+#endif
+
+/**
+ * atomic64_add_negative - add and test if negative
+ * @i: integer value to add
+ * @v: pointer of type atomic64_t
+ *
+ * Atomically adds @i to @v and returns true
+ * if the result is negative, or false when
+ * result is greater than or equal to zero.
+ */
+#ifndef atomic64_add_negative
+static inline bool atomic64_add_negative(long long i, atomic64_t *v)
{
- return atomic64_fetch_and_release(~i, v);
+ return atomic64_add_return(i, v) < 0;
+}
+#endif
+
+#ifndef atomic64_inc_unless_negative
+static inline bool atomic64_inc_unless_negative(atomic64_t *v)
+{
+ long long c = atomic64_read(v);
+
+ do {
+ if (unlikely(c < 0))
+ return false;
+ } while (!atomic64_try_cmpxchg(v, &c, c + 1));
+
+ return true;
+}
+#endif
+
+#ifndef atomic64_dec_unless_positive
+static inline bool atomic64_dec_unless_positive(atomic64_t *v)
+{
+ long long c = atomic64_read(v);
+
+ do {
+ if (unlikely(c > 0))
+ return false;
+ } while (!atomic64_try_cmpxchg(v, &c, c - 1));
+
+ return true;
+}
+#endif
+
+/*
+ * atomic64_dec_if_positive - decrement by 1 if old value positive
+ * @v: pointer of type atomic64_t
+ *
+ * The function returns the old value of *v minus 1, even if
+ * the atomic64 variable, v, was not decremented.
+ */
+#ifndef atomic64_dec_if_positive
+static inline long long atomic64_dec_if_positive(atomic64_t *v)
+{
+ long long dec, c = atomic64_read(v);
+
+ do {
+ dec = c - 1;
+ if (unlikely(dec < 0))
+ break;
+ } while (!atomic64_try_cmpxchg(v, &c, dec));
+
+ return dec;
}
#endif
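
Editorial illustration of the atomic_try_cmpxchg() read-then-loop pattern that the fallbacks above standardize on, applied to a made-up operation; my_atomic_inc_below() is not part of the patch.

#include <linux/atomic.h>

/* Hypothetical op: increment @v only while it stays below @limit. */
static inline bool my_atomic_inc_below(atomic_t *v, int limit)
{
	int c = atomic_read(v);

	do {
		if (unlikely(c >= limit))
			return false;
		/* atomic_try_cmpxchg() reloads @c on failure, so no re-read. */
	} while (!atomic_try_cmpxchg(v, &c, c + 1));

	return true;
}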
diff --git a/include/linux/audit.h b/include/linux/audit.h
index 69c78477590b..9334fbef7bae 100644
--- a/include/linux/audit.h
+++ b/include/linux/audit.h
@@ -117,6 +117,9 @@ struct filename;
extern void audit_log_session_info(struct audit_buffer *ab);
+#define AUDIT_OFF 0
+#define AUDIT_ON 1
+#define AUDIT_LOCKED 2
#ifdef CONFIG_AUDIT
/* These are defined in audit.c */
/* Public API */
@@ -202,7 +205,7 @@ static inline int audit_log_task_context(struct audit_buffer *ab)
static inline void audit_log_task_info(struct audit_buffer *ab,
struct task_struct *tsk)
{ }
-#define audit_enabled 0
+#define audit_enabled AUDIT_OFF
#endif /* CONFIG_AUDIT */
#ifdef CONFIG_AUDIT_COMPAT_GENERIC
diff --git a/include/linux/backing-dev-defs.h b/include/linux/backing-dev-defs.h
index 0bd432a4d7bd..24251762c20c 100644
--- a/include/linux/backing-dev-defs.h
+++ b/include/linux/backing-dev-defs.h
@@ -22,7 +22,6 @@ struct dentry;
*/
enum wb_state {
WB_registered, /* bdi_register() was done */
- WB_shutting_down, /* wb_shutdown() in progress */
WB_writeback_running, /* Writeback is in progress */
WB_has_dirty_io, /* Dirty inodes on ->b_{dirty|io|more_io} */
WB_start_all, /* nr_pages == 0 (all) work pending */
@@ -189,6 +188,7 @@ struct backing_dev_info {
#ifdef CONFIG_CGROUP_WRITEBACK
struct radix_tree_root cgwb_tree; /* radix tree of active cgroup wbs */
struct rb_root cgwb_congested_tree; /* their congested states */
+ struct mutex cgwb_release_mutex; /* protect shutdown of wb structs */
#else
struct bdi_writeback_congested *wb_congested;
#endif
diff --git a/include/linux/bio.h b/include/linux/bio.h
index f08f5fe7bd08..51371740d2a8 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -429,7 +429,6 @@ extern void bio_put(struct bio *);
extern void __bio_clone_fast(struct bio *, struct bio *);
extern struct bio *bio_clone_fast(struct bio *, gfp_t, struct bio_set *);
-extern struct bio *bio_clone_bioset(struct bio *, gfp_t, struct bio_set *bs);
extern struct bio_set fs_bio_set;
@@ -443,12 +442,6 @@ static inline struct bio *bio_kmalloc(gfp_t gfp_mask, unsigned int nr_iovecs)
return bio_alloc_bioset(gfp_mask, nr_iovecs, NULL);
}
-static inline struct bio *bio_clone_kmalloc(struct bio *bio, gfp_t gfp_mask)
-{
- return bio_clone_bioset(bio, gfp_mask, NULL);
-
-}
-
extern blk_qc_t submit_bio(struct bio *);
extern void bio_endio(struct bio *);
@@ -496,9 +489,9 @@ extern struct bio *bio_copy_kern(struct request_queue *, void *, unsigned int,
extern void bio_set_pages_dirty(struct bio *bio);
extern void bio_check_pages_dirty(struct bio *bio);
-void generic_start_io_acct(struct request_queue *q, int rw,
+void generic_start_io_acct(struct request_queue *q, int op,
unsigned long sectors, struct hd_struct *part);
-void generic_end_io_acct(struct request_queue *q, int rw,
+void generic_end_io_acct(struct request_queue *q, int op,
struct hd_struct *part,
unsigned long start_time);
@@ -553,8 +546,16 @@ do { \
#define bio_dev(bio) \
disk_devt((bio)->bi_disk)
+#if defined(CONFIG_MEMCG) && defined(CONFIG_BLK_CGROUP)
+int bio_associate_blkcg_from_page(struct bio *bio, struct page *page);
+#else
+static inline int bio_associate_blkcg_from_page(struct bio *bio,
+ struct page *page) { return 0; }
+#endif
+
#ifdef CONFIG_BLK_CGROUP
int bio_associate_blkcg(struct bio *bio, struct cgroup_subsys_state *blkcg_css);
+int bio_associate_blkg(struct bio *bio, struct blkcg_gq *blkg);
void bio_disassociate_task(struct bio *bio);
void bio_clone_blkcg_association(struct bio *dst, struct bio *src);
#else /* CONFIG_BLK_CGROUP */
diff --git a/include/linux/bitfield.h b/include/linux/bitfield.h
index cf2588d81148..65a6981eef7b 100644
--- a/include/linux/bitfield.h
+++ b/include/linux/bitfield.h
@@ -104,7 +104,7 @@
(typeof(_mask))(((_reg) & (_mask)) >> __bf_shf(_mask)); \
})
-extern void __compiletime_warning("value doesn't fit into mask")
+extern void __compiletime_error("value doesn't fit into mask")
__field_overflow(void);
extern void __compiletime_error("bad bitfield mask")
__bad_mask(void);
@@ -121,8 +121,8 @@ static __always_inline u64 field_mask(u64 field)
#define ____MAKE_OP(type,base,to,from) \
static __always_inline __##type type##_encode_bits(base v, base field) \
{ \
- if (__builtin_constant_p(v) && (v & ~field_multiplier(field))) \
- __field_overflow(); \
+ if (__builtin_constant_p(v) && (v & ~field_mask(field))) \
+ __field_overflow(); \
return to((v & field_mask(field)) * field_multiplier(field)); \
} \
static __always_inline __##type type##_replace_bits(__##type old, \
@@ -143,6 +143,7 @@ static __always_inline base type##_get_bits(__##type v, base field) \
____MAKE_OP(le##size,u##size,cpu_to_le##size,le##size##_to_cpu) \
____MAKE_OP(be##size,u##size,cpu_to_be##size,be##size##_to_cpu) \
____MAKE_OP(u##size,u##size,,)
+____MAKE_OP(u8,u8,,)
__MAKE_OP(16)
__MAKE_OP(32)
__MAKE_OP(64)
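
Sketch of the typed helpers covered by the tightened overflow check above; the EX_CTRL_* field layout and ex_pack_ctrl() are invented for illustration.

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

#define EX_CTRL_MODE	GENMASK(3, 0)	/* hypothetical 4-bit mode field */
#define EX_CTRL_ENABLE	BIT(7)		/* hypothetical enable bit */

static u32 ex_pack_ctrl(u8 mode)
{
	/* A constant value wider than the field would now trip __field_overflow(). */
	return u32_encode_bits(mode, EX_CTRL_MODE) |
	       u32_encode_bits(1, EX_CTRL_ENABLE);
}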
diff --git a/include/linux/bitops.h b/include/linux/bitops.h
index 4cac4e1a72ff..af419012d77d 100644
--- a/include/linux/bitops.h
+++ b/include/linux/bitops.h
@@ -2,29 +2,9 @@
#ifndef _LINUX_BITOPS_H
#define _LINUX_BITOPS_H
#include <asm/types.h>
+#include <linux/bits.h>
-#ifdef __KERNEL__
-#define BIT(nr) (1UL << (nr))
-#define BIT_ULL(nr) (1ULL << (nr))
-#define BIT_MASK(nr) (1UL << ((nr) % BITS_PER_LONG))
-#define BIT_WORD(nr) ((nr) / BITS_PER_LONG)
-#define BIT_ULL_MASK(nr) (1ULL << ((nr) % BITS_PER_LONG_LONG))
-#define BIT_ULL_WORD(nr) ((nr) / BITS_PER_LONG_LONG)
-#define BITS_PER_BYTE 8
#define BITS_TO_LONGS(nr) DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(long))
-#endif
-
-/*
- * Create a contiguous bitmask starting at bit position @l and ending at
- * position @h. For example
- * GENMASK_ULL(39, 21) gives us the 64bit vector 0x000000ffffe00000.
- */
-#define GENMASK(h, l) \
- (((~0UL) - (1UL << (l)) + 1) & (~0UL >> (BITS_PER_LONG - 1 - (h))))
-
-#define GENMASK_ULL(h, l) \
- (((~0ULL) - (1ULL << (l)) + 1) & \
- (~0ULL >> (BITS_PER_LONG_LONG - 1 - (h))))
extern unsigned int __sw_hweight8(unsigned int w);
extern unsigned int __sw_hweight16(unsigned int w);
diff --git a/include/linux/bits.h b/include/linux/bits.h
new file mode 100644
index 000000000000..2b7b532c1d51
--- /dev/null
+++ b/include/linux/bits.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LINUX_BITS_H
+#define __LINUX_BITS_H
+#include <asm/bitsperlong.h>
+
+#define BIT(nr) (1UL << (nr))
+#define BIT_ULL(nr) (1ULL << (nr))
+#define BIT_MASK(nr) (1UL << ((nr) % BITS_PER_LONG))
+#define BIT_WORD(nr) ((nr) / BITS_PER_LONG)
+#define BIT_ULL_MASK(nr) (1ULL << ((nr) % BITS_PER_LONG_LONG))
+#define BIT_ULL_WORD(nr) ((nr) / BITS_PER_LONG_LONG)
+#define BITS_PER_BYTE 8
+
+/*
+ * Create a contiguous bitmask starting at bit position @l and ending at
+ * position @h. For example
+ * GENMASK_ULL(39, 21) gives us the 64bit vector 0x000000ffffe00000.
+ */
+#define GENMASK(h, l) \
+ (((~0UL) - (1UL << (l)) + 1) & (~0UL >> (BITS_PER_LONG - 1 - (h))))
+
+#define GENMASK_ULL(h, l) \
+ (((~0ULL) - (1ULL << (l)) + 1) & \
+ (~0ULL >> (BITS_PER_LONG_LONG - 1 - (h))))
+
+#endif /* __LINUX_BITS_H */
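
Worked example of the GENMASK() macros the new header carries over from bitops.h; the EX_* names are illustrative only.

#include <linux/bits.h>

#define EX_NIBBLE_HI	GENMASK(7, 4)		/* == 0xf0: bits 4..7 set */
#define EX_BYTE_4	GENMASK_ULL(39, 32)	/* == 0xff00000000ULL */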
diff --git a/include/linux/blk-cgroup.h b/include/linux/blk-cgroup.h
index 6c666fd7de3c..34aec30e06c7 100644
--- a/include/linux/blk-cgroup.h
+++ b/include/linux/blk-cgroup.h
@@ -35,6 +35,7 @@ enum blkg_rwstat_type {
BLKG_RWSTAT_WRITE,
BLKG_RWSTAT_SYNC,
BLKG_RWSTAT_ASYNC,
+ BLKG_RWSTAT_DISCARD,
BLKG_RWSTAT_NR,
BLKG_RWSTAT_TOTAL = BLKG_RWSTAT_NR,
@@ -136,6 +137,12 @@ struct blkcg_gq {
struct blkg_policy_data *pd[BLKCG_MAX_POLS];
struct rcu_head rcu_head;
+
+ atomic_t use_delay;
+ atomic64_t delay_nsec;
+ atomic64_t delay_start;
+ u64 last_delay;
+ int last_use;
};
typedef struct blkcg_policy_data *(blkcg_pol_alloc_cpd_fn)(gfp_t gfp);
@@ -148,6 +155,8 @@ typedef void (blkcg_pol_online_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_offline_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_free_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_reset_pd_stats_fn)(struct blkg_policy_data *pd);
+typedef size_t (blkcg_pol_stat_pd_fn)(struct blkg_policy_data *pd, char *buf,
+ size_t size);
struct blkcg_policy {
int plid;
@@ -167,6 +176,7 @@ struct blkcg_policy {
blkcg_pol_offline_pd_fn *pd_offline_fn;
blkcg_pol_free_pd_fn *pd_free_fn;
blkcg_pol_reset_pd_stats_fn *pd_reset_stats_fn;
+ blkcg_pol_stat_pd_fn *pd_stat_fn;
};
extern struct blkcg blkcg_root;
@@ -238,6 +248,42 @@ static inline struct blkcg *bio_blkcg(struct bio *bio)
return css_to_blkcg(task_css(current, io_cgrp_id));
}
+static inline bool blk_cgroup_congested(void)
+{
+ struct cgroup_subsys_state *css;
+ bool ret = false;
+
+ rcu_read_lock();
+ css = kthread_blkcg();
+ if (!css)
+ css = task_css(current, io_cgrp_id);
+ while (css) {
+ if (atomic_read(&css->cgroup->congestion_count)) {
+ ret = true;
+ break;
+ }
+ css = css->parent;
+ }
+ rcu_read_unlock();
+ return ret;
+}
+
+/**
+ * bio_issue_as_root_blkg - see if this bio needs to be issued as root blkg
+ * @return: true if this bio needs to be submitted with the root blkg context.
+ *
+ * In order to avoid priority inversions we sometimes need to issue a bio as if
+ * it were attached to the root blkg, and then backcharge to the actual owning
+ * blkg. The idea is we do bio_blkcg() to look up the actual context for the
+ * bio and attach the appropriate blkg to the bio. Then we call this helper and
+ * if it returns true, run with the root blkg for that queue and then do any
+ * backcharging to the originating cgroup once the io is complete.
+ */
+static inline bool bio_issue_as_root_blkg(struct bio *bio)
+{
+ return (bio->bi_opf & (REQ_META | REQ_SWAP)) != 0;
+}
+
/**
* blkcg_parent - get the parent of a blkcg
* @blkcg: blkcg of interest
@@ -296,6 +342,17 @@ static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg,
}
/**
+ * blk_queue_root_blkg - return blkg for the (blkcg_root, @q) pair
+ * @q: request_queue of interest
+ *
+ * Lookup blkg for @q at the root level. See also blkg_lookup().
+ */
+static inline struct blkcg_gq *blk_queue_root_blkg(struct request_queue *q)
+{
+ return q->root_blkg;
+}
+
+/**
* blkg_to_pdata - get policy private data
* @blkg: blkg of interest
* @pol: policy of interest
@@ -355,6 +412,21 @@ static inline void blkg_get(struct blkcg_gq *blkg)
atomic_inc(&blkg->refcnt);
}
+/**
+ * blkg_try_get - try and get a blkg reference
+ * @blkg: blkg to get
+ *
+ * This is for use when doing an RCU lookup of the blkg. We may be in the midst
+ * of freeing this blkg, so we can only use it if the refcnt is not zero.
+ */
+static inline struct blkcg_gq *blkg_try_get(struct blkcg_gq *blkg)
+{
+ if (atomic_inc_not_zero(&blkg->refcnt))
+ return blkg;
+ return NULL;
+}
+
+
void __blkg_release_rcu(struct rcu_head *rcu);
/**
@@ -589,7 +661,9 @@ static inline void blkg_rwstat_add(struct blkg_rwstat *rwstat,
{
struct percpu_counter *cnt;
- if (op_is_write(op))
+ if (op_is_discard(op))
+ cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_DISCARD];
+ else if (op_is_write(op))
cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_WRITE];
else
cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_READ];
@@ -706,8 +780,14 @@ static inline bool blkcg_bio_issue_check(struct request_queue *q,
if (!throtl) {
blkg = blkg ?: q->root_blkg;
- blkg_rwstat_add(&blkg->stat_bytes, bio->bi_opf,
- bio->bi_iter.bi_size);
+ /*
+ * If the bio is flagged with BIO_QUEUE_ENTERED it means this
+ * is a split bio and we would have already accounted for the
+ * size of the bio.
+ */
+ if (!bio_flagged(bio, BIO_QUEUE_ENTERED))
+ blkg_rwstat_add(&blkg->stat_bytes, bio->bi_opf,
+ bio->bi_iter.bi_size);
blkg_rwstat_add(&blkg->stat_ios, bio->bi_opf, 1);
}
@@ -715,6 +795,59 @@ static inline bool blkcg_bio_issue_check(struct request_queue *q,
return !throtl;
}
+static inline void blkcg_use_delay(struct blkcg_gq *blkg)
+{
+ if (atomic_add_return(1, &blkg->use_delay) == 1)
+ atomic_inc(&blkg->blkcg->css.cgroup->congestion_count);
+}
+
+static inline int blkcg_unuse_delay(struct blkcg_gq *blkg)
+{
+ int old = atomic_read(&blkg->use_delay);
+
+ if (old == 0)
+ return 0;
+
+ /*
+ * We do this song and dance because we can race with somebody else
+ * adding or removing delay. If we just did an atomic_dec we'd end up
+ * negative and we'd already be in trouble. We need to subtract 1 and
+ * then check to see if we were the last delay so we can drop the
+ * congestion count on the cgroup.
+ */
+ while (old) {
+ int cur = atomic_cmpxchg(&blkg->use_delay, old, old - 1);
+ if (cur == old)
+ break;
+ old = cur;
+ }
+
+ if (old == 0)
+ return 0;
+ if (old == 1)
+ atomic_dec(&blkg->blkcg->css.cgroup->congestion_count);
+ return 1;
+}
+
+static inline void blkcg_clear_delay(struct blkcg_gq *blkg)
+{
+ int old = atomic_read(&blkg->use_delay);
+ if (!old)
+ return;
+ /* We only want 1 person clearing the congestion count for this blkg. */
+ while (old) {
+ int cur = atomic_cmpxchg(&blkg->use_delay, old, 0);
+ if (cur == old) {
+ atomic_dec(&blkg->blkcg->css.cgroup->congestion_count);
+ break;
+ }
+ old = cur;
+ }
+}
+
+void blkcg_add_delay(struct blkcg_gq *blkg, u64 now, u64 delta);
+void blkcg_schedule_throttle(struct request_queue *q, bool use_memdelay);
+void blkcg_maybe_throttle_current(void);
#else /* CONFIG_BLK_CGROUP */
struct blkcg {
@@ -734,9 +867,16 @@ struct blkcg_policy {
#define blkcg_root_css ((struct cgroup_subsys_state *)ERR_PTR(-EINVAL))
+static inline void blkcg_maybe_throttle_current(void) { }
+static inline bool blk_cgroup_congested(void) { return false; }
+
#ifdef CONFIG_BLOCK
+static inline void blkcg_schedule_throttle(struct request_queue *q, bool use_memdelay) { }
+
static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, void *key) { return NULL; }
+static inline struct blkcg_gq *blk_queue_root_blkg(struct request_queue *q)
+{ return NULL; }
static inline int blkcg_init_queue(struct request_queue *q) { return 0; }
static inline void blkcg_drain_queue(struct request_queue *q) { }
static inline void blkcg_exit_queue(struct request_queue *q) { }
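
A small userspace sketch (not kernel code) of the decrement-if-nonzero pattern that blkcg_unuse_delay() implements above with atomic_cmpxchg(), written here with C11 atomics:

#include <stdatomic.h>

/* Returns the value observed before the decrement: 0 means there was nothing
 * to undo, 1 means the caller removed the last delay and may drop the side
 * state (the congestion count, in the blkcg case). */
static int dec_if_nonzero(atomic_int *ctr)
{
	int old = atomic_load(ctr);

	while (old) {
		/* On failure, 'old' is reloaded with the current value. */
		if (atomic_compare_exchange_weak(ctr, &old, old - 1))
			break;
	}
	return old;
}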
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index e3147eb74222..1da59c16f637 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -35,10 +35,12 @@ struct blk_mq_hw_ctx {
struct sbitmap ctx_map;
struct blk_mq_ctx *dispatch_from;
+ unsigned int dispatch_busy;
- struct blk_mq_ctx **ctxs;
unsigned int nr_ctx;
+ struct blk_mq_ctx **ctxs;
+ spinlock_t dispatch_wait_lock;
wait_queue_entry_t dispatch_wait;
atomic_t wait_index;
@@ -287,6 +289,20 @@ void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues);
void blk_mq_quiesce_queue_nowait(struct request_queue *q);
+/**
+ * blk_mq_mark_complete() - Set request state to complete
+ * @rq: request to set to complete state
+ *
+ * Returns true if request state was successfully set to complete. If
+ * successful, the caller is responsible for seeing this request is ended, as
+ * blk_mq_complete_request will not work again.
+ */
+static inline bool blk_mq_mark_complete(struct request *rq)
+{
+ return cmpxchg(&rq->state, MQ_RQ_IN_FLIGHT, MQ_RQ_COMPLETE) ==
+ MQ_RQ_IN_FLIGHT;
+}
+
/*
* Driver command data is immediately after the request. So subtract request
* size to get back to the original request, add request size to get the PDU.
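
A hedged sketch of how a driver might use blk_mq_mark_complete(); my_drv_complete() is hypothetical, while blk_mq_end_request() is an existing block-layer call. Whichever path wins the IN_FLIGHT -> COMPLETE transition is the one that ends the request, so a normal completion and a timeout handler cannot both complete it:

static void my_drv_complete(struct request *rq, blk_status_t status)
{
	/* Only the caller that successfully flips the state ends the request. */
	if (blk_mq_mark_complete(rq))
		blk_mq_end_request(rq, status);
}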
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index 3c4f390aea4b..f6dfb30737d8 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -179,11 +179,9 @@ struct bio {
*/
struct io_context *bi_ioc;
struct cgroup_subsys_state *bi_css;
-#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
- void *bi_cg_private;
+ struct blkcg_gq *bi_blkg;
struct bio_issue bi_issue;
#endif
-#endif
union {
#if defined(CONFIG_BLK_DEV_INTEGRITY)
struct bio_integrity_payload *bi_integrity; /* data integrity */
@@ -329,7 +327,7 @@ enum req_flag_bits {
/* for driver use */
__REQ_DRV,
-
+ __REQ_SWAP, /* swapping request. */
__REQ_NR_BITS, /* stops here */
};
@@ -351,6 +349,7 @@ enum req_flag_bits {
#define REQ_NOUNMAP (1ULL << __REQ_NOUNMAP)
#define REQ_DRV (1ULL << __REQ_DRV)
+#define REQ_SWAP (1ULL << __REQ_SWAP)
#define REQ_FAILFAST_MASK \
(REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER)
@@ -358,6 +357,14 @@ enum req_flag_bits {
#define REQ_NOMERGE_FLAGS \
(REQ_NOMERGE | REQ_PREFLUSH | REQ_FUA)
+enum stat_group {
+ STAT_READ,
+ STAT_WRITE,
+ STAT_DISCARD,
+
+ NR_STAT_GROUPS
+};
+
#define bio_op(bio) \
((bio)->bi_opf & REQ_OP_MASK)
#define req_op(req) \
@@ -395,6 +402,18 @@ static inline bool op_is_sync(unsigned int op)
(op & (REQ_SYNC | REQ_FUA | REQ_PREFLUSH));
}
+static inline bool op_is_discard(unsigned int op)
+{
+ return (op & REQ_OP_MASK) == REQ_OP_DISCARD;
+}
+
+static inline int op_stat_group(unsigned int op)
+{
+ if (op_is_discard(op))
+ return STAT_DISCARD;
+ return op_is_write(op);
+}
+
typedef unsigned int blk_qc_t;
#define BLK_QC_T_NONE -1U
#define BLK_QC_T_SHIFT 16
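
A minimal sketch (the my_* structure is hypothetical) of how the new stat_group enum and op_stat_group() are meant to be used for accounting: reads, writes and discards each land in their own slot, because op_is_write() yields 0 or 1 and discards are checked first.

struct my_io_stats {
	unsigned long ios[NR_STAT_GROUPS];
	unsigned long sectors[NR_STAT_GROUPS];
};

static void my_account_io(struct my_io_stats *s, unsigned int op,
			  unsigned int nr_sectors)
{
	int group = op_stat_group(op);	/* STAT_READ, STAT_WRITE or STAT_DISCARD */

	s->ios[group]++;
	s->sectors[group] += nr_sectors;
}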
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 9154570edf29..d6869e0e2b64 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -27,8 +27,6 @@
#include <linux/percpu-refcount.h>
#include <linux/scatterlist.h>
#include <linux/blkzoned.h>
-#include <linux/seqlock.h>
-#include <linux/u64_stats_sync.h>
struct module;
struct scsi_ioctl_command;
@@ -42,7 +40,7 @@ struct bsg_job;
struct blkcg_gq;
struct blk_flush_queue;
struct pr_ops;
-struct rq_wb;
+struct rq_qos;
struct blk_queue_stats;
struct blk_stat_callback;
@@ -442,10 +440,8 @@ struct request_queue {
int nr_rqs[2]; /* # allocated [a]sync rqs */
int nr_rqs_elvpriv; /* # allocated rqs w/ elvpriv */
- atomic_t shared_hctx_restart;
-
struct blk_queue_stats *stats;
- struct rq_wb *rq_wb;
+ struct rq_qos *rq_qos;
/*
* If blkcg is not used, @q->root_rl serves all requests. If blkcg
@@ -592,6 +588,7 @@ struct request_queue {
struct queue_limits limits;
+#ifdef CONFIG_BLK_DEV_ZONED
/*
* Zoned block device information for request dispatch control.
* nr_zones is the total number of zones of the device. This is always
@@ -612,6 +609,7 @@ struct request_queue {
unsigned int nr_zones;
unsigned long *seq_zones_bitmap;
unsigned long *seq_zones_wlock;
+#endif /* CONFIG_BLK_DEV_ZONED */
/*
* sg stuff
@@ -800,11 +798,7 @@ static inline unsigned int blk_queue_zone_sectors(struct request_queue *q)
return blk_queue_is_zoned(q) ? q->limits.chunk_sectors : 0;
}
-static inline unsigned int blk_queue_nr_zones(struct request_queue *q)
-{
- return q->nr_zones;
-}
-
+#ifdef CONFIG_BLK_DEV_ZONED
static inline unsigned int blk_queue_zone_no(struct request_queue *q,
sector_t sector)
{
@@ -820,6 +814,7 @@ static inline bool blk_queue_zone_is_seq(struct request_queue *q,
return false;
return test_bit(blk_queue_zone_no(q, sector), q->seq_zones_bitmap);
}
+#endif /* CONFIG_BLK_DEV_ZONED */
static inline bool rq_is_sync(struct request *rq)
{
@@ -1070,6 +1065,7 @@ static inline unsigned int blk_rq_cur_sectors(const struct request *rq)
return blk_rq_cur_bytes(rq) >> SECTOR_SHIFT;
}
+#ifdef CONFIG_BLK_DEV_ZONED
static inline unsigned int blk_rq_zone_no(struct request *rq)
{
return blk_queue_zone_no(rq->q, blk_rq_pos(rq));
@@ -1079,6 +1075,7 @@ static inline unsigned int blk_rq_zone_is_seq(struct request *rq)
{
return blk_queue_zone_is_seq(rq->q, blk_rq_pos(rq));
}
+#endif /* CONFIG_BLK_DEV_ZONED */
/*
* Some commands like WRITE SAME have a payload or data transfer size which
@@ -1119,8 +1116,8 @@ static inline unsigned int blk_max_size_offset(struct request_queue *q,
if (!q->limits.chunk_sectors)
return q->limits.max_sectors;
- return q->limits.chunk_sectors -
- (offset & (q->limits.chunk_sectors - 1));
+ return min(q->limits.max_sectors, (unsigned int)(q->limits.chunk_sectors -
+ (offset & (q->limits.chunk_sectors - 1))));
}
static inline unsigned int blk_rq_get_max_sectors(struct request *rq,
@@ -1437,8 +1434,6 @@ enum blk_default_limits {
BLK_SEG_BOUNDARY_MASK = 0xFFFFFFFFUL,
};
-#define blkdev_entry_to_request(entry) list_entry((entry), struct request, queuelist)
-
static inline unsigned long queue_segment_boundary(struct request_queue *q)
{
return q->limits.seg_boundary_mask;
@@ -1639,15 +1634,6 @@ static inline unsigned int bdev_zone_sectors(struct block_device *bdev)
return 0;
}
-static inline unsigned int bdev_nr_zones(struct block_device *bdev)
-{
- struct request_queue *q = bdev_get_queue(bdev);
-
- if (q)
- return blk_queue_nr_zones(q);
- return 0;
-}
-
static inline int queue_dma_alignment(struct request_queue *q)
{
return q ? q->dma_alignment : 511;
@@ -1877,6 +1863,28 @@ static inline bool integrity_req_gap_front_merge(struct request *req,
bip_next->bip_vec[0].bv_offset);
}
+/**
+ * bio_integrity_intervals - Return number of integrity intervals for a bio
+ * @bi: blk_integrity profile for device
+ * @sectors: Size of the bio in 512-byte sectors
+ *
+ * Description: The block layer calculates everything in 512 byte
+ * sectors but integrity metadata is done in terms of the data integrity
+ * interval size of the storage device. Convert the block layer sectors
+ * to the appropriate number of integrity intervals.
+ */
+static inline unsigned int bio_integrity_intervals(struct blk_integrity *bi,
+ unsigned int sectors)
+{
+ return sectors >> (bi->interval_exp - 9);
+}
+
+static inline unsigned int bio_integrity_bytes(struct blk_integrity *bi,
+ unsigned int sectors)
+{
+ return bio_integrity_intervals(bi, sectors) * bi->tuple_size;
+}
+
#else /* CONFIG_BLK_DEV_INTEGRITY */
struct bio;
@@ -1950,12 +1958,24 @@ static inline bool integrity_req_gap_front_merge(struct request *req,
return false;
}
+static inline unsigned int bio_integrity_intervals(struct blk_integrity *bi,
+ unsigned int sectors)
+{
+ return 0;
+}
+
+static inline unsigned int bio_integrity_bytes(struct blk_integrity *bi,
+ unsigned int sectors)
+{
+ return 0;
+}
+
#endif /* CONFIG_BLK_DEV_INTEGRITY */
struct block_device_operations {
int (*open) (struct block_device *, fmode_t);
void (*release) (struct gendisk *, fmode_t);
- int (*rw_page)(struct block_device *, sector_t, struct page *, bool);
+ int (*rw_page)(struct block_device *, sector_t, struct page *, unsigned int);
int (*ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
unsigned int (*check_events) (struct gendisk *disk,
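
A short worked example (illustrative; the 4096-byte interval and 8-byte tuple are assumptions, not from the patch) for the bio_integrity_intervals()/bio_integrity_bytes() helpers added above:

/*
 * A device with a 4096-byte protection interval has bi->interval_exp == 12,
 * so a 16 KiB bio (32 x 512-byte sectors) spans
 *
 *	bio_integrity_intervals(bi, 32) == 32 >> (12 - 9) == 4 intervals,
 *
 * and with an 8-byte PI tuple (bi->tuple_size == 8)
 *
 *	bio_integrity_bytes(bi, 32) == 4 * 8 == 32 bytes of integrity metadata.
 */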
diff --git a/include/linux/bootmem.h b/include/linux/bootmem.h
index 7942a96b1a9d..42515195d7d8 100644
--- a/include/linux/bootmem.h
+++ b/include/linux/bootmem.h
@@ -27,9 +27,20 @@ extern unsigned long max_pfn;
extern unsigned long long max_possible_pfn;
#ifndef CONFIG_NO_BOOTMEM
-/*
- * node_bootmem_map is a map pointer - the bits represent all physical
- * memory pages (including holes) on the node.
+/**
+ * struct bootmem_data - per-node information used by the bootmem allocator
+ * @node_min_pfn: the starting physical address of the node's memory
+ * @node_low_pfn: the end physical address of the directly addressable memory
+ * @node_bootmem_map: a bitmap pointer - the bits represent all physical
+ * memory pages (including holes) on the node.
+ * @last_end_off: the offset within the page of the end of the last allocation;
+ * if 0, the page used is full
+ * @hint_idx: the PFN of the page used with the last allocation;
+ * together with @last_end_off, this allows a check to see
+ * if a new allocation can be merged with the page used for
+ * the last allocation rather than using up a full new page.
+ * @list: list entry in the linked list ordered by the memory addresses
*/
typedef struct bootmem_data {
unsigned long node_min_pfn;
diff --git a/include/linux/bpf-cgroup.h b/include/linux/bpf-cgroup.h
index 975fb4cf1bb7..f91b0f8ff3a9 100644
--- a/include/linux/bpf-cgroup.h
+++ b/include/linux/bpf-cgroup.h
@@ -2,23 +2,48 @@
#ifndef _BPF_CGROUP_H
#define _BPF_CGROUP_H
+#include <linux/errno.h>
#include <linux/jump_label.h>
+#include <linux/percpu.h>
+#include <linux/rbtree.h>
#include <uapi/linux/bpf.h>
struct sock;
struct sockaddr;
struct cgroup;
struct sk_buff;
+struct bpf_map;
+struct bpf_prog;
struct bpf_sock_ops_kern;
+struct bpf_cgroup_storage;
#ifdef CONFIG_CGROUP_BPF
extern struct static_key_false cgroup_bpf_enabled_key;
#define cgroup_bpf_enabled static_branch_unlikely(&cgroup_bpf_enabled_key)
+DECLARE_PER_CPU(void*, bpf_cgroup_storage);
+
+struct bpf_cgroup_storage_map;
+
+struct bpf_storage_buffer {
+ struct rcu_head rcu;
+ char data[0];
+};
+
+struct bpf_cgroup_storage {
+ struct bpf_storage_buffer *buf;
+ struct bpf_cgroup_storage_map *map;
+ struct bpf_cgroup_storage_key key;
+ struct list_head list;
+ struct rb_node node;
+ struct rcu_head rcu;
+};
+
struct bpf_prog_list {
struct list_head node;
struct bpf_prog *prog;
+ struct bpf_cgroup_storage *storage;
};
struct bpf_prog_array;
@@ -76,6 +101,26 @@ int __cgroup_bpf_run_filter_sock_ops(struct sock *sk,
int __cgroup_bpf_check_dev_permission(short dev_type, u32 major, u32 minor,
short access, enum bpf_attach_type type);
+static inline void bpf_cgroup_storage_set(struct bpf_cgroup_storage *storage)
+{
+ struct bpf_storage_buffer *buf;
+
+ if (!storage)
+ return;
+
+ buf = READ_ONCE(storage->buf);
+ this_cpu_write(bpf_cgroup_storage, &buf->data[0]);
+}
+
+struct bpf_cgroup_storage *bpf_cgroup_storage_alloc(struct bpf_prog *prog);
+void bpf_cgroup_storage_free(struct bpf_cgroup_storage *storage);
+void bpf_cgroup_storage_link(struct bpf_cgroup_storage *storage,
+ struct cgroup *cgroup,
+ enum bpf_attach_type type);
+void bpf_cgroup_storage_unlink(struct bpf_cgroup_storage *storage);
+int bpf_cgroup_storage_assign(struct bpf_prog *prog, struct bpf_map *map);
+void bpf_cgroup_storage_release(struct bpf_prog *prog, struct bpf_map *map);
+
/* Wrappers for __cgroup_bpf_run_filter_skb() guarded by cgroup_bpf_enabled. */
#define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb) \
({ \
@@ -188,12 +233,48 @@ int __cgroup_bpf_check_dev_permission(short dev_type, u32 major, u32 minor,
\
__ret; \
})
+int cgroup_bpf_prog_attach(const union bpf_attr *attr,
+ enum bpf_prog_type ptype, struct bpf_prog *prog);
+int cgroup_bpf_prog_detach(const union bpf_attr *attr,
+ enum bpf_prog_type ptype);
+int cgroup_bpf_prog_query(const union bpf_attr *attr,
+ union bpf_attr __user *uattr);
#else
+struct bpf_prog;
struct cgroup_bpf {};
static inline void cgroup_bpf_put(struct cgroup *cgrp) {}
static inline int cgroup_bpf_inherit(struct cgroup *cgrp) { return 0; }
+static inline int cgroup_bpf_prog_attach(const union bpf_attr *attr,
+ enum bpf_prog_type ptype,
+ struct bpf_prog *prog)
+{
+ return -EINVAL;
+}
+
+static inline int cgroup_bpf_prog_detach(const union bpf_attr *attr,
+ enum bpf_prog_type ptype)
+{
+ return -EINVAL;
+}
+
+static inline int cgroup_bpf_prog_query(const union bpf_attr *attr,
+ union bpf_attr __user *uattr)
+{
+ return -EINVAL;
+}
+
+static inline void bpf_cgroup_storage_set(struct bpf_cgroup_storage *storage) {}
+static inline int bpf_cgroup_storage_assign(struct bpf_prog *prog,
+ struct bpf_map *map) { return 0; }
+static inline void bpf_cgroup_storage_release(struct bpf_prog *prog,
+ struct bpf_map *map) {}
+static inline struct bpf_cgroup_storage *bpf_cgroup_storage_alloc(
+ struct bpf_prog *prog) { return 0; }
+static inline void bpf_cgroup_storage_free(
+ struct bpf_cgroup_storage *storage) {}
+
#define cgroup_bpf_enabled (0)
#define BPF_CGROUP_PRE_CONNECT_ENABLED(sk) (0)
#define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk,skb) ({ 0; })
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 995c3b1e59bf..523481a3471b 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -23,7 +23,7 @@ struct bpf_prog;
struct bpf_map;
struct sock;
struct seq_file;
-struct btf;
+struct btf_type;
/* map is generic key/value storage optionally accesible by eBPF programs */
struct bpf_map_ops {
@@ -48,8 +48,9 @@ struct bpf_map_ops {
u32 (*map_fd_sys_lookup_elem)(void *ptr);
void (*map_seq_show_elem)(struct bpf_map *map, void *key,
struct seq_file *m);
- int (*map_check_btf)(const struct bpf_map *map, const struct btf *btf,
- u32 key_type_id, u32 value_type_id);
+ int (*map_check_btf)(const struct bpf_map *map,
+ const struct btf_type *key_type,
+ const struct btf_type *value_type);
};
struct bpf_map {
@@ -85,6 +86,7 @@ struct bpf_map {
char name[BPF_OBJ_NAME_LEN];
};
+struct bpf_offload_dev;
struct bpf_offloaded_map;
struct bpf_map_dev_ops {
@@ -117,9 +119,13 @@ static inline bool bpf_map_offload_neutral(const struct bpf_map *map)
static inline bool bpf_map_support_seq_show(const struct bpf_map *map)
{
- return map->ops->map_seq_show_elem && map->ops->map_check_btf;
+ return map->btf && map->ops->map_seq_show_elem;
}
+int map_check_no_btf(const struct bpf_map *map,
+ const struct btf_type *key_type,
+ const struct btf_type *value_type);
+
extern const struct bpf_map_ops bpf_map_offload_ops;
/* function argument constraints */
@@ -154,6 +160,7 @@ enum bpf_arg_type {
enum bpf_return_type {
RET_INTEGER, /* function returns integer */
RET_VOID, /* function doesn't return anything */
+ RET_PTR_TO_MAP_VALUE, /* returns a pointer to map elem value */
RET_PTR_TO_MAP_VALUE_OR_NULL, /* returns a pointer to map elem value or NULL */
};
@@ -281,6 +288,7 @@ struct bpf_prog_aux {
struct bpf_prog *prog;
struct user_struct *user;
u64 load_time; /* ns since boottime */
+ struct bpf_map *cgroup_storage;
char name[BPF_OBJ_NAME_LEN];
#ifdef CONFIG_SECURITY
void *security;
@@ -347,12 +355,17 @@ int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
* The 'struct bpf_prog_array *' should only be replaced with xchg()
* since other cpus are walking the array of pointers in parallel.
*/
+struct bpf_prog_array_item {
+ struct bpf_prog *prog;
+ struct bpf_cgroup_storage *cgroup_storage;
+};
+
struct bpf_prog_array {
struct rcu_head rcu;
- struct bpf_prog *progs[0];
+ struct bpf_prog_array_item items[0];
};
-struct bpf_prog_array __rcu *bpf_prog_array_alloc(u32 prog_cnt, gfp_t flags);
+struct bpf_prog_array *bpf_prog_array_alloc(u32 prog_cnt, gfp_t flags);
void bpf_prog_array_free(struct bpf_prog_array __rcu *progs);
int bpf_prog_array_length(struct bpf_prog_array __rcu *progs);
int bpf_prog_array_copy_to_user(struct bpf_prog_array __rcu *progs,
@@ -370,7 +383,8 @@ int bpf_prog_array_copy(struct bpf_prog_array __rcu *old_array,
#define __BPF_PROG_RUN_ARRAY(array, ctx, func, check_non_null) \
({ \
- struct bpf_prog **_prog, *__prog; \
+ struct bpf_prog_array_item *_item; \
+ struct bpf_prog *_prog; \
struct bpf_prog_array *_array; \
u32 _ret = 1; \
preempt_disable(); \
@@ -378,10 +392,11 @@ int bpf_prog_array_copy(struct bpf_prog_array __rcu *old_array,
_array = rcu_dereference(array); \
if (unlikely(check_non_null && !_array))\
goto _out; \
- _prog = _array->progs; \
- while ((__prog = READ_ONCE(*_prog))) { \
- _ret &= func(__prog, ctx); \
- _prog++; \
+ _item = &_array->items[0]; \
+ while ((_prog = READ_ONCE(_item->prog))) { \
+ bpf_cgroup_storage_set(_item->cgroup_storage); \
+ _ret &= func(_prog, ctx); \
+ _item++; \
} \
_out: \
rcu_read_unlock(); \
@@ -434,6 +449,8 @@ struct bpf_map * __must_check bpf_map_inc(struct bpf_map *map, bool uref);
void bpf_map_put_with_uref(struct bpf_map *map);
void bpf_map_put(struct bpf_map *map);
int bpf_map_precharge_memlock(u32 pages);
+int bpf_map_charge_memlock(struct bpf_map *map, u32 pages);
+void bpf_map_uncharge_memlock(struct bpf_map *map, u32 pages);
void *bpf_map_area_alloc(size_t size, int numa_node);
void bpf_map_area_free(void *base);
void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr);
@@ -488,12 +505,15 @@ void bpf_patch_call_args(struct bpf_insn *insn, u32 stack_depth);
/* Map specifics */
struct xdp_buff;
+struct sk_buff;
struct bpf_dtab_netdev *__dev_map_lookup_elem(struct bpf_map *map, u32 key);
void __dev_map_insert_ctx(struct bpf_map *map, u32 index);
void __dev_map_flush(struct bpf_map *map);
int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp,
struct net_device *dev_rx);
+int dev_map_generic_redirect(struct bpf_dtab_netdev *dst, struct sk_buff *skb,
+ struct bpf_prog *xdp_prog);
struct bpf_cpu_map_entry *__cpu_map_lookup_elem(struct bpf_map *map, u32 key);
void __cpu_map_insert_ctx(struct bpf_map *map, u32 index);
@@ -509,6 +529,7 @@ static inline int bpf_map_attr_numa_node(const union bpf_attr *attr)
}
struct bpf_prog *bpf_prog_get_type_path(const char *name, enum bpf_prog_type type);
+int array_map_alloc_check(union bpf_attr *attr);
#else /* !CONFIG_BPF_SYSCALL */
static inline struct bpf_prog *bpf_prog_get(u32 ufd)
@@ -586,6 +607,15 @@ int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp,
return 0;
}
+struct sk_buff;
+
+static inline int dev_map_generic_redirect(struct bpf_dtab_netdev *dst,
+ struct sk_buff *skb,
+ struct bpf_prog *xdp_prog)
+{
+ return 0;
+}
+
static inline
struct bpf_cpu_map_entry *__cpu_map_lookup_elem(struct bpf_map *map, u32 key)
{
@@ -636,7 +666,15 @@ int bpf_map_offload_delete_elem(struct bpf_map *map, void *key);
int bpf_map_offload_get_next_key(struct bpf_map *map,
void *key, void *next_key);
-bool bpf_offload_dev_match(struct bpf_prog *prog, struct bpf_map *map);
+bool bpf_offload_prog_map_match(struct bpf_prog *prog, struct bpf_map *map);
+
+struct bpf_offload_dev *bpf_offload_dev_create(void);
+void bpf_offload_dev_destroy(struct bpf_offload_dev *offdev);
+int bpf_offload_dev_netdev_register(struct bpf_offload_dev *offdev,
+ struct net_device *netdev);
+void bpf_offload_dev_netdev_unregister(struct bpf_offload_dev *offdev,
+ struct net_device *netdev);
+bool bpf_offload_dev_match(struct bpf_prog *prog, struct net_device *netdev);
#if defined(CONFIG_NET) && defined(CONFIG_BPF_SYSCALL)
int bpf_prog_offload_init(struct bpf_prog *prog, union bpf_attr *attr);
@@ -684,6 +722,8 @@ static inline void bpf_map_offload_map_free(struct bpf_map *map)
struct sock *__sock_map_lookup_elem(struct bpf_map *map, u32 key);
struct sock *__sock_hash_lookup_elem(struct bpf_map *map, void *key);
int sock_map_prog(struct bpf_map *map, struct bpf_prog *prog, u32 type);
+int sockmap_get_from_fd(const union bpf_attr *attr, int type,
+ struct bpf_prog *prog);
#else
static inline struct sock *__sock_map_lookup_elem(struct bpf_map *map, u32 key)
{
@@ -702,6 +742,12 @@ static inline int sock_map_prog(struct bpf_map *map,
{
return -EOPNOTSUPP;
}
+
+static inline int sockmap_get_from_fd(const union bpf_attr *attr, int type,
+ struct bpf_prog *prog)
+{
+ return -EINVAL;
+}
#endif
#if defined(CONFIG_XDP_SOCKETS)
@@ -729,6 +775,33 @@ static inline void __xsk_map_flush(struct bpf_map *map)
}
#endif
+#if defined(CONFIG_INET) && defined(CONFIG_BPF_SYSCALL)
+void bpf_sk_reuseport_detach(struct sock *sk);
+int bpf_fd_reuseport_array_lookup_elem(struct bpf_map *map, void *key,
+ void *value);
+int bpf_fd_reuseport_array_update_elem(struct bpf_map *map, void *key,
+ void *value, u64 map_flags);
+#else
+static inline void bpf_sk_reuseport_detach(struct sock *sk)
+{
+}
+
+#ifdef CONFIG_BPF_SYSCALL
+static inline int bpf_fd_reuseport_array_lookup_elem(struct bpf_map *map,
+ void *key, void *value)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int bpf_fd_reuseport_array_update_elem(struct bpf_map *map,
+ void *key, void *value,
+ u64 map_flags)
+{
+ return -EOPNOTSUPP;
+}
+#endif /* CONFIG_BPF_SYSCALL */
+#endif /* defined(CONFIG_INET) && defined(CONFIG_BPF_SYSCALL) */
+
/* verifier prototypes for helper functions called from eBPF programs */
extern const struct bpf_func_proto bpf_map_lookup_elem_proto;
extern const struct bpf_func_proto bpf_map_update_elem_proto;
@@ -748,6 +821,8 @@ extern const struct bpf_func_proto bpf_sock_map_update_proto;
extern const struct bpf_func_proto bpf_sock_hash_update_proto;
extern const struct bpf_func_proto bpf_get_current_cgroup_id_proto;
+extern const struct bpf_func_proto bpf_get_local_storage_proto;
+
/* Shared helpers among cBPF and eBPF. */
void bpf_user_rnd_init_once(void);
u64 bpf_user_rnd_u32(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
diff --git a/include/linux/bpf_lirc.h b/include/linux/bpf_lirc.h
index 5f8a4283092d..9d9ff755ec29 100644
--- a/include/linux/bpf_lirc.h
+++ b/include/linux/bpf_lirc.h
@@ -5,11 +5,12 @@
#include <uapi/linux/bpf.h>
#ifdef CONFIG_BPF_LIRC_MODE2
-int lirc_prog_attach(const union bpf_attr *attr);
+int lirc_prog_attach(const union bpf_attr *attr, struct bpf_prog *prog);
int lirc_prog_detach(const union bpf_attr *attr);
int lirc_prog_query(const union bpf_attr *attr, union bpf_attr __user *uattr);
#else
-static inline int lirc_prog_attach(const union bpf_attr *attr)
+static inline int lirc_prog_attach(const union bpf_attr *attr,
+ struct bpf_prog *prog)
{
return -EINVAL;
}
diff --git a/include/linux/bpf_types.h b/include/linux/bpf_types.h
index c5700c2d5549..cd26c090e7c0 100644
--- a/include/linux/bpf_types.h
+++ b/include/linux/bpf_types.h
@@ -29,6 +29,9 @@ BPF_PROG_TYPE(BPF_PROG_TYPE_CGROUP_DEVICE, cg_dev)
#ifdef CONFIG_BPF_LIRC_MODE2
BPF_PROG_TYPE(BPF_PROG_TYPE_LIRC_MODE2, lirc_mode2)
#endif
+#ifdef CONFIG_INET
+BPF_PROG_TYPE(BPF_PROG_TYPE_SK_REUSEPORT, sk_reuseport)
+#endif
BPF_MAP_TYPE(BPF_MAP_TYPE_ARRAY, array_map_ops)
BPF_MAP_TYPE(BPF_MAP_TYPE_PERCPU_ARRAY, percpu_array_map_ops)
@@ -37,6 +40,9 @@ BPF_MAP_TYPE(BPF_MAP_TYPE_PERF_EVENT_ARRAY, perf_event_array_map_ops)
#ifdef CONFIG_CGROUPS
BPF_MAP_TYPE(BPF_MAP_TYPE_CGROUP_ARRAY, cgroup_array_map_ops)
#endif
+#ifdef CONFIG_CGROUP_BPF
+BPF_MAP_TYPE(BPF_MAP_TYPE_CGROUP_STORAGE, cgroup_storage_map_ops)
+#endif
BPF_MAP_TYPE(BPF_MAP_TYPE_HASH, htab_map_ops)
BPF_MAP_TYPE(BPF_MAP_TYPE_PERCPU_HASH, htab_percpu_map_ops)
BPF_MAP_TYPE(BPF_MAP_TYPE_LRU_HASH, htab_lru_map_ops)
@@ -57,4 +63,7 @@ BPF_MAP_TYPE(BPF_MAP_TYPE_CPUMAP, cpu_map_ops)
#if defined(CONFIG_XDP_SOCKETS)
BPF_MAP_TYPE(BPF_MAP_TYPE_XSKMAP, xsk_map_ops)
#endif
+#ifdef CONFIG_INET
+BPF_MAP_TYPE(BPF_MAP_TYPE_REUSEPORT_SOCKARRAY, reuseport_array_ops)
+#endif
#endif
diff --git a/include/linux/bpfilter.h b/include/linux/bpfilter.h
index 687b1760bb9f..f02cee0225d4 100644
--- a/include/linux/bpfilter.h
+++ b/include/linux/bpfilter.h
@@ -5,10 +5,10 @@
#include <uapi/linux/bpfilter.h>
struct sock;
-int bpfilter_ip_set_sockopt(struct sock *sk, int optname, char *optval,
+int bpfilter_ip_set_sockopt(struct sock *sk, int optname, char __user *optval,
unsigned int optlen);
-int bpfilter_ip_get_sockopt(struct sock *sk, int optname, char *optval,
- int *optlen);
+int bpfilter_ip_get_sockopt(struct sock *sk, int optname, char __user *optval,
+ int __user *optlen);
extern int (*bpfilter_process_sockopt)(struct sock *sk, int optname,
char __user *optval,
unsigned int optlen, bool is_set);
diff --git a/include/linux/brcmphy.h b/include/linux/brcmphy.h
index daa9234a9baf..949e9af8d9d6 100644
--- a/include/linux/brcmphy.h
+++ b/include/linux/brcmphy.h
@@ -45,6 +45,7 @@
#define PHY_ID_BCM7445 0x600d8510
#define PHY_ID_BCM_CYGNUS 0xae025200
+#define PHY_ID_BCM_OMEGA 0xae025100
#define PHY_BCM_OUI_MASK 0xfffffc00
#define PHY_BCM_OUI_1 0x00206000
diff --git a/include/linux/build-salt.h b/include/linux/build-salt.h
new file mode 100644
index 000000000000..bb007bd05e7a
--- /dev/null
+++ b/include/linux/build-salt.h
@@ -0,0 +1,20 @@
+#ifndef __BUILD_SALT_H
+#define __BUILD_SALT_H
+
+#include <linux/elfnote.h>
+
+#define LINUX_ELFNOTE_BUILD_SALT 0x100
+
+#ifdef __ASSEMBLER__
+
+#define BUILD_SALT \
+ ELFNOTE(Linux, LINUX_ELFNOTE_BUILD_SALT, .asciz CONFIG_BUILD_SALT)
+
+#else
+
+#define BUILD_SALT \
+ ELFNOTE32("Linux", LINUX_ELFNOTE_BUILD_SALT, CONFIG_BUILD_SALT)
+
+#endif
+
+#endif /* __BUILD_SALT_H */
diff --git a/include/linux/can/dev.h b/include/linux/can/dev.h
index 055aaf5ed9af..a83e1f632eb7 100644
--- a/include/linux/can/dev.h
+++ b/include/linux/can/dev.h
@@ -143,7 +143,12 @@ u8 can_dlc2len(u8 can_dlc);
/* map the sanitized data length to an appropriate data length code */
u8 can_len2dlc(u8 len);
-struct net_device *alloc_candev(int sizeof_priv, unsigned int echo_skb_max);
+struct net_device *alloc_candev_mqs(int sizeof_priv, unsigned int echo_skb_max,
+ unsigned int txqs, unsigned int rxqs);
+#define alloc_candev(sizeof_priv, echo_skb_max) \
+ alloc_candev_mqs(sizeof_priv, echo_skb_max, 1, 1)
+#define alloc_candev_mq(sizeof_priv, echo_skb_max, count) \
+ alloc_candev_mqs(sizeof_priv, echo_skb_max, count, count)
void free_candev(struct net_device *dev);
/* a candev safe wrapper around netdev_priv */
diff --git a/include/linux/cdrom.h b/include/linux/cdrom.h
index e75dfd1f1dec..528271c60018 100644
--- a/include/linux/cdrom.h
+++ b/include/linux/cdrom.h
@@ -13,6 +13,7 @@
#include <linux/fs.h> /* not really needed, later.. */
#include <linux/list.h>
+#include <scsi/scsi_common.h>
#include <uapi/linux/cdrom.h>
struct packet_command
@@ -21,7 +22,7 @@ struct packet_command
unsigned char *buffer;
unsigned int buflen;
int stat;
- struct request_sense *sense;
+ struct scsi_sense_hdr *sshdr;
unsigned char data_direction;
int quiet;
int timeout;
diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
index c0e68f903011..ff20b677fb9f 100644
--- a/include/linux/cgroup-defs.h
+++ b/include/linux/cgroup-defs.h
@@ -438,6 +438,9 @@ struct cgroup {
/* used to store eBPF programs */
struct cgroup_bpf bpf;
+ /* If there is block congestion on this cgroup. */
+ atomic_t congestion_count;
+
/* ids of the ancestors at each level including self */
int ancestor_ids[];
};
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index c9fdf6f57913..32c553556bbd 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -554,6 +554,36 @@ static inline bool cgroup_is_descendant(struct cgroup *cgrp,
}
/**
+ * cgroup_ancestor - find ancestor of cgroup
+ * @cgrp: cgroup to find ancestor of
+ * @ancestor_level: level of ancestor to find starting from root
+ *
+ * Find ancestor of cgroup at specified level starting from root if it exists
+ * and return pointer to it. Return NULL if @cgrp doesn't have ancestor at
+ * @ancestor_level.
+ *
+ * This function is safe to call as long as @cgrp is accessible.
+ */
+static inline struct cgroup *cgroup_ancestor(struct cgroup *cgrp,
+ int ancestor_level)
+{
+ struct cgroup *ptr;
+
+ if (cgrp->level < ancestor_level)
+ return NULL;
+
+ for (ptr = cgrp;
+ ptr && ptr->level > ancestor_level;
+ ptr = cgroup_parent(ptr))
+ ;
+
+ if (ptr && ptr->level == ancestor_level)
+ return ptr;
+
+ return NULL;
+}
+
+/**
* task_under_cgroup_hierarchy - test task's membership of cgroup ancestry
* @task: the task to be tested
* @ancestor: possible ancestor of @task's cgroup
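
A hedged usage sketch for cgroup_ancestor(); the caller and its purpose are hypothetical, while task_dfl_cgroup() is an existing helper. It compares a task's cgroup against a known cgroup at a fixed depth:

static bool my_task_under_level2(struct task_struct *task,
				 struct cgroup *candidate)
{
	struct cgroup *cgrp;
	bool match;

	rcu_read_lock();	/* keep the task's cgroup accessible */
	cgrp = task_dfl_cgroup(task);
	match = cgroup_ancestor(cgrp, 2) == candidate;
	rcu_read_unlock();

	return match;
}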
diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h
index b7cfa037e593..08b1aa70a38d 100644
--- a/include/linux/clk-provider.h
+++ b/include/linux/clk-provider.h
@@ -38,6 +38,8 @@
#define CLK_IS_CRITICAL BIT(11) /* do not gate, ever */
/* parents need enable during gate/ungate, set rate and re-parent */
#define CLK_OPS_PARENT_ENABLE BIT(12)
+/* duty cycle call may be forwarded to the parent clock */
+#define CLK_DUTY_CYCLE_PARENT BIT(13)
struct clk;
struct clk_hw;
@@ -67,6 +69,17 @@ struct clk_rate_request {
};
/**
+ * struct clk_duty - Structure encoding the duty cycle ratio of a clock
+ *
+ * @num: Numerator of the duty cycle ratio
+ * @den: Denominator of the duty cycle ratio
+ */
+struct clk_duty {
+ unsigned int num;
+ unsigned int den;
+};
+
+/**
* struct clk_ops - Callback operations for hardware clocks; these are to
* be provided by the clock implementation, and will be called by drivers
* through the clk_* api.
@@ -169,6 +182,15 @@ struct clk_rate_request {
* by the second argument. Valid values for degrees are
* 0-359. Return 0 on success, otherwise -EERROR.
*
+ * @get_duty_cycle: Queries the hardware to get the current duty cycle ratio
+ * of a clock. The returned denominator cannot be 0 and must be
+ * greater than or equal to the numerator.
+ *
+ * @set_duty_cycle: Apply the duty cycle ratio to this clock signal specified by
+ * the numerator and denominator carried in the struct clk_duty argument.
+ * The ratio must be valid (denominator > 0
+ * and >= numerator). Return 0 on success, otherwise -EERROR.
+ *
* @init: Perform platform-specific initialization magic.
* This is not used by any of the basic clock types.
* Please consider other ways of solving initialization problems
@@ -218,6 +240,10 @@ struct clk_ops {
unsigned long parent_accuracy);
int (*get_phase)(struct clk_hw *hw);
int (*set_phase)(struct clk_hw *hw, int degrees);
+ int (*get_duty_cycle)(struct clk_hw *hw,
+ struct clk_duty *duty);
+ int (*set_duty_cycle)(struct clk_hw *hw,
+ struct clk_duty *duty);
void (*init)(struct clk_hw *hw);
void (*debug_init)(struct clk_hw *hw, struct dentry *dentry);
};
diff --git a/include/linux/clk.h b/include/linux/clk.h
index 0dbd0885b2c2..4f750c481b82 100644
--- a/include/linux/clk.h
+++ b/include/linux/clk.h
@@ -142,6 +142,27 @@ int clk_set_phase(struct clk *clk, int degrees);
int clk_get_phase(struct clk *clk);
/**
+ * clk_set_duty_cycle - adjust the duty cycle ratio of a clock signal
+ * @clk: clock signal source
+ * @num: numerator of the duty cycle ratio to be applied
+ * @den: denominator of the duty cycle ratio to be applied
+ *
+ * Adjust the duty cycle of a clock signal to the specified ratio. Returns 0 on
+ * success, -EERROR otherwise.
+ */
+int clk_set_duty_cycle(struct clk *clk, unsigned int num, unsigned int den);
+
+/**
+ * clk_get_scaled_duty_cycle - return the duty cycle ratio of a clock signal
+ * @clk: clock signal source
+ * @scale: scaling factor to be applied to represent the ratio as an integer
+ *
+ * Returns the duty cycle ratio multiplied by the scale provided, otherwise
+ * returns -EERROR.
+ */
+int clk_get_scaled_duty_cycle(struct clk *clk, unsigned int scale);
+
+/**
* clk_is_match - check if two clk's point to the same hardware clock
* @p: clk compared against q
* @q: clk compared against p
@@ -183,6 +204,18 @@ static inline long clk_get_phase(struct clk *clk)
return -ENOTSUPP;
}
+static inline int clk_set_duty_cycle(struct clk *clk, unsigned int num,
+ unsigned int den)
+{
+ return -ENOTSUPP;
+}
+
+static inline unsigned int clk_get_scaled_duty_cycle(struct clk *clk,
+ unsigned int scale)
+{
+ return 0;
+}
+
static inline bool clk_is_match(const struct clk *p, const struct clk *q)
{
return p == q;
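
A hedged consumer sketch showing the intended use of the new duty-cycle API; my_setup_clk_duty() is hypothetical, while clk_set_duty_cycle() and clk_get_scaled_duty_cycle() are the calls declared above:

static int my_setup_clk_duty(struct clk *clk)
{
	int ret;

	ret = clk_set_duty_cycle(clk, 1, 3);	/* request a 1/3 duty cycle */
	if (ret)
		return ret;

	/* Read it back scaled to a percentage, e.g. 33 for 1/3. */
	pr_info("duty cycle: %d%%\n", clk_get_scaled_duty_cycle(clk, 100));
	return 0;
}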
diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h
index 7dff1963c185..308918928767 100644
--- a/include/linux/clocksource.h
+++ b/include/linux/clocksource.h
@@ -194,6 +194,9 @@ extern void clocksource_suspend(void);
extern void clocksource_resume(void);
extern struct clocksource * __init clocksource_default_clock(void);
extern void clocksource_mark_unstable(struct clocksource *cs);
+extern void
+clocksource_start_suspend_timing(struct clocksource *cs, u64 start_cycles);
+extern u64 clocksource_stop_suspend_timing(struct clocksource *cs, u64 now);
extern u64
clocks_calc_max_nsecs(u32 mult, u32 shift, u32 maxadj, u64 mask, u64 *max_cycles);
diff --git a/include/linux/compat.h b/include/linux/compat.h
index b1a5562b3215..1a3c4f37e908 100644
--- a/include/linux/compat.h
+++ b/include/linux/compat.h
@@ -72,6 +72,9 @@
*/
#ifndef COMPAT_SYSCALL_DEFINEx
#define COMPAT_SYSCALL_DEFINEx(x, name, ...) \
+ __diag_push(); \
+ __diag_ignore(GCC, 8, "-Wattribute-alias", \
+ "Type aliasing is used to sanitize syscall arguments");\
asmlinkage long compat_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__)); \
asmlinkage long compat_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__)) \
__attribute__((alias(__stringify(__se_compat_sys##name)))); \
@@ -80,8 +83,11 @@
asmlinkage long __se_compat_sys##name(__MAP(x,__SC_LONG,__VA_ARGS__)); \
asmlinkage long __se_compat_sys##name(__MAP(x,__SC_LONG,__VA_ARGS__)) \
{ \
- return __do_compat_sys##name(__MAP(x,__SC_DELOUSE,__VA_ARGS__));\
+ long ret = __do_compat_sys##name(__MAP(x,__SC_DELOUSE,__VA_ARGS__));\
+ __MAP(x,__SC_TEST,__VA_ARGS__); \
+ return ret; \
} \
+ __diag_pop(); \
static inline long __do_compat_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__))
#endif /* COMPAT_SYSCALL_DEFINEx */
@@ -109,11 +115,6 @@ typedef compat_ulong_t compat_aio_context_t;
struct compat_sel_arg_struct;
struct rusage;
-struct compat_itimerspec {
- struct compat_timespec it_interval;
- struct compat_timespec it_value;
-};
-
struct compat_utimbuf {
compat_time_t actime;
compat_time_t modtime;
@@ -294,10 +295,6 @@ extern int compat_get_timespec(struct timespec *, const void __user *);
extern int compat_put_timespec(const struct timespec *, void __user *);
extern int compat_get_timeval(struct timeval *, const void __user *);
extern int compat_put_timeval(const struct timeval *, void __user *);
-extern int get_compat_itimerspec64(struct itimerspec64 *its,
- const struct compat_itimerspec __user *uits);
-extern int put_compat_itimerspec64(const struct itimerspec64 *its,
- struct compat_itimerspec __user *uits);
struct compat_iovec {
compat_uptr_t iov_base;
@@ -1022,6 +1019,17 @@ static inline struct compat_timeval ns_to_compat_timeval(s64 nsec)
return ctv;
}
+/*
+ * Kernel code should not call compat syscalls (i.e., compat_sys_xyzyyz())
+ * directly. Instead, use one of the functions which work equivalently, such
+ * as the kcompat_sys_xyzyyz() functions prototyped below.
+ */
+
+int kcompat_sys_statfs64(const char __user * pathname, compat_size_t sz,
+ struct compat_statfs64 __user * buf);
+int kcompat_sys_fstatfs64(unsigned int fd, compat_size_t sz,
+ struct compat_statfs64 __user * buf);
+
#else /* !CONFIG_COMPAT */
#define is_compat_task() (0)
diff --git a/include/linux/compat_time.h b/include/linux/compat_time.h
index 31f2774f1994..e70bfd1d2c3f 100644
--- a/include/linux/compat_time.h
+++ b/include/linux/compat_time.h
@@ -17,7 +17,16 @@ struct compat_timeval {
s32 tv_usec;
};
+struct compat_itimerspec {
+ struct compat_timespec it_interval;
+ struct compat_timespec it_value;
+};
+
extern int compat_get_timespec64(struct timespec64 *, const void __user *);
extern int compat_put_timespec64(const struct timespec64 *, void __user *);
+extern int get_compat_itimerspec64(struct itimerspec64 *its,
+ const struct compat_itimerspec __user *uits);
+extern int put_compat_itimerspec64(const struct itimerspec64 *its,
+ struct compat_itimerspec __user *uits);
#endif /* _LINUX_COMPAT_TIME_H */
diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
index f1a7492a5cc8..573f5a7d42d4 100644
--- a/include/linux/compiler-gcc.h
+++ b/include/linux/compiler-gcc.h
@@ -66,25 +66,40 @@
#endif
/*
+ * Feature detection for gnu_inline (gnu89 extern inline semantics). Either
+ * __GNUC_STDC_INLINE__ is defined (not using gnu89 extern inline semantics,
+ * and we opt in to the gnu89 semantics), or __GNUC_STDC_INLINE__ is not
+ * defined so the gnu89 semantics are the default.
+ */
+#ifdef __GNUC_STDC_INLINE__
+# define __gnu_inline __attribute__((gnu_inline))
+#else
+# define __gnu_inline
+#endif
+
+/*
* Force always-inline if the user requests it so via the .config,
* or if gcc is too old.
* GCC does not warn about unused static inline functions for
* -Wunused-function. This turns out to avoid the need for complex #ifdef
* directives. Suppress the warning in clang as well by using "unused"
* function attribute, which is redundant but not harmful for gcc.
+ * Prefer gnu_inline, so that extern inline functions do not emit an
+ * externally visible function. This makes extern inline behave as per gnu89
+ * semantics rather than c99. This prevents multiple symbol definition errors
+ * of extern inline functions at link time.
+ * A lot of inline functions can cause havoc with function tracing.
*/
#if !defined(CONFIG_ARCH_SUPPORTS_OPTIMIZED_INLINING) || \
!defined(CONFIG_OPTIMIZE_INLINING) || (__GNUC__ < 4)
-#define inline inline __attribute__((always_inline,unused)) notrace
-#define __inline__ __inline__ __attribute__((always_inline,unused)) notrace
-#define __inline __inline __attribute__((always_inline,unused)) notrace
+#define inline \
+ inline __attribute__((always_inline, unused)) notrace __gnu_inline
#else
-/* A lot of inline functions can cause havoc with function tracing */
-#define inline inline __attribute__((unused)) notrace
-#define __inline__ __inline__ __attribute__((unused)) notrace
-#define __inline __inline __attribute__((unused)) notrace
+#define inline inline __attribute__((unused)) notrace __gnu_inline
#endif
+#define __inline__ inline
+#define __inline inline
#define __always_inline inline __attribute__((always_inline))
#define noinline __attribute__((noinline))
@@ -347,3 +362,28 @@
#if GCC_VERSION >= 50100
#define COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW 1
#endif
+
+/*
+ * Turn individual warnings and errors on and off locally, depending
+ * on version.
+ */
+#define __diag_GCC(version, severity, s) \
+ __diag_GCC_ ## version(__diag_GCC_ ## severity s)
+
+/* Severity used in pragma directives */
+#define __diag_GCC_ignore ignored
+#define __diag_GCC_warn warning
+#define __diag_GCC_error error
+
+/* Compilers before gcc-4.6 do not understand "#pragma GCC diagnostic push" */
+#if GCC_VERSION >= 40600
+#define __diag_str1(s) #s
+#define __diag_str(s) __diag_str1(s)
+#define __diag(s) _Pragma(__diag_str(GCC diagnostic s))
+#endif
+
+#if GCC_VERSION >= 80000
+#define __diag_GCC_8(s) __diag(s)
+#else
+#define __diag_GCC_8(s)
+#endif
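
A small illustration (not from the patch) of the gnu89 'extern inline' semantics that __gnu_inline opts back into: the definition below never emits an out-of-line symbol, so many translation units can include it without "multiple definition" link errors.

extern inline __attribute__((gnu_inline)) int my_add(int a, int b)
{
	return a + b;	/* inlined at call sites; no global my_add is emitted */
}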
diff --git a/include/linux/compiler_types.h b/include/linux/compiler_types.h
index 6b79a9bba9a7..a8ba6b04152c 100644
--- a/include/linux/compiler_types.h
+++ b/include/linux/compiler_types.h
@@ -271,4 +271,22 @@ struct ftrace_likely_data {
# define __native_word(t) (sizeof(t) == sizeof(char) || sizeof(t) == sizeof(short) || sizeof(t) == sizeof(int) || sizeof(t) == sizeof(long))
#endif
+#ifndef __diag
+#define __diag(string)
+#endif
+
+#ifndef __diag_GCC
+#define __diag_GCC(version, severity, string)
+#endif
+
+#define __diag_push() __diag(push)
+#define __diag_pop() __diag(pop)
+
+#define __diag_ignore(compiler, version, option, comment) \
+ __diag_ ## compiler(version, ignore, option)
+#define __diag_warn(compiler, version, option, comment) \
+ __diag_ ## compiler(version, warn, option)
+#define __diag_error(compiler, version, option, comment) \
+ __diag_ ## compiler(version, error, option)
+
#endif /* __LINUX_COMPILER_TYPES_H */
diff --git a/include/linux/console.h b/include/linux/console.h
index dfd6b0e97855..f59f3dbca65c 100644
--- a/include/linux/console.h
+++ b/include/linux/console.h
@@ -21,6 +21,7 @@ struct console_font_op;
struct console_font;
struct module;
struct tty_struct;
+struct notifier_block;
/*
* this is what the terminal answers to a ESC-Z or csi0c query.
@@ -220,4 +221,8 @@ static inline bool vgacon_text_force(void) { return false; }
extern void console_init(void);
+/* For deferred console takeover */
+void dummycon_register_output_notifier(struct notifier_block *nb);
+void dummycon_unregister_output_notifier(struct notifier_block *nb);
+
#endif /* _LINUX_CONSOLE_H */
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index a97a63eef59f..218df7f4d3e1 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -30,7 +30,7 @@ struct cpu {
};
extern void boot_cpu_init(void);
-extern void boot_cpu_state_init(void);
+extern void boot_cpu_hotplug_init(void);
extern void cpu_init(void);
extern void trap_init(void);
@@ -55,6 +55,8 @@ extern ssize_t cpu_show_spectre_v2(struct device *dev,
struct device_attribute *attr, char *buf);
extern ssize_t cpu_show_spec_store_bypass(struct device *dev,
struct device_attribute *attr, char *buf);
+extern ssize_t cpu_show_l1tf(struct device *dev,
+ struct device_attribute *attr, char *buf);
extern __printf(4, 5)
struct device *cpu_device_create(struct device *parent, void *drvdata,
@@ -103,6 +105,7 @@ extern void cpus_write_lock(void);
extern void cpus_write_unlock(void);
extern void cpus_read_lock(void);
extern void cpus_read_unlock(void);
+extern int cpus_read_trylock(void);
extern void lockdep_assert_cpus_held(void);
extern void cpu_hotplug_disable(void);
extern void cpu_hotplug_enable(void);
@@ -115,6 +118,7 @@ static inline void cpus_write_lock(void) { }
static inline void cpus_write_unlock(void) { }
static inline void cpus_read_lock(void) { }
static inline void cpus_read_unlock(void) { }
+static inline int cpus_read_trylock(void) { return true; }
static inline void lockdep_assert_cpus_held(void) { }
static inline void cpu_hotplug_disable(void) { }
static inline void cpu_hotplug_enable(void) { }
@@ -166,4 +170,23 @@ void cpuhp_report_idle_dead(void);
static inline void cpuhp_report_idle_dead(void) { }
#endif /* #ifdef CONFIG_HOTPLUG_CPU */
+enum cpuhp_smt_control {
+ CPU_SMT_ENABLED,
+ CPU_SMT_DISABLED,
+ CPU_SMT_FORCE_DISABLED,
+ CPU_SMT_NOT_SUPPORTED,
+};
+
+#if defined(CONFIG_SMP) && defined(CONFIG_HOTPLUG_SMT)
+extern enum cpuhp_smt_control cpu_smt_control;
+extern void cpu_smt_disable(bool force);
+extern void cpu_smt_check_topology_early(void);
+extern void cpu_smt_check_topology(void);
+#else
+# define cpu_smt_control (CPU_SMT_ENABLED)
+static inline void cpu_smt_disable(bool force) { }
+static inline void cpu_smt_check_topology_early(void) { }
+static inline void cpu_smt_check_topology(void) { }
+#endif
+
#endif /* _LINUX_CPU_H_ */
diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
index 8796ba387152..4cf06a64bc02 100644
--- a/include/linux/cpuhotplug.h
+++ b/include/linux/cpuhotplug.h
@@ -164,6 +164,7 @@ enum cpuhp_state {
CPUHP_AP_PERF_POWERPC_NEST_IMC_ONLINE,
CPUHP_AP_PERF_POWERPC_CORE_IMC_ONLINE,
CPUHP_AP_PERF_POWERPC_THREAD_IMC_ONLINE,
+ CPUHP_AP_WATCHDOG_ONLINE,
CPUHP_AP_WORKQUEUE_ONLINE,
CPUHP_AP_RCUTREE_ONLINE,
CPUHP_AP_ONLINE_DYN,
diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
index bf53d893ad02..147bdec42215 100644
--- a/include/linux/cpumask.h
+++ b/include/linux/cpumask.h
@@ -115,12 +115,17 @@ extern struct cpumask __cpu_active_mask;
#define cpu_active(cpu) ((cpu) == 0)
#endif
-/* verify cpu argument to cpumask_* operators */
-static inline unsigned int cpumask_check(unsigned int cpu)
+static inline void cpu_max_bits_warn(unsigned int cpu, unsigned int bits)
{
#ifdef CONFIG_DEBUG_PER_CPU_MAPS
- WARN_ON_ONCE(cpu >= nr_cpumask_bits);
+ WARN_ON_ONCE(cpu >= bits);
#endif /* CONFIG_DEBUG_PER_CPU_MAPS */
+}
+
+/* verify cpu argument to cpumask_* operators */
+static inline unsigned int cpumask_check(unsigned int cpu)
+{
+ cpu_max_bits_warn(cpu, nr_cpumask_bits);
return cpu;
}
@@ -154,6 +159,13 @@ static inline unsigned int cpumask_next_and(int n,
return n+1;
}
+static inline unsigned int cpumask_next_wrap(int n, const struct cpumask *mask,
+ int start, bool wrap)
+{
+ /* cpu0 unless stop condition, wrap and at cpu0, then nr_cpumask_bits */
+ return (wrap && n == 0);
+}
+
/* cpu must be a valid cpu, ie 0, so there's no other choice. */
static inline unsigned int cpumask_any_but(const struct cpumask *mask,
unsigned int cpu)
diff --git a/include/linux/crc32poly.h b/include/linux/crc32poly.h
new file mode 100644
index 000000000000..62c4b7790a28
--- /dev/null
+++ b/include/linux/crc32poly.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_CRC32_POLY_H
+#define _LINUX_CRC32_POLY_H
+
+/*
+ * There are multiple 16-bit CRC polynomials in common use, but this is
+ * *the* standard CRC-32 polynomial, first popularized by Ethernet.
+ * x^32+x^26+x^23+x^22+x^16+x^12+x^11+x^10+x^8+x^7+x^5+x^4+x^2+x^1+x^0
+ */
+#define CRC32_POLY_LE 0xedb88320
+#define CRC32_POLY_BE 0x04c11db7
+
+/*
+ * This is the CRC32c polynomial, as outlined by Castagnoli.
+ * x^32+x^28+x^27+x^26+x^25+x^23+x^22+x^20+x^19+x^18+x^14+x^13+x^11+x^10+x^9+
+ * x^8+x^6+x^0
+ */
+#define CRC32C_POLY_LE 0x82F63B78
+
+#endif /* _LINUX_CRC32_POLY_H */
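
A minimal userspace sketch (illustrative only) of how the reflected CRC32_POLY_LE value above is consumed by a bit-at-a-time CRC-32, with the usual Ethernet/zlib pre- and post-inversion added here for completeness:

#include <stddef.h>
#include <stdint.h>

#define CRC32_POLY_LE 0xedb88320u

static uint32_t crc32_le_bitwise(uint32_t crc, const uint8_t *p, size_t len)
{
	crc = ~crc;			/* conventional initial value */
	while (len--) {
		crc ^= *p++;
		for (int i = 0; i < 8; i++)
			crc = (crc >> 1) ^ ((crc & 1) ? CRC32_POLY_LE : 0);
	}
	return ~crc;			/* conventional final inversion */
}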
diff --git a/include/linux/cred.h b/include/linux/cred.h
index 631286535d0f..7eed6101c791 100644
--- a/include/linux/cred.h
+++ b/include/linux/cred.h
@@ -65,6 +65,12 @@ extern void groups_free(struct group_info *);
extern int in_group_p(kgid_t);
extern int in_egroup_p(kgid_t);
+extern int groups_search(const struct group_info *, kgid_t);
+
+extern int set_current_groups(struct group_info *);
+extern void set_groups(struct cred *, struct group_info *);
+extern bool may_setgroups(void);
+extern void groups_sort(struct group_info *);
#else
static inline void groups_free(struct group_info *group_info)
{
@@ -78,12 +84,11 @@ static inline int in_egroup_p(kgid_t grp)
{
return 1;
}
+static inline int groups_search(const struct group_info *group_info, kgid_t grp)
+{
+ return 1;
+}
#endif
-extern int set_current_groups(struct group_info *);
-extern void set_groups(struct cred *, struct group_info *);
-extern int groups_search(const struct group_info *, kgid_t);
-extern bool may_setgroups(void);
-extern void groups_sort(struct group_info *);
/*
* The security context of a task
diff --git a/include/linux/crypto.h b/include/linux/crypto.h
index 6eb06101089f..e8839d3a7559 100644
--- a/include/linux/crypto.h
+++ b/include/linux/crypto.h
@@ -113,6 +113,11 @@
#define CRYPTO_ALG_OPTIONAL_KEY 0x00004000
/*
+ * Don't trigger module loading
+ */
+#define CRYPTO_NOLOAD 0x00008000
+
+/*
* Transform masks and values (for crt_flags).
*/
#define CRYPTO_TFM_NEED_KEY 0x00000001
diff --git a/include/linux/dax.h b/include/linux/dax.h
index 3855e3800f48..deb0f663252f 100644
--- a/include/linux/dax.h
+++ b/include/linux/dax.h
@@ -135,7 +135,7 @@ void dax_flush(struct dax_device *dax_dev, void *addr, size_t size);
ssize_t dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
const struct iomap_ops *ops);
-int dax_iomap_fault(struct vm_fault *vmf, enum page_entry_size pe_size,
+vm_fault_t dax_iomap_fault(struct vm_fault *vmf, enum page_entry_size pe_size,
pfn_t *pfnp, int *errp, const struct iomap_ops *ops);
vm_fault_t dax_finish_sync_fault(struct vm_fault *vmf,
enum page_entry_size pe_size, pfn_t pfn);
diff --git a/include/linux/dcache.h b/include/linux/dcache.h
index 66c6e17e61e5..d32957b423d5 100644
--- a/include/linux/dcache.h
+++ b/include/linux/dcache.h
@@ -227,7 +227,6 @@ extern void d_instantiate(struct dentry *, struct inode *);
extern void d_instantiate_new(struct dentry *, struct inode *);
extern struct dentry * d_instantiate_unique(struct dentry *, struct inode *);
extern struct dentry * d_instantiate_anon(struct dentry *, struct inode *);
-extern int d_instantiate_no_diralias(struct dentry *, struct inode *);
extern void __d_drop(struct dentry *dentry);
extern void d_drop(struct dentry *dentry);
extern void d_delete(struct dentry *);
@@ -271,8 +270,6 @@ extern void d_rehash(struct dentry *);
extern void d_add(struct dentry *, struct inode *);
-extern void dentry_update_name_case(struct dentry *, const struct qstr *);
-
/* used for rename() and baskets */
extern void d_move(struct dentry *, struct dentry *);
extern void d_exchange(struct dentry *, struct dentry *);
diff --git a/include/linux/delayacct.h b/include/linux/delayacct.h
index e6c0448ebcc7..31c865d1842e 100644
--- a/include/linux/delayacct.h
+++ b/include/linux/delayacct.h
@@ -124,7 +124,7 @@ static inline void delayacct_blkio_start(void)
static inline void delayacct_blkio_end(struct task_struct *p)
{
- if (current->delays)
+ if (p->delays)
__delayacct_blkio_end(p);
delayacct_clear_flag(DELAYACCT_PF_BLKIO);
}
diff --git a/include/linux/device.h b/include/linux/device.h
index 055a69dbcd18..2a562f4ded07 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -90,7 +90,7 @@ extern void bus_remove_file(struct bus_type *, struct bus_attribute *);
* @num_vf: Called to find out how many virtual functions a device on this
* bus supports.
* @dma_configure: Called to setup DMA configuration on a device on
- this bus.
+ * this bus.
* @pm: Power management operations of this bus, callback the specific
* device driver's pm-ops.
* @iommu_ops: IOMMU specific operations for this bus, used to attach IOMMU
@@ -384,6 +384,9 @@ int subsys_virtual_register(struct bus_type *subsys,
* @shutdown_pre: Called at shut-down time before driver shutdown.
* @ns_type: Callbacks so sysfs can determine namespaces.
* @namespace: Namespace of the device belongs to this class.
+ * @get_ownership: Allows class to specify uid/gid of the sysfs directories
+ * for the devices belonging to the class. Usually tied to
+ * device's namespace.
* @pm: The default device power management operations of this class.
* @p: The private data of the driver core, no one other than the
* driver core can touch this.
@@ -413,6 +416,8 @@ struct class {
const struct kobj_ns_type_operations *ns_type;
const void *(*namespace)(struct device *dev);
+ void (*get_ownership)(struct device *dev, kuid_t *uid, kgid_t *gid);
+
const struct dev_pm_ops *pm;
struct subsys_private *p;
@@ -784,14 +789,16 @@ enum device_link_state {
* Device link flags.
*
* STATELESS: The core won't track the presence of supplier/consumer drivers.
- * AUTOREMOVE: Remove this link automatically on consumer driver unbind.
+ * AUTOREMOVE_CONSUMER: Remove the link automatically on consumer driver unbind.
* PM_RUNTIME: If set, the runtime PM framework will use this link.
* RPM_ACTIVE: Run pm_runtime_get_sync() on the supplier during link creation.
+ * AUTOREMOVE_SUPPLIER: Remove the link automatically on supplier driver unbind.
*/
-#define DL_FLAG_STATELESS BIT(0)
-#define DL_FLAG_AUTOREMOVE BIT(1)
-#define DL_FLAG_PM_RUNTIME BIT(2)
-#define DL_FLAG_RPM_ACTIVE BIT(3)
+#define DL_FLAG_STATELESS BIT(0)
+#define DL_FLAG_AUTOREMOVE_CONSUMER BIT(1)
+#define DL_FLAG_PM_RUNTIME BIT(2)
+#define DL_FLAG_RPM_ACTIVE BIT(3)
+#define DL_FLAG_AUTOREMOVE_SUPPLIER BIT(4)
/**
* struct device_link - Device link representation.
@@ -886,6 +893,8 @@ struct dev_links_info {
* @coherent_dma_mask: Like dma_mask, but for alloc_coherent mapping as not all
* hardware supports 64-bit addresses for consistent allocations
* such descriptors.
+ * @bus_dma_mask: Mask of an upstream bridge or bus which imposes a smaller DMA
+ * limit than the device itself supports.
* @dma_pfn_offset: offset of DMA memory range relatively of RAM
* @dma_parms: A low level driver may set these to teach IOMMU code about
* segment limitations.
@@ -912,8 +921,6 @@ struct dev_links_info {
* @offline: Set after successful invocation of bus type's .offline().
* @of_node_reused: Set if the device-tree node is shared with an ancestor
* device.
- * @dma_32bit_limit: bridge limited to 32bit DMA even if the device itself
- * indicates support for a higher limit in the dma_mask field.
*
* At the lowest level, every device in a Linux system is represented by an
* instance of struct device. The device structure contains the information
@@ -967,6 +974,7 @@ struct device {
not all hardware supports
64 bit addresses for consistent
allocations such descriptors. */
+ u64 bus_dma_mask; /* upstream dma_mask constraint */
unsigned long dma_pfn_offset;
struct device_dma_parameters *dma_parms;
@@ -1002,7 +1010,6 @@ struct device {
bool offline_disabled:1;
bool offline:1;
bool of_node_reused:1;
- bool dma_32bit_limit:1;
};
static inline struct device *kobj_to_dev(struct kobject *kobj)
@@ -1316,6 +1323,7 @@ extern const char *dev_driver_string(const struct device *dev);
struct device_link *device_link_add(struct device *consumer,
struct device *supplier, u32 flags);
void device_link_del(struct device_link *link);
+void device_link_remove(void *consumer, struct device *supplier);
#ifdef CONFIG_PRINTK
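As a minimal usage sketch (the consumer/supplier device pointers are assumed to come from the caller), a consumer driver might pair the renamed flag with the new device_link_remove() helper like this:

    struct device_link *link;

    /* drop the link automatically when the consumer driver unbinds */
    link = device_link_add(consumer_dev, supplier_dev,
                           DL_FLAG_AUTOREMOVE_CONSUMER | DL_FLAG_PM_RUNTIME);
    if (!link)
            return -ENODEV;

    /* or tear the link down explicitly by naming the pair */
    device_link_remove(consumer_dev, supplier_dev);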
diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h
index 085db2fee2d7..58725f890b5b 100644
--- a/include/linux/dma-buf.h
+++ b/include/linux/dma-buf.h
@@ -39,12 +39,12 @@ struct dma_buf_attachment;
/**
* struct dma_buf_ops - operations possible on struct dma_buf
- * @map_atomic: maps a page from the buffer into kernel address
+ * @map_atomic: [optional] maps a page from the buffer into kernel address
* space, users may not block until the subsequent unmap call.
* This callback must not sleep.
* @unmap_atomic: [optional] unmaps an atomically mapped page from the buffer.
* This callback must not sleep.
- * @map: maps a page from the buffer into kernel address space.
+ * @map: [optional] maps a page from the buffer into kernel address space.
* @unmap: [optional] unmaps a page from the buffer.
* @vmap: [optional] creates a virtual mapping for the buffer into kernel
* address space. Same restrictions as for vmap and friends apply.
@@ -55,11 +55,11 @@ struct dma_buf_ops {
* @attach:
*
* This is called from dma_buf_attach() to make sure that a given
- * &device can access the provided &dma_buf. Exporters which support
- * buffer objects in special locations like VRAM or device-specific
- * carveout areas should check whether the buffer could be move to
- * system memory (or directly accessed by the provided device), and
- * otherwise need to fail the attach operation.
+ * &dma_buf_attachment.dev can access the provided &dma_buf. Exporters
+ * which support buffer objects in special locations like VRAM or
+ * device-specific carveout areas should check whether the buffer could
+ * be moved to system memory (or directly accessed by the provided
+ * device), and otherwise need to fail the attach operation.
*
* The exporter should also in general check whether the current
* allocation fulfills the DMA constraints of the new device. If this
@@ -77,8 +77,7 @@ struct dma_buf_ops {
* to signal that backing storage is already allocated and incompatible
* with the requirements of requesting device.
*/
- int (*attach)(struct dma_buf *, struct device *,
- struct dma_buf_attachment *);
+ int (*attach)(struct dma_buf *, struct dma_buf_attachment *);
/**
* @detach:
@@ -206,8 +205,6 @@ struct dma_buf_ops {
* to be restarted.
*/
int (*end_cpu_access)(struct dma_buf *, enum dma_data_direction);
- void *(*map_atomic)(struct dma_buf *, unsigned long);
- void (*unmap_atomic)(struct dma_buf *, unsigned long, void *);
void *(*map)(struct dma_buf *, unsigned long);
void (*unmap)(struct dma_buf *, unsigned long, void *);
@@ -395,8 +392,6 @@ int dma_buf_begin_cpu_access(struct dma_buf *dma_buf,
enum dma_data_direction dir);
int dma_buf_end_cpu_access(struct dma_buf *dma_buf,
enum dma_data_direction dir);
-void *dma_buf_kmap_atomic(struct dma_buf *, unsigned long);
-void dma_buf_kunmap_atomic(struct dma_buf *, unsigned long, void *);
void *dma_buf_kmap(struct dma_buf *, unsigned long);
void dma_buf_kunmap(struct dma_buf *, unsigned long, void *);
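With the struct device argument gone, an exporter reaches the importing device through the attachment itself. A rough sketch (my_exporter_can_reach() is a made-up helper standing in for whatever reachability check the exporter performs):

    static int my_exporter_attach(struct dma_buf *dmabuf,
                                  struct dma_buf_attachment *attach)
    {
            /* the importing device now comes from the attachment */
            if (!my_exporter_can_reach(dmabuf->priv, attach->dev))
                    return -EBUSY;

            return 0;
    }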
diff --git a/include/linux/dma-contiguous.h b/include/linux/dma-contiguous.h
index b67bf6ac907d..3c5a4cb3eb95 100644
--- a/include/linux/dma-contiguous.h
+++ b/include/linux/dma-contiguous.h
@@ -48,7 +48,7 @@
* CMA should not be used by the device drivers directly. It is
* only a helper framework for dma-mapping subsystem.
*
- * For more information, see kernel-docs in drivers/base/dma-contiguous.c
+ * For more information, see kernel-docs in kernel/dma/contiguous.c
*/
#ifdef __KERNEL__
diff --git a/include/linux/dma-fence.h b/include/linux/dma-fence.h
index eb9b05aa5aea..02dba8cd033d 100644
--- a/include/linux/dma-fence.h
+++ b/include/linux/dma-fence.h
@@ -166,7 +166,8 @@ struct dma_fence_ops {
* released when the fence is signalled (through e.g. the interrupt
* handler).
*
- * This callback is mandatory.
+ * This callback is optional. If this callback is not present, then the
+ * driver must always have signaling enabled.
*/
bool (*enable_signaling)(struct dma_fence *fence);
@@ -190,11 +191,14 @@ struct dma_fence_ops {
/**
* @wait:
*
- * Custom wait implementation, or dma_fence_default_wait.
+ * Custom wait implementation, defaults to dma_fence_default_wait() if
+ * not set.
*
- * Must not be NULL, set to dma_fence_default_wait for default implementation.
- * the dma_fence_default_wait implementation should work for any fence, as long
- * as enable_signaling works correctly.
+ * The dma_fence_default_wait implementation should work for any fence, as long
+ * as @enable_signaling works correctly. This hook allows drivers to
+ * have an optimized version for the case where a process context is
+ * already available, e.g. if @enable_signaling for the general case
+ * needs to set up a worker thread.
*
* Must return -ERESTARTSYS if the wait is intr = true and the wait was
* interrupted, and remaining jiffies if fence has signaled, or 0 if wait
@@ -202,7 +206,7 @@ struct dma_fence_ops {
* which should be treated as if the fence is signaled. For example a hardware
* lockup could be reported like that.
*
- * This callback is mandatory.
+ * This callback is optional.
*/
signed long (*wait)(struct dma_fence *fence,
bool intr, signed long timeout);
@@ -218,17 +222,6 @@ struct dma_fence_ops {
void (*release)(struct dma_fence *fence);
/**
- * @fill_driver_data:
- *
- * Callback to fill in free-form debug info.
- *
- * Returns amount of bytes filled, or negative error on failure.
- *
- * This callback is optional.
- */
- int (*fill_driver_data)(struct dma_fence *fence, void *data, int size);
-
- /**
* @fence_value_str:
*
* Callback to fill in free-form debug info specific to this fence, like
@@ -242,8 +235,9 @@ struct dma_fence_ops {
* @timeline_value_str:
*
* Fills in the current value of the timeline as a string, like the
- * sequence number. This should match what @fill_driver_data prints for
- * the most recently signalled fence (assuming no delayed signalling).
+ * sequence number. Note that the specific fence passed to this function
+ * should not matter, drivers should only use it to look up the
+ * corresponding timeline structures.
*/
void (*timeline_value_str)(struct dma_fence *fence,
char *str, int size);
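Because @enable_signaling and @wait are now optional, a driver whose fences always have signaling enabled only needs the two name callbacks; a hedged sketch with placeholder names:

    static const char *my_fence_driver_name(struct dma_fence *fence)
    {
            return "my_driver";             /* placeholder name */
    }

    static const char *my_fence_timeline_name(struct dma_fence *fence)
    {
            return "my_timeline";           /* placeholder name */
    }

    static const struct dma_fence_ops my_fence_ops = {
            .get_driver_name   = my_fence_driver_name,
            .get_timeline_name = my_fence_timeline_name,
            /* .enable_signaling and .wait omitted: the defaults now apply */
    };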
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index f9cc309507d9..1db6a6b46d0d 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -538,10 +538,17 @@ static inline void dma_free_attrs(struct device *dev, size_t size,
const struct dma_map_ops *ops = get_dma_ops(dev);
BUG_ON(!ops);
- WARN_ON(irqs_disabled());
if (dma_release_from_dev_coherent(dev, get_order(size), cpu_addr))
return;
+ /*
+ * On non-coherent platforms which implement DMA-coherent buffers via
+ * non-cacheable remaps, ops->free() may call vunmap(). Thus getting
+ * this far in IRQ context is a) at risk of a BUG_ON() or trying to
+ * sleep on some machines, and b) an indication that the driver is
+ * probably misusing the coherent API anyway.
+ */
+ WARN_ON(irqs_disabled());
if (!ops->free || !cpu_addr)
return;
diff --git a/include/linux/dma-noncoherent.h b/include/linux/dma-noncoherent.h
index 10b2654d549b..a0aa00cc909d 100644
--- a/include/linux/dma-noncoherent.h
+++ b/include/linux/dma-noncoherent.h
@@ -44,4 +44,12 @@ static inline void arch_sync_dma_for_cpu(struct device *dev,
}
#endif /* ARCH_HAS_SYNC_DMA_FOR_CPU */
+#ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL
+void arch_sync_dma_for_cpu_all(struct device *dev);
+#else
+static inline void arch_sync_dma_for_cpu_all(struct device *dev)
+{
+}
+#endif /* CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL */
+
#endif /* _LINUX_DMA_NONCOHERENT_H */
diff --git a/include/linux/dma/pxa-dma.h b/include/linux/dma/pxa-dma.h
index e56ec7af4fd7..9fc594f69eff 100644
--- a/include/linux/dma/pxa-dma.h
+++ b/include/linux/dma/pxa-dma.h
@@ -9,6 +9,15 @@ enum pxad_chan_prio {
PXAD_PRIO_LOWEST,
};
+/**
+ * struct pxad_param - dma channel request parameters
+ * @drcmr: requestor line number
+ * @prio: minimal mandatory priority of the channel
+ *
+ * If a requested channel is granted, its priority will be at least @prio,
+ * ie. if PXAD_PRIO_LOW is required, the requested channel will be either
+ * PXAD_PRIO_LOW, PXAD_PRIO_NORMAL or PXAD_PRIO_HIGHEST.
+ */
struct pxad_param {
unsigned int drcmr;
enum pxad_chan_prio prio;
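A short example of filling the request parameters; the requestor line number is made up, and the granted channel may end up at a higher priority than the one asked for:

    struct pxad_param param = {
            .drcmr = 24,                    /* hypothetical requestor line */
            .prio  = PXAD_PRIO_LOW,         /* lowest acceptable priority */
    };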
diff --git a/include/linux/efi.h b/include/linux/efi.h
index 56add823f190..401e4b254e30 100644
--- a/include/linux/efi.h
+++ b/include/linux/efi.h
@@ -894,6 +894,16 @@ typedef struct _efi_file_handle {
void *flush;
} efi_file_handle_t;
+typedef struct {
+ u64 revision;
+ u32 open_volume;
+} efi_file_io_interface_32_t;
+
+typedef struct {
+ u64 revision;
+ u64 open_volume;
+} efi_file_io_interface_64_t;
+
typedef struct _efi_file_io_interface {
u64 revision;
int (*open_volume)(struct _efi_file_io_interface *,
@@ -988,14 +998,12 @@ extern void efi_memmap_walk (efi_freemem_callback_t callback, void *arg);
extern void efi_gettimeofday (struct timespec64 *ts);
extern void efi_enter_virtual_mode (void); /* switch EFI to virtual mode, if possible */
#ifdef CONFIG_X86
-extern void efi_late_init(void);
extern void efi_free_boot_services(void);
extern efi_status_t efi_query_variable_store(u32 attributes,
unsigned long size,
bool nonblocking);
extern void efi_find_mirror(void);
#else
-static inline void efi_late_init(void) {}
static inline void efi_free_boot_services(void) {}
static inline efi_status_t efi_query_variable_store(u32 attributes,
@@ -1651,4 +1659,7 @@ struct linux_efi_tpm_eventlog {
extern int efi_tpm_eventlog_init(void);
+/* Workqueue to queue EFI Runtime Services */
+extern struct workqueue_struct *efi_rts_wq;
+
#endif /* _LINUX_EFI_H */
diff --git a/include/linux/etherdevice.h b/include/linux/etherdevice.h
index 79563840c295..572e11bb8696 100644
--- a/include/linux/etherdevice.h
+++ b/include/linux/etherdevice.h
@@ -59,8 +59,7 @@ struct net_device *devm_alloc_etherdev_mqs(struct device *dev, int sizeof_priv,
unsigned int rxqs);
#define devm_alloc_etherdev(dev, sizeof_priv) devm_alloc_etherdev_mqs(dev, sizeof_priv, 1, 1)
-struct sk_buff **eth_gro_receive(struct sk_buff **head,
- struct sk_buff *skb);
+struct sk_buff *eth_gro_receive(struct list_head *head, struct sk_buff *skb);
int eth_gro_complete(struct sk_buff *skb, int nhoff);
/* Reserved Ethernet Addresses per IEEE 802.1Q */
diff --git a/include/linux/eventfd.h b/include/linux/eventfd.h
index 7094718b653b..ffcc7724ca21 100644
--- a/include/linux/eventfd.h
+++ b/include/linux/eventfd.h
@@ -11,6 +11,7 @@
#include <linux/fcntl.h>
#include <linux/wait.h>
+#include <linux/err.h>
/*
* CAREFUL: Check include/uapi/asm-generic/fcntl.h when defining
diff --git a/include/linux/file.h b/include/linux/file.h
index 279720db984a..6b2fb032416c 100644
--- a/include/linux/file.h
+++ b/include/linux/file.h
@@ -17,9 +17,12 @@ extern void fput(struct file *);
struct file_operations;
struct vfsmount;
struct dentry;
+struct inode;
struct path;
-extern struct file *alloc_file(const struct path *, fmode_t mode,
- const struct file_operations *fop);
+extern struct file *alloc_file_pseudo(struct inode *, struct vfsmount *,
+ const char *, int flags, const struct file_operations *);
+extern struct file *alloc_file_clone(struct file *, int flags,
+ const struct file_operations *);
static inline void fput_light(struct file *file, int fput_needed)
{
@@ -78,7 +81,6 @@ extern int f_dupfd(unsigned int from, struct file *file, unsigned flags);
extern int replace_fd(unsigned fd, struct file *file, unsigned flags);
extern void set_close_on_exec(unsigned int fd, int flag);
extern bool get_close_on_exec(unsigned int fd);
-extern void put_filp(struct file *);
extern int get_unused_fd_flags(unsigned flags);
extern void put_unused_fd(unsigned int fd);
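A sketch of the intended replacement for open-coded alloc_file() users, assuming the usual ERR_PTR convention on failure (the inode, mount and file_operations names are placeholders):

    struct file *file;

    file = alloc_file_pseudo(inode, my_mnt, "[my:obj]",
                             O_RDWR, &my_file_operations);
    if (IS_ERR(file))
            return PTR_ERR(file);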
diff --git a/include/linux/filter.h b/include/linux/filter.h
index 45fc0f5000d8..5d565c50bcb2 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -19,6 +19,7 @@
#include <linux/cryptohash.h>
#include <linux/set_memory.h>
#include <linux/kallsyms.h>
+#include <linux/if_vlan.h>
#include <net/sch_generic.h>
@@ -31,6 +32,7 @@ struct seccomp_data;
struct bpf_prog_aux;
struct xdp_rxq_info;
struct xdp_buff;
+struct sock_reuseport;
/* ArgX, context and stack frame pointer register positions. Note,
* Arg1, Arg2, Arg3, etc are used as argument mappings of function
@@ -469,15 +471,16 @@ struct sock_fprog_kern {
};
struct bpf_binary_header {
- unsigned int pages;
- u8 image[];
+ u32 pages;
+ /* Some arches need word alignment for their instructions */
+ u8 image[] __aligned(4);
};
struct bpf_prog {
u16 pages; /* Number of allocated pages */
u16 jited:1, /* Is our filter JIT'ed? */
jit_requested:1,/* archs need to JIT the prog */
- locked:1, /* Program image locked? */
+ undo_set_mem:1, /* Passed set_memory_ro() checkpoint */
gpl_compatible:1, /* Is filter GPL compatible? */
cb_access:1, /* Is control block accessed? */
dst_needed:1, /* Do we need dst entry? */
@@ -535,6 +538,20 @@ struct sk_msg_buff {
struct list_head list;
};
+struct bpf_redirect_info {
+ u32 ifindex;
+ u32 flags;
+ struct bpf_map *map;
+ struct bpf_map *map_to_flush;
+ unsigned long map_owner;
+ u32 kern_flags;
+};
+
+DECLARE_PER_CPU(struct bpf_redirect_info, bpf_redirect_info);
+
+/* flags for bpf_redirect_info kern_flags */
+#define BPF_RI_F_RF_NO_DIRECT BIT(0) /* no napi_direct on return_frame */
+
/* Compute the linear packet data range [data, data_end) which
* will be accessed by various program types (cls_bpf, act_bpf,
* lwt, ...). Subsystems allowing direct data access must (!)
@@ -671,51 +688,28 @@ bpf_ctx_narrow_access_ok(u32 off, u32 size, u32 size_default)
#define bpf_classic_proglen(fprog) (fprog->len * sizeof(fprog->filter[0]))
-#ifdef CONFIG_ARCH_HAS_SET_MEMORY
static inline void bpf_prog_lock_ro(struct bpf_prog *fp)
{
- fp->locked = 1;
- WARN_ON_ONCE(set_memory_ro((unsigned long)fp, fp->pages));
+ fp->undo_set_mem = 1;
+ set_memory_ro((unsigned long)fp, fp->pages);
}
static inline void bpf_prog_unlock_ro(struct bpf_prog *fp)
{
- if (fp->locked) {
- WARN_ON_ONCE(set_memory_rw((unsigned long)fp, fp->pages));
- /* In case set_memory_rw() fails, we want to be the first
- * to crash here instead of some random place later on.
- */
- fp->locked = 0;
- }
+ if (fp->undo_set_mem)
+ set_memory_rw((unsigned long)fp, fp->pages);
}
static inline void bpf_jit_binary_lock_ro(struct bpf_binary_header *hdr)
{
- WARN_ON_ONCE(set_memory_ro((unsigned long)hdr, hdr->pages));
+ set_memory_ro((unsigned long)hdr, hdr->pages);
}
static inline void bpf_jit_binary_unlock_ro(struct bpf_binary_header *hdr)
{
- WARN_ON_ONCE(set_memory_rw((unsigned long)hdr, hdr->pages));
-}
-#else
-static inline void bpf_prog_lock_ro(struct bpf_prog *fp)
-{
+ set_memory_rw((unsigned long)hdr, hdr->pages);
}
-static inline void bpf_prog_unlock_ro(struct bpf_prog *fp)
-{
-}
-
-static inline void bpf_jit_binary_lock_ro(struct bpf_binary_header *hdr)
-{
-}
-
-static inline void bpf_jit_binary_unlock_ro(struct bpf_binary_header *hdr)
-{
-}
-#endif /* CONFIG_ARCH_HAS_SET_MEMORY */
-
static inline struct bpf_binary_header *
bpf_jit_binary_hdr(const struct bpf_prog *fp)
{
@@ -759,6 +753,7 @@ int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk);
int sk_attach_bpf(u32 ufd, struct sock *sk);
int sk_reuseport_attach_filter(struct sock_fprog *fprog, struct sock *sk);
int sk_reuseport_attach_bpf(u32 ufd, struct sock *sk);
+void sk_reuseport_prog_free(struct bpf_prog *prog);
int sk_detach_filter(struct sock *sk);
int sk_get_filter(struct sock *sk, struct sock_filter __user *filter,
unsigned int len);
@@ -786,6 +781,42 @@ static inline bool bpf_dump_raw_ok(void)
struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
const struct bpf_insn *patch, u32 len);
+static inline bool xdp_return_frame_no_direct(void)
+{
+ struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
+
+ return ri->kern_flags & BPF_RI_F_RF_NO_DIRECT;
+}
+
+static inline void xdp_set_return_frame_no_direct(void)
+{
+ struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
+
+ ri->kern_flags |= BPF_RI_F_RF_NO_DIRECT;
+}
+
+static inline void xdp_clear_return_frame_no_direct(void)
+{
+ struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
+
+ ri->kern_flags &= ~BPF_RI_F_RF_NO_DIRECT;
+}
+
+static inline int xdp_ok_fwd_dev(const struct net_device *fwd,
+ unsigned int pktlen)
+{
+ unsigned int len;
+
+ if (unlikely(!(fwd->flags & IFF_UP)))
+ return -ENETDOWN;
+
+ len = fwd->mtu + fwd->hard_header_len + VLAN_HLEN;
+ if (pktlen > len)
+ return -EMSGSIZE;
+
+ return 0;
+}
+
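For illustration only, a redirect path would typically gate the transmit on this check (xdpf and fwd are assumed to exist in the caller):

    int err;

    err = xdp_ok_fwd_dev(fwd, xdpf->len);
    if (unlikely(err))
            return err;
    /* the device is up and the frame fits; hand xdpf to its ndo_xdp_xmit() */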
/* The pair of xdp_do_redirect and xdp_do_flush_map MUST be called in the
* same cpu context. Further for best results no more than a single map
* for the do_redirect/do_flush pair should be used. This limitation is
@@ -804,6 +835,20 @@ void bpf_warn_invalid_xdp_action(u32 act);
struct sock *do_sk_redirect_map(struct sk_buff *skb);
struct sock *do_msg_redirect_map(struct sk_msg_buff *md);
+#ifdef CONFIG_INET
+struct sock *bpf_run_sk_reuseport(struct sock_reuseport *reuse, struct sock *sk,
+ struct bpf_prog *prog, struct sk_buff *skb,
+ u32 hash);
+#else
+static inline struct sock *
+bpf_run_sk_reuseport(struct sock_reuseport *reuse, struct sock *sk,
+ struct bpf_prog *prog, struct sk_buff *skb,
+ u32 hash)
+{
+ return NULL;
+}
+#endif
+
#ifdef CONFIG_BPF_JIT
extern int bpf_jit_enable;
extern int bpf_jit_harden;
@@ -961,6 +1006,9 @@ static inline void bpf_prog_kallsyms_del(struct bpf_prog *fp)
}
#endif /* CONFIG_BPF_JIT */
+void bpf_prog_kallsyms_del_subprogs(struct bpf_prog *fp);
+void bpf_prog_kallsyms_del_all(struct bpf_prog *fp);
+
#define BPF_ANC BIT(15)
static inline bool bpf_needs_clear_a(const struct sock_filter *first)
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 5c91108846db..1ec33fd0423f 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -148,6 +148,9 @@ typedef int (dio_iodone_t)(struct kiocb *iocb, loff_t offset,
/* Has write method(s) */
#define FMODE_CAN_WRITE ((__force fmode_t)0x40000)
+#define FMODE_OPENED ((__force fmode_t)0x80000)
+#define FMODE_CREATED ((__force fmode_t)0x100000)
+
/* File was opened by fanotify and shouldn't generate fanotify events */
#define FMODE_NONOTIFY ((__force fmode_t)0x4000000)
@@ -275,6 +278,7 @@ struct writeback_control;
/*
* Write life time hint values.
+ * Stored in struct inode as u8.
*/
enum rw_hint {
WRITE_LIFE_NOT_SET = 0,
@@ -609,8 +613,8 @@ struct inode {
struct timespec64 i_ctime;
spinlock_t i_lock; /* i_blocks, i_bytes, maybe i_size */
unsigned short i_bytes;
- unsigned int i_blkbits;
- enum rw_hint i_write_hint;
+ u8 i_blkbits;
+ u8 i_write_hint;
blkcnt_t i_blocks;
#ifdef __NEED_I_SIZE_ORDERED
@@ -685,6 +689,17 @@ static inline int inode_unhashed(struct inode *inode)
}
/*
+ * __mark_inode_dirty expects inodes to be hashed. Since we don't
+ * want special inodes in the fileset inode space, we make them
+ * appear hashed, but do not put on any lists. hlist_del()
+ * will work fine and require no locking.
+ */
+static inline void inode_fake_hash(struct inode *inode)
+{
+ hlist_add_fake(&inode->i_hash);
+}
+
+/*
* inode->i_mutex nesting subclasses for the lock validator:
*
* 0: the object of the current VFS operation
@@ -1720,8 +1735,6 @@ struct file_operations {
int (*iterate) (struct file *, struct dir_context *);
int (*iterate_shared) (struct file *, struct dir_context *);
__poll_t (*poll) (struct file *, struct poll_table_struct *);
- struct wait_queue_head * (*get_poll_head)(struct file *, __poll_t);
- __poll_t (*poll_mask) (struct file *, __poll_t);
long (*unlocked_ioctl) (struct file *, unsigned int, unsigned long);
long (*compat_ioctl) (struct file *, unsigned int, unsigned long);
int (*mmap) (struct file *, struct vm_area_struct *);
@@ -1778,7 +1791,7 @@ struct inode_operations {
int (*update_time)(struct inode *, struct timespec64 *, int);
int (*atomic_open)(struct inode *, struct dentry *,
struct file *, unsigned open_flag,
- umode_t create_mode, int *opened);
+ umode_t create_mode);
int (*tmpfile) (struct inode *, struct dentry *, umode_t);
int (*set_acl)(struct inode *, struct posix_acl *, int);
} ____cacheline_aligned;
@@ -2016,6 +2029,8 @@ static inline void init_sync_kiocb(struct kiocb *kiocb, struct file *filp)
* I_OVL_INUSE Used by overlayfs to get exclusive ownership on upper
* and work dirs among overlayfs mounts.
*
+ * I_CREATING New object's inode in the middle of setting up.
+ *
* Q: What is the difference between I_WILL_FREE and I_FREEING?
*/
#define I_DIRTY_SYNC (1 << 0)
@@ -2036,7 +2051,8 @@ static inline void init_sync_kiocb(struct kiocb *kiocb, struct file *filp)
#define __I_DIRTY_TIME_EXPIRED 12
#define I_DIRTY_TIME_EXPIRED (1 << __I_DIRTY_TIME_EXPIRED)
#define I_WB_SWITCH (1 << 13)
-#define I_OVL_INUSE (1 << 14)
+#define I_OVL_INUSE (1 << 14)
+#define I_CREATING (1 << 15)
#define I_DIRTY_INODE (I_DIRTY_SYNC | I_DIRTY_DATASYNC)
#define I_DIRTY (I_DIRTY_INODE | I_DIRTY_PAGES)
@@ -2422,6 +2438,10 @@ extern struct file *filp_open(const char *, int, umode_t);
extern struct file *file_open_root(struct dentry *, struct vfsmount *,
const char *, int, umode_t);
extern struct file * dentry_open(const struct path *, int, const struct cred *);
+static inline struct file *file_clone_open(struct file *file)
+{
+ return dentry_open(&file->f_path, file->f_flags, file->f_cred);
+}
extern int filp_close(struct file *, fl_owner_t id);
extern struct filename *getname_flags(const char __user *, int, int *);
@@ -2429,13 +2449,8 @@ extern struct filename *getname(const char __user *);
extern struct filename *getname_kernel(const char *);
extern void putname(struct filename *name);
-enum {
- FILE_CREATED = 1,
- FILE_OPENED = 2
-};
extern int finish_open(struct file *file, struct dentry *dentry,
- int (*open)(struct inode *, struct file *),
- int *opened);
+ int (*open)(struct inode *, struct file *));
extern int finish_no_open(struct file *file, struct dentry *dentry);
/* fs/ioctl.c */
@@ -2623,8 +2638,6 @@ static inline int filemap_fdatawait(struct address_space *mapping)
extern bool filemap_range_has_page(struct address_space *, loff_t lstart,
loff_t lend);
-extern int __must_check file_fdatawait_range(struct file *file, loff_t lstart,
- loff_t lend);
extern int filemap_write_and_wait(struct address_space *mapping);
extern int filemap_write_and_wait_range(struct address_space *mapping,
loff_t lstart, loff_t lend);
@@ -2919,6 +2932,7 @@ extern void lockdep_annotate_inode_mutex_key(struct inode *inode);
static inline void lockdep_annotate_inode_mutex_key(struct inode *inode) { };
#endif
extern void unlock_new_inode(struct inode *);
+extern void discard_new_inode(struct inode *);
extern unsigned int get_next_ino(void);
extern void evict_inodes(struct super_block *sb);
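A hedged sketch of how an ->atomic_open() implementation changes shape under the new convention: the int *opened out-parameter is gone, creation is reported via FMODE_CREATED, and finish_open() loses its last argument (my_open and the 'created' flag are placeholders):

    static int my_atomic_open(struct inode *dir, struct dentry *dentry,
                              struct file *file, unsigned open_flag,
                              umode_t create_mode)
    {
            bool created = false;

            /* ... create the object here if O_CREAT requested it ... */
            if (created)
                    file->f_mode |= FMODE_CREATED;  /* was *opened |= FILE_CREATED */

            return finish_open(file, dentry, my_open);
    }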
diff --git a/include/linux/fsl/guts.h b/include/linux/fsl/guts.h
index 3efa3b861d44..941b11811f85 100644
--- a/include/linux/fsl/guts.h
+++ b/include/linux/fsl/guts.h
@@ -16,6 +16,7 @@
#define __FSL_GUTS_H__
#include <linux/types.h>
+#include <linux/io.h>
/**
* Global Utility Registers.
diff --git a/include/linux/fsl/ptp_qoriq.h b/include/linux/fsl/ptp_qoriq.h
index b462d9ea8007..c1f003aadcce 100644
--- a/include/linux/fsl/ptp_qoriq.h
+++ b/include/linux/fsl/ptp_qoriq.h
@@ -11,9 +11,8 @@
/*
* qoriq ptp registers
- * Generated by regen.tcl on Thu May 13 01:38:57 PM CEST 2010
*/
-struct qoriq_ptp_registers {
+struct ctrl_regs {
u32 tmr_ctrl; /* Timer control register */
u32 tmr_tevent; /* Timestamp event register */
u32 tmr_temask; /* Timer event mask register */
@@ -28,22 +27,47 @@ struct qoriq_ptp_registers {
u8 res1[4];
u32 tmroff_h; /* Timer offset high */
u32 tmroff_l; /* Timer offset low */
- u8 res2[8];
+};
+
+struct alarm_regs {
u32 tmr_alarm1_h; /* Timer alarm 1 high register */
u32 tmr_alarm1_l; /* Timer alarm 1 low register */
u32 tmr_alarm2_h; /* Timer alarm 2 high register */
u32 tmr_alarm2_l; /* Timer alarm 2 high register */
- u8 res3[48];
+};
+
+struct fiper_regs {
u32 tmr_fiper1; /* Timer fixed period interval */
u32 tmr_fiper2; /* Timer fixed period interval */
u32 tmr_fiper3; /* Timer fixed period interval */
- u8 res4[20];
+};
+
+struct etts_regs {
u32 tmr_etts1_h; /* Timestamp of general purpose external trigger */
u32 tmr_etts1_l; /* Timestamp of general purpose external trigger */
u32 tmr_etts2_h; /* Timestamp of general purpose external trigger */
u32 tmr_etts2_l; /* Timestamp of general purpose external trigger */
};
+struct qoriq_ptp_registers {
+ struct ctrl_regs __iomem *ctrl_regs;
+ struct alarm_regs __iomem *alarm_regs;
+ struct fiper_regs __iomem *fiper_regs;
+ struct etts_regs __iomem *etts_regs;
+};
+
+/* Offset definitions for the four register groups */
+#define CTRL_REGS_OFFSET 0x0
+#define ALARM_REGS_OFFSET 0x40
+#define FIPER_REGS_OFFSET 0x80
+#define ETTS_REGS_OFFSET 0xa0
+
+#define FMAN_CTRL_REGS_OFFSET 0x80
+#define FMAN_ALARM_REGS_OFFSET 0xb8
+#define FMAN_FIPER_REGS_OFFSET 0xd0
+#define FMAN_ETTS_REGS_OFFSET 0xe0
+
+
/* Bit definitions for the TMR_CTRL register */
#define ALM1P (1<<31) /* Alarm1 output polarity */
#define ALM2P (1<<30) /* Alarm2 output polarity */
@@ -103,12 +127,16 @@ struct qoriq_ptp_registers {
#define DRIVER "ptp_qoriq"
-#define DEFAULT_CKSEL 1
#define N_EXT_TS 2
-#define REG_SIZE sizeof(struct qoriq_ptp_registers)
+
+#define DEFAULT_CKSEL 1
+#define DEFAULT_TMR_PRSC 2
+#define DEFAULT_FIPER1_PERIOD 1000000000
+#define DEFAULT_FIPER2_PERIOD 100000
struct qoriq_ptp {
- struct qoriq_ptp_registers __iomem *regs;
+ void __iomem *base;
+ struct qoriq_ptp_registers regs;
spinlock_t lock; /* protects regs */
struct ptp_clock *clock;
struct ptp_clock_info caps;
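Roughly how a driver is expected to wire the split register groups up from the single mapping (an FMAN-based timer would use the FMAN_* offsets instead):

    ptp->regs.ctrl_regs  = ptp->base + CTRL_REGS_OFFSET;
    ptp->regs.alarm_regs = ptp->base + ALARM_REGS_OFFSET;
    ptp->regs.fiper_regs = ptp->base + FIPER_REGS_OFFSET;
    ptp->regs.etts_regs  = ptp->base + ETTS_REGS_OFFSET;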
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index 8154f4920fcb..ebb77674be90 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -223,7 +223,6 @@ extern enum ftrace_tracing_type_t ftrace_tracing_type;
*/
int register_ftrace_function(struct ftrace_ops *ops);
int unregister_ftrace_function(struct ftrace_ops *ops);
-void clear_ftrace_function(void);
extern void ftrace_stub(unsigned long a0, unsigned long a1,
struct ftrace_ops *op, struct pt_regs *regs);
@@ -239,7 +238,6 @@ static inline int ftrace_nr_registered_ops(void)
{
return 0;
}
-static inline void clear_ftrace_function(void) { }
static inline void ftrace_kill(void) { }
static inline void ftrace_free_init_mem(void) { }
static inline void ftrace_free_mem(struct module *mod, void *start, void *end) { }
diff --git a/include/linux/fwnode.h b/include/linux/fwnode.h
index 4fe8f289b3f6..faebf0ca0686 100644
--- a/include/linux/fwnode.h
+++ b/include/linux/fwnode.h
@@ -45,7 +45,7 @@ struct fwnode_endpoint {
struct fwnode_reference_args {
struct fwnode_handle *fwnode;
unsigned int nargs;
- unsigned int args[NR_FWNODE_REFERENCE_ARGS];
+ u64 args[NR_FWNODE_REFERENCE_ARGS];
};
/**
diff --git a/include/linux/genhd.h b/include/linux/genhd.h
index 6cb8a5789668..57864422a2c8 100644
--- a/include/linux/genhd.h
+++ b/include/linux/genhd.h
@@ -16,6 +16,7 @@
#include <linux/slab.h>
#include <linux/percpu-refcount.h>
#include <linux/uuid.h>
+#include <linux/blk_types.h>
#ifdef CONFIG_BLOCK
@@ -82,10 +83,10 @@ struct partition {
} __attribute__((packed));
struct disk_stats {
- unsigned long sectors[2]; /* READs and WRITEs */
- unsigned long ios[2];
- unsigned long merges[2];
- unsigned long ticks[2];
+ unsigned long sectors[NR_STAT_GROUPS];
+ unsigned long ios[NR_STAT_GROUPS];
+ unsigned long merges[NR_STAT_GROUPS];
+ unsigned long ticks[NR_STAT_GROUPS];
unsigned long io_ticks;
unsigned long time_in_queue;
};
@@ -353,6 +354,11 @@ static inline void free_part_stats(struct hd_struct *part)
#endif /* CONFIG_SMP */
+#define part_stat_read_accum(part, field) \
+ (part_stat_read(part, field[STAT_READ]) + \
+ part_stat_read(part, field[STAT_WRITE]) + \
+ part_stat_read(part, field[STAT_DISCARD]))
+
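For example, the accumulated sector count across the read, write and discard groups can be read as:

    sectors = part_stat_read_accum(part, sectors);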
#define part_stat_add(cpu, part, field, addnd) do { \
__part_stat_add((cpu), (part), field, addnd); \
if ((part)->partno) \
diff --git a/include/linux/gpio.h b/include/linux/gpio.h
index 91ed23468530..39745b8bdd65 100644
--- a/include/linux/gpio.h
+++ b/include/linux/gpio.h
@@ -14,7 +14,7 @@
#include <linux/errno.h>
-/* see Documentation/gpio/gpio-legacy.txt */
+/* see Documentation/driver-api/gpio/legacy.rst */
/* make these flag values available regardless of GPIO kconfig options */
#define GPIOF_DIR_OUT (0 << 0)
diff --git a/include/linux/gpio/aspeed.h b/include/linux/gpio/aspeed.h
new file mode 100644
index 000000000000..1bfb3cdc86d0
--- /dev/null
+++ b/include/linux/gpio/aspeed.h
@@ -0,0 +1,15 @@
+#ifndef __GPIO_ASPEED_H
+#define __GPIO_ASPEED_H
+
+struct aspeed_gpio_copro_ops {
+ int (*request_access)(void *data);
+ int (*release_access)(void *data);
+};
+
+int aspeed_gpio_copro_grab_gpio(struct gpio_desc *desc,
+ u16 *vreg_offset, u16 *dreg_offset, u8 *bit);
+int aspeed_gpio_copro_release_gpio(struct gpio_desc *desc);
+int aspeed_gpio_copro_set_ops(const struct aspeed_gpio_copro_ops *ops, void *data);
+
+
+#endif /* __GPIO_ASPEED_H */
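A minimal sketch of a coprocessor-side user, with made-up callback and data names:

    static int my_copro_request(void *data)
    {
            /* pause coprocessor accesses to the shared GPIO bank */
            return 0;
    }

    static int my_copro_release(void *data)
    {
            /* resume coprocessor accesses */
            return 0;
    }

    static const struct aspeed_gpio_copro_ops my_copro_ops = {
            .request_access = my_copro_request,
            .release_access = my_copro_release,
    };

    /* during probe: */
    ret = aspeed_gpio_copro_set_ops(&my_copro_ops, my_data);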
diff --git a/include/linux/gpio/consumer.h b/include/linux/gpio/consumer.h
index 243112c7fa7d..21ddbe440030 100644
--- a/include/linux/gpio/consumer.h
+++ b/include/linux/gpio/consumer.h
@@ -41,11 +41,8 @@ enum gpiod_flags {
GPIOD_OUT_LOW = GPIOD_FLAGS_BIT_DIR_SET | GPIOD_FLAGS_BIT_DIR_OUT,
GPIOD_OUT_HIGH = GPIOD_FLAGS_BIT_DIR_SET | GPIOD_FLAGS_BIT_DIR_OUT |
GPIOD_FLAGS_BIT_DIR_VAL,
- GPIOD_OUT_LOW_OPEN_DRAIN = GPIOD_FLAGS_BIT_DIR_SET |
- GPIOD_FLAGS_BIT_DIR_OUT | GPIOD_FLAGS_BIT_OPEN_DRAIN,
- GPIOD_OUT_HIGH_OPEN_DRAIN = GPIOD_FLAGS_BIT_DIR_SET |
- GPIOD_FLAGS_BIT_DIR_OUT | GPIOD_FLAGS_BIT_DIR_VAL |
- GPIOD_FLAGS_BIT_OPEN_DRAIN,
+ GPIOD_OUT_LOW_OPEN_DRAIN = GPIOD_OUT_LOW | GPIOD_FLAGS_BIT_OPEN_DRAIN,
+ GPIOD_OUT_HIGH_OPEN_DRAIN = GPIOD_OUT_HIGH | GPIOD_FLAGS_BIT_OPEN_DRAIN,
};
#ifdef CONFIG_GPIOLIB
@@ -145,6 +142,7 @@ int gpiod_is_active_low(const struct gpio_desc *desc);
int gpiod_cansleep(const struct gpio_desc *desc);
int gpiod_to_irq(const struct gpio_desc *desc);
+void gpiod_set_consumer_name(struct gpio_desc *desc, const char *name);
/* Convert between the old gpio_ and new gpiod_ interfaces */
struct gpio_desc *gpio_to_desc(unsigned gpio);
@@ -467,6 +465,12 @@ static inline int gpiod_to_irq(const struct gpio_desc *desc)
return -EINVAL;
}
+static inline void gpiod_set_consumer_name(struct gpio_desc *desc, const char *name)
+{
+ /* GPIO can never have been requested */
+ WARN_ON(1);
+}
+
static inline struct gpio_desc *gpio_to_desc(unsigned gpio)
{
return ERR_PTR(-EINVAL);
diff --git a/include/linux/gpio/driver.h b/include/linux/gpio/driver.h
index 5382b5183b7e..0ea328e71ec9 100644
--- a/include/linux/gpio/driver.h
+++ b/include/linux/gpio/driver.h
@@ -201,6 +201,8 @@ static inline struct gpio_irq_chip *to_gpio_irq_chip(struct irq_chip *chip)
* @reg_set: output set register (out=high) for generic GPIO
* @reg_clr: output clear register (out=low) for generic GPIO
* @reg_dir: direction setting register for generic GPIO
+ * @bgpio_dir_inverted: indicates that the direction register is inverted
+ * (gpiolib private state variable)
* @bgpio_bits: number of register bits used for a generic GPIO i.e.
* <register width> * 8
* @bgpio_lock: used to lock chip->bgpio_data. Also, this is needed to keep
@@ -267,6 +269,7 @@ struct gpio_chip {
void __iomem *reg_set;
void __iomem *reg_clr;
void __iomem *reg_dir;
+ bool bgpio_dir_inverted;
int bgpio_bits;
spinlock_t bgpio_lock;
unsigned long bgpio_data;
diff --git a/include/linux/hid.h b/include/linux/hid.h
index 41a3d5775394..773bcb1d4044 100644
--- a/include/linux/hid.h
+++ b/include/linux/hid.h
@@ -511,6 +511,7 @@ struct hid_output_fifo {
#define HID_STAT_ADDED BIT(0)
#define HID_STAT_PARSED BIT(1)
#define HID_STAT_DUP_DETECTED BIT(2)
+#define HID_STAT_REPROBED BIT(3)
struct hid_input {
struct list_head list;
@@ -579,7 +580,7 @@ struct hid_device { /* device report descriptor */
bool battery_avoid_query;
#endif
- unsigned int status; /* see STAT flags above */
+ unsigned long status; /* see STAT flags above */
unsigned claimed; /* Claimed by hidinput, hiddev? */
unsigned quirks; /* Various quirks the device can pull on us */
bool io_started; /* If IO has started */
diff --git a/include/linux/hwmon.h b/include/linux/hwmon.h
index e5fd2707b6df..9493d4a388db 100644
--- a/include/linux/hwmon.h
+++ b/include/linux/hwmon.h
@@ -93,6 +93,7 @@ enum hwmon_temp_attributes {
#define HWMON_T_MIN_ALARM BIT(hwmon_temp_min_alarm)
#define HWMON_T_MAX_ALARM BIT(hwmon_temp_max_alarm)
#define HWMON_T_CRIT_ALARM BIT(hwmon_temp_crit_alarm)
+#define HWMON_T_LCRIT_ALARM BIT(hwmon_temp_lcrit_alarm)
#define HWMON_T_EMERGENCY_ALARM BIT(hwmon_temp_emergency_alarm)
#define HWMON_T_FAULT BIT(hwmon_temp_fault)
#define HWMON_T_OFFSET BIT(hwmon_temp_offset)
@@ -187,12 +188,16 @@ enum hwmon_power_attributes {
hwmon_power_cap_hyst,
hwmon_power_cap_max,
hwmon_power_cap_min,
+ hwmon_power_min,
hwmon_power_max,
hwmon_power_crit,
+ hwmon_power_lcrit,
hwmon_power_label,
hwmon_power_alarm,
hwmon_power_cap_alarm,
+ hwmon_power_min_alarm,
hwmon_power_max_alarm,
+ hwmon_power_lcrit_alarm,
hwmon_power_crit_alarm,
};
@@ -213,12 +218,16 @@ enum hwmon_power_attributes {
#define HWMON_P_CAP_HYST BIT(hwmon_power_cap_hyst)
#define HWMON_P_CAP_MAX BIT(hwmon_power_cap_max)
#define HWMON_P_CAP_MIN BIT(hwmon_power_cap_min)
+#define HWMON_P_MIN BIT(hwmon_power_min)
#define HWMON_P_MAX BIT(hwmon_power_max)
+#define HWMON_P_LCRIT BIT(hwmon_power_lcrit)
#define HWMON_P_CRIT BIT(hwmon_power_crit)
#define HWMON_P_LABEL BIT(hwmon_power_label)
#define HWMON_P_ALARM BIT(hwmon_power_alarm)
#define HWMON_P_CAP_ALARM BIT(hwmon_power_cap_alarm)
+#define HWMON_P_MIN_ALARM BIT(hwmon_power_min_alarm)
#define HWMON_P_MAX_ALARM BIT(hwmon_power_max_alarm)
+#define HWMON_P_LCRIT_ALARM BIT(hwmon_power_lcrit_alarm)
#define HWMON_P_CRIT_ALARM BIT(hwmon_power_crit_alarm)
enum hwmon_energy_attributes {
@@ -389,4 +398,27 @@ devm_hwmon_device_register_with_info(struct device *dev,
void hwmon_device_unregister(struct device *dev);
void devm_hwmon_device_unregister(struct device *dev);
+/**
+ * hwmon_is_bad_char - Is the char invalid in a hwmon name
+ * @ch: the char to be considered
+ *
+ * hwmon_is_bad_char() can be used to determine if the given character
+ * may not be used in a hwmon name.
+ *
+ * Returns true if the char is invalid, false otherwise.
+ */
+static inline bool hwmon_is_bad_char(const char ch)
+{
+ switch (ch) {
+ case '-':
+ case '*':
+ case ' ':
+ case '\t':
+ case '\n':
+ return true;
+ default:
+ return false;
+ }
+}
+
#endif
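A common pattern, assuming the name buffer is writable, is to sanitize a candidate name before registration:

    int i;

    for (i = 0; name[i]; i++)
            if (hwmon_is_bad_char(name[i]))
                    name[i] = '_';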
diff --git a/include/linux/i2c.h b/include/linux/i2c.h
index 254cd34eeae2..465afb092fa7 100644
--- a/include/linux/i2c.h
+++ b/include/linux/i2c.h
@@ -140,9 +140,14 @@ extern int __i2c_transfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
and probably just as fast.
Note that we use i2c_adapter here, because you do not need a specific
smbus adapter to call this function. */
-extern s32 i2c_smbus_xfer(struct i2c_adapter *adapter, u16 addr,
- unsigned short flags, char read_write, u8 command,
- int size, union i2c_smbus_data *data);
+s32 i2c_smbus_xfer(struct i2c_adapter *adapter, u16 addr,
+ unsigned short flags, char read_write, u8 command,
+ int protocol, union i2c_smbus_data *data);
+
+/* Unlocked flavor */
+s32 __i2c_smbus_xfer(struct i2c_adapter *adapter, u16 addr,
+ unsigned short flags, char read_write, u8 command,
+ int protocol, union i2c_smbus_data *data);
/* Now follow the 'nice' access routines. These also document the calling
conventions of i2c_smbus_xfer. */
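A sketch of using the unlocked flavor while holding the adapter lock yourself, e.g. to issue several transfers atomically (addr and command are whatever the caller already has):

    union i2c_smbus_data data;
    s32 status;

    i2c_lock_bus(adapter, I2C_LOCK_SEGMENT);
    status = __i2c_smbus_xfer(adapter, addr, 0, I2C_SMBUS_READ,
                              command, I2C_SMBUS_BYTE_DATA, &data);
    i2c_unlock_bus(adapter, I2C_LOCK_SEGMENT);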
diff --git a/include/linux/idle_inject.h b/include/linux/idle_inject.h
new file mode 100644
index 000000000000..bdc0293fb6cb
--- /dev/null
+++ b/include/linux/idle_inject.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018 Linaro Ltd
+ *
+ * Author: Daniel Lezcano <daniel.lezcano@linaro.org>
+ *
+ */
+#ifndef __IDLE_INJECT_H__
+#define __IDLE_INJECT_H__
+
+/* private idle injection device structure */
+struct idle_inject_device;
+
+struct idle_inject_device *idle_inject_register(struct cpumask *cpumask);
+
+void idle_inject_unregister(struct idle_inject_device *ii_dev);
+
+int idle_inject_start(struct idle_inject_device *ii_dev);
+
+void idle_inject_stop(struct idle_inject_device *ii_dev);
+
+void idle_inject_set_duration(struct idle_inject_device *ii_dev,
+ unsigned int run_duration_ms,
+ unsigned int idle_duration_ms);
+
+void idle_inject_get_duration(struct idle_inject_device *ii_dev,
+ unsigned int *run_duration_ms,
+ unsigned int *idle_duration_ms);
+#endif /* __IDLE_INJECT_H__ */
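A hedged end-to-end sketch; inject_mask is assumed to be a cpumask set up by the caller and the durations are illustrative:

    struct idle_inject_device *ii_dev;

    ii_dev = idle_inject_register(&inject_mask);
    if (!ii_dev)
            return -ENODEV;

    /* run for 60 ms, then force 40 ms of idle, repeatedly */
    idle_inject_set_duration(ii_dev, 60, 40);
    idle_inject_start(ii_dev);

    /* ... later ... */
    idle_inject_stop(ii_dev);
    idle_inject_unregister(ii_dev);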
diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h
index 8fe7e4306816..9c03a7d5e400 100644
--- a/include/linux/ieee80211.h
+++ b/include/linux/ieee80211.h
@@ -1433,11 +1433,13 @@ struct ieee80211_ht_operation {
#define IEEE80211_DELBA_PARAM_INITIATOR_MASK 0x0800
/*
- * A-PMDU buffer sizes
- * According to IEEE802.11n spec size varies from 8K to 64K (in powers of 2)
+ * A-MPDU buffer sizes
+ * According to HT, size varies from 8 to 64 frames;
+ * HE adds the ability to have up to 256 frames.
*/
-#define IEEE80211_MIN_AMPDU_BUF 0x8
-#define IEEE80211_MAX_AMPDU_BUF 0x40
+#define IEEE80211_MIN_AMPDU_BUF 0x8
+#define IEEE80211_MAX_AMPDU_BUF_HT 0x40
+#define IEEE80211_MAX_AMPDU_BUF 0x100
/* Spatial Multiplexing Power Save Modes (for capability) */
@@ -1539,6 +1541,106 @@ struct ieee80211_vht_operation {
__le16 basic_mcs_set;
} __packed;
+/**
+ * struct ieee80211_he_cap_elem - HE capabilities element
+ *
+ * This structure is the "HE capabilities element" fixed fields as
+ * described in P802.11ax_D2.0 section 9.4.2.237.2 and 9.4.2.237.3
+ */
+struct ieee80211_he_cap_elem {
+ u8 mac_cap_info[5];
+ u8 phy_cap_info[9];
+} __packed;
+
+#define IEEE80211_TX_RX_MCS_NSS_DESC_MAX_LEN 5
+
+/**
+ * enum ieee80211_he_mcs_support - HE MCS support definitions
+ * @IEEE80211_HE_MCS_SUPPORT_0_7: MCSes 0-7 are supported for the
+ * number of streams
+ * @IEEE80211_HE_MCS_SUPPORT_0_9: MCSes 0-9 are supported
+ * @IEEE80211_HE_MCS_SUPPORT_0_11: MCSes 0-11 are supported
+ * @IEEE80211_HE_MCS_NOT_SUPPORTED: This number of streams isn't supported
+ *
+ * These definitions are used in each 2-bit subfield of the rx_mcs_*
+ * and tx_mcs_* fields of &struct ieee80211_he_mcs_nss_supp, which are
+ * both split into 8 subfields by number of streams. These values indicate
+ * which MCSes are supported for the number of streams the value appears
+ * for.
+ */
+enum ieee80211_he_mcs_support {
+ IEEE80211_HE_MCS_SUPPORT_0_7 = 0,
+ IEEE80211_HE_MCS_SUPPORT_0_9 = 1,
+ IEEE80211_HE_MCS_SUPPORT_0_11 = 2,
+ IEEE80211_HE_MCS_NOT_SUPPORTED = 3,
+};
+
+/**
+ * struct ieee80211_he_mcs_nss_supp - HE Tx/Rx HE MCS NSS Support Field
+ *
+ * This structure holds the data required for the Tx/Rx HE MCS NSS Support Field
+ * described in P802.11ax_D2.0 section 9.4.2.237.4
+ *
+ * @rx_mcs_80: Rx MCS map 2 bits for each stream, total 8 streams, for channel
+ * widths less than 80MHz.
+ * @tx_mcs_80: Tx MCS map 2 bits for each stream, total 8 streams, for channel
+ * widths less than 80MHz.
+ * @rx_mcs_160: Rx MCS map 2 bits for each stream, total 8 streams, for channel
+ * width 160MHz.
+ * @tx_mcs_160: Tx MCS map 2 bits for each stream, total 8 streams, for channel
+ * width 160MHz.
+ * @rx_mcs_80p80: Rx MCS map 2 bits for each stream, total 8 streams, for
+ * channel width 80p80MHz.
+ * @tx_mcs_80p80: Tx MCS map 2 bits for each stream, total 8 streams, for
+ * channel width 80p80MHz.
+ */
+struct ieee80211_he_mcs_nss_supp {
+ __le16 rx_mcs_80;
+ __le16 tx_mcs_80;
+ __le16 rx_mcs_160;
+ __le16 tx_mcs_160;
+ __le16 rx_mcs_80p80;
+ __le16 tx_mcs_80p80;
+} __packed;
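Not part of the header, just a sketch of how the 2-bit-per-stream layout of these maps is read back (nss counts from 1 to 8):

    static inline u8 he_mcs_for_nss(__le16 mcs_map, int nss)
    {
            /* returns one of the ieee80211_he_mcs_support values */
            return (le16_to_cpu(mcs_map) >> (2 * (nss - 1))) & 0x3;
    }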
+
+/**
+ * struct ieee80211_he_operation - HE capabilities element
+ *
+ * This structure is the "HE operation element" fields as
+ * described in P802.11ax_D2.0 section 9.4.2.238
+ */
+struct ieee80211_he_operation {
+ __le32 he_oper_params;
+ __le16 he_mcs_nss_set;
+ /* Optional 0,1,3 or 4 bytes: depends on @he_oper_params */
+ u8 optional[0];
+} __packed;
+
+/**
+ * struct ieee80211_he_mu_edca_param_ac_rec - MU AC Parameter Record field
+ *
+ * This structure is the "MU AC Parameter Record" fields as
+ * described in P802.11ax_D2.0 section 9.4.2.240
+ */
+struct ieee80211_he_mu_edca_param_ac_rec {
+ u8 aifsn;
+ u8 ecw_min_max;
+ u8 mu_edca_timer;
+} __packed;
+
+/**
+ * struct ieee80211_mu_edca_param_set - MU EDCA Parameter Set element
+ *
+ * This structure is the "MU EDCA Parameter Set element" fields as
+ * described in P802.11ax_D2.0 section 9.4.2.240
+ */
+struct ieee80211_mu_edca_param_set {
+ u8 mu_qos_info;
+ struct ieee80211_he_mu_edca_param_ac_rec ac_be;
+ struct ieee80211_he_mu_edca_param_ac_rec ac_bk;
+ struct ieee80211_he_mu_edca_param_ac_rec ac_vi;
+ struct ieee80211_he_mu_edca_param_ac_rec ac_vo;
+} __packed;
/* 802.11ac VHT Capabilities */
#define IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_3895 0x00000000
@@ -1577,6 +1679,328 @@ struct ieee80211_vht_operation {
#define IEEE80211_VHT_CAP_RX_ANTENNA_PATTERN 0x10000000
#define IEEE80211_VHT_CAP_TX_ANTENNA_PATTERN 0x20000000
+/* 802.11ax HE MAC capabilities */
+#define IEEE80211_HE_MAC_CAP0_HTC_HE 0x01
+#define IEEE80211_HE_MAC_CAP0_TWT_REQ 0x02
+#define IEEE80211_HE_MAC_CAP0_TWT_RES 0x04
+#define IEEE80211_HE_MAC_CAP0_DYNAMIC_FRAG_NOT_SUPP 0x00
+#define IEEE80211_HE_MAC_CAP0_DYNAMIC_FRAG_LEVEL_1 0x08
+#define IEEE80211_HE_MAC_CAP0_DYNAMIC_FRAG_LEVEL_2 0x10
+#define IEEE80211_HE_MAC_CAP0_DYNAMIC_FRAG_LEVEL_3 0x18
+#define IEEE80211_HE_MAC_CAP0_DYNAMIC_FRAG_MASK 0x18
+#define IEEE80211_HE_MAC_CAP0_MAX_NUM_FRAG_MSDU_1 0x00
+#define IEEE80211_HE_MAC_CAP0_MAX_NUM_FRAG_MSDU_2 0x20
+#define IEEE80211_HE_MAC_CAP0_MAX_NUM_FRAG_MSDU_4 0x40
+#define IEEE80211_HE_MAC_CAP0_MAX_NUM_FRAG_MSDU_8 0x60
+#define IEEE80211_HE_MAC_CAP0_MAX_NUM_FRAG_MSDU_16 0x80
+#define IEEE80211_HE_MAC_CAP0_MAX_NUM_FRAG_MSDU_32 0xa0
+#define IEEE80211_HE_MAC_CAP0_MAX_NUM_FRAG_MSDU_64 0xc0
+#define IEEE80211_HE_MAC_CAP0_MAX_NUM_FRAG_MSDU_UNLIMITED 0xe0
+#define IEEE80211_HE_MAC_CAP0_MAX_NUM_FRAG_MSDU_MASK 0xe0
+
+#define IEEE80211_HE_MAC_CAP1_MIN_FRAG_SIZE_UNLIMITED 0x00
+#define IEEE80211_HE_MAC_CAP1_MIN_FRAG_SIZE_128 0x01
+#define IEEE80211_HE_MAC_CAP1_MIN_FRAG_SIZE_256 0x02
+#define IEEE80211_HE_MAC_CAP1_MIN_FRAG_SIZE_512 0x03
+#define IEEE80211_HE_MAC_CAP1_MIN_FRAG_SIZE_MASK 0x03
+#define IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_0US 0x00
+#define IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_8US 0x04
+#define IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_16US 0x08
+#define IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_MASK 0x0c
+#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_QOS_1 0x00
+#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_QOS_2 0x10
+#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_QOS_3 0x20
+#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_QOS_4 0x30
+#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_QOS_5 0x40
+#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_QOS_6 0x50
+#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_QOS_7 0x60
+#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_QOS_8 0x70
+#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_QOS_MASK 0x70
+
+/* Link adaptation is split between byte HE_MAC_CAP1 and
+ * HE_MAC_CAP2. It should be set only if IEEE80211_HE_MAC_CAP0_HTC_HE is set,
+ * in which case the following values apply:
+ * 0 = No feedback.
+ * 1 = reserved.
+ * 2 = Unsolicited feedback.
+ * 3 = both
+ */
+#define IEEE80211_HE_MAC_CAP1_LINK_ADAPTATION 0x80
+
+#define IEEE80211_HE_MAC_CAP2_LINK_ADAPTATION 0x01
+#define IEEE80211_HE_MAC_CAP2_ALL_ACK 0x02
+#define IEEE80211_HE_MAC_CAP2_UL_MU_RESP_SCHED 0x04
+#define IEEE80211_HE_MAC_CAP2_BSR 0x08
+#define IEEE80211_HE_MAC_CAP2_BCAST_TWT 0x10
+#define IEEE80211_HE_MAC_CAP2_32BIT_BA_BITMAP 0x20
+#define IEEE80211_HE_MAC_CAP2_MU_CASCADING 0x40
+#define IEEE80211_HE_MAC_CAP2_ACK_EN 0x80
+
+#define IEEE80211_HE_MAC_CAP3_GRP_ADDR_MULTI_STA_BA_DL_MU 0x01
+#define IEEE80211_HE_MAC_CAP3_OMI_CONTROL 0x02
+#define IEEE80211_HE_MAC_CAP3_OFDMA_RA 0x04
+
+/* The maximum length of an A-MPDU is defined by the combination of the Maximum
+ * A-MPDU Length Exponent field in the HT capabilities, VHT capabilities and the
+ * same field in the HE capabilities.
+ */
+#define IEEE80211_HE_MAC_CAP3_MAX_A_AMPDU_LEN_EXP_USE_VHT 0x00
+#define IEEE80211_HE_MAC_CAP3_MAX_A_AMPDU_LEN_EXP_VHT_1 0x08
+#define IEEE80211_HE_MAC_CAP3_MAX_A_AMPDU_LEN_EXP_VHT_2 0x10
+#define IEEE80211_HE_MAC_CAP3_MAX_A_AMPDU_LEN_EXP_RESERVED 0x18
+#define IEEE80211_HE_MAC_CAP3_MAX_A_AMPDU_LEN_EXP_MASK 0x18
+#define IEEE80211_HE_MAC_CAP3_A_AMSDU_FRAG 0x20
+#define IEEE80211_HE_MAC_CAP3_FLEX_TWT_SCHED 0x40
+#define IEEE80211_HE_MAC_CAP3_RX_CTRL_FRAME_TO_MULTIBSS 0x80
+
+#define IEEE80211_HE_MAC_CAP4_BSRP_BQRP_A_MPDU_AGG 0x01
+#define IEEE80211_HE_MAC_CAP4_QTP 0x02
+#define IEEE80211_HE_MAC_CAP4_BQR 0x04
+#define IEEE80211_HE_MAC_CAP4_SR_RESP 0x08
+#define IEEE80211_HE_MAC_CAP4_NDP_FB_REP 0x10
+#define IEEE80211_HE_MAC_CAP4_OPS 0x20
+#define IEEE80211_HE_MAC_CAP4_AMDSU_IN_AMPDU 0x40
+
+/* 802.11ax HE PHY capabilities */
+#define IEEE80211_HE_PHY_CAP0_DUAL_BAND 0x01
+#define IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G 0x02
+#define IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G 0x04
+#define IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G 0x08
+#define IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G 0x10
+#define IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_RU_MAPPING_IN_2G 0x20
+#define IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_RU_MAPPING_IN_5G 0x40
+#define IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_MASK 0xfe
+
+#define IEEE80211_HE_PHY_CAP1_PREAMBLE_PUNC_RX_80MHZ_ONLY_SECOND_20MHZ 0x01
+#define IEEE80211_HE_PHY_CAP1_PREAMBLE_PUNC_RX_80MHZ_ONLY_SECOND_40MHZ 0x02
+#define IEEE80211_HE_PHY_CAP1_PREAMBLE_PUNC_RX_160MHZ_ONLY_SECOND_20MHZ 0x04
+#define IEEE80211_HE_PHY_CAP1_PREAMBLE_PUNC_RX_160MHZ_ONLY_SECOND_40MHZ 0x08
+#define IEEE80211_HE_PHY_CAP1_PREAMBLE_PUNC_RX_MASK 0x0f
+#define IEEE80211_HE_PHY_CAP1_DEVICE_CLASS_A 0x10
+#define IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD 0x20
+#define IEEE80211_HE_PHY_CAP1_HE_LTF_AND_GI_FOR_HE_PPDUS_0_8US 0x40
+/* Midamble RX Max NSTS is split between byte #2 and byte #3 */
+#define IEEE80211_HE_PHY_CAP1_MIDAMBLE_RX_MAX_NSTS 0x80
+
+#define IEEE80211_HE_PHY_CAP2_MIDAMBLE_RX_MAX_NSTS 0x01
+#define IEEE80211_HE_PHY_CAP2_NDP_4x_LTF_AND_3_2US 0x02
+#define IEEE80211_HE_PHY_CAP2_STBC_TX_UNDER_80MHZ 0x04
+#define IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ 0x08
+#define IEEE80211_HE_PHY_CAP2_DOPPLER_TX 0x10
+#define IEEE80211_HE_PHY_CAP2_DOPPLER_RX 0x20
+
+/* Note that the meaning of UL MU below is different between an AP and a non-AP
+ * sta, where in the AP case it indicates support for Rx and in the non-AP sta
+ * case it indicates support for Tx.
+ */
+#define IEEE80211_HE_PHY_CAP2_UL_MU_FULL_MU_MIMO 0x40
+#define IEEE80211_HE_PHY_CAP2_UL_MU_PARTIAL_MU_MIMO 0x80
+
+#define IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_NO_DCM 0x00
+#define IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_BPSK 0x01
+#define IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_QPSK 0x02
+#define IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_16_QAM 0x03
+#define IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_MASK 0x03
+#define IEEE80211_HE_PHY_CAP3_DCM_MAX_TX_NSS_1 0x00
+#define IEEE80211_HE_PHY_CAP3_DCM_MAX_TX_NSS_2 0x04
+#define IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_NO_DCM 0x00
+#define IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_BPSK 0x08
+#define IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_QPSK 0x10
+#define IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_16_QAM 0x18
+#define IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_MASK 0x18
+#define IEEE80211_HE_PHY_CAP3_DCM_MAX_RX_NSS_1 0x00
+#define IEEE80211_HE_PHY_CAP3_DCM_MAX_RX_NSS_2 0x20
+#define IEEE80211_HE_PHY_CAP3_RX_HE_MU_PPDU_FROM_NON_AP_STA 0x40
+#define IEEE80211_HE_PHY_CAP3_SU_BEAMFORMER 0x80
+
+#define IEEE80211_HE_PHY_CAP4_SU_BEAMFORMEE 0x01
+#define IEEE80211_HE_PHY_CAP4_MU_BEAMFORMER 0x02
+
+/* Minimal allowed value of Max STS under 80MHz is 3 */
+#define IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_4 0x0c
+#define IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_5 0x10
+#define IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_6 0x14
+#define IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_7 0x18
+#define IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_8 0x1c
+#define IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_MASK 0x1c
+
+/* Minimal allowed value of Max STS above 80MHz is 3 */
+#define IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_4 0x60
+#define IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_5 0x80
+#define IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_6 0xa0
+#define IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_7 0xc0
+#define IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_8 0xe0
+#define IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_MASK 0xe0
+
+#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_1 0x00
+#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_2 0x01
+#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_3 0x02
+#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_4 0x03
+#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_5 0x04
+#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_6 0x05
+#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_7 0x06
+#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_8 0x07
+#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_MASK 0x07
+
+#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_1 0x00
+#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_2 0x08
+#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_3 0x10
+#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_4 0x18
+#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_5 0x20
+#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_6 0x28
+#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_7 0x30
+#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_8 0x38
+#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_MASK 0x38
+
+#define IEEE80211_HE_PHY_CAP5_NG16_SU_FEEDBACK 0x40
+#define IEEE80211_HE_PHY_CAP5_NG16_MU_FEEDBACK 0x80
+
+#define IEEE80211_HE_PHY_CAP6_CODEBOOK_SIZE_42_SU 0x01
+#define IEEE80211_HE_PHY_CAP6_CODEBOOK_SIZE_75_MU 0x02
+#define IEEE80211_HE_PHY_CAP6_TRIG_SU_BEAMFORMER_FB 0x04
+#define IEEE80211_HE_PHY_CAP6_TRIG_MU_BEAMFORMER_FB 0x08
+#define IEEE80211_HE_PHY_CAP6_TRIG_CQI_FB 0x10
+#define IEEE80211_HE_PHY_CAP6_PARTIAL_BW_EXT_RANGE 0x20
+#define IEEE80211_HE_PHY_CAP6_PARTIAL_BANDWIDTH_DL_MUMIMO 0x40
+#define IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT 0x80
+
+#define IEEE80211_HE_PHY_CAP7_SRP_BASED_SR 0x01
+#define IEEE80211_HE_PHY_CAP7_POWER_BOOST_FACTOR_AR 0x02
+#define IEEE80211_HE_PHY_CAP7_HE_SU_MU_PPDU_4XLTF_AND_08_US_GI 0x04
+#define IEEE80211_HE_PHY_CAP7_MAX_NC_1 0x08
+#define IEEE80211_HE_PHY_CAP7_MAX_NC_2 0x10
+#define IEEE80211_HE_PHY_CAP7_MAX_NC_3 0x18
+#define IEEE80211_HE_PHY_CAP7_MAX_NC_4 0x20
+#define IEEE80211_HE_PHY_CAP7_MAX_NC_5 0x28
+#define IEEE80211_HE_PHY_CAP7_MAX_NC_6 0x30
+#define IEEE80211_HE_PHY_CAP7_MAX_NC_7 0x38
+#define IEEE80211_HE_PHY_CAP7_MAX_NC_MASK 0x38
+#define IEEE80211_HE_PHY_CAP7_STBC_TX_ABOVE_80MHZ 0x40
+#define IEEE80211_HE_PHY_CAP7_STBC_RX_ABOVE_80MHZ 0x80
+
+#define IEEE80211_HE_PHY_CAP8_HE_ER_SU_PPDU_4XLTF_AND_08_US_GI 0x01
+#define IEEE80211_HE_PHY_CAP8_20MHZ_IN_40MHZ_HE_PPDU_IN_2G 0x02
+#define IEEE80211_HE_PHY_CAP8_20MHZ_IN_160MHZ_HE_PPDU 0x04
+#define IEEE80211_HE_PHY_CAP8_80MHZ_IN_160MHZ_HE_PPDU 0x08
+#define IEEE80211_HE_PHY_CAP8_HE_ER_SU_1XLTF_AND_08_US_GI 0x10
+#define IEEE80211_HE_PHY_CAP8_MIDAMBLE_RX_2X_AND_1XLTF 0x20
+
+/* 802.11ax HE TX/RX MCS NSS Support */
+#define IEEE80211_TX_RX_MCS_NSS_SUPP_HIGHEST_MCS_POS (3)
+#define IEEE80211_TX_RX_MCS_NSS_SUPP_TX_BITMAP_POS (6)
+#define IEEE80211_TX_RX_MCS_NSS_SUPP_RX_BITMAP_POS (11)
+#define IEEE80211_TX_RX_MCS_NSS_SUPP_TX_BITMAP_MASK 0x07c0
+#define IEEE80211_TX_RX_MCS_NSS_SUPP_RX_BITMAP_MASK 0xf800
+
+/* TX/RX HE MCS Support field Highest MCS subfield encoding */
+enum ieee80211_he_highest_mcs_supported_subfield_enc {
+ HIGHEST_MCS_SUPPORTED_MCS7 = 0,
+ HIGHEST_MCS_SUPPORTED_MCS8,
+ HIGHEST_MCS_SUPPORTED_MCS9,
+ HIGHEST_MCS_SUPPORTED_MCS10,
+ HIGHEST_MCS_SUPPORTED_MCS11,
+};
+
+/* Calculate 802.11ax HE capabilities IE Tx/Rx HE MCS NSS Support Field size */
+static inline u8
+ieee80211_he_mcs_nss_size(const struct ieee80211_he_cap_elem *he_cap)
+{
+ u8 count = 4;
+
+ if (he_cap->phy_cap_info[0] &
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G)
+ count += 4;
+
+ if (he_cap->phy_cap_info[0] &
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G)
+ count += 4;
+
+ return count;
+}
+
+/* 802.11ax HE PPE Thresholds */
+#define IEEE80211_PPE_THRES_NSS_SUPPORT_2NSS (1)
+#define IEEE80211_PPE_THRES_NSS_POS (0)
+#define IEEE80211_PPE_THRES_NSS_MASK (7)
+#define IEEE80211_PPE_THRES_RU_INDEX_BITMASK_2x966_AND_966_RU \
+ (BIT(5) | BIT(6))
+#define IEEE80211_PPE_THRES_RU_INDEX_BITMASK_MASK 0x78
+#define IEEE80211_PPE_THRES_RU_INDEX_BITMASK_POS (3)
+#define IEEE80211_PPE_THRES_INFO_PPET_SIZE (3)
+
+/*
+ * Calculate 802.11ax HE capabilities IE PPE field size
+ * Input: Header byte of ppe_thres (first byte), and HE capa IE's PHY cap u8*
+ */
+static inline u8
+ieee80211_he_ppe_size(u8 ppe_thres_hdr, const u8 *phy_cap_info)
+{
+ u8 n;
+
+ if ((phy_cap_info[6] &
+ IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT) == 0)
+ return 0;
+
+ n = hweight8(ppe_thres_hdr &
+ IEEE80211_PPE_THRES_RU_INDEX_BITMASK_MASK);
+ n *= (1 + ((ppe_thres_hdr & IEEE80211_PPE_THRES_NSS_MASK) >>
+ IEEE80211_PPE_THRES_NSS_POS));
+
+ /*
+ * Each pair is 6 bits, and we need to add the 7 "header" bits to the
+ * total size.
+ */
+ n = (n * IEEE80211_PPE_THRES_INFO_PPET_SIZE * 2) + 7;
+ n = DIV_ROUND_UP(n, 8);
+
+ return n;
+}
+
+/* HE Operation defines */
+#define IEEE80211_HE_OPERATION_BSS_COLOR_MASK 0x0000003f
+#define IEEE80211_HE_OPERATION_DFLT_PE_DURATION_MASK 0x000001c0
+#define IEEE80211_HE_OPERATION_DFLT_PE_DURATION_OFFSET 6
+#define IEEE80211_HE_OPERATION_TWT_REQUIRED 0x00000200
+#define IEEE80211_HE_OPERATION_RTS_THRESHOLD_MASK 0x000ffc00
+#define IEEE80211_HE_OPERATION_RTS_THRESHOLD_OFFSET 10
+#define IEEE80211_HE_OPERATION_PARTIAL_BSS_COLOR 0x000100000
+#define IEEE80211_HE_OPERATION_VHT_OPER_INFO 0x000200000
+#define IEEE80211_HE_OPERATION_MULTI_BSSID_AP 0x10000000
+#define IEEE80211_HE_OPERATION_TX_BSSID_INDICATOR 0x20000000
+#define IEEE80211_HE_OPERATION_BSS_COLOR_DISABLED 0x40000000
+
+/*
+ * ieee80211_he_oper_size - calculate 802.11ax HE Operations IE size
+ * @he_oper_ie: byte data of the HE Operation IE, starting from the byte
+ * after the ext ID byte. It is assumed that he_oper_ie has at least
+ * sizeof(struct ieee80211_he_operation) bytes, checked already in
+ * ieee802_11_parse_elems_crc()
+ * @return the actual size of the IE data (not including header), or 0 on error
+ */
+static inline u8
+ieee80211_he_oper_size(const u8 *he_oper_ie)
+{
+ struct ieee80211_he_operation *he_oper = (void *)he_oper_ie;
+ u8 oper_len = sizeof(struct ieee80211_he_operation);
+ u32 he_oper_params;
+
+ /* Make sure the input is not NULL */
+ if (!he_oper_ie)
+ return 0;
+
+ /* Calc required length */
+ he_oper_params = le32_to_cpu(he_oper->he_oper_params);
+ if (he_oper_params & IEEE80211_HE_OPERATION_VHT_OPER_INFO)
+ oper_len += 3;
+ if (he_oper_params & IEEE80211_HE_OPERATION_MULTI_BSSID_AP)
+ oper_len++;
+
+ /* Add the first byte (extension ID) to the total length */
+ oper_len++;
+
+ return oper_len;
+}
+
/* Authentication algorithms */
#define WLAN_AUTH_OPEN 0
#define WLAN_AUTH_SHARED_KEY 1
@@ -1992,6 +2416,11 @@ enum ieee80211_eid_ext {
WLAN_EID_EXT_FILS_WRAPPED_DATA = 8,
WLAN_EID_EXT_FILS_PUBLIC_KEY = 12,
WLAN_EID_EXT_FILS_NONCE = 13,
+ WLAN_EID_EXT_FUTURE_CHAN_GUIDANCE = 14,
+ WLAN_EID_EXT_HE_CAPABILITY = 35,
+ WLAN_EID_EXT_HE_OPERATION = 36,
+ WLAN_EID_EXT_UORA = 37,
+ WLAN_EID_EXT_HE_MU_EDCA = 38,
};
/* Action category code */
diff --git a/include/linux/if_bridge.h b/include/linux/if_bridge.h
index 7843b98e1c6e..c20c7e197d07 100644
--- a/include/linux/if_bridge.h
+++ b/include/linux/if_bridge.h
@@ -105,13 +105,13 @@ static inline bool br_vlan_enabled(const struct net_device *dev)
static inline int br_vlan_get_pvid(const struct net_device *dev, u16 *p_pvid)
{
- return -1;
+ return -EINVAL;
}
static inline int br_vlan_get_info(const struct net_device *dev, u16 vid,
struct bridge_vlan_info *p_vinfo)
{
- return -1;
+ return -EINVAL;
}
#endif
diff --git a/include/linux/if_team.h b/include/linux/if_team.h
index d95cae09dea0..ac42da56f7a2 100644
--- a/include/linux/if_team.h
+++ b/include/linux/if_team.h
@@ -74,6 +74,11 @@ struct team_port {
long mode_priv[0];
};
+static inline struct team_port *team_port_get_rcu(const struct net_device *dev)
+{
+ return rcu_dereference(dev->rx_handler_data);
+}
+
static inline bool team_port_enabled(struct team_port *port)
{
return port->index != -1;
@@ -84,6 +89,19 @@ static inline bool team_port_txable(struct team_port *port)
return port->linkup && team_port_enabled(port);
}
+static inline bool team_port_dev_txable(const struct net_device *port_dev)
+{
+ struct team_port *port;
+ bool txable;
+
+ rcu_read_lock();
+ port = team_port_get_rcu(port_dev);
+ txable = port ? team_port_txable(port) : false;
+ rcu_read_unlock();
+
+ return txable;
+}
+
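
team_port_dev_txable() lets code outside the team driver test a port purely through its net_device, taking the RCU lock itself. A hedged usage sketch (assumes the usual netdevice includes); my_select_port() is illustrative:

static bool my_select_port(struct net_device *port_dev)
{
	if (!netif_running(port_dev))
		return false;

	/* true only if port_dev is an enabled, linked-up team port */
	return team_port_dev_txable(port_dev);
}
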
#ifdef CONFIG_NET_POLL_CONTROLLER
static inline void team_netpoll_send_skb(struct team_port *port,
struct sk_buff *skb)
diff --git a/include/linux/igmp.h b/include/linux/igmp.h
index f8231854b5d6..119f53941c12 100644
--- a/include/linux/igmp.h
+++ b/include/linux/igmp.h
@@ -109,6 +109,8 @@ struct ip_mc_list {
extern int ip_check_mc_rcu(struct in_device *dev, __be32 mc_addr, __be32 src_addr, u8 proto);
extern int igmp_rcv(struct sk_buff *);
extern int ip_mc_join_group(struct sock *sk, struct ip_mreqn *imr);
+extern int ip_mc_join_group_ssm(struct sock *sk, struct ip_mreqn *imr,
+ unsigned int mode);
extern int ip_mc_leave_group(struct sock *sk, struct ip_mreqn *imr);
extern void ip_mc_drop_socket(struct sock *sk);
extern int ip_mc_source(int add, int omode, struct sock *sk,
diff --git a/include/linux/iio/buffer-dma.h b/include/linux/iio/buffer-dma.h
index 767467d886de..67c75372b691 100644
--- a/include/linux/iio/buffer-dma.h
+++ b/include/linux/iio/buffer-dma.h
@@ -141,7 +141,7 @@ int iio_dma_buffer_read(struct iio_buffer *buffer, size_t n,
char __user *user_buffer);
size_t iio_dma_buffer_data_available(struct iio_buffer *buffer);
int iio_dma_buffer_set_bytes_per_datum(struct iio_buffer *buffer, size_t bpd);
-int iio_dma_buffer_set_length(struct iio_buffer *buffer, int length);
+int iio_dma_buffer_set_length(struct iio_buffer *buffer, unsigned int length);
int iio_dma_buffer_request_update(struct iio_buffer *buffer);
int iio_dma_buffer_init(struct iio_dma_buffer_queue *queue,
diff --git a/include/linux/ima.h b/include/linux/ima.h
index 0e4647e0eb60..97914a2833d1 100644
--- a/include/linux/ima.h
+++ b/include/linux/ima.h
@@ -11,14 +11,16 @@
#define _LINUX_IMA_H
#include <linux/fs.h>
+#include <linux/security.h>
#include <linux/kexec.h>
struct linux_binprm;
#ifdef CONFIG_IMA
extern int ima_bprm_check(struct linux_binprm *bprm);
-extern int ima_file_check(struct file *file, int mask, int opened);
+extern int ima_file_check(struct file *file, int mask);
extern void ima_file_free(struct file *file);
extern int ima_file_mmap(struct file *file, unsigned long prot);
+extern int ima_load_data(enum kernel_load_data_id id);
extern int ima_read_file(struct file *file, enum kernel_read_file_id id);
extern int ima_post_read_file(struct file *file, void *buf, loff_t size,
enum kernel_read_file_id id);
@@ -34,7 +36,7 @@ static inline int ima_bprm_check(struct linux_binprm *bprm)
return 0;
}
-static inline int ima_file_check(struct file *file, int mask, int opened)
+static inline int ima_file_check(struct file *file, int mask)
{
return 0;
}
@@ -49,6 +51,11 @@ static inline int ima_file_mmap(struct file *file, unsigned long prot)
return 0;
}
+static inline int ima_load_data(enum kernel_load_data_id id)
+{
+ return 0;
+}
+
static inline int ima_read_file(struct file *file, enum kernel_read_file_id id)
{
return 0;
diff --git a/include/linux/inetdevice.h b/include/linux/inetdevice.h
index 27650f1bff3d..c759d1cbcedd 100644
--- a/include/linux/inetdevice.h
+++ b/include/linux/inetdevice.h
@@ -93,6 +93,7 @@ static inline void ipv4_devconf_setall(struct in_device *in_dev)
#define IN_DEV_FORWARD(in_dev) IN_DEV_CONF_GET((in_dev), FORWARDING)
#define IN_DEV_MFORWARD(in_dev) IN_DEV_ANDCONF((in_dev), MC_FORWARDING)
+#define IN_DEV_BFORWARD(in_dev) IN_DEV_ANDCONF((in_dev), BC_FORWARDING)
#define IN_DEV_RPFILTER(in_dev) IN_DEV_MAXCONF((in_dev), RP_FILTER)
#define IN_DEV_SRC_VMARK(in_dev) IN_DEV_ORCONF((in_dev), SRC_VMARK)
#define IN_DEV_SOURCE_ROUTE(in_dev) IN_DEV_ANDCONF((in_dev), \
diff --git a/include/linux/input/mt.h b/include/linux/input/mt.h
index d7188de4db96..3f4bf60b0bb5 100644
--- a/include/linux/input/mt.h
+++ b/include/linux/input/mt.h
@@ -100,7 +100,7 @@ static inline bool input_is_mt_axis(int axis)
return axis == ABS_MT_SLOT || input_is_mt_value(axis);
}
-void input_mt_report_slot_state(struct input_dev *dev,
+bool input_mt_report_slot_state(struct input_dev *dev,
unsigned int tool_type, bool active);
void input_mt_report_finger_count(struct input_dev *dev, int count);
diff --git a/include/linux/integrity.h b/include/linux/integrity.h
index 858d3f4a2241..54c853ec2fd1 100644
--- a/include/linux/integrity.h
+++ b/include/linux/integrity.h
@@ -44,4 +44,17 @@ static inline void integrity_load_keys(void)
}
#endif /* CONFIG_INTEGRITY */
+#ifdef CONFIG_INTEGRITY_ASYMMETRIC_KEYS
+
+extern int integrity_kernel_module_request(char *kmod_name);
+
+#else
+
+static inline int integrity_kernel_module_request(char *kmod_name)
+{
+ return 0;
+}
+
+#endif /* CONFIG_INTEGRITY_ASYMMETRIC_KEYS */
+
#endif /* _LINUX_INTEGRITY_H */
diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
index 1df940196ab2..ef169d67df92 100644
--- a/include/linux/intel-iommu.h
+++ b/include/linux/intel-iommu.h
@@ -121,6 +121,7 @@
#define ecap_srs(e) ((e >> 31) & 0x1)
#define ecap_ers(e) ((e >> 30) & 0x1)
#define ecap_prs(e) ((e >> 29) & 0x1)
+#define ecap_broken_pasid(e) ((e >> 28) & 0x1)
#define ecap_dis(e) ((e >> 27) & 0x1)
#define ecap_nest(e) ((e >> 26) & 0x1)
#define ecap_mts(e) ((e >> 25) & 0x1)
diff --git a/include/linux/iomap.h b/include/linux/iomap.h
index a044a824da85..3555d54bf79a 100644
--- a/include/linux/iomap.h
+++ b/include/linux/iomap.h
@@ -2,6 +2,9 @@
#ifndef LINUX_IOMAP_H
#define LINUX_IOMAP_H 1
+#include <linux/atomic.h>
+#include <linux/bitmap.h>
+#include <linux/mm.h>
#include <linux/types.h>
struct address_space;
@@ -9,6 +12,7 @@ struct fiemap_extent_info;
struct inode;
struct iov_iter;
struct kiocb;
+struct page;
struct vm_area_struct;
struct vm_fault;
@@ -29,6 +33,7 @@ struct vm_fault;
*/
#define IOMAP_F_NEW 0x01 /* blocks have been newly allocated */
#define IOMAP_F_DIRTY 0x02 /* uncommitted metadata */
+#define IOMAP_F_BUFFER_HEAD 0x04 /* file system requires buffer heads */
/*
* Flags that only need to be reported for IOMAP_REPORT requests:
@@ -55,6 +60,16 @@ struct iomap {
u16 flags; /* flags for mapping */
struct block_device *bdev; /* block device for I/O */
struct dax_device *dax_dev; /* dax_dev for dax operations */
+ void *inline_data;
+ void *private; /* filesystem private */
+
+ /*
+ * Called when finished processing a page in the mapping returned in
+ * this iomap. At least for now this is only supported in the buffered
+ * write path.
+ */
+ void (*page_done)(struct inode *inode, loff_t pos, unsigned copied,
+ struct page *page, struct iomap *iomap);
};
/*
@@ -86,8 +101,40 @@ struct iomap_ops {
ssize_t written, unsigned flags, struct iomap *iomap);
};
+/*
+ * Structure allocated for each page when block size < PAGE_SIZE to track
+ * sub-page uptodate status and I/O completions.
+ */
+struct iomap_page {
+ atomic_t read_count;
+ atomic_t write_count;
+ DECLARE_BITMAP(uptodate, PAGE_SIZE / 512);
+};
+
+static inline struct iomap_page *to_iomap_page(struct page *page)
+{
+ if (page_has_private(page))
+ return (struct iomap_page *)page_private(page);
+ return NULL;
+}
+
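
With block size < PAGE_SIZE, the uptodate bitmap in struct iomap_page tracks each sub-block of the page. A hedged sketch of how a caller might combine to_iomap_page() with that bitmap (assumes linux/fs.h and linux/bitmap.h; my_page_fully_uptodate() is illustrative, not an in-tree helper):

static bool my_page_fully_uptodate(struct page *page, struct inode *inode)
{
	struct iomap_page *iop = to_iomap_page(page);
	unsigned int nblocks = PAGE_SIZE >> inode->i_blkbits;

	/* No per-page state: the page-level flag is authoritative */
	if (!iop)
		return PageUptodate(page);

	return bitmap_full(iop->uptodate, nblocks);
}
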
ssize_t iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *from,
const struct iomap_ops *ops);
+int iomap_readpage(struct page *page, const struct iomap_ops *ops);
+int iomap_readpages(struct address_space *mapping, struct list_head *pages,
+ unsigned nr_pages, const struct iomap_ops *ops);
+int iomap_set_page_dirty(struct page *page);
+int iomap_is_partially_uptodate(struct page *page, unsigned long from,
+ unsigned long count);
+int iomap_releasepage(struct page *page, gfp_t gfp_mask);
+void iomap_invalidatepage(struct page *page, unsigned int offset,
+ unsigned int len);
+#ifdef CONFIG_MIGRATION
+int iomap_migrate_page(struct address_space *mapping, struct page *newpage,
+ struct page *page, enum migrate_mode mode);
+#else
+#define iomap_migrate_page NULL
+#endif
int iomap_file_dirty(struct inode *inode, loff_t pos, loff_t len,
const struct iomap_ops *ops);
int iomap_zero_range(struct inode *inode, loff_t pos, loff_t len,
diff --git a/include/linux/ipc.h b/include/linux/ipc.h
index 6cc2df7f7ac9..e1c9eea6015b 100644
--- a/include/linux/ipc.h
+++ b/include/linux/ipc.h
@@ -4,7 +4,7 @@
#include <linux/spinlock.h>
#include <linux/uidgid.h>
-#include <linux/rhashtable.h>
+#include <linux/rhashtable-types.h>
#include <uapi/linux/ipc.h>
#include <linux/refcount.h>
diff --git a/include/linux/ipc_namespace.h b/include/linux/ipc_namespace.h
index b5630c8eb2f3..6cea726612b7 100644
--- a/include/linux/ipc_namespace.h
+++ b/include/linux/ipc_namespace.h
@@ -9,7 +9,7 @@
#include <linux/nsproxy.h>
#include <linux/ns_common.h>
#include <linux/refcount.h>
-#include <linux/rhashtable.h>
+#include <linux/rhashtable-types.h>
struct user_namespace;
diff --git a/include/linux/irq.h b/include/linux/irq.h
index 4bd2f34947f4..201de12a9957 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -503,6 +503,7 @@ struct irq_chip {
* IRQCHIP_SKIP_SET_WAKE: Skip chip.irq_set_wake(), for this irq chip
* IRQCHIP_ONESHOT_SAFE: One shot does not require mask/unmask
* IRQCHIP_EOI_THREADED: Chip requires eoi() on unmask in threaded mode
+ * IRQCHIP_SUPPORTS_LEVEL_MSI: Chip can provide two doorbells for Level MSIs
*/
enum {
IRQCHIP_SET_TYPE_MASKED = (1 << 0),
diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h
index cbb872c1b607..9d2ea3e907d0 100644
--- a/include/linux/irqchip/arm-gic-v3.h
+++ b/include/linux/irqchip/arm-gic-v3.h
@@ -73,6 +73,7 @@
#define GICD_TYPER_MBIS (1U << 16)
#define GICD_TYPER_ID_BITS(typer) ((((typer) >> 19) & 0x1f) + 1)
+#define GICD_TYPER_NUM_LPIS(typer) ((((typer) >> 11) & 0x1f) + 1)
#define GICD_TYPER_IRQS(typer) ((((typer) & 0x1f) + 1) * 32)
#define GICD_IROUTER_SPI_MODE_ONE (0U << 31)
@@ -576,8 +577,8 @@ struct rdists {
phys_addr_t phys_base;
} __percpu *rdist;
struct page *prop_page;
- int id_bits;
u64 flags;
+ u32 gicd_typer;
bool has_vlpis;
bool has_direct_lpi;
};
diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h
index 25b33b664537..dd1e40ddac7d 100644
--- a/include/linux/irqdesc.h
+++ b/include/linux/irqdesc.h
@@ -145,11 +145,6 @@ static inline void *irq_desc_get_handler_data(struct irq_desc *desc)
return desc->irq_common_data.handler_data;
}
-static inline struct msi_desc *irq_desc_get_msi_desc(struct irq_desc *desc)
-{
- return desc->irq_common_data.msi_desc;
-}
-
/*
* Architectures call this to let the generic IRQ layer
* handle an interrupt.
diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h
index b46b541c67c4..1a0b6f17a5d6 100644
--- a/include/linux/jump_label.h
+++ b/include/linux/jump_label.h
@@ -299,12 +299,18 @@ struct static_key_false {
#define DEFINE_STATIC_KEY_TRUE(name) \
struct static_key_true name = STATIC_KEY_TRUE_INIT
+#define DEFINE_STATIC_KEY_TRUE_RO(name) \
+ struct static_key_true name __ro_after_init = STATIC_KEY_TRUE_INIT
+
#define DECLARE_STATIC_KEY_TRUE(name) \
extern struct static_key_true name
#define DEFINE_STATIC_KEY_FALSE(name) \
struct static_key_false name = STATIC_KEY_FALSE_INIT
+#define DEFINE_STATIC_KEY_FALSE_RO(name) \
+ struct static_key_false name __ro_after_init = STATIC_KEY_FALSE_INIT
+
#define DECLARE_STATIC_KEY_FALSE(name) \
extern struct static_key_false name
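
The new _RO variants place the key in .data..ro_after_init, so it can only be flipped before rodata is sealed. A hedged sketch of the intended pattern; my_feature_key, my_feature_requested() and my_feature_init() are illustrative names:

DEFINE_STATIC_KEY_FALSE_RO(my_feature_key);

static int __init my_feature_init(void)
{
	if (my_feature_requested())		/* hypothetical predicate */
		static_branch_enable(&my_feature_key);
	return 0;
}
core_initcall(my_feature_init);	/* initcalls finish before mark_rodata_ro() */

static inline bool my_feature_enabled(void)
{
	return static_branch_unlikely(&my_feature_key);
}
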
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index d23123238534..941dc0a5a877 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -666,7 +666,7 @@ do { \
* your code. (Extra memory is used for special buffers that are
* allocated when trace_printk() is used.)
*
- * A little optization trick is done here. If there's only one
+ * A little optimization trick is done here. If there's only one
* argument, there's no need to scan the string for printf formats.
* The trace_puts() will suffice. But how can we take advantage of
* using trace_puts() when trace_printk() has only one argument?
diff --git a/include/linux/kernfs.h b/include/linux/kernfs.h
index ab25c8b6d9e3..814643f7ee52 100644
--- a/include/linux/kernfs.h
+++ b/include/linux/kernfs.h
@@ -15,6 +15,7 @@
#include <linux/lockdep.h>
#include <linux/rbtree.h>
#include <linux/atomic.h>
+#include <linux/uidgid.h>
#include <linux/wait.h>
struct file;
@@ -325,12 +326,14 @@ void kernfs_destroy_root(struct kernfs_root *root);
struct kernfs_node *kernfs_create_dir_ns(struct kernfs_node *parent,
const char *name, umode_t mode,
+ kuid_t uid, kgid_t gid,
void *priv, const void *ns);
struct kernfs_node *kernfs_create_empty_dir(struct kernfs_node *parent,
const char *name);
struct kernfs_node *__kernfs_create_file(struct kernfs_node *parent,
- const char *name,
- umode_t mode, loff_t size,
+ const char *name, umode_t mode,
+ kuid_t uid, kgid_t gid,
+ loff_t size,
const struct kernfs_ops *ops,
void *priv, const void *ns,
struct lock_class_key *key);
@@ -415,12 +418,14 @@ static inline void kernfs_destroy_root(struct kernfs_root *root) { }
static inline struct kernfs_node *
kernfs_create_dir_ns(struct kernfs_node *parent, const char *name,
- umode_t mode, void *priv, const void *ns)
+ umode_t mode, kuid_t uid, kgid_t gid,
+ void *priv, const void *ns)
{ return ERR_PTR(-ENOSYS); }
static inline struct kernfs_node *
__kernfs_create_file(struct kernfs_node *parent, const char *name,
- umode_t mode, loff_t size, const struct kernfs_ops *ops,
+ umode_t mode, kuid_t uid, kgid_t gid,
+ loff_t size, const struct kernfs_ops *ops,
void *priv, const void *ns, struct lock_class_key *key)
{ return ERR_PTR(-ENOSYS); }
@@ -498,12 +503,15 @@ static inline struct kernfs_node *
kernfs_create_dir(struct kernfs_node *parent, const char *name, umode_t mode,
void *priv)
{
- return kernfs_create_dir_ns(parent, name, mode, priv, NULL);
+ return kernfs_create_dir_ns(parent, name, mode,
+ GLOBAL_ROOT_UID, GLOBAL_ROOT_GID,
+ priv, NULL);
}
static inline struct kernfs_node *
kernfs_create_file_ns(struct kernfs_node *parent, const char *name,
- umode_t mode, loff_t size, const struct kernfs_ops *ops,
+ umode_t mode, kuid_t uid, kgid_t gid,
+ loff_t size, const struct kernfs_ops *ops,
void *priv, const void *ns)
{
struct lock_class_key *key = NULL;
@@ -511,15 +519,17 @@ kernfs_create_file_ns(struct kernfs_node *parent, const char *name,
#ifdef CONFIG_DEBUG_LOCK_ALLOC
key = (struct lock_class_key *)&ops->lockdep_key;
#endif
- return __kernfs_create_file(parent, name, mode, size, ops, priv, ns,
- key);
+ return __kernfs_create_file(parent, name, mode, uid, gid,
+ size, ops, priv, ns, key);
}
static inline struct kernfs_node *
kernfs_create_file(struct kernfs_node *parent, const char *name, umode_t mode,
loff_t size, const struct kernfs_ops *ops, void *priv)
{
- return kernfs_create_file_ns(parent, name, mode, size, ops, priv, NULL);
+ return kernfs_create_file_ns(parent, name, mode,
+ GLOBAL_ROOT_UID, GLOBAL_ROOT_GID,
+ size, ops, priv, NULL);
}
static inline int kernfs_remove_by_name(struct kernfs_node *parent,
diff --git a/include/linux/kobject.h b/include/linux/kobject.h
index 7f6f93c3df9c..b49ff230beba 100644
--- a/include/linux/kobject.h
+++ b/include/linux/kobject.h
@@ -26,6 +26,7 @@
#include <linux/wait.h>
#include <linux/atomic.h>
#include <linux/workqueue.h>
+#include <linux/uidgid.h>
#define UEVENT_HELPER_PATH_LEN 256
#define UEVENT_NUM_ENVP 32 /* number of env pointers */
@@ -114,6 +115,8 @@ extern struct kobject * __must_check kobject_get_unless_zero(
extern void kobject_put(struct kobject *kobj);
extern const void *kobject_namespace(struct kobject *kobj);
+extern void kobject_get_ownership(struct kobject *kobj,
+ kuid_t *uid, kgid_t *gid);
extern char *kobject_get_path(struct kobject *kobj, gfp_t flag);
struct kobj_type {
@@ -122,6 +125,7 @@ struct kobj_type {
struct attribute **default_attrs;
const struct kobj_ns_type_operations *(*child_ns_type)(struct kobject *kobj);
const void *(*namespace)(struct kobject *kobj);
+ void (*get_ownership)(struct kobject *kobj, kuid_t *uid, kgid_t *gid);
};
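
A kobj_type can now report who should own the sysfs files created for its objects. A hedged sketch; struct my_object and its owner fields are illustrative:

struct my_object {
	struct kobject kobj;
	kuid_t owner_uid;
	kgid_t owner_gid;
};

static void my_get_ownership(struct kobject *kobj, kuid_t *uid, kgid_t *gid)
{
	struct my_object *obj = container_of(kobj, struct my_object, kobj);

	/* callers such as kobject_get_ownership() are expected to preset
	 * *uid/*gid to root defaults before invoking this hook
	 */
	*uid = obj->owner_uid;
	*gid = obj->owner_gid;
}

static struct kobj_type my_ktype = {
	.sysfs_ops	= &kobj_sysfs_ops,
	.get_ownership	= my_get_ownership,
};
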
struct kobj_uevent_env {
diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h
index 9440a2fc8893..e909413e4e38 100644
--- a/include/linux/kprobes.h
+++ b/include/linux/kprobes.h
@@ -63,7 +63,6 @@ struct pt_regs;
struct kretprobe;
struct kretprobe_instance;
typedef int (*kprobe_pre_handler_t) (struct kprobe *, struct pt_regs *);
-typedef int (*kprobe_break_handler_t) (struct kprobe *, struct pt_regs *);
typedef void (*kprobe_post_handler_t) (struct kprobe *, struct pt_regs *,
unsigned long flags);
typedef int (*kprobe_fault_handler_t) (struct kprobe *, struct pt_regs *,
@@ -101,12 +100,6 @@ struct kprobe {
*/
kprobe_fault_handler_t fault_handler;
- /*
- * ... called if breakpoint trap occurs in probe handler.
- * Return 1 if it handled break, otherwise kernel will see it.
- */
- kprobe_break_handler_t break_handler;
-
/* Saved opcode (which has been replaced with breakpoint) */
kprobe_opcode_t opcode;
@@ -155,24 +148,6 @@ static inline int kprobe_ftrace(struct kprobe *p)
}
/*
- * Special probe type that uses setjmp-longjmp type tricks to resume
- * execution at a specified entry with a matching prototype corresponding
- * to the probed function - a trick to enable arguments to become
- * accessible seamlessly by probe handling logic.
- * Note:
- * Because of the way compilers allocate stack space for local variables
- * etc upfront, regardless of sub-scopes within a function, this mirroring
- * principle currently works only for probes placed on function entry points.
- */
-struct jprobe {
- struct kprobe kp;
- void *entry; /* probe handling code to jump to */
-};
-
-/* For backward compatibility with old code using JPROBE_ENTRY() */
-#define JPROBE_ENTRY(handler) (handler)
-
-/*
* Function-return probe -
* Note:
* User needs to provide a handler function, and initialize maxactive.
@@ -389,9 +364,6 @@ int register_kprobe(struct kprobe *p);
void unregister_kprobe(struct kprobe *p);
int register_kprobes(struct kprobe **kps, int num);
void unregister_kprobes(struct kprobe **kps, int num);
-int setjmp_pre_handler(struct kprobe *, struct pt_regs *);
-int longjmp_break_handler(struct kprobe *, struct pt_regs *);
-void jprobe_return(void);
unsigned long arch_deref_entry_point(void *);
int register_kretprobe(struct kretprobe *rp);
@@ -439,9 +411,6 @@ static inline void unregister_kprobe(struct kprobe *p)
static inline void unregister_kprobes(struct kprobe **kps, int num)
{
}
-static inline void jprobe_return(void)
-{
-}
static inline int register_kretprobe(struct kretprobe *rp)
{
return -ENOSYS;
@@ -468,20 +437,6 @@ static inline int enable_kprobe(struct kprobe *kp)
return -ENOSYS;
}
#endif /* CONFIG_KPROBES */
-static inline int register_jprobe(struct jprobe *p)
-{
- return -ENOSYS;
-}
-static inline int register_jprobes(struct jprobe **jps, int num)
-{
- return -ENOSYS;
-}
-static inline void unregister_jprobe(struct jprobe *p)
-{
-}
-static inline void unregister_jprobes(struct jprobe **jps, int num)
-{
-}
static inline int disable_kretprobe(struct kretprobe *rp)
{
return disable_kprobe(&rp->kp);
@@ -490,14 +445,6 @@ static inline int enable_kretprobe(struct kretprobe *rp)
{
return enable_kprobe(&rp->kp);
}
-static inline int disable_jprobe(struct jprobe *jp)
-{
- return -ENOSYS;
-}
-static inline int enable_jprobe(struct jprobe *jp)
-{
- return -ENOSYS;
-}
#ifndef CONFIG_KPROBES
static inline bool is_kprobe_insn_slot(unsigned long addr)
diff --git a/include/linux/kthread.h b/include/linux/kthread.h
index 2803264c512f..c1961761311d 100644
--- a/include/linux/kthread.h
+++ b/include/linux/kthread.h
@@ -62,7 +62,6 @@ void *kthread_probe_data(struct task_struct *k);
int kthread_park(struct task_struct *k);
void kthread_unpark(struct task_struct *k);
void kthread_parkme(void);
-void kthread_park_complete(struct task_struct *k);
int kthreadd(void *unused);
extern struct task_struct *kthreadd_task;
diff --git a/include/linux/ktime.h b/include/linux/ktime.h
index 5b9fddbaac41..b2bb44f87f5a 100644
--- a/include/linux/ktime.h
+++ b/include/linux/ktime.h
@@ -93,8 +93,11 @@ static inline ktime_t timeval_to_ktime(struct timeval tv)
/* Map the ktime_t to timeval conversion to ns_to_timeval function */
#define ktime_to_timeval(kt) ns_to_timeval((kt))
-/* Convert ktime_t to nanoseconds - NOP in the scalar storage format: */
-#define ktime_to_ns(kt) (kt)
+/* Convert ktime_t to nanoseconds */
+static inline s64 ktime_to_ns(const ktime_t kt)
+{
+ return kt;
+}
/**
* ktime_compare - Compares two ktime_t variables for less, greater or equal
diff --git a/include/linux/leds.h b/include/linux/leds.h
index b7e82550e655..834683d603f9 100644
--- a/include/linux/leds.h
+++ b/include/linux/leds.h
@@ -253,7 +253,7 @@ static inline bool led_sysfs_is_disabled(struct led_classdev *led_cdev)
struct led_trigger {
/* Trigger Properties */
const char *name;
- void (*activate)(struct led_classdev *led_cdev);
+ int (*activate)(struct led_classdev *led_cdev);
void (*deactivate)(struct led_classdev *led_cdev);
/* LEDs under control by this trigger (for simple triggers) */
@@ -262,8 +262,19 @@ struct led_trigger {
/* Link to next registered trigger */
struct list_head next_trig;
+
+ const struct attribute_group **groups;
};
+/*
+ * Currently the attributes in struct led_trigger::groups are added directly to
+ * the LED device. As this might change in the future, the following
+ * macros abstract getting the LED device and its trigger_data from the dev
+ * parameter passed to the attribute accessor functions.
+ */
+#define led_trigger_get_led(dev) ((struct led_classdev *)dev_get_drvdata((dev)))
+#define led_trigger_get_drvdata(dev) (led_get_trigger_data(led_trigger_get_led(dev)))
+
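
Since the trigger's attribute groups are attached to the LED device itself, an attribute callback receives the LED's struct device and should go through the accessors above. A hedged sketch (assumes linux/device.h); struct my_trig_data and the interval field are illustrative:

struct my_trig_data {
	unsigned int interval;
};

static ssize_t interval_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct my_trig_data *data = led_trigger_get_drvdata(dev);

	return sprintf(buf, "%u\n", data->interval);
}
static DEVICE_ATTR_RO(interval);
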
ssize_t led_trigger_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count);
ssize_t led_trigger_show(struct device *dev, struct device_attribute *attr,
@@ -288,10 +299,16 @@ extern void led_trigger_blink_oneshot(struct led_trigger *trigger,
unsigned long *delay_off,
int invert);
extern void led_trigger_set_default(struct led_classdev *led_cdev);
-extern void led_trigger_set(struct led_classdev *led_cdev,
- struct led_trigger *trigger);
+extern int led_trigger_set(struct led_classdev *led_cdev,
+ struct led_trigger *trigger);
extern void led_trigger_remove(struct led_classdev *led_cdev);
+static inline void led_set_trigger_data(struct led_classdev *led_cdev,
+ void *trigger_data)
+{
+ led_cdev->trigger_data = trigger_data;
+}
+
static inline void *led_get_trigger_data(struct led_classdev *led_cdev)
{
return led_cdev->trigger_data;
@@ -315,6 +332,10 @@ static inline void *led_get_trigger_data(struct led_classdev *led_cdev)
extern void led_trigger_rename_static(const char *name,
struct led_trigger *trig);
+#define module_led_trigger(__led_trigger) \
+ module_driver(__led_trigger, led_trigger_register, \
+ led_trigger_unregister)
+
#else
/* Trigger has no members */
@@ -334,9 +355,14 @@ static inline void led_trigger_blink_oneshot(struct led_trigger *trigger,
unsigned long *delay_off,
int invert) {}
static inline void led_trigger_set_default(struct led_classdev *led_cdev) {}
-static inline void led_trigger_set(struct led_classdev *led_cdev,
- struct led_trigger *trigger) {}
+static inline int led_trigger_set(struct led_classdev *led_cdev,
+ struct led_trigger *trigger)
+{
+ return 0;
+}
+
static inline void led_trigger_remove(struct led_classdev *led_cdev) {}
+static inline void led_set_trigger_data(struct led_classdev *led_cdev, void *trigger_data) {}
static inline void *led_get_trigger_data(struct led_classdev *led_cdev)
{
return NULL;
diff --git a/include/linux/libata.h b/include/linux/libata.h
index 8b8946dd63b9..bc4f87cbe7f4 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -210,6 +210,7 @@ enum {
ATA_FLAG_SLAVE_POSS = (1 << 0), /* host supports slave dev */
/* (doesn't imply presence) */
ATA_FLAG_SATA = (1 << 1),
+ ATA_FLAG_NO_LPM = (1 << 2), /* host not happy with LPM */
ATA_FLAG_NO_LOG_PAGE = (1 << 5), /* do not issue log page read */
ATA_FLAG_NO_ATAPI = (1 << 6), /* No ATAPI support */
ATA_FLAG_PIO_DMA = (1 << 7), /* PIO cmds via DMA */
@@ -1110,6 +1111,8 @@ extern struct ata_host *ata_host_alloc(struct device *dev, int max_ports);
extern struct ata_host *ata_host_alloc_pinfo(struct device *dev,
const struct ata_port_info * const * ppi, int n_ports);
extern int ata_slave_link_init(struct ata_port *ap);
+extern void ata_host_get(struct ata_host *host);
+extern void ata_host_put(struct ata_host *host);
extern int ata_host_start(struct ata_host *host);
extern int ata_host_register(struct ata_host *host,
struct scsi_host_template *sht);
@@ -1495,6 +1498,29 @@ static inline bool ata_tag_valid(unsigned int tag)
return tag < ATA_MAX_QUEUE || ata_tag_internal(tag);
}
+#define __ata_qc_for_each(ap, qc, tag, max_tag, fn) \
+ for ((tag) = 0; (tag) < (max_tag) && \
+ ({ qc = fn((ap), (tag)); 1; }); (tag)++)
+
+/*
+ * Internal use only, iterate commands ignoring error handling and
+ * status of 'qc'.
+ */
+#define ata_qc_for_each_raw(ap, qc, tag) \
+ __ata_qc_for_each(ap, qc, tag, ATA_MAX_QUEUE, __ata_qc_from_tag)
+
+/*
+ * Iterate all potential commands that can be queued
+ */
+#define ata_qc_for_each(ap, qc, tag) \
+ __ata_qc_for_each(ap, qc, tag, ATA_MAX_QUEUE, ata_qc_from_tag)
+
+/*
+ * Like ata_qc_for_each, but with the internal tag included
+ */
+#define ata_qc_for_each_with_internal(ap, qc, tag) \
+ __ata_qc_for_each(ap, qc, tag, ATA_MAX_QUEUE + 1, ata_qc_from_tag)
+
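
A hedged usage sketch of the new iterators, counting commands still marked active on a port; everything except my_count_active_qcs() comes from existing libata definitions:

static unsigned int my_count_active_qcs(struct ata_port *ap)
{
	struct ata_queued_cmd *qc;
	unsigned int tag, n = 0;

	ata_qc_for_each(ap, qc, tag) {
		if (qc && (qc->flags & ATA_QCFLAG_ACTIVE))
			n++;
	}

	return n;
}
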
/*
* device helpers
*/
diff --git a/include/linux/list.h b/include/linux/list.h
index 4b129df4d46b..de04cc5ed536 100644
--- a/include/linux/list.h
+++ b/include/linux/list.h
@@ -285,6 +285,36 @@ static inline void list_cut_position(struct list_head *list,
__list_cut_position(list, head, entry);
}
+/**
+ * list_cut_before - cut a list into two, before given entry
+ * @list: a new list to add all removed entries
+ * @head: a list with entries
+ * @entry: an entry within head, could be the head itself
+ *
+ * This helper moves the initial part of @head, up to but
+ * excluding @entry, from @head to @list. You should pass
+ * in @entry an element you know is on @head. @list should
+ * be an empty list or a list you do not care about losing
+ * its data.
+ * If @entry == @head, all entries on @head are moved to
+ * @list.
+ */
+static inline void list_cut_before(struct list_head *list,
+ struct list_head *head,
+ struct list_head *entry)
+{
+ if (head->next == entry) {
+ INIT_LIST_HEAD(list);
+ return;
+ }
+ list->next = head->next;
+ list->next->prev = list;
+ list->prev = entry->prev;
+ list->prev->next = list;
+ head->next = entry;
+ entry->prev = head;
+}
+
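
A hedged usage sketch: drain everything queued ahead of a cursor entry onto a private list; pending, batch and process_entries() are illustrative names:

static LIST_HEAD(pending);

static void flush_up_to(struct list_head *cursor)
{
	LIST_HEAD(batch);

	/* batch := [first entry of pending, cursor); pending now starts at cursor */
	list_cut_before(&batch, &pending, cursor);
	process_entries(&batch);	/* hypothetical consumer */
}
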
static inline void __list_splice(const struct list_head *list,
struct list_head *prev,
struct list_head *next)
diff --git a/include/linux/lsm_hooks.h b/include/linux/lsm_hooks.h
index 8f1131c8dd54..97a020c616ad 100644
--- a/include/linux/lsm_hooks.h
+++ b/include/linux/lsm_hooks.h
@@ -576,6 +576,10 @@
* userspace to load a kernel module with the given name.
* @kmod_name name of the module requested by the kernel
* Return 0 if successful.
+ * @kernel_load_data:
+ * Load data provided by userspace.
+ * @id kernel load data identifier
+ * Return 0 if permission is granted.
* @kernel_read_file:
* Read a file specified by userspace.
* @file contains the file structure pointing to the file being read
@@ -1569,7 +1573,7 @@ union security_list_options {
int (*file_send_sigiotask)(struct task_struct *tsk,
struct fown_struct *fown, int sig);
int (*file_receive)(struct file *file);
- int (*file_open)(struct file *file, const struct cred *cred);
+ int (*file_open)(struct file *file);
int (*task_alloc)(struct task_struct *task, unsigned long clone_flags);
void (*task_free)(struct task_struct *task);
@@ -1582,6 +1586,7 @@ union security_list_options {
int (*kernel_act_as)(struct cred *new, u32 secid);
int (*kernel_create_files_as)(struct cred *new, struct inode *inode);
int (*kernel_module_request)(char *kmod_name);
+ int (*kernel_load_data)(enum kernel_load_data_id id);
int (*kernel_read_file)(struct file *file, enum kernel_read_file_id id);
int (*kernel_post_read_file)(struct file *file, char *buf, loff_t size,
enum kernel_read_file_id id);
@@ -1872,6 +1877,7 @@ struct security_hook_heads {
struct hlist_head cred_getsecid;
struct hlist_head kernel_act_as;
struct hlist_head kernel_create_files_as;
+ struct hlist_head kernel_load_data;
struct hlist_head kernel_read_file;
struct hlist_head kernel_post_read_file;
struct hlist_head kernel_module_request;
diff --git a/include/linux/marvell_phy.h b/include/linux/marvell_phy.h
index 4f5f8c21e283..1eb6f244588d 100644
--- a/include/linux/marvell_phy.h
+++ b/include/linux/marvell_phy.h
@@ -27,6 +27,8 @@
*/
#define MARVELL_PHY_ID_88E6390 0x01410f90
+#define MARVELL_PHY_FAMILY_ID(id) ((id) >> 4)
+
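
The new macro strips the low revision nibble so drivers can match a whole PHY family instead of an exact ID. A hedged sketch; my_same_family() is illustrative:

static bool my_same_family(struct phy_device *phydev, u32 family_member_id)
{
	return MARVELL_PHY_FAMILY_ID(phydev->phy_id) ==
	       MARVELL_PHY_FAMILY_ID(family_member_id);
}
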
/* struct phy_device dev_flags definitions */
#define MARVELL_PHY_M1145_FLAGS_RESISTANCE 0x00000001
#define MARVELL_PHY_M1118_DNS323_LEDS 0x00000002
diff --git a/include/linux/memblock.h b/include/linux/memblock.h
index ca59883c8364..516920549378 100644
--- a/include/linux/memblock.h
+++ b/include/linux/memblock.h
@@ -20,31 +20,60 @@
#define INIT_MEMBLOCK_REGIONS 128
#define INIT_PHYSMEM_REGIONS 4
-/* Definition of memblock flags. */
-enum {
+/**
+ * enum memblock_flags - definition of memory region attributes
+ * @MEMBLOCK_NONE: no special request
+ * @MEMBLOCK_HOTPLUG: hotpluggable region
+ * @MEMBLOCK_MIRROR: mirrored region
+ * @MEMBLOCK_NOMAP: don't add to kernel direct mapping
+ */
+enum memblock_flags {
MEMBLOCK_NONE = 0x0, /* No special request */
MEMBLOCK_HOTPLUG = 0x1, /* hotpluggable region */
MEMBLOCK_MIRROR = 0x2, /* mirrored region */
MEMBLOCK_NOMAP = 0x4, /* don't add to kernel direct mapping */
};
+/**
+ * struct memblock_region - represents a memory region
+ * @base: physical address of the region
+ * @size: size of the region
+ * @flags: memory region attributes
+ * @nid: NUMA node id
+ */
struct memblock_region {
phys_addr_t base;
phys_addr_t size;
- unsigned long flags;
+ enum memblock_flags flags;
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
int nid;
#endif
};
+/**
+ * struct memblock_type - collection of memory regions of certain type
+ * @cnt: number of regions
+ * @max: size of the allocated array
+ * @total_size: size of all regions
+ * @regions: array of regions
+ * @name: the memory type symbolic name
+ */
struct memblock_type {
- unsigned long cnt; /* number of regions */
- unsigned long max; /* size of the allocated array */
- phys_addr_t total_size; /* size of all regions */
+ unsigned long cnt;
+ unsigned long max;
+ phys_addr_t total_size;
struct memblock_region *regions;
char *name;
};
+/**
+ * struct memblock - memblock allocator metadata
+ * @bottom_up: is bottom up direction?
+ * @current_limit: physical address of the current allocation limit
+ * @memory: usable memory regions
+ * @reserved: reserved memory regions
+ * @physmem: all physical memory
+ */
struct memblock {
bool bottom_up; /* is bottom up direction? */
phys_addr_t current_limit;
@@ -72,7 +101,7 @@ void memblock_discard(void);
phys_addr_t memblock_find_in_range_node(phys_addr_t size, phys_addr_t align,
phys_addr_t start, phys_addr_t end,
- int nid, ulong flags);
+ int nid, enum memblock_flags flags);
phys_addr_t memblock_find_in_range(phys_addr_t start, phys_addr_t end,
phys_addr_t size, phys_addr_t align);
void memblock_allow_resize(void);
@@ -89,19 +118,19 @@ int memblock_clear_hotplug(phys_addr_t base, phys_addr_t size);
int memblock_mark_mirror(phys_addr_t base, phys_addr_t size);
int memblock_mark_nomap(phys_addr_t base, phys_addr_t size);
int memblock_clear_nomap(phys_addr_t base, phys_addr_t size);
-ulong choose_memblock_flags(void);
+enum memblock_flags choose_memblock_flags(void);
/* Low level functions */
int memblock_add_range(struct memblock_type *type,
phys_addr_t base, phys_addr_t size,
- int nid, unsigned long flags);
+ int nid, enum memblock_flags flags);
-void __next_mem_range(u64 *idx, int nid, ulong flags,
+void __next_mem_range(u64 *idx, int nid, enum memblock_flags flags,
struct memblock_type *type_a,
struct memblock_type *type_b, phys_addr_t *out_start,
phys_addr_t *out_end, int *out_nid);
-void __next_mem_range_rev(u64 *idx, int nid, ulong flags,
+void __next_mem_range_rev(u64 *idx, int nid, enum memblock_flags flags,
struct memblock_type *type_a,
struct memblock_type *type_b, phys_addr_t *out_start,
phys_addr_t *out_end, int *out_nid);
@@ -239,7 +268,6 @@ void __next_mem_pfn_range(int *idx, int nid, unsigned long *out_start_pfn,
/**
* for_each_resv_unavail_range - iterate through reserved and unavailable memory
* @i: u64 used as loop variable
- * @flags: pick from blocks based on memory attributes
* @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
* @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
*
@@ -253,13 +281,13 @@ void __next_mem_pfn_range(int *idx, int nid, unsigned long *out_start_pfn,
NUMA_NO_NODE, MEMBLOCK_NONE, p_start, p_end, NULL)
static inline void memblock_set_region_flags(struct memblock_region *r,
- unsigned long flags)
+ enum memblock_flags flags)
{
r->flags |= flags;
}
static inline void memblock_clear_region_flags(struct memblock_region *r,
- unsigned long flags)
+ enum memblock_flags flags)
{
r->flags &= ~flags;
}
@@ -317,10 +345,10 @@ static inline bool memblock_bottom_up(void)
phys_addr_t __init memblock_alloc_range(phys_addr_t size, phys_addr_t align,
phys_addr_t start, phys_addr_t end,
- ulong flags);
+ enum memblock_flags flags);
phys_addr_t memblock_alloc_base_nid(phys_addr_t size,
phys_addr_t align, phys_addr_t max_addr,
- int nid, ulong flags);
+ int nid, enum memblock_flags flags);
phys_addr_t memblock_alloc_base(phys_addr_t size, phys_addr_t align,
phys_addr_t max_addr);
phys_addr_t __memblock_alloc_base(phys_addr_t size, phys_addr_t align,
@@ -367,8 +395,10 @@ phys_addr_t memblock_get_current_limit(void);
*/
/**
- * memblock_region_memory_base_pfn - Return the lowest pfn intersecting with the memory region
+ * memblock_region_memory_base_pfn - get the lowest pfn of the memory region
* @reg: memblock_region structure
+ *
+ * Return: the lowest pfn intersecting with the memory region
*/
static inline unsigned long memblock_region_memory_base_pfn(const struct memblock_region *reg)
{
@@ -376,8 +406,10 @@ static inline unsigned long memblock_region_memory_base_pfn(const struct membloc
}
/**
- * memblock_region_memory_end_pfn - Return the end_pfn this region
+ * memblock_region_memory_end_pfn - get the end pfn of the memory region
* @reg: memblock_region structure
+ *
+ * Return: the end_pfn of the memory region
*/
static inline unsigned long memblock_region_memory_end_pfn(const struct memblock_region *reg)
{
@@ -385,8 +417,10 @@ static inline unsigned long memblock_region_memory_end_pfn(const struct memblock
}
/**
- * memblock_region_reserved_base_pfn - Return the lowest pfn intersecting with the reserved region
+ * memblock_region_reserved_base_pfn - get the lowest pfn of the reserved region
* @reg: memblock_region structure
+ *
+ * Return: the lowest pfn intersecting with the reserved region
*/
static inline unsigned long memblock_region_reserved_base_pfn(const struct memblock_region *reg)
{
@@ -394,8 +428,10 @@ static inline unsigned long memblock_region_reserved_base_pfn(const struct membl
}
/**
- * memblock_region_reserved_end_pfn - Return the end_pfn this region
+ * memblock_region_reserved_end_pfn - get the end pfn of the reserved region
* @reg: memblock_region structure
+ *
+ * Return: the end_pfn of the reserved region
*/
static inline unsigned long memblock_region_reserved_end_pfn(const struct memblock_region *reg)
{
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 6c6fb116e925..680d3395fc83 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -317,6 +317,9 @@ enum mem_cgroup_protection mem_cgroup_protected(struct mem_cgroup *root,
int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm,
gfp_t gfp_mask, struct mem_cgroup **memcgp,
bool compound);
+int mem_cgroup_try_charge_delay(struct page *page, struct mm_struct *mm,
+ gfp_t gfp_mask, struct mem_cgroup **memcgp,
+ bool compound);
void mem_cgroup_commit_charge(struct page *page, struct mem_cgroup *memcg,
bool lrucare, bool compound);
void mem_cgroup_cancel_charge(struct page *page, struct mem_cgroup *memcg,
@@ -789,6 +792,16 @@ static inline int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm,
return 0;
}
+static inline int mem_cgroup_try_charge_delay(struct page *page,
+ struct mm_struct *mm,
+ gfp_t gfp_mask,
+ struct mem_cgroup **memcgp,
+ bool compound)
+{
+ *memcgp = NULL;
+ return 0;
+}
+
static inline void mem_cgroup_commit_charge(struct page *page,
struct mem_cgroup *memcg,
bool lrucare, bool compound)
diff --git a/include/linux/memory.h b/include/linux/memory.h
index 31ca3e28b0eb..a6ddefc60517 100644
--- a/include/linux/memory.h
+++ b/include/linux/memory.h
@@ -38,6 +38,7 @@ struct memory_block {
int arch_get_memory_phys_device(unsigned long start_pfn);
unsigned long memory_block_size_bytes(void);
+int set_memory_block_size_order(unsigned int order);
/* These states are exposed to userspace as text strings in sysfs */
#define MEM_ONLINE (1<<0) /* exposed to userspace */
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index 122e7e9d3091..dca6ab4eaa99 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -630,6 +630,7 @@ struct mlx4_caps {
u32 vf_caps;
bool wol_port[MLX4_MAX_PORTS + 1];
struct mlx4_rate_limit_caps rl_caps;
+ u32 health_buffer_addrs;
};
struct mlx4_buf_list {
@@ -851,6 +852,12 @@ struct mlx4_vf_dev {
u8 n_ports;
};
+struct mlx4_fw_crdump {
+ bool snapshot_enable;
+ struct devlink_region *region_crspace;
+ struct devlink_region *region_fw_health;
+};
+
enum mlx4_pci_status {
MLX4_PCI_STATUS_DISABLED,
MLX4_PCI_STATUS_ENABLED,
@@ -871,6 +878,7 @@ struct mlx4_dev_persistent {
u8 interface_state;
struct mutex pci_status_mutex; /* sync pci state */
enum mlx4_pci_status pci_status;
+ struct mlx4_fw_crdump crdump;
};
struct mlx4_dev {
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
index 02f72ebf31a7..11fa4e66afc5 100644
--- a/include/linux/mlx5/device.h
+++ b/include/linux/mlx5/device.h
@@ -332,6 +332,13 @@ enum mlx5_event {
MLX5_EVENT_TYPE_FPGA_ERROR = 0x20,
MLX5_EVENT_TYPE_FPGA_QP_ERROR = 0x21,
+
+ MLX5_EVENT_TYPE_DEVICE_TRACER = 0x26,
+};
+
+enum {
+ MLX5_TRACER_SUBTYPE_OWNERSHIP_CHANGE = 0x0,
+ MLX5_TRACER_SUBTYPE_TRACES_AVAILABLE = 0x1,
};
enum {
@@ -750,7 +757,7 @@ enum {
#define MLX5_MINI_CQE_ARRAY_SIZE 8
-static inline int mlx5_get_cqe_format(struct mlx5_cqe64 *cqe)
+static inline u8 mlx5_get_cqe_format(struct mlx5_cqe64 *cqe)
{
return (cqe->op_own >> 2) & 0x3;
}
@@ -770,14 +777,14 @@ static inline u8 get_cqe_l3_hdr_type(struct mlx5_cqe64 *cqe)
return (cqe->l4_l3_hdr_type >> 2) & 0x3;
}
-static inline u8 cqe_is_tunneled(struct mlx5_cqe64 *cqe)
+static inline bool cqe_is_tunneled(struct mlx5_cqe64 *cqe)
{
return cqe->outer_l3_tunneled & 0x1;
}
-static inline int cqe_has_vlan(struct mlx5_cqe64 *cqe)
+static inline bool cqe_has_vlan(struct mlx5_cqe64 *cqe)
{
- return !!(cqe->l4_l3_hdr_type & 0x1);
+ return cqe->l4_l3_hdr_type & 0x1;
}
static inline u64 get_cqe_ts(struct mlx5_cqe64 *cqe)
@@ -939,9 +946,9 @@ enum {
};
enum {
- MLX5_ESW_VPORT_ADMIN_STATE_DOWN = 0x0,
- MLX5_ESW_VPORT_ADMIN_STATE_UP = 0x1,
- MLX5_ESW_VPORT_ADMIN_STATE_AUTO = 0x2,
+ MLX5_VPORT_ADMIN_STATE_DOWN = 0x0,
+ MLX5_VPORT_ADMIN_STATE_UP = 0x1,
+ MLX5_VPORT_ADMIN_STATE_AUTO = 0x2,
};
enum {
@@ -1071,6 +1078,9 @@ enum mlx5_qcam_feature_groups {
#define MLX5_CAP_GEN(mdev, cap) \
MLX5_GET(cmd_hca_cap, mdev->caps.hca_cur[MLX5_CAP_GENERAL], cap)
+#define MLX5_CAP_GEN_64(mdev, cap) \
+ MLX5_GET64(cmd_hca_cap, mdev->caps.hca_cur[MLX5_CAP_GENERAL], cap)
+
#define MLX5_CAP_GEN_MAX(mdev, cap) \
MLX5_GET(cmd_hca_cap, mdev->caps.hca_max[MLX5_CAP_GENERAL], cap)
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index 80cbb7fdce4a..54f385cc8811 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -138,9 +138,14 @@ enum {
MLX5_REG_HOST_ENDIANNESS = 0x7004,
MLX5_REG_MCIA = 0x9014,
MLX5_REG_MLCR = 0x902b,
+ MLX5_REG_MTRC_CAP = 0x9040,
+ MLX5_REG_MTRC_CONF = 0x9041,
+ MLX5_REG_MTRC_STDB = 0x9042,
+ MLX5_REG_MTRC_CTRL = 0x9043,
MLX5_REG_MPCNT = 0x9051,
MLX5_REG_MTPPS = 0x9053,
MLX5_REG_MTPPSE = 0x9054,
+ MLX5_REG_MPEGC = 0x9056,
MLX5_REG_MCQI = 0x9061,
MLX5_REG_MCC = 0x9062,
MLX5_REG_MCDA = 0x9063,
@@ -358,6 +363,7 @@ struct mlx5_frag_buf_ctrl {
struct mlx5_frag_buf frag_buf;
u32 sz_m1;
u32 frag_sz_m1;
+ u32 strides_offset;
u8 log_sz;
u8 log_stride;
u8 log_frag_strides;
@@ -811,6 +817,9 @@ struct mlx5_clock {
struct mlx5_pps pps_info;
};
+struct mlx5_fw_tracer;
+struct mlx5_vxlan;
+
struct mlx5_core_dev {
struct pci_dev *pdev;
/* sync pci state */
@@ -842,6 +851,7 @@ struct mlx5_core_dev {
atomic_t num_qps;
u32 issi;
struct mlx5e_resources mlx5e_res;
+ struct mlx5_vxlan *vxlan;
struct {
struct mlx5_rsvd_gids reserved_gids;
u32 roce_en;
@@ -855,6 +865,7 @@ struct mlx5_core_dev {
struct mlx5_clock clock;
struct mlx5_ib_clock_info *clock_info;
struct page *clock_info_page;
+ struct mlx5_fw_tracer *tracer;
};
struct mlx5_db {
@@ -983,14 +994,22 @@ static inline u32 mlx5_base_mkey(const u32 key)
return key & 0xffffff00u;
}
-static inline void mlx5_fill_fbc(u8 log_stride, u8 log_sz,
- struct mlx5_frag_buf_ctrl *fbc)
+static inline void mlx5_fill_fbc_offset(u8 log_stride, u8 log_sz,
+ u32 strides_offset,
+ struct mlx5_frag_buf_ctrl *fbc)
{
fbc->log_stride = log_stride;
fbc->log_sz = log_sz;
fbc->sz_m1 = (1 << fbc->log_sz) - 1;
fbc->log_frag_strides = PAGE_SHIFT - fbc->log_stride;
fbc->frag_sz_m1 = (1 << fbc->log_frag_strides) - 1;
+ fbc->strides_offset = strides_offset;
+}
+
+static inline void mlx5_fill_fbc(u8 log_stride, u8 log_sz,
+ struct mlx5_frag_buf_ctrl *fbc)
+{
+ mlx5_fill_fbc_offset(log_stride, log_sz, 0, fbc);
}
static inline void mlx5_core_init_cq_frag_buf(struct mlx5_frag_buf_ctrl *fbc,
@@ -1004,7 +1023,10 @@ static inline void mlx5_core_init_cq_frag_buf(struct mlx5_frag_buf_ctrl *fbc,
static inline void *mlx5_frag_buf_get_wqe(struct mlx5_frag_buf_ctrl *fbc,
u32 ix)
{
- unsigned int frag = (ix >> fbc->log_frag_strides);
+ unsigned int frag;
+
+ ix += fbc->strides_offset;
+ frag = ix >> fbc->log_frag_strides;
return fbc->frag_buf.frags[frag].buf +
((fbc->frag_sz_m1 & ix) << fbc->log_stride);
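
The standalone sketch below (not kernel code) mirrors the index math of mlx5_frag_buf_get_wqe() so the effect of the new strides_offset member is visible; all numbers are made up for illustration.

#include <stdio.h>

int main(void)
{
	unsigned int log_stride = 6;			/* 64-byte strides          */
	unsigned int page_shift = 12;			/* 4 KiB fragments          */
	unsigned int log_frag_strides = page_shift - log_stride;
	unsigned int frag_sz_m1 = (1u << log_frag_strides) - 1;
	unsigned int strides_offset = 32;		/* e.g. queue placed mid-buffer */
	unsigned int ix = 40;				/* caller-visible index     */

	ix += strides_offset;				/* 72 */
	printf("fragment %u, byte offset %u\n",
	       ix >> log_frag_strides,			/* 1   */
	       (frag_sz_m1 & ix) << log_stride);	/* 512 */
	return 0;
}
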
@@ -1067,8 +1089,6 @@ int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev,
struct mlx5_core_mkey *mkey);
int mlx5_core_query_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mkey *mkey,
u32 *out, int outlen);
-int mlx5_core_dump_fill_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mkey *_mkey,
- u32 *mkey);
int mlx5_core_alloc_pd(struct mlx5_core_dev *dev, u32 *pdn);
int mlx5_core_dealloc_pd(struct mlx5_core_dev *dev, u32 pdn);
int mlx5_core_mad_ifc(struct mlx5_core_dev *dev, const void *inb, void *outb,
diff --git a/include/linux/mlx5/eswitch.h b/include/linux/mlx5/eswitch.h
index d3c9db492b30..fab5121ffb8f 100644
--- a/include/linux/mlx5/eswitch.h
+++ b/include/linux/mlx5/eswitch.h
@@ -8,6 +8,8 @@
#include <linux/mlx5/driver.h>
+#define MLX5_ESWITCH_MANAGER(mdev) MLX5_CAP_GEN(mdev, eswitch_manager)
+
enum {
SRIOV_NONE,
SRIOV_LEGACY,
diff --git a/include/linux/mlx5/fs.h b/include/linux/mlx5/fs.h
index 757b4a30281e..71fb503b2b52 100644
--- a/include/linux/mlx5/fs.h
+++ b/include/linux/mlx5/fs.h
@@ -152,6 +152,8 @@ struct mlx5_fs_vlan {
u8 prio;
};
+#define MLX5_FS_VLAN_DEPTH 2
+
struct mlx5_flow_act {
u32 action;
bool has_flow_tag;
@@ -159,7 +161,7 @@ struct mlx5_flow_act {
u32 encap_id;
u32 modify_id;
uintptr_t esp_id;
- struct mlx5_fs_vlan vlan;
+ struct mlx5_fs_vlan vlan[MLX5_FS_VLAN_DEPTH];
struct ib_counters *counters;
};
@@ -175,7 +177,7 @@ mlx5_add_flow_rules(struct mlx5_flow_table *ft,
struct mlx5_flow_spec *spec,
struct mlx5_flow_act *flow_act,
struct mlx5_flow_destination *dest,
- int dest_num);
+ int num_dest);
void mlx5_del_flow_rules(struct mlx5_flow_handle *fr);
int mlx5_modify_rule_destination(struct mlx5_flow_handle *handler,
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index 27134c4fcb76..6ead9c1a5396 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -76,6 +76,15 @@ enum {
};
enum {
+ MLX5_GENERAL_OBJ_TYPES_CAP_UCTX = (1ULL << 4),
+ MLX5_GENERAL_OBJ_TYPES_CAP_UMEM = (1ULL << 5),
+};
+
+enum {
+ MLX5_OBJ_TYPE_UCTX = 0x0004,
+};
+
+enum {
MLX5_CMD_OP_QUERY_HCA_CAP = 0x100,
MLX5_CMD_OP_QUERY_ADAPTER = 0x101,
MLX5_CMD_OP_INIT_HCA = 0x102,
@@ -242,6 +251,8 @@ enum {
MLX5_CMD_OP_FPGA_QUERY_QP = 0x962,
MLX5_CMD_OP_FPGA_DESTROY_QP = 0x963,
MLX5_CMD_OP_FPGA_QUERY_QP_COUNTERS = 0x964,
+ MLX5_CMD_OP_CREATE_GENERAL_OBJECT = 0xa00,
+ MLX5_CMD_OP_DESTROY_GENERAL_OBJECT = 0xa03,
MLX5_CMD_OP_MAX
};
@@ -326,7 +337,10 @@ struct mlx5_ifc_flow_table_prop_layout_bits {
u8 reserved_at_9[0x1];
u8 pop_vlan[0x1];
u8 push_vlan[0x1];
- u8 reserved_at_c[0x14];
+ u8 reserved_at_c[0x1];
+ u8 pop_vlan_2[0x1];
+ u8 push_vlan_2[0x1];
+ u8 reserved_at_f[0x11];
u8 reserved_at_20[0x2];
u8 log_max_ft_size[0x6];
@@ -654,7 +668,9 @@ struct mlx5_ifc_per_protocol_networking_offload_caps_bits {
u8 swp[0x1];
u8 swp_csum[0x1];
u8 swp_lso[0x1];
- u8 reserved_at_23[0x1b];
+ u8 reserved_at_23[0xd];
+ u8 max_vxlan_udp_ports[0x8];
+ u8 reserved_at_38[0x6];
u8 max_geneve_opt_len[0x1];
u8 tunnel_stateless_geneve_rx[0x1];
@@ -874,7 +890,9 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 log_max_eq_sz[0x8];
u8 reserved_at_e8[0x2];
u8 log_max_mkey[0x6];
- u8 reserved_at_f0[0xc];
+ u8 reserved_at_f0[0x8];
+ u8 dump_fill_mkey[0x1];
+ u8 reserved_at_f9[0x3];
u8 log_max_eq[0x4];
u8 max_indirection[0x8];
@@ -922,7 +940,7 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 vnic_env_queue_counters[0x1];
u8 ets[0x1];
u8 nic_flow_table[0x1];
- u8 eswitch_flow_table[0x1];
+ u8 eswitch_manager[0x1];
u8 device_memory[0x1];
u8 mcam_reg[0x1];
u8 pcam_reg[0x1];
@@ -1113,7 +1131,12 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 reserved_at_3f8[0x3];
u8 log_max_current_uc_list[0x5];
- u8 reserved_at_400[0x80];
+ u8 general_obj_types[0x40];
+
+ u8 reserved_at_440[0x20];
+
+ u8 reserved_at_460[0x10];
+ u8 max_num_eqs[0x10];
u8 reserved_at_480[0x3];
u8 log_max_l2_table[0x5];
@@ -1668,7 +1691,11 @@ struct mlx5_ifc_eth_extended_cntrs_grp_data_layout_bits {
u8 rx_buffer_full_low[0x20];
- u8 reserved_at_1c0[0x600];
+ u8 rx_icrc_encapsulated_high[0x20];
+
+ u8 rx_icrc_encapsulated_low[0x20];
+
+ u8 reserved_at_200[0x5c0];
};
struct mlx5_ifc_eth_3635_cntrs_grp_data_layout_bits {
@@ -2367,6 +2394,8 @@ enum {
MLX5_FLOW_CONTEXT_ACTION_MOD_HDR = 0x40,
MLX5_FLOW_CONTEXT_ACTION_VLAN_POP = 0x80,
MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH = 0x100,
+ MLX5_FLOW_CONTEXT_ACTION_VLAN_POP_2 = 0x400,
+ MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2 = 0x800,
};
struct mlx5_ifc_vlan_bits {
@@ -2397,7 +2426,9 @@ struct mlx5_ifc_flow_context_bits {
u8 modify_header_id[0x20];
- u8 reserved_at_100[0x100];
+ struct mlx5_ifc_vlan_bits push_vlan_2;
+
+ u8 reserved_at_120[0xe0];
struct mlx5_ifc_fte_match_param_bits match_value;
@@ -3733,8 +3764,8 @@ struct mlx5_ifc_query_vport_state_out_bits {
};
enum {
- MLX5_QUERY_VPORT_STATE_IN_OP_MOD_VNIC_VPORT = 0x0,
- MLX5_QUERY_VPORT_STATE_IN_OP_MOD_ESW_VPORT = 0x1,
+ MLX5_VPORT_STATE_OP_MOD_VNIC_VPORT = 0x0,
+ MLX5_VPORT_STATE_OP_MOD_ESW_VPORT = 0x1,
};
struct mlx5_ifc_query_vport_state_in_bits {
@@ -8030,9 +8061,23 @@ struct mlx5_ifc_peir_reg_bits {
u8 error_type[0x8];
};
-struct mlx5_ifc_pcam_enhanced_features_bits {
- u8 reserved_at_0[0x76];
+struct mlx5_ifc_mpegc_reg_bits {
+ u8 reserved_at_0[0x30];
+ u8 field_select[0x10];
+ u8 tx_overflow_sense[0x1];
+ u8 mark_cqe[0x1];
+ u8 mark_cnp[0x1];
+ u8 reserved_at_43[0x1b];
+ u8 tx_lossy_overflow_oper[0x2];
+
+ u8 reserved_at_60[0x100];
+};
+
+struct mlx5_ifc_pcam_enhanced_features_bits {
+ u8 reserved_at_0[0x6d];
+ u8 rx_icrc_encapsulated_counter[0x1];
+ u8 reserved_at_6e[0x8];
u8 pfcc_mask[0x1];
u8 reserved_at_77[0x4];
u8 rx_buffer_fullness_counters[0x1];
@@ -8077,7 +8122,11 @@ struct mlx5_ifc_pcam_reg_bits {
};
struct mlx5_ifc_mcam_enhanced_features_bits {
- u8 reserved_at_0[0x7b];
+ u8 reserved_at_0[0x74];
+ u8 mark_tx_action_cnp[0x1];
+ u8 mark_tx_action_cqe[0x1];
+ u8 dynamic_tx_overflow[0x1];
+ u8 reserved_at_77[0x4];
u8 pcie_outbound_stalled[0x1];
u8 tx_overflow_buffer_pkt[0x1];
u8 mtpps_enh_out_per_adj[0x1];
@@ -8092,7 +8141,11 @@ struct mlx5_ifc_mcam_access_reg_bits {
u8 mcqi[0x1];
u8 reserved_at_1f[0x1];
- u8 regs_95_to_64[0x20];
+ u8 regs_95_to_87[0x9];
+ u8 mpegc[0x1];
+ u8 regs_85_to_68[0x12];
+ u8 tracer_registers[0x4];
+
u8 regs_63_to_32[0x20];
u8 regs_31_to_0[0x20];
};
@@ -9115,4 +9168,113 @@ struct mlx5_ifc_dealloc_memic_out_bits {
u8 reserved_at_40[0x40];
};
+struct mlx5_ifc_general_obj_in_cmd_hdr_bits {
+ u8 opcode[0x10];
+ u8 uid[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 obj_type[0x10];
+
+ u8 obj_id[0x20];
+
+ u8 reserved_at_60[0x20];
+};
+
+struct mlx5_ifc_general_obj_out_cmd_hdr_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 obj_id[0x20];
+
+ u8 reserved_at_60[0x20];
+};
+
+struct mlx5_ifc_umem_bits {
+ u8 modify_field_select[0x40];
+
+ u8 reserved_at_40[0x5b];
+ u8 log_page_size[0x5];
+
+ u8 page_offset[0x20];
+
+ u8 num_of_mtt[0x40];
+
+ struct mlx5_ifc_mtt_bits mtt[0];
+};
+
+struct mlx5_ifc_uctx_bits {
+ u8 modify_field_select[0x40];
+
+ u8 reserved_at_40[0x1c0];
+};
+
+struct mlx5_ifc_create_umem_in_bits {
+ struct mlx5_ifc_general_obj_in_cmd_hdr_bits hdr;
+ struct mlx5_ifc_umem_bits umem;
+};
+
+struct mlx5_ifc_create_uctx_in_bits {
+ struct mlx5_ifc_general_obj_in_cmd_hdr_bits hdr;
+ struct mlx5_ifc_uctx_bits uctx;
+};
+
+struct mlx5_ifc_mtrc_string_db_param_bits {
+ u8 string_db_base_address[0x20];
+
+ u8 reserved_at_20[0x8];
+ u8 string_db_size[0x18];
+};
+
+struct mlx5_ifc_mtrc_cap_bits {
+ u8 trace_owner[0x1];
+ u8 trace_to_memory[0x1];
+ u8 reserved_at_2[0x4];
+ u8 trc_ver[0x2];
+ u8 reserved_at_8[0x14];
+ u8 num_string_db[0x4];
+
+ u8 first_string_trace[0x8];
+ u8 num_string_trace[0x8];
+ u8 reserved_at_30[0x28];
+
+ u8 log_max_trace_buffer_size[0x8];
+
+ u8 reserved_at_60[0x20];
+
+ struct mlx5_ifc_mtrc_string_db_param_bits string_db_param[8];
+
+ u8 reserved_at_280[0x180];
+};
+
+struct mlx5_ifc_mtrc_conf_bits {
+ u8 reserved_at_0[0x1c];
+ u8 trace_mode[0x4];
+ u8 reserved_at_20[0x18];
+ u8 log_trace_buffer_size[0x8];
+ u8 trace_mkey[0x20];
+ u8 reserved_at_60[0x3a0];
+};
+
+struct mlx5_ifc_mtrc_stdb_bits {
+ u8 string_db_index[0x4];
+ u8 reserved_at_4[0x4];
+ u8 read_size[0x18];
+ u8 start_offset[0x20];
+ u8 string_db_data[0];
+};
+
+struct mlx5_ifc_mtrc_ctrl_bits {
+ u8 trace_status[0x2];
+ u8 reserved_at_2[0x2];
+ u8 arm_event[0x1];
+ u8 reserved_at_5[0xb];
+ u8 modify_field_select[0x10];
+ u8 reserved_at_20[0x2b];
+ u8 current_timestamp52_32[0x15];
+ u8 current_timestamp31_0[0x20];
+ u8 reserved_at_80[0x180];
+};
+
#endif /* MLX5_IFC_H */
diff --git a/include/linux/mlx5/mlx5_ifc_fpga.h b/include/linux/mlx5/mlx5_ifc_fpga.h
index 64d0f40d4cc3..37e065a80a43 100644
--- a/include/linux/mlx5/mlx5_ifc_fpga.h
+++ b/include/linux/mlx5/mlx5_ifc_fpga.h
@@ -576,6 +576,7 @@ struct mlx5_ifc_fpga_ipsec_sa {
enum fpga_tls_cmds {
CMD_SETUP_STREAM = 0x1001,
CMD_TEARDOWN_STREAM = 0x1002,
+ CMD_RESYNC_RX = 0x1003,
};
#define MLX5_TLS_1_2 (0)
diff --git a/include/linux/mlx5/vport.h b/include/linux/mlx5/vport.h
index 9208cb8809ac..7e7c6dfcfb09 100644
--- a/include/linux/mlx5/vport.h
+++ b/include/linux/mlx5/vport.h
@@ -43,8 +43,6 @@ enum {
};
u8 mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod, u16 vport);
-u8 mlx5_query_vport_admin_state(struct mlx5_core_dev *mdev, u8 opmod,
- u16 vport);
int mlx5_modify_vport_admin_state(struct mlx5_core_dev *mdev, u8 opmod,
u16 vport, u8 state);
int mlx5_query_nic_vport_mac_address(struct mlx5_core_dev *mdev,
diff --git a/include/linux/mm.h b/include/linux/mm.h
index a0fbb9ffe380..68a5121694ef 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -155,7 +155,9 @@ extern int overcommit_kbytes_handler(struct ctl_table *, int, void __user *,
* mmap() functions).
*/
-extern struct kmem_cache *vm_area_cachep;
+struct vm_area_struct *vm_area_alloc(struct mm_struct *);
+struct vm_area_struct *vm_area_dup(struct vm_area_struct *);
+void vm_area_free(struct vm_area_struct *);
#ifndef CONFIG_MMU
extern struct rb_root nommu_region_tree;
@@ -450,6 +452,23 @@ struct vm_operations_struct {
unsigned long addr);
};
+static inline void vma_init(struct vm_area_struct *vma, struct mm_struct *mm)
+{
+ static const struct vm_operations_struct dummy_vm_ops = {};
+
+ vma->vm_mm = mm;
+ vma->vm_ops = &dummy_vm_ops;
+ INIT_LIST_HEAD(&vma->anon_vma_chain);
+}
+
+static inline void vma_set_anonymous(struct vm_area_struct *vma)
+{
+ vma->vm_ops = NULL;
+}
+
+/* flush_tlb_range() takes a vma, not a mm, and can care about flags */
+#define TLB_FLUSH_VMA(mm,flags) { .vm_mm = (mm), .vm_flags = (flags) }
+
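
A hedged sketch of the intended call pattern for the new VMA helpers. vm_area_alloc() is expected to hand back a VMA already initialised via vma_init() (that detail lives in kernel/fork.c, not in this header); callers then fill in the range and, for anonymous mappings, clear vm_ops with vma_set_anonymous(). my_build_anon_vma() is illustrative:

static struct vm_area_struct *my_build_anon_vma(struct mm_struct *mm,
						unsigned long start,
						unsigned long end)
{
	struct vm_area_struct *vma = vm_area_alloc(mm);

	if (!vma)
		return NULL;

	vma_set_anonymous(vma);		/* anonymous mappings have no vm_ops */
	vma->vm_start = start;
	vma->vm_end = end;
	vma->vm_pgoff = start >> PAGE_SHIFT;
	return vma;
}
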
struct mmu_gather;
struct inode;
@@ -2132,7 +2151,7 @@ extern int __meminit __early_pfn_to_nid(unsigned long pfn,
struct mminit_pfnnid_cache *state);
#endif
-#ifdef CONFIG_HAVE_MEMBLOCK
+#if defined(CONFIG_HAVE_MEMBLOCK) && !defined(CONFIG_FLAT_NODE_MEM_MAP)
void zero_resv_unavail(void);
#else
static inline void zero_resv_unavail(void) {}
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 99ce070e7dcb..efdc24dd9e97 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -335,176 +335,183 @@ struct core_state {
struct kioctx_table;
struct mm_struct {
- struct vm_area_struct *mmap; /* list of VMAs */
- struct rb_root mm_rb;
- u32 vmacache_seqnum; /* per-thread vmacache */
+ struct {
+ struct vm_area_struct *mmap; /* list of VMAs */
+ struct rb_root mm_rb;
+ u32 vmacache_seqnum; /* per-thread vmacache */
#ifdef CONFIG_MMU
- unsigned long (*get_unmapped_area) (struct file *filp,
+ unsigned long (*get_unmapped_area) (struct file *filp,
unsigned long addr, unsigned long len,
unsigned long pgoff, unsigned long flags);
#endif
- unsigned long mmap_base; /* base of mmap area */
- unsigned long mmap_legacy_base; /* base of mmap area in bottom-up allocations */
+ unsigned long mmap_base; /* base of mmap area */
+ unsigned long mmap_legacy_base; /* base of mmap area in bottom-up allocations */
#ifdef CONFIG_HAVE_ARCH_COMPAT_MMAP_BASES
- /* Base adresses for compatible mmap() */
- unsigned long mmap_compat_base;
- unsigned long mmap_compat_legacy_base;
+ /* Base addresses for compatible mmap() */
+ unsigned long mmap_compat_base;
+ unsigned long mmap_compat_legacy_base;
#endif
- unsigned long task_size; /* size of task vm space */
- unsigned long highest_vm_end; /* highest vma end address */
- pgd_t * pgd;
-
- /**
- * @mm_users: The number of users including userspace.
- *
- * Use mmget()/mmget_not_zero()/mmput() to modify. When this drops
- * to 0 (i.e. when the task exits and there are no other temporary
- * reference holders), we also release a reference on @mm_count
- * (which may then free the &struct mm_struct if @mm_count also
- * drops to 0).
- */
- atomic_t mm_users;
-
- /**
- * @mm_count: The number of references to &struct mm_struct
- * (@mm_users count as 1).
- *
- * Use mmgrab()/mmdrop() to modify. When this drops to 0, the
- * &struct mm_struct is freed.
- */
- atomic_t mm_count;
+ unsigned long task_size; /* size of task vm space */
+ unsigned long highest_vm_end; /* highest vma end address */
+ pgd_t * pgd;
+
+ /**
+ * @mm_users: The number of users including userspace.
+ *
+ * Use mmget()/mmget_not_zero()/mmput() to modify. When this
+ * drops to 0 (i.e. when the task exits and there are no other
+ * temporary reference holders), we also release a reference on
+ * @mm_count (which may then free the &struct mm_struct if
+ * @mm_count also drops to 0).
+ */
+ atomic_t mm_users;
+
+ /**
+ * @mm_count: The number of references to &struct mm_struct
+ * (@mm_users count as 1).
+ *
+ * Use mmgrab()/mmdrop() to modify. When this drops to 0, the
+ * &struct mm_struct is freed.
+ */
+ atomic_t mm_count;
#ifdef CONFIG_MMU
- atomic_long_t pgtables_bytes; /* PTE page table pages */
+ atomic_long_t pgtables_bytes; /* PTE page table pages */
#endif
- int map_count; /* number of VMAs */
+ int map_count; /* number of VMAs */
- spinlock_t page_table_lock; /* Protects page tables and some counters */
- struct rw_semaphore mmap_sem;
+ spinlock_t page_table_lock; /* Protects page tables and some
+ * counters
+ */
+ struct rw_semaphore mmap_sem;
- struct list_head mmlist; /* List of maybe swapped mm's. These are globally strung
- * together off init_mm.mmlist, and are protected
- * by mmlist_lock
- */
+ struct list_head mmlist; /* List of maybe swapped mm's. These
+ * are globally strung together off
+ * init_mm.mmlist, and are protected
+ * by mmlist_lock
+ */
- unsigned long hiwater_rss; /* High-watermark of RSS usage */
- unsigned long hiwater_vm; /* High-water virtual memory usage */
+ unsigned long hiwater_rss; /* High-watermark of RSS usage */
+ unsigned long hiwater_vm; /* High-water virtual memory usage */
- unsigned long total_vm; /* Total pages mapped */
- unsigned long locked_vm; /* Pages that have PG_mlocked set */
- unsigned long pinned_vm; /* Refcount permanently increased */
- unsigned long data_vm; /* VM_WRITE & ~VM_SHARED & ~VM_STACK */
- unsigned long exec_vm; /* VM_EXEC & ~VM_WRITE & ~VM_STACK */
- unsigned long stack_vm; /* VM_STACK */
- unsigned long def_flags;
+ unsigned long total_vm; /* Total pages mapped */
+ unsigned long locked_vm; /* Pages that have PG_mlocked set */
+ unsigned long pinned_vm; /* Refcount permanently increased */
+ unsigned long data_vm; /* VM_WRITE & ~VM_SHARED & ~VM_STACK */
+ unsigned long exec_vm; /* VM_EXEC & ~VM_WRITE & ~VM_STACK */
+ unsigned long stack_vm; /* VM_STACK */
+ unsigned long def_flags;
- spinlock_t arg_lock; /* protect the below fields */
- unsigned long start_code, end_code, start_data, end_data;
- unsigned long start_brk, brk, start_stack;
- unsigned long arg_start, arg_end, env_start, env_end;
+ spinlock_t arg_lock; /* protect the below fields */
+ unsigned long start_code, end_code, start_data, end_data;
+ unsigned long start_brk, brk, start_stack;
+ unsigned long arg_start, arg_end, env_start, env_end;
- unsigned long saved_auxv[AT_VECTOR_SIZE]; /* for /proc/PID/auxv */
+ unsigned long saved_auxv[AT_VECTOR_SIZE]; /* for /proc/PID/auxv */
- /*
- * Special counters, in some configurations protected by the
- * page_table_lock, in other configurations by being atomic.
- */
- struct mm_rss_stat rss_stat;
-
- struct linux_binfmt *binfmt;
+ /*
+ * Special counters, in some configurations protected by the
+ * page_table_lock, in other configurations by being atomic.
+ */
+ struct mm_rss_stat rss_stat;
- cpumask_var_t cpu_vm_mask_var;
+ struct linux_binfmt *binfmt;
- /* Architecture-specific MM context */
- mm_context_t context;
+ /* Architecture-specific MM context */
+ mm_context_t context;
- unsigned long flags; /* Must use atomic bitops to access the bits */
+ unsigned long flags; /* Must use atomic bitops to access */
- struct core_state *core_state; /* coredumping support */
+ struct core_state *core_state; /* coredumping support */
#ifdef CONFIG_MEMBARRIER
- atomic_t membarrier_state;
+ atomic_t membarrier_state;
#endif
#ifdef CONFIG_AIO
- spinlock_t ioctx_lock;
- struct kioctx_table __rcu *ioctx_table;
+ spinlock_t ioctx_lock;
+ struct kioctx_table __rcu *ioctx_table;
#endif
#ifdef CONFIG_MEMCG
- /*
- * "owner" points to a task that is regarded as the canonical
- * user/owner of this mm. All of the following must be true in
- * order for it to be changed:
- *
- * current == mm->owner
- * current->mm != mm
- * new_owner->mm == mm
- * new_owner->alloc_lock is held
- */
- struct task_struct __rcu *owner;
+ /*
+ * "owner" points to a task that is regarded as the canonical
+ * user/owner of this mm. All of the following must be true in
+ * order for it to be changed:
+ *
+ * current == mm->owner
+ * current->mm != mm
+ * new_owner->mm == mm
+ * new_owner->alloc_lock is held
+ */
+ struct task_struct __rcu *owner;
#endif
- struct user_namespace *user_ns;
+ struct user_namespace *user_ns;
- /* store ref to file /proc/<pid>/exe symlink points to */
- struct file __rcu *exe_file;
+ /* store ref to file /proc/<pid>/exe symlink points to */
+ struct file __rcu *exe_file;
#ifdef CONFIG_MMU_NOTIFIER
- struct mmu_notifier_mm *mmu_notifier_mm;
+ struct mmu_notifier_mm *mmu_notifier_mm;
#endif
#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS
- pgtable_t pmd_huge_pte; /* protected by page_table_lock */
-#endif
-#ifdef CONFIG_CPUMASK_OFFSTACK
- struct cpumask cpumask_allocation;
+ pgtable_t pmd_huge_pte; /* protected by page_table_lock */
#endif
#ifdef CONFIG_NUMA_BALANCING
- /*
- * numa_next_scan is the next time that the PTEs will be marked
- * pte_numa. NUMA hinting faults will gather statistics and migrate
- * pages to new nodes if necessary.
- */
- unsigned long numa_next_scan;
+ /*
+ * numa_next_scan is the next time that the PTEs will be marked
+ * pte_numa. NUMA hinting faults will gather statistics and
+ * migrate pages to new nodes if necessary.
+ */
+ unsigned long numa_next_scan;
- /* Restart point for scanning and setting pte_numa */
- unsigned long numa_scan_offset;
+ /* Restart point for scanning and setting pte_numa */
+ unsigned long numa_scan_offset;
- /* numa_scan_seq prevents two threads setting pte_numa */
- int numa_scan_seq;
+ /* numa_scan_seq prevents two threads setting pte_numa */
+ int numa_scan_seq;
#endif
- /*
- * An operation with batched TLB flushing is going on. Anything that
- * can move process memory needs to flush the TLB when moving a
- * PROT_NONE or PROT_NUMA mapped page.
- */
- atomic_t tlb_flush_pending;
+ /*
+ * An operation with batched TLB flushing is going on. Anything
+ * that can move process memory needs to flush the TLB when
+ * moving a PROT_NONE or PROT_NUMA mapped page.
+ */
+ atomic_t tlb_flush_pending;
#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
- /* See flush_tlb_batched_pending() */
- bool tlb_flush_batched;
+ /* See flush_tlb_batched_pending() */
+ bool tlb_flush_batched;
#endif
- struct uprobes_state uprobes_state;
+ struct uprobes_state uprobes_state;
#ifdef CONFIG_HUGETLB_PAGE
- atomic_long_t hugetlb_usage;
+ atomic_long_t hugetlb_usage;
#endif
- struct work_struct async_put_work;
+ struct work_struct async_put_work;
#if IS_ENABLED(CONFIG_HMM)
- /* HMM needs to track a few things per mm */
- struct hmm *hmm;
+ /* HMM needs to track a few things per mm */
+ struct hmm *hmm;
#endif
-} __randomize_layout;
+ } __randomize_layout;
+
+ /*
+ * The mm_cpumask needs to be at the end of mm_struct, because it
+ * is dynamically sized based on nr_cpu_ids.
+ */
+ unsigned long cpu_bitmap[];
+};
extern struct mm_struct init_mm;
+/* Pointer magic because the dynamic array size confuses some compilers. */
static inline void mm_init_cpumask(struct mm_struct *mm)
{
-#ifdef CONFIG_CPUMASK_OFFSTACK
- mm->cpu_vm_mask_var = &mm->cpumask_allocation;
-#endif
- cpumask_clear(mm->cpu_vm_mask_var);
+ unsigned long cpu_bitmap = (unsigned long)mm;
+
+ cpu_bitmap += offsetof(struct mm_struct, cpu_bitmap);
+ cpumask_clear((struct cpumask *)cpu_bitmap);
}
/* Future-safe accessor for struct mm_struct's cpu_vm_mask. */
static inline cpumask_t *mm_cpumask(struct mm_struct *mm)
{
- return mm->cpu_vm_mask_var;
+ return (struct cpumask *)&mm->cpu_bitmap;
}
struct mmu_gather;
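
mm_init_cpumask() and mm_cpumask() above deliberately compute the address of the trailing cpu_bitmap[] flexible array via offsetof() instead of taking &mm->cpu_bitmap directly, because the dynamically sized member confuses some compilers, as the comment in the hunk notes. The standalone program below, with invented demo_* names, shows the same flexible-array-plus-offsetof pattern outside the kernel:

#include <stdio.h>
#include <stddef.h>
#include <stdlib.h>
#include <string.h>

/* Stand-in for mm_struct: a fixed part followed by a bitmap whose size is
 * only known at allocation time, mirroring cpu_bitmap[] above. */
struct demo_mm {
	int fixed_field;
	unsigned long cpu_bitmap[];
};

int main(void)
{
	size_t nbits = 128;
	size_t nwords = (nbits + 8 * sizeof(unsigned long) - 1) /
			(8 * sizeof(unsigned long));
	struct demo_mm *mm = malloc(sizeof(*mm) +
				    nwords * sizeof(unsigned long));
	unsigned long addr = (unsigned long)mm;

	if (!mm)
		return 1;

	/* Same trick as mm_init_cpumask(): locate the bitmap by offset. */
	addr += offsetof(struct demo_mm, cpu_bitmap);
	memset((void *)addr, 0, nwords * sizeof(unsigned long));

	printf("bitmap starts %zu bytes into the struct\n",
	       offsetof(struct demo_mm, cpu_bitmap));
	free(mm);
	return 0;
}
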
diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
index 2014bd19f28e..96a71a648eed 100644
--- a/include/linux/mod_devicetable.h
+++ b/include/linux/mod_devicetable.h
@@ -501,6 +501,7 @@ enum dmi_field {
DMI_PRODUCT_VERSION,
DMI_PRODUCT_SERIAL,
DMI_PRODUCT_UUID,
+ DMI_PRODUCT_SKU,
DMI_PRODUCT_FAMILY,
DMI_BOARD_VENDOR,
DMI_BOARD_NAME,
diff --git a/include/linux/mroute_base.h b/include/linux/mroute_base.h
index d633f737b3c6..6675b9f81979 100644
--- a/include/linux/mroute_base.h
+++ b/include/linux/mroute_base.h
@@ -2,7 +2,7 @@
#define __LINUX_MROUTE_BASE_H
#include <linux/netdevice.h>
-#include <linux/rhashtable.h>
+#include <linux/rhashtable-types.h>
#include <linux/spinlock.h>
#include <net/net_namespace.h>
#include <net/sock.h>
@@ -254,6 +254,7 @@ struct mr_table {
atomic_t cache_resolve_queue_len;
bool mroute_do_assert;
bool mroute_do_pim;
+ bool mroute_do_wrvifwhole;
int mroute_reg_vif_num;
};
diff --git a/include/linux/mtd/mtd.h b/include/linux/mtd/mtd.h
index a86c4fa93115..cd0be91bdefa 100644
--- a/include/linux/mtd/mtd.h
+++ b/include/linux/mtd/mtd.h
@@ -67,9 +67,11 @@ struct mtd_erase_region_info {
* @datbuf: data buffer - if NULL only oob data are read/written
* @oobbuf: oob data buffer
*
- * Note, it is allowed to read more than one OOB area at one go, but not write.
- * The interface assumes that the OOB write requests program only one page's
- * OOB area.
+ * Note, some MTD drivers do not allow you to write more than one OOB area at
+ * one go. If you try to do that on such an MTD device, -EINVAL will be
+ * returned. If you want to make your implementation portable across all kinds
+ * of MTD devices, you should split the write request into several sub-requests
+ * when the request crosses a page boundary.
*/
struct mtd_oob_ops {
unsigned int mode;
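
The reworded mtd_oob_ops comment above shifts responsibility to the caller: portable code must split an OOB write at page boundaries itself. Below is a rough, hypothetical sketch of such a split for an OOB-only write; it leans on the long-standing mtd_write_oob(), MTD_OPS_AUTO_OOB and mtd->oobavail/->writesize interfaces (none of which appear in this diff), and real code would also have to cover data+OOB requests and partial pages.

static int sketch_write_oob_split(struct mtd_info *mtd, loff_t to,
				  const u8 *oob, size_t len)
{
	struct mtd_oob_ops ops = { .mode = MTD_OPS_AUTO_OOB };
	size_t chunk;
	int ret;

	while (len) {
		/* Never hand the driver more than one page's worth of OOB. */
		chunk = min_t(size_t, len, mtd->oobavail);

		ops.oobbuf = (u8 *)oob;
		ops.ooblen = chunk;
		ops.datbuf = NULL;	/* OOB-only sub-request */

		ret = mtd_write_oob(mtd, to, &ops);
		if (ret)
			return ret;

		oob += chunk;
		len -= chunk;
		to += mtd->writesize;	/* advance to the next page */
	}

	return 0;
}
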
diff --git a/include/linux/mtd/rawnand.h b/include/linux/mtd/rawnand.h
index 3e8ec3b8a39c..efb2345359bb 100644
--- a/include/linux/mtd/rawnand.h
+++ b/include/linux/mtd/rawnand.h
@@ -21,11 +21,10 @@
#include <linux/mtd/mtd.h>
#include <linux/mtd/flashchip.h>
#include <linux/mtd/bbm.h>
+#include <linux/of.h>
#include <linux/types.h>
-struct mtd_info;
struct nand_flash_dev;
-struct device_node;
/* Scan and identify a NAND device */
int nand_scan_with_ids(struct mtd_info *mtd, int max_chips,
@@ -36,17 +35,6 @@ static inline int nand_scan(struct mtd_info *mtd, int max_chips)
return nand_scan_with_ids(mtd, max_chips, NULL);
}
-/*
- * Separate phases of nand_scan(), allowing board driver to intervene
- * and override command or ECC setup according to flash type.
- */
-int nand_scan_ident(struct mtd_info *mtd, int max_chips,
- struct nand_flash_dev *table);
-int nand_scan_tail(struct mtd_info *mtd);
-
-/* Unregister the MTD device and free resources held by the NAND device */
-void nand_release(struct mtd_info *mtd);
-
/* Internal helper for board drivers which need to override command function */
void nand_wait_ready(struct mtd_info *mtd);
@@ -121,6 +109,7 @@ enum nand_ecc_algo {
NAND_ECC_UNKNOWN,
NAND_ECC_HAMMING,
NAND_ECC_BCH,
+ NAND_ECC_RS,
};
/*
@@ -218,6 +207,12 @@ enum nand_ecc_algo {
*/
#define NAND_WAIT_TCCS 0x00200000
+/*
+ * Whether the NAND chip is a boot medium. Drivers might use this information
+ * to select ECC algorithms supported by the boot ROM or similar restrictions.
+ */
+#define NAND_IS_BOOT_MEDIUM 0x00400000
+
/* Options set by nand scan */
/* Nand scan has allocated controller struct */
#define NAND_CONTROLLER_ALLOC 0x80000000
@@ -230,6 +225,17 @@ enum nand_ecc_algo {
/* Keep gcc happy */
struct nand_chip;
+/* ONFI version bits */
+#define ONFI_VERSION_1_0 BIT(1)
+#define ONFI_VERSION_2_0 BIT(2)
+#define ONFI_VERSION_2_1 BIT(3)
+#define ONFI_VERSION_2_2 BIT(4)
+#define ONFI_VERSION_2_3 BIT(5)
+#define ONFI_VERSION_3_0 BIT(6)
+#define ONFI_VERSION_3_1 BIT(7)
+#define ONFI_VERSION_3_2 BIT(8)
+#define ONFI_VERSION_4_0 BIT(9)
+
/* ONFI features */
#define ONFI_FEATURE_16_BIT_BUS (1 << 0)
#define ONFI_FEATURE_EXT_PARAM_PAGE (1 << 7)
@@ -470,13 +476,13 @@ struct onfi_params {
*/
struct nand_parameters {
/* Generic parameters */
- char model[100];
+ const char *model;
bool supports_set_get_features;
DECLARE_BITMAP(set_feature_list, ONFI_FEATURE_NUMBER);
DECLARE_BITMAP(get_feature_list, ONFI_FEATURE_NUMBER);
/* ONFI parameters */
- struct onfi_params onfi;
+ struct onfi_params *onfi;
};
/* The maximum expected count of bytes in the NAND ID sequence */
@@ -493,20 +499,42 @@ struct nand_id {
};
/**
- * struct nand_hw_control - Control structure for hardware controller (e.g ECC generator) shared among independent devices
+ * struct nand_controller_ops - Controller operations
+ *
+ * @attach_chip: this method is called after the NAND detection phase, once the
+ * flash ID and MTD fields such as erase size, page size and OOB
+ * size have been set up. ECC requirements are available if
+ * provided by the NAND chip or the device tree. Typically used to
+ * choose the appropriate ECC configuration and allocate
+ * associated resources.
+ * This hook is optional.
+ * @detach_chip: free all resources allocated/claimed in
+ * nand_controller_ops->attach_chip().
+ * This hook is optional.
+ */
+struct nand_controller_ops {
+ int (*attach_chip)(struct nand_chip *chip);
+ void (*detach_chip)(struct nand_chip *chip);
+};
+
+/**
+ * struct nand_controller - Structure used to describe a NAND controller
+ *
* @lock: protection lock
* @active: the mtd device which holds the controller currently
* @wq: wait queue to sleep on if a NAND operation is in
* progress used instead of the per chip wait queue
* when a hw controller is available.
+ * @ops: NAND controller operations.
*/
-struct nand_hw_control {
+struct nand_controller {
spinlock_t lock;
struct nand_chip *active;
wait_queue_head_t wq;
+ const struct nand_controller_ops *ops;
};
-static inline void nand_hw_control_init(struct nand_hw_control *nfc)
+static inline void nand_controller_init(struct nand_controller *nfc)
{
nfc->active = NULL;
spin_lock_init(&nfc->lock);
@@ -778,11 +806,15 @@ nand_get_sdr_timings(const struct nand_data_interface *conf)
* implementation) if any.
* @cleanup: the ->init() function may have allocated resources, ->cleanup()
* is here to let vendor specific code release those resources.
+ * @fixup_onfi_param_page: apply vendor specific fixups to the ONFI parameter
+ * page. This is called after the checksum is verified.
*/
struct nand_manufacturer_ops {
void (*detect)(struct nand_chip *chip);
int (*init)(struct nand_chip *chip);
void (*cleanup)(struct nand_chip *chip);
+ void (*fixup_onfi_param_page)(struct nand_chip *chip,
+ struct nand_onfi_params *p);
};
/**
@@ -986,14 +1018,14 @@ struct nand_subop {
unsigned int last_instr_end_off;
};
-int nand_subop_get_addr_start_off(const struct nand_subop *subop,
- unsigned int op_id);
-int nand_subop_get_num_addr_cyc(const struct nand_subop *subop,
- unsigned int op_id);
-int nand_subop_get_data_start_off(const struct nand_subop *subop,
- unsigned int op_id);
-int nand_subop_get_data_len(const struct nand_subop *subop,
- unsigned int op_id);
+unsigned int nand_subop_get_addr_start_off(const struct nand_subop *subop,
+ unsigned int op_id);
+unsigned int nand_subop_get_num_addr_cyc(const struct nand_subop *subop,
+ unsigned int op_id);
+unsigned int nand_subop_get_data_start_off(const struct nand_subop *subop,
+ unsigned int op_id);
+unsigned int nand_subop_get_data_len(const struct nand_subop *subop,
+ unsigned int op_id);
/**
* struct nand_op_parser_addr_constraints - Constraints for address instructions
@@ -1176,9 +1208,9 @@ int nand_op_parser_exec_op(struct nand_chip *chip,
* setting the read-retry mode. Mostly needed for MLC NAND.
* @ecc: [BOARDSPECIFIC] ECC control structure
* @buf_align: minimum buffer alignment required by a platform
- * @hwcontrol: platform-specific hardware control structure
+ * @dummy_controller: dummy controller implementation for drivers that can
+ * only control a single chip
* @erase: [REPLACEABLE] erase function
- * @scan_bbt: [REPLACEABLE] function to scan bad block table
* @chip_delay: [BOARDSPECIFIC] chip dependent delay for transferring
* data from array to read regs (tR).
* @state: [INTERN] the current state of the NAND device
@@ -1271,7 +1303,6 @@ struct nand_chip {
const struct nand_operation *op,
bool check_only);
int (*erase)(struct mtd_info *mtd, int page);
- int (*scan_bbt)(struct mtd_info *mtd);
int (*set_features)(struct mtd_info *mtd, struct nand_chip *chip,
int feature_addr, uint8_t *subfeature_para);
int (*get_features)(struct mtd_info *mtd, struct nand_chip *chip,
@@ -1314,11 +1345,11 @@ struct nand_chip {
flstate_t state;
uint8_t *oob_poi;
- struct nand_hw_control *controller;
+ struct nand_controller *controller;
struct nand_ecc_ctrl ecc;
unsigned long buf_align;
- struct nand_hw_control hwcontrol;
+ struct nand_controller dummy_controller;
uint8_t *bbt;
struct nand_bbt_descr *bbt_td;
@@ -1517,14 +1548,12 @@ extern const struct nand_manufacturer_ops micron_nand_manuf_ops;
extern const struct nand_manufacturer_ops amd_nand_manuf_ops;
extern const struct nand_manufacturer_ops macronix_nand_manuf_ops;
-int nand_default_bbt(struct mtd_info *mtd);
+int nand_create_bbt(struct nand_chip *chip);
int nand_markbad_bbt(struct mtd_info *mtd, loff_t offs);
int nand_isreserved_bbt(struct mtd_info *mtd, loff_t offs);
int nand_isbad_bbt(struct mtd_info *mtd, loff_t offs, int allowbbt);
int nand_erase_nand(struct mtd_info *mtd, struct erase_info *instr,
int allowbbt);
-int nand_do_read(struct mtd_info *mtd, loff_t from, size_t len,
- size_t *retlen, uint8_t *buf);
/**
* struct platform_nand_chip - chip level device structure
@@ -1555,14 +1584,12 @@ struct platform_device;
* struct platform_nand_ctrl - controller level device structure
* @probe: platform specific function to probe/setup hardware
* @remove: platform specific function to remove/teardown hardware
- * @hwcontrol: platform specific hardware control structure
* @dev_ready: platform specific function to read ready/busy pin
* @select_chip: platform specific chip select function
* @cmd_ctrl: platform specific function for controlling
* ALE/CLE/nCE. Also used to write command and address
* @write_buf: platform specific function for write buffer
* @read_buf: platform specific function for read buffer
- * @read_byte: platform specific function to read one byte from chip
* @priv: private data to transport driver specific settings
*
* All fields are optional and depend on the hardware driver requirements
@@ -1570,13 +1597,11 @@ struct platform_device;
struct platform_nand_ctrl {
int (*probe)(struct platform_device *pdev);
void (*remove)(struct platform_device *pdev);
- void (*hwcontrol)(struct mtd_info *mtd, int cmd);
int (*dev_ready)(struct mtd_info *mtd);
void (*select_chip)(struct mtd_info *mtd, int chip);
void (*cmd_ctrl)(struct mtd_info *mtd, int dat, unsigned int ctrl);
void (*write_buf)(struct mtd_info *mtd, const uint8_t *buf, int len);
void (*read_buf)(struct mtd_info *mtd, uint8_t *buf, int len);
- unsigned char (*read_byte)(struct mtd_info *mtd);
void *priv;
};
@@ -1593,10 +1618,10 @@ struct platform_nand_data {
/* return the supported asynchronous timing mode. */
static inline int onfi_get_async_timing_mode(struct nand_chip *chip)
{
- if (!chip->parameters.onfi.version)
+ if (!chip->parameters.onfi)
return ONFI_TIMING_MODE_UNKNOWN;
- return chip->parameters.onfi.async_timing_mode;
+ return chip->parameters.onfi->async_timing_mode;
}
int onfi_fill_data_interface(struct nand_chip *chip,
@@ -1641,14 +1666,8 @@ int nand_check_erased_ecc_chunk(void *data, int datalen,
void *extraoob, int extraooblen,
int threshold);
-int nand_check_ecc_caps(struct nand_chip *chip,
- const struct nand_ecc_caps *caps, int oobavail);
-
-int nand_match_ecc_req(struct nand_chip *chip,
- const struct nand_ecc_caps *caps, int oobavail);
-
-int nand_maximize_ecc(struct nand_chip *chip,
- const struct nand_ecc_caps *caps, int oobavail);
+int nand_ecc_choose_conf(struct nand_chip *chip,
+ const struct nand_ecc_caps *caps, int oobavail);
/* Default write_oob implementation */
int nand_write_oob_std(struct mtd_info *mtd, struct nand_chip *chip, int page);
@@ -1674,10 +1693,14 @@ int nand_get_set_features_notsupp(struct mtd_info *mtd, struct nand_chip *chip,
/* Default read_page_raw implementation */
int nand_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
uint8_t *buf, int oob_required, int page);
+int nand_read_page_raw_notsupp(struct mtd_info *mtd, struct nand_chip *chip,
+ u8 *buf, int oob_required, int page);
/* Default write_page_raw implementation */
int nand_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
const uint8_t *buf, int oob_required, int page);
+int nand_write_page_raw_notsupp(struct mtd_info *mtd, struct nand_chip *chip,
+ const u8 *buf, int oob_required, int page);
/* Reset and initialize a NAND device */
int nand_reset(struct nand_chip *chip, int chipnr);
@@ -1711,8 +1734,13 @@ int nand_read_data_op(struct nand_chip *chip, void *buf, unsigned int len,
int nand_write_data_op(struct nand_chip *chip, const void *buf,
unsigned int len, bool force_8bit);
-/* Free resources held by the NAND device */
+/*
+ * Free resources held by the NAND device, must be called on error after a
+ * successful nand_scan().
+ */
void nand_cleanup(struct nand_chip *chip);
+/* Unregister the MTD device and call nand_cleanup() */
+void nand_release(struct mtd_info *mtd);
/* Default extended ID decoding function */
void nand_decode_ext_id(struct nand_chip *chip);
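
The rawnand.h changes above fold the old nand_scan_ident()/nand_scan_tail() split into nand_controller_ops: per the new kernel-doc, ->attach_chip() runs after detection, once geometry and ECC requirements are known. A hypothetical controller-driver fragment using the renamed structures (the my_* names, the embedded my_nfc layout and the ECC values are all invented placeholders):

struct my_nfc {
	struct nand_controller controller;
	struct nand_chip chip;
};

static int my_attach_chip(struct nand_chip *chip)
{
	/* Geometry and ECC requirements are known here; pick a
	 * configuration and allocate whatever the controller needs. */
	chip->ecc.mode = NAND_ECC_HW;
	chip->ecc.size = 512;
	chip->ecc.strength = 8;

	return 0;
}

static void my_detach_chip(struct nand_chip *chip)
{
	/* release everything claimed in my_attach_chip() */
}

static const struct nand_controller_ops my_controller_ops = {
	.attach_chip = my_attach_chip,
	.detach_chip = my_detach_chip,
};

static int my_probe_one(struct my_nfc *nfc)
{
	nand_controller_init(&nfc->controller);
	nfc->controller.ops = &my_controller_ops;
	nfc->chip.controller = &nfc->controller;

	/* ->attach_chip() runs during nand_scan(), after detection. */
	return nand_scan(nand_to_mtd(&nfc->chip), 1);
}
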
diff --git a/include/linux/mtd/spi-nor.h b/include/linux/mtd/spi-nor.h
index e60da0d34cc1..c922e97f205a 100644
--- a/include/linux/mtd/spi-nor.h
+++ b/include/linux/mtd/spi-nor.h
@@ -235,6 +235,7 @@ enum spi_nor_option_flags {
SNOR_F_S3AN_ADDR_DEFAULT = BIT(3),
SNOR_F_READY_XSR_RDY = BIT(4),
SNOR_F_USE_CLSR = BIT(5),
+ SNOR_F_BROKEN_RESET = BIT(6),
};
/**
diff --git a/include/linux/mtd/spinand.h b/include/linux/mtd/spinand.h
new file mode 100644
index 000000000000..088ff96c3eb6
--- /dev/null
+++ b/include/linux/mtd/spinand.h
@@ -0,0 +1,421 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2016-2017 Micron Technology, Inc.
+ *
+ * Authors:
+ * Peter Pan <peterpandong@micron.com>
+ */
+#ifndef __LINUX_MTD_SPINAND_H
+#define __LINUX_MTD_SPINAND_H
+
+#include <linux/mutex.h>
+#include <linux/bitops.h>
+#include <linux/device.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
+#include <linux/spi/spi.h>
+#include <linux/spi/spi-mem.h>
+
+/**
+ * Standard SPI NAND flash operations
+ */
+
+#define SPINAND_RESET_OP \
+ SPI_MEM_OP(SPI_MEM_OP_CMD(0xff, 1), \
+ SPI_MEM_OP_NO_ADDR, \
+ SPI_MEM_OP_NO_DUMMY, \
+ SPI_MEM_OP_NO_DATA)
+
+#define SPINAND_WR_EN_DIS_OP(enable) \
+ SPI_MEM_OP(SPI_MEM_OP_CMD((enable) ? 0x06 : 0x04, 1), \
+ SPI_MEM_OP_NO_ADDR, \
+ SPI_MEM_OP_NO_DUMMY, \
+ SPI_MEM_OP_NO_DATA)
+
+#define SPINAND_READID_OP(ndummy, buf, len) \
+ SPI_MEM_OP(SPI_MEM_OP_CMD(0x9f, 1), \
+ SPI_MEM_OP_NO_ADDR, \
+ SPI_MEM_OP_DUMMY(ndummy, 1), \
+ SPI_MEM_OP_DATA_IN(len, buf, 1))
+
+#define SPINAND_SET_FEATURE_OP(reg, valptr) \
+ SPI_MEM_OP(SPI_MEM_OP_CMD(0x1f, 1), \
+ SPI_MEM_OP_ADDR(1, reg, 1), \
+ SPI_MEM_OP_NO_DUMMY, \
+ SPI_MEM_OP_DATA_OUT(1, valptr, 1))
+
+#define SPINAND_GET_FEATURE_OP(reg, valptr) \
+ SPI_MEM_OP(SPI_MEM_OP_CMD(0x0f, 1), \
+ SPI_MEM_OP_ADDR(1, reg, 1), \
+ SPI_MEM_OP_NO_DUMMY, \
+ SPI_MEM_OP_DATA_IN(1, valptr, 1))
+
+#define SPINAND_BLK_ERASE_OP(addr) \
+ SPI_MEM_OP(SPI_MEM_OP_CMD(0xd8, 1), \
+ SPI_MEM_OP_ADDR(3, addr, 1), \
+ SPI_MEM_OP_NO_DUMMY, \
+ SPI_MEM_OP_NO_DATA)
+
+#define SPINAND_PAGE_READ_OP(addr) \
+ SPI_MEM_OP(SPI_MEM_OP_CMD(0x13, 1), \
+ SPI_MEM_OP_ADDR(3, addr, 1), \
+ SPI_MEM_OP_NO_DUMMY, \
+ SPI_MEM_OP_NO_DATA)
+
+#define SPINAND_PAGE_READ_FROM_CACHE_OP(fast, addr, ndummy, buf, len) \
+ SPI_MEM_OP(SPI_MEM_OP_CMD(fast ? 0x0b : 0x03, 1), \
+ SPI_MEM_OP_ADDR(2, addr, 1), \
+ SPI_MEM_OP_DUMMY(ndummy, 1), \
+ SPI_MEM_OP_DATA_IN(len, buf, 1))
+
+#define SPINAND_PAGE_READ_FROM_CACHE_X2_OP(addr, ndummy, buf, len) \
+ SPI_MEM_OP(SPI_MEM_OP_CMD(0x3b, 1), \
+ SPI_MEM_OP_ADDR(2, addr, 1), \
+ SPI_MEM_OP_DUMMY(ndummy, 1), \
+ SPI_MEM_OP_DATA_IN(len, buf, 2))
+
+#define SPINAND_PAGE_READ_FROM_CACHE_X4_OP(addr, ndummy, buf, len) \
+ SPI_MEM_OP(SPI_MEM_OP_CMD(0x6b, 1), \
+ SPI_MEM_OP_ADDR(2, addr, 1), \
+ SPI_MEM_OP_DUMMY(ndummy, 1), \
+ SPI_MEM_OP_DATA_IN(len, buf, 4))
+
+#define SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(addr, ndummy, buf, len) \
+ SPI_MEM_OP(SPI_MEM_OP_CMD(0xbb, 1), \
+ SPI_MEM_OP_ADDR(2, addr, 2), \
+ SPI_MEM_OP_DUMMY(ndummy, 2), \
+ SPI_MEM_OP_DATA_IN(len, buf, 2))
+
+#define SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(addr, ndummy, buf, len) \
+ SPI_MEM_OP(SPI_MEM_OP_CMD(0xeb, 1), \
+ SPI_MEM_OP_ADDR(2, addr, 4), \
+ SPI_MEM_OP_DUMMY(ndummy, 4), \
+ SPI_MEM_OP_DATA_IN(len, buf, 4))
+
+#define SPINAND_PROG_EXEC_OP(addr) \
+ SPI_MEM_OP(SPI_MEM_OP_CMD(0x10, 1), \
+ SPI_MEM_OP_ADDR(3, addr, 1), \
+ SPI_MEM_OP_NO_DUMMY, \
+ SPI_MEM_OP_NO_DATA)
+
+#define SPINAND_PROG_LOAD(reset, addr, buf, len) \
+ SPI_MEM_OP(SPI_MEM_OP_CMD(reset ? 0x02 : 0x84, 1), \
+ SPI_MEM_OP_ADDR(2, addr, 1), \
+ SPI_MEM_OP_NO_DUMMY, \
+ SPI_MEM_OP_DATA_OUT(len, buf, 1))
+
+#define SPINAND_PROG_LOAD_X4(reset, addr, buf, len) \
+ SPI_MEM_OP(SPI_MEM_OP_CMD(reset ? 0x32 : 0x34, 1), \
+ SPI_MEM_OP_ADDR(2, addr, 1), \
+ SPI_MEM_OP_NO_DUMMY, \
+ SPI_MEM_OP_DATA_OUT(len, buf, 4))
+
+/**
+ * Standard SPI NAND flash commands
+ */
+#define SPINAND_CMD_PROG_LOAD_X4 0x32
+#define SPINAND_CMD_PROG_LOAD_RDM_DATA_X4 0x34
+
+/* feature register */
+#define REG_BLOCK_LOCK 0xa0
+#define BL_ALL_UNLOCKED 0x00
+
+/* configuration register */
+#define REG_CFG 0xb0
+#define CFG_OTP_ENABLE BIT(6)
+#define CFG_ECC_ENABLE BIT(4)
+#define CFG_QUAD_ENABLE BIT(0)
+
+/* status register */
+#define REG_STATUS 0xc0
+#define STATUS_BUSY BIT(0)
+#define STATUS_ERASE_FAILED BIT(2)
+#define STATUS_PROG_FAILED BIT(3)
+#define STATUS_ECC_MASK GENMASK(5, 4)
+#define STATUS_ECC_NO_BITFLIPS (0 << 4)
+#define STATUS_ECC_HAS_BITFLIPS (1 << 4)
+#define STATUS_ECC_UNCOR_ERROR (2 << 4)
+
+struct spinand_op;
+struct spinand_device;
+
+#define SPINAND_MAX_ID_LEN 4
+
+/**
+ * struct spinand_id - SPI NAND id structure
+ * @data: buffer containing the id bytes. Currently 4 bytes large, but can
+ * be extended if required
+ * @len: ID length
+ *
+ * spinand_id->data contains all bytes returned after a READ_ID command,
+ * including dummy bytes if the chip does not emit ID bytes right after the
+ * READ_ID command. The responsibility to extract the real ID bytes is left to
+ * spinand_manufacturer_ops->detect().
+ */
+struct spinand_id {
+ u8 data[SPINAND_MAX_ID_LEN];
+ int len;
+};
+
+/**
+ * struct spinand_manufacturer_ops - SPI NAND manufacturer specific operations
+ * @detect: detect a SPI NAND device. Every time a SPI NAND device is probed
+ * the core calls the spinand_manufacturer_ops->detect() hook of each
+ * registered manufacturer until one of them returns 1. Note that
+ * the first thing to check in this hook is that the manufacturer ID
+ * in spinand_device->id matches the manufacturer whose
+ * ->detect() hook has been called. Should return 1 if there's a
+ * match, 0 if the manufacturer ID does not match and a negative
+ * error code otherwise. When 1 is returned, the core assumes
+ * that properties of the NAND chip (spinand->base.memorg and
+ * spinand->base.eccreq) have been filled in.
+ * @init: initialize a SPI NAND device
+ * @cleanup: cleanup a SPI NAND device
+ *
+ * Each SPI NAND manufacturer driver should implement this interface so that
+ * NAND chips coming from this vendor can be detected and initialized properly.
+ */
+struct spinand_manufacturer_ops {
+ int (*detect)(struct spinand_device *spinand);
+ int (*init)(struct spinand_device *spinand);
+ void (*cleanup)(struct spinand_device *spinand);
+};
+
+/**
+ * struct spinand_manufacturer - SPI NAND manufacturer instance
+ * @id: manufacturer ID
+ * @name: manufacturer name
+ * @ops: manufacturer operations
+ */
+struct spinand_manufacturer {
+ u8 id;
+ char *name;
+ const struct spinand_manufacturer_ops *ops;
+};
+
+/* SPI NAND manufacturers */
+extern const struct spinand_manufacturer macronix_spinand_manufacturer;
+extern const struct spinand_manufacturer micron_spinand_manufacturer;
+extern const struct spinand_manufacturer winbond_spinand_manufacturer;
+
+/**
+ * struct spinand_op_variants - SPI NAND operation variants
+ * @ops: the list of variants for a given operation
+ * @nops: the number of variants
+ *
+ * Some operations like read-from-cache/write-to-cache have several variants
+ * depending on the number of IO lines you use to transfer data or address
+ * cycles. This structure is a way to describe the different variants supported
+ * by a chip and let the core pick the best one based on the SPI mem controller
+ * capabilities.
+ */
+struct spinand_op_variants {
+ const struct spi_mem_op *ops;
+ unsigned int nops;
+};
+
+#define SPINAND_OP_VARIANTS(name, ...) \
+ const struct spinand_op_variants name = { \
+ .ops = (struct spi_mem_op[]) { __VA_ARGS__ }, \
+ .nops = sizeof((struct spi_mem_op[]){ __VA_ARGS__ }) / \
+ sizeof(struct spi_mem_op), \
+ }
+
+/**
+ * spinand_ecc_info - description of the on-die ECC implemented by a SPI NAND
+ * chip
+ * @get_status: get the ECC status. Should return a positive number encoding
+ * the number of corrected bitflips if correction was possible or
+ * -EBADMSG if there are uncorrectable errors. It can also return
+ * other negative error codes if the error is not caused by
+ * uncorrectable bitflips.
+ * @ooblayout: the OOB layout used by the on-die ECC implementation
+ */
+struct spinand_ecc_info {
+ int (*get_status)(struct spinand_device *spinand, u8 status);
+ const struct mtd_ooblayout_ops *ooblayout;
+};
+
+#define SPINAND_HAS_QE_BIT BIT(0)
+
+/**
+ * struct spinand_info - Structure used to describe SPI NAND chips
+ * @model: model name
+ * @devid: device ID
+ * @flags: OR-ing of the SPINAND_XXX flags
+ * @memorg: memory organization
+ * @eccreq: ECC requirements
+ * @eccinfo: on-die ECC info
+ * @op_variants: operations variants
+ * @op_variants.read_cache: variants of the read-cache operation
+ * @op_variants.write_cache: variants of the write-cache operation
+ * @op_variants.update_cache: variants of the update-cache operation
+ * @select_target: function used to select a target/die. Required only for
+ * multi-die chips
+ *
+ * Each SPI NAND manufacturer driver should have a spinand_info table
+ * describing all the chips supported by the driver.
+ */
+struct spinand_info {
+ const char *model;
+ u8 devid;
+ u32 flags;
+ struct nand_memory_organization memorg;
+ struct nand_ecc_req eccreq;
+ struct spinand_ecc_info eccinfo;
+ struct {
+ const struct spinand_op_variants *read_cache;
+ const struct spinand_op_variants *write_cache;
+ const struct spinand_op_variants *update_cache;
+ } op_variants;
+ int (*select_target)(struct spinand_device *spinand,
+ unsigned int target);
+};
+
+#define SPINAND_INFO_OP_VARIANTS(__read, __write, __update) \
+ { \
+ .read_cache = __read, \
+ .write_cache = __write, \
+ .update_cache = __update, \
+ }
+
+#define SPINAND_ECCINFO(__ooblayout, __get_status) \
+ .eccinfo = { \
+ .ooblayout = __ooblayout, \
+ .get_status = __get_status, \
+ }
+
+#define SPINAND_SELECT_TARGET(__func) \
+ .select_target = __func,
+
+#define SPINAND_INFO(__model, __id, __memorg, __eccreq, __op_variants, \
+ __flags, ...) \
+ { \
+ .model = __model, \
+ .devid = __id, \
+ .memorg = __memorg, \
+ .eccreq = __eccreq, \
+ .op_variants = __op_variants, \
+ .flags = __flags, \
+ __VA_ARGS__ \
+ }
+
+/**
+ * struct spinand_device - SPI NAND device instance
+ * @base: NAND device instance
+ * @spimem: pointer to the SPI mem object
+ * @lock: lock used to serialize accesses to the NAND
+ * @id: NAND ID as returned by READ_ID
+ * @flags: NAND flags
+ * @op_templates: various SPI mem op templates
+ * @op_templates.read_cache: read cache op template
+ * @op_templates.write_cache: write cache op template
+ * @op_templates.update_cache: update cache op template
+ * @select_target: select a specific target/die. Usually called before sending
+ * a command addressing a page or an eraseblock embedded in
+ * this die. Only required if your chip exposes several dies
+ * @cur_target: currently selected target/die
+ * @eccinfo: on-die ECC information
+ * @cfg_cache: config register cache. One entry per die
+ * @databuf: bounce buffer for data
+ * @oobbuf: bounce buffer for OOB data
+ * @scratchbuf: buffer used for everything but page accesses. This is needed
+ * because the spi-mem interface explicitly requests that buffers
+ * passed in spi_mem_op be DMA-able, so we cannot place the buffers on
+ * the stack.
+ * @manufacturer: SPI NAND manufacturer information
+ * @priv: manufacturer private data
+ */
+struct spinand_device {
+ struct nand_device base;
+ struct spi_mem *spimem;
+ struct mutex lock;
+ struct spinand_id id;
+ u32 flags;
+
+ struct {
+ const struct spi_mem_op *read_cache;
+ const struct spi_mem_op *write_cache;
+ const struct spi_mem_op *update_cache;
+ } op_templates;
+
+ int (*select_target)(struct spinand_device *spinand,
+ unsigned int target);
+ unsigned int cur_target;
+
+ struct spinand_ecc_info eccinfo;
+
+ u8 *cfg_cache;
+ u8 *databuf;
+ u8 *oobbuf;
+ u8 *scratchbuf;
+ const struct spinand_manufacturer *manufacturer;
+ void *priv;
+};
+
+/**
+ * mtd_to_spinand() - Get the SPI NAND device attached to an MTD instance
+ * @mtd: MTD instance
+ *
+ * Return: the SPI NAND device attached to @mtd.
+ */
+static inline struct spinand_device *mtd_to_spinand(struct mtd_info *mtd)
+{
+ return container_of(mtd_to_nanddev(mtd), struct spinand_device, base);
+}
+
+/**
+ * spinand_to_mtd() - Get the MTD device embedded in a SPI NAND device
+ * @spinand: SPI NAND device
+ *
+ * Return: the MTD device embedded in @spinand.
+ */
+static inline struct mtd_info *spinand_to_mtd(struct spinand_device *spinand)
+{
+ return nanddev_to_mtd(&spinand->base);
+}
+
+/**
+ * nand_to_spinand() - Get the SPI NAND device embedding a NAND object
+ * @nand: NAND object
+ *
+ * Return: the SPI NAND device embedding @nand.
+ */
+static inline struct spinand_device *nand_to_spinand(struct nand_device *nand)
+{
+ return container_of(nand, struct spinand_device, base);
+}
+
+/**
+ * spinand_to_nand() - Get the NAND device embedded in a SPI NAND object
+ * @spinand: SPI NAND device
+ *
+ * Return: the NAND device embedded in @spinand.
+ */
+static inline struct nand_device *
+spinand_to_nand(struct spinand_device *spinand)
+{
+ return &spinand->base;
+}
+
+/**
+ * spinand_set_of_node - Attach a DT node to a SPI NAND device
+ * @spinand: SPI NAND device
+ * @np: DT node
+ *
+ * Attach a DT node to a SPI NAND device.
+ */
+static inline void spinand_set_of_node(struct spinand_device *spinand,
+ struct device_node *np)
+{
+ nanddev_set_of_node(&spinand->base, np);
+}
+
+int spinand_match_and_init(struct spinand_device *dev,
+ const struct spinand_info *table,
+ unsigned int table_size, u8 devid);
+
+int spinand_upd_cfg(struct spinand_device *spinand, u8 mask, u8 val);
+int spinand_select_target(struct spinand_device *spinand, unsigned int target);
+
+#endif /* __LINUX_MTD_SPINAND_H */
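
The new spinand.h header is macro-heavy, so a concrete but hypothetical manufacturer-driver fragment may help show how the pieces fit together. Everything prefixed my_ is invented, the ECC status decoding is a placeholder, and the memorg/eccreq initializers are left empty because their NAND_MEMORG()/NAND_ECCREQ() helpers live in <linux/mtd/nand.h>, outside this diff.

static SPINAND_OP_VARIANTS(my_read_cache_variants,
		SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
		SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
		SPINAND_PAGE_READ_FROM_CACHE_OP(true, 0, 1, NULL, 0),
		SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0));

static SPINAND_OP_VARIANTS(my_write_cache_variants,
		SPINAND_PROG_LOAD_X4(true, 0, NULL, 0),
		SPINAND_PROG_LOAD(true, 0, NULL, 0));

static SPINAND_OP_VARIANTS(my_update_cache_variants,
		SPINAND_PROG_LOAD_X4(false, 0, NULL, 0),
		SPINAND_PROG_LOAD(false, 0, NULL, 0));

static const struct mtd_ooblayout_ops my_ooblayout_ops = {
	/* .ecc and .free callbacks omitted in this sketch */
};

static int my_ecc_get_status(struct spinand_device *spinand, u8 status)
{
	switch (status & STATUS_ECC_MASK) {
	case STATUS_ECC_NO_BITFLIPS:
		return 0;
	case STATUS_ECC_HAS_BITFLIPS:
		return 1;	/* placeholder corrected-bitflip count */
	default:
		return -EBADMSG;
	}
}

static const struct spinand_info my_spinand_table[] = {
	SPINAND_INFO("my-spi-nand", 0xC1,
		     { 0 } /* .memorg: use NAND_MEMORG() in real code */,
		     { 0 } /* .eccreq: use NAND_ECCREQ() in real code */,
		     SPINAND_INFO_OP_VARIANTS(&my_read_cache_variants,
					      &my_write_cache_variants,
					      &my_update_cache_variants),
		     SPINAND_HAS_QE_BIT,
		     SPINAND_ECCINFO(&my_ooblayout_ops, my_ecc_get_status)),
};

A manufacturer driver would typically hand my_spinand_table and its size to spinand_match_and_init(), declared above, to bind a detected chip to one of these entries.
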
diff --git a/include/linux/net.h b/include/linux/net.h
index 08b6eb964dd6..e0930678c8bf 100644
--- a/include/linux/net.h
+++ b/include/linux/net.h
@@ -114,7 +114,7 @@ struct socket {
unsigned long flags;
- struct socket_wq __rcu *wq;
+ struct socket_wq *wq;
struct file *file;
struct sock *sk;
@@ -147,7 +147,6 @@ struct proto_ops {
int (*getname) (struct socket *sock,
struct sockaddr *addr,
int peer);
- __poll_t (*poll_mask) (struct socket *sock, __poll_t events);
__poll_t (*poll) (struct file *file, struct socket *sock,
struct poll_table_struct *wait);
int (*ioctl) (struct socket *sock, unsigned int cmd,
diff --git a/include/linux/netdev_features.h b/include/linux/netdev_features.h
index 623bb8ced060..2b2a6dce1630 100644
--- a/include/linux/netdev_features.h
+++ b/include/linux/netdev_features.h
@@ -79,6 +79,7 @@ enum {
NETIF_F_HW_ESP_TX_CSUM_BIT, /* ESP with TX checksum offload */
NETIF_F_RX_UDP_TUNNEL_PORT_BIT, /* Offload of RX port for UDP tunnels */
NETIF_F_HW_TLS_TX_BIT, /* Hardware TLS TX offload */
+ NETIF_F_HW_TLS_RX_BIT, /* Hardware TLS RX offload */
NETIF_F_GRO_HW_BIT, /* Hardware Generic receive offload */
NETIF_F_HW_TLS_RECORD_BIT, /* Offload TLS record */
@@ -151,6 +152,7 @@ enum {
#define NETIF_F_HW_TLS_RECORD __NETIF_F(HW_TLS_RECORD)
#define NETIF_F_GSO_UDP_L4 __NETIF_F(GSO_UDP_L4)
#define NETIF_F_HW_TLS_TX __NETIF_F(HW_TLS_TX)
+#define NETIF_F_HW_TLS_RX __NETIF_F(HW_TLS_RX)
#define for_each_netdev_feature(mask_addr, bit) \
for_each_set_bit(bit, (unsigned long *)mask_addr, NETDEV_FEATURE_COUNT)
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 3ec9850c7936..ca5ab98053c8 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -302,6 +302,17 @@ struct netdev_boot_setup {
int __init netdev_boot_setup(char *str);
+struct gro_list {
+ struct list_head list;
+ int count;
+};
+
+/*
+ * number of GRO hash buckets; must be less than the number of bits in
+ * napi_struct::gro_bitmask
+ */
+#define GRO_HASH_BUCKETS 8
+
/*
* Structure for NAPI scheduling similar to tasklet but with weighting
*/
@@ -316,13 +327,13 @@ struct napi_struct {
unsigned long state;
int weight;
- unsigned int gro_count;
+ unsigned long gro_bitmask;
int (*poll)(struct napi_struct *, int);
#ifdef CONFIG_NETPOLL
int poll_owner;
#endif
struct net_device *dev;
- struct sk_buff *gro_list;
+ struct gro_list gro_hash[GRO_HASH_BUCKETS];
struct sk_buff *skb;
struct hrtimer timer;
struct list_head dev_list;
@@ -569,6 +580,9 @@ struct netdev_queue {
* (/sys/class/net/DEV/Q/trans_timeout)
*/
unsigned long trans_timeout;
+
+ /* Subordinate device that the queue has been assigned to */
+ struct net_device *sb_dev;
/*
* write-mostly part
*/
@@ -730,10 +744,15 @@ struct xps_map {
*/
struct xps_dev_maps {
struct rcu_head rcu;
- struct xps_map __rcu *cpu_map[0];
+ struct xps_map __rcu *attr_map[0]; /* Either CPUs map or RXQs map */
};
-#define XPS_DEV_MAPS_SIZE(_tcs) (sizeof(struct xps_dev_maps) + \
+
+#define XPS_CPU_DEV_MAPS_SIZE(_tcs) (sizeof(struct xps_dev_maps) + \
(nr_cpu_ids * (_tcs) * sizeof(struct xps_map *)))
+
+#define XPS_RXQ_DEV_MAPS_SIZE(_tcs, _rxqs) (sizeof(struct xps_dev_maps) +\
+ (_rxqs * (_tcs) * sizeof(struct xps_map *)))
+
#endif /* CONFIG_XPS */
#define TC_MAX_QUEUE 16
@@ -779,7 +798,8 @@ static inline bool netdev_phys_item_id_same(struct netdev_phys_item_id *a,
}
typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
- struct sk_buff *skb);
+ struct sk_buff *skb,
+ struct net_device *sb_dev);
enum tc_setup_type {
TC_SETUP_QDISC_MQPRIO,
@@ -792,6 +812,7 @@ enum tc_setup_type {
TC_SETUP_QDISC_RED,
TC_SETUP_QDISC_PRIO,
TC_SETUP_QDISC_MQ,
+ TC_SETUP_QDISC_ETF,
};
/* These structures hold the attributes of bpf state that are being passed
@@ -807,11 +828,8 @@ enum bpf_netdev_command {
*/
XDP_SETUP_PROG,
XDP_SETUP_PROG_HW,
- /* Check if a bpf program is set on the device. The callee should
- * set @prog_attached to one of XDP_ATTACHED_* values, note that "true"
- * is equivalent to XDP_ATTACHED_DRV.
- */
XDP_QUERY_PROG,
+ XDP_QUERY_PROG_HW,
/* BPF program for offload callbacks, invoked at program load time. */
BPF_OFFLOAD_VERIFIER_PREP,
BPF_OFFLOAD_TRANSLATE,
@@ -835,9 +853,8 @@ struct netdev_bpf {
struct bpf_prog *prog;
struct netlink_ext_ack *extack;
};
- /* XDP_QUERY_PROG */
+ /* XDP_QUERY_PROG, XDP_QUERY_PROG_HW */
struct {
- u8 prog_attached;
u32 prog_id;
/* flags with which program was installed */
u32 prog_flags;
@@ -855,10 +872,10 @@ struct netdev_bpf {
struct {
struct bpf_offloaded_map *offmap;
};
- /* XDP_SETUP_XSK_UMEM */
+ /* XDP_QUERY_XSK_UMEM, XDP_SETUP_XSK_UMEM */
struct {
- struct xdp_umem *umem;
- u16 queue_id;
+ struct xdp_umem *umem; /* out for query */
+ u16 queue_id; /* in for query */
} xsk;
};
};
@@ -891,6 +908,8 @@ struct tlsdev_ops {
void (*tls_dev_del)(struct net_device *netdev,
struct tls_context *ctx,
enum tls_offload_ctx_dir direction);
+ void (*tls_dev_resync_rx)(struct net_device *netdev,
+ struct sock *sk, u32 seq, u64 rcd_sn);
};
#endif
@@ -942,7 +961,8 @@ struct dev_ifalias {
* those the driver believes to be appropriate.
*
* u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb,
- * void *accel_priv, select_queue_fallback_t fallback);
+ * struct net_device *sb_dev,
+ * select_queue_fallback_t fallback);
* Called to decide which queue to use when device supports multiple
* transmit queues.
*
@@ -1214,7 +1234,7 @@ struct net_device_ops {
netdev_features_t features);
u16 (*ndo_select_queue)(struct net_device *dev,
struct sk_buff *skb,
- void *accel_priv,
+ struct net_device *sb_dev,
select_queue_fallback_t fallback);
void (*ndo_change_rx_flags)(struct net_device *dev,
int flags);
@@ -1909,7 +1929,8 @@ struct net_device {
int watchdog_timeo;
#ifdef CONFIG_XPS
- struct xps_dev_maps __rcu *xps_maps;
+ struct xps_dev_maps __rcu *xps_cpus_map;
+ struct xps_dev_maps __rcu *xps_rxqs_map;
#endif
#ifdef CONFIG_NET_CLS_ACT
struct mini_Qdisc __rcu *miniq_egress;
@@ -1978,7 +1999,7 @@ struct net_device {
#ifdef CONFIG_DCB
const struct dcbnl_rtnl_ops *dcbnl_ops;
#endif
- u8 num_tc;
+ s16 num_tc;
struct netdev_tc_txq tc_to_txq[TC_MAX_QUEUE];
u8 prio_tc_map[TC_BITMASK + 1];
@@ -2032,6 +2053,17 @@ int netdev_get_num_tc(struct net_device *dev)
return dev->num_tc;
}
+void netdev_unbind_sb_channel(struct net_device *dev,
+ struct net_device *sb_dev);
+int netdev_bind_sb_channel_queue(struct net_device *dev,
+ struct net_device *sb_dev,
+ u8 tc, u16 count, u16 offset);
+int netdev_set_sb_channel(struct net_device *dev, u16 channel);
+static inline int netdev_get_sb_channel(struct net_device *dev)
+{
+ return max_t(int, -dev->num_tc, 0);
+}
+
static inline
struct netdev_queue *netdev_get_tx_queue(const struct net_device *dev,
unsigned int index)
@@ -2076,7 +2108,7 @@ static inline void netdev_for_each_tx_queue(struct net_device *dev,
struct netdev_queue *netdev_pick_tx(struct net_device *dev,
struct sk_buff *skb,
- void *accel_priv);
+ struct net_device *sb_dev);
/* returns the headroom that the master device needs to take in account
* when forwarding to this dev
@@ -2255,10 +2287,10 @@ static inline int gro_recursion_inc_test(struct sk_buff *skb)
return ++NAPI_GRO_CB(skb)->recursion_counter == GRO_RECURSION_LIMIT;
}
-typedef struct sk_buff **(*gro_receive_t)(struct sk_buff **, struct sk_buff *);
-static inline struct sk_buff **call_gro_receive(gro_receive_t cb,
- struct sk_buff **head,
- struct sk_buff *skb)
+typedef struct sk_buff *(*gro_receive_t)(struct list_head *, struct sk_buff *);
+static inline struct sk_buff *call_gro_receive(gro_receive_t cb,
+ struct list_head *head,
+ struct sk_buff *skb)
{
if (unlikely(gro_recursion_inc_test(skb))) {
NAPI_GRO_CB(skb)->flush |= 1;
@@ -2268,12 +2300,12 @@ static inline struct sk_buff **call_gro_receive(gro_receive_t cb,
return cb(head, skb);
}
-typedef struct sk_buff **(*gro_receive_sk_t)(struct sock *, struct sk_buff **,
- struct sk_buff *);
-static inline struct sk_buff **call_gro_receive_sk(gro_receive_sk_t cb,
- struct sock *sk,
- struct sk_buff **head,
- struct sk_buff *skb)
+typedef struct sk_buff *(*gro_receive_sk_t)(struct sock *, struct list_head *,
+ struct sk_buff *);
+static inline struct sk_buff *call_gro_receive_sk(gro_receive_sk_t cb,
+ struct sock *sk,
+ struct list_head *head,
+ struct sk_buff *skb)
{
if (unlikely(gro_recursion_inc_test(skb))) {
NAPI_GRO_CB(skb)->flush |= 1;
@@ -2290,6 +2322,9 @@ struct packet_type {
struct net_device *,
struct packet_type *,
struct net_device *);
+ void (*list_func) (struct list_head *,
+ struct packet_type *,
+ struct net_device *);
bool (*id_match)(struct packet_type *ptype,
struct sock *sk);
void *af_packet_priv;
@@ -2299,8 +2334,8 @@ struct packet_type {
struct offload_callbacks {
struct sk_buff *(*gso_segment)(struct sk_buff *skb,
netdev_features_t features);
- struct sk_buff **(*gro_receive)(struct sk_buff **head,
- struct sk_buff *skb);
+ struct sk_buff *(*gro_receive)(struct list_head *head,
+ struct sk_buff *skb);
int (*gro_complete)(struct sk_buff *skb, int nhoff);
};
@@ -2537,8 +2572,14 @@ void dev_close(struct net_device *dev);
void dev_close_many(struct list_head *head, bool unlink);
void dev_disable_lro(struct net_device *dev);
int dev_loopback_xmit(struct net *net, struct sock *sk, struct sk_buff *newskb);
+u16 dev_pick_tx_zero(struct net_device *dev, struct sk_buff *skb,
+ struct net_device *sb_dev,
+ select_queue_fallback_t fallback);
+u16 dev_pick_tx_cpu_id(struct net_device *dev, struct sk_buff *skb,
+ struct net_device *sb_dev,
+ select_queue_fallback_t fallback);
int dev_queue_xmit(struct sk_buff *skb);
-int dev_queue_xmit_accel(struct sk_buff *skb, void *accel_priv);
+int dev_queue_xmit_accel(struct sk_buff *skb, struct net_device *sb_dev);
int dev_direct_xmit(struct sk_buff *skb, u16 queue_id);
int register_netdevice(struct net_device *dev);
void unregister_netdevice_queue(struct net_device *dev, struct list_head *head);
@@ -2568,7 +2609,7 @@ struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex);
struct net_device *dev_get_by_napi_id(unsigned int napi_id);
int netdev_get_name(struct net *net, char *name, int ifindex);
int dev_restart(struct net_device *dev);
-int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb);
+int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb);
static inline unsigned int skb_gro_offset(const struct sk_buff *skb)
{
@@ -2784,15 +2825,35 @@ static inline void skb_gro_remcsum_cleanup(struct sk_buff *skb,
}
#ifdef CONFIG_XFRM_OFFLOAD
-static inline void skb_gro_flush_final(struct sk_buff *skb, struct sk_buff **pp, int flush)
+static inline void skb_gro_flush_final(struct sk_buff *skb, struct sk_buff *pp, int flush)
{
if (PTR_ERR(pp) != -EINPROGRESS)
NAPI_GRO_CB(skb)->flush |= flush;
}
+static inline void skb_gro_flush_final_remcsum(struct sk_buff *skb,
+ struct sk_buff *pp,
+ int flush,
+ struct gro_remcsum *grc)
+{
+ if (PTR_ERR(pp) != -EINPROGRESS) {
+ NAPI_GRO_CB(skb)->flush |= flush;
+ skb_gro_remcsum_cleanup(skb, grc);
+ skb->remcsum_offload = 0;
+ }
+}
#else
-static inline void skb_gro_flush_final(struct sk_buff *skb, struct sk_buff **pp, int flush)
+static inline void skb_gro_flush_final(struct sk_buff *skb, struct sk_buff *pp, int flush)
+{
+ NAPI_GRO_CB(skb)->flush |= flush;
+}
+static inline void skb_gro_flush_final_remcsum(struct sk_buff *skb,
+ struct sk_buff *pp,
+ int flush,
+ struct gro_remcsum *grc)
{
NAPI_GRO_CB(skb)->flush |= flush;
+ skb_gro_remcsum_cleanup(skb, grc);
+ skb->remcsum_offload = 0;
}
#endif
@@ -3258,6 +3319,92 @@ static inline void netif_wake_subqueue(struct net_device *dev, u16 queue_index)
#ifdef CONFIG_XPS
int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask,
u16 index);
+int __netif_set_xps_queue(struct net_device *dev, const unsigned long *mask,
+ u16 index, bool is_rxqs_map);
+
+/**
+ * netif_attr_test_mask - Test a CPU or Rx queue set in a mask
+ * @j: CPU/Rx queue index
+ * @mask: bitmask of all cpus/rx queues
+ * @nr_bits: number of bits in the bitmask
+ *
+ * Test if a CPU or Rx queue index is set in a mask of all CPU/Rx queues.
+ */
+static inline bool netif_attr_test_mask(unsigned long j,
+ const unsigned long *mask,
+ unsigned int nr_bits)
+{
+ cpu_max_bits_warn(j, nr_bits);
+ return test_bit(j, mask);
+}
+
+/**
+ * netif_attr_test_online - Test for online CPU/Rx queue
+ * @j: CPU/Rx queue index
+ * @online_mask: bitmask for CPUs/Rx queues that are online
+ * @nr_bits: number of bits in the bitmask
+ *
+ * Returns true if a CPU/Rx queue is online.
+ */
+static inline bool netif_attr_test_online(unsigned long j,
+ const unsigned long *online_mask,
+ unsigned int nr_bits)
+{
+ cpu_max_bits_warn(j, nr_bits);
+
+ if (online_mask)
+ return test_bit(j, online_mask);
+
+ return (j < nr_bits);
+}
+
+/**
+ * netif_attrmask_next - get the next CPU/Rx queue in a cpu/Rx queues mask
+ * @n: CPU/Rx queue index
+ * @srcp: the cpumask/Rx queue mask pointer
+ * @nr_bits: number of bits in the bitmask
+ *
+ * Returns >= nr_bits if no further CPUs/Rx queues set.
+ */
+static inline unsigned int netif_attrmask_next(int n, const unsigned long *srcp,
+ unsigned int nr_bits)
+{
+ /* -1 is a legal arg here. */
+ if (n != -1)
+ cpu_max_bits_warn(n, nr_bits);
+
+ if (srcp)
+ return find_next_bit(srcp, nr_bits, n + 1);
+
+ return n + 1;
+}
+
+/**
+ * netif_attrmask_next_and - get the next CPU/Rx queue in *src1p & *src2p
+ * @n: CPU/Rx queue index
+ * @src1p: the first CPUs/Rx queues mask pointer
+ * @src2p: the second CPUs/Rx queues mask pointer
+ * @nr_bits: number of bits in the bitmask
+ *
+ * Returns >= nr_bits if no further CPUs/Rx queues set in both.
+ */
+static inline int netif_attrmask_next_and(int n, const unsigned long *src1p,
+ const unsigned long *src2p,
+ unsigned int nr_bits)
+{
+ /* -1 is a legal arg here. */
+ if (n != -1)
+ cpu_max_bits_warn(n, nr_bits);
+
+ if (src1p && src2p)
+ return find_next_and_bit(src1p, src2p, nr_bits, n + 1);
+ else if (src1p)
+ return find_next_bit(src1p, nr_bits, n + 1);
+ else if (src2p)
+ return find_next_bit(src2p, nr_bits, n + 1);
+
+ return n + 1;
+}
#else
static inline int netif_set_xps_queue(struct net_device *dev,
const struct cpumask *mask,
@@ -3265,6 +3412,13 @@ static inline int netif_set_xps_queue(struct net_device *dev,
{
return 0;
}
+
+static inline int __netif_set_xps_queue(struct net_device *dev,
+ const unsigned long *mask,
+ u16 index, bool is_rxqs_map)
+{
+ return 0;
+}
#endif
/**
@@ -3284,8 +3438,9 @@ int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq);
int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq);
#else
static inline int netif_set_real_num_rx_queues(struct net_device *dev,
- unsigned int rxq)
+ unsigned int rxqs)
{
+ dev->real_num_rx_queues = rxqs;
return 0;
}
#endif
@@ -3364,6 +3519,7 @@ int netif_rx(struct sk_buff *skb);
int netif_rx_ni(struct sk_buff *skb);
int netif_receive_skb(struct sk_buff *skb);
int netif_receive_skb_core(struct sk_buff *skb);
+void netif_receive_skb_list(struct list_head *head);
gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb);
void napi_gro_flush(struct napi_struct *napi, bool flush_old);
struct sk_buff *napi_get_frags(struct napi_struct *napi);
@@ -3398,6 +3554,8 @@ int dev_set_alias(struct net_device *, const char *, size_t);
int dev_get_alias(const struct net_device *, char *, size_t);
int dev_change_net_namespace(struct net_device *, struct net *, const char *);
int __dev_set_mtu(struct net_device *, int);
+int dev_set_mtu_ext(struct net_device *dev, int mtu,
+ struct netlink_ext_ack *extack);
int dev_set_mtu(struct net_device *, int);
int dev_change_tx_queue_len(struct net_device *, unsigned long);
void dev_set_group(struct net_device *, int);
@@ -3415,8 +3573,9 @@ struct sk_buff *dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
typedef int (*bpf_op_t)(struct net_device *dev, struct netdev_bpf *bpf);
int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack,
int fd, u32 flags);
-void __dev_xdp_query(struct net_device *dev, bpf_op_t xdp_op,
- struct netdev_bpf *xdp);
+u32 __dev_xdp_query(struct net_device *dev, bpf_op_t xdp_op,
+ enum bpf_netdev_command cmd);
+int xdp_umem_query(struct net_device *dev, u16 queue_id);
int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb);
int dev_forward_skb(struct net_device *dev, struct sk_buff *skb);
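
Among the netdevice.h changes above, ndo_select_queue() and select_queue_fallback_t drop the opaque accel_priv pointer in favour of an explicit subordinate net_device. A hypothetical driver callback written against the new prototype (my_select_queue is invented; TC_PRIO_CONTROL comes from the standard pkt_sched UAPI header):

static u16 my_select_queue(struct net_device *dev, struct sk_buff *skb,
			   struct net_device *sb_dev,
			   select_queue_fallback_t fallback)
{
	/* Pin control traffic to queue 0 and let the core pick for the
	 * rest; the fallback now also receives the subordinate device so
	 * the core can take it into account. */
	if (skb->priority == TC_PRIO_CONTROL)
		return 0;

	return fallback(dev, skb, sb_dev);
}
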
diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h
index dd2052f0efb7..07efffd0c759 100644
--- a/include/linux/netfilter.h
+++ b/include/linux/netfilter.h
@@ -288,6 +288,24 @@ NF_HOOK(uint8_t pf, unsigned int hook, struct net *net, struct sock *sk, struct
return ret;
}
+static inline void
+NF_HOOK_LIST(uint8_t pf, unsigned int hook, struct net *net, struct sock *sk,
+ struct list_head *head, struct net_device *in, struct net_device *out,
+ int (*okfn)(struct net *, struct sock *, struct sk_buff *))
+{
+ struct sk_buff *skb, *next;
+ struct list_head sublist;
+
+ INIT_LIST_HEAD(&sublist);
+ list_for_each_entry_safe(skb, next, head, list) {
+ list_del(&skb->list);
+ if (nf_hook(pf, hook, net, sk, skb, in, out, okfn) == 1)
+ list_add_tail(&skb->list, &sublist);
+ }
+ /* Put passed packets back on main list */
+ list_splice(&sublist, head);
+}
+
/* Call setsockopt() */
int nf_setsockopt(struct sock *sk, u_int8_t pf, int optval, char __user *opt,
unsigned int len);
@@ -369,6 +387,14 @@ NF_HOOK(uint8_t pf, unsigned int hook, struct net *net, struct sock *sk,
return okfn(net, sk, skb);
}
+static inline void
+NF_HOOK_LIST(uint8_t pf, unsigned int hook, struct net *net, struct sock *sk,
+ struct list_head *head, struct net_device *in, struct net_device *out,
+ int (*okfn)(struct net *, struct sock *, struct sk_buff *))
+{
+ /* nothing to do */
+}
+
static inline int nf_hook(u_int8_t pf, unsigned int hook, struct net *net,
struct sock *sk, struct sk_buff *skb,
struct net_device *indev, struct net_device *outdev,
@@ -388,8 +414,17 @@ nf_nat_decode_session(struct sk_buff *skb, struct flowi *fl, u_int8_t family)
extern void (*ip_ct_attach)(struct sk_buff *, const struct sk_buff *) __rcu;
void nf_ct_attach(struct sk_buff *, const struct sk_buff *);
+struct nf_conntrack_tuple;
+bool nf_ct_get_tuple_skb(struct nf_conntrack_tuple *dst_tuple,
+ const struct sk_buff *skb);
#else
static inline void nf_ct_attach(struct sk_buff *new, struct sk_buff *skb) {}
+struct nf_conntrack_tuple;
+static inline bool nf_ct_get_tuple_skb(struct nf_conntrack_tuple *dst_tuple,
+ const struct sk_buff *skb)
+{
+ return false;
+}
#endif
struct nf_conn;
@@ -398,6 +433,8 @@ enum ip_conntrack_info;
struct nf_ct_hook {
int (*update)(struct net *net, struct sk_buff *skb);
void (*destroy)(struct nf_conntrack *);
+ bool (*get_tuple_skb)(struct nf_conntrack_tuple *,
+ const struct sk_buff *);
};
extern struct nf_ct_hook __rcu *nf_ct_hook;
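
The new NF_HOOK_LIST() helper above filters a batch in place: every skb is unlinked, run through nf_hook(), and only the accepted ones are spliced back onto the caller's list; as the inline shows, the okfn argument is not invoked here, so the caller walks the survivors itself. A hypothetical list-based receive fragment showing that calling pattern (all my_* names are invented and the finish step is a placeholder):

static int my_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	/* real code would continue per-packet processing here */
	consume_skb(skb);
	return 0;
}

static void my_receive_list(struct net *net, struct net_device *dev,
			    struct list_head *head)
{
	struct sk_buff *skb, *next;

	/* One hook traversal for the whole batch; packets the hook drops
	 * are removed from @head. */
	NF_HOOK_LIST(NFPROTO_IPV4, NF_INET_PRE_ROUTING, net, NULL,
		     head, dev, NULL, my_finish);

	/* Hand each surviving packet to the finish step. */
	list_for_each_entry_safe(skb, next, head, list) {
		list_del(&skb->list);
		my_finish(net, NULL, skb);
	}
}
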
diff --git a/include/linux/netfilter/nfnetlink.h b/include/linux/netfilter/nfnetlink.h
index 3ecc3050be0e..4a520d3304a2 100644
--- a/include/linux/netfilter/nfnetlink.h
+++ b/include/linux/netfilter/nfnetlink.h
@@ -29,6 +29,7 @@ struct nfnetlink_subsystem {
__u8 subsys_id; /* nfnetlink subsystem ID */
__u8 cb_count; /* number of callbacks */
const struct nfnl_callback *cb; /* callback for individual types */
+ struct module *owner;
int (*commit)(struct net *net, struct sk_buff *skb);
int (*abort)(struct net *net, struct sk_buff *skb);
void (*cleanup)(struct net *net);
diff --git a/include/linux/netfilter/nf_osf.h b/include/linux/netfilter/nfnetlink_osf.h
index 0e114c492fb8..ecf7dab81e9e 100644
--- a/include/linux/netfilter/nf_osf.h
+++ b/include/linux/netfilter/nfnetlink_osf.h
@@ -1,16 +1,8 @@
-#include <uapi/linux/netfilter/nf_osf.h>
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _NFOSF_H
+#define _NFOSF_H
-/* Initial window size option state machine: multiple of mss, mtu or
- * plain numeric value. Can also be made as plain numeric value which
- * is not a multiple of specified value.
- */
-enum nf_osf_window_size_options {
- OSF_WSS_PLAIN = 0,
- OSF_WSS_MSS,
- OSF_WSS_MTU,
- OSF_WSS_MODULO,
- OSF_WSS_MAX,
-};
+#include <uapi/linux/netfilter/nfnetlink_osf.h>
enum osf_fmatch_states {
/* Packet does not match the fingerprint */
@@ -21,6 +13,8 @@ enum osf_fmatch_states {
FMATCH_OPT_WRONG,
};
+extern struct list_head nf_osf_fingers[2];
+
struct nf_osf_finger {
struct rcu_head rcu_head;
struct list_head finger_entry;
@@ -31,3 +25,8 @@ bool nf_osf_match(const struct sk_buff *skb, u_int8_t family,
int hooknum, struct net_device *in, struct net_device *out,
const struct nf_osf_info *info, struct net *net,
const struct list_head *nf_osf_fingers);
+
+const char *nf_osf_find(const struct sk_buff *skb,
+ const struct list_head *nf_osf_fingers);
+
+#endif /* _NFOSF_H */
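
A small sketch of the newly exported lookup, assuming the caller only wants the matched genre string; my_log_os_genre() is illustrative.

#include <linux/printk.h>
#include <linux/netfilter/nfnetlink_osf.h>

static void my_log_os_genre(const struct sk_buff *skb)
{
	const char *genre = nf_osf_find(skb, nf_osf_fingers);

	if (genre)
		pr_debug("OS fingerprint match: %s\n", genre);
}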
diff --git a/include/linux/netfilter_bridge.h b/include/linux/netfilter_bridge.h
index b671fdfd212b..fa0686500970 100644
--- a/include/linux/netfilter_bridge.h
+++ b/include/linux/netfilter_bridge.h
@@ -5,17 +5,6 @@
#include <uapi/linux/netfilter_bridge.h>
#include <linux/skbuff.h>
-enum nf_br_hook_priorities {
- NF_BR_PRI_FIRST = INT_MIN,
- NF_BR_PRI_NAT_DST_BRIDGED = -300,
- NF_BR_PRI_FILTER_BRIDGED = -200,
- NF_BR_PRI_BRNF = 0,
- NF_BR_PRI_NAT_DST_OTHER = 100,
- NF_BR_PRI_FILTER_OTHER = 200,
- NF_BR_PRI_NAT_SRC = 300,
- NF_BR_PRI_LAST = INT_MAX,
-};
-
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
int br_handle_frame_finish(struct net *net, struct sock *sk, struct sk_buff *skb);
diff --git a/include/linux/netfilter_ipv4.h b/include/linux/netfilter_ipv4.h
index b31dabfdb453..95ab5cc64422 100644
--- a/include/linux/netfilter_ipv4.h
+++ b/include/linux/netfilter_ipv4.h
@@ -23,9 +23,6 @@ struct nf_queue_entry;
#ifdef CONFIG_INET
__sum16 nf_ip_checksum(struct sk_buff *skb, unsigned int hook,
unsigned int dataoff, u_int8_t protocol);
-__sum16 nf_ip_checksum_partial(struct sk_buff *skb, unsigned int hook,
- unsigned int dataoff, unsigned int len,
- u_int8_t protocol);
int nf_ip_route(struct net *net, struct dst_entry **dst, struct flowi *fl,
bool strict);
int nf_ip_reroute(struct sk_buff *skb, const struct nf_queue_entry *entry);
@@ -35,14 +32,6 @@ static inline __sum16 nf_ip_checksum(struct sk_buff *skb, unsigned int hook,
{
return 0;
}
-static inline __sum16 nf_ip_checksum_partial(struct sk_buff *skb,
- unsigned int hook,
- unsigned int dataoff,
- unsigned int len,
- u_int8_t protocol)
-{
- return 0;
-}
static inline int nf_ip_route(struct net *net, struct dst_entry **dst,
struct flowi *fl, bool strict)
{
diff --git a/include/linux/netfilter_ipv6.h b/include/linux/netfilter_ipv6.h
index 288c597e75b3..c0dc4dd78887 100644
--- a/include/linux/netfilter_ipv6.h
+++ b/include/linux/netfilter_ipv6.h
@@ -30,11 +30,6 @@ struct nf_ipv6_ops {
void (*route_input)(struct sk_buff *skb);
int (*fragment)(struct net *net, struct sock *sk, struct sk_buff *skb,
int (*output)(struct net *, struct sock *, struct sk_buff *));
- __sum16 (*checksum)(struct sk_buff *skb, unsigned int hook,
- unsigned int dataoff, u_int8_t protocol);
- __sum16 (*checksum_partial)(struct sk_buff *skb, unsigned int hook,
- unsigned int dataoff, unsigned int len,
- u_int8_t protocol);
int (*route)(struct net *net, struct dst_entry **dst, struct flowi *fl,
bool strict);
int (*reroute)(struct sk_buff *skb, const struct nf_queue_entry *entry);
diff --git a/include/linux/netlink.h b/include/linux/netlink.h
index f3075d6c7e82..71f121b66ca8 100644
--- a/include/linux/netlink.h
+++ b/include/linux/netlink.h
@@ -170,7 +170,6 @@ netlink_skb_clone(struct sk_buff *skb, gfp_t gfp_mask)
struct netlink_callback {
struct sk_buff *skb;
const struct nlmsghdr *nlh;
- int (*start)(struct netlink_callback *);
int (*dump)(struct sk_buff * skb,
struct netlink_callback *cb);
int (*done)(struct netlink_callback *cb);
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
index 9dee3c23895d..712eed156d09 100644
--- a/include/linux/nfs_xdr.h
+++ b/include/linux/nfs_xdr.h
@@ -1438,6 +1438,8 @@ enum {
NFS_IOHDR_EOF,
NFS_IOHDR_REDO,
NFS_IOHDR_STAT,
+ NFS_IOHDR_RESEND_PNFS,
+ NFS_IOHDR_RESEND_MDS,
};
struct nfs_io_completion;
diff --git a/include/linux/nmi.h b/include/linux/nmi.h
index b8d868d23e79..08f9247e9827 100644
--- a/include/linux/nmi.h
+++ b/include/linux/nmi.h
@@ -45,12 +45,18 @@ extern void touch_softlockup_watchdog(void);
extern void touch_softlockup_watchdog_sync(void);
extern void touch_all_softlockup_watchdogs(void);
extern unsigned int softlockup_panic;
-#else
+
+extern int lockup_detector_online_cpu(unsigned int cpu);
+extern int lockup_detector_offline_cpu(unsigned int cpu);
+#else /* CONFIG_SOFTLOCKUP_DETECTOR */
static inline void touch_softlockup_watchdog_sched(void) { }
static inline void touch_softlockup_watchdog(void) { }
static inline void touch_softlockup_watchdog_sync(void) { }
static inline void touch_all_softlockup_watchdogs(void) { }
-#endif
+
+#define lockup_detector_online_cpu NULL
+#define lockup_detector_offline_cpu NULL
+#endif /* CONFIG_SOFTLOCKUP_DETECTOR */
#ifdef CONFIG_DETECT_HUNG_TASK
void reset_hung_task_detector(void);
diff --git a/include/linux/nvme.h b/include/linux/nvme.h
index 2950ce957656..68e91ef5494c 100644
--- a/include/linux/nvme.h
+++ b/include/linux/nvme.h
@@ -242,7 +242,12 @@ struct nvme_id_ctrl {
__le32 sanicap;
__le32 hmminds;
__le16 hmmaxd;
- __u8 rsvd338[174];
+ __u8 rsvd338[4];
+ __u8 anatt;
+ __u8 anacap;
+ __le32 anagrpmax;
+ __le32 nanagrpid;
+ __u8 rsvd352[160];
__u8 sqes;
__u8 cqes;
__le16 maxcmd;
@@ -254,11 +259,12 @@ struct nvme_id_ctrl {
__le16 awun;
__le16 awupf;
__u8 nvscc;
- __u8 rsvd531;
+ __u8 nwpc;
__le16 acwu;
__u8 rsvd534[2];
__le32 sgls;
- __u8 rsvd540[228];
+ __le32 mnan;
+ __u8 rsvd544[224];
char subnqn[256];
__u8 rsvd1024[768];
__le32 ioccsz;
@@ -312,7 +318,11 @@ struct nvme_id_ns {
__le16 nabspf;
__le16 noiob;
__u8 nvmcap[16];
- __u8 rsvd64[40];
+ __u8 rsvd64[28];
+ __le32 anagrpid;
+ __u8 rsvd96[3];
+ __u8 nsattr;
+ __u8 rsvd100[4];
__u8 nguid[16];
__u8 eui64[8];
struct nvme_lbaf lbaf[16];
@@ -425,6 +435,32 @@ struct nvme_effects_log {
__u8 resv[2048];
};
+enum nvme_ana_state {
+ NVME_ANA_OPTIMIZED = 0x01,
+ NVME_ANA_NONOPTIMIZED = 0x02,
+ NVME_ANA_INACCESSIBLE = 0x03,
+ NVME_ANA_PERSISTENT_LOSS = 0x04,
+ NVME_ANA_CHANGE = 0x0f,
+};
+
+struct nvme_ana_group_desc {
+ __le32 grpid;
+ __le32 nnsids;
+ __le64 chgcnt;
+ __u8 state;
+ __u8 rsvd17[15];
+ __le32 nsids[];
+};
+
+/* flag for the log specific field of the ANA log */
+#define NVME_ANA_LOG_RGO (1 << 0)
+
+struct nvme_ana_rsp_hdr {
+ __le64 chgcnt;
+ __le16 ngrps;
+ __le16 rsvd10[3];
+};
+
enum {
NVME_SMART_CRIT_SPARE = 1 << 0,
NVME_SMART_CRIT_TEMPERATURE = 1 << 1,
@@ -444,11 +480,13 @@ enum {
enum {
NVME_AER_NOTICE_NS_CHANGED = 0x00,
NVME_AER_NOTICE_FW_ACT_STARTING = 0x01,
+ NVME_AER_NOTICE_ANA = 0x03,
};
enum {
NVME_AEN_CFG_NS_ATTR = 1 << 8,
NVME_AEN_CFG_FW_ACT = 1 << 9,
+ NVME_AEN_CFG_ANA_CHANGE = 1 << 11,
};
struct nvme_lba_range_type {
@@ -749,15 +787,22 @@ enum {
NVME_FEAT_HOST_MEM_BUF = 0x0d,
NVME_FEAT_TIMESTAMP = 0x0e,
NVME_FEAT_KATO = 0x0f,
+ NVME_FEAT_HCTM = 0x10,
+ NVME_FEAT_NOPSC = 0x11,
+ NVME_FEAT_RRL = 0x12,
+ NVME_FEAT_PLM_CONFIG = 0x13,
+ NVME_FEAT_PLM_WINDOW = 0x14,
NVME_FEAT_SW_PROGRESS = 0x80,
NVME_FEAT_HOST_ID = 0x81,
NVME_FEAT_RESV_MASK = 0x82,
NVME_FEAT_RESV_PERSIST = 0x83,
+ NVME_FEAT_WRITE_PROTECT = 0x84,
NVME_LOG_ERROR = 0x01,
NVME_LOG_SMART = 0x02,
NVME_LOG_FW_SLOT = 0x03,
NVME_LOG_CHANGED_NS = 0x04,
NVME_LOG_CMD_EFFECTS = 0x05,
+ NVME_LOG_ANA = 0x0c,
NVME_LOG_DISC = 0x70,
NVME_LOG_RESERVATION = 0x80,
NVME_FWACT_REPL = (0 << 3),
@@ -765,6 +810,14 @@ enum {
NVME_FWACT_ACTV = (2 << 3),
};
+/* NVMe Namespace Write Protect State */
+enum {
+ NVME_NS_NO_WRITE_PROTECT = 0,
+ NVME_NS_WRITE_PROTECT,
+ NVME_NS_WRITE_PROTECT_POWER_CYCLE,
+ NVME_NS_WRITE_PROTECT_PERMANENT,
+};
+
#define NVME_MAX_CHANGED_NAMESPACES 1024
struct nvme_identify {
@@ -880,7 +933,7 @@ struct nvme_get_log_page_command {
__u64 rsvd2[2];
union nvme_data_ptr dptr;
__u8 lid;
- __u8 rsvd10;
+ __u8 lsp; /* upper 4 bits reserved */
__le16 numdl;
__le16 numdu;
__u16 rsvd11;
@@ -1111,6 +1164,8 @@ enum {
NVME_SC_SGL_INVALID_OFFSET = 0x16,
NVME_SC_SGL_INVALID_SUBTYPE = 0x17,
+ NVME_SC_NS_WRITE_PROTECTED = 0x20,
+
NVME_SC_LBA_RANGE = 0x80,
NVME_SC_CAP_EXCEEDED = 0x81,
NVME_SC_NS_NOT_READY = 0x82,
@@ -1180,6 +1235,13 @@ enum {
NVME_SC_ACCESS_DENIED = 0x286,
NVME_SC_UNWRITTEN_BLOCK = 0x287,
+ /*
+ * Path-related Errors:
+ */
+ NVME_SC_ANA_PERSISTENT_LOSS = 0x301,
+ NVME_SC_ANA_INACCESSIBLE = 0x302,
+ NVME_SC_ANA_TRANSITION = 0x303,
+
NVME_SC_DNR = 0x4000,
};
diff --git a/include/linux/openvswitch.h b/include/linux/openvswitch.h
index e6b240b6196c..379affc63e24 100644
--- a/include/linux/openvswitch.h
+++ b/include/linux/openvswitch.h
@@ -21,4 +21,9 @@
#include <uapi/linux/openvswitch.h>
+#define OVS_CLONE_ATTR_EXEC 0 /* Specify an u32 value. When nonzero,
+ * actions in clone will not change flow
+ * keys. False otherwise.
+ */
+
#endif /* _LINUX_OPENVSWITCH_H */
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 0543800ec565..9b87f1936906 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -261,6 +261,9 @@ enum pci_bus_speed {
PCI_SPEED_UNKNOWN = 0xff,
};
+enum pci_bus_speed pcie_get_speed_cap(struct pci_dev *dev);
+enum pcie_link_width pcie_get_width_cap(struct pci_dev *dev);
+
struct pci_cap_saved_data {
u16 cap_nr;
bool cap_extended;
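
A small sketch using the two newly exposed capability helpers; report_link_cap() is illustrative.

#include <linux/pci.h>

static void report_link_cap(struct pci_dev *pdev)
{
	enum pci_bus_speed speed = pcie_get_speed_cap(pdev);
	enum pcie_link_width width = pcie_get_width_cap(pdev);

	dev_info(&pdev->dev, "max link capability: speed %#x, width x%d\n",
		 (int)speed, (int)width);
}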
diff --git a/include/linux/percpu_ida.h b/include/linux/percpu_ida.h
deleted file mode 100644
index 07d78e4653bc..000000000000
--- a/include/linux/percpu_ida.h
+++ /dev/null
@@ -1,83 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __PERCPU_IDA_H__
-#define __PERCPU_IDA_H__
-
-#include <linux/types.h>
-#include <linux/bitops.h>
-#include <linux/init.h>
-#include <linux/sched.h>
-#include <linux/spinlock_types.h>
-#include <linux/wait.h>
-#include <linux/cpumask.h>
-
-struct percpu_ida_cpu;
-
-struct percpu_ida {
- /*
- * number of tags available to be allocated, as passed to
- * percpu_ida_init()
- */
- unsigned nr_tags;
- unsigned percpu_max_size;
- unsigned percpu_batch_size;
-
- struct percpu_ida_cpu __percpu *tag_cpu;
-
- /*
- * Bitmap of cpus that (may) have tags on their percpu freelists:
- * steal_tags() uses this to decide when to steal tags, and which cpus
- * to try stealing from.
- *
- * It's ok for a freelist to be empty when its bit is set - steal_tags()
- * will just keep looking - but the bitmap _must_ be set whenever a
- * percpu freelist does have tags.
- */
- cpumask_t cpus_have_tags;
-
- struct {
- spinlock_t lock;
- /*
- * When we go to steal tags from another cpu (see steal_tags()),
- * we want to pick a cpu at random. Cycling through them every
- * time we steal is a bit easier and more or less equivalent:
- */
- unsigned cpu_last_stolen;
-
- /* For sleeping on allocation failure */
- wait_queue_head_t wait;
-
- /*
- * Global freelist - it's a stack where nr_free points to the
- * top
- */
- unsigned nr_free;
- unsigned *freelist;
- } ____cacheline_aligned_in_smp;
-};
-
-/*
- * Number of tags we move between the percpu freelist and the global freelist at
- * a time
- */
-#define IDA_DEFAULT_PCPU_BATCH_MOVE 32U
-/* Max size of percpu freelist, */
-#define IDA_DEFAULT_PCPU_SIZE ((IDA_DEFAULT_PCPU_BATCH_MOVE * 3) / 2)
-
-int percpu_ida_alloc(struct percpu_ida *pool, int state);
-void percpu_ida_free(struct percpu_ida *pool, unsigned tag);
-
-void percpu_ida_destroy(struct percpu_ida *pool);
-int __percpu_ida_init(struct percpu_ida *pool, unsigned long nr_tags,
- unsigned long max_size, unsigned long batch_size);
-static inline int percpu_ida_init(struct percpu_ida *pool, unsigned long nr_tags)
-{
- return __percpu_ida_init(pool, nr_tags, IDA_DEFAULT_PCPU_SIZE,
- IDA_DEFAULT_PCPU_BATCH_MOVE);
-}
-
-typedef int (*percpu_ida_cb)(unsigned, void *);
-int percpu_ida_for_each_free(struct percpu_ida *pool, percpu_ida_cb fn,
- void *data);
-
-unsigned percpu_ida_free_tags(struct percpu_ida *pool, int cpu);
-#endif /* __PERCPU_IDA_H__ */
diff --git a/include/linux/perf/arm_pmu.h b/include/linux/perf/arm_pmu.h
index ad5444491975..10f92e1d8e7b 100644
--- a/include/linux/perf/arm_pmu.h
+++ b/include/linux/perf/arm_pmu.h
@@ -25,6 +25,12 @@
*/
#define ARMPMU_MAX_HWEVENTS 32
+/*
+ * ARM PMU hw_event flags
+ */
+/* Event uses a 64bit counter */
+#define ARMPMU_EVT_64BIT 1
+
#define HW_OP_UNSUPPORTED 0xFFFF
#define C(_x) PERF_COUNT_HW_CACHE_##_x
#define CACHE_OP_UNSUPPORTED 0xFFFF
@@ -87,14 +93,13 @@ struct arm_pmu {
struct perf_event *event);
int (*set_event_filter)(struct hw_perf_event *evt,
struct perf_event_attr *attr);
- u32 (*read_counter)(struct perf_event *event);
- void (*write_counter)(struct perf_event *event, u32 val);
+ u64 (*read_counter)(struct perf_event *event);
+ void (*write_counter)(struct perf_event *event, u64 val);
void (*start)(struct arm_pmu *);
void (*stop)(struct arm_pmu *);
void (*reset)(void *);
int (*map_event)(struct perf_event *event);
int num_events;
- u64 max_period;
bool secure_access; /* 32-bit ARM only */
#define ARMV8_PMUV3_MAX_COMMON_EVENTS 0x40
DECLARE_BITMAP(pmceid_bitmap, ARMV8_PMUV3_MAX_COMMON_EVENTS);
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 1fa12887ec02..53c500f0ca79 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -490,7 +490,7 @@ struct perf_addr_filters_head {
};
/**
- * enum perf_event_state - the states of a event
+ * enum perf_event_state - the states of an event:
*/
enum perf_event_state {
PERF_EVENT_STATE_DEAD = -4,
@@ -1130,6 +1130,7 @@ extern void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct
extern struct perf_callchain_entry *
get_perf_callchain(struct pt_regs *regs, u32 init_nr, bool kernel, bool user,
u32 max_stack, bool crosstask, bool add_mark);
+extern struct perf_callchain_entry *perf_callchain(struct perf_event *event, struct pt_regs *regs);
extern int get_callchain_buffers(int max_stack);
extern void put_callchain_buffers(void);
diff --git a/include/linux/phy.h b/include/linux/phy.h
index 6cd09098427c..cd6f637cbbfb 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -825,6 +825,16 @@ static inline bool phy_interrupt_is_valid(struct phy_device *phydev)
}
/**
+ * phy_polling_mode - Convenience function for testing whether polling is
+ * used to detect PHY status changes
+ * @phydev: the phy_device struct
+ */
+static inline bool phy_polling_mode(struct phy_device *phydev)
+{
+ return phydev->irq == PHY_POLL;
+}
+
+/**
* phy_is_internal - Convenience function for testing if a PHY is internal
* @phydev: the phy_device struct
*/
@@ -942,6 +952,8 @@ void phy_start(struct phy_device *phydev);
void phy_stop(struct phy_device *phydev);
int phy_start_aneg(struct phy_device *phydev);
int phy_aneg_done(struct phy_device *phydev);
+int phy_speed_down(struct phy_device *phydev, bool sync);
+int phy_speed_up(struct phy_device *phydev);
int phy_stop_interrupts(struct phy_device *phydev);
int phy_restart_aneg(struct phy_device *phydev);
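
A sketch of the intended pairing for the new speed helpers, assuming a MAC driver that keeps the PHY link up for Wake-on-LAN while suspended; struct my_priv and the wol_enabled flag are placeholders.

#include <linux/phy.h>

struct my_priv {
	struct phy_device *phydev;
	bool wol_enabled;
};

static int my_mac_suspend(struct device *dev)
{
	struct my_priv *priv = dev_get_drvdata(dev);

	if (priv->wol_enabled)
		phy_speed_down(priv->phydev, false);	/* don't wait for renegotiation */
	return 0;
}

static int my_mac_resume(struct device *dev)
{
	struct my_priv *priv = dev_get_drvdata(dev);

	if (priv->wol_enabled)
		phy_speed_up(priv->phydev);
	return 0;
}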
diff --git a/include/linux/phylink.h b/include/linux/phylink.h
index 50eeae025f1e..021fc6595856 100644
--- a/include/linux/phylink.h
+++ b/include/linux/phylink.h
@@ -234,5 +234,6 @@ int phylink_mii_ioctl(struct phylink *, struct ifreq *, int);
#define phylink_test(bm, mode) __phylink_do_bit(test_bit, bm, mode)
void phylink_set_port_modes(unsigned long *bits);
+void phylink_helper_basex_speed(struct phylink_link_state *state);
#endif
diff --git a/include/linux/pinctrl/pinconf.h b/include/linux/pinctrl/pinconf.h
index 09eb80f2574a..8dd85d302b90 100644
--- a/include/linux/pinctrl/pinconf.h
+++ b/include/linux/pinctrl/pinconf.h
@@ -28,7 +28,8 @@ struct seq_file;
* is not available on this controller this should return -ENOTSUPP
* and if it is available but disabled it should return -EINVAL
* @pin_config_set: configure an individual pin
- * @pin_config_group_get: get configurations for an entire pin group
+ * @pin_config_group_get: get configurations for an entire pin group; should
+ * return -ENOTSUPP and -EINVAL using the same rules as pin_config_get.
* @pin_config_group_set: configure all pins in a group
* @pin_config_dbg_parse_modify: optional debugfs to modify a pin configuration
* @pin_config_dbg_show: optional debugfs display hook that will provide
diff --git a/include/linux/platform_data/bt-nokia-h4p.h b/include/linux/platform_data/bt-nokia-h4p.h
deleted file mode 100644
index 30d169dfadf3..000000000000
--- a/include/linux/platform_data/bt-nokia-h4p.h
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- * This file is part of Nokia H4P bluetooth driver
- *
- * Copyright (C) 2010 Nokia Corporation.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA
- *
- */
-
-
-/**
- * struct hci_h4p_platform data - hci_h4p Platform data structure
- */
-struct hci_h4p_platform_data {
- int chip_type;
- int bt_sysclk;
- unsigned int bt_wakeup_gpio;
- unsigned int host_wakeup_gpio;
- unsigned int reset_gpio;
- int reset_gpio_shared;
- unsigned int uart_irq;
- phys_addr_t uart_base;
- const char *uart_iclk;
- const char *uart_fclk;
- void (*set_pm_limits)(struct device *dev, bool set);
-};
diff --git a/include/linux/platform_data/gpio-davinci.h b/include/linux/platform_data/gpio-davinci.h
index 90ae19ca828f..57a5a35e0073 100644
--- a/include/linux/platform_data/gpio-davinci.h
+++ b/include/linux/platform_data/gpio-davinci.h
@@ -22,6 +22,7 @@
#include <asm-generic/gpio.h>
#define MAX_REGS_BANKS 5
+#define MAX_INT_PER_BANK 32
struct davinci_gpio_platform_data {
u32 ngpio;
@@ -41,7 +42,7 @@ struct davinci_gpio_controller {
spinlock_t lock;
void __iomem *regs[MAX_REGS_BANKS];
int gpio_unbanked;
- unsigned int base_irq;
+ int irqs[MAX_INT_PER_BANK];
unsigned int base;
};
diff --git a/include/linux/platform_data/jz4740/jz4740_nand.h b/include/linux/platform_data/jz4740/jz4740_nand.h
new file mode 100644
index 000000000000..bc571f6d5ced
--- /dev/null
+++ b/include/linux/platform_data/jz4740/jz4740_nand.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright (C) 2009-2010, Lars-Peter Clausen <lars@metafoo.de>
+ * JZ4740 SoC NAND controller driver
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#ifndef __JZ4740_NAND_H__
+#define __JZ4740_NAND_H__
+
+#include <linux/mtd/rawnand.h>
+#include <linux/mtd/partitions.h>
+
+#define JZ_NAND_NUM_BANKS 4
+
+struct jz_nand_platform_data {
+ int num_partitions;
+ struct mtd_partition *partitions;
+
+ unsigned char banks[JZ_NAND_NUM_BANKS];
+
+ void (*ident_callback)(struct platform_device *, struct mtd_info *,
+ struct mtd_partition **, int *num_partitions);
+};
+
+#endif
diff --git a/include/linux/platform_data/media/sii9234.h b/include/linux/platform_data/media/sii9234.h
deleted file mode 100644
index 6a4a809fe9a3..000000000000
--- a/include/linux/platform_data/media/sii9234.h
+++ /dev/null
@@ -1,24 +0,0 @@
-/*
- * Driver header for SII9234 MHL converter chip.
- *
- * Copyright (c) 2011 Samsung Electronics, Co. Ltd
- * Contact: Tomasz Stanislawski <t.stanislaws@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#ifndef SII9234_H
-#define SII9234_H
-
-/**
- * @gpio_n_reset: GPIO driving nRESET pin
- */
-
-struct sii9234_platform_data {
- int gpio_n_reset;
-};
-
-#endif /* SII9234_H */
diff --git a/include/linux/platform_data/mmp_dma.h b/include/linux/platform_data/mmp_dma.h
index d1397c8ed94e..6397b9c8149a 100644
--- a/include/linux/platform_data/mmp_dma.h
+++ b/include/linux/platform_data/mmp_dma.h
@@ -12,9 +12,13 @@
#ifndef MMP_DMA_H
#define MMP_DMA_H
+struct dma_slave_map;
+
struct mmp_dma_platdata {
int dma_channels;
int nb_requestors;
+ int slave_map_cnt;
+ const struct dma_slave_map *slave_map;
};
#endif /* MMP_DMA_H */
diff --git a/include/linux/platform_data/mtd-orion_nand.h b/include/linux/platform_data/mtd-orion_nand.h
index a7ce77c7c1a8..34828eb85982 100644
--- a/include/linux/platform_data/mtd-orion_nand.h
+++ b/include/linux/platform_data/mtd-orion_nand.h
@@ -12,7 +12,6 @@
*/
struct orion_nand_data {
struct mtd_partition *parts;
- int (*dev_ready)(struct mtd_info *mtd);
u32 nr_parts;
u8 ale; /* address line number connected to ALE */
u8 cle; /* address line number connected to CLE */
diff --git a/include/linux/platform_data/txx9/ndfmc.h b/include/linux/platform_data/txx9/ndfmc.h
new file mode 100644
index 000000000000..fc172627d54e
--- /dev/null
+++ b/include/linux/platform_data/txx9/ndfmc.h
@@ -0,0 +1,30 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * (C) Copyright TOSHIBA CORPORATION 2007
+ */
+#ifndef __TXX9_NDFMC_H
+#define __TXX9_NDFMC_H
+
+#define NDFMC_PLAT_FLAG_USE_BSPRT 0x01
+#define NDFMC_PLAT_FLAG_NO_RSTR 0x02
+#define NDFMC_PLAT_FLAG_HOLDADD 0x04
+#define NDFMC_PLAT_FLAG_DUMMYWRITE 0x08
+
+struct txx9ndfmc_platform_data {
+ unsigned int shift;
+ unsigned int gbus_clock;
+ unsigned int hold; /* hold time in nanosecond */
+ unsigned int spw; /* strobe pulse width in nanosecond */
+ unsigned int flags;
+ unsigned char ch_mask; /* available channel bitmask */
+ unsigned char wp_mask; /* write-protect bitmask */
+ unsigned char wide_mask; /* 16bit-nand bitmask */
+};
+
+void txx9_ndfmc_init(unsigned long baseaddr,
+ const struct txx9ndfmc_platform_data *plat_data);
+
+#endif /* __TXX9_NDFMC_H */
diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h
index 9206a4fef9ac..776c546d581a 100644
--- a/include/linux/pm_domain.h
+++ b/include/linux/pm_domain.h
@@ -234,11 +234,13 @@ struct generic_pm_domain *of_genpd_remove_last(struct device_node *np);
int of_genpd_parse_idle_states(struct device_node *dn,
struct genpd_power_state **states, int *n);
unsigned int of_genpd_opp_to_performance_state(struct device *dev,
- struct device_node *opp_node);
+ struct device_node *np);
int genpd_dev_pm_attach(struct device *dev);
struct device *genpd_dev_pm_attach_by_id(struct device *dev,
unsigned int index);
+struct device *genpd_dev_pm_attach_by_name(struct device *dev,
+ char *name);
#else /* !CONFIG_PM_GENERIC_DOMAINS_OF */
static inline int of_genpd_add_provider_simple(struct device_node *np,
struct generic_pm_domain *genpd)
@@ -274,9 +276,9 @@ static inline int of_genpd_parse_idle_states(struct device_node *dn,
static inline unsigned int
of_genpd_opp_to_performance_state(struct device *dev,
- struct device_node *opp_node)
+ struct device_node *np)
{
- return -ENODEV;
+ return 0;
}
static inline int genpd_dev_pm_attach(struct device *dev)
@@ -290,6 +292,12 @@ static inline struct device *genpd_dev_pm_attach_by_id(struct device *dev,
return NULL;
}
+static inline struct device *genpd_dev_pm_attach_by_name(struct device *dev,
+ char *name)
+{
+ return NULL;
+}
+
static inline
struct generic_pm_domain *of_genpd_remove_last(struct device_node *np)
{
@@ -301,6 +309,8 @@ struct generic_pm_domain *of_genpd_remove_last(struct device_node *np)
int dev_pm_domain_attach(struct device *dev, bool power_on);
struct device *dev_pm_domain_attach_by_id(struct device *dev,
unsigned int index);
+struct device *dev_pm_domain_attach_by_name(struct device *dev,
+ char *name);
void dev_pm_domain_detach(struct device *dev, bool power_off);
void dev_pm_domain_set(struct device *dev, struct dev_pm_domain *pd);
#else
@@ -313,6 +323,11 @@ static inline struct device *dev_pm_domain_attach_by_id(struct device *dev,
{
return NULL;
}
+static inline struct device *dev_pm_domain_attach_by_name(struct device *dev,
+ char *name)
+{
+ return NULL;
+}
static inline void dev_pm_domain_detach(struct device *dev, bool power_off) {}
static inline void dev_pm_domain_set(struct device *dev,
struct dev_pm_domain *pd) {}
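
A consumer-side sketch for the new *_attach_by_name() variants, assuming a device whose DT node lists several power domains; the "perf" name and struct my_priv are placeholders.

#include <linux/err.h>
#include <linux/pm_domain.h>

struct my_priv {
	struct device *pd_perf;		/* virtual device for the "perf" domain */
};

static int my_attach_domains(struct device *dev, struct my_priv *priv)
{
	priv->pd_perf = dev_pm_domain_attach_by_name(dev, "perf");
	if (IS_ERR(priv->pd_perf))
		return PTR_ERR(priv->pd_perf);
	if (!priv->pd_perf)
		return -ENODEV;		/* no such domain described for this device */
	return 0;
}

static void my_detach_domains(struct my_priv *priv)
{
	dev_pm_domain_detach(priv->pd_perf, true);
}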
diff --git a/include/linux/poll.h b/include/linux/poll.h
index fdf86b4cbc71..7e0fdcf905d2 100644
--- a/include/linux/poll.h
+++ b/include/linux/poll.h
@@ -74,18 +74,18 @@ static inline void init_poll_funcptr(poll_table *pt, poll_queue_proc qproc)
pt->_key = ~(__poll_t)0; /* all events enabled */
}
-static inline bool file_has_poll_mask(struct file *file)
+static inline bool file_can_poll(struct file *file)
{
- return file->f_op->get_poll_head && file->f_op->poll_mask;
+ return file->f_op->poll;
}
-static inline bool file_can_poll(struct file *file)
+static inline __poll_t vfs_poll(struct file *file, struct poll_table_struct *pt)
{
- return file->f_op->poll || file_has_poll_mask(file);
+ if (unlikely(!file->f_op->poll))
+ return DEFAULT_POLLMASK;
+ return file->f_op->poll(file, pt);
}
-__poll_t vfs_poll(struct file *file, struct poll_table_struct *pt);
-
struct poll_table_entry {
struct file *filp;
__poll_t key;
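
A sketch of the simplified helpers: a one-shot readiness probe that passes a NULL poll table, so only the current event mask is returned and no waitqueue entry is registered; my_file_readable() is illustrative.

#include <linux/poll.h>

static bool my_file_readable(struct file *file)
{
	__poll_t mask;

	if (!file_can_poll(file))
		return true;	/* treat non-pollable files as always ready */

	mask = vfs_poll(file, NULL);
	return mask & (EPOLLIN | EPOLLRDNORM);
}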
diff --git a/include/linux/posix-timers.h b/include/linux/posix-timers.h
index c85704fcdbd2..ee7e987ea1b4 100644
--- a/include/linux/posix-timers.h
+++ b/include/linux/posix-timers.h
@@ -95,8 +95,8 @@ struct k_itimer {
clockid_t it_clock;
timer_t it_id;
int it_active;
- int it_overrun;
- int it_overrun_last;
+ s64 it_overrun;
+ s64 it_overrun_last;
int it_requeue_pending;
int it_sigev_notify;
ktime_t it_interval;
diff --git a/include/linux/printk.h b/include/linux/printk.h
index 6d7e800affd8..cf3eccfe1543 100644
--- a/include/linux/printk.h
+++ b/include/linux/printk.h
@@ -50,15 +50,15 @@ static inline const char *printk_skip_headers(const char *buffer)
/* We show everything that is MORE important than this.. */
#define CONSOLE_LOGLEVEL_SILENT 0 /* Mum's the word */
#define CONSOLE_LOGLEVEL_MIN 1 /* Minimum loglevel we let people use */
-#define CONSOLE_LOGLEVEL_QUIET 4 /* Shhh ..., when booted with "quiet" */
#define CONSOLE_LOGLEVEL_DEBUG 10 /* issue debug messages */
#define CONSOLE_LOGLEVEL_MOTORMOUTH 15 /* You can't shut this one up */
/*
- * Default used to be hard-coded at 7, we're now allowing it to be set from
- * kernel config.
+ * Default used to be hard-coded at 7, quiet used to be hardcoded at 4,
+ * we're now allowing both to be set from kernel config.
*/
#define CONSOLE_LOGLEVEL_DEFAULT CONFIG_CONSOLE_LOGLEVEL_DEFAULT
+#define CONSOLE_LOGLEVEL_QUIET CONFIG_CONSOLE_LOGLEVEL_QUIET
extern int console_printk[];
@@ -148,9 +148,13 @@ void early_printk(const char *s, ...) { }
#ifdef CONFIG_PRINTK_NMI
extern void printk_nmi_enter(void);
extern void printk_nmi_exit(void);
+extern void printk_nmi_direct_enter(void);
+extern void printk_nmi_direct_exit(void);
#else
static inline void printk_nmi_enter(void) { }
static inline void printk_nmi_exit(void) { }
+static inline void printk_nmi_direct_enter(void) { }
+static inline void printk_nmi_direct_exit(void) { }
#endif /* PRINTK_NMI */
#ifdef CONFIG_PRINTK
diff --git a/include/linux/pti.h b/include/linux/pti.h
index 0174883a935a..1a941efcaa62 100644
--- a/include/linux/pti.h
+++ b/include/linux/pti.h
@@ -6,6 +6,7 @@
#include <asm/pti.h>
#else
static inline void pti_init(void) { }
+static inline void pti_finalize(void) { }
#endif
#endif
diff --git a/include/linux/pxa2xx_ssp.h b/include/linux/pxa2xx_ssp.h
index 8461b18e4608..13b4244d44c1 100644
--- a/include/linux/pxa2xx_ssp.h
+++ b/include/linux/pxa2xx_ssp.h
@@ -171,6 +171,14 @@
#define SSACD_SCDB (1 << 3) /* SSPSYSCLK Divider Bypass */
#define SSACD_ACPS(x) ((x) << 4) /* Audio clock PLL select */
#define SSACD_ACDS(x) ((x) << 0) /* Audio clock divider select */
+#define SSACD_ACDS_1 (0)
+#define SSACD_ACDS_2 (1)
+#define SSACD_ACDS_4 (2)
+#define SSACD_ACDS_8 (3)
+#define SSACD_ACDS_16 (4)
+#define SSACD_ACDS_32 (5)
+#define SSACD_SCDB_4X (0)
+#define SSACD_SCDB_1X (1)
#define SSACD_SCDX8 (1 << 7) /* SYSCLK division ratio select */
/* LPSS SSP */
@@ -212,8 +220,6 @@ struct ssp_device {
int type;
int use_count;
int irq;
- int drcmr_rx;
- int drcmr_tx;
struct device_node *of_node;
};
diff --git a/include/linux/qcom_scm.h b/include/linux/qcom_scm.h
index b401b962afff..5d65521260b3 100644
--- a/include/linux/qcom_scm.h
+++ b/include/linux/qcom_scm.h
@@ -87,6 +87,10 @@ static inline int qcom_scm_pas_mem_setup(u32 peripheral, phys_addr_t addr,
static inline int
qcom_scm_pas_auth_and_reset(u32 peripheral) { return -ENODEV; }
static inline int qcom_scm_pas_shutdown(u32 peripheral) { return -ENODEV; }
+static inline int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz,
+ unsigned int *src,
+ struct qcom_scm_vmperm *newvm,
+ int dest_cnt) { return -ENODEV; }
static inline void qcom_scm_cpu_power_down(u32 flags) {}
static inline u32 qcom_scm_get_version(void) { return 0; }
static inline u32
diff --git a/include/linux/qed/qed_eth_if.h b/include/linux/qed/qed_eth_if.h
index 2978fa4add42..a1310482c4ed 100644
--- a/include/linux/qed/qed_eth_if.h
+++ b/include/linux/qed/qed_eth_if.h
@@ -39,6 +39,10 @@
#include <linux/qed/qed_if.h>
#include <linux/qed/qed_iov_if.h>
+/* 64 max queues * (1 rx + 4 tx-cos + 1 xdp) */
+#define QED_MIN_L2_CONS (2 + NUM_PHYS_TCS_4PORT_K2)
+#define QED_MAX_L2_CONS (64 * (QED_MIN_L2_CONS))
+
struct qed_queue_start_common_params {
/* Should always be relative to entity sending this. */
u8 vport_id;
@@ -49,6 +53,8 @@ struct qed_queue_start_common_params {
struct qed_sb_info *p_sb;
u8 sb_idx;
+
+ u8 tc;
};
struct qed_rxq_start_ret_params {
diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h
index b4040023cbfb..8cd34645e892 100644
--- a/include/linux/qed/qed_if.h
+++ b/include/linux/qed/qed_if.h
@@ -759,6 +759,9 @@ struct qed_generic_tlvs {
u8 mac[QED_TLV_MAC_COUNT][ETH_ALEN];
};
+#define QED_I2C_DEV_ADDR_A0 0xA0
+#define QED_I2C_DEV_ADDR_A2 0xA2
+
#define QED_NVM_SIGNATURE 0x12435687
enum qed_nvm_flash_cmd {
@@ -1026,6 +1029,18 @@ struct qed_common_ops {
* @param enabled - true iff WoL should be enabled.
*/
int (*update_wol) (struct qed_dev *cdev, bool enabled);
+
+/**
+ * @brief read_module_eeprom
+ *
+ * @param cdev
+ * @param buf - buffer
+ * @param dev_addr - PHY device memory region
+ * @param offset - offset into eeprom contents to be read
+ * @param len - buffer length, i.e., max bytes to be read
+ */
+ int (*read_module_eeprom)(struct qed_dev *cdev,
+ char *buf, u8 dev_addr, u32 offset, u32 len);
};
#define MASK_FIELD(_name, _value) \
diff --git a/include/linux/random.h b/include/linux/random.h
index 2ddf13b4281e..445a0ea4ff49 100644
--- a/include/linux/random.h
+++ b/include/linux/random.h
@@ -36,9 +36,10 @@ extern void add_interrupt_randomness(int irq, int irq_flags) __latent_entropy;
extern void get_random_bytes(void *buf, int nbytes);
extern int wait_for_random_bytes(void);
+extern bool rng_is_initialized(void);
extern int add_random_ready_callback(struct random_ready_callback *rdy);
extern void del_random_ready_callback(struct random_ready_callback *rdy);
-extern void get_random_bytes_arch(void *buf, int nbytes);
+extern int __must_check get_random_bytes_arch(void *buf, int nbytes);
#ifndef MODULE
extern const struct file_operations random_fops, urandom_fops;
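
A sketch honouring the new __must_check return value (the number of bytes the arch RNG actually supplied), with rng_is_initialized() available to callers that care whether the CRNG is ready; my_fill_random() is illustrative.

#include <linux/printk.h>
#include <linux/random.h>

static void my_fill_random(void *buf, int len)
{
	int got = get_random_bytes_arch(buf, len);

	if (!rng_is_initialized())
		pr_warn_once("CRNG not yet initialized\n");

	if (got < len)
		get_random_bytes(buf + got, len - got);
}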
diff --git a/include/linux/rculist.h b/include/linux/rculist.h
index 36df6ccbc874..4786c2235b98 100644
--- a/include/linux/rculist.h
+++ b/include/linux/rculist.h
@@ -396,7 +396,16 @@ static inline void list_splice_tail_init_rcu(struct list_head *list,
* @member: the name of the list_head within the struct.
*
* Continue to iterate over list of given type, continuing after
- * the current position.
+ * the current position which must have been in the list when the RCU read
+ * lock was taken.
+ * This would typically require either that you obtained the node from a
+ * previous walk of the list in the same RCU read-side critical section, or
+ * that you held some sort of non-RCU reference (such as a reference count)
+ * to keep the node alive *and* in the list.
+ *
+ * This iterator is similar to list_for_each_entry_from_rcu() except
+ * this starts after the given position and that one starts at the given
+ * position.
*/
#define list_for_each_entry_continue_rcu(pos, head, member) \
for (pos = list_entry_rcu(pos->member.next, typeof(*pos), member); \
@@ -411,6 +420,14 @@ static inline void list_splice_tail_init_rcu(struct list_head *list,
*
* Iterate over the tail of a list starting from a given position,
* which must have been in the list when the RCU read lock was taken.
+ * This would typically require either that you obtained the node from a
+ * previous walk of the list in the same RCU read-side critical section, or
+ * that you held some sort of non-RCU reference (such as a reference count)
+ * to keep the node alive *and* in the list.
+ *
+ * This iterator is similar to list_for_each_entry_continue_rcu() except
+ * this starts from the given position and that one starts from the position
+ * after the given position.
*/
#define list_for_each_entry_from_rcu(pos, head, member) \
for (; &(pos)->member != (head); \
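
A sketch of the documented pattern: resuming a walk from a node that was kept alive (e.g. via a reference count) across a break in the read-side critical section; struct my_item and my_items are placeholders.

#include <linux/printk.h>
#include <linux/rculist.h>

struct my_item {
	struct list_head link;
	int payload;
};

static void my_resume_walk(struct list_head *my_items, struct my_item *from)
{
	struct my_item *pos = from;	/* must still be on the list */

	rcu_read_lock();
	list_for_each_entry_from_rcu(pos, my_items, link) {
		/* processes "from" first, then the tail of the list */
		pr_debug("item %d\n", pos->payload);
	}
	rcu_read_unlock();
}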
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 65163aa0bb04..75e5b393cf44 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -64,7 +64,6 @@ void rcu_barrier_tasks(void);
void __rcu_read_lock(void);
void __rcu_read_unlock(void);
-void rcu_read_unlock_special(struct task_struct *t);
void synchronize_rcu(void);
/*
@@ -159,11 +158,11 @@ static inline void rcu_init_nohz(void) { }
} while (0)
/*
- * Note a voluntary context switch for RCU-tasks benefit. This is a
- * macro rather than an inline function to avoid #include hell.
+ * Note a quasi-voluntary context switch for RCU-tasks's benefit.
+ * This is a macro rather than an inline function to avoid #include hell.
*/
#ifdef CONFIG_TASKS_RCU
-#define rcu_note_voluntary_context_switch_lite(t) \
+#define rcu_tasks_qs(t) \
do { \
if (READ_ONCE((t)->rcu_tasks_holdout)) \
WRITE_ONCE((t)->rcu_tasks_holdout, false); \
@@ -171,14 +170,14 @@ static inline void rcu_init_nohz(void) { }
#define rcu_note_voluntary_context_switch(t) \
do { \
rcu_all_qs(); \
- rcu_note_voluntary_context_switch_lite(t); \
+ rcu_tasks_qs(t); \
} while (0)
void call_rcu_tasks(struct rcu_head *head, rcu_callback_t func);
void synchronize_rcu_tasks(void);
void exit_tasks_rcu_start(void);
void exit_tasks_rcu_finish(void);
#else /* #ifdef CONFIG_TASKS_RCU */
-#define rcu_note_voluntary_context_switch_lite(t) do { } while (0)
+#define rcu_tasks_qs(t) do { } while (0)
#define rcu_note_voluntary_context_switch(t) rcu_all_qs()
#define call_rcu_tasks call_rcu_sched
#define synchronize_rcu_tasks synchronize_sched
@@ -195,8 +194,8 @@ static inline void exit_tasks_rcu_finish(void) { }
*/
#define cond_resched_tasks_rcu_qs() \
do { \
- if (!cond_resched()) \
- rcu_note_voluntary_context_switch_lite(current); \
+ rcu_tasks_qs(current); \
+ cond_resched(); \
} while (0)
/*
@@ -567,8 +566,8 @@ static inline void rcu_preempt_sleep_check(void) { }
* This is simply an identity function, but it documents where a pointer
* is handed off from RCU to some other synchronization mechanism, for
* example, reference counting or locking. In C11, it would map to
- * kill_dependency(). It could be used as follows:
- * ``
+ * kill_dependency(). It could be used as follows::
+ *
* rcu_read_lock();
* p = rcu_dereference(gp);
* long_lived = is_long_lived(p);
@@ -579,7 +578,6 @@ static inline void rcu_preempt_sleep_check(void) { }
* p = rcu_pointer_handoff(p);
* }
* rcu_read_unlock();
- *``
*/
#define rcu_pointer_handoff(p) (p)
diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h
index 7b3c82e8a625..8d9a0ea8f0b5 100644
--- a/include/linux/rcutiny.h
+++ b/include/linux/rcutiny.h
@@ -93,7 +93,7 @@ static inline void kfree_call_rcu(struct rcu_head *head,
#define rcu_note_context_switch(preempt) \
do { \
rcu_sched_qs(); \
- rcu_note_voluntary_context_switch_lite(current); \
+ rcu_tasks_qs(current); \
} while (0)
static inline int rcu_needs_cpu(u64 basemono, u64 *nextevt)
diff --git a/include/linux/reciprocal_div.h b/include/linux/reciprocal_div.h
index e031e9f2f9d8..585ce89c0f33 100644
--- a/include/linux/reciprocal_div.h
+++ b/include/linux/reciprocal_div.h
@@ -25,6 +25,9 @@ struct reciprocal_value {
u8 sh1, sh2;
};
+/* "reciprocal_value" and "reciprocal_divide" together implement the basic
+ * version of the algorithm described in Figure 4.1 of the paper.
+ */
struct reciprocal_value reciprocal_value(u32 d);
static inline u32 reciprocal_divide(u32 a, struct reciprocal_value R)
@@ -33,4 +36,69 @@ static inline u32 reciprocal_divide(u32 a, struct reciprocal_value R)
return (t + ((a - t) >> R.sh1)) >> R.sh2;
}
+struct reciprocal_value_adv {
+ u32 m;
+ u8 sh, exp;
+ bool is_wide_m;
+};
+
+/* "reciprocal_value_adv" implements the advanced version of the algorithm
+ * described in Figure 4.2 of the paper except when "divisor > (1U << 31)" whose
+ * ceil(log2(d)) result will be 32 which then requires u128 divide on host. The
+ * exception case could be easily handled before calling "reciprocal_value_adv".
+ *
+ * The advanced version requires more complex calculation to get the reciprocal
+ * multiplier and other control variables, but then could reduce the required
+ * emulation operations.
+ *
+ * It makes no sense to use this advanced version for host divide emulation,
+ * those extra complexities for calculating multiplier etc could completely
+ * waive our saving on emulation operations.
+ *
+ * However, it makes sense to use it for JIT divide code generation for which
+ * we are willing to trade performance of JITed code with that of host. As shown
+ * by the following pseudo code, the required emulation operations could go down
+ * from 6 (the basic version) to 3 or 4.
+ *
+ * To use the result of "reciprocal_value_adv", suppose we want to calculate
+ * n/d, the pseudo C code will be:
+ *
+ * struct reciprocal_value_adv rvalue;
+ * u8 pre_shift, exp;
+ *
+ * // handle exception case.
+ * if (d >= (1U << 31)) {
+ * result = n >= d;
+ * return;
+ * }
+ *
+ * rvalue = reciprocal_value_adv(d, 32)
+ * exp = rvalue.exp;
+ * if (rvalue.is_wide_m && !(d & 1)) {
+ * // floor(log2(d & (2^32 -d)))
+ * pre_shift = fls(d & -d) - 1;
+ * rvalue = reciprocal_value_adv(d >> pre_shift, 32 - pre_shift);
+ * } else {
+ * pre_shift = 0;
+ * }
+ *
+ * // code generation starts.
+ * if (imm == 1U << exp) {
+ * result = n >> exp;
+ * } else if (rvalue.is_wide_m) {
+ * // pre_shift must be zero when reached here.
+ * t = (n * rvalue.m) >> 32;
+ * result = n - t;
+ * result >>= 1;
+ * result += t;
+ * result >>= rvalue.sh - 1;
+ * } else {
+ * if (pre_shift)
+ * result = n >> pre_shift;
+ * result = ((u64)result * rvalue.m) >> 32;
+ * result >>= rvalue.sh;
+ * }
+ */
+struct reciprocal_value_adv reciprocal_value_adv(u32 d, u8 prec);
+
#endif /* _LINUX_RECIPROCAL_DIV_H */
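
A direct C transcription of the pseudo code above, as a reference for the sequence a JIT would emit; my_div_by_const() is illustrative and assumes an unsigned 32-bit dividend and divisor.

#include <linux/bitops.h>
#include <linux/reciprocal_div.h>

static u32 my_div_by_const(u32 n, u32 d)
{
	struct reciprocal_value_adv rvalue;
	u8 pre_shift, exp;
	u32 t, result;

	/* exception case: the advanced algorithm needs ceil(log2(d)) < 32 */
	if (d >= (1U << 31))
		return n >= d;

	rvalue = reciprocal_value_adv(d, 32);
	exp = rvalue.exp;
	if (rvalue.is_wide_m && !(d & 1)) {
		pre_shift = fls(d & -d) - 1;	/* floor(log2(d & -d)) */
		rvalue = reciprocal_value_adv(d >> pre_shift, 32 - pre_shift);
	} else {
		pre_shift = 0;
	}

	if (d == (1U << exp)) {
		result = n >> exp;
	} else if (rvalue.is_wide_m) {
		/* pre_shift is zero when this branch is reached */
		t = ((u64)n * rvalue.m) >> 32;
		result = ((n - t) >> 1) + t;
		result >>= rvalue.sh - 1;
	} else {
		result = n >> pre_shift;
		result = ((u64)result * rvalue.m) >> 32;
		result >>= rvalue.sh;
	}
	return result;
}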
diff --git a/include/linux/refcount.h b/include/linux/refcount.h
index 4193c41e383a..e28cce21bad6 100644
--- a/include/linux/refcount.h
+++ b/include/linux/refcount.h
@@ -3,9 +3,10 @@
#define _LINUX_REFCOUNT_H
#include <linux/atomic.h>
-#include <linux/mutex.h>
-#include <linux/spinlock.h>
-#include <linux/kernel.h>
+#include <linux/compiler.h>
+#include <linux/spinlock_types.h>
+
+struct mutex;
/**
* struct refcount_t - variant of atomic_t specialized for reference counts
@@ -42,17 +43,30 @@ static inline unsigned int refcount_read(const refcount_t *r)
return atomic_read(&r->refs);
}
+extern __must_check bool refcount_add_not_zero_checked(unsigned int i, refcount_t *r);
+extern void refcount_add_checked(unsigned int i, refcount_t *r);
+
+extern __must_check bool refcount_inc_not_zero_checked(refcount_t *r);
+extern void refcount_inc_checked(refcount_t *r);
+
+extern __must_check bool refcount_sub_and_test_checked(unsigned int i, refcount_t *r);
+
+extern __must_check bool refcount_dec_and_test_checked(refcount_t *r);
+extern void refcount_dec_checked(refcount_t *r);
+
#ifdef CONFIG_REFCOUNT_FULL
-extern __must_check bool refcount_add_not_zero(unsigned int i, refcount_t *r);
-extern void refcount_add(unsigned int i, refcount_t *r);
-extern __must_check bool refcount_inc_not_zero(refcount_t *r);
-extern void refcount_inc(refcount_t *r);
+#define refcount_add_not_zero refcount_add_not_zero_checked
+#define refcount_add refcount_add_checked
+
+#define refcount_inc_not_zero refcount_inc_not_zero_checked
+#define refcount_inc refcount_inc_checked
-extern __must_check bool refcount_sub_and_test(unsigned int i, refcount_t *r);
+#define refcount_sub_and_test refcount_sub_and_test_checked
+
+#define refcount_dec_and_test refcount_dec_and_test_checked
+#define refcount_dec refcount_dec_checked
-extern __must_check bool refcount_dec_and_test(refcount_t *r);
-extern void refcount_dec(refcount_t *r);
#else
# ifdef CONFIG_ARCH_HAS_REFCOUNT
# include <asm/refcount.h>
@@ -98,5 +112,7 @@ extern __must_check bool refcount_dec_if_one(refcount_t *r);
extern __must_check bool refcount_dec_not_one(refcount_t *r);
extern __must_check bool refcount_dec_and_mutex_lock(refcount_t *r, struct mutex *lock);
extern __must_check bool refcount_dec_and_lock(refcount_t *r, spinlock_t *lock);
-
+extern __must_check bool refcount_dec_and_lock_irqsave(refcount_t *r,
+ spinlock_t *lock,
+ unsigned long *flags);
#endif /* _LINUX_REFCOUNT_H */
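
A sketch of a release path that needs the new irqsave variant because the protecting lock is also taken from hard-irq context; struct my_obj and my_table_lock are placeholders.

#include <linux/list.h>
#include <linux/refcount.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct my_obj {
	refcount_t refs;
	struct list_head node;
};

static DEFINE_SPINLOCK(my_table_lock);

static void my_obj_put(struct my_obj *obj)
{
	unsigned long flags;

	if (!refcount_dec_and_lock_irqsave(&obj->refs, &my_table_lock, &flags))
		return;

	/* last reference: unlink under the lock, then free */
	list_del(&obj->node);
	spin_unlock_irqrestore(&my_table_lock, flags);
	kfree(obj);
}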
diff --git a/include/linux/regmap.h b/include/linux/regmap.h
index 4f38068ffb71..379505a53722 100644
--- a/include/linux/regmap.h
+++ b/include/linux/regmap.h
@@ -268,6 +268,13 @@ typedef void (*regmap_unlock)(void *);
* field is NULL but precious_table (see below) is not, the
* check is performed on such table (a register is precious if
* it belongs to one of the ranges specified by precious_table).
+ * @readable_noinc_reg: Optional callback returning true if the register
+ * supports multiple read operations without incrementing
+ * the register number. If this field is NULL but
+ * rd_noinc_table (see below) is not, the check is
+ * performed on such table (a register is no increment
+ * readable if it belongs to one of the ranges specified
+ * by rd_noinc_table).
* @disable_locking: This regmap is either protected by external means or
* is guaranteed not be be accessed from multiple threads.
* Don't use any locking mechanisms.
@@ -295,6 +302,7 @@ typedef void (*regmap_unlock)(void *);
* @rd_table: As above, for read access.
* @volatile_table: As above, for volatile registers.
* @precious_table: As above, for precious registers.
+ * @rd_noinc_table: As above, for no increment readable registers.
* @reg_defaults: Power on reset values for registers (for use with
* register cache support).
* @num_reg_defaults: Number of elements in reg_defaults.
@@ -344,6 +352,7 @@ struct regmap_config {
bool (*readable_reg)(struct device *dev, unsigned int reg);
bool (*volatile_reg)(struct device *dev, unsigned int reg);
bool (*precious_reg)(struct device *dev, unsigned int reg);
+ bool (*readable_noinc_reg)(struct device *dev, unsigned int reg);
bool disable_locking;
regmap_lock lock;
@@ -360,6 +369,7 @@ struct regmap_config {
const struct regmap_access_table *rd_table;
const struct regmap_access_table *volatile_table;
const struct regmap_access_table *precious_table;
+ const struct regmap_access_table *rd_noinc_table;
const struct reg_default *reg_defaults;
unsigned int num_reg_defaults;
enum regcache_type cache_type;
@@ -514,6 +524,10 @@ struct regmap *__regmap_init_i2c(struct i2c_client *i2c,
const struct regmap_config *config,
struct lock_class_key *lock_key,
const char *lock_name);
+struct regmap *__regmap_init_sccb(struct i2c_client *i2c,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name);
struct regmap *__regmap_init_slimbus(struct slim_device *slimbus,
const struct regmap_config *config,
struct lock_class_key *lock_key,
@@ -558,6 +572,10 @@ struct regmap *__devm_regmap_init_i2c(struct i2c_client *i2c,
const struct regmap_config *config,
struct lock_class_key *lock_key,
const char *lock_name);
+struct regmap *__devm_regmap_init_sccb(struct i2c_client *i2c,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name);
struct regmap *__devm_regmap_init_spi(struct spi_device *dev,
const struct regmap_config *config,
struct lock_class_key *lock_key,
@@ -646,6 +664,19 @@ int regmap_attach_dev(struct device *dev, struct regmap *map,
i2c, config)
/**
+ * regmap_init_sccb() - Initialise register map
+ *
+ * @i2c: Device that will be interacted with
+ * @config: Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer to
+ * a struct regmap.
+ */
+#define regmap_init_sccb(i2c, config) \
+ __regmap_lockdep_wrapper(__regmap_init_sccb, #config, \
+ i2c, config)
+
+/**
* regmap_init_slimbus() - Initialise register map
*
* @slimbus: Device that will be interacted with
@@ -798,6 +829,20 @@ bool regmap_ac97_default_volatile(struct device *dev, unsigned int reg);
i2c, config)
/**
+ * devm_regmap_init_sccb() - Initialise managed register map
+ *
+ * @i2c: Device that will be interacted with
+ * @config: Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer
+ * to a struct regmap. The regmap will be automatically freed by the
+ * device management code.
+ */
+#define devm_regmap_init_sccb(i2c, config) \
+ __regmap_lockdep_wrapper(__devm_regmap_init_sccb, #config, \
+ i2c, config)
+
+/**
* devm_regmap_init_spi() - Initialise register map
*
* @dev: Device that will be interacted with
@@ -946,6 +991,8 @@ int regmap_raw_write_async(struct regmap *map, unsigned int reg,
int regmap_read(struct regmap *map, unsigned int reg, unsigned int *val);
int regmap_raw_read(struct regmap *map, unsigned int reg,
void *val, size_t val_len);
+int regmap_noinc_read(struct regmap *map, unsigned int reg,
+ void *val, size_t val_len);
int regmap_bulk_read(struct regmap *map, unsigned int reg, void *val,
size_t val_count);
int regmap_update_bits_base(struct regmap *map, unsigned int reg,
@@ -1196,6 +1243,13 @@ static inline int regmap_raw_read(struct regmap *map, unsigned int reg,
return -EINVAL;
}
+static inline int regmap_noinc_read(struct regmap *map, unsigned int reg,
+ void *val, size_t val_len)
+{
+ WARN_ONCE(1, "regmap API is disabled");
+ return -EINVAL;
+}
+
static inline int regmap_bulk_read(struct regmap *map, unsigned int reg,
void *val, size_t val_count)
{
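
A sketch wiring up the new no-increment read support for a device with a FIFO data register; MY_FIFO_REG and the register sizes are placeholders.

#include <linux/regmap.h>

#define MY_FIFO_REG	0x10	/* hypothetical FIFO data register */

static bool my_readable_noinc_reg(struct device *dev, unsigned int reg)
{
	return reg == MY_FIFO_REG;
}

static const struct regmap_config my_regmap_config = {
	.reg_bits		= 8,
	.val_bits		= 8,
	.readable_noinc_reg	= my_readable_noinc_reg,
};

static int my_drain_fifo(struct regmap *map, u8 *buf, size_t len)
{
	/* reads len values from the same register, without incrementing */
	return regmap_noinc_read(map, MY_FIFO_REG, buf, len);
}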
diff --git a/include/linux/regulator/driver.h b/include/linux/regulator/driver.h
index fc2dc8df476f..0fd8fbb74763 100644
--- a/include/linux/regulator/driver.h
+++ b/include/linux/regulator/driver.h
@@ -46,7 +46,7 @@ enum regulator_status {
/**
* struct regulator_linear_range - specify linear voltage ranges
*
- * Specify a range of voltages for regulator_map_linar_range() and
+ * Specify a range of voltages for regulator_map_linear_range() and
* regulator_list_linear_range().
*
* @min_uV: Lowest voltage in range
@@ -220,7 +220,7 @@ struct regulator_ops {
/* set regulator suspend operating mode (defined in consumer.h) */
int (*set_suspend_mode) (struct regulator_dev *, unsigned int mode);
- int (*resume_early)(struct regulator_dev *rdev);
+ int (*resume)(struct regulator_dev *rdev);
int (*set_pull_down) (struct regulator_dev *);
};
diff --git a/include/linux/regulator/pfuze100.h b/include/linux/regulator/pfuze100.h
index e0ccf46f66cf..cb5aecd40f07 100644
--- a/include/linux/regulator/pfuze100.h
+++ b/include/linux/regulator/pfuze100.h
@@ -64,6 +64,17 @@
#define PFUZE3000_VLDO3 11
#define PFUZE3000_VLDO4 12
+#define PFUZE3001_SW1 0
+#define PFUZE3001_SW2 1
+#define PFUZE3001_SW3 2
+#define PFUZE3001_VSNVS 3
+#define PFUZE3001_VLDO1 4
+#define PFUZE3001_VLDO2 5
+#define PFUZE3001_VCCSD 6
+#define PFUZE3001_V33 7
+#define PFUZE3001_VLDO3 8
+#define PFUZE3001_VLDO4 9
+
struct regulator_init_data;
struct pfuze_regulator_platform_data {
diff --git a/include/linux/rfkill.h b/include/linux/rfkill.h
index e6a0031d1b1f..8ad2487a86d5 100644
--- a/include/linux/rfkill.h
+++ b/include/linux/rfkill.h
@@ -66,7 +66,7 @@ struct rfkill_ops {
#if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
/**
- * rfkill_alloc - allocate rfkill structure
+ * rfkill_alloc - Allocate rfkill structure
* @name: name of the struct -- the string is not copied internally
* @parent: device that has rf switch on it
* @type: type of the switch (RFKILL_TYPE_*)
@@ -112,7 +112,7 @@ void rfkill_pause_polling(struct rfkill *rfkill);
/**
* rfkill_resume_polling(struct rfkill *rfkill)
*
- * Pause polling -- say transmitter is off for other reasons.
+ * Resume polling
* NOTE: not necessary for suspend/resume -- in that case the
* core stops polling anyway
*/
@@ -130,7 +130,7 @@ void rfkill_resume_polling(struct rfkill *rfkill);
void rfkill_unregister(struct rfkill *rfkill);
/**
- * rfkill_destroy - free rfkill structure
+ * rfkill_destroy - Free rfkill structure
* @rfkill: rfkill structure to be destroyed
*
* Destroys the rfkill structure.
@@ -140,7 +140,7 @@ void rfkill_destroy(struct rfkill *rfkill);
/**
* rfkill_set_hw_state - Set the internal rfkill hardware block state
* @rfkill: pointer to the rfkill class to modify.
- * @state: the current hardware block state to set
+ * @blocked: the current hardware block state to set
*
* rfkill drivers that get events when the hard-blocked state changes
* use this function to notify the rfkill core (and through that also
@@ -161,7 +161,7 @@ bool rfkill_set_hw_state(struct rfkill *rfkill, bool blocked);
/**
* rfkill_set_sw_state - Set the internal rfkill software block state
* @rfkill: pointer to the rfkill class to modify.
- * @state: the current software block state to set
+ * @blocked: the current software block state to set
*
* rfkill drivers that get events when the soft-blocked state changes
* (yes, some platforms directly act on input but allow changing again)
@@ -183,7 +183,7 @@ bool rfkill_set_sw_state(struct rfkill *rfkill, bool blocked);
/**
* rfkill_init_sw_state - Initialize persistent software block state
* @rfkill: pointer to the rfkill class to modify.
- * @state: the current software block state to set
+ * @blocked: the current software block state to set
*
* rfkill drivers that preserve their software block state over power off
* use this function to notify the rfkill core (and through that also
@@ -208,17 +208,17 @@ void rfkill_init_sw_state(struct rfkill *rfkill, bool blocked);
void rfkill_set_states(struct rfkill *rfkill, bool sw, bool hw);
/**
- * rfkill_blocked - query rfkill block
+ * rfkill_blocked - Query rfkill block state
*
* @rfkill: rfkill struct to query
*/
bool rfkill_blocked(struct rfkill *rfkill);
/**
- * rfkill_find_type - Helpper for finding rfkill type by name
+ * rfkill_find_type - Helper for finding rfkill type by name
* @name: the name of the type
*
- * Returns enum rfkill_type that conrresponds the name.
+ * Returns enum rfkill_type that corresponds to the name.
*/
enum rfkill_type rfkill_find_type(const char *name);
@@ -296,7 +296,7 @@ static inline enum rfkill_type rfkill_find_type(const char *name)
const char *rfkill_get_led_trigger_name(struct rfkill *rfkill);
/**
- * rfkill_set_led_trigger_name -- set the LED trigger name
+ * rfkill_set_led_trigger_name - Set the LED trigger name
* @rfkill: rfkill struct
* @name: LED trigger name
*
diff --git a/include/linux/rhashtable-types.h b/include/linux/rhashtable-types.h
new file mode 100644
index 000000000000..763d613ce2c2
--- /dev/null
+++ b/include/linux/rhashtable-types.h
@@ -0,0 +1,137 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Resizable, Scalable, Concurrent Hash Table
+ *
+ * Simple structures that might be needed in include
+ * files.
+ */
+
+#ifndef _LINUX_RHASHTABLE_TYPES_H
+#define _LINUX_RHASHTABLE_TYPES_H
+
+#include <linux/atomic.h>
+#include <linux/compiler.h>
+#include <linux/mutex.h>
+#include <linux/workqueue.h>
+
+struct rhash_head {
+ struct rhash_head __rcu *next;
+};
+
+struct rhlist_head {
+ struct rhash_head rhead;
+ struct rhlist_head __rcu *next;
+};
+
+struct bucket_table;
+
+/**
+ * struct rhashtable_compare_arg - Key for the function rhashtable_compare
+ * @ht: Hash table
+ * @key: Key to compare against
+ */
+struct rhashtable_compare_arg {
+ struct rhashtable *ht;
+ const void *key;
+};
+
+typedef u32 (*rht_hashfn_t)(const void *data, u32 len, u32 seed);
+typedef u32 (*rht_obj_hashfn_t)(const void *data, u32 len, u32 seed);
+typedef int (*rht_obj_cmpfn_t)(struct rhashtable_compare_arg *arg,
+ const void *obj);
+
+/**
+ * struct rhashtable_params - Hash table construction parameters
+ * @nelem_hint: Hint on number of elements, should be 75% of desired size
+ * @key_len: Length of key
+ * @key_offset: Offset of key in struct to be hashed
+ * @head_offset: Offset of rhash_head in struct to be hashed
+ * @max_size: Maximum size while expanding
+ * @min_size: Minimum size while shrinking
+ * @locks_mul: Number of bucket locks to allocate per cpu (default: 32)
+ * @automatic_shrinking: Enable automatic shrinking of tables
+ * @hashfn: Hash function (default: jhash2 if !(key_len % 4), or jhash)
+ * @obj_hashfn: Function to hash object
+ * @obj_cmpfn: Function to compare key with object
+ */
+struct rhashtable_params {
+ u16 nelem_hint;
+ u16 key_len;
+ u16 key_offset;
+ u16 head_offset;
+ unsigned int max_size;
+ u16 min_size;
+ bool automatic_shrinking;
+ u8 locks_mul;
+ rht_hashfn_t hashfn;
+ rht_obj_hashfn_t obj_hashfn;
+ rht_obj_cmpfn_t obj_cmpfn;
+};
+
+/**
+ * struct rhashtable - Hash table handle
+ * @tbl: Bucket table
+ * @key_len: Key length for hashfn
+ * @max_elems: Maximum number of elements in table
+ * @p: Configuration parameters
+ * @rhlist: True if this is an rhltable
+ * @run_work: Deferred worker to expand/shrink asynchronously
+ * @mutex: Mutex to protect current/future table swapping
+ * @lock: Spin lock to protect walker list
+ * @nelems: Number of elements in table
+ */
+struct rhashtable {
+ struct bucket_table __rcu *tbl;
+ unsigned int key_len;
+ unsigned int max_elems;
+ struct rhashtable_params p;
+ bool rhlist;
+ struct work_struct run_work;
+ struct mutex mutex;
+ spinlock_t lock;
+ atomic_t nelems;
+};
+
+/**
+ * struct rhltable - Hash table with duplicate objects in a list
+ * @ht: Underlying rhtable
+ */
+struct rhltable {
+ struct rhashtable ht;
+};
+
+/**
+ * struct rhashtable_walker - Hash table walker
+ * @list: List entry on list of walkers
+ * @tbl: The table that we were walking over
+ */
+struct rhashtable_walker {
+ struct list_head list;
+ struct bucket_table *tbl;
+};
+
+/**
+ * struct rhashtable_iter - Hash table iterator
+ * @ht: Table to iterate through
+ * @p: Current pointer
+ * @list: Current hash list pointer
+ * @walker: Associated rhashtable walker
+ * @slot: Current slot
+ * @skip: Number of entries to skip in slot
+ */
+struct rhashtable_iter {
+ struct rhashtable *ht;
+ struct rhash_head *p;
+ struct rhlist_head *list;
+ struct rhashtable_walker walker;
+ unsigned int slot;
+ unsigned int skip;
+ bool end_of_table;
+};
+
+int rhashtable_init(struct rhashtable *ht,
+ const struct rhashtable_params *params);
+int rhltable_init(struct rhltable *hlt,
+ const struct rhashtable_params *params);
+
+#endif /* _LINUX_RHASHTABLE_TYPES_H */
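
A minimal sketch of what a user of the split-out types header sets up: an object embedding a struct rhash_head and a params block wired with offsetof(); struct my_entry and my_ht are placeholders.

#include <linux/rhashtable-types.h>
#include <linux/stddef.h>

struct my_entry {
	u32			key;
	struct rhash_head	node;
};

static const struct rhashtable_params my_params = {
	.key_len	= sizeof(u32),
	.key_offset	= offsetof(struct my_entry, key),
	.head_offset	= offsetof(struct my_entry, node),
	.automatic_shrinking = true,
};

static struct rhashtable my_ht;

static int my_table_setup(void)
{
	return rhashtable_init(&my_ht, &my_params);
}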
diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h
index 4e1f535c2034..eb7111039247 100644
--- a/include/linux/rhashtable.h
+++ b/include/linux/rhashtable.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Resizable, Scalable, Concurrent Hash Table
*
@@ -17,37 +18,18 @@
#ifndef _LINUX_RHASHTABLE_H
#define _LINUX_RHASHTABLE_H
-#include <linux/atomic.h>
-#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/jhash.h>
#include <linux/list_nulls.h>
#include <linux/workqueue.h>
-#include <linux/mutex.h>
#include <linux/rculist.h>
+#include <linux/rhashtable-types.h>
/*
* The end of the chain is marked with a special nulls marks which has
- * the following format:
- *
- * +-------+-----------------------------------------------------+-+
- * | Base | Hash |1|
- * +-------+-----------------------------------------------------+-+
- *
- * Base (4 bits) : Reserved to distinguish between multiple tables.
- * Specified via &struct rhashtable_params.nulls_base.
- * Hash (27 bits): Full hash (unmasked) of first element added to bucket
- * 1 (1 bit) : Nulls marker (always set)
- *
- * The remaining bits of the next pointer remain unused for now.
+ * the least significant bit set.
*/
-#define RHT_BASE_BITS 4
-#define RHT_HASH_BITS 27
-#define RHT_BASE_SHIFT RHT_HASH_BITS
-
-/* Base bits plus 1 bit for nulls marker */
-#define RHT_HASH_RESERVED_SPACE (RHT_BASE_BITS + 1)
/* Maximum chain length before rehash
*
@@ -64,15 +46,6 @@
*/
#define RHT_ELASTICITY 16u
-struct rhash_head {
- struct rhash_head __rcu *next;
-};
-
-struct rhlist_head {
- struct rhash_head rhead;
- struct rhlist_head __rcu *next;
-};
-
/**
* struct bucket_table - Table of hash buckets
* @size: Number of hash buckets
@@ -102,132 +75,14 @@ struct bucket_table {
struct rhash_head __rcu *buckets[] ____cacheline_aligned_in_smp;
};
-/**
- * struct rhashtable_compare_arg - Key for the function rhashtable_compare
- * @ht: Hash table
- * @key: Key to compare against
- */
-struct rhashtable_compare_arg {
- struct rhashtable *ht;
- const void *key;
-};
-
-typedef u32 (*rht_hashfn_t)(const void *data, u32 len, u32 seed);
-typedef u32 (*rht_obj_hashfn_t)(const void *data, u32 len, u32 seed);
-typedef int (*rht_obj_cmpfn_t)(struct rhashtable_compare_arg *arg,
- const void *obj);
-
-struct rhashtable;
-
-/**
- * struct rhashtable_params - Hash table construction parameters
- * @nelem_hint: Hint on number of elements, should be 75% of desired size
- * @key_len: Length of key
- * @key_offset: Offset of key in struct to be hashed
- * @head_offset: Offset of rhash_head in struct to be hashed
- * @max_size: Maximum size while expanding
- * @min_size: Minimum size while shrinking
- * @locks_mul: Number of bucket locks to allocate per cpu (default: 32)
- * @automatic_shrinking: Enable automatic shrinking of tables
- * @nulls_base: Base value to generate nulls marker
- * @hashfn: Hash function (default: jhash2 if !(key_len % 4), or jhash)
- * @obj_hashfn: Function to hash object
- * @obj_cmpfn: Function to compare key with object
- */
-struct rhashtable_params {
- u16 nelem_hint;
- u16 key_len;
- u16 key_offset;
- u16 head_offset;
- unsigned int max_size;
- u16 min_size;
- bool automatic_shrinking;
- u8 locks_mul;
- u32 nulls_base;
- rht_hashfn_t hashfn;
- rht_obj_hashfn_t obj_hashfn;
- rht_obj_cmpfn_t obj_cmpfn;
-};
-
-/**
- * struct rhashtable - Hash table handle
- * @tbl: Bucket table
- * @key_len: Key length for hashfn
- * @max_elems: Maximum number of elements in table
- * @p: Configuration parameters
- * @rhlist: True if this is an rhltable
- * @run_work: Deferred worker to expand/shrink asynchronously
- * @mutex: Mutex to protect current/future table swapping
- * @lock: Spin lock to protect walker list
- * @nelems: Number of elements in table
- */
-struct rhashtable {
- struct bucket_table __rcu *tbl;
- unsigned int key_len;
- unsigned int max_elems;
- struct rhashtable_params p;
- bool rhlist;
- struct work_struct run_work;
- struct mutex mutex;
- spinlock_t lock;
- atomic_t nelems;
-};
-
-/**
- * struct rhltable - Hash table with duplicate objects in a list
- * @ht: Underlying rhtable
- */
-struct rhltable {
- struct rhashtable ht;
-};
-
-/**
- * struct rhashtable_walker - Hash table walker
- * @list: List entry on list of walkers
- * @tbl: The table that we were walking over
- */
-struct rhashtable_walker {
- struct list_head list;
- struct bucket_table *tbl;
-};
-
-/**
- * struct rhashtable_iter - Hash table iterator
- * @ht: Table to iterate through
- * @p: Current pointer
- * @list: Current hash list pointer
- * @walker: Associated rhashtable walker
- * @slot: Current slot
- * @skip: Number of entries to skip in slot
- */
-struct rhashtable_iter {
- struct rhashtable *ht;
- struct rhash_head *p;
- struct rhlist_head *list;
- struct rhashtable_walker walker;
- unsigned int slot;
- unsigned int skip;
- bool end_of_table;
-};
-
-static inline unsigned long rht_marker(const struct rhashtable *ht, u32 hash)
-{
- return NULLS_MARKER(ht->p.nulls_base + hash);
-}
-
-#define INIT_RHT_NULLS_HEAD(ptr, ht, hash) \
- ((ptr) = (typeof(ptr)) rht_marker(ht, hash))
+#define INIT_RHT_NULLS_HEAD(ptr) \
+ ((ptr) = (typeof(ptr)) NULLS_MARKER(0))
static inline bool rht_is_a_nulls(const struct rhash_head *ptr)
{
return ((unsigned long) ptr & 1);
}
-static inline unsigned long rht_get_nulls_value(const struct rhash_head *ptr)
-{
- return ((unsigned long) ptr) >> 1;
-}
-
static inline void *rht_obj(const struct rhashtable *ht,
const struct rhash_head *he)
{
@@ -237,7 +92,7 @@ static inline void *rht_obj(const struct rhashtable *ht,
static inline unsigned int rht_bucket_index(const struct bucket_table *tbl,
unsigned int hash)
{
- return (hash >> RHT_HASH_RESERVED_SPACE) & (tbl->size - 1);
+ return hash & (tbl->size - 1);
}
static inline unsigned int rht_key_get_hash(struct rhashtable *ht,
@@ -376,11 +231,6 @@ static inline int lockdep_rht_bucket_is_held(const struct bucket_table *tbl,
}
#endif /* CONFIG_PROVE_LOCKING */
-int rhashtable_init(struct rhashtable *ht,
- const struct rhashtable_params *params);
-int rhltable_init(struct rhltable *hlt,
- const struct rhashtable_params *params);
-
void *rhashtable_insert_slow(struct rhashtable *ht, const void *key,
struct rhash_head *obj);
@@ -745,7 +595,7 @@ static inline void *__rhashtable_insert_fast(
lock = rht_bucket_lock(tbl, hash);
spin_lock_bh(lock);
- if (unlikely(rht_dereference_bucket(tbl->future_tbl, tbl, hash))) {
+ if (unlikely(rcu_access_pointer(tbl->future_tbl))) {
slow_path:
spin_unlock_bh(lock);
rcu_read_unlock();
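To illustrate the simplified nulls scheme: with INIT_RHT_NULLS_HEAD() now storing NULLS_MARKER(0), a bucket walk simply terminates once rht_is_a_nulls() sees the set low bit. A minimal sketch under that assumption (the helper name is hypothetical; real callers normally just use the rht_for_each*() iterators directly):

#include <linux/rhashtable.h>

/* Must be called under rcu_read_lock(). */
static unsigned int example_chain_len(struct bucket_table *tbl, unsigned int hash)
{
	struct rhash_head *pos;
	unsigned int len = 0;

	rht_for_each_rcu(pos, tbl, hash)
		len++;

	/* the iterator stopped once rht_is_a_nulls(pos) became true */
	return len;
}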
diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h
index b72ebdff0b77..003d09ab308d 100644
--- a/include/linux/ring_buffer.h
+++ b/include/linux/ring_buffer.h
@@ -165,6 +165,7 @@ void ring_buffer_record_enable(struct ring_buffer *buffer);
void ring_buffer_record_off(struct ring_buffer *buffer);
void ring_buffer_record_on(struct ring_buffer *buffer);
int ring_buffer_record_is_on(struct ring_buffer *buffer);
+int ring_buffer_record_is_set_on(struct ring_buffer *buffer);
void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu);
void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu);
diff --git a/include/linux/rmi.h b/include/linux/rmi.h
index 64125443f8a6..5ef5c7c412a7 100644
--- a/include/linux/rmi.h
+++ b/include/linux/rmi.h
@@ -354,6 +354,8 @@ struct rmi_driver_data {
struct mutex irq_mutex;
struct input_dev *input;
+ struct irq_domain *irqdomain;
+
u8 pdt_props;
u8 num_rx_electrodes;
diff --git a/include/linux/rtmutex.h b/include/linux/rtmutex.h
index 1b92a28dd672..6fd615a0eea9 100644
--- a/include/linux/rtmutex.h
+++ b/include/linux/rtmutex.h
@@ -106,7 +106,14 @@ static inline int rt_mutex_is_locked(struct rt_mutex *lock)
extern void __rt_mutex_init(struct rt_mutex *lock, const char *name, struct lock_class_key *key);
extern void rt_mutex_destroy(struct rt_mutex *lock);
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+extern void rt_mutex_lock_nested(struct rt_mutex *lock, unsigned int subclass);
+#define rt_mutex_lock(lock) rt_mutex_lock_nested(lock, 0)
+#else
extern void rt_mutex_lock(struct rt_mutex *lock);
+#define rt_mutex_lock_nested(lock, subclass) rt_mutex_lock(lock)
+#endif
+
extern int rt_mutex_lock_interruptible(struct rt_mutex *lock);
extern int rt_mutex_timed_lock(struct rt_mutex *lock,
struct hrtimer_sleeper *timeout);
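A hedged sketch of the new rt_mutex_lock_nested() annotation. The subclass argument matters when two rt_mutexes share a lock class (e.g. two instances of the same object type locked in a fixed order); the lock names here are hypothetical and only show the call form, which falls back to plain rt_mutex_lock() when lockdep is disabled.

#include <linux/lockdep.h>
#include <linux/rtmutex.h>

static DEFINE_RT_MUTEX(example_parent_lock);
static DEFINE_RT_MUTEX(example_child_lock);

static void example_lock_both(void)
{
	rt_mutex_lock(&example_parent_lock);
	/* annotate the second acquisition for lockdep */
	rt_mutex_lock_nested(&example_child_lock, SINGLE_DEPTH_NESTING);
}

static void example_unlock_both(void)
{
	rt_mutex_unlock(&example_child_lock);
	rt_mutex_unlock(&example_parent_lock);
}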
diff --git a/include/linux/sbitmap.h b/include/linux/sbitmap.h
index e6539536dea9..804a50983ec5 100644
--- a/include/linux/sbitmap.h
+++ b/include/linux/sbitmap.h
@@ -23,6 +23,8 @@
#include <linux/kernel.h>
#include <linux/slab.h>
+struct seq_file;
+
/**
* struct sbitmap_word - Word in a &struct sbitmap.
*/
diff --git a/include/linux/scatterlist.h b/include/linux/scatterlist.h
index 51f52020ad5f..093aa57120b0 100644
--- a/include/linux/scatterlist.h
+++ b/include/linux/scatterlist.h
@@ -9,9 +9,6 @@
#include <asm/io.h>
struct scatterlist {
-#ifdef CONFIG_DEBUG_SG
- unsigned long sg_magic;
-#endif
unsigned long page_link;
unsigned int offset;
unsigned int length;
@@ -64,7 +61,6 @@ struct sg_table {
*
*/
-#define SG_MAGIC 0x87654321
#define SG_CHAIN 0x01UL
#define SG_END 0x02UL
@@ -98,7 +94,6 @@ static inline void sg_assign_page(struct scatterlist *sg, struct page *page)
*/
BUG_ON((unsigned long) page & (SG_CHAIN | SG_END));
#ifdef CONFIG_DEBUG_SG
- BUG_ON(sg->sg_magic != SG_MAGIC);
BUG_ON(sg_is_chain(sg));
#endif
sg->page_link = page_link | (unsigned long) page;
@@ -129,7 +124,6 @@ static inline void sg_set_page(struct scatterlist *sg, struct page *page,
static inline struct page *sg_page(struct scatterlist *sg)
{
#ifdef CONFIG_DEBUG_SG
- BUG_ON(sg->sg_magic != SG_MAGIC);
BUG_ON(sg_is_chain(sg));
#endif
return (struct page *)((sg)->page_link & ~(SG_CHAIN | SG_END));
@@ -195,9 +189,6 @@ static inline void sg_chain(struct scatterlist *prv, unsigned int prv_nents,
**/
static inline void sg_mark_end(struct scatterlist *sg)
{
-#ifdef CONFIG_DEBUG_SG
- BUG_ON(sg->sg_magic != SG_MAGIC);
-#endif
/*
* Set termination bit, clear potential chain bit
*/
@@ -215,9 +206,6 @@ static inline void sg_mark_end(struct scatterlist *sg)
**/
static inline void sg_unmark_end(struct scatterlist *sg)
{
-#ifdef CONFIG_DEBUG_SG
- BUG_ON(sg->sg_magic != SG_MAGIC);
-#endif
sg->page_link &= ~SG_END;
}
@@ -260,12 +248,6 @@ static inline void *sg_virt(struct scatterlist *sg)
static inline void sg_init_marker(struct scatterlist *sgl,
unsigned int nents)
{
-#ifdef CONFIG_DEBUG_SG
- unsigned int i;
-
- for (i = 0; i < nents; i++)
- sgl[i].sg_magic = SG_MAGIC;
-#endif
sg_mark_end(&sgl[nents - 1]);
}
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 87bf02d93a27..95a5018c338e 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -118,7 +118,7 @@ struct task_group;
* the comment with set_special_state().
*/
#define is_special_task_state(state) \
- ((state) & (__TASK_STOPPED | __TASK_TRACED | TASK_DEAD))
+ ((state) & (__TASK_STOPPED | __TASK_TRACED | TASK_PARKED | TASK_DEAD))
#define __set_current_state(state_value) \
do { \
@@ -167,8 +167,8 @@ struct task_group;
* need_sleep = false;
* wake_up_state(p, TASK_UNINTERRUPTIBLE);
*
- * Where wake_up_state() (and all other wakeup primitives) imply enough
- * barriers to order the store of the variable against wakeup.
+ * where wake_up_state() executes a full memory barrier before accessing the
+ * task state.
*
* Wakeup will do: if (@state & p->state) p->state = TASK_RUNNING, that is,
* once it observes the TASK_UNINTERRUPTIBLE store the waking CPU can issue a
@@ -734,6 +734,10 @@ struct task_struct {
/* disallow userland-initiated cgroup migration */
unsigned no_cgroup_migration:1;
#endif
+#ifdef CONFIG_BLK_CGROUP
+ /* to be used once the psi infrastructure lands upstream. */
+ unsigned use_memdelay:1;
+#endif
unsigned long atomic_flags; /* Flags requiring atomic access. */
@@ -1017,7 +1021,6 @@ struct task_struct {
u64 last_sum_exec_runtime;
struct callback_head numa_work;
- struct list_head numa_entry;
struct numa_group *numa_group;
/*
@@ -1151,6 +1154,10 @@ struct task_struct {
unsigned int memcg_nr_pages_over_high;
#endif
+#ifdef CONFIG_BLK_CGROUP
+ struct request_queue *throttle_queue;
+#endif
+
#ifdef CONFIG_UPROBES
struct uprobe_task *utask;
#endif
@@ -1799,20 +1806,22 @@ static inline void rseq_set_notify_resume(struct task_struct *t)
set_tsk_thread_flag(t, TIF_NOTIFY_RESUME);
}
-void __rseq_handle_notify_resume(struct pt_regs *regs);
+void __rseq_handle_notify_resume(struct ksignal *sig, struct pt_regs *regs);
-static inline void rseq_handle_notify_resume(struct pt_regs *regs)
+static inline void rseq_handle_notify_resume(struct ksignal *ksig,
+ struct pt_regs *regs)
{
if (current->rseq)
- __rseq_handle_notify_resume(regs);
+ __rseq_handle_notify_resume(ksig, regs);
}
-static inline void rseq_signal_deliver(struct pt_regs *regs)
+static inline void rseq_signal_deliver(struct ksignal *ksig,
+ struct pt_regs *regs)
{
preempt_disable();
__set_bit(RSEQ_EVENT_SIGNAL_BIT, &current->rseq_event_mask);
preempt_enable();
- rseq_handle_notify_resume(regs);
+ rseq_handle_notify_resume(ksig, regs);
}
/* rseq_preempt() requires preemption to be disabled. */
@@ -1831,9 +1840,7 @@ static inline void rseq_migrate(struct task_struct *t)
/*
* If parent process has a registered restartable sequences area, the
- * child inherits. Only applies when forking a process, not a thread. In
- * case a parent fork() in the middle of a restartable sequence, set the
- * resume notifier to force the child to retry.
+ * child inherits. Only applies when forking a process, not a thread.
*/
static inline void rseq_fork(struct task_struct *t, unsigned long clone_flags)
{
@@ -1847,7 +1854,6 @@ static inline void rseq_fork(struct task_struct *t, unsigned long clone_flags)
t->rseq_len = current->rseq_len;
t->rseq_sig = current->rseq_sig;
t->rseq_event_mask = current->rseq_event_mask;
- rseq_preempt(t);
}
}
@@ -1864,10 +1870,12 @@ static inline void rseq_execve(struct task_struct *t)
static inline void rseq_set_notify_resume(struct task_struct *t)
{
}
-static inline void rseq_handle_notify_resume(struct pt_regs *regs)
+static inline void rseq_handle_notify_resume(struct ksignal *ksig,
+ struct pt_regs *regs)
{
}
-static inline void rseq_signal_deliver(struct pt_regs *regs)
+static inline void rseq_signal_deliver(struct ksignal *ksig,
+ struct pt_regs *regs)
{
}
static inline void rseq_preempt(struct task_struct *t)
diff --git a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h
index 1c1a1512ec55..913488d828cb 100644
--- a/include/linux/sched/sysctl.h
+++ b/include/linux/sched/sysctl.h
@@ -40,7 +40,6 @@ extern unsigned int sysctl_numa_balancing_scan_size;
#ifdef CONFIG_SCHED_DEBUG
extern __read_mostly unsigned int sysctl_sched_migration_cost;
extern __read_mostly unsigned int sysctl_sched_nr_migrate;
-extern __read_mostly unsigned int sysctl_sched_time_avg;
int sched_proc_update_handler(struct ctl_table *table, int write,
void __user *buffer, size_t *length,
diff --git a/include/linux/sched/task.h b/include/linux/sched/task.h
index 5be31eb7b266..108ede99e533 100644
--- a/include/linux/sched/task.h
+++ b/include/linux/sched/task.h
@@ -75,7 +75,7 @@ extern long _do_fork(unsigned long, unsigned long, unsigned long, int __user *,
extern long do_fork(unsigned long, unsigned long, unsigned long, int __user *, int __user *);
struct task_struct *fork_idle(int);
extern pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);
-extern long kernel_wait4(pid_t, int *, int, struct rusage *);
+extern long kernel_wait4(pid_t, int __user *, int, struct rusage *);
extern void free_task(struct task_struct *tsk);
diff --git a/include/linux/sched_clock.h b/include/linux/sched_clock.h
index 411b52e424e1..abe28d5cb3f4 100644
--- a/include/linux/sched_clock.h
+++ b/include/linux/sched_clock.h
@@ -9,17 +9,16 @@
#define LINUX_SCHED_CLOCK
#ifdef CONFIG_GENERIC_SCHED_CLOCK
-extern void sched_clock_postinit(void);
+extern void generic_sched_clock_init(void);
extern void sched_clock_register(u64 (*read)(void), int bits,
unsigned long rate);
#else
-static inline void sched_clock_postinit(void) { }
+static inline void generic_sched_clock_init(void) { }
static inline void sched_clock_register(u64 (*read)(void), int bits,
unsigned long rate)
{
- ;
}
#endif
diff --git a/include/linux/sctp.h b/include/linux/sctp.h
index b36c76635f18..83d94341e003 100644
--- a/include/linux/sctp.h
+++ b/include/linux/sctp.h
@@ -801,4 +801,11 @@ struct sctp_strreset_resptsn {
__be32 receivers_next_tsn;
};
+enum {
+ SCTP_DSCP_SET_MASK = 0x1,
+ SCTP_DSCP_VAL_MASK = 0xfc,
+ SCTP_FLOWLABEL_SET_MASK = 0x100000,
+ SCTP_FLOWLABEL_VAL_MASK = 0xfffff
+};
+
#endif /* __LINUX_SCTP_H__ */
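These masks pack a "value was explicitly set" flag together with the value itself into a single word. A small hedged sketch of decoding such a packed field; the helper names are hypothetical:

#include <linux/sctp.h>

static inline bool example_dscp_is_set(__u32 dscp)
{
	return dscp & SCTP_DSCP_SET_MASK;
}

static inline __u8 example_dscp_value(__u32 dscp)
{
	return dscp & SCTP_DSCP_VAL_MASK;	/* upper six bits of the TOS byte */
}

static inline bool example_flowlabel_is_set(__u32 label)
{
	return label & SCTP_FLOWLABEL_SET_MASK;
}

static inline __u32 example_flowlabel_value(__u32 label)
{
	return label & SCTP_FLOWLABEL_VAL_MASK;	/* 20-bit IPv6 flow label */
}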
diff --git a/include/linux/security.h b/include/linux/security.h
index 63030c85ee19..75f4156c84d7 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -159,6 +159,27 @@ extern int mmap_min_addr_handler(struct ctl_table *table, int write,
typedef int (*initxattrs) (struct inode *inode,
const struct xattr *xattr_array, void *fs_data);
+
+/* Keep the kernel_load_data_id enum in sync with kernel_read_file_id */
+#define __data_id_enumify(ENUM, dummy) LOADING_ ## ENUM,
+#define __data_id_stringify(dummy, str) #str,
+
+enum kernel_load_data_id {
+ __kernel_read_file_id(__data_id_enumify)
+};
+
+static const char * const kernel_load_data_str[] = {
+ __kernel_read_file_id(__data_id_stringify)
+};
+
+static inline const char *kernel_load_data_id_str(enum kernel_load_data_id id)
+{
+ if ((unsigned)id >= LOADING_MAX_ID)
+ return kernel_load_data_str[LOADING_UNKNOWN];
+
+ return kernel_load_data_str[id];
+}
+
#ifdef CONFIG_SECURITY
struct security_mnt_opts {
@@ -309,7 +330,7 @@ void security_file_set_fowner(struct file *file);
int security_file_send_sigiotask(struct task_struct *tsk,
struct fown_struct *fown, int sig);
int security_file_receive(struct file *file);
-int security_file_open(struct file *file, const struct cred *cred);
+int security_file_open(struct file *file);
int security_task_alloc(struct task_struct *task, unsigned long clone_flags);
void security_task_free(struct task_struct *task);
int security_cred_alloc_blank(struct cred *cred, gfp_t gfp);
@@ -320,6 +341,7 @@ void security_cred_getsecid(const struct cred *c, u32 *secid);
int security_kernel_act_as(struct cred *new, u32 secid);
int security_kernel_create_files_as(struct cred *new, struct inode *inode);
int security_kernel_module_request(char *kmod_name);
+int security_kernel_load_data(enum kernel_load_data_id id);
int security_kernel_read_file(struct file *file, enum kernel_read_file_id id);
int security_kernel_post_read_file(struct file *file, char *buf, loff_t size,
enum kernel_read_file_id id);
@@ -858,8 +880,7 @@ static inline int security_file_receive(struct file *file)
return 0;
}
-static inline int security_file_open(struct file *file,
- const struct cred *cred)
+static inline int security_file_open(struct file *file)
{
return 0;
}
@@ -909,6 +930,11 @@ static inline int security_kernel_module_request(char *kmod_name)
return 0;
}
+static inline int security_kernel_load_data(enum kernel_load_data_id id)
+{
+ return 0;
+}
+
static inline int security_kernel_read_file(struct file *file,
enum kernel_read_file_id id)
{
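A hedged sketch of a caller gating a blob load on the new security_kernel_load_data() hook and using kernel_load_data_id_str() for diagnostics; the function name is hypothetical:

#include <linux/printk.h>
#include <linux/security.h>

static int example_load_blob(enum kernel_load_data_id id)
{
	int ret = security_kernel_load_data(id);

	if (ret) {
		pr_warn("%s load denied by LSM: %d\n",
			kernel_load_data_id_str(id), ret);
		return ret;
	}

	/* ... go on to read and process the data ... */
	return 0;
}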
diff --git a/include/linux/sfp.h b/include/linux/sfp.h
index ebce9e24906a..d37518e89db2 100644
--- a/include/linux/sfp.h
+++ b/include/linux/sfp.h
@@ -231,6 +231,50 @@ struct sfp_eeprom_id {
struct sfp_eeprom_ext ext;
} __packed;
+struct sfp_diag {
+ __be16 temp_high_alarm;
+ __be16 temp_low_alarm;
+ __be16 temp_high_warn;
+ __be16 temp_low_warn;
+ __be16 volt_high_alarm;
+ __be16 volt_low_alarm;
+ __be16 volt_high_warn;
+ __be16 volt_low_warn;
+ __be16 bias_high_alarm;
+ __be16 bias_low_alarm;
+ __be16 bias_high_warn;
+ __be16 bias_low_warn;
+ __be16 txpwr_high_alarm;
+ __be16 txpwr_low_alarm;
+ __be16 txpwr_high_warn;
+ __be16 txpwr_low_warn;
+ __be16 rxpwr_high_alarm;
+ __be16 rxpwr_low_alarm;
+ __be16 rxpwr_high_warn;
+ __be16 rxpwr_low_warn;
+ __be16 laser_temp_high_alarm;
+ __be16 laser_temp_low_alarm;
+ __be16 laser_temp_high_warn;
+ __be16 laser_temp_low_warn;
+ __be16 tec_cur_high_alarm;
+ __be16 tec_cur_low_alarm;
+ __be16 tec_cur_high_warn;
+ __be16 tec_cur_low_warn;
+ __be32 cal_rxpwr4;
+ __be32 cal_rxpwr3;
+ __be32 cal_rxpwr2;
+ __be32 cal_rxpwr1;
+ __be32 cal_rxpwr0;
+ __be16 cal_txi_slope;
+ __be16 cal_txi_offset;
+ __be16 cal_txpwr_slope;
+ __be16 cal_txpwr_offset;
+ __be16 cal_t_slope;
+ __be16 cal_t_offset;
+ __be16 cal_v_slope;
+ __be16 cal_v_offset;
+} __packed;
+
/* SFP EEPROM registers */
enum {
SFP_PHYS_ID = 0x00,
@@ -384,7 +428,33 @@ enum {
SFP_TEC_CUR = 0x6c,
SFP_STATUS = 0x6e,
- SFP_ALARM = 0x70,
+ SFP_ALARM0 = 0x70,
+ SFP_ALARM0_TEMP_HIGH = BIT(7),
+ SFP_ALARM0_TEMP_LOW = BIT(6),
+ SFP_ALARM0_VCC_HIGH = BIT(5),
+ SFP_ALARM0_VCC_LOW = BIT(4),
+ SFP_ALARM0_TX_BIAS_HIGH = BIT(3),
+ SFP_ALARM0_TX_BIAS_LOW = BIT(2),
+ SFP_ALARM0_TXPWR_HIGH = BIT(1),
+ SFP_ALARM0_TXPWR_LOW = BIT(0),
+
+ SFP_ALARM1 = 0x71,
+ SFP_ALARM1_RXPWR_HIGH = BIT(7),
+ SFP_ALARM1_RXPWR_LOW = BIT(6),
+
+ SFP_WARN0 = 0x74,
+ SFP_WARN0_TEMP_HIGH = BIT(7),
+ SFP_WARN0_TEMP_LOW = BIT(6),
+ SFP_WARN0_VCC_HIGH = BIT(5),
+ SFP_WARN0_VCC_LOW = BIT(4),
+ SFP_WARN0_TX_BIAS_HIGH = BIT(3),
+ SFP_WARN0_TX_BIAS_LOW = BIT(2),
+ SFP_WARN0_TXPWR_HIGH = BIT(1),
+ SFP_WARN0_TXPWR_LOW = BIT(0),
+
+ SFP_WARN1 = 0x75,
+ SFP_WARN1_RXPWR_HIGH = BIT(7),
+ SFP_WARN1_RXPWR_LOW = BIT(6),
SFP_EXT_STATUS = 0x76,
SFP_VSL = 0x78,
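A hedged sketch of decoding the new alarm bytes, assuming the caller has already read the module's A2h diagnostic page into a buffer indexed by these register offsets; the function name is an assumption:

#include <linux/device.h>
#include <linux/sfp.h>

static void example_report_alarms(struct device *dev, const u8 *a2)
{
	u8 alarm0 = a2[SFP_ALARM0];
	u8 alarm1 = a2[SFP_ALARM1];

	if (alarm0 & SFP_ALARM0_TEMP_HIGH)
		dev_warn(dev, "module temperature high alarm\n");
	if (alarm0 & SFP_ALARM0_TX_BIAS_HIGH)
		dev_warn(dev, "TX bias high alarm\n");
	if (alarm1 & SFP_ALARM1_RXPWR_LOW)
		dev_warn(dev, "RX power low alarm\n");
}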
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index c86885954994..17a13e4785fc 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -630,6 +630,7 @@ typedef unsigned char *sk_buff_data_t;
* @hash: the packet hash
* @queue_mapping: Queue mapping for multiqueue devices
* @xmit_more: More SKBs are pending for this queue
+ * @pfmemalloc: skbuff was allocated from PFMEMALLOC reserves
* @ndisc_nodetype: router type (from link layer)
* @ooo_okay: allow the mapping of a socket to a queue to be changed
* @l4_hash: indicate hash is a canonical 4-tuple hash over transport
@@ -640,6 +641,7 @@ typedef unsigned char *sk_buff_data_t;
* @no_fcs: Request NIC to treat last 4 bytes as Ethernet FCS
* @csum_not_inet: use CRC32c to resolve CHECKSUM_PARTIAL
* @dst_pending_confirm: need to confirm neighbour
+ * @decrypted: Decrypted SKB
* @napi_id: id of the NAPI struct this skb came from
* @secmark: security marking
* @mark: Generic packet mark
@@ -674,12 +676,16 @@ struct sk_buff {
* UDP receive path is one user.
*/
unsigned long dev_scratch;
- int ip_defrag_offset;
};
};
- struct rb_node rbnode; /* used in netem & tcp stack */
+ struct rb_node rbnode; /* used in netem, ip4 defrag, and tcp stack */
+ struct list_head list;
+ };
+
+ union {
+ struct sock *sk;
+ int ip_defrag_offset;
};
- struct sock *sk;
union {
ktime_t tstamp;
@@ -735,7 +741,7 @@ struct sk_buff {
peeked:1,
head_frag:1,
xmit_more:1,
- __unused:1; /* one bit hole */
+ pfmemalloc:1;
/* fields enclosed in headers_start/headers_end are copied
* using a single memcpy() in __copy_skb_header()
@@ -754,31 +760,30 @@ struct sk_buff {
__u8 __pkt_type_offset[0];
__u8 pkt_type:3;
- __u8 pfmemalloc:1;
__u8 ignore_df:1;
-
__u8 nf_trace:1;
__u8 ip_summed:2;
__u8 ooo_okay:1;
+
__u8 l4_hash:1;
__u8 sw_hash:1;
__u8 wifi_acked_valid:1;
__u8 wifi_acked:1;
-
__u8 no_fcs:1;
/* Indicates the inner headers are valid in the skbuff. */
__u8 encapsulation:1;
__u8 encap_hdr_csum:1;
__u8 csum_valid:1;
+
__u8 csum_complete_sw:1;
__u8 csum_level:2;
__u8 csum_not_inet:1;
-
__u8 dst_pending_confirm:1;
#ifdef CONFIG_IPV6_NDISC_NODETYPE
__u8 ndisc_nodetype:2;
#endif
__u8 ipvs_property:1;
+
__u8 inner_protocol_type:1;
__u8 remcsum_offload:1;
#ifdef CONFIG_NET_SWITCHDEV
@@ -791,6 +796,9 @@ struct sk_buff {
__u8 tc_redirected:1;
__u8 tc_from_ingress:1;
#endif
+#ifdef CONFIG_TLS_DEVICE
+ __u8 decrypted:1;
+#endif
#ifdef CONFIG_NET_SCHED
__u16 tc_index; /* traffic control index */
@@ -1030,6 +1038,7 @@ static inline struct sk_buff *alloc_skb_fclone(unsigned int size,
}
struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src);
+void skb_headers_offset_update(struct sk_buff *skb, int off);
int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask);
struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t priority);
void skb_copy_header(struct sk_buff *new, const struct sk_buff *old);
@@ -2354,7 +2363,7 @@ static inline void skb_probe_transport_header(struct sk_buff *skb,
if (skb_transport_header_was_set(skb))
return;
- if (skb_flow_dissect_flow_keys_basic(skb, &keys, 0, 0, 0, 0, 0))
+ if (skb_flow_dissect_flow_keys_basic(skb, &keys, NULL, 0, 0, 0, 0))
skb_set_transport_header(skb, keys.control.thoff);
else
skb_set_transport_header(skb, offset_hint);
@@ -2580,7 +2589,7 @@ static inline void __skb_queue_purge(struct sk_buff_head *list)
kfree_skb(skb);
}
-void skb_rbtree_purge(struct rb_root *root);
+unsigned int skb_rbtree_purge(struct rb_root *root);
void *netdev_alloc_frag(unsigned int fragsz);
@@ -3252,7 +3261,8 @@ struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned flags,
int *peeked, int *off, int *err);
struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags, int noblock,
int *err);
-__poll_t datagram_poll_mask(struct socket *sock, __poll_t events);
+__poll_t datagram_poll(struct file *file, struct socket *sock,
+ struct poll_table_struct *wait);
int skb_copy_datagram_iter(const struct sk_buff *from, int offset,
struct iov_iter *to, int size);
static inline int skb_copy_datagram_msg(const struct sk_buff *from, int offset,
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index 09fa2c6f0e68..3a1a1dbc6f49 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -155,8 +155,12 @@ struct kmem_cache {
#ifdef CONFIG_SYSFS
#define SLAB_SUPPORTS_SYSFS
+void sysfs_slab_unlink(struct kmem_cache *);
void sysfs_slab_release(struct kmem_cache *);
#else
+static inline void sysfs_slab_unlink(struct kmem_cache *s)
+{
+}
static inline void sysfs_slab_release(struct kmem_cache *s)
{
}
diff --git a/include/linux/smpboot.h b/include/linux/smpboot.h
index c174844cf663..d0884b525001 100644
--- a/include/linux/smpboot.h
+++ b/include/linux/smpboot.h
@@ -25,8 +25,6 @@ struct smpboot_thread_data;
* parked (cpu offline)
* @unpark: Optional unpark function, called when the thread is
* unparked (cpu online)
- * @cpumask: Internal state. To update which threads are unparked,
- * call smpboot_update_cpumask_percpu_thread().
* @selfparking: Thread is not parked by the park function.
* @thread_comm: The base name of the thread
*/
@@ -40,23 +38,12 @@ struct smp_hotplug_thread {
void (*cleanup)(unsigned int cpu, bool online);
void (*park)(unsigned int cpu);
void (*unpark)(unsigned int cpu);
- cpumask_var_t cpumask;
bool selfparking;
const char *thread_comm;
};
-int smpboot_register_percpu_thread_cpumask(struct smp_hotplug_thread *plug_thread,
- const struct cpumask *cpumask);
-
-static inline int
-smpboot_register_percpu_thread(struct smp_hotplug_thread *plug_thread)
-{
- return smpboot_register_percpu_thread_cpumask(plug_thread,
- cpu_possible_mask);
-}
+int smpboot_register_percpu_thread(struct smp_hotplug_thread *plug_thread);
void smpboot_unregister_percpu_thread(struct smp_hotplug_thread *plug_thread);
-void smpboot_update_cpumask_percpu_thread(struct smp_hotplug_thread *plug_thread,
- const struct cpumask *);
#endif
diff --git a/include/linux/soc/qcom/llcc-qcom.h b/include/linux/soc/qcom/llcc-qcom.h
new file mode 100644
index 000000000000..7e3b9c605ab2
--- /dev/null
+++ b/include/linux/soc/qcom/llcc-qcom.h
@@ -0,0 +1,180 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ */
+
+#include <linux/platform_device.h>
+#ifndef __LLCC_QCOM__
+#define __LLCC_QCOM__
+
+#define LLCC_CPUSS 1
+#define LLCC_VIDSC0 2
+#define LLCC_VIDSC1 3
+#define LLCC_ROTATOR 4
+#define LLCC_VOICE 5
+#define LLCC_AUDIO 6
+#define LLCC_MDMHPGRW 7
+#define LLCC_MDM 8
+#define LLCC_CMPT 10
+#define LLCC_GPUHTW 11
+#define LLCC_GPU 12
+#define LLCC_MMUHWT 13
+#define LLCC_CMPTDMA 15
+#define LLCC_DISP 16
+#define LLCC_VIDFW 17
+#define LLCC_MDMHPFX 20
+#define LLCC_MDMPNG 21
+#define LLCC_AUDHW 22
+
+/**
+ * llcc_slice_desc - Cache slice descriptor
+ * @slice_id: llcc slice id
+ * @slice_size: Size allocated for the llcc slice
+ */
+struct llcc_slice_desc {
+ u32 slice_id;
+ size_t slice_size;
+};
+
+/**
+ * llcc_slice_config - Data associated with the llcc slice
+ * @usecase_id: usecase id for which the llcc slice is used
+ * @slice_id: llcc slice id assigned to each slice
+ * @max_cap: maximum capacity of the llcc slice
+ * @priority: priority of the llcc slice
+ * @fixed_size: whether the llcc slice can grow beyond its size
+ * @bonus_ways: bonus ways associated with llcc slice
+ * @res_ways: reserved ways associated with llcc slice
+ * @cache_mode: mode of the llcc slice
+ * @probe_target_ways: Probe only reserved and bonus ways on a cache miss
+ * @dis_cap_alloc: Disable capacity based allocation
+ * @retain_on_pc: Retain through power collapse
+ * @activate_on_init: activate the slice on init
+ */
+struct llcc_slice_config {
+ u32 usecase_id;
+ u32 slice_id;
+ u32 max_cap;
+ u32 priority;
+ bool fixed_size;
+ u32 bonus_ways;
+ u32 res_ways;
+ u32 cache_mode;
+ u32 probe_target_ways;
+ bool dis_cap_alloc;
+ bool retain_on_pc;
+ bool activate_on_init;
+};
+
+/**
+ * llcc_drv_data - Data associated with the llcc driver
+ * @regmap: regmap associated with the llcc device
+ * @cfg: pointer to the data structure for slice configuration
+ * @lock: mutex associated with each slice
+ * @cfg_size: size of the config data table
+ * @max_slices: max slices as read from device tree
+ * @bcast_off: Offset of the broadcast bank
+ * @num_banks: Number of llcc banks
+ * @bitmap: Bit map to track the active slice ids
+ * @offsets: Pointer to the bank offsets array
+ */
+struct llcc_drv_data {
+ struct regmap *regmap;
+ const struct llcc_slice_config *cfg;
+ struct mutex lock;
+ u32 cfg_size;
+ u32 max_slices;
+ u32 bcast_off;
+ u32 num_banks;
+ unsigned long *bitmap;
+ u32 *offsets;
+};
+
+#if IS_ENABLED(CONFIG_QCOM_LLCC)
+/**
+ * llcc_slice_getd - get llcc slice descriptor
+ * @uid: usecase_id of the client
+ */
+struct llcc_slice_desc *llcc_slice_getd(u32 uid);
+
+/**
+ * llcc_slice_putd - put the llcc slice descriptor
+ * @desc: Pointer to llcc slice descriptor
+ */
+void llcc_slice_putd(struct llcc_slice_desc *desc);
+
+/**
+ * llcc_get_slice_id - get slice id
+ * @desc: Pointer to llcc slice descriptor
+ */
+int llcc_get_slice_id(struct llcc_slice_desc *desc);
+
+/**
+ * llcc_get_slice_size - llcc slice size
+ * @desc: Pointer to llcc slice descriptor
+ */
+size_t llcc_get_slice_size(struct llcc_slice_desc *desc);
+
+/**
+ * llcc_slice_activate - Activate the llcc slice
+ * @desc: Pointer to llcc slice descriptor
+ */
+int llcc_slice_activate(struct llcc_slice_desc *desc);
+
+/**
+ * llcc_slice_deactivate - Deactivate the llcc slice
+ * @desc: Pointer to llcc slice descriptor
+ */
+int llcc_slice_deactivate(struct llcc_slice_desc *desc);
+
+/**
+ * qcom_llcc_probe - program the sct table
+ * @pdev: platform device pointer
+ * @table: soc sct table
+ * @sz: Size of the config table
+ */
+int qcom_llcc_probe(struct platform_device *pdev,
+ const struct llcc_slice_config *table, u32 sz);
+#else
+static inline struct llcc_slice_desc *llcc_slice_getd(u32 uid)
+{
+ return NULL;
+}
+
+static inline void llcc_slice_putd(struct llcc_slice_desc *desc)
+{
+}
+
+static inline int llcc_get_slice_id(struct llcc_slice_desc *desc)
+{
+ return -EINVAL;
+}
+
+static inline size_t llcc_get_slice_size(struct llcc_slice_desc *desc)
+{
+ return 0;
+}
+static inline int llcc_slice_activate(struct llcc_slice_desc *desc)
+{
+ return -EINVAL;
+}
+
+static inline int llcc_slice_deactivate(struct llcc_slice_desc *desc)
+{
+ return -EINVAL;
+}
+static inline int qcom_llcc_probe(struct platform_device *pdev,
+ const struct llcc_slice_config *table, u32 sz)
+{
+ return -ENODEV;
+}
+
+static inline int qcom_llcc_remove(struct platform_device *pdev)
+{
+ return -ENODEV;
+}
+#endif
+
+#endif
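A hedged sketch of a hypothetical client driver exercising the slice API declared above. The error handling covers both a NULL return (as in the stubs) and an error pointer, since the header alone does not pin that down:

#include <linux/err.h>
#include <linux/printk.h>
#include <linux/soc/qcom/llcc-qcom.h>

static int example_use_gpu_slice(void)
{
	struct llcc_slice_desc *desc;
	int ret;

	desc = llcc_slice_getd(LLCC_GPU);
	if (IS_ERR_OR_NULL(desc))
		return desc ? PTR_ERR(desc) : -ENODEV;

	ret = llcc_slice_activate(desc);
	if (ret)
		goto out_put;

	pr_info("LLCC slice %d active, size %zu\n",
		llcc_get_slice_id(desc), llcc_get_slice_size(desc));

	ret = llcc_slice_deactivate(desc);
out_put:
	llcc_slice_putd(desc);
	return ret;
}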
diff --git a/include/linux/soc/samsung/exynos-regs-pmu.h b/include/linux/soc/samsung/exynos-regs-pmu.h
index 66dcb9ec273a..5addaf5ccbce 100644
--- a/include/linux/soc/samsung/exynos-regs-pmu.h
+++ b/include/linux/soc/samsung/exynos-regs-pmu.h
@@ -42,7 +42,9 @@
#define EXYNOS_SWRESET 0x0400
#define S5P_WAKEUP_STAT 0x0600
-#define S5P_EINT_WAKEUP_MASK 0x0604
+/* Value for EXYNOS_EINT_WAKEUP_MASK disabling all external wakeup interrupts */
+#define EXYNOS_EINT_WAKEUP_MASK_DISABLED 0xffffffff
+#define EXYNOS_EINT_WAKEUP_MASK 0x0604
#define S5P_WAKEUP_MASK 0x0608
#define S5P_WAKEUP_MASK2 0x0614
@@ -180,6 +182,9 @@
#define S5P_CORE_WAKEUP_FROM_LOCAL_CFG (0x3 << 8)
#define S5P_CORE_AUTOWAKEUP_EN (1 << 31)
+/* Only for S5Pv210 */
+#define S5PV210_EINT_WAKEUP_MASK 0xC004
+
/* Only for EXYNOS4210 */
#define S5P_CMU_CLKSTOP_LCD1_LOWPWR 0x1154
#define S5P_CMU_RESET_LCD1_LOWPWR 0x1174
@@ -641,6 +646,7 @@
| EXYNOS5420_KFC_USE_STANDBY_WFI3)
/* For EXYNOS5433 */
+#define EXYNOS5433_EINT_WAKEUP_MASK (0x060C)
#define EXYNOS5433_USBHOST30_PHY_CONTROL (0x0728)
#define EXYNOS5433_PAD_RETENTION_AUD_OPTION (0x3028)
#define EXYNOS5433_PAD_RETENTION_MMC2_OPTION (0x30C8)
diff --git a/include/linux/spi/adi_spi3.h b/include/linux/spi/adi_spi3.h
deleted file mode 100644
index c84123aa1d06..000000000000
--- a/include/linux/spi/adi_spi3.h
+++ /dev/null
@@ -1,254 +0,0 @@
-/*
- * Analog Devices SPI3 controller driver
- *
- * Copyright (c) 2014 Analog Devices Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#ifndef _ADI_SPI3_H_
-#define _ADI_SPI3_H_
-
-#include <linux/types.h>
-
-/* SPI_CONTROL */
-#define SPI_CTL_EN 0x00000001 /* Enable */
-#define SPI_CTL_MSTR 0x00000002 /* Master/Slave */
-#define SPI_CTL_PSSE 0x00000004 /* controls modf error in master mode */
-#define SPI_CTL_ODM 0x00000008 /* Open Drain Mode */
-#define SPI_CTL_CPHA 0x00000010 /* Clock Phase */
-#define SPI_CTL_CPOL 0x00000020 /* Clock Polarity */
-#define SPI_CTL_ASSEL 0x00000040 /* Slave Select Pin Control */
-#define SPI_CTL_SELST 0x00000080 /* Slave Select Polarity in-between transfers */
-#define SPI_CTL_EMISO 0x00000100 /* Enable MISO */
-#define SPI_CTL_SIZE 0x00000600 /* Word Transfer Size */
-#define SPI_CTL_SIZE08 0x00000000 /* SIZE: 8 bits */
-#define SPI_CTL_SIZE16 0x00000200 /* SIZE: 16 bits */
-#define SPI_CTL_SIZE32 0x00000400 /* SIZE: 32 bits */
-#define SPI_CTL_LSBF 0x00001000 /* LSB First */
-#define SPI_CTL_FCEN 0x00002000 /* Flow-Control Enable */
-#define SPI_CTL_FCCH 0x00004000 /* Flow-Control Channel Selection */
-#define SPI_CTL_FCPL 0x00008000 /* Flow-Control Polarity */
-#define SPI_CTL_FCWM 0x00030000 /* Flow-Control Water-Mark */
-#define SPI_CTL_FIFO0 0x00000000 /* FCWM: TFIFO empty or RFIFO Full */
-#define SPI_CTL_FIFO1 0x00010000 /* FCWM: TFIFO 75% or more empty or RFIFO 75% or more full */
-#define SPI_CTL_FIFO2 0x00020000 /* FCWM: TFIFO 50% or more empty or RFIFO 50% or more full */
-#define SPI_CTL_FMODE 0x00040000 /* Fast-mode Enable */
-#define SPI_CTL_MIOM 0x00300000 /* Multiple I/O Mode */
-#define SPI_CTL_MIO_DIS 0x00000000 /* MIOM: Disable */
-#define SPI_CTL_MIO_DUAL 0x00100000 /* MIOM: Enable DIOM (Dual I/O Mode) */
-#define SPI_CTL_MIO_QUAD 0x00200000 /* MIOM: Enable QUAD (Quad SPI Mode) */
-#define SPI_CTL_SOSI 0x00400000 /* Start on MOSI */
-/* SPI_RX_CONTROL */
-#define SPI_RXCTL_REN 0x00000001 /* Receive Channel Enable */
-#define SPI_RXCTL_RTI 0x00000004 /* Receive Transfer Initiate */
-#define SPI_RXCTL_RWCEN 0x00000008 /* Receive Word Counter Enable */
-#define SPI_RXCTL_RDR 0x00000070 /* Receive Data Request */
-#define SPI_RXCTL_RDR_DIS 0x00000000 /* RDR: Disabled */
-#define SPI_RXCTL_RDR_NE 0x00000010 /* RDR: RFIFO not empty */
-#define SPI_RXCTL_RDR_25 0x00000020 /* RDR: RFIFO 25% full */
-#define SPI_RXCTL_RDR_50 0x00000030 /* RDR: RFIFO 50% full */
-#define SPI_RXCTL_RDR_75 0x00000040 /* RDR: RFIFO 75% full */
-#define SPI_RXCTL_RDR_FULL 0x00000050 /* RDR: RFIFO full */
-#define SPI_RXCTL_RDO 0x00000100 /* Receive Data Over-Run */
-#define SPI_RXCTL_RRWM 0x00003000 /* FIFO Regular Water-Mark */
-#define SPI_RXCTL_RWM_0 0x00000000 /* RRWM: RFIFO Empty */
-#define SPI_RXCTL_RWM_25 0x00001000 /* RRWM: RFIFO 25% full */
-#define SPI_RXCTL_RWM_50 0x00002000 /* RRWM: RFIFO 50% full */
-#define SPI_RXCTL_RWM_75 0x00003000 /* RRWM: RFIFO 75% full */
-#define SPI_RXCTL_RUWM 0x00070000 /* FIFO Urgent Water-Mark */
-#define SPI_RXCTL_UWM_DIS 0x00000000 /* RUWM: Disabled */
-#define SPI_RXCTL_UWM_25 0x00010000 /* RUWM: RFIFO 25% full */
-#define SPI_RXCTL_UWM_50 0x00020000 /* RUWM: RFIFO 50% full */
-#define SPI_RXCTL_UWM_75 0x00030000 /* RUWM: RFIFO 75% full */
-#define SPI_RXCTL_UWM_FULL 0x00040000 /* RUWM: RFIFO full */
-/* SPI_TX_CONTROL */
-#define SPI_TXCTL_TEN 0x00000001 /* Transmit Channel Enable */
-#define SPI_TXCTL_TTI 0x00000004 /* Transmit Transfer Initiate */
-#define SPI_TXCTL_TWCEN 0x00000008 /* Transmit Word Counter Enable */
-#define SPI_TXCTL_TDR 0x00000070 /* Transmit Data Request */
-#define SPI_TXCTL_TDR_DIS 0x00000000 /* TDR: Disabled */
-#define SPI_TXCTL_TDR_NF 0x00000010 /* TDR: TFIFO not full */
-#define SPI_TXCTL_TDR_25 0x00000020 /* TDR: TFIFO 25% empty */
-#define SPI_TXCTL_TDR_50 0x00000030 /* TDR: TFIFO 50% empty */
-#define SPI_TXCTL_TDR_75 0x00000040 /* TDR: TFIFO 75% empty */
-#define SPI_TXCTL_TDR_EMPTY 0x00000050 /* TDR: TFIFO empty */
-#define SPI_TXCTL_TDU 0x00000100 /* Transmit Data Under-Run */
-#define SPI_TXCTL_TRWM 0x00003000 /* FIFO Regular Water-Mark */
-#define SPI_TXCTL_RWM_FULL 0x00000000 /* TRWM: TFIFO full */
-#define SPI_TXCTL_RWM_25 0x00001000 /* TRWM: TFIFO 25% empty */
-#define SPI_TXCTL_RWM_50 0x00002000 /* TRWM: TFIFO 50% empty */
-#define SPI_TXCTL_RWM_75 0x00003000 /* TRWM: TFIFO 75% empty */
-#define SPI_TXCTL_TUWM 0x00070000 /* FIFO Urgent Water-Mark */
-#define SPI_TXCTL_UWM_DIS 0x00000000 /* TUWM: Disabled */
-#define SPI_TXCTL_UWM_25 0x00010000 /* TUWM: TFIFO 25% empty */
-#define SPI_TXCTL_UWM_50 0x00020000 /* TUWM: TFIFO 50% empty */
-#define SPI_TXCTL_UWM_75 0x00030000 /* TUWM: TFIFO 75% empty */
-#define SPI_TXCTL_UWM_EMPTY 0x00040000 /* TUWM: TFIFO empty */
-/* SPI_CLOCK */
-#define SPI_CLK_BAUD 0x0000FFFF /* Baud Rate */
-/* SPI_DELAY */
-#define SPI_DLY_STOP 0x000000FF /* Transfer delay time in multiples of SCK period */
-#define SPI_DLY_LEADX 0x00000100 /* Extended (1 SCK) LEAD Control */
-#define SPI_DLY_LAGX 0x00000200 /* Extended (1 SCK) LAG control */
-/* SPI_SSEL */
-#define SPI_SLVSEL_SSE1 0x00000002 /* SPISSEL1 Enable */
-#define SPI_SLVSEL_SSE2 0x00000004 /* SPISSEL2 Enable */
-#define SPI_SLVSEL_SSE3 0x00000008 /* SPISSEL3 Enable */
-#define SPI_SLVSEL_SSE4 0x00000010 /* SPISSEL4 Enable */
-#define SPI_SLVSEL_SSE5 0x00000020 /* SPISSEL5 Enable */
-#define SPI_SLVSEL_SSE6 0x00000040 /* SPISSEL6 Enable */
-#define SPI_SLVSEL_SSE7 0x00000080 /* SPISSEL7 Enable */
-#define SPI_SLVSEL_SSEL1 0x00000200 /* SPISSEL1 Value */
-#define SPI_SLVSEL_SSEL2 0x00000400 /* SPISSEL2 Value */
-#define SPI_SLVSEL_SSEL3 0x00000800 /* SPISSEL3 Value */
-#define SPI_SLVSEL_SSEL4 0x00001000 /* SPISSEL4 Value */
-#define SPI_SLVSEL_SSEL5 0x00002000 /* SPISSEL5 Value */
-#define SPI_SLVSEL_SSEL6 0x00004000 /* SPISSEL6 Value */
-#define SPI_SLVSEL_SSEL7 0x00008000 /* SPISSEL7 Value */
-/* SPI_RWC */
-#define SPI_RWC_VALUE 0x0000FFFF /* Received Word-Count */
-/* SPI_RWCR */
-#define SPI_RWCR_VALUE 0x0000FFFF /* Received Word-Count Reload */
-/* SPI_TWC */
-#define SPI_TWC_VALUE 0x0000FFFF /* Transmitted Word-Count */
-/* SPI_TWCR */
-#define SPI_TWCR_VALUE 0x0000FFFF /* Transmitted Word-Count Reload */
-/* SPI_IMASK */
-#define SPI_IMSK_RUWM 0x00000002 /* Receive Urgent Water-Mark Interrupt Mask */
-#define SPI_IMSK_TUWM 0x00000004 /* Transmit Urgent Water-Mark Interrupt Mask */
-#define SPI_IMSK_ROM 0x00000010 /* Receive Over-Run Error Interrupt Mask */
-#define SPI_IMSK_TUM 0x00000020 /* Transmit Under-Run Error Interrupt Mask */
-#define SPI_IMSK_TCM 0x00000040 /* Transmit Collision Error Interrupt Mask */
-#define SPI_IMSK_MFM 0x00000080 /* Mode Fault Error Interrupt Mask */
-#define SPI_IMSK_RSM 0x00000100 /* Receive Start Interrupt Mask */
-#define SPI_IMSK_TSM 0x00000200 /* Transmit Start Interrupt Mask */
-#define SPI_IMSK_RFM 0x00000400 /* Receive Finish Interrupt Mask */
-#define SPI_IMSK_TFM 0x00000800 /* Transmit Finish Interrupt Mask */
-/* SPI_IMASKCL */
-#define SPI_IMSK_CLR_RUW 0x00000002 /* Receive Urgent Water-Mark Interrupt Mask */
-#define SPI_IMSK_CLR_TUWM 0x00000004 /* Transmit Urgent Water-Mark Interrupt Mask */
-#define SPI_IMSK_CLR_ROM 0x00000010 /* Receive Over-Run Error Interrupt Mask */
-#define SPI_IMSK_CLR_TUM 0x00000020 /* Transmit Under-Run Error Interrupt Mask */
-#define SPI_IMSK_CLR_TCM 0x00000040 /* Transmit Collision Error Interrupt Mask */
-#define SPI_IMSK_CLR_MFM 0x00000080 /* Mode Fault Error Interrupt Mask */
-#define SPI_IMSK_CLR_RSM 0x00000100 /* Receive Start Interrupt Mask */
-#define SPI_IMSK_CLR_TSM 0x00000200 /* Transmit Start Interrupt Mask */
-#define SPI_IMSK_CLR_RFM 0x00000400 /* Receive Finish Interrupt Mask */
-#define SPI_IMSK_CLR_TFM 0x00000800 /* Transmit Finish Interrupt Mask */
-/* SPI_IMASKST */
-#define SPI_IMSK_SET_RUWM 0x00000002 /* Receive Urgent Water-Mark Interrupt Mask */
-#define SPI_IMSK_SET_TUWM 0x00000004 /* Transmit Urgent Water-Mark Interrupt Mask */
-#define SPI_IMSK_SET_ROM 0x00000010 /* Receive Over-Run Error Interrupt Mask */
-#define SPI_IMSK_SET_TUM 0x00000020 /* Transmit Under-Run Error Interrupt Mask */
-#define SPI_IMSK_SET_TCM 0x00000040 /* Transmit Collision Error Interrupt Mask */
-#define SPI_IMSK_SET_MFM 0x00000080 /* Mode Fault Error Interrupt Mask */
-#define SPI_IMSK_SET_RSM 0x00000100 /* Receive Start Interrupt Mask */
-#define SPI_IMSK_SET_TSM 0x00000200 /* Transmit Start Interrupt Mask */
-#define SPI_IMSK_SET_RFM 0x00000400 /* Receive Finish Interrupt Mask */
-#define SPI_IMSK_SET_TFM 0x00000800 /* Transmit Finish Interrupt Mask */
-/* SPI_STATUS */
-#define SPI_STAT_SPIF 0x00000001 /* SPI Finished */
-#define SPI_STAT_RUWM 0x00000002 /* Receive Urgent Water-Mark Breached */
-#define SPI_STAT_TUWM 0x00000004 /* Transmit Urgent Water-Mark Breached */
-#define SPI_STAT_ROE 0x00000010 /* Receive Over-Run Error Indication */
-#define SPI_STAT_TUE 0x00000020 /* Transmit Under-Run Error Indication */
-#define SPI_STAT_TCE 0x00000040 /* Transmit Collision Error Indication */
-#define SPI_STAT_MODF 0x00000080 /* Mode Fault Error Indication */
-#define SPI_STAT_RS 0x00000100 /* Receive Start Indication */
-#define SPI_STAT_TS 0x00000200 /* Transmit Start Indication */
-#define SPI_STAT_RF 0x00000400 /* Receive Finish Indication */
-#define SPI_STAT_TF 0x00000800 /* Transmit Finish Indication */
-#define SPI_STAT_RFS 0x00007000 /* SPI_RFIFO status */
-#define SPI_STAT_RFIFO_EMPTY 0x00000000 /* RFS: RFIFO Empty */
-#define SPI_STAT_RFIFO_25 0x00001000 /* RFS: RFIFO 25% Full */
-#define SPI_STAT_RFIFO_50 0x00002000 /* RFS: RFIFO 50% Full */
-#define SPI_STAT_RFIFO_75 0x00003000 /* RFS: RFIFO 75% Full */
-#define SPI_STAT_RFIFO_FULL 0x00004000 /* RFS: RFIFO Full */
-#define SPI_STAT_TFS 0x00070000 /* SPI_TFIFO status */
-#define SPI_STAT_TFIFO_FULL 0x00000000 /* TFS: TFIFO full */
-#define SPI_STAT_TFIFO_25 0x00010000 /* TFS: TFIFO 25% empty */
-#define SPI_STAT_TFIFO_50 0x00020000 /* TFS: TFIFO 50% empty */
-#define SPI_STAT_TFIFO_75 0x00030000 /* TFS: TFIFO 75% empty */
-#define SPI_STAT_TFIFO_EMPTY 0x00040000 /* TFS: TFIFO empty */
-#define SPI_STAT_FCS 0x00100000 /* Flow-Control Stall Indication */
-#define SPI_STAT_RFE 0x00400000 /* SPI_RFIFO Empty */
-#define SPI_STAT_TFF 0x00800000 /* SPI_TFIFO Full */
-/* SPI_ILAT */
-#define SPI_ILAT_RUWMI 0x00000002 /* Receive Urgent Water Mark Interrupt */
-#define SPI_ILAT_TUWMI 0x00000004 /* Transmit Urgent Water Mark Interrupt */
-#define SPI_ILAT_ROI 0x00000010 /* Receive Over-Run Error Indication */
-#define SPI_ILAT_TUI 0x00000020 /* Transmit Under-Run Error Indication */
-#define SPI_ILAT_TCI 0x00000040 /* Transmit Collision Error Indication */
-#define SPI_ILAT_MFI 0x00000080 /* Mode Fault Error Indication */
-#define SPI_ILAT_RSI 0x00000100 /* Receive Start Indication */
-#define SPI_ILAT_TSI 0x00000200 /* Transmit Start Indication */
-#define SPI_ILAT_RFI 0x00000400 /* Receive Finish Indication */
-#define SPI_ILAT_TFI 0x00000800 /* Transmit Finish Indication */
-/* SPI_ILATCL */
-#define SPI_ILAT_CLR_RUWMI 0x00000002 /* Receive Urgent Water Mark Interrupt */
-#define SPI_ILAT_CLR_TUWMI 0x00000004 /* Transmit Urgent Water Mark Interrupt */
-#define SPI_ILAT_CLR_ROI 0x00000010 /* Receive Over-Run Error Indication */
-#define SPI_ILAT_CLR_TUI 0x00000020 /* Transmit Under-Run Error Indication */
-#define SPI_ILAT_CLR_TCI 0x00000040 /* Transmit Collision Error Indication */
-#define SPI_ILAT_CLR_MFI 0x00000080 /* Mode Fault Error Indication */
-#define SPI_ILAT_CLR_RSI 0x00000100 /* Receive Start Indication */
-#define SPI_ILAT_CLR_TSI 0x00000200 /* Transmit Start Indication */
-#define SPI_ILAT_CLR_RFI 0x00000400 /* Receive Finish Indication */
-#define SPI_ILAT_CLR_TFI 0x00000800 /* Transmit Finish Indication */
-
-/*
- * adi spi3 registers layout
- */
-struct adi_spi_regs {
- u32 revid;
- u32 control;
- u32 rx_control;
- u32 tx_control;
- u32 clock;
- u32 delay;
- u32 ssel;
- u32 rwc;
- u32 rwcr;
- u32 twc;
- u32 twcr;
- u32 reserved0;
- u32 emask;
- u32 emaskcl;
- u32 emaskst;
- u32 reserved1;
- u32 status;
- u32 elat;
- u32 elatcl;
- u32 reserved2;
- u32 rfifo;
- u32 reserved3;
- u32 tfifo;
-};
-
-#define MAX_CTRL_CS 8 /* cs in spi controller */
-
-/* device.platform_data for SSP controller devices */
-struct adi_spi3_master {
- u16 num_chipselect;
- u16 pin_req[7];
-};
-
-/* spi_board_info.controller_data for SPI slave devices,
- * copied to spi_device.platform_data ... mostly for dma tuning
- */
-struct adi_spi3_chip {
- u32 control;
- u16 cs_chg_udelay; /* Some devices require 16-bit delays */
- u32 tx_dummy_val; /* tx value for rx only transfer */
- bool enable_dma;
-};
-
-#endif /* _ADI_SPI3_H_ */
diff --git a/include/linux/spi/spi-mem.h b/include/linux/spi/spi-mem.h
index bb4bd15ae1f6..b2bd4b4127c4 100644
--- a/include/linux/spi/spi-mem.h
+++ b/include/linux/spi/spi-mem.h
@@ -3,7 +3,9 @@
* Copyright (C) 2018 Exceet Electronics GmbH
* Copyright (C) 2018 Bootlin
*
- * Author: Boris Brezillon <boris.brezillon@bootlin.com>
+ * Author:
+ * Peter Pan <peterpandong@micron.com>
+ * Boris Brezillon <boris.brezillon@bootlin.com>
*/
#ifndef __LINUX_SPI_MEM_H
@@ -122,7 +124,8 @@ struct spi_mem_op {
/**
* struct spi_mem - describes a SPI memory device
* @spi: the underlying SPI device
- * @drvpriv: spi_mem_drviver private data
+ * @drvpriv: spi_mem_driver private data
+ * @name: name of the SPI memory device
*
 * Extra information that describes the SPI memory device and may be needed by
* the controller to properly handle this device should be placed here.
@@ -133,6 +136,7 @@ struct spi_mem_op {
struct spi_mem {
struct spi_device *spi;
void *drvpriv;
+ const char *name;
};
/**
@@ -165,6 +169,13 @@ static inline void *spi_mem_get_drvdata(struct spi_mem *mem)
* limitations)
* @supports_op: check if an operation is supported by the controller
* @exec_op: execute a SPI memory operation
+ * @get_name: get a custom name for the SPI mem device from the controller.
+ * This might be needed if the controller driver has been ported
+ * to use the SPI mem layer and a custom name is used to keep
+ * mtdparts compatible.
+ * Note that if the implementation of this function allocates memory
+ * dynamically, then it should do so with devm_xxx(), as we don't
+ * have a ->free_name() function.
*
 * This interface should be implemented by SPI controllers providing a
 * high-level interface to execute SPI memory operations, which is usually the
@@ -176,6 +187,7 @@ struct spi_controller_mem_ops {
const struct spi_mem_op *op);
int (*exec_op)(struct spi_mem *mem,
const struct spi_mem_op *op);
+ const char *(*get_name)(struct spi_mem *mem);
};
/**
@@ -234,6 +246,8 @@ bool spi_mem_supports_op(struct spi_mem *mem,
int spi_mem_exec_op(struct spi_mem *mem,
const struct spi_mem_op *op);
+const char *spi_mem_get_name(struct spi_mem *mem);
+
int spi_mem_driver_register_with_owner(struct spi_mem_driver *drv,
struct module *owner);
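A hedged sketch of a controller driver wiring up the new ->get_name() hook; per the kerneldoc above, the string is allocated with a devm_*() helper so no free callback is needed. The "example" names and the chosen name format are assumptions:

#include <linux/device.h>
#include <linux/slab.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>

static const char *example_spi_mem_get_name(struct spi_mem *mem)
{
	struct spi_controller *ctlr = mem->spi->controller;

	/* devm allocation: lives as long as the controller device */
	return devm_kasprintf(&ctlr->dev, GFP_KERNEL, "%s.%u",
			      dev_name(&ctlr->dev), mem->spi->chip_select);
}

static const struct spi_controller_mem_ops example_mem_ops = {
	.get_name = example_spi_mem_get_name,
};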
diff --git a/include/linux/spi/spi_bitbang.h b/include/linux/spi/spi_bitbang.h
index 51d8c060e513..b7e021b274dc 100644
--- a/include/linux/spi/spi_bitbang.h
+++ b/include/linux/spi/spi_bitbang.h
@@ -8,7 +8,7 @@ struct spi_bitbang {
struct mutex lock;
u8 busy;
u8 use_dma;
- u8 flags; /* extra spi->mode support */
+ u16 flags; /* extra spi->mode support */
struct spi_master *master;
@@ -30,7 +30,8 @@ struct spi_bitbang {
/* txrx_word[SPI_MODE_*]() just looks like a shift register */
u32 (*txrx_word[4])(struct spi_device *spi,
unsigned nsecs,
- u32 word, u8 bits);
+ u32 word, u8 bits, unsigned flags);
+ int (*set_line_direction)(struct spi_device *spi, bool output);
};
/* you can call these default bitbang->master methods from your custom
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index 1e8a46435838..3190997df9ca 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -114,29 +114,48 @@ do { \
#endif /*arch_spin_is_contended*/
/*
- * This barrier must provide two things:
+ * smp_mb__after_spinlock() provides the equivalent of a full memory barrier
+ * between program-order earlier lock acquisitions and program-order later
+ * memory accesses.
*
- * - it must guarantee a STORE before the spin_lock() is ordered against a
- * LOAD after it, see the comments at its two usage sites.
+ * This guarantees that the following two properties hold:
*
- * - it must ensure the critical section is RCsc.
+ * 1) Given the snippet:
*
- * The latter is important for cases where we observe values written by other
- * CPUs in spin-loops, without barriers, while being subject to scheduling.
+ * { X = 0; Y = 0; }
*
- * CPU0 CPU1 CPU2
+ * CPU0 CPU1
*
- * for (;;) {
- * if (READ_ONCE(X))
- * break;
- * }
- * X=1
- * <sched-out>
- * <sched-in>
- * r = X;
+ * WRITE_ONCE(X, 1); WRITE_ONCE(Y, 1);
+ * spin_lock(S); smp_mb();
+ * smp_mb__after_spinlock(); r1 = READ_ONCE(X);
+ * r0 = READ_ONCE(Y);
+ * spin_unlock(S);
*
- * without transitivity it could be that CPU1 observes X!=0 breaks the loop,
- * we get migrated and CPU2 sees X==0.
+ * it is forbidden that CPU0 does not observe CPU1's store to Y (r0 = 0)
+ * and CPU1 does not observe CPU0's store to X (r1 = 0); see the comments
+ * preceding the call to smp_mb__after_spinlock() in __schedule() and in
+ * try_to_wake_up().
+ *
+ * 2) Given the snippet:
+ *
+ * { X = 0; Y = 0; }
+ *
+ * CPU0 CPU1 CPU2
+ *
+ * spin_lock(S); spin_lock(S); r1 = READ_ONCE(Y);
+ * WRITE_ONCE(X, 1); smp_mb__after_spinlock(); smp_rmb();
+ * spin_unlock(S); r0 = READ_ONCE(X); r2 = READ_ONCE(X);
+ * WRITE_ONCE(Y, 1);
+ * spin_unlock(S);
+ *
+ * it is forbidden that CPU0's critical section executes before CPU1's
+ * critical section (r0 = 1), CPU2 observes CPU1's store to Y (r1 = 1)
+ * and CPU2 does not observe CPU0's store to X (r2 = 0); see the comments
+ * preceding the calls to smp_rmb() in try_to_wake_up() for similar
+ * snippets but "projected" onto two CPUs.
+ *
+ * Property (2) upgrades the lock to an RCsc lock.
*
* Since most load-store architectures implement ACQUIRE with an smp_mb() after
* the LL/SC loop, they need no further barriers. Similarly all our TSO
@@ -427,6 +446,11 @@ extern int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock);
#define atomic_dec_and_lock(atomic, lock) \
__cond_lock(lock, _atomic_dec_and_lock(atomic, lock))
+extern int _atomic_dec_and_lock_irqsave(atomic_t *atomic, spinlock_t *lock,
+ unsigned long *flags);
+#define atomic_dec_and_lock_irqsave(atomic, lock, flags) \
+ __cond_lock(lock, _atomic_dec_and_lock_irqsave(atomic, lock, &(flags)))
+
int alloc_bucket_spinlocks(spinlock_t **locks, unsigned int *lock_mask,
size_t max_size, unsigned int cpu_mult,
gfp_t gfp);
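A hedged sketch of the classic refcount-put pattern that the new atomic_dec_and_lock_irqsave() helper targets: the lock is only taken, with interrupts disabled, when the count actually drops to zero. The object and lock names are hypothetical:

#include <linux/atomic.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_list_lock);
static LIST_HEAD(example_list);

struct example_obj {
	atomic_t refcnt;
	struct list_head node;
};

static void example_obj_put(struct example_obj *obj)
{
	unsigned long flags;

	/* returns true, with the lock held and IRQs saved, iff refcnt hit zero */
	if (!atomic_dec_and_lock_irqsave(&obj->refcnt, &example_list_lock, flags))
		return;

	list_del(&obj->node);
	spin_unlock_irqrestore(&example_list_lock, flags);
	kfree(obj);
}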
diff --git a/include/linux/srcu.h b/include/linux/srcu.h
index 91494d7e8e41..3e72a291c401 100644
--- a/include/linux/srcu.h
+++ b/include/linux/srcu.h
@@ -195,6 +195,16 @@ static inline int srcu_read_lock(struct srcu_struct *sp) __acquires(sp)
return retval;
}
+/* Used by tracing, cannot be traced and cannot invoke lockdep. */
+static inline notrace int
+srcu_read_lock_notrace(struct srcu_struct *sp) __acquires(sp)
+{
+ int retval;
+
+ retval = __srcu_read_lock(sp);
+ return retval;
+}
+
/**
* srcu_read_unlock - unregister a old reader from an SRCU-protected structure.
* @sp: srcu_struct in which to unregister the old reader.
@@ -209,6 +219,13 @@ static inline void srcu_read_unlock(struct srcu_struct *sp, int idx)
__srcu_read_unlock(sp, idx);
}
+/* Used by tracing, cannot be traced and cannot call lockdep. */
+static inline notrace void
+srcu_read_unlock_notrace(struct srcu_struct *sp, int idx) __releases(sp)
+{
+ __srcu_read_unlock(sp, idx);
+}
+
/**
* smp_mb__after_srcu_read_unlock - ensure full ordering after srcu_read_unlock
*
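A hedged sketch of a tracing-side reader built on the new notrace variants, which skip lockdep and are themselves safe to call from the tracing machinery; the names are hypothetical:

#include <linux/compiler.h>
#include <linux/srcu.h>

DEFINE_STATIC_SRCU(example_srcu);

static notrace void example_trace_hook(void)
{
	int idx;

	idx = srcu_read_lock_notrace(&example_srcu);
	/* read SRCU-protected state; no lockdep, no function tracing here */
	srcu_read_unlock_notrace(&example_srcu, idx);
}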
diff --git a/include/linux/ssb/ssb.h b/include/linux/ssb/ssb.h
index 3b43655cabe6..0d5a2691e7e9 100644
--- a/include/linux/ssb/ssb.h
+++ b/include/linux/ssb/ssb.h
@@ -499,11 +499,9 @@ struct ssb_bus {
/* Internal-only stuff follows. Do not touch. */
struct list_head list;
-#ifdef CONFIG_SSB_DEBUG
/* Is the bus already powered up? */
bool powered_up;
int power_warn_count;
-#endif /* DEBUG */
};
enum ssb_quirks {
diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h
index 32feac5bbd75..c43e9a01b892 100644
--- a/include/linux/stmmac.h
+++ b/include/linux/stmmac.h
@@ -190,5 +190,6 @@ struct plat_stmmacenet_data {
bool tso_en;
int mac_port_sel_speed;
bool en_tx_lpi_clockgating;
+ int has_xgmac;
};
#endif
diff --git a/include/linux/suspend.h b/include/linux/suspend.h
index 440b62f7502e..5a28ac9284f0 100644
--- a/include/linux/suspend.h
+++ b/include/linux/suspend.h
@@ -414,7 +414,7 @@ static inline bool hibernation_available(void) { return false; }
#define PM_RESTORE_PREPARE 0x0005 /* Going to restore a saved image */
#define PM_POST_RESTORE 0x0006 /* Restore failed */
-extern struct mutex pm_mutex;
+extern struct mutex system_transition_mutex;
#ifdef CONFIG_PM_SLEEP
void save_processor_state(void);
diff --git a/include/linux/swait.h b/include/linux/swait.h
index bf8cb0dee23c..73e06e9986d4 100644
--- a/include/linux/swait.h
+++ b/include/linux/swait.h
@@ -16,7 +16,7 @@
* wait-queues, but the semantics are actually completely different, and
* every single user we have ever had has been buggy (or pointless).
*
- * A "swake_up()" only wakes up _one_ waiter, which is not at all what
+ * A "swake_up_one()" only wakes up _one_ waiter, which is not at all what
* "wake_up()" does, and has led to problems. In other cases, it has
* been fine, because there's only ever one waiter (kvm), but in that
 * case the whole "simple" wait-queue is just pointless to begin with,
@@ -38,8 +38,8 @@
* all wakeups are TASK_NORMAL in order to avoid O(n) lookups for the right
* sleeper state.
*
- * - the exclusive mode; because this requires preserving the list order
- * and this is hard.
+ * - the !exclusive mode; because that leads to O(n) wakeups, everything is
+ * exclusive.
*
* - custom wake callback functions; because you cannot give any guarantees
* about random code. This also allows swait to be used in RT, such that
@@ -115,7 +115,7 @@ extern void __init_swait_queue_head(struct swait_queue_head *q, const char *name
* CPU0 - waker CPU1 - waiter
*
* for (;;) {
- * @cond = true; prepare_to_swait(&wq_head, &wait, state);
+ * @cond = true; prepare_to_swait_exclusive(&wq_head, &wait, state);
* smp_mb(); // smp_mb() from set_current_state()
* if (swait_active(wq_head)) if (@cond)
* wake_up(wq_head); break;
@@ -157,20 +157,20 @@ static inline bool swq_has_sleeper(struct swait_queue_head *wq)
return swait_active(wq);
}
-extern void swake_up(struct swait_queue_head *q);
+extern void swake_up_one(struct swait_queue_head *q);
extern void swake_up_all(struct swait_queue_head *q);
extern void swake_up_locked(struct swait_queue_head *q);
-extern void __prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait);
-extern void prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait, int state);
+extern void prepare_to_swait_exclusive(struct swait_queue_head *q, struct swait_queue *wait, int state);
extern long prepare_to_swait_event(struct swait_queue_head *q, struct swait_queue *wait, int state);
extern void __finish_swait(struct swait_queue_head *q, struct swait_queue *wait);
extern void finish_swait(struct swait_queue_head *q, struct swait_queue *wait);
-/* as per ___wait_event() but for swait, therefore "exclusive == 0" */
+/* as per ___wait_event() but for swait, therefore "exclusive == 1" */
#define ___swait_event(wq, condition, state, ret, cmd) \
({ \
+ __label__ __out; \
struct swait_queue __wait; \
long __ret = ret; \
\
@@ -183,20 +183,20 @@ extern void finish_swait(struct swait_queue_head *q, struct swait_queue *wait);
\
if (___wait_is_interruptible(state) && __int) { \
__ret = __int; \
- break; \
+ goto __out; \
} \
\
cmd; \
} \
finish_swait(&wq, &__wait); \
- __ret; \
+__out: __ret; \
})
#define __swait_event(wq, condition) \
(void)___swait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, \
schedule())
-#define swait_event(wq, condition) \
+#define swait_event_exclusive(wq, condition) \
do { \
if (condition) \
break; \
@@ -208,7 +208,7 @@ do { \
TASK_UNINTERRUPTIBLE, timeout, \
__ret = schedule_timeout(__ret))
-#define swait_event_timeout(wq, condition, timeout) \
+#define swait_event_timeout_exclusive(wq, condition, timeout) \
({ \
long __ret = timeout; \
if (!___wait_cond_timeout(condition)) \
@@ -220,7 +220,7 @@ do { \
___swait_event(wq, condition, TASK_INTERRUPTIBLE, 0, \
schedule())
-#define swait_event_interruptible(wq, condition) \
+#define swait_event_interruptible_exclusive(wq, condition) \
({ \
int __ret = 0; \
if (!(condition)) \
@@ -233,7 +233,7 @@ do { \
TASK_INTERRUPTIBLE, timeout, \
__ret = schedule_timeout(__ret))
-#define swait_event_interruptible_timeout(wq, condition, timeout) \
+#define swait_event_interruptible_timeout_exclusive(wq, condition, timeout)\
({ \
long __ret = timeout; \
if (!___wait_cond_timeout(condition)) \
@@ -246,7 +246,7 @@ do { \
(void)___swait_event(wq, condition, TASK_IDLE, 0, schedule())
/**
- * swait_event_idle - wait without system load contribution
+ * swait_event_idle_exclusive - wait without system load contribution
* @wq: the waitqueue to wait on
* @condition: a C expression for the event to wait for
*
@@ -257,7 +257,7 @@ do { \
* condition and doesn't want to contribute to system load. Signals are
* ignored.
*/
-#define swait_event_idle(wq, condition) \
+#define swait_event_idle_exclusive(wq, condition) \
do { \
if (condition) \
break; \
@@ -270,7 +270,7 @@ do { \
__ret = schedule_timeout(__ret))
/**
- * swait_event_idle_timeout - wait up to timeout without load contribution
+ * swait_event_idle_timeout_exclusive - wait up to timeout without load contribution
* @wq: the waitqueue to wait on
* @condition: a C expression for the event to wait for
* @timeout: timeout at which we'll give up in jiffies
@@ -288,7 +288,7 @@ do { \
* or the remaining jiffies (at least 1) if the @condition evaluated
* to %true before the @timeout elapsed.
*/
-#define swait_event_idle_timeout(wq, condition, timeout) \
+#define swait_event_idle_timeout_exclusive(wq, condition, timeout) \
({ \
long __ret = timeout; \
if (!___wait_cond_timeout(condition)) \
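The renamed swait API above makes its single-waiter semantics explicit in every identifier. A minimal sketch of how the renamed calls pair up; the wq and done identifiers are illustrative only and not part of this patch:

static DECLARE_SWAIT_QUEUE_HEAD(wq);
static bool done;

static int example_waiter(void *unused)
{
	/* Sleeps interruptibly until 'done' is observed true; each wakeup
	 * releases at most one waiter, hence the _exclusive suffix. */
	return swait_event_interruptible_exclusive(wq, READ_ONCE(done));
}

static void example_waker(void)
{
	WRITE_ONCE(done, true);
	swake_up_one(&wq);	/* wakes at most one exclusive waiter */
}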
diff --git a/include/linux/swap.h b/include/linux/swap.h
index c063443d8638..1a8bd05a335e 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -629,7 +629,6 @@ static inline int mem_cgroup_swappiness(struct mem_cgroup *memcg)
return memcg->swappiness;
}
-
#else
static inline int mem_cgroup_swappiness(struct mem_cgroup *mem)
{
@@ -637,6 +636,16 @@ static inline int mem_cgroup_swappiness(struct mem_cgroup *mem)
}
#endif
+#if defined(CONFIG_SWAP) && defined(CONFIG_MEMCG) && defined(CONFIG_BLK_CGROUP)
+extern void mem_cgroup_throttle_swaprate(struct mem_cgroup *memcg, int node,
+ gfp_t gfp_mask);
+#else
+static inline void mem_cgroup_throttle_swaprate(struct mem_cgroup *memcg,
+ int node, gfp_t gfp_mask)
+{
+}
+#endif
+
#ifdef CONFIG_MEMCG_SWAP
extern void mem_cgroup_swapout(struct page *page, swp_entry_t entry);
extern int mem_cgroup_try_charge_swap(struct page *page, swp_entry_t entry);
diff --git a/include/linux/swapfile.h b/include/linux/swapfile.h
index 06bd7b096167..e06febf62978 100644
--- a/include/linux/swapfile.h
+++ b/include/linux/swapfile.h
@@ -10,5 +10,7 @@ extern spinlock_t swap_lock;
extern struct plist_head swap_active_head;
extern struct swap_info_struct *swap_info[];
extern int try_to_unuse(unsigned int, bool, unsigned long);
+extern unsigned long generic_max_swapfile_size(void);
+extern unsigned long max_swapfile_size(void);
#endif /* _LINUX_SWAPFILE_H */
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index 73810808cdf2..2ff814c92f7f 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -11,6 +11,7 @@
#ifndef _LINUX_SYSCALLS_H
#define _LINUX_SYSCALLS_H
+struct __aio_sigset;
struct epoll_event;
struct iattr;
struct inode;
@@ -80,6 +81,7 @@ union bpf_attr;
#include <linux/unistd.h>
#include <linux/quota.h>
#include <linux/key.h>
+#include <linux/personality.h>
#include <trace/syscall.h>
#ifdef CONFIG_ARCH_HAS_SYSCALL_WRAPPER
@@ -231,6 +233,9 @@ static inline int is_syscall_trace_event(struct trace_event_call *tp_event)
*/
#ifndef __SYSCALL_DEFINEx
#define __SYSCALL_DEFINEx(x, name, ...) \
+ __diag_push(); \
+ __diag_ignore(GCC, 8, "-Wattribute-alias", \
+ "Type aliasing is used to sanitize syscall arguments");\
asmlinkage long sys##name(__MAP(x,__SC_DECL,__VA_ARGS__)) \
__attribute__((alias(__stringify(__se_sys##name)))); \
ALLOW_ERROR_INJECTION(sys##name, ERRNO); \
@@ -243,6 +248,7 @@ static inline int is_syscall_trace_event(struct trace_event_call *tp_event)
__PROTECT(x, ret,__MAP(x,__SC_ARGS,__VA_ARGS__)); \
return ret; \
} \
+ __diag_pop(); \
static inline long __do_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__))
#endif /* __SYSCALL_DEFINEx */
@@ -501,9 +507,9 @@ asmlinkage long sys_sync_file_range(int fd, loff_t offset, loff_t nbytes,
/* fs/timerfd.c */
asmlinkage long sys_timerfd_create(int clockid, int flags);
asmlinkage long sys_timerfd_settime(int ufd, int flags,
- const struct itimerspec __user *utmr,
- struct itimerspec __user *otmr);
-asmlinkage long sys_timerfd_gettime(int ufd, struct itimerspec __user *otmr);
+ const struct __kernel_itimerspec __user *utmr,
+ struct __kernel_itimerspec __user *otmr);
+asmlinkage long sys_timerfd_gettime(int ufd, struct __kernel_itimerspec __user *otmr);
/* fs/utimes.c */
asmlinkage long sys_utimensat(int dfd, const char __user *filename,
@@ -568,10 +574,10 @@ asmlinkage long sys_timer_create(clockid_t which_clock,
struct sigevent __user *timer_event_spec,
timer_t __user * created_timer_id);
asmlinkage long sys_timer_gettime(timer_t timer_id,
- struct itimerspec __user *setting);
+ struct __kernel_itimerspec __user *setting);
asmlinkage long sys_timer_getoverrun(timer_t timer_id);
asmlinkage long sys_timer_settime(timer_t timer_id, int flags,
- const struct itimerspec __user *new_setting,
+ const struct __kernel_itimerspec __user *new_setting,
struct itimerspec __user *old_setting);
asmlinkage long sys_timer_delete(timer_t timer_id);
asmlinkage long sys_clock_settime(clockid_t which_clock,
@@ -1277,4 +1283,14 @@ static inline long ksys_truncate(const char __user *pathname, loff_t length)
return do_sys_truncate(pathname, length);
}
+static inline unsigned int ksys_personality(unsigned int personality)
+{
+ unsigned int old = current->personality;
+
+ if (personality != 0xffffffff)
+ set_personality(personality);
+
+ return old;
+}
+
#endif
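ksys_personality() above gives in-kernel callers the personality syscall semantics without going through the syscall table. A plausible syscall body built on it, sketched here rather than taken from this patch:

SYSCALL_DEFINE1(personality, unsigned int, personality)
{
	/* 0xffffffff only queries the current personality; any other value
	 * is installed. The previous value is returned either way. */
	return ksys_personality(personality);
}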
diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h
index b8bfdc173ec0..3c12198c0103 100644
--- a/include/linux/sysfs.h
+++ b/include/linux/sysfs.h
@@ -237,6 +237,9 @@ int __must_check sysfs_create_files(struct kobject *kobj,
const struct attribute **attr);
int __must_check sysfs_chmod_file(struct kobject *kobj,
const struct attribute *attr, umode_t mode);
+struct kernfs_node *sysfs_break_active_protection(struct kobject *kobj,
+ const struct attribute *attr);
+void sysfs_unbreak_active_protection(struct kernfs_node *kn);
void sysfs_remove_file_ns(struct kobject *kobj, const struct attribute *attr,
const void *ns);
bool sysfs_remove_file_self(struct kobject *kobj, const struct attribute *attr);
@@ -350,6 +353,17 @@ static inline int sysfs_chmod_file(struct kobject *kobj,
return 0;
}
+static inline struct kernfs_node *
+sysfs_break_active_protection(struct kobject *kobj,
+ const struct attribute *attr)
+{
+ return NULL;
+}
+
+static inline void sysfs_unbreak_active_protection(struct kernfs_node *kn)
+{
+}
+
static inline void sysfs_remove_file_ns(struct kobject *kobj,
const struct attribute *attr,
const void *ns)
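sysfs_break_active_protection() exists so that a store() callback can remove the object that owns it without deadlocking on its own active reference. A hedged sketch of that self-removal pattern; delete_store() and the device teardown step are assumptions, not part of this patch:

static ssize_t delete_store(struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t count)
{
	struct kernfs_node *kn;

	/* Drop the active protection that would otherwise block removal of
	 * this attribute while its own store() is still executing. */
	kn = sysfs_break_active_protection(&dev->kobj, &attr->attr);
	/* ... tear down the device, which removes its sysfs files ... */
	if (kn)
		sysfs_unbreak_active_protection(kn);
	return count;
}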
diff --git a/include/linux/t10-pi.h b/include/linux/t10-pi.h
index c6aa8a3c42ed..b9626aa7e90c 100644
--- a/include/linux/t10-pi.h
+++ b/include/linux/t10-pi.h
@@ -37,9 +37,33 @@ struct t10_pi_tuple {
#define T10_PI_APP_ESCAPE cpu_to_be16(0xffff)
#define T10_PI_REF_ESCAPE cpu_to_be32(0xffffffff)
+static inline u32 t10_pi_ref_tag(struct request *rq)
+{
+#ifdef CONFIG_BLK_DEV_INTEGRITY
+ return blk_rq_pos(rq) >>
+ (rq->q->integrity.interval_exp - 9) & 0xffffffff;
+#else
+ return -1U;
+#endif
+}
+
extern const struct blk_integrity_profile t10_pi_type1_crc;
extern const struct blk_integrity_profile t10_pi_type1_ip;
extern const struct blk_integrity_profile t10_pi_type3_crc;
extern const struct blk_integrity_profile t10_pi_type3_ip;
+#ifdef CONFIG_BLK_DEV_INTEGRITY
+extern void t10_pi_prepare(struct request *rq, u8 protection_type);
+extern void t10_pi_complete(struct request *rq, u8 protection_type,
+ unsigned int intervals);
+#else
+static inline void t10_pi_complete(struct request *rq, u8 protection_type,
+ unsigned int intervals)
+{
+}
+static inline void t10_pi_prepare(struct request *rq, u8 protection_type)
+{
+}
+#endif
+
#endif
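The t10_pi_prepare()/t10_pi_complete() pair is intended to bracket hardware submission for drivers that remap protection reference tags in software. A rough usage sketch; the request, the completed-interval count and the choice of T10_PI_TYPE1_PROTECTION (from enum t10_dif_type) are assumptions for illustration:

static void example_issue(struct request *rq)
{
	/* Adjust the protection tuples before the command reaches the HBA. */
	t10_pi_prepare(rq, T10_PI_TYPE1_PROTECTION);
	/* ... hand the request to the controller ... */
}

static void example_done(struct request *rq, unsigned int good_intervals)
{
	/* Undo the remapping for the intervals that completed. */
	t10_pi_complete(rq, T10_PI_TYPE1_PROTECTION, good_intervals);
	pr_debug("expected ref tag: %u\n", t10_pi_ref_tag(rq));
}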
diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index 72705eaf4b84..263e37271afd 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -89,7 +89,7 @@ struct tcp_sack_block {
struct tcp_options_received {
/* PAWS/RTTM data */
- long ts_recent_stamp;/* Time we stored ts_recent (for aging) */
+ int ts_recent_stamp;/* Time we stored ts_recent (for aging) */
u32 ts_recent; /* Time stamp to echo next */
u32 rcv_tsval; /* Time stamp value */
u32 rcv_tsecr; /* Time stamp echo reply */
@@ -181,10 +181,16 @@ struct tcp_sock {
u32 data_segs_out; /* RFC4898 tcpEStatsPerfDataSegsOut
* total number of data segments sent.
*/
+ u64 bytes_sent; /* RFC4898 tcpEStatsPerfHCDataOctetsOut
+ * total number of data bytes sent.
+ */
u64 bytes_acked; /* RFC4898 tcpEStatsAppHCThruOctetsAcked
* sum(delta(snd_una)), or how many bytes
* were acked.
*/
+ u32 dsack_dups; /* RFC4898 tcpEStatsStackDSACKDups
+ * total number of DSACK blocks received
+ */
u32 snd_una; /* First byte we want an ack for */
u32 snd_sml; /* Last byte of the most recently transmitted small packet */
u32 rcv_tstamp; /* timestamp of last received ACK (for keepalives) */
@@ -214,8 +220,7 @@ struct tcp_sock {
#define TCP_RACK_RECOVERY_THRESH 16
u8 reo_wnd_persist:5, /* No. of recovery since last adj */
dsack_seen:1, /* Whether DSACK seen after last adj */
- advanced:1, /* mstamp advanced since last lost marking */
- reord:1; /* reordering detected */
+ advanced:1; /* mstamp advanced since last lost marking */
} rack;
u16 advmss; /* Advertised MSS */
u8 compressed_ack;
@@ -261,6 +266,7 @@ struct tcp_sock {
u8 ecn_flags; /* ECN status bits. */
u8 keepalive_probes; /* num of allowed keep alive probes */
u32 reordering; /* Packet reordering metric. */
+ u32 reord_seen; /* number of data packet reordering events */
u32 snd_up; /* Urgent pointer */
/*
@@ -330,6 +336,9 @@ struct tcp_sock {
* the first SYN. */
u32 undo_marker; /* snd_una upon a new recovery episode. */
int undo_retrans; /* number of undoable retransmissions. */
+ u64 bytes_retrans; /* RFC4898 tcpEStatsPerfOctetsRetrans
+ * Total data bytes retransmitted
+ */
u32 total_retrans; /* Total retransmits for entire connection */
u32 urg_seq; /* Seq of received urgent pointer */
@@ -350,6 +359,7 @@ struct tcp_sock {
#endif
/* Receiver side RTT estimation */
+ u32 rcv_rtt_last_tsecr;
struct {
u32 rtt_us;
u32 seq;
@@ -425,7 +435,7 @@ struct tcp_timewait_sock {
/* The time we sent the last out-of-window ACK: */
u32 tw_last_oow_ack_time;
- long tw_ts_recent_stamp;
+ int tw_ts_recent_stamp;
#ifdef CONFIG_TCP_MD5SIG
struct tcp_md5sig_key *tw_md5_key;
#endif
diff --git a/include/linux/time.h b/include/linux/time.h
index aed74463592d..27d83fd2ae61 100644
--- a/include/linux/time.h
+++ b/include/linux/time.h
@@ -14,9 +14,9 @@ int get_timespec64(struct timespec64 *ts,
int put_timespec64(const struct timespec64 *ts,
struct __kernel_timespec __user *uts);
int get_itimerspec64(struct itimerspec64 *it,
- const struct itimerspec __user *uit);
+ const struct __kernel_itimerspec __user *uit);
int put_itimerspec64(const struct itimerspec64 *it,
- struct itimerspec __user *uit);
+ struct __kernel_itimerspec __user *uit);
extern time64_t mktime64(const unsigned int year, const unsigned int mon,
const unsigned int day, const unsigned int hour,
diff --git a/include/linux/time64.h b/include/linux/time64.h
index 0a7b2f79cec7..05634afba0db 100644
--- a/include/linux/time64.h
+++ b/include/linux/time64.h
@@ -12,6 +12,7 @@ typedef __u64 timeu64_t;
*/
#ifndef CONFIG_64BIT_TIME
#define __kernel_timespec timespec
+#define __kernel_itimerspec itimerspec
#endif
#include <uapi/linux/time.h>
diff --git a/include/linux/timekeeping.h b/include/linux/timekeeping.h
index 86bc2026efce..5d738804e3d6 100644
--- a/include/linux/timekeeping.h
+++ b/include/linux/timekeeping.h
@@ -21,6 +21,21 @@ extern int do_sys_settimeofday64(const struct timespec64 *tv,
const struct timezone *tz);
/*
+ * ktime_get() family: read the current time in a multitude of ways,
+ *
+ * The default time reference is CLOCK_MONOTONIC, starting at
+ * boot time but not counting the time spent in suspend.
+ * For other references, use the functions with "real", "clocktai",
+ * "boottime" and "raw" suffixes.
+ *
+ * To get the time in a different format, use the ones with
+ * "ns", "ts64" and "seconds" suffix.
+ *
+ * See Documentation/core-api/timekeeping.rst for more details.
+ */
+
+
+/*
* timespec64 based interfaces
*/
extern void ktime_get_raw_ts64(struct timespec64 *ts);
@@ -177,7 +192,7 @@ static inline time64_t ktime_get_clocktai_seconds(void)
extern bool timekeeping_rtc_skipsuspend(void);
extern bool timekeeping_rtc_skipresume(void);
-extern void timekeeping_inject_sleeptime64(struct timespec64 *delta);
+extern void timekeeping_inject_sleeptime64(const struct timespec64 *delta);
/*
* struct system_time_snapshot - simultaneous raw/real time capture with
@@ -243,7 +258,8 @@ extern void ktime_get_snapshot(struct system_time_snapshot *systime_snapshot);
extern int persistent_clock_is_local;
extern void read_persistent_clock64(struct timespec64 *ts);
-extern void read_boot_clock64(struct timespec64 *ts);
+void read_persistent_clock_and_boot_offset(struct timespec64 *wall_clock,
+ struct timespec64 *boot_offset);
extern int update_persistent_clock64(struct timespec64 now);
/*
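The new comment block documents the ktime_get() naming scheme; a sketch of how the suffixes combine, using accessors that already exist in this header (the local variables are illustrative only):

static void example_read_clocks(void)
{
	struct timespec64 ts;
	ktime_t  mono = ktime_get();              /* CLOCK_MONOTONIC, ktime_t    */
	u64      ns   = ktime_get_real_ns();      /* CLOCK_REALTIME, nanoseconds */
	time64_t secs = ktime_get_real_seconds(); /* CLOCK_REALTIME, seconds     */

	ktime_get_raw_ts64(&ts);                  /* raw monotonic, timespec64   */
	(void)mono; (void)ns; (void)secs;
}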
diff --git a/include/linux/torture.h b/include/linux/torture.h
index 66272862070b..61dfd93b6ee4 100644
--- a/include/linux/torture.h
+++ b/include/linux/torture.h
@@ -64,6 +64,8 @@ struct torture_random_state {
long trs_count;
};
#define DEFINE_TORTURE_RANDOM(name) struct torture_random_state name = { 0, 0 }
+#define DEFINE_TORTURE_RANDOM_PERCPU(name) \
+ DEFINE_PER_CPU(struct torture_random_state, name)
unsigned long torture_random(struct torture_random_state *trsp);
/* Task shuffler, which causes CPUs to occasionally go idle. */
@@ -79,7 +81,7 @@ void stutter_wait(const char *title);
int torture_stutter_init(int s);
/* Initialization and cleanup. */
-bool torture_init_begin(char *ttype, bool v);
+bool torture_init_begin(char *ttype, int v);
void torture_init_end(void);
bool torture_cleanup_begin(void);
void torture_cleanup_end(void);
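DEFINE_TORTURE_RANDOM_PERCPU() is a thin wrapper around DEFINE_PER_CPU(); a hedged sketch of a consumer (the names are made up):

static DEFINE_TORTURE_RANDOM_PERCPU(example_rand);

static bool example_one_in_four(void)
{
	/* Each CPU owns its own pseudo-random state, so no locking is needed. */
	return (torture_random(this_cpu_ptr(&example_rand)) & 0x3) == 0;
}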
diff --git a/include/linux/tpm.h b/include/linux/tpm.h
index 06639fb6ab85..4609b94142d4 100644
--- a/include/linux/tpm.h
+++ b/include/linux/tpm.h
@@ -43,6 +43,8 @@ struct tpm_class_ops {
u8 (*status) (struct tpm_chip *chip);
bool (*update_timeouts)(struct tpm_chip *chip,
unsigned long *timeout_cap);
+ int (*go_idle)(struct tpm_chip *chip);
+ int (*cmd_ready)(struct tpm_chip *chip);
int (*request_locality)(struct tpm_chip *chip, int loc);
int (*relinquish_locality)(struct tpm_chip *chip, int loc);
void (*clk_enable)(struct tpm_chip *chip, bool value);
@@ -61,6 +63,7 @@ extern int tpm_seal_trusted(struct tpm_chip *chip,
extern int tpm_unseal_trusted(struct tpm_chip *chip,
struct trusted_key_payload *payload,
struct trusted_key_options *options);
+extern struct tpm_chip *tpm_default_chip(void);
#else
static inline int tpm_is_tpm2(struct tpm_chip *chip)
{
@@ -96,5 +99,9 @@ static inline int tpm_unseal_trusted(struct tpm_chip *chip,
{
return -ENODEV;
}
+static inline struct tpm_chip *tpm_default_chip(void)
+{
+ return NULL;
+}
#endif
#endif
diff --git a/include/linux/tracehook.h b/include/linux/tracehook.h
index 4a8841963c2e..05589a3e37f4 100644
--- a/include/linux/tracehook.h
+++ b/include/linux/tracehook.h
@@ -51,6 +51,7 @@
#include <linux/security.h>
#include <linux/task_work.h>
#include <linux/memcontrol.h>
+#include <linux/blk-cgroup.h>
struct linux_binprm;
/*
@@ -192,6 +193,7 @@ static inline void tracehook_notify_resume(struct pt_regs *regs)
task_work_run();
mem_cgroup_handle_over_high();
+ blkcg_maybe_throttle_current();
}
#endif /* <linux/tracehook.h> */
diff --git a/include/linux/udp.h b/include/linux/udp.h
index ca840345571b..320d49d85484 100644
--- a/include/linux/udp.h
+++ b/include/linux/udp.h
@@ -74,8 +74,8 @@ struct udp_sock {
void (*encap_destroy)(struct sock *sk);
/* GRO functions for UDP socket */
- struct sk_buff ** (*gro_receive)(struct sock *sk,
- struct sk_buff **head,
+ struct sk_buff * (*gro_receive)(struct sock *sk,
+ struct list_head *head,
struct sk_buff *skb);
int (*gro_complete)(struct sock *sk,
struct sk_buff *skb,
diff --git a/include/linux/uio_driver.h b/include/linux/uio_driver.h
index 6c5f2074e14f..6f8b68cd460f 100644
--- a/include/linux/uio_driver.h
+++ b/include/linux/uio_driver.h
@@ -75,7 +75,7 @@ struct uio_device {
struct fasync_struct *async_queue;
wait_queue_head_t wait;
struct uio_info *info;
- spinlock_t info_lock;
+ struct mutex info_lock;
struct kobject *map_dir;
struct kobject *portio_dir;
};
diff --git a/include/linux/usb/audio-v3.h b/include/linux/usb/audio-v3.h
index a710e28b5215..6b708434b7f9 100644
--- a/include/linux/usb/audio-v3.h
+++ b/include/linux/usb/audio-v3.h
@@ -387,6 +387,12 @@ struct uac3_interrupt_data_msg {
#define UAC3_CONNECTORS 0x0f
#define UAC3_POWER_DOMAIN 0x10
+/* A.20 PROCESSING UNIT PROCESS TYPES */
+#define UAC3_PROCESS_UNDEFINED 0x00
+#define UAC3_PROCESS_UP_DOWNMIX 0x01
+#define UAC3_PROCESS_STEREO_EXTENDER 0x02
+#define UAC3_PROCESS_MULTI_FUNCTION 0x03
+
/* A.22 AUDIO CLASS-SPECIFIC REQUEST CODES */
/* see audio-v2.h for the rest, which is identical to v2 */
#define UAC3_CS_REQ_INTEN 0x04
@@ -406,6 +412,15 @@ struct uac3_interrupt_data_msg {
#define UAC3_TE_OVERFLOW 0x04
#define UAC3_TE_LATENCY 0x05
+/* A.23.10 PROCESSING UNITS CONTROL SELECTORS */
+
+/* Up/Down Mixer */
+#define UAC3_UD_MODE_SELECT 0x01
+
+/* Stereo Extender */
+#define UAC3_EXT_WIDTH_CONTROL 0x01
+
+
/* BADD predefined Unit/Terminal values */
#define UAC3_BADD_IT_ID1 1 /* Input Terminal ID1: bTerminalID = 1 */
#define UAC3_BADD_FU_ID2 2 /* Feature Unit ID2: bUnitID = 2 */
@@ -432,4 +447,8 @@ struct uac3_interrupt_data_msg {
/* BADD sample rate is always fixed to 48kHz */
#define UAC3_BADD_SAMPLING_RATE 48000
+/* BADD power domains recovery times in 50us increments */
+#define UAC3_BADD_PD_RECOVER_D1D0 0x0258 /* 30ms */
+#define UAC3_BADD_PD_RECOVER_D2D0 0x1770 /* 300ms */
+
#endif /* __LINUX_USB_AUDIO_V3_H */
diff --git a/include/linux/vga_switcheroo.h b/include/linux/vga_switcheroo.h
index 77f0f0af3a71..a34539b7f750 100644
--- a/include/linux/vga_switcheroo.h
+++ b/include/linux/vga_switcheroo.h
@@ -84,8 +84,8 @@ enum vga_switcheroo_state {
* Client identifier. Audio clients use the same identifier & 0x100.
*/
enum vga_switcheroo_client_id {
- VGA_SWITCHEROO_UNKNOWN_ID = -1,
- VGA_SWITCHEROO_IGD,
+ VGA_SWITCHEROO_UNKNOWN_ID = 0x1000,
+ VGA_SWITCHEROO_IGD = 0,
VGA_SWITCHEROO_DIS,
VGA_SWITCHEROO_MAX_CLIENTS,
};
@@ -151,7 +151,7 @@ int vga_switcheroo_register_client(struct pci_dev *dev,
bool driver_power_control);
int vga_switcheroo_register_audio_client(struct pci_dev *pdev,
const struct vga_switcheroo_client_ops *ops,
- enum vga_switcheroo_client_id id);
+ struct pci_dev *vga_dev);
void vga_switcheroo_client_fb_set(struct pci_dev *dev,
struct fb_info *info);
@@ -180,7 +180,7 @@ static inline int vga_switcheroo_register_handler(const struct vga_switcheroo_ha
enum vga_switcheroo_handler_flags_t handler_flags) { return 0; }
static inline int vga_switcheroo_register_audio_client(struct pci_dev *pdev,
const struct vga_switcheroo_client_ops *ops,
- enum vga_switcheroo_client_id id) { return 0; }
+ struct pci_dev *vga_dev) { return 0; }
static inline void vga_switcheroo_unregister_handler(void) {}
static inline enum vga_switcheroo_handler_flags_t vga_switcheroo_handler_flags(void) { return 0; }
static inline int vga_switcheroo_lock_ddc(struct pci_dev *pdev) { return -ENODEV; }
diff --git a/include/linux/virtio_config.h b/include/linux/virtio_config.h
index 5559a2d31c46..32baf8e26735 100644
--- a/include/linux/virtio_config.h
+++ b/include/linux/virtio_config.h
@@ -79,7 +79,8 @@ struct virtio_config_ops {
u64 (*get_features)(struct virtio_device *vdev);
int (*finalize_features)(struct virtio_device *vdev);
const char *(*bus_name)(struct virtio_device *vdev);
- int (*set_vq_affinity)(struct virtqueue *vq, int cpu);
+ int (*set_vq_affinity)(struct virtqueue *vq,
+ const struct cpumask *cpu_mask);
const struct cpumask *(*get_vq_affinity)(struct virtio_device *vdev,
int index);
};
@@ -236,11 +237,11 @@ const char *virtio_bus_name(struct virtio_device *vdev)
*
*/
static inline
-int virtqueue_set_affinity(struct virtqueue *vq, int cpu)
+int virtqueue_set_affinity(struct virtqueue *vq, const struct cpumask *cpu_mask)
{
struct virtio_device *vdev = vq->vdev;
if (vdev->config->set_vq_affinity)
- return vdev->config->set_vq_affinity(vq, cpu);
+ return vdev->config->set_vq_affinity(vq, cpu_mask);
return 0;
}
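With set_vq_affinity() now taking a cpumask, callers express a set of candidate CPUs rather than a single one. A minimal sketch of pinning a virtqueue to one CPU; vq and cpu are assumed to come from the calling driver:

static int example_pin_vq(struct virtqueue *vq, unsigned int cpu)
{
	/* cpumask_of() yields a constant single-CPU mask; drivers spreading
	 * interrupts across several CPUs would pass a wider mask instead. */
	return virtqueue_set_affinity(vq, cpumask_of(cpu));
}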
diff --git a/include/linux/ww_mutex.h b/include/linux/ww_mutex.h
index 39fda195bf78..3af7c0e03be5 100644
--- a/include/linux/ww_mutex.h
+++ b/include/linux/ww_mutex.h
@@ -6,8 +6,10 @@
*
* Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
*
- * Wound/wait implementation:
+ * Wait/Die implementation:
* Copyright (C) 2013 Canonical Ltd.
+ * Choice of algorithm:
+ * Copyright (C) 2018 VMware Inc.
*
* This file contains the main data structure and API definitions.
*/
@@ -23,14 +25,17 @@ struct ww_class {
struct lock_class_key mutex_key;
const char *acquire_name;
const char *mutex_name;
+ unsigned int is_wait_die;
};
struct ww_acquire_ctx {
struct task_struct *task;
unsigned long stamp;
- unsigned acquired;
+ unsigned int acquired;
+ unsigned short wounded;
+ unsigned short is_wait_die;
#ifdef CONFIG_DEBUG_MUTEXES
- unsigned done_acquire;
+ unsigned int done_acquire;
struct ww_class *ww_class;
struct ww_mutex *contending_lock;
#endif
@@ -38,8 +43,8 @@ struct ww_acquire_ctx {
struct lockdep_map dep_map;
#endif
#ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
- unsigned deadlock_inject_interval;
- unsigned deadlock_inject_countdown;
+ unsigned int deadlock_inject_interval;
+ unsigned int deadlock_inject_countdown;
#endif
};
@@ -58,17 +63,21 @@ struct ww_mutex {
# define __WW_CLASS_MUTEX_INITIALIZER(lockname, class)
#endif
-#define __WW_CLASS_INITIALIZER(ww_class) \
+#define __WW_CLASS_INITIALIZER(ww_class, _is_wait_die) \
{ .stamp = ATOMIC_LONG_INIT(0) \
, .acquire_name = #ww_class "_acquire" \
- , .mutex_name = #ww_class "_mutex" }
+ , .mutex_name = #ww_class "_mutex" \
+ , .is_wait_die = _is_wait_die }
#define __WW_MUTEX_INITIALIZER(lockname, class) \
{ .base = __MUTEX_INITIALIZER(lockname.base) \
__WW_CLASS_MUTEX_INITIALIZER(lockname, class) }
+#define DEFINE_WD_CLASS(classname) \
+ struct ww_class classname = __WW_CLASS_INITIALIZER(classname, 1)
+
#define DEFINE_WW_CLASS(classname) \
- struct ww_class classname = __WW_CLASS_INITIALIZER(classname)
+ struct ww_class classname = __WW_CLASS_INITIALIZER(classname, 0)
#define DEFINE_WW_MUTEX(mutexname, ww_class) \
struct ww_mutex mutexname = __WW_MUTEX_INITIALIZER(mutexname, ww_class)
@@ -102,7 +111,7 @@ static inline void ww_mutex_init(struct ww_mutex *lock,
*
* Context-based w/w mutex acquiring can be done in any order whatsoever within
* a given lock class. Deadlocks will be detected and handled with the
- * wait/wound logic.
+ * wait/die logic.
*
* Mixing of context-based w/w mutex acquiring and single w/w mutex locking can
* result in undetected deadlocks and is so forbidden. Mixing different contexts
@@ -123,6 +132,8 @@ static inline void ww_acquire_init(struct ww_acquire_ctx *ctx,
ctx->task = current;
ctx->stamp = atomic_long_inc_return_relaxed(&ww_class->stamp);
ctx->acquired = 0;
+ ctx->wounded = false;
+ ctx->is_wait_die = ww_class->is_wait_die;
#ifdef CONFIG_DEBUG_MUTEXES
ctx->ww_class = ww_class;
ctx->done_acquire = 0;
@@ -195,13 +206,13 @@ static inline void ww_acquire_fini(struct ww_acquire_ctx *ctx)
* Lock the w/w mutex exclusively for this task.
*
* Deadlocks within a given w/w class of locks are detected and handled with the
- * wait/wound algorithm. If the lock isn't immediately avaiable this function
+ * wait/die algorithm. If the lock isn't immediately available this function
* will either sleep until it is (wait case). Or it selects the current context
- * for backing off by returning -EDEADLK (wound case). Trying to acquire the
+ * for backing off by returning -EDEADLK (die case). Trying to acquire the
* same lock with the same context twice is also detected and signalled by
* returning -EALREADY. Returns 0 if the mutex was successfully acquired.
*
- * In the wound case the caller must release all currently held w/w mutexes for
+ * In the die case the caller must release all currently held w/w mutexes for
* the given context and then wait for this contending lock to be available by
* calling ww_mutex_lock_slow. Alternatively callers can opt to not acquire this
* lock and proceed with trying to acquire further w/w mutexes (e.g. when
@@ -226,14 +237,14 @@ extern int /* __must_check */ ww_mutex_lock(struct ww_mutex *lock, struct ww_acq
* Lock the w/w mutex exclusively for this task.
*
* Deadlocks within a given w/w class of locks are detected and handled with the
- * wait/wound algorithm. If the lock isn't immediately avaiable this function
+ * wait/die algorithm. If the lock isn't immediately available this function
* will either sleep until it is (wait case). Or it selects the current context
- * for backing off by returning -EDEADLK (wound case). Trying to acquire the
+ * for backing off by returning -EDEADLK (die case). Trying to acquire the
* same lock with the same context twice is also detected and signalled by
* returning -EALREADY. Returns 0 if the mutex was successfully acquired. If a
* signal arrives while waiting for the lock then this function returns -EINTR.
*
- * In the wound case the caller must release all currently held w/w mutexes for
+ * In the die case the caller must release all currently held w/w mutexes for
* the given context and then wait for this contending lock to be available by
* calling ww_mutex_lock_slow_interruptible. Alternatively callers can opt to
* not acquire this lock and proceed with trying to acquire further w/w mutexes
@@ -256,7 +267,7 @@ extern int __must_check ww_mutex_lock_interruptible(struct ww_mutex *lock,
* @lock: the mutex to be acquired
* @ctx: w/w acquire context
*
- * Acquires a w/w mutex with the given context after a wound case. This function
+ * Acquires a w/w mutex with the given context after a die case. This function
* will sleep until the lock becomes available.
*
* The caller must have released all w/w mutexes already acquired with the
@@ -290,7 +301,7 @@ ww_mutex_lock_slow(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
* @lock: the mutex to be acquired
* @ctx: w/w acquire context
*
- * Acquires a w/w mutex with the given context after a wound case. This function
+ * Acquires a w/w mutex with the given context after a die case. This function
* will sleep until the lock becomes available and returns 0 when the lock has
* been acquired. If a signal arrives while waiting for the lock then this
* function returns -EINTR.
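The reworded comments describe the wait/die back-off protocol; a compact sketch of the retry loop they imply for two mutexes of the same class (the helper name and the lock pair are assumptions):

static void example_lock_pair(struct ww_mutex *a, struct ww_mutex *b,
			      struct ww_class *class)
{
	struct ww_acquire_ctx ctx;
	int ret;

	ww_acquire_init(&ctx, class);
	ww_mutex_lock(a, &ctx);		/* cannot die: nothing held yet */
	for (;;) {
		ret = ww_mutex_lock(b, &ctx);
		if (ret != -EDEADLK)
			break;
		/* die case: drop what we hold, sleep on the contended lock,
		 * then retry with the acquisition order swapped. */
		ww_mutex_unlock(a);
		ww_mutex_lock_slow(b, &ctx);
		swap(a, b);
	}
	ww_acquire_done(&ctx);
	/* ... use the protected objects, then ww_mutex_unlock() both and
	 * call ww_acquire_fini(&ctx) ... */
}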
diff --git a/include/media/cec-pin.h b/include/media/cec-pin.h
index ed16c6dde0ba..604e79cb6cbf 100644
--- a/include/media/cec-pin.h
+++ b/include/media/cec-pin.h
@@ -25,6 +25,9 @@
* @read_hpd: read the HPD pin. Return true if high, false if low or
* an error if negative. If NULL or -ENOTTY is returned,
* then this is not supported.
+ * @read_5v: read the 5V pin. Return true if high, false if low or
+ * an error if negative. If NULL or -ENOTTY is returned,
+ * then this is not supported.
*
* These operations are used by the cec pin framework to manipulate
* the CEC pin.
@@ -38,6 +41,7 @@ struct cec_pin_ops {
void (*free)(struct cec_adapter *adap);
void (*status)(struct cec_adapter *adap, struct seq_file *file);
int (*read_hpd)(struct cec_adapter *adap);
+ int (*read_5v)(struct cec_adapter *adap);
};
/**
diff --git a/include/media/cec.h b/include/media/cec.h
index 580ab1042898..ff9847f7f99d 100644
--- a/include/media/cec.h
+++ b/include/media/cec.h
@@ -79,7 +79,7 @@ struct cec_event_entry {
};
#define CEC_NUM_CORE_EVENTS 2
-#define CEC_NUM_EVENTS CEC_EVENT_PIN_HPD_HIGH
+#define CEC_NUM_EVENTS CEC_EVENT_PIN_5V_HIGH
struct cec_fh {
struct list_head list;
@@ -309,6 +309,16 @@ void cec_queue_pin_cec_event(struct cec_adapter *adap, bool is_high,
void cec_queue_pin_hpd_event(struct cec_adapter *adap, bool is_high, ktime_t ts);
/**
+ * cec_queue_pin_5v_event() - queue a pin event with a given timestamp.
+ *
+ * @adap: pointer to the cec adapter
+ * @is_high: when true the 5V pin is high, otherwise it is low
+ * @ts: the timestamp for this event
+ *
+ */
+void cec_queue_pin_5v_event(struct cec_adapter *adap, bool is_high, ktime_t ts);
+
+/**
* cec_get_edid_phys_addr() - find and return the physical address
*
* @edid: pointer to the EDID data
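cec_queue_pin_5v_event() mirrors the existing HPD pin event; a pin driver monitoring the 5V line would typically forward level changes as sketched here (the caller that detects the edge is assumed):

static void example_report_5v(struct cec_adapter *adap, bool is_high)
{
	/* Take the timestamp as close to the detected edge as possible. */
	cec_queue_pin_5v_event(adap, is_high, ktime_get());
}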
diff --git a/include/media/dvb_frontend.h b/include/media/dvb_frontend.h
index 331c8269c00e..6f7a85ab3541 100644
--- a/include/media/dvb_frontend.h
+++ b/include/media/dvb_frontend.h
@@ -52,6 +52,10 @@
*/
#define MAX_DELSYS 8
+/* Helper definitions to be used at frontend drivers */
+#define kHz 1000UL
+#define MHz 1000000UL
+
/**
* struct dvb_frontend_tune_settings - parameters to adjust frontend tuning
*
@@ -73,22 +77,19 @@ struct dvb_frontend;
* struct dvb_tuner_info - Frontend name and min/max ranges/bandwidths
*
* @name: name of the Frontend
- * @frequency_min: minimal frequency supported
- * @frequency_max: maximum frequency supported
- * @frequency_step: frequency step
+ * @frequency_min_hz: minimal frequency supported in Hz
+ * @frequency_max_hz: maximum frequency supported in Hz
+ * @frequency_step_hz: frequency step in Hz
* @bandwidth_min: minimal frontend bandwidth supported
* @bandwidth_max: maximum frontend bandwidth supported
* @bandwidth_step: frontend bandwidth step
- *
- * NOTE: frequency parameters are in Hz, for terrestrial/cable or kHz for
- * satellite.
*/
struct dvb_tuner_info {
char name[128];
- u32 frequency_min;
- u32 frequency_max;
- u32 frequency_step;
+ u32 frequency_min_hz;
+ u32 frequency_max_hz;
+ u32 frequency_step_hz;
u32 bandwidth_min;
u32 bandwidth_max;
@@ -316,6 +317,34 @@ struct analog_demod_ops {
struct dtv_frontend_properties;
+/**
+ * struct dvb_frontend_internal_info - Frontend properties and capabilities
+ *
+ * @name: Name of the frontend
+ * @frequency_min_hz: Minimal frequency supported by the frontend.
+ * @frequency_max_hz: Maximum frequency supported by the frontend.
+ * @frequency_stepsize_hz: All frequencies are multiples of this value.
+ * @frequency_tolerance_hz: Frequency tolerance.
+ * @symbol_rate_min: Minimal symbol rate, in bauds
+ * (for Cable/Satellite systems).
+ * @symbol_rate_max: Maximal symbol rate, in bauds
+ * (for Cable/Satellite systems).
+ * @symbol_rate_tolerance: Maximal symbol rate tolerance, in ppm
+ * (for Cable/Satellite systems).
+ * @caps: Capabilities supported by the frontend,
+ * as specified in &enum fe_caps.
+ */
+struct dvb_frontend_internal_info {
+ char name[128];
+ u32 frequency_min_hz;
+ u32 frequency_max_hz;
+ u32 frequency_stepsize_hz;
+ u32 frequency_tolerance_hz;
+ u32 symbol_rate_min;
+ u32 symbol_rate_max;
+ u32 symbol_rate_tolerance;
+ enum fe_caps caps;
+};
/**
* struct dvb_frontend_ops - Demodulation information and callbacks for
@@ -403,7 +432,7 @@ struct dtv_frontend_properties;
* @analog_ops: pointer to &struct analog_demod_ops
*/
struct dvb_frontend_ops {
- struct dvb_frontend_info info;
+ struct dvb_frontend_internal_info info;
u8 delsys[MAX_DELSYS];
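With the tuner frequency fields now uniformly in Hz, drivers can combine them with the new kHz/MHz helpers; the values below are invented purely for illustration:

static const struct dvb_tuner_info example_tuner_info = {
	.name              = "example tuner",
	.frequency_min_hz  =  48 * MHz,
	.frequency_max_hz  = 862 * MHz,
	.frequency_step_hz = 62500,	/* 62.5 kHz raster */
};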
diff --git a/include/media/i2c/lm3560.h b/include/media/i2c/lm3560.h
index a5bd310c9e1e..0e2b1c751a5d 100644
--- a/include/media/i2c/lm3560.h
+++ b/include/media/i2c/lm3560.h
@@ -22,6 +22,7 @@
#include <media/v4l2-subdev.h>
+#define LM3559_NAME "lm3559"
#define LM3560_NAME "lm3560"
#define LM3560_I2C_ADDR (0x53)
diff --git a/include/media/v4l2-common.h b/include/media/v4l2-common.h
index 160bca96d524..cdc87ec61e54 100644
--- a/include/media/v4l2-common.h
+++ b/include/media/v4l2-common.h
@@ -338,7 +338,7 @@ void v4l_bound_align_image(unsigned int *width, unsigned int wmin,
({ \
BUILD_BUG_ON(sizeof((array)->width_field) != sizeof(u32) || \
sizeof((array)->height_field) != sizeof(u32)); \
- (typeof(&(*(array))))__v4l2_find_nearest_size( \
+ (typeof(&(array)[0]))__v4l2_find_nearest_size( \
(array), array_size, sizeof(*(array)), \
offsetof(typeof(*(array)), width_field), \
offsetof(typeof(*(array)), height_field), \
diff --git a/include/media/v4l2-ctrls.h b/include/media/v4l2-ctrls.h
index 5b445b5654f7..f615ba1b29dd 100644
--- a/include/media/v4l2-ctrls.h
+++ b/include/media/v4l2-ctrls.h
@@ -181,10 +181,10 @@ typedef void (*v4l2_ctrl_notify_fnc)(struct v4l2_ctrl *ctrl, void *priv);
* not freed when the control is deleted. Should this be needed
* then a new internal bitfield can be added to tell the framework
* to free this pointer.
- * @p_cur: The control's current value represented via a union with
+ * @p_cur: The control's current value represented via a union which
* provides a standard way of accessing control types
* through a pointer.
- * @p_new: The control's new value represented via a union with provides
+ * @p_new: The control's new value represented via a union which provides
* a standard way of accessing control types
* through a pointer.
*/
diff --git a/include/media/v4l2-ioctl.h b/include/media/v4l2-ioctl.h
index a8dbf5b54b5c..5848d92c30da 100644
--- a/include/media/v4l2-ioctl.h
+++ b/include/media/v4l2-ioctl.h
@@ -621,7 +621,7 @@ const char *v4l2_norm_to_name(v4l2_std_id id);
* v4l2_video_std_frame_period - Ancillary routine that fills a
* struct &v4l2_fract pointer with the default framerate fraction.
*
- * @id: analog TV sdandard ID.
+ * @id: analog TV standard ID.
* @frameperiod: struct &v4l2_fract pointer to be filled
*
*/
@@ -632,7 +632,7 @@ void v4l2_video_std_frame_period(int id, struct v4l2_fract *frameperiod);
* a &v4l2_standard structure according to the @id parameter.
*
* @vs: struct &v4l2_standard pointer to be filled
- * @id: analog TV sdandard ID.
+ * @id: analog TV standard ID.
* @name: name of the standard to be used
*
* .. note::
@@ -643,6 +643,17 @@ int v4l2_video_std_construct(struct v4l2_standard *vs,
int id, const char *name);
/**
+ * v4l_video_std_enumstd - Ancillary routine that fills in the fields of
+ * a &v4l2_standard structure according to the @id and @vs->index
+ * parameters.
+ *
+ * @vs: struct &v4l2_standard pointer to be filled.
+ * @id: analog TV standard ID.
+ *
+ */
+int v4l_video_std_enumstd(struct v4l2_standard *vs, v4l2_std_id id);
+
+/**
* v4l_printk_ioctl - Ancillary routine that prints the ioctl in a
* human-readable format.
*
diff --git a/include/media/v4l2-mediabus.h b/include/media/v4l2-mediabus.h
index 4d8626c468bc..4bbb5f3d2b02 100644
--- a/include/media/v4l2-mediabus.h
+++ b/include/media/v4l2-mediabus.h
@@ -45,6 +45,8 @@
/* Active state of Sync-on-green (SoG) signal, 0/1 for LOW/HIGH respectively. */
#define V4L2_MBUS_VIDEO_SOG_ACTIVE_HIGH BIT(12)
#define V4L2_MBUS_VIDEO_SOG_ACTIVE_LOW BIT(13)
+#define V4L2_MBUS_DATA_ENABLE_HIGH BIT(14)
+#define V4L2_MBUS_DATA_ENABLE_LOW BIT(15)
/* Serial flags */
/* How many lanes the client can use */
diff --git a/include/media/v4l2-mem2mem.h b/include/media/v4l2-mem2mem.h
index 3d07ba3a8262..d655720e16a1 100644
--- a/include/media/v4l2-mem2mem.h
+++ b/include/media/v4l2-mem2mem.h
@@ -32,7 +32,7 @@
* assumed that one source and one destination buffer are all
* that is required for the driver to perform one full transaction.
* This method may not sleep.
- * @job_abort: required. Informs the driver that it has to abort the currently
+ * @job_abort: optional. Informs the driver that it has to abort the currently
* running transaction as soon as possible (i.e. as soon as it can
* stop the device safely; e.g. in the next interrupt handler),
* even if the transaction would not have been finished by then.
@@ -40,19 +40,14 @@
* v4l2_m2m_job_finish() (as if the transaction ended normally).
* This function does not have to (and will usually not) wait
* until the device enters a state when it can be stopped.
- * @lock: optional. Define a driver's own lock callback, instead of using
- * &v4l2_m2m_ctx->q_lock.
- * @unlock: optional. Define a driver's own unlock callback, instead of
- * using &v4l2_m2m_ctx->q_lock.
*/
struct v4l2_m2m_ops {
void (*device_run)(void *priv);
int (*job_ready)(void *priv);
void (*job_abort)(void *priv);
- void (*lock)(void *priv);
- void (*unlock)(void *priv);
};
+struct video_device;
struct v4l2_m2m_dev;
/**
@@ -328,6 +323,24 @@ int v4l2_m2m_mmap(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
*/
struct v4l2_m2m_dev *v4l2_m2m_init(const struct v4l2_m2m_ops *m2m_ops);
+#if defined(CONFIG_MEDIA_CONTROLLER)
+void v4l2_m2m_unregister_media_controller(struct v4l2_m2m_dev *m2m_dev);
+int v4l2_m2m_register_media_controller(struct v4l2_m2m_dev *m2m_dev,
+ struct video_device *vdev, int function);
+#else
+static inline void
+v4l2_m2m_unregister_media_controller(struct v4l2_m2m_dev *m2m_dev)
+{
+}
+
+static inline int
+v4l2_m2m_register_media_controller(struct v4l2_m2m_dev *m2m_dev,
+ struct video_device *vdev, int function)
+{
+ return 0;
+}
+#endif
+
/**
* v4l2_m2m_release() - cleans up and frees a m2m_dev structure
*
@@ -437,6 +450,35 @@ static inline void *v4l2_m2m_next_dst_buf(struct v4l2_m2m_ctx *m2m_ctx)
}
/**
+ * v4l2_m2m_last_buf() - return last buffer from the list of ready buffers
+ *
+ * @q_ctx: pointer to struct @v4l2_m2m_queue_ctx
+ */
+void *v4l2_m2m_last_buf(struct v4l2_m2m_queue_ctx *q_ctx);
+
+/**
+ * v4l2_m2m_last_src_buf() - return last source buffer from the list of
+ * ready buffers
+ *
+ * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
+ */
+static inline void *v4l2_m2m_last_src_buf(struct v4l2_m2m_ctx *m2m_ctx)
+{
+ return v4l2_m2m_last_buf(&m2m_ctx->out_q_ctx);
+}
+
+/**
+ * v4l2_m2m_last_dst_buf() - return last destination buffer from the list of
+ * ready buffers
+ *
+ * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
+ */
+static inline void *v4l2_m2m_last_dst_buf(struct v4l2_m2m_ctx *m2m_ctx)
+{
+ return v4l2_m2m_last_buf(&m2m_ctx->cap_q_ctx);
+}
+
+/**
* v4l2_m2m_for_each_dst_buf() - iterate over a list of destination ready
* buffers
*
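The _last_src_buf()/_last_dst_buf() helpers complement the existing next_*_buf accessors; a stateful codec draining its queues might use them roughly as sketched here (the buffer-type cast and context pointer are assumptions):

static bool example_is_final_src(struct v4l2_m2m_ctx *m2m_ctx,
				 struct vb2_v4l2_buffer *vbuf)
{
	/* True when 'vbuf' is the last OUTPUT buffer still queued, i.e. the
	 * one that should trigger end-of-stream handling. */
	return vbuf == (struct vb2_v4l2_buffer *)v4l2_m2m_last_src_buf(m2m_ctx);
}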
diff --git a/include/media/vsp1.h b/include/media/vsp1.h
index 678c24de1ac6..3093b9cb9067 100644
--- a/include/media/vsp1.h
+++ b/include/media/vsp1.h
@@ -25,6 +25,7 @@ int vsp1_du_init(struct device *dev);
* struct vsp1_du_lif_config - VSP LIF configuration
* @width: output frame width
* @height: output frame height
+ * @interlaced: true for interlaced pipelines
* @callback: frame completion callback function (optional). When a callback
* is provided, the VSP driver guarantees that it will be called once
* and only once for each vsp1_du_atomic_flush() call.
@@ -33,6 +34,7 @@ int vsp1_du_init(struct device *dev);
struct vsp1_du_lif_config {
unsigned int width;
unsigned int height;
+ bool interlaced;
void (*callback)(void *data, bool completed, u32 crc);
void *callback_data;
diff --git a/include/net/act_api.h b/include/net/act_api.h
index 9e59ebfded62..1ad5b19e83a9 100644
--- a/include/net/act_api.h
+++ b/include/net/act_api.h
@@ -6,6 +6,7 @@
* Public action API for classifiers/qdiscs
*/
+#include <linux/refcount.h>
#include <net/sch_generic.h>
#include <net/pkt_sched.h>
#include <net/net_namespace.h>
@@ -26,8 +27,8 @@ struct tc_action {
struct tcf_idrinfo *idrinfo;
u32 tcfa_index;
- int tcfa_refcnt;
- int tcfa_bindcnt;
+ refcount_t tcfa_refcnt;
+ atomic_t tcfa_bindcnt;
u32 tcfa_capab;
int tcfa_action;
struct tcf_t tcfa_tm;
@@ -37,7 +38,7 @@ struct tc_action {
spinlock_t tcfa_lock;
struct gnet_stats_basic_cpu __percpu *cpu_bstats;
struct gnet_stats_queue __percpu *cpu_qstats;
- struct tc_cookie *act_cookie;
+ struct tc_cookie __rcu *act_cookie;
struct tcf_chain *goto_chain;
};
#define tcf_index common.tcfa_index
@@ -84,14 +85,15 @@ struct tc_action_ops {
size_t size;
struct module *owner;
int (*act)(struct sk_buff *, const struct tc_action *,
- struct tcf_result *);
+ struct tcf_result *); /* called under RCU BH lock */
int (*dump)(struct sk_buff *, struct tc_action *, int, int);
void (*cleanup)(struct tc_action *);
int (*lookup)(struct net *net, struct tc_action **a, u32 index,
struct netlink_ext_ack *extack);
int (*init)(struct net *net, struct nlattr *nla,
struct nlattr *est, struct tc_action **act, int ovr,
- int bind, struct netlink_ext_ack *extack);
+ int bind, bool rtnl_held,
+ struct netlink_ext_ack *extack);
int (*walk)(struct net *, struct sk_buff *,
struct netlink_callback *, int,
const struct tc_action_ops *,
@@ -99,6 +101,8 @@ struct tc_action_ops {
void (*stats_update)(struct tc_action *, u64, u32, u64);
size_t (*get_fill_size)(const struct tc_action *act);
struct net_device *(*get_dev)(const struct tc_action *a);
+ void (*put_dev)(struct net_device *dev);
+ int (*delete)(struct net *net, u32 index);
};
struct tc_action_net {
@@ -151,6 +155,10 @@ int tcf_idr_create(struct tc_action_net *tn, u32 index, struct nlattr *est,
int bind, bool cpustats);
void tcf_idr_insert(struct tc_action_net *tn, struct tc_action *a);
+void tcf_idr_cleanup(struct tc_action_net *tn, u32 index);
+int tcf_idr_check_alloc(struct tc_action_net *tn, u32 *index,
+ struct tc_action **a, int bind);
+int tcf_idr_delete_index(struct tc_action_net *tn, u32 index);
int __tcf_idr_release(struct tc_action *a, bool bind, bool strict);
static inline int tcf_idr_release(struct tc_action *a, bool bind)
@@ -161,18 +169,20 @@ static inline int tcf_idr_release(struct tc_action *a, bool bind)
int tcf_register_action(struct tc_action_ops *a, struct pernet_operations *ops);
int tcf_unregister_action(struct tc_action_ops *a,
struct pernet_operations *ops);
-int tcf_action_destroy(struct list_head *actions, int bind);
+int tcf_action_destroy(struct tc_action *actions[], int bind);
int tcf_action_exec(struct sk_buff *skb, struct tc_action **actions,
int nr_actions, struct tcf_result *res);
int tcf_action_init(struct net *net, struct tcf_proto *tp, struct nlattr *nla,
struct nlattr *est, char *name, int ovr, int bind,
- struct list_head *actions, size_t *attr_size,
- struct netlink_ext_ack *extack);
+ struct tc_action *actions[], size_t *attr_size,
+ bool rtnl_held, struct netlink_ext_ack *extack);
struct tc_action *tcf_action_init_1(struct net *net, struct tcf_proto *tp,
struct nlattr *nla, struct nlattr *est,
char *name, int ovr, int bind,
+ bool rtnl_held,
struct netlink_ext_ack *extack);
-int tcf_action_dump(struct sk_buff *skb, struct list_head *, int, int);
+int tcf_action_dump(struct sk_buff *skb, struct tc_action *actions[], int bind,
+ int ref);
int tcf_action_dump_old(struct sk_buff *skb, struct tc_action *a, int, int);
int tcf_action_dump_1(struct sk_buff *skb, struct tc_action *a, int, int);
int tcf_action_copy_stats(struct sk_buff *, struct tc_action *, int);
@@ -190,9 +200,6 @@ static inline void tcf_action_stats_update(struct tc_action *a, u64 bytes,
#endif
}
-typedef int tc_setup_cb_t(enum tc_setup_type type,
- void *type_data, void *cb_priv);
-
#ifdef CONFIG_NET_CLS_ACT
int tc_setup_cb_egdev_register(const struct net_device *dev,
tc_setup_cb_t *cb, void *cb_priv);
diff --git a/include/net/addrconf.h b/include/net/addrconf.h
index 5f43f7a70fe6..6def0351bcc3 100644
--- a/include/net/addrconf.h
+++ b/include/net/addrconf.h
@@ -108,6 +108,7 @@ int ipv6_get_lladdr(struct net_device *dev, struct in6_addr *addr,
u32 banned_flags);
bool inet_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2,
bool match_wildcard);
+bool inet_rcv_saddr_any(const struct sock *sk);
void addrconf_join_solict(struct net_device *dev, const struct in6_addr *addr);
void addrconf_leave_solict(struct inet6_dev *idev, const struct in6_addr *addr);
diff --git a/include/net/af_ieee802154.h b/include/net/af_ieee802154.h
index a5563d27a3eb..8003a9f6eb43 100644
--- a/include/net/af_ieee802154.h
+++ b/include/net/af_ieee802154.h
@@ -56,6 +56,7 @@ struct sockaddr_ieee802154 {
#define WPAN_WANTACK 0
#define WPAN_SECURITY 1
#define WPAN_SECURITY_LEVEL 2
+#define WPAN_WANTLQI 3
#define WPAN_SECURITY_DEFAULT 0
#define WPAN_SECURITY_OFF 1
diff --git a/include/net/af_rxrpc.h b/include/net/af_rxrpc.h
index 8ae8ee004258..f53edb3754bc 100644
--- a/include/net/af_rxrpc.h
+++ b/include/net/af_rxrpc.h
@@ -61,7 +61,7 @@ int rxrpc_kernel_send_data(struct socket *, struct rxrpc_call *,
struct msghdr *, size_t,
rxrpc_notify_end_tx_t);
int rxrpc_kernel_recv_data(struct socket *, struct rxrpc_call *,
- void *, size_t, size_t *, bool, u32 *, u16 *);
+ struct iov_iter *, bool, u32 *, u16 *);
bool rxrpc_kernel_abort_call(struct socket *, struct rxrpc_call *,
u32, int, const char *);
void rxrpc_kernel_end_call(struct socket *, struct rxrpc_call *);
diff --git a/include/net/af_vsock.h b/include/net/af_vsock.h
index 9324ac2d9ff2..43913ae79f64 100644
--- a/include/net/af_vsock.h
+++ b/include/net/af_vsock.h
@@ -64,7 +64,8 @@ struct vsock_sock {
struct list_head pending_links;
struct list_head accept_queue;
bool rejected;
- struct delayed_work dwork;
+ struct delayed_work connect_work;
+ struct delayed_work pending_work;
struct delayed_work close_work;
bool close_work_scheduled;
u32 peer_shutdown;
@@ -77,7 +78,6 @@ struct vsock_sock {
s64 vsock_stream_has_data(struct vsock_sock *vsk);
s64 vsock_stream_has_space(struct vsock_sock *vsk);
-void vsock_pending_work(struct work_struct *work);
struct sock *__vsock_create(struct net *net,
struct socket *sock,
struct sock *parent,
diff --git a/include/net/bluetooth/bluetooth.h b/include/net/bluetooth/bluetooth.h
index 53ce8176c313..ec9d6bc65855 100644
--- a/include/net/bluetooth/bluetooth.h
+++ b/include/net/bluetooth/bluetooth.h
@@ -271,7 +271,7 @@ int bt_sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
int flags);
int bt_sock_stream_recvmsg(struct socket *sock, struct msghdr *msg,
size_t len, int flags);
-__poll_t bt_sock_poll_mask(struct socket *sock, __poll_t events);
+__poll_t bt_sock_poll(struct file *file, struct socket *sock, poll_table *wait);
int bt_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg);
int bt_sock_wait_state(struct sock *sk, int state, unsigned long timeo);
int bt_sock_wait_ready(struct sock *sk, unsigned long flags);
diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h
index 1668211297a9..cdd9f1fe7cfa 100644
--- a/include/net/bluetooth/hci.h
+++ b/include/net/bluetooth/hci.h
@@ -183,6 +183,15 @@ enum {
* during the hdev->setup vendor callback.
*/
HCI_QUIRK_NON_PERSISTENT_DIAG,
+
+ /* When this quirk is set, setup() will be run after every
+ * open() and not just after the first open().
+ *
+ * This quirk can be set before hci_register_dev is called or
+ * during the hdev->setup vendor callback.
+ *
+ */
+ HCI_QUIRK_NON_PERSISTENT_SETUP,
};
/* HCI device flags */
@@ -260,6 +269,7 @@ enum {
HCI_VENDOR_DIAG,
HCI_FORCE_BREDR_SMP,
HCI_FORCE_STATIC_ADDR,
+ HCI_LL_RPA_RESOLUTION,
__HCI_NUM_FLAGS,
};
@@ -291,6 +301,14 @@ enum {
#define HCI_DH3 0x0800
#define HCI_DH5 0x8000
+/* HCI packet types inverted masks */
+#define HCI_2DH1 0x0002
+#define HCI_3DH1 0x0004
+#define HCI_2DH3 0x0100
+#define HCI_3DH3 0x0200
+#define HCI_2DH5 0x1000
+#define HCI_3DH5 0x2000
+
#define HCI_HV1 0x0020
#define HCI_HV2 0x0040
#define HCI_HV3 0x0080
@@ -354,6 +372,8 @@ enum {
#define LMP_PCONTROL 0x04
#define LMP_TRANSPARENT 0x08
+#define LMP_EDR_2M 0x02
+#define LMP_EDR_3M 0x04
#define LMP_RSSI_INQ 0x40
#define LMP_ESCO 0x80
@@ -361,7 +381,9 @@ enum {
#define LMP_EV5 0x02
#define LMP_NO_BREDR 0x20
#define LMP_LE 0x40
+#define LMP_EDR_3SLOT 0x80
+#define LMP_EDR_5SLOT 0x01
#define LMP_SNIFF_SUBR 0x02
#define LMP_PAUSE_ENC 0x04
#define LMP_EDR_ESCO_2M 0x20
@@ -398,7 +420,12 @@ enum {
#define HCI_LE_SLAVE_FEATURES 0x08
#define HCI_LE_PING 0x10
#define HCI_LE_DATA_LEN_EXT 0x20
+#define HCI_LE_PHY_2M 0x01
+#define HCI_LE_PHY_CODED 0x08
+#define HCI_LE_EXT_ADV 0x10
#define HCI_LE_EXT_SCAN_POLICY 0x80
+#define HCI_LE_PHY_2M 0x01
+#define HCI_LE_PHY_CODED 0x08
#define HCI_LE_CHAN_SEL_ALG2 0x40
/* Connection modes */
@@ -1490,6 +1517,16 @@ struct hci_cp_le_write_def_data_len {
__le16 tx_time;
} __packed;
+#define HCI_OP_LE_CLEAR_RESOLV_LIST 0x2029
+
+#define HCI_OP_LE_READ_RESOLV_LIST_SIZE 0x202a
+struct hci_rp_le_read_resolv_list_size {
+ __u8 status;
+ __u8 size;
+} __packed;
+
+#define HCI_OP_LE_SET_ADDR_RESOLV_ENABLE 0x202d
+
#define HCI_OP_LE_READ_MAX_DATA_LEN 0x202f
struct hci_rp_le_read_max_data_len {
__u8 status;
@@ -1506,6 +1543,134 @@ struct hci_cp_le_set_default_phy {
__u8 rx_phys;
} __packed;
+#define HCI_LE_SET_PHY_1M 0x01
+#define HCI_LE_SET_PHY_2M 0x02
+#define HCI_LE_SET_PHY_CODED 0x04
+
+#define HCI_OP_LE_SET_EXT_SCAN_PARAMS 0x2041
+struct hci_cp_le_set_ext_scan_params {
+ __u8 own_addr_type;
+ __u8 filter_policy;
+ __u8 scanning_phys;
+ __u8 data[0];
+} __packed;
+
+#define LE_SCAN_PHY_1M 0x01
+#define LE_SCAN_PHY_2M 0x02
+#define LE_SCAN_PHY_CODED 0x04
+
+struct hci_cp_le_scan_phy_params {
+ __u8 type;
+ __le16 interval;
+ __le16 window;
+} __packed;
+
+#define HCI_OP_LE_SET_EXT_SCAN_ENABLE 0x2042
+struct hci_cp_le_set_ext_scan_enable {
+ __u8 enable;
+ __u8 filter_dup;
+ __le16 duration;
+ __le16 period;
+} __packed;
+
+#define HCI_OP_LE_EXT_CREATE_CONN 0x2043
+struct hci_cp_le_ext_create_conn {
+ __u8 filter_policy;
+ __u8 own_addr_type;
+ __u8 peer_addr_type;
+ bdaddr_t peer_addr;
+ __u8 phys;
+ __u8 data[0];
+} __packed;
+
+struct hci_cp_le_ext_conn_param {
+ __le16 scan_interval;
+ __le16 scan_window;
+ __le16 conn_interval_min;
+ __le16 conn_interval_max;
+ __le16 conn_latency;
+ __le16 supervision_timeout;
+ __le16 min_ce_len;
+ __le16 max_ce_len;
+} __packed;
+
+#define HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS 0x203b
+struct hci_rp_le_read_num_supported_adv_sets {
+ __u8 status;
+ __u8 num_of_sets;
+} __packed;
+
+#define HCI_OP_LE_SET_EXT_ADV_PARAMS 0x2036
+struct hci_cp_le_set_ext_adv_params {
+ __u8 handle;
+ __le16 evt_properties;
+ __u8 min_interval[3];
+ __u8 max_interval[3];
+ __u8 channel_map;
+ __u8 own_addr_type;
+ __u8 peer_addr_type;
+ bdaddr_t peer_addr;
+ __u8 filter_policy;
+ __u8 tx_power;
+ __u8 primary_phy;
+ __u8 secondary_max_skip;
+ __u8 secondary_phy;
+ __u8 sid;
+ __u8 notif_enable;
+} __packed;
+
+#define HCI_ADV_PHY_1M 0x01
+#define HCI_ADV_PHY_2M 0x02
+#define HCI_ADV_PHY_CODED 0x03
+
+struct hci_rp_le_set_ext_adv_params {
+ __u8 status;
+ __u8 tx_power;
+} __packed;
+
+#define HCI_OP_LE_SET_EXT_ADV_ENABLE 0x2039
+struct hci_cp_le_set_ext_adv_enable {
+ __u8 enable;
+ __u8 num_of_sets;
+ __u8 data[0];
+} __packed;
+
+struct hci_cp_ext_adv_set {
+ __u8 handle;
+ __le16 duration;
+ __u8 max_events;
+} __packed;
+
+#define HCI_OP_LE_SET_EXT_ADV_DATA 0x2037
+struct hci_cp_le_set_ext_adv_data {
+ __u8 handle;
+ __u8 operation;
+ __u8 frag_pref;
+ __u8 length;
+ __u8 data[HCI_MAX_AD_LENGTH];
+} __packed;
+
+#define HCI_OP_LE_SET_EXT_SCAN_RSP_DATA 0x2038
+struct hci_cp_le_set_ext_scan_rsp_data {
+ __u8 handle;
+ __u8 operation;
+ __u8 frag_pref;
+ __u8 length;
+ __u8 data[HCI_MAX_AD_LENGTH];
+} __packed;
+
+#define LE_SET_ADV_DATA_OP_COMPLETE 0x03
+
+#define LE_SET_ADV_DATA_NO_FRAG 0x01
+
+#define HCI_OP_LE_CLEAR_ADV_SETS 0x203d
+
+#define HCI_OP_LE_SET_ADV_SET_RAND_ADDR 0x2035
+struct hci_cp_le_set_adv_set_rand_addr {
+ __u8 handle;
+ bdaddr_t bdaddr;
+} __packed;
+
/* ---- HCI Events ---- */
#define HCI_EV_INQUIRY_COMPLETE 0x01
@@ -1893,6 +2058,23 @@ struct hci_ev_le_conn_complete {
#define LE_ADV_SCAN_IND 0x02
#define LE_ADV_NONCONN_IND 0x03
#define LE_ADV_SCAN_RSP 0x04
+#define LE_ADV_INVALID 0x05
+
+/* Legacy event types in extended adv report */
+#define LE_LEGACY_ADV_IND 0x0013
+#define LE_LEGACY_ADV_DIRECT_IND 0x0015
+#define LE_LEGACY_ADV_SCAN_IND 0x0012
+#define LE_LEGACY_NONCONN_IND 0x0010
+#define LE_LEGACY_SCAN_RSP_ADV 0x001b
+#define LE_LEGACY_SCAN_RSP_ADV_SCAN 0x001a
+
+/* Extended Advertising event types */
+#define LE_EXT_ADV_NON_CONN_IND 0x0000
+#define LE_EXT_ADV_CONN_IND 0x0001
+#define LE_EXT_ADV_SCAN_IND 0x0002
+#define LE_EXT_ADV_DIRECT_IND 0x0004
+#define LE_EXT_ADV_SCAN_RSP 0x0008
+#define LE_EXT_ADV_LEGACY_PDU 0x0010
#define ADDR_LE_DEV_PUBLIC 0x00
#define ADDR_LE_DEV_RANDOM 0x01
@@ -1957,6 +2139,48 @@ struct hci_ev_le_direct_adv_info {
__s8 rssi;
} __packed;
+#define HCI_EV_LE_EXT_ADV_REPORT 0x0d
+struct hci_ev_le_ext_adv_report {
+ __le16 evt_type;
+ __u8 bdaddr_type;
+ bdaddr_t bdaddr;
+ __u8 primary_phy;
+ __u8 secondary_phy;
+ __u8 sid;
+ __u8 tx_power;
+ __s8 rssi;
+ __le16 interval;
+ __u8 direct_addr_type;
+ bdaddr_t direct_addr;
+ __u8 length;
+ __u8 data[0];
+} __packed;
+
+#define HCI_EV_LE_ENHANCED_CONN_COMPLETE 0x0a
+struct hci_ev_le_enh_conn_complete {
+ __u8 status;
+ __le16 handle;
+ __u8 role;
+ __u8 bdaddr_type;
+ bdaddr_t bdaddr;
+ bdaddr_t local_rpa;
+ bdaddr_t peer_rpa;
+ __le16 interval;
+ __le16 latency;
+ __le16 supervision_timeout;
+ __u8 clk_accurancy;
+} __packed;
+
+#define HCI_EV_LE_EXT_ADV_SET_TERM 0x12
+struct hci_evt_le_ext_adv_set_term {
+ __u8 status;
+ __u8 handle;
+ __le16 conn_handle;
+ __u8 num_evts;
+} __packed;
+
+#define HCI_EV_VENDOR 0xff
+
/* Internal events generated by Bluetooth stack */
#define HCI_EV_STACK_INTERNAL 0xfd
struct hci_ev_stack_internal {
diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
index 893bbbb5d2fa..0db1b9b428b7 100644
--- a/include/net/bluetooth/hci_core.h
+++ b/include/net/bluetooth/hci_core.h
@@ -171,6 +171,10 @@ struct adv_info {
__u8 adv_data[HCI_MAX_AD_LENGTH];
__u16 scan_rsp_len;
__u8 scan_rsp_data[HCI_MAX_AD_LENGTH];
+ __s8 tx_power;
+ bdaddr_t random_addr;
+ bool rpa_expired;
+ struct delayed_work rpa_expired_cb;
};
#define HCI_MAX_ADV_INSTANCES 5
@@ -221,6 +225,8 @@ struct hci_dev {
__u8 features[HCI_MAX_PAGES][8];
__u8 le_features[8];
__u8 le_white_list_size;
+ __u8 le_resolv_list_size;
+ __u8 le_num_of_adv_sets;
__u8 le_states[8];
__u8 commands[64];
__u8 hci_ver;
@@ -314,6 +320,9 @@ struct hci_dev {
unsigned long sco_last_tx;
unsigned long le_last_tx;
+ __u8 le_tx_def_phys;
+ __u8 le_rx_def_phys;
+
struct workqueue_struct *workqueue;
struct workqueue_struct *req_workqueue;
@@ -367,6 +376,7 @@ struct hci_dev {
struct list_head identity_resolving_keys;
struct list_head remote_oob_data;
struct list_head le_white_list;
+ struct list_head le_resolv_list;
struct list_head le_conn_params;
struct list_head pend_le_conns;
struct list_head pend_le_reports;
@@ -1106,6 +1116,7 @@ int hci_add_adv_instance(struct hci_dev *hdev, u8 instance, u32 flags,
u16 scan_rsp_len, u8 *scan_rsp_data,
u16 timeout, u16 duration);
int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance);
+void hci_adv_instances_set_rpa_expired(struct hci_dev *hdev, bool rpa_expired);
void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb);
@@ -1136,6 +1147,10 @@ void hci_conn_del_sysfs(struct hci_conn *conn);
#define lmp_inq_tx_pwr_capable(dev) ((dev)->features[0][7] & LMP_INQ_TX_PWR)
#define lmp_ext_feat_capable(dev) ((dev)->features[0][7] & LMP_EXTFEATURES)
#define lmp_transp_capable(dev) ((dev)->features[0][2] & LMP_TRANSPARENT)
+#define lmp_edr_2m_capable(dev) ((dev)->features[0][3] & LMP_EDR_2M)
+#define lmp_edr_3m_capable(dev) ((dev)->features[0][3] & LMP_EDR_3M)
+#define lmp_edr_3slot_capable(dev) ((dev)->features[0][4] & LMP_EDR_3SLOT)
+#define lmp_edr_5slot_capable(dev) ((dev)->features[0][5] & LMP_EDR_5SLOT)
/* ----- Extended LMP capabilities ----- */
#define lmp_csb_master_capable(dev) ((dev)->features[2][0] & LMP_CSB_MASTER)
@@ -1156,6 +1171,24 @@ void hci_conn_del_sysfs(struct hci_conn *conn);
#define bredr_sc_enabled(dev) (lmp_sc_capable(dev) && \
hci_dev_test_flag(dev, HCI_SC_ENABLED))
+#define scan_1m(dev) (((dev)->le_tx_def_phys & HCI_LE_SET_PHY_1M) || \
+ ((dev)->le_rx_def_phys & HCI_LE_SET_PHY_1M))
+
+#define scan_2m(dev) (((dev)->le_tx_def_phys & HCI_LE_SET_PHY_2M) || \
+ ((dev)->le_rx_def_phys & HCI_LE_SET_PHY_2M))
+
+#define scan_coded(dev) (((dev)->le_tx_def_phys & HCI_LE_SET_PHY_CODED) || \
+ ((dev)->le_rx_def_phys & HCI_LE_SET_PHY_CODED))
+
+/* Use ext scanning if both set ext scan params and set ext scan enable are supported */
+#define use_ext_scan(dev) (((dev)->commands[37] & 0x20) && \
+ ((dev)->commands[37] & 0x40))
+/* Use ext create connection if command is supported */
+#define use_ext_conn(dev) ((dev)->commands[37] & 0x80)
+
+/* Extended advertising support */
+#define ext_adv_capable(dev) (((dev)->le_features[1] & HCI_LE_EXT_ADV))
+
/* ----- HCI protocols ----- */
#define HCI_PROTO_DEFER 0x01
@@ -1529,6 +1562,7 @@ void mgmt_advertising_added(struct sock *sk, struct hci_dev *hdev,
u8 instance);
void mgmt_advertising_removed(struct sock *sk, struct hci_dev *hdev,
u8 instance);
+int mgmt_phy_configuration_changed(struct hci_dev *hdev, struct sock *skip);
u8 hci_le_conn_update(struct hci_conn *conn, u16 min, u16 max, u16 latency,
u16 to_multiplier);
diff --git a/include/net/bluetooth/mgmt.h b/include/net/bluetooth/mgmt.h
index e7303eee65cd..9cee7ddc6741 100644
--- a/include/net/bluetooth/mgmt.h
+++ b/include/net/bluetooth/mgmt.h
@@ -101,6 +101,7 @@ struct mgmt_rp_read_index_list {
#define MGMT_SETTING_PRIVACY 0x00002000
#define MGMT_SETTING_CONFIGURATION 0x00004000
#define MGMT_SETTING_STATIC_ADDRESS 0x00008000
+#define MGMT_SETTING_PHY_CONFIGURATION 0x00010000
#define MGMT_OP_READ_INFO 0x0004
#define MGMT_READ_INFO_SIZE 0
@@ -561,6 +562,12 @@ struct mgmt_rp_add_advertising {
#define MGMT_ADV_FLAG_TX_POWER BIT(4)
#define MGMT_ADV_FLAG_APPEARANCE BIT(5)
#define MGMT_ADV_FLAG_LOCAL_NAME BIT(6)
+#define MGMT_ADV_FLAG_SEC_1M BIT(7)
+#define MGMT_ADV_FLAG_SEC_2M BIT(8)
+#define MGMT_ADV_FLAG_SEC_CODED BIT(9)
+
+#define MGMT_ADV_FLAG_SEC_MASK (MGMT_ADV_FLAG_SEC_1M | MGMT_ADV_FLAG_SEC_2M | \
+ MGMT_ADV_FLAG_SEC_CODED)
#define MGMT_OP_REMOVE_ADVERTISING 0x003F
struct mgmt_cp_remove_advertising {
@@ -604,6 +611,49 @@ struct mgmt_cp_set_appearance {
} __packed;
#define MGMT_SET_APPEARANCE_SIZE 2
+#define MGMT_OP_GET_PHY_CONFIGURATION 0x0044
+struct mgmt_rp_get_phy_confguration {
+ __le32 supported_phys;
+ __le32 configurable_phys;
+ __le32 selected_phys;
+} __packed;
+#define MGMT_GET_PHY_CONFIGURATION_SIZE 0
+
+#define MGMT_PHY_BR_1M_1SLOT 0x00000001
+#define MGMT_PHY_BR_1M_3SLOT 0x00000002
+#define MGMT_PHY_BR_1M_5SLOT 0x00000004
+#define MGMT_PHY_EDR_2M_1SLOT 0x00000008
+#define MGMT_PHY_EDR_2M_3SLOT 0x00000010
+#define MGMT_PHY_EDR_2M_5SLOT 0x00000020
+#define MGMT_PHY_EDR_3M_1SLOT 0x00000040
+#define MGMT_PHY_EDR_3M_3SLOT 0x00000080
+#define MGMT_PHY_EDR_3M_5SLOT 0x00000100
+#define MGMT_PHY_LE_1M_TX 0x00000200
+#define MGMT_PHY_LE_1M_RX 0x00000400
+#define MGMT_PHY_LE_2M_TX 0x00000800
+#define MGMT_PHY_LE_2M_RX 0x00001000
+#define MGMT_PHY_LE_CODED_TX 0x00002000
+#define MGMT_PHY_LE_CODED_RX 0x00004000
+
+#define MGMT_PHY_BREDR_MASK (MGMT_PHY_BR_1M_1SLOT | MGMT_PHY_BR_1M_3SLOT | \
+ MGMT_PHY_BR_1M_5SLOT | MGMT_PHY_EDR_2M_1SLOT | \
+ MGMT_PHY_EDR_2M_3SLOT | MGMT_PHY_EDR_2M_5SLOT | \
+ MGMT_PHY_EDR_3M_1SLOT | MGMT_PHY_EDR_3M_3SLOT | \
+ MGMT_PHY_EDR_3M_5SLOT)
+#define MGMT_PHY_LE_MASK (MGMT_PHY_LE_1M_TX | MGMT_PHY_LE_1M_RX | \
+ MGMT_PHY_LE_2M_TX | MGMT_PHY_LE_2M_RX | \
+ MGMT_PHY_LE_CODED_TX | MGMT_PHY_LE_CODED_RX)
+#define MGMT_PHY_LE_TX_MASK (MGMT_PHY_LE_1M_TX | MGMT_PHY_LE_2M_TX | \
+ MGMT_PHY_LE_CODED_TX)
+#define MGMT_PHY_LE_RX_MASK (MGMT_PHY_LE_1M_RX | MGMT_PHY_LE_2M_RX | \
+ MGMT_PHY_LE_CODED_RX)
+
+#define MGMT_OP_SET_PHY_CONFIGURATION 0x0045
+struct mgmt_cp_set_phy_confguration {
+ __le32 selected_phys;
+} __packed;
+#define MGMT_SET_PHY_CONFIGURATION_SIZE 4
+
#define MGMT_EV_CMD_COMPLETE 0x0001
struct mgmt_ev_cmd_complete {
__le16 opcode;
@@ -824,3 +874,8 @@ struct mgmt_ev_ext_info_changed {
__le16 eir_len;
__u8 eir[0];
} __packed;
+
+#define MGMT_EV_PHY_CONFIGURATION_CHANGED 0x0026
+struct mgmt_ev_phy_configuration_changed {
+ __le32 selected_phys;
+} __packed;
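A short sketch of how the new PHY bit definitions compose into the selected_phys field of MGMT_OP_SET_PHY_CONFIGURATION; the helper and the chosen bit combination are illustrative assumptions, only the constants and the command structure come from the header.

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/mgmt.h>

/* Illustrative: request LE 1M and 2M in both directions, rejecting any
 * bit outside the LE PHY mask before filling the little-endian field.
 */
static int example_fill_set_phy(struct mgmt_cp_set_phy_confguration *cp)
{
        u32 phys = MGMT_PHY_LE_1M_TX | MGMT_PHY_LE_1M_RX |
                   MGMT_PHY_LE_2M_TX | MGMT_PHY_LE_2M_RX;

        if (phys & ~MGMT_PHY_LE_MASK)
                return -EINVAL;

        cp->selected_phys = cpu_to_le32(phys);
        return 0;
}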
diff --git a/include/net/bond_3ad.h b/include/net/bond_3ad.h
index f358ad5e4214..fc3111515f5c 100644
--- a/include/net/bond_3ad.h
+++ b/include/net/bond_3ad.h
@@ -283,7 +283,7 @@ static inline const char *bond_3ad_churn_desc(churn_state_t state)
"none",
"unknown"
};
- int max_size = sizeof(churn_description) / sizeof(churn_description[0]);
+ int max_size = ARRAY_SIZE(churn_description);
if (state >= max_size)
state = max_size - 1;
diff --git a/include/net/bonding.h b/include/net/bonding.h
index 808f1d167349..a2d058170ea3 100644
--- a/include/net/bonding.h
+++ b/include/net/bonding.h
@@ -411,6 +411,19 @@ static inline bool bond_slave_can_tx(struct slave *slave)
bond_is_active_slave(slave);
}
+static inline bool bond_is_active_slave_dev(const struct net_device *slave_dev)
+{
+ struct slave *slave;
+ bool active;
+
+ rcu_read_lock();
+ slave = bond_slave_get_rcu(slave_dev);
+ active = bond_is_active_slave(slave);
+ rcu_read_unlock();
+
+ return active;
+}
+
static inline void bond_hw_addr_copy(u8 *dst, const u8 *src, unsigned int len)
{
if (len == ETH_ALEN) {
diff --git a/include/net/busy_poll.h b/include/net/busy_poll.h
index c5187438af38..ba61cdd09eaa 100644
--- a/include/net/busy_poll.h
+++ b/include/net/busy_poll.h
@@ -121,21 +121,6 @@ static inline void sk_busy_loop(struct sock *sk, int nonblock)
#endif
}
-static inline void sock_poll_busy_loop(struct socket *sock, __poll_t events)
-{
- if (sk_can_busy_loop(sock->sk) &&
- events && (events & POLL_BUSY_LOOP)) {
- /* once, only if requested by syscall */
- sk_busy_loop(sock->sk, 1);
- }
-}
-
-/* if this socket can poll_ll, tell the system call */
-static inline __poll_t sock_poll_busy_flag(struct socket *sock)
-{
- return sk_can_busy_loop(sock->sk) ? POLL_BUSY_LOOP : 0;
-}
-
/* used in the NIC receive handler to mark the skb */
static inline void skb_mark_napi_id(struct sk_buff *skb,
struct napi_struct *napi)
@@ -151,6 +136,7 @@ static inline void sk_mark_napi_id(struct sock *sk, const struct sk_buff *skb)
#ifdef CONFIG_NET_RX_BUSY_POLL
sk->sk_napi_id = skb->napi_id;
#endif
+ sk_rx_queue_set(sk, skb);
}
/* variant used for unconnected sockets */
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index 5fbfe61f41c6..9a850973e09a 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -285,6 +285,41 @@ struct ieee80211_sta_vht_cap {
struct ieee80211_vht_mcs_info vht_mcs;
};
+#define IEEE80211_HE_PPE_THRES_MAX_LEN 25
+
+/**
+ * struct ieee80211_sta_he_cap - STA's HE capabilities
+ *
+ * This structure describes most essential parameters needed
+ * to describe 802.11ax HE capabilities for a STA.
+ *
+ * @has_he: true iff HE data is valid.
+ * @he_cap_elem: Fixed portion of the HE capabilities element.
+ * @he_mcs_nss_supp: The supported NSS/MCS combinations.
+ * @ppe_thres: Holds the PPE Thresholds data.
+ */
+struct ieee80211_sta_he_cap {
+ bool has_he;
+ struct ieee80211_he_cap_elem he_cap_elem;
+ struct ieee80211_he_mcs_nss_supp he_mcs_nss_supp;
+ u8 ppe_thres[IEEE80211_HE_PPE_THRES_MAX_LEN];
+};
+
+/**
+ * struct ieee80211_sband_iftype_data
+ *
+ * This structure encapsulates sband data that is relevant for the
+ * interface types defined in @types_mask. Each type in the
+ * @types_mask must be unique across all instances of iftype_data.
+ *
+ * @types_mask: interface types mask
+ * @he_cap: holds the HE capabilities
+ */
+struct ieee80211_sband_iftype_data {
+ u16 types_mask;
+ struct ieee80211_sta_he_cap he_cap;
+};
+
/**
* struct ieee80211_supported_band - frequency band definition
*
@@ -301,6 +336,11 @@ struct ieee80211_sta_vht_cap {
* @n_bitrates: Number of bitrates in @bitrates
* @ht_cap: HT capabilities in this band
* @vht_cap: VHT capabilities in this band
+ * @n_iftype_data: number of iftype data entries
+ * @iftype_data: interface type data entries. Note that the bits in
+ * @types_mask inside this structure cannot overlap (i.e. only
+ * one occurrence of each type is allowed across all instances of
+ * iftype_data).
*/
struct ieee80211_supported_band {
struct ieee80211_channel *channels;
@@ -310,9 +350,56 @@ struct ieee80211_supported_band {
int n_bitrates;
struct ieee80211_sta_ht_cap ht_cap;
struct ieee80211_sta_vht_cap vht_cap;
+ u16 n_iftype_data;
+ const struct ieee80211_sband_iftype_data *iftype_data;
};
/**
+ * ieee80211_get_sband_iftype_data - return sband data for a given iftype
+ * @sband: the sband to search for the iftype on
+ * @iftype: enum nl80211_iftype
+ *
+ * Return: pointer to struct ieee80211_sband_iftype_data, or NULL if none found
+ */
+static inline const struct ieee80211_sband_iftype_data *
+ieee80211_get_sband_iftype_data(const struct ieee80211_supported_band *sband,
+ u8 iftype)
+{
+ int i;
+
+ if (WARN_ON(iftype >= NL80211_IFTYPE_MAX))
+ return NULL;
+
+ for (i = 0; i < sband->n_iftype_data; i++) {
+ const struct ieee80211_sband_iftype_data *data =
+ &sband->iftype_data[i];
+
+ if (data->types_mask & BIT(iftype))
+ return data;
+ }
+
+ return NULL;
+}
+
+/**
+ * ieee80211_get_he_sta_cap - return HE capabilities for an sband's STA
+ * @sband: the sband to search for the STA on
+ *
+ * Return: pointer to the struct ieee80211_sta_he_cap, or NULL if none found
+ */
+static inline const struct ieee80211_sta_he_cap *
+ieee80211_get_he_sta_cap(const struct ieee80211_supported_band *sband)
+{
+ const struct ieee80211_sband_iftype_data *data =
+ ieee80211_get_sband_iftype_data(sband, NL80211_IFTYPE_STATION);
+
+ if (data && data->he_cap.has_he)
+ return &data->he_cap;
+
+ return NULL;
+}
+
+/**
* wiphy_read_of_freq_limits - read frequency limits from device tree
*
* @wiphy: the wireless device to get extra limits for
@@ -899,6 +986,8 @@ enum station_parameters_apply_mask {
* @opmode_notif: operating mode field from Operating Mode Notification
* @opmode_notif_used: information if operating mode field is used
* @support_p2p_ps: information if station supports P2P PS mechanism
+ * @he_capa: HE capabilities of station
+ * @he_capa_len: the length of the HE capabilities
*/
struct station_parameters {
const u8 *supported_rates;
@@ -926,6 +1015,8 @@ struct station_parameters {
u8 opmode_notif;
bool opmode_notif_used;
int support_p2p_ps;
+ const struct ieee80211_he_cap_elem *he_capa;
+ u8 he_capa_len;
};
/**
@@ -1000,12 +1091,14 @@ int cfg80211_check_station_change(struct wiphy *wiphy,
* @RATE_INFO_FLAGS_VHT_MCS: mcs field filled with VHT MCS
* @RATE_INFO_FLAGS_SHORT_GI: 400ns guard interval
* @RATE_INFO_FLAGS_60G: 60GHz MCS
+ * @RATE_INFO_FLAGS_HE_MCS: HE MCS information
*/
enum rate_info_flags {
RATE_INFO_FLAGS_MCS = BIT(0),
RATE_INFO_FLAGS_VHT_MCS = BIT(1),
RATE_INFO_FLAGS_SHORT_GI = BIT(2),
RATE_INFO_FLAGS_60G = BIT(3),
+ RATE_INFO_FLAGS_HE_MCS = BIT(4),
};
/**
@@ -1019,6 +1112,7 @@ enum rate_info_flags {
* @RATE_INFO_BW_40: 40 MHz bandwidth
* @RATE_INFO_BW_80: 80 MHz bandwidth
* @RATE_INFO_BW_160: 160 MHz bandwidth
+ * @RATE_INFO_BW_HE_RU: bandwidth determined by HE RU allocation
*/
enum rate_info_bw {
RATE_INFO_BW_20 = 0,
@@ -1027,6 +1121,7 @@ enum rate_info_bw {
RATE_INFO_BW_40,
RATE_INFO_BW_80,
RATE_INFO_BW_160,
+ RATE_INFO_BW_HE_RU,
};
/**
@@ -1035,10 +1130,14 @@ enum rate_info_bw {
* Information about a receiving or transmitting bitrate
*
* @flags: bitflag of flags from &enum rate_info_flags
- * @mcs: mcs index if struct describes a 802.11n bitrate
+ * @mcs: mcs index if struct describes an HT/VHT/HE rate
* @legacy: bitrate in 100kbit/s for 802.11abg
- * @nss: number of streams (VHT only)
+ * @nss: number of streams (VHT & HE only)
* @bw: bandwidth (from &enum rate_info_bw)
+ * @he_gi: HE guard interval (from &enum nl80211_he_gi)
+ * @he_dcm: HE DCM value
+ * @he_ru_alloc: HE RU allocation (from &enum nl80211_he_ru_alloc,
+ * only valid if bw is %RATE_INFO_BW_HE_RU)
*/
struct rate_info {
u8 flags;
@@ -1046,6 +1145,9 @@ struct rate_info {
u16 legacy;
u8 nss;
u8 bw;
+ u8 he_gi;
+ u8 he_dcm;
+ u8 he_ru_alloc;
};
/**
@@ -5835,10 +5937,11 @@ void cfg80211_mgmt_tx_status(struct wireless_dev *wdev, u64 cookie,
/**
* cfg80211_rx_control_port - notification about a received control port frame
* @dev: The device the frame matched to
- * @buf: control port frame
- * @len: length of the frame data
- * @addr: The peer from which the frame was received
- * @proto: frame protocol, typically PAE or Pre-authentication
+ * @skb: The skb with the control port frame. It is assumed that the skb
+ * is 802.3 formatted (with 802.3 header). The skb can be non-linear.
+ * This function does not take ownership of the skb, so the caller is
+ * responsible for any cleanup. The caller must also ensure that
+ * skb->protocol is set appropriately.
* @unencrypted: Whether the frame was received unencrypted
*
* This function is used to inform userspace about a received control port
@@ -5851,8 +5954,7 @@ void cfg80211_mgmt_tx_status(struct wireless_dev *wdev, u64 cookie,
* Return: %true if the frame was passed to userspace
*/
bool cfg80211_rx_control_port(struct net_device *dev,
- const u8 *buf, size_t len,
- const u8 *addr, u16 proto, bool unencrypted);
+ struct sk_buff *skb, bool unencrypted);
/**
* cfg80211_cqm_rssi_notify - connection quality monitoring rssi event
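A sketch of the intended driver-side pattern for the new iftype_data hooks: the band advertises per-interface-type HE capabilities and the helpers above look them up. The table contents and function names are placeholders.

#include <net/cfg80211.h>

static const struct ieee80211_sband_iftype_data example_iftype_data[] = {
        {
                .types_mask = BIT(NL80211_IFTYPE_STATION),
                .he_cap = {
                        .has_he = true,
                        /* he_cap_elem, he_mcs_nss_supp and ppe_thres would
                         * carry hardware-specific values here.
                         */
                },
        },
};

static bool example_band_supports_he(struct ieee80211_supported_band *sband)
{
        sband->iftype_data = example_iftype_data;
        sband->n_iftype_data = ARRAY_SIZE(example_iftype_data);

        /* NULL if no entry matches NL80211_IFTYPE_STATION or HE is absent */
        return ieee80211_get_he_sta_cap(sband) != NULL;
}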
diff --git a/include/net/dcbnl.h b/include/net/dcbnl.h
index 0e5e91be2d30..e22a8a3c089b 100644
--- a/include/net/dcbnl.h
+++ b/include/net/dcbnl.h
@@ -34,6 +34,19 @@ int dcb_ieee_setapp(struct net_device *, struct dcb_app *);
int dcb_ieee_delapp(struct net_device *, struct dcb_app *);
u8 dcb_ieee_getapp_mask(struct net_device *, struct dcb_app *);
+struct dcb_ieee_app_prio_map {
+ u64 map[IEEE_8021QAZ_MAX_TCS];
+};
+void dcb_ieee_getapp_prio_dscp_mask_map(const struct net_device *dev,
+ struct dcb_ieee_app_prio_map *p_map);
+
+struct dcb_ieee_app_dscp_map {
+ u8 map[64];
+};
+void dcb_ieee_getapp_dscp_prio_mask_map(const struct net_device *dev,
+ struct dcb_ieee_app_dscp_map *p_map);
+u8 dcb_ieee_getapp_default_prio_mask(const struct net_device *dev);
+
int dcbnl_ieee_notify(struct net_device *dev, int event, int cmd,
u32 seq, u32 pid);
int dcbnl_cee_notify(struct net_device *dev, int event, int cmd,
diff --git a/include/net/devlink.h b/include/net/devlink.h
index e336ea9c73df..b9b89d6604d4 100644
--- a/include/net/devlink.h
+++ b/include/net/devlink.h
@@ -27,6 +27,9 @@ struct devlink {
struct list_head sb_list;
struct list_head dpipe_table_list;
struct list_head resource_list;
+ struct list_head param_list;
+ struct list_head region_list;
+ u32 snapshot_id;
struct devlink_dpipe_headers *dpipe_headers;
const struct devlink_ops *ops;
struct device *dev;
@@ -295,6 +298,115 @@ struct devlink_resource {
#define DEVLINK_RESOURCE_ID_PARENT_TOP 0
+#define DEVLINK_PARAM_MAX_STRING_VALUE 32
+enum devlink_param_type {
+ DEVLINK_PARAM_TYPE_U8,
+ DEVLINK_PARAM_TYPE_U16,
+ DEVLINK_PARAM_TYPE_U32,
+ DEVLINK_PARAM_TYPE_STRING,
+ DEVLINK_PARAM_TYPE_BOOL,
+};
+
+union devlink_param_value {
+ u8 vu8;
+ u16 vu16;
+ u32 vu32;
+ const char *vstr;
+ bool vbool;
+};
+
+struct devlink_param_gset_ctx {
+ union devlink_param_value val;
+ enum devlink_param_cmode cmode;
+};
+
+/**
+ * struct devlink_param - devlink configuration parameter data
+ * @name: name of the parameter
+ * @generic: indicates if the parameter is generic or driver specific
+ * @type: parameter type
+ * @supported_cmodes: bitmap of supported configuration modes
+ * @get: get parameter value, used for runtime and permanent
+ * configuration modes
+ * @set: set parameter value, used for runtime and permanent
+ * configuration modes
+ * @validate: validate input value is applicable (within value range, etc.)
+ *
+ * This struct should be used by the driver to fill the data for
+ * a parameter it registers.
+ */
+struct devlink_param {
+ u32 id;
+ const char *name;
+ bool generic;
+ enum devlink_param_type type;
+ unsigned long supported_cmodes;
+ int (*get)(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx);
+ int (*set)(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx);
+ int (*validate)(struct devlink *devlink, u32 id,
+ union devlink_param_value val,
+ struct netlink_ext_ack *extack);
+};
+
+struct devlink_param_item {
+ struct list_head list;
+ const struct devlink_param *param;
+ union devlink_param_value driverinit_value;
+ bool driverinit_value_valid;
+};
+
+enum devlink_param_generic_id {
+ DEVLINK_PARAM_GENERIC_ID_INT_ERR_RESET,
+ DEVLINK_PARAM_GENERIC_ID_MAX_MACS,
+ DEVLINK_PARAM_GENERIC_ID_ENABLE_SRIOV,
+ DEVLINK_PARAM_GENERIC_ID_REGION_SNAPSHOT,
+
+ /* add new param generic ids above here */
+ __DEVLINK_PARAM_GENERIC_ID_MAX,
+ DEVLINK_PARAM_GENERIC_ID_MAX = __DEVLINK_PARAM_GENERIC_ID_MAX - 1,
+};
+
+#define DEVLINK_PARAM_GENERIC_INT_ERR_RESET_NAME "internal_error_reset"
+#define DEVLINK_PARAM_GENERIC_INT_ERR_RESET_TYPE DEVLINK_PARAM_TYPE_BOOL
+
+#define DEVLINK_PARAM_GENERIC_MAX_MACS_NAME "max_macs"
+#define DEVLINK_PARAM_GENERIC_MAX_MACS_TYPE DEVLINK_PARAM_TYPE_U32
+
+#define DEVLINK_PARAM_GENERIC_ENABLE_SRIOV_NAME "enable_sriov"
+#define DEVLINK_PARAM_GENERIC_ENABLE_SRIOV_TYPE DEVLINK_PARAM_TYPE_BOOL
+
+#define DEVLINK_PARAM_GENERIC_REGION_SNAPSHOT_NAME "region_snapshot_enable"
+#define DEVLINK_PARAM_GENERIC_REGION_SNAPSHOT_TYPE DEVLINK_PARAM_TYPE_BOOL
+
+#define DEVLINK_PARAM_GENERIC(_id, _cmodes, _get, _set, _validate) \
+{ \
+ .id = DEVLINK_PARAM_GENERIC_ID_##_id, \
+ .name = DEVLINK_PARAM_GENERIC_##_id##_NAME, \
+ .type = DEVLINK_PARAM_GENERIC_##_id##_TYPE, \
+ .generic = true, \
+ .supported_cmodes = _cmodes, \
+ .get = _get, \
+ .set = _set, \
+ .validate = _validate, \
+}
+
+#define DEVLINK_PARAM_DRIVER(_id, _name, _type, _cmodes, _get, _set, _validate) \
+{ \
+ .id = _id, \
+ .name = _name, \
+ .type = _type, \
+ .supported_cmodes = _cmodes, \
+ .get = _get, \
+ .set = _set, \
+ .validate = _validate, \
+}
+
+struct devlink_region;
+
+typedef void devlink_snapshot_data_dest_t(const void *data);
+
struct devlink_ops {
int (*reload)(struct devlink *devlink, struct netlink_ext_ack *extack);
int (*port_type_set)(struct devlink_port *devlink_port,
@@ -430,6 +542,26 @@ void devlink_resource_occ_get_register(struct devlink *devlink,
void *occ_get_priv);
void devlink_resource_occ_get_unregister(struct devlink *devlink,
u64 resource_id);
+int devlink_params_register(struct devlink *devlink,
+ const struct devlink_param *params,
+ size_t params_count);
+void devlink_params_unregister(struct devlink *devlink,
+ const struct devlink_param *params,
+ size_t params_count);
+int devlink_param_driverinit_value_get(struct devlink *devlink, u32 param_id,
+ union devlink_param_value *init_val);
+int devlink_param_driverinit_value_set(struct devlink *devlink, u32 param_id,
+ union devlink_param_value init_val);
+void devlink_param_value_changed(struct devlink *devlink, u32 param_id);
+struct devlink_region *devlink_region_create(struct devlink *devlink,
+ const char *region_name,
+ u32 region_max_snapshots,
+ u64 region_size);
+void devlink_region_destroy(struct devlink_region *region);
+u32 devlink_region_shapshot_id_get(struct devlink *devlink);
+int devlink_region_snapshot_create(struct devlink_region *region, u64 data_len,
+ u8 *data, u32 snapshot_id,
+ devlink_snapshot_data_dest_t *data_destructor);
#else
@@ -622,6 +754,69 @@ devlink_resource_occ_get_unregister(struct devlink *devlink,
{
}
+static inline int
+devlink_params_register(struct devlink *devlink,
+ const struct devlink_param *params,
+ size_t params_count)
+{
+ return 0;
+}
+
+static inline void
+devlink_params_unregister(struct devlink *devlink,
+ const struct devlink_param *params,
+ size_t params_count)
+{
+
+}
+
+static inline int
+devlink_param_driverinit_value_get(struct devlink *devlink, u32 param_id,
+ union devlink_param_value *init_val)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int
+devlink_param_driverinit_value_set(struct devlink *devlink, u32 param_id,
+ union devlink_param_value init_val)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void
+devlink_param_value_changed(struct devlink *devlink, u32 param_id)
+{
+}
+
+static inline struct devlink_region *
+devlink_region_create(struct devlink *devlink,
+ const char *region_name,
+ u32 region_max_snapshots,
+ u64 region_size)
+{
+ return NULL;
+}
+
+static inline void
+devlink_region_destroy(struct devlink_region *region)
+{
+}
+
+static inline u32
+devlink_region_shapshot_id_get(struct devlink *devlink)
+{
+ return 0;
+}
+
+static inline int
+devlink_region_snapshot_create(struct devlink_region *region, u64 data_len,
+ u8 *data, u32 snapshot_id,
+ devlink_snapshot_data_dest_t *data_destructor)
+{
+ return 0;
+}
+
#endif
#endif /* _NET_DEVLINK_H_ */
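A sketch of how a driver would use the new parameter API: define a generic parameter with DEVLINK_PARAM_GENERIC() and register the table. The callbacks are stubs and the runtime cmode choice is an assumption; the DEVLINK_PARAM_CMODE_* values come from the devlink uapi header.

#include <net/devlink.h>

static int example_max_macs_get(struct devlink *devlink, u32 id,
                                struct devlink_param_gset_ctx *ctx)
{
        ctx->val.vu32 = 128;            /* placeholder device value */
        return 0;
}

static int example_max_macs_set(struct devlink *devlink, u32 id,
                                struct devlink_param_gset_ctx *ctx)
{
        /* a real driver would program ctx->val.vu32 into the device */
        return 0;
}

static const struct devlink_param example_params[] = {
        DEVLINK_PARAM_GENERIC(MAX_MACS,
                              BIT(DEVLINK_PARAM_CMODE_RUNTIME),
                              example_max_macs_get, example_max_macs_set,
                              NULL),
};

static int example_register_params(struct devlink *devlink)
{
        return devlink_params_register(devlink, example_params,
                                       ARRAY_SIZE(example_params));
}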
diff --git a/include/net/dsa.h b/include/net/dsa.h
index fdbd6082945d..461e8a7661b7 100644
--- a/include/net/dsa.h
+++ b/include/net/dsa.h
@@ -259,6 +259,9 @@ struct dsa_switch {
/* Number of switch port queues */
unsigned int num_tx_queues;
+ unsigned long *bitmap;
+ unsigned long _bitmap;
+
/* Dynamically allocated ports, keep last */
size_t num_ports;
struct dsa_port ports[];
diff --git a/include/net/dst.h b/include/net/dst.h
index b3219cd8a5a1..7f735e76ca73 100644
--- a/include/net/dst.h
+++ b/include/net/dst.h
@@ -475,6 +475,14 @@ static inline struct dst_entry *xfrm_lookup(struct net *net,
return dst_orig;
}
+static inline struct dst_entry *
+xfrm_lookup_with_ifid(struct net *net, struct dst_entry *dst_orig,
+ const struct flowi *fl, const struct sock *sk,
+ int flags, u32 if_id)
+{
+ return dst_orig;
+}
+
static inline struct dst_entry *xfrm_lookup_route(struct net *net,
struct dst_entry *dst_orig,
const struct flowi *fl,
@@ -494,6 +502,12 @@ struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig,
const struct flowi *fl, const struct sock *sk,
int flags);
+struct dst_entry *xfrm_lookup_with_ifid(struct net *net,
+ struct dst_entry *dst_orig,
+ const struct flowi *fl,
+ const struct sock *sk, int flags,
+ u32 if_id);
+
struct dst_entry *xfrm_lookup_route(struct net *net, struct dst_entry *dst_orig,
const struct flowi *fl, const struct sock *sk,
int flags);
diff --git a/include/net/flow_dissector.h b/include/net/flow_dissector.h
index adc24df56b90..6a4586dcdede 100644
--- a/include/net/flow_dissector.h
+++ b/include/net/flow_dissector.h
@@ -47,7 +47,7 @@ struct flow_dissector_key_tags {
struct flow_dissector_key_vlan {
u16 vlan_id:12,
vlan_priority:3;
- u16 padding;
+ __be16 vlan_tpid;
};
struct flow_dissector_key_mpls {
@@ -57,6 +57,21 @@ struct flow_dissector_key_mpls {
mpls_label:20;
};
+#define FLOW_DIS_TUN_OPTS_MAX 255
+/**
+ * struct flow_dissector_key_enc_opts:
+ * @data: tunnel option data
+ * @len: length of tunnel option data
+ * @dst_opt_type: tunnel option type
+ */
+struct flow_dissector_key_enc_opts {
+ u8 data[FLOW_DIS_TUN_OPTS_MAX]; /* Using IP_TUNNEL_OPTS_MAX is desired
+ * here but seems difficult to #include
+ */
+ u8 len;
+ __be16 dst_opt_type;
+};
+
struct flow_dissector_key_keyid {
__be32 keyid;
};
@@ -206,6 +221,9 @@ enum flow_dissector_key_id {
FLOW_DISSECTOR_KEY_MPLS, /* struct flow_dissector_key_mpls */
FLOW_DISSECTOR_KEY_TCP, /* struct flow_dissector_key_tcp */
FLOW_DISSECTOR_KEY_IP, /* struct flow_dissector_key_ip */
+ FLOW_DISSECTOR_KEY_CVLAN, /* struct flow_dissector_key_vlan */
+ FLOW_DISSECTOR_KEY_ENC_IP, /* struct flow_dissector_key_ip */
+ FLOW_DISSECTOR_KEY_ENC_OPTS, /* struct flow_dissector_key_enc_opts */
FLOW_DISSECTOR_KEY_MAX,
};
@@ -237,6 +255,7 @@ struct flow_keys {
struct flow_dissector_key_basic basic;
struct flow_dissector_key_tags tags;
struct flow_dissector_key_vlan vlan;
+ struct flow_dissector_key_vlan cvlan;
struct flow_dissector_key_keyid keyid;
struct flow_dissector_key_ports ports;
struct flow_dissector_key_addrs addrs;
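A small sketch of filling the new tunnel-options key; the caller and the option source are hypothetical, only the structure layout and the FLOW_DIS_TUN_OPTS_MAX bound come from the header.

#include <linux/errno.h>
#include <linux/string.h>
#include <net/flow_dissector.h>

static int example_fill_enc_opts(struct flow_dissector_key_enc_opts *key,
                                 const void *opts, int opts_len, __be16 type)
{
        if (opts_len > FLOW_DIS_TUN_OPTS_MAX)
                return -EINVAL;

        memcpy(key->data, opts, opts_len);
        key->len = opts_len;
        key->dst_opt_type = type;
        return 0;
}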
diff --git a/include/net/gen_stats.h b/include/net/gen_stats.h
index 0304ba2ae353..883bb9085f15 100644
--- a/include/net/gen_stats.h
+++ b/include/net/gen_stats.h
@@ -59,13 +59,13 @@ int gnet_stats_finish_copy(struct gnet_dump *d);
int gen_new_estimator(struct gnet_stats_basic_packed *bstats,
struct gnet_stats_basic_cpu __percpu *cpu_bstats,
struct net_rate_estimator __rcu **rate_est,
- spinlock_t *stats_lock,
+ spinlock_t *lock,
seqcount_t *running, struct nlattr *opt);
void gen_kill_estimator(struct net_rate_estimator __rcu **ptr);
int gen_replace_estimator(struct gnet_stats_basic_packed *bstats,
struct gnet_stats_basic_cpu __percpu *cpu_bstats,
struct net_rate_estimator __rcu **ptr,
- spinlock_t *stats_lock,
+ spinlock_t *lock,
seqcount_t *running, struct nlattr *opt);
bool gen_estimator_active(struct net_rate_estimator __rcu **ptr);
bool gen_estimator_read(struct net_rate_estimator __rcu **ptr,
diff --git a/include/net/ieee80211_radiotap.h b/include/net/ieee80211_radiotap.h
index 960236fb1681..feef706e1158 100644
--- a/include/net/ieee80211_radiotap.h
+++ b/include/net/ieee80211_radiotap.h
@@ -1,5 +1,6 @@
/*
* Copyright (c) 2017 Intel Deutschland GmbH
+ * Copyright (c) 2018 Intel Corporation
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -72,6 +73,8 @@ enum ieee80211_radiotap_presence {
IEEE80211_RADIOTAP_AMPDU_STATUS = 20,
IEEE80211_RADIOTAP_VHT = 21,
IEEE80211_RADIOTAP_TIMESTAMP = 22,
+ IEEE80211_RADIOTAP_HE = 23,
+ IEEE80211_RADIOTAP_HE_MU = 24,
/* valid in every it_present bitmap, even vendor namespaces */
IEEE80211_RADIOTAP_RADIOTAP_NAMESPACE = 29,
@@ -202,6 +205,126 @@ enum ieee80211_radiotap_timestamp_flags {
IEEE80211_RADIOTAP_TIMESTAMP_FLAG_ACCURACY = 0x02,
};
+struct ieee80211_radiotap_he {
+ __le16 data1, data2, data3, data4, data5, data6;
+};
+
+enum ieee80211_radiotap_he_bits {
+ IEEE80211_RADIOTAP_HE_DATA1_FORMAT_MASK = 3,
+ IEEE80211_RADIOTAP_HE_DATA1_FORMAT_SU = 0,
+ IEEE80211_RADIOTAP_HE_DATA1_FORMAT_EXT_SU = 1,
+ IEEE80211_RADIOTAP_HE_DATA1_FORMAT_MU = 2,
+ IEEE80211_RADIOTAP_HE_DATA1_FORMAT_TRIG = 3,
+
+ IEEE80211_RADIOTAP_HE_DATA1_BSS_COLOR_KNOWN = 0x0004,
+ IEEE80211_RADIOTAP_HE_DATA1_BEAM_CHANGE_KNOWN = 0x0008,
+ IEEE80211_RADIOTAP_HE_DATA1_UL_DL_KNOWN = 0x0010,
+ IEEE80211_RADIOTAP_HE_DATA1_DATA_MCS_KNOWN = 0x0020,
+ IEEE80211_RADIOTAP_HE_DATA1_DATA_DCM_KNOWN = 0x0040,
+ IEEE80211_RADIOTAP_HE_DATA1_CODING_KNOWN = 0x0080,
+ IEEE80211_RADIOTAP_HE_DATA1_LDPC_XSYMSEG_KNOWN = 0x0100,
+ IEEE80211_RADIOTAP_HE_DATA1_STBC_KNOWN = 0x0200,
+ IEEE80211_RADIOTAP_HE_DATA1_SPTL_REUSE_KNOWN = 0x0400,
+ IEEE80211_RADIOTAP_HE_DATA1_SPTL_REUSE2_KNOWN = 0x0800,
+ IEEE80211_RADIOTAP_HE_DATA1_SPTL_REUSE3_KNOWN = 0x1000,
+ IEEE80211_RADIOTAP_HE_DATA1_SPTL_REUSE4_KNOWN = 0x2000,
+ IEEE80211_RADIOTAP_HE_DATA1_BW_RU_ALLOC_KNOWN = 0x4000,
+ IEEE80211_RADIOTAP_HE_DATA1_DOPPLER_KNOWN = 0x8000,
+
+ IEEE80211_RADIOTAP_HE_DATA2_PRISEC_80_KNOWN = 0x0001,
+ IEEE80211_RADIOTAP_HE_DATA2_GI_KNOWN = 0x0002,
+ IEEE80211_RADIOTAP_HE_DATA2_NUM_LTF_SYMS_KNOWN = 0x0004,
+ IEEE80211_RADIOTAP_HE_DATA2_PRE_FEC_PAD_KNOWN = 0x0008,
+ IEEE80211_RADIOTAP_HE_DATA2_TXBF_KNOWN = 0x0010,
+ IEEE80211_RADIOTAP_HE_DATA2_PE_DISAMBIG_KNOWN = 0x0020,
+ IEEE80211_RADIOTAP_HE_DATA2_TXOP_KNOWN = 0x0040,
+ IEEE80211_RADIOTAP_HE_DATA2_MIDAMBLE_KNOWN = 0x0080,
+ IEEE80211_RADIOTAP_HE_DATA2_RU_OFFSET = 0x3f00,
+ IEEE80211_RADIOTAP_HE_DATA2_RU_OFFSET_KNOWN = 0x4000,
+ IEEE80211_RADIOTAP_HE_DATA2_PRISEC_80_SEC = 0x8000,
+
+ IEEE80211_RADIOTAP_HE_DATA3_BSS_COLOR = 0x003f,
+ IEEE80211_RADIOTAP_HE_DATA3_BEAM_CHANGE = 0x0040,
+ IEEE80211_RADIOTAP_HE_DATA3_UL_DL = 0x0080,
+ IEEE80211_RADIOTAP_HE_DATA3_DATA_MCS = 0x0f00,
+ IEEE80211_RADIOTAP_HE_DATA3_DATA_DCM = 0x1000,
+ IEEE80211_RADIOTAP_HE_DATA3_CODING = 0x2000,
+ IEEE80211_RADIOTAP_HE_DATA3_LDPC_XSYMSEG = 0x4000,
+ IEEE80211_RADIOTAP_HE_DATA3_STBC = 0x8000,
+
+ IEEE80211_RADIOTAP_HE_DATA4_SU_MU_SPTL_REUSE = 0x000f,
+ IEEE80211_RADIOTAP_HE_DATA4_MU_STA_ID = 0x7ff0,
+ IEEE80211_RADIOTAP_HE_DATA4_TB_SPTL_REUSE1 = 0x000f,
+ IEEE80211_RADIOTAP_HE_DATA4_TB_SPTL_REUSE2 = 0x00f0,
+ IEEE80211_RADIOTAP_HE_DATA4_TB_SPTL_REUSE3 = 0x0f00,
+ IEEE80211_RADIOTAP_HE_DATA4_TB_SPTL_REUSE4 = 0xf000,
+
+ IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC = 0x000f,
+ IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_20MHZ = 0,
+ IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_40MHZ = 1,
+ IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_80MHZ = 2,
+ IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_160MHZ = 3,
+ IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_26T = 4,
+ IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_52T = 5,
+ IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_106T = 6,
+ IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_242T = 7,
+ IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_484T = 8,
+ IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_996T = 9,
+ IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_2x996T = 10,
+
+ IEEE80211_RADIOTAP_HE_DATA5_GI = 0x0030,
+ IEEE80211_RADIOTAP_HE_DATA5_GI_0_8 = 0,
+ IEEE80211_RADIOTAP_HE_DATA5_GI_1_6 = 1,
+ IEEE80211_RADIOTAP_HE_DATA5_GI_3_2 = 2,
+
+ IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE = 0x00c0,
+ IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_UNKNOWN = 0,
+ IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_1X = 1,
+ IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_2X = 2,
+ IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_4X = 3,
+ IEEE80211_RADIOTAP_HE_DATA5_NUM_LTF_SYMS = 0x0700,
+ IEEE80211_RADIOTAP_HE_DATA5_PRE_FEC_PAD = 0x3000,
+ IEEE80211_RADIOTAP_HE_DATA5_TXBF = 0x4000,
+ IEEE80211_RADIOTAP_HE_DATA5_PE_DISAMBIG = 0x8000,
+
+ IEEE80211_RADIOTAP_HE_DATA6_NSTS = 0x000f,
+ IEEE80211_RADIOTAP_HE_DATA6_DOPPLER = 0x0010,
+ IEEE80211_RADIOTAP_HE_DATA6_TXOP = 0x7f00,
+ IEEE80211_RADIOTAP_HE_DATA6_MIDAMBLE_PDCTY = 0x8000,
+};
+
+struct ieee80211_radiotap_he_mu {
+ __le16 flags1, flags2;
+ u8 ru_ch1[4];
+ u8 ru_ch2[4];
+};
+
+enum ieee80211_radiotap_he_mu_bits {
+ IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_MCS = 0x000f,
+ IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_MCS_KNOWN = 0x0010,
+ IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_DCM = 0x0020,
+ IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_DCM_KNOWN = 0x0040,
+ IEEE80211_RADIOTAP_HE_MU_FLAGS1_CH2_CTR_26T_RU_KNOWN = 0x0080,
+ IEEE80211_RADIOTAP_HE_MU_FLAGS1_CH1_RU_KNOWN = 0x0100,
+ IEEE80211_RADIOTAP_HE_MU_FLAGS1_CH2_RU_KNOWN = 0x0200,
+ IEEE80211_RADIOTAP_HE_MU_FLAGS1_CH1_CTR_26T_RU_KNOWN = 0x1000,
+ IEEE80211_RADIOTAP_HE_MU_FLAGS1_CH1_CTR_26T_RU = 0x2000,
+ IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_COMP_KNOWN = 0x4000,
+ IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_SYMS_USERS_KNOWN = 0x8000,
+
+ IEEE80211_RADIOTAP_HE_MU_FLAGS2_BW_FROM_SIG_A_BW = 0x0003,
+ IEEE80211_RADIOTAP_HE_MU_FLAGS2_BW_FROM_SIG_A_BW_20MHZ = 0x0000,
+ IEEE80211_RADIOTAP_HE_MU_FLAGS2_BW_FROM_SIG_A_BW_40MHZ = 0x0001,
+ IEEE80211_RADIOTAP_HE_MU_FLAGS2_BW_FROM_SIG_A_BW_80MHZ = 0x0002,
+ IEEE80211_RADIOTAP_HE_MU_FLAGS2_BW_FROM_SIG_A_BW_160MHZ = 0x0003,
+ IEEE80211_RADIOTAP_HE_MU_FLAGS2_BW_FROM_SIG_A_BW_KNOWN = 0x0004,
+ IEEE80211_RADIOTAP_HE_MU_FLAGS2_SIG_B_COMP = 0x0008,
+ IEEE80211_RADIOTAP_HE_MU_FLAGS2_SIG_B_SYMS_USERS = 0x00f0,
+ IEEE80211_RADIOTAP_HE_MU_FLAGS2_PUNC_FROM_SIG_A_BW = 0x0300,
+ IEEE80211_RADIOTAP_HE_MU_FLAGS2_PUNC_FROM_SIG_A_BW_KNOWN= 0x0400,
+ IEEE80211_RADIOTAP_HE_MU_FLAGS2_CH2_CTR_26T_RU = 0x0800,
+};
+
/**
* ieee80211_get_radiotap_len - get radiotap header length
*/
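A sketch of how a driver might pre-fill the new HE radiotap structure before handing the frame up, marking only the fields it actually knows. The chosen known bits are arbitrary; le16_encode_bits() is the generic helper from linux/bitfield.h.

#include <linux/bitfield.h>
#include <net/ieee80211_radiotap.h>

static void example_fill_radiotap_he(struct ieee80211_radiotap_he *he)
{
        he->data1 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_DATA_MCS_KNOWN |
                                IEEE80211_RADIOTAP_HE_DATA1_BW_RU_ALLOC_KNOWN);
        he->data2 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_GI_KNOWN);

        /* multi-bit fields are packed into their masks */
        he->data5 = le16_encode_bits(IEEE80211_RADIOTAP_HE_DATA5_GI_1_6,
                                     IEEE80211_RADIOTAP_HE_DATA5_GI);
}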
diff --git a/include/net/inet_common.h b/include/net/inet_common.h
index 384b90c62c0b..3ca969cbd161 100644
--- a/include/net/inet_common.h
+++ b/include/net/inet_common.h
@@ -43,7 +43,7 @@ int inet_ctl_sock_create(struct sock **sk, unsigned short family,
int inet_recv_error(struct sock *sk, struct msghdr *msg, int len,
int *addr_len);
-struct sk_buff **inet_gro_receive(struct sk_buff **head, struct sk_buff *skb);
+struct sk_buff *inet_gro_receive(struct list_head *head, struct sk_buff *skb);
int inet_gro_complete(struct sk_buff *skb, int nhoff);
struct sk_buff *inet_gso_segment(struct sk_buff *skb,
netdev_features_t features);
diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
index 0a6c9e0f2b5a..371b3b45fd5c 100644
--- a/include/net/inet_connection_sock.h
+++ b/include/net/inet_connection_sock.h
@@ -19,6 +19,7 @@
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/poll.h>
+#include <linux/kernel.h>
#include <net/inet_sock.h>
#include <net/request_sock.h>
@@ -167,7 +168,8 @@ enum inet_csk_ack_state_t {
ICSK_ACK_SCHED = 1,
ICSK_ACK_TIMER = 2,
ICSK_ACK_PUSHED = 4,
- ICSK_ACK_PUSHED2 = 8
+ ICSK_ACK_PUSHED2 = 8,
+ ICSK_ACK_NOW = 16 /* Send the next ACK immediately (once) */
};
void inet_csk_init_xmit_timers(struct sock *sk,
@@ -224,7 +226,7 @@ static inline void inet_csk_reset_xmit_timer(struct sock *sk, const int what,
if (when > max_when) {
pr_debug("reset_xmit_timer: sk=%p %d when=0x%lx, caller=%p\n",
- sk, what, when, current_text_addr());
+ sk, what, when, (void *)_THIS_IP_);
when = max_when;
}
diff --git a/include/net/inet_frag.h b/include/net/inet_frag.h
index ed07e3786d98..1662cbc0b46b 100644
--- a/include/net/inet_frag.h
+++ b/include/net/inet_frag.h
@@ -2,7 +2,7 @@
#ifndef __NET_FRAG_H__
#define __NET_FRAG_H__
-#include <linux/rhashtable.h>
+#include <linux/rhashtable-types.h>
struct netns_frags {
/* sysctls */
@@ -57,7 +57,9 @@ struct frag_v6_compare_key {
* @lock: spinlock protecting this frag
* @refcnt: reference count of the queue
* @fragments: received fragments head
+ * @rb_fragments: received fragments rb-tree root
* @fragments_tail: received fragments tail
+ * @last_run_head: the head of the last "run". see ip_fragment.c
* @stamp: timestamp of the last received fragment
* @len: total length of the original datagram
* @meat: length of received fragments so far
@@ -75,8 +77,10 @@ struct inet_frag_queue {
struct timer_list timer;
spinlock_t lock;
refcount_t refcnt;
- struct sk_buff *fragments;
+ struct sk_buff *fragments; /* Used in IPv6. */
+ struct rb_root rb_fragments; /* Used in IPv4. */
struct sk_buff *fragments_tail;
+ struct sk_buff *last_run_head;
ktime_t stamp;
int len;
int meat;
@@ -112,6 +116,9 @@ void inet_frag_kill(struct inet_frag_queue *q);
void inet_frag_destroy(struct inet_frag_queue *q);
struct inet_frag_queue *inet_frag_find(struct netns_frags *nf, void *key);
+/* Free all skbs in the queue; return the sum of their truesizes. */
+unsigned int inet_frag_rbtree_purge(struct rb_root *root);
+
static inline void inet_frag_put(struct inet_frag_queue *q)
{
if (refcount_dec_and_test(&q->refcnt))
diff --git a/include/net/inet_sock.h b/include/net/inet_sock.h
index 83d5b3c2ac42..e03b93360f33 100644
--- a/include/net/inet_sock.h
+++ b/include/net/inet_sock.h
@@ -148,6 +148,7 @@ struct inet_cork {
__s16 tos;
char priority;
__u16 gso_size;
+ u64 transmit_time;
};
struct inet_cork_full {
@@ -358,4 +359,12 @@ static inline bool inet_get_convert_csum(struct sock *sk)
return !!inet_sk(sk)->convert_csum;
}
+
+static inline bool inet_can_nonlocal_bind(struct net *net,
+ struct inet_sock *inet)
+{
+ return net->ipv4.sysctl_ip_nonlocal_bind ||
+ inet->freebind || inet->transparent;
+}
+
#endif /* _INET_SOCK_H */
diff --git a/include/net/ip.h b/include/net/ip.h
index 0d2281b4b27a..e44b1a44f67a 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -72,13 +72,27 @@ struct ipcm_cookie {
__be32 addr;
int oif;
struct ip_options_rcu *opt;
- __u8 tx_flags;
__u8 ttl;
__s16 tos;
char priority;
__u16 gso_size;
};
+static inline void ipcm_init(struct ipcm_cookie *ipcm)
+{
+ *ipcm = (struct ipcm_cookie) { .tos = -1 };
+}
+
+static inline void ipcm_init_sk(struct ipcm_cookie *ipcm,
+ const struct inet_sock *inet)
+{
+ ipcm_init(ipcm);
+
+ ipcm->sockc.tsflags = inet->sk.sk_tsflags;
+ ipcm->oif = inet->sk.sk_bound_dev_if;
+ ipcm->addr = inet->inet_saddr;
+}
+
#define IPCB(skb) ((struct inet_skb_parm*)((skb)->cb))
#define PKTINFO_SKB_CB(skb) ((struct in_pktinfo *)((skb)->cb))
@@ -138,6 +152,8 @@ int ip_build_and_send_pkt(struct sk_buff *skb, const struct sock *sk,
struct ip_options_rcu *opt);
int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt,
struct net_device *orig_dev);
+void ip_list_rcv(struct list_head *head, struct packet_type *pt,
+ struct net_device *orig_dev);
int ip_local_deliver(struct sk_buff *skb);
int ip_mr_input(struct sk_buff *skb);
int ip_output(struct net *net, struct sock *sk, struct sk_buff *skb);
@@ -148,7 +164,8 @@ void ip_send_check(struct iphdr *ip);
int __ip_local_out(struct net *net, struct sock *sk, struct sk_buff *skb);
int ip_local_out(struct net *net, struct sock *sk, struct sk_buff *skb);
-int ip_queue_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl);
+int __ip_queue_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl,
+ __u8 tos);
void ip_init(void);
int ip_append_data(struct sock *sk, struct flowi4 *fl4,
int getfrag(void *from, char *to, int offset, int len,
@@ -174,6 +191,12 @@ struct sk_buff *ip_make_skb(struct sock *sk, struct flowi4 *fl4,
struct ipcm_cookie *ipc, struct rtable **rtp,
struct inet_cork *cork, unsigned int flags);
+static inline int ip_queue_xmit(struct sock *sk, struct sk_buff *skb,
+ struct flowi *fl)
+{
+ return __ip_queue_xmit(sk, skb, fl, inet_sk(sk)->tos);
+}
+
static inline struct sk_buff *ip_finish_skb(struct sock *sk, struct flowi4 *fl4)
{
return __ip_make_skb(sk, fl4, &sk->sk_write_queue, &inet_sk(sk)->cork.base);
diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h
index 5cba71d2dc44..3d4930528db0 100644
--- a/include/net/ip6_fib.h
+++ b/include/net/ip6_fib.h
@@ -170,6 +170,7 @@ struct fib6_info {
unused:3;
struct fib6_nh fib6_nh;
+ struct rcu_head rcu;
};
struct rt6_info {
@@ -273,17 +274,22 @@ static inline void ip6_rt_put(struct rt6_info *rt)
}
struct fib6_info *fib6_info_alloc(gfp_t gfp_flags);
-void fib6_info_destroy(struct fib6_info *f6i);
+void fib6_info_destroy_rcu(struct rcu_head *head);
static inline void fib6_info_hold(struct fib6_info *f6i)
{
atomic_inc(&f6i->fib6_ref);
}
+static inline bool fib6_info_hold_safe(struct fib6_info *f6i)
+{
+ return atomic_inc_not_zero(&f6i->fib6_ref);
+}
+
static inline void fib6_info_release(struct fib6_info *f6i)
{
if (f6i && atomic_dec_and_test(&f6i->fib6_ref))
- fib6_info_destroy(f6i);
+ call_rcu(&f6i->rcu, fib6_info_destroy_rcu);
}
enum fib6_walk_state {
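A sketch of the reference pattern the new helper enables now that frees go through call_rcu(): only take a reference while the entry is still live. The caller context is assumed.

#include <net/ip6_fib.h>

static struct fib6_info *example_grab_ref(struct fib6_info *f6i)
{
        /* Called under rcu_read_lock(): the entry may already be dying. */
        if (!fib6_info_hold_safe(f6i))
                return NULL;

        return f6i;             /* drop later with fib6_info_release() */
}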
diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h
index 59656fc580df..7b9c82de11cc 100644
--- a/include/net/ip6_route.h
+++ b/include/net/ip6_route.h
@@ -66,6 +66,12 @@ static inline bool rt6_need_strict(const struct in6_addr *daddr)
(IPV6_ADDR_MULTICAST | IPV6_ADDR_LINKLOCAL | IPV6_ADDR_LOOPBACK);
}
+static inline bool rt6_qualify_for_ecmp(const struct fib6_info *f6i)
+{
+ return (f6i->fib6_flags & (RTF_GATEWAY|RTF_ADDRCONF|RTF_DYNAMIC)) ==
+ RTF_GATEWAY;
+}
+
void ip6_route_input(struct sk_buff *skb);
struct dst_entry *ip6_route_input_lookup(struct net *net,
struct net_device *dev,
diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h
index 90ff430f5e9d..b0d022ff6ea1 100644
--- a/include/net/ip_tunnels.h
+++ b/include/net/ip_tunnels.h
@@ -466,10 +466,12 @@ static inline void ip_tunnel_info_opts_get(void *to,
}
static inline void ip_tunnel_info_opts_set(struct ip_tunnel_info *info,
- const void *from, int len)
+ const void *from, int len,
+ __be16 flags)
{
memcpy(ip_tunnel_info_opts(info), from, len);
info->options_len = len;
+ info->key.tun_flags |= flags;
}
static inline struct ip_tunnel_info *lwt_tun_info(struct lwtunnel_state *lwtstate)
@@ -511,9 +513,11 @@ static inline void ip_tunnel_info_opts_get(void *to,
}
static inline void ip_tunnel_info_opts_set(struct ip_tunnel_info *info,
- const void *from, int len)
+ const void *from, int len,
+ __be16 flags)
{
info->options_len = 0;
+ info->key.tun_flags |= flags;
}
#endif /* CONFIG_INET */
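A sketch of a caller adapted to the extra flags argument, recording the option type together with the option bytes; the Geneve choice is just an example, and TUNNEL_GENEVE_OPT comes from the if_tunnel uapi header.

#include <net/ip_tunnels.h>

static void example_set_geneve_opts(struct ip_tunnel_info *info,
                                    const void *opts, int len)
{
        /* Previously only copied the bytes; now also tags the option kind. */
        ip_tunnel_info_opts_set(info, opts, len, TUNNEL_GENEVE_OPT);
}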
diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
index a0bec23c6d5e..a0d2e0bb9a94 100644
--- a/include/net/ip_vs.h
+++ b/include/net/ip_vs.h
@@ -335,6 +335,11 @@ enum ip_vs_sctp_states {
IP_VS_SCTP_S_LAST
};
+/* Connection templates use bits from state */
+#define IP_VS_CTPL_S_NONE 0x0000
+#define IP_VS_CTPL_S_ASSURED 0x0001
+#define IP_VS_CTPL_S_LAST 0x0002
+
/* Delta sequence info structure
* Each ip_vs_conn has 2 (output AND input seq. changes).
* Only used in the VS/NAT.
@@ -1221,7 +1226,7 @@ struct ip_vs_conn *ip_vs_conn_new(const struct ip_vs_conn_param *p, int dest_af,
struct ip_vs_dest *dest, __u32 fwmark);
void ip_vs_conn_expire_now(struct ip_vs_conn *cp);
-const char *ip_vs_state_name(__u16 proto, int state);
+const char *ip_vs_state_name(const struct ip_vs_conn *cp);
void ip_vs_tcp_conn_listen(struct ip_vs_conn *cp);
int ip_vs_check_template(struct ip_vs_conn *ct, struct ip_vs_dest *cdest);
@@ -1289,6 +1294,17 @@ ip_vs_control_add(struct ip_vs_conn *cp, struct ip_vs_conn *ctl_cp)
atomic_inc(&ctl_cp->n_control);
}
+/* Mark our template as assured */
+static inline void
+ip_vs_control_assure_ct(struct ip_vs_conn *cp)
+{
+ struct ip_vs_conn *ct = cp->control;
+
+ if (ct && !(ct->state & IP_VS_CTPL_S_ASSURED) &&
+ (ct->flags & IP_VS_CONN_F_TEMPLATE))
+ ct->state |= IP_VS_CTPL_S_ASSURED;
+}
+
/* IPVS netns init & cleanup functions */
int ip_vs_estimator_net_init(struct netns_ipvs *ipvs);
int ip_vs_control_net_init(struct netns_ipvs *ipvs);
diff --git a/include/net/ipv6.h b/include/net/ipv6.h
index 16475c269749..ff33f498c137 100644
--- a/include/net/ipv6.h
+++ b/include/net/ipv6.h
@@ -294,6 +294,7 @@ struct ipv6_fl_socklist {
};
struct ipcm6_cookie {
+ struct sockcm_cookie sockc;
__s16 hlimit;
__s16 tclass;
__s8 dontfrag;
@@ -301,6 +302,25 @@ struct ipcm6_cookie {
__u16 gso_size;
};
+static inline void ipcm6_init(struct ipcm6_cookie *ipc6)
+{
+ *ipc6 = (struct ipcm6_cookie) {
+ .hlimit = -1,
+ .tclass = -1,
+ .dontfrag = -1,
+ };
+}
+
+static inline void ipcm6_init_sk(struct ipcm6_cookie *ipc6,
+ const struct ipv6_pinfo *np)
+{
+ *ipc6 = (struct ipcm6_cookie) {
+ .hlimit = -1,
+ .tclass = np->tclass,
+ .dontfrag = np->dontfrag,
+ };
+}
+
static inline struct ipv6_txoptions *txopt_get(const struct ipv6_pinfo *np)
{
struct ipv6_txoptions *opt;
@@ -355,14 +375,7 @@ struct ipv6_txoptions *ipv6_dup_options(struct sock *sk,
struct ipv6_txoptions *ipv6_renew_options(struct sock *sk,
struct ipv6_txoptions *opt,
int newtype,
- struct ipv6_opt_hdr __user *newopt,
- int newoptlen);
-struct ipv6_txoptions *
-ipv6_renew_options_kern(struct sock *sk,
- struct ipv6_txoptions *opt,
- int newtype,
- struct ipv6_opt_hdr *newopt,
- int newoptlen);
+ struct ipv6_opt_hdr *newopt);
struct ipv6_txoptions *ipv6_fixup_options(struct ipv6_txoptions *opt_space,
struct ipv6_txoptions *opt);
@@ -561,34 +574,6 @@ static inline bool ipv6_prefix_equal(const struct in6_addr *addr1,
}
#endif
-struct inet_frag_queue;
-
-enum ip6_defrag_users {
- IP6_DEFRAG_LOCAL_DELIVER,
- IP6_DEFRAG_CONNTRACK_IN,
- __IP6_DEFRAG_CONNTRACK_IN = IP6_DEFRAG_CONNTRACK_IN + USHRT_MAX,
- IP6_DEFRAG_CONNTRACK_OUT,
- __IP6_DEFRAG_CONNTRACK_OUT = IP6_DEFRAG_CONNTRACK_OUT + USHRT_MAX,
- IP6_DEFRAG_CONNTRACK_BRIDGE_IN,
- __IP6_DEFRAG_CONNTRACK_BRIDGE_IN = IP6_DEFRAG_CONNTRACK_BRIDGE_IN + USHRT_MAX,
-};
-
-void ip6_frag_init(struct inet_frag_queue *q, const void *a);
-extern const struct rhashtable_params ip6_rhash_params;
-
-/*
- * Equivalent of ipv4 struct ip
- */
-struct frag_queue {
- struct inet_frag_queue q;
-
- int iif;
- __u16 nhoffset;
- u8 ecn;
-};
-
-void ip6_expire_frag_queue(struct net *net, struct frag_queue *fq);
-
static inline bool ipv6_addr_any(const struct in6_addr *a)
{
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
@@ -797,6 +782,13 @@ static inline void iph_to_flow_copy_v6addrs(struct flow_keys *flow,
#if IS_ENABLED(CONFIG_IPV6)
+static inline bool ipv6_can_nonlocal_bind(struct net *net,
+ struct inet_sock *inet)
+{
+ return net->ipv6.sysctl.ip_nonlocal_bind ||
+ inet->freebind || inet->transparent;
+}
+
/* Sysctl settings for net ipv6.auto_flowlabels */
#define IP6_AUTO_FLOW_LABEL_OFF 0
#define IP6_AUTO_FLOW_LABEL_OPTOUT 1
@@ -830,7 +822,7 @@ static inline __be32 ip6_make_flowlabel(struct net *net, struct sk_buff *skb,
* to minimize the possibility that any useful information to an
* attacker is leaked. Only lower 20 bits are relevant.
*/
- rol32(hash, 16);
+ hash = rol32(hash, 16);
flowlabel = (__force __be32)hash & IPV6_FLOWLABEL_MASK;
@@ -922,6 +914,8 @@ static inline __be32 flowi6_get_flowlabel(const struct flowi6 *fl6)
int ipv6_rcv(struct sk_buff *skb, struct net_device *dev,
struct packet_type *pt, struct net_device *orig_dev);
+void ipv6_list_rcv(struct list_head *head, struct packet_type *pt,
+ struct net_device *orig_dev);
int ip6_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb);
@@ -938,8 +932,7 @@ int ip6_append_data(struct sock *sk,
int odd, struct sk_buff *skb),
void *from, int length, int transhdrlen,
struct ipcm6_cookie *ipc6, struct flowi6 *fl6,
- struct rt6_info *rt, unsigned int flags,
- const struct sockcm_cookie *sockc);
+ struct rt6_info *rt, unsigned int flags);
int ip6_push_pending_frames(struct sock *sk);
@@ -956,8 +949,7 @@ struct sk_buff *ip6_make_skb(struct sock *sk,
void *from, int length, int transhdrlen,
struct ipcm6_cookie *ipc6, struct flowi6 *fl6,
struct rt6_info *rt, unsigned int flags,
- struct inet_cork_full *cork,
- const struct sockcm_cookie *sockc);
+ struct inet_cork_full *cork);
static inline struct sk_buff *ip6_finish_skb(struct sock *sk)
{
@@ -1107,6 +1099,8 @@ void ipv6_sysctl_unregister(void);
int ipv6_sock_mc_join(struct sock *sk, int ifindex,
const struct in6_addr *addr);
+int ipv6_sock_mc_join_ssm(struct sock *sk, int ifindex,
+ const struct in6_addr *addr, unsigned int mode);
int ipv6_sock_mc_drop(struct sock *sk, int ifindex,
const struct in6_addr *addr);
#endif /* _NET_IPV6_H */
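A sketch mirroring the IPv4 side: seed the IPv6 cookie from socket defaults before cmsg parsing. The helper name is illustrative.

#include <net/ipv6.h>

static void example_prepare_cookie6(struct sock *sk, struct ipcm6_cookie *ipc6)
{
        /* hlimit stays -1 (route default); tclass and dontfrag are taken
         * from the socket's ipv6_pinfo.
         */
        ipcm6_init_sk(ipc6, inet6_sk(sk));
}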
diff --git a/include/net/ipv6_frag.h b/include/net/ipv6_frag.h
new file mode 100644
index 000000000000..6ced1e6899b6
--- /dev/null
+++ b/include/net/ipv6_frag.h
@@ -0,0 +1,104 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _IPV6_FRAG_H
+#define _IPV6_FRAG_H
+#include <linux/kernel.h>
+#include <net/addrconf.h>
+#include <net/ipv6.h>
+#include <net/inet_frag.h>
+
+enum ip6_defrag_users {
+ IP6_DEFRAG_LOCAL_DELIVER,
+ IP6_DEFRAG_CONNTRACK_IN,
+ __IP6_DEFRAG_CONNTRACK_IN = IP6_DEFRAG_CONNTRACK_IN + USHRT_MAX,
+ IP6_DEFRAG_CONNTRACK_OUT,
+ __IP6_DEFRAG_CONNTRACK_OUT = IP6_DEFRAG_CONNTRACK_OUT + USHRT_MAX,
+ IP6_DEFRAG_CONNTRACK_BRIDGE_IN,
+ __IP6_DEFRAG_CONNTRACK_BRIDGE_IN = IP6_DEFRAG_CONNTRACK_BRIDGE_IN + USHRT_MAX,
+};
+
+/*
+ * Equivalent of ipv4 struct ip
+ */
+struct frag_queue {
+ struct inet_frag_queue q;
+
+ int iif;
+ __u16 nhoffset;
+ u8 ecn;
+};
+
+#if IS_ENABLED(CONFIG_IPV6)
+static inline void ip6frag_init(struct inet_frag_queue *q, const void *a)
+{
+ struct frag_queue *fq = container_of(q, struct frag_queue, q);
+ const struct frag_v6_compare_key *key = a;
+
+ q->key.v6 = *key;
+ fq->ecn = 0;
+}
+
+static inline u32 ip6frag_key_hashfn(const void *data, u32 len, u32 seed)
+{
+ return jhash2(data,
+ sizeof(struct frag_v6_compare_key) / sizeof(u32), seed);
+}
+
+static inline u32 ip6frag_obj_hashfn(const void *data, u32 len, u32 seed)
+{
+ const struct inet_frag_queue *fq = data;
+
+ return jhash2((const u32 *)&fq->key.v6,
+ sizeof(struct frag_v6_compare_key) / sizeof(u32), seed);
+}
+
+static inline int
+ip6frag_obj_cmpfn(struct rhashtable_compare_arg *arg, const void *ptr)
+{
+ const struct frag_v6_compare_key *key = arg->key;
+ const struct inet_frag_queue *fq = ptr;
+
+ return !!memcmp(&fq->key, key, sizeof(*key));
+}
+
+static inline void
+ip6frag_expire_frag_queue(struct net *net, struct frag_queue *fq)
+{
+ struct net_device *dev = NULL;
+ struct sk_buff *head;
+
+ rcu_read_lock();
+ spin_lock(&fq->q.lock);
+
+ if (fq->q.flags & INET_FRAG_COMPLETE)
+ goto out;
+
+ inet_frag_kill(&fq->q);
+
+ dev = dev_get_by_index_rcu(net, fq->iif);
+ if (!dev)
+ goto out;
+
+ __IP6_INC_STATS(net, __in6_dev_get(dev), IPSTATS_MIB_REASMFAILS);
+ __IP6_INC_STATS(net, __in6_dev_get(dev), IPSTATS_MIB_REASMTIMEOUT);
+
+ /* Don't send error if the first segment did not arrive. */
+ head = fq->q.fragments;
+ if (!(fq->q.flags & INET_FRAG_FIRST_IN) || !head)
+ goto out;
+
+ head->dev = dev;
+ skb_get(head);
+ spin_unlock(&fq->q.lock);
+
+ icmpv6_send(head, ICMPV6_TIME_EXCEED, ICMPV6_EXC_FRAGTIME, 0);
+ kfree_skb(head);
+ goto out_rcu_unlock;
+
+out:
+ spin_unlock(&fq->q.lock);
+out_rcu_unlock:
+ rcu_read_unlock();
+ inet_frag_put(&fq->q);
+}
+#endif
+#endif
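A sketch of how the shared helpers above are meant to be wired into each IPv6 reassembly user's rhashtable parameters; the head_offset assumes the rhash_head member of struct inet_frag_queue is named node, as in inet_frag.h of this series.

#include <linux/rhashtable.h>
#include <net/ipv6_frag.h>

static const struct rhashtable_params example_ip6_rhash_params = {
        .head_offset            = offsetof(struct inet_frag_queue, node),
        .hashfn                 = ip6frag_key_hashfn,
        .obj_hashfn             = ip6frag_obj_hashfn,
        .obj_cmpfn              = ip6frag_obj_cmpfn,
        .automatic_shrinking    = true,
};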
diff --git a/include/net/iucv/af_iucv.h b/include/net/iucv/af_iucv.h
index b0eaeb02d46d..f4c21b5a1242 100644
--- a/include/net/iucv/af_iucv.h
+++ b/include/net/iucv/af_iucv.h
@@ -153,6 +153,8 @@ struct iucv_sock_list {
atomic_t autobind_name;
};
+__poll_t iucv_sock_poll(struct file *file, struct socket *sock,
+ poll_table *wait);
void iucv_sock_link(struct iucv_sock_list *l, struct sock *s);
void iucv_sock_unlink(struct iucv_sock_list *l, struct sock *s);
void iucv_accept_enqueue(struct sock *parent, struct sock *sk);
diff --git a/include/net/lag.h b/include/net/lag.h
new file mode 100644
index 000000000000..95b880e6fdde
--- /dev/null
+++ b/include/net/lag.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_IF_LAG_H
+#define _LINUX_IF_LAG_H
+
+#include <linux/netdevice.h>
+#include <linux/if_team.h>
+#include <net/bonding.h>
+
+static inline bool net_lag_port_dev_txable(const struct net_device *port_dev)
+{
+ if (netif_is_team_port(port_dev))
+ return team_port_dev_txable(port_dev);
+ else
+ return bond_is_active_slave_dev(port_dev);
+}
+
+#endif /* _LINUX_IF_LAG_H */
diff --git a/include/net/llc.h b/include/net/llc.h
index dc35f25eb679..890a87318014 100644
--- a/include/net/llc.h
+++ b/include/net/llc.h
@@ -116,6 +116,11 @@ static inline void llc_sap_hold(struct llc_sap *sap)
refcount_inc(&sap->refcnt);
}
+static inline bool llc_sap_hold_safe(struct llc_sap *sap)
+{
+ return refcount_inc_not_zero(&sap->refcnt);
+}
+
void llc_sap_close(struct llc_sap *sap);
static inline void llc_sap_put(struct llc_sap *sap)
diff --git a/include/net/mac80211.h b/include/net/mac80211.h
index 851a5e19ae32..5790f55c241d 100644
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -23,6 +23,7 @@
#include <linux/ieee80211.h>
#include <net/cfg80211.h>
#include <net/codel.h>
+#include <net/ieee80211_radiotap.h>
#include <asm/unaligned.h>
/**
@@ -162,6 +163,8 @@ enum ieee80211_ac_numbers {
* @txop: maximum burst time in units of 32 usecs, 0 meaning disabled
* @acm: is mandatory admission control required for the access category
* @uapsd: is U-APSD mode enabled for the queue
+ * @mu_edca: is the MU EDCA configured
+ * @mu_edca_param_rec: MU EDCA Parameter Record for HE
*/
struct ieee80211_tx_queue_params {
u16 txop;
@@ -170,6 +173,8 @@ struct ieee80211_tx_queue_params {
u8 aifs;
bool acm;
bool uapsd;
+ bool mu_edca;
+ struct ieee80211_he_mu_edca_param_ac_rec mu_edca_param_rec;
};
struct ieee80211_low_level_stats {
@@ -463,6 +468,15 @@ struct ieee80211_mu_group_data {
* This structure keeps information about a BSS (and an association
* to that BSS) that can change during the lifetime of the BSS.
*
+ * @bss_color: 6-bit value to mark inter-BSS frame, if BSS supports HE
+ * @htc_trig_based_pkt_ext: default PE in 4us units, if BSS supports HE
+ * @multi_sta_back_32bit: supports BA bitmap of 32-bits in Multi-STA BACK
+ * @uora_exists: is the UORA element advertised by AP
+ * @ack_enabled: indicates support to receive a multi-TID that solicits either
+ * ACK, BACK or both
+ * @uora_ocw_range: UORA element's OCW Range field
+ * @frame_time_rts_th: HE duration RTS threshold, in units of 32us
+ * @he_support: does this BSS support HE
* @assoc: association status
* @ibss_joined: indicates whether this station is part of an IBSS
* or not
@@ -550,6 +564,14 @@ struct ieee80211_mu_group_data {
*/
struct ieee80211_bss_conf {
const u8 *bssid;
+ u8 bss_color;
+ u8 htc_trig_based_pkt_ext;
+ bool multi_sta_back_32bit;
+ bool uora_exists;
+ bool ack_enabled;
+ u8 uora_ocw_range;
+ u16 frame_time_rts_th;
+ bool he_support;
/* association related data */
bool assoc, ibss_joined;
bool ibss_creator;
@@ -1106,6 +1128,18 @@ ieee80211_tx_info_clear_status(struct ieee80211_tx_info *info)
* @RX_FLAG_AMPDU_EOF_BIT: Value of the EOF bit in the A-MPDU delimiter for this
* frame
* @RX_FLAG_AMPDU_EOF_BIT_KNOWN: The EOF value is known
+ * @RX_FLAG_RADIOTAP_HE: HE radiotap data is present
+ * (&struct ieee80211_radiotap_he, mac80211 will fill in
+ * - DATA3_DATA_MCS
+ * - DATA3_DATA_DCM
+ * - DATA3_CODING
+ * - DATA5_GI
+ * - DATA5_DATA_BW_RU_ALLOC
+ * - DATA6_NSTS
+ * - DATA3_STBC
+ * from the RX info data, so leave those zeroed when building this data)
+ * @RX_FLAG_RADIOTAP_HE_MU: HE MU radiotap data is present
+ * (&struct ieee80211_radiotap_he_mu)
*/
enum mac80211_rx_flags {
RX_FLAG_MMIC_ERROR = BIT(0),
@@ -1134,6 +1168,8 @@ enum mac80211_rx_flags {
RX_FLAG_ICV_STRIPPED = BIT(23),
RX_FLAG_AMPDU_EOF_BIT = BIT(24),
RX_FLAG_AMPDU_EOF_BIT_KNOWN = BIT(25),
+ RX_FLAG_RADIOTAP_HE = BIT(26),
+ RX_FLAG_RADIOTAP_HE_MU = BIT(27),
};
/**
@@ -1164,6 +1200,7 @@ enum mac80211_rx_encoding {
RX_ENC_LEGACY = 0,
RX_ENC_HT,
RX_ENC_VHT,
+ RX_ENC_HE,
};
/**
@@ -1198,6 +1235,9 @@ enum mac80211_rx_encoding {
* @encoding: &enum mac80211_rx_encoding
* @bw: &enum rate_info_bw
* @enc_flags: uses bits from &enum mac80211_rx_encoding_flags
+ * @he_ru: HE RU, from &enum nl80211_he_ru_alloc
+ * @he_gi: HE GI, from &enum nl80211_he_gi
+ * @he_dcm: HE DCM value
* @rx_flags: internal RX flags for mac80211
* @ampdu_reference: A-MPDU reference number, must be a different value for
* each A-MPDU but the same for each subframe within one A-MPDU
@@ -1211,7 +1251,8 @@ struct ieee80211_rx_status {
u32 flag;
u16 freq;
u8 enc_flags;
- u8 encoding:2, bw:3;
+ u8 encoding:2, bw:3, he_ru:3;
+ u8 he_gi:2, he_dcm:1;
u8 rate_idx;
u8 nss;
u8 rx_flags;
@@ -1770,6 +1811,7 @@ struct ieee80211_sta_rates {
* @supp_rates: Bitmap of supported rates (per band)
* @ht_cap: HT capabilities of this STA; restricted to our own capabilities
* @vht_cap: VHT capabilities of this STA; restricted to our own capabilities
+ * @he_cap: HE capabilities of this STA
* @max_rx_aggregation_subframes: maximal amount of frames in a single AMPDU
* that this station is allowed to transmit to us.
* Can be modified by driver.
@@ -1805,7 +1847,8 @@ struct ieee80211_sta {
u16 aid;
struct ieee80211_sta_ht_cap ht_cap;
struct ieee80211_sta_vht_cap vht_cap;
- u8 max_rx_aggregation_subframes;
+ struct ieee80211_sta_he_cap he_cap;
+ u16 max_rx_aggregation_subframes;
bool wme;
u8 uapsd_queues;
u8 max_sp;
@@ -2196,10 +2239,11 @@ enum ieee80211_hw_flags {
* it shouldn't be set.
*
* @max_tx_aggregation_subframes: maximum number of subframes in an
- * aggregate an HT driver will transmit. Though ADDBA will advertise
- * a constant value of 64 as some older APs can crash if the window
- * size is smaller (an example is LinkSys WRT120N with FW v1.0.07
- * build 002 Jun 18 2012).
+ * aggregate an HT/HE device will transmit. In HT AddBA we'll
+ * advertise a constant value of 64 as some older APs crash if
+ * the window size is smaller (an example is LinkSys WRT120N
+ * with FW v1.0.07 build 002 Jun 18 2012).
+ * For AddBA to HE capable peers this value will be used.
*
* @max_tx_fragments: maximum number of tx buffers per (A)-MSDU, sum
* of 1 + skb_shinfo(skb)->nr_frags for each skb in the frag_list.
@@ -2216,6 +2260,8 @@ enum ieee80211_hw_flags {
* the default is _GI | _BANDWIDTH.
* Use the %IEEE80211_RADIOTAP_VHT_KNOWN_\* values.
*
+ * @radiotap_he: HE radiotap validity flags
+ *
* @radiotap_timestamp: Information for the radiotap timestamp field; if the
* 'units_pos' member is set to a non-negative value it must be set to
* a combination of a IEEE80211_RADIOTAP_TIMESTAMP_UNIT_* and a
@@ -2263,8 +2309,8 @@ struct ieee80211_hw {
u8 max_rates;
u8 max_report_rates;
u8 max_rate_tries;
- u8 max_rx_aggregation_subframes;
- u8 max_tx_aggregation_subframes;
+ u16 max_rx_aggregation_subframes;
+ u16 max_tx_aggregation_subframes;
u8 max_tx_fragments;
u8 offchannel_tx_hw_queue;
u8 radiotap_mcs_details;
@@ -2904,7 +2950,7 @@ struct ieee80211_ampdu_params {
struct ieee80211_sta *sta;
u16 tid;
u16 ssn;
- u8 buf_size;
+ u16 buf_size;
bool amsdu;
u16 timeout;
};
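A sketch of a driver reporting an HE frame with the RX-status fields added above; all concrete values are placeholders, and the NL80211_RATE_INFO_HE_* constants are assumed to come from the matching nl80211 changes in this series.

#include <net/mac80211.h>

static void example_fill_he_rx_status(struct ieee80211_rx_status *status)
{
        status->encoding = RX_ENC_HE;
        status->rate_idx = 7;                           /* HE MCS 7 */
        status->nss = 2;
        status->he_gi = NL80211_RATE_INFO_HE_GI_0_8;
        status->he_dcm = 0;
        status->bw = RATE_INFO_BW_HE_RU;
        status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_106;

        /* The skb is expected to start with a pre-filled
         * struct ieee80211_radiotap_he when this flag is set.
         */
        status->flag |= RX_FLAG_RADIOTAP_HE;
}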
diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
index 47e35cce3b64..9b5fdc50519a 100644
--- a/include/net/net_namespace.h
+++ b/include/net/net_namespace.h
@@ -10,6 +10,7 @@
#include <linux/workqueue.h>
#include <linux/list.h>
#include <linux/sysctl.h>
+#include <linux/uidgid.h>
#include <net/flow.h>
#include <net/netns/core.h>
@@ -128,6 +129,7 @@ struct net {
#endif
#if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6)
struct netns_nf_frag nf_frag;
+ struct ctl_table_header *nf_frag_frags_hdr;
#endif
struct sock *nfnl;
struct sock *nfnl_stash;
@@ -169,6 +171,8 @@ extern struct net init_net;
struct net *copy_net_ns(unsigned long flags, struct user_namespace *user_ns,
struct net *old_net);
+void net_ns_get_ownership(const struct net *net, kuid_t *uid, kgid_t *gid);
+
void net_ns_barrier(void);
#else /* CONFIG_NET_NS */
#include <linux/sched.h>
@@ -181,6 +185,13 @@ static inline struct net *copy_net_ns(unsigned long flags,
return old_net;
}
+static inline void net_ns_get_ownership(const struct net *net,
+ kuid_t *uid, kgid_t *gid)
+{
+ *uid = GLOBAL_ROOT_UID;
+ *gid = GLOBAL_ROOT_GID;
+}
+
static inline void net_ns_barrier(void) {}
#endif /* CONFIG_NET_NS */
diff --git a/include/net/netevent.h b/include/net/netevent.h
index d9918261701c..4107016c3bb4 100644
--- a/include/net/netevent.h
+++ b/include/net/netevent.h
@@ -28,6 +28,7 @@ enum netevent_notif_type {
NETEVENT_DELAY_PROBE_TIME_UPDATE, /* arg is struct neigh_parms ptr */
NETEVENT_IPV4_MPATH_HASH_UPDATE, /* arg is struct net ptr */
NETEVENT_IPV6_MPATH_HASH_UPDATE, /* arg is struct net ptr */
+ NETEVENT_IPV4_FWD_UPDATE_PRIORITY_UPDATE, /* arg is struct net ptr */
};
int register_netevent_notifier(struct notifier_block *nb);
diff --git a/include/net/netfilter/ipv4/nf_conntrack_ipv4.h b/include/net/netfilter/ipv4/nf_conntrack_ipv4.h
index 73f825732326..c84b51682f08 100644
--- a/include/net/netfilter/ipv4/nf_conntrack_ipv4.h
+++ b/include/net/netfilter/ipv4/nf_conntrack_ipv4.h
@@ -10,9 +10,6 @@
#ifndef _NF_CONNTRACK_IPV4_H
#define _NF_CONNTRACK_IPV4_H
-
-const extern struct nf_conntrack_l3proto nf_conntrack_l3proto_ipv4;
-
extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp4;
extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_udp4;
extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_icmp;
diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h
index 062dc19b5840..7e012312cd61 100644
--- a/include/net/netfilter/nf_conntrack.h
+++ b/include/net/netfilter/nf_conntrack.h
@@ -41,6 +41,11 @@ union nf_conntrack_expect_proto {
/* insert expect proto private data here */
};
+struct nf_conntrack_net {
+ unsigned int users4;
+ unsigned int users6;
+};
+
#include <linux/types.h>
#include <linux/skbuff.h>
@@ -171,8 +176,6 @@ void nf_ct_netns_put(struct net *net, u8 nfproto);
*/
void *nf_ct_alloc_hashtable(unsigned int *sizep, int nulls);
-void nf_ct_free_hashtable(void *hash, unsigned int size);
-
int nf_conntrack_hash_check_insert(struct nf_conn *ct);
bool nf_ct_delete(struct nf_conn *ct, u32 pid, int report);
diff --git a/include/net/netfilter/nf_conntrack_core.h b/include/net/netfilter/nf_conntrack_core.h
index 9b5e7634713e..2a3e0974a6af 100644
--- a/include/net/netfilter/nf_conntrack_core.h
+++ b/include/net/netfilter/nf_conntrack_core.h
@@ -14,7 +14,6 @@
#define _NF_CONNTRACK_CORE_H
#include <linux/netfilter.h>
-#include <net/netfilter/nf_conntrack_l3proto.h>
#include <net/netfilter/nf_conntrack_l4proto.h>
#include <net/netfilter/nf_conntrack_ecache.h>
@@ -40,16 +39,8 @@ void nf_conntrack_cleanup_start(void);
void nf_conntrack_init_end(void);
void nf_conntrack_cleanup_end(void);
-bool nf_ct_get_tuple(const struct sk_buff *skb, unsigned int nhoff,
- unsigned int dataoff, u_int16_t l3num, u_int8_t protonum,
- struct net *net,
- struct nf_conntrack_tuple *tuple,
- const struct nf_conntrack_l3proto *l3proto,
- const struct nf_conntrack_l4proto *l4proto);
-
bool nf_ct_invert_tuple(struct nf_conntrack_tuple *inverse,
const struct nf_conntrack_tuple *orig,
- const struct nf_conntrack_l3proto *l3proto,
const struct nf_conntrack_l4proto *l4proto);
/* Find a connection corresponding to a tuple. */
@@ -75,10 +66,8 @@ static inline int nf_conntrack_confirm(struct sk_buff *skb)
return ret;
}
-void
-print_tuple(struct seq_file *s, const struct nf_conntrack_tuple *tuple,
- const struct nf_conntrack_l3proto *l3proto,
- const struct nf_conntrack_l4proto *proto);
+void print_tuple(struct seq_file *s, const struct nf_conntrack_tuple *tuple,
+ const struct nf_conntrack_l4proto *proto);
#define CONNTRACK_LOCKS 1024
diff --git a/include/net/netfilter/nf_conntrack_count.h b/include/net/netfilter/nf_conntrack_count.h
index 3a188a0923a3..4b2b2baf8ab4 100644
--- a/include/net/netfilter/nf_conntrack_count.h
+++ b/include/net/netfilter/nf_conntrack_count.h
@@ -1,8 +1,23 @@
#ifndef _NF_CONNTRACK_COUNT_H
#define _NF_CONNTRACK_COUNT_H
+#include <linux/list.h>
+
struct nf_conncount_data;
+enum nf_conncount_list_add {
+ NF_CONNCOUNT_ADDED, /* list add was ok */
+ NF_CONNCOUNT_ERR, /* -ENOMEM, must drop skb */
+ NF_CONNCOUNT_SKIP, /* list is already reclaimed by gc */
+};
+
+struct nf_conncount_list {
+ spinlock_t list_lock;
+ struct list_head head; /* connections with the same filtering key */
+ unsigned int count; /* length of list */
+ bool dead;
+};
+
struct nf_conncount_data *nf_conncount_init(struct net *net, unsigned int family,
unsigned int keylen);
void nf_conncount_destroy(struct net *net, unsigned int family,
@@ -14,15 +29,21 @@ unsigned int nf_conncount_count(struct net *net,
const struct nf_conntrack_tuple *tuple,
const struct nf_conntrack_zone *zone);
-unsigned int nf_conncount_lookup(struct net *net, struct hlist_head *head,
- const struct nf_conntrack_tuple *tuple,
- const struct nf_conntrack_zone *zone,
- bool *addit);
+void nf_conncount_lookup(struct net *net, struct nf_conncount_list *list,
+ const struct nf_conntrack_tuple *tuple,
+ const struct nf_conntrack_zone *zone,
+ bool *addit);
+
+void nf_conncount_list_init(struct nf_conncount_list *list);
+
+enum nf_conncount_list_add
+nf_conncount_add(struct nf_conncount_list *list,
+ const struct nf_conntrack_tuple *tuple,
+ const struct nf_conntrack_zone *zone);
-bool nf_conncount_add(struct hlist_head *head,
- const struct nf_conntrack_tuple *tuple,
- const struct nf_conntrack_zone *zone);
+bool nf_conncount_gc_list(struct net *net,
+ struct nf_conncount_list *list);
-void nf_conncount_cache_free(struct hlist_head *hhead);
+void nf_conncount_cache_free(struct nf_conncount_list *list);
#endif
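A hedged sketch of the per-key flow the reworked nf_conncount API suggests: the owner initializes the list once with nf_conncount_list_init(), then adds connections and acts on the returned status. The tuple and zone pointers are assumed to come from the caller's own lookup path.

/* Sketch only: exercising the new nf_conncount_list add path. */
#include <net/netfilter/nf_conntrack_count.h>
#include <linux/errno.h>

static int example_track_connection(struct nf_conncount_list *list,
				    const struct nf_conntrack_tuple *tuple,
				    const struct nf_conntrack_zone *zone)
{
	switch (nf_conncount_add(list, tuple, zone)) {
	case NF_CONNCOUNT_ADDED:
		return 0;		/* connection accounted for */
	case NF_CONNCOUNT_SKIP:
		return 0;		/* list already reclaimed by gc */
	case NF_CONNCOUNT_ERR:
	default:
		return -ENOMEM;		/* caller is expected to drop the skb */
	}
}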
diff --git a/include/net/netfilter/nf_conntrack_helper.h b/include/net/netfilter/nf_conntrack_helper.h
index 32c2a94a219d..2492120b8097 100644
--- a/include/net/netfilter/nf_conntrack_helper.h
+++ b/include/net/netfilter/nf_conntrack_helper.h
@@ -103,9 +103,7 @@ int nf_conntrack_helpers_register(struct nf_conntrack_helper *, unsigned int);
void nf_conntrack_helpers_unregister(struct nf_conntrack_helper *,
unsigned int);
-struct nf_conn_help *nf_ct_helper_ext_add(struct nf_conn *ct,
- struct nf_conntrack_helper *helper,
- gfp_t gfp);
+struct nf_conn_help *nf_ct_helper_ext_add(struct nf_conn *ct, gfp_t gfp);
int __nf_ct_try_assign_helper(struct nf_conn *ct, struct nf_conn *tmpl,
gfp_t flags);
diff --git a/include/net/netfilter/nf_conntrack_l3proto.h b/include/net/netfilter/nf_conntrack_l3proto.h
deleted file mode 100644
index d5808f3e2715..000000000000
--- a/include/net/netfilter/nf_conntrack_l3proto.h
+++ /dev/null
@@ -1,84 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Copyright (C)2003,2004 USAGI/WIDE Project
- *
- * Header for use in defining a given L3 protocol for connection tracking.
- *
- * Author:
- * Yasuyuki Kozakai @USAGI <yasuyuki.kozakai@toshiba.co.jp>
- *
- * Derived from include/netfilter_ipv4/ip_conntrack_protocol.h
- */
-
-#ifndef _NF_CONNTRACK_L3PROTO_H
-#define _NF_CONNTRACK_L3PROTO_H
-#include <linux/netlink.h>
-#include <net/netlink.h>
-#include <linux/seq_file.h>
-#include <net/netfilter/nf_conntrack.h>
-
-struct nf_conntrack_l3proto {
- /* L3 Protocol Family number. ex) PF_INET */
- u_int16_t l3proto;
-
- /* size of tuple nlattr, fills a hole */
- u16 nla_size;
-
- /*
- * Try to fill in the third arg: nhoff is offset of l3 proto
- * hdr. Return true if possible.
- */
- bool (*pkt_to_tuple)(const struct sk_buff *skb, unsigned int nhoff,
- struct nf_conntrack_tuple *tuple);
-
- /*
- * Invert the per-proto part of the tuple: ie. turn xmit into reply.
- * Some packets can't be inverted: return 0 in that case.
- */
- bool (*invert_tuple)(struct nf_conntrack_tuple *inverse,
- const struct nf_conntrack_tuple *orig);
-
- /*
- * Called before tracking.
- * *dataoff: offset of protocol header (TCP, UDP,...) in skb
- * *protonum: protocol number
- */
- int (*get_l4proto)(const struct sk_buff *skb, unsigned int nhoff,
- unsigned int *dataoff, u_int8_t *protonum);
-
-#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
- int (*tuple_to_nlattr)(struct sk_buff *skb,
- const struct nf_conntrack_tuple *t);
- int (*nlattr_to_tuple)(struct nlattr *tb[],
- struct nf_conntrack_tuple *t);
- const struct nla_policy *nla_policy;
-#endif
-
- /* Called when netns wants to use connection tracking */
- int (*net_ns_get)(struct net *);
- void (*net_ns_put)(struct net *);
-
- /* Module (if any) which this is connected to. */
- struct module *me;
-};
-
-extern struct nf_conntrack_l3proto __rcu *nf_ct_l3protos[NFPROTO_NUMPROTO];
-
-/* Protocol global registration. */
-int nf_ct_l3proto_register(const struct nf_conntrack_l3proto *proto);
-void nf_ct_l3proto_unregister(const struct nf_conntrack_l3proto *proto);
-
-const struct nf_conntrack_l3proto *nf_ct_l3proto_find_get(u_int16_t l3proto);
-
-/* Existing built-in protocols */
-extern struct nf_conntrack_l3proto nf_conntrack_l3proto_generic;
-
-static inline struct nf_conntrack_l3proto *
-__nf_ct_l3proto_find(u_int16_t l3proto)
-{
- if (unlikely(l3proto >= NFPROTO_NUMPROTO))
- return &nf_conntrack_l3proto_generic;
- return rcu_dereference(nf_ct_l3protos[l3proto]);
-}
-
-#endif /*_NF_CONNTRACK_L3PROTO_H*/
diff --git a/include/net/netfilter/nf_conntrack_l4proto.h b/include/net/netfilter/nf_conntrack_l4proto.h
index a7220eef9aee..8465263b297d 100644
--- a/include/net/netfilter/nf_conntrack_l4proto.h
+++ b/include/net/netfilter/nf_conntrack_l4proto.h
@@ -36,7 +36,7 @@ struct nf_conntrack_l4proto {
struct net *net, struct nf_conntrack_tuple *tuple);
/* Invert the per-proto part of the tuple: ie. turn xmit into reply.
- * Some packets can't be inverted: return 0 in that case.
+ * Only used by icmp, most protocols use a generic version.
*/
bool (*invert_tuple)(struct nf_conntrack_tuple *inverse,
const struct nf_conntrack_tuple *orig);
@@ -45,13 +45,12 @@ struct nf_conntrack_l4proto {
int (*packet)(struct nf_conn *ct,
const struct sk_buff *skb,
unsigned int dataoff,
- enum ip_conntrack_info ctinfo,
- unsigned int *timeouts);
+ enum ip_conntrack_info ctinfo);
/* Called when a new connection for this protocol found;
* returns TRUE if it's OK. If so, packet() called next. */
bool (*new)(struct nf_conn *ct, const struct sk_buff *skb,
- unsigned int dataoff, unsigned int *timeouts);
+ unsigned int dataoff);
/* Called when a conntrack entry is destroyed */
void (*destroy)(struct nf_conn *ct);
@@ -63,9 +62,6 @@ struct nf_conntrack_l4proto {
/* called by gc worker if table is full */
bool (*can_early_drop)(const struct nf_conn *ct);
- /* Return the array of timeouts for this protocol. */
- unsigned int *(*get_timeouts)(struct net *net);
-
/* convert protoinfo to nfnetink attributes */
int (*to_nlattr)(struct sk_buff *skb, struct nlattr *nla,
struct nf_conn *ct);
@@ -81,7 +77,6 @@ struct nf_conntrack_l4proto {
struct nf_conntrack_tuple *t);
const struct nla_policy *nla_policy;
-#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
struct {
int (*nlattr_to_obj)(struct nlattr *tb[],
struct net *net, void *data);
@@ -91,7 +86,6 @@ struct nf_conntrack_l4proto {
u16 nlattr_max;
const struct nla_policy *nla_policy;
} ctnl_timeout;
-#endif
#ifdef CONFIG_NF_CONNTRACK_PROCFS
/* Print out the private part of the conntrack. */
void (*print_conntrack)(struct seq_file *s, struct nf_conn *);
@@ -134,10 +128,6 @@ void nf_ct_l4proto_pernet_unregister(struct net *net,
/* Protocol global registration. */
int nf_ct_l4proto_register_one(const struct nf_conntrack_l4proto *proto);
void nf_ct_l4proto_unregister_one(const struct nf_conntrack_l4proto *proto);
-int nf_ct_l4proto_register(const struct nf_conntrack_l4proto * const proto[],
- unsigned int num_proto);
-void nf_ct_l4proto_unregister(const struct nf_conntrack_l4proto * const proto[],
- unsigned int num_proto);
/* Generic netlink helpers */
int nf_ct_port_tuple_to_nlattr(struct sk_buff *skb,
diff --git a/include/net/netfilter/nf_conntrack_timeout.h b/include/net/netfilter/nf_conntrack_timeout.h
index 9468ab4ad12d..d5f62cc6c2ae 100644
--- a/include/net/netfilter/nf_conntrack_timeout.h
+++ b/include/net/netfilter/nf_conntrack_timeout.h
@@ -11,24 +11,28 @@
#define CTNL_TIMEOUT_NAME_MAX 32
+struct nf_ct_timeout {
+ __u16 l3num;
+ const struct nf_conntrack_l4proto *l4proto;
+ char data[0];
+};
+
struct ctnl_timeout {
struct list_head head;
struct rcu_head rcu_head;
refcount_t refcnt;
char name[CTNL_TIMEOUT_NAME_MAX];
- __u16 l3num;
- const struct nf_conntrack_l4proto *l4proto;
- char data[0];
+ struct nf_ct_timeout timeout;
};
struct nf_conn_timeout {
- struct ctnl_timeout __rcu *timeout;
+ struct nf_ct_timeout __rcu *timeout;
};
static inline unsigned int *
nf_ct_timeout_data(struct nf_conn_timeout *t)
{
- struct ctnl_timeout *timeout;
+ struct nf_ct_timeout *timeout;
timeout = rcu_dereference(t->timeout);
if (timeout == NULL)
@@ -49,7 +53,7 @@ struct nf_conn_timeout *nf_ct_timeout_find(const struct nf_conn *ct)
static inline
struct nf_conn_timeout *nf_ct_timeout_ext_add(struct nf_conn *ct,
- struct ctnl_timeout *timeout,
+ struct nf_ct_timeout *timeout,
gfp_t gfp)
{
#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
@@ -67,32 +71,23 @@ struct nf_conn_timeout *nf_ct_timeout_ext_add(struct nf_conn *ct,
#endif
};
-static inline unsigned int *
-nf_ct_timeout_lookup(struct net *net, struct nf_conn *ct,
- const struct nf_conntrack_l4proto *l4proto)
+static inline unsigned int *nf_ct_timeout_lookup(const struct nf_conn *ct)
{
+ unsigned int *timeouts = NULL;
#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
struct nf_conn_timeout *timeout_ext;
- unsigned int *timeouts;
timeout_ext = nf_ct_timeout_find(ct);
- if (timeout_ext) {
+ if (timeout_ext)
timeouts = nf_ct_timeout_data(timeout_ext);
- if (unlikely(!timeouts))
- timeouts = l4proto->get_timeouts(net);
- } else {
- timeouts = l4proto->get_timeouts(net);
- }
-
- return timeouts;
-#else
- return l4proto->get_timeouts(net);
#endif
+ return timeouts;
}
#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
int nf_conntrack_timeout_init(void);
void nf_conntrack_timeout_fini(void);
+void nf_ct_untimeout(struct net *net, struct nf_ct_timeout *timeout);
#else
static inline int nf_conntrack_timeout_init(void)
{
@@ -106,8 +101,8 @@ static inline void nf_conntrack_timeout_fini(void)
#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
-extern struct ctnl_timeout *(*nf_ct_timeout_find_get_hook)(struct net *net, const char *name);
-extern void (*nf_ct_timeout_put_hook)(struct ctnl_timeout *timeout);
+extern struct nf_ct_timeout *(*nf_ct_timeout_find_get_hook)(struct net *net, const char *name);
+extern void (*nf_ct_timeout_put_hook)(struct nf_ct_timeout *timeout);
#endif
#endif /* _NF_CONNTRACK_TIMEOUT_H */
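With the l4proto->get_timeouts() hook removed, nf_ct_timeout_lookup() now takes only the conntrack entry and returns NULL when no timeout extension is attached, leaving the fallback to the caller. A hedged sketch of that caller pattern; "proto_default_timeouts" is a placeholder array, not an API introduced by this patch.

/* Sketch: per-ct timeout with fallback to protocol defaults. */
#include <net/netfilter/nf_conntrack_timeout.h>

static unsigned int example_pick_timeout(const struct nf_conn *ct,
					 const unsigned int *proto_default_timeouts,
					 unsigned int state)
{
	unsigned int *timeouts = nf_ct_timeout_lookup(ct);

	if (!timeouts)			/* no nfnetlink_cttimeout policy attached */
		return proto_default_timeouts[state];

	return timeouts[state];
}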
diff --git a/include/net/netfilter/nf_flow_table.h b/include/net/netfilter/nf_flow_table.h
index ba9fa4592f2b..0e355f4a3d76 100644
--- a/include/net/netfilter/nf_flow_table.h
+++ b/include/net/netfilter/nf_flow_table.h
@@ -4,7 +4,7 @@
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/netdevice.h>
-#include <linux/rhashtable.h>
+#include <linux/rhashtable-types.h>
#include <linux/rcupdate.h>
#include <linux/netfilter/nf_conntrack_tuple_common.h>
#include <net/dst.h>
diff --git a/include/net/netfilter/nf_log.h b/include/net/netfilter/nf_log.h
index e811ac07ea94..0d3920896d50 100644
--- a/include/net/netfilter/nf_log.h
+++ b/include/net/netfilter/nf_log.h
@@ -106,7 +106,8 @@ int nf_log_dump_udp_header(struct nf_log_buf *m, const struct sk_buff *skb,
int nf_log_dump_tcp_header(struct nf_log_buf *m, const struct sk_buff *skb,
u8 proto, int fragment, unsigned int offset,
unsigned int logflags);
-void nf_log_dump_sk_uid_gid(struct nf_log_buf *m, struct sock *sk);
+void nf_log_dump_sk_uid_gid(struct net *net, struct nf_log_buf *m,
+ struct sock *sk);
void nf_log_dump_packet_common(struct nf_log_buf *m, u_int8_t pf,
unsigned int hooknum, const struct sk_buff *skb,
const struct net_device *in,
diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
index 08c005ce56e9..dc417ef0a0c5 100644
--- a/include/net/netfilter/nf_tables.h
+++ b/include/net/netfilter/nf_tables.h
@@ -150,6 +150,7 @@ static inline void nft_data_debug(const struct nft_data *data)
* @portid: netlink portID of the original message
* @seq: netlink sequence number
* @family: protocol family
+ * @level: depth of the chains
* @report: notify via unicast netlink message
*/
struct nft_ctx {
@@ -160,6 +161,7 @@ struct nft_ctx {
u32 portid;
u32 seq;
u8 family;
+ u8 level;
bool report;
};
@@ -865,7 +867,6 @@ enum nft_chain_flags {
* @table: table that this chain belongs to
* @handle: chain handle
* @use: number of jump references to this chain
- * @level: length of longest path to this chain
* @flags: bitmask of enum nft_chain_flags
* @name: name of the chain
*/
@@ -878,7 +879,6 @@ struct nft_chain {
struct nft_table *table;
u64 handle;
u32 use;
- u16 level;
u8 flags:6,
genmask:2;
char *name;
@@ -1124,7 +1124,6 @@ struct nft_flowtable {
u32 genmask:2,
use:30;
u64 handle;
- char *dev_name[NFT_FLOWTABLE_DEVICE_MAX];
/* runtime data below here */
struct nf_hook_ops *ops ____cacheline_aligned;
struct nf_flowtable data;
diff --git a/include/net/netfilter/nf_tables_core.h b/include/net/netfilter/nf_tables_core.h
index e0c0c2558ec4..8da837d2aaf9 100644
--- a/include/net/netfilter/nf_tables_core.h
+++ b/include/net/netfilter/nf_tables_core.h
@@ -65,4 +65,17 @@ extern const struct nft_expr_ops nft_payload_fast_ops;
extern struct static_key_false nft_counters_enabled;
extern struct static_key_false nft_trace_enabled;
+extern struct nft_set_type nft_set_rhash_type;
+extern struct nft_set_type nft_set_hash_type;
+extern struct nft_set_type nft_set_hash_fast_type;
+extern struct nft_set_type nft_set_rbtree_type;
+extern struct nft_set_type nft_set_bitmap_type;
+
+struct nft_expr;
+struct nft_regs;
+struct nft_pktinfo;
+void nft_meta_get_eval(const struct nft_expr *expr,
+ struct nft_regs *regs, const struct nft_pktinfo *pkt);
+void nft_lookup_eval(const struct nft_expr *expr,
+ struct nft_regs *regs, const struct nft_pktinfo *pkt);
#endif /* _NET_NF_TABLES_CORE_H */
diff --git a/include/net/netfilter/nf_tproxy.h b/include/net/netfilter/nf_tproxy.h
index 9754a50ecde9..82d0e41b76f2 100644
--- a/include/net/netfilter/nf_tproxy.h
+++ b/include/net/netfilter/nf_tproxy.h
@@ -17,6 +17,14 @@ static inline bool nf_tproxy_sk_is_transparent(struct sock *sk)
return false;
}
+/* assign a socket to the skb -- consumes sk */
+static inline void nf_tproxy_assign_sock(struct sk_buff *skb, struct sock *sk)
+{
+ skb_orphan(skb);
+ skb->sk = sk;
+ skb->destructor = sock_edemux;
+}
+
__be32 nf_tproxy_laddr4(struct sk_buff *skb, __be32 user_laddr, __be32 daddr);
/**
@@ -64,7 +72,7 @@ nf_tproxy_handle_time_wait4(struct net *net, struct sk_buff *skb,
* belonging to established connections going through that one.
*/
struct sock *
-nf_tproxy_get_sock_v4(struct net *net, struct sk_buff *skb, void *hp,
+nf_tproxy_get_sock_v4(struct net *net, struct sk_buff *skb,
const u8 protocol,
const __be32 saddr, const __be32 daddr,
const __be16 sport, const __be16 dport,
@@ -103,7 +111,7 @@ nf_tproxy_handle_time_wait6(struct sk_buff *skb, int tproto, int thoff,
struct sock *sk);
struct sock *
-nf_tproxy_get_sock_v6(struct net *net, struct sk_buff *skb, int thoff, void *hp,
+nf_tproxy_get_sock_v6(struct net *net, struct sk_buff *skb, int thoff,
const u8 protocol,
const struct in6_addr *saddr, const struct in6_addr *daddr,
const __be16 sport, const __be16 dport,
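A short sketch of using the new nf_tproxy_assign_sock() helper shown above; the surrounding lookup and NULL handling are assumed to be the caller's responsibility.

/* Sketch: hand a transparently matched socket to the skb. The helper
 * orphans the skb, takes over the sk reference and sets sock_edemux
 * as destructor so the reference is dropped with the skb.
 */
#include <net/netfilter/nf_tproxy.h>

static void example_tproxy_steal(struct sk_buff *skb, struct sock *sk)
{
	if (sk)
		nf_tproxy_assign_sock(skb, sk);
}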
diff --git a/include/net/netns/hash.h b/include/net/netns/hash.h
index 24c78183a4c2..16a842456189 100644
--- a/include/net/netns/hash.h
+++ b/include/net/netns/hash.h
@@ -9,12 +9,7 @@ struct net;
static inline u32 net_hash_mix(const struct net *net)
{
#ifdef CONFIG_NET_NS
- /*
- * shift this right to eliminate bits, that are
- * always zeroed
- */
-
- return (u32)(((unsigned long)net) >> L1_CACHE_SHIFT);
+ return (u32)(((unsigned long)net) >> ilog2(sizeof(*net)));
#else
return 0;
#endif
diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
index 661348f23ea5..e47503b4e4d1 100644
--- a/include/net/netns/ipv4.h
+++ b/include/net/netns/ipv4.h
@@ -98,6 +98,7 @@ struct netns_ipv4 {
int sysctl_ip_default_ttl;
int sysctl_ip_no_pmtu_disc;
int sysctl_ip_fwd_use_pmtu;
+ int sysctl_ip_fwd_update_priority;
int sysctl_ip_nonlocal_bind;
/* Shall we try to damage output packets if routing dev changes? */
int sysctl_ip_dynaddr;
diff --git a/include/net/netns/ipv6.h b/include/net/netns/ipv6.h
index c978a31b0f84..f0e396ab9bec 100644
--- a/include/net/netns/ipv6.h
+++ b/include/net/netns/ipv6.h
@@ -32,6 +32,7 @@ struct netns_sysctl_ipv6 {
int flowlabel_consistency;
int auto_flowlabels;
int icmpv6_time;
+ int icmpv6_echo_ignore_all;
int anycast_src_echo_reply;
int ip_nonlocal_bind;
int fwmark_reflect;
@@ -109,7 +110,6 @@ struct netns_ipv6 {
#if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6)
struct netns_nf_frag {
- struct netns_sysctl_ipv6 sysctl;
struct netns_frags frags;
};
#endif
diff --git a/include/net/netns/nftables.h b/include/net/netns/nftables.h
index 94767ea3a490..286fd960896f 100644
--- a/include/net/netns/nftables.h
+++ b/include/net/netns/nftables.h
@@ -7,6 +7,7 @@
struct netns_nftables {
struct list_head tables;
struct list_head commit_list;
+ struct mutex commit_mutex;
unsigned int base_seq;
u8 gencursor;
u8 validate_state;
diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h
index a3c1a2c47cd4..ef727f71336e 100644
--- a/include/net/pkt_cls.h
+++ b/include/net/pkt_cls.h
@@ -7,12 +7,16 @@
#include <net/sch_generic.h>
#include <net/act_api.h>
+/* TC action not accessible from user space */
+#define TC_ACT_REINSERT (TC_ACT_VALUE_MAX + 1)
+
/* Basic packet classifier frontend definitions. */
struct tcf_walker {
int stop;
int skip;
int count;
+ unsigned long cookie;
int (*fn)(struct tcf_proto *, void *node, struct tcf_walker *);
};
@@ -36,9 +40,9 @@ struct tcf_block_cb;
bool tcf_queue_work(struct rcu_work *rwork, work_func_t func);
#ifdef CONFIG_NET_CLS
-struct tcf_chain *tcf_chain_get(struct tcf_block *block, u32 chain_index,
- bool create);
-void tcf_chain_put(struct tcf_chain *chain);
+struct tcf_chain *tcf_chain_get_by_act(struct tcf_block *block,
+ u32 chain_index);
+void tcf_chain_put_by_act(struct tcf_chain *chain);
void tcf_block_netif_keep_dst(struct tcf_block *block);
int tcf_block_get(struct tcf_block **p_block,
struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
@@ -73,11 +77,13 @@ void tcf_block_cb_incref(struct tcf_block_cb *block_cb);
unsigned int tcf_block_cb_decref(struct tcf_block_cb *block_cb);
struct tcf_block_cb *__tcf_block_cb_register(struct tcf_block *block,
tc_setup_cb_t *cb, void *cb_ident,
- void *cb_priv);
+ void *cb_priv,
+ struct netlink_ext_ack *extack);
int tcf_block_cb_register(struct tcf_block *block,
tc_setup_cb_t *cb, void *cb_ident,
- void *cb_priv);
-void __tcf_block_cb_unregister(struct tcf_block_cb *block_cb);
+ void *cb_priv, struct netlink_ext_ack *extack);
+void __tcf_block_cb_unregister(struct tcf_block *block,
+ struct tcf_block_cb *block_cb);
void tcf_block_cb_unregister(struct tcf_block *block,
tc_setup_cb_t *cb, void *cb_ident);
@@ -161,7 +167,8 @@ unsigned int tcf_block_cb_decref(struct tcf_block_cb *block_cb)
static inline
struct tcf_block_cb *__tcf_block_cb_register(struct tcf_block *block,
tc_setup_cb_t *cb, void *cb_ident,
- void *cb_priv)
+ void *cb_priv,
+ struct netlink_ext_ack *extack)
{
return NULL;
}
@@ -169,13 +176,14 @@ struct tcf_block_cb *__tcf_block_cb_register(struct tcf_block *block,
static inline
int tcf_block_cb_register(struct tcf_block *block,
tc_setup_cb_t *cb, void *cb_ident,
- void *cb_priv)
+ void *cb_priv, struct netlink_ext_ack *extack)
{
return 0;
}
static inline
-void __tcf_block_cb_unregister(struct tcf_block_cb *block_cb)
+void __tcf_block_cb_unregister(struct tcf_block *block,
+ struct tcf_block_cb *block_cb)
{
}
@@ -596,6 +604,7 @@ struct tc_block_offload {
enum tc_block_command command;
enum tcf_block_binder_type binder_type;
struct tcf_block *block;
+ struct netlink_ext_ack *extack;
};
struct tc_cls_common_offload {
@@ -715,6 +724,8 @@ enum tc_fl_command {
TC_CLSFLOWER_REPLACE,
TC_CLSFLOWER_DESTROY,
TC_CLSFLOWER_STATS,
+ TC_CLSFLOWER_TMPLT_CREATE,
+ TC_CLSFLOWER_TMPLT_DESTROY,
};
struct tc_cls_flower_offload {
@@ -771,6 +782,7 @@ struct tc_mqprio_qopt_offload {
struct tc_cookie {
u8 *data;
u32 len;
+ struct rcu_head rcu;
};
struct tc_qopt_offload_stats {
diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h
index 815b92a23936..7dc769e5452b 100644
--- a/include/net/pkt_sched.h
+++ b/include/net/pkt_sched.h
@@ -72,6 +72,8 @@ struct qdisc_watchdog {
struct Qdisc *qdisc;
};
+void qdisc_watchdog_init_clockid(struct qdisc_watchdog *wd, struct Qdisc *qdisc,
+ clockid_t clockid);
void qdisc_watchdog_init(struct qdisc_watchdog *wd, struct Qdisc *qdisc);
void qdisc_watchdog_schedule_ns(struct qdisc_watchdog *wd, u64 expires);
@@ -153,4 +155,9 @@ struct tc_cbs_qopt_offload {
s32 sendslope;
};
+struct tc_etf_qopt_offload {
+ u8 enable;
+ s32 queue;
+};
+
#endif
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index 6488daa32f82..a6d00093f35e 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -20,6 +20,9 @@ struct qdisc_walker;
struct tcf_walker;
struct module;
+typedef int tc_setup_cb_t(enum tc_setup_type type,
+ void *type_data, void *cb_priv);
+
struct qdisc_rate_table {
struct tc_ratespec rate;
u32 data[256];
@@ -232,9 +235,17 @@ struct tcf_result {
u32 classid;
};
const struct tcf_proto *goto_tp;
+
+ /* used by the TC_ACT_REINSERT action */
+ struct {
+ bool ingress;
+ struct gnet_stats_queue *qstats;
+ };
};
};
+struct tcf_chain;
+
struct tcf_proto_ops {
struct list_head head;
char kind[IFNAMSIZ];
@@ -256,11 +267,22 @@ struct tcf_proto_ops {
bool *last,
struct netlink_ext_ack *);
void (*walk)(struct tcf_proto*, struct tcf_walker *arg);
+ int (*reoffload)(struct tcf_proto *tp, bool add,
+ tc_setup_cb_t *cb, void *cb_priv,
+ struct netlink_ext_ack *extack);
void (*bind_class)(void *, u32, unsigned long);
+ void * (*tmplt_create)(struct net *net,
+ struct tcf_chain *chain,
+ struct nlattr **tca,
+ struct netlink_ext_ack *extack);
+ void (*tmplt_destroy)(void *tmplt_priv);
/* rtnetlink specific */
int (*dump)(struct net*, struct tcf_proto*, void *,
struct sk_buff *skb, struct tcmsg*);
+ int (*tmplt_dump)(struct sk_buff *skb,
+ struct net *net,
+ void *tmplt_priv);
struct module *owner;
};
@@ -269,6 +291,8 @@ struct tcf_proto {
/* Fast access part */
struct tcf_proto __rcu *next;
void __rcu *root;
+
+ /* called under RCU BH lock*/
int (*classify)(struct sk_buff *,
const struct tcf_proto *,
struct tcf_result *);
@@ -294,11 +318,14 @@ typedef void tcf_chain_head_change_t(struct tcf_proto *tp_head, void *priv);
struct tcf_chain {
struct tcf_proto __rcu *filter_chain;
- struct list_head filter_chain_list;
struct list_head list;
struct tcf_block *block;
u32 index; /* chain index */
unsigned int refcnt;
+ unsigned int action_refcnt;
+ bool explicitly_created;
+ const struct tcf_proto_ops *tmplt_ops;
+ void *tmplt_priv;
};
struct tcf_block {
@@ -312,6 +339,10 @@ struct tcf_block {
bool keep_dst;
unsigned int offloadcnt; /* Number of oddloaded filters */
unsigned int nooffloaddevcnt; /* Number of devs unable to do offload */
+ struct {
+ struct tcf_chain *chain;
+ struct list_head filter_chain_list;
+ } chain0;
};
static inline void tcf_block_offload_inc(struct tcf_block *block, u32 *flags)
@@ -330,6 +361,21 @@ static inline void tcf_block_offload_dec(struct tcf_block *block, u32 *flags)
block->offloadcnt--;
}
+static inline void
+tc_cls_offload_cnt_update(struct tcf_block *block, unsigned int *cnt,
+ u32 *flags, bool add)
+{
+ if (add) {
+ if (!*cnt)
+ tcf_block_offload_inc(block, flags);
+ (*cnt)++;
+ } else {
+ (*cnt)--;
+ if (!*cnt)
+ tcf_block_offload_dec(block, flags);
+ }
+}
+
static inline void qdisc_cb_private_validate(const struct sk_buff *skb, int sz)
{
struct qdisc_skb_cb *qcb;
@@ -529,6 +575,15 @@ static inline void skb_reset_tc(struct sk_buff *skb)
#endif
}
+static inline bool skb_is_tc_redirected(const struct sk_buff *skb)
+{
+#ifdef CONFIG_NET_CLS_ACT
+ return skb->tc_redirected;
+#else
+ return false;
+#endif
+}
+
static inline bool skb_at_tc_ingress(const struct sk_buff *skb)
{
#ifdef CONFIG_NET_CLS_ACT
@@ -1068,4 +1123,17 @@ void mini_qdisc_pair_swap(struct mini_Qdisc_pair *miniqp,
void mini_qdisc_pair_init(struct mini_Qdisc_pair *miniqp, struct Qdisc *qdisc,
struct mini_Qdisc __rcu **p_miniq);
+static inline void skb_tc_reinsert(struct sk_buff *skb, struct tcf_result *res)
+{
+ struct gnet_stats_queue *stats = res->qstats;
+ int ret;
+
+ if (res->ingress)
+ ret = netif_receive_skb(skb);
+ else
+ ret = dev_queue_xmit(skb);
+ if (ret && stats)
+ qstats_overlimit_inc(res->qstats);
+}
+
#endif
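A hedged sketch of how a classifier's offload path might use the new tc_cls_offload_cnt_update() helper: the example_filter structure, its in_hw_count field and flags word are assumptions standing in for whatever state the real classifier keeps per filter.

/* Sketch: keep block->offloadcnt and the per-filter in-hw flag in sync
 * whenever one more (or one less) device accepts the rule.
 */
#include <net/sch_generic.h>

struct example_filter {
	unsigned int in_hw_count;	/* devices currently offloading this rule */
	u32 flags;			/* classifier's TCA_CLS_FLAGS_* word */
};

static void example_filter_hw_update(struct tcf_block *block,
				     struct example_filter *f, bool add)
{
	tc_cls_offload_cnt_update(block, &f->in_hw_count, &f->flags, add);
}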
diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
index 30b3e2fe240a..8c2caa370e0f 100644
--- a/include/net/sctp/sctp.h
+++ b/include/net/sctp/sctp.h
@@ -109,7 +109,8 @@ int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb);
int sctp_inet_listen(struct socket *sock, int backlog);
void sctp_write_space(struct sock *sk);
void sctp_data_ready(struct sock *sk);
-__poll_t sctp_poll_mask(struct socket *sock, __poll_t events);
+__poll_t sctp_poll(struct file *file, struct socket *sock,
+ poll_table *wait);
void sctp_sock_rfree(struct sk_buff *skb);
void sctp_copy_sock(struct sock *newsk, struct sock *sk,
struct sctp_association *asoc);
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
index dbe1b911a24d..28a7c8e44636 100644
--- a/include/net/sctp/structs.h
+++ b/include/net/sctp/structs.h
@@ -48,7 +48,7 @@
#define __sctp_structs_h__
#include <linux/ktime.h>
-#include <linux/rhashtable.h>
+#include <linux/rhashtable-types.h>
#include <linux/socket.h> /* linux/in.h needs this!! */
#include <linux/in.h> /* We get struct sockaddr_in. */
#include <linux/in6.h> /* We get struct in6_addr */
@@ -57,6 +57,7 @@
#include <linux/atomic.h> /* This gets us atomic counters. */
#include <linux/skbuff.h> /* We need sk_buff_head. */
#include <linux/workqueue.h> /* We need tq_struct. */
+#include <linux/flex_array.h> /* We need flex_array. */
#include <linux/sctp.h> /* We need sctp* header structs. */
#include <net/sctp/auth.h> /* We need auth specific structs */
#include <net/ip.h> /* For inet_skb_parm */
@@ -193,6 +194,9 @@ struct sctp_sock {
/* This is the max_retrans value for new associations. */
__u16 pathmaxrxt;
+ __u32 flowlabel;
+ __u8 dscp;
+
/* The initial Path MTU to use for new associations. */
__u32 pathmtu;
@@ -220,6 +224,7 @@ struct sctp_sock {
__u32 adaptation_ind;
__u32 pd_point;
__u16 nodelay:1,
+ reuse:1,
disable_fragments:1,
v4mapped:1,
frag_interleave:1,
@@ -394,37 +399,35 @@ void sctp_stream_update(struct sctp_stream *stream, struct sctp_stream *new);
/* What is the current SSN number for this stream? */
#define sctp_ssn_peek(stream, type, sid) \
- ((stream)->type[sid].ssn)
+ (sctp_stream_##type((stream), (sid))->ssn)
/* Return the next SSN number for this stream. */
#define sctp_ssn_next(stream, type, sid) \
- ((stream)->type[sid].ssn++)
+ (sctp_stream_##type((stream), (sid))->ssn++)
/* Skip over this ssn and all below. */
#define sctp_ssn_skip(stream, type, sid, ssn) \
- ((stream)->type[sid].ssn = ssn + 1)
+ (sctp_stream_##type((stream), (sid))->ssn = ssn + 1)
/* What is the current MID number for this stream? */
#define sctp_mid_peek(stream, type, sid) \
- ((stream)->type[sid].mid)
+ (sctp_stream_##type((stream), (sid))->mid)
/* Return the next MID number for this stream. */
#define sctp_mid_next(stream, type, sid) \
- ((stream)->type[sid].mid++)
+ (sctp_stream_##type((stream), (sid))->mid++)
/* Skip over this mid and all below. */
#define sctp_mid_skip(stream, type, sid, mid) \
- ((stream)->type[sid].mid = mid + 1)
-
-#define sctp_stream_in(asoc, sid) (&(asoc)->stream.in[sid])
+ (sctp_stream_##type((stream), (sid))->mid = mid + 1)
/* What is the current MID_uo number for this stream? */
#define sctp_mid_uo_peek(stream, type, sid) \
- ((stream)->type[sid].mid_uo)
+ (sctp_stream_##type((stream), (sid))->mid_uo)
/* Return the next MID_uo number for this stream. */
#define sctp_mid_uo_next(stream, type, sid) \
- ((stream)->type[sid].mid_uo++)
+ (sctp_stream_##type((stream), (sid))->mid_uo++)
/*
* Pointers to address related SCTP functions.
@@ -894,6 +897,9 @@ struct sctp_transport {
*/
__u16 pathmaxrxt;
+ __u32 flowlabel;
+ __u8 dscp;
+
/* This is the partially failed retrans value for the transport
* and will be initialized from the assocs value. This can be changed
* using the SCTP_PEER_ADDR_THLDS socket option
@@ -1433,8 +1439,8 @@ struct sctp_stream_in {
};
struct sctp_stream {
- struct sctp_stream_out *out;
- struct sctp_stream_in *in;
+ struct flex_array *out;
+ struct flex_array *in;
__u16 outcnt;
__u16 incnt;
/* Current stream being sent, if any */
@@ -1456,6 +1462,23 @@ struct sctp_stream {
struct sctp_stream_interleave *si;
};
+static inline struct sctp_stream_out *sctp_stream_out(
+ const struct sctp_stream *stream,
+ __u16 sid)
+{
+ return flex_array_get(stream->out, sid);
+}
+
+static inline struct sctp_stream_in *sctp_stream_in(
+ const struct sctp_stream *stream,
+ __u16 sid)
+{
+ return flex_array_get(stream->in, sid);
+}
+
+#define SCTP_SO(s, i) sctp_stream_out((s), (i))
+#define SCTP_SI(s, i) sctp_stream_in((s), (i))
+
#define SCTP_STREAM_CLOSED 0x00
#define SCTP_STREAM_OPEN 0x01
@@ -1771,6 +1794,9 @@ struct sctp_association {
*/
__u16 pathmaxrxt;
+ __u32 flowlabel;
+ __u8 dscp;
+
/* Flag that path mtu update is pending */
__u8 pmtu_pending;
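With the stream arrays converted to flex_array, per-stream state is reached through the new sctp_stream_out()/sctp_stream_in() accessors (or the SCTP_SO()/SCTP_SI() shorthands) rather than direct indexing, and the existing sctp_ssn_*/sctp_mid_* macros now expand to them. A minimal sketch; the stream pointer and sid are assumed to come from the caller.

/* Sketch: fetch per-stream state through the flex_array accessors. */
#include <net/sctp/structs.h>

static struct sctp_stream_in *example_stream_in(struct sctp_stream *stream,
						__u16 sid)
{
	/* replaces the old &stream->in[sid] direct indexing */
	return SCTP_SI(stream, sid);
}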
diff --git a/include/net/seg6.h b/include/net/seg6.h
index e029e301faa5..2567941a2f32 100644
--- a/include/net/seg6.h
+++ b/include/net/seg6.h
@@ -18,7 +18,7 @@
#include <linux/ipv6.h>
#include <net/lwtunnel.h>
#include <linux/seg6.h>
-#include <linux/rhashtable.h>
+#include <linux/rhashtable-types.h>
static inline void update_csum_diff4(struct sk_buff *skb, __be32 from,
__be32 to)
diff --git a/include/net/seg6_hmac.h b/include/net/seg6_hmac.h
index 69c3a106056b..7fda469e2758 100644
--- a/include/net/seg6_hmac.h
+++ b/include/net/seg6_hmac.h
@@ -22,7 +22,7 @@
#include <linux/route.h>
#include <net/seg6.h>
#include <linux/seg6_hmac.h>
-#include <linux/rhashtable.h>
+#include <linux/rhashtable-types.h>
#define SEG6_HMAC_MAX_DIGESTSIZE 160
#define SEG6_HMAC_RING_SIZE 256
diff --git a/include/net/seg6_local.h b/include/net/seg6_local.h
index 661fd5b4d3e0..08359e2d8b35 100644
--- a/include/net/seg6_local.h
+++ b/include/net/seg6_local.h
@@ -21,10 +21,12 @@
extern int seg6_lookup_nexthop(struct sk_buff *skb, struct in6_addr *nhaddr,
u32 tbl_id);
+extern bool seg6_bpf_has_valid_srh(struct sk_buff *skb);
struct seg6_bpf_srh_state {
- bool valid;
+ struct ipv6_sr_hdr *srh;
u16 hdrlen;
+ bool valid;
};
DECLARE_PER_CPU(struct seg6_bpf_srh_state, seg6_bpf_srh_states);
diff --git a/include/net/smc.h b/include/net/smc.h
index 8381d163fefa..9ef49f8b1002 100644
--- a/include/net/smc.h
+++ b/include/net/smc.h
@@ -11,6 +11,8 @@
#ifndef _SMC_H
#define _SMC_H
+#define SMC_MAX_PNETID_LEN 16 /* Max. length of PNET id */
+
struct smc_hashinfo {
rwlock_t lock;
struct hlist_head ht;
@@ -18,4 +20,67 @@ struct smc_hashinfo {
int smc_hash_sk(struct sock *sk);
void smc_unhash_sk(struct sock *sk);
+
+/* SMCD/ISM device driver interface */
+struct smcd_dmb {
+ u64 dmb_tok;
+ u64 rgid;
+ u32 dmb_len;
+ u32 sba_idx;
+ u32 vlan_valid;
+ u32 vlan_id;
+ void *cpu_addr;
+ dma_addr_t dma_addr;
+};
+
+#define ISM_EVENT_DMB 0
+#define ISM_EVENT_GID 1
+#define ISM_EVENT_SWR 2
+
+struct smcd_event {
+ u32 type;
+ u32 code;
+ u64 tok;
+ u64 time;
+ u64 info;
+};
+
+struct smcd_dev;
+
+struct smcd_ops {
+ int (*query_remote_gid)(struct smcd_dev *dev, u64 rgid, u32 vid_valid,
+ u32 vid);
+ int (*register_dmb)(struct smcd_dev *dev, struct smcd_dmb *dmb);
+ int (*unregister_dmb)(struct smcd_dev *dev, struct smcd_dmb *dmb);
+ int (*add_vlan_id)(struct smcd_dev *dev, u64 vlan_id);
+ int (*del_vlan_id)(struct smcd_dev *dev, u64 vlan_id);
+ int (*set_vlan_required)(struct smcd_dev *dev);
+ int (*reset_vlan_required)(struct smcd_dev *dev);
+ int (*signal_event)(struct smcd_dev *dev, u64 rgid, u32 trigger_irq,
+ u32 event_code, u64 info);
+ int (*move_data)(struct smcd_dev *dev, u64 dmb_tok, unsigned int idx,
+ bool sf, unsigned int offset, void *data,
+ unsigned int size);
+};
+
+struct smcd_dev {
+ const struct smcd_ops *ops;
+ struct device dev;
+ void *priv;
+ u64 local_gid;
+ struct list_head list;
+ spinlock_t lock;
+ struct smc_connection **conn;
+ struct list_head vlan;
+ struct workqueue_struct *event_wq;
+ u8 pnetid[SMC_MAX_PNETID_LEN];
+};
+
+struct smcd_dev *smcd_alloc_dev(struct device *parent, const char *name,
+ const struct smcd_ops *ops, int max_dmbs);
+int smcd_register_dev(struct smcd_dev *smcd);
+void smcd_unregister_dev(struct smcd_dev *smcd);
+void smcd_free_dev(struct smcd_dev *smcd);
+void smcd_handle_event(struct smcd_dev *dev, struct smcd_event *event);
+void smcd_handle_irq(struct smcd_dev *dev, unsigned int bit);
#endif /* _SMC_H */
diff --git a/include/net/sock.h b/include/net/sock.h
index b3b75419eafe..433f45fc2d68 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -139,6 +139,7 @@ typedef __u64 __bitwise __addrpair;
* @skc_node: main hash linkage for various protocol lookup tables
* @skc_nulls_node: main hash linkage for TCP/UDP/UDP-Lite protocol
* @skc_tx_queue_mapping: tx queue number for this connection
+ * @skc_rx_queue_mapping: rx queue number for this connection
* @skc_flags: place holder for sk_flags
* %SO_LINGER (l_onoff), %SO_BROADCAST, %SO_KEEPALIVE,
* %SO_OOBINLINE settings, %SO_TIMESTAMPING settings
@@ -214,7 +215,10 @@ struct sock_common {
struct hlist_node skc_node;
struct hlist_nulls_node skc_nulls_node;
};
- int skc_tx_queue_mapping;
+ unsigned short skc_tx_queue_mapping;
+#ifdef CONFIG_XPS
+ unsigned short skc_rx_queue_mapping;
+#endif
union {
int skc_incoming_cpu;
u32 skc_rcv_wnd;
@@ -315,6 +319,9 @@ struct sock_common {
* @sk_destruct: called at sock freeing time, i.e. when all refcnt == 0
* @sk_reuseport_cb: reuseport group container
* @sk_rcu: used during RCU grace period
+ * @sk_clockid: clockid used by time-based scheduling (SO_TXTIME)
+ * @sk_txtime_deadline_mode: set deadline mode for SO_TXTIME
+ * @sk_txtime_unused: unused txtime flags
*/
struct sock {
/*
@@ -326,6 +333,9 @@ struct sock {
#define sk_nulls_node __sk_common.skc_nulls_node
#define sk_refcnt __sk_common.skc_refcnt
#define sk_tx_queue_mapping __sk_common.skc_tx_queue_mapping
+#ifdef CONFIG_XPS
+#define sk_rx_queue_mapping __sk_common.skc_rx_queue_mapping
+#endif
#define sk_dontcopy_begin __sk_common.skc_dontcopy_begin
#define sk_dontcopy_end __sk_common.skc_dontcopy_end
@@ -468,6 +478,12 @@ struct sock {
u8 sk_shutdown;
u32 sk_tskey;
atomic_t sk_zckey;
+
+ u8 sk_clockid;
+ u8 sk_txtime_deadline_mode : 1,
+ sk_txtime_report_errors : 1,
+ sk_txtime_unused : 6;
+
struct socket *sk_socket;
void *sk_user_data;
#ifdef CONFIG_SECURITY
@@ -783,6 +799,7 @@ enum sock_flags {
SOCK_FILTER_LOCKED, /* Filter cannot be changed anymore */
SOCK_SELECT_ERR_QUEUE, /* Wake select on error queue */
SOCK_RCU_FREE, /* wait rcu grace period in sk_destruct() */
+ SOCK_TXTIME,
};
#define SK_FLAGS_TIMESTAMP ((1UL << SOCK_TIMESTAMP) | (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE))
@@ -1578,10 +1595,17 @@ void sock_kzfree_s(struct sock *sk, void *mem, int size);
void sk_send_sigurg(struct sock *sk);
struct sockcm_cookie {
+ u64 transmit_time;
u32 mark;
u16 tsflags;
};
+static inline void sockcm_init(struct sockcm_cookie *sockc,
+ const struct sock *sk)
+{
+ *sockc = (struct sockcm_cookie) { .tsflags = sk->sk_tsflags };
+}
+
int __sock_cmsg_send(struct sock *sk, struct msghdr *msg, struct cmsghdr *cmsg,
struct sockcm_cookie *sockc);
int sock_cmsg_send(struct sock *sk, struct msghdr *msg,
@@ -1681,19 +1705,58 @@ static inline int sk_receive_skb(struct sock *sk, struct sk_buff *skb,
static inline void sk_tx_queue_set(struct sock *sk, int tx_queue)
{
+ /* sk_tx_queue_mapping accept only upto a 16-bit value */
+ if (WARN_ON_ONCE((unsigned short)tx_queue >= USHRT_MAX))
+ return;
sk->sk_tx_queue_mapping = tx_queue;
}
+#define NO_QUEUE_MAPPING USHRT_MAX
+
static inline void sk_tx_queue_clear(struct sock *sk)
{
- sk->sk_tx_queue_mapping = -1;
+ sk->sk_tx_queue_mapping = NO_QUEUE_MAPPING;
}
static inline int sk_tx_queue_get(const struct sock *sk)
{
- return sk ? sk->sk_tx_queue_mapping : -1;
+ if (sk && sk->sk_tx_queue_mapping != NO_QUEUE_MAPPING)
+ return sk->sk_tx_queue_mapping;
+
+ return -1;
}
+static inline void sk_rx_queue_set(struct sock *sk, const struct sk_buff *skb)
+{
+#ifdef CONFIG_XPS
+ if (skb_rx_queue_recorded(skb)) {
+ u16 rx_queue = skb_get_rx_queue(skb);
+
+ if (WARN_ON_ONCE(rx_queue == NO_QUEUE_MAPPING))
+ return;
+
+ sk->sk_rx_queue_mapping = rx_queue;
+ }
+#endif
+}
+
+static inline void sk_rx_queue_clear(struct sock *sk)
+{
+#ifdef CONFIG_XPS
+ sk->sk_rx_queue_mapping = NO_QUEUE_MAPPING;
+#endif
+}
+
+#ifdef CONFIG_XPS
+static inline int sk_rx_queue_get(const struct sock *sk)
+{
+ if (sk && sk->sk_rx_queue_mapping != NO_QUEUE_MAPPING)
+ return sk->sk_rx_queue_mapping;
+
+ return -1;
+}
+#endif
+
static inline void sk_set_socket(struct sock *sk, struct socket *sock)
{
sk_tx_queue_clear(sk);
@@ -1725,7 +1788,7 @@ static inline void sock_graft(struct sock *sk, struct socket *parent)
{
WARN_ON(parent->sk);
write_lock_bh(&sk->sk_callback_lock);
- sk->sk_wq = parent->wq;
+ rcu_assign_pointer(sk->sk_wq, parent->wq);
parent->sk = sk;
sk_set_socket(sk, parent);
sk->sk_uid = SOCK_INODE(parent)->i_uid;
@@ -1994,16 +2057,16 @@ static inline bool skwq_has_sleeper(struct socket_wq *wq)
/**
* sock_poll_wait - place memory barrier behind the poll_wait call.
* @filp: file
- * @wait_address: socket wait queue
* @p: poll_table
*
* See the comments in the wq_has_sleeper function.
*/
-static inline void sock_poll_wait(struct file *filp,
- wait_queue_head_t *wait_address, poll_table *p)
+static inline void sock_poll_wait(struct file *filp, poll_table *p)
{
- if (!poll_does_not_wait(p) && wait_address) {
- poll_wait(filp, wait_address, p);
+ struct socket *sock = filp->private_data;
+
+ if (!poll_does_not_wait(p)) {
+ poll_wait(filp, &sock->wq->wait, p);
/* We need to be sure we are in sync with the
* socket flags modification.
*
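The tx/rx queue mappings are now unsigned shorts with USHRT_MAX (NO_QUEUE_MAPPING) as the "unset" sentinel, while sk_tx_queue_get() keeps its -1 contract toward callers. A hedged sketch of a read-side consumer; the fallback value is an assumption, not something this header defines.

/* Sketch: take the socket's tx queue hint, treating "no mapping" as
 * "use the caller's fallback".
 */
#include <net/sock.h>

static u16 example_tx_queue_hint(const struct sock *sk, u16 fallback)
{
	int txq = sk_tx_queue_get(sk);	/* -1 when NO_QUEUE_MAPPING */

	return txq >= 0 ? (u16)txq : fallback;
}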
diff --git a/include/net/sock_reuseport.h b/include/net/sock_reuseport.h
index 0054b3a9b923..8a5f70c7cdf2 100644
--- a/include/net/sock_reuseport.h
+++ b/include/net/sock_reuseport.h
@@ -5,25 +5,36 @@
#include <linux/filter.h>
#include <linux/skbuff.h>
#include <linux/types.h>
+#include <linux/spinlock.h>
#include <net/sock.h>
+extern spinlock_t reuseport_lock;
+
struct sock_reuseport {
struct rcu_head rcu;
u16 max_socks; /* length of socks */
u16 num_socks; /* elements in socks */
+ /* The last synq overflow event timestamp of this
+ * reuse->socks[] group.
+ */
+ unsigned int synq_overflow_ts;
+ /* ID stays the same even after the size of socks[] grows. */
+ unsigned int reuseport_id;
+ bool bind_inany;
struct bpf_prog __rcu *prog; /* optional BPF sock selector */
struct sock *socks[0]; /* array of sock pointers */
};
-extern int reuseport_alloc(struct sock *sk);
-extern int reuseport_add_sock(struct sock *sk, struct sock *sk2);
+extern int reuseport_alloc(struct sock *sk, bool bind_inany);
+extern int reuseport_add_sock(struct sock *sk, struct sock *sk2,
+ bool bind_inany);
extern void reuseport_detach_sock(struct sock *sk);
extern struct sock *reuseport_select_sock(struct sock *sk,
u32 hash,
struct sk_buff *skb,
int hdr_len);
-extern struct bpf_prog *reuseport_attach_prog(struct sock *sk,
- struct bpf_prog *prog);
+extern int reuseport_attach_prog(struct sock *sk, struct bpf_prog *prog);
+int reuseport_get_id(struct sock_reuseport *reuse);
#endif /* _SOCK_REUSEPORT_H */
diff --git a/include/net/tc_act/tc_csum.h b/include/net/tc_act/tc_csum.h
index 9470fd7e4350..32d2454c0479 100644
--- a/include/net/tc_act/tc_csum.h
+++ b/include/net/tc_act/tc_csum.h
@@ -7,7 +7,6 @@
#include <linux/tc_act/tc_csum.h>
struct tcf_csum_params {
- int action;
u32 update_flags;
struct rcu_head rcu;
};
diff --git a/include/net/tc_act/tc_pedit.h b/include/net/tc_act/tc_pedit.h
index 227a6f1d02f4..fac3ad4a86de 100644
--- a/include/net/tc_act/tc_pedit.h
+++ b/include/net/tc_act/tc_pedit.h
@@ -17,6 +17,7 @@ struct tcf_pedit {
struct tc_pedit_key *tcfp_keys;
struct tcf_pedit_key_ex *tcfp_keys_ex;
};
+
#define to_pedit(a) ((struct tcf_pedit *)a)
static inline bool is_tcf_pedit(const struct tc_action *a)
diff --git a/include/net/tc_act/tc_skbedit.h b/include/net/tc_act/tc_skbedit.h
index 19cd3d345804..911bbac838a2 100644
--- a/include/net/tc_act/tc_skbedit.h
+++ b/include/net/tc_act/tc_skbedit.h
@@ -22,14 +22,19 @@
#include <net/act_api.h>
#include <linux/tc_act/tc_skbedit.h>
+struct tcf_skbedit_params {
+ u32 flags;
+ u32 priority;
+ u32 mark;
+ u32 mask;
+ u16 queue_mapping;
+ u16 ptype;
+ struct rcu_head rcu;
+};
+
struct tcf_skbedit {
- struct tc_action common;
- u32 flags;
- u32 priority;
- u32 mark;
- u32 mask;
- u16 queue_mapping;
- u16 ptype;
+ struct tc_action common;
+ struct tcf_skbedit_params __rcu *params;
};
#define to_skbedit(a) ((struct tcf_skbedit *)a)
@@ -37,15 +42,27 @@ struct tcf_skbedit {
static inline bool is_tcf_skbedit_mark(const struct tc_action *a)
{
#ifdef CONFIG_NET_CLS_ACT
- if (a->ops && a->ops->type == TCA_ACT_SKBEDIT)
- return to_skbedit(a)->flags == SKBEDIT_F_MARK;
+ u32 flags;
+
+ if (a->ops && a->ops->type == TCA_ACT_SKBEDIT) {
+ rcu_read_lock();
+ flags = rcu_dereference(to_skbedit(a)->params)->flags;
+ rcu_read_unlock();
+ return flags == SKBEDIT_F_MARK;
+ }
#endif
return false;
}
static inline u32 tcf_skbedit_mark(const struct tc_action *a)
{
- return to_skbedit(a)->mark;
+ u32 mark;
+
+ rcu_read_lock();
+ mark = rcu_dereference(to_skbedit(a)->params)->mark;
+ rcu_read_unlock();
+
+ return mark;
}
#endif /* __NET_TC_SKBEDIT_H */
diff --git a/include/net/tc_act/tc_tunnel_key.h b/include/net/tc_act/tc_tunnel_key.h
index efef0b4b1b2b..46b8c7f1c8d5 100644
--- a/include/net/tc_act/tc_tunnel_key.h
+++ b/include/net/tc_act/tc_tunnel_key.h
@@ -18,7 +18,6 @@
struct tcf_tunnel_key_params {
struct rcu_head rcu;
int tcft_action;
- int action;
struct metadata_dst *tcft_enc_metadata;
};
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 0448e7c5d2b4..d196901c9dba 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -36,6 +36,7 @@
#include <net/inet_hashtables.h>
#include <net/checksum.h>
#include <net/request_sock.h>
+#include <net/sock_reuseport.h>
#include <net/sock.h>
#include <net/snmp.h>
#include <net/ip.h>
@@ -342,6 +343,7 @@ ssize_t tcp_splice_read(struct socket *sk, loff_t *ppos,
struct pipe_inode_info *pipe, size_t len,
unsigned int flags);
+void tcp_enter_quickack_mode(struct sock *sk, unsigned int max_quickacks);
static inline void tcp_dec_quickack_mode(struct sock *sk,
const unsigned int pkts)
{
@@ -388,7 +390,8 @@ bool tcp_peer_is_proven(struct request_sock *req, struct dst_entry *dst);
void tcp_close(struct sock *sk, long timeout);
void tcp_init_sock(struct sock *sk);
void tcp_init_transfer(struct sock *sk, int bpf_op);
-__poll_t tcp_poll_mask(struct socket *sock, __poll_t events);
+__poll_t tcp_poll(struct file *file, struct socket *sock,
+ struct poll_table_struct *wait);
int tcp_getsockopt(struct sock *sk, int level, int optname,
char __user *optval, int __user *optlen);
int tcp_setsockopt(struct sock *sk, int level, int optname,
@@ -471,19 +474,45 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb);
*/
static inline void tcp_synq_overflow(const struct sock *sk)
{
- unsigned long last_overflow = tcp_sk(sk)->rx_opt.ts_recent_stamp;
- unsigned long now = jiffies;
+ unsigned int last_overflow;
+ unsigned int now = jiffies;
- if (time_after(now, last_overflow + HZ))
+ if (sk->sk_reuseport) {
+ struct sock_reuseport *reuse;
+
+ reuse = rcu_dereference(sk->sk_reuseport_cb);
+ if (likely(reuse)) {
+ last_overflow = READ_ONCE(reuse->synq_overflow_ts);
+ if (time_after32(now, last_overflow + HZ))
+ WRITE_ONCE(reuse->synq_overflow_ts, now);
+ return;
+ }
+ }
+
+ last_overflow = tcp_sk(sk)->rx_opt.ts_recent_stamp;
+ if (time_after32(now, last_overflow + HZ))
tcp_sk(sk)->rx_opt.ts_recent_stamp = now;
}
/* syncookies: no recent synqueue overflow on this listening socket? */
static inline bool tcp_synq_no_recent_overflow(const struct sock *sk)
{
- unsigned long last_overflow = tcp_sk(sk)->rx_opt.ts_recent_stamp;
+ unsigned int last_overflow;
+ unsigned int now = jiffies;
+
+ if (sk->sk_reuseport) {
+ struct sock_reuseport *reuse;
- return time_after(jiffies, last_overflow + TCP_SYNCOOKIE_VALID);
+ reuse = rcu_dereference(sk->sk_reuseport_cb);
+ if (likely(reuse)) {
+ last_overflow = READ_ONCE(reuse->synq_overflow_ts);
+ return time_after32(now, last_overflow +
+ TCP_SYNCOOKIE_VALID);
+ }
+ }
+
+ last_overflow = tcp_sk(sk)->rx_opt.ts_recent_stamp;
+ return time_after32(now, last_overflow + TCP_SYNCOOKIE_VALID);
}
static inline u32 tcp_cookie_time(void)
@@ -538,6 +567,7 @@ void tcp_send_fin(struct sock *sk);
void tcp_send_active_reset(struct sock *sk, gfp_t priority);
int tcp_send_synack(struct sock *);
void tcp_push_one(struct sock *, unsigned int mss_now);
+void __tcp_send_ack(struct sock *sk, u32 rcv_nxt);
void tcp_send_ack(struct sock *sk);
void tcp_send_delayed_ack(struct sock *sk);
void tcp_send_loss_probe(struct sock *sk);
@@ -827,6 +857,10 @@ struct tcp_skb_cb {
#define TCP_SKB_CB(__skb) ((struct tcp_skb_cb *)&((__skb)->cb[0]))
+static inline void bpf_compute_data_end_sk_skb(struct sk_buff *skb)
+{
+ TCP_SKB_CB(skb)->bpf.data_end = skb->data + skb_headlen(skb);
+}
#if IS_ENABLED(CONFIG_IPV6)
/* This is the variant of inet6_iif() that must be used by TCP,
@@ -834,6 +868,11 @@ struct tcp_skb_cb {
*/
static inline int tcp_v6_iif(const struct sk_buff *skb)
{
+ return TCP_SKB_CB(skb)->header.h6.iif;
+}
+
+static inline int tcp_v6_iif_l3_slave(const struct sk_buff *skb)
+{
bool l3_slave = ipv6_l3mdev_skb(TCP_SKB_CB(skb)->header.h6.flags);
return l3_slave ? skb->skb_iif : TCP_SKB_CB(skb)->header.h6.iif;
@@ -907,8 +946,6 @@ enum tcp_ca_event {
CA_EVENT_LOSS, /* loss timeout */
CA_EVENT_ECN_NO_CE, /* ECT set, but not CE marked */
CA_EVENT_ECN_IS_CE, /* received CE marked IP packet */
- CA_EVENT_DELAYED_ACK, /* Delayed ack is sent */
- CA_EVENT_NON_DELAYED_ACK,
};
/* Information about inbound ACK, passed to cong_ops->in_ack_event() */
@@ -953,6 +990,8 @@ struct rate_sample {
u32 prior_delivered; /* tp->delivered at "prior_mstamp" */
s32 delivered; /* number of packets delivered over interval */
long interval_us; /* time for tp->delivered to incr "delivered" */
+ u32 snd_interval_us; /* snd interval for delivered packets */
+ u32 rcv_interval_us; /* rcv interval for delivered packets */
long rtt_us; /* RTT of last (S)ACKed packet (or -1) */
int losses; /* number of packets marked lost upon ACK */
u32 acked_sacked; /* number of packets newly (S)ACKed upon ACK */
@@ -1184,6 +1223,17 @@ static inline bool tcp_is_cwnd_limited(const struct sock *sk)
return tp->is_cwnd_limited;
}
+/* BBR congestion control needs pacing.
+ * Same remark for SO_MAX_PACING_RATE.
+ * sch_fq packet scheduler is efficiently handling pacing,
+ * but is not always installed/used.
+ * Return true if TCP stack should pace packets itself.
+ */
+static inline bool tcp_needs_internal_pacing(const struct sock *sk)
+{
+ return smp_load_acquire(&sk->sk_pacing_status) == SK_PACING_NEEDED;
+}
+
/* Something is really bad, we could not queue an additional packet,
* because qdisc is full or receiver sent a 0 window.
* We do not want to add fuel to the fire, or abort too early,
@@ -1361,7 +1411,8 @@ static inline bool tcp_paws_check(const struct tcp_options_received *rx_opt,
{
if ((s32)(rx_opt->ts_recent - rx_opt->rcv_tsval) <= paws_win)
return true;
- if (unlikely(get_seconds() >= rx_opt->ts_recent_stamp + TCP_PAWS_24DAYS))
+ if (unlikely(!time_before32(ktime_get_seconds(),
+ rx_opt->ts_recent_stamp + TCP_PAWS_24DAYS)))
return true;
/*
* Some OSes send SYN and SYNACK messages with tsval=0 tsecr=0,
@@ -1391,7 +1442,8 @@ static inline bool tcp_paws_reject(const struct tcp_options_received *rx_opt,
However, we can relax time bounds for RST segments to MSL.
*/
- if (rst && get_seconds() >= rx_opt->ts_recent_stamp + TCP_PAWS_MSL)
+ if (rst && !time_before32(ktime_get_seconds(),
+ rx_opt->ts_recent_stamp + TCP_PAWS_MSL))
return false;
return true;
}
@@ -1777,7 +1829,7 @@ void tcp_v4_destroy_sock(struct sock *sk);
struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
netdev_features_t features);
-struct sk_buff **tcp_gro_receive(struct sk_buff **head, struct sk_buff *skb);
+struct sk_buff *tcp_gro_receive(struct list_head *head, struct sk_buff *skb);
int tcp_gro_complete(struct sk_buff *skb);
void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr, __be32 daddr);
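The syncookie overflow timestamp now lives in the shared reuseport group as a 32-bit field and is compared with time_after32(), the wrap-safe 32-bit variant. A minimal sketch of that comparison pattern, mirroring tcp_synq_no_recent_overflow() above; it is illustrative only, not part of this header.

/* Sketch: wrap-safe staleness check on a 32-bit jiffies snapshot. */
#include <net/tcp.h>
#include <linux/jiffies.h>

static bool example_overflow_is_recent(unsigned int last_overflow_ts)
{
	unsigned int now = jiffies;	/* deliberately truncated to 32 bits */

	/* true while still inside the TCP_SYNCOOKIE_VALID window, even if
	 * the 32-bit counter wrapped in between
	 */
	return !time_after32(now, last_overflow_ts + TCP_SYNCOOKIE_VALID);
}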
diff --git a/include/net/tls.h b/include/net/tls.h
index 7f84ea3e217c..d5c683e8bb22 100644
--- a/include/net/tls.h
+++ b/include/net/tls.h
@@ -83,6 +83,16 @@ struct tls_device {
void (*unhash)(struct tls_device *device, struct sock *sk);
};
+enum {
+ TLS_BASE,
+ TLS_SW,
+#ifdef CONFIG_TLS_DEVICE
+ TLS_HW,
+#endif
+ TLS_HW_RECORD,
+ TLS_NUM_CONFIG,
+};
+
struct tls_sw_context_tx {
struct crypto_aead *aead_send;
struct crypto_wait async_wait;
@@ -109,14 +119,11 @@ struct tls_sw_context_rx {
struct strparser strp;
void (*saved_data_ready)(struct sock *sk);
- __poll_t (*sk_poll_mask)(struct socket *sock, __poll_t events);
+ unsigned int (*sk_poll)(struct file *file, struct socket *sock,
+ struct poll_table_struct *wait);
struct sk_buff *recv_pkt;
u8 control;
bool decrypted;
-
- char rx_aad_ciphertext[TLS_AAD_SPACE_SIZE];
- char rx_aad_plaintext[TLS_AAD_SPACE_SIZE];
-
};
struct tls_record_info {
@@ -127,7 +134,7 @@ struct tls_record_info {
skb_frag_t frags[MAX_SKB_FRAGS];
};
-struct tls_offload_context {
+struct tls_offload_context_tx {
struct crypto_aead *aead_send;
spinlock_t lock; /* protects records list */
struct list_head records_list;
@@ -146,8 +153,8 @@ struct tls_offload_context {
#define TLS_DRIVER_STATE_SIZE (max_t(size_t, 8, sizeof(void *)))
};
-#define TLS_OFFLOAD_CONTEXT_SIZE \
- (ALIGN(sizeof(struct tls_offload_context), sizeof(void *)) + \
+#define TLS_OFFLOAD_CONTEXT_SIZE_TX \
+ (ALIGN(sizeof(struct tls_offload_context_tx), sizeof(void *)) + \
TLS_DRIVER_STATE_SIZE)
enum {
@@ -196,6 +203,7 @@ struct tls_context {
int (*push_pending_record)(struct sock *sk, int flags);
void (*sk_write_space)(struct sock *sk);
+ void (*sk_destruct)(struct sock *sk);
void (*sk_proto_close)(struct sock *sk, long timeout);
int (*setsockopt)(struct sock *sk, int level,
@@ -208,13 +216,27 @@ struct tls_context {
void (*unhash)(struct sock *sk);
};
+struct tls_offload_context_rx {
+ /* sw must be the first member of tls_offload_context_rx */
+ struct tls_sw_context_rx sw;
+ atomic64_t resync_req;
+ u8 driver_state[];
+ /* The TLS layer reserves room for driver specific state
+ * Currently the belief is that there is not enough
+ * driver specific state to justify another layer of indirection
+ */
+};
+
+#define TLS_OFFLOAD_CONTEXT_SIZE_RX \
+ (ALIGN(sizeof(struct tls_offload_context_rx), sizeof(void *)) + \
+ TLS_DRIVER_STATE_SIZE)
+
int wait_on_pending_writer(struct sock *sk, long *timeo);
int tls_sk_query(struct sock *sk, int optname, char __user *optval,
int __user *optlen);
int tls_sk_attach(struct sock *sk, int optname, char __user *optval,
unsigned int optlen);
-
int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx);
int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size);
int tls_sw_sendpage(struct sock *sk, struct page *page,
@@ -222,9 +244,11 @@ int tls_sw_sendpage(struct sock *sk, struct page *page,
void tls_sw_close(struct sock *sk, long timeout);
void tls_sw_free_resources_tx(struct sock *sk);
void tls_sw_free_resources_rx(struct sock *sk);
+void tls_sw_release_resources_rx(struct sock *sk);
int tls_sw_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
int nonblock, int flags, int *addr_len);
-__poll_t tls_sw_poll_mask(struct socket *sock, __poll_t events);
+unsigned int tls_sw_poll(struct file *file, struct socket *sock,
+ struct poll_table_struct *wait);
ssize_t tls_sw_splice_read(struct socket *sock, loff_t *ppos,
struct pipe_inode_info *pipe,
size_t len, unsigned int flags);
@@ -237,7 +261,7 @@ void tls_device_sk_destruct(struct sock *sk);
void tls_device_init(void);
void tls_device_cleanup(void);
-struct tls_record_info *tls_get_record(struct tls_offload_context *context,
+struct tls_record_info *tls_get_record(struct tls_offload_context_tx *context,
u32 seq, u64 *p_record_sn);
static inline bool tls_record_is_start_marker(struct tls_record_info *rec)
@@ -287,11 +311,19 @@ static inline bool tls_is_pending_open_record(struct tls_context *tls_ctx)
return tls_ctx->pending_open_record_frags;
}
+struct sk_buff *
+tls_validate_xmit_skb(struct sock *sk, struct net_device *dev,
+ struct sk_buff *skb);
+
static inline bool tls_is_sk_tx_device_offloaded(struct sock *sk)
{
- return sk_fullsock(sk) &&
- /* matches smp_store_release in tls_set_device_offload */
- smp_load_acquire(&sk->sk_destruct) == &tls_device_sk_destruct;
+#ifdef CONFIG_SOCK_VALIDATE_XMIT
+ return sk_fullsock(sk) &
+ (smp_load_acquire(&sk->sk_validate_xmit_skb) ==
+ &tls_validate_xmit_skb);
+#else
+ return false;
+#endif
}
static inline void tls_err_abort(struct sock *sk, int err)
@@ -378,23 +410,47 @@ static inline struct tls_sw_context_tx *tls_sw_ctx_tx(
return (struct tls_sw_context_tx *)tls_ctx->priv_ctx_tx;
}
-static inline struct tls_offload_context *tls_offload_ctx(
- const struct tls_context *tls_ctx)
+static inline struct tls_offload_context_tx *
+tls_offload_ctx_tx(const struct tls_context *tls_ctx)
+{
+ return (struct tls_offload_context_tx *)tls_ctx->priv_ctx_tx;
+}
+
+static inline struct tls_offload_context_rx *
+tls_offload_ctx_rx(const struct tls_context *tls_ctx)
{
- return (struct tls_offload_context *)tls_ctx->priv_ctx_tx;
+ return (struct tls_offload_context_rx *)tls_ctx->priv_ctx_rx;
}
+/* The TLS context is valid until sk_destruct is called */
+static inline void tls_offload_rx_resync_request(struct sock *sk, __be32 seq)
+{
+ struct tls_context *tls_ctx = tls_get_ctx(sk);
+ struct tls_offload_context_rx *rx_ctx = tls_offload_ctx_rx(tls_ctx);
+
+ atomic64_set(&rx_ctx->resync_req, ((((uint64_t)seq) << 32) | 1));
+}
+
+
int tls_proccess_cmsg(struct sock *sk, struct msghdr *msg,
unsigned char *record_type);
void tls_register_device(struct tls_device *device);
void tls_unregister_device(struct tls_device *device);
+int tls_device_decrypted(struct sock *sk, struct sk_buff *skb);
+int decrypt_skb(struct sock *sk, struct sk_buff *skb,
+ struct scatterlist *sgout);
struct sk_buff *tls_validate_xmit_skb(struct sock *sk,
struct net_device *dev,
struct sk_buff *skb);
int tls_sw_fallback_init(struct sock *sk,
- struct tls_offload_context *offload_ctx,
+ struct tls_offload_context_tx *offload_ctx,
struct tls_crypto_info *crypto_info);
+int tls_set_device_offload_rx(struct sock *sk, struct tls_context *ctx);
+
+void tls_device_offload_cleanup_rx(struct sock *sk);
+void handle_device_resync(struct sock *sk, u32 seq, u64 rcd_sn);
+
#endif /* _TLS_OFFLOAD_H */
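The new tls_offload_rx_resync_request() packs the driver-supplied TCP sequence number into the upper 32 bits of resync_req and uses bit 0 as a "resync requested" flag. A minimal sketch of how that encoding can be unpacked on the software RX side (helper name and control flow are illustrative, not the in-tree consumer):

#include <net/tls.h>

/* Sketch only: unpack the value written by tls_offload_rx_resync_request().
 * The real consumer lives in net/tls/.
 */
static void example_check_rx_resync(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_offload_context_rx *rx_ctx = tls_offload_ctx_rx(tls_ctx);
	u64 req = atomic64_read(&rx_ctx->resync_req);

	if (req & 1) {			/* bit 0: resync requested */
		u32 seq = req >> 32;	/* upper 32 bits: seq from the driver */

		atomic64_set(&rx_ctx->resync_req, 0);
		pr_debug("device requested TLS RX resync at seq %u\n", seq);
		/* resynchronize here, e.g. via handle_device_resync() */
	}
}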
diff --git a/include/net/transp_v6.h b/include/net/transp_v6.h
index f6a3543e5247..a8f6020f1196 100644
--- a/include/net/transp_v6.h
+++ b/include/net/transp_v6.h
@@ -42,8 +42,7 @@ void ip6_datagram_recv_specific_ctl(struct sock *sk, struct msghdr *msg,
struct sk_buff *skb);
int ip6_datagram_send_ctl(struct net *net, struct sock *sk, struct msghdr *msg,
- struct flowi6 *fl6, struct ipcm6_cookie *ipc6,
- struct sockcm_cookie *sockc);
+ struct flowi6 *fl6, struct ipcm6_cookie *ipc6);
void __ip6_dgram_sock_seq_show(struct seq_file *seq, struct sock *sp,
__u16 srcp, __u16 destp, int rqueue, int bucket);
diff --git a/include/net/udp.h b/include/net/udp.h
index b1ea8b0f5e6a..8482a990b0bb 100644
--- a/include/net/udp.h
+++ b/include/net/udp.h
@@ -170,8 +170,8 @@ static inline void udp_csum_pull_header(struct sk_buff *skb)
typedef struct sock *(*udp_lookup_t)(struct sk_buff *skb, __be16 sport,
__be16 dport);
-struct sk_buff **udp_gro_receive(struct sk_buff **head, struct sk_buff *skb,
- struct udphdr *uh, udp_lookup_t lookup);
+struct sk_buff *udp_gro_receive(struct list_head *head, struct sk_buff *skb,
+ struct udphdr *uh, udp_lookup_t lookup);
int udp_gro_complete(struct sk_buff *skb, int nhoff, udp_lookup_t lookup);
struct sk_buff *__udp_gso_segment(struct sk_buff *gso_skb,
@@ -285,7 +285,7 @@ int udp_init_sock(struct sock *sk);
int udp_pre_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len);
int __udp_disconnect(struct sock *sk, int flags);
int udp_disconnect(struct sock *sk, int flags);
-__poll_t udp_poll_mask(struct socket *sock, __poll_t events);
+__poll_t udp_poll(struct file *file, struct socket *sock, poll_table *wait);
struct sk_buff *skb_udp_tunnel_segment(struct sk_buff *skb,
netdev_features_t features,
bool is_ipv6);
diff --git a/include/net/udp_tunnel.h b/include/net/udp_tunnel.h
index b95a6927c718..fe680ab6b15a 100644
--- a/include/net/udp_tunnel.h
+++ b/include/net/udp_tunnel.h
@@ -65,9 +65,9 @@ static inline int udp_sock_create(struct net *net,
typedef int (*udp_tunnel_encap_rcv_t)(struct sock *sk, struct sk_buff *skb);
typedef void (*udp_tunnel_encap_destroy_t)(struct sock *sk);
-typedef struct sk_buff **(*udp_tunnel_gro_receive_t)(struct sock *sk,
- struct sk_buff **head,
- struct sk_buff *skb);
+typedef struct sk_buff *(*udp_tunnel_gro_receive_t)(struct sock *sk,
+ struct list_head *head,
+ struct sk_buff *skb);
typedef int (*udp_tunnel_gro_complete_t)(struct sock *sk, struct sk_buff *skb,
int nhoff);
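udp_gro_receive() and the udp_tunnel_gro_receive_t callback now take the GRO hold queue as a struct list_head instead of a struct sk_buff ** chain, and return the matched skb (or NULL) directly. A hedged sketch of a callback under the new signature; the flow-matching logic is a placeholder:

#include <linux/netdevice.h>
#include <net/udp_tunnel.h>

/* Sketch of a tunnel GRO receive callback using the list_head-based
 * hold queue; packets on the queue are walked via skb->list.
 */
static struct sk_buff *example_tunnel_gro_receive(struct sock *sk,
						  struct list_head *head,
						  struct sk_buff *skb)
{
	struct sk_buff *p;

	list_for_each_entry(p, head, list) {
		if (!NAPI_GRO_CB(p)->same_flow)
			continue;
		/* compare tunnel headers of 'p' and 'skb' here and clear
		 * NAPI_GRO_CB(p)->same_flow on a mismatch
		 */
	}

	return NULL;	/* NULL keeps 'skb' held for merging, as before */
}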
diff --git a/include/net/xdp.h b/include/net/xdp.h
index 2deea7166a34..76b95256c266 100644
--- a/include/net/xdp.h
+++ b/include/net/xdp.h
@@ -84,6 +84,13 @@ struct xdp_frame {
struct net_device *dev_rx; /* used by cpumap */
};
+/* Clear kernel pointers in xdp_frame */
+static inline void xdp_scrub_frame(struct xdp_frame *frame)
+{
+ frame->data = NULL;
+ frame->dev_rx = NULL;
+}
+
/* Convert xdp_buff to xdp_frame */
static inline
struct xdp_frame *convert_to_xdp_frame(struct xdp_buff *xdp)
@@ -144,4 +151,17 @@ xdp_data_meta_unsupported(const struct xdp_buff *xdp)
return unlikely(xdp->data_meta > xdp->data);
}
+struct xdp_attachment_info {
+ struct bpf_prog *prog;
+ u32 flags;
+};
+
+struct netdev_bpf;
+int xdp_attachment_query(struct xdp_attachment_info *info,
+ struct netdev_bpf *bpf);
+bool xdp_attachment_flags_ok(struct xdp_attachment_info *info,
+ struct netdev_bpf *bpf);
+void xdp_attachment_setup(struct xdp_attachment_info *info,
+ struct netdev_bpf *bpf);
+
#endif /* __LINUX_NET_XDP_H__ */
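xdp_attachment_query(), xdp_attachment_flags_ok() and xdp_attachment_setup() factor out the program/flags bookkeeping that drivers repeat in their ndo_bpf() handlers. A sketch of the intended call pattern, with a hypothetical driver private struct:

#include <linux/netdevice.h>
#include <net/xdp.h>

/* Hypothetical driver private data embedding the new bookkeeping struct. */
struct example_priv {
	struct xdp_attachment_info xdp;
};

/* Sketch of an ndo_bpf() handler built on the helpers. */
static int example_ndo_bpf(struct net_device *dev, struct netdev_bpf *bpf)
{
	struct example_priv *priv = netdev_priv(dev);

	switch (bpf->command) {
	case XDP_SETUP_PROG:
		if (!xdp_attachment_flags_ok(&priv->xdp, bpf))
			return -EBUSY;
		/* reprogram the datapath with bpf->prog here */
		xdp_attachment_setup(&priv->xdp, bpf);
		return 0;
	case XDP_QUERY_PROG:
		return xdp_attachment_query(&priv->xdp, bpf);
	default:
		return -EINVAL;
	}
}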
diff --git a/include/net/xdp_sock.h b/include/net/xdp_sock.h
index 9fe472f2ac95..7161856bcf9c 100644
--- a/include/net/xdp_sock.h
+++ b/include/net/xdp_sock.h
@@ -60,6 +60,10 @@ struct xdp_sock {
bool zc;
/* Protects multiple processes in the control path */
struct mutex mutex;
+ /* Mutual exclusion of NAPI TX thread and sendmsg error paths
+ * in the SKB destructor callback.
+ */
+ spinlock_t tx_completion_lock;
u64 rx_dropped;
};
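The new tx_completion_lock guards the AF_XDP completion ring against concurrent updates from the NAPI TX path and from skb destructors run on sendmsg error paths. A rough sketch of the destructor-side locking, assuming struct sock is the first member of struct xdp_sock; the ring-producer call is a made-up placeholder:

#include <net/sock.h>
#include <net/xdp_sock.h>

/* Rough sketch only; xsk_cq_produce() stands in for the code that pushes
 * the completed descriptor address onto the completion ring.
 */
static void example_xsk_destruct_skb(struct sk_buff *skb)
{
	struct xdp_sock *xs = (struct xdp_sock *)skb->sk;
	unsigned long flags;

	spin_lock_irqsave(&xs->tx_completion_lock, flags);
	/* xsk_cq_produce(xs, addr_of(skb)); */
	spin_unlock_irqrestore(&xs->tx_completion_lock, flags);

	sock_wfree(skb);
}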
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index 557122846e0e..0eb390c205af 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -23,6 +23,7 @@
#include <net/ipv6.h>
#include <net/ip6_fib.h>
#include <net/flow.h>
+#include <net/gro_cells.h>
#include <linux/interrupt.h>
@@ -147,6 +148,7 @@ struct xfrm_state {
struct xfrm_id id;
struct xfrm_selector sel;
struct xfrm_mark mark;
+ u32 if_id;
u32 tfcpad;
u32 genid;
@@ -166,7 +168,7 @@ struct xfrm_state {
int header_len;
int trailer_len;
u32 extra_flags;
- u32 output_mark;
+ struct xfrm_mark smark;
} props;
struct xfrm_lifetime_cfg lft;
@@ -225,7 +227,7 @@ struct xfrm_state {
long saved_tmo;
/* Last used time */
- unsigned long lastused;
+ time64_t lastused;
struct page_frag xfrag;
@@ -292,6 +294,13 @@ struct xfrm_replay {
int (*overflow)(struct xfrm_state *x, struct sk_buff *skb);
};
+struct xfrm_if_cb {
+ struct xfrm_if *(*decode_session)(struct sk_buff *skb);
+};
+
+void xfrm_if_register_cb(const struct xfrm_if_cb *ifcb);
+void xfrm_if_unregister_cb(void);
+
struct net_device;
struct xfrm_type;
struct xfrm_dst;
@@ -323,7 +332,6 @@ int xfrm_policy_register_afinfo(const struct xfrm_policy_afinfo *afinfo, int fam
void xfrm_policy_unregister_afinfo(const struct xfrm_policy_afinfo *afinfo);
void km_policy_notify(struct xfrm_policy *xp, int dir,
const struct km_event *c);
-void xfrm_policy_cache_flush(void);
void km_state_notify(struct xfrm_state *x, const struct km_event *c);
struct xfrm_tmpl;
@@ -574,6 +582,7 @@ struct xfrm_policy {
atomic_t genid;
u32 priority;
u32 index;
+ u32 if_id;
struct xfrm_mark mark;
struct xfrm_selector selector;
struct xfrm_lifetime_cfg lft;
@@ -735,7 +744,7 @@ static inline struct audit_buffer *xfrm_audit_start(const char *op)
{
struct audit_buffer *audit_buf = NULL;
- if (audit_enabled == 0)
+ if (audit_enabled == AUDIT_OFF)
return NULL;
audit_buf = audit_log_start(audit_context(), GFP_ATOMIC,
AUDIT_MAC_IPSEC_EVENT);
@@ -1037,6 +1046,22 @@ static inline void xfrm_dst_destroy(struct xfrm_dst *xdst)
void xfrm_dst_ifdown(struct dst_entry *dst, struct net_device *dev);
+struct xfrm_if_parms {
+ char name[IFNAMSIZ]; /* name of XFRM device */
+ int link; /* ifindex of underlying L2 interface */
+ u32 if_id; /* interface identifier */
+};
+
+struct xfrm_if {
+ struct xfrm_if __rcu *next; /* next interface in list */
+ struct net_device *dev; /* virtual device associated with interface */
+ struct net_device *phydev; /* physical device */
+ struct net *net; /* netns for packet i/o */
+ struct xfrm_if_parms p; /* interface parms */
+
+ struct gro_cells gro_cells;
+};
+
struct xfrm_offload {
/* Output sequence number for replay protection on offloading. */
struct {
@@ -1532,8 +1557,8 @@ struct xfrm_state *xfrm_state_find(const xfrm_address_t *daddr,
const struct flowi *fl,
struct xfrm_tmpl *tmpl,
struct xfrm_policy *pol, int *err,
- unsigned short family);
-struct xfrm_state *xfrm_stateonly_find(struct net *net, u32 mark,
+ unsigned short family, u32 if_id);
+struct xfrm_state *xfrm_stateonly_find(struct net *net, u32 mark, u32 if_id,
xfrm_address_t *daddr,
xfrm_address_t *saddr,
unsigned short family,
@@ -1690,20 +1715,20 @@ int xfrm_policy_walk(struct net *net, struct xfrm_policy_walk *walk,
void *);
void xfrm_policy_walk_done(struct xfrm_policy_walk *walk, struct net *net);
int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl);
-struct xfrm_policy *xfrm_policy_bysel_ctx(struct net *net, u32 mark,
+struct xfrm_policy *xfrm_policy_bysel_ctx(struct net *net, u32 mark, u32 if_id,
u8 type, int dir,
struct xfrm_selector *sel,
struct xfrm_sec_ctx *ctx, int delete,
int *err);
-struct xfrm_policy *xfrm_policy_byid(struct net *net, u32 mark, u8, int dir,
- u32 id, int delete, int *err);
+struct xfrm_policy *xfrm_policy_byid(struct net *net, u32 mark, u32 if_id, u8,
+ int dir, u32 id, int delete, int *err);
int xfrm_policy_flush(struct net *net, u8 type, bool task_valid);
void xfrm_policy_hash_rebuild(struct net *net);
u32 xfrm_get_acqseq(void);
int verify_spi_info(u8 proto, u32 min, u32 max);
int xfrm_alloc_spi(struct xfrm_state *x, u32 minspi, u32 maxspi);
struct xfrm_state *xfrm_find_acq(struct net *net, const struct xfrm_mark *mark,
- u8 mode, u32 reqid, u8 proto,
+ u8 mode, u32 reqid, u32 if_id, u8 proto,
const xfrm_address_t *daddr,
const xfrm_address_t *saddr, int create,
unsigned short family);
@@ -2012,6 +2037,22 @@ static inline int xfrm_mark_put(struct sk_buff *skb, const struct xfrm_mark *m)
return ret;
}
+static inline __u32 xfrm_smark_get(__u32 mark, struct xfrm_state *x)
+{
+ struct xfrm_mark *m = &x->props.smark;
+
+ return (m->v & m->m) | (mark & ~m->m);
+}
+
+static inline int xfrm_if_id_put(struct sk_buff *skb, __u32 if_id)
+{
+ int ret = 0;
+
+ if (if_id)
+ ret = nla_put_u32(skb, XFRMA_IF_ID, if_id);
+ return ret;
+}
+
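xfrm_smark_get() replaces props.output_mark with a masked merge: bits covered by the state's mask come from the state's smark value and the rest are kept from the caller's mark. A small usage sketch for the output path, with invented values shown in the comment:

#include <net/xfrm.h>

/* With v = m = 0x0000ff00 and skb->mark = 0x12345678 the result is
 * (v & m) | (mark & ~m) = 0x0000ff00 | 0x12340078 = 0x1234ff78.
 */
static u32 example_output_mark(const struct sk_buff *skb, struct xfrm_state *x)
{
	/* typically used to pick the mark for the post-transform route lookup */
	return xfrm_smark_get(skb->mark, x);
}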
static inline int xfrm_tunnel_check(struct sk_buff *skb, struct xfrm_state *x,
unsigned int family)
{
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index 4c6241bc2039..6c003995347a 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -3391,11 +3391,14 @@ int ib_process_cq_direct(struct ib_cq *cq, int budget);
*
* Users can examine the cq structure to determine the actual CQ size.
*/
-struct ib_cq *ib_create_cq(struct ib_device *device,
- ib_comp_handler comp_handler,
- void (*event_handler)(struct ib_event *, void *),
- void *cq_context,
- const struct ib_cq_init_attr *cq_attr);
+struct ib_cq *__ib_create_cq(struct ib_device *device,
+ ib_comp_handler comp_handler,
+ void (*event_handler)(struct ib_event *, void *),
+ void *cq_context,
+ const struct ib_cq_init_attr *cq_attr,
+ const char *caller);
+#define ib_create_cq(device, cmp_hndlr, evt_hndlr, cq_ctxt, cq_attr) \
+ __ib_create_cq((device), (cmp_hndlr), (evt_hndlr), (cq_ctxt), (cq_attr), KBUILD_MODNAME)
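ib_create_cq() becomes a macro over __ib_create_cq() so the core can record which module (KBUILD_MODNAME) owns the CQ, in the same style as other tracked verbs wrappers; existing callers need no change. A brief sketch:

#include <rdma/ib_verbs.h>

/* Caller-side sketch; the macro transparently appends KBUILD_MODNAME. */
static struct ib_cq *example_create_cq(struct ib_device *device, int cqe,
				       ib_comp_handler handler, void *ctx)
{
	struct ib_cq_init_attr attr = { .cqe = cqe, .comp_vector = 0 };

	return ib_create_cq(device, handler, NULL, ctx, &attr);
}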
/**
* ib_resize_cq - Modifies the capacity of the CQ.
diff --git a/include/scsi/libsas.h b/include/scsi/libsas.h
index 225ab7783dfd..3de3b10da19a 100644
--- a/include/scsi/libsas.h
+++ b/include/scsi/libsas.h
@@ -161,7 +161,7 @@ struct sata_device {
u8 port_no; /* port number, if this is a PM (Port) */
struct ata_port *ap;
- struct ata_host ata_host;
+ struct ata_host *ata_host;
struct smp_resp rps_resp ____cacheline_aligned; /* report_phy_sata_resp */
u8 fis[ATA_RESP_FIS_SIZE];
};
diff --git a/include/scsi/scsi_cmnd.h b/include/scsi/scsi_cmnd.h
index aaf1e971c6a3..c891ada3c5c2 100644
--- a/include/scsi/scsi_cmnd.h
+++ b/include/scsi/scsi_cmnd.h
@@ -4,6 +4,7 @@
#include <linux/dma-mapping.h>
#include <linux/blkdev.h>
+#include <linux/t10-pi.h>
#include <linux/list.h>
#include <linux/types.h>
#include <linux/timer.h>
@@ -14,8 +15,6 @@
struct Scsi_Host;
struct scsi_driver;
-#include <scsi/scsi_device.h>
-
/*
* MAX_COMMAND_SIZE is:
* The longest fixed-length SCSI CDB as per the SCSI standard.
@@ -120,11 +119,11 @@ struct scsi_cmnd {
struct request *request; /* The command we are
working on */
-#define SCSI_SENSE_BUFFERSIZE 96
unsigned char *sense_buffer;
/* obtained by REQUEST SENSE when
* CHECK CONDITION is received on original
- * command (auto-sense) */
+ * command (auto-sense). Length must be
+ * SCSI_SENSE_BUFFERSIZE bytes. */
/* Low-level done function - can be used by low-level driver to point
* to completion function. Not used by mid/upper level code. */
@@ -313,12 +312,6 @@ static inline unsigned int scsi_prot_interval(struct scsi_cmnd *scmd)
return scmd->device->sector_size;
}
-static inline u32 scsi_prot_ref_tag(struct scsi_cmnd *scmd)
-{
- return blk_rq_pos(scmd->request) >>
- (ilog2(scsi_prot_interval(scmd)) - 9) & 0xffffffff;
-}
-
static inline unsigned scsi_prot_sg_count(struct scsi_cmnd *cmd)
{
return cmd->prot_sdb ? cmd->prot_sdb->table.nents : 0;
diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
index 4c36af6edd79..202f4d6a4342 100644
--- a/include/scsi/scsi_device.h
+++ b/include/scsi/scsi_device.h
@@ -17,6 +17,8 @@ struct scsi_sense_hdr;
typedef __u64 __bitwise blist_flags_t;
+#define SCSI_SENSE_BUFFERSIZE 96
+
struct scsi_mode_data {
__u32 length;
__u16 block_descriptor_length;
@@ -426,11 +428,21 @@ extern const char *scsi_device_state_name(enum scsi_device_state);
extern int scsi_is_sdev_device(const struct device *);
extern int scsi_is_target_device(const struct device *);
extern void scsi_sanitize_inquiry_string(unsigned char *s, int len);
-extern int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
+extern int __scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
int data_direction, void *buffer, unsigned bufflen,
unsigned char *sense, struct scsi_sense_hdr *sshdr,
int timeout, int retries, u64 flags,
req_flags_t rq_flags, int *resid);
+/* Make sure any sense buffer is the correct size. */
+#define scsi_execute(sdev, cmd, data_direction, buffer, bufflen, sense, \
+ sshdr, timeout, retries, flags, rq_flags, resid) \
+({ \
+ BUILD_BUG_ON((sense) != NULL && \
+ sizeof(sense) != SCSI_SENSE_BUFFERSIZE); \
+ __scsi_execute(sdev, cmd, data_direction, buffer, bufflen, \
+ sense, sshdr, timeout, retries, flags, rq_flags, \
+ resid); \
+})
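Because sizeof() is evaluated on the caller's sense argument, the BUILD_BUG_ON() only accepts NULL or an array of exactly SCSI_SENSE_BUFFERSIZE bytes; an arbitrarily sized buffer or a plain pointer now fails at compile time. A sketch of a conforming caller (the command and timeouts are just example values):

#include <linux/dma-mapping.h>
#include <scsi/scsi_proto.h>
#include <scsi/scsi_common.h>
#include <scsi/scsi_device.h>

/* Conforming caller: sense is an array of SCSI_SENSE_BUFFERSIZE bytes. */
static int example_test_unit_ready(struct scsi_device *sdev)
{
	unsigned char cmd[6] = { TEST_UNIT_READY };
	unsigned char sense[SCSI_SENSE_BUFFERSIZE];
	struct scsi_sense_hdr sshdr;

	return scsi_execute(sdev, cmd, DMA_NONE, NULL, 0, sense, &sshdr,
			    30 * HZ, 3, 0, 0, NULL);
}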
static inline int scsi_execute_req(struct scsi_device *sdev,
const unsigned char *cmd, int data_direction, void *buffer,
unsigned bufflen, struct scsi_sense_hdr *sshdr, int timeout,
diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h
index 53b485fe9b67..5ea06d310a25 100644
--- a/include/scsi/scsi_host.h
+++ b/include/scsi/scsi_host.h
@@ -758,6 +758,7 @@ extern void scsi_scan_host(struct Scsi_Host *);
extern void scsi_rescan_device(struct device *);
extern void scsi_remove_host(struct Scsi_Host *);
extern struct Scsi_Host *scsi_host_get(struct Scsi_Host *);
+extern int scsi_host_busy(struct Scsi_Host *shost);
extern void scsi_host_put(struct Scsi_Host *t);
extern struct Scsi_Host *scsi_host_lookup(unsigned short);
extern const char *scsi_host_state_name(enum scsi_host_state);
diff --git a/include/soc/qcom/rpmh.h b/include/soc/qcom/rpmh.h
new file mode 100644
index 000000000000..619e07c75da9
--- /dev/null
+++ b/include/soc/qcom/rpmh.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef __SOC_QCOM_RPMH_H__
+#define __SOC_QCOM_RPMH_H__
+
+#include <soc/qcom/tcs.h>
+#include <linux/platform_device.h>
+
+
+#if IS_ENABLED(CONFIG_QCOM_RPMH)
+int rpmh_write(const struct device *dev, enum rpmh_state state,
+ const struct tcs_cmd *cmd, u32 n);
+
+int rpmh_write_async(const struct device *dev, enum rpmh_state state,
+ const struct tcs_cmd *cmd, u32 n);
+
+int rpmh_write_batch(const struct device *dev, enum rpmh_state state,
+ const struct tcs_cmd *cmd, u32 *n);
+
+int rpmh_flush(const struct device *dev);
+
+int rpmh_invalidate(const struct device *dev);
+
+#else
+
+static inline int rpmh_write(const struct device *dev, enum rpmh_state state,
+ const struct tcs_cmd *cmd, u32 n)
+{ return -ENODEV; }
+
+static inline int rpmh_write_async(const struct device *dev,
+ enum rpmh_state state,
+ const struct tcs_cmd *cmd, u32 n)
+{ return -ENODEV; }
+
+static inline int rpmh_write_batch(const struct device *dev,
+ enum rpmh_state state,
+ const struct tcs_cmd *cmd, u32 *n)
+{ return -ENODEV; }
+
+static inline int rpmh_flush(const struct device *dev)
+{ return -ENODEV; }
+
+static inline int rpmh_invalidate(const struct device *dev)
+{ return -ENODEV; }
+
+#endif /* CONFIG_QCOM_RPMH */
+
+#endif /* __SOC_QCOM_RPMH_H__ */
diff --git a/include/soc/qcom/tcs.h b/include/soc/qcom/tcs.h
new file mode 100644
index 000000000000..262876a59e86
--- /dev/null
+++ b/include/soc/qcom/tcs.h
@@ -0,0 +1,56 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef __SOC_QCOM_TCS_H__
+#define __SOC_QCOM_TCS_H__
+
+#define MAX_RPMH_PAYLOAD 16
+
+/**
+ * rpmh_state: state for the request
+ *
+ * RPMH_SLEEP_STATE: State of the resource when the processor subsystem
+ * is powered down. There is no client using the
+ * resource actively.
+ * RPMH_WAKE_ONLY_STATE: Resume resource state to the value previously
+ * requested before the processor was powered down.
+ * RPMH_ACTIVE_ONLY_STATE: Active or AMC mode requests. Resource state
+ * is aggregated immediately.
+ */
+enum rpmh_state {
+ RPMH_SLEEP_STATE,
+ RPMH_WAKE_ONLY_STATE,
+ RPMH_ACTIVE_ONLY_STATE,
+};
+
+/**
+ * struct tcs_cmd: an individual request to RPMH.
+ *
+ * @addr: the address of the resource slv_id:18:16 | offset:0:15
+ * @data: the resource state request
+ * @wait: wait for this request to be complete before sending the next
+ */
+struct tcs_cmd {
+ u32 addr;
+ u32 data;
+ u32 wait;
+};
+
+/**
+ * struct tcs_request: A set of tcs_cmds sent together in a TCS
+ *
+ * @state: state for the request.
+ * @wait_for_compl: wait until we get a response from the h/w accelerator
+ * @num_cmds: the number of @cmds in this request
+ * @cmds: an array of tcs_cmds
+ */
+struct tcs_request {
+ enum rpmh_state state;
+ u32 wait_for_compl;
+ u32 num_cmds;
+ struct tcs_cmd *cmds;
+};
+
+#endif /* __SOC_QCOM_TCS_H__ */
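Taken together, tcs.h and rpmh.h give client drivers a small request API: fill one or more tcs_cmd entries with a resource address and value, then submit them with rpmh_write() (blocking), rpmh_write_async(), or rpmh_write_batch() for a given rpmh_state. A usage sketch with an invented address/value pair:

#include <soc/qcom/rpmh.h>

/* Usage sketch; 0x30000 and 0x1 are made-up resource address and level. */
static int example_rpmh_request(const struct device *dev)
{
	struct tcs_cmd cmd = {
		.addr = 0x30000,	/* hypothetical RPMh resource address */
		.data = 0x1,		/* hypothetical requested level */
		.wait = 1,		/* wait for this command to complete */
	};

	/* blocking request, aggregated immediately (active/AMC mode) */
	return rpmh_write(dev, RPMH_ACTIVE_ONLY_STATE, &cmd, 1);
}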
diff --git a/include/sound/ac97/codec.h b/include/sound/ac97/codec.h
index ec04be9ab119..9792d25fa369 100644
--- a/include/sound/ac97/codec.h
+++ b/include/sound/ac97/codec.h
@@ -1,10 +1,8 @@
-/*
- * Copyright (C) 2016 Robert Jarzmik <robert.jarzmik@free.fr>
+/* SPDX-License-Identifier: GPL-2.0
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
+ * Copyright (C) 2016 Robert Jarzmik <robert.jarzmik@free.fr>
*/
+
#ifndef __SOUND_AC97_CODEC2_H
#define __SOUND_AC97_CODEC2_H
diff --git a/include/sound/ac97/compat.h b/include/sound/ac97/compat.h
index 1351cba40048..57e19afa31ab 100644
--- a/include/sound/ac97/compat.h
+++ b/include/sound/ac97/compat.h
@@ -1,14 +1,11 @@
-/*
- * Copyright (C) 2016 Robert Jarzmik <robert.jarzmik@free.fr>
+/* SPDX-License-Identifier: GPL-2.0
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
+ * Copyright (C) 2016 Robert Jarzmik <robert.jarzmik@free.fr>
*
* This file is for backward compatibility with snd_ac97 structure and its
* multiple usages, such as the snd_ac97_bus and snd_ac97_build_ops.
- *
*/
+
#ifndef AC97_COMPAT_H
#define AC97_COMPAT_H
diff --git a/include/sound/ac97/controller.h b/include/sound/ac97/controller.h
index b36ecdd64f14..06b5afb7fa6b 100644
--- a/include/sound/ac97/controller.h
+++ b/include/sound/ac97/controller.h
@@ -1,10 +1,8 @@
-/*
- * Copyright (C) 2016 Robert Jarzmik <robert.jarzmik@free.fr>
+/* SPDX-License-Identifier: GPL-2.0
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
+ * Copyright (C) 2016 Robert Jarzmik <robert.jarzmik@free.fr>
*/
+
#ifndef AC97_CONTROLLER_H
#define AC97_CONTROLLER_H
diff --git a/include/sound/ac97/regs.h b/include/sound/ac97/regs.h
index 9a4fa0c3264a..843f73f3705a 100644
--- a/include/sound/ac97/regs.h
+++ b/include/sound/ac97/regs.h
@@ -1,27 +1,11 @@
-/*
+/* SPDX-License-Identifier: GPL-2.0+
+ *
* Copyright (c) by Jaroslav Kysela <perex@perex.cz>
* Universal interface for Audio Codec '97
*
* For more details look to AC '97 component specification revision 2.1
* by Intel Corporation (http://developer.intel.com).
- *
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
*/
-
/*
* AC'97 codec registers
*/
diff --git a/include/sound/ac97_codec.h b/include/sound/ac97_codec.h
index 89d311a503d3..cc383991c0fe 100644
--- a/include/sound/ac97_codec.h
+++ b/include/sound/ac97_codec.h
@@ -1,30 +1,15 @@
-#ifndef __SOUND_AC97_CODEC_H
-#define __SOUND_AC97_CODEC_H
-
-/*
+/* SPDX-License-Identifier: GPL-2.0+
+ *
* Copyright (c) by Jaroslav Kysela <perex@perex.cz>
* Universal interface for Audio Codec '97
*
* For more details look to AC '97 component specification revision 2.1
* by Intel Corporation (http://developer.intel.com).
- *
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
*/
+#ifndef __SOUND_AC97_CODEC_H
+#define __SOUND_AC97_CODEC_H
+
#include <linux/bitops.h>
#include <linux/device.h>
#include <linux/workqueue.h>
diff --git a/include/sound/compress_driver.h b/include/sound/compress_driver.h
index 9924bc9cbc7c..ea8c93bbb0e0 100644
--- a/include/sound/compress_driver.h
+++ b/include/sound/compress_driver.h
@@ -1,27 +1,12 @@
-/*
+/* SPDX-License-Identifier: GPL-2.0
+ *
* compress_driver.h - compress offload driver definations
*
* Copyright (C) 2011 Intel Corporation
* Authors: Vinod Koul <vinod.koul@linux.intel.com>
* Pierre-Louis Bossart <pierre-louis.bossart@linux.intel.com>
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
*/
+
#ifndef __COMPRESS_DRIVER_H
#define __COMPRESS_DRIVER_H
diff --git a/include/sound/dmaengine_pcm.h b/include/sound/dmaengine_pcm.h
index e3481eebdd98..2c4cfaa135a6 100644
--- a/include/sound/dmaengine_pcm.h
+++ b/include/sound/dmaengine_pcm.h
@@ -1,17 +1,9 @@
-/*
+/* SPDX-License-Identifier: GPL-2.0+
+ *
* Copyright (C) 2012, Analog Devices Inc.
* Author: Lars-Peter Clausen <lars@metafoo.de>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 675 Mass Ave, Cambridge, MA 02139, USA.
- *
*/
+
#ifndef __SOUND_DMAENGINE_PCM_H__
#define __SOUND_DMAENGINE_PCM_H__
diff --git a/include/sound/hda_component.h b/include/sound/hda_component.h
new file mode 100644
index 000000000000..78626cde7081
--- /dev/null
+++ b/include/sound/hda_component.h
@@ -0,0 +1,61 @@
+// SPDX-License-Identifier: GPL-2.0
+// HD-Audio helpers to sync with DRM driver
+
+#ifndef __SOUND_HDA_COMPONENT_H
+#define __SOUND_HDA_COMPONENT_H
+
+#include <drm/drm_audio_component.h>
+
+#ifdef CONFIG_SND_HDA_COMPONENT
+int snd_hdac_set_codec_wakeup(struct hdac_bus *bus, bool enable);
+int snd_hdac_display_power(struct hdac_bus *bus, bool enable);
+int snd_hdac_sync_audio_rate(struct hdac_device *codec, hda_nid_t nid,
+ int dev_id, int rate);
+int snd_hdac_acomp_get_eld(struct hdac_device *codec, hda_nid_t nid, int dev_id,
+ bool *audio_enabled, char *buffer, int max_bytes);
+int snd_hdac_acomp_init(struct hdac_bus *bus,
+ const struct drm_audio_component_audio_ops *aops,
+ int (*match_master)(struct device *, void *),
+ size_t extra_size);
+int snd_hdac_acomp_exit(struct hdac_bus *bus);
+int snd_hdac_acomp_register_notifier(struct hdac_bus *bus,
+ const struct drm_audio_component_audio_ops *ops);
+#else
+static inline int snd_hdac_set_codec_wakeup(struct hdac_bus *bus, bool enable)
+{
+ return 0;
+}
+static inline int snd_hdac_display_power(struct hdac_bus *bus, bool enable)
+{
+ return 0;
+}
+static inline int snd_hdac_sync_audio_rate(struct hdac_device *codec,
+ hda_nid_t nid, int dev_id, int rate)
+{
+ return 0;
+}
+static inline int snd_hdac_acomp_get_eld(struct hdac_device *codec, hda_nid_t nid,
+ int dev_id, bool *audio_enabled,
+ char *buffer, int max_bytes)
+{
+ return -ENODEV;
+}
+static inline int snd_hdac_acomp_init(struct hdac_bus *bus,
+ const struct drm_audio_component_audio_ops *aops,
+ int (*match_master)(struct device *, void *),
+ size_t extra_size)
+{
+ return -ENODEV;
+}
+static inline int snd_hdac_acomp_exit(struct hdac_bus *bus)
+{
+ return 0;
+}
+static inline int snd_hdac_acomp_register_notifier(struct hdac_bus *bus,
+ const struct drm_audio_component_audio_ops *ops)
+{
+ return -ENODEV;
+}
+#endif
+
+#endif /* __SOUND_HDA_COMPONENT_H */
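These helpers generalize the former i915-only audio component glue to any DRM driver that exposes a drm_audio_component; the controller-side calling convention is unchanged. A sketch of the usual display-power bracket around code that needs the HDA link up (return values ignored for brevity):

#include <sound/hdaudio.h>
#include <sound/hda_component.h>

/* Illustrative power bracket around codec access. */
static void example_with_display_power(struct hdac_bus *bus)
{
	snd_hdac_display_power(bus, true);	/* take a power reference */

	/* ... verb traffic / ELD reads that need the link up ... */

	snd_hdac_display_power(bus, false);	/* drop the reference */
}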
diff --git a/include/sound/hda_i915.h b/include/sound/hda_i915.h
index a94f5b6f92ac..6b79614a893b 100644
--- a/include/sound/hda_i915.h
+++ b/include/sound/hda_i915.h
@@ -5,54 +5,23 @@
#ifndef __SOUND_HDA_I915_H
#define __SOUND_HDA_I915_H
-#include <drm/i915_component.h>
+#include "hda_component.h"
#ifdef CONFIG_SND_HDA_I915
-int snd_hdac_set_codec_wakeup(struct hdac_bus *bus, bool enable);
-int snd_hdac_display_power(struct hdac_bus *bus, bool enable);
void snd_hdac_i915_set_bclk(struct hdac_bus *bus);
-int snd_hdac_sync_audio_rate(struct hdac_device *codec, hda_nid_t nid,
- int dev_id, int rate);
-int snd_hdac_acomp_get_eld(struct hdac_device *codec, hda_nid_t nid, int dev_id,
- bool *audio_enabled, char *buffer, int max_bytes);
int snd_hdac_i915_init(struct hdac_bus *bus);
-int snd_hdac_i915_exit(struct hdac_bus *bus);
-int snd_hdac_i915_register_notifier(const struct i915_audio_component_audio_ops *);
#else
-static inline int snd_hdac_set_codec_wakeup(struct hdac_bus *bus, bool enable)
-{
- return 0;
-}
-static inline int snd_hdac_display_power(struct hdac_bus *bus, bool enable)
-{
- return 0;
-}
static inline void snd_hdac_i915_set_bclk(struct hdac_bus *bus)
{
}
-static inline int snd_hdac_sync_audio_rate(struct hdac_device *codec,
- hda_nid_t nid, int dev_id, int rate)
-{
- return 0;
-}
-static inline int snd_hdac_acomp_get_eld(struct hdac_device *codec, hda_nid_t nid,
- int dev_id, bool *audio_enabled,
- char *buffer, int max_bytes)
-{
- return -ENODEV;
-}
static inline int snd_hdac_i915_init(struct hdac_bus *bus)
{
return -ENODEV;
}
+#endif
static inline int snd_hdac_i915_exit(struct hdac_bus *bus)
{
- return 0;
+ return snd_hdac_acomp_exit(bus);
}
-static inline int snd_hdac_i915_register_notifier(const struct i915_audio_component_audio_ops *ops)
-{
- return -ENODEV;
-}
-#endif
#endif /* __SOUND_HDA_I915_H */
diff --git a/include/sound/hdaudio.h b/include/sound/hdaudio.h
index c052afc27547..6f1e1f3b3063 100644
--- a/include/sound/hdaudio.h
+++ b/include/sound/hdaudio.h
@@ -8,8 +8,10 @@
#include <linux/device.h>
#include <linux/interrupt.h>
+#include <linux/pm_runtime.h>
#include <linux/timecounter.h>
#include <sound/core.h>
+#include <sound/pcm.h>
#include <sound/memalloc.h>
#include <sound/hda_verbs.h>
#include <drm/i915_component.h>
@@ -132,7 +134,7 @@ int snd_hdac_get_sub_nodes(struct hdac_device *codec, hda_nid_t nid,
hda_nid_t *start_id);
unsigned int snd_hdac_calc_stream_format(unsigned int rate,
unsigned int channels,
- unsigned int format,
+ snd_pcm_format_t format,
unsigned int maxbps,
unsigned short spdif_ctls);
int snd_hdac_query_supported_pcm(struct hdac_device *codec, hda_nid_t nid,
@@ -171,12 +173,38 @@ int snd_hdac_power_down(struct hdac_device *codec);
int snd_hdac_power_up_pm(struct hdac_device *codec);
int snd_hdac_power_down_pm(struct hdac_device *codec);
int snd_hdac_keep_power_up(struct hdac_device *codec);
+
+/* call this when entering a suspend/resume callback in the codec driver */
+static inline void snd_hdac_enter_pm(struct hdac_device *codec)
+{
+ atomic_inc(&codec->in_pm);
+}
+
+/* call this when leaving a suspend/resume callback in the codec driver */
+static inline void snd_hdac_leave_pm(struct hdac_device *codec)
+{
+ atomic_dec(&codec->in_pm);
+}
+
+static inline bool snd_hdac_is_in_pm(struct hdac_device *codec)
+{
+ return atomic_read(&codec->in_pm);
+}
+
+static inline bool snd_hdac_is_power_on(struct hdac_device *codec)
+{
+ return !pm_runtime_suspended(&codec->dev);
+}
#else
static inline int snd_hdac_power_up(struct hdac_device *codec) { return 0; }
static inline int snd_hdac_power_down(struct hdac_device *codec) { return 0; }
static inline int snd_hdac_power_up_pm(struct hdac_device *codec) { return 0; }
static inline int snd_hdac_power_down_pm(struct hdac_device *codec) { return 0; }
static inline int snd_hdac_keep_power_up(struct hdac_device *codec) { return 0; }
+static inline void snd_hdac_enter_pm(struct hdac_device *codec) {}
+static inline void snd_hdac_leave_pm(struct hdac_device *codec) {}
+static inline bool snd_hdac_is_in_pm(struct hdac_device *codec) { return 0; }
+static inline bool snd_hdac_is_power_on(struct hdac_device *codec) { return 1; }
#endif
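snd_hdac_enter_pm()/snd_hdac_leave_pm() let a codec driver mark that it is already inside its own suspend/resume callback so lower layers can skip redundant runtime-PM wakeups, and snd_hdac_is_power_on() is a thin wrapper around pm_runtime_suspended(). A sketch of the intended bracketing (the actual suspend work is elided):

#include <sound/hdaudio.h>

/* Sketch of a codec driver suspend path using the new helpers. */
static int example_codec_suspend(struct hdac_device *codec)
{
	snd_hdac_enter_pm(codec);		/* in_pm++ */

	if (snd_hdac_is_power_on(codec)) {
		/* flush state, power down widgets, ... */
	}

	snd_hdac_leave_pm(codec);		/* in_pm-- */
	return 0;
}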
/*
@@ -188,6 +216,11 @@ struct hdac_driver {
const struct hda_device_id *id_table;
int (*match)(struct hdac_device *dev, struct hdac_driver *drv);
void (*unsol_event)(struct hdac_device *dev, unsigned int event);
+
+ /* fields used by ext bus APIs */
+ int (*probe)(struct hdac_device *dev);
+ int (*remove)(struct hdac_device *dev);
+ void (*shutdown)(struct hdac_device *dev);
};
#define drv_to_hdac_driver(_drv) container_of(_drv, struct hdac_driver, driver)
@@ -209,6 +242,14 @@ struct hdac_bus_ops {
};
/*
+ * ops used for ASoC HDA codec drivers
+ */
+struct hdac_ext_bus_ops {
+ int (*hdev_attach)(struct hdac_device *hdev);
+ int (*hdev_detach)(struct hdac_device *hdev);
+};
+
+/*
* Lowlevel I/O operators
*/
struct hdac_io_ops {
@@ -250,11 +291,17 @@ struct hdac_rb {
* @mlcap: MultiLink capabilities pointer
* @gtscap: gts capabilities pointer
* @drsmcap: dma resume capabilities pointer
+ * @num_streams: streams supported
+ * @idx: HDA link index
+ * @hlink_list: link list of HDA links
+ * @lock: lock for link mgmt
+ * @cmd_dma_state: state of cmd DMAs: CORB and RIRB
*/
struct hdac_bus {
struct device *dev;
const struct hdac_bus_ops *ops;
const struct hdac_io_ops *io_ops;
+ const struct hdac_ext_bus_ops *ext_ops;
/* h/w resources */
unsigned long addr;
@@ -314,9 +361,19 @@ struct hdac_bus {
spinlock_t reg_lock;
struct mutex cmd_mutex;
- /* i915 component interface */
- struct i915_audio_component *audio_component;
- int i915_power_refcount;
+ /* DRM component interface */
+ struct drm_audio_component *audio_component;
+ int drm_power_refcount;
+
+ /* parameters required for enhanced capabilities */
+ int num_streams;
+ int idx;
+
+ struct list_head hlink_list;
+
+ struct mutex lock;
+ bool cmd_dma_state;
+
};
int snd_hdac_bus_init(struct hdac_bus *bus, struct device *dev,
diff --git a/include/sound/hdaudio_ext.h b/include/sound/hdaudio_ext.h
index 9c14e21dda85..f34aced69ca8 100644
--- a/include/sound/hdaudio_ext.h
+++ b/include/sound/hdaudio_ext.h
@@ -4,38 +4,16 @@
#include <sound/hdaudio.h>
-/**
- * hdac_ext_bus: HDAC extended bus for extended HDA caps
- *
- * @bus: hdac bus
- * @num_streams: streams supported
- * @hlink_list: link list of HDA links
- * @lock: lock for link mgmt
- * @cmd_dma_state: state of cmd DMAs: CORB and RIRB
- */
-struct hdac_ext_bus {
- struct hdac_bus bus;
- int num_streams;
- int idx;
-
- struct list_head hlink_list;
-
- struct mutex lock;
- bool cmd_dma_state;
-};
-
-int snd_hdac_ext_bus_init(struct hdac_ext_bus *sbus, struct device *dev,
+int snd_hdac_ext_bus_init(struct hdac_bus *bus, struct device *dev,
const struct hdac_bus_ops *ops,
- const struct hdac_io_ops *io_ops);
+ const struct hdac_io_ops *io_ops,
+ const struct hdac_ext_bus_ops *ext_ops);
-void snd_hdac_ext_bus_exit(struct hdac_ext_bus *sbus);
-int snd_hdac_ext_bus_device_init(struct hdac_ext_bus *sbus, int addr);
+void snd_hdac_ext_bus_exit(struct hdac_bus *bus);
+int snd_hdac_ext_bus_device_init(struct hdac_bus *bus, int addr,
+ struct hdac_device *hdev);
void snd_hdac_ext_bus_device_exit(struct hdac_device *hdev);
-void snd_hdac_ext_bus_device_remove(struct hdac_ext_bus *ebus);
-
-#define ebus_to_hbus(ebus) (&(ebus)->bus)
-#define hbus_to_ebus(_bus) \
- container_of(_bus, struct hdac_ext_bus, bus)
+void snd_hdac_ext_bus_device_remove(struct hdac_bus *bus);
#define HDA_CODEC_REV_EXT_ENTRY(_vid, _rev, _name, drv_data) \
{ .vendor_id = (_vid), .rev_id = (_rev), .name = (_name), \
@@ -44,14 +22,14 @@ void snd_hdac_ext_bus_device_remove(struct hdac_ext_bus *ebus);
#define HDA_CODEC_EXT_ENTRY(_vid, _revid, _name, _drv_data) \
HDA_CODEC_REV_EXT_ENTRY(_vid, _revid, _name, _drv_data)
-void snd_hdac_ext_bus_ppcap_enable(struct hdac_ext_bus *chip, bool enable);
-void snd_hdac_ext_bus_ppcap_int_enable(struct hdac_ext_bus *chip, bool enable);
+void snd_hdac_ext_bus_ppcap_enable(struct hdac_bus *chip, bool enable);
+void snd_hdac_ext_bus_ppcap_int_enable(struct hdac_bus *chip, bool enable);
-void snd_hdac_ext_stream_spbcap_enable(struct hdac_ext_bus *chip,
+void snd_hdac_ext_stream_spbcap_enable(struct hdac_bus *chip,
bool enable, int index);
-int snd_hdac_ext_bus_get_ml_capabilities(struct hdac_ext_bus *bus);
-struct hdac_ext_link *snd_hdac_ext_bus_get_link(struct hdac_ext_bus *bus,
+int snd_hdac_ext_bus_get_ml_capabilities(struct hdac_bus *bus);
+struct hdac_ext_link *snd_hdac_ext_bus_get_link(struct hdac_bus *bus,
const char *codec_name);
enum hdac_ext_stream_type {
@@ -100,28 +78,28 @@ struct hdac_ext_stream {
#define stream_to_hdac_ext_stream(s) \
container_of(s, struct hdac_ext_stream, hstream)
-void snd_hdac_ext_stream_init(struct hdac_ext_bus *bus,
+void snd_hdac_ext_stream_init(struct hdac_bus *bus,
struct hdac_ext_stream *stream, int idx,
int direction, int tag);
-int snd_hdac_ext_stream_init_all(struct hdac_ext_bus *ebus, int start_idx,
+int snd_hdac_ext_stream_init_all(struct hdac_bus *bus, int start_idx,
int num_stream, int dir);
-void snd_hdac_stream_free_all(struct hdac_ext_bus *ebus);
-void snd_hdac_link_free_all(struct hdac_ext_bus *ebus);
-struct hdac_ext_stream *snd_hdac_ext_stream_assign(struct hdac_ext_bus *bus,
+void snd_hdac_stream_free_all(struct hdac_bus *bus);
+void snd_hdac_link_free_all(struct hdac_bus *bus);
+struct hdac_ext_stream *snd_hdac_ext_stream_assign(struct hdac_bus *bus,
struct snd_pcm_substream *substream,
int type);
void snd_hdac_ext_stream_release(struct hdac_ext_stream *azx_dev, int type);
-void snd_hdac_ext_stream_decouple(struct hdac_ext_bus *bus,
+void snd_hdac_ext_stream_decouple(struct hdac_bus *bus,
struct hdac_ext_stream *azx_dev, bool decouple);
-void snd_hdac_ext_stop_streams(struct hdac_ext_bus *sbus);
+void snd_hdac_ext_stop_streams(struct hdac_bus *bus);
-int snd_hdac_ext_stream_set_spib(struct hdac_ext_bus *ebus,
+int snd_hdac_ext_stream_set_spib(struct hdac_bus *bus,
struct hdac_ext_stream *stream, u32 value);
-int snd_hdac_ext_stream_get_spbmaxfifo(struct hdac_ext_bus *ebus,
+int snd_hdac_ext_stream_get_spbmaxfifo(struct hdac_bus *bus,
struct hdac_ext_stream *stream);
-void snd_hdac_ext_stream_drsm_enable(struct hdac_ext_bus *ebus,
+void snd_hdac_ext_stream_drsm_enable(struct hdac_bus *bus,
bool enable, int index);
-int snd_hdac_ext_stream_set_dpibr(struct hdac_ext_bus *ebus,
+int snd_hdac_ext_stream_set_dpibr(struct hdac_bus *bus,
struct hdac_ext_stream *stream, u32 value);
int snd_hdac_ext_stream_set_lpib(struct hdac_ext_stream *stream, u32 value);
@@ -144,17 +122,15 @@ struct hdac_ext_link {
int snd_hdac_ext_bus_link_power_up(struct hdac_ext_link *link);
int snd_hdac_ext_bus_link_power_down(struct hdac_ext_link *link);
-int snd_hdac_ext_bus_link_power_up_all(struct hdac_ext_bus *ebus);
-int snd_hdac_ext_bus_link_power_down_all(struct hdac_ext_bus *ebus);
+int snd_hdac_ext_bus_link_power_up_all(struct hdac_bus *bus);
+int snd_hdac_ext_bus_link_power_down_all(struct hdac_bus *bus);
void snd_hdac_ext_link_set_stream_id(struct hdac_ext_link *link,
int stream);
void snd_hdac_ext_link_clear_stream_id(struct hdac_ext_link *link,
int stream);
-int snd_hdac_ext_bus_link_get(struct hdac_ext_bus *ebus,
- struct hdac_ext_link *link);
-int snd_hdac_ext_bus_link_put(struct hdac_ext_bus *ebus,
- struct hdac_ext_link *link);
+int snd_hdac_ext_bus_link_get(struct hdac_bus *bus, struct hdac_ext_link *link);
+int snd_hdac_ext_bus_link_put(struct hdac_bus *bus, struct hdac_ext_link *link);
/* update register macro */
#define snd_hdac_updatel(addr, reg, mask, val) \
@@ -181,53 +157,12 @@ struct hda_dai_map {
u32 maxbps;
};
-#define HDA_MAX_NIDS 16
-
-/**
- * struct hdac_ext_device - HDAC Ext device
- *
- * @hdac: hdac core device
- * @nid_list - the dai map which matches the dai-name with the nid
- * @map_cur_idx - the idx in use in dai_map
- * @ops - the hda codec ops common to all codec drivers
- * @pvt_data - private data, for asoc contains asoc codec object
- */
-struct hdac_ext_device {
- struct hdac_device hdev;
- struct hdac_ext_bus *ebus;
-
- /* soc-dai to nid map */
- struct hda_dai_map nid_list[HDA_MAX_NIDS];
- unsigned int map_cur_idx;
-
- /* codec ops */
- struct hdac_ext_codec_ops ops;
-
- struct snd_card *card;
- void *scodec;
- void *private_data;
-};
-
struct hdac_ext_dma_params {
u32 format;
u8 stream_tag;
};
-#define to_ehdac_device(dev) (container_of((dev), \
- struct hdac_ext_device, hdev))
-/*
- * HD-audio codec base driver
- */
-struct hdac_ext_driver {
- struct hdac_driver hdac;
-
- int (*probe)(struct hdac_ext_device *dev);
- int (*remove)(struct hdac_ext_device *dev);
- void (*shutdown)(struct hdac_ext_device *dev);
-};
-
-int snd_hda_ext_driver_register(struct hdac_ext_driver *drv);
-void snd_hda_ext_driver_unregister(struct hdac_ext_driver *drv);
-#define to_ehdac_driver(_drv) container_of(_drv, struct hdac_ext_driver, hdac)
+int snd_hda_ext_driver_register(struct hdac_driver *drv);
+void snd_hda_ext_driver_unregister(struct hdac_driver *drv);
#endif /* __SOUND_HDAUDIO_EXT_H */
diff --git a/include/sound/memalloc.h b/include/sound/memalloc.h
index 9c3db3dce32b..67561b997915 100644
--- a/include/sound/memalloc.h
+++ b/include/sound/memalloc.h
@@ -24,6 +24,8 @@
#ifndef __SOUND_MEMALLOC_H
#define __SOUND_MEMALLOC_H
+#include <asm/page.h>
+
struct device;
/*
@@ -67,6 +69,14 @@ struct snd_dma_buffer {
void *private_data; /* private for allocator; don't touch */
};
+/*
+ * return the pages matching with the given byte size
+ */
+static inline unsigned int snd_sgbuf_aligned_pages(size_t size)
+{
+ return (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
+}
+
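Moving snd_sgbuf_aligned_pages() out of the CONFIG_SND_DMA_SGBUF block makes the byte-to-page rounding available to all buffer code, not just the SG allocator. It is a plain round-up division; a trivial sketch:

#include <sound/memalloc.h>

/* Equivalent to DIV_ROUND_UP(bytes, PAGE_SIZE); with 4 KiB pages:
 * 0 -> 0, 4096 -> 1, 4097 -> 2.
 */
static unsigned int example_buffer_pages(size_t bytes)
{
	return snd_sgbuf_aligned_pages(bytes);
}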
#ifdef CONFIG_SND_DMA_SGBUF
/*
* Scatter-Gather generic device pages
@@ -91,14 +101,6 @@ struct snd_sg_buf {
};
/*
- * return the pages matching with the given byte size
- */
-static inline unsigned int snd_sgbuf_aligned_pages(size_t size)
-{
- return (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
-}
-
-/*
* return the physical address at the corresponding offset
*/
static inline dma_addr_t snd_sgbuf_get_addr(struct snd_dma_buffer *dmab,
diff --git a/include/sound/pcm.h b/include/sound/pcm.h
index e054c583d3b3..d6bd3caf6878 100644
--- a/include/sound/pcm.h
+++ b/include/sound/pcm.h
@@ -462,6 +462,7 @@ struct snd_pcm_substream {
/* -- timer section -- */
struct snd_timer *timer; /* timer */
unsigned timer_running: 1; /* time is running */
+ long wait_time; /* time in ms for R/W to wait for avail */
/* -- next substream -- */
struct snd_pcm_substream *next;
/* -- linked substreams -- */
@@ -1089,14 +1090,14 @@ static inline snd_pcm_sframes_t
snd_pcm_lib_write(struct snd_pcm_substream *substream,
const void __user *buf, snd_pcm_uframes_t frames)
{
- return __snd_pcm_lib_xfer(substream, (void *)buf, true, frames, false);
+ return __snd_pcm_lib_xfer(substream, (void __force *)buf, true, frames, false);
}
static inline snd_pcm_sframes_t
snd_pcm_lib_read(struct snd_pcm_substream *substream,
void __user *buf, snd_pcm_uframes_t frames)
{
- return __snd_pcm_lib_xfer(substream, (void *)buf, true, frames, false);
+ return __snd_pcm_lib_xfer(substream, (void __force *)buf, true, frames, false);
}
static inline snd_pcm_sframes_t
@@ -1341,8 +1342,6 @@ int snd_pcm_lib_mmap_iomem(struct snd_pcm_substream *substream, struct vm_area_s
#define snd_pcm_lib_mmap_iomem NULL
#endif
-#define snd_pcm_lib_mmap_vmalloc NULL
-
/**
* snd_pcm_limit_isa_dma_size - Get the max size fitting with ISA DMA transfer
* @dma: DMA number
diff --git a/include/sound/pcm_params.h b/include/sound/pcm_params.h
index c704357775fc..2dd37cada7c0 100644
--- a/include/sound/pcm_params.h
+++ b/include/sound/pcm_params.h
@@ -87,6 +87,13 @@ static inline void snd_mask_set(struct snd_mask *mask, unsigned int val)
mask->bits[MASK_OFS(val)] |= MASK_BIT(val);
}
+/* Most drivers need only this one */
+static inline void snd_mask_set_format(struct snd_mask *mask,
+ snd_pcm_format_t format)
+{
+ snd_mask_set(mask, (__force unsigned int)format);
+}
+
static inline void snd_mask_reset(struct snd_mask *mask, unsigned int val)
{
mask->bits[MASK_OFS(val)] &= ~MASK_BIT(val);
@@ -369,8 +376,7 @@ static inline int params_physical_width(const struct snd_pcm_hw_params *p)
static inline void
params_set_format(struct snd_pcm_hw_params *p, snd_pcm_format_t fmt)
{
- snd_mask_set(hw_param_mask(p, SNDRV_PCM_HW_PARAM_FORMAT),
- (__force int)fmt);
+ snd_mask_set_format(hw_param_mask(p, SNDRV_PCM_HW_PARAM_FORMAT), fmt);
}
#endif /* __SOUND_PCM_PARAMS_H */
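snd_mask_set_format() exists so callers can set a format bit without open-coding the __force cast away from the sparse bitwise type snd_pcm_format_t; params_set_format() is now built on it. A sketch of a BE hw_params fixup pinning the stream to 16-bit samples, matching the fixup signature used elsewhere in ASoC:

#include <sound/pcm_params.h>
#include <sound/soc.h>

/* Sketch of a hw_params fixup forcing S16_LE through the sparse-clean
 * helper (params_set_format() now calls snd_mask_set_format()).
 */
static int example_be_hw_params_fixup(struct snd_soc_pcm_runtime *rtd,
				      struct snd_pcm_hw_params *params)
{
	params_set_format(params, SNDRV_PCM_FORMAT_S16_LE);
	return 0;
}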
diff --git a/include/sound/pxa2xx-lib.h b/include/sound/pxa2xx-lib.h
index 63f75450d3db..6758fc12fa84 100644
--- a/include/sound/pxa2xx-lib.h
+++ b/include/sound/pxa2xx-lib.h
@@ -8,20 +8,23 @@
/* PCM */
struct snd_pcm_substream;
struct snd_pcm_hw_params;
+struct snd_soc_pcm_runtime;
struct snd_pcm;
-extern int __pxa2xx_pcm_hw_params(struct snd_pcm_substream *substream,
+extern int pxa2xx_pcm_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params);
-extern int __pxa2xx_pcm_hw_free(struct snd_pcm_substream *substream);
+extern int pxa2xx_pcm_hw_free(struct snd_pcm_substream *substream);
extern int pxa2xx_pcm_trigger(struct snd_pcm_substream *substream, int cmd);
extern snd_pcm_uframes_t pxa2xx_pcm_pointer(struct snd_pcm_substream *substream);
-extern int __pxa2xx_pcm_prepare(struct snd_pcm_substream *substream);
-extern int __pxa2xx_pcm_open(struct snd_pcm_substream *substream);
-extern int __pxa2xx_pcm_close(struct snd_pcm_substream *substream);
+extern int pxa2xx_pcm_prepare(struct snd_pcm_substream *substream);
+extern int pxa2xx_pcm_open(struct snd_pcm_substream *substream);
+extern int pxa2xx_pcm_close(struct snd_pcm_substream *substream);
extern int pxa2xx_pcm_mmap(struct snd_pcm_substream *substream,
struct vm_area_struct *vma);
extern int pxa2xx_pcm_preallocate_dma_buffer(struct snd_pcm *pcm, int stream);
extern void pxa2xx_pcm_free_dma_buffers(struct snd_pcm *pcm);
+extern int pxa2xx_soc_pcm_new(struct snd_soc_pcm_runtime *rtd);
+extern const struct snd_pcm_ops pxa2xx_pcm_ops;
/* AC97 */
diff --git a/include/sound/rt5682.h b/include/sound/rt5682.h
new file mode 100644
index 000000000000..0251797ab438
--- /dev/null
+++ b/include/sound/rt5682.h
@@ -0,0 +1,40 @@
+/*
+ * linux/sound/rt5682.h -- Platform data for RT5682
+ *
+ * Copyright 2018 Realtek Microelectronics
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __LINUX_SND_RT5682_H
+#define __LINUX_SND_RT5682_H
+
+enum rt5682_dmic1_data_pin {
+ RT5682_DMIC1_NULL,
+ RT5682_DMIC1_DATA_GPIO2,
+ RT5682_DMIC1_DATA_GPIO5,
+};
+
+enum rt5682_dmic1_clk_pin {
+ RT5682_DMIC1_CLK_GPIO1,
+ RT5682_DMIC1_CLK_GPIO3,
+};
+
+enum rt5682_jd_src {
+ RT5682_JD_NULL,
+ RT5682_JD1,
+};
+
+struct rt5682_platform_data {
+
+ int ldo1_en; /* GPIO for LDO1_EN */
+
+ enum rt5682_dmic1_data_pin dmic1_data_pin;
+ enum rt5682_dmic1_clk_pin dmic1_clk_pin;
+ enum rt5682_jd_src jd_src;
+};
+
+#endif
+
diff --git a/include/sound/sb16_csp.h b/include/sound/sb16_csp.h
index c7c7788005e4..7817e88bd08d 100644
--- a/include/sound/sb16_csp.h
+++ b/include/sound/sb16_csp.h
@@ -46,7 +46,7 @@ enum {
struct snd_sb_csp_ops {
int (*csp_use) (struct snd_sb_csp * p);
int (*csp_unuse) (struct snd_sb_csp * p);
- int (*csp_autoload) (struct snd_sb_csp * p, int pcm_sfmt, int play_rec_mode);
+ int (*csp_autoload) (struct snd_sb_csp * p, snd_pcm_format_t pcm_sfmt, int play_rec_mode);
int (*csp_start) (struct snd_sb_csp * p, int sample_width, int channels);
int (*csp_stop) (struct snd_sb_csp * p);
int (*csp_qsound_transfer) (struct snd_sb_csp * p);
diff --git a/include/sound/seq_midi_event.h b/include/sound/seq_midi_event.h
index e40f43e6fc7b..2f135bccf457 100644
--- a/include/sound/seq_midi_event.h
+++ b/include/sound/seq_midi_event.h
@@ -43,10 +43,8 @@ void snd_midi_event_free(struct snd_midi_event *dev);
void snd_midi_event_reset_encode(struct snd_midi_event *dev);
void snd_midi_event_reset_decode(struct snd_midi_event *dev);
void snd_midi_event_no_status(struct snd_midi_event *dev, int on);
-/* encode from byte stream - return number of written bytes if success */
-long snd_midi_event_encode(struct snd_midi_event *dev, unsigned char *buf, long count,
- struct snd_seq_event *ev);
-int snd_midi_event_encode_byte(struct snd_midi_event *dev, int c, struct snd_seq_event *ev);
+bool snd_midi_event_encode_byte(struct snd_midi_event *dev, unsigned char c,
+ struct snd_seq_event *ev);
/* decode from event to bytes - return number of written bytes if success */
long snd_midi_event_decode(struct snd_midi_event *dev, unsigned char *buf, long count,
struct snd_seq_event *ev);
diff --git a/include/sound/seq_virmidi.h b/include/sound/seq_virmidi.h
index 695257ae64ac..796ce7772213 100644
--- a/include/sound/seq_virmidi.h
+++ b/include/sound/seq_virmidi.h
@@ -36,11 +36,12 @@ struct snd_virmidi {
int seq_mode;
int client;
int port;
- unsigned int trigger: 1;
+ bool trigger;
struct snd_midi_event *parser;
struct snd_seq_event event;
struct snd_virmidi_dev *rdev;
struct snd_rawmidi_substream *substream;
+ struct work_struct output_work;
};
#define SNDRV_VIRMIDI_SUBSCRIBE (1<<0)
diff --git a/include/sound/sh_fsi.h b/include/sound/sh_fsi.h
index 7a9710b4b799..89eafe23ef88 100644
--- a/include/sound/sh_fsi.h
+++ b/include/sound/sh_fsi.h
@@ -1,16 +1,13 @@
-#ifndef __SOUND_FSI_H
-#define __SOUND_FSI_H
-
-/*
+/* SPDX-License-Identifier: GPL-2.0
+ *
* Fifo-attached Serial Interface (FSI) support for SH7724
*
* Copyright (C) 2009 Renesas Solutions Corp.
* Kuninori Morimoto <morimoto.kuninori@renesas.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
+#ifndef __SOUND_FSI_H
+#define __SOUND_FSI_H
+
#include <linux/clk.h>
#include <sound/soc.h>
diff --git a/include/sound/simple_card.h b/include/sound/simple_card.h
index a6a2e1547092..d264e5463f22 100644
--- a/include/sound/simple_card.h
+++ b/include/sound/simple_card.h
@@ -1,12 +1,9 @@
-/*
+/* SPDX-License-Identifier: GPL-2.0
+ *
* ASoC simple sound card support
*
* Copyright (C) 2012 Renesas Solutions Corp.
* Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __SIMPLE_CARD_H
diff --git a/include/sound/simple_card_utils.h b/include/sound/simple_card_utils.h
index 7e25afce6566..8bc5e2d8b13c 100644
--- a/include/sound/simple_card_utils.h
+++ b/include/sound/simple_card_utils.h
@@ -1,17 +1,20 @@
-/*
+/* SPDX-License-Identifier: GPL-2.0
+ *
* simple_card_utils.h
*
* Copyright (c) 2016 Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
+
#ifndef __SIMPLE_CARD_UTILS_H
#define __SIMPLE_CARD_UTILS_H
#include <sound/soc.h>
+#define asoc_simple_card_init_hp(card, sjack, prefix) \
+ asoc_simple_card_init_jack(card, sjack, 1, prefix)
+#define asoc_simple_card_init_mic(card, sjack, prefix) \
+ asoc_simple_card_init_jack(card, sjack, 0, prefix)
+
struct asoc_simple_dai {
const char *name;
unsigned int sysclk;
@@ -28,6 +31,12 @@ struct asoc_simple_card_data {
u32 convert_channels;
};
+struct asoc_simple_jack {
+ struct snd_soc_jack jack;
+ struct snd_soc_jack_pin pin;
+ struct snd_soc_jack_gpio gpio;
+};
+
int asoc_simple_card_parse_daifmt(struct device *dev,
struct device_node *node,
struct device_node *codec,
@@ -107,4 +116,8 @@ int asoc_simple_card_of_parse_routing(struct snd_soc_card *card,
int asoc_simple_card_of_parse_widgets(struct snd_soc_card *card,
char *prefix);
+int asoc_simple_card_init_jack(struct snd_soc_card *card,
+ struct asoc_simple_jack *sjack,
+ int is_hp, char *prefix);
+
#endif /* __SIMPLE_CARD_UTILS_H */
diff --git a/include/sound/soc-acpi-intel-match.h b/include/sound/soc-acpi-intel-match.h
index 9da6388c20a1..bb1d24b703fb 100644
--- a/include/sound/soc-acpi-intel-match.h
+++ b/include/sound/soc-acpi-intel-match.h
@@ -1,16 +1,6 @@
-
-/*
- * Copyright (C) 2017, Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
+/* SPDX-License-Identifier: GPL-2.0
*
+ * Copyright (C) 2017, Intel Corporation. All rights reserved.
*/
#ifndef __LINUX_SND_SOC_ACPI_INTEL_MATCH_H
@@ -29,5 +19,10 @@ extern struct snd_soc_acpi_mach snd_soc_acpi_intel_broadwell_machines[];
extern struct snd_soc_acpi_mach snd_soc_acpi_intel_baytrail_legacy_machines[];
extern struct snd_soc_acpi_mach snd_soc_acpi_intel_baytrail_machines[];
extern struct snd_soc_acpi_mach snd_soc_acpi_intel_cherrytrail_machines[];
+extern struct snd_soc_acpi_mach snd_soc_acpi_intel_skl_machines[];
+extern struct snd_soc_acpi_mach snd_soc_acpi_intel_kbl_machines[];
+extern struct snd_soc_acpi_mach snd_soc_acpi_intel_bxt_machines[];
+extern struct snd_soc_acpi_mach snd_soc_acpi_intel_glk_machines[];
+extern struct snd_soc_acpi_mach snd_soc_acpi_intel_cnl_machines[];
#endif
diff --git a/include/sound/soc-acpi.h b/include/sound/soc-acpi.h
index 082224275f52..e45b2330d16a 100644
--- a/include/sound/soc-acpi.h
+++ b/include/sound/soc-acpi.h
@@ -1,15 +1,6 @@
-/*
- * Copyright (C) 2013-15, Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
+/* SPDX-License-Identifier: GPL-2.0
*
+ * Copyright (C) 2013-15, Intel Corporation. All rights reserved.
*/
#ifndef __LINUX_SND_SOC_ACPI_H
diff --git a/include/sound/soc-dai.h b/include/sound/soc-dai.h
index e6f8c40ed43c..f5d70041108f 100644
--- a/include/sound/soc-dai.h
+++ b/include/sound/soc-dai.h
@@ -1,12 +1,9 @@
-/*
+/* SPDX-License-Identifier: GPL-2.0
+ *
* linux/sound/soc-dai.h -- ALSA SoC Layer
*
* Copyright: 2005-2008 Wolfson Microelectronics. PLC.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
* Digital Audio Interface (DAI) API.
*/
@@ -141,6 +138,11 @@ int snd_soc_dai_set_tristate(struct snd_soc_dai *dai, int tristate);
int snd_soc_dai_digital_mute(struct snd_soc_dai *dai, int mute,
int direction);
+
+int snd_soc_dai_get_channel_map(struct snd_soc_dai *dai,
+ unsigned int *tx_num, unsigned int *tx_slot,
+ unsigned int *rx_num, unsigned int *rx_slot);
+
int snd_soc_dai_is_dummy(struct snd_soc_dai *dai);
struct snd_soc_dai_ops {
@@ -168,6 +170,9 @@ struct snd_soc_dai_ops {
int (*set_channel_map)(struct snd_soc_dai *dai,
unsigned int tx_num, unsigned int *tx_slot,
unsigned int rx_num, unsigned int *rx_slot);
+ int (*get_channel_map)(struct snd_soc_dai *dai,
+ unsigned int *tx_num, unsigned int *tx_slot,
+ unsigned int *rx_num, unsigned int *rx_slot);
int (*set_tristate)(struct snd_soc_dai *dai, int tristate);
int (*set_sdw_stream)(struct snd_soc_dai *dai,
diff --git a/include/sound/soc-dapm.h b/include/sound/soc-dapm.h
index a6ce2de4e20a..af9ef16cc34d 100644
--- a/include/sound/soc-dapm.h
+++ b/include/sound/soc-dapm.h
@@ -1,13 +1,10 @@
-/*
+/* SPDX-License-Identifier: GPL-2.0
+ *
* linux/sound/soc-dapm.h -- ALSA SoC Dynamic Audio Power Management
*
- * Author: Liam Girdwood
- * Created: Aug 11th 2005
+ * Author: Liam Girdwood
+ * Created: Aug 11th 2005
* Copyright: Wolfson Microelectronics. PLC.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __LINUX_SND_SOC_DAPM_H
diff --git a/include/sound/soc-dpcm.h b/include/sound/soc-dpcm.h
index 806059052bfc..9bb92f187af8 100644
--- a/include/sound/soc-dpcm.h
+++ b/include/sound/soc-dpcm.h
@@ -1,11 +1,8 @@
-/*
+/* SPDX-License-Identifier: GPL-2.0
+ *
* linux/sound/soc-dpcm.h -- ALSA SoC Dynamic PCM Support
*
* Author: Liam Girdwood <lrg@ti.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __LINUX_SND_SOC_DPCM_H
diff --git a/include/sound/soc-topology.h b/include/sound/soc-topology.h
index f552c3f56368..fa4b8413d2e2 100644
--- a/include/sound/soc-topology.h
+++ b/include/sound/soc-topology.h
@@ -1,13 +1,10 @@
-/*
+/* SPDX-License-Identifier: GPL-2.0
+ *
* linux/sound/soc-topology.h -- ALSA SoC Firmware Controls and DAPM
*
* Copyright (C) 2012 Texas Instruments Inc.
* Copyright (C) 2015 Intel Corporation.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
* Simple file API to load FW that includes mixers, coefficients, DAPM graphs,
* algorithms, equalisers, DAIs, widgets, FE caps, BE caps, codec link caps etc.
*/
@@ -30,6 +27,9 @@ struct snd_soc_dapm_context;
struct snd_soc_card;
struct snd_kcontrol_new;
struct snd_soc_dai_link;
+struct snd_soc_dai_driver;
+struct snd_soc_dai;
+struct snd_soc_dapm_route;
/* object scan be loaded and unloaded in groups with identfying indexes */
#define SND_SOC_TPLG_INDEX_ALL 0 /* ID that matches all FW objects */
@@ -109,35 +109,44 @@ struct snd_soc_tplg_widget_events {
struct snd_soc_tplg_ops {
/* external kcontrol init - used for any driver specific init */
- int (*control_load)(struct snd_soc_component *,
+ int (*control_load)(struct snd_soc_component *, int index,
struct snd_kcontrol_new *, struct snd_soc_tplg_ctl_hdr *);
int (*control_unload)(struct snd_soc_component *,
struct snd_soc_dobj *);
+ /* DAPM graph route element loading and unloading */
+ int (*dapm_route_load)(struct snd_soc_component *, int index,
+ struct snd_soc_dapm_route *route);
+ int (*dapm_route_unload)(struct snd_soc_component *,
+ struct snd_soc_dobj *);
+
/* external widget init - used for any driver specific init */
- int (*widget_load)(struct snd_soc_component *,
+ int (*widget_load)(struct snd_soc_component *, int index,
struct snd_soc_dapm_widget *,
struct snd_soc_tplg_dapm_widget *);
- int (*widget_ready)(struct snd_soc_component *,
+ int (*widget_ready)(struct snd_soc_component *, int index,
struct snd_soc_dapm_widget *,
struct snd_soc_tplg_dapm_widget *);
int (*widget_unload)(struct snd_soc_component *,
struct snd_soc_dobj *);
/* FE DAI - used for any driver specific init */
- int (*dai_load)(struct snd_soc_component *,
- struct snd_soc_dai_driver *dai_drv);
+ int (*dai_load)(struct snd_soc_component *, int index,
+ struct snd_soc_dai_driver *dai_drv,
+ struct snd_soc_tplg_pcm *pcm, struct snd_soc_dai *dai);
+
int (*dai_unload)(struct snd_soc_component *,
struct snd_soc_dobj *);
/* DAI link - used for any driver specific init */
- int (*link_load)(struct snd_soc_component *,
- struct snd_soc_dai_link *link);
+ int (*link_load)(struct snd_soc_component *, int index,
+ struct snd_soc_dai_link *link,
+ struct snd_soc_tplg_link_config *cfg);
int (*link_unload)(struct snd_soc_component *,
struct snd_soc_dobj *);
/* callback to handle vendor bespoke data */
- int (*vendor_load)(struct snd_soc_component *,
+ int (*vendor_load)(struct snd_soc_component *, int index,
struct snd_soc_tplg_hdr *);
int (*vendor_unload)(struct snd_soc_component *,
struct snd_soc_tplg_hdr *);
@@ -146,7 +155,7 @@ struct snd_soc_tplg_ops {
void (*complete)(struct snd_soc_component *);
/* manifest - optional to inform component of manifest */
- int (*manifest)(struct snd_soc_component *,
+ int (*manifest)(struct snd_soc_component *, int index,
struct snd_soc_tplg_manifest *);
/* vendor specific kcontrol handlers available for binding */
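
The soc-topology.h changes above thread an index argument through every loader callback and add DAPM route load/unload hooks. A minimal sketch of a component ops table updated for the new prototypes; only the signatures come from this header, the function names and bodies are placeholders:

        static int foo_widget_load(struct snd_soc_component *comp, int index,
                                   struct snd_soc_dapm_widget *w,
                                   struct snd_soc_tplg_dapm_widget *tplg_w)
        {
                dev_dbg(comp->dev, "widget %s (topology index %d)\n", w->name, index);
                return 0;
        }

        static int foo_route_load(struct snd_soc_component *comp, int index,
                                  struct snd_soc_dapm_route *route)
        {
                dev_dbg(comp->dev, "route %s -> %s (index %d)\n",
                        route->source, route->sink, index);
                return 0;
        }

        static struct snd_soc_tplg_ops foo_tplg_ops = {
                .widget_load     = foo_widget_load,
                .dapm_route_load = foo_route_load,
        };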
diff --git a/include/sound/soc.h b/include/sound/soc.h
index 1378dcd2128a..41cec42fb456 100644
--- a/include/sound/soc.h
+++ b/include/sound/soc.h
@@ -1,13 +1,10 @@
-/*
+/* SPDX-License-Identifier: GPL-2.0
+ *
* linux/sound/soc.h -- ALSA SoC Layer
*
- * Author: Liam Girdwood
- * Created: Aug 11th 2005
+ * Author: Liam Girdwood
+ * Created: Aug 11th 2005
* Copyright: Wolfson Microelectronics. PLC.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __LINUX_SND_SOC_H
@@ -806,6 +803,14 @@ struct snd_soc_component_driver {
unsigned int use_pmdown_time:1; /* care pmdown_time at stop */
unsigned int endianness:1;
unsigned int non_legacy_dai_naming:1;
+
+ /* this component uses topology and ignores machine driver FEs */
+ const char *ignore_machine;
+ const char *topology_name_prefix;
+ int (*be_hw_params_fixup)(struct snd_soc_pcm_runtime *rtd,
+ struct snd_pcm_hw_params *params);
+ bool use_dai_pcm_id; /* use the DAI link PCM ID as PCM device number */
+ int be_pcm_base; /* base device ID for all BE PCMs */
};
struct snd_soc_component {
@@ -957,10 +962,17 @@ struct snd_soc_dai_link {
/* DPCM used FE & BE merged format */
unsigned int dpcm_merged_format:1;
+ /* DPCM used FE & BE merged channel */
+ unsigned int dpcm_merged_chan:1;
+ /* DPCM used FE & BE merged rate */
+ unsigned int dpcm_merged_rate:1;
/* pmdown_time is ignored at stop */
unsigned int ignore_pmdown_time:1;
+ /* Do not create a PCM for this DAI link (Backend link) */
+ unsigned int ignore:1;
+
struct list_head list; /* DAI link list of the soc card */
struct snd_soc_dobj dobj; /* For topology */
};
@@ -1000,6 +1012,7 @@ struct snd_soc_card {
const char *long_name;
const char *driver_name;
char dmi_longname[80];
+ char topology_shortname[32];
struct device *dev;
struct snd_card *snd_card;
@@ -1009,6 +1022,7 @@ struct snd_soc_card {
struct mutex dapm_mutex;
bool instantiated;
+ bool topology_shortname_created;
int (*probe)(struct snd_soc_card *card);
int (*late_probe)(struct snd_soc_card *card);
@@ -1412,6 +1426,9 @@ int snd_soc_of_parse_card_name(struct snd_soc_card *card,
const char *propname);
int snd_soc_of_parse_audio_simple_widgets(struct snd_soc_card *card,
const char *propname);
+int snd_soc_of_get_slot_mask(struct device_node *np,
+ const char *prop_name,
+ unsigned int *mask);
int snd_soc_of_parse_tdm_slot(struct device_node *np,
unsigned int *tx_mask,
unsigned int *rx_mask,
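
snd_soc_of_get_slot_mask() is newly exported here; until now only snd_soc_of_parse_tdm_slot() used it internally. A minimal sketch of a machine driver reading a TX mask directly, assuming the conventional "dai-tdm-slot-tx-mask" property:

        static unsigned int foo_read_tx_mask(struct device_node *np)
        {
                unsigned int tx_mask = 0;

                /* same property snd_soc_of_parse_tdm_slot() parses for the TX side */
                snd_soc_of_get_slot_mask(np, "dai-tdm-slot-tx-mask", &tx_mask);

                return tx_mask;
        }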
diff --git a/include/target/iscsi/iscsi_target_core.h b/include/target/iscsi/iscsi_target_core.h
index cf5f3fff1f1a..f2e6abea8490 100644
--- a/include/target/iscsi/iscsi_target_core.h
+++ b/include/target/iscsi/iscsi_target_core.h
@@ -4,6 +4,7 @@
#include <linux/dma-direction.h> /* enum dma_data_direction */
#include <linux/list.h> /* struct list_head */
+#include <linux/sched.h>
#include <linux/socket.h> /* struct sockaddr_storage */
#include <linux/types.h> /* u8 */
#include <scsi/iscsi_proto.h> /* itt_t */
diff --git a/include/target/target_core_backend.h b/include/target/target_core_backend.h
index 34a15d59ed88..51b6f50eabee 100644
--- a/include/target/target_core_backend.h
+++ b/include/target/target_core_backend.h
@@ -106,13 +106,15 @@ bool target_lun_is_rdonly(struct se_cmd *);
sense_reason_t passthrough_parse_cdb(struct se_cmd *cmd,
sense_reason_t (*exec_cmd)(struct se_cmd *cmd));
-struct se_device *target_find_device(int id, bool do_depend);
-
bool target_sense_desc_format(struct se_device *dev);
sector_t target_to_linux_sector(struct se_device *dev, sector_t lb);
bool target_configure_unmap_from_queue(struct se_dev_attrib *attrib,
struct request_queue *q);
+static inline bool target_dev_configured(struct se_device *se_dev)
+{
+ return !!(se_dev->dev_flags & DF_CONFIGURED);
+}
/* Only use get_unaligned_be24() if reading p - 1 is allowed. */
static inline uint32_t get_unaligned_be24(const uint8_t *const p)
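
With target_find_device() gone, backends are expected to check configuration state through the new target_dev_configured() inline. A minimal sketch of the guard a backend might place in front of work that needs a configured device:

        static int foo_backend_flush(struct se_device *dev)
        {
                if (!target_dev_configured(dev))
                        return -ENODEV;

                /* safe to touch backend resources set up at configure time */
                return 0;
        }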
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
index 922a39f45abc..7a4ee7852ca4 100644
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -4,7 +4,7 @@
#include <linux/configfs.h> /* struct config_group */
#include <linux/dma-direction.h> /* enum dma_data_direction */
-#include <linux/percpu_ida.h> /* struct percpu_ida */
+#include <linux/sbitmap.h>
#include <linux/percpu-refcount.h>
#include <linux/semaphore.h> /* struct semaphore */
#include <linux/completion.h>
@@ -443,7 +443,6 @@ struct se_cmd {
u8 scsi_asc;
u8 scsi_ascq;
u16 scsi_sense_length;
- unsigned cmd_wait_set:1;
unsigned unknown_data_length:1;
bool state_active:1;
u64 tag; /* SAM command identifier aka task tag */
@@ -455,6 +454,7 @@ struct se_cmd {
int sam_task_attr;
/* Used for se_sess->sess_tag_pool */
unsigned int map_tag;
+ int map_cpu;
/* Transport protocol dependent state, see transport_state_table */
enum transport_state_table t_state;
/* See se_cmd_flags_table */
@@ -475,7 +475,7 @@ struct se_cmd {
struct se_session *se_sess;
struct se_tmr_req *se_tmr_req;
struct list_head se_cmd_list;
- struct completion cmd_wait_comp;
+ struct completion *compl;
const struct target_core_fabric_ops *se_tfo;
sense_reason_t (*execute_cmd)(struct se_cmd *);
sense_reason_t (*transport_complete_callback)(struct se_cmd *, bool, int *);
@@ -605,10 +605,10 @@ struct se_session {
struct list_head sess_list;
struct list_head sess_acl_list;
struct list_head sess_cmd_list;
- struct list_head sess_wait_list;
spinlock_t sess_cmd_lock;
+ wait_queue_head_t cmd_list_wq;
void *sess_cmd_map;
- struct percpu_ida sess_tag_pool;
+ struct sbitmap_queue sess_tag_pool;
};
struct se_device;
@@ -638,7 +638,6 @@ struct se_dev_entry {
atomic_long_t total_cmds;
atomic_long_t read_bytes;
atomic_long_t write_bytes;
- atomic_t ua_count;
/* Used for PR SPEC_I_PT=1 and REGISTER_AND_MOVE */
struct kref pr_kref;
struct completion pr_comp;
@@ -934,4 +933,9 @@ static inline void atomic_dec_mb(atomic_t *v)
smp_mb__after_atomic();
}
+static inline void target_free_tag(struct se_session *sess, struct se_cmd *cmd)
+{
+ sbitmap_queue_clear(&sess->sess_tag_pool, cmd->map_tag, cmd->map_cpu);
+}
+
#endif /* TARGET_CORE_BASE_H */
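
The session tag pool moves from percpu_ida to an sbitmap_queue, se_cmd grows a map_cpu field, and target_free_tag() wraps sbitmap_queue_clear(). A minimal sketch of the allocate/free pairing a fabric driver would now use; sbitmap_queue_get() comes from linux/sbitmap.h and struct foo_cmd is a hypothetical per-command wrapper:

        static struct se_cmd *foo_get_cmd(struct se_session *se_sess)
        {
                struct foo_cmd *fcmd;
                unsigned int cpu;
                int tag;

                tag = sbitmap_queue_get(&se_sess->sess_tag_pool, &cpu);
                if (tag < 0)
                        return NULL;

                fcmd = &((struct foo_cmd *)se_sess->sess_cmd_map)[tag];
                fcmd->se_cmd.map_tag = tag;
                fcmd->se_cmd.map_cpu = cpu;
                return &fcmd->se_cmd;
        }

        static void foo_put_cmd(struct se_session *se_sess, struct se_cmd *cmd)
        {
                target_free_tag(se_sess, cmd);  /* releases the tag on cmd->map_cpu */
        }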
diff --git a/include/target/target_core_fabric.h b/include/target/target_core_fabric.h
index b297aa0d9651..f4147b398431 100644
--- a/include/target/target_core_fabric.h
+++ b/include/target/target_core_fabric.h
@@ -79,7 +79,7 @@ struct target_core_fabric_ops {
void (*fabric_drop_wwn)(struct se_wwn *);
void (*add_wwn_groups)(struct se_wwn *);
struct se_portal_group *(*fabric_make_tpg)(struct se_wwn *,
- struct config_group *, const char *);
+ const char *);
void (*fabric_drop_tpg)(struct se_portal_group *);
int (*fabric_post_link)(struct se_portal_group *,
struct se_lun *);
@@ -109,17 +109,17 @@ void target_unregister_template(const struct target_core_fabric_ops *fo);
int target_depend_item(struct config_item *item);
void target_undepend_item(struct config_item *item);
-struct se_session *target_alloc_session(struct se_portal_group *,
+struct se_session *target_setup_session(struct se_portal_group *,
unsigned int, unsigned int, enum target_prot_op prot_op,
const char *, void *,
int (*callback)(struct se_portal_group *,
struct se_session *, void *));
+void target_remove_session(struct se_session *);
-struct se_session *transport_init_session(enum target_prot_op);
+void transport_init_session(struct se_session *);
+struct se_session *transport_alloc_session(enum target_prot_op);
int transport_alloc_session_tags(struct se_session *, unsigned int,
unsigned int);
-struct se_session *transport_init_session_tags(unsigned int, unsigned int,
- enum target_prot_op);
void __transport_register_session(struct se_portal_group *,
struct se_node_acl *, struct se_session *, void *);
void transport_register_session(struct se_portal_group *,
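
target_alloc_session() becomes target_setup_session(), with target_remove_session() as its teardown counterpart, and transport_init_session()/transport_alloc_session() are split. A minimal sketch of a fabric login path on the new API; the tag count, command size and foo_conn structure are illustrative only:

        static int foo_login(struct foo_conn *conn, struct se_portal_group *se_tpg,
                             const char *initiator_name)
        {
                struct se_session *se_sess;

                se_sess = target_setup_session(se_tpg, 128, sizeof(struct foo_cmd),
                                               TARGET_PROT_NORMAL, initiator_name,
                                               conn, NULL);
                if (IS_ERR(se_sess))
                        return PTR_ERR(se_sess);

                conn->se_sess = se_sess;
                return 0;
        }

        static void foo_logout(struct foo_conn *conn)
        {
                target_remove_session(conn->se_sess);
        }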
diff --git a/include/trace/events/btrfs.h b/include/trace/events/btrfs.h
index 39b94ec965be..b401c4e36394 100644
--- a/include/trace/events/btrfs.h
+++ b/include/trace/events/btrfs.h
@@ -374,7 +374,7 @@ DECLARE_EVENT_CLASS(
__entry->extent_type = btrfs_file_extent_type(l, fi);
__entry->compression = btrfs_file_extent_compression(l, fi);
__entry->extent_start = start;
- __entry->extent_end = (start + btrfs_file_extent_inline_len(l, slot, fi));
+ __entry->extent_end = (start + btrfs_file_extent_ram_bytes(l, fi));
),
TP_printk_btrfs(
@@ -433,7 +433,6 @@ DEFINE_EVENT(
{ (1 << BTRFS_ORDERED_DIRECT), "DIRECT" }, \
{ (1 << BTRFS_ORDERED_IOERR), "IOERR" }, \
{ (1 << BTRFS_ORDERED_UPDATED_ISIZE), "UPDATED_ISIZE" }, \
- { (1 << BTRFS_ORDERED_LOGGED_CSUM), "LOGGED_CSUM" }, \
{ (1 << BTRFS_ORDERED_TRUNCATED), "TRUNCATED" })
diff --git a/include/trace/events/clk.h b/include/trace/events/clk.h
index 2cd449328aee..9004ffff7f32 100644
--- a/include/trace/events/clk.h
+++ b/include/trace/events/clk.h
@@ -192,6 +192,42 @@ DEFINE_EVENT(clk_phase, clk_set_phase_complete,
TP_ARGS(core, phase)
);
+DECLARE_EVENT_CLASS(clk_duty_cycle,
+
+ TP_PROTO(struct clk_core *core, struct clk_duty *duty),
+
+ TP_ARGS(core, duty),
+
+ TP_STRUCT__entry(
+ __string( name, core->name )
+ __field( unsigned int, num )
+ __field( unsigned int, den )
+ ),
+
+ TP_fast_assign(
+ __assign_str(name, core->name);
+ __entry->num = duty->num;
+ __entry->den = duty->den;
+ ),
+
+ TP_printk("%s %u/%u", __get_str(name), (unsigned int)__entry->num,
+ (unsigned int)__entry->den)
+);
+
+DEFINE_EVENT(clk_duty_cycle, clk_set_duty_cycle,
+
+ TP_PROTO(struct clk_core *core, struct clk_duty *duty),
+
+ TP_ARGS(core, duty)
+);
+
+DEFINE_EVENT(clk_duty_cycle, clk_set_duty_cycle_complete,
+
+ TP_PROTO(struct clk_core *core, struct clk_duty *duty),
+
+ TP_ARGS(core, duty)
+);
+
#endif /* _TRACE_CLK_H */
/* This part must be outside protection */
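
The new clk_duty_cycle event class pairs a set event with a completion event, both keyed on struct clk_duty's num/den. A minimal sketch of how the clk core might bracket a duty-cycle update with them; the clk_core field names and the set_duty_cycle clk_ops hook are assumptions about the framework side, not part of this header:

        static int foo_set_duty_cycle(struct clk_core *core, struct clk_duty *duty)
        {
                int ret = 0;

                trace_clk_set_duty_cycle(core, duty);

                if (core->ops->set_duty_cycle)          /* assumed clk_ops hook */
                        ret = core->ops->set_duty_cycle(core->hw, duty);

                trace_clk_set_duty_cycle_complete(core, duty);

                return ret;
        }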
diff --git a/include/trace/events/fib.h b/include/trace/events/fib.h
index 9763cddd0594..6271bab63bfb 100644
--- a/include/trace/events/fib.h
+++ b/include/trace/events/fib.h
@@ -22,6 +22,7 @@ TRACE_EVENT(fib_table_lookup,
__field( int, err )
__field( int, oif )
__field( int, iif )
+ __field( u8, proto )
__field( __u8, tos )
__field( __u8, scope )
__field( __u8, flags )
@@ -31,7 +32,6 @@ TRACE_EVENT(fib_table_lookup,
__array( __u8, saddr, 4 )
__field( u16, sport )
__field( u16, dport )
- __field( u8, proto )
__dynamic_array(char, name, IFNAMSIZ )
),
diff --git a/include/trace/events/filelock.h b/include/trace/events/filelock.h
index d1faf3597b9d..68b17c116907 100644
--- a/include/trace/events/filelock.h
+++ b/include/trace/events/filelock.h
@@ -112,8 +112,11 @@ DEFINE_EVENT(filelock_lock, locks_remove_posix,
TP_PROTO(struct inode *inode, struct file_lock *fl, int ret),
TP_ARGS(inode, fl, ret));
-DECLARE_EVENT_CLASS(filelock_lease,
+DEFINE_EVENT(filelock_lock, flock_lock_inode,
+ TP_PROTO(struct inode *inode, struct file_lock *fl, int ret),
+ TP_ARGS(inode, fl, ret));
+DECLARE_EVENT_CLASS(filelock_lease,
TP_PROTO(struct inode *inode, struct file_lock *fl),
TP_ARGS(inode, fl),
diff --git a/include/trace/events/net.h b/include/trace/events/net.h
index 9c886739246a..00aa72ce0e7c 100644
--- a/include/trace/events/net.h
+++ b/include/trace/events/net.h
@@ -223,6 +223,13 @@ DEFINE_EVENT(net_dev_rx_verbose_template, netif_receive_skb_entry,
TP_ARGS(skb)
);
+DEFINE_EVENT(net_dev_rx_verbose_template, netif_receive_skb_list_entry,
+
+ TP_PROTO(const struct sk_buff *skb),
+
+ TP_ARGS(skb)
+);
+
DEFINE_EVENT(net_dev_rx_verbose_template, netif_rx_entry,
TP_PROTO(const struct sk_buff *skb),
diff --git a/include/trace/events/power.h b/include/trace/events/power.h
index 908977d69783..f7aece721aed 100644
--- a/include/trace/events/power.h
+++ b/include/trace/events/power.h
@@ -5,6 +5,7 @@
#if !defined(_TRACE_POWER_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_POWER_H
+#include <linux/cpufreq.h>
#include <linux/ktime.h>
#include <linux/pm_qos.h>
#include <linux/tracepoint.h>
@@ -148,6 +149,30 @@ DEFINE_EVENT(cpu, cpu_frequency,
TP_ARGS(frequency, cpu_id)
);
+TRACE_EVENT(cpu_frequency_limits,
+
+ TP_PROTO(struct cpufreq_policy *policy),
+
+ TP_ARGS(policy),
+
+ TP_STRUCT__entry(
+ __field(u32, min_freq)
+ __field(u32, max_freq)
+ __field(u32, cpu_id)
+ ),
+
+ TP_fast_assign(
+ __entry->min_freq = policy->min;
+ __entry->max_freq = policy->max;
+ __entry->cpu_id = policy->cpu;
+ ),
+
+ TP_printk("min=%lu max=%lu cpu_id=%lu",
+ (unsigned long)__entry->min_freq,
+ (unsigned long)__entry->max_freq,
+ (unsigned long)__entry->cpu_id)
+);
+
TRACE_EVENT(device_pm_callback_start,
TP_PROTO(struct device *dev, const char *pm_ops, int event),
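
cpu_frequency_limits snapshots policy->min, policy->max and policy->cpu in one event. A minimal sketch of emitting it once a policy's limits have been recomputed; where cpufreq actually places the call is not shown in this hunk:

        static void foo_limits_updated(struct cpufreq_policy *policy)
        {
                pr_debug("cpu%u: limits now %u-%u kHz\n",
                         policy->cpu, policy->min, policy->max);
                trace_cpu_frequency_limits(policy);
        }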
diff --git a/include/trace/events/rcu.h b/include/trace/events/rcu.h
index 5936aac357ab..a8d07feff6a0 100644
--- a/include/trace/events/rcu.h
+++ b/include/trace/events/rcu.h
@@ -52,6 +52,7 @@ TRACE_EVENT(rcu_utilization,
* "cpuqs": CPU passes through a quiescent state.
* "cpuonl": CPU comes online.
* "cpuofl": CPU goes offline.
+ * "cpuofl-bgp": CPU goes offline while blocking a grace period.
* "reqwait": GP kthread sleeps waiting for grace-period request.
* "reqwaitsig": GP kthread awakened by signal from reqwait state.
* "fqswait": GP kthread waiting until time to force quiescent states.
@@ -63,24 +64,24 @@ TRACE_EVENT(rcu_utilization,
*/
TRACE_EVENT(rcu_grace_period,
- TP_PROTO(const char *rcuname, unsigned long gpnum, const char *gpevent),
+ TP_PROTO(const char *rcuname, unsigned long gp_seq, const char *gpevent),
- TP_ARGS(rcuname, gpnum, gpevent),
+ TP_ARGS(rcuname, gp_seq, gpevent),
TP_STRUCT__entry(
__field(const char *, rcuname)
- __field(unsigned long, gpnum)
+ __field(unsigned long, gp_seq)
__field(const char *, gpevent)
),
TP_fast_assign(
__entry->rcuname = rcuname;
- __entry->gpnum = gpnum;
+ __entry->gp_seq = gp_seq;
__entry->gpevent = gpevent;
),
TP_printk("%s %lu %s",
- __entry->rcuname, __entry->gpnum, __entry->gpevent)
+ __entry->rcuname, __entry->gp_seq, __entry->gpevent)
);
/*
@@ -90,8 +91,8 @@ TRACE_EVENT(rcu_grace_period,
*
* "Startleaf": Request a grace period based on leaf-node data.
* "Prestarted": Someone beat us to the request
- * "Startedleaf": Leaf-node start proved sufficient.
- * "Startedleafroot": Leaf-node start proved sufficient after checking root.
+ * "Startedleaf": Leaf node marked for future GP.
+ * "Startedleafroot": All nodes from leaf to root marked for future GP.
* "Startedroot": Requested a nocb grace period based on root-node data.
* "NoGPkthread": The RCU grace-period kthread has not yet started.
* "StartWait": Start waiting for the requested grace period.
@@ -102,17 +103,16 @@ TRACE_EVENT(rcu_grace_period,
*/
TRACE_EVENT(rcu_future_grace_period,
- TP_PROTO(const char *rcuname, unsigned long gpnum, unsigned long completed,
- unsigned long c, u8 level, int grplo, int grphi,
+ TP_PROTO(const char *rcuname, unsigned long gp_seq,
+ unsigned long gp_seq_req, u8 level, int grplo, int grphi,
const char *gpevent),
- TP_ARGS(rcuname, gpnum, completed, c, level, grplo, grphi, gpevent),
+ TP_ARGS(rcuname, gp_seq, gp_seq_req, level, grplo, grphi, gpevent),
TP_STRUCT__entry(
__field(const char *, rcuname)
- __field(unsigned long, gpnum)
- __field(unsigned long, completed)
- __field(unsigned long, c)
+ __field(unsigned long, gp_seq)
+ __field(unsigned long, gp_seq_req)
__field(u8, level)
__field(int, grplo)
__field(int, grphi)
@@ -121,19 +121,17 @@ TRACE_EVENT(rcu_future_grace_period,
TP_fast_assign(
__entry->rcuname = rcuname;
- __entry->gpnum = gpnum;
- __entry->completed = completed;
- __entry->c = c;
+ __entry->gp_seq = gp_seq;
+ __entry->gp_seq_req = gp_seq_req;
__entry->level = level;
__entry->grplo = grplo;
__entry->grphi = grphi;
__entry->gpevent = gpevent;
),
- TP_printk("%s %lu %lu %lu %u %d %d %s",
- __entry->rcuname, __entry->gpnum, __entry->completed,
- __entry->c, __entry->level, __entry->grplo, __entry->grphi,
- __entry->gpevent)
+ TP_printk("%s %lu %lu %u %d %d %s",
+ __entry->rcuname, __entry->gp_seq, __entry->gp_seq_req, __entry->level,
+ __entry->grplo, __entry->grphi, __entry->gpevent)
);
/*
@@ -145,14 +143,14 @@ TRACE_EVENT(rcu_future_grace_period,
*/
TRACE_EVENT(rcu_grace_period_init,
- TP_PROTO(const char *rcuname, unsigned long gpnum, u8 level,
+ TP_PROTO(const char *rcuname, unsigned long gp_seq, u8 level,
int grplo, int grphi, unsigned long qsmask),
- TP_ARGS(rcuname, gpnum, level, grplo, grphi, qsmask),
+ TP_ARGS(rcuname, gp_seq, level, grplo, grphi, qsmask),
TP_STRUCT__entry(
__field(const char *, rcuname)
- __field(unsigned long, gpnum)
+ __field(unsigned long, gp_seq)
__field(u8, level)
__field(int, grplo)
__field(int, grphi)
@@ -161,7 +159,7 @@ TRACE_EVENT(rcu_grace_period_init,
TP_fast_assign(
__entry->rcuname = rcuname;
- __entry->gpnum = gpnum;
+ __entry->gp_seq = gp_seq;
__entry->level = level;
__entry->grplo = grplo;
__entry->grphi = grphi;
@@ -169,7 +167,7 @@ TRACE_EVENT(rcu_grace_period_init,
),
TP_printk("%s %lu %u %d %d %lx",
- __entry->rcuname, __entry->gpnum, __entry->level,
+ __entry->rcuname, __entry->gp_seq, __entry->level,
__entry->grplo, __entry->grphi, __entry->qsmask)
);
@@ -301,24 +299,24 @@ TRACE_EVENT(rcu_nocb_wake,
*/
TRACE_EVENT(rcu_preempt_task,
- TP_PROTO(const char *rcuname, int pid, unsigned long gpnum),
+ TP_PROTO(const char *rcuname, int pid, unsigned long gp_seq),
- TP_ARGS(rcuname, pid, gpnum),
+ TP_ARGS(rcuname, pid, gp_seq),
TP_STRUCT__entry(
__field(const char *, rcuname)
- __field(unsigned long, gpnum)
+ __field(unsigned long, gp_seq)
__field(int, pid)
),
TP_fast_assign(
__entry->rcuname = rcuname;
- __entry->gpnum = gpnum;
+ __entry->gp_seq = gp_seq;
__entry->pid = pid;
),
TP_printk("%s %lu %d",
- __entry->rcuname, __entry->gpnum, __entry->pid)
+ __entry->rcuname, __entry->gp_seq, __entry->pid)
);
/*
@@ -328,23 +326,23 @@ TRACE_EVENT(rcu_preempt_task,
*/
TRACE_EVENT(rcu_unlock_preempted_task,
- TP_PROTO(const char *rcuname, unsigned long gpnum, int pid),
+ TP_PROTO(const char *rcuname, unsigned long gp_seq, int pid),
- TP_ARGS(rcuname, gpnum, pid),
+ TP_ARGS(rcuname, gp_seq, pid),
TP_STRUCT__entry(
__field(const char *, rcuname)
- __field(unsigned long, gpnum)
+ __field(unsigned long, gp_seq)
__field(int, pid)
),
TP_fast_assign(
__entry->rcuname = rcuname;
- __entry->gpnum = gpnum;
+ __entry->gp_seq = gp_seq;
__entry->pid = pid;
),
- TP_printk("%s %lu %d", __entry->rcuname, __entry->gpnum, __entry->pid)
+ TP_printk("%s %lu %d", __entry->rcuname, __entry->gp_seq, __entry->pid)
);
/*
@@ -357,15 +355,15 @@ TRACE_EVENT(rcu_unlock_preempted_task,
*/
TRACE_EVENT(rcu_quiescent_state_report,
- TP_PROTO(const char *rcuname, unsigned long gpnum,
+ TP_PROTO(const char *rcuname, unsigned long gp_seq,
unsigned long mask, unsigned long qsmask,
u8 level, int grplo, int grphi, int gp_tasks),
- TP_ARGS(rcuname, gpnum, mask, qsmask, level, grplo, grphi, gp_tasks),
+ TP_ARGS(rcuname, gp_seq, mask, qsmask, level, grplo, grphi, gp_tasks),
TP_STRUCT__entry(
__field(const char *, rcuname)
- __field(unsigned long, gpnum)
+ __field(unsigned long, gp_seq)
__field(unsigned long, mask)
__field(unsigned long, qsmask)
__field(u8, level)
@@ -376,7 +374,7 @@ TRACE_EVENT(rcu_quiescent_state_report,
TP_fast_assign(
__entry->rcuname = rcuname;
- __entry->gpnum = gpnum;
+ __entry->gp_seq = gp_seq;
__entry->mask = mask;
__entry->qsmask = qsmask;
__entry->level = level;
@@ -386,41 +384,41 @@ TRACE_EVENT(rcu_quiescent_state_report,
),
TP_printk("%s %lu %lx>%lx %u %d %d %u",
- __entry->rcuname, __entry->gpnum,
+ __entry->rcuname, __entry->gp_seq,
__entry->mask, __entry->qsmask, __entry->level,
__entry->grplo, __entry->grphi, __entry->gp_tasks)
);
/*
* Tracepoint for quiescent states detected by force_quiescent_state().
- * These trace events include the type of RCU, the grace-period number that
- * was blocked by the CPU, the CPU itself, and the type of quiescent state,
- * which can be "dti" for dyntick-idle mode, "ofl" for CPU offline, "kick"
- * when kicking a CPU that has been in dyntick-idle mode for too long, or
- * "rqc" if the CPU got a quiescent state via its rcu_qs_ctr.
+ * These trace events include the type of RCU, the grace-period number
+ * that was blocked by the CPU, the CPU itself, and the type of quiescent
+ * state, which can be "dti" for dyntick-idle mode, "kick" when kicking
+ * a CPU that has been in dyntick-idle mode for too long, or "rqc" if the
+ * CPU got a quiescent state via its rcu_qs_ctr.
*/
TRACE_EVENT(rcu_fqs,
- TP_PROTO(const char *rcuname, unsigned long gpnum, int cpu, const char *qsevent),
+ TP_PROTO(const char *rcuname, unsigned long gp_seq, int cpu, const char *qsevent),
- TP_ARGS(rcuname, gpnum, cpu, qsevent),
+ TP_ARGS(rcuname, gp_seq, cpu, qsevent),
TP_STRUCT__entry(
__field(const char *, rcuname)
- __field(unsigned long, gpnum)
+ __field(unsigned long, gp_seq)
__field(int, cpu)
__field(const char *, qsevent)
),
TP_fast_assign(
__entry->rcuname = rcuname;
- __entry->gpnum = gpnum;
+ __entry->gp_seq = gp_seq;
__entry->cpu = cpu;
__entry->qsevent = qsevent;
),
TP_printk("%s %lu %d %s",
- __entry->rcuname, __entry->gpnum,
+ __entry->rcuname, __entry->gp_seq,
__entry->cpu, __entry->qsevent)
);
@@ -753,23 +751,23 @@ TRACE_EVENT(rcu_barrier,
#else /* #ifdef CONFIG_RCU_TRACE */
-#define trace_rcu_grace_period(rcuname, gpnum, gpevent) do { } while (0)
-#define trace_rcu_future_grace_period(rcuname, gpnum, completed, c, \
+#define trace_rcu_grace_period(rcuname, gp_seq, gpevent) do { } while (0)
+#define trace_rcu_future_grace_period(rcuname, gp_seq, gp_seq_req, \
level, grplo, grphi, event) \
do { } while (0)
-#define trace_rcu_grace_period_init(rcuname, gpnum, level, grplo, grphi, \
+#define trace_rcu_grace_period_init(rcuname, gp_seq, level, grplo, grphi, \
qsmask) do { } while (0)
#define trace_rcu_exp_grace_period(rcuname, gqseq, gpevent) \
do { } while (0)
#define trace_rcu_exp_funnel_lock(rcuname, level, grplo, grphi, gpevent) \
do { } while (0)
#define trace_rcu_nocb_wake(rcuname, cpu, reason) do { } while (0)
-#define trace_rcu_preempt_task(rcuname, pid, gpnum) do { } while (0)
-#define trace_rcu_unlock_preempted_task(rcuname, gpnum, pid) do { } while (0)
-#define trace_rcu_quiescent_state_report(rcuname, gpnum, mask, qsmask, level, \
+#define trace_rcu_preempt_task(rcuname, pid, gp_seq) do { } while (0)
+#define trace_rcu_unlock_preempted_task(rcuname, gp_seq, pid) do { } while (0)
+#define trace_rcu_quiescent_state_report(rcuname, gp_seq, mask, qsmask, level, \
grplo, grphi, gp_tasks) do { } \
while (0)
-#define trace_rcu_fqs(rcuname, gpnum, cpu, qsevent) do { } while (0)
+#define trace_rcu_fqs(rcuname, gp_seq, cpu, qsevent) do { } while (0)
#define trace_rcu_dyntick(polarity, oldnesting, newnesting, dyntick) do { } while (0)
#define trace_rcu_callback(rcuname, rhp, qlen_lazy, qlen) do { } while (0)
#define trace_rcu_kfree_callback(rcuname, rhp, offset, qlen_lazy, qlen) \
diff --git a/include/trace/events/rxrpc.h b/include/trace/events/rxrpc.h
index 4fff00e9da8a..196587b8f204 100644
--- a/include/trace/events/rxrpc.h
+++ b/include/trace/events/rxrpc.h
@@ -211,18 +211,18 @@ enum rxrpc_congest_change {
rxrpc_cong_saw_nack,
};
-enum rxrpc_tx_fail_trace {
- rxrpc_tx_fail_call_abort,
- rxrpc_tx_fail_call_ack,
- rxrpc_tx_fail_call_data_frag,
- rxrpc_tx_fail_call_data_nofrag,
- rxrpc_tx_fail_call_final_resend,
- rxrpc_tx_fail_conn_abort,
- rxrpc_tx_fail_conn_challenge,
- rxrpc_tx_fail_conn_response,
- rxrpc_tx_fail_reject,
- rxrpc_tx_fail_version_keepalive,
- rxrpc_tx_fail_version_reply,
+enum rxrpc_tx_point {
+ rxrpc_tx_point_call_abort,
+ rxrpc_tx_point_call_ack,
+ rxrpc_tx_point_call_data_frag,
+ rxrpc_tx_point_call_data_nofrag,
+ rxrpc_tx_point_call_final_resend,
+ rxrpc_tx_point_conn_abort,
+ rxrpc_tx_point_rxkad_challenge,
+ rxrpc_tx_point_rxkad_response,
+ rxrpc_tx_point_reject,
+ rxrpc_tx_point_version_keepalive,
+ rxrpc_tx_point_version_reply,
};
#endif /* end __RXRPC_DECLARE_TRACE_ENUMS_ONCE_ONLY */
@@ -396,7 +396,7 @@ enum rxrpc_tx_fail_trace {
#define rxrpc_propose_ack_outcomes \
EM(rxrpc_propose_ack_subsume, " Subsume") \
EM(rxrpc_propose_ack_update, " Update") \
- E_(rxrpc_propose_ack_use, "")
+ E_(rxrpc_propose_ack_use, " New")
#define rxrpc_congest_modes \
EM(RXRPC_CALL_CONGEST_AVOIDANCE, "CongAvoid") \
@@ -452,18 +452,18 @@ enum rxrpc_tx_fail_trace {
EM(RXRPC_CALL_LOCAL_ERROR, "LocalError") \
E_(RXRPC_CALL_NETWORK_ERROR, "NetError")
-#define rxrpc_tx_fail_traces \
- EM(rxrpc_tx_fail_call_abort, "CallAbort") \
- EM(rxrpc_tx_fail_call_ack, "CallAck") \
- EM(rxrpc_tx_fail_call_data_frag, "CallDataFrag") \
- EM(rxrpc_tx_fail_call_data_nofrag, "CallDataNofrag") \
- EM(rxrpc_tx_fail_call_final_resend, "CallFinalResend") \
- EM(rxrpc_tx_fail_conn_abort, "ConnAbort") \
- EM(rxrpc_tx_fail_conn_challenge, "ConnChall") \
- EM(rxrpc_tx_fail_conn_response, "ConnResp") \
- EM(rxrpc_tx_fail_reject, "Reject") \
- EM(rxrpc_tx_fail_version_keepalive, "VerKeepalive") \
- E_(rxrpc_tx_fail_version_reply, "VerReply")
+#define rxrpc_tx_points \
+ EM(rxrpc_tx_point_call_abort, "CallAbort") \
+ EM(rxrpc_tx_point_call_ack, "CallAck") \
+ EM(rxrpc_tx_point_call_data_frag, "CallDataFrag") \
+ EM(rxrpc_tx_point_call_data_nofrag, "CallDataNofrag") \
+ EM(rxrpc_tx_point_call_final_resend, "CallFinalResend") \
+ EM(rxrpc_tx_point_conn_abort, "ConnAbort") \
+ EM(rxrpc_tx_point_reject, "Reject") \
+ EM(rxrpc_tx_point_rxkad_challenge, "RxkadChall") \
+ EM(rxrpc_tx_point_rxkad_response, "RxkadResp") \
+ EM(rxrpc_tx_point_version_keepalive, "VerKeepalive") \
+ E_(rxrpc_tx_point_version_reply, "VerReply")
/*
* Export enum symbols via userspace.
@@ -488,7 +488,7 @@ rxrpc_propose_ack_traces;
rxrpc_propose_ack_outcomes;
rxrpc_congest_modes;
rxrpc_congest_changes;
-rxrpc_tx_fail_traces;
+rxrpc_tx_points;
/*
* Now redefine the EM() and E_() macros to map the enums to the strings that
@@ -801,7 +801,7 @@ TRACE_EVENT(rxrpc_transmit,
);
TRACE_EVENT(rxrpc_rx_data,
- TP_PROTO(struct rxrpc_call *call, rxrpc_seq_t seq,
+ TP_PROTO(unsigned int call, rxrpc_seq_t seq,
rxrpc_serial_t serial, u8 flags, u8 anno),
TP_ARGS(call, seq, serial, flags, anno),
@@ -815,7 +815,7 @@ TRACE_EVENT(rxrpc_rx_data,
),
TP_fast_assign(
- __entry->call = call->debug_id;
+ __entry->call = call;
__entry->seq = seq;
__entry->serial = serial;
__entry->flags = flags;
@@ -918,6 +918,37 @@ TRACE_EVENT(rxrpc_rx_rwind_change,
__entry->wake ? " wake" : "")
);
+TRACE_EVENT(rxrpc_tx_packet,
+ TP_PROTO(unsigned int call_id, struct rxrpc_wire_header *whdr,
+ enum rxrpc_tx_point where),
+
+ TP_ARGS(call_id, whdr, where),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, call )
+ __field(enum rxrpc_tx_point, where )
+ __field_struct(struct rxrpc_wire_header, whdr )
+ ),
+
+ TP_fast_assign(
+ __entry->call = call_id;
+ memcpy(&__entry->whdr, whdr, sizeof(__entry->whdr));
+ ),
+
+ TP_printk("c=%08x %08x:%08x:%08x:%04x %08x %08x %02x %02x %s %s",
+ __entry->call,
+ ntohl(__entry->whdr.epoch),
+ ntohl(__entry->whdr.cid),
+ ntohl(__entry->whdr.callNumber),
+ ntohs(__entry->whdr.serviceId),
+ ntohl(__entry->whdr.serial),
+ ntohl(__entry->whdr.seq),
+ __entry->whdr.type, __entry->whdr.flags,
+ __entry->whdr.type <= 15 ?
+ __print_symbolic(__entry->whdr.type, rxrpc_pkts) : "?UNK",
+ __print_symbolic(__entry->where, rxrpc_tx_points))
+ );
+
TRACE_EVENT(rxrpc_tx_data,
TP_PROTO(struct rxrpc_call *call, rxrpc_seq_t seq,
rxrpc_serial_t serial, u8 flags, bool retrans, bool lose),
@@ -928,6 +959,8 @@ TRACE_EVENT(rxrpc_tx_data,
__field(unsigned int, call )
__field(rxrpc_seq_t, seq )
__field(rxrpc_serial_t, serial )
+ __field(u32, cid )
+ __field(u32, call_id )
__field(u8, flags )
__field(bool, retrans )
__field(bool, lose )
@@ -935,6 +968,8 @@ TRACE_EVENT(rxrpc_tx_data,
TP_fast_assign(
__entry->call = call->debug_id;
+ __entry->cid = call->cid;
+ __entry->call_id = call->call_id;
__entry->seq = seq;
__entry->serial = serial;
__entry->flags = flags;
@@ -942,8 +977,10 @@ TRACE_EVENT(rxrpc_tx_data,
__entry->lose = lose;
),
- TP_printk("c=%08x DATA %08x q=%08x fl=%02x%s%s",
+ TP_printk("c=%08x DATA %08x:%08x %08x q=%08x fl=%02x%s%s",
__entry->call,
+ __entry->cid,
+ __entry->call_id,
__entry->serial,
__entry->seq,
__entry->flags,
@@ -952,7 +989,7 @@ TRACE_EVENT(rxrpc_tx_data,
);
TRACE_EVENT(rxrpc_tx_ack,
- TP_PROTO(struct rxrpc_call *call, rxrpc_serial_t serial,
+ TP_PROTO(unsigned int call, rxrpc_serial_t serial,
rxrpc_seq_t ack_first, rxrpc_serial_t ack_serial,
u8 reason, u8 n_acks),
@@ -968,7 +1005,7 @@ TRACE_EVENT(rxrpc_tx_ack,
),
TP_fast_assign(
- __entry->call = call ? call->debug_id : 0;
+ __entry->call = call;
__entry->serial = serial;
__entry->ack_first = ack_first;
__entry->ack_serial = ack_serial;
@@ -1434,29 +1471,29 @@ TRACE_EVENT(rxrpc_rx_icmp,
TRACE_EVENT(rxrpc_tx_fail,
TP_PROTO(unsigned int debug_id, rxrpc_serial_t serial, int ret,
- enum rxrpc_tx_fail_trace what),
+ enum rxrpc_tx_point where),
- TP_ARGS(debug_id, serial, ret, what),
+ TP_ARGS(debug_id, serial, ret, where),
TP_STRUCT__entry(
__field(unsigned int, debug_id )
__field(rxrpc_serial_t, serial )
__field(int, ret )
- __field(enum rxrpc_tx_fail_trace, what )
+ __field(enum rxrpc_tx_point, where )
),
TP_fast_assign(
__entry->debug_id = debug_id;
__entry->serial = serial;
__entry->ret = ret;
- __entry->what = what;
+ __entry->where = where;
),
TP_printk("c=%08x r=%x ret=%d %s",
__entry->debug_id,
__entry->serial,
__entry->ret,
- __print_symbolic(__entry->what, rxrpc_tx_fail_traces))
+ __print_symbolic(__entry->where, rxrpc_tx_points))
);
TRACE_EVENT(rxrpc_call_reset,
@@ -1491,6 +1528,26 @@ TRACE_EVENT(rxrpc_call_reset,
__entry->tx_seq, __entry->rx_seq)
);
+TRACE_EVENT(rxrpc_notify_socket,
+ TP_PROTO(unsigned int debug_id, rxrpc_serial_t serial),
+
+ TP_ARGS(debug_id, serial),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, debug_id )
+ __field(rxrpc_serial_t, serial )
+ ),
+
+ TP_fast_assign(
+ __entry->debug_id = debug_id;
+ __entry->serial = serial;
+ ),
+
+ TP_printk("c=%08x r=%08x",
+ __entry->debug_id,
+ __entry->serial)
+ );
+
#endif /* _TRACE_RXRPC_H */
/* This part must be outside protection */
diff --git a/include/trace/events/sock.h b/include/trace/events/sock.h
index 3176a3931107..a0c4b8a30966 100644
--- a/include/trace/events/sock.h
+++ b/include/trace/events/sock.h
@@ -35,6 +35,10 @@
EM(TCP_CLOSING) \
EMe(TCP_NEW_SYN_RECV)
+#define skmem_kind_names \
+ EM(SK_MEM_SEND) \
+ EMe(SK_MEM_RECV)
+
/* enums need to be exported to user space */
#undef EM
#undef EMe
@@ -44,6 +48,7 @@
family_names
inet_protocol_names
tcp_state_names
+skmem_kind_names
#undef EM
#undef EMe
@@ -59,6 +64,9 @@ tcp_state_names
#define show_tcp_state_name(val) \
__print_symbolic(val, tcp_state_names)
+#define show_skmem_kind_names(val) \
+ __print_symbolic(val, skmem_kind_names)
+
TRACE_EVENT(sock_rcvqueue_full,
TP_PROTO(struct sock *sk, struct sk_buff *skb),
@@ -83,9 +91,9 @@ TRACE_EVENT(sock_rcvqueue_full,
TRACE_EVENT(sock_exceed_buf_limit,
- TP_PROTO(struct sock *sk, struct proto *prot, long allocated),
+ TP_PROTO(struct sock *sk, struct proto *prot, long allocated, int kind),
- TP_ARGS(sk, prot, allocated),
+ TP_ARGS(sk, prot, allocated, kind),
TP_STRUCT__entry(
__array(char, name, 32)
@@ -93,6 +101,10 @@ TRACE_EVENT(sock_exceed_buf_limit,
__field(long, allocated)
__field(int, sysctl_rmem)
__field(int, rmem_alloc)
+ __field(int, sysctl_wmem)
+ __field(int, wmem_alloc)
+ __field(int, wmem_queued)
+ __field(int, kind)
),
TP_fast_assign(
@@ -101,17 +113,25 @@ TRACE_EVENT(sock_exceed_buf_limit,
__entry->allocated = allocated;
__entry->sysctl_rmem = sk_get_rmem0(sk, prot);
__entry->rmem_alloc = atomic_read(&sk->sk_rmem_alloc);
+ __entry->sysctl_wmem = sk_get_wmem0(sk, prot);
+ __entry->wmem_alloc = refcount_read(&sk->sk_wmem_alloc);
+ __entry->wmem_queued = sk->sk_wmem_queued;
+ __entry->kind = kind;
),
- TP_printk("proto:%s sysctl_mem=%ld,%ld,%ld allocated=%ld "
- "sysctl_rmem=%d rmem_alloc=%d",
+ TP_printk("proto:%s sysctl_mem=%ld,%ld,%ld allocated=%ld sysctl_rmem=%d rmem_alloc=%d sysctl_wmem=%d wmem_alloc=%d wmem_queued=%d kind=%s",
__entry->name,
__entry->sysctl_mem[0],
__entry->sysctl_mem[1],
__entry->sysctl_mem[2],
__entry->allocated,
__entry->sysctl_rmem,
- __entry->rmem_alloc)
+ __entry->rmem_alloc,
+ __entry->sysctl_wmem,
+ __entry->wmem_alloc,
+ __entry->wmem_queued,
+ show_skmem_kind_names(__entry->kind)
+ )
);
TRACE_EVENT(inet_sock_set_state,
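
sock_exceed_buf_limit now also records the write-side counters and which kind of charge tripped the limit, decoded through SK_MEM_SEND/SK_MEM_RECV. A minimal sketch of the updated call as a memory-accounting path would make it; the surrounding suppression logic is elided:

        static void foo_report_limit(struct sock *sk, struct proto *prot,
                                     long allocated, bool sending)
        {
                trace_sock_exceed_buf_limit(sk, prot, allocated,
                                            sending ? SK_MEM_SEND : SK_MEM_RECV);
        }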
diff --git a/include/uapi/asm-generic/socket.h b/include/uapi/asm-generic/socket.h
index 0ae758c90e54..a12692e5f7a8 100644
--- a/include/uapi/asm-generic/socket.h
+++ b/include/uapi/asm-generic/socket.h
@@ -107,4 +107,7 @@
#define SO_ZEROCOPY 60
+#define SO_TXTIME 61
+#define SCM_TXTIME SO_TXTIME
+
#endif /* __ASM_GENERIC_SOCKET_H */
diff --git a/include/uapi/asm-generic/unistd.h b/include/uapi/asm-generic/unistd.h
index 42990676a55e..df4bedb9b01c 100644
--- a/include/uapi/asm-generic/unistd.h
+++ b/include/uapi/asm-generic/unistd.h
@@ -734,9 +734,11 @@ __SYSCALL(__NR_pkey_free, sys_pkey_free)
__SYSCALL(__NR_statx, sys_statx)
#define __NR_io_pgetevents 292
__SC_COMP(__NR_io_pgetevents, sys_io_pgetevents, compat_sys_io_pgetevents)
+#define __NR_rseq 293
+__SYSCALL(__NR_rseq, sys_rseq)
#undef __NR_syscalls
-#define __NR_syscalls 293
+#define __NR_syscalls 294
/*
* 32 bit systems traditionally used different
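
__NR_rseq lands at 293 in the asm-generic table (arch tables use their own numbers). A minimal userspace sketch of registering a thread's rseq area through the raw syscall; struct rseq, its 32-byte alignment requirement and the signature value come from linux/rseq.h and the kernel selftests rather than this hunk, so treat them as assumptions:

        #include <linux/rseq.h>
        #include <sys/syscall.h>
        #include <unistd.h>

        #define FOO_RSEQ_SIG 0x53053053         /* assumed; matches the selftests */

        static __thread struct rseq foo_rseq __attribute__((aligned(32)));

        static int foo_register_rseq(void)
        {
                /* sys_rseq(rseq, rseq_len, flags, sig) */
                return syscall(__NR_rseq, &foo_rseq, sizeof(foo_rseq), 0, FOO_RSEQ_SIG);
        }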
diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h
index 78b4dd89fcb4..1ceec56de015 100644
--- a/include/uapi/drm/amdgpu_drm.h
+++ b/include/uapi/drm/amdgpu_drm.h
@@ -72,6 +72,29 @@ extern "C" {
#define DRM_IOCTL_AMDGPU_FENCE_TO_HANDLE DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_FENCE_TO_HANDLE, union drm_amdgpu_fence_to_handle)
#define DRM_IOCTL_AMDGPU_SCHED DRM_IOW(DRM_COMMAND_BASE + DRM_AMDGPU_SCHED, union drm_amdgpu_sched)
+/**
+ * DOC: memory domains
+ *
+ * %AMDGPU_GEM_DOMAIN_CPU System memory that is not GPU accessible.
+ * Memory in this pool could be swapped out to disk if there is pressure.
+ *
+ * %AMDGPU_GEM_DOMAIN_GTT GPU accessible system memory, mapped into the
+ * GPU's virtual address space via gart. Gart memory linearizes non-contiguous
+ * pages of system memory, allows GPU access system memory in a linezrized
+ * fashion.
+ *
+ * %AMDGPU_GEM_DOMAIN_VRAM Local video memory. For APUs, it is memory
+ * carved out by the BIOS.
+ *
+ * %AMDGPU_GEM_DOMAIN_GDS Global on-chip data storage used to share data
+ * across shader threads.
+ *
+ * %AMDGPU_GEM_DOMAIN_GWS Global wave sync, used to synchronize the
+ * execution of all the waves on a device.
+ *
+ * %AMDGPU_GEM_DOMAIN_OA Ordered append, used by 3D or Compute engines
+ * for appending data.
+ */
#define AMDGPU_GEM_DOMAIN_CPU 0x1
#define AMDGPU_GEM_DOMAIN_GTT 0x2
#define AMDGPU_GEM_DOMAIN_VRAM 0x4
@@ -483,7 +506,8 @@ struct drm_amdgpu_gem_va {
#define AMDGPU_HW_IP_UVD_ENC 5
#define AMDGPU_HW_IP_VCN_DEC 6
#define AMDGPU_HW_IP_VCN_ENC 7
-#define AMDGPU_HW_IP_NUM 8
+#define AMDGPU_HW_IP_VCN_JPEG 8
+#define AMDGPU_HW_IP_NUM 9
#define AMDGPU_HW_IP_INSTANCE_MAX_COUNT 1
@@ -492,6 +516,7 @@ struct drm_amdgpu_gem_va {
#define AMDGPU_CHUNK_ID_DEPENDENCIES 0x03
#define AMDGPU_CHUNK_ID_SYNCOBJ_IN 0x04
#define AMDGPU_CHUNK_ID_SYNCOBJ_OUT 0x05
+#define AMDGPU_CHUNK_ID_BO_HANDLES 0x06
struct drm_amdgpu_cs_chunk {
__u32 chunk_id;
diff --git a/include/uapi/drm/drm.h b/include/uapi/drm/drm.h
index 9c660e1688ab..300f336633f2 100644
--- a/include/uapi/drm/drm.h
+++ b/include/uapi/drm/drm.h
@@ -687,6 +687,15 @@ struct drm_get_cap {
*/
#define DRM_CLIENT_CAP_ASPECT_RATIO 4
+/**
+ * DRM_CLIENT_CAP_WRITEBACK_CONNECTORS
+ *
+ * If set to 1, the DRM core will expose special connectors to be used for
+ * writing back to memory the scene setup in the commit. Depends on the
+ * client also supporting DRM_CLIENT_CAP_ATOMIC.
+ */
+#define DRM_CLIENT_CAP_WRITEBACK_CONNECTORS 5
+
/** DRM_IOCTL_SET_CLIENT_CAP ioctl argument type */
struct drm_set_client_cap {
__u64 capability;
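
DRM_CLIENT_CAP_WRITEBACK_CONNECTORS is only honoured for clients that have already enabled atomic. A minimal userspace sketch enabling both caps through DRM_IOCTL_SET_CLIENT_CAP (libdrm's drmSetClientCap() wraps the same ioctl); the include path depends on whether you build against libdrm or the kernel uapi headers:

        #include <sys/ioctl.h>
        #include <drm/drm.h>

        static int foo_enable_writeback(int drm_fd)
        {
                struct drm_set_client_cap cap = {
                        .capability = DRM_CLIENT_CAP_ATOMIC,
                        .value = 1,
                };

                if (ioctl(drm_fd, DRM_IOCTL_SET_CLIENT_CAP, &cap))
                        return -1;

                cap.capability = DRM_CLIENT_CAP_WRITEBACK_CONNECTORS;
                return ioctl(drm_fd, DRM_IOCTL_SET_CLIENT_CAP, &cap);
        }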
diff --git a/include/uapi/drm/drm_fourcc.h b/include/uapi/drm/drm_fourcc.h
index e04613d30a13..721ab7e54d96 100644
--- a/include/uapi/drm/drm_fourcc.h
+++ b/include/uapi/drm/drm_fourcc.h
@@ -183,6 +183,7 @@ extern "C" {
#define DRM_FORMAT_MOD_VENDOR_QCOM 0x05
#define DRM_FORMAT_MOD_VENDOR_VIVANTE 0x06
#define DRM_FORMAT_MOD_VENDOR_BROADCOM 0x07
+#define DRM_FORMAT_MOD_VENDOR_ARM 0x08
/* add more to the end as needed */
#define DRM_FORMAT_RESERVED ((1ULL << 56) - 1)
@@ -298,6 +299,19 @@ extern "C" {
*/
#define DRM_FORMAT_MOD_SAMSUNG_64_32_TILE fourcc_mod_code(SAMSUNG, 1)
+/*
+ * Qualcomm Compressed Format
+ *
+ * Refers to a compressed variant of the base format.
+ * Implementation may be platform and base-format specific.
+ *
+ * Each macrotile consists of m x n (mostly 4 x 4) tiles.
+ * Pixel data pitch/stride is aligned with macrotile width.
+ * Pixel data height is aligned with macrotile height.
+ * Entire pixel data buffer is aligned with 4k(bytes).
+ */
+#define DRM_FORMAT_MOD_QCOM_COMPRESSED fourcc_mod_code(QCOM, 1)
+
/* Vivante framebuffer modifiers */
/*
@@ -385,6 +399,23 @@ extern "C" {
fourcc_mod_code(NVIDIA, 0x15)
/*
+ * Some Broadcom modifiers take parameters, for example the number of
+ * vertical lines in the image. Reserve the lower 8 bits for the modifier
+ * type, and the next 48 bits for parameters. Top 8 bits are the
+ * vendor code.
+ */
+#define __fourcc_mod_broadcom_param_shift 8
+#define __fourcc_mod_broadcom_param_bits 48
+#define fourcc_mod_broadcom_code(val, params) \
+ fourcc_mod_code(BROADCOM, ((((__u64)params) << __fourcc_mod_broadcom_param_shift) | val))
+#define fourcc_mod_broadcom_param(m) \
+ ((int)(((m) >> __fourcc_mod_broadcom_param_shift) & \
+ ((1ULL << __fourcc_mod_broadcom_param_bits) - 1)))
+#define fourcc_mod_broadcom_mod(m) \
+ ((m) & ~(((1ULL << __fourcc_mod_broadcom_param_bits) - 1) << \
+ __fourcc_mod_broadcom_param_shift))
+
+/*
* Broadcom VC4 "T" format
*
* This is the primary layout that the V3D GPU can texture from (it
@@ -405,6 +436,151 @@ extern "C" {
*/
#define DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED fourcc_mod_code(BROADCOM, 1)
+/*
+ * Broadcom SAND format
+ *
+ * This is the native format that the H.264 codec block uses. For VC4
+ * HVS, it is only valid for H.264 (NV12/21) and RGBA modes.
+ *
+ * The image can be considered to be split into columns, and the
+ * columns are placed consecutively into memory. The width of those
+ * columns can be either 32, 64, 128, or 256 pixels, but in practice
+ * only 128 pixel columns are used.
+ *
+ * The pitch between the start of each column is set to optimally
+ * switch between SDRAM banks. This is passed as the number of lines
+ * of column width in the modifier (we can't use the stride value due
+ * to various core checks that look at it, so you should set the
+ * stride to width*cpp).
+ *
+ * Note that the column height for this format modifier is the same
+ * for all of the planes, assuming that each column contains both Y
+ * and UV. Some SAND-using hardware stores UV in a separate tiled
+ * image from Y to reduce the column height, which is not supported
+ * with these modifiers.
+ */
+
+#define DRM_FORMAT_MOD_BROADCOM_SAND32_COL_HEIGHT(v) \
+ fourcc_mod_broadcom_code(2, v)
+#define DRM_FORMAT_MOD_BROADCOM_SAND64_COL_HEIGHT(v) \
+ fourcc_mod_broadcom_code(3, v)
+#define DRM_FORMAT_MOD_BROADCOM_SAND128_COL_HEIGHT(v) \
+ fourcc_mod_broadcom_code(4, v)
+#define DRM_FORMAT_MOD_BROADCOM_SAND256_COL_HEIGHT(v) \
+ fourcc_mod_broadcom_code(5, v)
+
+#define DRM_FORMAT_MOD_BROADCOM_SAND32 \
+ DRM_FORMAT_MOD_BROADCOM_SAND32_COL_HEIGHT(0)
+#define DRM_FORMAT_MOD_BROADCOM_SAND64 \
+ DRM_FORMAT_MOD_BROADCOM_SAND64_COL_HEIGHT(0)
+#define DRM_FORMAT_MOD_BROADCOM_SAND128 \
+ DRM_FORMAT_MOD_BROADCOM_SAND128_COL_HEIGHT(0)
+#define DRM_FORMAT_MOD_BROADCOM_SAND256 \
+ DRM_FORMAT_MOD_BROADCOM_SAND256_COL_HEIGHT(0)
+
+/* Broadcom UIF format
+ *
+ * This is the common format for the current Broadcom multimedia
+ * blocks, including V3D 3.x and newer, newer video codecs, and
+ * displays.
+ *
+ * The image consists of utiles (64b blocks), UIF blocks (2x2 utiles),
+ * and macroblocks (4x4 UIF blocks). Those 4x4 UIF block groups are
+ * stored in columns, with padding between the columns to ensure that
+ * moving from one column to the next doesn't hit the same SDRAM page
+ * bank.
+ *
+ * To calculate the padding, it is assumed that each hardware block
+ * and the software driving it knows the platform's SDRAM page size,
+ * number of banks, and XOR address, and that it's identical between
+ * all blocks using the format. This tiling modifier will use XOR as
+ * necessary to reduce the padding. If a hardware block can't do XOR,
+ * the assumption is that a no-XOR tiling modifier will be created.
+ */
+#define DRM_FORMAT_MOD_BROADCOM_UIF fourcc_mod_code(BROADCOM, 6)
+
+/*
+ * Arm Framebuffer Compression (AFBC) modifiers
+ *
+ * AFBC is a proprietary lossless image compression protocol and format.
+ * It provides fine-grained random access and minimizes the amount of data
+ * transferred between IP blocks.
+ *
+ * AFBC has several features which may be supported and/or used, which are
+ * represented using bits in the modifier. Not all combinations are valid,
+ * and different devices or use-cases may support different combinations.
+ */
+#define DRM_FORMAT_MOD_ARM_AFBC(__afbc_mode) fourcc_mod_code(ARM, __afbc_mode)
+
+/*
+ * AFBC superblock size
+ *
+ * Indicates the superblock size(s) used for the AFBC buffer. The buffer
+ * size (in pixels) must be aligned to a multiple of the superblock size.
+ * The four least significant bits (LSBs) are reserved for the block size.
+ */
+#define AFBC_FORMAT_MOD_BLOCK_SIZE_MASK 0xf
+#define AFBC_FORMAT_MOD_BLOCK_SIZE_16x16 (1ULL)
+#define AFBC_FORMAT_MOD_BLOCK_SIZE_32x8 (2ULL)
+
+/*
+ * AFBC lossless colorspace transform
+ *
+ * Indicates that the buffer makes use of the AFBC lossless colorspace
+ * transform.
+ */
+#define AFBC_FORMAT_MOD_YTR (1ULL << 4)
+
+/*
+ * AFBC block-split
+ *
+ * Indicates that the payload of each superblock is split. The second
+ * half of the payload is positioned at a predefined offset from the start
+ * of the superblock payload.
+ */
+#define AFBC_FORMAT_MOD_SPLIT (1ULL << 5)
+
+/*
+ * AFBC sparse layout
+ *
+ * This flag indicates that the payload of each superblock must be stored at a
+ * predefined position relative to the other superblocks in the same AFBC
+ * buffer. This order is the same order used by the header buffer. In this mode
+ * each superblock is given the same amount of space as an uncompressed
+ * superblock of the particular format would require, rounding up to the next
+ * multiple of 128 bytes in size.
+ */
+#define AFBC_FORMAT_MOD_SPARSE (1ULL << 6)
+
+/*
+ * AFBC copy-block restrict
+ *
+ * Buffers with this flag must obey the copy-block restriction. The restriction
+ * is such that there are no copy-blocks referring across the border of 8x8
+ * blocks. For the subsampled data the 8x8 limitation is also subsampled.
+ */
+#define AFBC_FORMAT_MOD_CBR (1ULL << 7)
+
+/*
+ * AFBC tiled layout
+ *
+ * The tiled layout groups superblocks in 8x8 or 4x4 tiles, where all
+ * superblocks inside a tile are stored together in memory. 8x8 tiles are used
+ * for pixel formats up to and including 32 bpp while 4x4 tiles are used for
+ * larger bpp formats. The order between the tiles is scan line.
+ * When the tiled layout is used, the buffer size (in pixels) must be aligned
+ * to the tile size.
+ */
+#define AFBC_FORMAT_MOD_TILED (1ULL << 8)
+
+/*
+ * AFBC solid color blocks
+ *
+ * Indicates that the buffer makes use of solid-color blocks, whereby bandwidth
+ * can be reduced if a whole superblock is a single color.
+ */
+#define AFBC_FORMAT_MOD_SC (1ULL << 9)
+
#if defined(__cplusplus)
}
#endif
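
The AFBC bits above combine into a single modifier through DRM_FORMAT_MOD_ARM_AFBC(). A minimal sketch composing one commonly used combination (16x16 superblocks, YTR, sparse layout) for use in, e.g., an ADDFB2 request; which combinations a given device accepts remains driver-specific:

        #include <stdint.h>
        #include <drm/drm_fourcc.h>

        static uint64_t foo_afbc_modifier(void)
        {
                return DRM_FORMAT_MOD_ARM_AFBC(AFBC_FORMAT_MOD_BLOCK_SIZE_16x16 |
                                               AFBC_FORMAT_MOD_YTR |
                                               AFBC_FORMAT_MOD_SPARSE);
        }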
diff --git a/include/uapi/drm/drm_mode.h b/include/uapi/drm/drm_mode.h
index 4b3a1bb58e68..8d67243952f4 100644
--- a/include/uapi/drm/drm_mode.h
+++ b/include/uapi/drm/drm_mode.h
@@ -96,6 +96,13 @@ extern "C" {
#define DRM_MODE_PICTURE_ASPECT_64_27 3
#define DRM_MODE_PICTURE_ASPECT_256_135 4
+/* Content type options */
+#define DRM_MODE_CONTENT_TYPE_NO_DATA 0
+#define DRM_MODE_CONTENT_TYPE_GRAPHICS 1
+#define DRM_MODE_CONTENT_TYPE_PHOTO 2
+#define DRM_MODE_CONTENT_TYPE_CINEMA 3
+#define DRM_MODE_CONTENT_TYPE_GAME 4
+
/* Aspect ratio flag bitmask (4 bits 22:19) */
#define DRM_MODE_FLAG_PIC_AR_MASK (0x0F<<19)
#define DRM_MODE_FLAG_PIC_AR_NONE \
@@ -344,6 +351,7 @@ enum drm_mode_subconnector {
#define DRM_MODE_CONNECTOR_VIRTUAL 15
#define DRM_MODE_CONNECTOR_DSI 16
#define DRM_MODE_CONNECTOR_DPI 17
+#define DRM_MODE_CONNECTOR_WRITEBACK 18
struct drm_mode_get_connector {
diff --git a/include/uapi/drm/vmwgfx_drm.h b/include/uapi/drm/vmwgfx_drm.h
index 0bc784f5e0db..399f58317cff 100644
--- a/include/uapi/drm/vmwgfx_drm.h
+++ b/include/uapi/drm/vmwgfx_drm.h
@@ -40,6 +40,7 @@ extern "C" {
#define DRM_VMW_GET_PARAM 0
#define DRM_VMW_ALLOC_DMABUF 1
+#define DRM_VMW_ALLOC_BO 1
#define DRM_VMW_UNREF_DMABUF 2
#define DRM_VMW_HANDLE_CLOSE 2
#define DRM_VMW_CURSOR_BYPASS 3
@@ -68,6 +69,8 @@ extern "C" {
#define DRM_VMW_GB_SURFACE_REF 24
#define DRM_VMW_SYNCCPU 25
#define DRM_VMW_CREATE_EXTENDED_CONTEXT 26
+#define DRM_VMW_GB_SURFACE_CREATE_EXT 27
+#define DRM_VMW_GB_SURFACE_REF_EXT 28
/*************************************************************************/
/**
@@ -79,6 +82,9 @@ extern "C" {
*
* DRM_VMW_PARAM_OVERLAY_IOCTL:
* Does the driver support the overlay ioctl.
+ *
+ * DRM_VMW_PARAM_SM4_1:
+ * SM4_1 support is enabled.
*/
#define DRM_VMW_PARAM_NUM_STREAMS 0
@@ -94,6 +100,8 @@ extern "C" {
#define DRM_VMW_PARAM_MAX_MOB_SIZE 10
#define DRM_VMW_PARAM_SCREEN_TARGET 11
#define DRM_VMW_PARAM_DX 12
+#define DRM_VMW_PARAM_HW_CAPS2 13
+#define DRM_VMW_PARAM_SM4_1 14
/**
* enum drm_vmw_handle_type - handle type for ref ioctls
@@ -356,9 +364,9 @@ struct drm_vmw_fence_rep {
/*************************************************************************/
/**
- * DRM_VMW_ALLOC_DMABUF
+ * DRM_VMW_ALLOC_BO
*
- * Allocate a DMA buffer that is visible also to the host.
+ * Allocate a buffer object that is visible also to the host.
* NOTE: The buffer is
* identified by a handle and an offset, which are private to the guest, but
* useable in the command stream. The guest kernel may translate these
@@ -366,27 +374,28 @@ struct drm_vmw_fence_rep {
* be zero at all times, or it may disappear from the interface before it is
* fixed.
*
- * The DMA buffer may stay user-space mapped in the guest at all times,
+ * The buffer object may stay user-space mapped in the guest at all times,
* and is thus suitable for sub-allocation.
*
- * DMA buffers are mapped using the mmap() syscall on the drm device.
+ * Buffer objects are mapped using the mmap() syscall on the drm device.
*/
/**
- * struct drm_vmw_alloc_dmabuf_req
+ * struct drm_vmw_alloc_bo_req
*
* @size: Required minimum size of the buffer.
*
- * Input data to the DRM_VMW_ALLOC_DMABUF Ioctl.
+ * Input data to the DRM_VMW_ALLOC_BO Ioctl.
*/
-struct drm_vmw_alloc_dmabuf_req {
+struct drm_vmw_alloc_bo_req {
__u32 size;
__u32 pad64;
};
+#define drm_vmw_alloc_dmabuf_req drm_vmw_alloc_bo_req
/**
- * struct drm_vmw_dmabuf_rep
+ * struct drm_vmw_bo_rep
*
* @map_handle: Offset to use in the mmap() call used to map the buffer.
* @handle: Handle unique to this buffer. Used for unreferencing.
@@ -395,50 +404,32 @@ struct drm_vmw_alloc_dmabuf_req {
* @cur_gmr_offset: Offset to use in the command stream when this buffer is
* referenced. See note above.
*
- * Output data from the DRM_VMW_ALLOC_DMABUF Ioctl.
+ * Output data from the DRM_VMW_ALLOC_BO Ioctl.
*/
-struct drm_vmw_dmabuf_rep {
+struct drm_vmw_bo_rep {
__u64 map_handle;
__u32 handle;
__u32 cur_gmr_id;
__u32 cur_gmr_offset;
__u32 pad64;
};
+#define drm_vmw_dmabuf_rep drm_vmw_bo_rep
/**
- * union drm_vmw_dmabuf_arg
+ * union drm_vmw_alloc_bo_arg
*
* @req: Input data as described above.
* @rep: Output data as described above.
*
- * Argument to the DRM_VMW_ALLOC_DMABUF Ioctl.
+ * Argument to the DRM_VMW_ALLOC_BO Ioctl.
*/
-union drm_vmw_alloc_dmabuf_arg {
- struct drm_vmw_alloc_dmabuf_req req;
- struct drm_vmw_dmabuf_rep rep;
-};
-
-/*************************************************************************/
-/**
- * DRM_VMW_UNREF_DMABUF - Free a DMA buffer.
- *
- */
-
-/**
- * struct drm_vmw_unref_dmabuf_arg
- *
- * @handle: Handle indicating what buffer to free. Obtained from the
- * DRM_VMW_ALLOC_DMABUF Ioctl.
- *
- * Argument to the DRM_VMW_UNREF_DMABUF Ioctl.
- */
-
-struct drm_vmw_unref_dmabuf_arg {
- __u32 handle;
- __u32 pad64;
+union drm_vmw_alloc_bo_arg {
+ struct drm_vmw_alloc_bo_req req;
+ struct drm_vmw_bo_rep rep;
};
+#define drm_vmw_alloc_dmabuf_arg drm_vmw_alloc_bo_arg
/*************************************************************************/
/**
@@ -1103,9 +1094,8 @@ union drm_vmw_extended_context_arg {
* DRM_VMW_HANDLE_CLOSE - Close a user-space handle and release its
* underlying resource.
*
- * Note that this ioctl is overlaid on the DRM_VMW_UNREF_DMABUF Ioctl.
- * The ioctl arguments therefore need to be identical in layout.
- *
+ * Note that this ioctl is overlaid on the deprecated DRM_VMW_UNREF_DMABUF
+ * Ioctl.
*/
/**
@@ -1119,7 +1109,107 @@ struct drm_vmw_handle_close_arg {
__u32 handle;
__u32 pad64;
};
+#define drm_vmw_unref_dmabuf_arg drm_vmw_handle_close_arg
+
+/*************************************************************************/
+/**
+ * DRM_VMW_GB_SURFACE_CREATE_EXT - Create a host guest-backed surface.
+ *
+ * Allocates a surface handle and queues a create surface command
+ * for the host on the first use of the surface. The surface ID can
+ * be used as the surface ID in commands referencing the surface.
+ *
+ * This new command extends DRM_VMW_GB_SURFACE_CREATE by adding version
+ * parameter and 64 bit svga flag.
+ */
+
+/**
+ * enum drm_vmw_surface_version
+ *
+ * @drm_vmw_gb_surface_v1: Corresponds to current gb surface format with
+ * svga3d surface flags split into 2, upper half and lower half.
+ */
+enum drm_vmw_surface_version {
+ drm_vmw_gb_surface_v1
+};
+
+/**
+ * struct drm_vmw_gb_surface_create_ext_req
+ *
+ * @base: Surface create parameters.
+ * @version: Version of surface create ioctl.
+ * @svga3d_flags_upper_32_bits: Upper 32 bits of svga3d flags.
+ * @multisample_pattern: Multisampling pattern when msaa is supported.
+ * @quality_level: Precision settings for each sample.
+ * @must_be_zero: Reserved for future usage.
+ *
+ * Input argument to the DRM_VMW_GB_SURFACE_CREATE_EXT Ioctl.
+ * Part of output argument for the DRM_VMW_GB_SURFACE_REF_EXT Ioctl.
+ */
+struct drm_vmw_gb_surface_create_ext_req {
+ struct drm_vmw_gb_surface_create_req base;
+ enum drm_vmw_surface_version version;
+ uint32_t svga3d_flags_upper_32_bits;
+ SVGA3dMSPattern multisample_pattern;
+ SVGA3dMSQualityLevel quality_level;
+ uint64_t must_be_zero;
+};
+
+/**
+ * union drm_vmw_gb_surface_create_ext_arg
+ *
+ * @req: Input argument as described above.
+ * @rep: Output argument as described above.
+ *
+ * Argument to the DRM_VMW_GB_SURFACE_CREATE_EXT ioctl.
+ */
+union drm_vmw_gb_surface_create_ext_arg {
+ struct drm_vmw_gb_surface_create_rep rep;
+ struct drm_vmw_gb_surface_create_ext_req req;
+};
+
+/*************************************************************************/
+/**
+ * DRM_VMW_GB_SURFACE_REF_EXT - Reference a host surface.
+ *
+ * Puts a reference on a host surface with a given handle, as previously
+ * returned by the DRM_VMW_GB_SURFACE_CREATE_EXT ioctl.
+ * A reference will make sure the surface isn't destroyed while we hold
+ * it and will allow the calling client to use the surface handle in
+ * the command stream.
+ *
+ * On successful return, the Ioctl returns the surface information given
+ * to and returned from the DRM_VMW_GB_SURFACE_CREATE_EXT ioctl.
+ */
+/**
+ * struct drm_vmw_gb_surface_ref_ext_rep
+ *
+ * @creq: The data used as input when the surface was created, as described
+ * above at "struct drm_vmw_gb_surface_create_ext_req"
+ * @crep: Additional data output when the surface was created, as described
+ * above at "struct drm_vmw_gb_surface_create_rep"
+ *
+ * Output Argument to the DRM_VMW_GB_SURFACE_REF_EXT ioctl.
+ */
+struct drm_vmw_gb_surface_ref_ext_rep {
+ struct drm_vmw_gb_surface_create_ext_req creq;
+ struct drm_vmw_gb_surface_create_rep crep;
+};
+
+/**
+ * union drm_vmw_gb_surface_reference_ext_arg
+ *
+ * @req: Input data as described above at "struct drm_vmw_surface_arg"
+ * @rep: Output data as described above at
+ * "struct drm_vmw_gb_surface_ref_ext_rep"
+ *
+ * Argument to the DRM_VMW_GB_SURFACE_REF Ioctl.
+ */
+union drm_vmw_gb_surface_reference_ext_arg {
+ struct drm_vmw_gb_surface_ref_ext_rep rep;
+ struct drm_vmw_surface_arg req;
+};
#if defined(__cplusplus)
}
diff --git a/include/uapi/linux/aio_abi.h b/include/uapi/linux/aio_abi.h
index d00221345c19..ce43d340f010 100644
--- a/include/uapi/linux/aio_abi.h
+++ b/include/uapi/linux/aio_abi.h
@@ -29,7 +29,6 @@
#include <linux/types.h>
#include <linux/fs.h>
-#include <linux/signal.h>
#include <asm/byteorder.h>
typedef __kernel_ulong_t aio_context_t;
@@ -108,10 +107,5 @@ struct iocb {
#undef IFBIG
#undef IFLITTLE
-struct __aio_sigset {
- const sigset_t __user *sigmask;
- size_t sigsetsize;
-};
-
#endif /* __LINUX__AIO_ABI_H */
diff --git a/include/uapi/linux/audit.h b/include/uapi/linux/audit.h
index c35aee9ad4a6..818ae690ab79 100644
--- a/include/uapi/linux/audit.h
+++ b/include/uapi/linux/audit.h
@@ -148,6 +148,7 @@
#define AUDIT_INTEGRITY_PCR 1804 /* PCR invalidation msgs */
#define AUDIT_INTEGRITY_RULE 1805 /* policy rule */
#define AUDIT_INTEGRITY_EVM_XATTR 1806 /* New EVM-covered xattr */
+#define AUDIT_INTEGRITY_POLICY_RULE 1807 /* IMA policy rules */
#define AUDIT_KERNEL 2000 /* Asynchronous audit record. NOT A REQUEST. */
@@ -157,7 +158,8 @@
#define AUDIT_FILTER_ENTRY 0x02 /* Apply rule at syscall entry */
#define AUDIT_FILTER_WATCH 0x03 /* Apply rule to file system watches */
#define AUDIT_FILTER_EXIT 0x04 /* Apply rule at syscall exit */
-#define AUDIT_FILTER_TYPE 0x05 /* Apply rule at audit_log_start */
+#define AUDIT_FILTER_EXCLUDE 0x05 /* Apply rule before record creation */
+#define AUDIT_FILTER_TYPE AUDIT_FILTER_EXCLUDE /* obsolete misleading naming */
#define AUDIT_FILTER_FS 0x06 /* Apply rule at __audit_inode_child */
#define AUDIT_NR_FILTERS 7
diff --git a/include/uapi/linux/bcache.h b/include/uapi/linux/bcache.h
index 821f71a2e48f..8d19e02d752a 100644
--- a/include/uapi/linux/bcache.h
+++ b/include/uapi/linux/bcache.h
@@ -195,7 +195,7 @@ struct cache_sb {
};
};
- __u32 last_mount; /* time_t */
+ __u32 last_mount; /* time overflow in y2106 */
__u16 first_bucket;
union {
@@ -318,7 +318,7 @@ struct uuid_entry {
struct {
__u8 uuid[16];
__u8 label[32];
- __u32 first_reg;
+ __u32 first_reg; /* time overflow in y2106 */
__u32 last_reg;
__u32 invalidated;
diff --git a/include/uapi/linux/blkzoned.h b/include/uapi/linux/blkzoned.h
index e3c70fe6bf0f..ff5a5db8906a 100644
--- a/include/uapi/linux/blkzoned.h
+++ b/include/uapi/linux/blkzoned.h
@@ -117,7 +117,7 @@ struct blk_zone_report {
__u32 nr_zones;
__u8 reserved[4];
struct blk_zone zones[0];
-} __packed;
+};
/**
* struct blk_zone_range - BLKRESETZONE ioctl request
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index 59b19b6a40d7..66917a4eba27 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -75,6 +75,11 @@ struct bpf_lpm_trie_key {
__u8 data[0]; /* Arbitrary size */
};
+struct bpf_cgroup_storage_key {
+ __u64 cgroup_inode_id; /* cgroup inode id */
+ __u32 attach_type; /* program attach type */
+};
+
/* BPF syscall commands, see bpf(2) man-page for details. */
enum bpf_cmd {
BPF_MAP_CREATE,
@@ -120,6 +125,8 @@ enum bpf_map_type {
BPF_MAP_TYPE_CPUMAP,
BPF_MAP_TYPE_XSKMAP,
BPF_MAP_TYPE_SOCKHASH,
+ BPF_MAP_TYPE_CGROUP_STORAGE,
+ BPF_MAP_TYPE_REUSEPORT_SOCKARRAY,
};
enum bpf_prog_type {
@@ -144,6 +151,7 @@ enum bpf_prog_type {
BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
BPF_PROG_TYPE_LWT_SEG6LOCAL,
BPF_PROG_TYPE_LIRC_MODE2,
+ BPF_PROG_TYPE_SK_REUSEPORT,
};
enum bpf_attach_type {
@@ -1371,6 +1379,20 @@ union bpf_attr {
* An 8-byte long non-decreasing number on success, or 0 if the
* socket field is missing inside *skb*.
*
+ * u64 bpf_get_socket_cookie(struct bpf_sock_addr *ctx)
+ * Description
+ * Equivalent to bpf_get_socket_cookie() helper that accepts
+ * *skb*, but gets socket from **struct bpf_sock_addr** context.
+ * Return
+ * An 8-byte long non-decreasing number.
+ *
+ * u64 bpf_get_socket_cookie(struct bpf_sock_ops *ctx)
+ * Description
+ * Equivalent to bpf_get_socket_cookie() helper that accepts
+ * *skb*, but gets socket from **struct bpf_sock_ops** context.
+ * Return
+ * An 8-byte long non-decreasing number.
+ *
* u32 bpf_get_socket_uid(struct sk_buff *skb)
* Return
* The owner UID of the socket associated to *skb*. If the socket
@@ -1826,7 +1848,7 @@ union bpf_attr {
* A non-negative value equal to or less than *size* on success,
* or a negative error in case of failure.
*
- * int skb_load_bytes_relative(const struct sk_buff *skb, u32 offset, void *to, u32 len, u32 start_header)
+ * int bpf_skb_load_bytes_relative(const struct sk_buff *skb, u32 offset, void *to, u32 len, u32 start_header)
* Description
* This helper is similar to **bpf_skb_load_bytes**\ () in that
* it provides an easy way to load *len* bytes from *offset*
@@ -1857,7 +1879,8 @@ union bpf_attr {
* is resolved), the nexthop address is returned in ipv4_dst
* or ipv6_dst based on family, smac is set to mac address of
* egress device, dmac is set to nexthop mac address, rt_metric
- * is set to metric from route (IPv4/IPv6 only).
+ * is set to metric from route (IPv4/IPv6 only), and ifindex
+ * is set to the device index of the nexthop from the FIB lookup.
*
* *plen* argument is the size of the passed in struct.
* *flags* argument can be a combination of one or more of the
@@ -1873,9 +1896,10 @@ union bpf_attr {
* *ctx* is either **struct xdp_md** for XDP programs or
* **struct sk_buff** tc cls_act programs.
* Return
- * Egress device index on success, 0 if packet needs to continue
- * up the stack for further processing or a negative error in case
- * of failure.
+ * * < 0 if any input argument is invalid
+ * * 0 on success (packet is forwarded, nexthop neighbor exists)
+ * * > 0 one of **BPF_FIB_LKUP_RET_** codes explaining why the
+ * packet is not forwarded or needs assistance from the full stack
*
* int bpf_sock_hash_update(struct bpf_sock_ops_kern *skops, struct bpf_map *map, void *key, u64 flags)
* Description
@@ -2031,7 +2055,6 @@ union bpf_attr {
* This helper is only available if the kernel was compiled with
* the **CONFIG_BPF_LIRC_MODE2** configuration option set to
* "**y**".
- *
* Return
* 0
*
@@ -2051,7 +2074,6 @@ union bpf_attr {
* This helper is only available if the kernel was compiled with
* the **CONFIG_BPF_LIRC_MODE2** configuration option set to
* "**y**".
- *
* Return
* 0
*
@@ -2071,10 +2093,54 @@ union bpf_attr {
* Return
* The id is returned or 0 in case the id could not be retrieved.
*
+ * u64 bpf_skb_ancestor_cgroup_id(struct sk_buff *skb, int ancestor_level)
+ * Description
+ * Return the id of the cgroup v2 ancestor, at *ancestor_level*, of
+ * the cgroup associated with the *skb*. The root cgroup is at
+ * *ancestor_level* zero and each step down the hierarchy
+ * increments the level. If *ancestor_level* equals the level of
+ * the cgroup associated with *skb*, the return value is the same
+ * as that of **bpf_skb_cgroup_id**\ ().
+ *
+ * The helper is useful to implement policies based on cgroups
+ * that are higher in the hierarchy than the immediate cgroup
+ * associated with *skb*.
+ *
+ * The format of the returned id and the helper limitations are the
+ * same as in **bpf_skb_cgroup_id**\ ().
+ * Return
+ * The id is returned or 0 in case the id could not be retrieved.
+ *
* u64 bpf_get_current_cgroup_id(void)
* Return
* A 64-bit integer containing the current cgroup id based
* on the cgroup within which the current task is running.
+ *
+ * void *bpf_get_local_storage(void *map, u64 flags)
+ * Description
+ * Get the pointer to the local storage area.
+ * The type and the size of the local storage are defined
+ * by the *map* argument.
+ * The meaning of *flags* is specific to each map type,
+ * and has to be 0 for cgroup local storage.
+ *
+ * Depending on the BPF program type, a local storage area
+ * can be shared between multiple instances of the BPF program,
+ * running simultaneously.
+ *
+ * The user is responsible for synchronization, for example by
+ * using the BPF_STX_XADD instruction to alter the shared data.
+ * Return
+ * Pointer to the local storage area.
+ *
+ * int bpf_sk_select_reuseport(struct sk_reuseport_md *reuse, struct bpf_map *map, void *key, u64 flags)
+ * Description
+ * Select a SO_REUSEPORT socket from a
+ * BPF_MAP_TYPE_REUSEPORT_SOCKARRAY *map*.
+ * It checks that the selected socket matches the incoming
+ * request in the skb.
+ * Return
+ * 0 on success, or a negative error in case of failure.
*/
#define __BPF_FUNC_MAPPER(FN) \
FN(unspec), \
@@ -2157,7 +2223,10 @@ union bpf_attr {
FN(rc_repeat), \
FN(rc_keydown), \
FN(skb_cgroup_id), \
- FN(get_current_cgroup_id),
+ FN(get_current_cgroup_id), \
+ FN(get_local_storage), \
+ FN(sk_select_reuseport), \
+ FN(skb_ancestor_cgroup_id),
/* integer value in 'imm' field of BPF_CALL instruction selects which helper
* function eBPF program intends to call
@@ -2374,6 +2443,30 @@ struct sk_msg_md {
__u32 local_port; /* stored in host byte order */
};
+struct sk_reuseport_md {
+ /*
+ * Start of directly accessible data. It begins from
+ * the tcp/udp header.
+ */
+ void *data;
+ void *data_end; /* End of directly accessible data */
+ /*
+ * Total length of packet (starting from the tcp/udp header).
+ * Note that the directly accessible bytes (data_end - data)
+ * could be less than this "len". Those bytes could be
+ * indirectly read by a helper "bpf_skb_load_bytes()".
+ */
+ __u32 len;
+ /*
+ * Eth protocol in the mac header (network byte order). e.g.
+ * ETH_P_IP(0x0800) and ETH_P_IPV6(0x86DD)
+ */
+ __u32 eth_protocol;
+ __u32 ip_protocol; /* IP protocol. e.g. IPPROTO_TCP, IPPROTO_UDP */
+ __u32 bind_inany; /* Is sock bound to an INANY address? */
+ __u32 hash; /* A hash of the packet 4 tuples */
+};
+
#define BPF_TAG_SIZE 8
struct bpf_prog_info {
@@ -2555,6 +2648,9 @@ enum {
* Arg1: old_state
* Arg2: new_state
*/
+ BPF_SOCK_OPS_TCP_LISTEN_CB, /* Called on listen(2), right after
+ * socket transition to LISTEN state.
+ */
};
/* List of TCP states. There is a build check in net/ipv4/tcp.c to detect
@@ -2612,6 +2708,18 @@ struct bpf_raw_tracepoint_args {
#define BPF_FIB_LOOKUP_DIRECT BIT(0)
#define BPF_FIB_LOOKUP_OUTPUT BIT(1)
+enum {
+ BPF_FIB_LKUP_RET_SUCCESS, /* lookup successful */
+ BPF_FIB_LKUP_RET_BLACKHOLE, /* dest is blackholed; can be dropped */
+ BPF_FIB_LKUP_RET_UNREACHABLE, /* dest is unreachable; can be dropped */
+ BPF_FIB_LKUP_RET_PROHIBIT, /* dest not allowed; can be dropped */
+ BPF_FIB_LKUP_RET_NOT_FWDED, /* packet is not forwarded */
+ BPF_FIB_LKUP_RET_FWD_DISABLED, /* fwding is not enabled on ingress */
+ BPF_FIB_LKUP_RET_UNSUPP_LWT, /* fwd requires encapsulation */
+ BPF_FIB_LKUP_RET_NO_NEIGH, /* no neighbor entry for nh */
+ BPF_FIB_LKUP_RET_FRAG_NEEDED, /* fragmentation required to fwd */
+};
+
struct bpf_fib_lookup {
/* input: network family for lookup (AF_INET, AF_INET6)
* output: network family of egress nexthop
@@ -2625,7 +2733,11 @@ struct bpf_fib_lookup {
/* total length of packet from network header - used for MTU check */
__u16 tot_len;
- __u32 ifindex; /* L3 device index for lookup */
+
+ /* input: L3 device index for lookup
+ * output: device index from FIB lookup
+ */
+ __u32 ifindex;
union {
/* inputs to lookup */
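
A hedged sketch of how the reworked bpf_fib_lookup() return convention and the new BPF_FIB_LKUP_RET_* codes above are meant to be consumed by an XDP program. It assumes a bpf_helpers.h (from libbpf or the kernel selftests) providing SEC() and the helper declarations; the program body is illustrative, not part of this patch:

#include <linux/bpf.h>
#include "bpf_helpers.h"

SEC("xdp")
int xdp_fib_fwd(struct xdp_md *ctx)
{
	struct bpf_fib_lookup fib = {};
	int rc;

	fib.ifindex = ctx->ingress_ifindex;	/* input: L3 device for the lookup */
	/* ... family, addresses and tot_len would be filled from the packet ... */

	rc = bpf_fib_lookup(ctx, &fib, sizeof(fib), 0);
	if (rc < 0)				/* invalid input argument */
		return XDP_ABORTED;
	if (rc == BPF_FIB_LKUP_RET_SUCCESS)	/* fib.ifindex is now the egress device */
		return bpf_redirect(fib.ifindex, 0);
	return XDP_PASS;			/* let the full stack handle the rest */
}

char _license[] SEC("license") = "GPL";
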
diff --git a/include/uapi/linux/btf.h b/include/uapi/linux/btf.h
index 0b5ddbe135a4..972265f32871 100644
--- a/include/uapi/linux/btf.h
+++ b/include/uapi/linux/btf.h
@@ -76,7 +76,7 @@ struct btf_type {
*/
#define BTF_INT_ENCODING(VAL) (((VAL) & 0x0f000000) >> 24)
#define BTF_INT_OFFSET(VAL) (((VAL & 0x00ff0000)) >> 16)
-#define BTF_INT_BITS(VAL) ((VAL) & 0x0000ffff)
+#define BTF_INT_BITS(VAL) ((VAL) & 0x000000ff)
/* Attributes stored in the BTF_INT_ENCODING */
#define BTF_INT_SIGNED (1 << 0)
diff --git a/include/uapi/linux/can.h b/include/uapi/linux/can.h
index d7f97ac197a9..0afb7d8e867f 100644
--- a/include/uapi/linux/can.h
+++ b/include/uapi/linux/can.h
@@ -77,7 +77,7 @@ typedef __u32 canid_t;
/*
* Controller Area Network Error Message Frame Mask structure
*
- * bit 0-28 : error class mask (see include/linux/can/error.h)
+ * bit 0-28 : error class mask (see include/uapi/linux/can/error.h)
* bit 29-31 : set to zero
*/
typedef __u32 can_err_mask_t;
diff --git a/include/uapi/linux/cec.h b/include/uapi/linux/cec.h
index 20fe091b7e96..097fcd812471 100644
--- a/include/uapi/linux/cec.h
+++ b/include/uapi/linux/cec.h
@@ -384,6 +384,8 @@ struct cec_log_addrs {
#define CEC_EVENT_PIN_CEC_HIGH 4
#define CEC_EVENT_PIN_HPD_LOW 5
#define CEC_EVENT_PIN_HPD_HIGH 6
+#define CEC_EVENT_PIN_5V_LOW 7
+#define CEC_EVENT_PIN_5V_HIGH 8
#define CEC_EVENT_FL_INITIAL_STATE (1 << 0)
#define CEC_EVENT_FL_DROPPED_EVENTS (1 << 1)
diff --git a/include/uapi/linux/dcbnl.h b/include/uapi/linux/dcbnl.h
index 60aa2e446698..69df19aa8e72 100644
--- a/include/uapi/linux/dcbnl.h
+++ b/include/uapi/linux/dcbnl.h
@@ -233,7 +233,8 @@ struct cee_pfc {
* 2 Well known port number over TCP or SCTP
* 3 Well known port number over UDP or DCCP
* 4 Well known port number over TCP, SCTP, UDP, or DCCP
- * 5-7 Reserved
+ * 5 Differentiated Services Code Point (DSCP) value
+ * 6-7 Reserved
*
* Selector field values for CEE
* 0 Ethertype
diff --git a/include/uapi/linux/devlink.h b/include/uapi/linux/devlink.h
index 75cb5450c851..79407bbd296d 100644
--- a/include/uapi/linux/devlink.h
+++ b/include/uapi/linux/devlink.h
@@ -78,6 +78,17 @@ enum devlink_command {
*/
DEVLINK_CMD_RELOAD,
+ DEVLINK_CMD_PARAM_GET, /* can dump */
+ DEVLINK_CMD_PARAM_SET,
+ DEVLINK_CMD_PARAM_NEW,
+ DEVLINK_CMD_PARAM_DEL,
+
+ DEVLINK_CMD_REGION_GET,
+ DEVLINK_CMD_REGION_SET,
+ DEVLINK_CMD_REGION_NEW,
+ DEVLINK_CMD_REGION_DEL,
+ DEVLINK_CMD_REGION_READ,
+
/* add new commands above here */
__DEVLINK_CMD_MAX,
DEVLINK_CMD_MAX = __DEVLINK_CMD_MAX - 1
@@ -142,6 +153,16 @@ enum devlink_port_flavour {
*/
};
+enum devlink_param_cmode {
+ DEVLINK_PARAM_CMODE_RUNTIME,
+ DEVLINK_PARAM_CMODE_DRIVERINIT,
+ DEVLINK_PARAM_CMODE_PERMANENT,
+
+ /* Add new configuration modes above */
+ __DEVLINK_PARAM_CMODE_MAX,
+ DEVLINK_PARAM_CMODE_MAX = __DEVLINK_PARAM_CMODE_MAX - 1
+};
+
enum devlink_attr {
/* don't change the order or add anything between, this is ABI! */
DEVLINK_ATTR_UNSPEC,
@@ -238,6 +259,27 @@ enum devlink_attr {
DEVLINK_ATTR_PORT_NUMBER, /* u32 */
DEVLINK_ATTR_PORT_SPLIT_SUBPORT_NUMBER, /* u32 */
+ DEVLINK_ATTR_PARAM, /* nested */
+ DEVLINK_ATTR_PARAM_NAME, /* string */
+ DEVLINK_ATTR_PARAM_GENERIC, /* flag */
+ DEVLINK_ATTR_PARAM_TYPE, /* u8 */
+ DEVLINK_ATTR_PARAM_VALUES_LIST, /* nested */
+ DEVLINK_ATTR_PARAM_VALUE, /* nested */
+ DEVLINK_ATTR_PARAM_VALUE_DATA, /* dynamic */
+ DEVLINK_ATTR_PARAM_VALUE_CMODE, /* u8 */
+
+ DEVLINK_ATTR_REGION_NAME, /* string */
+ DEVLINK_ATTR_REGION_SIZE, /* u64 */
+ DEVLINK_ATTR_REGION_SNAPSHOTS, /* nested */
+ DEVLINK_ATTR_REGION_SNAPSHOT, /* nested */
+ DEVLINK_ATTR_REGION_SNAPSHOT_ID, /* u32 */
+
+ DEVLINK_ATTR_REGION_CHUNKS, /* nested */
+ DEVLINK_ATTR_REGION_CHUNK, /* nested */
+ DEVLINK_ATTR_REGION_CHUNK_DATA, /* binary */
+ DEVLINK_ATTR_REGION_CHUNK_ADDR, /* u64 */
+ DEVLINK_ATTR_REGION_CHUNK_LEN, /* u64 */
+
/* add new attributes above here, update the policy in devlink.c */
__DEVLINK_ATTR_MAX,
diff --git a/include/uapi/linux/dvb/audio.h b/include/uapi/linux/dvb/audio.h
index 69f7a85d81b1..afeae063e640 100644
--- a/include/uapi/linux/dvb/audio.h
+++ b/include/uapi/linux/dvb/audio.h
@@ -67,27 +67,6 @@ typedef struct audio_status {
} audio_status_t; /* separate decoder hardware */
-typedef
-struct audio_karaoke { /* if Vocal1 or Vocal2 are non-zero, they get mixed */
- int vocal1; /* into left and right t at 70% each */
- int vocal2; /* if both, Vocal1 and Vocal2 are non-zero, Vocal1 gets*/
- int melody; /* mixed into the left channel and */
- /* Vocal2 into the right channel at 100% each. */
- /* if Melody is non-zero, the melody channel gets mixed*/
-} audio_karaoke_t; /* into left and right */
-
-
-typedef __u16 audio_attributes_t;
-/* bits: descr. */
-/* 15-13 audio coding mode (0=ac3, 2=mpeg1, 3=mpeg2ext, 4=LPCM, 6=DTS, */
-/* 12 multichannel extension */
-/* 11-10 audio type (0=not spec, 1=language included) */
-/* 9- 8 audio application mode (0=not spec, 1=karaoke, 2=surround) */
-/* 7- 6 Quantization / DRC (mpeg audio: 1=DRC exists)(lpcm: 0=16bit, */
-/* 5- 4 Sample frequency fs (0=48kHz, 1=96kHz) */
-/* 2- 0 number of audio channels (n+1 channels) */
-
-
/* for GET_CAPABILITIES and SET_FORMAT, the latter should only set one bit */
#define AUDIO_CAP_DTS 1
#define AUDIO_CAP_LPCM 2
@@ -115,22 +94,6 @@ typedef __u16 audio_attributes_t;
#define AUDIO_SET_ID _IO('o', 13)
#define AUDIO_SET_MIXER _IOW('o', 14, audio_mixer_t)
#define AUDIO_SET_STREAMTYPE _IO('o', 15)
-#define AUDIO_SET_EXT_ID _IO('o', 16)
-#define AUDIO_SET_ATTRIBUTES _IOW('o', 17, audio_attributes_t)
-#define AUDIO_SET_KARAOKE _IOW('o', 18, audio_karaoke_t)
-
-/**
- * AUDIO_GET_PTS
- *
- * Read the 33 bit presentation time stamp as defined
- * in ITU T-REC-H.222.0 / ISO/IEC 13818-1.
- *
- * The PTS should belong to the currently played
- * frame if possible, but may also be a value close to it
- * like the PTS of the last decoded frame or the last PTS
- * extracted by the PES parser.
- */
-#define AUDIO_GET_PTS _IOR('o', 19, __u64)
#define AUDIO_BILINGUAL_CHANNEL_SELECT _IO('o', 20)
#endif /* _DVBAUDIO_H_ */
diff --git a/include/uapi/linux/dvb/video.h b/include/uapi/linux/dvb/video.h
index df3d7028c807..43ba8b0a3d14 100644
--- a/include/uapi/linux/dvb/video.h
+++ b/include/uapi/linux/dvb/video.h
@@ -38,18 +38,6 @@ typedef enum {
typedef enum {
- VIDEO_SYSTEM_PAL,
- VIDEO_SYSTEM_NTSC,
- VIDEO_SYSTEM_PALN,
- VIDEO_SYSTEM_PALNc,
- VIDEO_SYSTEM_PALM,
- VIDEO_SYSTEM_NTSC60,
- VIDEO_SYSTEM_PAL60,
- VIDEO_SYSTEM_PALM60
-} video_system_t;
-
-
-typedef enum {
VIDEO_PAN_SCAN, /* use pan and scan format */
VIDEO_LETTER_BOX, /* use letterbox format */
VIDEO_CENTER_CUT_OUT /* use center cut out format */
@@ -160,44 +148,6 @@ struct video_still_picture {
};
-typedef
-struct video_highlight {
- int active; /* 1=show highlight, 0=hide highlight */
- __u8 contrast1; /* 7- 4 Pattern pixel contrast */
- /* 3- 0 Background pixel contrast */
- __u8 contrast2; /* 7- 4 Emphasis pixel-2 contrast */
- /* 3- 0 Emphasis pixel-1 contrast */
- __u8 color1; /* 7- 4 Pattern pixel color */
- /* 3- 0 Background pixel color */
- __u8 color2; /* 7- 4 Emphasis pixel-2 color */
- /* 3- 0 Emphasis pixel-1 color */
- __u32 ypos; /* 23-22 auto action mode */
- /* 21-12 start y */
- /* 9- 0 end y */
- __u32 xpos; /* 23-22 button color number */
- /* 21-12 start x */
- /* 9- 0 end x */
-} video_highlight_t;
-
-
-typedef struct video_spu {
- int active;
- int stream_id;
-} video_spu_t;
-
-
-typedef struct video_spu_palette { /* SPU Palette information */
- int length;
- __u8 __user *palette;
-} video_spu_palette_t;
-
-
-typedef struct video_navi_pack {
- int length; /* 0 ... 1024 */
- __u8 data[1024];
-} video_navi_pack_t;
-
-
typedef __u16 video_attributes_t;
/* bits: descr. */
/* 15-14 Video compression mode (0=MPEG-1, 1=MPEG-2) */
@@ -242,17 +192,9 @@ typedef __u16 video_attributes_t;
#define VIDEO_SLOWMOTION _IO('o', 32)
#define VIDEO_GET_CAPABILITIES _IOR('o', 33, unsigned int)
#define VIDEO_CLEAR_BUFFER _IO('o', 34)
-#define VIDEO_SET_ID _IO('o', 35)
#define VIDEO_SET_STREAMTYPE _IO('o', 36)
#define VIDEO_SET_FORMAT _IO('o', 37)
-#define VIDEO_SET_SYSTEM _IO('o', 38)
-#define VIDEO_SET_HIGHLIGHT _IOW('o', 39, video_highlight_t)
-#define VIDEO_SET_SPU _IOW('o', 50, video_spu_t)
-#define VIDEO_SET_SPU_PALETTE _IOW('o', 51, video_spu_palette_t)
-#define VIDEO_GET_NAVI _IOR('o', 52, video_navi_pack_t)
-#define VIDEO_SET_ATTRIBUTES _IO('o', 53)
#define VIDEO_GET_SIZE _IOR('o', 55, video_size_t)
-#define VIDEO_GET_FRAME_RATE _IOR('o', 56, unsigned int)
/**
* VIDEO_GET_PTS
diff --git a/include/uapi/linux/elf.h b/include/uapi/linux/elf.h
index 4e12c423b9fe..c5358e0ae7c5 100644
--- a/include/uapi/linux/elf.h
+++ b/include/uapi/linux/elf.h
@@ -422,6 +422,8 @@ typedef struct elf64_shdr {
#define NT_ARM_SVE 0x405 /* ARM Scalable Vector Extension registers */
#define NT_ARC_V2 0x600 /* ARCv2 accumulator/extra registers */
#define NT_VMCOREDD 0x700 /* Vmcore Device Dump Note */
+#define NT_MIPS_DSP 0x800 /* MIPS DSP ASE registers */
+#define NT_MIPS_FP_MODE 0x801 /* MIPS floating-point mode */
/* Note header in a PT_NOTE section */
typedef struct elf32_note {
diff --git a/include/uapi/linux/errqueue.h b/include/uapi/linux/errqueue.h
index dc64cfaf13da..c0151200f7d1 100644
--- a/include/uapi/linux/errqueue.h
+++ b/include/uapi/linux/errqueue.h
@@ -20,12 +20,16 @@ struct sock_extended_err {
#define SO_EE_ORIGIN_ICMP6 3
#define SO_EE_ORIGIN_TXSTATUS 4
#define SO_EE_ORIGIN_ZEROCOPY 5
+#define SO_EE_ORIGIN_TXTIME 6
#define SO_EE_ORIGIN_TIMESTAMPING SO_EE_ORIGIN_TXSTATUS
#define SO_EE_OFFENDER(ee) ((struct sockaddr*)((ee)+1))
#define SO_EE_CODE_ZEROCOPY_COPIED 1
+#define SO_EE_CODE_TXTIME_INVALID_PARAM 1
+#define SO_EE_CODE_TXTIME_MISSED 2
+
/**
* struct scm_timestamping - timestamps exposed through cmsg
*
diff --git a/include/uapi/linux/ethtool.h b/include/uapi/linux/ethtool.h
index 4ca65b56084f..dc69391d2bba 100644
--- a/include/uapi/linux/ethtool.h
+++ b/include/uapi/linux/ethtool.h
@@ -226,7 +226,7 @@ enum tunable_id {
ETHTOOL_TX_COPYBREAK,
ETHTOOL_PFC_PREVENTION_TOUT, /* timeout in msecs */
/*
- * Add your fresh new tubale attribute above and remember to update
+ * Add your fresh new tunable attribute above and remember to update
* tunable_strings[] in net/core/ethtool.c
*/
__ETHTOOL_TUNABLE_COUNT,
@@ -870,7 +870,8 @@ struct ethtool_flow_ext {
* includes the %FLOW_EXT or %FLOW_MAC_EXT flag
* (see &struct ethtool_flow_ext description).
* @ring_cookie: RX ring/queue index to deliver to, or %RX_CLS_FLOW_DISC
- * if packets should be discarded
+ * if packets should be discarded, or %RX_CLS_FLOW_WAKE if the
+ * packets should be used for Wake-on-LAN with %WAKE_FILTER
* @location: Location of rule in the table. Locations must be
* numbered such that a flow matching multiple rules will be
* classified according to the first (lowest numbered) rule.
@@ -902,13 +903,13 @@ struct ethtool_rx_flow_spec {
static inline __u64 ethtool_get_flow_spec_ring(__u64 ring_cookie)
{
return ETHTOOL_RX_FLOW_SPEC_RING & ring_cookie;
-};
+}
static inline __u64 ethtool_get_flow_spec_ring_vf(__u64 ring_cookie)
{
return (ETHTOOL_RX_FLOW_SPEC_RING_VF & ring_cookie) >>
ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
-};
+}
/**
* struct ethtool_rxnfc - command to get or set RX flow classification rules
@@ -1634,6 +1635,7 @@ static inline int ethtool_validate_duplex(__u8 duplex)
#define WAKE_ARP (1 << 4)
#define WAKE_MAGIC (1 << 5)
#define WAKE_MAGICSECURE (1 << 6) /* only meaningful if WAKE_MAGIC */
+#define WAKE_FILTER (1 << 7)
/* L2-L4 network traffic flow types */
#define TCP_V4_FLOW 0x01 /* hash or spec (tcp_ip4_spec) */
@@ -1671,6 +1673,7 @@ static inline int ethtool_validate_duplex(__u8 duplex)
#define RXH_DISCARD (1 << 31)
#define RX_CLS_FLOW_DISC 0xffffffffffffffffULL
+#define RX_CLS_FLOW_WAKE 0xfffffffffffffffeULL
/* Special RX classification rule insert location values */
#define RX_CLS_LOC_SPECIAL 0x80000000 /* flag */
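
To tie the new WAKE_FILTER bit and the RX_CLS_FLOW_WAKE ring cookie together, here is a hedged userspace sketch of enabling filter-based Wake-on-LAN through the existing SIOCETHTOOL interface; the matching ntuple rules would then be installed with their ring_cookie set to RX_CLS_FLOW_WAKE. Illustrative only, not part of the patch:

#include <string.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

static int enable_wake_filter(int sock, const char *ifname)
{
	struct ethtool_wolinfo wol = {
		.cmd = ETHTOOL_SWOL,
		.wolopts = WAKE_FILTER,	/* device must advertise it in .supported */
	};
	struct ifreq ifr;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (char *)&wol;
	return ioctl(sock, SIOCETHTOOL, &ifr);
}
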
diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h
index cf01b6824244..43391e2d1153 100644
--- a/include/uapi/linux/if_link.h
+++ b/include/uapi/linux/if_link.h
@@ -164,6 +164,8 @@ enum {
IFLA_CARRIER_UP_COUNT,
IFLA_CARRIER_DOWN_COUNT,
IFLA_NEW_IFINDEX,
+ IFLA_MIN_MTU,
+ IFLA_MAX_MTU,
__IFLA_MAX
};
@@ -334,6 +336,7 @@ enum {
IFLA_BRPORT_GROUP_FWD_MASK,
IFLA_BRPORT_NEIGH_SUPPRESS,
IFLA_BRPORT_ISOLATED,
+ IFLA_BRPORT_BACKUP_PORT,
__IFLA_BRPORT_MAX
};
#define IFLA_BRPORT_MAX (__IFLA_BRPORT_MAX - 1)
@@ -459,6 +462,16 @@ enum {
#define IFLA_MACSEC_MAX (__IFLA_MACSEC_MAX - 1)
+/* XFRM section */
+enum {
+ IFLA_XFRM_UNSPEC,
+ IFLA_XFRM_LINK,
+ IFLA_XFRM_IF_ID,
+ __IFLA_XFRM_MAX
+};
+
+#define IFLA_XFRM_MAX (__IFLA_XFRM_MAX - 1)
+
enum macsec_validation_type {
MACSEC_VALIDATE_DISABLED = 0,
MACSEC_VALIDATE_CHECK = 1,
@@ -920,6 +933,7 @@ enum {
XDP_ATTACHED_DRV,
XDP_ATTACHED_SKB,
XDP_ATTACHED_HW,
+ XDP_ATTACHED_MULTI,
};
enum {
@@ -928,6 +942,9 @@ enum {
IFLA_XDP_ATTACHED,
IFLA_XDP_FLAGS,
IFLA_XDP_PROG_ID,
+ IFLA_XDP_DRV_PROG_ID,
+ IFLA_XDP_SKB_PROG_ID,
+ IFLA_XDP_HW_PROG_ID,
__IFLA_XDP_MAX,
};
diff --git a/include/uapi/linux/ila.h b/include/uapi/linux/ila.h
index 483b77af4eb8..db45d3e49a12 100644
--- a/include/uapi/linux/ila.h
+++ b/include/uapi/linux/ila.h
@@ -30,6 +30,7 @@ enum {
ILA_CMD_ADD,
ILA_CMD_DEL,
ILA_CMD_GET,
+ ILA_CMD_FLUSH,
__ILA_CMD_MAX,
};
diff --git a/include/uapi/linux/ip.h b/include/uapi/linux/ip.h
index b24a742beae5..e42d13b55cf3 100644
--- a/include/uapi/linux/ip.h
+++ b/include/uapi/linux/ip.h
@@ -168,6 +168,7 @@ enum
IPV4_DEVCONF_IGNORE_ROUTES_WITH_LINKDOWN,
IPV4_DEVCONF_DROP_UNICAST_IN_L2_MULTICAST,
IPV4_DEVCONF_DROP_GRATUITOUS_ARP,
+ IPV4_DEVCONF_BC_FORWARDING,
__IPV4_DEVCONF_MAX
};
diff --git a/include/uapi/linux/kfd_ioctl.h b/include/uapi/linux/kfd_ioctl.h
index b4f5073dbac2..01674b56e14f 100644
--- a/include/uapi/linux/kfd_ioctl.h
+++ b/include/uapi/linux/kfd_ioctl.h
@@ -76,6 +76,12 @@ struct kfd_ioctl_update_queue_args {
__u32 queue_priority; /* to KFD */
};
+struct kfd_ioctl_set_cu_mask_args {
+ __u32 queue_id; /* to KFD */
+ __u32 num_cu_mask; /* to KFD */
+ __u64 cu_mask_ptr; /* to KFD */
+};
+
/* For kfd_ioctl_set_memory_policy_args.default_policy and alternate_policy */
#define KFD_IOC_CACHE_POLICY_COHERENT 0
#define KFD_IOC_CACHE_POLICY_NONCOHERENT 1
@@ -189,6 +195,15 @@ struct kfd_ioctl_dbg_wave_control_args {
#define KFD_SIGNAL_EVENT_LIMIT 4096
+/* For kfd_event_data.hw_exception_data.reset_type. */
+#define KFD_HW_EXCEPTION_WHOLE_GPU_RESET 0
+#define KFD_HW_EXCEPTION_PER_ENGINE_RESET 1
+
+/* For kfd_event_data.hw_exception_data.reset_cause. */
+#define KFD_HW_EXCEPTION_GPU_HANG 0
+#define KFD_HW_EXCEPTION_ECC 1
+
+
struct kfd_ioctl_create_event_args {
__u64 event_page_offset; /* from KFD */
__u32 event_trigger_data; /* from KFD - signal events only */
@@ -219,7 +234,7 @@ struct kfd_memory_exception_failure {
__u32 NotPresent; /* Page not present or supervisor privilege */
__u32 ReadOnly; /* Write access to a read-only page */
__u32 NoExecute; /* Execute access to a page marked NX */
- __u32 pad;
+ __u32 imprecise; /* Can't determine the exact fault address */
};
/* memory exception data*/
@@ -230,10 +245,19 @@ struct kfd_hsa_memory_exception_data {
__u32 pad;
};
-/* Event data*/
+/* hw exception data */
+struct kfd_hsa_hw_exception_data {
+ uint32_t reset_type;
+ uint32_t reset_cause;
+ uint32_t memory_lost;
+ uint32_t gpu_id;
+};
+
+/* Event data */
struct kfd_event_data {
union {
struct kfd_hsa_memory_exception_data memory_exception_data;
+ struct kfd_hsa_hw_exception_data hw_exception_data;
}; /* From KFD */
__u64 kfd_event_data_ext; /* pointer to an extension structure
for future exception types */
@@ -448,7 +472,10 @@ struct kfd_ioctl_unmap_memory_from_gpu_args {
#define AMDKFD_IOC_UNMAP_MEMORY_FROM_GPU \
AMDKFD_IOWR(0x19, struct kfd_ioctl_unmap_memory_from_gpu_args)
+#define AMDKFD_IOC_SET_CU_MASK \
+ AMDKFD_IOW(0x1A, struct kfd_ioctl_set_cu_mask_args)
+
#define AMDKFD_COMMAND_START 0x01
-#define AMDKFD_COMMAND_END 0x1A
+#define AMDKFD_COMMAND_END 0x1B
#endif
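
A hedged userspace sketch of the new AMDKFD_IOC_SET_CU_MASK ioctl, assuming num_cu_mask counts mask bits (32 per __u32 word) as the struct layout above suggests; the queue id and the CU bitmap come from the caller:

#include <stdint.h>
#include <sys/ioctl.h>
#include "kfd_ioctl.h"

static int kfd_set_cu_mask(int kfd_fd, uint32_t queue_id,
			   const uint32_t *cu_mask, uint32_t nbits)
{
	struct kfd_ioctl_set_cu_mask_args args = {
		.queue_id    = queue_id,
		.num_cu_mask = nbits,			/* assumed: number of mask bits */
		.cu_mask_ptr = (uintptr_t)cu_mask,	/* user pointer to the bitmap */
	};

	return ioctl(kfd_fd, AMDKFD_IOC_SET_CU_MASK, &args);
}
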
diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
index b6270a3b38e9..b955b986b341 100644
--- a/include/uapi/linux/kvm.h
+++ b/include/uapi/linux/kvm.h
@@ -949,6 +949,7 @@ struct kvm_ppc_resize_hpt {
#define KVM_CAP_GET_MSR_FEATURES 153
#define KVM_CAP_HYPERV_EVENTFD 154
#define KVM_CAP_HYPERV_TLBFLUSH 155
+#define KVM_CAP_S390_HPAGE_1M 156
#ifdef KVM_CAP_IRQ_ROUTING
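
A hedged VMM-side sketch of probing and enabling the new KVM_CAP_S390_HPAGE_1M capability with the long-standing KVM_CHECK_EXTENSION and KVM_ENABLE_CAP ioctls; per its documentation the capability generally has to be enabled before guest memory is registered. Illustrative only:

#include <sys/ioctl.h>
#include <linux/kvm.h>

static int enable_s390_hpage_1m(int vm_fd)
{
	struct kvm_enable_cap cap = { .cap = KVM_CAP_S390_HPAGE_1M };

	if (ioctl(vm_fd, KVM_CHECK_EXTENSION, KVM_CAP_S390_HPAGE_1M) <= 0)
		return -1;	/* not supported by this kernel/host */
	return ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
}
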
diff --git a/include/uapi/linux/l2tp.h b/include/uapi/linux/l2tp.h
index 7d570c7bd117..61158f5a1a5b 100644
--- a/include/uapi/linux/l2tp.h
+++ b/include/uapi/linux/l2tp.h
@@ -60,14 +60,14 @@ struct sockaddr_l2tpip6 {
/*
* Commands.
* Valid TLVs of each command are:-
- * TUNNEL_CREATE - CONN_ID, pw_type, netns, ifname, ipinfo, udpinfo, udpcsum, vlanid
+ * TUNNEL_CREATE - CONN_ID, pw_type, netns, ifname, ipinfo, udpinfo, udpcsum
* TUNNEL_DELETE - CONN_ID
* TUNNEL_MODIFY - CONN_ID, udpcsum
* TUNNEL_GETSTATS - CONN_ID, (stats)
* TUNNEL_GET - CONN_ID, (...)
- * SESSION_CREATE - SESSION_ID, PW_TYPE, data_seq, cookie, peer_cookie, l2spec
+ * SESSION_CREATE - SESSION_ID, PW_TYPE, cookie, peer_cookie, l2spec
* SESSION_DELETE - SESSION_ID
- * SESSION_MODIFY - SESSION_ID, data_seq
+ * SESSION_MODIFY - SESSION_ID
* SESSION_GET - SESSION_ID, (...)
* SESSION_GETSTATS - SESSION_ID, (stats)
*
@@ -95,7 +95,7 @@ enum {
L2TP_ATTR_PW_TYPE, /* u16, enum l2tp_pwtype */
L2TP_ATTR_ENCAP_TYPE, /* u16, enum l2tp_encap_type */
L2TP_ATTR_OFFSET, /* u16 (not used) */
- L2TP_ATTR_DATA_SEQ, /* u16 */
+ L2TP_ATTR_DATA_SEQ, /* u16 (not used) */
L2TP_ATTR_L2SPEC_TYPE, /* u8, enum l2tp_l2spec_type */
L2TP_ATTR_L2SPEC_LEN, /* u8 (not used) */
L2TP_ATTR_PROTO_VERSION, /* u8 */
@@ -105,7 +105,7 @@ enum {
L2TP_ATTR_SESSION_ID, /* u32 */
L2TP_ATTR_PEER_SESSION_ID, /* u32 */
L2TP_ATTR_UDP_CSUM, /* u8 */
- L2TP_ATTR_VLAN_ID, /* u16 */
+ L2TP_ATTR_VLAN_ID, /* u16 (not used) */
L2TP_ATTR_COOKIE, /* 0, 4 or 8 bytes */
L2TP_ATTR_PEER_COOKIE, /* 0, 4 or 8 bytes */
L2TP_ATTR_DEBUG, /* u32, enum l2tp_debug_flags */
@@ -119,8 +119,8 @@ enum {
L2TP_ATTR_IP_DADDR, /* u32 */
L2TP_ATTR_UDP_SPORT, /* u16 */
L2TP_ATTR_UDP_DPORT, /* u16 */
- L2TP_ATTR_MTU, /* u16 */
- L2TP_ATTR_MRU, /* u16 */
+ L2TP_ATTR_MTU, /* u16 (not used) */
+ L2TP_ATTR_MRU, /* u16 (not used) */
L2TP_ATTR_STATS, /* nested */
L2TP_ATTR_IP6_SADDR, /* struct in6_addr */
L2TP_ATTR_IP6_DADDR, /* struct in6_addr */
@@ -169,6 +169,7 @@ enum l2tp_encap_type {
L2TP_ENCAPTYPE_IP,
};
+/* For L2TP_ATTR_DATA_SEQ. Unused. */
enum l2tp_seqmode {
L2TP_SEQ_NONE = 0,
L2TP_SEQ_IP = 1,
diff --git a/include/uapi/linux/media-bus-format.h b/include/uapi/linux/media-bus-format.h
index 9e3511742fdc..d6a5a3bfe6c4 100644
--- a/include/uapi/linux/media-bus-format.h
+++ b/include/uapi/linux/media-bus-format.h
@@ -62,7 +62,7 @@
#define MEDIA_BUS_FMT_RGB121212_1X36 0x1019
#define MEDIA_BUS_FMT_RGB161616_1X48 0x101a
-/* YUV (including grey) - next is 0x202c */
+/* YUV (including grey) - next is 0x202d */
#define MEDIA_BUS_FMT_Y8_1X8 0x2001
#define MEDIA_BUS_FMT_UV8_1X8 0x2015
#define MEDIA_BUS_FMT_UYVY8_1_5X8 0x2002
@@ -74,6 +74,7 @@
#define MEDIA_BUS_FMT_YUYV8_2X8 0x2008
#define MEDIA_BUS_FMT_YVYU8_2X8 0x2009
#define MEDIA_BUS_FMT_Y10_1X10 0x200a
+#define MEDIA_BUS_FMT_Y10_2X8_PADHI_LE 0x202c
#define MEDIA_BUS_FMT_UYVY10_2X10 0x2018
#define MEDIA_BUS_FMT_VYUY10_2X10 0x2019
#define MEDIA_BUS_FMT_YUYV10_2X10 0x200b
diff --git a/include/uapi/linux/media.h b/include/uapi/linux/media.h
index c7e9a5cba24e..36f76e777ef9 100644
--- a/include/uapi/linux/media.h
+++ b/include/uapi/linux/media.h
@@ -25,7 +25,6 @@
#endif
#include <linux/ioctl.h>
#include <linux/types.h>
-#include <linux/version.h>
struct media_device_info {
char driver[16];
@@ -90,12 +89,6 @@ struct media_device_info {
#define MEDIA_ENT_F_LENS (MEDIA_ENT_F_OLD_SUBDEV_BASE + 3)
/*
- * Video decoder functions
- */
-#define MEDIA_ENT_F_ATV_DECODER (MEDIA_ENT_F_OLD_SUBDEV_BASE + 4)
-#define MEDIA_ENT_F_DTV_DECODER (MEDIA_ENT_F_BASE + 0x6001)
-
-/*
* Digital TV, analog TV, radio and/or software defined radio tuner functions.
*
* It is a responsibility of the master/bridge drivers to add connectors
@@ -132,6 +125,8 @@ struct media_device_info {
#define MEDIA_ENT_F_PROC_VIDEO_LUT (MEDIA_ENT_F_BASE + 0x4004)
#define MEDIA_ENT_F_PROC_VIDEO_SCALER (MEDIA_ENT_F_BASE + 0x4005)
#define MEDIA_ENT_F_PROC_VIDEO_STATISTICS (MEDIA_ENT_F_BASE + 0x4006)
+#define MEDIA_ENT_F_PROC_VIDEO_ENCODER (MEDIA_ENT_F_BASE + 0x4007)
+#define MEDIA_ENT_F_PROC_VIDEO_DECODER (MEDIA_ENT_F_BASE + 0x4008)
/*
* Switch and bridge entity functions
@@ -139,6 +134,13 @@ struct media_device_info {
#define MEDIA_ENT_F_VID_MUX (MEDIA_ENT_F_BASE + 0x5001)
#define MEDIA_ENT_F_VID_IF_BRIDGE (MEDIA_ENT_F_BASE + 0x5002)
+/*
+ * Video decoder/encoder functions
+ */
+#define MEDIA_ENT_F_ATV_DECODER (MEDIA_ENT_F_OLD_SUBDEV_BASE + 4)
+#define MEDIA_ENT_F_DV_DECODER (MEDIA_ENT_F_BASE + 0x6001)
+#define MEDIA_ENT_F_DV_ENCODER (MEDIA_ENT_F_BASE + 0x6002)
+
/* Entity flags */
#define MEDIA_ENT_FL_DEFAULT (1 << 0)
#define MEDIA_ENT_FL_CONNECTOR (1 << 1)
@@ -280,11 +282,21 @@ struct media_links_enum {
* MC next gen API definitions
*/
+/*
+ * Appeared in 4.19.0.
+ *
+ * The media_version argument comes from the media_version field in
+ * struct media_device_info.
+ */
+#define MEDIA_V2_ENTITY_HAS_FLAGS(media_version) \
+ ((media_version) >= ((4 << 16) | (19 << 8) | 0))
+
struct media_v2_entity {
__u32 id;
char name[64];
__u32 function; /* Main function of the entity */
- __u32 reserved[6];
+ __u32 flags;
+ __u32 reserved[5];
} __attribute__ ((packed));
/* Should match the specific fields at media_intf_devnode */
@@ -305,11 +317,21 @@ struct media_v2_interface {
};
} __attribute__ ((packed));
+/*
+ * Appeared in 4.19.0.
+ *
+ * The media_version argument comes from the media_version field in
+ * struct media_device_info.
+ */
+#define MEDIA_V2_PAD_HAS_INDEX(media_version) \
+ ((media_version) >= ((4 << 16) | (19 << 8) | 0))
+
struct media_v2_pad {
__u32 id;
__u32 entity_id;
__u32 flags;
- __u32 reserved[5];
+ __u32 index;
+ __u32 reserved[4];
} __attribute__ ((packed));
struct media_v2_link {
@@ -348,7 +370,7 @@ struct media_v2_topology {
#define MEDIA_IOC_SETUP_LINK _IOWR('|', 0x03, struct media_link_desc)
#define MEDIA_IOC_G_TOPOLOGY _IOWR('|', 0x04, struct media_v2_topology)
-#if !defined(__KERNEL__) || defined(__NEED_MEDIA_LEGACY_API)
+#ifndef __KERNEL__
/*
* Legacy symbols used to avoid userspace compilation breakages.
@@ -380,6 +402,8 @@ struct media_v2_topology {
#define MEDIA_ENT_T_V4L2_SUBDEV_DECODER MEDIA_ENT_F_ATV_DECODER
#define MEDIA_ENT_T_V4L2_SUBDEV_TUNER MEDIA_ENT_F_TUNER
+#define MEDIA_ENT_F_DTV_DECODER MEDIA_ENT_F_DV_DECODER
+
/*
* There is still no ALSA support in the media controller. These
* defines should not have been added and we leave them here only
@@ -396,7 +420,7 @@ struct media_v2_topology {
#define MEDIA_INTF_T_ALSA_TIMER (MEDIA_INTF_T_ALSA_BASE + 7)
/* Obsolete symbol for media_version, no longer used in the kernel */
-#define MEDIA_API_VERSION KERNEL_VERSION(0, 1, 0)
+#define MEDIA_API_VERSION ((0 << 16) | (1 << 8) | 0)
#endif
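
The two MEDIA_V2_*_HAS_* macros above are meant to let userspace gate on media_version before trusting the new fields; a minimal sketch (error handling trimmed, device fd assumed already open):

#include <sys/ioctl.h>
#include <linux/media.h>

static int pad_index_supported(int fd)
{
	struct media_device_info info = {0};

	if (ioctl(fd, MEDIA_IOC_DEVICE_INFO, &info) < 0)
		return 0;
	/* media_v2_pad::index and media_v2_entity::flags are only
	 * meaningful when the kernel reports at least 4.19.0 here. */
	return MEDIA_V2_PAD_HAS_INDEX(info.media_version);
}
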
diff --git a/include/uapi/linux/mii.h b/include/uapi/linux/mii.h
index b5c2fdcf23fd..a506216591d6 100644
--- a/include/uapi/linux/mii.h
+++ b/include/uapi/linux/mii.h
@@ -136,6 +136,7 @@
#define CTL1000_ENABLE_MASTER 0x1000
/* 1000BASE-T Status register */
+#define LPA_1000MSFAIL 0x8000 /* Master/Slave resolution failure */
#define LPA_1000LOCALRXOK 0x2000 /* Link partner local receiver status */
#define LPA_1000REMRXOK 0x1000 /* Link partner remote receiver status */
#define LPA_1000FULL 0x0800 /* Link partner 1000BASE-T full duplex */
diff --git a/include/uapi/linux/mroute.h b/include/uapi/linux/mroute.h
index 10f9ff9426a2..5d37a9ccce63 100644
--- a/include/uapi/linux/mroute.h
+++ b/include/uapi/linux/mroute.h
@@ -120,6 +120,7 @@ enum {
IPMRA_TABLE_MROUTE_DO_ASSERT,
IPMRA_TABLE_MROUTE_DO_PIM,
IPMRA_TABLE_VIFS,
+ IPMRA_TABLE_MROUTE_DO_WRVIFWHOLE,
__IPMRA_TABLE_MAX
};
#define IPMRA_TABLE_MAX (__IPMRA_TABLE_MAX - 1)
@@ -173,5 +174,6 @@ enum {
#define IGMPMSG_NOCACHE 1 /* Kern cache fill request to mrouted */
#define IGMPMSG_WRONGVIF 2 /* For PIM assert processing (unused) */
#define IGMPMSG_WHOLEPKT 3 /* For PIM Register processing */
+#define IGMPMSG_WRVIFWHOLE 4 /* For PIM Register and assert processing */
#endif /* _UAPI__LINUX_MROUTE_H */
diff --git a/include/uapi/linux/nbd.h b/include/uapi/linux/nbd.h
index 85a3fb65e40a..20d6cc91435d 100644
--- a/include/uapi/linux/nbd.h
+++ b/include/uapi/linux/nbd.h
@@ -53,6 +53,9 @@ enum {
/* These are client behavior specific flags. */
#define NBD_CFLAG_DESTROY_ON_DISCONNECT (1 << 0) /* delete the nbd device on
disconnect. */
+#define NBD_CFLAG_DISCONNECT_ON_CLOSE (1 << 1) /* disconnect the nbd device on
+ * close by last opener.
+ */
/* userspace doesn't need the nbd_device structure */
diff --git a/include/uapi/linux/net_tstamp.h b/include/uapi/linux/net_tstamp.h
index 4fe104b2411f..97ff3c17ec4d 100644
--- a/include/uapi/linux/net_tstamp.h
+++ b/include/uapi/linux/net_tstamp.h
@@ -141,4 +141,22 @@ struct scm_ts_pktinfo {
__u32 reserved[2];
};
+/*
+ * SO_TXTIME gets a struct sock_txtime with flags being an integer bit
+ * field comprised of these values.
+ */
+enum txtime_flags {
+ SOF_TXTIME_DEADLINE_MODE = (1 << 0),
+ SOF_TXTIME_REPORT_ERRORS = (1 << 1),
+
+ SOF_TXTIME_FLAGS_LAST = SOF_TXTIME_REPORT_ERRORS,
+ SOF_TXTIME_FLAGS_MASK = (SOF_TXTIME_FLAGS_LAST - 1) |
+ SOF_TXTIME_FLAGS_LAST
+};
+
+struct sock_txtime {
+ clockid_t clockid; /* reference clockid */
+ __u32 flags; /* as defined by enum txtime_flags */
+};
+
#endif /* _NET_TIMESTAMPING_H */
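
A hedged userspace sketch of turning on the SO_TXTIME machinery with the struct sock_txtime defined above. SO_TXTIME itself lives in the socket uapi headers of the same release (the fallback value below is an assumption); per-packet transmit times are then passed as an SCM_TXTIME cmsg carrying a __u64 in nanoseconds of the chosen clock:

#include <time.h>
#include <sys/socket.h>
#include <linux/net_tstamp.h>

#ifndef SO_TXTIME
#define SO_TXTIME 61	/* assumption: value from asm-generic/socket.h in this release */
#endif

static int enable_txtime(int fd)
{
	struct sock_txtime cfg = {
		.clockid = CLOCK_TAI,			/* reference clock for txtime */
		.flags   = SOF_TXTIME_REPORT_ERRORS,	/* report misses on the error queue */
	};

	return setsockopt(fd, SOL_SOCKET, SO_TXTIME, &cfg, sizeof(cfg));
}
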
diff --git a/include/uapi/linux/netconf.h b/include/uapi/linux/netconf.h
index c84fcdfca862..fac4edd55379 100644
--- a/include/uapi/linux/netconf.h
+++ b/include/uapi/linux/netconf.h
@@ -18,6 +18,7 @@ enum {
NETCONFA_PROXY_NEIGH,
NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN,
NETCONFA_INPUT,
+ NETCONFA_BC_FORWARDING,
__NETCONFA_MAX
};
#define NETCONFA_MAX (__NETCONFA_MAX - 1)
diff --git a/include/uapi/linux/netfilter/nf_tables.h b/include/uapi/linux/netfilter/nf_tables.h
index 89438e68dc03..e23290ffdc77 100644
--- a/include/uapi/linux/netfilter/nf_tables.h
+++ b/include/uapi/linux/netfilter/nf_tables.h
@@ -8,6 +8,7 @@
#define NFT_SET_MAXNAMELEN NFT_NAME_MAXLEN
#define NFT_OBJ_MAXNAMELEN NFT_NAME_MAXLEN
#define NFT_USERDATA_MAXLEN 256
+#define NFT_OSF_MAXGENRELEN 16
/**
* enum nft_registers - nf_tables registers
@@ -921,10 +922,12 @@ enum nft_socket_attributes {
/*
* enum nft_socket_keys - nf_tables socket expression keys
*
- * @NFT_SOCKET_TRANSPARENT: Value of the IP(V6)_TRANSPARENT socket option_
+ * @NFT_SOCKET_TRANSPARENT: Value of the IP(V6)_TRANSPARENT socket option
+ * @NFT_SOCKET_MARK: Value of the socket mark
*/
enum nft_socket_keys {
NFT_SOCKET_TRANSPARENT,
+ NFT_SOCKET_MARK,
__NFT_SOCKET_MAX
};
#define NFT_SOCKET_MAX (__NFT_SOCKET_MAX - 1)
@@ -955,6 +958,7 @@ enum nft_socket_keys {
* @NFT_CT_DST_IP: conntrack layer 3 protocol destination (IPv4 address)
* @NFT_CT_SRC_IP6: conntrack layer 3 protocol source (IPv6 address)
* @NFT_CT_DST_IP6: conntrack layer 3 protocol destination (IPv6 address)
+ * @NFT_CT_TIMEOUT: connection tracking timeout policy assigned to conntrack
*/
enum nft_ct_keys {
NFT_CT_STATE,
@@ -980,6 +984,7 @@ enum nft_ct_keys {
NFT_CT_DST_IP,
NFT_CT_SRC_IP6,
NFT_CT_DST_IP6,
+ NFT_CT_TIMEOUT,
__NFT_CT_MAX
};
#define NFT_CT_MAX (__NFT_CT_MAX - 1)
@@ -1251,6 +1256,22 @@ enum nft_nat_attributes {
#define NFTA_NAT_MAX (__NFTA_NAT_MAX - 1)
/**
+ * enum nft_tproxy_attributes - nf_tables tproxy expression netlink attributes
+ *
+ * NFTA_TPROXY_FAMILY: Target address family (NLA_U32: nft_registers)
+ * NFTA_TPROXY_REG_ADDR: Target address register (NLA_U32: nft_registers)
+ * NFTA_TPROXY_REG_PORT: Target port register (NLA_U32: nft_registers)
+ */
+enum nft_tproxy_attributes {
+ NFTA_TPROXY_UNSPEC,
+ NFTA_TPROXY_FAMILY,
+ NFTA_TPROXY_REG_ADDR,
+ NFTA_TPROXY_REG_PORT,
+ __NFTA_TPROXY_MAX
+};
+#define NFTA_TPROXY_MAX (__NFTA_TPROXY_MAX - 1)
+
+/**
* enum nft_masq_attributes - nf_tables masquerade expression attributes
*
* @NFTA_MASQ_FLAGS: NAT flags (see NF_NAT_RANGE_* in linux/netfilter/nf_nat.h) (NLA_U32)
@@ -1392,13 +1413,24 @@ enum nft_ct_helper_attributes {
};
#define NFTA_CT_HELPER_MAX (__NFTA_CT_HELPER_MAX - 1)
+enum nft_ct_timeout_timeout_attributes {
+ NFTA_CT_TIMEOUT_UNSPEC,
+ NFTA_CT_TIMEOUT_L3PROTO,
+ NFTA_CT_TIMEOUT_L4PROTO,
+ NFTA_CT_TIMEOUT_DATA,
+ __NFTA_CT_TIMEOUT_MAX,
+};
+#define NFTA_CT_TIMEOUT_MAX (__NFTA_CT_TIMEOUT_MAX - 1)
+
#define NFT_OBJECT_UNSPEC 0
#define NFT_OBJECT_COUNTER 1
#define NFT_OBJECT_QUOTA 2
#define NFT_OBJECT_CT_HELPER 3
#define NFT_OBJECT_LIMIT 4
#define NFT_OBJECT_CONNLIMIT 5
-#define __NFT_OBJECT_MAX 6
+#define NFT_OBJECT_TUNNEL 6
+#define NFT_OBJECT_CT_TIMEOUT 7
+#define __NFT_OBJECT_MAX 8
#define NFT_OBJECT_MAX (__NFT_OBJECT_MAX - 1)
/**
@@ -1461,6 +1493,13 @@ enum nft_flowtable_hook_attributes {
};
#define NFTA_FLOWTABLE_HOOK_MAX (__NFTA_FLOWTABLE_HOOK_MAX - 1)
+enum nft_osf_attributes {
+ NFTA_OSF_UNSPEC,
+ NFTA_OSF_DREG,
+ __NFTA_OSF_MAX,
+};
+#define NFTA_OSF_MAX (__NFTA_OSF_MAX - 1)
+
/**
* enum nft_device_attributes - nf_tables device netlink attributes
*
@@ -1555,4 +1594,85 @@ enum nft_ng_types {
};
#define NFT_NG_MAX (__NFT_NG_MAX - 1)
+enum nft_tunnel_key_ip_attributes {
+ NFTA_TUNNEL_KEY_IP_UNSPEC,
+ NFTA_TUNNEL_KEY_IP_SRC,
+ NFTA_TUNNEL_KEY_IP_DST,
+ __NFTA_TUNNEL_KEY_IP_MAX
+};
+#define NFTA_TUNNEL_KEY_IP_MAX (__NFTA_TUNNEL_KEY_IP_MAX - 1)
+
+enum nft_tunnel_ip6_attributes {
+ NFTA_TUNNEL_KEY_IP6_UNSPEC,
+ NFTA_TUNNEL_KEY_IP6_SRC,
+ NFTA_TUNNEL_KEY_IP6_DST,
+ NFTA_TUNNEL_KEY_IP6_FLOWLABEL,
+ __NFTA_TUNNEL_KEY_IP6_MAX
+};
+#define NFTA_TUNNEL_KEY_IP6_MAX (__NFTA_TUNNEL_KEY_IP6_MAX - 1)
+
+enum nft_tunnel_opts_attributes {
+ NFTA_TUNNEL_KEY_OPTS_UNSPEC,
+ NFTA_TUNNEL_KEY_OPTS_VXLAN,
+ NFTA_TUNNEL_KEY_OPTS_ERSPAN,
+ __NFTA_TUNNEL_KEY_OPTS_MAX
+};
+#define NFTA_TUNNEL_KEY_OPTS_MAX (__NFTA_TUNNEL_KEY_OPTS_MAX - 1)
+
+enum nft_tunnel_opts_vxlan_attributes {
+ NFTA_TUNNEL_KEY_VXLAN_UNSPEC,
+ NFTA_TUNNEL_KEY_VXLAN_GBP,
+ __NFTA_TUNNEL_KEY_VXLAN_MAX
+};
+#define NFTA_TUNNEL_KEY_VXLAN_MAX (__NFTA_TUNNEL_KEY_VXLAN_MAX - 1)
+
+enum nft_tunnel_opts_erspan_attributes {
+ NFTA_TUNNEL_KEY_ERSPAN_UNSPEC,
+ NFTA_TUNNEL_KEY_ERSPAN_VERSION,
+ NFTA_TUNNEL_KEY_ERSPAN_V1_INDEX,
+ NFTA_TUNNEL_KEY_ERSPAN_V2_HWID,
+ NFTA_TUNNEL_KEY_ERSPAN_V2_DIR,
+ __NFTA_TUNNEL_KEY_ERSPAN_MAX
+};
+#define NFTA_TUNNEL_KEY_ERSPAN_MAX (__NFTA_TUNNEL_KEY_ERSPAN_MAX - 1)
+
+enum nft_tunnel_flags {
+ NFT_TUNNEL_F_ZERO_CSUM_TX = (1 << 0),
+ NFT_TUNNEL_F_DONT_FRAGMENT = (1 << 1),
+ NFT_TUNNEL_F_SEQ_NUMBER = (1 << 2),
+};
+#define NFT_TUNNEL_F_MASK (NFT_TUNNEL_F_ZERO_CSUM_TX | \
+ NFT_TUNNEL_F_DONT_FRAGMENT | \
+ NFT_TUNNEL_F_SEQ_NUMBER)
+
+enum nft_tunnel_key_attributes {
+ NFTA_TUNNEL_KEY_UNSPEC,
+ NFTA_TUNNEL_KEY_ID,
+ NFTA_TUNNEL_KEY_IP,
+ NFTA_TUNNEL_KEY_IP6,
+ NFTA_TUNNEL_KEY_FLAGS,
+ NFTA_TUNNEL_KEY_TOS,
+ NFTA_TUNNEL_KEY_TTL,
+ NFTA_TUNNEL_KEY_SPORT,
+ NFTA_TUNNEL_KEY_DPORT,
+ NFTA_TUNNEL_KEY_OPTS,
+ __NFTA_TUNNEL_KEY_MAX
+};
+#define NFTA_TUNNEL_KEY_MAX (__NFTA_TUNNEL_KEY_MAX - 1)
+
+enum nft_tunnel_keys {
+ NFT_TUNNEL_PATH,
+ NFT_TUNNEL_ID,
+ __NFT_TUNNEL_MAX
+};
+#define NFT_TUNNEL_MAX (__NFT_TUNNEL_MAX - 1)
+
+enum nft_tunnel_attributes {
+ NFTA_TUNNEL_UNSPEC,
+ NFTA_TUNNEL_KEY,
+ NFTA_TUNNEL_DREG,
+ __NFTA_TUNNEL_MAX
+};
+#define NFTA_TUNNEL_MAX (__NFTA_TUNNEL_MAX - 1)
+
#endif /* _LINUX_NF_TABLES_H */
diff --git a/include/uapi/linux/netfilter/nf_osf.h b/include/uapi/linux/netfilter/nfnetlink_osf.h
index 8f2f2f403183..76a3527df5dd 100644
--- a/include/uapi/linux/netfilter/nf_osf.h
+++ b/include/uapi/linux/netfilter/nfnetlink_osf.h
@@ -16,9 +16,14 @@
#define NF_OSF_TTL_TRUE 0 /* True ip and fingerprint TTL comparison */
+/* Check if ip TTL is less than fingerprint one */
+#define NF_OSF_TTL_LESS 1
+
/* Do not compare ip and fingerprint TTL at all */
#define NF_OSF_TTL_NOCHECK 2
+#define NF_OSF_FLAGMASK (NF_OSF_GENRE | NF_OSF_TTL | \
+ NF_OSF_LOG | NF_OSF_INVERT)
/* Wildcard MSS (kind of).
* It is used to implement a state machine for the different wildcard values
* of the MSS and window sizes.
@@ -83,4 +88,31 @@ enum iana_options {
OSFOPT_EMPTY = 255,
};
+/* Initial window size option state machine: multiple of mss, mtu or
+ * plain numeric value. It can also be a plain numeric value that is
+ * not a multiple of the specified value.
+ */
+enum nf_osf_window_size_options {
+ OSF_WSS_PLAIN = 0,
+ OSF_WSS_MSS,
+ OSF_WSS_MTU,
+ OSF_WSS_MODULO,
+ OSF_WSS_MAX,
+};
+
+enum nf_osf_attr_type {
+ OSF_ATTR_UNSPEC,
+ OSF_ATTR_FINGER,
+ OSF_ATTR_MAX,
+};
+
+/*
+ * Add/remove fingerprint from the kernel.
+ */
+enum nf_osf_msg_types {
+ OSF_MSG_ADD,
+ OSF_MSG_REMOVE,
+ OSF_MSG_MAX,
+};
+
#endif /* _NF_OSF_H */
diff --git a/include/uapi/linux/netfilter/xt_osf.h b/include/uapi/linux/netfilter/xt_osf.h
index 72956eceeb09..24102b5286ec 100644
--- a/include/uapi/linux/netfilter/xt_osf.h
+++ b/include/uapi/linux/netfilter/xt_osf.h
@@ -23,7 +23,7 @@
#include <linux/types.h>
#include <linux/ip.h>
#include <linux/tcp.h>
-#include <linux/netfilter/nf_osf.h>
+#include <linux/netfilter/nfnetlink_osf.h>
#define XT_OSF_GENRE NF_OSF_GENRE
#define XT_OSF_INVERT NF_OSF_INVERT
@@ -37,8 +37,7 @@
#define XT_OSF_TTL_TRUE NF_OSF_TTL_TRUE
#define XT_OSF_TTL_NOCHECK NF_OSF_TTL_NOCHECK
-
-#define XT_OSF_TTL_LESS 1 /* Check if ip TTL is less than fingerprint one */
+#define XT_OSF_TTL_LESS NF_OSF_TTL_LESS
#define xt_osf_wc nf_osf_wc
#define xt_osf_opt nf_osf_opt
@@ -47,19 +46,8 @@
#define xt_osf_finger nf_osf_finger
#define xt_osf_nlmsg nf_osf_nlmsg
-/*
- * Add/remove fingerprint from the kernel.
- */
-enum xt_osf_msg_types {
- OSF_MSG_ADD,
- OSF_MSG_REMOVE,
- OSF_MSG_MAX,
-};
-
-enum xt_osf_attr_type {
- OSF_ATTR_UNSPEC,
- OSF_ATTR_FINGER,
- OSF_ATTR_MAX,
-};
+#define xt_osf_window_size_options nf_osf_window_size_options
+#define xt_osf_attr_type nf_osf_attr_type
+#define xt_osf_msg_types nf_osf_msg_types
#endif /* _XT_OSF_H */
diff --git a/include/uapi/linux/netfilter_bridge.h b/include/uapi/linux/netfilter_bridge.h
index 12fb77633f83..156ccd089df1 100644
--- a/include/uapi/linux/netfilter_bridge.h
+++ b/include/uapi/linux/netfilter_bridge.h
@@ -26,4 +26,15 @@
#define NF_BR_BROUTING 5
#define NF_BR_NUMHOOKS 6
+enum nf_br_hook_priorities {
+ NF_BR_PRI_FIRST = INT_MIN,
+ NF_BR_PRI_NAT_DST_BRIDGED = -300,
+ NF_BR_PRI_FILTER_BRIDGED = -200,
+ NF_BR_PRI_BRNF = 0,
+ NF_BR_PRI_NAT_DST_OTHER = 100,
+ NF_BR_PRI_FILTER_OTHER = 200,
+ NF_BR_PRI_NAT_SRC = 300,
+ NF_BR_PRI_LAST = INT_MAX,
+};
+
#endif /* _UAPI__LINUX_BRIDGE_NETFILTER_H */
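
The priorities above are consumed by kernel-side hook registration; a hedged in-kernel sketch (not part of this patch) of hooking bridge forwarding at the same priority as the bridged filter tables:

#include <linux/netfilter.h>
#include <linux/netfilter_bridge.h>
#include <linux/skbuff.h>

static unsigned int my_br_hook(void *priv, struct sk_buff *skb,
			       const struct nf_hook_state *state)
{
	return NF_ACCEPT;	/* inspect or modify skb here */
}

static const struct nf_hook_ops my_br_ops = {
	.hook		= my_br_hook,
	.pf		= NFPROTO_BRIDGE,
	.hooknum	= NF_BR_FORWARD,
	.priority	= NF_BR_PRI_FILTER_BRIDGED,
};

/* registered and unregistered with nf_register_net_hook()/nf_unregister_net_hook()
 * from the module's init/exit paths */
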
diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h
index 27e4e441caac..7acc16f34942 100644
--- a/include/uapi/linux/nl80211.h
+++ b/include/uapi/linux/nl80211.h
@@ -2237,6 +2237,9 @@ enum nl80211_commands {
* enforced.
* @NL80211_ATTR_TXQ_QUANTUM: TXQ scheduler quantum (bytes). Number of bytes
* a flow is assigned on each round of the DRR scheduler.
+ * @NL80211_ATTR_HE_CAPABILITY: HE Capability information element (from
+ * association request when used with NL80211_CMD_NEW_STATION). Can be set
+ * only if %NL80211_STA_FLAG_WME is set.
*
* @NUM_NL80211_ATTR: total number of nl80211_attrs available
* @NL80211_ATTR_MAX: highest attribute number currently defined
@@ -2677,6 +2680,8 @@ enum nl80211_attrs {
NL80211_ATTR_TXQ_MEMORY_LIMIT,
NL80211_ATTR_TXQ_QUANTUM,
+ NL80211_ATTR_HE_CAPABILITY,
+
/* add attributes here, update the policy in nl80211.c */
__NL80211_ATTR_AFTER_LAST,
@@ -2726,7 +2731,8 @@ enum nl80211_attrs {
#define NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY 24
#define NL80211_HT_CAPABILITY_LEN 26
#define NL80211_VHT_CAPABILITY_LEN 12
-
+#define NL80211_HE_MIN_CAPABILITY_LEN 16
+#define NL80211_HE_MAX_CAPABILITY_LEN 51
#define NL80211_MAX_NR_CIPHER_SUITES 5
#define NL80211_MAX_NR_AKM_SUITES 2
@@ -2854,6 +2860,38 @@ struct nl80211_sta_flag_update {
} __attribute__((packed));
/**
+ * enum nl80211_he_gi - HE guard interval
+ * @NL80211_RATE_INFO_HE_GI_0_8: 0.8 usec
+ * @NL80211_RATE_INFO_HE_GI_1_6: 1.6 usec
+ * @NL80211_RATE_INFO_HE_GI_3_2: 3.2 usec
+ */
+enum nl80211_he_gi {
+ NL80211_RATE_INFO_HE_GI_0_8,
+ NL80211_RATE_INFO_HE_GI_1_6,
+ NL80211_RATE_INFO_HE_GI_3_2,
+};
+
+/**
+ * enum nl80211_he_ru_alloc - HE RU allocation values
+ * @NL80211_RATE_INFO_HE_RU_ALLOC_26: 26-tone RU allocation
+ * @NL80211_RATE_INFO_HE_RU_ALLOC_52: 52-tone RU allocation
+ * @NL80211_RATE_INFO_HE_RU_ALLOC_106: 106-tone RU allocation
+ * @NL80211_RATE_INFO_HE_RU_ALLOC_242: 242-tone RU allocation
+ * @NL80211_RATE_INFO_HE_RU_ALLOC_484: 484-tone RU allocation
+ * @NL80211_RATE_INFO_HE_RU_ALLOC_996: 996-tone RU allocation
+ * @NL80211_RATE_INFO_HE_RU_ALLOC_2x996: 2x996-tone RU allocation
+ */
+enum nl80211_he_ru_alloc {
+ NL80211_RATE_INFO_HE_RU_ALLOC_26,
+ NL80211_RATE_INFO_HE_RU_ALLOC_52,
+ NL80211_RATE_INFO_HE_RU_ALLOC_106,
+ NL80211_RATE_INFO_HE_RU_ALLOC_242,
+ NL80211_RATE_INFO_HE_RU_ALLOC_484,
+ NL80211_RATE_INFO_HE_RU_ALLOC_996,
+ NL80211_RATE_INFO_HE_RU_ALLOC_2x996,
+};
+
+/**
* enum nl80211_rate_info - bitrate information
*
* These attribute types are used with %NL80211_STA_INFO_TXRATE
@@ -2885,6 +2923,13 @@ struct nl80211_sta_flag_update {
* @NL80211_RATE_INFO_5_MHZ_WIDTH: 5 MHz width - note that this is
* a legacy rate and will be reported as the actual bitrate, i.e.
* a quarter of the base (20 MHz) rate
+ * @NL80211_RATE_INFO_HE_MCS: HE MCS index (u8, 0-11)
+ * @NL80211_RATE_INFO_HE_NSS: HE NSS value (u8, 1-8)
+ * @NL80211_RATE_INFO_HE_GI: HE guard interval identifier
+ * (u8, see &enum nl80211_he_gi)
+ * @NL80211_RATE_INFO_HE_DCM: HE DCM value (u8, 0/1)
+ * @NL80211_RATE_INFO_RU_ALLOC: HE RU allocation, if not present then
+ * non-OFDMA was used (u8, see &enum nl80211_he_ru_alloc)
* @__NL80211_RATE_INFO_AFTER_LAST: internal use
*/
enum nl80211_rate_info {
@@ -2901,6 +2946,11 @@ enum nl80211_rate_info {
NL80211_RATE_INFO_160_MHZ_WIDTH,
NL80211_RATE_INFO_10_MHZ_WIDTH,
NL80211_RATE_INFO_5_MHZ_WIDTH,
+ NL80211_RATE_INFO_HE_MCS,
+ NL80211_RATE_INFO_HE_NSS,
+ NL80211_RATE_INFO_HE_GI,
+ NL80211_RATE_INFO_HE_DCM,
+ NL80211_RATE_INFO_HE_RU_ALLOC,
/* keep last */
__NL80211_RATE_INFO_AFTER_LAST,
@@ -3167,6 +3217,38 @@ enum nl80211_mpath_info {
};
/**
+ * enum nl80211_band_iftype_attr - Interface type data attributes
+ *
+ * @__NL80211_BAND_IFTYPE_ATTR_INVALID: attribute number 0 is reserved
+ * @NL80211_BAND_IFTYPE_ATTR_IFTYPES: nested attribute containing a flag attribute
+ * for each interface type that supports the band data
+ * @NL80211_BAND_IFTYPE_ATTR_HE_CAP_MAC: HE MAC capabilities as in HE
+ * capabilities IE
+ * @NL80211_BAND_IFTYPE_ATTR_HE_CAP_PHY: HE PHY capabilities as in HE
+ * capabilities IE
+ * @NL80211_BAND_IFTYPE_ATTR_HE_CAP_MCS_SET: HE supported NSS/MCS as in HE
+ * capabilities IE
+ * @NL80211_BAND_IFTYPE_ATTR_HE_CAP_PPE: HE PPE thresholds information as
+ * defined in HE capabilities IE
+ * @NL80211_BAND_IFTYPE_ATTR_MAX: highest band HE capability attribute currently
+ * defined
+ * @__NL80211_BAND_IFTYPE_ATTR_AFTER_LAST: internal use
+ */
+enum nl80211_band_iftype_attr {
+ __NL80211_BAND_IFTYPE_ATTR_INVALID,
+
+ NL80211_BAND_IFTYPE_ATTR_IFTYPES,
+ NL80211_BAND_IFTYPE_ATTR_HE_CAP_MAC,
+ NL80211_BAND_IFTYPE_ATTR_HE_CAP_PHY,
+ NL80211_BAND_IFTYPE_ATTR_HE_CAP_MCS_SET,
+ NL80211_BAND_IFTYPE_ATTR_HE_CAP_PPE,
+
+ /* keep last */
+ __NL80211_BAND_IFTYPE_ATTR_AFTER_LAST,
+ NL80211_BAND_IFTYPE_ATTR_MAX = __NL80211_BAND_IFTYPE_ATTR_AFTER_LAST - 1
+};
+
+/**
* enum nl80211_band_attr - band attributes
* @__NL80211_BAND_ATTR_INVALID: attribute number 0 is reserved
* @NL80211_BAND_ATTR_FREQS: supported frequencies in this band,
@@ -3181,6 +3263,8 @@ enum nl80211_mpath_info {
* @NL80211_BAND_ATTR_VHT_MCS_SET: 32-byte attribute containing the MCS set as
* defined in 802.11ac
* @NL80211_BAND_ATTR_VHT_CAPA: VHT capabilities, as in the HT information IE
+ * @NL80211_BAND_ATTR_IFTYPE_DATA: nested array attribute, with each entry using
+ * attributes from &enum nl80211_band_iftype_attr
* @NL80211_BAND_ATTR_MAX: highest band attribute currently defined
* @__NL80211_BAND_ATTR_AFTER_LAST: internal use
*/
@@ -3196,6 +3280,7 @@ enum nl80211_band_attr {
NL80211_BAND_ATTR_VHT_MCS_SET,
NL80211_BAND_ATTR_VHT_CAPA,
+ NL80211_BAND_ATTR_IFTYPE_DATA,
/* keep last */
__NL80211_BAND_ATTR_AFTER_LAST,
@@ -5133,6 +5218,11 @@ enum nl80211_feature_flags {
* support to nl80211.
* @NL80211_EXT_FEATURE_TXQS: Driver supports FQ-CoDel-enabled intermediate
* TXQs.
+ * @NL80211_EXT_FEATURE_SCAN_RANDOM_SN: Driver/device supports randomizing the
+ * SN in probe request frames if requested by %NL80211_SCAN_FLAG_RANDOM_SN.
+ * @NL80211_EXT_FEATURE_SCAN_MIN_PREQ_CONTENT: Driver/device can omit all data
+ * except for supported rates from the probe request content if requested
+ * by the %NL80211_SCAN_FLAG_MIN_PREQ_CONTENT flag.
*
* @NUM_NL80211_EXT_FEATURES: number of extended features.
* @MAX_NL80211_EXT_FEATURES: highest extended feature index.
@@ -5167,6 +5257,8 @@ enum nl80211_ext_feature_index {
NL80211_EXT_FEATURE_CONTROL_PORT_OVER_NL80211,
NL80211_EXT_FEATURE_DATA_ACK_SIGNAL_SUPPORT,
NL80211_EXT_FEATURE_TXQS,
+ NL80211_EXT_FEATURE_SCAN_RANDOM_SN,
+ NL80211_EXT_FEATURE_SCAN_MIN_PREQ_CONTENT,
/* add new features before the definition below */
NUM_NL80211_EXT_FEATURES,
@@ -5272,6 +5364,12 @@ enum nl80211_timeout_reason {
* possible scan results. This flag hints the driver to use the best
* possible scan configuration to improve the accuracy in scanning.
* Latency and power use may get impacted with this flag.
+ * @NL80211_SCAN_FLAG_RANDOM_SN: randomize the sequence number in probe
+ * request frames from this scan to avoid correlation/tracking being
+ * possible.
+ * @NL80211_SCAN_FLAG_MIN_PREQ_CONTENT: minimize probe request content to
+ * only have supported rates and no additional capabilities (unless
+ * added by userspace explicitly).
*/
enum nl80211_scan_flags {
NL80211_SCAN_FLAG_LOW_PRIORITY = 1<<0,
@@ -5285,6 +5383,8 @@ enum nl80211_scan_flags {
NL80211_SCAN_FLAG_LOW_SPAN = 1<<8,
NL80211_SCAN_FLAG_LOW_POWER = 1<<9,
NL80211_SCAN_FLAG_HIGH_ACCURACY = 1<<10,
+ NL80211_SCAN_FLAG_RANDOM_SN = 1<<11,
+ NL80211_SCAN_FLAG_MIN_PREQ_CONTENT = 1<<12,
};
/**
diff --git a/include/uapi/linux/openvswitch.h b/include/uapi/linux/openvswitch.h
index 863aabaa5cc9..dbe0cbe4f1b7 100644
--- a/include/uapi/linux/openvswitch.h
+++ b/include/uapi/linux/openvswitch.h
@@ -840,6 +840,8 @@ struct ovs_action_push_eth {
* @OVS_ACTION_ATTR_POP_NSH: pop the outermost NSH header off the packet.
* @OVS_ACTION_ATTR_METER: Run packet through a meter, which may drop the
* packet, or modify the packet (e.g., change the DSCP field).
+ * @OVS_ACTION_ATTR_CLONE: make a copy of the packet and execute a list of
+ * actions without affecting the original packet and key.
*
* Only a single header can be set with a single %OVS_ACTION_ATTR_SET. Not all
* fields within a header are modifiable, e.g. the IPv4 protocol and fragment
@@ -873,6 +875,7 @@ enum ovs_action_attr {
OVS_ACTION_ATTR_PUSH_NSH, /* Nested OVS_NSH_KEY_ATTR_*. */
OVS_ACTION_ATTR_POP_NSH, /* No argument. */
OVS_ACTION_ATTR_METER, /* u32 meter ID. */
+ OVS_ACTION_ATTR_CLONE, /* Nested OVS_CLONE_ATTR_*. */
__OVS_ACTION_ATTR_MAX, /* Nothing past this will be accepted
* from userspace. */
diff --git a/include/uapi/linux/perf_event.h b/include/uapi/linux/perf_event.h
index b8e288a1f740..eeb787b1c53c 100644
--- a/include/uapi/linux/perf_event.h
+++ b/include/uapi/linux/perf_event.h
@@ -143,6 +143,8 @@ enum perf_event_sample_format {
PERF_SAMPLE_PHYS_ADDR = 1U << 19,
PERF_SAMPLE_MAX = 1U << 20, /* non-ABI */
+
+ __PERF_SAMPLE_CALLCHAIN_EARLY = 1ULL << 63,
};
/*
diff --git a/include/uapi/linux/pkt_cls.h b/include/uapi/linux/pkt_cls.h
index 84e4c1d0f874..be382fb0592d 100644
--- a/include/uapi/linux/pkt_cls.h
+++ b/include/uapi/linux/pkt_cls.h
@@ -45,6 +45,7 @@ enum {
* the skb and act like everything
* is alright.
*/
+#define TC_ACT_VALUE_MAX TC_ACT_TRAP
/* There is a special kind of actions called "extended actions",
* which need a value parameter. These have a local opcode located in
@@ -55,11 +56,12 @@ enum {
#define __TC_ACT_EXT_SHIFT 28
#define __TC_ACT_EXT(local) ((local) << __TC_ACT_EXT_SHIFT)
#define TC_ACT_EXT_VAL_MASK ((1 << __TC_ACT_EXT_SHIFT) - 1)
-#define TC_ACT_EXT_CMP(combined, opcode) \
- (((combined) & (~TC_ACT_EXT_VAL_MASK)) == opcode)
+#define TC_ACT_EXT_OPCODE(combined) ((combined) & (~TC_ACT_EXT_VAL_MASK))
+#define TC_ACT_EXT_CMP(combined, opcode) (TC_ACT_EXT_OPCODE(combined) == opcode)
#define TC_ACT_JUMP __TC_ACT_EXT(1)
#define TC_ACT_GOTO_CHAIN __TC_ACT_EXT(2)
+#define TC_ACT_EXT_OPCODE_MAX TC_ACT_GOTO_CHAIN
/* Action type identifiers*/
enum {
@@ -469,12 +471,47 @@ enum {
TCA_FLOWER_KEY_IP_TTL, /* u8 */
TCA_FLOWER_KEY_IP_TTL_MASK, /* u8 */
+ TCA_FLOWER_KEY_CVLAN_ID, /* be16 */
+ TCA_FLOWER_KEY_CVLAN_PRIO, /* u8 */
+ TCA_FLOWER_KEY_CVLAN_ETH_TYPE, /* be16 */
+
+ TCA_FLOWER_KEY_ENC_IP_TOS, /* u8 */
+ TCA_FLOWER_KEY_ENC_IP_TOS_MASK, /* u8 */
+ TCA_FLOWER_KEY_ENC_IP_TTL, /* u8 */
+ TCA_FLOWER_KEY_ENC_IP_TTL_MASK, /* u8 */
+
+ TCA_FLOWER_KEY_ENC_OPTS,
+ TCA_FLOWER_KEY_ENC_OPTS_MASK,
+
__TCA_FLOWER_MAX,
};
#define TCA_FLOWER_MAX (__TCA_FLOWER_MAX - 1)
enum {
+ TCA_FLOWER_KEY_ENC_OPTS_UNSPEC,
+ TCA_FLOWER_KEY_ENC_OPTS_GENEVE, /* Nested
+ * TCA_FLOWER_KEY_ENC_OPT_GENEVE_
+ * attributes
+ */
+ __TCA_FLOWER_KEY_ENC_OPTS_MAX,
+};
+
+#define TCA_FLOWER_KEY_ENC_OPTS_MAX (__TCA_FLOWER_KEY_ENC_OPTS_MAX - 1)
+
+enum {
+ TCA_FLOWER_KEY_ENC_OPT_GENEVE_UNSPEC,
+ TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS, /* u16 */
+ TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE, /* u8 */
+ TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA, /* 4 to 128 bytes */
+
+ __TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX,
+};
+
+#define TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX \
+ (__TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX - 1)
+
+enum {
TCA_FLOWER_KEY_FLAGS_IS_FRAGMENT = (1 << 0),
TCA_FLOWER_KEY_FLAGS_FRAG_IS_FIRST = (1 << 1),
};
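
A hedged sketch of what the new TC_ACT_VALUE_MAX and TC_ACT_EXT_OPCODE() helpers enable, namely validating a verdict before accepting it; illustrative only, the in-kernel user lives in the actions code rather than in this header:

#include <stdbool.h>
#include <linux/pkt_cls.h>

static bool tc_act_is_known(int action)
{
	if (TC_ACT_EXT_OPCODE(action))		/* extended actions: JUMP, GOTO_CHAIN */
		return TC_ACT_EXT_OPCODE(action) <= TC_ACT_EXT_OPCODE_MAX;
	return action <= TC_ACT_VALUE_MAX;	/* plain verdicts up to TC_ACT_TRAP */
}
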
diff --git a/include/uapi/linux/pkt_sched.h b/include/uapi/linux/pkt_sched.h
index 37b5096ae97b..8975fd1a1421 100644
--- a/include/uapi/linux/pkt_sched.h
+++ b/include/uapi/linux/pkt_sched.h
@@ -124,6 +124,21 @@ struct tc_fifo_qopt {
__u32 limit; /* Queue length: bytes for bfifo, packets for pfifo */
};
+/* SKBPRIO section */
+
+/*
+ * Priorities go from zero to (SKBPRIO_MAX_PRIORITY - 1).
+ * SKBPRIO_MAX_PRIORITY should be at least 64 so that skbprio can map the
+ * DS field of IPv4 and IPv6 headers one to one.
+ * Memory allocation grows linearly with SKBPRIO_MAX_PRIORITY.
+ */
+
+#define SKBPRIO_MAX_PRIORITY 64
+
+struct tc_skbprio_qopt {
+ __u32 limit; /* Queue length in packets. */
+};
+
/* PRIO section */
#define TCQ_PRIO_BANDS 16
@@ -539,6 +554,7 @@ enum {
TCA_NETEM_LATENCY64,
TCA_NETEM_JITTER64,
TCA_NETEM_SLOT,
+ TCA_NETEM_SLOT_DIST,
__TCA_NETEM_MAX,
};
@@ -581,6 +597,8 @@ struct tc_netem_slot {
__s64 max_delay;
__s32 max_packets;
__s32 max_bytes;
+ __s64 dist_delay; /* nsec */
+ __s64 dist_jitter; /* nsec */
};
enum {
@@ -934,4 +952,136 @@ enum {
#define TCA_CBS_MAX (__TCA_CBS_MAX - 1)
+
+/* ETF */
+struct tc_etf_qopt {
+ __s32 delta;
+ __s32 clockid;
+ __u32 flags;
+#define TC_ETF_DEADLINE_MODE_ON BIT(0)
+#define TC_ETF_OFFLOAD_ON BIT(1)
+};
+
+enum {
+ TCA_ETF_UNSPEC,
+ TCA_ETF_PARMS,
+ __TCA_ETF_MAX,
+};
+
+#define TCA_ETF_MAX (__TCA_ETF_MAX - 1)
+
+
+/* CAKE */
+enum {
+ TCA_CAKE_UNSPEC,
+ TCA_CAKE_PAD,
+ TCA_CAKE_BASE_RATE64,
+ TCA_CAKE_DIFFSERV_MODE,
+ TCA_CAKE_ATM,
+ TCA_CAKE_FLOW_MODE,
+ TCA_CAKE_OVERHEAD,
+ TCA_CAKE_RTT,
+ TCA_CAKE_TARGET,
+ TCA_CAKE_AUTORATE,
+ TCA_CAKE_MEMORY,
+ TCA_CAKE_NAT,
+ TCA_CAKE_RAW,
+ TCA_CAKE_WASH,
+ TCA_CAKE_MPU,
+ TCA_CAKE_INGRESS,
+ TCA_CAKE_ACK_FILTER,
+ TCA_CAKE_SPLIT_GSO,
+ __TCA_CAKE_MAX
+};
+#define TCA_CAKE_MAX (__TCA_CAKE_MAX - 1)
+
+enum {
+ __TCA_CAKE_STATS_INVALID,
+ TCA_CAKE_STATS_PAD,
+ TCA_CAKE_STATS_CAPACITY_ESTIMATE64,
+ TCA_CAKE_STATS_MEMORY_LIMIT,
+ TCA_CAKE_STATS_MEMORY_USED,
+ TCA_CAKE_STATS_AVG_NETOFF,
+ TCA_CAKE_STATS_MIN_NETLEN,
+ TCA_CAKE_STATS_MAX_NETLEN,
+ TCA_CAKE_STATS_MIN_ADJLEN,
+ TCA_CAKE_STATS_MAX_ADJLEN,
+ TCA_CAKE_STATS_TIN_STATS,
+ TCA_CAKE_STATS_DEFICIT,
+ TCA_CAKE_STATS_COBALT_COUNT,
+ TCA_CAKE_STATS_DROPPING,
+ TCA_CAKE_STATS_DROP_NEXT_US,
+ TCA_CAKE_STATS_P_DROP,
+ TCA_CAKE_STATS_BLUE_TIMER_US,
+ __TCA_CAKE_STATS_MAX
+};
+#define TCA_CAKE_STATS_MAX (__TCA_CAKE_STATS_MAX - 1)
+
+enum {
+ __TCA_CAKE_TIN_STATS_INVALID,
+ TCA_CAKE_TIN_STATS_PAD,
+ TCA_CAKE_TIN_STATS_SENT_PACKETS,
+ TCA_CAKE_TIN_STATS_SENT_BYTES64,
+ TCA_CAKE_TIN_STATS_DROPPED_PACKETS,
+ TCA_CAKE_TIN_STATS_DROPPED_BYTES64,
+ TCA_CAKE_TIN_STATS_ACKS_DROPPED_PACKETS,
+ TCA_CAKE_TIN_STATS_ACKS_DROPPED_BYTES64,
+ TCA_CAKE_TIN_STATS_ECN_MARKED_PACKETS,
+ TCA_CAKE_TIN_STATS_ECN_MARKED_BYTES64,
+ TCA_CAKE_TIN_STATS_BACKLOG_PACKETS,
+ TCA_CAKE_TIN_STATS_BACKLOG_BYTES,
+ TCA_CAKE_TIN_STATS_THRESHOLD_RATE64,
+ TCA_CAKE_TIN_STATS_TARGET_US,
+ TCA_CAKE_TIN_STATS_INTERVAL_US,
+ TCA_CAKE_TIN_STATS_WAY_INDIRECT_HITS,
+ TCA_CAKE_TIN_STATS_WAY_MISSES,
+ TCA_CAKE_TIN_STATS_WAY_COLLISIONS,
+ TCA_CAKE_TIN_STATS_PEAK_DELAY_US,
+ TCA_CAKE_TIN_STATS_AVG_DELAY_US,
+ TCA_CAKE_TIN_STATS_BASE_DELAY_US,
+ TCA_CAKE_TIN_STATS_SPARSE_FLOWS,
+ TCA_CAKE_TIN_STATS_BULK_FLOWS,
+ TCA_CAKE_TIN_STATS_UNRESPONSIVE_FLOWS,
+ TCA_CAKE_TIN_STATS_MAX_SKBLEN,
+ TCA_CAKE_TIN_STATS_FLOW_QUANTUM,
+ __TCA_CAKE_TIN_STATS_MAX
+};
+#define TCA_CAKE_TIN_STATS_MAX (__TCA_CAKE_TIN_STATS_MAX - 1)
+#define TC_CAKE_MAX_TINS (8)
+
+enum {
+ CAKE_FLOW_NONE = 0,
+ CAKE_FLOW_SRC_IP,
+ CAKE_FLOW_DST_IP,
+ CAKE_FLOW_HOSTS, /* = CAKE_FLOW_SRC_IP | CAKE_FLOW_DST_IP */
+ CAKE_FLOW_FLOWS,
+ CAKE_FLOW_DUAL_SRC, /* = CAKE_FLOW_SRC_IP | CAKE_FLOW_FLOWS */
+ CAKE_FLOW_DUAL_DST, /* = CAKE_FLOW_DST_IP | CAKE_FLOW_FLOWS */
+ CAKE_FLOW_TRIPLE, /* = CAKE_FLOW_HOSTS | CAKE_FLOW_FLOWS */
+ CAKE_FLOW_MAX,
+};
+
+enum {
+ CAKE_DIFFSERV_DIFFSERV3 = 0,
+ CAKE_DIFFSERV_DIFFSERV4,
+ CAKE_DIFFSERV_DIFFSERV8,
+ CAKE_DIFFSERV_BESTEFFORT,
+ CAKE_DIFFSERV_PRECEDENCE,
+ CAKE_DIFFSERV_MAX
+};
+
+enum {
+ CAKE_ACK_NONE = 0,
+ CAKE_ACK_FILTER,
+ CAKE_ACK_AGGRESSIVE,
+ CAKE_ACK_MAX
+};
+
+enum {
+ CAKE_ATM_NONE = 0,
+ CAKE_ATM_ATM,
+ CAKE_ATM_PTM,
+ CAKE_ATM_MAX
+};
+
#endif
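The CAKE attributes are flat (non-nested) options inside the qdisc's TCA_OPTIONS attribute. A hedged libmnl sketch of filling a few of them follows; the bandwidth figure, its unit, and the chosen modes are example assumptions, and the surrounding RTM_NEWQDISC message is omitted.

/* Sketch: CAKE options - example rate, diffserv3, triple-isolate flows,
 * conservative ACK filtering.
 */
#include <libmnl/libmnl.h>
#include <linux/pkt_sched.h>
#include <linux/rtnetlink.h>

static void put_cake_options(struct nlmsghdr *nlh)
{
	struct nlattr *opts = mnl_attr_nest_start(nlh, TCA_OPTIONS);

	mnl_attr_put_u64(nlh, TCA_CAKE_BASE_RATE64, 12500000ULL);	/* assumed bytes/s (~100 Mbit/s) */
	mnl_attr_put_u32(nlh, TCA_CAKE_DIFFSERV_MODE, CAKE_DIFFSERV_DIFFSERV3);
	mnl_attr_put_u32(nlh, TCA_CAKE_FLOW_MODE, CAKE_FLOW_TRIPLE);
	mnl_attr_put_u32(nlh, TCA_CAKE_ACK_FILTER, CAKE_ACK_FILTER);
	mnl_attr_nest_end(nlh, opts);
}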
diff --git a/include/uapi/linux/ppp-ioctl.h b/include/uapi/linux/ppp-ioctl.h
index 784c2e3e572e..88b5f9990320 100644
--- a/include/uapi/linux/ppp-ioctl.h
+++ b/include/uapi/linux/ppp-ioctl.h
@@ -68,7 +68,7 @@ struct ppp_option_data {
struct pppol2tp_ioc_stats {
__u16 tunnel_id; /* redundant */
__u16 session_id; /* if zero, get tunnel stats */
- __u32 using_ipsec:1; /* valid only for session_id == 0 */
+ __u32 using_ipsec:1;
__aligned_u64 tx_packets;
__aligned_u64 tx_bytes;
__aligned_u64 tx_errors;
diff --git a/include/uapi/linux/rds.h b/include/uapi/linux/rds.h
index 20c6bd0b0007..dc520e1a4123 100644
--- a/include/uapi/linux/rds.h
+++ b/include/uapi/linux/rds.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR Linux-OpenIB) */
/*
- * Copyright (c) 2008 Oracle. All rights reserved.
+ * Copyright (c) 2008, 2018 Oracle and/or its affiliates. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -118,7 +118,17 @@
#define RDS_INFO_IB_CONNECTIONS 10008
#define RDS_INFO_CONNECTION_STATS 10009
#define RDS_INFO_IWARP_CONNECTIONS 10010
-#define RDS_INFO_LAST 10010
+
+/* PF_RDS6 options */
+#define RDS6_INFO_CONNECTIONS 10011
+#define RDS6_INFO_SEND_MESSAGES 10012
+#define RDS6_INFO_RETRANS_MESSAGES 10013
+#define RDS6_INFO_RECV_MESSAGES 10014
+#define RDS6_INFO_SOCKETS 10015
+#define RDS6_INFO_TCP_SOCKETS 10016
+#define RDS6_INFO_IB_CONNECTIONS 10017
+
+#define RDS_INFO_LAST 10017
struct rds_info_counter {
__u8 name[32];
@@ -140,6 +150,15 @@ struct rds_info_connection {
__u8 flags;
} __attribute__((packed));
+struct rds6_info_connection {
+ __u64 next_tx_seq;
+ __u64 next_rx_seq;
+ struct in6_addr laddr;
+ struct in6_addr faddr;
+ __u8 transport[TRANSNAMSIZ]; /* null term ascii */
+ __u8 flags;
+} __attribute__((packed));
+
#define RDS_INFO_MESSAGE_FLAG_ACK 0x01
#define RDS_INFO_MESSAGE_FLAG_FAST_ACK 0x02
@@ -153,6 +172,17 @@ struct rds_info_message {
__u8 flags;
} __attribute__((packed));
+struct rds6_info_message {
+ __u64 seq;
+ __u32 len;
+ struct in6_addr laddr;
+ struct in6_addr faddr;
+ __be16 lport;
+ __be16 fport;
+ __u8 flags;
+ __u8 tos;
+} __attribute__((packed));
+
struct rds_info_socket {
__u32 sndbuf;
__be32 bound_addr;
@@ -163,6 +193,16 @@ struct rds_info_socket {
__u64 inum;
} __attribute__((packed));
+struct rds6_info_socket {
+ __u32 sndbuf;
+ struct in6_addr bound_addr;
+ struct in6_addr connected_addr;
+ __be16 bound_port;
+ __be16 connected_port;
+ __u32 rcvbuf;
+ __u64 inum;
+} __attribute__((packed));
+
struct rds_info_tcp_socket {
__be32 local_addr;
__be16 local_port;
@@ -175,6 +215,18 @@ struct rds_info_tcp_socket {
__u32 last_seen_una;
} __attribute__((packed));
+struct rds6_info_tcp_socket {
+ struct in6_addr local_addr;
+ __be16 local_port;
+ struct in6_addr peer_addr;
+ __be16 peer_port;
+ __u64 hdr_rem;
+ __u64 data_rem;
+ __u32 last_sent_nxt;
+ __u32 last_expected_una;
+ __u32 last_seen_una;
+} __attribute__((packed));
+
#define RDS_IB_GID_LEN 16
struct rds_info_rdma_connection {
__be32 src_addr;
@@ -189,6 +241,19 @@ struct rds_info_rdma_connection {
__u32 rdma_mr_size;
};
+struct rds6_info_rdma_connection {
+ struct in6_addr src_addr;
+ struct in6_addr dst_addr;
+ __u8 src_gid[RDS_IB_GID_LEN];
+ __u8 dst_gid[RDS_IB_GID_LEN];
+
+ __u32 max_send_wr;
+ __u32 max_recv_wr;
+ __u32 max_send_sge;
+ __u32 rdma_mr_max;
+ __u32 rdma_mr_size;
+};
+
/* RDS message Receive Path Latency points */
enum rds_message_rxpath_latency {
RDS_MSG_RX_HDR_TO_DGRAM_START = 0,
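The RDS6_INFO_* codes are read the same way as the existing rds-info options: getsockopt() on a PF_RDS socket with the info code as the option name, where the kernel reports the space it needs through optlen. The sketch below assumes that convention and uses a fixed buffer for brevity; a real tool would retry with the length the kernel returns.

/* Sketch: dump the new IPv6-aware TCP socket info. The buffer size is an
 * arbitrary cap for illustration.
 */
#include <stdio.h>
#include <unistd.h>
#include <arpa/inet.h>
#include <sys/socket.h>
#include <linux/rds.h>

static void dump_rds6_tcp_sockets(void)
{
	struct rds6_info_tcp_socket socks[64];
	socklen_t len = sizeof(socks);
	int fd = socket(PF_RDS, SOCK_SEQPACKET, 0);

	if (fd < 0)
		return;
	if (getsockopt(fd, SOL_RDS, RDS6_INFO_TCP_SOCKETS, socks, &len) == 0) {
		unsigned int i, n = len / sizeof(socks[0]);

		for (i = 0; i < n; i++)
			printf("lport %u fport %u\n",
			       ntohs(socks[i].local_port),
			       ntohs(socks[i].peer_port));
	}
	close(fd);
}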
diff --git a/include/uapi/linux/rseq.h b/include/uapi/linux/rseq.h
index d620fa43756c..9a402fdb60e9 100644
--- a/include/uapi/linux/rseq.h
+++ b/include/uapi/linux/rseq.h
@@ -10,13 +10,8 @@
* Copyright (c) 2015-2018 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
*/
-#ifdef __KERNEL__
-# include <linux/types.h>
-#else
-# include <stdint.h>
-#endif
-
-#include <linux/types_32_64.h>
+#include <linux/types.h>
+#include <asm/byteorder.h>
enum rseq_cpu_id_state {
RSEQ_CPU_ID_UNINITIALIZED = -1,
@@ -52,10 +47,10 @@ struct rseq_cs {
__u32 version;
/* enum rseq_cs_flags */
__u32 flags;
- LINUX_FIELD_u32_u64(start_ip);
+ __u64 start_ip;
/* Offset from start_ip. */
- LINUX_FIELD_u32_u64(post_commit_offset);
- LINUX_FIELD_u32_u64(abort_ip);
+ __u64 post_commit_offset;
+ __u64 abort_ip;
} __attribute__((aligned(4 * sizeof(__u64))));
/*
@@ -67,28 +62,30 @@ struct rseq_cs {
struct rseq {
/*
* Restartable sequences cpu_id_start field. Updated by the
- * kernel, and read by user-space with single-copy atomicity
- * semantics. Aligned on 32-bit. Always contains a value in the
- * range of possible CPUs, although the value may not be the
- * actual current CPU (e.g. if rseq is not initialized). This
- * CPU number value should always be compared against the value
- * of the cpu_id field before performing a rseq commit or
- * returning a value read from a data structure indexed using
- * the cpu_id_start value.
+ * kernel. Read by user-space with single-copy atomicity
+ * semantics. This field should only be read by the thread which
+ * registered this data structure. Aligned on 32-bit. Always
+ * contains a value in the range of possible CPUs, although the
+ * value may not be the actual current CPU (e.g. if rseq is not
+ * initialized). This CPU number value should always be compared
+ * against the value of the cpu_id field before performing a rseq
+ * commit or returning a value read from a data structure indexed
+ * using the cpu_id_start value.
*/
__u32 cpu_id_start;
/*
- * Restartable sequences cpu_id field. Updated by the kernel,
- * and read by user-space with single-copy atomicity semantics.
- * Aligned on 32-bit. Values RSEQ_CPU_ID_UNINITIALIZED and
- * RSEQ_CPU_ID_REGISTRATION_FAILED have a special semantic: the
- * former means "rseq uninitialized", and latter means "rseq
- * initialization failed". This value is meant to be read within
- * rseq critical sections and compared with the cpu_id_start
- * value previously read, before performing the commit instruction,
- * or read and compared with the cpu_id_start value before returning
- * a value loaded from a data structure indexed using the
- * cpu_id_start value.
+ * Restartable sequences cpu_id field. Updated by the kernel.
+ * Read by user-space with single-copy atomicity semantics. This
+ * field should only be read by the thread which registered this
+ * data structure. Aligned on 32-bit. Values
+ * RSEQ_CPU_ID_UNINITIALIZED and RSEQ_CPU_ID_REGISTRATION_FAILED
+ * have a special semantic: the former means "rseq uninitialized",
+ * and the latter means "rseq initialization failed". This value is
+ * meant to be read within rseq critical sections and compared
+ * with the cpu_id_start value previously read, before performing
+ * the commit instruction, or read and compared with the
+ * cpu_id_start value before returning a value loaded from a data
+ * structure indexed using the cpu_id_start value.
*/
__u32 cpu_id;
/*
@@ -105,27 +102,44 @@ struct rseq {
* targeted by the rseq_cs. Also needs to be set to NULL by user-space
* before reclaiming memory that contains the targeted struct rseq_cs.
*
- * Read and set by the kernel with single-copy atomicity semantics.
- * Set by user-space with single-copy atomicity semantics. Aligned
- * on 64-bit.
+ * Read and set by the kernel. Set by user-space with single-copy
+ * atomicity semantics. This field should only be updated by the
+ * thread which registered this data structure. Aligned on 64-bit.
*/
- LINUX_FIELD_u32_u64(rseq_cs);
+ union {
+ __u64 ptr64;
+#ifdef __LP64__
+ __u64 ptr;
+#else
+ struct {
+#if (defined(__BYTE_ORDER) && (__BYTE_ORDER == __BIG_ENDIAN)) || defined(__BIG_ENDIAN)
+ __u32 padding; /* Initialized to zero. */
+ __u32 ptr32;
+#else /* LITTLE */
+ __u32 ptr32;
+ __u32 padding; /* Initialized to zero. */
+#endif /* ENDIAN */
+ } ptr;
+#endif
+ } rseq_cs;
+
/*
- * - RSEQ_DISABLE flag:
+ * Restartable sequences flags field.
+ *
+ * This field should only be updated by the thread which
+ * registered this data structure. Read by the kernel.
+ * Mainly used for single-stepping through rseq critical sections
+ * with debuggers.
*
- * Fallback fast-track flag for single-stepping.
- * Set by user-space if lack of progress is detected.
- * Cleared by user-space after rseq finish.
- * Read by the kernel.
* - RSEQ_CS_FLAG_NO_RESTART_ON_PREEMPT
- * Inhibit instruction sequence block restart and event
- * counter increment on preemption for this thread.
+ * Inhibit instruction sequence block restart on preemption
+ * for this thread.
* - RSEQ_CS_FLAG_NO_RESTART_ON_SIGNAL
- * Inhibit instruction sequence block restart and event
- * counter increment on signal delivery for this thread.
+ * Inhibit instruction sequence block restart on signal
+ * delivery for this thread.
* - RSEQ_CS_FLAG_NO_RESTART_ON_MIGRATE
- * Inhibit instruction sequence block restart and event
- * counter increment on migration for this thread.
+ * Inhibit instruction sequence block restart on migration for
+ * this thread.
*/
__u32 flags;
} __attribute__((aligned(4 * sizeof(__u64))));
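With types_32_64.h gone, the rseq_cs pointer is now an explicit union, and user code has to pick a store that is single-copy atomic for its word size. A hedged helper sketch written against the layout above; registration via the rseq system call and the critical-section descriptors themselves are out of scope here.

/* Sketch: publish (or clear, with cs == NULL) the rseq_cs descriptor for
 * the current thread. One 64-bit store on LP64, one 32-bit store otherwise;
 * the padding word stays zero as required.
 */
#include <stdint.h>
#include <linux/rseq.h>

static inline void rseq_set_cs(volatile struct rseq *rs, struct rseq_cs *cs)
{
#ifdef __LP64__
	rs->rseq_cs.ptr = (__u64)(uintptr_t)cs;
#else
	rs->rseq_cs.ptr.padding = 0;
	rs->rseq_cs.ptr.ptr32 = (__u32)(uintptr_t)cs;
#endif
}

Per the comment above, user-space must store NULL through the same path before reclaiming the memory that holds the targeted struct rseq_cs.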
diff --git a/include/uapi/linux/rtnetlink.h b/include/uapi/linux/rtnetlink.h
index 7d8502313c99..46399367627f 100644
--- a/include/uapi/linux/rtnetlink.h
+++ b/include/uapi/linux/rtnetlink.h
@@ -150,6 +150,13 @@ enum {
RTM_NEWCACHEREPORT = 96,
#define RTM_NEWCACHEREPORT RTM_NEWCACHEREPORT
+ RTM_NEWCHAIN = 100,
+#define RTM_NEWCHAIN RTM_NEWCHAIN
+ RTM_DELCHAIN,
+#define RTM_DELCHAIN RTM_DELCHAIN
+ RTM_GETCHAIN,
+#define RTM_GETCHAIN RTM_GETCHAIN
+
__RTM_MAX,
#define RTM_MAX (((__RTM_MAX + 3) & ~3) - 1)
};
diff --git a/include/uapi/linux/sctp.h b/include/uapi/linux/sctp.h
index b64d583bf053..b479db5c71d9 100644
--- a/include/uapi/linux/sctp.h
+++ b/include/uapi/linux/sctp.h
@@ -100,6 +100,7 @@ typedef __s32 sctp_assoc_t;
#define SCTP_RECVNXTINFO 33
#define SCTP_DEFAULT_SNDINFO 34
#define SCTP_AUTH_DEACTIVATE_KEY 35
+#define SCTP_REUSE_PORT 36
/* Internal Socket Options. Some of the sctp library functions are
* implemented using these socket options.
@@ -762,6 +763,8 @@ enum sctp_spp_flags {
SPP_SACKDELAY_DISABLE = 1<<6, /*Disable SACK*/
SPP_SACKDELAY = SPP_SACKDELAY_ENABLE | SPP_SACKDELAY_DISABLE,
SPP_HB_TIME_IS_ZERO = 1<<7, /* Set HB delay to 0 */
+ SPP_IPV6_FLOWLABEL = 1<<8,
+ SPP_DSCP = 1<<9,
};
struct sctp_paddrparams {
@@ -772,6 +775,8 @@ struct sctp_paddrparams {
__u32 spp_pathmtu;
__u32 spp_sackdelay;
__u32 spp_flags;
+ __u32 spp_ipv6_flowlabel;
+ __u8 spp_dscp;
} __attribute__((packed, aligned(4)));
/*
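SPP_DSCP and SPP_IPV6_FLOWLABEL piggyback on the existing SCTP_PEER_ADDR_PARAMS socket option, so they are set through the same structure as the heartbeat and path-MTU knobs. A hedged sketch follows; leaving spp_address zeroed to target the whole endpoint, and placing the DSCP in the upper six bits of spp_dscp, are assumptions that should be checked against the sctp(7) documentation.

/* Sketch: mark traffic of an SCTP socket with DSCP 46 (EF). */
#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <linux/sctp.h>

static int sctp_set_dscp(int sock, __u8 dscp)
{
	struct sctp_paddrparams params;

	memset(&params, 0, sizeof(params));
	params.spp_flags = SPP_DSCP;
	params.spp_dscp = dscp << 2;	/* assumption: TOS-style placement */

	return setsockopt(sock, IPPROTO_SCTP, SCTP_PEER_ADDR_PARAMS,
			  &params, sizeof(params));
}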
diff --git a/include/uapi/linux/smc_diag.h b/include/uapi/linux/smc_diag.h
index 0ae5d4685ba3..ac9e8c96d9bd 100644
--- a/include/uapi/linux/smc_diag.h
+++ b/include/uapi/linux/smc_diag.h
@@ -20,7 +20,7 @@ struct smc_diag_req {
struct smc_diag_msg {
__u8 diag_family;
__u8 diag_state;
- __u8 diag_fallback;
+ __u8 diag_mode;
__u8 diag_shutdown;
struct inet_diag_sockid id;
@@ -28,6 +28,13 @@ struct smc_diag_msg {
__u64 diag_inode;
};
+/* Mode of a connection */
+enum {
+ SMC_DIAG_MODE_SMCR,
+ SMC_DIAG_MODE_FALLBACK_TCP,
+ SMC_DIAG_MODE_SMCD,
+};
+
/* Extensions */
enum {
@@ -35,6 +42,8 @@ enum {
SMC_DIAG_CONNINFO,
SMC_DIAG_LGRINFO,
SMC_DIAG_SHUTDOWN,
+ SMC_DIAG_DMBINFO,
+ SMC_DIAG_FALLBACK,
__SMC_DIAG_MAX,
};
@@ -83,4 +92,18 @@ struct smc_diag_lgrinfo {
struct smc_diag_linkinfo lnk[1];
__u8 role;
};
+
+struct smc_diag_fallback {
+ __u32 reason;
+ __u32 peer_diagnosis;
+};
+
+struct smcd_diag_dmbinfo { /* SMC-D Socket internals */
+ __u32 linkid; /* Link identifier */
+ __u64 peer_gid; /* Peer GID */
+ __u64 my_gid; /* My GID */
+ __u64 token; /* Token of DMB */
+ __u64 peer_token; /* Token of remote DMBE */
+};
+
#endif /* _UAPI_SMC_DIAG_H_ */
diff --git a/include/uapi/linux/snmp.h b/include/uapi/linux/snmp.h
index 750d89120335..f80135e5feaa 100644
--- a/include/uapi/linux/snmp.h
+++ b/include/uapi/linux/snmp.h
@@ -56,6 +56,7 @@ enum
IPSTATS_MIB_ECT1PKTS, /* InECT1Pkts */
IPSTATS_MIB_ECT0PKTS, /* InECT0Pkts */
IPSTATS_MIB_CEPKTS, /* InCEPkts */
+ IPSTATS_MIB_REASM_OVERLAPS, /* ReasmOverlaps */
__IPSTATS_MIB_MAX
};
@@ -279,6 +280,8 @@ enum
LINUX_MIB_TCPDELIVERED, /* TCPDelivered */
LINUX_MIB_TCPDELIVEREDCE, /* TCPDeliveredCE */
LINUX_MIB_TCPACKCOMPRESSED, /* TCPAckCompressed */
+ LINUX_MIB_TCPZEROWINDOWDROP, /* TCPZeroWindowDrop */
+ LINUX_MIB_TCPRCVQDROP, /* TCPRcvQDrop */
__LINUX_MIB_MAX
};
diff --git a/include/uapi/linux/sysctl.h b/include/uapi/linux/sysctl.h
index 6b58371b1f0d..d71013fffaf6 100644
--- a/include/uapi/linux/sysctl.h
+++ b/include/uapi/linux/sysctl.h
@@ -575,7 +575,8 @@ enum {
/* /proc/sys/net/ipv6/icmp */
enum {
- NET_IPV6_ICMP_RATELIMIT=1
+ NET_IPV6_ICMP_RATELIMIT = 1,
+ NET_IPV6_ICMP_ECHO_IGNORE_ALL = 2
};
/* /proc/sys/net/<protocol>/neigh/<dev> */
diff --git a/include/uapi/linux/target_core_user.h b/include/uapi/linux/target_core_user.h
index 6e299349b158..b7b57967d90f 100644
--- a/include/uapi/linux/target_core_user.h
+++ b/include/uapi/linux/target_core_user.h
@@ -44,6 +44,7 @@
#define TCMU_MAILBOX_VERSION 2
#define ALIGN_SIZE 64 /* Should be enough for most CPUs */
#define TCMU_MAILBOX_FLAG_CAP_OOOC (1 << 0) /* Out-of-order completions */
+#define TCMU_MAILBOX_FLAG_CAP_READ_LEN (1 << 1) /* Read data length */
struct tcmu_mailbox {
__u16 version;
@@ -71,6 +72,7 @@ struct tcmu_cmd_entry_hdr {
__u16 cmd_id;
__u8 kflags;
#define TCMU_UFLAG_UNKNOWN_OP 0x1
+#define TCMU_UFLAG_READ_LEN 0x2
__u8 uflags;
} __packed;
@@ -119,7 +121,7 @@ struct tcmu_cmd_entry {
__u8 scsi_status;
__u8 __pad1;
__u16 __pad2;
- __u32 __pad3;
+ __u32 read_len;
char sense_buffer[TCMU_SENSE_BUFFERSIZE];
} rsp;
};
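TCMU_MAILBOX_FLAG_CAP_READ_LEN advertises that the kernel can accept a per-command "how much READ data is actually valid" answer; the userspace handler opts in per command with TCMU_UFLAG_READ_LEN and fills the pad word that this patch turns into read_len. A hedged completion helper; locating the entry in the mmap'ed command ring is omitted.

/* Sketch: complete a command whose READ returned fewer bytes than asked. */
#include <linux/target_core_user.h>

static void tcmu_complete_read(struct tcmu_mailbox *mb,
			       struct tcmu_cmd_entry *ent,
			       __u32 valid_bytes, __u8 scsi_status)
{
	ent->rsp.scsi_status = scsi_status;
	if (mb->flags & TCMU_MAILBOX_FLAG_CAP_READ_LEN) {
		ent->hdr.uflags |= TCMU_UFLAG_READ_LEN;	/* rsp.read_len is valid */
		ent->rsp.read_len = valid_bytes;
	}
}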
diff --git a/include/uapi/linux/tc_act/tc_pedit.h b/include/uapi/linux/tc_act/tc_pedit.h
index 162d1094c41c..24ec792dacc1 100644
--- a/include/uapi/linux/tc_act/tc_pedit.h
+++ b/include/uapi/linux/tc_act/tc_pedit.h
@@ -17,13 +17,15 @@ enum {
TCA_PEDIT_KEY_EX,
__TCA_PEDIT_MAX
};
+
#define TCA_PEDIT_MAX (__TCA_PEDIT_MAX - 1)
-
+
enum {
TCA_PEDIT_KEY_EX_HTYPE = 1,
TCA_PEDIT_KEY_EX_CMD = 2,
__TCA_PEDIT_KEY_EX_MAX
};
+
#define TCA_PEDIT_KEY_EX_MAX (__TCA_PEDIT_KEY_EX_MAX - 1)
/* TCA_PEDIT_KEY_EX_HDR_TYPE_NETWROK is a special case for legacy users. It
@@ -38,6 +40,7 @@ enum pedit_header_type {
TCA_PEDIT_KEY_EX_HDR_TYPE_UDP = 5,
__PEDIT_HDR_TYPE_MAX,
};
+
#define TCA_PEDIT_HDR_TYPE_MAX (__PEDIT_HDR_TYPE_MAX - 1)
enum pedit_cmd {
@@ -45,6 +48,7 @@ enum pedit_cmd {
TCA_PEDIT_KEY_EX_CMD_ADD = 1,
__PEDIT_CMD_MAX,
};
+
#define TCA_PEDIT_CMD_MAX (__PEDIT_CMD_MAX - 1)
struct tc_pedit_key {
@@ -55,13 +59,14 @@ struct tc_pedit_key {
__u32 offmask;
__u32 shift;
};
-
+
struct tc_pedit_sel {
tc_gen;
unsigned char nkeys;
unsigned char flags;
struct tc_pedit_key keys[0];
};
+
#define tc_pedit tc_pedit_sel
#endif
diff --git a/include/uapi/linux/tc_act/tc_skbedit.h b/include/uapi/linux/tc_act/tc_skbedit.h
index fbcfe27a4e6c..6de6071ebed6 100644
--- a/include/uapi/linux/tc_act/tc_skbedit.h
+++ b/include/uapi/linux/tc_act/tc_skbedit.h
@@ -30,6 +30,7 @@
#define SKBEDIT_F_MARK 0x4
#define SKBEDIT_F_PTYPE 0x8
#define SKBEDIT_F_MASK 0x10
+#define SKBEDIT_F_INHERITDSFIELD 0x20
struct tc_skbedit {
tc_gen;
@@ -45,6 +46,7 @@ enum {
TCA_SKBEDIT_PAD,
TCA_SKBEDIT_PTYPE,
TCA_SKBEDIT_MASK,
+ TCA_SKBEDIT_FLAGS,
__TCA_SKBEDIT_MAX
};
#define TCA_SKBEDIT_MAX (__TCA_SKBEDIT_MAX - 1)
diff --git a/include/uapi/linux/tc_act/tc_tunnel_key.h b/include/uapi/linux/tc_act/tc_tunnel_key.h
index 72bbefe5d1d1..be384d63e1b5 100644
--- a/include/uapi/linux/tc_act/tc_tunnel_key.h
+++ b/include/uapi/linux/tc_act/tc_tunnel_key.h
@@ -36,9 +36,37 @@ enum {
TCA_TUNNEL_KEY_PAD,
TCA_TUNNEL_KEY_ENC_DST_PORT, /* be16 */
TCA_TUNNEL_KEY_NO_CSUM, /* u8 */
+ TCA_TUNNEL_KEY_ENC_OPTS, /* Nested TCA_TUNNEL_KEY_ENC_OPTS_
+ * attributes
+ */
+ TCA_TUNNEL_KEY_ENC_TOS, /* u8 */
+ TCA_TUNNEL_KEY_ENC_TTL, /* u8 */
__TCA_TUNNEL_KEY_MAX,
};
#define TCA_TUNNEL_KEY_MAX (__TCA_TUNNEL_KEY_MAX - 1)
+enum {
+ TCA_TUNNEL_KEY_ENC_OPTS_UNSPEC,
+ TCA_TUNNEL_KEY_ENC_OPTS_GENEVE, /* Nested
+ * TCA_TUNNEL_KEY_ENC_OPTS_
+ * attributes
+ */
+ __TCA_TUNNEL_KEY_ENC_OPTS_MAX,
+};
+
+#define TCA_TUNNEL_KEY_ENC_OPTS_MAX (__TCA_TUNNEL_KEY_ENC_OPTS_MAX - 1)
+
+enum {
+ TCA_TUNNEL_KEY_ENC_OPT_GENEVE_UNSPEC,
+ TCA_TUNNEL_KEY_ENC_OPT_GENEVE_CLASS, /* be16 */
+ TCA_TUNNEL_KEY_ENC_OPT_GENEVE_TYPE, /* u8 */
+ TCA_TUNNEL_KEY_ENC_OPT_GENEVE_DATA, /* 4 to 128 bytes */
+
+ __TCA_TUNNEL_KEY_ENC_OPT_GENEVE_MAX,
+};
+
+#define TCA_TUNNEL_KEY_ENC_OPT_GENEVE_MAX \
+ (__TCA_TUNNEL_KEY_ENC_OPT_GENEVE_MAX - 1)
+
#endif
diff --git a/include/uapi/linux/tcp.h b/include/uapi/linux/tcp.h
index 29eb659aa77a..e02d31986ff9 100644
--- a/include/uapi/linux/tcp.h
+++ b/include/uapi/linux/tcp.h
@@ -127,6 +127,10 @@ enum {
#define TCP_CM_INQ TCP_INQ
+#define TCP_REPAIR_ON 1
+#define TCP_REPAIR_OFF 0
+#define TCP_REPAIR_OFF_NO_WP -1 /* Turn off without window probes */
+
struct tcp_repair_opt {
__u32 opt_code;
__u32 opt_val;
@@ -231,6 +235,11 @@ struct tcp_info {
__u32 tcpi_delivered;
__u32 tcpi_delivered_ce;
+
+ __u64 tcpi_bytes_sent; /* RFC4898 tcpEStatsPerfHCDataOctetsOut */
+ __u64 tcpi_bytes_retrans; /* RFC4898 tcpEStatsPerfOctetsRetrans */
+ __u32 tcpi_dsack_dups; /* RFC4898 tcpEStatsStackDSACKDups */
+ __u32 tcpi_reord_seen; /* reordering events seen */
};
/* netlink attributes types for SCM_TIMESTAMPING_OPT_STATS */
@@ -253,7 +262,10 @@ enum {
TCP_NLA_SND_SSTHRESH, /* Slow start size threshold */
TCP_NLA_DELIVERED, /* Data pkts delivered incl. out-of-order */
TCP_NLA_DELIVERED_CE, /* Like above but only ones w/ CE marks */
-
+ TCP_NLA_BYTES_SENT, /* Data bytes sent including retransmission */
+ TCP_NLA_BYTES_RETRANS, /* Data bytes retransmitted */
+ TCP_NLA_DSACK_DUPS, /* DSACK blocks received */
+ TCP_NLA_REORD_SEEN, /* reordering events seen */
};
/* for TCP_MD5SIG socket option */
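The four new tcp_info fields are appended at the end of the structure, so old binaries are unaffected and new readers simply pass the larger size; the kernel fills what it knows and reports the filled length back. A short sketch:

/* Sketch: read the new RFC 4898-style counters via TCP_INFO. */
#include <stdio.h>
#include <stddef.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <linux/tcp.h>

static void print_retrans_stats(int sock)
{
	struct tcp_info info;
	socklen_t len = sizeof(info);

	if (getsockopt(sock, IPPROTO_TCP, TCP_INFO, &info, &len) < 0)
		return;
	if (len < offsetof(struct tcp_info, tcpi_reord_seen) +
		  sizeof(info.tcpi_reord_seen))
		return;		/* older kernel: new fields not filled */

	printf("sent %llu retrans %llu dsack_dups %u reord %u\n",
	       (unsigned long long)info.tcpi_bytes_sent,
	       (unsigned long long)info.tcpi_bytes_retrans,
	       info.tcpi_dsack_dups, info.tcpi_reord_seen);
}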
diff --git a/include/uapi/linux/time.h b/include/uapi/linux/time.h
index fcf936656493..6b56a2208be7 100644
--- a/include/uapi/linux/time.h
+++ b/include/uapi/linux/time.h
@@ -49,6 +49,13 @@ struct __kernel_timespec {
};
#endif
+#ifndef __kernel_itimerspec
+struct __kernel_itimerspec {
+ struct __kernel_timespec it_interval; /* timer period */
+ struct __kernel_timespec it_value; /* timer expiration */
+};
+#endif
+
/*
* legacy timeval structure, only embedded in structures that
* traditionally used 'timeval' to pass time intervals (not absolute
diff --git a/include/uapi/linux/tipc_netlink.h b/include/uapi/linux/tipc_netlink.h
index 85c11982c89b..0ebe02ef1a86 100644
--- a/include/uapi/linux/tipc_netlink.h
+++ b/include/uapi/linux/tipc_netlink.h
@@ -121,6 +121,7 @@ enum {
TIPC_NLA_SOCK_TIPC_STATE, /* u32 */
TIPC_NLA_SOCK_COOKIE, /* u64 */
TIPC_NLA_SOCK_PAD, /* flag */
+ TIPC_NLA_SOCK_GROUP, /* nest */
__TIPC_NLA_SOCK_MAX,
TIPC_NLA_SOCK_MAX = __TIPC_NLA_SOCK_MAX - 1
@@ -233,6 +234,19 @@ enum {
TIPC_NLA_MON_PEER_MAX = __TIPC_NLA_MON_PEER_MAX - 1
};
+/* Nest, socket group info */
+enum {
+ TIPC_NLA_SOCK_GROUP_ID, /* u32 */
+ TIPC_NLA_SOCK_GROUP_OPEN, /* flag */
+ TIPC_NLA_SOCK_GROUP_NODE_SCOPE, /* flag */
+ TIPC_NLA_SOCK_GROUP_CLUSTER_SCOPE, /* flag */
+ TIPC_NLA_SOCK_GROUP_INSTANCE, /* u32 */
+ TIPC_NLA_SOCK_GROUP_BC_SEND_NEXT, /* u32 */
+
+ __TIPC_NLA_SOCK_GROUP_MAX,
+ TIPC_NLA_SOCK_GROUP_MAX = __TIPC_NLA_SOCK_GROUP_MAX - 1
+};
+
/* Nest, connection info */
enum {
TIPC_NLA_CON_UNSPEC,
diff --git a/include/uapi/linux/types_32_64.h b/include/uapi/linux/types_32_64.h
deleted file mode 100644
index 0a87ace34a57..000000000000
--- a/include/uapi/linux/types_32_64.h
+++ /dev/null
@@ -1,50 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */
-#ifndef _UAPI_LINUX_TYPES_32_64_H
-#define _UAPI_LINUX_TYPES_32_64_H
-
-/*
- * linux/types_32_64.h
- *
- * Integer type declaration for pointers across 32-bit and 64-bit systems.
- *
- * Copyright (c) 2015-2018 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
-#ifdef __KERNEL__
-# include <linux/types.h>
-#else
-# include <stdint.h>
-#endif
-
-#include <asm/byteorder.h>
-
-#ifdef __BYTE_ORDER
-# if (__BYTE_ORDER == __BIG_ENDIAN)
-# define LINUX_BYTE_ORDER_BIG_ENDIAN
-# else
-# define LINUX_BYTE_ORDER_LITTLE_ENDIAN
-# endif
-#else
-# ifdef __BIG_ENDIAN
-# define LINUX_BYTE_ORDER_BIG_ENDIAN
-# else
-# define LINUX_BYTE_ORDER_LITTLE_ENDIAN
-# endif
-#endif
-
-#ifdef __LP64__
-# define LINUX_FIELD_u32_u64(field) __u64 field
-# define LINUX_FIELD_u32_u64_INIT_ONSTACK(field, v) field = (intptr_t)v
-#else
-# ifdef LINUX_BYTE_ORDER_BIG_ENDIAN
-# define LINUX_FIELD_u32_u64(field) __u32 field ## _padding, field
-# define LINUX_FIELD_u32_u64_INIT_ONSTACK(field, v) \
- field ## _padding = 0, field = (intptr_t)v
-# else
-# define LINUX_FIELD_u32_u64(field) __u32 field, field ## _padding
-# define LINUX_FIELD_u32_u64_INIT_ONSTACK(field, v) \
- field = (intptr_t)v, field ## _padding = 0
-# endif
-#endif
-
-#endif /* _UAPI_LINUX_TYPES_32_64_H */
diff --git a/include/uapi/linux/usb/audio.h b/include/uapi/linux/usb/audio.h
index 74e520fb944f..ddc5396800aa 100644
--- a/include/uapi/linux/usb/audio.h
+++ b/include/uapi/linux/usb/audio.h
@@ -390,33 +390,64 @@ static inline __u8 uac_processing_unit_iChannelNames(struct uac_processing_unit_
static inline __u8 uac_processing_unit_bControlSize(struct uac_processing_unit_descriptor *desc,
int protocol)
{
- return (protocol == UAC_VERSION_1) ?
- desc->baSourceID[desc->bNrInPins + 4] :
- 2; /* in UAC2, this value is constant */
+ switch (protocol) {
+ case UAC_VERSION_1:
+ return desc->baSourceID[desc->bNrInPins + 4];
+ case UAC_VERSION_2:
+ return 2; /* in UAC2, this value is constant */
+ case UAC_VERSION_3:
+ return 4; /* in UAC3, this value is constant */
+ default:
+ return 1;
+ }
}
static inline __u8 *uac_processing_unit_bmControls(struct uac_processing_unit_descriptor *desc,
int protocol)
{
- return (protocol == UAC_VERSION_1) ?
- &desc->baSourceID[desc->bNrInPins + 5] :
- &desc->baSourceID[desc->bNrInPins + 6];
+ switch (protocol) {
+ case UAC_VERSION_1:
+ return &desc->baSourceID[desc->bNrInPins + 5];
+ case UAC_VERSION_2:
+ return &desc->baSourceID[desc->bNrInPins + 6];
+ case UAC_VERSION_3:
+ return &desc->baSourceID[desc->bNrInPins + 2];
+ default:
+ return NULL;
+ }
}
static inline __u8 uac_processing_unit_iProcessing(struct uac_processing_unit_descriptor *desc,
int protocol)
{
__u8 control_size = uac_processing_unit_bControlSize(desc, protocol);
- return *(uac_processing_unit_bmControls(desc, protocol)
- + control_size);
+
+ switch (protocol) {
+ case UAC_VERSION_1:
+ case UAC_VERSION_2:
+ default:
+ return *(uac_processing_unit_bmControls(desc, protocol)
+ + control_size);
+ case UAC_VERSION_3:
+ return 0; /* UAC3 does not have this field */
+ }
}
static inline __u8 *uac_processing_unit_specific(struct uac_processing_unit_descriptor *desc,
int protocol)
{
__u8 control_size = uac_processing_unit_bControlSize(desc, protocol);
- return uac_processing_unit_bmControls(desc, protocol)
+
+ switch (protocol) {
+ case UAC_VERSION_1:
+ case UAC_VERSION_2:
+ default:
+ return uac_processing_unit_bmControls(desc, protocol)
+ + control_size + 1;
+ case UAC_VERSION_3:
+ return uac_processing_unit_bmControls(desc, protocol)
+ + control_size;
+ }
}
/* 4.5.2 Class-Specific AS Interface Descriptor */
diff --git a/include/uapi/linux/uvcvideo.h b/include/uapi/linux/uvcvideo.h
index 020714d2c5bd..f80f05b3c423 100644
--- a/include/uapi/linux/uvcvideo.h
+++ b/include/uapi/linux/uvcvideo.h
@@ -28,6 +28,8 @@
#define UVC_CTRL_FLAG_RESTORE (1 << 6)
/* Control can be updated by the camera. */
#define UVC_CTRL_FLAG_AUTO_UPDATE (1 << 7)
+/* Control supports asynchronous reporting */
+#define UVC_CTRL_FLAG_ASYNCHRONOUS (1 << 8)
#define UVC_CTRL_FLAG_GET_RANGE \
(UVC_CTRL_FLAG_GET_CUR | UVC_CTRL_FLAG_GET_MIN | \
diff --git a/include/uapi/linux/v4l2-controls.h b/include/uapi/linux/v4l2-controls.h
index 8d473c979b61..e4ee10ee917d 100644
--- a/include/uapi/linux/v4l2-controls.h
+++ b/include/uapi/linux/v4l2-controls.h
@@ -188,7 +188,7 @@ enum v4l2_colorfx {
/* The base for the imx driver controls.
* We reserve 16 controls for this driver. */
-#define V4L2_CID_USER_IMX_BASE (V4L2_CID_USER_BASE + 0x1090)
+#define V4L2_CID_USER_IMX_BASE (V4L2_CID_USER_BASE + 0x10b0)
/* MPEG-class control IDs */
/* The MPEG controls are applicable to all codec controls
@@ -587,7 +587,23 @@ enum v4l2_vp8_golden_frame_sel {
#define V4L2_CID_MPEG_VIDEO_VPX_MAX_QP (V4L2_CID_MPEG_BASE+508)
#define V4L2_CID_MPEG_VIDEO_VPX_I_FRAME_QP (V4L2_CID_MPEG_BASE+509)
#define V4L2_CID_MPEG_VIDEO_VPX_P_FRAME_QP (V4L2_CID_MPEG_BASE+510)
-#define V4L2_CID_MPEG_VIDEO_VPX_PROFILE (V4L2_CID_MPEG_BASE+511)
+
+#define V4L2_CID_MPEG_VIDEO_VP8_PROFILE (V4L2_CID_MPEG_BASE+511)
+enum v4l2_mpeg_video_vp8_profile {
+ V4L2_MPEG_VIDEO_VP8_PROFILE_0 = 0,
+ V4L2_MPEG_VIDEO_VP8_PROFILE_1 = 1,
+ V4L2_MPEG_VIDEO_VP8_PROFILE_2 = 2,
+ V4L2_MPEG_VIDEO_VP8_PROFILE_3 = 3,
+};
+/* Deprecated alias for compatibility reasons. */
+#define V4L2_CID_MPEG_VIDEO_VPX_PROFILE V4L2_CID_MPEG_VIDEO_VP8_PROFILE
+#define V4L2_CID_MPEG_VIDEO_VP9_PROFILE (V4L2_CID_MPEG_BASE+512)
+enum v4l2_mpeg_video_vp9_profile {
+ V4L2_MPEG_VIDEO_VP9_PROFILE_0 = 0,
+ V4L2_MPEG_VIDEO_VP9_PROFILE_1 = 1,
+ V4L2_MPEG_VIDEO_VP9_PROFILE_2 = 2,
+ V4L2_MPEG_VIDEO_VP9_PROFILE_3 = 3,
+};
/* CIDs for HEVC encoding. */
diff --git a/include/uapi/linux/v4l2-subdev.h b/include/uapi/linux/v4l2-subdev.h
index c95a53e6743c..03970ce30741 100644
--- a/include/uapi/linux/v4l2-subdev.h
+++ b/include/uapi/linux/v4l2-subdev.h
@@ -170,8 +170,12 @@ struct v4l2_subdev_selection {
#define VIDIOC_SUBDEV_G_SELECTION _IOWR('V', 61, struct v4l2_subdev_selection)
#define VIDIOC_SUBDEV_S_SELECTION _IOWR('V', 62, struct v4l2_subdev_selection)
/* The following ioctls are identical to the ioctls in videodev2.h */
+#define VIDIOC_SUBDEV_G_STD _IOR('V', 23, v4l2_std_id)
+#define VIDIOC_SUBDEV_S_STD _IOW('V', 24, v4l2_std_id)
+#define VIDIOC_SUBDEV_ENUMSTD _IOWR('V', 25, struct v4l2_standard)
#define VIDIOC_SUBDEV_G_EDID _IOWR('V', 40, struct v4l2_edid)
#define VIDIOC_SUBDEV_S_EDID _IOWR('V', 41, struct v4l2_edid)
+#define VIDIOC_SUBDEV_QUERYSTD _IOR('V', 63, v4l2_std_id)
#define VIDIOC_SUBDEV_S_DV_TIMINGS _IOWR('V', 87, struct v4l2_dv_timings)
#define VIDIOC_SUBDEV_G_DV_TIMINGS _IOWR('V', 88, struct v4l2_dv_timings)
#define VIDIOC_SUBDEV_ENUM_DV_TIMINGS _IOWR('V', 98, struct v4l2_enum_dv_timings)
diff --git a/include/uapi/linux/vhost.h b/include/uapi/linux/vhost.h
index c51f8e5cc608..b1e22c40c4b6 100644
--- a/include/uapi/linux/vhost.h
+++ b/include/uapi/linux/vhost.h
@@ -65,6 +65,7 @@ struct vhost_iotlb_msg {
};
#define VHOST_IOTLB_MSG 0x1
+#define VHOST_IOTLB_MSG_V2 0x2
struct vhost_msg {
int type;
@@ -74,6 +75,15 @@ struct vhost_msg {
};
};
+struct vhost_msg_v2 {
+ __u32 type;
+ __u32 reserved;
+ union {
+ struct vhost_iotlb_msg iotlb;
+ __u8 padding[64];
+ };
+};
+
struct vhost_memory_region {
__u64 guest_phys_addr;
__u64 memory_size; /* bytes */
@@ -160,6 +170,14 @@ struct vhost_memory {
#define VHOST_GET_VRING_BUSYLOOP_TIMEOUT _IOW(VHOST_VIRTIO, 0x24, \
struct vhost_vring_state)
+/* Set or get vhost backend capability */
+
+/* Use message type V2 */
+#define VHOST_BACKEND_F_IOTLB_MSG_V2 0x1
+
+#define VHOST_SET_BACKEND_FEATURES _IOW(VHOST_VIRTIO, 0x25, __u64)
+#define VHOST_GET_BACKEND_FEATURES _IOW(VHOST_VIRTIO, 0x26, __u64)
+
/* VHOST_NET specific defines */
/* Attach virtio net ring to a raw socket, or tap device.
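VHOST_SET_BACKEND_FEATURES with VHOST_BACKEND_F_IOTLB_MSG_V2 switches the IOTLB message channel on the vhost fd over to struct vhost_msg_v2, which is exchanged by reading and writing the device fd. A hedged sketch of the negotiation and receive side; the rest of the vhost device setup is omitted.

/* Sketch: negotiate v2 IOTLB messages and read one from the vhost fd. */
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/vhost.h>

static int vhost_recv_iotlb(int vhost_fd, struct vhost_iotlb_msg *out)
{
	__u64 features = VHOST_BACKEND_F_IOTLB_MSG_V2;
	struct vhost_msg_v2 msg;

	if (ioctl(vhost_fd, VHOST_SET_BACKEND_FEATURES, &features) < 0)
		return -1;
	if (read(vhost_fd, &msg, sizeof(msg)) != (ssize_t)sizeof(msg))
		return -1;
	if (msg.type != VHOST_IOTLB_MSG_V2)
		return -1;
	*out = msg.iotlb;
	return 0;
}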
diff --git a/include/uapi/linux/videodev2.h b/include/uapi/linux/videodev2.h
index 600877be5c22..5d1a3685bea9 100644
--- a/include/uapi/linux/videodev2.h
+++ b/include/uapi/linux/videodev2.h
@@ -522,6 +522,7 @@ struct v4l2_pix_format {
/* Grey bit-packed formats */
#define V4L2_PIX_FMT_Y10BPACK v4l2_fourcc('Y', '1', '0', 'B') /* 10 Greyscale bit-packed */
+#define V4L2_PIX_FMT_Y10P v4l2_fourcc('Y', '1', '0', 'P') /* 10 Greyscale, MIPI RAW10 packed */
/* Palette formats */
#define V4L2_PIX_FMT_PAL8 v4l2_fourcc('P', 'A', 'L', '8') /* 8 8-bit palette */
@@ -609,6 +610,11 @@ struct v4l2_pix_format {
#define V4L2_PIX_FMT_SGBRG12P v4l2_fourcc('p', 'G', 'C', 'C')
#define V4L2_PIX_FMT_SGRBG12P v4l2_fourcc('p', 'g', 'C', 'C')
#define V4L2_PIX_FMT_SRGGB12P v4l2_fourcc('p', 'R', 'C', 'C')
+ /* 14bit raw bayer packed, 7 bytes for every 4 pixels */
+#define V4L2_PIX_FMT_SBGGR14P v4l2_fourcc('p', 'B', 'E', 'E')
+#define V4L2_PIX_FMT_SGBRG14P v4l2_fourcc('p', 'G', 'E', 'E')
+#define V4L2_PIX_FMT_SGRBG14P v4l2_fourcc('p', 'g', 'E', 'E')
+#define V4L2_PIX_FMT_SRGGB14P v4l2_fourcc('p', 'R', 'E', 'E')
#define V4L2_PIX_FMT_SBGGR16 v4l2_fourcc('B', 'Y', 'R', '2') /* 16 BGBG.. GRGR.. */
#define V4L2_PIX_FMT_SGBRG16 v4l2_fourcc('G', 'B', '1', '6') /* 16 GBGB.. RGRG.. */
#define V4L2_PIX_FMT_SGRBG16 v4l2_fourcc('G', 'R', '1', '6') /* 16 GRGR.. BGBG.. */
@@ -636,6 +642,7 @@ struct v4l2_pix_format {
#define V4L2_PIX_FMT_VP8 v4l2_fourcc('V', 'P', '8', '0') /* VP8 */
#define V4L2_PIX_FMT_VP9 v4l2_fourcc('V', 'P', '9', '0') /* VP9 */
#define V4L2_PIX_FMT_HEVC v4l2_fourcc('H', 'E', 'V', 'C') /* HEVC aka H.265 */
+#define V4L2_PIX_FMT_FWHT v4l2_fourcc('F', 'W', 'H', 'T') /* Fast Walsh Hadamard Transform (vicodec) */
/* Vendor-specific formats */
#define V4L2_PIX_FMT_CPIA1 v4l2_fourcc('C', 'P', 'I', 'A') /* cpia1 YUV */
@@ -2310,7 +2317,6 @@ struct v4l2_create_buffers {
*
*/
#define VIDIOC_QUERYCAP _IOR('V', 0, struct v4l2_capability)
-#define VIDIOC_RESERVED _IO('V', 1)
#define VIDIOC_ENUM_FMT _IOWR('V', 2, struct v4l2_fmtdesc)
#define VIDIOC_G_FMT _IOWR('V', 4, struct v4l2_format)
#define VIDIOC_S_FMT _IOWR('V', 5, struct v4l2_format)
diff --git a/include/uapi/linux/xfrm.h b/include/uapi/linux/xfrm.h
index e3af2859188b..5f3b9fec7b5f 100644
--- a/include/uapi/linux/xfrm.h
+++ b/include/uapi/linux/xfrm.h
@@ -305,9 +305,12 @@ enum xfrm_attr_type_t {
XFRMA_ADDRESS_FILTER, /* struct xfrm_address_filter */
XFRMA_PAD,
XFRMA_OFFLOAD_DEV, /* struct xfrm_state_offload */
- XFRMA_OUTPUT_MARK, /* __u32 */
+ XFRMA_SET_MARK, /* __u32 */
+ XFRMA_SET_MARK_MASK, /* __u32 */
+ XFRMA_IF_ID, /* __u32 */
__XFRMA_MAX
+#define XFRMA_OUTPUT_MARK XFRMA_SET_MARK /* Compatibility */
#define XFRMA_MAX (__XFRMA_MAX - 1)
};
diff --git a/include/uapi/xen/gntdev.h b/include/uapi/xen/gntdev.h
index 6d1163456c03..fe4423e518c6 100644
--- a/include/uapi/xen/gntdev.h
+++ b/include/uapi/xen/gntdev.h
@@ -5,6 +5,7 @@
* Interface to /dev/xen/gntdev.
*
* Copyright (c) 2007, D G Murray
+ * Copyright (c) 2018, Oleksandr Andrushchenko, EPAM Systems Inc.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version 2
@@ -200,4 +201,109 @@ struct ioctl_gntdev_grant_copy {
/* Send an interrupt on the indicated event channel */
#define UNMAP_NOTIFY_SEND_EVENT 0x2
+/*
+ * Flags to be used while requesting memory mapping's backing storage
+ * to be allocated with DMA API.
+ */
+
+/*
+ * The buffer is backed with memory allocated with dma_alloc_wc.
+ */
+#define GNTDEV_DMA_FLAG_WC (1 << 0)
+
+/*
+ * The buffer is backed with memory allocated with dma_alloc_coherent.
+ */
+#define GNTDEV_DMA_FLAG_COHERENT (1 << 1)
+
+/*
+ * Create a dma-buf [1] from grant references @refs of count @count provided
+ * by the foreign domain @domid with flags @flags.
+ *
+ * By default dma-buf is backed by system memory pages, but by providing
+ * one of the GNTDEV_DMA_FLAG_XXX flags it can also be created as
+ * a DMA write-combine or coherent buffer, e.g. allocated with dma_alloc_wc/
+ * dma_alloc_coherent.
+ *
+ * Returns 0 if dma-buf was successfully created and the corresponding
+ * dma-buf's file descriptor is returned in @fd.
+ *
+ * [1] Documentation/driver-api/dma-buf.rst
+ */
+
+#define IOCTL_GNTDEV_DMABUF_EXP_FROM_REFS \
+ _IOC(_IOC_NONE, 'G', 9, \
+ sizeof(struct ioctl_gntdev_dmabuf_exp_from_refs))
+struct ioctl_gntdev_dmabuf_exp_from_refs {
+ /* IN parameters. */
+ /* Specific options for this dma-buf: see GNTDEV_DMA_FLAG_XXX. */
+ __u32 flags;
+ /* Number of grant references in @refs array. */
+ __u32 count;
+ /* OUT parameters. */
+ /* File descriptor of the dma-buf. */
+ __u32 fd;
+ /* The domain ID of the grant references to be mapped. */
+ __u32 domid;
+ /* Variable IN parameter. */
+ /* Array of grant references of size @count. */
+ __u32 refs[1];
+};
+
+/*
+ * This will block until the dma-buf with the file descriptor @fd is
+ * released. This is only valid for buffers created with
+ * IOCTL_GNTDEV_DMABUF_EXP_FROM_REFS.
+ *
+ * If within @wait_to_ms milliseconds the buffer is not released
+ * then -ETIMEDOUT error is returned.
+ * If the buffer with the file descriptor @fd does not exist or has already
+ * been released, then -ENOENT is returned. For valid file descriptors
+ * this must not be treated as error.
+ */
+#define IOCTL_GNTDEV_DMABUF_EXP_WAIT_RELEASED \
+ _IOC(_IOC_NONE, 'G', 10, \
+ sizeof(struct ioctl_gntdev_dmabuf_exp_wait_released))
+struct ioctl_gntdev_dmabuf_exp_wait_released {
+ /* IN parameters */
+ __u32 fd;
+ __u32 wait_to_ms;
+};
+
+/*
+ * Import a dma-buf with file descriptor @fd and export granted references
+ * to the pages of that dma-buf into array @refs of size @count.
+ */
+#define IOCTL_GNTDEV_DMABUF_IMP_TO_REFS \
+ _IOC(_IOC_NONE, 'G', 11, \
+ sizeof(struct ioctl_gntdev_dmabuf_imp_to_refs))
+struct ioctl_gntdev_dmabuf_imp_to_refs {
+ /* IN parameters. */
+ /* File descriptor of the dma-buf. */
+ __u32 fd;
+ /* Number of grant references in @refs array. */
+ __u32 count;
+ /* The domain ID for which references are to be granted. */
+ __u32 domid;
+ /* Reserved - must be zero. */
+ __u32 reserved;
+ /* OUT parameters. */
+ /* Array of grant references of size @count. */
+ __u32 refs[1];
+};
+
+/*
+ * This will close all references to the imported buffer with file descriptor
+ * @fd, so it can be released by the owner. This is only valid for buffers
+ * created with IOCTL_GNTDEV_DMABUF_IMP_TO_REFS.
+ */
+#define IOCTL_GNTDEV_DMABUF_IMP_RELEASE \
+ _IOC(_IOC_NONE, 'G', 12, \
+ sizeof(struct ioctl_gntdev_dmabuf_imp_release))
+struct ioctl_gntdev_dmabuf_imp_release {
+ /* IN parameters */
+ __u32 fd;
+ __u32 reserved;
+};
+
#endif /* __LINUX_PUBLIC_GNTDEV_H__ */
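IOCTL_GNTDEV_DMABUF_EXP_FROM_REFS takes a variable-length array of grant references, so the request has to be allocated with room for all count entries. A hedged sketch of exporting a write-combined dma-buf from foreign grants; the refs, count and domid values come from the caller, and /dev/xen/gntdev is assumed to be open already.

/* Sketch: export a dma-buf backed by foreign grant references. */
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <xen/gntdev.h>

static int gntdev_export_dmabuf(int gntdev_fd, const __u32 *refs,
				__u32 count, __u32 domid)
{
	struct ioctl_gntdev_dmabuf_exp_from_refs *op;
	size_t sz;
	int ret = -1;

	if (!count)
		return -1;
	sz = sizeof(*op) + (count - 1) * sizeof(__u32);	/* refs[1] already counted */
	op = calloc(1, sz);
	if (!op)
		return -1;

	op->flags = GNTDEV_DMA_FLAG_WC;
	op->count = count;
	op->domid = domid;
	memcpy(op->refs, refs, count * sizeof(__u32));

	if (ioctl(gntdev_fd, IOCTL_GNTDEV_DMABUF_EXP_FROM_REFS, op) == 0)
		ret = op->fd;			/* the dma-buf file descriptor */
	free(op);
	return ret;
}

The import direction is symmetric: IOCTL_GNTDEV_DMABUF_IMP_TO_REFS fills the refs[] array for an existing dma-buf fd, and IOCTL_GNTDEV_DMABUF_IMP_RELEASE drops those references again.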
diff --git a/include/video/mipi_display.h b/include/video/mipi_display.h
index 19aa65a35546..49a53ef8da96 100644
--- a/include/video/mipi_display.h
+++ b/include/video/mipi_display.h
@@ -38,6 +38,9 @@ enum {
MIPI_DSI_DCS_READ = 0x06,
+ MIPI_DSI_DCS_COMPRESSION_MODE = 0x07,
+ MIPI_DSI_PPS_LONG_WRITE = 0x0A,
+
MIPI_DSI_SET_MAXIMUM_RETURN_PACKET_SIZE = 0x37,
MIPI_DSI_END_OF_TRANSMISSION = 0x08,
diff --git a/include/xen/grant_table.h b/include/xen/grant_table.h
index 2e37741f6b8d..9bc5bc07d4d3 100644
--- a/include/xen/grant_table.h
+++ b/include/xen/grant_table.h
@@ -198,6 +198,27 @@ void gnttab_free_auto_xlat_frames(void);
int gnttab_alloc_pages(int nr_pages, struct page **pages);
void gnttab_free_pages(int nr_pages, struct page **pages);
+#ifdef CONFIG_XEN_GRANT_DMA_ALLOC
+struct gnttab_dma_alloc_args {
+ /* Device for which DMA memory will be/was allocated. */
+ struct device *dev;
+ /* If set then DMA buffer is coherent and write-combine otherwise. */
+ bool coherent;
+
+ int nr_pages;
+ struct page **pages;
+ xen_pfn_t *frames;
+ void *vaddr;
+ dma_addr_t dev_bus_addr;
+};
+
+int gnttab_dma_alloc_pages(struct gnttab_dma_alloc_args *args);
+int gnttab_dma_free_pages(struct gnttab_dma_alloc_args *args);
+#endif
+
+int gnttab_pages_set_private(int nr_pages, struct page **pages);
+void gnttab_pages_clear_private(int nr_pages, struct page **pages);
+
int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
struct gnttab_map_grant_ref *kmap_ops,
struct page **pages, unsigned int count);
diff --git a/include/xen/mem-reservation.h b/include/xen/mem-reservation.h
new file mode 100644
index 000000000000..80b52b4945e9
--- /dev/null
+++ b/include/xen/mem-reservation.h
@@ -0,0 +1,59 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/*
+ * Xen memory reservation utilities.
+ *
+ * Copyright (c) 2003, B Dragovic
+ * Copyright (c) 2003-2004, M Williamson, K Fraser
+ * Copyright (c) 2005 Dan M. Smith, IBM Corporation
+ * Copyright (c) 2010 Daniel Kiper
+ * Copyright (c) 2018 Oleksandr Andrushchenko, EPAM Systems Inc.
+ */
+
+#ifndef _XENMEM_RESERVATION_H
+#define _XENMEM_RESERVATION_H
+
+#include <linux/highmem.h>
+
+#include <xen/page.h>
+
+static inline void xenmem_reservation_scrub_page(struct page *page)
+{
+#ifdef CONFIG_XEN_SCRUB_PAGES
+ clear_highpage(page);
+#endif
+}
+
+#ifdef CONFIG_XEN_HAVE_PVMMU
+void __xenmem_reservation_va_mapping_update(unsigned long count,
+ struct page **pages,
+ xen_pfn_t *frames);
+
+void __xenmem_reservation_va_mapping_reset(unsigned long count,
+ struct page **pages);
+#endif
+
+static inline void xenmem_reservation_va_mapping_update(unsigned long count,
+ struct page **pages,
+ xen_pfn_t *frames)
+{
+#ifdef CONFIG_XEN_HAVE_PVMMU
+ if (!xen_feature(XENFEAT_auto_translated_physmap))
+ __xenmem_reservation_va_mapping_update(count, pages, frames);
+#endif
+}
+
+static inline void xenmem_reservation_va_mapping_reset(unsigned long count,
+ struct page **pages)
+{
+#ifdef CONFIG_XEN_HAVE_PVMMU
+ if (!xen_feature(XENFEAT_auto_translated_physmap))
+ __xenmem_reservation_va_mapping_reset(count, pages);
+#endif
+}
+
+int xenmem_reservation_increase(int count, xen_pfn_t *frames);
+
+int xenmem_reservation_decrease(int count, xen_pfn_t *frames);
+
+#endif
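These helpers factor the PV-MMU-aware bookkeeping out of the balloon driver so that other frontends can hand pages to and from Xen the same way. A hedged kernel-side sketch of the "give one page back" sequence; xen_page_to_gfn() from <xen/page.h> is assumed as the frame lookup, mirroring what the balloon driver does.

/* Sketch: return a single page to the hypervisor. Error handling and
 * batching (the usual frame_list[] array) are omitted.
 */
#include <xen/mem-reservation.h>
#include <xen/page.h>

static int give_page_back(struct page *page)
{
	xen_pfn_t frame = xen_page_to_gfn(page);

	xenmem_reservation_scrub_page(page);		/* wipe guest data first */
	xenmem_reservation_va_mapping_reset(1, &page);	/* undo PV-MMU mapping, if any */

	return xenmem_reservation_decrease(1, &frame);	/* frames actually released */
}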
diff --git a/include/xen/xen.h b/include/xen/xen.h
index 9d4340c907d1..1e1d9bd0bd37 100644
--- a/include/xen/xen.h
+++ b/include/xen/xen.h
@@ -25,12 +25,16 @@ extern bool xen_pvh;
#define xen_hvm_domain() (xen_domain_type == XEN_HVM_DOMAIN)
#define xen_pvh_domain() (xen_pvh)
+#include <linux/types.h>
+
+extern uint32_t xen_start_flags;
+
#ifdef CONFIG_XEN_DOM0
#include <xen/interface/xen.h>
#include <asm/xen/hypervisor.h>
#define xen_initial_domain() (xen_domain() && \
- xen_start_info && xen_start_info->flags & SIF_INITDOMAIN)
+ (xen_start_flags & SIF_INITDOMAIN))
#else /* !CONFIG_XEN_DOM0 */
#define xen_initial_domain() (0)
#endif /* CONFIG_XEN_DOM0 */