Diffstat (limited to 'include')
-rw-r--r--  include/acpi/acpi_bus.h | 6
-rw-r--r--  include/acpi/acpi_drivers.h | 1
-rw-r--r--  include/acpi/acpiosxf.h | 10
-rw-r--r--  include/acpi/acpixf.h | 25
-rw-r--r--  include/acpi/acrestyp.h | 1
-rw-r--r--  include/acpi/actbl.h | 4
-rw-r--r--  include/acpi/actbl1.h | 74
-rw-r--r--  include/acpi/actbl2.h | 39
-rw-r--r--  include/acpi/actbl3.h | 66
-rw-r--r--  include/acpi/actypes.h | 48
-rw-r--r--  include/acpi/platform/acenv.h | 44
-rw-r--r--  include/acpi/platform/acmsvcex.h | 54
-rw-r--r--  include/acpi/platform/acwinex.h | 49
-rw-r--r--  include/acpi/video.h | 20
-rw-r--r--  include/asm-generic/futex.h | 8
-rw-r--r--  include/asm-generic/io.h | 4
-rw-r--r--  include/asm-generic/pgtable.h | 8
-rw-r--r--  include/asm-generic/qspinlock.h | 27
-rw-r--r--  include/asm-generic/rwsem.h | 13
-rw-r--r--  include/asm-generic/seccomp.h | 14
-rw-r--r--  include/asm-generic/siginfo.h | 15
-rw-r--r--  include/asm-generic/vmlinux.lds.h | 4
-rw-r--r--  include/clocksource/arm_arch_timer.h | 12
-rw-r--r--  include/crypto/aead.h | 3
-rw-r--r--  include/crypto/hash.h | 3
-rw-r--r--  include/crypto/pkcs7.h | 6
-rw-r--r--  include/crypto/public_key.h | 33
-rw-r--r--  include/crypto/skcipher.h | 3
-rw-r--r--  include/drm/drm_cache.h | 2
-rw-r--r--  include/drm/ttm/ttm_bo_api.h | 2
-rw-r--r--  include/dt-bindings/clock/ath79-clk.h | 19
-rw-r--r--  include/dt-bindings/clock/microchip,pic32-clock.h | 42
-rw-r--r--  include/dt-bindings/clock/r8a7790-clock.h | 1
-rw-r--r--  include/dt-bindings/clock/r8a7794-clock.h | 5
-rw-r--r--  include/dt-bindings/gpio/meson-gxbb-gpio.h | 154
-rw-r--r--  include/dt-bindings/gpio/tegra-gpio.h | 68
-rw-r--r--  include/dt-bindings/gpio/tegra186-gpio.h | 56
-rw-r--r--  include/dt-bindings/mfd/arizona.h | 5
-rw-r--r--  include/dt-bindings/mfd/max77620.h | 39
-rw-r--r--  include/dt-bindings/pinctrl/hisi.h | 59
-rw-r--r--  include/dt-bindings/power/r8a7779-sysc.h | 27
-rw-r--r--  include/dt-bindings/power/r8a7790-sysc.h | 34
-rw-r--r--  include/dt-bindings/power/r8a7791-sysc.h | 26
-rw-r--r--  include/dt-bindings/power/r8a7793-sysc.h | 28
-rw-r--r--  include/dt-bindings/power/r8a7794-sysc.h | 26
-rw-r--r--  include/dt-bindings/power/r8a7795-sysc.h | 42
-rw-r--r--  include/dt-bindings/power/rk3399-power.h | 53
-rw-r--r--  include/keys/asymmetric-subtype.h | 2
-rw-r--r--  include/keys/asymmetric-type.h | 13
-rw-r--r--  include/keys/system_keyring.h | 41
-rw-r--r--  include/kvm/arm_vgic.h | 7
-rw-r--r--  include/linux/acpi.h | 13
-rw-r--r--  include/linux/amba/pl08x.h | 2
-rw-r--r--  include/linux/apple-gmux.h | 2
-rw-r--r--  include/linux/ata.h | 3
-rw-r--r--  include/linux/ath9k_platform.h | 4
-rw-r--r--  include/linux/atomic.h | 34
-rw-r--r--  include/linux/audit.h | 24
-rw-r--r--  include/linux/backing-dev-defs.h | 2
-rw-r--r--  include/linux/backlight.h | 3
-rw-r--r--  include/linux/bcm47xx_sprom.h | 24
-rw-r--r--  include/linux/bio.h | 13
-rw-r--r--  include/linux/bitops.h | 16
-rw-r--r--  include/linux/blk-mq.h | 4
-rw-r--r--  include/linux/blk_types.h | 2
-rw-r--r--  include/linux/blkdev.h | 15
-rw-r--r--  include/linux/blktrace_api.h | 9
-rw-r--r--  include/linux/bootmem.h | 16
-rw-r--r--  include/linux/bpf.h | 12
-rw-r--r--  include/linux/brcmphy.h | 2
-rw-r--r--  include/linux/buffer_head.h | 4
-rw-r--r--  include/linux/can/dev.h | 22
-rw-r--r--  include/linux/ccp.h | 36
-rw-r--r--  include/linux/ceph/auth.h | 10
-rw-r--r--  include/linux/ceph/libceph.h | 4
-rw-r--r--  include/linux/ceph/osd_client.h | 1
-rw-r--r--  include/linux/cgroup-defs.h | 1
-rw-r--r--  include/linux/clocksource.h | 1
-rw-r--r--  include/linux/compaction.h | 6
-rw-r--r--  include/linux/compiler-gcc.h | 5
-rw-r--r--  include/linux/compiler.h | 4
-rw-r--r--  include/linux/configfs.h | 4
-rw-r--r--  include/linux/cpu.h | 18
-rw-r--r--  include/linux/cpufreq-dt.h | 22
-rw-r--r--  include/linux/cpufreq.h | 54
-rw-r--r--  include/linux/cpuhotplug.h | 2
-rw-r--r--  include/linux/cpumask.h | 6
-rw-r--r--  include/linux/cpuset.h | 48
-rw-r--r--  include/linux/crash_dump.h | 8
-rw-r--r--  include/linux/crypto.h | 3
-rw-r--r--  include/linux/dax.h | 2
-rw-r--r--  include/linux/dcache.h | 48
-rw-r--r--  include/linux/debugobjects.h | 17
-rw-r--r--  include/linux/devfreq.h | 99
-rw-r--r--  include/linux/device.h | 17
-rw-r--r--  include/linux/devpts_fs.h | 38
-rw-r--r--  include/linux/dma-iommu.h | 4
-rw-r--r--  include/linux/dma-mapping.h | 2
-rw-r--r--  include/linux/dma/dw.h | 5
-rw-r--r--  include/linux/dma/xilinx_dma.h | 14
-rw-r--r--  include/linux/dmaengine.h | 20
-rw-r--r--  include/linux/efi.h | 167
-rw-r--r--  include/linux/ethtool.h | 7
-rw-r--r--  include/linux/f2fs_fs.h | 4
-rw-r--r--  include/linux/file.h | 13
-rw-r--r--  include/linux/filter.h | 68
-rw-r--r--  include/linux/fs.h | 139
-rw-r--r--  include/linux/fscrypto.h | 9
-rw-r--r--  include/linux/fsnotify_backend.h | 2
-rw-r--r--  include/linux/ftrace.h | 1
-rw-r--r--  include/linux/genl_magic_struct.h | 7
-rw-r--r--  include/linux/gpio/driver.h | 25
-rw-r--r--  include/linux/hash.h | 20
-rw-r--r--  include/linux/huge_mm.h | 11
-rw-r--r--  include/linux/hugetlb.h | 1
-rw-r--r--  include/linux/hugetlb_inline.h | 6
-rw-r--r--  include/linux/i2c-mux.h | 55
-rw-r--r--  include/linux/i2c.h | 50
-rw-r--r--  include/linux/i2c/sx150x.h | 82
-rw-r--r--  include/linux/ieee80211.h | 24
-rw-r--r--  include/linux/ieee802154.h | 45
-rw-r--r--  include/linux/if_ether.h | 5
-rw-r--r--  include/linux/ima.h | 6
-rw-r--r--  include/linux/io-64-nonatomic-hi-lo.h | 25
-rw-r--r--  include/linux/io-64-nonatomic-lo-hi.h | 25
-rw-r--r--  include/linux/iommu.h | 8
-rw-r--r--  include/linux/ioport.h | 4
-rw-r--r--  include/linux/ipv6.h | 20
-rw-r--r--  include/linux/irq.h | 4
-rw-r--r--  include/linux/irqbypass.h | 4
-rw-r--r--  include/linux/irqchip/arm-gic-common.h | 34
-rw-r--r--  include/linux/irqchip/arm-gic-v3.h | 2
-rw-r--r--  include/linux/irqchip/irq-partition-percpu.h | 59
-rw-r--r--  include/linux/irqchip/mips-gic.h | 17
-rw-r--r--  include/linux/irqdesc.h | 1
-rw-r--r--  include/linux/irqdomain.h | 20
-rw-r--r--  include/linux/iscsi_boot_sysfs.h | 13
-rw-r--r--  include/linux/kernel.h | 4
-rw-r--r--  include/linux/kernfs.h | 2
-rw-r--r--  include/linux/key-type.h | 1
-rw-r--r--  include/linux/key.h | 44
-rw-r--r--  include/linux/kvm_host.h | 39
-rw-r--r--  include/linux/leds.h | 8
-rw-r--r--  include/linux/lightnvm.h | 48
-rw-r--r--  include/linux/livepatch.h | 26
-rw-r--r--  include/linux/lockdep.h | 46
-rw-r--r--  include/linux/lsm_hooks.h | 39
-rw-r--r--  include/linux/mdio.h | 11
-rw-r--r--  include/linux/memcontrol.h | 6
-rw-r--r--  include/linux/memory_hotplug.h | 6
-rw-r--r--  include/linux/mempolicy.h | 16
-rw-r--r--  include/linux/mempool.h | 3
-rw-r--r--  include/linux/mfd/as3722.h | 1
-rw-r--r--  include/linux/mfd/axp20x.h | 59
-rw-r--r--  include/linux/mfd/core.h | 8
-rw-r--r--  include/linux/mfd/hi655x-pmic.h | 55
-rw-r--r--  include/linux/mfd/max77620.h | 346
-rw-r--r--  include/linux/mfd/samsung/core.h | 3
-rw-r--r--  include/linux/mfd/samsung/s2mps11.h | 2
-rw-r--r--  include/linux/mfd/syscon.h | 1
-rw-r--r--  include/linux/mfd/syscon/imx6q-iomuxc-gpr.h | 7
-rw-r--r--  include/linux/mfd/tmio.h | 4
-rw-r--r--  include/linux/mfd/wm8400-private.h | 1
-rw-r--r--  include/linux/mlx4/device.h | 11
-rw-r--r--  include/linux/mlx5/cq.h | 5
-rw-r--r--  include/linux/mlx5/device.h | 115
-rw-r--r--  include/linux/mlx5/driver.h | 45
-rw-r--r--  include/linux/mlx5/fs.h | 23
-rw-r--r--  include/linux/mlx5/mlx5_ifc.h | 354
-rw-r--r--  include/linux/mlx5/port.h | 31
-rw-r--r--  include/linux/mlx5/qp.h | 6
-rw-r--r--  include/linux/mlx5/vport.h | 2
-rw-r--r--  include/linux/mm.h | 113
-rw-r--r--  include/linux/mm_inline.h | 24
-rw-r--r--  include/linux/mm_types.h | 16
-rw-r--r--  include/linux/mmc/dw_mmc.h | 12
-rw-r--r--  include/linux/mmc/host.h | 31
-rw-r--r--  include/linux/mmc/sdio_ids.h | 1
-rw-r--r--  include/linux/mmc/sh_mobile_sdhi.h | 10
-rw-r--r--  include/linux/mmc/tmio.h | 71
-rw-r--r--  include/linux/mmu_context.h | 7
-rw-r--r--  include/linux/mmzone.h | 46
-rw-r--r--  include/linux/module.h | 25
-rw-r--r--  include/linux/mtd/mtd.h | 12
-rw-r--r--  include/linux/namei.h | 2
-rw-r--r--  include/linux/net.h | 13
-rw-r--r--  include/linux/netdev_features.h | 29
-rw-r--r--  include/linux/netdevice.h | 93
-rw-r--r--  include/linux/netfilter/ipset/ip_set.h | 13
-rw-r--r--  include/linux/netfilter/x_tables.h | 18
-rw-r--r--  include/linux/nfs_fs.h | 16
-rw-r--r--  include/linux/nfs_page.h | 6
-rw-r--r--  include/linux/nfs_xdr.h | 4
-rw-r--r--  include/linux/nilfs2_fs.h | 4
-rw-r--r--  include/linux/nl802154.h | 2
-rw-r--r--  include/linux/nodemask.h | 11
-rw-r--r--  include/linux/nvme.h | 4
-rw-r--r--  include/linux/of.h | 67
-rw-r--r--  include/linux/of_address.h | 9
-rw-r--r--  include/linux/of_fdt.h | 5
-rw-r--r--  include/linux/of_graph.h | 1
-rw-r--r--  include/linux/of_iommu.h | 8
-rw-r--r--  include/linux/oom.h | 8
-rw-r--r--  include/linux/padata.h | 5
-rw-r--r--  include/linux/page-flags.h | 29
-rw-r--r--  include/linux/page_ref.h | 26
-rw-r--r--  include/linux/pagemap.h | 40
-rw-r--r--  include/linux/pci.h | 19
-rw-r--r--  include/linux/pci_ids.h | 18
-rw-r--r--  include/linux/pcieport_if.h | 2
-rw-r--r--  include/linux/perf/arm_pmu.h | 2
-rw-r--r--  include/linux/perf_event.h | 143
-rw-r--r--  include/linux/phy.h | 8
-rw-r--r--  include/linux/phy/phy.h | 31
-rw-r--r--  include/linux/phy/tegra/xusb.h | 30
-rw-r--r--  include/linux/pinctrl/pinctrl.h | 6
-rw-r--r--  include/linux/platform_data/dma-dw.h | 12
-rw-r--r--  include/linux/platform_data/gpio-dwapb.h | 3
-rw-r--r--  include/linux/platform_data/media/ir-rx51.h | 1
-rw-r--r--  include/linux/platform_data/pwm_omap_dmtimer.h | 21
-rw-r--r--  include/linux/platform_device.h | 6
-rw-r--r--  include/linux/pm.h | 2
-rw-r--r--  include/linux/pm_domain.h | 6
-rw-r--r--  include/linux/pm_opp.h | 62
-rw-r--r--  include/linux/pm_runtime.h | 6
-rw-r--r--  include/linux/pmem.h | 40
-rw-r--r--  include/linux/pnp.h | 2
-rw-r--r--  include/linux/poll.h | 11
-rw-r--r--  include/linux/posix_acl.h | 1
-rw-r--r--  include/linux/property.h | 15
-rw-r--r--  include/linux/proportions.h | 137
-rw-r--r--  include/linux/psci.h | 2
-rw-r--r--  include/linux/pwm.h | 33
-rw-r--r--  include/linux/qed/common_hsi.h | 62
-rw-r--r--  include/linux/qed/qed_eth_if.h | 22
-rw-r--r--  include/linux/qed/qed_if.h | 117
-rw-r--r--  include/linux/qed/qed_iov_if.h | 34
-rw-r--r--  include/linux/rculist.h | 36
-rw-r--r--  include/linux/rculist_nulls.h | 39
-rw-r--r--  include/linux/rcupdate.h | 30
-rw-r--r--  include/linux/rcutiny.h | 16
-rw-r--r--  include/linux/rcutree.h | 2
-rw-r--r--  include/linux/regulator/act8865.h | 2
-rw-r--r--  include/linux/regulator/driver.h | 7
-rw-r--r--  include/linux/regulator/machine.h | 1
-rw-r--r--  include/linux/regulator/max8973-regulator.h | 5
-rw-r--r--  include/linux/remoteproc.h | 4
-rw-r--r--  include/linux/reset-controller.h | 2
-rw-r--r--  include/linux/reset.h | 194
-rw-r--r--  include/linux/rhashtable.h | 3
-rw-r--r--  include/linux/rpmsg.h | 18
-rw-r--r--  include/linux/rwsem-spinlock.h | 2
-rw-r--r--  include/linux/rwsem.h | 3
-rw-r--r--  include/linux/scatterlist.h | 25
-rw-r--r--  include/linux/sched.h | 154
-rw-r--r--  include/linux/sctp.h | 67
-rw-r--r--  include/linux/security.h | 78
-rw-r--r--  include/linux/seq_file.h | 13
-rw-r--r--  include/linux/signal.h | 19
-rw-r--r--  include/linux/skbuff.h | 56
-rw-r--r--  include/linux/slab.h | 16
-rw-r--r--  include/linux/slab_def.h | 4
-rw-r--r--  include/linux/soc/qcom/smd.h | 61
-rw-r--r--  include/linux/soc/qcom/smem_state.h | 35
-rw-r--r--  include/linux/soc/renesas/rcar-sysc.h | 16
-rw-r--r--  include/linux/socket.h | 4
-rw-r--r--  include/linux/stmmac.h | 3
-rw-r--r--  include/linux/string.h | 2
-rw-r--r--  include/linux/string_helpers.h | 6
-rw-r--r--  include/linux/sunrpc/svc.h | 2
-rw-r--r--  include/linux/swap.h | 14
-rw-r--r--  include/linux/thermal.h | 4
-rw-r--r--  include/linux/time64.h | 17
-rw-r--r--  include/linux/timekeeping.h | 17
-rw-r--r--  include/linux/trace_events.h | 134
-rw-r--r--  include/linux/tty.h | 4
-rw-r--r--  include/linux/tty_driver.h | 4
-rw-r--r--  include/linux/u64_stats_sync.h | 14
-rw-r--r--  include/linux/udp.h | 16
-rw-r--r--  include/linux/uio.h | 1
-rw-r--r--  include/linux/usb_usual.h | 2
-rw-r--r--  include/linux/verification.h | 49
-rw-r--r--  include/linux/verify_pefile.h | 22
-rw-r--r--  include/linux/vmstat.h | 6
-rw-r--r--  include/linux/xattr.h | 5
-rw-r--r--  include/media/media-device.h | 13
-rw-r--r--  include/media/media-entity.h | 81
-rw-r--r--  include/media/rc-core.h | 18
-rw-r--r--  include/media/v4l2-dev.h | 3
-rw-r--r--  include/media/v4l2-device.h | 55
-rw-r--r--  include/media/v4l2-rect.h | 173
-rw-r--r--  include/media/v4l2-subdev.h | 8
-rw-r--r--  include/media/v4l2-tpg-colors.h | 68
-rw-r--r--  include/media/v4l2-tpg.h | 597
-rw-r--r--  include/media/videobuf2-core.h | 8
-rw-r--r--  include/media/vsp1.h | 23
-rw-r--r--  include/misc/cxl.h | 8
-rw-r--r--  include/net/6lowpan.h | 37
-rw-r--r--  include/net/act_api.h | 15
-rw-r--r--  include/net/af_rxrpc.h | 4
-rw-r--r--  include/net/bluetooth/hci.h | 2
-rw-r--r--  include/net/cfg80211.h | 158
-rw-r--r--  include/net/cls_cgroup.h | 7
-rw-r--r--  include/net/codel.h | 217
-rw-r--r--  include/net/codel_impl.h | 255
-rw-r--r--  include/net/codel_qdisc.h | 73
-rw-r--r--  include/net/devlink.h | 61
-rw-r--r--  include/net/dsa.h | 48
-rw-r--r--  include/net/dst.h | 5
-rw-r--r--  include/net/fou.h | 10
-rw-r--r--  include/net/fq.h | 95
-rw-r--r--  include/net/fq_impl.h | 277
-rw-r--r--  include/net/gen_stats.h | 6
-rw-r--r--  include/net/geneve.h | 6
-rw-r--r--  include/net/gre.h | 104
-rw-r--r--  include/net/gtp.h | 34
-rw-r--r--  include/net/icmp.h | 4
-rw-r--r--  include/net/inet6_hashtables.h | 12
-rw-r--r--  include/net/inet_common.h | 5
-rw-r--r--  include/net/inet_hashtables.h | 47
-rw-r--r--  include/net/ip.h | 16
-rw-r--r--  include/net/ip6_route.h | 3
-rw-r--r--  include/net/ip6_tunnel.h | 69
-rw-r--r--  include/net/ip_tunnels.h | 97
-rw-r--r--  include/net/ip_vs.h | 17
-rw-r--r--  include/net/ipv6.h | 70
-rw-r--r--  include/net/l3mdev.h | 68
-rw-r--r--  include/net/mac80211.h | 96
-rw-r--r--  include/net/mac802154.h | 10
-rw-r--r--  include/net/netfilter/nf_conntrack.h | 2
-rw-r--r--  include/net/netfilter/nf_conntrack_core.h | 1
-rw-r--r--  include/net/netfilter/nf_conntrack_ecache.h | 108
-rw-r--r--  include/net/netfilter/nf_conntrack_expect.h | 1
-rw-r--r--  include/net/netfilter/nf_conntrack_l4proto.h | 3
-rw-r--r--  include/net/netfilter/nf_conntrack_labels.h | 5
-rw-r--r--  include/net/netfilter/nf_tables.h | 2
-rw-r--r--  include/net/netlink.h | 121
-rw-r--r--  include/net/netns/conntrack.h | 10
-rw-r--r--  include/net/netns/ipv4.h | 3
-rw-r--r--  include/net/netns/xfrm.h | 1
-rw-r--r--  include/net/nfc/nci_core.h | 17
-rw-r--r--  include/net/nl802154.h | 6
-rw-r--r--  include/net/pkt_cls.h | 21
-rw-r--r--  include/net/protocol.h | 3
-rw-r--r--  include/net/request_sock.h | 31
-rw-r--r--  include/net/route.h | 10
-rw-r--r--  include/net/rtnetlink.h | 7
-rw-r--r--  include/net/sch_generic.h | 20
-rw-r--r--  include/net/sctp/sctp.h | 44
-rw-r--r--  include/net/sctp/structs.h | 21
-rw-r--r--  include/net/snmp.h | 40
-rw-r--r--  include/net/sock.h | 176
-rw-r--r--  include/net/switchdev.h | 6
-rw-r--r--  include/net/tc_act/tc_mirred.h | 15
-rw-r--r--  include/net/tcp.h | 83
-rw-r--r--  include/net/transp_v6.h | 4
-rw-r--r--  include/net/udp.h | 56
-rw-r--r--  include/net/udp_tunnel.h | 19
-rw-r--r--  include/net/vxlan.h | 81
-rw-r--r--  include/net/xfrm.h | 4
-rw-r--r--  include/rdma/ib.h | 16
-rw-r--r--  include/rdma/ib_verbs.h | 61
-rw-r--r--  include/rdma/mr_pool.h | 25
-rw-r--r--  include/rdma/rdma_vt.h | 1
-rw-r--r--  include/rdma/rdmavt_qp.h | 5
-rw-r--r--  include/rdma/rw.h | 88
-rw-r--r--  include/rxrpc/packet.h | 2
-rw-r--r--  include/scsi/scsi.h | 19
-rw-r--r--  include/scsi/scsi_device.h | 35
-rw-r--r--  include/scsi/scsi_host.h | 2
-rw-r--r--  include/scsi/scsi_proto.h | 9
-rw-r--r--  include/soc/at91/atmel-sfr.h | 18
-rw-r--r--  include/soc/nps/common.h | 166
-rw-r--r--  include/soc/tegra/fuse.h | 1
-rw-r--r--  include/soc/tegra/pmc.h | 36
-rw-r--r--  include/sound/dmaengine_pcm.h | 12
-rw-r--r--  include/sound/hda_chmap.h | 2
-rw-r--r--  include/sound/hda_i915.h | 15
-rw-r--r--  include/sound/hda_regmap.h | 2
-rw-r--r--  include/sound/hdaudio_ext.h | 13
-rw-r--r--  include/sound/hdmi-codec.h | 100
-rw-r--r--  include/sound/pcm_iec958.h | 2
-rw-r--r--  include/sound/soc-dapm.h | 3
-rw-r--r--  include/sound/soc.h | 5
-rw-r--r--  include/target/target_core_backend.h | 1
-rw-r--r--  include/target/target_core_fabric.h | 6
-rw-r--r--  include/trace/events/btrfs.h | 89
-rw-r--r--  include/trace/events/ext4.h | 6
-rw-r--r--  include/trace/events/kvm.h | 11
-rw-r--r--  include/trace/events/mmc.h | 182
-rw-r--r--  include/trace/events/page_isolation.h | 2
-rw-r--r--  include/trace/events/rcu.h | 79
-rw-r--r--  include/trace/events/scsi.h | 6
-rw-r--r--  include/trace/perf.h | 16
-rw-r--r--  include/trace/trace_events.h | 3
-rw-r--r--  include/uapi/asm-generic/unistd.h | 6
-rw-r--r--  include/uapi/linux/Kbuild | 2
-rw-r--r--  include/uapi/linux/bpf.h | 8
-rw-r--r--  include/uapi/linux/devlink.h | 63
-rw-r--r--  include/uapi/linux/elf.h | 10
-rw-r--r--  include/uapi/linux/fib_rules.h | 1
-rw-r--r--  include/uapi/linux/fs.h | 2
-rw-r--r--  include/uapi/linux/gen_stats.h | 1
-rw-r--r--  include/uapi/linux/gtp.h | 33
-rw-r--r--  include/uapi/linux/i2c.h | 13
-rw-r--r--  include/uapi/linux/if.h | 28
-rw-r--r--  include/uapi/linux/if_bridge.h | 18
-rw-r--r--  include/uapi/linux/if_ether.h | 1
-rw-r--r--  include/uapi/linux/if_link.h | 62
-rw-r--r--  include/uapi/linux/if_macsec.h | 14
-rw-r--r--  include/uapi/linux/ila.h | 8
-rw-r--r--  include/uapi/linux/inet_diag.h | 6
-rw-r--r--  include/uapi/linux/ip_vs.h | 1
-rw-r--r--  include/uapi/linux/keyctl.h | 10
-rw-r--r--  include/uapi/linux/kvm.h | 1
-rw-r--r--  include/uapi/linux/l2tp.h | 2
-rw-r--r--  include/uapi/linux/libc-compat.h | 44
-rw-r--r--  include/uapi/linux/lwtunnel.h | 2
-rw-r--r--  include/uapi/linux/magic.h | 2
-rw-r--r--  include/uapi/linux/neighbour.h | 2
-rw-r--r--  include/uapi/linux/net_tstamp.h | 10
-rw-r--r--  include/uapi/linux/netfilter/ipset/ip_set.h | 1
-rw-r--r--  include/uapi/linux/netfilter/nf_tables.h | 9
-rw-r--r--  include/uapi/linux/netfilter/nfnetlink_acct.h | 1
-rw-r--r--  include/uapi/linux/netfilter/nfnetlink_conntrack.h | 3
-rw-r--r--  include/uapi/linux/netfilter/nfnetlink_queue.h | 10
-rw-r--r--  include/uapi/linux/nl80211.h | 110
-rw-r--r--  include/uapi/linux/openvswitch.h | 4
-rw-r--r--  include/uapi/linux/pci_regs.h | 20
-rw-r--r--  include/uapi/linux/perf_event.h | 4
-rw-r--r--  include/uapi/linux/pkt_cls.h | 6
-rw-r--r--  include/uapi/linux/pkt_sched.h | 7
-rw-r--r--  include/uapi/linux/qrtr.h | 12
-rw-r--r--  include/uapi/linux/quota.h | 1
-rw-r--r--  include/uapi/linux/rio_mport_cdev.h (renamed from include/linux/rio_mport_cdev.h) | 144
-rw-r--r--  include/uapi/linux/rtnetlink.h | 7
-rw-r--r--  include/uapi/linux/serial_core.h | 3
-rw-r--r--  include/uapi/linux/signal.h | 5
-rw-r--r--  include/uapi/linux/sock_diag.h | 1
-rw-r--r--  include/uapi/linux/stddef.h | 4
-rw-r--r--  include/uapi/linux/swab.h | 24
-rw-r--r--  include/uapi/linux/tc_act/Kbuild | 1
-rw-r--r--  include/uapi/linux/tc_act/tc_bpf.h | 1
-rw-r--r--  include/uapi/linux/tc_act/tc_connmark.h | 1
-rw-r--r--  include/uapi/linux/tc_act/tc_csum.h | 1
-rw-r--r--  include/uapi/linux/tc_act/tc_defact.h | 1
-rw-r--r--  include/uapi/linux/tc_act/tc_gact.h | 1
-rw-r--r--  include/uapi/linux/tc_act/tc_ife.h | 1
-rw-r--r--  include/uapi/linux/tc_act/tc_ipt.h | 1
-rw-r--r--  include/uapi/linux/tc_act/tc_mirred.h | 1
-rw-r--r--  include/uapi/linux/tc_act/tc_nat.h | 1
-rw-r--r--  include/uapi/linux/tc_act/tc_pedit.h | 1
-rw-r--r--  include/uapi/linux/tc_act/tc_skbedit.h | 1
-rw-r--r--  include/uapi/linux/tc_act/tc_vlan.h | 1
-rw-r--r--  include/uapi/linux/tcp_metrics.h | 1
-rw-r--r--  include/uapi/linux/udp.h | 3
-rw-r--r--  include/uapi/linux/usb/ch9.h | 2
-rw-r--r--  include/uapi/linux/v4l2-dv-timings.h | 30
-rw-r--r--  include/uapi/linux/videodev2.h | 38
-rw-r--r--  include/uapi/linux/virtio_config.h | 2
-rw-r--r--  include/uapi/linux/xfrm.h | 1
-rw-r--r--  include/uapi/rdma/ib_user_verbs.h | 1
-rw-r--r--  include/uapi/sound/asound.h | 2
-rw-r--r--  include/video/imx-ipu-v3.h | 7
-rw-r--r--  include/video/sh_mipi_dsi.h | 59
-rw-r--r--  include/xen/page.h | 4
466 files changed, 10152 insertions, 2980 deletions
diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
index 14362a84c78e..4d40e9b5d938 100644
--- a/include/acpi/acpi_bus.h
+++ b/include/acpi/acpi_bus.h
@@ -87,7 +87,7 @@ acpi_evaluate_dsm_typed(acpi_handle handle, const u8 *uuid, int rev, int func,
.package.elements = (eles) \
}
-bool acpi_dev_present(const char *hid);
+bool acpi_dev_found(const char *hid);
#ifdef CONFIG_ACPI
@@ -394,13 +394,13 @@ struct acpi_data_node {
static inline bool is_acpi_node(struct fwnode_handle *fwnode)
{
- return fwnode && (fwnode->type == FWNODE_ACPI
+ return !IS_ERR_OR_NULL(fwnode) && (fwnode->type == FWNODE_ACPI
|| fwnode->type == FWNODE_ACPI_DATA);
}
static inline bool is_acpi_device_node(struct fwnode_handle *fwnode)
{
- return fwnode && fwnode->type == FWNODE_ACPI;
+ return !IS_ERR_OR_NULL(fwnode) && fwnode->type == FWNODE_ACPI;
}
static inline struct acpi_device *to_acpi_device_node(struct fwnode_handle *fwnode)
diff --git a/include/acpi/acpi_drivers.h b/include/acpi/acpi_drivers.h
index 29c691265b49..797ae2ec8eee 100644
--- a/include/acpi/acpi_drivers.h
+++ b/include/acpi/acpi_drivers.h
@@ -78,7 +78,6 @@
/* ACPI PCI Interrupt Link (pci_link.c) */
-int acpi_irq_penalty_init(void);
int acpi_pci_link_allocate_irq(acpi_handle handle, int index, int *triggering,
int *polarity, char **name);
int acpi_pci_link_free_irq(acpi_handle handle);
diff --git a/include/acpi/acpiosxf.h b/include/acpi/acpiosxf.h
index d1e34d1eeea6..562603d7aabe 100644
--- a/include/acpi/acpiosxf.h
+++ b/include/acpi/acpiosxf.h
@@ -96,7 +96,7 @@ acpi_physical_address acpi_os_get_root_pointer(void);
#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_predefined_override
acpi_status
acpi_os_predefined_override(const struct acpi_predefined_names *init_val,
- char **new_val);
+ acpi_string *new_val);
#endif
#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_table_override
@@ -108,7 +108,7 @@ acpi_os_table_override(struct acpi_table_header *existing_table,
#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_physical_table_override
acpi_status
acpi_os_physical_table_override(struct acpi_table_header *existing_table,
- acpi_physical_address * new_address,
+ acpi_physical_address *new_address,
u32 *new_table_length);
#endif
@@ -203,7 +203,7 @@ void acpi_os_unmap_memory(void *logical_address, acpi_size size);
#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_get_physical_address
acpi_status
acpi_os_get_physical_address(void *logical_address,
- acpi_physical_address * physical_address);
+ acpi_physical_address *physical_address);
#endif
/*
@@ -379,14 +379,14 @@ acpi_status
acpi_os_get_table_by_name(char *signature,
u32 instance,
struct acpi_table_header **table,
- acpi_physical_address * address);
+ acpi_physical_address *address);
#endif
#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_get_table_by_index
acpi_status
acpi_os_get_table_by_index(u32 index,
struct acpi_table_header **table,
- u32 *instance, acpi_physical_address * address);
+ u32 *instance, acpi_physical_address *address);
#endif
#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_get_table_by_address
diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h
index 17556979dc79..4e4c21491c41 100644
--- a/include/acpi/acpixf.h
+++ b/include/acpi/acpixf.h
@@ -46,7 +46,7 @@
/* Current ACPICA subsystem version in YYYYMMDD format */
-#define ACPI_CA_VERSION 0x20160108
+#define ACPI_CA_VERSION 0x20160422
#include <acpi/acconfig.h>
#include <acpi/actypes.h>
@@ -192,7 +192,7 @@ ACPI_INIT_GLOBAL(u8, acpi_gbl_do_not_use_xsdt, FALSE);
/*
* Optionally support group module level code.
*/
-ACPI_INIT_GLOBAL(u8, acpi_gbl_group_module_level_code, TRUE);
+ACPI_INIT_GLOBAL(u8, acpi_gbl_group_module_level_code, FALSE);
/*
* Optionally use 32-bit FADT addresses if and when there is a conflict
@@ -484,8 +484,8 @@ ACPI_EXTERNAL_RETURN_STATUS(acpi_status __init acpi_load_tables(void))
ACPI_EXTERNAL_RETURN_STATUS(acpi_status __init acpi_reallocate_root_table(void))
ACPI_EXTERNAL_RETURN_STATUS(acpi_status __init
- acpi_find_root_pointer(acpi_physical_address *
- rsdp_address))
+ acpi_find_root_pointer(acpi_physical_address
+ *rsdp_address))
ACPI_EXTERNAL_RETURN_STATUS(acpi_status
acpi_get_table_header(acpi_string signature,
u32 instance,
@@ -530,7 +530,7 @@ ACPI_EXTERNAL_RETURN_STATUS(acpi_status
ACPI_EXTERNAL_RETURN_STATUS(acpi_status
acpi_get_handle(acpi_handle parent,
acpi_string pathname,
- acpi_handle * ret_handle))
+ acpi_handle *ret_handle))
ACPI_EXTERNAL_RETURN_STATUS(acpi_status
acpi_attach_data(acpi_handle object,
acpi_object_handler handler,
@@ -575,15 +575,15 @@ ACPI_EXTERNAL_RETURN_STATUS(acpi_status
acpi_get_next_object(acpi_object_type type,
acpi_handle parent,
acpi_handle child,
- acpi_handle * out_handle))
+ acpi_handle *out_handle))
ACPI_EXTERNAL_RETURN_STATUS(acpi_status
acpi_get_type(acpi_handle object,
- acpi_object_type * out_type))
+ acpi_object_type *out_type))
ACPI_EXTERNAL_RETURN_STATUS(acpi_status
acpi_get_parent(acpi_handle object,
- acpi_handle * out_handle))
+ acpi_handle *out_handle))
/*
* Handler interfaces
@@ -755,7 +755,7 @@ ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status acpi_enable_all_wakeup_gpes(void))
ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
acpi_get_gpe_device(u32 gpe_index,
- acpi_handle * gpe_device))
+ acpi_handle *gpe_device))
ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
acpi_install_gpe_block(acpi_handle gpe_device,
@@ -771,8 +771,8 @@ ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
* Resource interfaces
*/
typedef
-acpi_status(*acpi_walk_resource_callback) (struct acpi_resource * resource,
- void *context);
+acpi_status (*acpi_walk_resource_callback) (struct acpi_resource * resource,
+ void *context);
ACPI_EXTERNAL_RETURN_STATUS(acpi_status
acpi_get_vendor_resource(acpi_handle device,
@@ -938,7 +938,8 @@ ACPI_DBG_DEPENDENT_RETURN_VOID(void
ACPI_APP_DEPENDENT_RETURN_VOID(ACPI_PRINTF_LIKE(1)
void ACPI_INTERNAL_VAR_XFACE
acpi_log_error(const char *format, ...))
- acpi_status acpi_initialize_debugger(void);
+
+acpi_status acpi_initialize_debugger(void);
void acpi_terminate_debugger(void);
diff --git a/include/acpi/acrestyp.h b/include/acpi/acrestyp.h
index cf2acb84dfeb..16c189283ea0 100644
--- a/include/acpi/acrestyp.h
+++ b/include/acpi/acrestyp.h
@@ -417,6 +417,7 @@ struct acpi_resource_gpio {
u8 type; \
u8 producer_consumer; /* For values, see Producer/Consumer above */\
u8 slave_mode; \
+ u8 connection_sharing; \
u8 type_revision_id; \
u16 type_data_length; \
u16 vendor_length; \
diff --git a/include/acpi/actbl.h b/include/acpi/actbl.h
index 0cb1a0036986..c19700e2a2fe 100644
--- a/include/acpi/actbl.h
+++ b/include/acpi/actbl.h
@@ -223,7 +223,7 @@ struct acpi_table_facs {
/*******************************************************************************
*
* FADT - Fixed ACPI Description Table (Signature "FACP")
- * Version 4
+ * Version 6
*
******************************************************************************/
@@ -413,4 +413,6 @@ struct acpi_table_desc {
#define ACPI_FADT_V5_SIZE (u32) (ACPI_FADT_OFFSET (hypervisor_id))
#define ACPI_FADT_V6_SIZE (u32) (sizeof (struct acpi_table_fadt))
+#define ACPI_FADT_CONFORMANCE "ACPI 6.1 (FADT version 6)"
+
#endif /* __ACTBL_H__ */
diff --git a/include/acpi/actbl1.h b/include/acpi/actbl1.h
index 16e013600c19..796d6baae3a3 100644
--- a/include/acpi/actbl1.h
+++ b/include/acpi/actbl1.h
@@ -236,7 +236,8 @@ enum acpi_einj_actions {
ACPI_EINJ_CHECK_BUSY_STATUS = 6,
ACPI_EINJ_GET_COMMAND_STATUS = 7,
ACPI_EINJ_SET_ERROR_TYPE_WITH_ADDRESS = 8,
- ACPI_EINJ_ACTION_RESERVED = 9, /* 9 and greater are reserved */
+ ACPI_EINJ_GET_EXECUTE_TIMINGS = 9,
+ ACPI_EINJ_ACTION_RESERVED = 10, /* 10 and greater are reserved */
ACPI_EINJ_TRIGGER_ERROR = 0xFF /* Except for this value */
};
@@ -348,7 +349,8 @@ enum acpi_erst_actions {
ACPI_ERST_GET_ERROR_RANGE = 13,
ACPI_ERST_GET_ERROR_LENGTH = 14,
ACPI_ERST_GET_ERROR_ATTRIBUTES = 15,
- ACPI_ERST_ACTION_RESERVED = 16 /* 16 and greater are reserved */
+ ACPI_ERST_EXECUTE_TIMINGS = 16,
+ ACPI_ERST_ACTION_RESERVED = 17 /* 17 and greater are reserved */
};
/* Values for Instruction field above */
@@ -427,7 +429,8 @@ enum acpi_hest_types {
ACPI_HEST_TYPE_AER_ENDPOINT = 7,
ACPI_HEST_TYPE_AER_BRIDGE = 8,
ACPI_HEST_TYPE_GENERIC_ERROR = 9,
- ACPI_HEST_TYPE_RESERVED = 10 /* 10 and greater are reserved */
+ ACPI_HEST_TYPE_GENERIC_ERROR_V2 = 10,
+ ACPI_HEST_TYPE_RESERVED = 11 /* 11 and greater are reserved */
};
/*
@@ -506,7 +509,11 @@ enum acpi_hest_notify_types {
ACPI_HEST_NOTIFY_NMI = 4,
ACPI_HEST_NOTIFY_CMCI = 5, /* ACPI 5.0 */
ACPI_HEST_NOTIFY_MCE = 6, /* ACPI 5.0 */
- ACPI_HEST_NOTIFY_RESERVED = 7 /* 7 and greater are reserved */
+ ACPI_HEST_NOTIFY_GPIO = 7, /* ACPI 6.0 */
+ ACPI_HEST_NOTIFY_SEA = 8, /* ACPI 6.1 */
+ ACPI_HEST_NOTIFY_SEI = 9, /* ACPI 6.1 */
+ ACPI_HEST_NOTIFY_GSIV = 10, /* ACPI 6.1 */
+ ACPI_HEST_NOTIFY_RESERVED = 11 /* 11 and greater are reserved */
};
/* Values for config_write_enable bitfield above */
@@ -603,6 +610,24 @@ struct acpi_hest_generic {
u32 error_block_length;
};
+/* 10: Generic Hardware Error Source, version 2 */
+
+struct acpi_hest_generic_v2 {
+ struct acpi_hest_header header;
+ u16 related_source_id;
+ u8 reserved;
+ u8 enabled;
+ u32 records_to_preallocate;
+ u32 max_sections_per_record;
+ u32 max_raw_data_length;
+ struct acpi_generic_address error_status_address;
+ struct acpi_hest_notify notify;
+ u32 error_block_length;
+ struct acpi_generic_address read_ack_register;
+ u64 read_ack_preserve;
+ u64 read_ack_write;
+};
+
/* Generic Error Status block */
struct acpi_hest_generic_status {
@@ -634,6 +659,33 @@ struct acpi_hest_generic_data {
u8 fru_text[20];
};
+/* Extension for revision 0x0300 */
+
+struct acpi_hest_generic_data_v300 {
+ u8 section_type[16];
+ u32 error_severity;
+ u16 revision;
+ u8 validation_bits;
+ u8 flags;
+ u32 error_data_length;
+ u8 fru_id[16];
+ u8 fru_text[20];
+ u64 time_stamp;
+};
+
+/* Values for error_severity above */
+
+#define ACPI_HEST_GEN_ERROR_RECOVERABLE 0
+#define ACPI_HEST_GEN_ERROR_FATAL 1
+#define ACPI_HEST_GEN_ERROR_CORRECTED 2
+#define ACPI_HEST_GEN_ERROR_NONE 3
+
+/* Flags for validation_bits above */
+
+#define ACPI_HEST_GEN_VALID_FRU_ID (1)
+#define ACPI_HEST_GEN_VALID_FRU_STRING (1<<1)
+#define ACPI_HEST_GEN_VALID_TIMESTAMP (1<<2)
+
/*******************************************************************************
*
* MADT - Multiple APIC Description Table
@@ -934,7 +986,7 @@ struct acpi_msct_proximity {
/*******************************************************************************
*
- * NFIT - NVDIMM Interface Table (ACPI 6.0)
+ * NFIT - NVDIMM Interface Table (ACPI 6.0+)
* Version 1
*
******************************************************************************/
@@ -1015,6 +1067,7 @@ struct acpi_nfit_memory_map {
#define ACPI_NFIT_MEM_NOT_ARMED (1<<3) /* 03: Memory Device is not armed */
#define ACPI_NFIT_MEM_HEALTH_OBSERVED (1<<4) /* 04: Memory Device observed SMART/health events */
#define ACPI_NFIT_MEM_HEALTH_ENABLED (1<<5) /* 05: SMART/health events enabled */
+#define ACPI_NFIT_MEM_MAP_FAILED (1<<6) /* 06: Mapping to SPA failed */
/* 2: Interleave Structure */
@@ -1046,7 +1099,10 @@ struct acpi_nfit_control_region {
u16 subsystem_vendor_id;
u16 subsystem_device_id;
u16 subsystem_revision_id;
- u8 reserved[6]; /* Reserved, must be zero */
+ u8 valid_fields;
+ u8 manufacturing_location;
+ u16 manufacturing_date;
+ u8 reserved[2]; /* Reserved, must be zero */
u32 serial_number;
u16 code;
u16 windows;
@@ -1061,7 +1117,11 @@ struct acpi_nfit_control_region {
/* Flags */
-#define ACPI_NFIT_CONTROL_BUFFERED (1) /* Block Data Windows implementation is buffered */
+#define ACPI_NFIT_CONTROL_BUFFERED (1) /* Block Data Windows implementation is buffered */
+
+/* valid_fields bits */
+
+#define ACPI_NFIT_CONTROL_MFG_INFO_VALID (1) /* Manufacturing fields are valid */
/* 5: NVDIMM Block Data Window Region Structure */
diff --git a/include/acpi/actbl2.h b/include/acpi/actbl2.h
index a4ef62537cac..c93dbadfc71d 100644
--- a/include/acpi/actbl2.h
+++ b/include/acpi/actbl2.h
@@ -321,7 +321,7 @@ struct acpi_csrt_descriptor {
* DBG2 - Debug Port Table 2
* Version 0 (Both main table and subtables)
*
- * Conforms to "Microsoft Debug Port Table 2 (DBG2)", May 22 2012.
+ * Conforms to "Microsoft Debug Port Table 2 (DBG2)", December 10, 2015
*
******************************************************************************/
@@ -371,6 +371,11 @@ struct acpi_dbg2_device {
#define ACPI_DBG2_16550_COMPATIBLE 0x0000
#define ACPI_DBG2_16550_SUBSET 0x0001
+#define ACPI_DBG2_ARM_PL011 0x0003
+#define ACPI_DBG2_ARM_SBSA_32BIT 0x000D
+#define ACPI_DBG2_ARM_SBSA_GENERIC 0x000E
+#define ACPI_DBG2_ARM_DCC 0x000F
+#define ACPI_DBG2_BCM2835 0x0010
#define ACPI_DBG2_1394_STANDARD 0x0000
@@ -399,7 +404,7 @@ struct acpi_table_dbgp {
* Version 1
*
* Conforms to "Intel Virtualization Technology for Directed I/O",
- * Version 2.2, Sept. 2013
+ * Version 2.3, October 2014
*
******************************************************************************/
@@ -413,6 +418,8 @@ struct acpi_table_dmar {
/* Masks for Flags field above */
#define ACPI_DMAR_INTR_REMAP (1)
+#define ACPI_DMAR_X2APIC_OPT_OUT (1<<1)
+#define ACPI_DMAR_X2APIC_MODE (1<<2)
/* DMAR subtable header */
@@ -655,7 +662,7 @@ struct acpi_ibft_target {
* IORT - IO Remapping Table
*
* Conforms to "IO Remapping Table System Software on ARM Platforms",
- * Document number: ARM DEN 0049A, 2015
+ * Document number: ARM DEN 0049B, October 2015
*
******************************************************************************/
@@ -685,7 +692,8 @@ enum acpi_iort_node_type {
ACPI_IORT_NODE_ITS_GROUP = 0x00,
ACPI_IORT_NODE_NAMED_COMPONENT = 0x01,
ACPI_IORT_NODE_PCI_ROOT_COMPLEX = 0x02,
- ACPI_IORT_NODE_SMMU = 0x03
+ ACPI_IORT_NODE_SMMU = 0x03,
+ ACPI_IORT_NODE_SMMU_V3 = 0x04
};
struct acpi_iort_id_mapping {
@@ -775,6 +783,23 @@ struct acpi_iort_smmu {
#define ACPI_IORT_SMMU_DVM_SUPPORTED (1)
#define ACPI_IORT_SMMU_COHERENT_WALK (1<<1)
+struct acpi_iort_smmu_v3 {
+ u64 base_address; /* SMMUv3 base address */
+ u32 flags;
+ u32 reserved;
+ u64 vatos_address;
+ u32 model; /* O: generic SMMUv3 */
+ u32 event_gsiv;
+ u32 pri_gsiv;
+ u32 gerr_gsiv;
+ u32 sync_gsiv;
+};
+
+/* Masks for Flags field above */
+
+#define ACPI_IORT_SMMU_V3_COHACC_OVERRIDE (1)
+#define ACPI_IORT_SMMU_V3_HTTU_OVERRIDE (1<<1)
+
/*******************************************************************************
*
* IVRS - I/O Virtualization Reporting Structure
@@ -1102,10 +1127,10 @@ struct acpi_table_slic {
/*******************************************************************************
*
* SPCR - Serial Port Console Redirection table
- * Version 1
+ * Version 2
*
* Conforms to "Serial Port Console Redirection Table",
- * Version 1.00, January 11, 2002
+ * Version 1.03, August 10, 2015
*
******************************************************************************/
@@ -1137,6 +1162,8 @@ struct acpi_table_spcr {
#define ACPI_SPCR_DO_NOT_DISABLE (1)
+/* Values for Interface Type: See the definition of the DBG2 table */
+
/*******************************************************************************
*
* SPMI - Server Platform Management Interface table
diff --git a/include/acpi/actbl3.h b/include/acpi/actbl3.h
index ddf5e66c3b15..ebc1f4f9fe66 100644
--- a/include/acpi/actbl3.h
+++ b/include/acpi/actbl3.h
@@ -184,7 +184,7 @@ struct acpi_table_fpdt {
struct acpi_table_header header; /* Common ACPI table header */
};
-/* FPDT subtable header */
+/* FPDT subtable header (Performance Record Structure) */
struct acpi_fpdt_header {
u16 type;
@@ -205,19 +205,15 @@ enum acpi_fpdt_type {
/* 0: Firmware Basic Boot Performance Record */
-struct acpi_fpdt_boot {
+struct acpi_fpdt_boot_pointer {
struct acpi_fpdt_header header;
u8 reserved[4];
- u64 reset_end;
- u64 load_start;
- u64 startup_start;
- u64 exit_services_entry;
- u64 exit_services_exit;
+ u64 address;
};
/* 1: S3 Performance Table Pointer Record */
-struct acpi_fpdt_s3pt_ptr {
+struct acpi_fpdt_s3pt_pointer {
struct acpi_fpdt_header header;
u8 reserved[4];
u64 address;
@@ -225,7 +221,7 @@ struct acpi_fpdt_s3pt_ptr {
/*
* S3PT - S3 Performance Table. This table is pointed to by the
- * FPDT S3 Pointer Record above.
+ * S3 Pointer Record above.
*/
struct acpi_table_s3pt {
u8 signature[4]; /* "S3PT" */
@@ -233,34 +229,43 @@ struct acpi_table_s3pt {
};
/*
- * S3PT Subtables
+ * S3PT Subtables (Not part of the actual FPDT)
*/
-struct acpi_s3pt_header {
- u16 type;
- u8 length;
- u8 revision;
-};
-/* Values for Type field above */
+/* Values for Type field in S3PT header */
enum acpi_s3pt_type {
ACPI_S3PT_TYPE_RESUME = 0,
- ACPI_S3PT_TYPE_SUSPEND = 1
+ ACPI_S3PT_TYPE_SUSPEND = 1,
+ ACPI_FPDT_BOOT_PERFORMANCE = 2
};
struct acpi_s3pt_resume {
- struct acpi_s3pt_header header;
+ struct acpi_fpdt_header header;
u32 resume_count;
u64 full_resume;
u64 average_resume;
};
struct acpi_s3pt_suspend {
- struct acpi_s3pt_header header;
+ struct acpi_fpdt_header header;
u64 suspend_start;
u64 suspend_end;
};
+/*
+ * FPDT Boot Performance Record (Not part of the actual FPDT)
+ */
+struct acpi_fpdt_boot {
+ struct acpi_fpdt_header header;
+ u8 reserved[4];
+ u64 reset_end;
+ u64 load_start;
+ u64 startup_start;
+ u64 exit_services_entry;
+ u64 exit_services_exit;
+};
+
/*******************************************************************************
*
* GTDT - Generic Timer Description Table (ACPI 5.1)
@@ -476,7 +481,8 @@ struct acpi_table_pcct {
enum acpi_pcct_type {
ACPI_PCCT_TYPE_GENERIC_SUBSPACE = 0,
ACPI_PCCT_TYPE_HW_REDUCED_SUBSPACE = 1,
- ACPI_PCCT_TYPE_RESERVED = 2 /* 2 and greater are reserved */
+ ACPI_PCCT_TYPE_HW_REDUCED_SUBSPACE_TYPE2 = 2, /* ACPI 6.1 */
+ ACPI_PCCT_TYPE_RESERVED = 3 /* 3 and greater are reserved */
};
/*
@@ -515,6 +521,26 @@ struct acpi_pcct_hw_reduced {
u16 min_turnaround_time;
};
+/* 2: HW-reduced Communications Subspace Type 2 (ACPI 6.1) */
+
+struct acpi_pcct_hw_reduced_type2 {
+ struct acpi_subtable_header header;
+ u32 doorbell_interrupt;
+ u8 flags;
+ u8 reserved;
+ u64 base_address;
+ u64 length;
+ struct acpi_generic_address doorbell_register;
+ u64 preserve_mask;
+ u64 write_mask;
+ u32 latency;
+ u32 max_access_rate;
+ u16 min_turnaround_time;
+ struct acpi_generic_address doorbell_ack_register;
+ u64 ack_preserve_mask;
+ u64 ack_write_mask;
+};
+
/* Values for doorbell flags above */
#define ACPI_PCCT_INTERRUPT_POLARITY (1)
diff --git a/include/acpi/actypes.h b/include/acpi/actypes.h
index db46546d3b9d..cb389efd321c 100644
--- a/include/acpi/actypes.h
+++ b/include/acpi/actypes.h
@@ -630,7 +630,8 @@ typedef u64 acpi_integer;
#define ACPI_NOTIFY_SHUTDOWN_REQUEST (u8) 0x0C
#define ACPI_NOTIFY_AFFINITY_UPDATE (u8) 0x0D
-#define ACPI_NOTIFY_MAX 0x0D
+#define ACPI_GENERIC_NOTIFY_MAX 0x0D
+#define ACPI_SPECIFIC_NOTIFY_MAX 0x84
/*
* Types associated with ACPI names and objects. The first group of
@@ -892,7 +893,7 @@ typedef u8 acpi_adr_space_type;
/* Sleep function dispatch */
-typedef acpi_status(*acpi_sleep_function) (u8 sleep_state);
+typedef acpi_status (*acpi_sleep_function) (u8 sleep_state);
struct acpi_sleep_functions {
acpi_sleep_function legacy_function;
@@ -994,7 +995,7 @@ struct acpi_buffer {
* Predefined Namespace items
*/
struct acpi_predefined_names {
- char *name;
+ const char *name;
u8 type;
char *val;
};
@@ -1071,20 +1072,21 @@ void (*acpi_notify_handler) (acpi_handle device, u32 value, void *context);
typedef
void (*acpi_object_handler) (acpi_handle object, void *data);
-typedef acpi_status(*acpi_init_handler) (acpi_handle object, u32 function);
+typedef
+acpi_status (*acpi_init_handler) (acpi_handle object, u32 function);
#define ACPI_INIT_DEVICE_INI 1
typedef
-acpi_status(*acpi_exception_handler) (acpi_status aml_status,
- acpi_name name,
- u16 opcode,
- u32 aml_offset, void *context);
+acpi_status (*acpi_exception_handler) (acpi_status aml_status,
+ acpi_name name,
+ u16 opcode,
+ u32 aml_offset, void *context);
/* Table Event handler (Load, load_table, etc.) and types */
typedef
-acpi_status(*acpi_table_handler) (u32 event, void *table, void *context);
+acpi_status (*acpi_table_handler) (u32 event, void *table, void *context);
#define ACPI_TABLE_LOAD 0x0
#define ACPI_TABLE_UNLOAD 0x1
@@ -1093,12 +1095,12 @@ acpi_status(*acpi_table_handler) (u32 event, void *table, void *context);
/* Address Spaces (For Operation Regions) */
typedef
-acpi_status(*acpi_adr_space_handler) (u32 function,
- acpi_physical_address address,
- u32 bit_width,
- u64 *value,
- void *handler_context,
- void *region_context);
+acpi_status (*acpi_adr_space_handler) (u32 function,
+ acpi_physical_address address,
+ u32 bit_width,
+ u64 *value,
+ void *handler_context,
+ void *region_context);
#define ACPI_DEFAULT_HANDLER NULL
@@ -1111,18 +1113,18 @@ struct acpi_connection_info {
};
typedef
-acpi_status(*acpi_adr_space_setup) (acpi_handle region_handle,
- u32 function,
- void *handler_context,
- void **region_context);
+acpi_status (*acpi_adr_space_setup) (acpi_handle region_handle,
+ u32 function,
+ void *handler_context,
+ void **region_context);
#define ACPI_REGION_ACTIVATE 0
#define ACPI_REGION_DEACTIVATE 1
typedef
-acpi_status(*acpi_walk_callback) (acpi_handle object,
- u32 nesting_level,
- void *context, void **return_value);
+acpi_status (*acpi_walk_callback) (acpi_handle object,
+ u32 nesting_level,
+ void *context, void **return_value);
typedef
u32 (*acpi_interface_handler) (acpi_string interface_name, u32 supported);
@@ -1227,7 +1229,7 @@ struct acpi_mem_space_context {
* struct acpi_memory_list is used only if the ACPICA local cache is enabled
*/
struct acpi_memory_list {
- char *list_name;
+ const char *list_name;
void *list_head;
u16 object_size;
u16 max_depth;
diff --git a/include/acpi/platform/acenv.h b/include/acpi/platform/acenv.h
index 7c0595bde132..86b5a8447606 100644
--- a/include/acpi/platform/acenv.h
+++ b/include/acpi/platform/acenv.h
@@ -66,17 +66,28 @@
*
*****************************************************************************/
+/* Common application configuration. All single threaded except for acpi_exec. */
+
+#if (defined ACPI_ASL_COMPILER) || \
+ (defined ACPI_BIN_APP) || \
+ (defined ACPI_DUMP_APP) || \
+ (defined ACPI_HELP_APP) || \
+ (defined ACPI_NAMES_APP) || \
+ (defined ACPI_SRC_APP) || \
+ (defined ACPI_XTRACT_APP) || \
+ (defined ACPI_EXAMPLE_APP)
+#define ACPI_APPLICATION
+#define ACPI_SINGLE_THREADED
+#endif
+
/* iASL configuration */
#ifdef ACPI_ASL_COMPILER
-#define ACPI_APPLICATION
#define ACPI_DEBUG_OUTPUT
#define ACPI_CONSTANT_EVAL_ONLY
#define ACPI_LARGE_NAMESPACE_NODE
#define ACPI_DATA_TABLE_DISASSEMBLY
-#define ACPI_SINGLE_THREADED
#define ACPI_32BIT_PHYSICAL_ADDRESS
-
#define ACPI_DISASSEMBLER 1
#endif
@@ -89,21 +100,6 @@
#define ACPI_DBG_TRACK_ALLOCATIONS
#endif
-/*
- * acpi_bin/acpi_dump/acpi_help/acpi_names/acpi_src/acpi_xtract/Example
- * configuration. All single threaded.
- */
-#if (defined ACPI_BIN_APP) || \
- (defined ACPI_DUMP_APP) || \
- (defined ACPI_HELP_APP) || \
- (defined ACPI_NAMES_APP) || \
- (defined ACPI_SRC_APP) || \
- (defined ACPI_XTRACT_APP) || \
- (defined ACPI_EXAMPLE_APP)
-#define ACPI_APPLICATION
-#define ACPI_SINGLE_THREADED
-#endif
-
/* acpi_help configuration. Error messages disabled. */
#ifdef ACPI_HELP_APP
@@ -138,11 +134,16 @@
#define ACPI_REDUCED_HARDWARE 1
#endif
-/* Linkable ACPICA library */
+/* Linkable ACPICA library. Two versions, one with full debug. */
#ifdef ACPI_LIBRARY
#define ACPI_USE_LOCAL_CACHE
-#define ACPI_FULL_DEBUG
+#define ACPI_DEBUGGER 1
+#define ACPI_DISASSEMBLER 1
+
+#ifdef _DEBUG
+#define ACPI_DEBUG_OUTPUT
+#endif
#endif
/* Common for all ACPICA applications */
@@ -218,6 +219,9 @@
#elif defined(__HAIKU__)
#include "achaiku.h"
+#elif defined(__QNX__)
+#include "acqnx.h"
+
#else
/* Unknown environment */
diff --git a/include/acpi/platform/acmsvcex.h b/include/acpi/platform/acmsvcex.h
deleted file mode 100644
index 28084a1034fe..000000000000
--- a/include/acpi/platform/acmsvcex.h
+++ /dev/null
@@ -1,54 +0,0 @@
-/******************************************************************************
- *
- * Name: acmsvcex.h - Extra VC specific defines, etc.
- *
- *****************************************************************************/
-
-/*
- * Copyright (C) 2000 - 2016, Intel Corp.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions, and the following disclaimer,
- * without modification.
- * 2. Redistributions in binary form must reproduce at minimum a disclaimer
- * substantially similar to the "NO WARRANTY" disclaimer below
- * ("Disclaimer") and any redistribution must be conditioned upon
- * including a substantially similar Disclaimer requirement for further
- * binary redistribution.
- * 3. Neither the names of the above-listed copyright holders nor the names
- * of any contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * NO WARRANTY
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
- * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGES.
- */
-
-#ifndef __ACMSVCEX_H__
-#define __ACMSVCEX_H__
-
-/* Debug support. */
-
-#ifdef _DEBUG
-#define _CRTDBG_MAP_ALLOC /* Enables specific file/lineno for leaks */
-#include <crtdbg.h>
-#endif
-
-#endif /* __ACMSVCEX_H__ */
diff --git a/include/acpi/platform/acwinex.h b/include/acpi/platform/acwinex.h
deleted file mode 100644
index a00b3e4b80b0..000000000000
--- a/include/acpi/platform/acwinex.h
+++ /dev/null
@@ -1,49 +0,0 @@
-/******************************************************************************
- *
- * Name: acwinex.h - Extra OS specific defines, etc.
- *
- *****************************************************************************/
-
-/*
- * Copyright (C) 2000 - 2016, Intel Corp.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions, and the following disclaimer,
- * without modification.
- * 2. Redistributions in binary form must reproduce at minimum a disclaimer
- * substantially similar to the "NO WARRANTY" disclaimer below
- * ("Disclaimer") and any redistribution must be conditioned upon
- * including a substantially similar Disclaimer requirement for further
- * binary redistribution.
- * 3. Neither the names of the above-listed copyright holders nor the names
- * of any contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * NO WARRANTY
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
- * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGES.
- */
-
-#ifndef __ACWINEX_H__
-#define __ACWINEX_H__
-
-/* Windows uses VC */
-
-#endif /* __ACWINEX_H__ */
diff --git a/include/acpi/video.h b/include/acpi/video.h
index 5ca2f2c16458..70a41f742037 100644
--- a/include/acpi/video.h
+++ b/include/acpi/video.h
@@ -4,6 +4,19 @@
#include <linux/errno.h> /* for ENODEV */
#include <linux/types.h> /* for bool */
+struct acpi_video_brightness_flags {
+ u8 _BCL_no_ac_battery_levels:1; /* no AC/Battery levels in _BCL */
+ u8 _BCL_reversed:1; /* _BCL package is in a reversed order */
+ u8 _BQC_use_index:1; /* _BQC returns an index value */
+};
+
+struct acpi_video_device_brightness {
+ int curr;
+ int count;
+ int *levels;
+ struct acpi_video_brightness_flags flags;
+};
+
struct acpi_device;
#define ACPI_VIDEO_CLASS "video"
@@ -37,6 +50,8 @@ extern void acpi_video_set_dmi_backlight_type(enum acpi_backlight_type type);
* may change over time and should not be cached.
*/
extern bool acpi_video_handles_brightness_key_presses(void);
+extern int acpi_video_get_levels(struct acpi_device *device,
+ struct acpi_video_device_brightness **dev_br);
#else
static inline int acpi_video_register(void) { return 0; }
static inline void acpi_video_unregister(void) { return; }
@@ -56,6 +71,11 @@ static inline bool acpi_video_handles_brightness_key_presses(void)
{
return false;
}
+static inline int acpi_video_get_levels(struct acpi_device *device,
+ struct acpi_video_device_brightness **dev_br)
+{
+ return -ENODEV;
+}
#endif
#endif
diff --git a/include/asm-generic/futex.h b/include/asm-generic/futex.h
index e56272c919b5..bf2d34c9d804 100644
--- a/include/asm-generic/futex.h
+++ b/include/asm-generic/futex.h
@@ -108,11 +108,15 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
u32 val;
preempt_disable();
- if (unlikely(get_user(val, uaddr) != 0))
+ if (unlikely(get_user(val, uaddr) != 0)) {
+ preempt_enable();
return -EFAULT;
+ }
- if (val == oldval && unlikely(put_user(newval, uaddr) != 0))
+ if (val == oldval && unlikely(put_user(newval, uaddr) != 0)) {
+ preempt_enable();
return -EFAULT;
+ }
*uval = val;
preempt_enable();
diff --git a/include/asm-generic/io.h b/include/asm-generic/io.h
index eed3bbe88c8a..002b81f6f2bc 100644
--- a/include/asm-generic/io.h
+++ b/include/asm-generic/io.h
@@ -191,7 +191,7 @@ static inline void writeq(u64 value, volatile void __iomem *addr)
#define readl_relaxed readl
#endif
-#ifndef readq_relaxed
+#if defined(readq) && !defined(readq_relaxed)
#define readq_relaxed readq
#endif
@@ -207,7 +207,7 @@ static inline void writeq(u64 value, volatile void __iomem *addr)
#define writel_relaxed writel
#endif
-#ifndef writeq_relaxed
+#if defined(writeq) && !defined(writeq_relaxed)
#define writeq_relaxed writeq
#endif
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index 9401f4819891..d4458b6dbfb4 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -806,4 +806,12 @@ static inline int pmd_clear_huge(pmd_t *pmd)
#define io_remap_pfn_range remap_pfn_range
#endif
+#ifndef has_transparent_hugepage
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+#define has_transparent_hugepage() 1
+#else
+#define has_transparent_hugepage() 0
+#endif
+#endif
+
#endif /* _ASM_GENERIC_PGTABLE_H */
diff --git a/include/asm-generic/qspinlock.h b/include/asm-generic/qspinlock.h
index 35a52a880b2f..6bd05700d8c9 100644
--- a/include/asm-generic/qspinlock.h
+++ b/include/asm-generic/qspinlock.h
@@ -28,7 +28,30 @@
*/
static __always_inline int queued_spin_is_locked(struct qspinlock *lock)
{
- return atomic_read(&lock->val);
+ /*
+ * queued_spin_lock_slowpath() can ACQUIRE the lock before
+ * issuing the unordered store that sets _Q_LOCKED_VAL.
+ *
+ * See both smp_cond_acquire() sites for more detail.
+ *
+ * This however means that in code like:
+ *
+ * spin_lock(A) spin_lock(B)
+ * spin_unlock_wait(B) spin_is_locked(A)
+ * do_something() do_something()
+ *
+ * Both CPUs can end up running do_something() because the store
+ * setting _Q_LOCKED_VAL will pass through the loads in
+ * spin_unlock_wait() and/or spin_is_locked().
+ *
+ * Avoid this by issuing a full memory barrier between the spin_lock()
+ * and the loads in spin_unlock_wait() and spin_is_locked().
+ *
+ * Note that regular mutual exclusion doesn't care about this
+ * delayed store.
+ */
+ smp_mb();
+ return atomic_read(&lock->val) & _Q_LOCKED_MASK;
}
/**
@@ -108,6 +131,8 @@ static __always_inline void queued_spin_unlock(struct qspinlock *lock)
*/
static inline void queued_spin_unlock_wait(struct qspinlock *lock)
{
+ /* See queued_spin_is_locked() */
+ smp_mb();
while (atomic_read(&lock->val) & _Q_LOCKED_MASK)
cpu_relax();
}
diff --git a/include/asm-generic/rwsem.h b/include/asm-generic/rwsem.h
index d6d5dc98d7da..3fc94a046bf5 100644
--- a/include/asm-generic/rwsem.h
+++ b/include/asm-generic/rwsem.h
@@ -53,7 +53,7 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
/*
* lock for writing
*/
-static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
+static inline void __down_write(struct rw_semaphore *sem)
{
long tmp;
@@ -63,9 +63,16 @@ static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
rwsem_down_write_failed(sem);
}
-static inline void __down_write(struct rw_semaphore *sem)
+static inline int __down_write_killable(struct rw_semaphore *sem)
{
- __down_write_nested(sem, 0);
+ long tmp;
+
+ tmp = atomic_long_add_return_acquire(RWSEM_ACTIVE_WRITE_BIAS,
+ (atomic_long_t *)&sem->count);
+ if (unlikely(tmp != RWSEM_ACTIVE_WRITE_BIAS))
+ if (IS_ERR(rwsem_down_write_failed_killable(sem)))
+ return -EINTR;
+ return 0;
}
static inline int __down_write_trylock(struct rw_semaphore *sem)
diff --git a/include/asm-generic/seccomp.h b/include/asm-generic/seccomp.h
index c9ccafa0d99a..e74072d23e69 100644
--- a/include/asm-generic/seccomp.h
+++ b/include/asm-generic/seccomp.h
@@ -29,4 +29,18 @@
#define __NR_seccomp_sigreturn __NR_rt_sigreturn
#endif
+#ifdef CONFIG_COMPAT
+#ifndef get_compat_mode1_syscalls
+static inline const int *get_compat_mode1_syscalls(void)
+{
+ static const int mode1_syscalls_32[] = {
+ __NR_seccomp_read_32, __NR_seccomp_write_32,
+ __NR_seccomp_exit_32, __NR_seccomp_sigreturn_32,
+ 0, /* null terminated */
+ };
+ return mode1_syscalls_32;
+}
+#endif
+#endif /* CONFIG_COMPAT */
+
#endif /* _ASM_GENERIC_SECCOMP_H */
diff --git a/include/asm-generic/siginfo.h b/include/asm-generic/siginfo.h
index 3d1a3af5cf59..a2508a8f9a9c 100644
--- a/include/asm-generic/siginfo.h
+++ b/include/asm-generic/siginfo.h
@@ -17,21 +17,6 @@
struct siginfo;
void do_schedule_next_timer(struct siginfo *info);
-#ifndef HAVE_ARCH_COPY_SIGINFO
-
-#include <linux/string.h>
-
-static inline void copy_siginfo(struct siginfo *to, struct siginfo *from)
-{
- if (from->si_code < 0)
- memcpy(to, from, sizeof(*to));
- else
- /* _sigchld is currently the largest know union member */
- memcpy(to, from, __ARCH_SI_PREAMBLE_SIZE + sizeof(from->_sifields._sigchld));
-}
-
-#endif
-
extern int copy_siginfo_to_user(struct siginfo __user *to, const struct siginfo *from);
#endif
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index 339125bb4d2c..6a67ab94b553 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -245,7 +245,9 @@
#define INIT_TASK_DATA(align) \
. = ALIGN(align); \
- *(.data..init_task)
+ VMLINUX_SYMBOL(__start_init_task) = .; \
+ *(.data..init_task) \
+ VMLINUX_SYMBOL(__end_init_task) = .;
/*
* Read only Data
diff --git a/include/clocksource/arm_arch_timer.h b/include/clocksource/arm_arch_timer.h
index 25d0914481a2..caedb74c9210 100644
--- a/include/clocksource/arm_arch_timer.h
+++ b/include/clocksource/arm_arch_timer.h
@@ -49,11 +49,16 @@ enum arch_timer_reg {
#define ARCH_TIMER_EVT_STREAM_FREQ 10000 /* 100us */
+struct arch_timer_kvm_info {
+ struct timecounter timecounter;
+ int virtual_irq;
+};
+
#ifdef CONFIG_ARM_ARCH_TIMER
extern u32 arch_timer_get_rate(void);
extern u64 (*arch_timer_read_counter)(void);
-extern struct timecounter *arch_timer_get_timecounter(void);
+extern struct arch_timer_kvm_info *arch_timer_get_kvm_info(void);
#else
@@ -67,11 +72,6 @@ static inline u64 arch_timer_read_counter(void)
return 0;
}
-static inline struct timecounter *arch_timer_get_timecounter(void)
-{
- return NULL;
-}
-
#endif
#endif
diff --git a/include/crypto/aead.h b/include/crypto/aead.h
index 957bb8763219..75174f80a106 100644
--- a/include/crypto/aead.h
+++ b/include/crypto/aead.h
@@ -405,8 +405,7 @@ static inline void aead_request_set_tfm(struct aead_request *req,
* encrypt and decrypt API calls. During the allocation, the provided aead
* handle is registered in the request data structure.
*
- * Return: allocated request handle in case of success; IS_ERR() is true in case
- * of an error, PTR_ERR() returns the error code.
+ * Return: allocated request handle in case of success, or NULL if out of memory
*/
static inline struct aead_request *aead_request_alloc(struct crypto_aead *tfm,
gfp_t gfp)
diff --git a/include/crypto/hash.h b/include/crypto/hash.h
index 1969f1416658..26605888a199 100644
--- a/include/crypto/hash.h
+++ b/include/crypto/hash.h
@@ -547,8 +547,7 @@ static inline void ahash_request_set_tfm(struct ahash_request *req,
* the allocation, the provided ahash handle
* is registered in the request data structure.
*
- * Return: allocated request handle in case of success; IS_ERR() is true in case
- * of an error, PTR_ERR() returns the error code.
+ * Return: allocated request handle in case of success, or NULL if out of memory
*/
static inline struct ahash_request *ahash_request_alloc(
struct crypto_ahash *tfm, gfp_t gfp)
diff --git a/include/crypto/pkcs7.h b/include/crypto/pkcs7.h
index 441aff9b5aa7..583f199400a3 100644
--- a/include/crypto/pkcs7.h
+++ b/include/crypto/pkcs7.h
@@ -12,6 +12,7 @@
#ifndef _CRYPTO_PKCS7_H
#define _CRYPTO_PKCS7_H
+#include <linux/verification.h>
#include <crypto/public_key.h>
struct key;
@@ -26,14 +27,13 @@ extern void pkcs7_free_message(struct pkcs7_message *pkcs7);
extern int pkcs7_get_content_data(const struct pkcs7_message *pkcs7,
const void **_data, size_t *_datalen,
- bool want_wrapper);
+ size_t *_headerlen);
/*
* pkcs7_trust.c
*/
extern int pkcs7_validate_trust(struct pkcs7_message *pkcs7,
- struct key *trust_keyring,
- bool *_trusted);
+ struct key *trust_keyring);
/*
* pkcs7_verify.c
diff --git a/include/crypto/public_key.h b/include/crypto/public_key.h
index aa730ea7faf8..882ca0e1e7a5 100644
--- a/include/crypto/public_key.h
+++ b/include/crypto/public_key.h
@@ -15,20 +15,6 @@
#define _LINUX_PUBLIC_KEY_H
/*
- * The use to which an asymmetric key is being put.
- */
-enum key_being_used_for {
- VERIFYING_MODULE_SIGNATURE,
- VERIFYING_FIRMWARE_SIGNATURE,
- VERIFYING_KEXEC_PE_SIGNATURE,
- VERIFYING_KEY_SIGNATURE,
- VERIFYING_KEY_SELF_SIGNATURE,
- VERIFYING_UNSPECIFIED_SIGNATURE,
- NR__KEY_BEING_USED_FOR
-};
-extern const char *const key_being_used_for[NR__KEY_BEING_USED_FOR];
-
-/*
* Cryptographic data for the public-key subtype of the asymmetric key type.
*
* Note that this may include private part of the key as well as the public
@@ -41,12 +27,13 @@ struct public_key {
const char *pkey_algo;
};
-extern void public_key_destroy(void *payload);
+extern void public_key_free(struct public_key *key);
/*
* Public key cryptography signature data
*/
struct public_key_signature {
+ struct asymmetric_key_id *auth_ids[2];
u8 *s; /* Signature */
u32 s_size; /* Number of bytes in signature */
u8 *digest;
@@ -55,17 +42,21 @@ struct public_key_signature {
const char *hash_algo;
};
+extern void public_key_signature_free(struct public_key_signature *sig);
+
extern struct asymmetric_key_subtype public_key_subtype;
+
struct key;
+struct key_type;
+union key_payload;
+
+extern int restrict_link_by_signature(struct key *trust_keyring,
+ const struct key_type *type,
+ const union key_payload *payload);
+
extern int verify_signature(const struct key *key,
const struct public_key_signature *sig);
-struct asymmetric_key_id;
-extern struct key *x509_request_asymmetric_key(struct key *keyring,
- const struct asymmetric_key_id *id,
- const struct asymmetric_key_id *skid,
- bool partial);
-
int public_key_verify_signature(const struct public_key *pkey,
const struct public_key_signature *sig);
diff --git a/include/crypto/skcipher.h b/include/crypto/skcipher.h
index 905490c1da89..0f987f50bb52 100644
--- a/include/crypto/skcipher.h
+++ b/include/crypto/skcipher.h
@@ -425,8 +425,7 @@ static inline struct skcipher_request *skcipher_request_cast(
* encrypt and decrypt API calls. During the allocation, the provided skcipher
* handle is registered in the request data structure.
*
- * Return: allocated request handle in case of success; IS_ERR() is true in case
- * of an error, PTR_ERR() returns the error code.
+ * Return: allocated request handle in case of success, or NULL if out of memory
*/
static inline struct skcipher_request *skcipher_request_alloc(
struct crypto_skcipher *tfm, gfp_t gfp)
diff --git a/include/drm/drm_cache.h b/include/drm/drm_cache.h
index 461a0558bca4..cebecff536a3 100644
--- a/include/drm/drm_cache.h
+++ b/include/drm/drm_cache.h
@@ -39,6 +39,8 @@ static inline bool drm_arch_can_wc_memory(void)
{
#if defined(CONFIG_PPC) && !defined(CONFIG_NOT_COHERENT_CACHE)
return false;
+#elif defined(CONFIG_MIPS) && defined(CONFIG_CPU_LOONGSON3)
+ return false;
#else
return true;
#endif
diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h
index afae2316bd43..055a08ddac02 100644
--- a/include/drm/ttm/ttm_bo_api.h
+++ b/include/drm/ttm/ttm_bo_api.h
@@ -92,7 +92,7 @@ struct ttm_placement {
*/
struct ttm_bus_placement {
void *addr;
- unsigned long base;
+ phys_addr_t base;
unsigned long size;
unsigned long offset;
bool is_iomem;
diff --git a/include/dt-bindings/clock/ath79-clk.h b/include/dt-bindings/clock/ath79-clk.h
new file mode 100644
index 000000000000..27359ad83904
--- /dev/null
+++ b/include/dt-bindings/clock/ath79-clk.h
@@ -0,0 +1,19 @@
+/*
+ * Copyright (C) 2014, 2016 Antony Pavlov <antonynpavlov@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#ifndef __DT_BINDINGS_ATH79_CLK_H
+#define __DT_BINDINGS_ATH79_CLK_H
+
+#define ATH79_CLK_CPU 0
+#define ATH79_CLK_DDR 1
+#define ATH79_CLK_AHB 2
+
+#define ATH79_CLK_END 3
+
+#endif /* __DT_BINDINGS_ATH79_CLK_H */
diff --git a/include/dt-bindings/clock/microchip,pic32-clock.h b/include/dt-bindings/clock/microchip,pic32-clock.h
new file mode 100644
index 000000000000..184647a6a8de
--- /dev/null
+++ b/include/dt-bindings/clock/microchip,pic32-clock.h
@@ -0,0 +1,42 @@
+/*
+ * Purna Chandra Mandal,<purna.mandal@microchip.com>
+ * Copyright (C) 2015 Microchip Technology Inc. All rights reserved.
+ *
+ * This program is free software; you can distribute it and/or modify it
+ * under the terms of the GNU General Public License (Version 2) as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+#ifndef _DT_BINDINGS_CLK_MICROCHIP_PIC32_H_
+#define _DT_BINDINGS_CLK_MICROCHIP_PIC32_H_
+
+/* clock output indices */
+#define POSCCLK 0
+#define FRCCLK 1
+#define BFRCCLK 2
+#define LPRCCLK 3
+#define SOSCCLK 4
+#define FRCDIVCLK 5
+#define PLLCLK 6
+#define SCLK 7
+#define PB1CLK 8
+#define PB2CLK 9
+#define PB3CLK 10
+#define PB4CLK 11
+#define PB5CLK 12
+#define PB6CLK 13
+#define PB7CLK 14
+#define REF1CLK 15
+#define REF2CLK 16
+#define REF3CLK 17
+#define REF4CLK 18
+#define REF5CLK 19
+#define UPLLCLK 20
+#define MAXCLKS 21
+
+#endif /* _DT_BINDINGS_CLK_MICROCHIP_PIC32_H_ */
diff --git a/include/dt-bindings/clock/r8a7790-clock.h b/include/dt-bindings/clock/r8a7790-clock.h
index 7b1ad8922eec..fa5e8da809f2 100644
--- a/include/dt-bindings/clock/r8a7790-clock.h
+++ b/include/dt-bindings/clock/r8a7790-clock.h
@@ -66,6 +66,7 @@
#define R8A7790_CLK_IIC2 0
#define R8A7790_CLK_TPU0 4
#define R8A7790_CLK_MMCIF1 5
+#define R8A7790_CLK_SCIF2 10
#define R8A7790_CLK_SDHI3 11
#define R8A7790_CLK_SDHI2 12
#define R8A7790_CLK_SDHI1 13
diff --git a/include/dt-bindings/clock/r8a7794-clock.h b/include/dt-bindings/clock/r8a7794-clock.h
index f843de6bf377..4d3ecd626c1f 100644
--- a/include/dt-bindings/clock/r8a7794-clock.h
+++ b/include/dt-bindings/clock/r8a7794-clock.h
@@ -21,6 +21,7 @@
#define R8A7794_CLK_SDH 6
#define R8A7794_CLK_SD0 7
#define R8A7794_CLK_Z 8
+#define R8A7794_CLK_RCAN 9
/* MSTP0 */
#define R8A7794_CLK_MSIOF0 0
@@ -56,6 +57,8 @@
#define R8A7794_CLK_SDHI1 12
#define R8A7794_CLK_SDHI0 14
#define R8A7794_CLK_MMCIF0 15
+#define R8A7794_CLK_IIC0 18
+#define R8A7794_CLK_IIC1 23
#define R8A7794_CLK_CMT1 29
#define R8A7794_CLK_USBDMAC0 30
#define R8A7794_CLK_USBDMAC1 31
@@ -95,6 +98,8 @@
#define R8A7794_CLK_GPIO2 10
#define R8A7794_CLK_GPIO1 11
#define R8A7794_CLK_GPIO0 12
+#define R8A7794_CLK_RCAN1 15
+#define R8A7794_CLK_RCAN0 16
#define R8A7794_CLK_QSPI_MOD 17
#define R8A7794_CLK_I2C5 25
#define R8A7794_CLK_I2C4 27
diff --git a/include/dt-bindings/gpio/meson-gxbb-gpio.h b/include/dt-bindings/gpio/meson-gxbb-gpio.h
new file mode 100644
index 000000000000..58654fd7aa1e
--- /dev/null
+++ b/include/dt-bindings/gpio/meson-gxbb-gpio.h
@@ -0,0 +1,154 @@
+/*
+ * GPIO definitions for Amlogic Meson GXBB SoCs
+ *
+ * Copyright (C) 2016 Endless Mobile, Inc.
+ * Author: Carlo Caione <carlo@endlessm.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _DT_BINDINGS_MESON_GXBB_GPIO_H
+#define _DT_BINDINGS_MESON_GXBB_GPIO_H
+
+#define GPIOAO_0 0
+#define GPIOAO_1 1
+#define GPIOAO_2 2
+#define GPIOAO_3 3
+#define GPIOAO_4 4
+#define GPIOAO_5 5
+#define GPIOAO_6 6
+#define GPIOAO_7 7
+#define GPIOAO_8 8
+#define GPIOAO_9 9
+#define GPIOAO_10 10
+#define GPIOAO_11 11
+#define GPIOAO_12 12
+#define GPIOAO_13 13
+
+#define GPIOZ_0 0
+#define GPIOZ_1 1
+#define GPIOZ_2 2
+#define GPIOZ_3 3
+#define GPIOZ_4 4
+#define GPIOZ_5 5
+#define GPIOZ_6 6
+#define GPIOZ_7 7
+#define GPIOZ_8 8
+#define GPIOZ_9 9
+#define GPIOZ_10 10
+#define GPIOZ_11 11
+#define GPIOZ_12 12
+#define GPIOZ_13 13
+#define GPIOZ_14 14
+#define GPIOZ_15 15
+#define GPIOH_0 16
+#define GPIOH_1 17
+#define GPIOH_2 18
+#define GPIOH_3 19
+#define BOOT_0 20
+#define BOOT_1 21
+#define BOOT_2 22
+#define BOOT_3 23
+#define BOOT_4 24
+#define BOOT_5 25
+#define BOOT_6 26
+#define BOOT_7 27
+#define BOOT_8 28
+#define BOOT_9 29
+#define BOOT_10 30
+#define BOOT_11 31
+#define BOOT_12 32
+#define BOOT_13 33
+#define BOOT_14 34
+#define BOOT_15 35
+#define BOOT_16 36
+#define BOOT_17 37
+#define CARD_0 38
+#define CARD_1 39
+#define CARD_2 40
+#define CARD_3 41
+#define CARD_4 42
+#define CARD_5 43
+#define CARD_6 44
+#define GPIODV_0 45
+#define GPIODV_1 46
+#define GPIODV_2 47
+#define GPIODV_3 48
+#define GPIODV_4 49
+#define GPIODV_5 50
+#define GPIODV_6 51
+#define GPIODV_7 52
+#define GPIODV_8 53
+#define GPIODV_9 54
+#define GPIODV_10 55
+#define GPIODV_11 56
+#define GPIODV_12 57
+#define GPIODV_13 58
+#define GPIODV_14 59
+#define GPIODV_15 60
+#define GPIODV_16 61
+#define GPIODV_17 62
+#define GPIODV_18 63
+#define GPIODV_19 64
+#define GPIODV_20 65
+#define GPIODV_21 66
+#define GPIODV_22 67
+#define GPIODV_23 68
+#define GPIODV_24 69
+#define GPIODV_25 70
+#define GPIODV_26 71
+#define GPIODV_27 72
+#define GPIODV_28 73
+#define GPIODV_29 74
+#define GPIOY_0 75
+#define GPIOY_1 76
+#define GPIOY_2 77
+#define GPIOY_3 78
+#define GPIOY_4 79
+#define GPIOY_5 80
+#define GPIOY_6 81
+#define GPIOY_7 82
+#define GPIOY_8 83
+#define GPIOY_9 84
+#define GPIOY_10 85
+#define GPIOY_11 86
+#define GPIOY_12 87
+#define GPIOY_13 88
+#define GPIOY_14 89
+#define GPIOY_15 90
+#define GPIOY_16 91
+#define GPIOX_0 92
+#define GPIOX_1 93
+#define GPIOX_2 94
+#define GPIOX_3 95
+#define GPIOX_4 96
+#define GPIOX_5 97
+#define GPIOX_6 98
+#define GPIOX_7 99
+#define GPIOX_8 100
+#define GPIOX_9 101
+#define GPIOX_10 102
+#define GPIOX_11 103
+#define GPIOX_12 104
+#define GPIOX_13 105
+#define GPIOX_14 106
+#define GPIOX_15 107
+#define GPIOX_16 108
+#define GPIOX_17 109
+#define GPIOX_18 110
+#define GPIOX_19 111
+#define GPIOX_20 112
+#define GPIOX_21 113
+#define GPIOX_22 114
+#define GPIOCLK_0 115
+#define GPIOCLK_1 116
+#define GPIOCLK_2 117
+#define GPIOCLK_3 118
+#define GPIO_TEST_N 119
+
+#endif
diff --git a/include/dt-bindings/gpio/tegra-gpio.h b/include/dt-bindings/gpio/tegra-gpio.h
index 197dc28b676e..a1c09e88e80b 100644
--- a/include/dt-bindings/gpio/tegra-gpio.h
+++ b/include/dt-bindings/gpio/tegra-gpio.h
@@ -12,40 +12,40 @@
#include <dt-bindings/gpio/gpio.h>
-#define TEGRA_GPIO_BANK_ID_A 0
-#define TEGRA_GPIO_BANK_ID_B 1
-#define TEGRA_GPIO_BANK_ID_C 2
-#define TEGRA_GPIO_BANK_ID_D 3
-#define TEGRA_GPIO_BANK_ID_E 4
-#define TEGRA_GPIO_BANK_ID_F 5
-#define TEGRA_GPIO_BANK_ID_G 6
-#define TEGRA_GPIO_BANK_ID_H 7
-#define TEGRA_GPIO_BANK_ID_I 8
-#define TEGRA_GPIO_BANK_ID_J 9
-#define TEGRA_GPIO_BANK_ID_K 10
-#define TEGRA_GPIO_BANK_ID_L 11
-#define TEGRA_GPIO_BANK_ID_M 12
-#define TEGRA_GPIO_BANK_ID_N 13
-#define TEGRA_GPIO_BANK_ID_O 14
-#define TEGRA_GPIO_BANK_ID_P 15
-#define TEGRA_GPIO_BANK_ID_Q 16
-#define TEGRA_GPIO_BANK_ID_R 17
-#define TEGRA_GPIO_BANK_ID_S 18
-#define TEGRA_GPIO_BANK_ID_T 19
-#define TEGRA_GPIO_BANK_ID_U 20
-#define TEGRA_GPIO_BANK_ID_V 21
-#define TEGRA_GPIO_BANK_ID_W 22
-#define TEGRA_GPIO_BANK_ID_X 23
-#define TEGRA_GPIO_BANK_ID_Y 24
-#define TEGRA_GPIO_BANK_ID_Z 25
-#define TEGRA_GPIO_BANK_ID_AA 26
-#define TEGRA_GPIO_BANK_ID_BB 27
-#define TEGRA_GPIO_BANK_ID_CC 28
-#define TEGRA_GPIO_BANK_ID_DD 29
-#define TEGRA_GPIO_BANK_ID_EE 30
-#define TEGRA_GPIO_BANK_ID_FF 31
+#define TEGRA_GPIO_PORT_A 0
+#define TEGRA_GPIO_PORT_B 1
+#define TEGRA_GPIO_PORT_C 2
+#define TEGRA_GPIO_PORT_D 3
+#define TEGRA_GPIO_PORT_E 4
+#define TEGRA_GPIO_PORT_F 5
+#define TEGRA_GPIO_PORT_G 6
+#define TEGRA_GPIO_PORT_H 7
+#define TEGRA_GPIO_PORT_I 8
+#define TEGRA_GPIO_PORT_J 9
+#define TEGRA_GPIO_PORT_K 10
+#define TEGRA_GPIO_PORT_L 11
+#define TEGRA_GPIO_PORT_M 12
+#define TEGRA_GPIO_PORT_N 13
+#define TEGRA_GPIO_PORT_O 14
+#define TEGRA_GPIO_PORT_P 15
+#define TEGRA_GPIO_PORT_Q 16
+#define TEGRA_GPIO_PORT_R 17
+#define TEGRA_GPIO_PORT_S 18
+#define TEGRA_GPIO_PORT_T 19
+#define TEGRA_GPIO_PORT_U 20
+#define TEGRA_GPIO_PORT_V 21
+#define TEGRA_GPIO_PORT_W 22
+#define TEGRA_GPIO_PORT_X 23
+#define TEGRA_GPIO_PORT_Y 24
+#define TEGRA_GPIO_PORT_Z 25
+#define TEGRA_GPIO_PORT_AA 26
+#define TEGRA_GPIO_PORT_BB 27
+#define TEGRA_GPIO_PORT_CC 28
+#define TEGRA_GPIO_PORT_DD 29
+#define TEGRA_GPIO_PORT_EE 30
+#define TEGRA_GPIO_PORT_FF 31
-#define TEGRA_GPIO(bank, offset) \
- ((TEGRA_GPIO_BANK_ID_##bank * 8) + offset)
+#define TEGRA_GPIO(port, offset) \
+ ((TEGRA_GPIO_PORT_##port * 8) + offset)
#endif
diff --git a/include/dt-bindings/gpio/tegra186-gpio.h b/include/dt-bindings/gpio/tegra186-gpio.h
new file mode 100644
index 000000000000..38001c7023f1
--- /dev/null
+++ b/include/dt-bindings/gpio/tegra186-gpio.h
@@ -0,0 +1,56 @@
+/*
+ * This header provides constants for binding nvidia,tegra186-gpio*.
+ *
+ * The first cell in Tegra's GPIO specifier is the GPIO ID. The macros below
+ * provide names for this.
+ *
+ * The second cell contains standard flag values specified in gpio.h.
+ */
+
+#ifndef _DT_BINDINGS_GPIO_TEGRA_GPIO_H
+#define _DT_BINDINGS_GPIO_TEGRA_GPIO_H
+
+#include <dt-bindings/gpio/gpio.h>
+
+/* GPIOs implemented by main GPIO controller */
+#define TEGRA_MAIN_GPIO_PORT_A 0
+#define TEGRA_MAIN_GPIO_PORT_B 1
+#define TEGRA_MAIN_GPIO_PORT_C 2
+#define TEGRA_MAIN_GPIO_PORT_D 3
+#define TEGRA_MAIN_GPIO_PORT_E 4
+#define TEGRA_MAIN_GPIO_PORT_F 5
+#define TEGRA_MAIN_GPIO_PORT_G 6
+#define TEGRA_MAIN_GPIO_PORT_H 7
+#define TEGRA_MAIN_GPIO_PORT_I 8
+#define TEGRA_MAIN_GPIO_PORT_J 9
+#define TEGRA_MAIN_GPIO_PORT_K 10
+#define TEGRA_MAIN_GPIO_PORT_L 11
+#define TEGRA_MAIN_GPIO_PORT_M 12
+#define TEGRA_MAIN_GPIO_PORT_N 13
+#define TEGRA_MAIN_GPIO_PORT_O 14
+#define TEGRA_MAIN_GPIO_PORT_P 15
+#define TEGRA_MAIN_GPIO_PORT_Q 16
+#define TEGRA_MAIN_GPIO_PORT_R 17
+#define TEGRA_MAIN_GPIO_PORT_T 18
+#define TEGRA_MAIN_GPIO_PORT_X 19
+#define TEGRA_MAIN_GPIO_PORT_Y 20
+#define TEGRA_MAIN_GPIO_PORT_BB 21
+#define TEGRA_MAIN_GPIO_PORT_CC 22
+
+#define TEGRA_MAIN_GPIO(port, offset) \
+ ((TEGRA_MAIN_GPIO_PORT_##port * 8) + offset)
+
+/* GPIOs implemented by AON GPIO controller */
+#define TEGRA_AON_GPIO_PORT_S 0
+#define TEGRA_AON_GPIO_PORT_U 1
+#define TEGRA_AON_GPIO_PORT_V 2
+#define TEGRA_AON_GPIO_PORT_W 3
+#define TEGRA_AON_GPIO_PORT_Z 4
+#define TEGRA_AON_GPIO_PORT_AA 5
+#define TEGRA_AON_GPIO_PORT_EE 6
+#define TEGRA_AON_GPIO_PORT_FF 7
+
+#define TEGRA_AON_GPIO(port, offset) \
+ ((TEGRA_AON_GPIO_PORT_##port * 8) + offset)
+
+#endif
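Both Tegra headers above encode the first cell of a GPIO specifier as port * 8 + offset. Purely as an illustration of that encoding (the DEMO_ names are local copies made for this sketch and are not part of the binding), a small stand-alone program shows how an ID is built and how it decodes back into port and offset:

#include <stdio.h>

#define DEMO_TEGRA_MAIN_GPIO_PORT_J	9
#define DEMO_TEGRA_MAIN_GPIO(port, offset) \
	((DEMO_TEGRA_MAIN_GPIO_PORT_##port * 8) + (offset))

int main(void)
{
	int id = DEMO_TEGRA_MAIN_GPIO(J, 5);

	/* GPIO ID 77 decodes back to port J (77 / 8 == 9), offset 5 */
	printf("id=%d port=%d offset=%d\n", id, id / 8, id % 8);
	return 0;
}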
diff --git a/include/dt-bindings/mfd/arizona.h b/include/dt-bindings/mfd/arizona.h
index c40f665e2712..dedf46ffdb53 100644
--- a/include/dt-bindings/mfd/arizona.h
+++ b/include/dt-bindings/mfd/arizona.h
@@ -110,4 +110,9 @@
#define ARIZONA_ACCDET_MODE_HPM 4
#define ARIZONA_ACCDET_MODE_ADC 7
+#define ARIZONA_GPSW_OPEN 0
+#define ARIZONA_GPSW_CLOSED 1
+#define ARIZONA_GPSW_CLAMP_ENABLED 2
+#define ARIZONA_GPSW_CLAMP_DISABLED 3
+
#endif
diff --git a/include/dt-bindings/mfd/max77620.h b/include/dt-bindings/mfd/max77620.h
new file mode 100644
index 000000000000..b911a0720ccd
--- /dev/null
+++ b/include/dt-bindings/mfd/max77620.h
@@ -0,0 +1,39 @@
+/*
+ * This header provides macros for MAXIM MAX77620 device bindings.
+ *
+ * Copyright (c) 2016, NVIDIA Corporation.
+ * Author: Laxman Dewangan <ldewangan@nvidia.com>
+ */
+
+#ifndef _DT_BINDINGS_MFD_MAX77620_H
+#define _DT_BINDINGS_MFD_MAX77620_H
+
+/* MAX77620 interrupts */
+#define MAX77620_IRQ_TOP_GLBL 0 /* Low-Battery */
+#define MAX77620_IRQ_TOP_SD 1 /* SD power fail */
+#define MAX77620_IRQ_TOP_LDO 2 /* LDO power fail */
+#define MAX77620_IRQ_TOP_GPIO 3 /* GPIO internal int to MAX77620 */
+#define MAX77620_IRQ_TOP_RTC 4 /* RTC */
+#define MAX77620_IRQ_TOP_32K 5 /* 32kHz oscillator */
+#define MAX77620_IRQ_TOP_ONOFF 6 /* ON/OFF oscillator */
+#define MAX77620_IRQ_LBT_MBATLOW 7 /* Thermal alarm status, > 120C */
+#define MAX77620_IRQ_LBT_TJALRM1 8 /* Thermal alarm status, > 120C */
+#define MAX77620_IRQ_LBT_TJALRM2 9 /* Thermal alarm status, > 140C */
+
+/* FPS event source */
+#define MAX77620_FPS_EVENT_SRC_EN0 0
+#define MAX77620_FPS_EVENT_SRC_EN1 1
+#define MAX77620_FPS_EVENT_SRC_SW 2
+
+/* Device state when FPS event LOW */
+#define MAX77620_FPS_INACTIVE_STATE_SLEEP 0
+#define MAX77620_FPS_INACTIVE_STATE_LOW_POWER 1
+
+/* FPS source */
+#define MAX77620_FPS_SRC_0 0
+#define MAX77620_FPS_SRC_1 1
+#define MAX77620_FPS_SRC_2 2
+#define MAX77620_FPS_SRC_NONE 3
+#define MAX77620_FPS_SRC_DEF 4
+
+#endif
diff --git a/include/dt-bindings/pinctrl/hisi.h b/include/dt-bindings/pinctrl/hisi.h
new file mode 100644
index 000000000000..38f1ea879ea1
--- /dev/null
+++ b/include/dt-bindings/pinctrl/hisi.h
@@ -0,0 +1,59 @@
+/*
+ * This header provides constants for hisilicon pinctrl bindings.
+ *
+ * Copyright (c) 2015 Hisilicon Limited.
+ * Copyright (c) 2015 Linaro Limited.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DT_BINDINGS_PINCTRL_HISI_H
+#define _DT_BINDINGS_PINCTRL_HISI_H
+
+/* iomg bit definition */
+#define MUX_M0 0
+#define MUX_M1 1
+#define MUX_M2 2
+#define MUX_M3 3
+#define MUX_M4 4
+#define MUX_M5 5
+#define MUX_M6 6
+#define MUX_M7 7
+
+/* iocg bit definition */
+#define PULL_MASK (3)
+#define PULL_DIS (0)
+#define PULL_UP (1 << 0)
+#define PULL_DOWN (1 << 1)
+
+/* drive strength definition */
+#define DRIVE_MASK (7 << 4)
+#define DRIVE1_02MA (0 << 4)
+#define DRIVE1_04MA (1 << 4)
+#define DRIVE1_08MA (2 << 4)
+#define DRIVE1_10MA (3 << 4)
+#define DRIVE2_02MA (0 << 4)
+#define DRIVE2_04MA (1 << 4)
+#define DRIVE2_08MA (2 << 4)
+#define DRIVE2_10MA (3 << 4)
+#define DRIVE3_04MA (0 << 4)
+#define DRIVE3_08MA (1 << 4)
+#define DRIVE3_12MA (2 << 4)
+#define DRIVE3_16MA (3 << 4)
+#define DRIVE3_20MA (4 << 4)
+#define DRIVE3_24MA (5 << 4)
+#define DRIVE3_32MA (6 << 4)
+#define DRIVE3_40MA (7 << 4)
+#define DRIVE4_02MA (0 << 4)
+#define DRIVE4_04MA (2 << 4)
+#define DRIVE4_08MA (4 << 4)
+#define DRIVE4_10MA (6 << 4)
+
+#endif
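The iocg definitions above pack the pull configuration into bits 1:0 and the drive strength into bits 6:4, so a pin configuration is the OR of one value from each group and can be decoded again with the masks. A stand-alone sketch of that packing (DEMO_ copies of a few of the values, for illustration only):

#include <stdio.h>

#define DEMO_PULL_MASK		(3)
#define DEMO_PULL_UP		(1 << 0)
#define DEMO_DRIVE_MASK		(7 << 4)
#define DEMO_DRIVE3_08MA	(1 << 4)

int main(void)
{
	unsigned int iocg = DEMO_PULL_UP | DEMO_DRIVE3_08MA;

	/* extract the two fields back out of the combined value */
	printf("pull=%u drive=%u\n",
	       iocg & DEMO_PULL_MASK, (iocg & DEMO_DRIVE_MASK) >> 4);
	return 0;	/* prints pull=1 drive=1 */
}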
diff --git a/include/dt-bindings/power/r8a7779-sysc.h b/include/dt-bindings/power/r8a7779-sysc.h
new file mode 100644
index 000000000000..183571da507e
--- /dev/null
+++ b/include/dt-bindings/power/r8a7779-sysc.h
@@ -0,0 +1,27 @@
+/*
+ * Copyright (C) 2016 Glider bvba
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ */
+#ifndef __DT_BINDINGS_POWER_R8A7779_SYSC_H__
+#define __DT_BINDINGS_POWER_R8A7779_SYSC_H__
+
+/*
+ * These power domain indices match the numbers of the interrupt bits
+ * representing the power areas in the various Interrupt Registers
+ * (e.g. SYSCISR, Interrupt Status Register)
+ */
+
+#define R8A7779_PD_ARM1 1
+#define R8A7779_PD_ARM2 2
+#define R8A7779_PD_ARM3 3
+#define R8A7779_PD_SGX 20
+#define R8A7779_PD_VDP 21
+#define R8A7779_PD_IMP 24
+
+/* Always-on power area */
+#define R8A7779_PD_ALWAYS_ON 32
+
+#endif /* __DT_BINDINGS_POWER_R8A7779_SYSC_H__ */
diff --git a/include/dt-bindings/power/r8a7790-sysc.h b/include/dt-bindings/power/r8a7790-sysc.h
new file mode 100644
index 000000000000..6af4e9929bd0
--- /dev/null
+++ b/include/dt-bindings/power/r8a7790-sysc.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright (C) 2016 Glider bvba
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ */
+#ifndef __DT_BINDINGS_POWER_R8A7790_SYSC_H__
+#define __DT_BINDINGS_POWER_R8A7790_SYSC_H__
+
+/*
+ * These power domain indices match the numbers of the interrupt bits
+ * representing the power areas in the various Interrupt Registers
+ * (e.g. SYSCISR, Interrupt Status Register)
+ */
+
+#define R8A7790_PD_CA15_CPU0 0
+#define R8A7790_PD_CA15_CPU1 1
+#define R8A7790_PD_CA15_CPU2 2
+#define R8A7790_PD_CA15_CPU3 3
+#define R8A7790_PD_CA7_CPU0 5
+#define R8A7790_PD_CA7_CPU1 6
+#define R8A7790_PD_CA7_CPU2 7
+#define R8A7790_PD_CA7_CPU3 8
+#define R8A7790_PD_CA15_SCU 12
+#define R8A7790_PD_SH_4A 16
+#define R8A7790_PD_RGX 20
+#define R8A7790_PD_CA7_SCU 21
+#define R8A7790_PD_IMP 24
+
+/* Always-on power area */
+#define R8A7790_PD_ALWAYS_ON 32
+
+#endif /* __DT_BINDINGS_POWER_R8A7790_SYSC_H__ */
diff --git a/include/dt-bindings/power/r8a7791-sysc.h b/include/dt-bindings/power/r8a7791-sysc.h
new file mode 100644
index 000000000000..1403baa0514f
--- /dev/null
+++ b/include/dt-bindings/power/r8a7791-sysc.h
@@ -0,0 +1,26 @@
+/*
+ * Copyright (C) 2016 Glider bvba
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ */
+#ifndef __DT_BINDINGS_POWER_R8A7791_SYSC_H__
+#define __DT_BINDINGS_POWER_R8A7791_SYSC_H__
+
+/*
+ * These power domain indices match the numbers of the interrupt bits
+ * representing the power areas in the various Interrupt Registers
+ * (e.g. SYSCISR, Interrupt Status Register)
+ */
+
+#define R8A7791_PD_CA15_CPU0 0
+#define R8A7791_PD_CA15_CPU1 1
+#define R8A7791_PD_CA15_SCU 12
+#define R8A7791_PD_SH_4A 16
+#define R8A7791_PD_SGX 20
+
+/* Always-on power area */
+#define R8A7791_PD_ALWAYS_ON 32
+
+#endif /* __DT_BINDINGS_POWER_R8A7791_SYSC_H__ */
diff --git a/include/dt-bindings/power/r8a7793-sysc.h b/include/dt-bindings/power/r8a7793-sysc.h
new file mode 100644
index 000000000000..b5693df3d830
--- /dev/null
+++ b/include/dt-bindings/power/r8a7793-sysc.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) 2016 Glider bvba
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ */
+#ifndef __DT_BINDINGS_POWER_R8A7793_SYSC_H__
+#define __DT_BINDINGS_POWER_R8A7793_SYSC_H__
+
+/*
+ * These power domain indices match the numbers of the interrupt bits
+ * representing the power areas in the various Interrupt Registers
+ * (e.g. SYSCISR, Interrupt Status Register)
+ *
+ * Note that R-Car M2-N is identical to R-Car M2-W w.r.t. power domains.
+ */
+
+#define R8A7793_PD_CA15_CPU0 0
+#define R8A7793_PD_CA15_CPU1 1
+#define R8A7793_PD_CA15_SCU 12
+#define R8A7793_PD_SH_4A 16
+#define R8A7793_PD_SGX 20
+
+/* Always-on power area */
+#define R8A7793_PD_ALWAYS_ON 32
+
+#endif /* __DT_BINDINGS_POWER_R8A7793_SYSC_H__ */
diff --git a/include/dt-bindings/power/r8a7794-sysc.h b/include/dt-bindings/power/r8a7794-sysc.h
new file mode 100644
index 000000000000..862241c2d27b
--- /dev/null
+++ b/include/dt-bindings/power/r8a7794-sysc.h
@@ -0,0 +1,26 @@
+/*
+ * Copyright (C) 2016 Glider bvba
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ */
+#ifndef __DT_BINDINGS_POWER_R8A7794_SYSC_H__
+#define __DT_BINDINGS_POWER_R8A7794_SYSC_H__
+
+/*
+ * These power domain indices match the numbers of the interrupt bits
+ * representing the power areas in the various Interrupt Registers
+ * (e.g. SYSCISR, Interrupt Status Register)
+ */
+
+#define R8A7794_PD_CA7_CPU0 5
+#define R8A7794_PD_CA7_CPU1 6
+#define R8A7794_PD_SH_4A 16
+#define R8A7794_PD_SGX 20
+#define R8A7794_PD_CA7_SCU 21
+
+/* Always-on power area */
+#define R8A7794_PD_ALWAYS_ON 32
+
+#endif /* __DT_BINDINGS_POWER_R8A7794_SYSC_H__ */
diff --git a/include/dt-bindings/power/r8a7795-sysc.h b/include/dt-bindings/power/r8a7795-sysc.h
new file mode 100644
index 000000000000..ee2e26ba605e
--- /dev/null
+++ b/include/dt-bindings/power/r8a7795-sysc.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright (C) 2016 Glider bvba
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ */
+#ifndef __DT_BINDINGS_POWER_R8A7795_SYSC_H__
+#define __DT_BINDINGS_POWER_R8A7795_SYSC_H__
+
+/*
+ * These power domain indices match the numbers of the interrupt bits
+ * representing the power areas in the various Interrupt Registers
+ * (e.g. SYSCISR, Interrupt Status Register)
+ */
+
+#define R8A7795_PD_CA57_CPU0 0
+#define R8A7795_PD_CA57_CPU1 1
+#define R8A7795_PD_CA57_CPU2 2
+#define R8A7795_PD_CA57_CPU3 3
+#define R8A7795_PD_CA53_CPU0 5
+#define R8A7795_PD_CA53_CPU1 6
+#define R8A7795_PD_CA53_CPU2 7
+#define R8A7795_PD_CA53_CPU3 8
+#define R8A7795_PD_A3VP 9
+#define R8A7795_PD_CA57_SCU 12
+#define R8A7795_PD_CR7 13
+#define R8A7795_PD_A3VC 14
+#define R8A7795_PD_3DG_A 17
+#define R8A7795_PD_3DG_B 18
+#define R8A7795_PD_3DG_C 19
+#define R8A7795_PD_3DG_D 20
+#define R8A7795_PD_CA53_SCU 21
+#define R8A7795_PD_3DG_E 22
+#define R8A7795_PD_A3IR 24
+#define R8A7795_PD_A2VC0 25
+#define R8A7795_PD_A2VC1 26
+
+/* Always-on power area */
+#define R8A7795_PD_ALWAYS_ON 32
+
+#endif /* __DT_BINDINGS_POWER_R8A7795_SYSC_H__ */
diff --git a/include/dt-bindings/power/rk3399-power.h b/include/dt-bindings/power/rk3399-power.h
new file mode 100644
index 000000000000..168b3bfbd6f5
--- /dev/null
+++ b/include/dt-bindings/power/rk3399-power.h
@@ -0,0 +1,53 @@
+#ifndef __DT_BINDINGS_POWER_RK3399_POWER_H__
+#define __DT_BINDINGS_POWER_RK3399_POWER_H__
+
+/* VD_CORE_L */
+#define RK3399_PD_A53_L0 0
+#define RK3399_PD_A53_L1 1
+#define RK3399_PD_A53_L2 2
+#define RK3399_PD_A53_L3 3
+#define RK3399_PD_SCU_L 4
+
+/* VD_CORE_B */
+#define RK3399_PD_A72_B0 5
+#define RK3399_PD_A72_B1 6
+#define RK3399_PD_SCU_B 7
+
+/* VD_LOGIC */
+#define RK3399_PD_TCPD0 8
+#define RK3399_PD_TCPD1 9
+#define RK3399_PD_CCI 10
+#define RK3399_PD_CCI0 11
+#define RK3399_PD_CCI1 12
+#define RK3399_PD_PERILP 13
+#define RK3399_PD_PERIHP 14
+#define RK3399_PD_VIO 15
+#define RK3399_PD_VO 16
+#define RK3399_PD_VOPB 17
+#define RK3399_PD_VOPL 18
+#define RK3399_PD_ISP0 19
+#define RK3399_PD_ISP1 20
+#define RK3399_PD_HDCP 21
+#define RK3399_PD_GMAC 22
+#define RK3399_PD_EMMC 23
+#define RK3399_PD_USB3 24
+#define RK3399_PD_EDP 25
+#define RK3399_PD_GIC 26
+#define RK3399_PD_SD 27
+#define RK3399_PD_SDIOAUDIO 28
+#define RK3399_PD_ALIVE 29
+
+/* VD_CENTER */
+#define RK3399_PD_CENTER 30
+#define RK3399_PD_VCODEC 31
+#define RK3399_PD_VDU 32
+#define RK3399_PD_RGA 33
+#define RK3399_PD_IEP 34
+
+/* VD_GPU */
+#define RK3399_PD_GPU 35
+
+/* VD_PMU */
+#define RK3399_PD_PMU 36
+
+#endif
diff --git a/include/keys/asymmetric-subtype.h b/include/keys/asymmetric-subtype.h
index 4915d40d3c3c..2480469ce8fb 100644
--- a/include/keys/asymmetric-subtype.h
+++ b/include/keys/asymmetric-subtype.h
@@ -32,7 +32,7 @@ struct asymmetric_key_subtype {
void (*describe)(const struct key *key, struct seq_file *m);
/* Destroy a key of this subtype */
- void (*destroy)(void *payload);
+ void (*destroy)(void *payload_crypto, void *payload_auth);
/* Verify the signature on a key of this subtype (optional) */
int (*verify_signature)(const struct key *key,
diff --git a/include/keys/asymmetric-type.h b/include/keys/asymmetric-type.h
index 59c1df9cf922..b38240716d41 100644
--- a/include/keys/asymmetric-type.h
+++ b/include/keys/asymmetric-type.h
@@ -15,6 +15,7 @@
#define _KEYS_ASYMMETRIC_TYPE_H
#include <linux/key-type.h>
+#include <linux/verification.h>
extern struct key_type key_type_asymmetric;
@@ -23,9 +24,10 @@ extern struct key_type key_type_asymmetric;
* follows:
*/
enum asymmetric_payload_bits {
- asym_crypto,
- asym_subtype,
- asym_key_ids,
+ asym_crypto, /* The data representing the key */
+ asym_subtype, /* Pointer to an asymmetric_key_subtype struct */
+ asym_key_ids, /* Pointer to an asymmetric_key_ids struct */
+ asym_auth /* The key's authorisation (signature, parent key ID) */
};
/*
@@ -74,6 +76,11 @@ const struct asymmetric_key_ids *asymmetric_key_ids(const struct key *key)
return key->payload.data[asym_key_ids];
}
+extern struct key *find_asymmetric_key(struct key *keyring,
+ const struct asymmetric_key_id *id_0,
+ const struct asymmetric_key_id *id_1,
+ bool partial);
+
/*
* The payload is at the discretion of the subtype.
*/
diff --git a/include/keys/system_keyring.h b/include/keys/system_keyring.h
index 39fd38cfa8c9..fbd4647767e9 100644
--- a/include/keys/system_keyring.h
+++ b/include/keys/system_keyring.h
@@ -12,51 +12,40 @@
#ifndef _KEYS_SYSTEM_KEYRING_H
#define _KEYS_SYSTEM_KEYRING_H
+#include <linux/key.h>
+
#ifdef CONFIG_SYSTEM_TRUSTED_KEYRING
-#include <linux/key.h>
-#include <crypto/public_key.h>
+extern int restrict_link_by_builtin_trusted(struct key *keyring,
+ const struct key_type *type,
+ const union key_payload *payload);
-extern struct key *system_trusted_keyring;
-static inline struct key *get_system_trusted_keyring(void)
-{
- return system_trusted_keyring;
-}
#else
-static inline struct key *get_system_trusted_keyring(void)
-{
- return NULL;
-}
+#define restrict_link_by_builtin_trusted restrict_link_reject
#endif
-#ifdef CONFIG_SYSTEM_DATA_VERIFICATION
-extern int system_verify_data(const void *data, unsigned long len,
- const void *raw_pkcs7, size_t pkcs7_len,
- enum key_being_used_for usage);
+#ifdef CONFIG_SECONDARY_TRUSTED_KEYRING
+extern int restrict_link_by_builtin_and_secondary_trusted(
+ struct key *keyring,
+ const struct key_type *type,
+ const union key_payload *payload);
+#else
+#define restrict_link_by_builtin_and_secondary_trusted restrict_link_by_builtin_trusted
#endif
-#ifdef CONFIG_IMA_MOK_KEYRING
-extern struct key *ima_mok_keyring;
+#ifdef CONFIG_IMA_BLACKLIST_KEYRING
extern struct key *ima_blacklist_keyring;
-static inline struct key *get_ima_mok_keyring(void)
-{
- return ima_mok_keyring;
-}
static inline struct key *get_ima_blacklist_keyring(void)
{
return ima_blacklist_keyring;
}
#else
-static inline struct key *get_ima_mok_keyring(void)
-{
- return NULL;
-}
static inline struct key *get_ima_blacklist_keyring(void)
{
return NULL;
}
-#endif /* CONFIG_IMA_MOK_KEYRING */
+#endif /* CONFIG_IMA_BLACKLIST_KEYRING */
#endif /* _KEYS_SYSTEM_KEYRING_H */
diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h
index 281caf847fad..be6037aa703d 100644
--- a/include/kvm/arm_vgic.h
+++ b/include/kvm/arm_vgic.h
@@ -25,6 +25,7 @@
#include <linux/spinlock.h>
#include <linux/types.h>
#include <kvm/iodev.h>
+#include <linux/irqchip/arm-gic-common.h>
#define VGIC_NR_IRQS_LEGACY 256
#define VGIC_NR_SGIS 16
@@ -353,15 +354,15 @@ bool kvm_vgic_map_is_active(struct kvm_vcpu *vcpu, struct irq_phys_map *map);
#define vgic_initialized(k) (!!((k)->arch.vgic.nr_cpus))
#define vgic_ready(k) ((k)->arch.vgic.ready)
-int vgic_v2_probe(struct device_node *vgic_node,
+int vgic_v2_probe(const struct gic_kvm_info *gic_kvm_info,
const struct vgic_ops **ops,
const struct vgic_params **params);
#ifdef CONFIG_KVM_ARM_VGIC_V3
-int vgic_v3_probe(struct device_node *vgic_node,
+int vgic_v3_probe(const struct gic_kvm_info *gic_kvm_info,
const struct vgic_ops **ops,
const struct vgic_params **params);
#else
-static inline int vgic_v3_probe(struct device_node *vgic_node,
+static inline int vgic_v3_probe(const struct gic_kvm_info *gic_kvm_info,
const struct vgic_ops **ops,
const struct vgic_params **params)
{
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index 06ed7e54033e..288fac5294f5 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -190,14 +190,6 @@ static inline int acpi_debugger_notify_command_complete(void)
}
#endif
-#ifdef CONFIG_ACPI_INITRD_TABLE_OVERRIDE
-void acpi_initrd_override(void *data, size_t size);
-#else
-static inline void acpi_initrd_override(void *data, size_t size)
-{
-}
-#endif
-
#define BAD_MADT_ENTRY(entry, end) ( \
(!entry) || (unsigned long)entry + sizeof(*entry) > end || \
((struct acpi_subtable_header *)entry)->length < sizeof(*entry))
@@ -216,6 +208,7 @@ void acpi_boot_table_init (void);
int acpi_mps_check (void);
int acpi_numa_init (void);
+void early_acpi_table_init(void *data, size_t size);
int acpi_table_init (void);
int acpi_table_parse(char *id, acpi_tbl_table_handler handler);
int __init acpi_parse_entries(char *id, unsigned long table_size,
@@ -278,6 +271,7 @@ void acpi_irq_stats_init(void);
extern u32 acpi_irq_handled;
extern u32 acpi_irq_not_handled;
extern unsigned int acpi_sci_irq;
+extern bool acpi_no_s5;
#define INVALID_ACPI_IRQ ((unsigned)-1)
static inline bool acpi_sci_irq_valid(void)
{
@@ -311,7 +305,6 @@ struct pci_dev;
int acpi_pci_irq_enable (struct pci_dev *dev);
void acpi_penalize_isa_irq(int irq, int active);
bool acpi_isa_irq_available(int irq);
-void acpi_penalize_sci_irq(int irq, int trigger, int polarity);
void acpi_pci_irq_disable (struct pci_dev *dev);
extern int ec_read(u8 addr, u8 *val);
@@ -359,7 +352,6 @@ extern bool wmi_has_guid(const char *guid);
extern char acpi_video_backlight_string[];
extern long acpi_is_video_device(acpi_handle handle);
extern int acpi_blacklisted(void);
-extern void acpi_dmi_osi_linux(int enable, const struct dmi_system_id *d);
extern void acpi_osi_setup(char *str);
extern bool acpi_osi_is_win8(void);
@@ -596,6 +588,7 @@ static inline const char *acpi_dev_name(struct acpi_device *adev)
return NULL;
}
+static inline void early_acpi_table_init(void *data, size_t size) { }
static inline void acpi_early_init(void) { }
static inline void acpi_subsystem_init(void) { }
diff --git a/include/linux/amba/pl08x.h b/include/linux/amba/pl08x.h
index 10fe2a211c2e..27e9ec8778eb 100644
--- a/include/linux/amba/pl08x.h
+++ b/include/linux/amba/pl08x.h
@@ -86,7 +86,7 @@ struct pl08x_channel_data {
* @mem_buses: buses which memory can be accessed from: PL08X_AHB1 | PL08X_AHB2
*/
struct pl08x_platform_data {
- const struct pl08x_channel_data *slave_channels;
+ struct pl08x_channel_data *slave_channels;
unsigned int num_slave_channels;
struct pl08x_channel_data memcpy_channel;
int (*get_xfer_signal)(const struct pl08x_channel_data *);
diff --git a/include/linux/apple-gmux.h b/include/linux/apple-gmux.h
index b2d32e01dfe4..714186de8c36 100644
--- a/include/linux/apple-gmux.h
+++ b/include/linux/apple-gmux.h
@@ -35,7 +35,7 @@
*/
static inline bool apple_gmux_present(void)
{
- return acpi_dev_present(GMUX_ACPI_HID);
+ return acpi_dev_found(GMUX_ACPI_HID);
}
#else /* !CONFIG_APPLE_GMUX */
diff --git a/include/linux/ata.h b/include/linux/ata.h
index c1a2f345cbe6..f310ec0f072e 100644
--- a/include/linux/ata.h
+++ b/include/linux/ata.h
@@ -371,7 +371,8 @@ enum {
SETFEATURES_AAM_ON = 0x42,
SETFEATURES_AAM_OFF = 0xC2,
- SETFEATURES_SPINUP = 0x07, /* Spin-up drive */
+ SETFEATURES_SPINUP = 0x07, /* Spin-up drive */
+ SETFEATURES_SPINUP_TIMEOUT = 30000, /* 30s timeout for drive spin-up from PUIS */
SETFEATURES_SATA_ENABLE = 0x10, /* Enable use of SATA feature */
SETFEATURES_SATA_DISABLE = 0x90, /* Disable use of SATA feature */
diff --git a/include/linux/ath9k_platform.h b/include/linux/ath9k_platform.h
index 33eb274cd0e6..e66153d60bd5 100644
--- a/include/linux/ath9k_platform.h
+++ b/include/linux/ath9k_platform.h
@@ -31,6 +31,10 @@ struct ath9k_platform_data {
u32 gpio_mask;
u32 gpio_val;
+ u32 bt_active_pin;
+ u32 bt_priority_pin;
+ u32 wlan_active_pin;
+
bool endian_check;
bool is_clk_25mhz;
bool tx_gain_buffalo;
diff --git a/include/linux/atomic.h b/include/linux/atomic.h
index df4f369254c0..e451534fe54d 100644
--- a/include/linux/atomic.h
+++ b/include/linux/atomic.h
@@ -559,25 +559,25 @@ static inline int atomic_dec_if_positive(atomic_t *v)
#endif
/**
- * fetch_or - perform *ptr |= mask and return old value of *ptr
- * @ptr: pointer to value
- * @mask: mask to OR on the value
- *
- * cmpxchg based fetch_or, macro so it works for different integer types
+ * atomic_fetch_or - perform *p |= mask and return old value of *p
+ * @mask: mask to OR on the atomic_t
+ * @p: pointer to atomic_t
*/
-#ifndef fetch_or
-#define fetch_or(ptr, mask) \
-({ typeof(*(ptr)) __old, __val = *(ptr); \
- for (;;) { \
- __old = cmpxchg((ptr), __val, __val | (mask)); \
- if (__old == __val) \
- break; \
- __val = __old; \
- } \
- __old; \
-})
-#endif
+#ifndef atomic_fetch_or
+static inline int atomic_fetch_or(int mask, atomic_t *p)
+{
+ int old, val = atomic_read(p);
+
+ for (;;) {
+ old = atomic_cmpxchg(p, val, val | mask);
+ if (old == val)
+ break;
+ val = old;
+ }
+ return old;
+}
+#endif
#ifdef CONFIG_GENERIC_ATOMIC64
#include <asm-generic/atomic64.h>
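The hunk above replaces the type-generic fetch_or() macro with an int-only atomic_fetch_or() built on an atomic_cmpxchg() retry loop; the #ifndef guard still lets an architecture provide its own native implementation. As a rough stand-alone illustration of that retry loop, here is a user-space analogue using C11 atomics (the names and memory-ordering details are an approximation, not the kernel helper itself):

#include <stdatomic.h>
#include <stdio.h>

static int fetch_or_demo(atomic_int *p, int mask)
{
	int old = atomic_load(p);

	/* retry until no concurrent writer slipped in between load and CAS;
	 * on failure the CAS reloads 'old' with the current value */
	while (!atomic_compare_exchange_weak(p, &old, old | mask))
		;
	return old;
}

int main(void)
{
	atomic_int v;

	atomic_store(&v, 0x3);

	int old = fetch_or_demo(&v, 0x4);

	printf("old=%#x new=%#x\n", old, (int)atomic_load(&v));
	return 0;	/* prints old=0x3 new=0x7 */
}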
diff --git a/include/linux/audit.h b/include/linux/audit.h
index e38e3fc13ea8..961a417d641e 100644
--- a/include/linux/audit.h
+++ b/include/linux/audit.h
@@ -26,6 +26,7 @@
#include <linux/sched.h>
#include <linux/ptrace.h>
#include <uapi/linux/audit.h>
+#include <linux/tty.h>
#define AUDIT_INO_UNSET ((unsigned long)-1)
#define AUDIT_DEV_UNSET ((dev_t)-1)
@@ -347,6 +348,23 @@ static inline unsigned int audit_get_sessionid(struct task_struct *tsk)
return tsk->sessionid;
}
+static inline struct tty_struct *audit_get_tty(struct task_struct *tsk)
+{
+ struct tty_struct *tty = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&tsk->sighand->siglock, flags);
+ if (tsk->signal)
+ tty = tty_kref_get(tsk->signal->tty);
+ spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
+ return tty;
+}
+
+static inline void audit_put_tty(struct tty_struct *tty)
+{
+ tty_kref_put(tty);
+}
+
extern void __audit_ipc_obj(struct kern_ipc_perm *ipcp);
extern void __audit_ipc_set_perm(unsigned long qbytes, uid_t uid, gid_t gid, umode_t mode);
extern void __audit_bprm(struct linux_binprm *bprm);
@@ -504,6 +522,12 @@ static inline unsigned int audit_get_sessionid(struct task_struct *tsk)
{
return -1;
}
+static inline struct tty_struct *audit_get_tty(struct task_struct *tsk)
+{
+ return NULL;
+}
+static inline void audit_put_tty(struct tty_struct *tty)
+{ }
static inline void audit_ipc_obj(struct kern_ipc_perm *ipcp)
{ }
static inline void audit_ipc_set_perm(unsigned long qbytes, uid_t uid,
diff --git a/include/linux/backing-dev-defs.h b/include/linux/backing-dev-defs.h
index 1b4d69f68c33..3f103076d0bf 100644
--- a/include/linux/backing-dev-defs.h
+++ b/include/linux/backing-dev-defs.h
@@ -135,7 +135,7 @@ struct bdi_writeback {
struct backing_dev_info {
struct list_head bdi_list;
- unsigned long ra_pages; /* max readahead in PAGE_CACHE_SIZE units */
+ unsigned long ra_pages; /* max readahead in PAGE_SIZE units */
unsigned int capabilities; /* Device capabilities */
congested_fn *congested_fn; /* Function pointer if device is md/dm */
void *congested_data; /* Pointer to aux data for congested func */
diff --git a/include/linux/backlight.h b/include/linux/backlight.h
index 1e7a69adbe6f..5f2fd61ef4fb 100644
--- a/include/linux/backlight.h
+++ b/include/linux/backlight.h
@@ -141,9 +141,10 @@ extern void devm_backlight_device_unregister(struct device *dev,
struct backlight_device *bd);
extern void backlight_force_update(struct backlight_device *bd,
enum backlight_update_reason reason);
-extern bool backlight_device_registered(enum backlight_type type);
extern int backlight_register_notifier(struct notifier_block *nb);
extern int backlight_unregister_notifier(struct notifier_block *nb);
+extern struct backlight_device *backlight_device_get_by_type(enum backlight_type type);
+extern int backlight_device_set_brightness(struct backlight_device *bd, unsigned long brightness);
#define to_backlight_device(obj) container_of(obj, struct backlight_device, dev)
diff --git a/include/linux/bcm47xx_sprom.h b/include/linux/bcm47xx_sprom.h
new file mode 100644
index 000000000000..c06b47c84e1a
--- /dev/null
+++ b/include/linux/bcm47xx_sprom.h
@@ -0,0 +1,24 @@
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#ifndef __BCM47XX_SPROM_H
+#define __BCM47XX_SPROM_H
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/vmalloc.h>
+
+#ifdef CONFIG_BCM47XX_SPROM
+int bcm47xx_sprom_register_fallbacks(void);
+#else
+static inline int bcm47xx_sprom_register_fallbacks(void)
+{
+ return -ENOTSUPP;
+};
+#endif
+
+#endif /* __BCM47XX_SPROM_H */
diff --git a/include/linux/bio.h b/include/linux/bio.h
index 88bc64f00bb5..9faebf7f9a33 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -41,7 +41,7 @@
#endif
#define BIO_MAX_PAGES 256
-#define BIO_MAX_SIZE (BIO_MAX_PAGES << PAGE_CACHE_SHIFT)
+#define BIO_MAX_SIZE (BIO_MAX_PAGES << PAGE_SHIFT)
#define BIO_MAX_SECTORS (BIO_MAX_SIZE >> 9)
/*
@@ -703,6 +703,17 @@ static inline struct bio *bio_list_get(struct bio_list *bl)
}
/*
+ * Increment chain count for the bio. Make sure the CHAIN flag update
+ * is visible before the raised count.
+ */
+static inline void bio_inc_remaining(struct bio *bio)
+{
+ bio_set_flag(bio, BIO_CHAIN);
+ smp_mb__before_atomic();
+ atomic_inc(&bio->__bi_remaining);
+}
+
+/*
* bio_set is used to allow other portions of the IO system to
* allocate their own private memory pools for bio and iovec structures.
* These memory pools in turn all allocate from the bio_slab
diff --git a/include/linux/bitops.h b/include/linux/bitops.h
index defeaac0745f..299e76b59fe9 100644
--- a/include/linux/bitops.h
+++ b/include/linux/bitops.h
@@ -227,6 +227,22 @@ static inline unsigned long __ffs64(u64 word)
})
#endif
+#ifndef bit_clear_unless
+#define bit_clear_unless(ptr, _clear, _test) \
+({ \
+ const typeof(*ptr) clear = (_clear), test = (_test); \
+ typeof(*ptr) old, new; \
+ \
+ do { \
+ old = ACCESS_ONCE(*ptr); \
+ new = old & ~clear; \
+ } while (!(old & test) && \
+ cmpxchg(ptr, old, new) != old); \
+ \
+ !(old & test); \
+})
+#endif
+
#ifndef find_last_bit
/**
* find_last_bit - find the last set bit in a memory region
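The new bit_clear_unless() helper clears the requested bits with a cmpxchg() retry loop, but only while none of the test bits are set, and evaluates to true when the test bits were clear. A single-threaded sketch of that decision logic (the kernel macro adds atomicity via cmpxchg(); the names here are illustrative only):

#include <stdbool.h>
#include <stdio.h>

static bool bit_clear_unless_demo(unsigned long *ptr,
				  unsigned long clear, unsigned long test)
{
	unsigned long old = *ptr;

	/* clear the bits only if none of the test bits are set */
	if (!(old & test))
		*ptr = old & ~clear;
	return !(old & test);
}

int main(void)
{
	unsigned long flags = 0x5;

	/* test bit 0x2 is not set, so 0x4 is cleared and true is returned */
	printf("%d 0x%lx\n", bit_clear_unless_demo(&flags, 0x4, 0x2), flags);
	return 0;	/* prints 1 0x1 */
}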
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index 9ac9799b702b..2498fdf3a503 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -238,8 +238,8 @@ void blk_mq_start_hw_queues(struct request_queue *q);
void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async);
void blk_mq_run_hw_queues(struct request_queue *q, bool async);
void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs);
-void blk_mq_all_tag_busy_iter(struct blk_mq_tags *tags, busy_tag_iter_fn *fn,
- void *priv);
+void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset,
+ busy_tag_iter_fn *fn, void *priv);
void blk_mq_freeze_queue(struct request_queue *q);
void blk_mq_unfreeze_queue(struct request_queue *q);
void blk_mq_freeze_queue_start(struct request_queue *q);
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index 86a38ea1823f..77e5d81f07aa 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -208,7 +208,7 @@ enum rq_flag_bits {
#define REQ_COMMON_MASK \
(REQ_WRITE | REQ_FAILFAST_MASK | REQ_SYNC | REQ_META | REQ_PRIO | \
REQ_DISCARD | REQ_WRITE_SAME | REQ_NOIDLE | REQ_FLUSH | REQ_FUA | \
- REQ_SECURE | REQ_INTEGRITY)
+ REQ_SECURE | REQ_INTEGRITY | REQ_NOMERGE)
#define REQ_CLONE_MASK REQ_COMMON_MASK
#define BIO_NO_ADVANCE_ITER_MASK (REQ_DISCARD|REQ_WRITE_SAME)
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 7e5d7e018bea..1fd8fdff2f81 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -433,8 +433,6 @@ struct request_queue {
/*
* for flush operations
*/
- unsigned int flush_flags;
- unsigned int flush_not_queueable:1;
struct blk_flush_queue *fq;
struct list_head requeue_list;
@@ -491,6 +489,9 @@ struct request_queue {
#define QUEUE_FLAG_INIT_DONE 20 /* queue is initialized */
#define QUEUE_FLAG_NO_SG_MERGE 21 /* don't attempt to merge SG segments*/
#define QUEUE_FLAG_POLL 22 /* IO polling enabled if set */
+#define QUEUE_FLAG_WC 23 /* Write back caching */
+#define QUEUE_FLAG_FUA 24 /* device supports FUA writes */
+#define QUEUE_FLAG_FLUSH_NQ 25 /* flush not queueuable */
#define QUEUE_FLAG_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \
(1 << QUEUE_FLAG_STACKABLE) | \
@@ -779,7 +780,7 @@ extern struct request *blk_make_request(struct request_queue *, struct bio *,
extern void blk_rq_set_block_pc(struct request *);
extern void blk_requeue_request(struct request_queue *, struct request *);
extern void blk_add_request_payload(struct request *rq, struct page *page,
- unsigned int len);
+ int offset, unsigned int len);
extern int blk_lld_busy(struct request_queue *q);
extern int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
struct bio_set *bs, gfp_t gfp_mask,
@@ -1007,8 +1008,8 @@ extern void blk_queue_update_dma_alignment(struct request_queue *, int);
extern void blk_queue_softirq_done(struct request_queue *, softirq_done_fn *);
extern void blk_queue_rq_timed_out(struct request_queue *, rq_timed_out_fn *);
extern void blk_queue_rq_timeout(struct request_queue *, unsigned int);
-extern void blk_queue_flush(struct request_queue *q, unsigned int flush);
extern void blk_queue_flush_queueable(struct request_queue *q, bool queueable);
+extern void blk_queue_write_cache(struct request_queue *q, bool enabled, bool fua);
extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev);
extern int blk_rq_map_sg(struct request_queue *, struct request *, struct scatterlist *);
@@ -1128,6 +1129,8 @@ static inline struct request *blk_map_queue_find_tag(struct blk_queue_tag *bqt,
extern int blkdev_issue_flush(struct block_device *, gfp_t, sector_t *);
extern int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
sector_t nr_sects, gfp_t gfp_mask, unsigned long flags);
+extern int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
+ sector_t nr_sects, gfp_t gfp_mask, int type, struct bio **biop);
extern int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
sector_t nr_sects, gfp_t gfp_mask, struct page *page);
extern int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
@@ -1363,7 +1366,7 @@ static inline unsigned int block_size(struct block_device *bdev)
static inline bool queue_flush_queueable(struct request_queue *q)
{
- return !q->flush_not_queueable;
+ return !test_bit(QUEUE_FLAG_FLUSH_NQ, &q->queue_flags);
}
typedef struct {struct page *v;} Sector;
@@ -1372,7 +1375,7 @@ unsigned char *read_dev_sector(struct block_device *, sector_t, Sector *);
static inline void put_dev_sector(Sector p)
{
- page_cache_release(p.v);
+ put_page(p.v);
}
static inline bool __bvec_gap_to_prev(struct request_queue *q,
diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
index afc1343df3c7..0f3172b8b225 100644
--- a/include/linux/blktrace_api.h
+++ b/include/linux/blktrace_api.h
@@ -57,6 +57,14 @@ void __trace_note_message(struct blk_trace *, const char *fmt, ...);
} while (0)
#define BLK_TN_MAX_MSG 128
+static inline bool blk_trace_note_message_enabled(struct request_queue *q)
+{
+ struct blk_trace *bt = q->blk_trace;
+ if (likely(!bt))
+ return false;
+ return bt->act_mask & BLK_TC_NOTIFY;
+}
+
extern void blk_add_driver_data(struct request_queue *q, struct request *rq,
void *data, size_t len);
extern int blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
@@ -79,6 +87,7 @@ extern struct attribute_group blk_trace_attr_group;
# define blk_trace_remove(q) (-ENOTTY)
# define blk_add_trace_msg(q, fmt, ...) do { } while (0)
# define blk_trace_remove_sysfs(dev) do { } while (0)
+# define blk_trace_note_message_enabled(q) (false)
static inline int blk_trace_init_sysfs(struct device *dev)
{
return 0;
diff --git a/include/linux/bootmem.h b/include/linux/bootmem.h
index 35b22f94d2d2..f9be32691718 100644
--- a/include/linux/bootmem.h
+++ b/include/linux/bootmem.h
@@ -83,34 +83,34 @@ extern void *__alloc_bootmem(unsigned long size,
unsigned long goal);
extern void *__alloc_bootmem_nopanic(unsigned long size,
unsigned long align,
- unsigned long goal);
+ unsigned long goal) __malloc;
extern void *__alloc_bootmem_node(pg_data_t *pgdat,
unsigned long size,
unsigned long align,
- unsigned long goal);
+ unsigned long goal) __malloc;
void *__alloc_bootmem_node_high(pg_data_t *pgdat,
unsigned long size,
unsigned long align,
- unsigned long goal);
+ unsigned long goal) __malloc;
extern void *__alloc_bootmem_node_nopanic(pg_data_t *pgdat,
unsigned long size,
unsigned long align,
- unsigned long goal);
+ unsigned long goal) __malloc;
void *___alloc_bootmem_node_nopanic(pg_data_t *pgdat,
unsigned long size,
unsigned long align,
unsigned long goal,
- unsigned long limit);
+ unsigned long limit) __malloc;
extern void *__alloc_bootmem_low(unsigned long size,
unsigned long align,
- unsigned long goal);
+ unsigned long goal) __malloc;
void *__alloc_bootmem_low_nopanic(unsigned long size,
unsigned long align,
- unsigned long goal);
+ unsigned long goal) __malloc;
extern void *__alloc_bootmem_low_node(pg_data_t *pgdat,
unsigned long size,
unsigned long align,
- unsigned long goal);
+ unsigned long goal) __malloc;
#ifdef CONFIG_NO_BOOTMEM
/* We are using top down, so it is safe to use 0 here */
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 21ee41b92e8a..8ee27b8afe81 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -66,6 +66,11 @@ enum bpf_arg_type {
* functions that access data on eBPF program stack
*/
ARG_PTR_TO_STACK, /* any pointer to eBPF program stack */
+ ARG_PTR_TO_RAW_STACK, /* any pointer to eBPF program stack, area does not
+ * need to be initialized, helper function must fill
+ * all bytes or clear them in error case.
+ */
+
ARG_CONST_STACK_SIZE, /* number of bytes accessed from stack */
ARG_CONST_STACK_SIZE_OR_ZERO, /* number of bytes accessed from stack or 0 */
@@ -131,6 +136,7 @@ struct bpf_prog_type_list {
struct bpf_prog_aux {
atomic_t refcnt;
u32 used_map_cnt;
+ u32 max_ctx_offset;
const struct bpf_verifier_ops *ops;
struct bpf_map **used_maps;
struct bpf_prog *prog;
@@ -160,9 +166,12 @@ struct bpf_array {
#define MAX_TAIL_CALL_CNT 32
u64 bpf_tail_call(u64 ctx, u64 r2, u64 index, u64 r4, u64 r5);
+u64 bpf_get_stackid(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
void bpf_fd_array_map_clear(struct bpf_map *map);
bool bpf_prog_array_compatible(struct bpf_array *array, const struct bpf_prog *fp);
+
const struct bpf_func_proto *bpf_get_trace_printk_proto(void);
+const struct bpf_func_proto *bpf_get_event_output_proto(void);
#ifdef CONFIG_BPF_SYSCALL
DECLARE_PER_CPU(int, bpf_prog_active);
@@ -171,12 +180,13 @@ void bpf_register_prog_type(struct bpf_prog_type_list *tl);
void bpf_register_map_type(struct bpf_map_type_list *tl);
struct bpf_prog *bpf_prog_get(u32 ufd);
+struct bpf_prog *bpf_prog_inc(struct bpf_prog *prog);
void bpf_prog_put(struct bpf_prog *prog);
void bpf_prog_put_rcu(struct bpf_prog *prog);
struct bpf_map *bpf_map_get_with_uref(u32 ufd);
struct bpf_map *__bpf_map_get(struct fd f);
-void bpf_map_inc(struct bpf_map *map, bool uref);
+struct bpf_map *bpf_map_inc(struct bpf_map *map, bool uref);
void bpf_map_put_with_uref(struct bpf_map *map);
void bpf_map_put(struct bpf_map *map);
int bpf_map_precharge_memlock(u32 pages);
diff --git a/include/linux/brcmphy.h b/include/linux/brcmphy.h
index f0ba9c2ec639..e3354b74286c 100644
--- a/include/linux/brcmphy.h
+++ b/include/linux/brcmphy.h
@@ -24,6 +24,8 @@
#define PHY_ID_BCM7250 0xae025280
#define PHY_ID_BCM7364 0xae025260
#define PHY_ID_BCM7366 0x600d8490
+#define PHY_ID_BCM7346 0x600d8650
+#define PHY_ID_BCM7362 0x600d84b0
#define PHY_ID_BCM7425 0x600d86b0
#define PHY_ID_BCM7429 0x600d8730
#define PHY_ID_BCM7435 0x600d8750
diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h
index c67f052cc5e5..d48daa3f6f20 100644
--- a/include/linux/buffer_head.h
+++ b/include/linux/buffer_head.h
@@ -43,7 +43,7 @@ enum bh_state_bits {
*/
};
-#define MAX_BUF_PER_PAGE (PAGE_CACHE_SIZE / 512)
+#define MAX_BUF_PER_PAGE (PAGE_SIZE / 512)
struct page;
struct buffer_head;
@@ -263,7 +263,7 @@ void buffer_init(void);
static inline void attach_page_buffers(struct page *page,
struct buffer_head *head)
{
- page_cache_get(page);
+ get_page(page);
SetPagePrivate(page);
set_page_private(page, (unsigned long)head);
}
diff --git a/include/linux/can/dev.h b/include/linux/can/dev.h
index 735f9f8c4e43..5261751f6bd4 100644
--- a/include/linux/can/dev.h
+++ b/include/linux/can/dev.h
@@ -40,8 +40,11 @@ struct can_priv {
struct can_clock clock;
enum can_state state;
- u32 ctrlmode;
- u32 ctrlmode_supported;
+
+ /* CAN controller features - see include/uapi/linux/can/netlink.h */
+ u32 ctrlmode; /* current options setting */
+ u32 ctrlmode_supported; /* options that can be modified by netlink */
+ u32 ctrlmode_static; /* static enabled options for driver/hardware */
int restart_ms;
struct timer_list restart_timer;
@@ -108,6 +111,21 @@ static inline bool can_is_canfd_skb(const struct sk_buff *skb)
return skb->len == CANFD_MTU;
}
+/* helper to define static CAN controller features at device creation time */
+static inline void can_set_static_ctrlmode(struct net_device *dev,
+ u32 static_mode)
+{
+ struct can_priv *priv = netdev_priv(dev);
+
+ /* alloc_candev() succeeded => netdev_priv() is valid at this point */
+ priv->ctrlmode = static_mode;
+ priv->ctrlmode_static = static_mode;
+
+ /* override MTU which was set by default in can_setup()? */
+ if (static_mode & CAN_CTRLMODE_FD)
+ dev->mtu = CANFD_MTU;
+}
+
/* get data length from can_dlc with sanitized can_dlc */
u8 can_dlc2len(u8 can_dlc);
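The comment in the hunk above says the helper is meant to be called at device creation time, once alloc_candev() has made netdev_priv() valid. A minimal kernel-context sketch of that call order (a probe fragment, not a stand-alone program; the private struct and the choice of CAN_CTRLMODE_3_SAMPLES are assumptions for illustration only):

/* hypothetical driver probe fragment - illustration only */
struct net_device *dev;

dev = alloc_candev(sizeof(struct my_priv), 1);	/* my_priv is made up */
if (!dev)
	return -ENOMEM;

/* hardware always triple-samples; expose that as a fixed, static option */
can_set_static_ctrlmode(dev, CAN_CTRLMODE_3_SAMPLES);

return register_candev(dev);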
diff --git a/include/linux/ccp.h b/include/linux/ccp.h
index 915af3095b39..7c2bb27c067c 100644
--- a/include/linux/ccp.h
+++ b/include/linux/ccp.h
@@ -1,9 +1,10 @@
/*
* AMD Cryptographic Coprocessor (CCP) driver
*
- * Copyright (C) 2013 Advanced Micro Devices, Inc.
+ * Copyright (C) 2013,2016 Advanced Micro Devices, Inc.
*
* Author: Tom Lendacky <thomas.lendacky@amd.com>
+ * Author: Gary R Hook <gary.hook@amd.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -381,6 +382,35 @@ struct ccp_passthru_engine {
u32 final;
};
+/**
+ * struct ccp_passthru_nomap_engine - CCP pass-through operation
+ * without performing DMA mapping
+ * @bit_mod: bitwise operation to perform
+ * @byte_swap: byteswap operation to perform
+ * @mask: mask to be applied to data
+ * @mask_len: length in bytes of mask
+ * @src: data to be used for this operation
+ * @dst: data produced by this operation
+ * @src_len: length in bytes of data used for this operation
+ * @final: indicate final pass-through operation
+ *
+ * Variables required to be set when calling ccp_enqueue_cmd():
+ * - bit_mod, byte_swap, src, dst, src_len
+ * - mask, mask_len if bit_mod is not CCP_PASSTHRU_BITWISE_NOOP
+ */
+struct ccp_passthru_nomap_engine {
+ enum ccp_passthru_bitwise bit_mod;
+ enum ccp_passthru_byteswap byte_swap;
+
+ dma_addr_t mask;
+ u32 mask_len; /* In bytes */
+
+ dma_addr_t src_dma, dst_dma;
+ u64 src_len; /* In bytes */
+
+ u32 final;
+};
+
/***** ECC engine *****/
#define CCP_ECC_MODULUS_BYTES 48 /* 384-bits */
#define CCP_ECC_MAX_OPERANDS 6
@@ -522,7 +552,8 @@ enum ccp_engine {
};
/* Flag values for flags member of ccp_cmd */
-#define CCP_CMD_MAY_BACKLOG 0x00000001
+#define CCP_CMD_MAY_BACKLOG 0x00000001
+#define CCP_CMD_PASSTHRU_NO_DMA_MAP 0x00000002
/**
* struct ccp_cmd - CPP operation request
@@ -562,6 +593,7 @@ struct ccp_cmd {
struct ccp_sha_engine sha;
struct ccp_rsa_engine rsa;
struct ccp_passthru_engine passthru;
+ struct ccp_passthru_nomap_engine passthru_nomap;
struct ccp_ecc_engine ecc;
} u;
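The kernel-doc for ccp_passthru_nomap_engine above lists the fields a caller must fill before submitting the request with ccp_enqueue_cmd(). A heavily abridged kernel-context sketch of such a submission (not buildable on its own; src_dma_addr, dst_dma_addr, len and ret are placeholders supplied by the caller):

struct ccp_cmd cmd = {};

cmd.engine = CCP_ENGINE_PASSTHRU;
cmd.flags  = CCP_CMD_PASSTHRU_NO_DMA_MAP;	/* caller already DMA-mapped */

cmd.u.passthru_nomap.bit_mod   = CCP_PASSTHRU_BITWISE_NOOP;
cmd.u.passthru_nomap.byte_swap = CCP_PASSTHRU_BYTESWAP_NOOP;
cmd.u.passthru_nomap.src_dma   = src_dma_addr;	/* placeholder dma_addr_t */
cmd.u.passthru_nomap.dst_dma   = dst_dma_addr;	/* placeholder dma_addr_t */
cmd.u.passthru_nomap.src_len   = len;
cmd.u.passthru_nomap.final     = 1;

ret = ccp_enqueue_cmd(&cmd);	/* asynchronous; completes via cmd callback */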
diff --git a/include/linux/ceph/auth.h b/include/linux/ceph/auth.h
index 260d78b587c4..1563265d2097 100644
--- a/include/linux/ceph/auth.h
+++ b/include/linux/ceph/auth.h
@@ -12,9 +12,12 @@
*/
struct ceph_auth_client;
-struct ceph_authorizer;
struct ceph_msg;
+struct ceph_authorizer {
+ void (*destroy)(struct ceph_authorizer *);
+};
+
struct ceph_auth_handshake {
struct ceph_authorizer *authorizer;
void *authorizer_buf;
@@ -62,8 +65,6 @@ struct ceph_auth_client_ops {
struct ceph_auth_handshake *auth);
int (*verify_authorizer_reply)(struct ceph_auth_client *ac,
struct ceph_authorizer *a, size_t len);
- void (*destroy_authorizer)(struct ceph_auth_client *ac,
- struct ceph_authorizer *a);
void (*invalidate_authorizer)(struct ceph_auth_client *ac,
int peer_type);
@@ -112,8 +113,7 @@ extern int ceph_auth_is_authenticated(struct ceph_auth_client *ac);
extern int ceph_auth_create_authorizer(struct ceph_auth_client *ac,
int peer_type,
struct ceph_auth_handshake *auth);
-extern void ceph_auth_destroy_authorizer(struct ceph_auth_client *ac,
- struct ceph_authorizer *a);
+void ceph_auth_destroy_authorizer(struct ceph_authorizer *a);
extern int ceph_auth_update_authorizer(struct ceph_auth_client *ac,
int peer_type,
struct ceph_auth_handshake *a);
diff --git a/include/linux/ceph/libceph.h b/include/linux/ceph/libceph.h
index e7975e4681e1..db92a8d4926e 100644
--- a/include/linux/ceph/libceph.h
+++ b/include/linux/ceph/libceph.h
@@ -176,8 +176,8 @@ extern void ceph_put_snap_context(struct ceph_snap_context *sc);
*/
static inline int calc_pages_for(u64 off, u64 len)
{
- return ((off+len+PAGE_CACHE_SIZE-1) >> PAGE_CACHE_SHIFT) -
- (off >> PAGE_CACHE_SHIFT);
+ return ((off+len+PAGE_SIZE-1) >> PAGE_SHIFT) -
+ (off >> PAGE_SHIFT);
}
extern struct kmem_cache *ceph_inode_cachep;
diff --git a/include/linux/ceph/osd_client.h b/include/linux/ceph/osd_client.h
index 4343df806710..cbf460927c42 100644
--- a/include/linux/ceph/osd_client.h
+++ b/include/linux/ceph/osd_client.h
@@ -16,7 +16,6 @@ struct ceph_msg;
struct ceph_snap_context;
struct ceph_osd_request;
struct ceph_osd_client;
-struct ceph_authorizer;
/*
* completion callback for async writepages
diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
index 3e39ae5bc799..5b17de62c962 100644
--- a/include/linux/cgroup-defs.h
+++ b/include/linux/cgroup-defs.h
@@ -444,6 +444,7 @@ struct cgroup_subsys {
int (*can_attach)(struct cgroup_taskset *tset);
void (*cancel_attach)(struct cgroup_taskset *tset);
void (*attach)(struct cgroup_taskset *tset);
+ void (*post_attach)(void);
int (*can_fork)(struct task_struct *task);
void (*cancel_fork)(struct task_struct *task);
void (*fork)(struct task_struct *task);
diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h
index a307bf62974f..44a1aff22566 100644
--- a/include/linux/clocksource.h
+++ b/include/linux/clocksource.h
@@ -15,6 +15,7 @@
#include <linux/cache.h>
#include <linux/timer.h>
#include <linux/init.h>
+#include <linux/of.h>
#include <asm/div64.h>
#include <asm/io.h>
diff --git a/include/linux/compaction.h b/include/linux/compaction.h
index d7c8de583a23..242b660f64e6 100644
--- a/include/linux/compaction.h
+++ b/include/linux/compaction.h
@@ -39,12 +39,12 @@ extern int sysctl_compact_unevictable_allowed;
extern int fragmentation_index(struct zone *zone, unsigned int order);
extern unsigned long try_to_compact_pages(gfp_t gfp_mask, unsigned int order,
- int alloc_flags, const struct alloc_context *ac,
- enum migrate_mode mode, int *contended);
+ unsigned int alloc_flags, const struct alloc_context *ac,
+ enum migrate_mode mode, int *contended);
extern void compact_pgdat(pg_data_t *pgdat, int order);
extern void reset_isolation_suitable(pg_data_t *pgdat);
extern unsigned long compaction_suitable(struct zone *zone, int order,
- int alloc_flags, int classzone_idx);
+ unsigned int alloc_flags, int classzone_idx);
extern void defer_compaction(struct zone *zone, int order);
extern bool compaction_deferred(struct zone *zone, int order);
diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
index 22ab246feed3..e2949397c19b 100644
--- a/include/linux/compiler-gcc.h
+++ b/include/linux/compiler-gcc.h
@@ -142,6 +142,7 @@
#if GCC_VERSION >= 30400
#define __must_check __attribute__((warn_unused_result))
+#define __malloc __attribute__((__malloc__))
#endif
#if GCC_VERSION >= 40000
@@ -199,7 +200,7 @@
#define unreachable() __builtin_unreachable()
/* Mark a function definition as prohibited from being cloned. */
-#define __noclone __attribute__((__noclone__))
+#define __noclone __attribute__((__noclone__, __optimize__("no-tracer")))
#endif /* GCC_VERSION >= 40500 */
@@ -246,7 +247,7 @@
#define __HAVE_BUILTIN_BSWAP32__
#define __HAVE_BUILTIN_BSWAP64__
#endif
-#if GCC_VERSION >= 40800 || (defined(__powerpc__) && GCC_VERSION >= 40600)
+#if GCC_VERSION >= 40800
#define __HAVE_BUILTIN_BSWAP16__
#endif
#endif /* CONFIG_ARCH_USE_BUILTIN_BSWAP */
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index b5ff9881bef8..793c0829e3a3 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -357,6 +357,10 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
#define __deprecated_for_modules
#endif
+#ifndef __malloc
+#define __malloc
+#endif
+
/*
* Allow us to avoid 'defined but not used' warnings on functions and data,
* as well as force them to be emitted to the assembly file.
diff --git a/include/linux/configfs.h b/include/linux/configfs.h
index 485fe5519448..d9d6a9d77489 100644
--- a/include/linux/configfs.h
+++ b/include/linux/configfs.h
@@ -188,7 +188,7 @@ static struct configfs_bin_attribute _pfx##attr_##_name = { \
}
#define CONFIGFS_BIN_ATTR_RO(_pfx, _name, _priv, _maxsz) \
-static struct configfs_attribute _pfx##attr_##_name = { \
+static struct configfs_bin_attribute _pfx##attr_##_name = { \
.cb_attr = { \
.ca_name = __stringify(_name), \
.ca_mode = S_IRUGO, \
@@ -200,7 +200,7 @@ static struct configfs_attribute _pfx##attr_##_name = { \
}
#define CONFIGFS_BIN_ATTR_WO(_pfx, _name, _priv, _maxsz) \
-static struct configfs_attribute _pfx##attr_##_name = { \
+static struct configfs_bin_attribute _pfx##attr_##_name = { \
.cb_attr = { \
.ca_name = __stringify(_name), \
.ca_mode = S_IWUSR, \
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index f9b1fab4388a..21597dcac0e2 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -59,25 +59,7 @@ struct notifier_block;
* CPU notifier priorities.
*/
enum {
- /*
- * SCHED_ACTIVE marks a cpu which is coming up active during
- * CPU_ONLINE and CPU_DOWN_FAILED and must be the first
- * notifier. CPUSET_ACTIVE adjusts cpuset according to
- * cpu_active mask right after SCHED_ACTIVE. During
- * CPU_DOWN_PREPARE, SCHED_INACTIVE and CPUSET_INACTIVE are
- * ordered in the similar way.
- *
- * This ordering guarantees consistent cpu_active mask and
- * migration behavior to all cpu notifiers.
- */
- CPU_PRI_SCHED_ACTIVE = INT_MAX,
- CPU_PRI_CPUSET_ACTIVE = INT_MAX - 1,
- CPU_PRI_SCHED_INACTIVE = INT_MIN + 1,
- CPU_PRI_CPUSET_INACTIVE = INT_MIN,
-
- /* migration should happen before other stuff but after perf */
CPU_PRI_PERF = 20,
- CPU_PRI_MIGRATION = 10,
/* bring up workqueues before normal notifiers and down after */
CPU_PRI_WORKQUEUE_UP = 5,
diff --git a/include/linux/cpufreq-dt.h b/include/linux/cpufreq-dt.h
deleted file mode 100644
index 0414009e2c30..000000000000
--- a/include/linux/cpufreq-dt.h
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- * Copyright (C) 2014 Marvell
- * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef __CPUFREQ_DT_H__
-#define __CPUFREQ_DT_H__
-
-struct cpufreq_dt_platform_data {
- /*
- * True when each CPU has its own clock to control its
- * frequency, false when all CPUs are controlled by a single
- * clock.
- */
- bool independent_clocks;
-};
-
-#endif /* __CPUFREQ_DT_H__ */
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index 718e8725de8a..4e81e08db752 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -102,6 +102,17 @@ struct cpufreq_policy {
*/
struct rw_semaphore rwsem;
+ /*
+ * Fast switch flags:
+ * - fast_switch_possible should be set by the driver if it can
+ * guarantee that frequency can be changed on any CPU sharing the
+ * policy and that the change will then affect all of the policy CPUs.
+ * - fast_switch_enabled is to be set by governors that support fast
+ * frequency switching with the help of cpufreq_enable_fast_switch().
+ */
+ bool fast_switch_possible;
+ bool fast_switch_enabled;
+
/* Synchronization for frequency transitions */
bool transition_ongoing; /* Tracks transition status */
spinlock_t transition_lock;
@@ -156,6 +167,8 @@ int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu);
int cpufreq_update_policy(unsigned int cpu);
bool have_governor_per_policy(void);
struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy);
+void cpufreq_enable_fast_switch(struct cpufreq_policy *policy);
+void cpufreq_disable_fast_switch(struct cpufreq_policy *policy);
#else
static inline unsigned int cpufreq_get(unsigned int cpu)
{
@@ -236,6 +249,8 @@ struct cpufreq_driver {
unsigned int relation); /* Deprecated */
int (*target_index)(struct cpufreq_policy *policy,
unsigned int index);
+ unsigned int (*fast_switch)(struct cpufreq_policy *policy,
+ unsigned int target_freq);
/*
* Only for drivers with target_index() and CPUFREQ_ASYNC_NOTIFICATION
* unset.
@@ -426,6 +441,20 @@ static inline unsigned long cpufreq_scale(unsigned long old, u_int div,
#define CPUFREQ_POLICY_POWERSAVE (1)
#define CPUFREQ_POLICY_PERFORMANCE (2)
+/*
+ * The polling frequency depends on the capability of the processor. Default
+ * polling frequency is 1000 times the transition latency of the processor. The
+ * ondemand governor will work on any processor with transition latency <= 10ms,
+ * using appropriate sampling rate.
+ *
+ * For CPUs with transition latency > 10ms (mostly drivers with CPUFREQ_ETERNAL)
+ * the ondemand governor will not work. All times here are in us (microseconds).
+ */
+#define MIN_SAMPLING_RATE_RATIO (2)
+#define LATENCY_MULTIPLIER (1000)
+#define MIN_LATENCY_MULTIPLIER (20)
+#define TRANSITION_LATENCY_LIMIT (10 * 1000 * 1000)
+
/* Governor Events */
#define CPUFREQ_GOV_START 1
#define CPUFREQ_GOV_STOP 2
@@ -450,6 +479,8 @@ struct cpufreq_governor {
};
/* Pass a target to the cpufreq driver */
+unsigned int cpufreq_driver_fast_switch(struct cpufreq_policy *policy,
+ unsigned int target_freq);
int cpufreq_driver_target(struct cpufreq_policy *policy,
unsigned int target_freq,
unsigned int relation);
@@ -462,6 +493,29 @@ void cpufreq_unregister_governor(struct cpufreq_governor *governor);
struct cpufreq_governor *cpufreq_default_governor(void);
struct cpufreq_governor *cpufreq_fallback_governor(void);
+/* Governor attribute set */
+struct gov_attr_set {
+ struct kobject kobj;
+ struct list_head policy_list;
+ struct mutex update_lock;
+ int usage_count;
+};
+
+/* sysfs ops for cpufreq governors */
+extern const struct sysfs_ops governor_sysfs_ops;
+
+void gov_attr_set_init(struct gov_attr_set *attr_set, struct list_head *list_node);
+void gov_attr_set_get(struct gov_attr_set *attr_set, struct list_head *list_node);
+unsigned int gov_attr_set_put(struct gov_attr_set *attr_set, struct list_head *list_node);
+
+/* Governor sysfs attribute */
+struct governor_attr {
+ struct attribute attr;
+ ssize_t (*show)(struct gov_attr_set *attr_set, char *buf);
+ ssize_t (*store)(struct gov_attr_set *attr_set, const char *buf,
+ size_t count);
+};
+
/*********************************************************************
* FREQUENCY TABLE HELPERS *
*********************************************************************/
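
A hedged sketch of how a governor might consume these additions; my_gov_set_freq, my_gov_tunables and the rate_limit_us attribute are illustrative names, not part of this patch:

#include <linux/cpufreq.h>
#include <linux/kernel.h>
#include <linux/sysfs.h>

/*
 * Called from scheduler context once the governor has enabled fast
 * switching with cpufreq_enable_fast_switch(); must not sleep.
 */
static void my_gov_set_freq(struct cpufreq_policy *policy,
			    unsigned int next_freq)
{
	if (policy->fast_switch_enabled)
		cpufreq_driver_fast_switch(policy, next_freq);
}

/* A governor tunable exposed through the shared gov_attr_set machinery. */
struct my_gov_tunables {
	struct gov_attr_set attr_set;
	unsigned int rate_limit_us;
};

static ssize_t rate_limit_us_show(struct gov_attr_set *attr_set, char *buf)
{
	struct my_gov_tunables *tunables =
		container_of(attr_set, struct my_gov_tunables, attr_set);

	return sprintf(buf, "%u\n", tunables->rate_limit_us);
}

static struct governor_attr rate_limit_us = __ATTR_RO(rate_limit_us);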
diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
index 5d68e15e46b7..386374d19987 100644
--- a/include/linux/cpuhotplug.h
+++ b/include/linux/cpuhotplug.h
@@ -8,6 +8,7 @@ enum cpuhp_state {
CPUHP_BRINGUP_CPU,
CPUHP_AP_IDLE_DEAD,
CPUHP_AP_OFFLINE,
+ CPUHP_AP_SCHED_STARTING,
CPUHP_AP_NOTIFY_STARTING,
CPUHP_AP_ONLINE,
CPUHP_TEARDOWN_CPU,
@@ -16,6 +17,7 @@ enum cpuhp_state {
CPUHP_AP_NOTIFY_ONLINE,
CPUHP_AP_ONLINE_DYN,
CPUHP_AP_ONLINE_DYN_END = CPUHP_AP_ONLINE_DYN + 30,
+ CPUHP_AP_ACTIVE,
CPUHP_ONLINE,
};
diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
index 40cee6b77a93..e828cf65d7df 100644
--- a/include/linux/cpumask.h
+++ b/include/linux/cpumask.h
@@ -743,12 +743,10 @@ set_cpu_present(unsigned int cpu, bool present)
static inline void
set_cpu_online(unsigned int cpu, bool online)
{
- if (online) {
+ if (online)
cpumask_set_cpu(cpu, &__cpu_online_mask);
- cpumask_set_cpu(cpu, &__cpu_active_mask);
- } else {
+ else
cpumask_clear_cpu(cpu, &__cpu_online_mask);
- }
}
static inline void
diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
index fea160ee5803..bfc204e70338 100644
--- a/include/linux/cpuset.h
+++ b/include/linux/cpuset.h
@@ -16,26 +16,26 @@
#ifdef CONFIG_CPUSETS
-extern struct static_key cpusets_enabled_key;
+extern struct static_key_false cpusets_enabled_key;
static inline bool cpusets_enabled(void)
{
- return static_key_false(&cpusets_enabled_key);
+ return static_branch_unlikely(&cpusets_enabled_key);
}
static inline int nr_cpusets(void)
{
/* jump label reference count + the top-level cpuset */
- return static_key_count(&cpusets_enabled_key) + 1;
+ return static_key_count(&cpusets_enabled_key.key) + 1;
}
static inline void cpuset_inc(void)
{
- static_key_slow_inc(&cpusets_enabled_key);
+ static_branch_inc(&cpusets_enabled_key);
}
static inline void cpuset_dec(void)
{
- static_key_slow_dec(&cpusets_enabled_key);
+ static_branch_dec(&cpusets_enabled_key);
}
extern int cpuset_init(void);
@@ -48,16 +48,25 @@ extern nodemask_t cpuset_mems_allowed(struct task_struct *p);
void cpuset_init_current_mems_allowed(void);
int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask);
-extern int __cpuset_node_allowed(int node, gfp_t gfp_mask);
+extern bool __cpuset_node_allowed(int node, gfp_t gfp_mask);
-static inline int cpuset_node_allowed(int node, gfp_t gfp_mask)
+static inline bool cpuset_node_allowed(int node, gfp_t gfp_mask)
{
- return nr_cpusets() <= 1 || __cpuset_node_allowed(node, gfp_mask);
+ if (cpusets_enabled())
+ return __cpuset_node_allowed(node, gfp_mask);
+ return true;
}
-static inline int cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
+static inline bool __cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
{
- return cpuset_node_allowed(zone_to_nid(z), gfp_mask);
+ return __cpuset_node_allowed(zone_to_nid(z), gfp_mask);
+}
+
+static inline bool cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
+{
+ if (cpusets_enabled())
+ return __cpuset_zone_allowed(z, gfp_mask);
+ return true;
}
extern int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
@@ -137,8 +146,6 @@ static inline void set_mems_allowed(nodemask_t nodemask)
task_unlock(current);
}
-extern void cpuset_post_attach_flush(void);
-
#else /* !CONFIG_CPUSETS */
static inline bool cpusets_enabled(void) { return false; }
@@ -174,14 +181,19 @@ static inline int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask)
return 1;
}
-static inline int cpuset_node_allowed(int node, gfp_t gfp_mask)
+static inline bool cpuset_node_allowed(int node, gfp_t gfp_mask)
{
- return 1;
+ return true;
}
-static inline int cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
+static inline bool __cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
{
- return 1;
+ return true;
+}
+
+static inline bool cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
+{
+ return true;
}
static inline int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
@@ -245,10 +257,6 @@ static inline bool read_mems_allowed_retry(unsigned int seq)
return false;
}
-static inline void cpuset_post_attach_flush(void)
-{
-}
-
#endif /* !CONFIG_CPUSETS */
#endif /* _LINUX_CPUSET_H */
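
Illustrative only: with the static-branch conversion, a caller can keep the allocator-style pattern below and pay a single patched-out branch when no cpusets are configured (my_zone_allowed is a made-up name):

#include <linux/cpuset.h>
#include <linux/gfp.h>
#include <linux/mmzone.h>

static bool my_zone_allowed(struct zone *zone, gfp_t gfp_mask)
{
	/* Single static branch when no cpusets are in use. */
	if (cpusets_enabled() && !__cpuset_zone_allowed(zone, gfp_mask))
		return false;
	return true;
}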
diff --git a/include/linux/crash_dump.h b/include/linux/crash_dump.h
index 3849fce7ecfe..3873697ba21c 100644
--- a/include/linux/crash_dump.h
+++ b/include/linux/crash_dump.h
@@ -34,9 +34,13 @@ void vmcore_cleanup(void);
/*
* Architecture code can redefine this if there are any special checks
- * needed for 64-bit ELF vmcores. In case of 32-bit only architecture,
- * this can be set to zero.
+ * needed for 32-bit ELF or 64-bit ELF vmcores. In case of 32-bit
+ * only architecture, vmcore_elf64_check_arch can be set to zero.
*/
+#ifndef vmcore_elf32_check_arch
+#define vmcore_elf32_check_arch(x) elf_check_arch(x)
+#endif
+
#ifndef vmcore_elf64_check_arch
#define vmcore_elf64_check_arch(x) (elf_check_arch(x) || vmcore_elf_check_arch_cross(x))
#endif
diff --git a/include/linux/crypto.h b/include/linux/crypto.h
index 99c94899ad0f..6e28c895c376 100644
--- a/include/linux/crypto.h
+++ b/include/linux/crypto.h
@@ -948,8 +948,7 @@ static inline struct ablkcipher_request *ablkcipher_request_cast(
* encrypt and decrypt API calls. During the allocation, the provided ablkcipher
* handle is registered in the request data structure.
*
- * Return: allocated request handle in case of success; IS_ERR() is true in case
- * of an error, PTR_ERR() returns the error code.
+ * Return: allocated request handle in case of success, or NULL if out of memory
*/
static inline struct ablkcipher_request *ablkcipher_request_alloc(
struct crypto_ablkcipher *tfm, gfp_t gfp)
diff --git a/include/linux/dax.h b/include/linux/dax.h
index 636dd59ab505..982a6c4a62f3 100644
--- a/include/linux/dax.h
+++ b/include/linux/dax.h
@@ -5,7 +5,7 @@
#include <linux/mm.h>
#include <asm/pgtable.h>
-ssize_t dax_do_io(struct kiocb *, struct inode *, struct iov_iter *, loff_t,
+ssize_t dax_do_io(struct kiocb *, struct inode *, struct iov_iter *,
get_block_t, dio_iodone_t, int flags);
int dax_clear_sectors(struct block_device *bdev, sector_t _sector, long _size);
int dax_zero_page_range(struct inode *, loff_t from, unsigned len, get_block_t);
diff --git a/include/linux/dcache.h b/include/linux/dcache.h
index 7cb043d8f4e8..f8506e8dd4d4 100644
--- a/include/linux/dcache.h
+++ b/include/linux/dcache.h
@@ -123,7 +123,10 @@ struct dentry {
unsigned long d_time; /* used by d_revalidate */
void *d_fsdata; /* fs-specific data */
- struct list_head d_lru; /* LRU list */
+ union {
+ struct list_head d_lru; /* LRU list */
+ wait_queue_head_t *d_wait; /* in-lookup ones only */
+ };
struct list_head d_child; /* child of parent list */
struct list_head d_subdirs; /* our children */
/*
@@ -131,6 +134,7 @@ struct dentry {
*/
union {
struct hlist_node d_alias; /* inode alias list */
+ struct hlist_bl_node d_in_lookup_hash; /* only for in-lookup ones */
struct rcu_head d_rcu;
} d_u;
};
@@ -161,6 +165,7 @@ struct dentry_operations {
struct vfsmount *(*d_automount)(struct path *);
int (*d_manage)(struct dentry *, bool);
struct inode *(*d_select_inode)(struct dentry *, unsigned);
+ struct dentry *(*d_real)(struct dentry *, struct inode *);
} ____cacheline_aligned;
/*
@@ -229,6 +234,9 @@ struct dentry_operations {
#define DCACHE_OP_SELECT_INODE 0x02000000 /* Unioned entry: dcache op selects inode */
#define DCACHE_ENCRYPTED_WITH_KEY 0x04000000 /* dir is encrypted with a valid key */
+#define DCACHE_OP_REAL 0x08000000
+
+#define DCACHE_PAR_LOOKUP 0x10000000 /* being looked up (with parent locked shared) */
extern seqlock_t rename_lock;
@@ -246,6 +254,8 @@ extern void d_set_d_op(struct dentry *dentry, const struct dentry_operations *op
/* allocate/de-allocate */
extern struct dentry * d_alloc(struct dentry *, const struct qstr *);
extern struct dentry * d_alloc_pseudo(struct super_block *, const struct qstr *);
+extern struct dentry * d_alloc_parallel(struct dentry *, const struct qstr *,
+ wait_queue_head_t *);
extern struct dentry * d_splice_alias(struct inode *, struct dentry *);
extern struct dentry * d_add_ci(struct dentry *, struct inode *, struct qstr *);
extern struct dentry * d_exact_alias(struct dentry *, struct inode *);
@@ -365,6 +375,22 @@ static inline void dont_mount(struct dentry *dentry)
spin_unlock(&dentry->d_lock);
}
+extern void __d_lookup_done(struct dentry *);
+
+static inline int d_in_lookup(struct dentry *dentry)
+{
+ return dentry->d_flags & DCACHE_PAR_LOOKUP;
+}
+
+static inline void d_lookup_done(struct dentry *dentry)
+{
+ if (unlikely(d_in_lookup(dentry))) {
+ spin_lock(&dentry->d_lock);
+ __d_lookup_done(dentry);
+ spin_unlock(&dentry->d_lock);
+ }
+}
+
extern void dput(struct dentry *);
static inline bool d_managed(const struct dentry *dentry)
@@ -555,4 +581,24 @@ static inline struct dentry *d_backing_dentry(struct dentry *upper)
return upper;
}
+static inline struct dentry *d_real(struct dentry *dentry)
+{
+ if (unlikely(dentry->d_flags & DCACHE_OP_REAL))
+ return dentry->d_op->d_real(dentry, NULL);
+ else
+ return dentry;
+}
+
+static inline struct inode *vfs_select_inode(struct dentry *dentry,
+ unsigned open_flags)
+{
+ struct inode *inode = d_inode(dentry);
+
+ if (inode && unlikely(dentry->d_flags & DCACHE_OP_SELECT_INODE))
+ inode = dentry->d_op->d_select_inode(dentry, open_flags);
+
+ return inode;
+}
+
+
#endif /* __LINUX_DCACHE_H */
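
A rough, hypothetical ->atomic_open() sketch showing where d_in_lookup()/d_lookup_done() are expected to sit; myfs_atomic_open and the elided request step are illustrative, not from this patch:

#include <linux/dcache.h>
#include <linux/fs.h>

static int myfs_atomic_open(struct inode *dir, struct dentry *dentry,
			    struct file *file, unsigned int open_flags,
			    umode_t mode, int *opened)
{
	int err;

	if (!d_in_lookup(dentry))
		/* Dentry already looked up earlier: no atomic create+open. */
		return finish_no_open(file, NULL);

	/*
	 * ... send the combined lookup/create/open request and attach the
	 * resulting inode with d_splice_alias() or d_add() here ...
	 */
	err = -ENOSYS;		/* placeholder for the elided step */

	/* The parallel lookup is over either way; wake any waiters. */
	d_lookup_done(dentry);
	return err;
}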
diff --git a/include/linux/debugobjects.h b/include/linux/debugobjects.h
index 98ffcbd4888e..46056cb161fc 100644
--- a/include/linux/debugobjects.h
+++ b/include/linux/debugobjects.h
@@ -38,8 +38,10 @@ struct debug_obj {
* @name: name of the object type
* @debug_hint: function returning address, which have associated
* kernel symbol, to allow identify the object
+ * @is_static_object: return true if the object is static, otherwise return false
* @fixup_init: fixup function, which is called when the init check
- * fails
+ * fails. All fixup functions must return true if fixup
+ * was successful, otherwise return false
* @fixup_activate: fixup function, which is called when the activate check
* fails
* @fixup_destroy: fixup function, which is called when the destroy check
@@ -51,12 +53,13 @@ struct debug_obj {
*/
struct debug_obj_descr {
const char *name;
- void *(*debug_hint) (void *addr);
- int (*fixup_init) (void *addr, enum debug_obj_state state);
- int (*fixup_activate) (void *addr, enum debug_obj_state state);
- int (*fixup_destroy) (void *addr, enum debug_obj_state state);
- int (*fixup_free) (void *addr, enum debug_obj_state state);
- int (*fixup_assert_init)(void *addr, enum debug_obj_state state);
+ void *(*debug_hint)(void *addr);
+ bool (*is_static_object)(void *addr);
+ bool (*fixup_init)(void *addr, enum debug_obj_state state);
+ bool (*fixup_activate)(void *addr, enum debug_obj_state state);
+ bool (*fixup_destroy)(void *addr, enum debug_obj_state state);
+ bool (*fixup_free)(void *addr, enum debug_obj_state state);
+ bool (*fixup_assert_init)(void *addr, enum debug_obj_state state);
};
#ifdef CONFIG_DEBUG_OBJECTS
diff --git a/include/linux/devfreq.h b/include/linux/devfreq.h
index 6fa02a20eb63..2de4e2eea180 100644
--- a/include/linux/devfreq.h
+++ b/include/linux/devfreq.h
@@ -19,6 +19,13 @@
#define DEVFREQ_NAME_LEN 16
+/* DEVFREQ notifier interface */
+#define DEVFREQ_TRANSITION_NOTIFIER (0)
+
+/* Transition notifiers of DEVFREQ_TRANSITION_NOTIFIER */
+#define DEVFREQ_PRECHANGE (0)
+#define DEVFREQ_POSTCHANGE (1)
+
struct devfreq;
/**
@@ -143,6 +150,7 @@ struct devfreq_governor {
* @trans_table: Statistics of devfreq transitions
* @time_in_state: Statistics of devfreq states
* @last_stat_updated: The last time stat updated
+ * @transition_notifier_list: list head of DEVFREQ_TRANSITION_NOTIFIER notifier
*
* This structure stores the devfreq information for a given device.
*
@@ -177,6 +185,13 @@ struct devfreq {
unsigned int *trans_table;
unsigned long *time_in_state;
unsigned long last_stat_updated;
+
+ struct srcu_notifier_head transition_notifier_list;
+};
+
+struct devfreq_freqs {
+ unsigned long old;
+ unsigned long new;
};
#if defined(CONFIG_PM_DEVFREQ)
@@ -207,6 +222,22 @@ extern int devm_devfreq_register_opp_notifier(struct device *dev,
struct devfreq *devfreq);
extern void devm_devfreq_unregister_opp_notifier(struct device *dev,
struct devfreq *devfreq);
+extern int devfreq_register_notifier(struct devfreq *devfreq,
+ struct notifier_block *nb,
+ unsigned int list);
+extern int devfreq_unregister_notifier(struct devfreq *devfreq,
+ struct notifier_block *nb,
+ unsigned int list);
+extern int devm_devfreq_register_notifier(struct device *dev,
+ struct devfreq *devfreq,
+ struct notifier_block *nb,
+ unsigned int list);
+extern void devm_devfreq_unregister_notifier(struct device *dev,
+ struct devfreq *devfreq,
+ struct notifier_block *nb,
+ unsigned int list);
+extern struct devfreq *devfreq_get_devfreq_by_phandle(struct device *dev,
+ int index);
/**
* devfreq_update_stats() - update the last_status pointer in struct devfreq
@@ -241,6 +272,39 @@ struct devfreq_simple_ondemand_data {
};
#endif
+#if IS_ENABLED(CONFIG_DEVFREQ_GOV_PASSIVE)
+/**
+ * struct devfreq_passive_data - void *data fed to struct devfreq
+ * and devfreq_add_device
+ * @parent: the devfreq instance of parent device.
+ * @get_target_freq: Optional callback that returns the desired operating
+ * frequency for the device using the passive governor. It is called
+ * whenever the parent devfreq device, which uses a governor other than
+ * the passive governor, changes its frequency, so that the passive
+ * governor can decide its own next frequency. Set this callback only
+ * if the device needs a specific method to pick that frequency.
+ * @this: the devfreq instance of the device itself.
+ * @nb: the notifier block for the DEVFREQ_TRANSITION_NOTIFIER list
+ *
+ * devfreq_passive_data must set @parent to the devfreq instance of the
+ * parent device, which uses a governor other than the passive governor.
+ * The @this and @nb fields do not need to be initialized; the devfreq
+ * core handles them.
+ */
+struct devfreq_passive_data {
+ /* Should set the devfreq instance of parent device */
+ struct devfreq *parent;
+
+ /* Optional callback to decide the next frequency of the passive device */
+ int (*get_target_freq)(struct devfreq *this, unsigned long *freq);
+
+ /* For passive governor's internal use. Don't need to set them */
+ struct devfreq *this;
+ struct notifier_block nb;
+};
+#endif
+
#else /* !CONFIG_PM_DEVFREQ */
static inline struct devfreq *devfreq_add_device(struct device *dev,
struct devfreq_dev_profile *profile,
@@ -307,6 +371,41 @@ static inline void devm_devfreq_unregister_opp_notifier(struct device *dev,
{
}
+static inline int devfreq_register_notifier(struct devfreq *devfreq,
+ struct notifier_block *nb,
+ unsigned int list)
+{
+ return 0;
+}
+
+static inline int devfreq_unregister_notifier(struct devfreq *devfreq,
+ struct notifier_block *nb,
+ unsigned int list)
+{
+ return 0;
+}
+
+static inline int devm_devfreq_register_notifier(struct device *dev,
+ struct devfreq *devfreq,
+ struct notifier_block *nb,
+ unsigned int list)
+{
+ return 0;
+}
+
+static inline void devm_devfreq_unregister_notifier(struct device *dev,
+ struct devfreq *devfreq,
+ struct notifier_block *nb,
+ unsigned int list)
+{
+}
+
+static inline struct devfreq *devfreq_get_devfreq_by_phandle(struct device *dev,
+ int index)
+{
+ return ERR_PTR(-ENODEV);
+}
+
static inline int devfreq_update_stats(struct devfreq *df)
{
return -EINVAL;
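
A small sketch of the new notifier interface as the passive governor uses it; my_devfreq_notify, my_nb and my_watch_devfreq are placeholder names, and the notifier data is assumed to be the struct devfreq_freqs the core passes along:

#include <linux/devfreq.h>
#include <linux/kernel.h>
#include <linux/notifier.h>

static int my_devfreq_notify(struct notifier_block *nb,
			     unsigned long event, void *data)
{
	struct devfreq_freqs *freqs = data;

	if (event == DEVFREQ_POSTCHANGE)
		pr_debug("devfreq transition: %lu Hz -> %lu Hz\n",
			 freqs->old, freqs->new);

	return NOTIFY_OK;
}

static struct notifier_block my_nb = {
	.notifier_call = my_devfreq_notify,
};

static int my_watch_devfreq(struct device *dev, struct devfreq *devfreq)
{
	return devm_devfreq_register_notifier(dev, devfreq, &my_nb,
					      DEVFREQ_TRANSITION_NOTIFIER);
}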
diff --git a/include/linux/device.h b/include/linux/device.h
index 002c59728dbe..ca90ad8bcd61 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -609,14 +609,14 @@ typedef int (*dr_match_t)(struct device *dev, void *res, void *match_data);
#ifdef CONFIG_DEBUG_DEVRES
extern void *__devres_alloc_node(dr_release_t release, size_t size, gfp_t gfp,
- int nid, const char *name);
+ int nid, const char *name) __malloc;
#define devres_alloc(release, size, gfp) \
__devres_alloc_node(release, size, gfp, NUMA_NO_NODE, #release)
#define devres_alloc_node(release, size, gfp, nid) \
__devres_alloc_node(release, size, gfp, nid, #release)
#else
extern void *devres_alloc_node(dr_release_t release, size_t size, gfp_t gfp,
- int nid);
+ int nid) __malloc;
static inline void *devres_alloc(dr_release_t release, size_t size, gfp_t gfp)
{
return devres_alloc_node(release, size, gfp, NUMA_NO_NODE);
@@ -648,12 +648,12 @@ extern void devres_remove_group(struct device *dev, void *id);
extern int devres_release_group(struct device *dev, void *id);
/* managed devm_k.alloc/kfree for device drivers */
-extern void *devm_kmalloc(struct device *dev, size_t size, gfp_t gfp);
+extern void *devm_kmalloc(struct device *dev, size_t size, gfp_t gfp) __malloc;
extern __printf(3, 0)
char *devm_kvasprintf(struct device *dev, gfp_t gfp, const char *fmt,
- va_list ap);
+ va_list ap) __malloc;
extern __printf(3, 4)
-char *devm_kasprintf(struct device *dev, gfp_t gfp, const char *fmt, ...);
+char *devm_kasprintf(struct device *dev, gfp_t gfp, const char *fmt, ...) __malloc;
static inline void *devm_kzalloc(struct device *dev, size_t size, gfp_t gfp)
{
return devm_kmalloc(dev, size, gfp | __GFP_ZERO);
@@ -671,7 +671,7 @@ static inline void *devm_kcalloc(struct device *dev,
return devm_kmalloc_array(dev, n, size, flags | __GFP_ZERO);
}
extern void devm_kfree(struct device *dev, void *p);
-extern char *devm_kstrdup(struct device *dev, const char *s, gfp_t gfp);
+extern char *devm_kstrdup(struct device *dev, const char *s, gfp_t gfp) __malloc;
extern void *devm_kmemdup(struct device *dev, const void *src, size_t len,
gfp_t gfp);
@@ -956,11 +956,6 @@ static inline bool device_async_suspend_enabled(struct device *dev)
return !!dev->power.async_suspend;
}
-static inline void pm_suspend_ignore_children(struct device *dev, bool enable)
-{
- dev->power.ignore_children = enable;
-}
-
static inline void dev_pm_syscore_device(struct device *dev, bool val)
{
#ifdef CONFIG_PM_SLEEP
diff --git a/include/linux/devpts_fs.h b/include/linux/devpts_fs.h
index e0ee0b3000b2..5871f292b596 100644
--- a/include/linux/devpts_fs.h
+++ b/include/linux/devpts_fs.h
@@ -15,37 +15,23 @@
#include <linux/errno.h>
+struct pts_fs_info;
+
#ifdef CONFIG_UNIX98_PTYS
-int devpts_new_index(struct inode *ptmx_inode);
-void devpts_kill_index(struct inode *ptmx_inode, int idx);
-void devpts_add_ref(struct inode *ptmx_inode);
-void devpts_del_ref(struct inode *ptmx_inode);
+/* Look up a pts fs info and get a ref to it */
+struct pts_fs_info *devpts_get_ref(struct inode *, struct file *);
+void devpts_put_ref(struct pts_fs_info *);
+
+int devpts_new_index(struct pts_fs_info *);
+void devpts_kill_index(struct pts_fs_info *, int);
+
/* mknod in devpts */
-struct inode *devpts_pty_new(struct inode *ptmx_inode, dev_t device, int index,
- void *priv);
+struct dentry *devpts_pty_new(struct pts_fs_info *, int, void *);
/* get private structure */
-void *devpts_get_priv(struct inode *pts_inode);
+void *devpts_get_priv(struct dentry *);
/* unlink */
-void devpts_pty_kill(struct inode *inode);
-
-#else
-
-/* Dummy stubs in the no-pty case */
-static inline int devpts_new_index(struct inode *ptmx_inode) { return -EINVAL; }
-static inline void devpts_kill_index(struct inode *ptmx_inode, int idx) { }
-static inline void devpts_add_ref(struct inode *ptmx_inode) { }
-static inline void devpts_del_ref(struct inode *ptmx_inode) { }
-static inline struct inode *devpts_pty_new(struct inode *ptmx_inode,
- dev_t device, int index, void *priv)
-{
- return ERR_PTR(-EINVAL);
-}
-static inline void *devpts_get_priv(struct inode *pts_inode)
-{
- return NULL;
-}
-static inline void devpts_pty_kill(struct inode *inode) { }
+void devpts_pty_kill(struct dentry *);
#endif
diff --git a/include/linux/dma-iommu.h b/include/linux/dma-iommu.h
index fc481037478a..8443bbb5c071 100644
--- a/include/linux/dma-iommu.h
+++ b/include/linux/dma-iommu.h
@@ -38,8 +38,8 @@ int dma_direction_to_prot(enum dma_data_direction dir, bool coherent);
* These implement the bulk of the relevant DMA mapping callbacks, but require
* the arch code to take care of attributes and cache maintenance
*/
-struct page **iommu_dma_alloc(struct device *dev, size_t size,
- gfp_t gfp, int prot, dma_addr_t *handle,
+struct page **iommu_dma_alloc(struct device *dev, size_t size, gfp_t gfp,
+ struct dma_attrs *attrs, int prot, dma_addr_t *handle,
void (*flush_page)(struct device *, const void *, phys_addr_t));
void iommu_dma_free(struct device *dev, struct page **pages, size_t size,
dma_addr_t *handle);
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index 9ea9aba28049..71c1b215ef66 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -514,7 +514,7 @@ extern u64 dma_get_required_mask(struct device *dev);
#ifndef arch_setup_dma_ops
static inline void arch_setup_dma_ops(struct device *dev, u64 dma_base,
- u64 size, struct iommu_ops *iommu,
+ u64 size, const struct iommu_ops *iommu,
bool coherent) { }
#endif
diff --git a/include/linux/dma/dw.h b/include/linux/dma/dw.h
index 71456442ebe3..f2e538aaddad 100644
--- a/include/linux/dma/dw.h
+++ b/include/linux/dma/dw.h
@@ -27,6 +27,7 @@ struct dw_dma;
* @regs: memory mapped I/O space
* @clk: hclk clock
* @dw: struct dw_dma that is filed by dw_dma_probe()
+ * @pdata: pointer to platform data
*/
struct dw_dma_chip {
struct device *dev;
@@ -34,10 +35,12 @@ struct dw_dma_chip {
void __iomem *regs;
struct clk *clk;
struct dw_dma *dw;
+
+ const struct dw_dma_platform_data *pdata;
};
/* Export to the platform drivers */
-int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata);
+int dw_dma_probe(struct dw_dma_chip *chip);
int dw_dma_remove(struct dw_dma_chip *chip);
/* DMA API extensions */
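
Sketch of the adjusted calling convention (my_dw_probe is illustrative): platform data now travels in dw_dma_chip, and a NULL pdata is expected to let the core autoconfigure from the hardware parameters:

#include <linux/dma/dw.h>

static int my_dw_probe(struct dw_dma_chip *chip,
		       const struct dw_dma_platform_data *pdata)
{
	chip->pdata = pdata;	/* may be NULL: autoconfigure from hardware */
	return dw_dma_probe(chip);
}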
diff --git a/include/linux/dma/xilinx_dma.h b/include/linux/dma/xilinx_dma.h
index 34b98f276ed0..3ae300052553 100644
--- a/include/linux/dma/xilinx_dma.h
+++ b/include/linux/dma/xilinx_dma.h
@@ -41,6 +41,20 @@ struct xilinx_vdma_config {
int ext_fsync;
};
+/**
+ * enum xdma_ip_type - DMA IP type
+ *
+ * @XDMA_TYPE_AXIDMA: AXI DMA IP
+ * @XDMA_TYPE_CDMA: AXI CDMA IP
+ * @XDMA_TYPE_VDMA: AXI VDMA IP
+ *
+ */
+enum xdma_ip_type {
+ XDMA_TYPE_AXIDMA = 0,
+ XDMA_TYPE_CDMA,
+ XDMA_TYPE_VDMA,
+};
+
int xilinx_vdma_channel_set_config(struct dma_chan *dchan,
struct xilinx_vdma_config *cfg);
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index 017433712833..30de0197263a 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -804,6 +804,9 @@ static inline struct dma_async_tx_descriptor *dmaengine_prep_slave_single(
sg_dma_address(&sg) = buf;
sg_dma_len(&sg) = len;
+ if (!chan || !chan->device || !chan->device->device_prep_slave_sg)
+ return NULL;
+
return chan->device->device_prep_slave_sg(chan, &sg, 1,
dir, flags, NULL);
}
@@ -812,6 +815,9 @@ static inline struct dma_async_tx_descriptor *dmaengine_prep_slave_sg(
struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len,
enum dma_transfer_direction dir, unsigned long flags)
{
+ if (!chan || !chan->device || !chan->device->device_prep_slave_sg)
+ return NULL;
+
return chan->device->device_prep_slave_sg(chan, sgl, sg_len,
dir, flags, NULL);
}
@@ -823,6 +829,9 @@ static inline struct dma_async_tx_descriptor *dmaengine_prep_rio_sg(
enum dma_transfer_direction dir, unsigned long flags,
struct rio_dma_ext *rio_ext)
{
+ if (!chan || !chan->device || !chan->device->device_prep_slave_sg)
+ return NULL;
+
return chan->device->device_prep_slave_sg(chan, sgl, sg_len,
dir, flags, rio_ext);
}
@@ -833,6 +842,9 @@ static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_cyclic(
size_t period_len, enum dma_transfer_direction dir,
unsigned long flags)
{
+ if (!chan || !chan->device || !chan->device->device_prep_dma_cyclic)
+ return NULL;
+
return chan->device->device_prep_dma_cyclic(chan, buf_addr, buf_len,
period_len, dir, flags);
}
@@ -841,6 +853,9 @@ static inline struct dma_async_tx_descriptor *dmaengine_prep_interleaved_dma(
struct dma_chan *chan, struct dma_interleaved_template *xt,
unsigned long flags)
{
+ if (!chan || !chan->device || !chan->device->device_prep_interleaved_dma)
+ return NULL;
+
return chan->device->device_prep_interleaved_dma(chan, xt, flags);
}
@@ -848,7 +863,7 @@ static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_memset(
struct dma_chan *chan, dma_addr_t dest, int value, size_t len,
unsigned long flags)
{
- if (!chan || !chan->device)
+ if (!chan || !chan->device || !chan->device->device_prep_dma_memset)
return NULL;
return chan->device->device_prep_dma_memset(chan, dest, value,
@@ -861,6 +876,9 @@ static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_sg(
struct scatterlist *src_sg, unsigned int src_nents,
unsigned long flags)
{
+ if (!chan || !chan->device || !chan->device->device_prep_dma_sg)
+ return NULL;
+
return chan->device->device_prep_dma_sg(chan, dst_sg, dst_nents,
src_sg, src_nents, flags);
}
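
With these checks in place, a caller can treat a NULL descriptor as "not supported or prep failed" rather than risking a NULL-pointer dereference; a minimal sketch (my_start_rx is illustrative):

#include <linux/dmaengine.h>

static int my_start_rx(struct dma_chan *chan, dma_addr_t buf, size_t len)
{
	struct dma_async_tx_descriptor *desc;
	dma_cookie_t cookie;

	desc = dmaengine_prep_slave_single(chan, buf, len, DMA_DEV_TO_MEM,
					   DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc)	/* channel lacks the callback, or prep failed */
		return -ENOSYS;

	cookie = dmaengine_submit(desc);
	if (dma_submit_error(cookie))
		return -EIO;

	dma_async_issue_pending(chan);
	return 0;
}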
diff --git a/include/linux/efi.h b/include/linux/efi.h
index 1626474567ac..df7acb51f3cc 100644
--- a/include/linux/efi.h
+++ b/include/linux/efi.h
@@ -21,6 +21,7 @@
#include <linux/pfn.h>
#include <linux/pstore.h>
#include <linux/reboot.h>
+#include <linux/screen_info.h>
#include <asm/page.h>
@@ -124,6 +125,13 @@ typedef struct {
} efi_capsule_header_t;
/*
+ * EFI capsule flags
+ */
+#define EFI_CAPSULE_PERSIST_ACROSS_RESET 0x00010000
+#define EFI_CAPSULE_POPULATE_SYSTEM_TABLE 0x00020000
+#define EFI_CAPSULE_INITIATE_RESET 0x00040000
+
+/*
* Allocation types for calls to boottime->allocate_pages.
*/
#define EFI_ALLOCATE_ANY_PAGES 0
@@ -282,9 +290,10 @@ typedef struct {
efi_status_t (*handle_protocol)(efi_handle_t, efi_guid_t *, void **);
void *__reserved;
void *register_protocol_notify;
- void *locate_handle;
+ efi_status_t (*locate_handle)(int, efi_guid_t *, void *,
+ unsigned long *, efi_handle_t *);
void *locate_device_path;
- void *install_configuration_table;
+ efi_status_t (*install_configuration_table)(efi_guid_t *, void *);
void *load_image;
void *start_image;
void *exit;
@@ -623,6 +632,27 @@ void efi_native_runtime_setup(void);
EFI_GUID(0x3152bca5, 0xeade, 0x433d, \
0x86, 0x2e, 0xc0, 0x1c, 0xdc, 0x29, 0x1f, 0x44)
+#define EFI_MEMORY_ATTRIBUTES_TABLE_GUID \
+ EFI_GUID(0xdcfa911d, 0x26eb, 0x469f, \
+ 0xa2, 0x20, 0x38, 0xb7, 0xdc, 0x46, 0x12, 0x20)
+
+#define EFI_CONSOLE_OUT_DEVICE_GUID \
+ EFI_GUID(0xd3b36f2c, 0xd551, 0x11d4, \
+ 0x9a, 0x46, 0x00, 0x90, 0x27, 0x3f, 0xc1, 0x4d)
+
+/*
+ * This GUID is used to pass to the kernel proper the struct screen_info
+ * structure that was populated by the stub based on the GOP protocol instance
+ * associated with ConOut
+ */
+#define LINUX_EFI_ARM_SCREEN_INFO_TABLE_GUID \
+ EFI_GUID(0xe03fc20a, 0x85dc, 0x406e, \
+ 0xb9, 0xe, 0x4a, 0xb5, 0x02, 0x37, 0x1d, 0x95)
+
+#define LINUX_EFI_LOADER_ENTRY_GUID \
+ EFI_GUID(0x4a67b082, 0x0a4c, 0x41cf, \
+ 0xb6, 0xc7, 0x44, 0x0b, 0x29, 0xbb, 0x8c, 0x4f)
+
typedef struct {
efi_guid_t guid;
u64 table;
@@ -847,6 +877,14 @@ typedef struct {
#define EFI_INVALID_TABLE_ADDR (~0UL)
+typedef struct {
+ u32 version;
+ u32 num_entries;
+ u32 desc_size;
+ u32 reserved;
+ efi_memory_desc_t entry[0];
+} efi_memory_attributes_table_t;
+
/*
* All runtime access to EFI goes through this structure:
*/
@@ -868,6 +906,7 @@ extern struct efi {
unsigned long config_table; /* config tables */
unsigned long esrt; /* ESRT table */
unsigned long properties_table; /* properties table */
+ unsigned long mem_attr_table; /* memory attributes table */
efi_get_time_t *get_time;
efi_set_time_t *set_time;
efi_get_wakeup_time_t *get_wakeup_time;
@@ -883,7 +922,7 @@ extern struct efi {
efi_get_next_high_mono_count_t *get_next_high_mono_count;
efi_reset_system_t *reset_system;
efi_set_virtual_address_map_t *set_virtual_address_map;
- struct efi_memory_map *memmap;
+ struct efi_memory_map memmap;
unsigned long flags;
} efi;
@@ -945,7 +984,6 @@ extern void efi_initialize_iomem_resources(struct resource *code_resource,
extern void efi_get_time(struct timespec *now);
extern void efi_reserve_boot_services(void);
extern int efi_get_fdt_params(struct efi_fdt_params *params);
-extern struct efi_memory_map memmap;
extern struct kobject *efi_kobj;
extern int efi_reboot_quirk_mode;
@@ -957,12 +995,34 @@ extern void __init efi_fake_memmap(void);
static inline void efi_fake_memmap(void) { }
#endif
+/*
+ * efi_memattr_perm_setter - arch specific callback function passed into
+ * efi_memattr_apply_permissions() that updates the
+ * mapping permissions described by the second
+ * argument in the page tables referred to by the
+ * first argument.
+ */
+typedef int (*efi_memattr_perm_setter)(struct mm_struct *, efi_memory_desc_t *);
+
+extern int efi_memattr_init(void);
+extern int efi_memattr_apply_permissions(struct mm_struct *mm,
+ efi_memattr_perm_setter fn);
+
/* Iterate through an efi_memory_map */
-#define for_each_efi_memory_desc(m, md) \
+#define for_each_efi_memory_desc_in_map(m, md) \
for ((md) = (m)->map; \
(md) <= (efi_memory_desc_t *)((m)->map_end - (m)->desc_size); \
(md) = (void *)(md) + (m)->desc_size)
+/**
+ * for_each_efi_memory_desc - iterate over descriptors in efi.memmap
+ * @md: the efi_memory_desc_t * iterator
+ *
+ * Once the loop finishes @md must not be accessed.
+ */
+#define for_each_efi_memory_desc(md) \
+ for_each_efi_memory_desc_in_map(&efi.memmap, md)
+
/*
* Format an EFI memory descriptor's type and attributes to a user-provided
* character buffer, as per snprintf(), and return the buffer.
@@ -1000,7 +1060,6 @@ extern int __init efi_setup_pcdp_console(char *);
* possible, remove EFI-related code altogether.
*/
#define EFI_BOOT 0 /* Were we booted from EFI? */
-#define EFI_SYSTEM_TABLES 1 /* Can we use EFI system tables? */
#define EFI_CONFIG_TABLES 2 /* Can we use EFI config tables? */
#define EFI_RUNTIME_SERVICES 3 /* Can we use runtime services? */
#define EFI_MEMMAP 4 /* Can we use EFI memory map? */
@@ -1026,8 +1085,16 @@ static inline bool efi_enabled(int feature)
}
static inline void
efi_reboot(enum reboot_mode reboot_mode, const char *__unused) {}
+
+static inline bool
+efi_capsule_pending(int *reset_type)
+{
+ return false;
+}
#endif
+extern int efi_status_to_err(efi_status_t status);
+
/*
* Variable Attributes
*/
@@ -1180,6 +1247,80 @@ struct efi_simple_text_output_protocol {
void *test_string;
};
+#define PIXEL_RGB_RESERVED_8BIT_PER_COLOR 0
+#define PIXEL_BGR_RESERVED_8BIT_PER_COLOR 1
+#define PIXEL_BIT_MASK 2
+#define PIXEL_BLT_ONLY 3
+#define PIXEL_FORMAT_MAX 4
+
+struct efi_pixel_bitmask {
+ u32 red_mask;
+ u32 green_mask;
+ u32 blue_mask;
+ u32 reserved_mask;
+};
+
+struct efi_graphics_output_mode_info {
+ u32 version;
+ u32 horizontal_resolution;
+ u32 vertical_resolution;
+ int pixel_format;
+ struct efi_pixel_bitmask pixel_information;
+ u32 pixels_per_scan_line;
+} __packed;
+
+struct efi_graphics_output_protocol_mode_32 {
+ u32 max_mode;
+ u32 mode;
+ u32 info;
+ u32 size_of_info;
+ u64 frame_buffer_base;
+ u32 frame_buffer_size;
+} __packed;
+
+struct efi_graphics_output_protocol_mode_64 {
+ u32 max_mode;
+ u32 mode;
+ u64 info;
+ u64 size_of_info;
+ u64 frame_buffer_base;
+ u64 frame_buffer_size;
+} __packed;
+
+struct efi_graphics_output_protocol_mode {
+ u32 max_mode;
+ u32 mode;
+ unsigned long info;
+ unsigned long size_of_info;
+ u64 frame_buffer_base;
+ unsigned long frame_buffer_size;
+} __packed;
+
+struct efi_graphics_output_protocol_32 {
+ u32 query_mode;
+ u32 set_mode;
+ u32 blt;
+ u32 mode;
+};
+
+struct efi_graphics_output_protocol_64 {
+ u64 query_mode;
+ u64 set_mode;
+ u64 blt;
+ u64 mode;
+};
+
+struct efi_graphics_output_protocol {
+ unsigned long query_mode;
+ unsigned long set_mode;
+ unsigned long blt;
+ struct efi_graphics_output_protocol_mode *mode;
+};
+
+typedef efi_status_t (*efi_graphics_output_protocol_query_mode)(
+ struct efi_graphics_output_protocol *, u32, unsigned long *,
+ struct efi_graphics_output_mode_info **);
+
extern struct list_head efivar_sysfs_list;
static inline void
@@ -1195,8 +1336,7 @@ int efivars_unregister(struct efivars *efivars);
struct kobject *efivars_kobject(void);
int efivar_init(int (*func)(efi_char16_t *, efi_guid_t, unsigned long, void *),
- void *data, bool atomic, bool duplicates,
- struct list_head *head);
+ void *data, bool duplicates, struct list_head *head);
void efivar_entry_add(struct efivar_entry *entry, struct list_head *head);
void efivar_entry_remove(struct efivar_entry *entry);
@@ -1242,6 +1382,13 @@ int efivars_sysfs_init(void);
#define EFIVARS_DATA_SIZE_MAX 1024
#endif /* CONFIG_EFI_VARS */
+extern bool efi_capsule_pending(int *reset_type);
+
+extern int efi_capsule_supported(efi_guid_t guid, u32 flags,
+ size_t size, int *reset);
+
+extern int efi_capsule_update(efi_capsule_header_t *capsule,
+ struct page **pages);
#ifdef CONFIG_EFI_RUNTIME_MAP
int efi_runtime_map_init(struct kobject *);
@@ -1319,5 +1466,9 @@ efi_status_t handle_cmdline_files(efi_system_table_t *sys_table_arg,
efi_status_t efi_parse_options(char *cmdline);
+efi_status_t efi_setup_gop(efi_system_table_t *sys_table_arg,
+ struct screen_info *si, efi_guid_t *proto,
+ unsigned long size);
+
bool efi_runtime_disabled(void);
#endif /* _LINUX_EFI_H */
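
A minimal sketch of the new single-argument iterator, walking efi.memmap directly (count_runtime_regions is illustrative):

#include <linux/efi.h>

static unsigned int count_runtime_regions(void)
{
	efi_memory_desc_t *md;
	unsigned int n = 0;

	for_each_efi_memory_desc(md) {
		if (md->attribute & EFI_MEMORY_RUNTIME)
			n++;
	}

	return n;
}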
diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h
index e2b7bf27c03e..9ded8c6d8176 100644
--- a/include/linux/ethtool.h
+++ b/include/linux/ethtool.h
@@ -150,6 +150,13 @@ extern int
__ethtool_get_link_ksettings(struct net_device *dev,
struct ethtool_link_ksettings *link_ksettings);
+void ethtool_convert_legacy_u32_to_link_mode(unsigned long *dst,
+ u32 legacy_u32);
+
+/* Return false if src had higher bits set; lower bits are always updated. */
+bool ethtool_convert_link_mode_to_legacy_u32(u32 *legacy_u32,
+ const unsigned long *src);
+
/**
* struct ethtool_ops - optional netdev operations
* @get_settings: DEPRECATED, use %get_link_ksettings/%set_link_ksettings
diff --git a/include/linux/f2fs_fs.h b/include/linux/f2fs_fs.h
index 9eb215a155e0..b90e9bdbd1dd 100644
--- a/include/linux/f2fs_fs.h
+++ b/include/linux/f2fs_fs.h
@@ -262,7 +262,7 @@ struct f2fs_node {
/*
* For NAT entries
*/
-#define NAT_ENTRY_PER_BLOCK (PAGE_CACHE_SIZE / sizeof(struct f2fs_nat_entry))
+#define NAT_ENTRY_PER_BLOCK (PAGE_SIZE / sizeof(struct f2fs_nat_entry))
struct f2fs_nat_entry {
__u8 version; /* latest version of cached nat entry */
@@ -282,7 +282,7 @@ struct f2fs_nat_block {
* Not allow to change this.
*/
#define SIT_VBLOCK_MAP_SIZE 64
-#define SIT_ENTRY_PER_BLOCK (PAGE_CACHE_SIZE / sizeof(struct f2fs_sit_entry))
+#define SIT_ENTRY_PER_BLOCK (PAGE_SIZE / sizeof(struct f2fs_sit_entry))
/*
* Note that f2fs_sit_entry->vblocks has the following bit-field information.
diff --git a/include/linux/file.h b/include/linux/file.h
index f87d30882a24..7444f5feda12 100644
--- a/include/linux/file.h
+++ b/include/linux/file.h
@@ -44,6 +44,7 @@ extern struct file *fget_raw(unsigned int fd);
extern unsigned long __fdget(unsigned int fd);
extern unsigned long __fdget_raw(unsigned int fd);
extern unsigned long __fdget_pos(unsigned int fd);
+extern void __f_unlock_pos(struct file *);
static inline struct fd __to_fd(unsigned long v)
{
@@ -60,6 +61,18 @@ static inline struct fd fdget_raw(unsigned int fd)
return __to_fd(__fdget_raw(fd));
}
+static inline struct fd fdget_pos(int fd)
+{
+ return __to_fd(__fdget_pos(fd));
+}
+
+static inline void fdput_pos(struct fd f)
+{
+ if (f.flags & FDPUT_POS_UNLOCK)
+ __f_unlock_pos(f.file);
+ fdput(f);
+}
+
extern int f_dupfd(unsigned int from, struct file *file, unsigned flags);
extern int replace_fd(unsigned fd, struct file *file, unsigned flags);
extern void set_close_on_exec(unsigned int fd, int flag);
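
Sketch of the intended pairing, modelled on the read/write syscall bodies; my_read is illustrative and error handling is kept minimal:

#include <linux/file.h>
#include <linux/fs.h>

static ssize_t my_read(unsigned int fd, char __user *buf, size_t count)
{
	struct fd f = fdget_pos(fd);
	ssize_t ret = -EBADF;

	if (f.file) {
		loff_t pos = f.file->f_pos;

		ret = vfs_read(f.file, buf, count, &pos);
		if (ret >= 0)
			f.file->f_pos = pos;
		fdput_pos(f);	/* drops the position lock if it was taken */
	}

	return ret;
}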
diff --git a/include/linux/filter.h b/include/linux/filter.h
index 43aa1f8855c7..6fc31ef1da2d 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -13,6 +13,8 @@
#include <linux/printk.h>
#include <linux/workqueue.h>
#include <linux/sched.h>
+#include <linux/capability.h>
+
#include <net/sch_generic.h>
#include <asm/cacheflush.h>
@@ -42,6 +44,15 @@ struct bpf_prog_aux;
#define BPF_REG_X BPF_REG_7
#define BPF_REG_TMP BPF_REG_8
+/* Kernel hidden auxiliary/helper register for hardening step.
+ * Only used by eBPF JITs. It's nothing more than a temporary
+ * register that JITs use internally, only that here it's part
+ * of eBPF instructions that have been rewritten for blinding
+ * constants. See JIT pre-step in bpf_jit_blind_constants().
+ */
+#define BPF_REG_AX MAX_BPF_REG
+#define MAX_BPF_JIT_REG (MAX_BPF_REG + 1)
+
/* BPF program can access up to 512 bytes of stack space. */
#define MAX_BPF_STACK 512
@@ -352,6 +363,22 @@ struct sk_filter {
#define BPF_SKB_CB_LEN QDISC_CB_PRIV_LEN
+struct bpf_skb_data_end {
+ struct qdisc_skb_cb qdisc_cb;
+ void *data_end;
+};
+
+/* compute the linear packet data range [data, data_end) which
+ * will be accessed by cls_bpf and act_bpf programs
+ */
+static inline void bpf_compute_data_end(struct sk_buff *skb)
+{
+ struct bpf_skb_data_end *cb = (struct bpf_skb_data_end *)skb->cb;
+
+ BUILD_BUG_ON(sizeof(*cb) > FIELD_SIZEOF(struct sk_buff, cb));
+ cb->data_end = skb->data + skb_headlen(skb);
+}
+
static inline u8 *bpf_skb_cb(struct sk_buff *skb)
{
/* eBPF programs may read/write skb->cb[] area to transfer meta
@@ -442,7 +469,7 @@ static inline void bpf_prog_unlock_ro(struct bpf_prog *fp)
int sk_filter(struct sock *sk, struct sk_buff *skb);
-int bpf_prog_select_runtime(struct bpf_prog *fp);
+struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err);
void bpf_prog_free(struct bpf_prog *fp);
struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags);
@@ -476,10 +503,17 @@ bool sk_filter_charge(struct sock *sk, struct sk_filter *fp);
void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp);
u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
-void bpf_int_jit_compile(struct bpf_prog *fp);
+
+struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog);
bool bpf_helper_changes_skb_data(void *func);
+struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
+ const struct bpf_insn *patch, u32 len);
+
#ifdef CONFIG_BPF_JIT
+extern int bpf_jit_enable;
+extern int bpf_jit_harden;
+
typedef void (*bpf_jit_fill_hole_t)(void *area, unsigned int size);
struct bpf_binary_header *
@@ -491,6 +525,9 @@ void bpf_jit_binary_free(struct bpf_binary_header *hdr);
void bpf_jit_compile(struct bpf_prog *fp);
void bpf_jit_free(struct bpf_prog *fp);
+struct bpf_prog *bpf_jit_blind_constants(struct bpf_prog *fp);
+void bpf_jit_prog_release_other(struct bpf_prog *fp, struct bpf_prog *fp_other);
+
static inline void bpf_jit_dump(unsigned int flen, unsigned int proglen,
u32 pass, void *image)
{
@@ -501,6 +538,33 @@ static inline void bpf_jit_dump(unsigned int flen, unsigned int proglen,
print_hex_dump(KERN_ERR, "JIT code: ", DUMP_PREFIX_OFFSET,
16, 1, image, proglen, false);
}
+
+static inline bool bpf_jit_is_ebpf(void)
+{
+# ifdef CONFIG_HAVE_EBPF_JIT
+ return true;
+# else
+ return false;
+# endif
+}
+
+static inline bool bpf_jit_blinding_enabled(void)
+{
+ /* These are the prerequisites; should someone ever have the
+ * idea to call blinding outside of them, we make sure to
+ * bail out.
+ */
+ if (!bpf_jit_is_ebpf())
+ return false;
+ if (!bpf_jit_enable)
+ return false;
+ if (!bpf_jit_harden)
+ return false;
+ if (bpf_jit_harden == 1 && capable(CAP_SYS_ADMIN))
+ return false;
+
+ return true;
+}
#else
static inline void bpf_jit_compile(struct bpf_prog *fp)
{
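
A sketch of the pattern an arch eBPF JIT is expected to follow with constant blinding; my_arch_jit and the elided code-emission step are illustrative:

#include <linux/err.h>
#include <linux/filter.h>

static struct bpf_prog *my_arch_jit(struct bpf_prog *prog)
{
	struct bpf_prog *tmp, *orig_prog = prog;
	bool tmp_blinded = false;

	tmp = bpf_jit_blind_constants(prog);
	if (IS_ERR(tmp))
		return orig_prog;	/* blinding wanted but failed */
	if (tmp != prog) {
		tmp_blinded = true;	/* JIT the blinded copy */
		prog = tmp;
	}

	/* ... emit native code for prog->insnsi / prog->len here ... */

	if (tmp_blinded)
		bpf_jit_prog_release_other(prog, prog == orig_prog ?
					   tmp : orig_prog);

	return prog;
}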
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 14a97194b34b..10d3d8f8a65b 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -323,6 +323,8 @@ struct writeback_control;
#define IOCB_APPEND (1 << 1)
#define IOCB_DIRECT (1 << 2)
#define IOCB_HIPRI (1 << 3)
+#define IOCB_DSYNC (1 << 4)
+#define IOCB_SYNC (1 << 5)
struct kiocb {
struct file *ki_filp;
@@ -394,7 +396,7 @@ struct address_space_operations {
void (*invalidatepage) (struct page *, unsigned int, unsigned int);
int (*releasepage) (struct page *, gfp_t);
void (*freepage)(struct page *);
- ssize_t (*direct_IO)(struct kiocb *, struct iov_iter *iter, loff_t offset);
+ ssize_t (*direct_IO)(struct kiocb *, struct iov_iter *iter);
/*
* migrate the contents of a page to the specified target. If
* migrate_mode is MIGRATE_ASYNC, it must not block.
@@ -577,6 +579,18 @@ static inline void mapping_allow_writable(struct address_space *mapping)
struct posix_acl;
#define ACL_NOT_CACHED ((void *)(-1))
+static inline struct posix_acl *
+uncached_acl_sentinel(struct task_struct *task)
+{
+ return (void *)task + 1;
+}
+
+static inline bool
+is_uncached_acl(struct posix_acl *acl)
+{
+ return (long)acl & 1;
+}
+
#define IOP_FASTPERM 0x0001
#define IOP_LOOKUP 0x0002
#define IOP_NOFOLLOW 0x0004
@@ -635,7 +649,7 @@ struct inode {
/* Misc */
unsigned long i_state;
- struct mutex i_mutex;
+ struct rw_semaphore i_rwsem;
unsigned long dirtied_when; /* jiffies of first dirtying */
unsigned long dirtied_time_when;
@@ -672,6 +686,7 @@ struct inode {
struct block_device *i_bdev;
struct cdev *i_cdev;
char *i_link;
+ unsigned i_dir_seq;
};
__u32 i_generation;
@@ -721,27 +736,42 @@ enum inode_i_mutex_lock_class
static inline void inode_lock(struct inode *inode)
{
- mutex_lock(&inode->i_mutex);
+ down_write(&inode->i_rwsem);
}
static inline void inode_unlock(struct inode *inode)
{
- mutex_unlock(&inode->i_mutex);
+ up_write(&inode->i_rwsem);
+}
+
+static inline void inode_lock_shared(struct inode *inode)
+{
+ down_read(&inode->i_rwsem);
+}
+
+static inline void inode_unlock_shared(struct inode *inode)
+{
+ up_read(&inode->i_rwsem);
}
static inline int inode_trylock(struct inode *inode)
{
- return mutex_trylock(&inode->i_mutex);
+ return down_write_trylock(&inode->i_rwsem);
+}
+
+static inline int inode_trylock_shared(struct inode *inode)
+{
+ return down_read_trylock(&inode->i_rwsem);
}
static inline int inode_is_locked(struct inode *inode)
{
- return mutex_is_locked(&inode->i_mutex);
+ return rwsem_is_locked(&inode->i_rwsem);
}
static inline void inode_lock_nested(struct inode *inode, unsigned subclass)
{
- mutex_lock_nested(&inode->i_mutex, subclass);
+ down_write_nested(&inode->i_rwsem, subclass);
}
void lock_two_nondirectories(struct inode *, struct inode*);
@@ -929,7 +959,7 @@ static inline struct file *get_file(struct file *f)
/* Page cache limit. The filesystems should put that into their s_maxbytes
limits, otherwise bad things can happen in VM. */
#if BITS_PER_LONG==32
-#define MAX_LFS_FILESIZE (((loff_t)PAGE_CACHE_SIZE << (BITS_PER_LONG-1))-1)
+#define MAX_LFS_FILESIZE (((loff_t)PAGE_SIZE << (BITS_PER_LONG-1))-1)
#elif BITS_PER_LONG==64
#define MAX_LFS_FILESIZE ((loff_t)0x7fffffffffffffffLL)
#endif
@@ -1241,6 +1271,16 @@ static inline struct inode *file_inode(const struct file *f)
return f->f_inode;
}
+static inline struct dentry *file_dentry(const struct file *file)
+{
+ struct dentry *dentry = file->f_path.dentry;
+
+ if (unlikely(dentry->d_flags & DCACHE_OP_REAL))
+ return dentry->d_op->d_real(dentry, file_inode(file));
+ else
+ return dentry;
+}
+
static inline int locks_lock_file_wait(struct file *filp, struct file_lock *fl)
{
return locks_lock_inode_wait(file_inode(filp), fl);
@@ -1636,6 +1676,7 @@ struct file_operations {
ssize_t (*read_iter) (struct kiocb *, struct iov_iter *);
ssize_t (*write_iter) (struct kiocb *, struct iov_iter *);
int (*iterate) (struct file *, struct dir_context *);
+ int (*iterate_shared) (struct file *, struct dir_context *);
unsigned int (*poll) (struct file *, struct poll_table_struct *);
long (*unlocked_ioctl) (struct file *, unsigned int, unsigned long);
long (*compat_ioctl) (struct file *, unsigned int, unsigned long);
@@ -1690,7 +1731,8 @@ struct inode_operations {
int (*setattr) (struct dentry *, struct iattr *);
int (*getattr) (struct vfsmount *mnt, struct dentry *, struct kstat *);
int (*setxattr) (struct dentry *, const char *,const void *,size_t,int);
- ssize_t (*getxattr) (struct dentry *, const char *, void *, size_t);
+ ssize_t (*getxattr) (struct dentry *, struct inode *,
+ const char *, void *, size_t);
ssize_t (*listxattr) (struct dentry *, char *, size_t);
int (*removexattr) (struct dentry *, const char *);
int (*fiemap)(struct inode *, struct fiemap_extent_info *, u64 start,
@@ -2067,7 +2109,7 @@ extern int generic_update_time(struct inode *, struct timespec *, int);
/* /sys/fs */
extern struct kobject *fs_kobj;
-#define MAX_RW_COUNT (INT_MAX & PAGE_CACHE_MASK)
+#define MAX_RW_COUNT (INT_MAX & PAGE_MASK)
#ifdef CONFIG_MANDATORY_FILE_LOCKING
extern int locks_mandatory_locked(struct file *);
@@ -2253,7 +2295,7 @@ struct filename {
const char iname[];
};
-extern long vfs_truncate(struct path *, loff_t);
+extern long vfs_truncate(const struct path *, loff_t);
extern int do_truncate(struct dentry *, loff_t start, unsigned int time_attrs,
struct file *filp);
extern int vfs_fallocate(struct file *file, int mode, loff_t offset,
@@ -2475,13 +2517,25 @@ extern int filemap_fdatawrite_range(struct address_space *mapping,
extern int vfs_fsync_range(struct file *file, loff_t start, loff_t end,
int datasync);
extern int vfs_fsync(struct file *file, int datasync);
-static inline int generic_write_sync(struct file *file, loff_t pos, loff_t count)
-{
- if (!(file->f_flags & O_DSYNC) && !IS_SYNC(file->f_mapping->host))
- return 0;
- return vfs_fsync_range(file, pos, pos + count - 1,
- (file->f_flags & __O_SYNC) ? 0 : 1);
+
+/*
+ * Sync the bytes written if this was a synchronous write. Expects ki_pos
+ * to already be updated for the write, and returns either the number
+ * of bytes passed in, or an error if syncing the file failed.
+ */
+static inline ssize_t generic_write_sync(struct kiocb *iocb, ssize_t count)
+{
+ if (iocb->ki_flags & IOCB_DSYNC) {
+ int ret = vfs_fsync_range(iocb->ki_filp,
+ iocb->ki_pos - count, iocb->ki_pos - 1,
+ (iocb->ki_flags & IOCB_SYNC) ? 0 : 1);
+ if (ret)
+ return ret;
+ }
+
+ return count;
}
+
extern void emergency_sync(void);
extern void emergency_remount(void);
#ifdef CONFIG_BLOCK
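For orientation, the reworked generic_write_sync() above is called with the kiocb and the byte count after ki_pos has been advanced, and it only syncs when IOCB_DSYNC is set. A sketch of the usual call site in a ->write_iter method, assuming nothing beyond the declarations in this header (example_write_iter is illustrative):

static ssize_t example_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	ssize_t ret;

	inode_lock(inode);
	ret = __generic_file_write_iter(iocb, from);	/* advances iocb->ki_pos */
	inode_unlock(inode);

	if (ret > 0)
		ret = generic_write_sync(iocb, ret);	/* no-op unless IOCB_DSYNC */
	return ret;
}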
@@ -2580,15 +2634,34 @@ static inline void i_readcount_inc(struct inode *inode)
#endif
extern int do_pipe_flags(int *, int);
+#define __kernel_read_file_id(id) \
+ id(UNKNOWN, unknown) \
+ id(FIRMWARE, firmware) \
+ id(MODULE, kernel-module) \
+ id(KEXEC_IMAGE, kexec-image) \
+ id(KEXEC_INITRAMFS, kexec-initramfs) \
+ id(POLICY, security-policy) \
+ id(MAX_ID, )
+
+#define __fid_enumify(ENUM, dummy) READING_ ## ENUM,
+#define __fid_stringify(dummy, str) #str,
+
enum kernel_read_file_id {
- READING_FIRMWARE = 1,
- READING_MODULE,
- READING_KEXEC_IMAGE,
- READING_KEXEC_INITRAMFS,
- READING_POLICY,
- READING_MAX_ID
+ __kernel_read_file_id(__fid_enumify)
};
+static const char * const kernel_read_file_str[] = {
+ __kernel_read_file_id(__fid_stringify)
+};
+
+static inline const char *kernel_read_file_id_str(enum kernel_read_file_id id)
+{
+ if (id < 0 || id >= READING_MAX_ID)
+ return kernel_read_file_str[READING_UNKNOWN];
+
+ return kernel_read_file_str[id];
+}
+
extern int kernel_read(struct file *, loff_t, char *, unsigned long);
extern int kernel_read_file(struct file *, void **, loff_t *, loff_t,
enum kernel_read_file_id);
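The __kernel_read_file_id() construct above is an X-macro: the same id() list is expanded once with __fid_enumify to produce the enum and once with __fid_stringify to produce the matching string table, so the two cannot drift apart. A self-contained sketch of the pattern with made-up names:

#define EXAMPLE_COLORS(id)	\
	id(RED, red)		\
	id(GREEN, green)	\
	id(MAX, )

#define __color_enumify(ENUM, dummy)	COLOR_ ## ENUM,
#define __color_stringify(dummy, str)	#str,

enum example_color { EXAMPLE_COLORS(__color_enumify) };	/* COLOR_RED, ... */

static const char * const example_color_str[] = {
	EXAMPLE_COLORS(__color_stringify)		/* "red", "green", "" */
};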
@@ -2693,7 +2766,7 @@ extern ssize_t generic_write_checks(struct kiocb *, struct iov_iter *);
extern ssize_t generic_file_read_iter(struct kiocb *, struct iov_iter *);
extern ssize_t __generic_file_write_iter(struct kiocb *, struct iov_iter *);
extern ssize_t generic_file_write_iter(struct kiocb *, struct iov_iter *);
-extern ssize_t generic_file_direct_write(struct kiocb *, struct iov_iter *, loff_t);
+extern ssize_t generic_file_direct_write(struct kiocb *, struct iov_iter *);
extern ssize_t generic_perform_write(struct file *, struct iov_iter *, loff_t);
ssize_t vfs_iter_read(struct file *file, struct iov_iter *iter, loff_t *ppos);
@@ -2756,18 +2829,17 @@ void dio_end_io(struct bio *bio, int error);
ssize_t __blockdev_direct_IO(struct kiocb *iocb, struct inode *inode,
struct block_device *bdev, struct iov_iter *iter,
- loff_t offset, get_block_t get_block,
+ get_block_t get_block,
dio_iodone_t end_io, dio_submit_t submit_io,
int flags);
static inline ssize_t blockdev_direct_IO(struct kiocb *iocb,
struct inode *inode,
- struct iov_iter *iter, loff_t offset,
+ struct iov_iter *iter,
get_block_t get_block)
{
return __blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev, iter,
- offset, get_block, NULL, NULL,
- DIO_LOCKING | DIO_SKIP_HOLES);
+ get_block, NULL, NULL, DIO_LOCKING | DIO_SKIP_HOLES);
}
#endif
@@ -2933,6 +3005,10 @@ static inline int iocb_flags(struct file *file)
res |= IOCB_APPEND;
if (io_is_direct(file))
res |= IOCB_DIRECT;
+ if ((file->f_flags & O_DSYNC) || IS_SYNC(file->f_mapping->host))
+ res |= IOCB_DSYNC;
+ if (file->f_flags & __O_SYNC)
+ res |= IOCB_SYNC;
return res;
}
@@ -3094,6 +3170,13 @@ static inline bool dir_relax(struct inode *inode)
return !IS_DEADDIR(inode);
}
+static inline bool dir_relax_shared(struct inode *inode)
+{
+ inode_unlock_shared(inode);
+ inode_lock_shared(inode);
+ return !IS_DEADDIR(inode);
+}
+
extern bool path_noexec(const struct path *path);
extern void inode_nohighmem(struct inode *inode);
diff --git a/include/linux/fscrypto.h b/include/linux/fscrypto.h
index cd91f75de49b..6027f6bbb061 100644
--- a/include/linux/fscrypto.h
+++ b/include/linux/fscrypto.h
@@ -263,9 +263,9 @@ static inline void fscrypt_set_d_op(struct dentry *dentry)
extern struct kmem_cache *fscrypt_info_cachep;
int fscrypt_initialize(void);
-extern struct fscrypt_ctx *fscrypt_get_ctx(struct inode *);
+extern struct fscrypt_ctx *fscrypt_get_ctx(struct inode *, gfp_t);
extern void fscrypt_release_ctx(struct fscrypt_ctx *);
-extern struct page *fscrypt_encrypt_page(struct inode *, struct page *);
+extern struct page *fscrypt_encrypt_page(struct inode *, struct page *, gfp_t);
extern int fscrypt_decrypt_page(struct page *);
extern void fscrypt_decrypt_bio_pages(struct fscrypt_ctx *, struct bio *);
extern void fscrypt_pullback_bio_page(struct page **, bool);
@@ -299,7 +299,8 @@ extern int fscrypt_fname_usr_to_disk(struct inode *, const struct qstr *,
#endif
/* crypto.c */
-static inline struct fscrypt_ctx *fscrypt_notsupp_get_ctx(struct inode *i)
+static inline struct fscrypt_ctx *fscrypt_notsupp_get_ctx(struct inode *i,
+ gfp_t f)
{
return ERR_PTR(-EOPNOTSUPP);
}
@@ -310,7 +311,7 @@ static inline void fscrypt_notsupp_release_ctx(struct fscrypt_ctx *c)
}
static inline struct page *fscrypt_notsupp_encrypt_page(struct inode *i,
- struct page *p)
+ struct page *p, gfp_t f)
{
return ERR_PTR(-EOPNOTSUPP);
}
diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h
index 1259e53d9296..29f917517299 100644
--- a/include/linux/fsnotify_backend.h
+++ b/include/linux/fsnotify_backend.h
@@ -359,8 +359,6 @@ extern void fsnotify_clear_vfsmount_marks_by_group(struct fsnotify_group *group)
extern void fsnotify_clear_inode_marks_by_group(struct fsnotify_group *group);
/* run all the marks in a group, and clear all of the marks where mark->flags & flags is true */
extern void fsnotify_clear_marks_by_group_flags(struct fsnotify_group *group, unsigned int flags);
-/* run all the marks in a group, and flag them to be freed */
-extern void fsnotify_clear_marks_by_group(struct fsnotify_group *group);
extern void fsnotify_get_mark(struct fsnotify_mark *mark);
extern void fsnotify_put_mark(struct fsnotify_mark *mark);
extern void fsnotify_unmount_inodes(struct super_block *sb);
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index dea12a6e413b..66a36a815f0a 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -455,6 +455,7 @@ int ftrace_update_record(struct dyn_ftrace *rec, int enable);
int ftrace_test_record(struct dyn_ftrace *rec, int enable);
void ftrace_run_stop_machine(int command);
unsigned long ftrace_location(unsigned long ip);
+unsigned long ftrace_location_range(unsigned long start, unsigned long end);
unsigned long ftrace_get_addr_new(struct dyn_ftrace *rec);
unsigned long ftrace_get_addr_curr(struct dyn_ftrace *rec);
diff --git a/include/linux/genl_magic_struct.h b/include/linux/genl_magic_struct.h
index eecd19b37001..6270a56e5edc 100644
--- a/include/linux/genl_magic_struct.h
+++ b/include/linux/genl_magic_struct.h
@@ -62,6 +62,11 @@ extern void CONCAT_(GENL_MAGIC_FAMILY, _genl_unregister)(void);
/* MAGIC helpers {{{2 */
+static inline int nla_put_u64_0pad(struct sk_buff *skb, int attrtype, u64 value)
+{
+ return nla_put_64bit(skb, attrtype, sizeof(u64), &value, 0);
+}
+
/* possible field types */
#define __flg_field(attr_nr, attr_flag, name) \
__field(attr_nr, attr_flag, name, NLA_U8, char, \
@@ -80,7 +85,7 @@ extern void CONCAT_(GENL_MAGIC_FAMILY, _genl_unregister)(void);
nla_get_u32, nla_put_u32, true)
#define __u64_field(attr_nr, attr_flag, name) \
__field(attr_nr, attr_flag, name, NLA_U64, __u64, \
- nla_get_u64, nla_put_u64, false)
+ nla_get_u64, nla_put_u64_0pad, false)
#define __str_field(attr_nr, attr_flag, name, maxlen) \
__array(attr_nr, attr_flag, name, NLA_NUL_STRING, char, maxlen, \
nla_strlcpy, nla_put, false)
diff --git a/include/linux/gpio/driver.h b/include/linux/gpio/driver.h
index bee976f82788..50882e09289b 100644
--- a/include/linux/gpio/driver.h
+++ b/include/linux/gpio/driver.h
@@ -20,6 +20,18 @@ struct gpio_device;
#ifdef CONFIG_GPIOLIB
/**
+ * enum single_ended_mode - mode for single ended operation
+ * @LINE_MODE_PUSH_PULL: normal mode for a GPIO line, drive actively high/low
+ * @LINE_MODE_OPEN_DRAIN: set line to be open drain
+ * @LINE_MODE_OPEN_SOURCE: set line to be open source
+ */
+enum single_ended_mode {
+ LINE_MODE_PUSH_PULL,
+ LINE_MODE_OPEN_DRAIN,
+ LINE_MODE_OPEN_SOURCE,
+};
+
+/**
* struct gpio_chip - abstract a GPIO controller
* @label: a functional name for the GPIO device, such as a part
* number or the name of the SoC IP-block implementing it.
@@ -38,7 +50,15 @@ struct gpio_device;
* @set: assigns output value for signal "offset"
* @set_multiple: assigns output values for multiple signals defined by "mask"
* @set_debounce: optional hook for setting debounce time for specified gpio in
- * interrupt triggered gpio chips
+ * interrupt triggered gpio chips
+ * @set_single_ended: optional hook for setting a line as open drain, open
+ * source, or non-single ended (i.e. restored from open drain/source to
+ * normal push-pull mode). This should be implemented if the hardware
+ * supports open drain or open source settings. gpiolib will otherwise
+ * try to emulate open drain/source by not actively driving lines
+ * high/low if a consumer requests this. The driver may return -ENOTSUPP
+ * if it e.g. supports only open drain but not open source and is called
+ * with LINE_MODE_OPEN_SOURCE as the mode argument.
* @to_irq: optional hook supporting non-static gpio_to_irq() mappings;
* implementation may not sleep
* @dbg_show: optional routine to show contents in debugfs; default code
@@ -130,6 +150,9 @@ struct gpio_chip {
int (*set_debounce)(struct gpio_chip *chip,
unsigned offset,
unsigned debounce);
+ int (*set_single_ended)(struct gpio_chip *chip,
+ unsigned offset,
+ enum single_ended_mode mode);
int (*to_irq)(struct gpio_chip *chip,
unsigned offset);
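A hedged sketch of how a controller driver that only supports open drain might implement the new set_single_ended() hook; struct example_gpio, example_write_bit() and REG_OPEN_DRAIN are hypothetical stand-ins for driver-specific code:

static int example_gpio_set_single_ended(struct gpio_chip *chip,
					 unsigned offset,
					 enum single_ended_mode mode)
{
	struct example_gpio *g = gpiochip_get_data(chip);

	switch (mode) {
	case LINE_MODE_PUSH_PULL:
		return example_write_bit(g, REG_OPEN_DRAIN, offset, false);
	case LINE_MODE_OPEN_DRAIN:
		return example_write_bit(g, REG_OPEN_DRAIN, offset, true);
	default:
		/* open source is not supported by this hardware */
		return -ENOTSUPP;
	}
}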
diff --git a/include/linux/hash.h b/include/linux/hash.h
index 1afde47e1528..79c52fa81cac 100644
--- a/include/linux/hash.h
+++ b/include/linux/hash.h
@@ -32,12 +32,28 @@
#error Wordsize not 32 or 64
#endif
+/*
+ * The above primes are actively bad for hashing, since they are
+ * too sparse. The 32-bit one is mostly OK; the 64-bit one causes
+ * real problems. Besides, the "prime" part is pointless for the
+ * multiplicative hash.
+ *
+ * Although a random odd number will do, it turns out that the golden
+ * ratio phi = (sqrt(5)-1)/2, or its negative, has particularly nice
+ * properties.
+ *
+ * These are the negative, (1 - phi) = (phi^2) = (3 - sqrt(5))/2.
+ * (See Knuth vol 3, section 6.4, exercise 9.)
+ */
+#define GOLDEN_RATIO_32 0x61C88647
+#define GOLDEN_RATIO_64 0x61C8864680B583EBull
+
static __always_inline u64 hash_64(u64 val, unsigned int bits)
{
u64 hash = val;
-#if defined(CONFIG_ARCH_HAS_FAST_MULTIPLIER) && BITS_PER_LONG == 64
- hash = hash * GOLDEN_RATIO_PRIME_64;
+#if BITS_PER_LONG == 64
+ hash = hash * GOLDEN_RATIO_64;
#else
/* Sigh, gcc can't optimise this alone like it does for 32 bits. */
u64 n = hash;
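The new constants feed a plain multiplicative hash: multiply by a fixed, well-mixing odd constant and keep the top bits. A minimal sketch of the 32-bit form using GOLDEN_RATIO_32 from above (example_hash_32 is illustrative; hash_32() in the real header follows the same shape):

static inline u32 example_hash_32(u32 val, unsigned int bits)
{
	/* The high bits of val * constant are the well-mixed ones,
	 * so shift them down into the [0, 2^bits) range. */
	return (val * GOLDEN_RATIO_32) >> (32 - bits);
}

/* e.g. example_hash_32(key, 10) indexes a 1024-bucket hash table */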
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index 79b0ef6aaa14..419fb9e03447 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -28,9 +28,7 @@ extern int zap_huge_pmd(struct mmu_gather *tlb,
extern int mincore_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
unsigned long addr, unsigned long end,
unsigned char *vec);
-extern bool move_huge_pmd(struct vm_area_struct *vma,
- struct vm_area_struct *new_vma,
- unsigned long old_addr,
+extern bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
unsigned long new_addr, unsigned long old_end,
pmd_t *old_pmd, pmd_t *new_pmd);
extern int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
@@ -127,7 +125,7 @@ static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
if (pmd_trans_huge(*pmd) || pmd_devmap(*pmd))
return __pmd_trans_huge_lock(pmd, vma);
else
- return false;
+ return NULL;
}
static inline int hpage_nr_pages(struct page *page)
{
@@ -152,6 +150,7 @@ static inline bool is_huge_zero_pmd(pmd_t pmd)
}
struct page *get_huge_zero_page(void);
+void put_huge_zero_page(void);
#else /* CONFIG_TRANSPARENT_HUGEPAGE */
#define HPAGE_PMD_SHIFT ({ BUILD_BUG(); 0; })
@@ -208,6 +207,10 @@ static inline bool is_huge_zero_page(struct page *page)
return false;
}
+static inline void put_huge_zero_page(void)
+{
+ BUILD_BUG();
+}
static inline struct page *follow_devmap_pmd(struct vm_area_struct *vma,
unsigned long addr, pmd_t *pmd, int flags)
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 7d953c2542a8..e44c57876e89 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -338,6 +338,7 @@ int huge_add_to_page_cache(struct page *page, struct address_space *mapping,
/* arch callback */
int __init alloc_bootmem_huge_page(struct hstate *h);
+void __init hugetlb_bad_size(void);
void __init hugetlb_add_hstate(unsigned order);
struct hstate *size_to_hstate(unsigned long size);
diff --git a/include/linux/hugetlb_inline.h b/include/linux/hugetlb_inline.h
index 2bb681fbeb35..a4e7ca0f3585 100644
--- a/include/linux/hugetlb_inline.h
+++ b/include/linux/hugetlb_inline.h
@@ -5,16 +5,16 @@
#include <linux/mm.h>
-static inline int is_vm_hugetlb_page(struct vm_area_struct *vma)
+static inline bool is_vm_hugetlb_page(struct vm_area_struct *vma)
{
return !!(vma->vm_flags & VM_HUGETLB);
}
#else
-static inline int is_vm_hugetlb_page(struct vm_area_struct *vma)
+static inline bool is_vm_hugetlb_page(struct vm_area_struct *vma)
{
- return 0;
+ return false;
}
#endif
diff --git a/include/linux/i2c-mux.h b/include/linux/i2c-mux.h
index b5f9a007a3ab..d4c1d12f900d 100644
--- a/include/linux/i2c-mux.h
+++ b/include/linux/i2c-mux.h
@@ -27,22 +27,49 @@
#ifdef __KERNEL__
+#include <linux/bitops.h>
+
+struct i2c_mux_core {
+ struct i2c_adapter *parent;
+ struct device *dev;
+ bool mux_locked;
+
+ void *priv;
+
+ int (*select)(struct i2c_mux_core *, u32 chan_id);
+ int (*deselect)(struct i2c_mux_core *, u32 chan_id);
+
+ int num_adapters;
+ int max_adapters;
+ struct i2c_adapter *adapter[0];
+};
+
+struct i2c_mux_core *i2c_mux_alloc(struct i2c_adapter *parent,
+ struct device *dev, int max_adapters,
+ int sizeof_priv, u32 flags,
+ int (*select)(struct i2c_mux_core *, u32),
+ int (*deselect)(struct i2c_mux_core *, u32));
+
+/* flags for i2c_mux_alloc */
+#define I2C_MUX_LOCKED BIT(0)
+
+static inline void *i2c_mux_priv(struct i2c_mux_core *muxc)
+{
+ return muxc->priv;
+}
+
+struct i2c_adapter *i2c_root_adapter(struct device *dev);
+
/*
- * Called to create a i2c bus on a multiplexed bus segment.
- * The mux_dev and chan_id parameters are passed to the select
- * and deselect callback functions to perform hardware-specific
- * mux control.
+ * Called to create an i2c bus on a multiplexed bus segment.
+ * The chan_id parameter is passed to the select and deselect
+ * callback functions to perform hardware-specific mux control.
*/
-struct i2c_adapter *i2c_add_mux_adapter(struct i2c_adapter *parent,
- struct device *mux_dev,
- void *mux_priv, u32 force_nr, u32 chan_id,
- unsigned int class,
- int (*select) (struct i2c_adapter *,
- void *mux_dev, u32 chan_id),
- int (*deselect) (struct i2c_adapter *,
- void *mux_dev, u32 chan_id));
-
-void i2c_del_mux_adapter(struct i2c_adapter *adap);
+int i2c_mux_add_adapter(struct i2c_mux_core *muxc,
+ u32 force_nr, u32 chan_id,
+ unsigned int class);
+
+void i2c_mux_del_adapters(struct i2c_mux_core *muxc);
#endif /* __KERNEL__ */
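Rough shape of a mux driver moving to the core API above: allocate one i2c_mux_core carrying the select/deselect callbacks, then register one child adapter per channel. struct example_mux, example_mux_set_channel() and the channel count of 4 are assumptions for the sketch:

static int example_mux_select(struct i2c_mux_core *muxc, u32 chan)
{
	struct example_mux *mux = i2c_mux_priv(muxc);

	return example_mux_set_channel(mux, chan);
}

static int example_mux_probe(struct i2c_adapter *parent, struct device *dev)
{
	struct i2c_mux_core *muxc;
	int i, ret;

	muxc = i2c_mux_alloc(parent, dev, 4, sizeof(struct example_mux),
			     0, example_mux_select, NULL);
	if (!muxc)
		return -ENOMEM;

	for (i = 0; i < 4; i++) {
		ret = i2c_mux_add_adapter(muxc, 0, i, 0);
		if (ret) {
			i2c_mux_del_adapters(muxc);
			return ret;
		}
	}
	return 0;
}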
diff --git a/include/linux/i2c.h b/include/linux/i2c.h
index 200cf13b00f6..96a25ae14494 100644
--- a/include/linux/i2c.h
+++ b/include/linux/i2c.h
@@ -524,6 +524,7 @@ struct i2c_adapter {
/* data fields that are valid for all devices */
struct rt_mutex bus_lock;
+ struct rt_mutex mux_lock;
int timeout; /* in jiffies */
int retries;
@@ -538,6 +539,10 @@ struct i2c_adapter {
struct i2c_bus_recovery_info *bus_recovery_info;
const struct i2c_adapter_quirks *quirks;
+
+ void (*lock_bus)(struct i2c_adapter *, unsigned int flags);
+ int (*trylock_bus)(struct i2c_adapter *, unsigned int flags);
+ void (*unlock_bus)(struct i2c_adapter *, unsigned int flags);
};
#define to_i2c_adapter(d) container_of(d, struct i2c_adapter, dev)
@@ -567,8 +572,44 @@ i2c_parent_is_i2c_adapter(const struct i2c_adapter *adapter)
int i2c_for_each_dev(void *data, int (*fn)(struct device *, void *));
/* Adapter locking functions, exported for shared pin cases */
-void i2c_lock_adapter(struct i2c_adapter *);
-void i2c_unlock_adapter(struct i2c_adapter *);
+#define I2C_LOCK_ROOT_ADAPTER BIT(0)
+#define I2C_LOCK_SEGMENT BIT(1)
+
+/**
+ * i2c_lock_bus - Get exclusive access to an I2C bus segment
+ * @adapter: Target I2C bus segment
+ * @flags: I2C_LOCK_ROOT_ADAPTER locks the root i2c adapter, I2C_LOCK_SEGMENT
+ * locks only this branch in the adapter tree
+ */
+static inline void
+i2c_lock_bus(struct i2c_adapter *adapter, unsigned int flags)
+{
+ adapter->lock_bus(adapter, flags);
+}
+
+/**
+ * i2c_unlock_bus - Release exclusive access to an I2C bus segment
+ * @adapter: Target I2C bus segment
+ * @flags: I2C_LOCK_ROOT_ADAPTER unlocks the root i2c adapter, I2C_LOCK_SEGMENT
+ * unlocks only this branch in the adapter tree
+ */
+static inline void
+i2c_unlock_bus(struct i2c_adapter *adapter, unsigned int flags)
+{
+ adapter->unlock_bus(adapter, flags);
+}
+
+static inline void
+i2c_lock_adapter(struct i2c_adapter *adapter)
+{
+ i2c_lock_bus(adapter, I2C_LOCK_ROOT_ADAPTER);
+}
+
+static inline void
+i2c_unlock_adapter(struct i2c_adapter *adapter)
+{
+ i2c_unlock_bus(adapter, I2C_LOCK_ROOT_ADAPTER);
+}
/*flags for the client struct: */
#define I2C_CLIENT_PEC 0x04 /* Use Packet Error Checking */
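Usage sketch for the per-adapter locking ops above: a caller can quiesce either the whole root adapter or just its own branch of a mux tree. example_toggle_shared_pin() is a hypothetical board-specific helper:

static void example_use_shared_pin(struct i2c_adapter *adap)
{
	/* Exclude every master on the whole adapter tree ... */
	i2c_lock_bus(adap, I2C_LOCK_ROOT_ADAPTER);
	example_toggle_shared_pin();
	i2c_unlock_bus(adap, I2C_LOCK_ROOT_ADAPTER);

	/* ... whereas a mux-locked segment only quiesces its own branch. */
	i2c_lock_bus(adap, I2C_LOCK_SEGMENT);
	/* transfers on this segment would go here */
	i2c_unlock_bus(adap, I2C_LOCK_SEGMENT);
}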
@@ -654,6 +695,11 @@ static inline int i2c_adapter_id(struct i2c_adapter *adap)
return adap->nr;
}
+static inline u8 i2c_8bit_addr_from_msg(const struct i2c_msg *msg)
+{
+ return (msg->addr << 1) | (msg->flags & I2C_M_RD ? 1 : 0);
+}
+
/**
* module_i2c_driver() - Helper macro for registering a modular I2C driver
* @__i2c_driver: i2c_driver struct
diff --git a/include/linux/i2c/sx150x.h b/include/linux/i2c/sx150x.h
deleted file mode 100644
index 52baa79d69a7..000000000000
--- a/include/linux/i2c/sx150x.h
+++ /dev/null
@@ -1,82 +0,0 @@
-/*
- * Driver for the Semtech SX150x I2C GPIO Expanders
- *
- * Copyright (c) 2010, Code Aurora Forum. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- */
-#ifndef __LINUX_I2C_SX150X_H
-#define __LINUX_I2C_SX150X_H
-
-/**
- * struct sx150x_platform_data - config data for SX150x driver
- * @gpio_base: The index number of the first GPIO assigned to this
- * GPIO expander. The expander will create a block of
- * consecutively numbered gpios beginning at the given base,
- * with the size of the block depending on the model of the
- * expander chip.
- * @oscio_is_gpo: If set to true, the driver will configure OSCIO as a GPO
- * instead of as an oscillator, increasing the size of the
- * GP(I)O pool created by this expander by one. The
- * output-only GPO pin will be added at the end of the block.
- * @io_pullup_ena: A bit-mask which enables or disables the pull-up resistor
- * for each IO line in the expander. Setting the bit at
- * position n will enable the pull-up for the IO at
- * the corresponding offset. For chips with fewer than
- * 16 IO pins, high-end bits are ignored.
- * @io_pulldn_ena: A bit-mask which enables-or disables the pull-down
- * resistor for each IO line in the expander. Setting the
- * bit at position n will enable the pull-down for the IO at
- * the corresponding offset. For chips with fewer than
- * 16 IO pins, high-end bits are ignored.
- * @io_open_drain_ena: A bit-mask which enables-or disables open-drain
- * operation for each IO line in the expander. Setting the
- * bit at position n enables open-drain operation for
- * the IO at the corresponding offset. Clearing the bit
- * enables regular push-pull operation for that IO.
- * For chips with fewer than 16 IO pins, high-end bits
- * are ignored.
- * @io_polarity: A bit-mask which enables polarity inversion for each IO line
- * in the expander. Setting the bit at position n inverts
- * the polarity of that IO line, while clearing it results
- * in normal polarity. For chips with fewer than 16 IO pins,
- * high-end bits are ignored.
- * @irq_summary: The 'summary IRQ' line to which the GPIO expander's INT line
- * is connected, via which it reports interrupt events
- * across all GPIO lines. This must be a real,
- * pre-existing IRQ line.
- * Setting this value < 0 disables the irq_chip functionality
- * of the driver.
- * @irq_base: The first 'virtual IRQ' line at which our block of GPIO-based
- * IRQ lines will appear. Similarly to gpio_base, the expander
- * will create a block of irqs beginning at this number.
- * This value is ignored if irq_summary is < 0.
- * @reset_during_probe: If set to true, the driver will trigger a full
- * reset of the chip at the beginning of the probe
- * in order to place it in a known state.
- */
-struct sx150x_platform_data {
- unsigned gpio_base;
- bool oscio_is_gpo;
- u16 io_pullup_ena;
- u16 io_pulldn_ena;
- u16 io_open_drain_ena;
- u16 io_polarity;
- int irq_summary;
- unsigned irq_base;
- bool reset_during_probe;
-};
-
-#endif /* __LINUX_I2C_SX150X_H */
diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h
index 3b1f6cef9513..b118744d3382 100644
--- a/include/linux/ieee80211.h
+++ b/include/linux/ieee80211.h
@@ -7,6 +7,7 @@
* Copyright (c) 2005, Devicescape Software, Inc.
* Copyright (c) 2006, Michael Wu <flamingice@sourmilk.net>
* Copyright (c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright (c) 2016 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -163,6 +164,9 @@ static inline u16 ieee80211_sn_sub(u16 sn1, u16 sn2)
/* 30 byte 4 addr hdr, 2 byte QoS, 2304 byte MSDU, 12 byte crypt, 4 byte FCS */
#define IEEE80211_MAX_FRAME_LEN 2352
+/* Maximal size of an A-MSDU that can be transported in a HT BA session */
+#define IEEE80211_MAX_MPDU_LEN_HT_BA 4095
+
/* Maximal size of an A-MSDU */
#define IEEE80211_MAX_MPDU_LEN_HT_3839 3839
#define IEEE80211_MAX_MPDU_LEN_HT_7935 7935
@@ -637,6 +641,16 @@ static inline bool ieee80211_is_first_frag(__le16 seq_ctrl)
return (seq_ctrl & cpu_to_le16(IEEE80211_SCTL_FRAG)) == 0;
}
+/**
+ * ieee80211_is_frag - check if a frame is a fragment
+ * @hdr: 802.11 header of the frame
+ */
+static inline bool ieee80211_is_frag(struct ieee80211_hdr *hdr)
+{
+ return ieee80211_has_morefrags(hdr->frame_control) ||
+ hdr->seq_ctrl & cpu_to_le16(IEEE80211_SCTL_FRAG);
+}
+
struct ieee80211s_hdr {
u8 flags;
u8 ttl;
@@ -1011,6 +1025,16 @@ struct ieee80211_mgmt {
u8 tpc_elem_length;
struct ieee80211_tpc_report_ie tpc;
} __packed tpc_report;
+ struct {
+ u8 action_code;
+ u8 dialog_token;
+ u8 follow_up;
+ u8 tod[6];
+ u8 toa[6];
+ __le16 tod_error;
+ __le16 toa_error;
+ u8 variable[0];
+ } __packed ftm;
} u;
} __packed action;
} u;
diff --git a/include/linux/ieee802154.h b/include/linux/ieee802154.h
index d3e415674dac..acedbb68a5a3 100644
--- a/include/linux/ieee802154.h
+++ b/include/linux/ieee802154.h
@@ -47,6 +47,7 @@
#define IEEE802154_ADDR_SHORT_UNSPEC 0xfffe
#define IEEE802154_EXTENDED_ADDR_LEN 8
+#define IEEE802154_SHORT_ADDR_LEN 2
#define IEEE802154_LIFS_PERIOD 40
#define IEEE802154_SIFS_PERIOD 12
@@ -218,6 +219,7 @@ enum {
/* frame control handling */
#define IEEE802154_FCTL_FTYPE 0x0003
#define IEEE802154_FCTL_ACKREQ 0x0020
+#define IEEE802154_FCTL_SECEN 0x0004
#define IEEE802154_FCTL_INTRA_PAN 0x0040
#define IEEE802154_FTYPE_DATA 0x0001
@@ -233,6 +235,15 @@ static inline int ieee802154_is_data(__le16 fc)
}
/**
+ * ieee802154_is_secen - check if Security bit is set
+ * @fc: frame control bytes in little-endian byteorder
+ */
+static inline bool ieee802154_is_secen(__le16 fc)
+{
+ return fc & cpu_to_le16(IEEE802154_FCTL_SECEN);
+}
+
+/**
* ieee802154_is_ackreq - check if acknowledgment request bit is set
* @fc: frame control bytes in little-endian byteorder
*/
@@ -260,17 +271,17 @@ static inline bool ieee802154_is_intra_pan(__le16 fc)
*
* @len: psdu len with (MHR + payload + MFR)
*/
-static inline bool ieee802154_is_valid_psdu_len(const u8 len)
+static inline bool ieee802154_is_valid_psdu_len(u8 len)
{
return (len == IEEE802154_ACK_PSDU_LEN ||
(len >= IEEE802154_MIN_PSDU_LEN && len <= IEEE802154_MTU));
}
/**
- * ieee802154_is_valid_psdu_len - check if extended addr is valid
+ * ieee802154_is_valid_extended_unicast_addr - check if extended addr is valid
* @addr: extended addr to check
*/
-static inline bool ieee802154_is_valid_extended_unicast_addr(const __le64 addr)
+static inline bool ieee802154_is_valid_extended_unicast_addr(__le64 addr)
{
/* Bail out if the address is all zero, or if the group
* address bit is set.
@@ -280,6 +291,34 @@ static inline bool ieee802154_is_valid_extended_unicast_addr(const __le64 addr)
}
/**
+ * ieee802154_is_broadcast_short_addr - check if short addr is broadcast
+ * @addr: short addr to check
+ */
+static inline bool ieee802154_is_broadcast_short_addr(__le16 addr)
+{
+ return (addr == cpu_to_le16(IEEE802154_ADDR_SHORT_BROADCAST));
+}
+
+/**
+ * ieee802154_is_unspec_short_addr - check if short addr is unspecified
+ * @addr: short addr to check
+ */
+static inline bool ieee802154_is_unspec_short_addr(__le16 addr)
+{
+ return (addr == cpu_to_le16(IEEE802154_ADDR_SHORT_UNSPEC));
+}
+
+/**
+ * ieee802154_is_valid_src_short_addr - check if source short address is valid
+ * @addr: short addr to check
+ */
+static inline bool ieee802154_is_valid_src_short_addr(__le16 addr)
+{
+ return !(ieee802154_is_broadcast_short_addr(addr) ||
+ ieee802154_is_unspec_short_addr(addr));
+}
+
+/**
* ieee802154_random_extended_addr - generates a random extended address
* @addr: extended addr pointer to place the random address
*/
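The short-address helpers above compose naturally in a receive filter; a hedged sketch (example_accept_dst_short is not part of this patch):

static bool example_accept_dst_short(__le16 dst, __le16 own)
{
	/* Accept broadcast and frames addressed to our own short address,
	 * but never an unspecified destination. */
	if (ieee802154_is_broadcast_short_addr(dst))
		return true;
	if (ieee802154_is_unspec_short_addr(dst))
		return false;
	return dst == own;
}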
diff --git a/include/linux/if_ether.h b/include/linux/if_ether.h
index d5569734f672..548fd535fd02 100644
--- a/include/linux/if_ether.h
+++ b/include/linux/if_ether.h
@@ -28,6 +28,11 @@ static inline struct ethhdr *eth_hdr(const struct sk_buff *skb)
return (struct ethhdr *)skb_mac_header(skb);
}
+static inline struct ethhdr *inner_eth_hdr(const struct sk_buff *skb)
+{
+ return (struct ethhdr *)skb_inner_mac_header(skb);
+}
+
int eth_header_parse(const struct sk_buff *skb, unsigned char *haddr);
extern ssize_t sysfs_format_mac(char *buf, const unsigned char *addr, int len);
diff --git a/include/linux/ima.h b/include/linux/ima.h
index e6516cbbe9bf..0eb7c2e7f0d6 100644
--- a/include/linux/ima.h
+++ b/include/linux/ima.h
@@ -21,6 +21,7 @@ extern int ima_file_mmap(struct file *file, unsigned long prot);
extern int ima_read_file(struct file *file, enum kernel_read_file_id id);
extern int ima_post_read_file(struct file *file, void *buf, loff_t size,
enum kernel_read_file_id id);
+extern void ima_post_path_mknod(struct dentry *dentry);
#else
static inline int ima_bprm_check(struct linux_binprm *bprm)
@@ -54,6 +55,11 @@ static inline int ima_post_read_file(struct file *file, void *buf, loff_t size,
return 0;
}
+static inline void ima_post_path_mknod(struct dentry *dentry)
+{
+ return;
+}
+
#endif /* CONFIG_IMA */
#ifdef CONFIG_IMA_APPRAISE
diff --git a/include/linux/io-64-nonatomic-hi-lo.h b/include/linux/io-64-nonatomic-hi-lo.h
index 11d7e840d913..defcc4644ce3 100644
--- a/include/linux/io-64-nonatomic-hi-lo.h
+++ b/include/linux/io-64-nonatomic-hi-lo.h
@@ -21,6 +21,23 @@ static inline void hi_lo_writeq(__u64 val, volatile void __iomem *addr)
writel(val, addr);
}
+static inline __u64 hi_lo_readq_relaxed(const volatile void __iomem *addr)
+{
+ const volatile u32 __iomem *p = addr;
+ u32 low, high;
+
+ high = readl_relaxed(p + 1);
+ low = readl_relaxed(p);
+
+ return low + ((u64)high << 32);
+}
+
+static inline void hi_lo_writeq_relaxed(__u64 val, volatile void __iomem *addr)
+{
+ writel_relaxed(val >> 32, addr + 4);
+ writel_relaxed(val, addr);
+}
+
#ifndef readq
#define readq hi_lo_readq
#endif
@@ -29,4 +46,12 @@ static inline void hi_lo_writeq(__u64 val, volatile void __iomem *addr)
#define writeq hi_lo_writeq
#endif
+#ifndef readq_relaxed
+#define readq_relaxed hi_lo_readq_relaxed
+#endif
+
+#ifndef writeq_relaxed
+#define writeq_relaxed hi_lo_writeq_relaxed
+#endif
+
#endif /* _LINUX_IO_64_NONATOMIC_HI_LO_H_ */
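With the *_relaxed fallbacks above, a driver can keep using readq_relaxed() for 64-bit registers even on 32-bit builds, where it decays into two readl_relaxed() calls in the hi-then-lo order this header defines. Sketch only; EXAMPLE_COUNTER_REG is a made-up offset:

#include <linux/io-64-nonatomic-hi-lo.h>

#define EXAMPLE_COUNTER_REG	0x40	/* hypothetical 64-bit register */

static u64 example_read_counter(void __iomem *base)
{
	/* native readq_relaxed() on 64-bit, hi_lo_readq_relaxed() on 32-bit */
	return readq_relaxed(base + EXAMPLE_COUNTER_REG);
}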
diff --git a/include/linux/io-64-nonatomic-lo-hi.h b/include/linux/io-64-nonatomic-lo-hi.h
index 1a4315f97360..084461a4e5ab 100644
--- a/include/linux/io-64-nonatomic-lo-hi.h
+++ b/include/linux/io-64-nonatomic-lo-hi.h
@@ -21,6 +21,23 @@ static inline void lo_hi_writeq(__u64 val, volatile void __iomem *addr)
writel(val >> 32, addr + 4);
}
+static inline __u64 lo_hi_readq_relaxed(const volatile void __iomem *addr)
+{
+ const volatile u32 __iomem *p = addr;
+ u32 low, high;
+
+ low = readl_relaxed(p);
+ high = readl_relaxed(p + 1);
+
+ return low + ((u64)high << 32);
+}
+
+static inline void lo_hi_writeq_relaxed(__u64 val, volatile void __iomem *addr)
+{
+ writel_relaxed(val, addr);
+ writel_relaxed(val >> 32, addr + 4);
+}
+
#ifndef readq
#define readq lo_hi_readq
#endif
@@ -29,4 +46,12 @@ static inline void lo_hi_writeq(__u64 val, volatile void __iomem *addr)
#define writeq lo_hi_writeq
#endif
+#ifndef readq_relaxed
+#define readq_relaxed lo_hi_readq_relaxed
+#endif
+
+#ifndef writeq_relaxed
+#define writeq_relaxed lo_hi_writeq_relaxed
+#endif
+
#endif /* _LINUX_IO_64_NONATOMIC_LO_HI_H_ */
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index a5c539fa5d2b..664683aedcce 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -30,6 +30,7 @@
#define IOMMU_WRITE (1 << 1)
#define IOMMU_CACHE (1 << 2) /* DMA cache coherency */
#define IOMMU_NOEXEC (1 << 3)
+#define IOMMU_MMIO (1 << 4) /* e.g. things like MSI doorbells */
struct iommu_ops;
struct iommu_group;
@@ -78,6 +79,7 @@ struct iommu_domain_geometry {
struct iommu_domain {
unsigned type;
const struct iommu_ops *ops;
+ unsigned long pgsize_bitmap; /* Bitmap of page sizes in use */
iommu_fault_handler_t handler;
void *handler_token;
struct iommu_domain_geometry geometry;
@@ -155,8 +157,7 @@ struct iommu_dm_region {
* @domain_set_windows: Set the number of windows for a domain
* @domain_get_windows: Return the number of windows for a domain
* @of_xlate: add OF master IDs to iommu grouping
- * @pgsize_bitmap: bitmap of supported page sizes
- * @priv: per-instance data private to the iommu driver
+ * @pgsize_bitmap: bitmap of all possible supported page sizes
*/
struct iommu_ops {
bool (*capable)(enum iommu_cap);
@@ -195,12 +196,9 @@ struct iommu_ops {
/* Get the number of windows per domain */
u32 (*domain_get_windows)(struct iommu_domain *domain);
-#ifdef CONFIG_OF_IOMMU
int (*of_xlate)(struct device *dev, struct of_phandle_args *args);
-#endif
unsigned long pgsize_bitmap;
- void *priv;
};
#define IOMMU_GROUP_NOTIFY_ADD_DEVICE 1 /* Device added */
diff --git a/include/linux/ioport.h b/include/linux/ioport.h
index 0b65543dc6cf..6230064d7f95 100644
--- a/include/linux/ioport.h
+++ b/include/linux/ioport.h
@@ -26,6 +26,9 @@ struct resource {
/*
* IO resources have these defined flags.
+ *
+ * PCI devices expose these flags to userspace in the "resource" sysfs file,
+ * so don't move them.
*/
#define IORESOURCE_BITS 0x000000ff /* Bus-specific bits */
@@ -110,6 +113,7 @@ struct resource {
/* PCI control bits. Shares IORESOURCE_BITS with above PCI ROM. */
#define IORESOURCE_PCI_FIXED (1<<4) /* Do not move resource */
+#define IORESOURCE_PCI_EA_BEI (1<<5) /* BAR Equivalent Indicator */
/*
* I/O Resource Descriptors
diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
index 7edc14fb66b6..5c91b0b055d4 100644
--- a/include/linux/ipv6.h
+++ b/include/linux/ipv6.h
@@ -63,7 +63,8 @@ struct ipv6_devconf {
} stable_secret;
__s32 use_oif_addrs_only;
__s32 keep_addr_on_down;
- void *sysctl;
+
+ struct ctl_table_header *sysctl_header;
};
struct ipv6_params {
@@ -117,14 +118,29 @@ struct inet6_skb_parm {
#define IP6SKB_ROUTERALERT 8
#define IP6SKB_FRAGMENTED 16
#define IP6SKB_HOPBYHOP 32
+#define IP6SKB_L3SLAVE 64
};
+#if defined(CONFIG_NET_L3_MASTER_DEV)
+static inline bool skb_l3mdev_slave(__u16 flags)
+{
+ return flags & IP6SKB_L3SLAVE;
+}
+#else
+static inline bool skb_l3mdev_slave(__u16 flags)
+{
+ return false;
+}
+#endif
+
#define IP6CB(skb) ((struct inet6_skb_parm*)((skb)->cb))
#define IP6CBMTU(skb) ((struct ip6_mtuinfo *)((skb)->cb))
static inline int inet6_iif(const struct sk_buff *skb)
{
- return IP6CB(skb)->iif;
+ bool l3_slave = skb_l3mdev_slave(IP6CB(skb)->flags);
+
+ return l3_slave ? skb->skb_iif : IP6CB(skb)->iif;
}
struct tcp6_request_sock {
diff --git a/include/linux/irq.h b/include/linux/irq.h
index c4de62348ff2..4d758a7c604a 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -530,6 +530,10 @@ static inline void irq_set_chip_and_handler(unsigned int irq, struct irq_chip *c
}
extern int irq_set_percpu_devid(unsigned int irq);
+extern int irq_set_percpu_devid_partition(unsigned int irq,
+ const struct cpumask *affinity);
+extern int irq_get_percpu_devid_partition(unsigned int irq,
+ struct cpumask *affinity);
extern void
__irq_set_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
diff --git a/include/linux/irqbypass.h b/include/linux/irqbypass.h
index 1551b5b2f4c2..f0f5d2671509 100644
--- a/include/linux/irqbypass.h
+++ b/include/linux/irqbypass.h
@@ -34,7 +34,7 @@ struct irq_bypass_consumer;
/**
* struct irq_bypass_producer - IRQ bypass producer definition
* @node: IRQ bypass manager private list management
- * @token: opaque token to match between producer and consumer
+ * @token: opaque token to match between producer and consumer (non-NULL)
* @irq: Linux IRQ number for the producer device
* @add_consumer: Connect the IRQ producer to an IRQ consumer (optional)
* @del_consumer: Disconnect the IRQ producer from an IRQ consumer (optional)
@@ -60,7 +60,7 @@ struct irq_bypass_producer {
/**
* struct irq_bypass_consumer - IRQ bypass consumer definition
* @node: IRQ bypass manager private list management
- * @token: opaque token to match between producer and consumer
+ * @token: opaque token to match between producer and consumer (non-NULL)
* @add_producer: Connect the IRQ consumer to an IRQ producer
* @del_producer: Disconnect the IRQ consumer from an IRQ producer
* @stop: Perform any quiesce operations necessary prior to add/del (optional)
diff --git a/include/linux/irqchip/arm-gic-common.h b/include/linux/irqchip/arm-gic-common.h
new file mode 100644
index 000000000000..c647b0547bcd
--- /dev/null
+++ b/include/linux/irqchip/arm-gic-common.h
@@ -0,0 +1,34 @@
+/*
+ * include/linux/irqchip/arm-gic-common.h
+ *
+ * Copyright (C) 2016 ARM Limited, All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __LINUX_IRQCHIP_ARM_GIC_COMMON_H
+#define __LINUX_IRQCHIP_ARM_GIC_COMMON_H
+
+#include <linux/types.h>
+#include <linux/ioport.h>
+
+enum gic_type {
+ GIC_V2,
+ GIC_V3,
+};
+
+struct gic_kvm_info {
+ /* GIC type */
+ enum gic_type type;
+ /* Virtual CPU interface */
+ struct resource vcpu;
+ /* Interrupt number */
+ unsigned int maint_irq;
+ /* Virtual control interface */
+ struct resource vctrl;
+};
+
+const struct gic_kvm_info *gic_get_kvm_info(void);
+
+#endif /* __LINUX_IRQCHIP_ARM_GIC_COMMON_H */
diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h
index d5d798b35c1f..9e6fdd33bdb2 100644
--- a/include/linux/irqchip/arm-gic-v3.h
+++ b/include/linux/irqchip/arm-gic-v3.h
@@ -102,8 +102,6 @@
#define GICR_SYNCR 0x00C0
#define GICR_MOVLPIR 0x0100
#define GICR_MOVALLR 0x0110
-#define GICR_ISACTIVER GICD_ISACTIVER
-#define GICR_ICACTIVER GICD_ICACTIVER
#define GICR_IDREGS GICD_IDREGS
#define GICR_PIDR2 GICD_PIDR2
diff --git a/include/linux/irqchip/irq-partition-percpu.h b/include/linux/irqchip/irq-partition-percpu.h
new file mode 100644
index 000000000000..87433a5d1285
--- /dev/null
+++ b/include/linux/irqchip/irq-partition-percpu.h
@@ -0,0 +1,59 @@
+/*
+ * Copyright (C) 2016 ARM Limited, All Rights Reserved.
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/fwnode.h>
+#include <linux/cpumask.h>
+#include <linux/irqdomain.h>
+
+struct partition_affinity {
+ cpumask_t mask;
+ void *partition_id;
+};
+
+struct partition_desc;
+
+#ifdef CONFIG_PARTITION_PERCPU
+int partition_translate_id(struct partition_desc *desc, void *partition_id);
+struct partition_desc *partition_create_desc(struct fwnode_handle *fwnode,
+ struct partition_affinity *parts,
+ int nr_parts,
+ int chained_irq,
+ const struct irq_domain_ops *ops);
+struct irq_domain *partition_get_domain(struct partition_desc *dsc);
+#else
+static inline int partition_translate_id(struct partition_desc *desc,
+ void *partition_id)
+{
+ return -EINVAL;
+}
+
+static inline
+struct partition_desc *partition_create_desc(struct fwnode_handle *fwnode,
+ struct partition_affinity *parts,
+ int nr_parts,
+ int chained_irq,
+ const struct irq_domain_ops *ops)
+{
+ return NULL;
+}
+
+static inline
+struct irq_domain *partition_get_domain(struct partition_desc *dsc)
+{
+ return NULL;
+}
+#endif
diff --git a/include/linux/irqchip/mips-gic.h b/include/linux/irqchip/mips-gic.h
index 80f89e4a29ac..81f930b0bca9 100644
--- a/include/linux/irqchip/mips-gic.h
+++ b/include/linux/irqchip/mips-gic.h
@@ -103,6 +103,7 @@
#define GIC_VPE_SWINT0_MAP_OFS 0x0054
#define GIC_VPE_SWINT1_MAP_OFS 0x0058
#define GIC_VPE_OTHER_ADDR_OFS 0x0080
+#define GIC_VP_IDENT_OFS 0x0088
#define GIC_VPE_WD_CONFIG0_OFS 0x0090
#define GIC_VPE_WD_COUNT0_OFS 0x0094
#define GIC_VPE_WD_INITIAL0_OFS 0x0098
@@ -211,6 +212,10 @@
#define GIC_VPE_SMASK_FDC_SHF 6
#define GIC_VPE_SMASK_FDC_MSK (MSK(1) << GIC_VPE_SMASK_FDC_SHF)
+/* GIC_VP_IDENT fields */
+#define GIC_VP_IDENT_VCNUM_SHF 0
+#define GIC_VP_IDENT_VCNUM_MSK (MSK(6) << GIC_VP_IDENT_VCNUM_SHF)
+
/* GIC nomenclature for Core Interrupt Pins. */
#define GIC_CPU_INT0 0 /* Core Interrupt 2 */
#define GIC_CPU_INT1 1 /* . */
@@ -278,4 +283,16 @@ static inline int gic_get_usm_range(struct resource *gic_usm_res)
#endif /* CONFIG_MIPS_GIC */
+/**
+ * gic_read_local_vp_id() - read the local VP's VCNUM
+ *
+ * Read the VCNUM of the local VP from the GIC_VP_IDENT register and
+ * return it to the caller. This ID should be used to refer to the VP
+ * via the GIC's VP-other region, or when calculating an offset to a
+ * bit representing the VP in interrupt masks.
+ *
+ * Return: The VCNUM value for the local VP.
+ */
+extern unsigned gic_read_local_vp_id(void);
+
#endif /* __LINUX_IRQCHIP_MIPS_GIC_H */
diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h
index dcca77c4b9d2..b51beebf9804 100644
--- a/include/linux/irqdesc.h
+++ b/include/linux/irqdesc.h
@@ -66,6 +66,7 @@ struct irq_desc {
int threads_handled_last;
raw_spinlock_t lock;
struct cpumask *percpu_enabled;
+ const struct cpumask *percpu_affinity;
#ifdef CONFIG_SMP
const struct cpumask *affinity_hint;
struct irq_affinity_notify *affinity_notify;
diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h
index 2aed04396210..f1f36e04d885 100644
--- a/include/linux/irqdomain.h
+++ b/include/linux/irqdomain.h
@@ -96,6 +96,8 @@ enum irq_domain_bus_token {
struct irq_domain_ops {
int (*match)(struct irq_domain *d, struct device_node *node,
enum irq_domain_bus_token bus_token);
+ int (*select)(struct irq_domain *d, struct irq_fwspec *fwspec,
+ enum irq_domain_bus_token bus_token);
int (*map)(struct irq_domain *d, unsigned int virq, irq_hw_number_t hw);
void (*unmap)(struct irq_domain *d, unsigned int virq);
int (*xlate)(struct irq_domain *d, struct device_node *node,
@@ -211,7 +213,7 @@ struct irq_domain *irq_domain_add_legacy(struct device_node *of_node,
irq_hw_number_t first_hwirq,
const struct irq_domain_ops *ops,
void *host_data);
-extern struct irq_domain *irq_find_matching_fwnode(struct fwnode_handle *fwnode,
+extern struct irq_domain *irq_find_matching_fwspec(struct irq_fwspec *fwspec,
enum irq_domain_bus_token bus_token);
extern void irq_set_default_host(struct irq_domain *host);
extern int irq_domain_alloc_descs(int virq, unsigned int nr_irqs,
@@ -227,6 +229,17 @@ static inline bool is_fwnode_irqchip(struct fwnode_handle *fwnode)
return fwnode && fwnode->type == FWNODE_IRQCHIP;
}
+static inline
+struct irq_domain *irq_find_matching_fwnode(struct fwnode_handle *fwnode,
+ enum irq_domain_bus_token bus_token)
+{
+ struct irq_fwspec fwspec = {
+ .fwnode = fwnode,
+ };
+
+ return irq_find_matching_fwspec(&fwspec, bus_token);
+}
+
static inline struct irq_domain *irq_find_matching_host(struct device_node *node,
enum irq_domain_bus_token bus_token)
{
@@ -346,9 +359,8 @@ int irq_domain_xlate_onetwocell(struct irq_domain *d, struct device_node *ctrlr,
irq_hw_number_t *out_hwirq, unsigned int *out_type);
/* IPI functions */
-unsigned int irq_reserve_ipi(struct irq_domain *domain,
- const struct cpumask *dest);
-void irq_destroy_ipi(unsigned int irq);
+int irq_reserve_ipi(struct irq_domain *domain, const struct cpumask *dest);
+int irq_destroy_ipi(unsigned int irq, const struct cpumask *dest);
/* V2 interfaces to support hierarchy IRQ domains. */
extern struct irq_data *irq_domain_get_irq_data(struct irq_domain *domain,
diff --git a/include/linux/iscsi_boot_sysfs.h b/include/linux/iscsi_boot_sysfs.h
index 548d55395488..10923d730486 100644
--- a/include/linux/iscsi_boot_sysfs.h
+++ b/include/linux/iscsi_boot_sysfs.h
@@ -64,6 +64,12 @@ enum iscsi_boot_initiator_properties_enum {
ISCSI_BOOT_INI_END_MARKER,
};
+enum iscsi_boot_acpitbl_properties_enum {
+ ISCSI_BOOT_ACPITBL_SIGNATURE,
+ ISCSI_BOOT_ACPITBL_OEM_ID,
+ ISCSI_BOOT_ACPITBL_OEM_TABLE_ID,
+};
+
struct attribute_group;
struct iscsi_boot_kobj {
@@ -127,6 +133,13 @@ iscsi_boot_create_target(struct iscsi_boot_kset *boot_kset, int index,
umode_t (*is_visible) (void *data, int type),
void (*release) (void *data));
+struct iscsi_boot_kobj *
+iscsi_boot_create_acpitbl(struct iscsi_boot_kset *boot_kset, int index,
+ void *data,
+ ssize_t (*show)(void *data, int type, char *buf),
+ umode_t (*is_visible)(void *data, int type),
+ void (*release)(void *data));
+
struct iscsi_boot_kset *iscsi_boot_create_kset(const char *set_name);
struct iscsi_boot_kset *iscsi_boot_create_host_kset(unsigned int hostno);
void iscsi_boot_destroy_kset(struct iscsi_boot_kset *boot_kset);
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 2f7775e229b0..cc7398287fdd 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -412,9 +412,9 @@ extern __printf(3, 4)
int scnprintf(char *buf, size_t size, const char *fmt, ...);
extern __printf(3, 0)
int vscnprintf(char *buf, size_t size, const char *fmt, va_list args);
-extern __printf(2, 3)
+extern __printf(2, 3) __malloc
char *kasprintf(gfp_t gfp, const char *fmt, ...);
-extern __printf(2, 0)
+extern __printf(2, 0) __malloc
char *kvasprintf(gfp_t gfp, const char *fmt, va_list args);
extern __printf(2, 0)
const char *kvasprintf_const(gfp_t gfp, const char *fmt, va_list args);
diff --git a/include/linux/kernfs.h b/include/linux/kernfs.h
index c06c44242f39..30f089ebe0a4 100644
--- a/include/linux/kernfs.h
+++ b/include/linux/kernfs.h
@@ -152,6 +152,8 @@ struct kernfs_syscall_ops {
int (*rmdir)(struct kernfs_node *kn);
int (*rename)(struct kernfs_node *kn, struct kernfs_node *new_parent,
const char *new_name);
+ int (*show_path)(struct seq_file *sf, struct kernfs_node *kn,
+ struct kernfs_root *root);
};
struct kernfs_root {
diff --git a/include/linux/key-type.h b/include/linux/key-type.h
index 7463355a198b..eaee981c5558 100644
--- a/include/linux/key-type.h
+++ b/include/linux/key-type.h
@@ -45,7 +45,6 @@ struct key_preparsed_payload {
size_t datalen; /* Raw datalen */
size_t quotalen; /* Quota length for proposed payload */
time_t expiry; /* Expiry time of key */
- bool trusted; /* True if key is trusted */
};
typedef int (*request_key_actor_t)(struct key_construction *key,
diff --git a/include/linux/key.h b/include/linux/key.h
index 5f5b1129dc92..722914798f37 100644
--- a/include/linux/key.h
+++ b/include/linux/key.h
@@ -173,11 +173,9 @@ struct key {
#define KEY_FLAG_NEGATIVE 5 /* set if key is negative */
#define KEY_FLAG_ROOT_CAN_CLEAR 6 /* set if key can be cleared by root without permission */
#define KEY_FLAG_INVALIDATED 7 /* set if key has been invalidated */
-#define KEY_FLAG_TRUSTED 8 /* set if key is trusted */
-#define KEY_FLAG_TRUSTED_ONLY 9 /* set if keyring only accepts links to trusted keys */
-#define KEY_FLAG_BUILTIN 10 /* set if key is builtin */
-#define KEY_FLAG_ROOT_CAN_INVAL 11 /* set if key can be invalidated by root without permission */
-#define KEY_FLAG_KEEP 12 /* set if key should not be removed */
+#define KEY_FLAG_BUILTIN 8 /* set if key is built in to the kernel */
+#define KEY_FLAG_ROOT_CAN_INVAL 9 /* set if key can be invalidated by root without permission */
+#define KEY_FLAG_KEEP 10 /* set if key should not be removed */
/* the key type and key description string
* - the desc is used to match a key against search criteria
@@ -205,6 +203,20 @@ struct key {
};
int reject_error;
};
+
+ /* This is set on a keyring to restrict which keys may be linked to it.
+ * If this method isn't provided then the keyring is assumed to be open
+ * to any addition. It is ignored for non-keyring keys.
+ *
+ * This is intended for use with rings of trusted keys whereby addition
+ * to the keyring needs to be controlled. KEY_ALLOC_BYPASS_RESTRICTION
+ * overrides this, allowing the kernel to add extra keys without
+ * restriction.
+ */
+ int (*restrict_link)(struct key *keyring,
+ const struct key_type *type,
+ const union key_payload *payload);
};
extern struct key *key_alloc(struct key_type *type,
@@ -212,14 +224,17 @@ extern struct key *key_alloc(struct key_type *type,
kuid_t uid, kgid_t gid,
const struct cred *cred,
key_perm_t perm,
- unsigned long flags);
+ unsigned long flags,
+ int (*restrict_link)(struct key *,
+ const struct key_type *,
+ const union key_payload *));
-#define KEY_ALLOC_IN_QUOTA 0x0000 /* add to quota, reject if would overrun */
-#define KEY_ALLOC_QUOTA_OVERRUN 0x0001 /* add to quota, permit even if overrun */
-#define KEY_ALLOC_NOT_IN_QUOTA 0x0002 /* not in quota */
-#define KEY_ALLOC_TRUSTED 0x0004 /* Key should be flagged as trusted */
-#define KEY_ALLOC_BUILT_IN 0x0008 /* Key is built into kernel */
+#define KEY_ALLOC_IN_QUOTA 0x0000 /* add to quota, reject if would overrun */
+#define KEY_ALLOC_QUOTA_OVERRUN 0x0001 /* add to quota, permit even if overrun */
+#define KEY_ALLOC_NOT_IN_QUOTA 0x0002 /* not in quota */
+#define KEY_ALLOC_BUILT_IN 0x0004 /* Key is built into kernel */
+#define KEY_ALLOC_BYPASS_RESTRICTION 0x0008 /* Override the check on restricted keyrings */
extern void key_revoke(struct key *key);
extern void key_invalidate(struct key *key);
@@ -288,8 +303,15 @@ extern struct key *keyring_alloc(const char *description, kuid_t uid, kgid_t gid
const struct cred *cred,
key_perm_t perm,
unsigned long flags,
+ int (*restrict_link)(struct key *,
+ const struct key_type *,
+ const union key_payload *),
struct key *dest);
+extern int restrict_link_reject(struct key *keyring,
+ const struct key_type *type,
+ const union key_payload *payload);
+
extern int keyring_clear(struct key *keyring);
extern key_ref_t keyring_search(key_ref_t keyring,
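Shape of a restrict_link callback matching the new key_alloc()/keyring_alloc() signatures above. This sketch simply refuses anything that is not an asymmetric key; restrict_link_reject() declared above is the stock refuse-everything helper:

static int example_restrict_link(struct key *keyring,
				 const struct key_type *type,
				 const union key_payload *payload)
{
	/* Only allow asymmetric keys to be linked into this keyring. */
	if (strcmp(type->name, "asymmetric") != 0)
		return -EOPNOTSUPP;
	return 0;
}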
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 5276fe0916fc..b1fa8f11c95b 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -35,6 +35,10 @@
#include <asm/kvm_host.h>
+#ifndef KVM_MAX_VCPU_ID
+#define KVM_MAX_VCPU_ID KVM_MAX_VCPUS
+#endif
+
/*
* The bit 16 ~ bit 31 of kvm_memory_region::flags are internally used
* in kvm, other bits are visible for userspace which are defined in
@@ -225,6 +229,7 @@ struct kvm_vcpu {
sigset_t sigset;
struct kvm_vcpu_stat stat;
unsigned int halt_poll_ns;
+ bool valid_wakeup;
#ifdef CONFIG_HAS_IOMEM
int mmio_needed;
@@ -447,12 +452,13 @@ static inline struct kvm_vcpu *kvm_get_vcpu(struct kvm *kvm, int i)
static inline struct kvm_vcpu *kvm_get_vcpu_by_id(struct kvm *kvm, int id)
{
- struct kvm_vcpu *vcpu;
+ struct kvm_vcpu *vcpu = NULL;
int i;
- if (id < 0 || id >= KVM_MAX_VCPUS)
+ if (id < 0)
return NULL;
- vcpu = kvm_get_vcpu(kvm, id);
+ if (id < KVM_MAX_VCPUS)
+ vcpu = kvm_get_vcpu(kvm, id);
if (vcpu && vcpu->vcpu_id == id)
return vcpu;
kvm_for_each_vcpu(i, vcpu, kvm)
@@ -651,6 +657,7 @@ void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn);
void kvm_vcpu_block(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu);
+void kvm_vcpu_wake_up(struct kvm_vcpu *vcpu);
void kvm_vcpu_kick(struct kvm_vcpu *vcpu);
int kvm_vcpu_yield_to(struct kvm_vcpu *target);
void kvm_vcpu_on_spin(struct kvm_vcpu *vcpu);
@@ -1091,6 +1098,11 @@ static inline bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu) { return true; }
static inline void kvm_make_request(int req, struct kvm_vcpu *vcpu)
{
+ /*
+ * Ensure the rest of the request is published to kvm_check_request's
+ * caller. Paired with the smp_mb__after_atomic in kvm_check_request.
+ */
+ smp_wmb();
set_bit(req, &vcpu->requests);
}
@@ -1098,6 +1110,12 @@ static inline bool kvm_check_request(int req, struct kvm_vcpu *vcpu)
{
if (test_bit(req, &vcpu->requests)) {
clear_bit(req, &vcpu->requests);
+
+ /*
+ * Ensure the rest of the request is visible to kvm_check_request's
+ * caller. Paired with the smp_wmb in kvm_make_request.
+ */
+ smp_mb__after_atomic();
return true;
} else {
return false;
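The barrier pairing above is the standard publish/consume pattern: write the payload, then set the request bit (kvm_make_request() issues the smp_wmb()); on the other side, test and clear the bit, then read the payload (kvm_check_request() issues smp_mb__after_atomic()). A sketch with made-up names (KVM_REQ_EXAMPLE, arch.example_pending_value and example_handle() are not real KVM symbols):

/* Producer: publish the payload before raising the request. */
static void example_post_value(struct kvm_vcpu *vcpu, u64 val)
{
	vcpu->arch.example_pending_value = val;
	kvm_make_request(KVM_REQ_EXAMPLE, vcpu);	/* smp_wmb() + set_bit() */
}

/* Consumer, e.g. in the vcpu run loop: the clear_bit() plus
 * smp_mb__after_atomic() in kvm_check_request() orders the payload
 * read after the bit test. */
static void example_service_requests(struct kvm_vcpu *vcpu)
{
	if (kvm_check_request(KVM_REQ_EXAMPLE, vcpu))
		example_handle(vcpu, vcpu->arch.example_pending_value);
}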
@@ -1169,6 +1187,7 @@ static inline void kvm_vcpu_set_dy_eligible(struct kvm_vcpu *vcpu, bool val)
#endif /* CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT */
#ifdef CONFIG_HAVE_KVM_IRQ_BYPASS
+bool kvm_arch_has_irq_bypass(void);
int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *,
struct irq_bypass_producer *);
void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *,
@@ -1179,4 +1198,18 @@ int kvm_arch_update_irqfd_routing(struct kvm *kvm, unsigned int host_irq,
uint32_t guest_irq, bool set);
#endif /* CONFIG_HAVE_KVM_IRQ_BYPASS */
+#ifdef CONFIG_HAVE_KVM_INVALID_WAKEUPS
+/* If we wake up during the poll time, was it a successful poll? */
+static inline bool vcpu_valid_wakeup(struct kvm_vcpu *vcpu)
+{
+ return vcpu->valid_wakeup;
+}
+
+#else
+static inline bool vcpu_valid_wakeup(struct kvm_vcpu *vcpu)
+{
+ return true;
+}
+#endif /* CONFIG_HAVE_KVM_INVALID_WAKEUPS */
+
#endif
diff --git a/include/linux/leds.h b/include/linux/leds.h
index f203a8f89d30..d2b13066e781 100644
--- a/include/linux/leds.h
+++ b/include/linux/leds.h
@@ -50,6 +50,7 @@ struct led_classdev {
#define LED_SYSFS_DISABLE (1 << 22)
#define LED_DEV_CAP_FLASH (1 << 23)
#define LED_HW_PLUGGABLE (1 << 24)
+#define LED_PANIC_INDICATOR (1 << 25)
/* Set LED brightness level
* Must not sleep. Use brightness_set_blocking for drivers
@@ -329,6 +330,12 @@ extern void ledtrig_ide_activity(void);
static inline void ledtrig_ide_activity(void) {}
#endif
+#ifdef CONFIG_LEDS_TRIGGER_MTD
+extern void ledtrig_mtd_activity(void);
+#else
+static inline void ledtrig_mtd_activity(void) {}
+#endif
+
#if defined(CONFIG_LEDS_TRIGGER_CAMERA) || defined(CONFIG_LEDS_TRIGGER_CAMERA_MODULE)
extern void ledtrig_flash_ctrl(bool on);
extern void ledtrig_torch_ctrl(bool on);
@@ -358,6 +365,7 @@ struct gpio_led {
unsigned gpio;
unsigned active_low : 1;
unsigned retain_state_suspended : 1;
+ unsigned panic_indicator : 1;
unsigned default_state : 2;
/* default_state should be one of LEDS_GPIO_DEFSTATE_(ON|OFF|KEEP) */
struct gpio_desc *gpiod;
diff --git a/include/linux/lightnvm.h b/include/linux/lightnvm.h
index cdcb2ccbefa8..ef2c7d2e76c4 100644
--- a/include/linux/lightnvm.h
+++ b/include/linux/lightnvm.h
@@ -18,7 +18,7 @@ enum {
#define NVM_SEC_BITS (8)
#define NVM_PL_BITS (8)
#define NVM_LUN_BITS (8)
-#define NVM_CH_BITS (8)
+#define NVM_CH_BITS (7)
struct ppa_addr {
/* Generic structure for all addresses */
@@ -30,8 +30,14 @@ struct ppa_addr {
u64 pl : NVM_PL_BITS;
u64 lun : NVM_LUN_BITS;
u64 ch : NVM_CH_BITS;
+ u64 reserved : 1;
} g;
+ struct {
+ u64 line : 63;
+ u64 is_cached : 1;
+ } c;
+
u64 ppa;
};
};
@@ -41,13 +47,11 @@ struct nvm_id;
struct nvm_dev;
typedef int (nvm_l2p_update_fn)(u64, u32, __le64 *, void *);
-typedef int (nvm_bb_update_fn)(struct ppa_addr, int, u8 *, void *);
typedef int (nvm_id_fn)(struct nvm_dev *, struct nvm_id *);
typedef int (nvm_get_l2p_tbl_fn)(struct nvm_dev *, u64, u32,
nvm_l2p_update_fn *, void *);
-typedef int (nvm_op_bb_tbl_fn)(struct nvm_dev *, struct ppa_addr, int,
- nvm_bb_update_fn *, void *);
-typedef int (nvm_op_set_bb_fn)(struct nvm_dev *, struct nvm_rq *, int);
+typedef int (nvm_op_bb_tbl_fn)(struct nvm_dev *, struct ppa_addr, u8 *);
+typedef int (nvm_op_set_bb_fn)(struct nvm_dev *, struct ppa_addr *, int, int);
typedef int (nvm_submit_io_fn)(struct nvm_dev *, struct nvm_rq *);
typedef int (nvm_erase_blk_fn)(struct nvm_dev *, struct nvm_rq *);
typedef void *(nvm_create_dma_pool_fn)(struct nvm_dev *, char *);
@@ -202,6 +206,7 @@ struct nvm_id {
struct nvm_target {
struct list_head list;
+ struct nvm_dev *dev;
struct nvm_tgt_type *type;
struct gendisk *disk;
};
@@ -232,14 +237,14 @@ struct nvm_rq {
struct ppa_addr *ppa_list;
- void *metadata;
- dma_addr_t dma_metadata;
+ void *meta_list;
+ dma_addr_t dma_meta_list;
struct completion *wait;
nvm_end_io_fn *end_io;
uint8_t opcode;
- uint16_t nr_pages;
+ uint16_t nr_ppas;
uint16_t flags;
u64 ppa_status; /* ppa media status */
@@ -307,7 +312,6 @@ struct nvm_dev {
struct nvm_dev_ops *ops;
struct list_head devices;
- struct list_head online_targets;
/* Media manager */
struct nvmm_type *mt;
@@ -323,6 +327,8 @@ struct nvm_dev {
int sec_per_pg; /* only sectors for a single page */
int pgs_per_blk;
int blks_per_lun;
+ int fpg_size;
+ int pfpg_size; /* size of buffer if all pages are to be read */
int sec_size;
int oob_size;
int mccap;
@@ -345,10 +351,9 @@ struct nvm_dev {
unsigned long total_blocks;
unsigned long total_secs;
int nr_luns;
- unsigned max_pages_per_blk;
unsigned long *lun_map;
- void *ppalist_pool;
+ void *dma_pool;
struct nvm_id identity;
@@ -450,8 +455,8 @@ struct nvm_tgt_type {
struct list_head list;
};
-extern int nvm_register_target(struct nvm_tgt_type *);
-extern void nvm_unregister_target(struct nvm_tgt_type *);
+extern int nvm_register_tgt_type(struct nvm_tgt_type *);
+extern void nvm_unregister_tgt_type(struct nvm_tgt_type *);
extern void *nvm_dev_dma_alloc(struct nvm_dev *, gfp_t, dma_addr_t *);
extern void nvm_dev_dma_free(struct nvm_dev *, void *, dma_addr_t);
@@ -467,6 +472,7 @@ typedef void (nvmm_flush_blk_fn)(struct nvm_dev *, struct nvm_block *);
typedef int (nvmm_submit_io_fn)(struct nvm_dev *, struct nvm_rq *);
typedef int (nvmm_erase_blk_fn)(struct nvm_dev *, struct nvm_block *,
unsigned long);
+typedef void (nvmm_mark_blk_fn)(struct nvm_dev *, struct ppa_addr, int);
typedef struct nvm_lun *(nvmm_get_lun_fn)(struct nvm_dev *, int);
typedef int (nvmm_reserve_lun)(struct nvm_dev *, int);
typedef void (nvmm_release_lun)(struct nvm_dev *, int);
@@ -494,6 +500,9 @@ struct nvmm_type {
nvmm_submit_io_fn *submit_io;
nvmm_erase_blk_fn *erase_blk;
+ /* Bad block mgmt */
+ nvmm_mark_blk_fn *mark_blk;
+
/* Configuration management */
nvmm_get_lun_fn *get_lun;
nvmm_reserve_lun *reserve_lun;
@@ -527,13 +536,17 @@ extern int nvm_submit_io(struct nvm_dev *, struct nvm_rq *);
extern void nvm_generic_to_addr_mode(struct nvm_dev *, struct nvm_rq *);
extern void nvm_addr_to_generic_mode(struct nvm_dev *, struct nvm_rq *);
extern int nvm_set_rqd_ppalist(struct nvm_dev *, struct nvm_rq *,
- struct ppa_addr *, int);
+ struct ppa_addr *, int, int);
extern void nvm_free_rqd_ppalist(struct nvm_dev *, struct nvm_rq *);
extern int nvm_erase_ppa(struct nvm_dev *, struct ppa_addr *, int);
extern int nvm_erase_blk(struct nvm_dev *, struct nvm_block *);
extern void nvm_end_io(struct nvm_rq *, int);
extern int nvm_submit_ppa(struct nvm_dev *, struct ppa_addr *, int, int, int,
void *, int);
+extern int nvm_submit_ppa_list(struct nvm_dev *, struct ppa_addr *, int, int,
+ int, void *, int);
+extern int nvm_bb_tbl_fold(struct nvm_dev *, u8 *, int);
+extern int nvm_get_bb_tbl(struct nvm_dev *, struct ppa_addr, u8 *);
/* sysblk.c */
#define NVM_SYSBLK_MAGIC 0x4E564D53 /* "NVMS" */
@@ -554,6 +567,13 @@ extern int nvm_update_sysblock(struct nvm_dev *, struct nvm_sb_info *);
extern int nvm_init_sysblock(struct nvm_dev *, struct nvm_sb_info *);
extern int nvm_dev_factory(struct nvm_dev *, int flags);
+
+#define nvm_for_each_lun_ppa(dev, ppa, chid, lunid) \
+ for ((chid) = 0, (ppa).ppa = 0; (chid) < (dev)->nr_chnls; \
+ (chid)++, (ppa).g.ch = (chid)) \
+ for ((lunid) = 0; (lunid) < (dev)->luns_per_chnl; \
+ (lunid)++, (ppa).g.lun = (lunid))
+
#else /* CONFIG_NVM */
struct nvm_dev_ops;
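Editor's note: the new nvm_for_each_lun_ppa() iterator above walks every (channel, LUN) pair of a device, filling in a generic-mode ppa_addr as it goes. A minimal sketch of how a caller might use it, assuming 'dev' is a fully initialized struct nvm_dev (the helper itself is hypothetical):

static int count_luns(struct nvm_dev *dev)
{
	struct ppa_addr ppa;
	int ch, lun, cnt = 0;

	/* body runs once per LUN; ppa.g.ch / ppa.g.lun identify it */
	nvm_for_each_lun_ppa(dev, ppa, ch, lun)
		cnt++;

	return cnt;
}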
diff --git a/include/linux/livepatch.h b/include/linux/livepatch.h
index bd830d590465..a93a0b23dc8d 100644
--- a/include/linux/livepatch.h
+++ b/include/linux/livepatch.h
@@ -65,27 +65,8 @@ struct klp_func {
};
/**
- * struct klp_reloc - relocation structure for live patching
- * @loc: address where the relocation will be written
- * @sympos: position in kallsyms to disambiguate symbols (optional)
- * @type: ELF relocation type
- * @name: name of the referenced symbol (for lookup/verification)
- * @addend: offset from the referenced symbol
- * @external: symbol is either exported or within the live patch module itself
- */
-struct klp_reloc {
- unsigned long loc;
- unsigned long sympos;
- unsigned long type;
- const char *name;
- int addend;
- int external;
-};
-
-/**
* struct klp_object - kernel object structure for live patching
* @name: module name (or NULL for vmlinux)
- * @relocs: relocation entries to be applied at load time
* @funcs: function entries for functions to be patched in the object
* @kobj: kobject for sysfs resources
* @mod: kernel module associated with the patched object
@@ -95,7 +76,6 @@ struct klp_reloc {
struct klp_object {
/* external */
const char *name;
- struct klp_reloc *relocs;
struct klp_func *funcs;
/* internal */
@@ -124,10 +104,12 @@ struct klp_patch {
};
#define klp_for_each_object(patch, obj) \
- for (obj = patch->objs; obj->funcs; obj++)
+ for (obj = patch->objs; obj->funcs || obj->name; obj++)
#define klp_for_each_func(obj, func) \
- for (func = obj->funcs; func->old_name; func++)
+ for (func = obj->funcs; \
+ func->old_name || func->new_func || func->old_sympos; \
+ func++)
int klp_register_patch(struct klp_patch *);
int klp_unregister_patch(struct klp_patch *);
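Editor's note: with the looser termination conditions above, the iterators also visit entries identified only by name (objects) or only by new_func/old_sympos (functions), so the sentinel element of a patch description must have every tested field cleared. A hedged sketch of what a patch definition is expected to look like (the replacement function name is hypothetical):

static struct klp_func funcs[] = {
	{
		.old_name = "cmdline_proc_show",
		.new_func = livepatch_cmdline_proc_show,	/* hypothetical */
	},
	{ }	/* old_name, new_func and old_sympos all zero: stops klp_for_each_func() */
};

static struct klp_object objs[] = {
	{
		/* name == NULL means vmlinux */
		.funcs = funcs,
	},
	{ }	/* funcs == NULL and name == NULL: stops klp_for_each_object() */
};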
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index d026b190c530..eabe0138eb06 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -196,9 +196,11 @@ struct lock_list {
* We record lock dependency chains, so that we can cache them:
*/
struct lock_chain {
- u8 irq_context;
- u8 depth;
- u16 base;
+ /* see BUILD_BUG_ON()s in lookup_chain_cache() */
+ unsigned int irq_context : 2,
+ depth : 6,
+ base : 24;
+ /* 4 byte hole */
struct hlist_node entry;
u64 chain_key;
};
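Editor's note: packed, the three fields above are 2 + 6 + 24 = 32 bits, i.e. the same four bytes as the old u8/u8/u16 layout, but base grows from a 16-bit to a 24-bit index; on 64-bit the pointer-aligned hlist_node that follows is what leaves the 4-byte hole noted in the comment.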
@@ -354,8 +356,13 @@ extern void lockdep_set_current_reclaim_state(gfp_t gfp_mask);
extern void lockdep_clear_current_reclaim_state(void);
extern void lockdep_trace_alloc(gfp_t mask);
-extern void lock_pin_lock(struct lockdep_map *lock);
-extern void lock_unpin_lock(struct lockdep_map *lock);
+struct pin_cookie { unsigned int val; };
+
+#define NIL_COOKIE (struct pin_cookie){ .val = 0U, }
+
+extern struct pin_cookie lock_pin_lock(struct lockdep_map *lock);
+extern void lock_repin_lock(struct lockdep_map *lock, struct pin_cookie);
+extern void lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie);
# define INIT_LOCKDEP .lockdep_recursion = 0, .lockdep_reclaim_gfp = 0,
@@ -371,8 +378,9 @@ extern void lock_unpin_lock(struct lockdep_map *lock);
#define lockdep_recursing(tsk) ((tsk)->lockdep_recursion)
-#define lockdep_pin_lock(l) lock_pin_lock(&(l)->dep_map)
-#define lockdep_unpin_lock(l) lock_unpin_lock(&(l)->dep_map)
+#define lockdep_pin_lock(l) lock_pin_lock(&(l)->dep_map)
+#define lockdep_repin_lock(l,c) lock_repin_lock(&(l)->dep_map, (c))
+#define lockdep_unpin_lock(l,c) lock_unpin_lock(&(l)->dep_map, (c))
#else /* !CONFIG_LOCKDEP */
@@ -425,8 +433,13 @@ struct lock_class_key { };
#define lockdep_recursing(tsk) (0)
-#define lockdep_pin_lock(l) do { (void)(l); } while (0)
-#define lockdep_unpin_lock(l) do { (void)(l); } while (0)
+struct pin_cookie { };
+
+#define NIL_COOKIE (struct pin_cookie){ }
+
+#define lockdep_pin_lock(l) ({ struct pin_cookie cookie; cookie; })
+#define lockdep_repin_lock(l, c) do { (void)(l); (void)(c); } while (0)
+#define lockdep_unpin_lock(l, c) do { (void)(l); (void)(c); } while (0)
#endif /* !LOCKDEP */
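Editor's note: lock pinning now hands back a cookie that must be presented again on unpin (and on repin after a temporary drop), so mismatched pin/unpin pairs can be detected. A minimal sketch, assuming 'lock' is a pointer to any lock wrapper that embeds a dep_map; the surrounding acquire/release of the lock itself is elided:

	struct pin_cookie cookie;

	cookie = lockdep_pin_lock(lock);	/* lock must stay held while pinned */
	/* ... critical section ... */
	lockdep_unpin_lock(lock, cookie);	/* allow a temporary drop */
	/* ... drop and reacquire the lock ... */
	lockdep_repin_lock(lock, cookie);	/* pin again under the same cookie */
	/* ... more work ... */
	lockdep_unpin_lock(lock, cookie);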
@@ -444,6 +457,18 @@ do { \
lock_acquired(&(_lock)->dep_map, _RET_IP_); \
} while (0)
+#define LOCK_CONTENDED_RETURN(_lock, try, lock) \
+({ \
+ int ____err = 0; \
+ if (!try(_lock)) { \
+ lock_contended(&(_lock)->dep_map, _RET_IP_); \
+ ____err = lock(_lock); \
+ } \
+ if (!____err) \
+ lock_acquired(&(_lock)->dep_map, _RET_IP_); \
+ ____err; \
+})
+
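Editor's note: LOCK_CONTENDED_RETURN() behaves like LOCK_CONTENDED() but propagates the slow path's return value, for primitives whose acquire can fail. A hedged sketch of the intended use, modelled on a killable acquire; '__foo_trylock' and '__foo_lock_killable' are hypothetical helpers, the lock type is assumed to embed a struct lockdep_map named dep_map, and the lockdep acquire/release annotations around the call are elided:

static inline int foo_lock_killable(struct foo_lock *l)
{
	/* 0 on success, or the error (e.g. -EINTR) from the slow path */
	return LOCK_CONTENDED_RETURN(l, __foo_trylock, __foo_lock_killable);
}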
#else /* CONFIG_LOCK_STAT */
#define lock_contended(lockdep_map, ip) do {} while (0)
@@ -452,6 +477,9 @@ do { \
#define LOCK_CONTENDED(_lock, try, lock) \
lock(_lock)
+#define LOCK_CONTENDED_RETURN(_lock, try, lock) \
+ lock(_lock)
+
#endif /* CONFIG_LOCK_STAT */
#ifdef CONFIG_LOCKDEP
diff --git a/include/linux/lsm_hooks.h b/include/linux/lsm_hooks.h
index cdee11cbcdf1..7ae397669d8b 100644
--- a/include/linux/lsm_hooks.h
+++ b/include/linux/lsm_hooks.h
@@ -1190,7 +1190,8 @@
* Return 0 if permission is granted.
* @settime:
* Check permission to change the system time.
- * struct timespec and timezone are defined in include/linux/time.h
+ * struct timespec64 is defined in include/linux/time64.h and timezone
+ * is defined in include/linux/time.h
* @ts contains new time
* @tz contains new timezone
* Return 0 if permission is granted.
@@ -1327,7 +1328,7 @@ union security_list_options {
int (*quotactl)(int cmds, int type, int id, struct super_block *sb);
int (*quota_on)(struct dentry *dentry);
int (*syslog)(int type);
- int (*settime)(const struct timespec *ts, const struct timezone *tz);
+ int (*settime)(const struct timespec64 *ts, const struct timezone *tz);
int (*vm_enough_memory)(struct mm_struct *mm, long pages);
int (*bprm_set_creds)(struct linux_binprm *bprm);
@@ -1343,10 +1344,10 @@ union security_list_options {
int (*sb_kern_mount)(struct super_block *sb, int flags, void *data);
int (*sb_show_options)(struct seq_file *m, struct super_block *sb);
int (*sb_statfs)(struct dentry *dentry);
- int (*sb_mount)(const char *dev_name, struct path *path,
+ int (*sb_mount)(const char *dev_name, const struct path *path,
const char *type, unsigned long flags, void *data);
int (*sb_umount)(struct vfsmount *mnt, int flags);
- int (*sb_pivotroot)(struct path *old_path, struct path *new_path);
+ int (*sb_pivotroot)(const struct path *old_path, const struct path *new_path);
int (*sb_set_mnt_opts)(struct super_block *sb,
struct security_mnt_opts *opts,
unsigned long kern_flags,
@@ -1360,23 +1361,23 @@ union security_list_options {
#ifdef CONFIG_SECURITY_PATH
- int (*path_unlink)(struct path *dir, struct dentry *dentry);
- int (*path_mkdir)(struct path *dir, struct dentry *dentry,
+ int (*path_unlink)(const struct path *dir, struct dentry *dentry);
+ int (*path_mkdir)(const struct path *dir, struct dentry *dentry,
umode_t mode);
- int (*path_rmdir)(struct path *dir, struct dentry *dentry);
- int (*path_mknod)(struct path *dir, struct dentry *dentry,
+ int (*path_rmdir)(const struct path *dir, struct dentry *dentry);
+ int (*path_mknod)(const struct path *dir, struct dentry *dentry,
umode_t mode, unsigned int dev);
- int (*path_truncate)(struct path *path);
- int (*path_symlink)(struct path *dir, struct dentry *dentry,
+ int (*path_truncate)(const struct path *path);
+ int (*path_symlink)(const struct path *dir, struct dentry *dentry,
const char *old_name);
- int (*path_link)(struct dentry *old_dentry, struct path *new_dir,
+ int (*path_link)(struct dentry *old_dentry, const struct path *new_dir,
struct dentry *new_dentry);
- int (*path_rename)(struct path *old_dir, struct dentry *old_dentry,
- struct path *new_dir,
+ int (*path_rename)(const struct path *old_dir, struct dentry *old_dentry,
+ const struct path *new_dir,
struct dentry *new_dentry);
- int (*path_chmod)(struct path *path, umode_t mode);
- int (*path_chown)(struct path *path, kuid_t uid, kgid_t gid);
- int (*path_chroot)(struct path *path);
+ int (*path_chmod)(const struct path *path, umode_t mode);
+ int (*path_chown)(const struct path *path, kuid_t uid, kgid_t gid);
+ int (*path_chroot)(const struct path *path);
#endif
int (*inode_alloc_security)(struct inode *inode);
@@ -1804,7 +1805,6 @@ struct security_hook_heads {
struct list_head tun_dev_attach_queue;
struct list_head tun_dev_attach;
struct list_head tun_dev_open;
- struct list_head skb_owned_by;
#endif /* CONFIG_SECURITY_NETWORK */
#ifdef CONFIG_SECURITY_NETWORK_XFRM
struct list_head xfrm_policy_alloc_security;
@@ -1893,5 +1893,10 @@ extern void __init yama_add_hooks(void);
#else
static inline void __init yama_add_hooks(void) { }
#endif
+#ifdef CONFIG_SECURITY_LOADPIN
+void __init loadpin_add_hooks(void);
+#else
+static inline void loadpin_add_hooks(void) { };
+#endif
#endif /* ! __LINUX_LSM_HOOKS_H */
diff --git a/include/linux/mdio.h b/include/linux/mdio.h
index 5bfd99d1a40a..bf9d1d750693 100644
--- a/include/linux/mdio.h
+++ b/include/linux/mdio.h
@@ -13,6 +13,17 @@
struct mii_bus;
+/* Multiple levels of nesting are possible. However, typically this is
+ * limited to a nested DSA-like layer, a MUX layer, and the normal
+ * user. Instead of trying to handle the general case, just define
+ * these cases.
+ */
+enum mdio_mutex_lock_class {
+ MDIO_MUTEX_NORMAL,
+ MDIO_MUTEX_MUX,
+ MDIO_MUTEX_NESTED,
+};
+
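Editor's note: these lock classes exist to be fed to mutex_lock_nested(), so lockdep can tell apart the same mdio_lock taken at different layers of a mux or DSA stack. A hedged sketch of the mux-level case ('bus' is a struct mii_bus * reached from behind a mux; the surrounding driver code is hypothetical):

	mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_MUX);
	ret = bus->read(bus, phy_addr, regnum);	/* raw accessor, lock already held */
	mutex_unlock(&bus->mdio_lock);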
struct mdio_device {
struct device dev;
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 1191d79aa495..94da96738df3 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -658,12 +658,6 @@ mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list lru)
return 0;
}
-static inline void
-mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
- int increment)
-{
-}
-
static inline unsigned long
mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
int nid, unsigned int lru_mask)
diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h
index adbef586e696..20d8a5d4d133 100644
--- a/include/linux/memory_hotplug.h
+++ b/include/linux/memory_hotplug.h
@@ -247,16 +247,16 @@ static inline void mem_hotplug_done(void) {}
#ifdef CONFIG_MEMORY_HOTREMOVE
-extern int is_mem_section_removable(unsigned long pfn, unsigned long nr_pages);
+extern bool is_mem_section_removable(unsigned long pfn, unsigned long nr_pages);
extern void try_offline_node(int nid);
extern int offline_pages(unsigned long start_pfn, unsigned long nr_pages);
extern void remove_memory(int nid, u64 start, u64 size);
#else
-static inline int is_mem_section_removable(unsigned long pfn,
+static inline bool is_mem_section_removable(unsigned long pfn,
unsigned long nr_pages)
{
- return 0;
+ return false;
}
static inline void try_offline_node(int nid) {}
diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h
index 2696c1f05ed1..4429d255c8ab 100644
--- a/include/linux/mempolicy.h
+++ b/include/linux/mempolicy.h
@@ -172,14 +172,14 @@ extern int mpol_parse_str(char *str, struct mempolicy **mpol);
extern void mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol);
/* Check if a vma is migratable */
-static inline int vma_migratable(struct vm_area_struct *vma)
+static inline bool vma_migratable(struct vm_area_struct *vma)
{
if (vma->vm_flags & (VM_IO | VM_PFNMAP))
- return 0;
+ return false;
#ifndef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
if (vma->vm_flags & VM_HUGETLB)
- return 0;
+ return false;
#endif
/*
@@ -190,8 +190,8 @@ static inline int vma_migratable(struct vm_area_struct *vma)
if (vma->vm_file &&
gfp_zone(mapping_gfp_mask(vma->vm_file->f_mapping))
< policy_zone)
- return 0;
- return 1;
+ return false;
+ return true;
}
extern int mpol_misplaced(struct page *, struct vm_area_struct *, unsigned long);
@@ -228,6 +228,12 @@ static inline void mpol_free_shared_policy(struct shared_policy *p)
{
}
+static inline struct mempolicy *
+mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
+{
+ return NULL;
+}
+
#define vma_policy(vma) NULL
static inline int
diff --git a/include/linux/mempool.h b/include/linux/mempool.h
index 69b6951e8fd2..b1086c936507 100644
--- a/include/linux/mempool.h
+++ b/include/linux/mempool.h
@@ -5,6 +5,7 @@
#define _LINUX_MEMPOOL_H
#include <linux/wait.h>
+#include <linux/compiler.h>
struct kmem_cache;
@@ -31,7 +32,7 @@ extern mempool_t *mempool_create_node(int min_nr, mempool_alloc_t *alloc_fn,
extern int mempool_resize(mempool_t *pool, int new_min_nr);
extern void mempool_destroy(mempool_t *pool);
-extern void * mempool_alloc(mempool_t *pool, gfp_t gfp_mask);
+extern void *mempool_alloc(mempool_t *pool, gfp_t gfp_mask) __malloc;
extern void mempool_free(void *element, mempool_t *pool);
/*
diff --git a/include/linux/mfd/as3722.h b/include/linux/mfd/as3722.h
index 8d43e9f2a842..51e6f9414575 100644
--- a/include/linux/mfd/as3722.h
+++ b/include/linux/mfd/as3722.h
@@ -196,6 +196,7 @@
#define AS3722_LDO3_VSEL_MIN 0x01
#define AS3722_LDO3_VSEL_MAX 0x2D
#define AS3722_LDO3_NUM_VOLT 0x2D
+#define AS3722_LDO6_VSEL_BYPASS 0x3F
#define AS3722_LDO_VSEL_MASK 0x7F
#define AS3722_LDO_VSEL_MIN 0x01
#define AS3722_LDO_VSEL_MAX 0x7F
diff --git a/include/linux/mfd/axp20x.h b/include/linux/mfd/axp20x.h
index d82e7d51372b..0be4982f08fe 100644
--- a/include/linux/mfd/axp20x.h
+++ b/include/linux/mfd/axp20x.h
@@ -20,6 +20,7 @@ enum {
AXP221_ID,
AXP223_ID,
AXP288_ID,
+ AXP809_ID,
NR_AXP20X_VARIANTS,
};
@@ -264,6 +265,29 @@ enum {
AXP22X_REG_ID_MAX,
};
+enum {
+ AXP809_DCDC1 = 0,
+ AXP809_DCDC2,
+ AXP809_DCDC3,
+ AXP809_DCDC4,
+ AXP809_DCDC5,
+ AXP809_DC1SW,
+ AXP809_DC5LDO,
+ AXP809_ALDO1,
+ AXP809_ALDO2,
+ AXP809_ALDO3,
+ AXP809_ELDO1,
+ AXP809_ELDO2,
+ AXP809_ELDO3,
+ AXP809_DLDO1,
+ AXP809_DLDO2,
+ AXP809_RTC_LDO,
+ AXP809_LDO_IO0,
+ AXP809_LDO_IO1,
+ AXP809_SW,
+ AXP809_REG_ID_MAX,
+};
+
/* IRQs */
enum {
AXP152_IRQ_LDO0IN_CONNECT = 1,
@@ -390,6 +414,41 @@ enum axp288_irqs {
AXP288_IRQ_BC_USB_CHNG,
};
+enum axp809_irqs {
+ AXP809_IRQ_ACIN_OVER_V = 1,
+ AXP809_IRQ_ACIN_PLUGIN,
+ AXP809_IRQ_ACIN_REMOVAL,
+ AXP809_IRQ_VBUS_OVER_V,
+ AXP809_IRQ_VBUS_PLUGIN,
+ AXP809_IRQ_VBUS_REMOVAL,
+ AXP809_IRQ_VBUS_V_LOW,
+ AXP809_IRQ_BATT_PLUGIN,
+ AXP809_IRQ_BATT_REMOVAL,
+ AXP809_IRQ_BATT_ENT_ACT_MODE,
+ AXP809_IRQ_BATT_EXIT_ACT_MODE,
+ AXP809_IRQ_CHARG,
+ AXP809_IRQ_CHARG_DONE,
+ AXP809_IRQ_BATT_CHG_TEMP_HIGH,
+ AXP809_IRQ_BATT_CHG_TEMP_HIGH_END,
+ AXP809_IRQ_BATT_CHG_TEMP_LOW,
+ AXP809_IRQ_BATT_CHG_TEMP_LOW_END,
+ AXP809_IRQ_BATT_ACT_TEMP_HIGH,
+ AXP809_IRQ_BATT_ACT_TEMP_HIGH_END,
+ AXP809_IRQ_BATT_ACT_TEMP_LOW,
+ AXP809_IRQ_BATT_ACT_TEMP_LOW_END,
+ AXP809_IRQ_DIE_TEMP_HIGH,
+ AXP809_IRQ_LOW_PWR_LVL1,
+ AXP809_IRQ_LOW_PWR_LVL2,
+ AXP809_IRQ_TIMER,
+ AXP809_IRQ_PEK_RIS_EDGE,
+ AXP809_IRQ_PEK_FAL_EDGE,
+ AXP809_IRQ_PEK_SHORT,
+ AXP809_IRQ_PEK_LONG,
+ AXP809_IRQ_PEK_OVER_OFF,
+ AXP809_IRQ_GPIO1_INPUT,
+ AXP809_IRQ_GPIO0_INPUT,
+};
+
#define AXP288_TS_ADC_H 0x58
#define AXP288_TS_ADC_L 0x59
#define AXP288_GP_ADC_H 0x5a
diff --git a/include/linux/mfd/core.h b/include/linux/mfd/core.h
index bc6f7e00fb3d..99c0395fe1f9 100644
--- a/include/linux/mfd/core.h
+++ b/include/linux/mfd/core.h
@@ -17,7 +17,7 @@
#include <linux/platform_device.h>
struct irq_domain;
-struct property_set;
+struct property_entry;
/* Matches ACPI PNP id, either _HID or _CID, or ACPI _ADR */
struct mfd_cell_acpi_match {
@@ -47,7 +47,7 @@ struct mfd_cell {
size_t pdata_size;
/* device properties passed to the sub devices drivers */
- const struct property_set *pset;
+ struct property_entry *properties;
/*
* Device Tree compatible string
@@ -131,4 +131,8 @@ static inline int mfd_add_hotplug_devices(struct device *parent,
extern void mfd_remove_devices(struct device *parent);
+extern int devm_mfd_add_devices(struct device *dev, int id,
+ const struct mfd_cell *cells, int n_devs,
+ struct resource *mem_base,
+ int irq_base, struct irq_domain *irq_domain);
#endif
diff --git a/include/linux/mfd/hi655x-pmic.h b/include/linux/mfd/hi655x-pmic.h
new file mode 100644
index 000000000000..dbbe9a644622
--- /dev/null
+++ b/include/linux/mfd/hi655x-pmic.h
@@ -0,0 +1,55 @@
+/*
+ * Device driver for regulators in hi655x IC
+ *
+ * Copyright (c) 2016 Hisilicon.
+ *
+ * Authors:
+ * Chen Feng <puck.chen@hisilicon.com>
+ * Fei Wang <w.f@huawei.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __HI655X_PMIC_H
+#define __HI655X_PMIC_H
+
+/* Hi655x registers are mapped onto the memory bus with a 4-byte stride */
+#define HI655X_STRIDE 4
+#define HI655X_BUS_ADDR(x) ((x) << 2)
+
+#define HI655X_BITS 8
+
+#define HI655X_NR_IRQ 32
+
+#define HI655X_IRQ_STAT_BASE (0x003 << 2)
+#define HI655X_IRQ_MASK_BASE (0x007 << 2)
+#define HI655X_ANA_IRQM_BASE (0x1b5 << 2)
+#define HI655X_IRQ_ARRAY 4
+#define HI655X_IRQ_MASK 0xFF
+#define HI655X_IRQ_CLR 0xFF
+#define HI655X_VER_REG 0x00
+
+#define PMU_VER_START 0x10
+#define PMU_VER_END 0x38
+
+#define RESERVE_INT BIT(7)
+#define PWRON_D20R_INT BIT(6)
+#define PWRON_D20F_INT BIT(5)
+#define PWRON_D4SR_INT BIT(4)
+#define VSYS_6P0_D200UR_INT BIT(3)
+#define VSYS_UV_D3R_INT BIT(2)
+#define VSYS_2P5_R_INT BIT(1)
+#define OTMP_D1R_INT BIT(0)
+
+struct hi655x_pmic {
+ struct resource *res;
+ struct device *dev;
+ struct regmap *regmap;
+ int gpio;
+ unsigned int ver;
+ struct regmap_irq_chip_data *irq_data;
+};
+
+#endif
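Editor's note: as the stride comment notes, a register index is shifted left by two to form its byte offset on the bus, e.g. HI655X_BUS_ADDR(0x003) == 0x00C, which is exactly HI655X_IRQ_STAT_BASE. A minimal sketch of a register read built on that, assuming 'pmic' is a populated struct hi655x_pmic:

	unsigned int ver;

	regmap_read(pmic->regmap, HI655X_BUS_ADDR(HI655X_VER_REG), &ver);
	/* a driver would then range-check ver against PMU_VER_START/PMU_VER_END */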
diff --git a/include/linux/mfd/max77620.h b/include/linux/mfd/max77620.h
new file mode 100644
index 000000000000..3ca0af07fc78
--- /dev/null
+++ b/include/linux/mfd/max77620.h
@@ -0,0 +1,346 @@
+/*
+ * Register addresses and bit definitions for MAX77620 and MAX20024
+ *
+ * Copyright (C) 2016 NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+
+#ifndef _MFD_MAX77620_H_
+#define _MFD_MAX77620_H_
+
+#include <linux/types.h>
+
+/* GLOBAL, PMIC, GPIO, FPS, ONOFFC, CID Registers */
+#define MAX77620_REG_CNFGGLBL1 0x00
+#define MAX77620_REG_CNFGGLBL2 0x01
+#define MAX77620_REG_CNFGGLBL3 0x02
+#define MAX77620_REG_CNFG1_32K 0x03
+#define MAX77620_REG_CNFGBBC 0x04
+#define MAX77620_REG_IRQTOP 0x05
+#define MAX77620_REG_INTLBT 0x06
+#define MAX77620_REG_IRQSD 0x07
+#define MAX77620_REG_IRQ_LVL2_L0_7 0x08
+#define MAX77620_REG_IRQ_LVL2_L8 0x09
+#define MAX77620_REG_IRQ_LVL2_GPIO 0x0A
+#define MAX77620_REG_ONOFFIRQ 0x0B
+#define MAX77620_REG_NVERC 0x0C
+#define MAX77620_REG_IRQTOPM 0x0D
+#define MAX77620_REG_INTENLBT 0x0E
+#define MAX77620_REG_IRQMASKSD 0x0F
+#define MAX77620_REG_IRQ_MSK_L0_7 0x10
+#define MAX77620_REG_IRQ_MSK_L8 0x11
+#define MAX77620_REG_ONOFFIRQM 0x12
+#define MAX77620_REG_STATLBT 0x13
+#define MAX77620_REG_STATSD 0x14
+#define MAX77620_REG_ONOFFSTAT 0x15
+
+/* SD and LDO Registers */
+#define MAX77620_REG_SD0 0x16
+#define MAX77620_REG_SD1 0x17
+#define MAX77620_REG_SD2 0x18
+#define MAX77620_REG_SD3 0x19
+#define MAX77620_REG_SD4 0x1A
+#define MAX77620_REG_DVSSD0 0x1B
+#define MAX77620_REG_DVSSD1 0x1C
+#define MAX77620_REG_SD0_CFG 0x1D
+#define MAX77620_REG_SD1_CFG 0x1E
+#define MAX77620_REG_SD2_CFG 0x1F
+#define MAX77620_REG_SD3_CFG 0x20
+#define MAX77620_REG_SD4_CFG 0x21
+#define MAX77620_REG_SD_CFG2 0x22
+#define MAX77620_REG_LDO0_CFG 0x23
+#define MAX77620_REG_LDO0_CFG2 0x24
+#define MAX77620_REG_LDO1_CFG 0x25
+#define MAX77620_REG_LDO1_CFG2 0x26
+#define MAX77620_REG_LDO2_CFG 0x27
+#define MAX77620_REG_LDO2_CFG2 0x28
+#define MAX77620_REG_LDO3_CFG 0x29
+#define MAX77620_REG_LDO3_CFG2 0x2A
+#define MAX77620_REG_LDO4_CFG 0x2B
+#define MAX77620_REG_LDO4_CFG2 0x2C
+#define MAX77620_REG_LDO5_CFG 0x2D
+#define MAX77620_REG_LDO5_CFG2 0x2E
+#define MAX77620_REG_LDO6_CFG 0x2F
+#define MAX77620_REG_LDO6_CFG2 0x30
+#define MAX77620_REG_LDO7_CFG 0x31
+#define MAX77620_REG_LDO7_CFG2 0x32
+#define MAX77620_REG_LDO8_CFG 0x33
+#define MAX77620_REG_LDO8_CFG2 0x34
+#define MAX77620_REG_LDO_CFG3 0x35
+
+#define MAX77620_LDO_SLEW_RATE_MASK 0x1
+
+/* LDO Configuration 3 */
+#define MAX77620_TRACK4_MASK BIT(5)
+#define MAX77620_TRACK4_SHIFT 5
+
+/* Voltage */
+#define MAX77620_SDX_VOLT_MASK 0xFF
+#define MAX77620_SD0_VOLT_MASK 0x3F
+#define MAX77620_SD1_VOLT_MASK 0x7F
+#define MAX77620_LDO_VOLT_MASK 0x3F
+
+#define MAX77620_REG_GPIO0 0x36
+#define MAX77620_REG_GPIO1 0x37
+#define MAX77620_REG_GPIO2 0x38
+#define MAX77620_REG_GPIO3 0x39
+#define MAX77620_REG_GPIO4 0x3A
+#define MAX77620_REG_GPIO5 0x3B
+#define MAX77620_REG_GPIO6 0x3C
+#define MAX77620_REG_GPIO7 0x3D
+#define MAX77620_REG_PUE_GPIO 0x3E
+#define MAX77620_REG_PDE_GPIO 0x3F
+#define MAX77620_REG_AME_GPIO 0x40
+#define MAX77620_REG_ONOFFCNFG1 0x41
+#define MAX77620_REG_ONOFFCNFG2 0x42
+
+/* FPS Registers */
+#define MAX77620_REG_FPS_CFG0 0x43
+#define MAX77620_REG_FPS_CFG1 0x44
+#define MAX77620_REG_FPS_CFG2 0x45
+#define MAX77620_REG_FPS_LDO0 0x46
+#define MAX77620_REG_FPS_LDO1 0x47
+#define MAX77620_REG_FPS_LDO2 0x48
+#define MAX77620_REG_FPS_LDO3 0x49
+#define MAX77620_REG_FPS_LDO4 0x4A
+#define MAX77620_REG_FPS_LDO5 0x4B
+#define MAX77620_REG_FPS_LDO6 0x4C
+#define MAX77620_REG_FPS_LDO7 0x4D
+#define MAX77620_REG_FPS_LDO8 0x4E
+#define MAX77620_REG_FPS_SD0 0x4F
+#define MAX77620_REG_FPS_SD1 0x50
+#define MAX77620_REG_FPS_SD2 0x51
+#define MAX77620_REG_FPS_SD3 0x52
+#define MAX77620_REG_FPS_SD4 0x53
+#define MAX77620_REG_FPS_NONE 0
+
+#define MAX77620_FPS_SRC_MASK 0xC0
+#define MAX77620_FPS_SRC_SHIFT 6
+#define MAX77620_FPS_PU_PERIOD_MASK 0x38
+#define MAX77620_FPS_PU_PERIOD_SHIFT 3
+#define MAX77620_FPS_PD_PERIOD_MASK 0x07
+#define MAX77620_FPS_PD_PERIOD_SHIFT 0
+#define MAX77620_FPS_TIME_PERIOD_MASK 0x38
+#define MAX77620_FPS_TIME_PERIOD_SHIFT 3
+#define MAX77620_FPS_EN_SRC_MASK 0x06
+#define MAX77620_FPS_EN_SRC_SHIFT 1
+#define MAX77620_FPS_ENFPS_SW_MASK 0x01
+#define MAX77620_FPS_ENFPS_SW 0x01
+
+/* Minimum and maximum FPS period time (in microseconds) are
+ * different for MAX77620 and MAX20024.
+ */
+#define MAX77620_FPS_PERIOD_MIN_US 40
+#define MAX20024_FPS_PERIOD_MIN_US 20
+
+#define MAX77620_FPS_PERIOD_MAX_US 2560
+#define MAX20024_FPS_PERIOD_MAX_US 5120
+
+#define MAX77620_REG_FPS_GPIO1 0x54
+#define MAX77620_REG_FPS_GPIO2 0x55
+#define MAX77620_REG_FPS_GPIO3 0x56
+#define MAX77620_REG_FPS_RSO 0x57
+#define MAX77620_REG_CID0 0x58
+#define MAX77620_REG_CID1 0x59
+#define MAX77620_REG_CID2 0x5A
+#define MAX77620_REG_CID3 0x5B
+#define MAX77620_REG_CID4 0x5C
+#define MAX77620_REG_CID5 0x5D
+
+#define MAX77620_REG_DVSSD4 0x5E
+#define MAX20024_REG_MAX_ADD 0x70
+
+#define MAX77620_CID_DIDM_MASK 0xF0
+#define MAX77620_CID_DIDM_SHIFT 4
+
+/* CNCG2SD */
+#define MAX77620_SD_CNF2_ROVS_EN_SD1 BIT(1)
+#define MAX77620_SD_CNF2_ROVS_EN_SD0 BIT(2)
+
+/* Device Identification Metal */
+#define MAX77620_CID5_DIDM(n) (((n) >> 4) & 0xF)
+/* Device Identification OTP */
+#define MAX77620_CID5_DIDO(n) ((n) & 0xF)
+
+/* SD CNFG1 */
+#define MAX77620_SD_SR_MASK 0xC0
+#define MAX77620_SD_SR_SHIFT 6
+#define MAX77620_SD_POWER_MODE_MASK 0x30
+#define MAX77620_SD_POWER_MODE_SHIFT 4
+#define MAX77620_SD_CFG1_ADE_MASK BIT(3)
+#define MAX77620_SD_CFG1_ADE_DISABLE 0
+#define MAX77620_SD_CFG1_ADE_ENABLE BIT(3)
+#define MAX77620_SD_FPWM_MASK 0x04
+#define MAX77620_SD_FPWM_SHIFT 2
+#define MAX77620_SD_FSRADE_MASK 0x01
+#define MAX77620_SD_FSRADE_SHIFT 0
+#define MAX77620_SD_CFG1_FPWM_SD_MASK BIT(2)
+#define MAX77620_SD_CFG1_FPWM_SD_SKIP 0
+#define MAX77620_SD_CFG1_FPWM_SD_FPWM BIT(2)
+#define MAX77620_SD_CFG1_FSRADE_SD_MASK BIT(0)
+#define MAX77620_SD_CFG1_FSRADE_SD_DISABLE 0
+#define MAX77620_SD_CFG1_FSRADE_SD_ENABLE BIT(0)
+
+/* LDO_CNFG2 */
+#define MAX77620_LDO_POWER_MODE_MASK 0xC0
+#define MAX77620_LDO_POWER_MODE_SHIFT 6
+#define MAX77620_LDO_CFG2_ADE_MASK BIT(1)
+#define MAX77620_LDO_CFG2_ADE_DISABLE 0
+#define MAX77620_LDO_CFG2_ADE_ENABLE BIT(1)
+#define MAX77620_LDO_CFG2_SS_MASK BIT(0)
+#define MAX77620_LDO_CFG2_SS_FAST BIT(0)
+#define MAX77620_LDO_CFG2_SS_SLOW 0
+
+#define MAX77620_IRQ_TOP_GLBL_MASK BIT(7)
+#define MAX77620_IRQ_TOP_SD_MASK BIT(6)
+#define MAX77620_IRQ_TOP_LDO_MASK BIT(5)
+#define MAX77620_IRQ_TOP_GPIO_MASK BIT(4)
+#define MAX77620_IRQ_TOP_RTC_MASK BIT(3)
+#define MAX77620_IRQ_TOP_32K_MASK BIT(2)
+#define MAX77620_IRQ_TOP_ONOFF_MASK BIT(1)
+
+#define MAX77620_IRQ_LBM_MASK BIT(3)
+#define MAX77620_IRQ_TJALRM1_MASK BIT(2)
+#define MAX77620_IRQ_TJALRM2_MASK BIT(1)
+
+#define MAX77620_PWR_I2C_ADDR 0x3c
+#define MAX77620_RTC_I2C_ADDR 0x68
+
+#define MAX77620_CNFG_GPIO_DRV_MASK BIT(0)
+#define MAX77620_CNFG_GPIO_DRV_PUSHPULL BIT(0)
+#define MAX77620_CNFG_GPIO_DRV_OPENDRAIN 0
+#define MAX77620_CNFG_GPIO_DIR_MASK BIT(1)
+#define MAX77620_CNFG_GPIO_DIR_INPUT BIT(1)
+#define MAX77620_CNFG_GPIO_DIR_OUTPUT 0
+#define MAX77620_CNFG_GPIO_INPUT_VAL_MASK BIT(2)
+#define MAX77620_CNFG_GPIO_OUTPUT_VAL_MASK BIT(3)
+#define MAX77620_CNFG_GPIO_OUTPUT_VAL_HIGH BIT(3)
+#define MAX77620_CNFG_GPIO_OUTPUT_VAL_LOW 0
+#define MAX77620_CNFG_GPIO_INT_MASK (0x3 << 4)
+#define MAX77620_CNFG_GPIO_INT_FALLING BIT(4)
+#define MAX77620_CNFG_GPIO_INT_RISING BIT(5)
+#define MAX77620_CNFG_GPIO_DBNC_MASK (0x3 << 6)
+#define MAX77620_CNFG_GPIO_DBNC_None (0x0 << 6)
+#define MAX77620_CNFG_GPIO_DBNC_8ms (0x1 << 6)
+#define MAX77620_CNFG_GPIO_DBNC_16ms (0x2 << 6)
+#define MAX77620_CNFG_GPIO_DBNC_32ms (0x3 << 6)
+
+#define MAX77620_IRQ_LVL2_GPIO_EDGE0 BIT(0)
+#define MAX77620_IRQ_LVL2_GPIO_EDGE1 BIT(1)
+#define MAX77620_IRQ_LVL2_GPIO_EDGE2 BIT(2)
+#define MAX77620_IRQ_LVL2_GPIO_EDGE3 BIT(3)
+#define MAX77620_IRQ_LVL2_GPIO_EDGE4 BIT(4)
+#define MAX77620_IRQ_LVL2_GPIO_EDGE5 BIT(5)
+#define MAX77620_IRQ_LVL2_GPIO_EDGE6 BIT(6)
+#define MAX77620_IRQ_LVL2_GPIO_EDGE7 BIT(7)
+
+#define MAX77620_CNFG1_32K_OUT0_EN BIT(2)
+
+#define MAX77620_ONOFFCNFG1_SFT_RST BIT(7)
+#define MAX77620_ONOFFCNFG1_MRT_MASK 0x38
+#define MAX77620_ONOFFCNFG1_MRT_SHIFT 0x3
+#define MAX77620_ONOFFCNFG1_SLPEN BIT(2)
+#define MAX77620_ONOFFCNFG1_PWR_OFF BIT(1)
+#define MAX20024_ONOFFCNFG1_CLRSE 0x18
+
+#define MAX77620_ONOFFCNFG2_SFT_RST_WK BIT(7)
+#define MAX77620_ONOFFCNFG2_WD_RST_WK BIT(6)
+#define MAX77620_ONOFFCNFG2_SLP_LPM_MSK BIT(5)
+#define MAX77620_ONOFFCNFG2_WK_ALARM1 BIT(2)
+#define MAX77620_ONOFFCNFG2_WK_EN0 BIT(0)
+
+#define MAX77620_GLBLM_MASK BIT(0)
+
+#define MAX77620_WDTC_MASK 0x3
+#define MAX77620_WDTOFFC BIT(4)
+#define MAX77620_WDTSLPC BIT(3)
+#define MAX77620_WDTEN BIT(2)
+
+#define MAX77620_TWD_MASK 0x3
+#define MAX77620_TWD_2s 0x0
+#define MAX77620_TWD_16s 0x1
+#define MAX77620_TWD_64s 0x2
+#define MAX77620_TWD_128s 0x3
+
+#define MAX77620_CNFGGLBL1_LBDAC_EN BIT(7)
+#define MAX77620_CNFGGLBL1_MPPLD BIT(6)
+#define MAX77620_CNFGGLBL1_LBHYST (BIT(5) | BIT(4))
+#define MAX77620_CNFGGLBL1_LBDAC 0x0E
+#define MAX77620_CNFGGLBL1_LBRSTEN BIT(0)
+
+/* CNFG BBC registers */
+#define MAX77620_CNFGBBC_ENABLE BIT(0)
+#define MAX77620_CNFGBBC_CURRENT_MASK 0x06
+#define MAX77620_CNFGBBC_CURRENT_SHIFT 1
+#define MAX77620_CNFGBBC_VOLTAGE_MASK 0x18
+#define MAX77620_CNFGBBC_VOLTAGE_SHIFT 3
+#define MAX77620_CNFGBBC_LOW_CURRENT_DISABLE BIT(5)
+#define MAX77620_CNFGBBC_RESISTOR_MASK 0xC0
+#define MAX77620_CNFGBBC_RESISTOR_SHIFT 6
+
+#define MAX77620_FPS_COUNT 3
+
+/* Interrupts */
+enum {
+ MAX77620_IRQ_TOP_GLBL, /* Low-Battery */
+ MAX77620_IRQ_TOP_SD, /* SD power fail */
+ MAX77620_IRQ_TOP_LDO, /* LDO power fail */
+ MAX77620_IRQ_TOP_GPIO, /* TOP GPIO internal int to MAX77620 */
+ MAX77620_IRQ_TOP_RTC, /* RTC */
+ MAX77620_IRQ_TOP_32K, /* 32kHz oscillator */
+ MAX77620_IRQ_TOP_ONOFF, /* ON/OFF oscillator */
+ MAX77620_IRQ_LBT_MBATLOW, /* Thermal alarm status, > 120C */
+ MAX77620_IRQ_LBT_TJALRM1, /* Thermal alarm status, > 120C */
+ MAX77620_IRQ_LBT_TJALRM2, /* Thermal alarm status, > 140C */
+};
+
+/* GPIOs */
+enum {
+ MAX77620_GPIO0,
+ MAX77620_GPIO1,
+ MAX77620_GPIO2,
+ MAX77620_GPIO3,
+ MAX77620_GPIO4,
+ MAX77620_GPIO5,
+ MAX77620_GPIO6,
+ MAX77620_GPIO7,
+ MAX77620_GPIO_NR,
+};
+
+/* FPS Source */
+enum max77620_fps_src {
+ MAX77620_FPS_SRC_0,
+ MAX77620_FPS_SRC_1,
+ MAX77620_FPS_SRC_2,
+ MAX77620_FPS_SRC_NONE,
+ MAX77620_FPS_SRC_DEF,
+};
+
+enum max77620_chip_id {
+ MAX77620,
+ MAX20024,
+};
+
+struct max77620_chip {
+ struct device *dev;
+ struct regmap *rmap;
+
+ int chip_irq;
+ int irq_base;
+
+ /* chip id */
+ enum max77620_chip_id chip_id;
+
+ bool sleep_enable;
+ bool enable_global_lpm;
+ int shutdown_fps_period[MAX77620_FPS_COUNT];
+ int suspend_fps_period[MAX77620_FPS_COUNT];
+
+ struct regmap_irq_chip_data *top_irq_data;
+ struct regmap_irq_chip_data *gpio_irq_data;
+};
+
+#endif /* _MFD_MAX77620_H_ */
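Editor's note: most of the register fields above are mask/shift pairs; a hedged sketch of pulling one of them out, here the flexible power sequencer source of the SD0 rail ('chip' is assumed to be a populated struct max77620_chip, error handling elided):

	unsigned int cfg, src;

	regmap_read(chip->rmap, MAX77620_REG_FPS_SD0, &cfg);
	src = (cfg & MAX77620_FPS_SRC_MASK) >> MAX77620_FPS_SRC_SHIFT;
	/* 0..2 select FPS0..FPS2; 3 corresponds to MAX77620_FPS_SRC_NONE */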
diff --git a/include/linux/mfd/samsung/core.h b/include/linux/mfd/samsung/core.h
index 6bc4bcd488ac..5a23dd4df432 100644
--- a/include/linux/mfd/samsung/core.h
+++ b/include/linux/mfd/samsung/core.h
@@ -30,6 +30,9 @@
#define MIN_600_MV 600000
#define MIN_500_MV 500000
+/* Ramp delay in uV/us */
+#define RAMP_DELAY_12_MVUS 12000
+
/* Macros to represent steps for LDO/BUCK */
#define STEP_50_MV 50000
#define STEP_25_MV 25000
diff --git a/include/linux/mfd/samsung/s2mps11.h b/include/linux/mfd/samsung/s2mps11.h
index b288965e8101..2c14eeca46f0 100644
--- a/include/linux/mfd/samsung/s2mps11.h
+++ b/include/linux/mfd/samsung/s2mps11.h
@@ -173,10 +173,12 @@ enum s2mps11_regulators {
#define S2MPS11_LDO_VSEL_MASK 0x3F
#define S2MPS11_BUCK_VSEL_MASK 0xFF
+#define S2MPS11_BUCK9_VSEL_MASK 0x1F
#define S2MPS11_ENABLE_MASK (0x03 << S2MPS11_ENABLE_SHIFT)
#define S2MPS11_ENABLE_SHIFT 0x06
#define S2MPS11_LDO_N_VOLTAGES (S2MPS11_LDO_VSEL_MASK + 1)
#define S2MPS11_BUCK_N_VOLTAGES (S2MPS11_BUCK_VSEL_MASK + 1)
+#define S2MPS11_BUCK9_N_VOLTAGES (S2MPS11_BUCK9_VSEL_MASK + 1)
#define S2MPS11_RAMP_DELAY 25000 /* uV/us */
#define S2MPS11_CTRL1_PWRHOLD_MASK BIT(4)
diff --git a/include/linux/mfd/syscon.h b/include/linux/mfd/syscon.h
index 1088149be0c9..40a76b97b7ab 100644
--- a/include/linux/mfd/syscon.h
+++ b/include/linux/mfd/syscon.h
@@ -16,6 +16,7 @@
#define __LINUX_MFD_SYSCON_H__
#include <linux/err.h>
+#include <linux/errno.h>
struct device_node;
diff --git a/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h b/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h
index 238c8db953eb..c8e0164c5423 100644
--- a/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h
+++ b/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h
@@ -95,6 +95,7 @@
#define IMX6Q_GPR0_DMAREQ_MUX_SEL0_IOMUX BIT(0)
#define IMX6Q_GPR1_PCIE_REQ_MASK (0x3 << 30)
+#define IMX6Q_GPR1_PCIE_SW_RST BIT(29)
#define IMX6Q_GPR1_PCIE_EXIT_L1 BIT(28)
#define IMX6Q_GPR1_PCIE_RDY_L23 BIT(27)
#define IMX6Q_GPR1_PCIE_ENTER_L1 BIT(26)
@@ -447,5 +448,11 @@
#define IMX6UL_GPR1_ENET2_CLK_OUTPUT (0x1 << 18)
#define IMX6UL_GPR1_ENET_CLK_DIR (0x3 << 17)
#define IMX6UL_GPR1_ENET_CLK_OUTPUT (0x3 << 17)
+#define IMX6UL_GPR1_SAI1_MCLK_DIR (0x1 << 19)
+#define IMX6UL_GPR1_SAI2_MCLK_DIR (0x1 << 20)
+#define IMX6UL_GPR1_SAI3_MCLK_DIR (0x1 << 21)
+#define IMX6UL_GPR1_SAI_MCLK_MASK (0x7 << 19)
+#define MCLK_DIR(x) (x == 1 ? IMX6UL_GPR1_SAI1_MCLK_DIR : x == 2 ? \
+ IMX6UL_GPR1_SAI2_MCLK_DIR : IMX6UL_GPR1_SAI3_MCLK_DIR)
#endif /* __LINUX_IMX6Q_IOMUXC_GPR_H */
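Editor's note: MCLK_DIR() above picks the SAI instance's MCLK direction bit from a plain instance number; it expands its argument twice without parentheses, so it should only be handed a simple, side-effect-free expression. A hedged sketch of steering SAI2's MCLK pad to output ('gpr' is a syscon regmap for the IOMUXC GPR block, and IOMUXC_GPR1 is the register offset defined earlier in this header):

	regmap_update_bits(gpr, IOMUXC_GPR1,
			   IMX6UL_GPR1_SAI_MCLK_MASK, MCLK_DIR(2));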
diff --git a/include/linux/mfd/tmio.h b/include/linux/mfd/tmio.h
index 05d58ee5e6a7..7a26286db895 100644
--- a/include/linux/mfd/tmio.h
+++ b/include/linux/mfd/tmio.h
@@ -66,8 +66,8 @@
*/
#define TMIO_MMC_SDIO_IRQ (1 << 2)
-/* Some controllers don't need to wait 10ms for clock changes */
-#define TMIO_MMC_FAST_CLK_CHG (1 << 3)
+/* Some features are only available or tested on RCar Gen2 or later */
+#define TMIO_MMC_MIN_RCAR2 (1 << 3)
/*
* Some controllers require waiting for the SD bus to become
diff --git a/include/linux/mfd/wm8400-private.h b/include/linux/mfd/wm8400-private.h
index 2de565b94d0c..4ee908f5b834 100644
--- a/include/linux/mfd/wm8400-private.h
+++ b/include/linux/mfd/wm8400-private.h
@@ -923,7 +923,6 @@ struct wm8400 {
#define WM8400_LINE_CMP_VTHD_SHIFT 0 /* LINE_CMP_VTHD - [3:0] */
#define WM8400_LINE_CMP_VTHD_WIDTH 4 /* LINE_CMP_VTHD - [3:0] */
-u16 wm8400_reg_read(struct wm8400 *wm8400, u8 reg);
int wm8400_block_read(struct wm8400 *wm8400, u8 reg, int count, u16 *data);
static inline int wm8400_set_bits(struct wm8400 *wm8400, u8 reg,
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index 8541a913f6a3..80dec87a94f8 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -828,6 +828,11 @@ struct mlx4_vf_dev {
u8 n_ports;
};
+enum mlx4_pci_status {
+ MLX4_PCI_STATUS_DISABLED,
+ MLX4_PCI_STATUS_ENABLED,
+};
+
struct mlx4_dev_persistent {
struct pci_dev *pdev;
struct mlx4_dev *dev;
@@ -841,6 +846,8 @@ struct mlx4_dev_persistent {
u8 state;
struct mutex interface_state_mutex; /* protect SW state */
u8 interface_state;
+ struct mutex pci_status_mutex; /* sync pci state */
+ enum mlx4_pci_status pci_status;
};
struct mlx4_dev {
@@ -1051,7 +1058,7 @@ int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct,
void mlx4_buf_free(struct mlx4_dev *dev, int size, struct mlx4_buf *buf);
static inline void *mlx4_buf_offset(struct mlx4_buf *buf, int offset)
{
- if (BITS_PER_LONG == 64 || buf->nbufs == 1)
+ if (buf->nbufs == 1)
return buf->direct.buf + offset;
else
return buf->page_list[offset >> PAGE_SHIFT].buf +
@@ -1091,7 +1098,7 @@ int mlx4_db_alloc(struct mlx4_dev *dev, struct mlx4_db *db, int order,
void mlx4_db_free(struct mlx4_dev *dev, struct mlx4_db *db);
int mlx4_alloc_hwq_res(struct mlx4_dev *dev, struct mlx4_hwq_resources *wqres,
- int size, int max_direct);
+ int size);
void mlx4_free_hwq_res(struct mlx4_dev *mdev, struct mlx4_hwq_resources *wqres,
int size);
diff --git a/include/linux/mlx5/cq.h b/include/linux/mlx5/cq.h
index b2c9fada8eac..2be976dd4966 100644
--- a/include/linux/mlx5/cq.h
+++ b/include/linux/mlx5/cq.h
@@ -53,6 +53,11 @@ struct mlx5_core_cq {
unsigned arm_sn;
struct mlx5_rsc_debug *dbg;
int pid;
+ struct {
+ struct list_head list;
+ void (*comp)(struct mlx5_core_cq *);
+ void *priv;
+ } tasklet_ctx;
};
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
index 8156e3c9239c..035abdf62cfe 100644
--- a/include/linux/mlx5/device.h
+++ b/include/linux/mlx5/device.h
@@ -59,6 +59,7 @@
#define MLX5_FLD_SZ_BYTES(typ, fld) (__mlx5_bit_sz(typ, fld) / 8)
#define MLX5_ST_SZ_BYTES(typ) (sizeof(struct mlx5_ifc_##typ##_bits) / 8)
#define MLX5_ST_SZ_DW(typ) (sizeof(struct mlx5_ifc_##typ##_bits) / 32)
+#define MLX5_ST_SZ_QW(typ) (sizeof(struct mlx5_ifc_##typ##_bits) / 64)
#define MLX5_UN_SZ_BYTES(typ) (sizeof(union mlx5_ifc_##typ##_bits) / 8)
#define MLX5_UN_SZ_DW(typ) (sizeof(union mlx5_ifc_##typ##_bits) / 32)
#define MLX5_BYTE_OFF(typ, fld) (__mlx5_bit_off(typ, fld) / 8)
@@ -392,6 +393,17 @@ enum {
MLX5_CAP_OFF_CMDIF_CSUM = 46,
};
+enum {
+ /*
+ * Max wqe size for rdma read is 512 bytes, so this
+ * limits our max_sge_rd as the wqe needs to fit:
+ * - ctrl segment (16 bytes)
+ * - rdma segment (16 bytes)
+ * - scatter elements (16 bytes each)
+ */
+ MLX5_MAX_SGE_RD = (512 - 16 - 16) / 16
+};
+
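Editor's note: worked out, the limit above is (512 - 16 - 16) / 16 = 30 scatter entries per RDMA-read WQE, which is the figure a provider would report as its max_sge_rd device attribute.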
struct mlx5_inbox_hdr {
__be16 opcode;
u8 rsvd[4];
@@ -644,7 +656,9 @@ struct mlx5_err_cqe {
};
struct mlx5_cqe64 {
- u8 rsvd0[4];
+ u8 outer_l3_tunneled;
+ u8 rsvd0;
+ __be16 wqe_id;
u8 lro_tcppsh_abort_dupack;
u8 lro_min_ttl;
__be16 lro_tcp_win;
@@ -657,7 +671,7 @@ struct mlx5_cqe64 {
__be16 slid;
__be32 flags_rqpn;
u8 hds_ip_ext;
- u8 l4_hdr_type_etc;
+ u8 l4_l3_hdr_type;
__be16 vlan_info;
__be32 srqn; /* [31:24]: lro_num_seg, [23:0]: srqn */
__be32 imm_inval_pkey;
@@ -671,6 +685,40 @@ struct mlx5_cqe64 {
u8 op_own;
};
+struct mlx5_mini_cqe8 {
+ union {
+ __be32 rx_hash_result;
+ struct {
+ __be16 checksum;
+ __be16 rsvd;
+ };
+ struct {
+ __be16 wqe_counter;
+ u8 s_wqe_opcode;
+ u8 reserved;
+ } s_wqe_info;
+ };
+ __be32 byte_cnt;
+};
+
+enum {
+ MLX5_NO_INLINE_DATA,
+ MLX5_INLINE_DATA32_SEG,
+ MLX5_INLINE_DATA64_SEG,
+ MLX5_COMPRESSED,
+};
+
+enum {
+ MLX5_CQE_FORMAT_CSUM = 0x1,
+};
+
+#define MLX5_MINI_CQE_ARRAY_SIZE 8
+
+static inline int mlx5_get_cqe_format(struct mlx5_cqe64 *cqe)
+{
+ return (cqe->op_own >> 2) & 0x3;
+}
+
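Editor's note: mlx5_get_cqe_format() reads bits [3:2] of op_own; when it returns MLX5_COMPRESSED the entry is the title CQE of a compressed session carrying struct mlx5_mini_cqe8 records (up to MLX5_MINI_CQE_ARRAY_SIZE per block) instead of a single ordinary completion. A hedged sketch of the dispatch a receive path would do ('rq', 'cq' and the two handlers are hypothetical; 'cqe' is a valid, owned struct mlx5_cqe64):

	if (mlx5_get_cqe_format(cqe) == MLX5_COMPRESSED) {
		/* expand the mini CQEs before any of the usual helpers
		 * (cqe_has_vlan(), get_cqe_ts(), ...) can be applied */
		decompress_cqes(rq, cq);
	} else {
		handle_rx_cqe(rq, cqe);
	}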
static inline int get_cqe_lro_tcppsh(struct mlx5_cqe64 *cqe)
{
return (cqe->lro_tcppsh_abort_dupack >> 6) & 1;
@@ -678,12 +726,22 @@ static inline int get_cqe_lro_tcppsh(struct mlx5_cqe64 *cqe)
static inline u8 get_cqe_l4_hdr_type(struct mlx5_cqe64 *cqe)
{
- return (cqe->l4_hdr_type_etc >> 4) & 0x7;
+ return (cqe->l4_l3_hdr_type >> 4) & 0x7;
+}
+
+static inline u8 get_cqe_l3_hdr_type(struct mlx5_cqe64 *cqe)
+{
+ return (cqe->l4_l3_hdr_type >> 2) & 0x3;
+}
+
+static inline u8 cqe_is_tunneled(struct mlx5_cqe64 *cqe)
+{
+ return cqe->outer_l3_tunneled & 0x1;
}
static inline int cqe_has_vlan(struct mlx5_cqe64 *cqe)
{
- return !!(cqe->l4_hdr_type_etc & 0x1);
+ return !!(cqe->l4_l3_hdr_type & 0x1);
}
static inline u64 get_cqe_ts(struct mlx5_cqe64 *cqe)
@@ -696,6 +754,42 @@ static inline u64 get_cqe_ts(struct mlx5_cqe64 *cqe)
return (u64)lo | ((u64)hi << 32);
}
+struct mpwrq_cqe_bc {
+ __be16 filler_consumed_strides;
+ __be16 byte_cnt;
+};
+
+static inline u16 mpwrq_get_cqe_byte_cnt(struct mlx5_cqe64 *cqe)
+{
+ struct mpwrq_cqe_bc *bc = (struct mpwrq_cqe_bc *)&cqe->byte_cnt;
+
+ return be16_to_cpu(bc->byte_cnt);
+}
+
+static inline u16 mpwrq_get_cqe_bc_consumed_strides(struct mpwrq_cqe_bc *bc)
+{
+ return 0x7fff & be16_to_cpu(bc->filler_consumed_strides);
+}
+
+static inline u16 mpwrq_get_cqe_consumed_strides(struct mlx5_cqe64 *cqe)
+{
+ struct mpwrq_cqe_bc *bc = (struct mpwrq_cqe_bc *)&cqe->byte_cnt;
+
+ return mpwrq_get_cqe_bc_consumed_strides(bc);
+}
+
+static inline bool mpwrq_is_filler_cqe(struct mlx5_cqe64 *cqe)
+{
+ struct mpwrq_cqe_bc *bc = (struct mpwrq_cqe_bc *)&cqe->byte_cnt;
+
+ return 0x8000 & be16_to_cpu(bc->filler_consumed_strides);
+}
+
+static inline u16 mpwrq_get_cqe_stride_index(struct mlx5_cqe64 *cqe)
+{
+ return be16_to_cpu(cqe->wqe_counter);
+}
+
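Editor's note: for multi-packet (striding) RQs the 32-bit byte_cnt field is reinterpreted as the {filler/consumed-strides, byte count} pair above, with bit 15 of the first half flagging a filler CQE that releases strides without carrying data. A hedged sketch of consuming such a completion ('rq' and the two helpers are hypothetical; 'cqe' is a valid, owned struct mlx5_cqe64 from a striding RQ):

	u16 strides   = mpwrq_get_cqe_consumed_strides(cqe);
	u16 stride_ix = mpwrq_get_cqe_stride_index(cqe);

	if (mpwrq_is_filler_cqe(cqe)) {
		release_strides(rq, stride_ix, strides);	/* no payload */
	} else {
		u16 bytes = mpwrq_get_cqe_byte_cnt(cqe);
		build_skb_from_strides(rq, stride_ix, strides, bytes);
	}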
enum {
CQE_L4_HDR_TYPE_NONE = 0x0,
CQE_L4_HDR_TYPE_TCP_NO_ACK = 0x1,
@@ -1289,6 +1383,18 @@ enum mlx5_cap_type {
#define MLX5_CAP_ESW_FLOWTABLE_FDB_MAX(mdev, cap) \
MLX5_CAP_ESW_FLOWTABLE_MAX(mdev, flow_table_properties_nic_esw_fdb.cap)
+#define MLX5_CAP_ESW_EGRESS_ACL(mdev, cap) \
+ MLX5_CAP_ESW_FLOWTABLE(mdev, flow_table_properties_esw_acl_egress.cap)
+
+#define MLX5_CAP_ESW_EGRESS_ACL_MAX(mdev, cap) \
+ MLX5_CAP_ESW_FLOWTABLE_MAX(mdev, flow_table_properties_esw_acl_egress.cap)
+
+#define MLX5_CAP_ESW_INGRESS_ACL(mdev, cap) \
+ MLX5_CAP_ESW_FLOWTABLE(mdev, flow_table_properties_esw_acl_ingress.cap)
+
+#define MLX5_CAP_ESW_INGRESS_ACL_MAX(mdev, cap) \
+ MLX5_CAP_ESW_FLOWTABLE_MAX(mdev, flow_table_properties_esw_acl_ingress.cap)
+
#define MLX5_CAP_ESW(mdev, cap) \
MLX5_GET(e_switch_cap, \
mdev->hca_caps_cur[MLX5_CAP_ESWITCH], cap)
@@ -1331,6 +1437,7 @@ enum {
MLX5_ETHERNET_EXTENDED_COUNTERS_GROUP = 0x5,
MLX5_PER_PRIORITY_COUNTERS_GROUP = 0x10,
MLX5_PER_TRAFFIC_CLASS_COUNTERS_GROUP = 0x11,
+ MLX5_PHYSICAL_LAYER_COUNTERS_GROUP = 0x12,
MLX5_INFINIBAND_PORT_COUNTERS_GROUP = 0x20,
};
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index dcd5ac8d3b14..80776d0c52dc 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -41,11 +41,17 @@
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/radix-tree.h>
+#include <linux/workqueue.h>
+#include <linux/interrupt.h>
#include <linux/mlx5/device.h>
#include <linux/mlx5/doorbell.h>
enum {
+ MLX5_RQ_BITMASK_VSD = 1 << 1,
+};
+
+enum {
MLX5_BOARD_ID_LEN = 64,
MLX5_MAX_NAME_LEN = 16,
};
@@ -112,9 +118,12 @@ enum {
MLX5_REG_PMPE = 0x5010,
MLX5_REG_PELC = 0x500e,
MLX5_REG_PVLC = 0x500f,
- MLX5_REG_PMLP = 0, /* TBD */
+ MLX5_REG_PCMR = 0x5041,
+ MLX5_REG_PMLP = 0x5002,
MLX5_REG_NODE_DESC = 0x6001,
MLX5_REG_HOST_ENDIANNESS = 0x7004,
+ MLX5_REG_MCIA = 0x9014,
+ MLX5_REG_MLCR = 0x902b,
};
enum {
@@ -304,6 +313,14 @@ struct mlx5_buf {
u8 page_shift;
};
+struct mlx5_eq_tasklet {
+ struct list_head list;
+ struct list_head process_list;
+ struct tasklet_struct task;
+ /* lock on completion tasklet list */
+ spinlock_t lock;
+};
+
struct mlx5_eq {
struct mlx5_core_dev *dev;
__be32 __iomem *doorbell;
@@ -317,6 +334,7 @@ struct mlx5_eq {
struct list_head list;
int index;
struct mlx5_rsc_debug *dbg;
+ struct mlx5_eq_tasklet tasklet_ctx;
};
struct mlx5_core_psv {
@@ -450,6 +468,17 @@ struct mlx5_irq_info {
char name[MLX5_MAX_IRQ_NAME];
};
+struct mlx5_fc_stats {
+ struct list_head list;
+ struct list_head addlist;
+ /* protect addlist add/splice operations */
+ spinlock_t addlist_lock;
+
+ struct workqueue_struct *wq;
+ struct delayed_work work;
+ unsigned long next_query;
+};
+
struct mlx5_eswitch;
struct mlx5_priv {
@@ -511,6 +540,10 @@ struct mlx5_priv {
unsigned long pci_dev_data;
struct mlx5_flow_root_namespace *root_ns;
struct mlx5_flow_root_namespace *fdb_root_ns;
+ struct mlx5_flow_root_namespace *esw_egress_root_ns;
+ struct mlx5_flow_root_namespace *esw_ingress_root_ns;
+
+ struct mlx5_fc_stats fc_stats;
};
enum mlx5_device_state {
@@ -519,8 +552,9 @@ enum mlx5_device_state {
};
enum mlx5_interface_state {
- MLX5_INTERFACE_STATE_DOWN,
- MLX5_INTERFACE_STATE_UP,
+ MLX5_INTERFACE_STATE_DOWN = BIT(0),
+ MLX5_INTERFACE_STATE_UP = BIT(1),
+ MLX5_INTERFACE_STATE_SHUTDOWN = BIT(2),
};
enum mlx5_pci_status {
@@ -544,7 +578,7 @@ struct mlx5_core_dev {
enum mlx5_device_state state;
/* sync interface state */
struct mutex intf_state_mutex;
- enum mlx5_interface_state interface_state;
+ unsigned long intf_state;
void (*event) (struct mlx5_core_dev *dev,
enum mlx5_dev_event event,
unsigned long param);
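Editor's note: with the state turned into an unsigned long holding BIT() flags, more than one condition can be tracked at once (e.g. SHUTDOWN alongside UP). A hedged example of the kind of check this enables; the exact accessors used by the driver may differ:

	if (dev->intf_state & MLX5_INTERFACE_STATE_SHUTDOWN)
		return;		/* device is being torn down, skip the command */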
@@ -552,6 +586,9 @@ struct mlx5_core_dev {
struct mlx5_profile *profile;
atomic_t num_qps;
u32 issi;
+#ifdef CONFIG_RFS_ACCEL
+ struct cpu_rmap *rmap;
+#endif
};
struct mlx5_db {
diff --git a/include/linux/mlx5/fs.h b/include/linux/mlx5/fs.h
index 8dec5508d93d..4b7a107d9c19 100644
--- a/include/linux/mlx5/fs.h
+++ b/include/linux/mlx5/fs.h
@@ -58,6 +58,8 @@ enum mlx5_flow_namespace_type {
MLX5_FLOW_NAMESPACE_LEFTOVERS,
MLX5_FLOW_NAMESPACE_ANCHOR,
MLX5_FLOW_NAMESPACE_FDB,
+ MLX5_FLOW_NAMESPACE_ESW_EGRESS,
+ MLX5_FLOW_NAMESPACE_ESW_INGRESS,
};
struct mlx5_flow_table;
@@ -71,6 +73,7 @@ struct mlx5_flow_destination {
u32 tir_num;
struct mlx5_flow_table *ft;
u32 vport_num;
+ struct mlx5_fc *counter;
};
};
@@ -82,12 +85,19 @@ struct mlx5_flow_table *
mlx5_create_auto_grouped_flow_table(struct mlx5_flow_namespace *ns,
int prio,
int num_flow_table_entries,
- int max_num_groups);
+ int max_num_groups,
+ u32 level);
struct mlx5_flow_table *
mlx5_create_flow_table(struct mlx5_flow_namespace *ns,
int prio,
- int num_flow_table_entries);
+ int num_flow_table_entries,
+ u32 level);
+struct mlx5_flow_table *
+mlx5_create_vport_flow_table(struct mlx5_flow_namespace *ns,
+ int prio,
+ int num_flow_table_entries,
+ u32 level, u16 vport);
int mlx5_destroy_flow_table(struct mlx5_flow_table *ft);
/* inbox should be set with the following values:
@@ -113,4 +123,13 @@ mlx5_add_flow_rule(struct mlx5_flow_table *ft,
struct mlx5_flow_destination *dest);
void mlx5_del_flow_rule(struct mlx5_flow_rule *fr);
+int mlx5_modify_rule_destination(struct mlx5_flow_rule *rule,
+ struct mlx5_flow_destination *dest);
+
+struct mlx5_fc *mlx5_flow_rule_counter(struct mlx5_flow_rule *rule);
+struct mlx5_fc *mlx5_fc_create(struct mlx5_core_dev *dev, bool aging);
+void mlx5_fc_destroy(struct mlx5_core_dev *dev, struct mlx5_fc *counter);
+void mlx5_fc_query_cached(struct mlx5_fc *counter,
+ u64 *bytes, u64 *packets, u64 *lastuse);
+
#endif
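Editor's note: the new flow-counter API above pairs with the MLX5_FLOW_DESTINATION_TYPE_COUNTER destination and the MLX5_FLOW_CONTEXT_ACTION_COUNT action added further down in mlx5_ifc.h. A minimal sketch of the counter lifecycle, assuming 'mdev' is a struct mlx5_core_dev * and that attaching the counter to a rule happens elsewhere:

	struct mlx5_fc *fc;
	u64 bytes, packets, lastuse;

	fc = mlx5_fc_create(mdev, true);	/* true: aging counter, values cached periodically */
	if (IS_ERR(fc))
		return PTR_ERR(fc);

	/* ... counter attached to a rule via a COUNTER destination ... */

	mlx5_fc_query_cached(fc, &bytes, &packets, &lastuse);
	mlx5_fc_destroy(mdev, fc);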
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index c15b8a864937..9a05cd7e5890 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -202,6 +202,9 @@ enum {
MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY = 0x936,
MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY = 0x937,
MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY = 0x938,
+ MLX5_CMD_OP_ALLOC_FLOW_COUNTER = 0x939,
+ MLX5_CMD_OP_DEALLOC_FLOW_COUNTER = 0x93a,
+ MLX5_CMD_OP_QUERY_FLOW_COUNTER = 0x93b,
MLX5_CMD_OP_MODIFY_FLOW_TABLE = 0x93c
};
@@ -265,7 +268,8 @@ struct mlx5_ifc_flow_table_fields_supported_bits {
struct mlx5_ifc_flow_table_prop_layout_bits {
u8 ft_support[0x1];
- u8 reserved_at_1[0x2];
+ u8 reserved_at_1[0x1];
+ u8 flow_counter[0x1];
u8 flow_modify_en[0x1];
u8 modify_root[0x1];
u8 identified_miss_table_mode[0x1];
@@ -513,7 +517,9 @@ struct mlx5_ifc_per_protocol_networking_offload_caps_bits {
u8 max_lso_cap[0x5];
u8 reserved_at_10[0x4];
u8 rss_ind_tbl_cap[0x4];
- u8 reserved_at_18[0x3];
+ u8 reg_umr_sq[0x1];
+ u8 scatter_fcs[0x1];
+ u8 reserved_at_1a[0x1];
u8 tunnel_lso_const_out_ip_id[0x1];
u8 reserved_at_1c[0x2];
u8 tunnel_statless_gre[0x1];
@@ -648,7 +654,7 @@ struct mlx5_ifc_vector_calc_cap_bits {
enum {
MLX5_WQ_TYPE_LINKED_LIST = 0x0,
MLX5_WQ_TYPE_CYCLIC = 0x1,
- MLX5_WQ_TYPE_STRQ = 0x2,
+ MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ = 0x2,
};
enum {
@@ -750,21 +756,25 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 ets[0x1];
u8 nic_flow_table[0x1];
u8 eswitch_flow_table[0x1];
- u8 early_vf_enable;
- u8 reserved_at_1a8[0x2];
+ u8 early_vf_enable[0x1];
+ u8 reserved_at_1a9[0x2];
u8 local_ca_ack_delay[0x5];
- u8 reserved_at_1af[0x6];
+ u8 reserved_at_1af[0x2];
+ u8 ports_check[0x1];
+ u8 reserved_at_1b2[0x1];
+ u8 disable_link_up[0x1];
+ u8 beacon_led[0x1];
u8 port_type[0x2];
u8 num_ports[0x8];
- u8 reserved_at_1bf[0x3];
+ u8 reserved_at_1c0[0x3];
u8 log_max_msg[0x5];
- u8 reserved_at_1c7[0x4];
+ u8 reserved_at_1c8[0x4];
u8 max_tc[0x4];
- u8 reserved_at_1cf[0x6];
+ u8 reserved_at_1d0[0x6];
u8 rol_s[0x1];
u8 rol_g[0x1];
- u8 reserved_at_1d7[0x1];
+ u8 reserved_at_1d8[0x1];
u8 wol_s[0x1];
u8 wol_g[0x1];
u8 wol_a[0x1];
@@ -774,47 +784,48 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 wol_p[0x1];
u8 stat_rate_support[0x10];
- u8 reserved_at_1ef[0xc];
+ u8 reserved_at_1f0[0xc];
u8 cqe_version[0x4];
u8 compact_address_vector[0x1];
- u8 reserved_at_200[0x3];
+ u8 striding_rq[0x1];
+ u8 reserved_at_201[0x2];
u8 ipoib_basic_offloads[0x1];
- u8 reserved_at_204[0xa];
+ u8 reserved_at_205[0xa];
u8 drain_sigerr[0x1];
u8 cmdif_checksum[0x2];
u8 sigerr_cqe[0x1];
- u8 reserved_at_212[0x1];
+ u8 reserved_at_213[0x1];
u8 wq_signature[0x1];
u8 sctr_data_cqe[0x1];
- u8 reserved_at_215[0x1];
+ u8 reserved_at_216[0x1];
u8 sho[0x1];
u8 tph[0x1];
u8 rf[0x1];
u8 dct[0x1];
- u8 reserved_at_21a[0x1];
+ u8 reserved_at_21b[0x1];
u8 eth_net_offloads[0x1];
u8 roce[0x1];
u8 atomic[0x1];
- u8 reserved_at_21e[0x1];
+ u8 reserved_at_21f[0x1];
u8 cq_oi[0x1];
u8 cq_resize[0x1];
u8 cq_moderation[0x1];
- u8 reserved_at_222[0x3];
+ u8 reserved_at_223[0x3];
u8 cq_eq_remap[0x1];
u8 pg[0x1];
u8 block_lb_mc[0x1];
- u8 reserved_at_228[0x1];
+ u8 reserved_at_229[0x1];
u8 scqe_break_moderation[0x1];
- u8 reserved_at_22a[0x1];
+ u8 cq_period_start_from_cqe[0x1];
u8 cd[0x1];
- u8 reserved_at_22c[0x1];
+ u8 reserved_at_22d[0x1];
u8 apm[0x1];
u8 vector_calc[0x1];
- u8 reserved_at_22f[0x1];
+ u8 umr_ptr_rlky[0x1];
u8 imaicl[0x1];
- u8 reserved_at_231[0x4];
+ u8 reserved_at_232[0x4];
u8 qkv[0x1];
u8 pkv[0x1];
u8 set_deth_sqpn[0x1];
@@ -824,104 +835,109 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 uc[0x1];
u8 rc[0x1];
- u8 reserved_at_23f[0xa];
+ u8 reserved_at_240[0xa];
u8 uar_sz[0x6];
- u8 reserved_at_24f[0x8];
+ u8 reserved_at_250[0x8];
u8 log_pg_sz[0x8];
u8 bf[0x1];
- u8 reserved_at_260[0x1];
+ u8 reserved_at_261[0x1];
u8 pad_tx_eth_packet[0x1];
- u8 reserved_at_262[0x8];
+ u8 reserved_at_263[0x8];
u8 log_bf_reg_size[0x5];
- u8 reserved_at_26f[0x10];
+ u8 reserved_at_270[0x10];
- u8 reserved_at_27f[0x10];
+ u8 reserved_at_280[0x10];
u8 max_wqe_sz_sq[0x10];
- u8 reserved_at_29f[0x10];
+ u8 reserved_at_2a0[0x10];
u8 max_wqe_sz_rq[0x10];
- u8 reserved_at_2bf[0x10];
+ u8 reserved_at_2c0[0x10];
u8 max_wqe_sz_sq_dc[0x10];
- u8 reserved_at_2df[0x7];
+ u8 reserved_at_2e0[0x7];
u8 max_qp_mcg[0x19];
- u8 reserved_at_2ff[0x18];
+ u8 reserved_at_300[0x18];
u8 log_max_mcg[0x8];
- u8 reserved_at_31f[0x3];
+ u8 reserved_at_320[0x3];
u8 log_max_transport_domain[0x5];
- u8 reserved_at_327[0x3];
+ u8 reserved_at_328[0x3];
u8 log_max_pd[0x5];
- u8 reserved_at_32f[0xb];
+ u8 reserved_at_330[0xb];
u8 log_max_xrcd[0x5];
- u8 reserved_at_33f[0x20];
+ u8 reserved_at_340[0x20];
- u8 reserved_at_35f[0x3];
+ u8 reserved_at_360[0x3];
u8 log_max_rq[0x5];
- u8 reserved_at_367[0x3];
+ u8 reserved_at_368[0x3];
u8 log_max_sq[0x5];
- u8 reserved_at_36f[0x3];
+ u8 reserved_at_370[0x3];
u8 log_max_tir[0x5];
- u8 reserved_at_377[0x3];
+ u8 reserved_at_378[0x3];
u8 log_max_tis[0x5];
u8 basic_cyclic_rcv_wqe[0x1];
- u8 reserved_at_380[0x2];
+ u8 reserved_at_381[0x2];
u8 log_max_rmp[0x5];
- u8 reserved_at_387[0x3];
+ u8 reserved_at_388[0x3];
u8 log_max_rqt[0x5];
- u8 reserved_at_38f[0x3];
+ u8 reserved_at_390[0x3];
u8 log_max_rqt_size[0x5];
- u8 reserved_at_397[0x3];
+ u8 reserved_at_398[0x3];
u8 log_max_tis_per_sq[0x5];
- u8 reserved_at_39f[0x3];
+ u8 reserved_at_3a0[0x3];
u8 log_max_stride_sz_rq[0x5];
- u8 reserved_at_3a7[0x3];
+ u8 reserved_at_3a8[0x3];
u8 log_min_stride_sz_rq[0x5];
- u8 reserved_at_3af[0x3];
+ u8 reserved_at_3b0[0x3];
u8 log_max_stride_sz_sq[0x5];
- u8 reserved_at_3b7[0x3];
+ u8 reserved_at_3b8[0x3];
u8 log_min_stride_sz_sq[0x5];
- u8 reserved_at_3bf[0x1b];
+ u8 reserved_at_3c0[0x1b];
u8 log_max_wq_sz[0x5];
u8 nic_vport_change_event[0x1];
- u8 reserved_at_3e0[0xa];
+ u8 reserved_at_3e1[0xa];
u8 log_max_vlan_list[0x5];
- u8 reserved_at_3ef[0x3];
+ u8 reserved_at_3f0[0x3];
u8 log_max_current_mc_list[0x5];
- u8 reserved_at_3f7[0x3];
+ u8 reserved_at_3f8[0x3];
u8 log_max_current_uc_list[0x5];
- u8 reserved_at_3ff[0x80];
+ u8 reserved_at_400[0x80];
- u8 reserved_at_47f[0x3];
+ u8 reserved_at_480[0x3];
u8 log_max_l2_table[0x5];
- u8 reserved_at_487[0x8];
+ u8 reserved_at_488[0x8];
u8 log_uar_page_sz[0x10];
- u8 reserved_at_49f[0x20];
+ u8 reserved_at_4a0[0x20];
u8 device_frequency_mhz[0x20];
u8 device_frequency_khz[0x20];
- u8 reserved_at_4ff[0x5f];
- u8 cqe_zip[0x1];
- u8 cqe_zip_timeout[0x10];
- u8 cqe_zip_max_num[0x10];
+ u8 reserved_at_500[0x80];
- u8 reserved_at_57f[0x220];
+ u8 reserved_at_580[0x3f];
+ u8 cqe_compression[0x1];
+
+ u8 cqe_compression_timeout[0x10];
+ u8 cqe_compression_max_num[0x10];
+
+ u8 reserved_at_5e0[0x220];
};
enum mlx5_flow_destination_type {
MLX5_FLOW_DESTINATION_TYPE_VPORT = 0x0,
MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE = 0x1,
MLX5_FLOW_DESTINATION_TYPE_TIR = 0x2,
+
+ MLX5_FLOW_DESTINATION_TYPE_COUNTER = 0x100,
};
struct mlx5_ifc_dest_format_struct_bits {
@@ -931,6 +947,19 @@ struct mlx5_ifc_dest_format_struct_bits {
u8 reserved_at_20[0x20];
};
+struct mlx5_ifc_flow_counter_list_bits {
+ u8 reserved_at_0[0x10];
+ u8 flow_counter_id[0x10];
+
+ u8 reserved_at_20[0x20];
+};
+
+union mlx5_ifc_dest_format_struct_flow_counter_list_auto_bits {
+ struct mlx5_ifc_dest_format_struct_bits dest_format_struct;
+ struct mlx5_ifc_flow_counter_list_bits flow_counter_list;
+ u8 reserved_at_0[0x40];
+};
+
struct mlx5_ifc_fte_match_param_bits {
struct mlx5_ifc_fte_match_set_lyr_2_4_bits outer_headers;
@@ -997,7 +1026,13 @@ struct mlx5_ifc_wq_bits {
u8 reserved_at_118[0x3];
u8 log_wq_sz[0x5];
- u8 reserved_at_120[0x4e0];
+ u8 reserved_at_120[0x15];
+ u8 log_wqe_num_of_strides[0x3];
+ u8 two_byte_shift_en[0x1];
+ u8 reserved_at_139[0x4];
+ u8 log_wqe_stride_size[0x3];
+
+ u8 reserved_at_140[0x4c0];
struct mlx5_ifc_cmd_pas_bits pas[0];
};
@@ -1990,6 +2025,7 @@ enum {
MLX5_FLOW_CONTEXT_ACTION_ALLOW = 0x1,
MLX5_FLOW_CONTEXT_ACTION_DROP = 0x2,
MLX5_FLOW_CONTEXT_ACTION_FWD_DEST = 0x4,
+ MLX5_FLOW_CONTEXT_ACTION_COUNT = 0x8,
};
struct mlx5_ifc_flow_context_bits {
@@ -2006,13 +2042,16 @@ struct mlx5_ifc_flow_context_bits {
u8 reserved_at_80[0x8];
u8 destination_list_size[0x18];
- u8 reserved_at_a0[0x160];
+ u8 reserved_at_a0[0x8];
+ u8 flow_counter_list_size[0x18];
+
+ u8 reserved_at_c0[0x140];
struct mlx5_ifc_fte_match_param_bits match_value;
u8 reserved_at_1200[0x600];
- struct mlx5_ifc_dest_format_struct_bits destination[0];
+ union mlx5_ifc_dest_format_struct_flow_counter_list_auto_bits destination[0];
};
enum {
@@ -2196,7 +2235,8 @@ struct mlx5_ifc_sqc_bits {
u8 flush_in_error_en[0x1];
u8 reserved_at_4[0x4];
u8 state[0x4];
- u8 reserved_at_c[0x14];
+ u8 reg_umr[0x1];
+ u8 reserved_at_d[0x13];
u8 reserved_at_20[0x8];
u8 user_index[0x18];
@@ -2244,7 +2284,8 @@ enum {
struct mlx5_ifc_rqc_bits {
u8 rlky[0x1];
- u8 reserved_at_1[0x2];
+ u8 reserved_at_1[0x1];
+ u8 scatter_fcs[0x1];
u8 vsd[0x1];
u8 mem_rq_type[0x4];
u8 state[0x4];
@@ -2601,6 +2642,11 @@ enum {
MLX5_CQC_ST_FIRED = 0xa,
};
+enum {
+ MLX5_CQ_PERIOD_MODE_START_FROM_EQE = 0x0,
+ MLX5_CQ_PERIOD_MODE_START_FROM_CQE = 0x1,
+};
+
struct mlx5_ifc_cqc_bits {
u8 status[0x4];
u8 reserved_at_4[0x4];
@@ -2609,8 +2655,8 @@ struct mlx5_ifc_cqc_bits {
u8 reserved_at_c[0x1];
u8 scqe_break_moderation_en[0x1];
u8 oi[0x1];
- u8 reserved_at_f[0x2];
- u8 cqe_zip_en[0x1];
+ u8 cq_period_mode[0x2];
+ u8 cqe_comp_en[0x1];
u8 mini_cqe_res_format[0x2];
u8 st[0x4];
u8 reserved_at_18[0x8];
@@ -2984,7 +3030,11 @@ struct mlx5_ifc_set_fte_in_bits {
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
- u8 reserved_at_40[0x40];
+ u8 other_vport[0x1];
+ u8 reserved_at_41[0xf];
+ u8 vport_number[0x10];
+
+ u8 reserved_at_60[0x20];
u8 table_type[0x8];
u8 reserved_at_88[0x18];
@@ -3910,6 +3960,34 @@ struct mlx5_ifc_query_flow_group_in_bits {
u8 reserved_at_e0[0x120];
};
+struct mlx5_ifc_query_flow_counter_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_at_40[0x40];
+
+ struct mlx5_ifc_traffic_counter_bits flow_statistics[0];
+};
+
+struct mlx5_ifc_query_flow_counter_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_at_10[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_at_40[0x80];
+
+ u8 clear[0x1];
+ u8 reserved_at_c1[0xf];
+ u8 num_of_counters[0x10];
+
+ u8 reserved_at_e0[0x10];
+ u8 flow_counter_id[0x10];
+};
+
struct mlx5_ifc_query_esw_vport_context_out_bits {
u8 status[0x8];
u8 reserved_at_8[0x18];
@@ -5178,7 +5256,11 @@ struct mlx5_ifc_destroy_flow_table_in_bits {
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
- u8 reserved_at_40[0x40];
+ u8 other_vport[0x1];
+ u8 reserved_at_41[0xf];
+ u8 vport_number[0x10];
+
+ u8 reserved_at_60[0x20];
u8 table_type[0x8];
u8 reserved_at_88[0x18];
@@ -5205,7 +5287,11 @@ struct mlx5_ifc_destroy_flow_group_in_bits {
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
- u8 reserved_at_40[0x40];
+ u8 other_vport[0x1];
+ u8 reserved_at_41[0xf];
+ u8 vport_number[0x10];
+
+ u8 reserved_at_60[0x20];
u8 table_type[0x8];
u8 reserved_at_88[0x18];
@@ -5346,7 +5432,11 @@ struct mlx5_ifc_delete_fte_in_bits {
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
- u8 reserved_at_40[0x40];
+ u8 other_vport[0x1];
+ u8 reserved_at_41[0xf];
+ u8 vport_number[0x10];
+
+ u8 reserved_at_60[0x20];
u8 table_type[0x8];
u8 reserved_at_88[0x18];
@@ -5471,6 +5561,28 @@ struct mlx5_ifc_dealloc_pd_in_bits {
u8 reserved_at_60[0x20];
};
+struct mlx5_ifc_dealloc_flow_counter_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_dealloc_flow_counter_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_at_10[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_at_40[0x10];
+ u8 flow_counter_id[0x10];
+
+ u8 reserved_at_60[0x20];
+};
+
struct mlx5_ifc_create_xrc_srq_out_bits {
u8 status[0x8];
u8 reserved_at_8[0x18];
@@ -5792,7 +5904,11 @@ struct mlx5_ifc_create_flow_table_in_bits {
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
- u8 reserved_at_40[0x40];
+ u8 other_vport[0x1];
+ u8 reserved_at_41[0xf];
+ u8 vport_number[0x10];
+
+ u8 reserved_at_60[0x20];
u8 table_type[0x8];
u8 reserved_at_88[0x18];
@@ -5836,7 +5952,11 @@ struct mlx5_ifc_create_flow_group_in_bits {
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
- u8 reserved_at_40[0x40];
+ u8 other_vport[0x1];
+ u8 reserved_at_41[0xf];
+ u8 vport_number[0x10];
+
+ u8 reserved_at_60[0x20];
u8 table_type[0x8];
u8 reserved_at_88[0x18];
@@ -6190,6 +6310,28 @@ struct mlx5_ifc_alloc_pd_in_bits {
u8 reserved_at_40[0x40];
};
+struct mlx5_ifc_alloc_flow_counter_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_at_40[0x10];
+ u8 flow_counter_id[0x10];
+
+ u8 reserved_at_60[0x20];
+};
+
+struct mlx5_ifc_alloc_flow_counter_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_at_10[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_at_40[0x40];
+};
+
struct mlx5_ifc_add_vxlan_udp_dport_out_bits {
u8 status[0x8];
u8 reserved_at_8[0x18];
@@ -6369,6 +6511,17 @@ struct mlx5_ifc_ptys_reg_bits {
u8 reserved_at_1a0[0x60];
};
+struct mlx5_ifc_mlcr_reg_bits {
+ u8 reserved_at_0[0x8];
+ u8 local_port[0x8];
+ u8 reserved_at_10[0x20];
+
+ u8 beacon_duration[0x10];
+ u8 reserved_at_40[0x10];
+
+ u8 beacon_remain[0x10];
+};
+
struct mlx5_ifc_ptas_reg_bits {
u8 reserved_at_0[0x20];
@@ -6778,6 +6931,16 @@ struct mlx5_ifc_pamp_reg_bits {
u8 index_data[18][0x10];
};
+struct mlx5_ifc_pcmr_reg_bits {
+ u8 reserved_at_0[0x8];
+ u8 local_port[0x8];
+ u8 reserved_at_10[0x2e];
+ u8 fcs_cap[0x1];
+ u8 reserved_at_3f[0x1f];
+ u8 fcs_chk[0x1];
+ u8 reserved_at_5f[0x1];
+};
+
struct mlx5_ifc_lane_2_module_mapping_bits {
u8 reserved_at_0[0x6];
u8 rx_lane[0x2];
@@ -7114,6 +7277,7 @@ union mlx5_ifc_ports_control_registers_document_bits {
struct mlx5_ifc_pspa_reg_bits pspa_reg;
struct mlx5_ifc_ptas_reg_bits ptas_reg;
struct mlx5_ifc_ptys_reg_bits ptys_reg;
+ struct mlx5_ifc_mlcr_reg_bits mlcr_reg;
struct mlx5_ifc_pude_reg_bits pude_reg;
struct mlx5_ifc_pvlc_reg_bits pvlc_reg;
struct mlx5_ifc_slrg_reg_bits slrg_reg;
@@ -7147,7 +7311,11 @@ struct mlx5_ifc_set_flow_table_root_in_bits {
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
- u8 reserved_at_40[0x40];
+ u8 other_vport[0x1];
+ u8 reserved_at_41[0xf];
+ u8 vport_number[0x10];
+
+ u8 reserved_at_60[0x20];
u8 table_type[0x8];
u8 reserved_at_88[0x18];
@@ -7178,7 +7346,9 @@ struct mlx5_ifc_modify_flow_table_in_bits {
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
- u8 reserved_at_40[0x20];
+ u8 other_vport[0x1];
+ u8 reserved_at_41[0xf];
+ u8 vport_number[0x10];
u8 reserved_at_60[0x10];
u8 modify_field_select[0x10];
@@ -7244,4 +7414,34 @@ struct mlx5_ifc_qtct_reg_bits {
u8 tclass[0x3];
};
+struct mlx5_ifc_mcia_reg_bits {
+ u8 l[0x1];
+ u8 reserved_at_1[0x7];
+ u8 module[0x8];
+ u8 reserved_at_10[0x8];
+ u8 status[0x8];
+
+ u8 i2c_device_address[0x8];
+ u8 page_number[0x8];
+ u8 device_address[0x10];
+
+ u8 reserved_at_40[0x10];
+ u8 size[0x10];
+
+ u8 reserved_at_60[0x20];
+
+ u8 dword_0[0x20];
+ u8 dword_1[0x20];
+ u8 dword_2[0x20];
+ u8 dword_3[0x20];
+ u8 dword_4[0x20];
+ u8 dword_5[0x20];
+ u8 dword_6[0x20];
+ u8 dword_7[0x20];
+ u8 dword_8[0x20];
+ u8 dword_9[0x20];
+ u8 dword_10[0x20];
+ u8 dword_11[0x20];
+};
+
#endif /* MLX5_IFC_H */
diff --git a/include/linux/mlx5/port.h b/include/linux/mlx5/port.h
index a1d145abd4eb..9851862c0ec5 100644
--- a/include/linux/mlx5/port.h
+++ b/include/linux/mlx5/port.h
@@ -35,6 +35,24 @@
#include <linux/mlx5/driver.h>
+enum mlx5_beacon_duration {
+ MLX5_BEACON_DURATION_OFF = 0x0,
+ MLX5_BEACON_DURATION_INF = 0xffff,
+};
+
+enum mlx5_module_id {
+ MLX5_MODULE_ID_SFP = 0x3,
+ MLX5_MODULE_ID_QSFP = 0xC,
+ MLX5_MODULE_ID_QSFP_PLUS = 0xD,
+ MLX5_MODULE_ID_QSFP28 = 0x11,
+};
+
+#define MLX5_EEPROM_MAX_BYTES 32
+#define MLX5_EEPROM_IDENTIFIER_BYTE_MASK 0x000000ff
+#define MLX5_I2C_ADDR_LOW 0x50
+#define MLX5_I2C_ADDR_HIGH 0x51
+#define MLX5_EEPROM_PAGE_LENGTH 256
+
int mlx5_set_port_caps(struct mlx5_core_dev *dev, u8 port_num, u32 caps);
int mlx5_query_port_ptys(struct mlx5_core_dev *dev, u32 *ptys,
int ptys_size, int proto_mask, u8 local_port);
@@ -53,10 +71,11 @@ int mlx5_set_port_admin_status(struct mlx5_core_dev *dev,
enum mlx5_port_status status);
int mlx5_query_port_admin_status(struct mlx5_core_dev *dev,
enum mlx5_port_status *status);
+int mlx5_set_port_beacon(struct mlx5_core_dev *dev, u16 beacon_duration);
-int mlx5_set_port_mtu(struct mlx5_core_dev *dev, int mtu, u8 port);
-void mlx5_query_port_max_mtu(struct mlx5_core_dev *dev, int *max_mtu, u8 port);
-void mlx5_query_port_oper_mtu(struct mlx5_core_dev *dev, int *oper_mtu,
+int mlx5_set_port_mtu(struct mlx5_core_dev *dev, u16 mtu, u8 port);
+void mlx5_query_port_max_mtu(struct mlx5_core_dev *dev, u16 *max_mtu, u8 port);
+void mlx5_query_port_oper_mtu(struct mlx5_core_dev *dev, u16 *oper_mtu,
u8 port);
int mlx5_query_port_vl_hw_cap(struct mlx5_core_dev *dev,
@@ -84,4 +103,10 @@ int mlx5_query_port_ets_rate_limit(struct mlx5_core_dev *mdev,
int mlx5_set_port_wol(struct mlx5_core_dev *mdev, u8 wol_mode);
int mlx5_query_port_wol(struct mlx5_core_dev *mdev, u8 *wol_mode);
+int mlx5_set_port_fcs(struct mlx5_core_dev *mdev, u8 enable);
+void mlx5_query_port_fcs(struct mlx5_core_dev *mdev, bool *supported,
+ bool *enabled);
+int mlx5_query_module_eeprom(struct mlx5_core_dev *dev,
+ u16 offset, u16 size, u8 *data);
+
#endif /* __MLX5_PORT_H__ */
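A hedged sketch of how the new EEPROM helper and the MLX5_EEPROM_MAX_BYTES chunk limit might be combined; the "bytes read or negative errno" return convention assumed below is not spelled out by this header.

static int example_dump_module_eeprom(struct mlx5_core_dev *dev,
				      u8 *buf, u16 len)
{
	u16 off = 0;

	while (off < len) {
		u16 chunk = min_t(u16, len - off, MLX5_EEPROM_MAX_BYTES);
		int read = mlx5_query_module_eeprom(dev, off, chunk, buf + off);

		/* assumed: positive byte count on success, <= 0 on failure */
		if (read <= 0)
			return read ? read : -EIO;
		off += read;
	}
	return 0;
}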
diff --git a/include/linux/mlx5/qp.h b/include/linux/mlx5/qp.h
index cf031a3f16c5..64221027bf1f 100644
--- a/include/linux/mlx5/qp.h
+++ b/include/linux/mlx5/qp.h
@@ -668,6 +668,12 @@ int mlx5_core_create_sq_tracked(struct mlx5_core_dev *dev, u32 *in, int inlen,
struct mlx5_core_qp *sq);
void mlx5_core_destroy_sq_tracked(struct mlx5_core_dev *dev,
struct mlx5_core_qp *sq);
+int mlx5_core_alloc_q_counter(struct mlx5_core_dev *dev, u16 *counter_id);
+int mlx5_core_dealloc_q_counter(struct mlx5_core_dev *dev, u16 counter_id);
+int mlx5_core_query_q_counter(struct mlx5_core_dev *dev, u16 counter_id,
+ int reset, void *out, int out_size);
+int mlx5_core_query_out_of_buffer(struct mlx5_core_dev *dev, u16 counter_id,
+ u32 *out_of_buffer);
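A minimal sketch tying the new queue-counter helpers together, relying only on the prototypes declared above: allocate a counter, read its out-of-buffer statistic, then release it.

static int example_read_out_of_buffer(struct mlx5_core_dev *dev, u32 *oob)
{
	u16 counter_id;
	int err;

	err = mlx5_core_alloc_q_counter(dev, &counter_id);
	if (err)
		return err;
	err = mlx5_core_query_out_of_buffer(dev, counter_id, oob);
	mlx5_core_dealloc_q_counter(dev, counter_id);
	return err;
}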
static inline const char *mlx5_qp_type_str(int type)
{
diff --git a/include/linux/mlx5/vport.h b/include/linux/mlx5/vport.h
index bd93e6323603..301da4a5e6bf 100644
--- a/include/linux/mlx5/vport.h
+++ b/include/linux/mlx5/vport.h
@@ -45,6 +45,8 @@ int mlx5_query_nic_vport_mac_address(struct mlx5_core_dev *mdev,
u16 vport, u8 *addr);
int mlx5_modify_nic_vport_mac_address(struct mlx5_core_dev *dev,
u16 vport, u8 *addr);
+int mlx5_query_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 *mtu);
+int mlx5_modify_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 mtu);
int mlx5_query_nic_vport_system_image_guid(struct mlx5_core_dev *mdev,
u64 *system_image_guid);
int mlx5_query_nic_vport_node_guid(struct mlx5_core_dev *mdev, u64 *node_guid);
diff --git a/include/linux/mm.h b/include/linux/mm.h
index ed6407d1b7b5..2b97be1147ec 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -72,6 +72,10 @@ extern int mmap_rnd_compat_bits __read_mostly;
#define __pa_symbol(x) __pa(RELOC_HIDE((unsigned long)(x), 0))
#endif
+#ifndef page_to_virt
+#define page_to_virt(x) __va(PFN_PHYS(page_to_pfn(x)))
+#endif
+
/*
* To prevent common memory management code establishing
* a zero page mapping on a read fault.
@@ -443,14 +447,14 @@ unsigned long vmalloc_to_pfn(const void *addr);
* On nommu, vmalloc/vfree wrap through kmalloc/kfree directly, so there
* is no special casing required.
*/
-static inline int is_vmalloc_addr(const void *x)
+static inline bool is_vmalloc_addr(const void *x)
{
#ifdef CONFIG_MMU
unsigned long addr = (unsigned long)x;
return addr >= VMALLOC_START && addr < VMALLOC_END;
#else
- return 0;
+ return false;
#endif
}
#ifdef CONFIG_MMU
@@ -500,11 +504,20 @@ static inline int page_mapcount(struct page *page)
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
int total_mapcount(struct page *page);
+int page_trans_huge_mapcount(struct page *page, int *total_mapcount);
#else
static inline int total_mapcount(struct page *page)
{
return page_mapcount(page);
}
+static inline int page_trans_huge_mapcount(struct page *page,
+ int *total_mapcount)
+{
+ int mapcount = page_mapcount(page);
+ if (total_mapcount)
+ *total_mapcount = mapcount;
+ return mapcount;
+}
#endif
static inline struct page *virt_to_head_page(const void *x)
@@ -623,7 +636,7 @@ void do_set_pte(struct vm_area_struct *vma, unsigned long address,
*
* A page may belong to an inode's memory mapping. In this case, page->mapping
* is the pointer to the inode, and page->index is the file offset of the page,
- * in units of PAGE_CACHE_SIZE.
+ * in units of PAGE_SIZE.
*
* If pagecache pages are not associated with an inode, they are said to be
* anonymous pages. These may become associated with the swapcache, and in that
@@ -721,7 +734,7 @@ static inline void get_page(struct page *page)
page = compound_head(page);
/*
* Getting a normal page or the head of a compound page
- * requires to already have an elevated page->_count.
+	 * requires an already elevated page->_refcount.
*/
VM_BUG_ON_PAGE(page_ref_count(page) <= 0, page);
page_ref_inc(page);
@@ -837,10 +850,7 @@ extern int page_cpupid_xchg_last(struct page *page, int cpupid);
static inline void page_cpupid_reset_last(struct page *page)
{
- int cpupid = (1 << LAST_CPUPID_SHIFT) - 1;
-
- page->flags &= ~(LAST_CPUPID_MASK << LAST_CPUPID_PGSHIFT);
- page->flags |= (cpupid & LAST_CPUPID_MASK) << LAST_CPUPID_PGSHIFT;
+ page->flags |= LAST_CPUPID_MASK << LAST_CPUPID_PGSHIFT;
}
#endif /* LAST_CPUPID_NOT_IN_PAGE_FLAGS */
#else /* !CONFIG_NUMA_BALANCING */
@@ -948,7 +958,7 @@ static inline struct mem_cgroup *page_memcg(struct page *page)
static __always_inline void *lowmem_page_address(const struct page *page)
{
- return __va(PFN_PHYS(page_to_pfn(page)));
+ return page_to_virt(page);
}
#if defined(CONFIG_HIGHMEM) && !defined(WANT_PAGE_VIRTUAL)
@@ -1019,24 +1029,7 @@ static inline pgoff_t page_file_index(struct page *page)
return page->index;
}
-/*
- * Return true if this page is mapped into pagetables.
- * For compound page it returns true if any subpage of compound page is mapped.
- */
-static inline bool page_mapped(struct page *page)
-{
- int i;
- if (likely(!PageCompound(page)))
- return atomic_read(&page->_mapcount) >= 0;
- page = compound_head(page);
- if (atomic_read(compound_mapcount_ptr(page)) >= 0)
- return true;
- for (i = 0; i < hpage_nr_pages(page); i++) {
- if (atomic_read(&page[i]._mapcount) >= 0)
- return true;
- }
- return false;
-}
+bool page_mapped(struct page *page);
/*
* Return true only if the page has been allocated with
@@ -1138,6 +1131,8 @@ struct zap_details {
struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
pte_t pte);
+struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr,
+ pmd_t pmd);
int zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
unsigned long size);
@@ -1250,78 +1245,20 @@ long get_user_pages_remote(struct task_struct *tsk, struct mm_struct *mm,
unsigned long start, unsigned long nr_pages,
int write, int force, struct page **pages,
struct vm_area_struct **vmas);
-long get_user_pages6(unsigned long start, unsigned long nr_pages,
+long get_user_pages(unsigned long start, unsigned long nr_pages,
int write, int force, struct page **pages,
struct vm_area_struct **vmas);
-long get_user_pages_locked6(unsigned long start, unsigned long nr_pages,
+long get_user_pages_locked(unsigned long start, unsigned long nr_pages,
int write, int force, struct page **pages, int *locked);
long __get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm,
unsigned long start, unsigned long nr_pages,
int write, int force, struct page **pages,
unsigned int gup_flags);
-long get_user_pages_unlocked5(unsigned long start, unsigned long nr_pages,
+long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
int write, int force, struct page **pages);
int get_user_pages_fast(unsigned long start, int nr_pages, int write,
struct page **pages);
-/* suppress warnings from use in EXPORT_SYMBOL() */
-#ifndef __DISABLE_GUP_DEPRECATED
-#define __gup_deprecated __deprecated
-#else
-#define __gup_deprecated
-#endif
-/*
- * These macros provide backward-compatibility with the old
- * get_user_pages() variants which took tsk/mm. These
- * functions/macros provide both compile-time __deprecated so we
- * can catch old-style use and not break the build. The actual
- * functions also have WARN_ON()s to let us know at runtime if
- * the get_user_pages() should have been the "remote" variant.
- *
- * These are hideous, but temporary.
- *
- * If you run into one of these __deprecated warnings, look
- * at how you are calling get_user_pages(). If you are calling
- * it with current/current->mm as the first two arguments,
- * simply remove those arguments. The behavior will be the same
- * as it is now. If you are calling it on another task, use
- * get_user_pages_remote() instead.
- *
- * Any questions? Ask Dave Hansen <dave@sr71.net>
- */
-long
-__gup_deprecated
-get_user_pages8(struct task_struct *tsk, struct mm_struct *mm,
- unsigned long start, unsigned long nr_pages,
- int write, int force, struct page **pages,
- struct vm_area_struct **vmas);
-#define GUP_MACRO(_1, _2, _3, _4, _5, _6, _7, _8, get_user_pages, ...) \
- get_user_pages
-#define get_user_pages(...) GUP_MACRO(__VA_ARGS__, \
- get_user_pages8, x, \
- get_user_pages6, x, x, x, x, x)(__VA_ARGS__)
-
-__gup_deprecated
-long get_user_pages_locked8(struct task_struct *tsk, struct mm_struct *mm,
- unsigned long start, unsigned long nr_pages,
- int write, int force, struct page **pages,
- int *locked);
-#define GUPL_MACRO(_1, _2, _3, _4, _5, _6, _7, _8, get_user_pages_locked, ...) \
- get_user_pages_locked
-#define get_user_pages_locked(...) GUPL_MACRO(__VA_ARGS__, \
- get_user_pages_locked8, x, \
- get_user_pages_locked6, x, x, x, x)(__VA_ARGS__)
-
-__gup_deprecated
-long get_user_pages_unlocked7(struct task_struct *tsk, struct mm_struct *mm,
- unsigned long start, unsigned long nr_pages,
- int write, int force, struct page **pages);
-#define GUPU_MACRO(_1, _2, _3, _4, _5, _6, _7, get_user_pages_unlocked, ...) \
- get_user_pages_unlocked
-#define get_user_pages_unlocked(...) GUPU_MACRO(__VA_ARGS__, \
- get_user_pages_unlocked7, x, \
- get_user_pages_unlocked5, x, x, x, x)(__VA_ARGS__)
-
/* Container for pinned pfns / pages */
struct frame_vector {
unsigned int nr_allocated; /* Number of frames we have space for */
diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h
index 712e8c37a200..5bd29ba4f174 100644
--- a/include/linux/mm_inline.h
+++ b/include/linux/mm_inline.h
@@ -22,22 +22,34 @@ static inline int page_is_file_cache(struct page *page)
return !PageSwapBacked(page);
}
+static __always_inline void __update_lru_size(struct lruvec *lruvec,
+ enum lru_list lru, int nr_pages)
+{
+ __mod_zone_page_state(lruvec_zone(lruvec), NR_LRU_BASE + lru, nr_pages);
+}
+
+static __always_inline void update_lru_size(struct lruvec *lruvec,
+ enum lru_list lru, int nr_pages)
+{
+#ifdef CONFIG_MEMCG
+ mem_cgroup_update_lru_size(lruvec, lru, nr_pages);
+#else
+ __update_lru_size(lruvec, lru, nr_pages);
+#endif
+}
+
static __always_inline void add_page_to_lru_list(struct page *page,
struct lruvec *lruvec, enum lru_list lru)
{
- int nr_pages = hpage_nr_pages(page);
- mem_cgroup_update_lru_size(lruvec, lru, nr_pages);
+ update_lru_size(lruvec, lru, hpage_nr_pages(page));
list_add(&page->lru, &lruvec->lists[lru]);
- __mod_zone_page_state(lruvec_zone(lruvec), NR_LRU_BASE + lru, nr_pages);
}
static __always_inline void del_page_from_lru_list(struct page *page,
struct lruvec *lruvec, enum lru_list lru)
{
- int nr_pages = hpage_nr_pages(page);
- mem_cgroup_update_lru_size(lruvec, lru, -nr_pages);
list_del(&page->lru);
- __mod_zone_page_state(lruvec_zone(lruvec), NR_LRU_BASE + lru, -nr_pages);
+ update_lru_size(lruvec, lru, -hpage_nr_pages(page));
}
/**
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 944b2b37313b..1fda9c99ef95 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -73,9 +73,9 @@ struct page {
unsigned long counters;
#else
/*
- * Keep _count separate from slub cmpxchg_double data.
- * As the rest of the double word is protected by
- * slab_lock but _count is not.
+ * Keep _refcount separate from slub cmpxchg_double
+ * data. As the rest of the double word is protected by
+ * slab_lock but _refcount is not.
*/
unsigned counters;
#endif
@@ -97,7 +97,11 @@ struct page {
};
int units; /* SLOB */
};
- atomic_t _count; /* Usage count, see below. */
+ /*
+	 * Usage count. *USE WRAPPER FUNCTIONS*
+	 * from page_ref.h for manual accounting.
+ */
+ atomic_t _refcount;
};
unsigned int active; /* SLAB */
};
@@ -248,7 +252,7 @@ struct page_frag_cache {
__u32 offset;
#endif
 /* we maintain a pagecount bias, so that we don't dirty cache line
- * containing page->_count every time we allocate a fragment.
+ * containing page->_refcount every time we allocate a fragment.
*/
unsigned int pagecnt_bias;
bool pfmemalloc;
@@ -341,7 +345,7 @@ struct vm_area_struct {
/* Information about our backing store: */
unsigned long vm_pgoff; /* Offset (within vm_file) in PAGE_SIZE
- units, *not* PAGE_CACHE_SIZE */
+ units */
struct file * vm_file; /* File we map to (can be NULL). */
void * vm_private_data; /* was vm_pte (shared mem) */
diff --git a/include/linux/mmc/dw_mmc.h b/include/linux/mmc/dw_mmc.h
index 7b41c6db1bb6..f7ed271a1d54 100644
--- a/include/linux/mmc/dw_mmc.h
+++ b/include/linux/mmc/dw_mmc.h
@@ -36,7 +36,6 @@ enum {
EVENT_XFER_COMPLETE,
EVENT_DATA_COMPLETE,
EVENT_DATA_ERROR,
- EVENT_XFER_ERROR
};
struct mmc_data;
@@ -55,6 +54,7 @@ struct dw_mci_dma_slave {
/**
* struct dw_mci - MMC controller state shared between all slots
* @lock: Spinlock protecting the queue and associated data.
+ * @irq_lock: Spinlock protecting the INTMASK setting.
* @regs: Pointer to MMIO registers.
* @fifo_reg: Pointer to MMIO registers for data FIFO
* @sg: Scatterlist entry currently being processed by PIO code, if any.
@@ -65,6 +65,9 @@ struct dw_mci_dma_slave {
* @cmd: The command currently being sent to the card, or NULL.
* @data: The data currently being transferred, or NULL if no data
* transfer is in progress.
+ * @stop_abort:		The command currently prepared for stopping transfer.

+ * @prev_blksz: The former transfer blksz record.
+ * @timing: Record of current ios timing.
* @use_dma: Whether DMA channel is initialized or not.
* @using_dma: Whether DMA is in use for the current transfer.
* @dma_64bit_address: Whether DMA supports 64-bit address mode or not.
@@ -72,7 +75,10 @@ struct dw_mci_dma_slave {
* @sg_cpu: Virtual address of DMA buffer.
* @dma_ops: Pointer to platform-specific DMA callbacks.
* @cmd_status: Snapshot of SR taken upon completion of the current
+ * @ring_size: Buffer size for idma descriptors.
* command. Only valid when EVENT_CMD_COMPLETE is pending.
+ * @dms: structure of slave-dma private data.
+ * @phy_regs: physical address of controller's register map
* @data_status: Snapshot of SR taken upon completion of the current
* data transfer. Only valid when EVENT_DATA_COMPLETE or
* EVENT_DATA_ERROR is pending.
@@ -80,7 +86,6 @@ struct dw_mci_dma_slave {
* to be sent.
* @dir_status: Direction of current transfer.
* @tasklet: Tasklet running the request state machine.
- * @card_tasklet: Tasklet handling card detect.
* @pending_events: Bitmask of events flagged by the interrupt handler
* to be processed by the tasklet.
* @completed_events: Bitmask of events which the state machine has
@@ -91,6 +96,7 @@ struct dw_mci_dma_slave {
* rate and timeout calculations.
* @current_speed: Configured rate of the controller.
* @num_slots: Number of slots available.
+ * @fifoth_val: The value of FIFOTH register.
* @verid: Denote Version ID.
* @dev: Device associated with the MMC controller.
* @pdata: Platform data associated with the MMC controller.
@@ -107,9 +113,11 @@ struct dw_mci_dma_slave {
* @push_data: Pointer to FIFO push function.
* @pull_data: Pointer to FIFO pull function.
* @quirks: Set of quirks that apply to specific versions of the IP.
+ * @vqmmc_enabled: Status of vqmmc, should be true or false.
* @irq_flags: The flags to be passed to request_irq.
* @irq: The irq value to be passed to request_irq.
* @sdio_id0: Number of slot0 in the SDIO interrupt registers.
+ * @cmd11_timer: Timer for SD3.0 voltage switch over scheme.
* @dto_timer: Timer for broken data transfer over scheme.
*
* Locking
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
index 8dd4d290ab0d..85800b48241f 100644
--- a/include/linux/mmc/host.h
+++ b/include/linux/mmc/host.h
@@ -93,28 +93,39 @@ struct mmc_host_ops {
void (*pre_req)(struct mmc_host *host, struct mmc_request *req,
bool is_first_req);
void (*request)(struct mmc_host *host, struct mmc_request *req);
+
+ /*
+ * Avoid calling the next three functions too often or in a "fast
+	 * path", since the underlying controller might implement them in an
+	 * expensive and/or slow way. Also note that these functions might
+	 * sleep, so don't call them in atomic context!
+ */
+
+ /*
+ * Notes to the set_ios callback:
+	 * ios->clock might be 0. For some controllers, setting 0 Hz
+	 * works like setting any other frequency. However, some controllers
+ * explicitly need to disable the clock. Otherwise e.g. voltage
+ * switching might fail because the SDCLK is not really quiet.
+ */
+ void (*set_ios)(struct mmc_host *host, struct mmc_ios *ios);
+
/*
- * Avoid calling these three functions too often or in a "fast path",
- * since underlaying controller might implement them in an expensive
- * and/or slow way.
- *
- * Also note that these functions might sleep, so don't call them
- * in the atomic contexts!
- *
* Return values for the get_ro callback should be:
* 0 for a read/write card
* 1 for a read-only card
* -ENOSYS when not supported (equal to NULL callback)
* or a negative errno value when something bad happened
- *
+ */
+ int (*get_ro)(struct mmc_host *host);
+
+ /*
* Return values for the get_cd callback should be:
 *   0 for an absent card
* 1 for a present card
* -ENOSYS when not supported (equal to NULL callback)
* or a negative errno value when something bad happened
*/
- void (*set_ios)(struct mmc_host *host, struct mmc_ios *ios);
- int (*get_ro)(struct mmc_host *host);
int (*get_cd)(struct mmc_host *host);
void (*enable_sdio_irq)(struct mmc_host *host, int enable);
diff --git a/include/linux/mmc/sdio_ids.h b/include/linux/mmc/sdio_ids.h
index 83430f2ea757..0d126aeb3ec0 100644
--- a/include/linux/mmc/sdio_ids.h
+++ b/include/linux/mmc/sdio_ids.h
@@ -36,6 +36,7 @@
#define SDIO_DEVICE_ID_BROADCOM_43430 0xa9a6
#define SDIO_DEVICE_ID_BROADCOM_4345 0x4345
#define SDIO_DEVICE_ID_BROADCOM_4354 0x4354
+#define SDIO_DEVICE_ID_BROADCOM_4356 0x4356
#define SDIO_VENDOR_ID_INTEL 0x0089
#define SDIO_DEVICE_ID_INTEL_IWMC3200WIMAX 0x1402
diff --git a/include/linux/mmc/sh_mobile_sdhi.h b/include/linux/mmc/sh_mobile_sdhi.h
deleted file mode 100644
index 95d6f0314a7d..000000000000
--- a/include/linux/mmc/sh_mobile_sdhi.h
+++ /dev/null
@@ -1,10 +0,0 @@
-#ifndef LINUX_MMC_SH_MOBILE_SDHI_H
-#define LINUX_MMC_SH_MOBILE_SDHI_H
-
-#include <linux/types.h>
-
-#define SH_MOBILE_SDHI_IRQ_CARD_DETECT "card_detect"
-#define SH_MOBILE_SDHI_IRQ_SDCARD "sdcard"
-#define SH_MOBILE_SDHI_IRQ_SDIO "sdio"
-
-#endif /* LINUX_MMC_SH_MOBILE_SDHI_H */
diff --git a/include/linux/mmc/tmio.h b/include/linux/mmc/tmio.h
deleted file mode 100644
index 5f5cd80e9765..000000000000
--- a/include/linux/mmc/tmio.h
+++ /dev/null
@@ -1,71 +0,0 @@
-/*
- * include/linux/mmc/tmio.h
- *
- * Copyright (C) 2016 Sang Engineering, Wolfram Sang
- * Copyright (C) 2015-16 Renesas Electronics Corporation
- * Copyright (C) 2007 Ian Molton
- * Copyright (C) 2004 Ian Molton
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * Driver for the MMC / SD / SDIO cell found in:
- *
- * TC6393XB TC6391XB TC6387XB T7L66XB ASIC3
- */
-#ifndef LINUX_MMC_TMIO_H
-#define LINUX_MMC_TMIO_H
-
-#define CTL_SD_CMD 0x00
-#define CTL_ARG_REG 0x04
-#define CTL_STOP_INTERNAL_ACTION 0x08
-#define CTL_XFER_BLK_COUNT 0xa
-#define CTL_RESPONSE 0x0c
-#define CTL_STATUS 0x1c
-#define CTL_STATUS2 0x1e
-#define CTL_IRQ_MASK 0x20
-#define CTL_SD_CARD_CLK_CTL 0x24
-#define CTL_SD_XFER_LEN 0x26
-#define CTL_SD_MEM_CARD_OPT 0x28
-#define CTL_SD_ERROR_DETAIL_STATUS 0x2c
-#define CTL_SD_DATA_PORT 0x30
-#define CTL_TRANSACTION_CTL 0x34
-#define CTL_SDIO_STATUS 0x36
-#define CTL_SDIO_IRQ_MASK 0x38
-#define CTL_DMA_ENABLE 0xd8
-#define CTL_RESET_SD 0xe0
-#define CTL_VERSION 0xe2
-#define CTL_SDIO_REGS 0x100
-#define CTL_CLK_AND_WAIT_CTL 0x138
-#define CTL_RESET_SDIO 0x1e0
-
-/* Definitions for values the CTRL_STATUS register can take. */
-#define TMIO_STAT_CMDRESPEND 0x00000001
-#define TMIO_STAT_DATAEND 0x00000004
-#define TMIO_STAT_CARD_REMOVE 0x00000008
-#define TMIO_STAT_CARD_INSERT 0x00000010
-#define TMIO_STAT_SIGSTATE 0x00000020
-#define TMIO_STAT_WRPROTECT 0x00000080
-#define TMIO_STAT_CARD_REMOVE_A 0x00000100
-#define TMIO_STAT_CARD_INSERT_A 0x00000200
-#define TMIO_STAT_SIGSTATE_A 0x00000400
-#define TMIO_STAT_CMD_IDX_ERR 0x00010000
-#define TMIO_STAT_CRCFAIL 0x00020000
-#define TMIO_STAT_STOPBIT_ERR 0x00040000
-#define TMIO_STAT_DATATIMEOUT 0x00080000
-#define TMIO_STAT_RXOVERFLOW 0x00100000
-#define TMIO_STAT_TXUNDERRUN 0x00200000
-#define TMIO_STAT_CMDTIMEOUT 0x00400000
-#define TMIO_STAT_RXRDY 0x01000000
-#define TMIO_STAT_TXRQ 0x02000000
-#define TMIO_STAT_ILL_FUNC 0x20000000
-#define TMIO_STAT_CMD_BUSY 0x40000000
-#define TMIO_STAT_ILL_ACCESS 0x80000000
-
-#define CLK_CTL_DIV_MASK 0xff
-#define CLK_CTL_SCLKEN BIT(8)
-
-#define TMIO_BBS 512 /* Boot block size */
-
-#endif /* LINUX_MMC_TMIO_H */
diff --git a/include/linux/mmu_context.h b/include/linux/mmu_context.h
index 70fffeba7495..a4441784503b 100644
--- a/include/linux/mmu_context.h
+++ b/include/linux/mmu_context.h
@@ -1,9 +1,16 @@
#ifndef _LINUX_MMU_CONTEXT_H
#define _LINUX_MMU_CONTEXT_H
+#include <asm/mmu_context.h>
+
struct mm_struct;
void use_mm(struct mm_struct *mm);
void unuse_mm(struct mm_struct *mm);
+/* Architectures that care about IRQ state in switch_mm can override this. */
+#ifndef switch_mm_irqs_off
+# define switch_mm_irqs_off switch_mm
+#endif
+
#endif
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index c60df9257cc7..c60db2096fd8 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -85,13 +85,6 @@ extern int page_group_by_mobility_disabled;
get_pfnblock_flags_mask(page, page_to_pfn(page), \
PB_migrate_end, MIGRATETYPE_MASK)
-static inline int get_pfnblock_migratetype(struct page *page, unsigned long pfn)
-{
- BUILD_BUG_ON(PB_migrate_end - PB_migrate != 2);
- return get_pfnblock_flags_mask(page, pfn, PB_migrate_end,
- MIGRATETYPE_MASK);
-}
-
struct free_area {
struct list_head free_list[MIGRATE_TYPES];
unsigned long nr_free;
@@ -747,7 +740,8 @@ extern struct mutex zonelists_mutex;
void build_all_zonelists(pg_data_t *pgdat, struct zone *zone);
void wakeup_kswapd(struct zone *zone, int order, enum zone_type classzone_idx);
bool zone_watermark_ok(struct zone *z, unsigned int order,
- unsigned long mark, int classzone_idx, int alloc_flags);
+ unsigned long mark, int classzone_idx,
+ unsigned int alloc_flags);
bool zone_watermark_ok_safe(struct zone *z, unsigned int order,
unsigned long mark, int classzone_idx);
enum memmap_context {
@@ -828,10 +822,7 @@ static inline int is_highmem_idx(enum zone_type idx)
static inline int is_highmem(struct zone *zone)
{
#ifdef CONFIG_HIGHMEM
- int zone_off = (char *)zone - (char *)zone->zone_pgdat->node_zones;
- return zone_off == ZONE_HIGHMEM * sizeof(*zone) ||
- (zone_off == ZONE_MOVABLE * sizeof(*zone) &&
- zone_movable_is_highmem());
+ return is_highmem_idx(zone_idx(zone));
#else
return 0;
#endif
@@ -922,6 +913,10 @@ static inline int zonelist_node_idx(struct zoneref *zoneref)
#endif /* CONFIG_NUMA */
}
+struct zoneref *__next_zones_zonelist(struct zoneref *z,
+ enum zone_type highest_zoneidx,
+ nodemask_t *nodes);
+
/**
* next_zones_zonelist - Returns the next zone at or below highest_zoneidx within the allowed nodemask using a cursor within a zonelist as a starting point
* @z - The cursor used as a starting point for the search
@@ -934,9 +929,14 @@ static inline int zonelist_node_idx(struct zoneref *zoneref)
* being examined. It should be advanced by one before calling
* next_zones_zonelist again.
*/
-struct zoneref *next_zones_zonelist(struct zoneref *z,
+static __always_inline struct zoneref *next_zones_zonelist(struct zoneref *z,
enum zone_type highest_zoneidx,
- nodemask_t *nodes);
+ nodemask_t *nodes)
+{
+ if (likely(!nodes && zonelist_zone_idx(z) <= highest_zoneidx))
+ return z;
+ return __next_zones_zonelist(z, highest_zoneidx, nodes);
+}
/**
* first_zones_zonelist - Returns the first zone at or below highest_zoneidx within the allowed nodemask in a zonelist
@@ -952,13 +952,10 @@ struct zoneref *next_zones_zonelist(struct zoneref *z,
*/
static inline struct zoneref *first_zones_zonelist(struct zonelist *zonelist,
enum zone_type highest_zoneidx,
- nodemask_t *nodes,
- struct zone **zone)
+ nodemask_t *nodes)
{
- struct zoneref *z = next_zones_zonelist(zonelist->_zonerefs,
+ return next_zones_zonelist(zonelist->_zonerefs,
highest_zoneidx, nodes);
- *zone = zonelist_zone(z);
- return z;
}
/**
@@ -973,10 +970,17 @@ static inline struct zoneref *first_zones_zonelist(struct zonelist *zonelist,
* within a given nodemask
*/
#define for_each_zone_zonelist_nodemask(zone, z, zlist, highidx, nodemask) \
- for (z = first_zones_zonelist(zlist, highidx, nodemask, &zone); \
+ for (z = first_zones_zonelist(zlist, highidx, nodemask), zone = zonelist_zone(z); \
+ zone; \
+ z = next_zones_zonelist(++z, highidx, nodemask), \
+ zone = zonelist_zone(z))
+
+#define for_next_zone_zonelist_nodemask(zone, z, zlist, highidx, nodemask) \
+ for (zone = z->zone; \
zone; \
z = next_zones_zonelist(++z, highidx, nodemask), \
- zone = zonelist_zone(z)) \
+ zone = zonelist_zone(z))
+
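A hedged usage sketch of the reworked iterator above; zonelist, highidx and nodemask stand for caller-supplied values.

static unsigned long example_count_free_pages(struct zonelist *zonelist,
					      enum zone_type highidx,
					      nodemask_t *nodemask)
{
	struct zoneref *z;
	struct zone *zone;
	unsigned long free = 0;

	/* walk every allowed zone at or below highidx */
	for_each_zone_zonelist_nodemask(zone, z, zonelist, highidx, nodemask)
		free += zone_page_state(zone, NR_FREE_PAGES);
	return free;
}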
/**
* for_each_zone_zonelist - helper macro to iterate over valid zones in a zonelist at or below a given zone index
diff --git a/include/linux/module.h b/include/linux/module.h
index 2bb0c3085706..3daf2b3a09d2 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -330,6 +330,15 @@ struct mod_kallsyms {
char *strtab;
};
+#ifdef CONFIG_LIVEPATCH
+struct klp_modinfo {
+ Elf_Ehdr hdr;
+ Elf_Shdr *sechdrs;
+ char *secstrings;
+ unsigned int symndx;
+};
+#endif
+
struct module {
enum module_state state;
@@ -456,7 +465,11 @@ struct module {
#endif
#ifdef CONFIG_LIVEPATCH
+ bool klp; /* Is this a livepatch module? */
bool klp_alive;
+
+ /* Elf information */
+ struct klp_modinfo *klp_info;
#endif
#ifdef CONFIG_MODULE_UNLOAD
@@ -630,6 +643,18 @@ static inline bool module_requested_async_probing(struct module *module)
return module && module->async_probe_requested;
}
+#ifdef CONFIG_LIVEPATCH
+static inline bool is_livepatch_module(struct module *mod)
+{
+ return mod->klp;
+}
+#else /* !CONFIG_LIVEPATCH */
+static inline bool is_livepatch_module(struct module *mod)
+{
+ return false;
+}
+#endif /* CONFIG_LIVEPATCH */
+
#else /* !CONFIG_MODULES... */
/* Given an address, look for it in the exception tables. */
diff --git a/include/linux/mtd/mtd.h b/include/linux/mtd/mtd.h
index 771272187316..ef9fea4fc400 100644
--- a/include/linux/mtd/mtd.h
+++ b/include/linux/mtd/mtd.h
@@ -283,17 +283,7 @@ int mtd_panic_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen,
const u_char *buf);
int mtd_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops);
-
-static inline int mtd_write_oob(struct mtd_info *mtd, loff_t to,
- struct mtd_oob_ops *ops)
-{
- ops->retlen = ops->oobretlen = 0;
- if (!mtd->_write_oob)
- return -EOPNOTSUPP;
- if (!(mtd->flags & MTD_WRITEABLE))
- return -EROFS;
- return mtd->_write_oob(mtd, to, ops);
-}
+int mtd_write_oob(struct mtd_info *mtd, loff_t to, struct mtd_oob_ops *ops);
int mtd_get_fact_prot_info(struct mtd_info *mtd, size_t len, size_t *retlen,
struct otp_info *buf);
diff --git a/include/linux/namei.h b/include/linux/namei.h
index 77d01700daf7..ec5ec2818a28 100644
--- a/include/linux/namei.h
+++ b/include/linux/namei.h
@@ -79,6 +79,8 @@ extern int kern_path_mountpoint(int, const char *, struct path *, unsigned int);
extern struct dentry *lookup_one_len(const char *, struct dentry *, int);
extern struct dentry *lookup_one_len_unlocked(const char *, struct dentry *, int);
+struct qstr;
+extern struct dentry *lookup_hash(const struct qstr *, struct dentry *);
extern int follow_down_one(struct path *);
extern int follow_down(struct path *);
diff --git a/include/linux/net.h b/include/linux/net.h
index 49175e4ced11..9aa49a05fe38 100644
--- a/include/linux/net.h
+++ b/include/linux/net.h
@@ -218,8 +218,7 @@ int sock_create_lite(int family, int type, int proto, struct socket **res);
struct socket *sock_alloc(void);
void sock_release(struct socket *sock);
int sock_sendmsg(struct socket *sock, struct msghdr *msg);
-int sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
- int flags);
+int sock_recvmsg(struct socket *sock, struct msghdr *msg, int flags);
struct file *sock_alloc_file(struct socket *sock, int flags, const char *dname);
struct socket *sockfd_lookup(int fd, int *err);
struct socket *sock_from_file(struct file *file, int *err);
@@ -246,7 +245,15 @@ do { \
net_ratelimited_function(pr_warn, fmt, ##__VA_ARGS__)
#define net_info_ratelimited(fmt, ...) \
net_ratelimited_function(pr_info, fmt, ##__VA_ARGS__)
-#if defined(DEBUG)
+#if defined(CONFIG_DYNAMIC_DEBUG)
+#define net_dbg_ratelimited(fmt, ...) \
+do { \
+ DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt); \
+ if (unlikely(descriptor.flags & _DPRINTK_FLAGS_PRINT) && \
+ net_ratelimit()) \
+ __dynamic_pr_debug(&descriptor, fmt, ##__VA_ARGS__); \
+} while (0)
+#elif defined(DEBUG)
#define net_dbg_ratelimited(fmt, ...) \
net_ratelimited_function(pr_debug, fmt, ##__VA_ARGS__)
#else
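A brief usage sketch; under CONFIG_DYNAMIC_DEBUG the callsite still has to be enabled through the dynamic-debug control interface before anything is printed.

static void example_drop_notice(struct net_device *dev)
{
	/* rate-limited, and compiled to a dynamic-debug site when enabled */
	net_dbg_ratelimited("%s: dropping malformed packet\n", dev->name);
}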
diff --git a/include/linux/netdev_features.h b/include/linux/netdev_features.h
index a734bf43d190..aa7b2400f98c 100644
--- a/include/linux/netdev_features.h
+++ b/include/linux/netdev_features.h
@@ -39,14 +39,19 @@ enum {
NETIF_F_UFO_BIT, /* ... UDPv4 fragmentation */
NETIF_F_GSO_ROBUST_BIT, /* ... ->SKB_GSO_DODGY */
NETIF_F_TSO_ECN_BIT, /* ... TCP ECN support */
+ NETIF_F_TSO_MANGLEID_BIT, /* ... IPV4 ID mangling allowed */
NETIF_F_TSO6_BIT, /* ... TCPv6 segmentation */
NETIF_F_FSO_BIT, /* ... FCoE segmentation */
NETIF_F_GSO_GRE_BIT, /* ... GRE with TSO */
NETIF_F_GSO_GRE_CSUM_BIT, /* ... GRE with csum with TSO */
- NETIF_F_GSO_IPIP_BIT, /* ... IPIP tunnel with TSO */
- NETIF_F_GSO_SIT_BIT, /* ... SIT tunnel with TSO */
+ NETIF_F_GSO_IPXIP4_BIT, /* ... IP4 or IP6 over IP4 with TSO */
+ NETIF_F_GSO_IPXIP6_BIT, /* ... IP4 or IP6 over IP6 with TSO */
NETIF_F_GSO_UDP_TUNNEL_BIT, /* ... UDP TUNNEL with TSO */
NETIF_F_GSO_UDP_TUNNEL_CSUM_BIT,/* ... UDP TUNNEL with TSO & CSUM */
+ NETIF_F_GSO_PARTIAL_BIT, /* ... Only segment inner-most L4
+ * in hardware and all other
+ * headers in software.
+ */
NETIF_F_GSO_TUNNEL_REMCSUM_BIT, /* ... TUNNEL with TSO & REMCSUM */
/**/NETIF_F_GSO_LAST = /* last bit, see GSO_MASK */
NETIF_F_GSO_TUNNEL_REMCSUM_BIT,
@@ -116,10 +121,12 @@ enum {
#define NETIF_F_RXALL __NETIF_F(RXALL)
#define NETIF_F_GSO_GRE __NETIF_F(GSO_GRE)
#define NETIF_F_GSO_GRE_CSUM __NETIF_F(GSO_GRE_CSUM)
-#define NETIF_F_GSO_IPIP __NETIF_F(GSO_IPIP)
-#define NETIF_F_GSO_SIT __NETIF_F(GSO_SIT)
+#define NETIF_F_GSO_IPXIP4 __NETIF_F(GSO_IPXIP4)
+#define NETIF_F_GSO_IPXIP6 __NETIF_F(GSO_IPXIP6)
#define NETIF_F_GSO_UDP_TUNNEL __NETIF_F(GSO_UDP_TUNNEL)
#define NETIF_F_GSO_UDP_TUNNEL_CSUM __NETIF_F(GSO_UDP_TUNNEL_CSUM)
+#define NETIF_F_TSO_MANGLEID __NETIF_F(TSO_MANGLEID)
+#define NETIF_F_GSO_PARTIAL __NETIF_F(GSO_PARTIAL)
#define NETIF_F_GSO_TUNNEL_REMCSUM __NETIF_F(GSO_TUNNEL_REMCSUM)
#define NETIF_F_HW_VLAN_STAG_FILTER __NETIF_F(HW_VLAN_STAG_FILTER)
#define NETIF_F_HW_VLAN_STAG_RX __NETIF_F(HW_VLAN_STAG_RX)
@@ -145,10 +152,6 @@ enum {
#define NETIF_F_GSO_MASK (__NETIF_F_BIT(NETIF_F_GSO_LAST + 1) - \
__NETIF_F_BIT(NETIF_F_GSO_SHIFT))
-/* List of features with software fallbacks. */
-#define NETIF_F_GSO_SOFTWARE (NETIF_F_TSO | NETIF_F_TSO_ECN | \
- NETIF_F_TSO6 | NETIF_F_UFO)
-
/* List of IP checksum features. Note that NETIF_F_ HW_CSUM should not be
* set in features when NETIF_F_IP_CSUM or NETIF_F_IPV6_CSUM are set--
* this would be contradictory
@@ -156,11 +159,15 @@ enum {
#define NETIF_F_CSUM_MASK (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | \
NETIF_F_HW_CSUM)
-#define NETIF_F_ALL_TSO (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
+#define NETIF_F_ALL_TSO (NETIF_F_TSO | NETIF_F_TSO6 | \
+ NETIF_F_TSO_ECN | NETIF_F_TSO_MANGLEID)
#define NETIF_F_ALL_FCOE (NETIF_F_FCOE_CRC | NETIF_F_FCOE_MTU | \
NETIF_F_FSO)
+/* List of features with software fallbacks. */
+#define NETIF_F_GSO_SOFTWARE (NETIF_F_ALL_TSO | NETIF_F_UFO)
+
/*
* If one device supports one of these features, then enable them
* for all in netdev_increment_features.
@@ -193,8 +200,8 @@ enum {
#define NETIF_F_GSO_ENCAP_ALL (NETIF_F_GSO_GRE | \
NETIF_F_GSO_GRE_CSUM | \
- NETIF_F_GSO_IPIP | \
- NETIF_F_GSO_SIT | \
+ NETIF_F_GSO_IPXIP4 | \
+ NETIF_F_GSO_IPXIP6 | \
NETIF_F_GSO_UDP_TUNNEL | \
NETIF_F_GSO_UDP_TUNNEL_CSUM)
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index cb0d5d09c2e4..f45929ce8157 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -106,7 +106,6 @@ enum netdev_tx {
__NETDEV_TX_MIN = INT_MIN, /* make sure enum is signed */
NETDEV_TX_OK = 0x00, /* driver took care of packet */
NETDEV_TX_BUSY = 0x10, /* driver tx path was busy*/
- NETDEV_TX_LOCKED = 0x20, /* driver tx lock was already taken */
};
typedef enum netdev_tx netdev_tx_t;
@@ -570,28 +569,27 @@ struct netdev_queue {
#if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
int numa_node;
#endif
+ unsigned long tx_maxrate;
+ /*
+ * Number of TX timeouts for this queue
+ * (/sys/class/net/DEV/Q/trans_timeout)
+ */
+ unsigned long trans_timeout;
/*
* write-mostly part
*/
spinlock_t _xmit_lock ____cacheline_aligned_in_smp;
int xmit_lock_owner;
/*
- * please use this field instead of dev->trans_start
+ * Time (in jiffies) of last Tx
*/
unsigned long trans_start;
- /*
- * Number of TX timeouts for this queue
- * (/sys/class/net/DEV/Q/trans_timeout)
- */
- unsigned long trans_timeout;
-
unsigned long state;
#ifdef CONFIG_BQL
struct dql dql;
#endif
- unsigned long tx_maxrate;
} ____cacheline_aligned_in_smp;
static inline int netdev_queue_numa_node_read(const struct netdev_queue *q)
@@ -831,7 +829,6 @@ struct tc_to_netdev {
* the queue before that can happen; it's for obsolete devices and weird
* corner cases, but the stack really does a non-trivial amount
* of useless work if you return NETDEV_TX_BUSY.
- * (can also return NETDEV_TX_LOCKED iff NETIF_F_LLTX)
* Required; cannot be NULL.
*
* netdev_features_t (*ndo_fix_features)(struct net_device *dev,
@@ -1548,7 +1545,6 @@ enum netdev_priv_flags {
*
* @offload_fwd_mark: Offload device fwding mark
*
- * @trans_start: Time (in jiffies) of last Tx
* @watchdog_timeo: Represents the timeout that is used by
* the watchdog (see dev_watchdog())
* @watchdog_timer: List of timers
@@ -1586,8 +1582,6 @@ enum netdev_priv_flags {
* @gso_max_size: Maximum size of generic segmentation offload
* @gso_max_segs: Maximum number of segments that can be passed to the
* NIC for GSO
- * @gso_min_segs: Minimum number of segments that can be passed to the
- * NIC for GSO
*
* @dcbnl_ops: Data Center Bridging netlink ops
* @num_tc: Number of traffic classes in the net device
@@ -1656,6 +1650,7 @@ struct net_device {
netdev_features_t vlan_features;
netdev_features_t hw_enc_features;
netdev_features_t mpls_features;
+ netdev_features_t gso_partial_features;
int ifindex;
int group;
@@ -1798,13 +1793,6 @@ struct net_device {
#endif
/* These may be needed for future network-power-down code. */
-
- /*
- * trans_start here is expensive for high speed devices on SMP,
- * please use netdev_queue->trans_start instead.
- */
- unsigned long trans_start;
-
struct timer_list watchdog_timer;
int __percpu *pcpu_refcnt;
@@ -1858,7 +1846,7 @@ struct net_device {
unsigned int gso_max_size;
#define GSO_MAX_SEGS 65535
u16 gso_max_segs;
- u16 gso_min_segs;
+
#ifdef CONFIG_DCB
const struct dcbnl_rtnl_ops *dcbnl_ops;
#endif
@@ -2120,7 +2108,13 @@ struct napi_gro_cb {
/* Used in foo-over-udp, set in udp[46]_gro_receive */
u8 is_ipv6:1;
- /* 7 bit hole */
+ /* Used in GRE, set in fou/gue_gro_receive */
+ u8 is_fou:1;
+
+ /* Used to determine if flush_id can be ignored */
+ u8 is_atomic:1;
+
+ /* 5 bit hole */
/* used to support CHECKSUM_COMPLETE for tunneling protocols */
__wsum csum;
@@ -2159,23 +2153,6 @@ struct packet_offload {
struct list_head list;
};
-struct udp_offload;
-
-struct udp_offload_callbacks {
- struct sk_buff **(*gro_receive)(struct sk_buff **head,
- struct sk_buff *skb,
- struct udp_offload *uoff);
- int (*gro_complete)(struct sk_buff *skb,
- int nhoff,
- struct udp_offload *uoff);
-};
-
-struct udp_offload {
- __be16 port;
- u8 ipproto;
- struct udp_offload_callbacks callbacks;
-};
-
/* often modified stats are per-CPU, other are shared (netdev->stats) */
struct pcpu_sw_netstats {
u64 rx_packets;
@@ -2256,6 +2233,8 @@ struct netdev_lag_lower_state_info {
#define NETDEV_BONDING_INFO 0x0019
#define NETDEV_PRECHANGEUPPER 0x001A
#define NETDEV_CHANGELOWERSTATE 0x001B
+#define NETDEV_OFFLOAD_PUSH_VXLAN 0x001C
+#define NETDEV_OFFLOAD_PUSH_GENEVE 0x001D
int register_netdevice_notifier(struct notifier_block *nb);
int unregister_netdevice_notifier(struct notifier_block *nb);
@@ -2747,7 +2726,6 @@ struct softnet_data {
/* stats */
unsigned int processed;
unsigned int time_squeeze;
- unsigned int cpu_collision;
unsigned int received_rps;
#ifdef CONFIG_RPS
struct softnet_data *rps_ipi_list;
@@ -2760,11 +2738,15 @@ struct softnet_data {
struct sk_buff *completion_queue;
#ifdef CONFIG_RPS
- /* Elements below can be accessed between CPUs for RPS */
+ /* input_queue_head should be written by cpu owning this struct,
+ * and only read by other cpus. Worth using a cache line.
+ */
+ unsigned int input_queue_head ____cacheline_aligned_in_smp;
+
+ /* Elements below can be accessed between CPUs for RPS/RFS */
struct call_single_data csd ____cacheline_aligned_in_smp;
struct softnet_data *rps_ipi_next;
unsigned int cpu;
- unsigned int input_queue_head;
unsigned int input_queue_tail;
#endif
unsigned int dropped;
@@ -2801,7 +2783,7 @@ static inline void netif_tx_schedule_all(struct net_device *dev)
netif_schedule_queue(netdev_get_tx_queue(dev, i));
}
-static inline void netif_tx_start_queue(struct netdev_queue *dev_queue)
+static __always_inline void netif_tx_start_queue(struct netdev_queue *dev_queue)
{
clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
}
@@ -2851,7 +2833,7 @@ static inline void netif_tx_wake_all_queues(struct net_device *dev)
}
}
-static inline void netif_tx_stop_queue(struct netdev_queue *dev_queue)
+static __always_inline void netif_tx_stop_queue(struct netdev_queue *dev_queue)
{
set_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
}
@@ -3273,7 +3255,10 @@ struct sk_buff *dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
struct netdev_queue *txq, int *ret);
int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb);
int dev_forward_skb(struct net_device *dev, struct sk_buff *skb);
-bool is_skb_forwardable(struct net_device *dev, struct sk_buff *skb);
+bool is_skb_forwardable(const struct net_device *dev,
+ const struct sk_buff *skb);
+
+void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev);
extern int netdev_budget;
@@ -3490,6 +3475,15 @@ static inline void txq_trans_update(struct netdev_queue *txq)
txq->trans_start = jiffies;
}
+/* legacy drivers only, netdev_start_xmit() sets txq->trans_start */
+static inline void netif_trans_update(struct net_device *dev)
+{
+ struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);
+
+ if (txq->trans_start != jiffies)
+ txq->trans_start = jiffies;
+}
+
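A hedged sketch of a legacy single-queue driver switching to the new helper now that dev->trans_start is gone; foo_hw_xmit is a made-up hardware hook.

static netdev_tx_t foo_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	foo_hw_xmit(skb);		/* hypothetical: hand the frame to the hardware */
	netif_trans_update(dev);	/* refresh the Tx watchdog timestamp */
	return NETDEV_TX_OK;
}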
/**
* netif_tx_lock - grab network device transmit lock
* @dev: network device
@@ -3765,7 +3759,6 @@ void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
extern int netdev_max_backlog;
extern int netdev_tstamp_prequeue;
extern int weight_p;
-extern int bpf_jit_enable;
bool netdev_has_upper_dev(struct net_device *dev, struct net_device *upper_dev);
struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev,
@@ -4001,21 +3994,23 @@ netdev_features_t netif_skb_features(struct sk_buff *skb);
static inline bool net_gso_ok(netdev_features_t features, int gso_type)
{
- netdev_features_t feature = gso_type << NETIF_F_GSO_SHIFT;
+ netdev_features_t feature = (netdev_features_t)gso_type << NETIF_F_GSO_SHIFT;
/* check flags correspondence */
BUILD_BUG_ON(SKB_GSO_TCPV4 != (NETIF_F_TSO >> NETIF_F_GSO_SHIFT));
BUILD_BUG_ON(SKB_GSO_UDP != (NETIF_F_UFO >> NETIF_F_GSO_SHIFT));
BUILD_BUG_ON(SKB_GSO_DODGY != (NETIF_F_GSO_ROBUST >> NETIF_F_GSO_SHIFT));
BUILD_BUG_ON(SKB_GSO_TCP_ECN != (NETIF_F_TSO_ECN >> NETIF_F_GSO_SHIFT));
+ BUILD_BUG_ON(SKB_GSO_TCP_FIXEDID != (NETIF_F_TSO_MANGLEID >> NETIF_F_GSO_SHIFT));
BUILD_BUG_ON(SKB_GSO_TCPV6 != (NETIF_F_TSO6 >> NETIF_F_GSO_SHIFT));
BUILD_BUG_ON(SKB_GSO_FCOE != (NETIF_F_FSO >> NETIF_F_GSO_SHIFT));
BUILD_BUG_ON(SKB_GSO_GRE != (NETIF_F_GSO_GRE >> NETIF_F_GSO_SHIFT));
BUILD_BUG_ON(SKB_GSO_GRE_CSUM != (NETIF_F_GSO_GRE_CSUM >> NETIF_F_GSO_SHIFT));
- BUILD_BUG_ON(SKB_GSO_IPIP != (NETIF_F_GSO_IPIP >> NETIF_F_GSO_SHIFT));
- BUILD_BUG_ON(SKB_GSO_SIT != (NETIF_F_GSO_SIT >> NETIF_F_GSO_SHIFT));
+ BUILD_BUG_ON(SKB_GSO_IPXIP4 != (NETIF_F_GSO_IPXIP4 >> NETIF_F_GSO_SHIFT));
+ BUILD_BUG_ON(SKB_GSO_IPXIP6 != (NETIF_F_GSO_IPXIP6 >> NETIF_F_GSO_SHIFT));
BUILD_BUG_ON(SKB_GSO_UDP_TUNNEL != (NETIF_F_GSO_UDP_TUNNEL >> NETIF_F_GSO_SHIFT));
BUILD_BUG_ON(SKB_GSO_UDP_TUNNEL_CSUM != (NETIF_F_GSO_UDP_TUNNEL_CSUM >> NETIF_F_GSO_SHIFT));
+ BUILD_BUG_ON(SKB_GSO_PARTIAL != (NETIF_F_GSO_PARTIAL >> NETIF_F_GSO_SHIFT));
BUILD_BUG_ON(SKB_GSO_TUNNEL_REMCSUM != (NETIF_F_GSO_TUNNEL_REMCSUM >> NETIF_F_GSO_SHIFT));
return (features & feature) == feature;
diff --git a/include/linux/netfilter/ipset/ip_set.h b/include/linux/netfilter/ipset/ip_set.h
index 0e1f433cc4b7..83b9a2e0d8d4 100644
--- a/include/linux/netfilter/ipset/ip_set.h
+++ b/include/linux/netfilter/ipset/ip_set.h
@@ -234,6 +234,10 @@ struct ip_set {
spinlock_t lock;
/* References to the set */
u32 ref;
+ /* References to the set for netlink events like dump,
+ * ref can be swapped out by ip_set_swap
+ */
+ u32 ref_netlink;
/* The core set type */
struct ip_set_type *type;
/* The type variant doing the real job */
@@ -347,7 +351,8 @@ ip_set_put_skbinfo(struct sk_buff *skb, struct ip_set_skbinfo *skbinfo)
return ((skbinfo->skbmark || skbinfo->skbmarkmask) &&
nla_put_net64(skb, IPSET_ATTR_SKBMARK,
cpu_to_be64((u64)skbinfo->skbmark << 32 |
- skbinfo->skbmarkmask))) ||
+ skbinfo->skbmarkmask),
+ IPSET_ATTR_PAD)) ||
(skbinfo->skbprio &&
nla_put_net32(skb, IPSET_ATTR_SKBPRIO,
cpu_to_be32(skbinfo->skbprio))) ||
@@ -370,9 +375,11 @@ static inline bool
ip_set_put_counter(struct sk_buff *skb, struct ip_set_counter *counter)
{
return nla_put_net64(skb, IPSET_ATTR_BYTES,
- cpu_to_be64(ip_set_get_bytes(counter))) ||
+ cpu_to_be64(ip_set_get_bytes(counter)),
+ IPSET_ATTR_PAD) ||
nla_put_net64(skb, IPSET_ATTR_PACKETS,
- cpu_to_be64(ip_set_get_packets(counter)));
+ cpu_to_be64(ip_set_get_packets(counter)),
+ IPSET_ATTR_PAD);
}
static inline void
diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h
index 80a305b85323..dc4f58a3cdcc 100644
--- a/include/linux/netfilter/x_tables.h
+++ b/include/linux/netfilter/x_tables.h
@@ -242,11 +242,18 @@ void xt_unregister_match(struct xt_match *target);
int xt_register_matches(struct xt_match *match, unsigned int n);
void xt_unregister_matches(struct xt_match *match, unsigned int n);
+int xt_check_entry_offsets(const void *base, const char *elems,
+ unsigned int target_offset,
+ unsigned int next_offset);
+
int xt_check_match(struct xt_mtchk_param *, unsigned int size, u_int8_t proto,
bool inv_proto);
int xt_check_target(struct xt_tgchk_param *, unsigned int size, u_int8_t proto,
bool inv_proto);
+void *xt_copy_counters_from_user(const void __user *user, unsigned int len,
+ struct xt_counters_info *info, bool compat);
+
struct xt_table *xt_register_table(struct net *net,
const struct xt_table *table,
struct xt_table_info *bootstrap,
@@ -373,16 +380,16 @@ static inline unsigned long ifname_compare_aligned(const char *_a,
* allows us to return 0 for single core systems without forcing
* callers to deal with SMP vs. NONSMP issues.
*/
-static inline u64 xt_percpu_counter_alloc(void)
+static inline unsigned long xt_percpu_counter_alloc(void)
{
if (nr_cpu_ids > 1) {
void __percpu *res = __alloc_percpu(sizeof(struct xt_counters),
sizeof(struct xt_counters));
if (res == NULL)
- return (u64) -ENOMEM;
+ return -ENOMEM;
- return (u64) (__force unsigned long) res;
+ return (__force unsigned long) res;
}
return 0;
@@ -480,7 +487,7 @@ void xt_compat_init_offsets(u_int8_t af, unsigned int number);
int xt_compat_calc_jump(u_int8_t af, unsigned int offset);
int xt_compat_match_offset(const struct xt_match *match);
-int xt_compat_match_from_user(struct xt_entry_match *m, void **dstptr,
+void xt_compat_match_from_user(struct xt_entry_match *m, void **dstptr,
unsigned int *size);
int xt_compat_match_to_user(const struct xt_entry_match *m,
void __user **dstptr, unsigned int *size);
@@ -490,6 +497,9 @@ void xt_compat_target_from_user(struct xt_entry_target *t, void **dstptr,
unsigned int *size);
int xt_compat_target_to_user(const struct xt_entry_target *t,
void __user **dstptr, unsigned int *size);
+int xt_compat_check_entry_offsets(const void *base, const char *elems,
+ unsigned int target_offset,
+ unsigned int next_offset);
#endif /* CONFIG_COMPAT */
#endif /* _X_TABLES_H */
diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h
index 67300f8e5f2f..d71278c3c5bd 100644
--- a/include/linux/nfs_fs.h
+++ b/include/linux/nfs_fs.h
@@ -163,11 +163,9 @@ struct nfs_inode {
/* Open contexts for shared mmap writes */
struct list_head open_files;
- /* Number of in-flight sillydelete RPC calls */
- atomic_t silly_count;
- /* List of deferred sillydelete requests */
- struct hlist_head silly_list;
- wait_queue_head_t waitqueue;
+ /* Readers: in-flight sillydelete RPC calls */
+ /* Writers: rmdir */
+ struct rw_semaphore rmdir_sem;
#if IS_ENABLED(CONFIG_NFS_V4)
struct nfs4_cached_acl *nfs4_acl;
@@ -445,10 +443,9 @@ static inline struct rpc_cred *nfs_file_cred(struct file *file)
/*
* linux/fs/nfs/direct.c
*/
-extern ssize_t nfs_direct_IO(struct kiocb *, struct iov_iter *, loff_t);
+extern ssize_t nfs_direct_IO(struct kiocb *, struct iov_iter *);
extern ssize_t nfs_file_direct_read(struct kiocb *iocb,
- struct iov_iter *iter,
- loff_t pos);
+ struct iov_iter *iter);
extern ssize_t nfs_file_direct_write(struct kiocb *iocb,
struct iov_iter *iter);
@@ -492,9 +489,6 @@ extern void nfs_release_automount_timer(void);
* linux/fs/nfs/unlink.c
*/
extern void nfs_complete_unlink(struct dentry *dentry, struct inode *);
-extern void nfs_wait_on_sillyrename(struct dentry *dentry);
-extern void nfs_block_sillyrename(struct dentry *dentry);
-extern void nfs_unblock_sillyrename(struct dentry *dentry);
/*
* linux/fs/nfs/write.c
diff --git a/include/linux/nfs_page.h b/include/linux/nfs_page.h
index f2f650f136ee..957049f72290 100644
--- a/include/linux/nfs_page.h
+++ b/include/linux/nfs_page.h
@@ -41,8 +41,8 @@ struct nfs_page {
struct page *wb_page; /* page to read in/write out */
struct nfs_open_context *wb_context; /* File state context info */
struct nfs_lock_context *wb_lock_context; /* lock context info */
- pgoff_t wb_index; /* Offset >> PAGE_CACHE_SHIFT */
- unsigned int wb_offset, /* Offset & ~PAGE_CACHE_MASK */
+ pgoff_t wb_index; /* Offset >> PAGE_SHIFT */
+ unsigned int wb_offset, /* Offset & ~PAGE_MASK */
wb_pgbase, /* Start of page data */
wb_bytes; /* Length of request */
struct kref wb_kref; /* reference count */
@@ -184,7 +184,7 @@ nfs_list_entry(struct list_head *head)
static inline
loff_t req_offset(struct nfs_page *req)
{
- return (((loff_t)req->wb_index) << PAGE_CACHE_SHIFT) + req->wb_offset;
+ return (((loff_t)req->wb_index) << PAGE_SHIFT) + req->wb_offset;
}
#endif /* _LINUX_NFS_PAGE_H */
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
index d320906cf13e..ee8491dadbf3 100644
--- a/include/linux/nfs_xdr.h
+++ b/include/linux/nfs_xdr.h
@@ -1468,10 +1468,10 @@ struct nfs_pgio_completion_ops {
};
struct nfs_unlinkdata {
- struct hlist_node list;
struct nfs_removeargs args;
struct nfs_removeres res;
- struct inode *dir;
+ struct dentry *dentry;
+ wait_queue_head_t wq;
struct rpc_cred *cred;
struct nfs_fattr dir_attr;
long timeout;
diff --git a/include/linux/nilfs2_fs.h b/include/linux/nilfs2_fs.h
index 9abb763e4b86..e9fcf90b270d 100644
--- a/include/linux/nilfs2_fs.h
+++ b/include/linux/nilfs2_fs.h
@@ -331,7 +331,7 @@ static inline unsigned nilfs_rec_len_from_disk(__le16 dlen)
{
unsigned len = le16_to_cpu(dlen);
-#if !defined(__KERNEL__) || (PAGE_CACHE_SIZE >= 65536)
+#if !defined(__KERNEL__) || (PAGE_SIZE >= 65536)
if (len == NILFS_MAX_REC_LEN)
return 1 << 16;
#endif
@@ -340,7 +340,7 @@ static inline unsigned nilfs_rec_len_from_disk(__le16 dlen)
static inline __le16 nilfs_rec_len_to_disk(unsigned len)
{
-#if !defined(__KERNEL__) || (PAGE_CACHE_SIZE >= 65536)
+#if !defined(__KERNEL__) || (PAGE_SIZE >= 65536)
if (len == (1 << 16))
return cpu_to_le16(NILFS_MAX_REC_LEN);
else if (len > (1 << 16))
diff --git a/include/linux/nl802154.h b/include/linux/nl802154.h
index 167342c2ce6b..0f6f6607f592 100644
--- a/include/linux/nl802154.h
+++ b/include/linux/nl802154.h
@@ -92,6 +92,8 @@ enum {
IEEE802154_ATTR_LLSEC_DEV_OVERRIDE,
IEEE802154_ATTR_LLSEC_DEV_KEY_MODE,
+ IEEE802154_ATTR_PAD,
+
__IEEE802154_ATTR_MAX,
};
diff --git a/include/linux/nodemask.h b/include/linux/nodemask.h
index 6e85889cf9ab..f746e44d4046 100644
--- a/include/linux/nodemask.h
+++ b/include/linux/nodemask.h
@@ -43,8 +43,10 @@
*
* int first_node(mask) Number lowest set bit, or MAX_NUMNODES
* int next_node(node, mask) Next node past 'node', or MAX_NUMNODES
+ * int next_node_in(node, mask) Next node past 'node', or wrap to first,
+ * or MAX_NUMNODES
* int first_unset_node(mask) First node not set in mask, or
- * MAX_NUMNODES.
+ * MAX_NUMNODES
*
* nodemask_t nodemask_of_node(node) Return nodemask with bit 'node' set
* NODE_MASK_ALL Initializer - all bits set
@@ -259,6 +261,13 @@ static inline int __next_node(int n, const nodemask_t *srcp)
return min_t(int,MAX_NUMNODES,find_next_bit(srcp->bits, MAX_NUMNODES, n+1));
}
+/*
+ * Find the next present node in src, starting after node n, wrapping around to
+ * the first node in src if needed. Returns MAX_NUMNODES if src is empty.
+ */
+#define next_node_in(n, src) __next_node_in((n), &(src))
+int __next_node_in(int node, const nodemask_t *srcp);
+
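A small sketch of round-robin node selection with the new wrap-around helper:

static int example_pick_next_node(int prev, const nodemask_t *allowed)
{
	int node = next_node_in(prev, *allowed);

	/* MAX_NUMNODES means the mask is empty */
	return node == MAX_NUMNODES ? NUMA_NO_NODE : node;
}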
static inline void init_nodemask_of_node(nodemask_t *mask, int node)
{
nodes_clear(*mask);
diff --git a/include/linux/nvme.h b/include/linux/nvme.h
index a55986f6fe38..7d51b2904cb7 100644
--- a/include/linux/nvme.h
+++ b/include/linux/nvme.h
@@ -21,13 +21,13 @@ enum {
NVME_REG_CAP = 0x0000, /* Controller Capabilities */
NVME_REG_VS = 0x0008, /* Version */
NVME_REG_INTMS = 0x000c, /* Interrupt Mask Set */
- NVME_REG_INTMC = 0x0010, /* Interrupt Mask Set */
+ NVME_REG_INTMC = 0x0010, /* Interrupt Mask Clear */
NVME_REG_CC = 0x0014, /* Controller Configuration */
NVME_REG_CSTS = 0x001c, /* Controller Status */
NVME_REG_NSSR = 0x0020, /* NVM Subsystem Reset */
NVME_REG_AQA = 0x0024, /* Admin Queue Attributes */
NVME_REG_ASQ = 0x0028, /* Admin SQ Base Address */
- NVME_REG_ACQ = 0x0030, /* Admin SQ Base Address */
+ NVME_REG_ACQ = 0x0030, /* Admin CQ Base Address */
NVME_REG_CMBLOC = 0x0038, /* Controller Memory Buffer Location */
NVME_REG_CMBSZ = 0x003c, /* Controller Memory Buffer Size */
};
diff --git a/include/linux/of.h b/include/linux/of.h
index 7fcb681baadf..c7292e8ea080 100644
--- a/include/linux/of.h
+++ b/include/linux/of.h
@@ -75,6 +75,23 @@ struct of_phandle_args {
uint32_t args[MAX_PHANDLE_ARGS];
};
+struct of_phandle_iterator {
+ /* Common iterator information */
+ const char *cells_name;
+ int cell_count;
+ const struct device_node *parent;
+
+ /* List size information */
+ const __be32 *list_end;
+ const __be32 *phandle_end;
+
+ /* Current position state */
+ const __be32 *cur;
+ uint32_t cur_count;
+ phandle phandle;
+ struct device_node *node;
+};
+
struct of_reconfig_data {
struct device_node *dn;
struct property *prop;
@@ -133,7 +150,7 @@ void of_core_init(void);
static inline bool is_of_node(struct fwnode_handle *fwnode)
{
- return fwnode && fwnode->type == FWNODE_OF;
+ return !IS_ERR_OR_NULL(fwnode) && fwnode->type == FWNODE_OF;
}
static inline struct device_node *to_of_node(struct fwnode_handle *fwnode)
@@ -334,6 +351,18 @@ extern int of_parse_phandle_with_fixed_args(const struct device_node *np,
extern int of_count_phandle_with_args(const struct device_node *np,
const char *list_name, const char *cells_name);
+/* phandle iterator functions */
+extern int of_phandle_iterator_init(struct of_phandle_iterator *it,
+ const struct device_node *np,
+ const char *list_name,
+ const char *cells_name,
+ int cell_count);
+
+extern int of_phandle_iterator_next(struct of_phandle_iterator *it);
+extern int of_phandle_iterator_args(struct of_phandle_iterator *it,
+ uint32_t *args,
+ int size);
+
extern void of_alias_scan(void * (*dt_alloc)(u64 size, u64 align));
extern int of_alias_get_id(struct device_node *np, const char *stem);
extern int of_alias_get_highest_id(const char *stem);
@@ -608,6 +637,27 @@ static inline int of_count_phandle_with_args(struct device_node *np,
return -ENOSYS;
}
+static inline int of_phandle_iterator_init(struct of_phandle_iterator *it,
+ const struct device_node *np,
+ const char *list_name,
+ const char *cells_name,
+ int cell_count)
+{
+ return -ENOSYS;
+}
+
+static inline int of_phandle_iterator_next(struct of_phandle_iterator *it)
+{
+ return -ENOSYS;
+}
+
+static inline int of_phandle_iterator_args(struct of_phandle_iterator *it,
+ uint32_t *args,
+ int size)
+{
+ return 0;
+}
+
static inline int of_alias_get_id(struct device_node *np, const char *stem)
{
return -ENOSYS;
@@ -685,6 +735,15 @@ static inline int of_node_to_nid(struct device_node *device)
}
#endif
+#ifdef CONFIG_OF_NUMA
+extern int of_numa_init(void);
+#else
+static inline int of_numa_init(void)
+{
+ return -ENOSYS;
+}
+#endif
+
static inline struct device_node *of_find_matching_node(
struct device_node *from,
const struct of_device_id *matches)
@@ -868,6 +927,12 @@ static inline int of_property_read_s32(const struct device_node *np,
return of_property_read_u32(np, propname, (u32*) out_value);
}
+#define of_for_each_phandle(it, err, np, ln, cn, cc) \
+ for (of_phandle_iterator_init((it), (np), (ln), (cn), (cc)), \
+ err = of_phandle_iterator_next(it); \
+ err == 0; \
+ err = of_phandle_iterator_next(it))
+
#define of_property_for_each_u32(np, propname, prop, p, u) \
for (prop = of_find_property(np, propname, NULL), \
p = of_prop_next_u32(prop, NULL, &u); \
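
A hedged sketch of how the new iterator is meant to be used (the property names are made up, and of_phandle_iterator_args() is assumed to return the number of argument cells copied):

#include <linux/of.h>
#include <linux/printk.h>

static void hypothetical_walk_phandles(struct device_node *np)
{
	struct of_phandle_iterator it;
	uint32_t args[MAX_PHANDLE_ARGS];
	int err;

	/* visits each <&target cells...> entry until the list ends or an error occurs */
	of_for_each_phandle(&it, err, np, "hypothetical-list",
			    "#hypothetical-cells", 0) {
		int count = of_phandle_iterator_args(&it, args, MAX_PHANDLE_ARGS);

		pr_info("%s: %d argument cells\n", it.node->full_name, count);
	}
}
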
diff --git a/include/linux/of_address.h b/include/linux/of_address.h
index 01c0a556448b..37864734ca50 100644
--- a/include/linux/of_address.h
+++ b/include/linux/of_address.h
@@ -47,10 +47,6 @@ void __iomem *of_io_request_and_map(struct device_node *device,
extern const __be32 *of_get_address(struct device_node *dev, int index,
u64 *size, unsigned int *flags);
-extern int pci_register_io_range(phys_addr_t addr, resource_size_t size);
-extern unsigned long pci_address_to_pio(phys_addr_t addr);
-extern phys_addr_t pci_pio_to_address(unsigned long pio);
-
extern int of_pci_range_parser_init(struct of_pci_range_parser *parser,
struct device_node *node);
extern struct of_pci_range *of_pci_range_parser_one(
@@ -86,11 +82,6 @@ static inline const __be32 *of_get_address(struct device_node *dev, int index,
return NULL;
}
-static inline phys_addr_t pci_pio_to_address(unsigned long pio)
-{
- return 0;
-}
-
static inline int of_pci_range_parser_init(struct of_pci_range_parser *parser,
struct device_node *node)
{
diff --git a/include/linux/of_fdt.h b/include/linux/of_fdt.h
index 2fbe8682a66f..901ec01c9fba 100644
--- a/include/linux/of_fdt.h
+++ b/include/linux/of_fdt.h
@@ -37,8 +37,9 @@ extern bool of_fdt_is_big_endian(const void *blob,
unsigned long node);
extern int of_fdt_match(const void *blob, unsigned long node,
const char *const *compat);
-extern void of_fdt_unflatten_tree(const unsigned long *blob,
- struct device_node **mynodes);
+extern void *of_fdt_unflatten_tree(const unsigned long *blob,
+ struct device_node *dad,
+ struct device_node **mynodes);
/* TBD: Temporary export of fdt globals - remove when code fully merged */
extern int __initdata dt_root_addr_cells;
diff --git a/include/linux/of_graph.h b/include/linux/of_graph.h
index f8bcd0e21a26..bb3a5a2cd570 100644
--- a/include/linux/of_graph.h
+++ b/include/linux/of_graph.h
@@ -15,6 +15,7 @@
#define __LINUX_OF_GRAPH_H
#include <linux/types.h>
+#include <linux/errno.h>
/**
* struct of_endpoint - the OF graph endpoint data structure
diff --git a/include/linux/of_iommu.h b/include/linux/of_iommu.h
index ffbe4707d4aa..bd02b44902d0 100644
--- a/include/linux/of_iommu.h
+++ b/include/linux/of_iommu.h
@@ -12,7 +12,7 @@ extern int of_get_dma_window(struct device_node *dn, const char *prefix,
size_t *size);
extern void of_iommu_init(void);
-extern struct iommu_ops *of_iommu_configure(struct device *dev,
+extern const struct iommu_ops *of_iommu_configure(struct device *dev,
struct device_node *master_np);
#else
@@ -25,7 +25,7 @@ static inline int of_get_dma_window(struct device_node *dn, const char *prefix,
}
static inline void of_iommu_init(void) { }
-static inline struct iommu_ops *of_iommu_configure(struct device *dev,
+static inline const struct iommu_ops *of_iommu_configure(struct device *dev,
struct device_node *master_np)
{
return NULL;
@@ -33,8 +33,8 @@ static inline struct iommu_ops *of_iommu_configure(struct device *dev,
#endif /* CONFIG_OF_IOMMU */
-void of_iommu_set_ops(struct device_node *np, struct iommu_ops *ops);
-struct iommu_ops *of_iommu_get_ops(struct device_node *np);
+void of_iommu_set_ops(struct device_node *np, const struct iommu_ops *ops);
+const struct iommu_ops *of_iommu_get_ops(struct device_node *np);
extern struct of_device_id __iommu_of_table;
diff --git a/include/linux/oom.h b/include/linux/oom.h
index 628a43242a34..83b9c39bd8b7 100644
--- a/include/linux/oom.h
+++ b/include/linux/oom.h
@@ -72,6 +72,14 @@ static inline bool oom_task_origin(const struct task_struct *p)
extern void mark_oom_victim(struct task_struct *tsk);
+#ifdef CONFIG_MMU
+extern void try_oom_reaper(struct task_struct *tsk);
+#else
+static inline void try_oom_reaper(struct task_struct *tsk)
+{
+}
+#endif
+
extern unsigned long oom_badness(struct task_struct *p,
struct mem_cgroup *memcg, const nodemask_t *nodemask,
unsigned long totalpages);
diff --git a/include/linux/padata.h b/include/linux/padata.h
index 438694650471..113ee626a4dc 100644
--- a/include/linux/padata.h
+++ b/include/linux/padata.h
@@ -175,11 +175,6 @@ extern int padata_do_parallel(struct padata_instance *pinst,
extern void padata_do_serial(struct padata_priv *padata);
extern int padata_set_cpumask(struct padata_instance *pinst, int cpumask_type,
cpumask_var_t cpumask);
-extern int padata_set_cpumasks(struct padata_instance *pinst,
- cpumask_var_t pcpumask,
- cpumask_var_t cbcpumask);
-extern int padata_add_cpu(struct padata_instance *pinst, int cpu, int mask);
-extern int padata_remove_cpu(struct padata_instance *pinst, int cpu, int mask);
extern int padata_start(struct padata_instance *pinst);
extern void padata_stop(struct padata_instance *pinst);
extern int padata_register_cpumask_notifier(struct padata_instance *pinst,
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index f4ed4f1b0c77..a61e06e5fbce 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -371,10 +371,15 @@ PAGEFLAG(Idle, idle, PF_ANY)
#define PAGE_MAPPING_KSM 2
#define PAGE_MAPPING_FLAGS (PAGE_MAPPING_ANON | PAGE_MAPPING_KSM)
+static __always_inline int PageAnonHead(struct page *page)
+{
+ return ((unsigned long)page->mapping & PAGE_MAPPING_ANON) != 0;
+}
+
static __always_inline int PageAnon(struct page *page)
{
page = compound_head(page);
- return ((unsigned long)page->mapping & PAGE_MAPPING_ANON) != 0;
+ return PageAnonHead(page);
}
#ifdef CONFIG_KSM
@@ -517,6 +522,27 @@ static inline int PageTransCompound(struct page *page)
}
/*
+ * PageTransCompoundMap is the same as PageTransCompound, but it also
+ * guarantees the primary MMU has the entire compound page mapped
+ * through pmd_trans_huge, which in turn guarantees the secondary MMUs
+ * can also map the entire compound page. This allows the secondary
+ * MMUs to call get_user_pages() only once for each compound page and
+ * to immediately map the entire compound page with a single secondary
+ * MMU fault. If there will be a pmd split later, the secondary MMUs
+ * will get an update through the MMU notifier invalidation through
+ * split_huge_pmd().
+ *
+ * Unlike PageTransCompound, this is only safe to call while
+ * split_huge_pmd() cannot run from under us, e.g. when protected by the
+ * MMU notifier; otherwise the page->_mapcount < 0 check may produce
+ * false positives.
+ */
+static inline int PageTransCompoundMap(struct page *page)
+{
+ return PageTransCompound(page) && atomic_read(&page->_mapcount) < 0;
+}
+
+/*
* PageTransTail returns true for both transparent huge pages
* and hugetlbfs pages, so it should only be called when it's known
* that hugetlbfs pages aren't involved.
@@ -559,6 +585,7 @@ static inline int TestClearPageDoubleMap(struct page *page)
#else
TESTPAGEFLAG_FALSE(TransHuge)
TESTPAGEFLAG_FALSE(TransCompound)
+TESTPAGEFLAG_FALSE(TransCompoundMap)
TESTPAGEFLAG_FALSE(TransTail)
TESTPAGEFLAG_FALSE(DoubleMap)
TESTSETFLAG_FALSE(DoubleMap)
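
A minimal sketch of the intended caller, a secondary-MMU path that is already serialized against split_huge_pmd() (for instance by its MMU notifier); the function name is hypothetical:

#include <linux/page-flags.h>

/* Decide whether one huge secondary-MMU mapping can cover the whole compound page. */
static bool hypothetical_can_map_compound(struct page *page)
{
	/* caller must prevent split_huge_pmd() from running concurrently */
	return PageTransCompoundMap(page);
}
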
diff --git a/include/linux/page_ref.h b/include/linux/page_ref.h
index e596d5d9540e..8b5e0a9f2431 100644
--- a/include/linux/page_ref.h
+++ b/include/linux/page_ref.h
@@ -63,17 +63,17 @@ static inline void __page_ref_unfreeze(struct page *page, int v)
static inline int page_ref_count(struct page *page)
{
- return atomic_read(&page->_count);
+ return atomic_read(&page->_refcount);
}
static inline int page_count(struct page *page)
{
- return atomic_read(&compound_head(page)->_count);
+ return atomic_read(&compound_head(page)->_refcount);
}
static inline void set_page_count(struct page *page, int v)
{
- atomic_set(&page->_count, v);
+ atomic_set(&page->_refcount, v);
if (page_ref_tracepoint_active(__tracepoint_page_ref_set))
__page_ref_set(page, v);
}
@@ -89,35 +89,35 @@ static inline void init_page_count(struct page *page)
static inline void page_ref_add(struct page *page, int nr)
{
- atomic_add(nr, &page->_count);
+ atomic_add(nr, &page->_refcount);
if (page_ref_tracepoint_active(__tracepoint_page_ref_mod))
__page_ref_mod(page, nr);
}
static inline void page_ref_sub(struct page *page, int nr)
{
- atomic_sub(nr, &page->_count);
+ atomic_sub(nr, &page->_refcount);
if (page_ref_tracepoint_active(__tracepoint_page_ref_mod))
__page_ref_mod(page, -nr);
}
static inline void page_ref_inc(struct page *page)
{
- atomic_inc(&page->_count);
+ atomic_inc(&page->_refcount);
if (page_ref_tracepoint_active(__tracepoint_page_ref_mod))
__page_ref_mod(page, 1);
}
static inline void page_ref_dec(struct page *page)
{
- atomic_dec(&page->_count);
+ atomic_dec(&page->_refcount);
if (page_ref_tracepoint_active(__tracepoint_page_ref_mod))
__page_ref_mod(page, -1);
}
static inline int page_ref_sub_and_test(struct page *page, int nr)
{
- int ret = atomic_sub_and_test(nr, &page->_count);
+ int ret = atomic_sub_and_test(nr, &page->_refcount);
if (page_ref_tracepoint_active(__tracepoint_page_ref_mod_and_test))
__page_ref_mod_and_test(page, -nr, ret);
@@ -126,7 +126,7 @@ static inline int page_ref_sub_and_test(struct page *page, int nr)
static inline int page_ref_dec_and_test(struct page *page)
{
- int ret = atomic_dec_and_test(&page->_count);
+ int ret = atomic_dec_and_test(&page->_refcount);
if (page_ref_tracepoint_active(__tracepoint_page_ref_mod_and_test))
__page_ref_mod_and_test(page, -1, ret);
@@ -135,7 +135,7 @@ static inline int page_ref_dec_and_test(struct page *page)
static inline int page_ref_dec_return(struct page *page)
{
- int ret = atomic_dec_return(&page->_count);
+ int ret = atomic_dec_return(&page->_refcount);
if (page_ref_tracepoint_active(__tracepoint_page_ref_mod_and_return))
__page_ref_mod_and_return(page, -1, ret);
@@ -144,7 +144,7 @@ static inline int page_ref_dec_return(struct page *page)
static inline int page_ref_add_unless(struct page *page, int nr, int u)
{
- int ret = atomic_add_unless(&page->_count, nr, u);
+ int ret = atomic_add_unless(&page->_refcount, nr, u);
if (page_ref_tracepoint_active(__tracepoint_page_ref_mod_unless))
__page_ref_mod_unless(page, nr, ret);
@@ -153,7 +153,7 @@ static inline int page_ref_add_unless(struct page *page, int nr, int u)
static inline int page_ref_freeze(struct page *page, int count)
{
- int ret = likely(atomic_cmpxchg(&page->_count, count, 0) == count);
+ int ret = likely(atomic_cmpxchg(&page->_refcount, count, 0) == count);
if (page_ref_tracepoint_active(__tracepoint_page_ref_freeze))
__page_ref_freeze(page, count, ret);
@@ -165,7 +165,7 @@ static inline void page_ref_unfreeze(struct page *page, int count)
VM_BUG_ON_PAGE(page_count(page) != 0, page);
VM_BUG_ON(count == 0);
- atomic_set(&page->_count, count);
+ atomic_set(&page->_refcount, count);
if (page_ref_tracepoint_active(__tracepoint_page_ref_unfreeze))
__page_ref_unfreeze(page, count);
}
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 1ebd65c91422..fe1513ffb7bf 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -86,31 +86,16 @@ static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask)
(__force unsigned long)mask;
}
-/*
- * The page cache can be done in larger chunks than
- * one page, because it allows for more efficient
- * throughput (it can then be mapped into user
- * space in smaller chunks for same flexibility).
- *
- * Or rather, it _will_ be done in larger chunks.
- */
-#define PAGE_CACHE_SHIFT PAGE_SHIFT
-#define PAGE_CACHE_SIZE PAGE_SIZE
-#define PAGE_CACHE_MASK PAGE_MASK
-#define PAGE_CACHE_ALIGN(addr) (((addr)+PAGE_CACHE_SIZE-1)&PAGE_CACHE_MASK)
-
-#define page_cache_get(page) get_page(page)
-#define page_cache_release(page) put_page(page)
void release_pages(struct page **pages, int nr, bool cold);
/*
* speculatively take a reference to a page.
- * If the page is free (_count == 0), then _count is untouched, and 0
- * is returned. Otherwise, _count is incremented by 1 and 1 is returned.
+ * If the page is free (_refcount == 0), then _refcount is untouched, and 0
+ * is returned. Otherwise, _refcount is incremented by 1 and 1 is returned.
*
* This function must be called inside the same rcu_read_lock() section as has
* been used to lookup the page in the pagecache radix-tree (or page table):
- * this allows allocators to use a synchronize_rcu() to stabilize _count.
+ * this allows allocators to use a synchronize_rcu() to stabilize _refcount.
*
* Unless an RCU grace period has passed, the count of all pages coming out
* of the allocator must be considered unstable. page_count may return higher
@@ -126,7 +111,7 @@ void release_pages(struct page **pages, int nr, bool cold);
* 2. conditionally increment refcount
* 3. check the page is still in pagecache (if no, goto 1)
*
- * Remove-side that cares about stability of _count (eg. reclaim) has the
+ * Remove-side that cares about stability of _refcount (eg. reclaim) has the
* following (with tree_lock held for write):
* A. atomically check refcount is correct and set it to 0 (atomic_cmpxchg)
* B. remove page from pagecache
@@ -390,13 +375,13 @@ static inline pgoff_t page_to_pgoff(struct page *page)
return page->index << compound_order(page);
if (likely(!PageTransTail(page)))
- return page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
+ return page->index;
/*
* We don't initialize ->index for tail pages: calculate based on
* head page
*/
- pgoff = compound_head(page)->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
+ pgoff = compound_head(page)->index;
pgoff += page - compound_head(page);
return pgoff;
}
@@ -406,12 +391,12 @@ static inline pgoff_t page_to_pgoff(struct page *page)
*/
static inline loff_t page_offset(struct page *page)
{
- return ((loff_t)page->index) << PAGE_CACHE_SHIFT;
+ return ((loff_t)page->index) << PAGE_SHIFT;
}
static inline loff_t page_file_offset(struct page *page)
{
- return ((loff_t)page_file_index(page)) << PAGE_CACHE_SHIFT;
+ return ((loff_t)page_file_index(page)) << PAGE_SHIFT;
}
extern pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
@@ -425,7 +410,7 @@ static inline pgoff_t linear_page_index(struct vm_area_struct *vma,
return linear_hugepage_index(vma, address);
pgoff = (address - vma->vm_start) >> PAGE_SHIFT;
pgoff += vma->vm_pgoff;
- return pgoff >> (PAGE_CACHE_SHIFT - PAGE_SHIFT);
+ return pgoff;
}
extern void __lock_page(struct page *page);
@@ -535,8 +520,7 @@ extern void add_page_wait_queue(struct page *page, wait_queue_t *waiter);
/*
* Fault a userspace page into pagetables. Return non-zero on a fault.
*
- * This assumes that two userspace pages are always sufficient. That's
- * not true if PAGE_CACHE_SIZE > PAGE_SIZE.
+ * This assumes that two userspace pages are always sufficient.
*/
static inline int fault_in_pages_writeable(char __user *uaddr, int size)
{
@@ -671,8 +655,8 @@ static inline int add_to_page_cache(struct page *page,
static inline unsigned long dir_pages(struct inode *inode)
{
- return (unsigned long)(inode->i_size + PAGE_CACHE_SIZE - 1) >>
- PAGE_CACHE_SHIFT;
+ return (unsigned long)(inode->i_size + PAGE_SIZE - 1) >>
+ PAGE_SHIFT;
}
#endif /* _LINUX_PAGEMAP_H */
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 004b8133417d..b67e4df20801 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -166,8 +166,6 @@ enum pci_dev_flags {
PCI_DEV_FLAGS_ASSIGNED = (__force pci_dev_flags_t) (1 << 2),
/* Flag for quirk use to store if quirk-specific ACS is enabled */
PCI_DEV_FLAGS_ACS_ENABLED_QUIRK = (__force pci_dev_flags_t) (1 << 3),
- /* Flag to indicate the device uses dma_alias_devfn */
- PCI_DEV_FLAGS_DMA_ALIAS_DEVFN = (__force pci_dev_flags_t) (1 << 4),
/* Use a PCIe-to-PCI bridge alias even if !pci_is_pcie */
PCI_DEV_FLAG_PCIE_BRIDGE_ALIAS = (__force pci_dev_flags_t) (1 << 5),
/* Do not use bus resets for device */
@@ -273,7 +271,7 @@ struct pci_dev {
u8 rom_base_reg; /* which config register controls the ROM */
u8 pin; /* which interrupt pin this device uses */
u16 pcie_flags_reg; /* cached PCIe Capabilities Register */
- u8 dma_alias_devfn;/* devfn of DMA alias, if any */
+ unsigned long *dma_alias_mask;/* mask of enabled devfn aliases */
struct pci_driver *driver; /* which driver has allocated this device */
u64 dma_mask; /* Mask of the bits of bus address this
@@ -1111,6 +1109,7 @@ void pci_unlock_rescan_remove(void);
/* Vital product data routines */
ssize_t pci_read_vpd(struct pci_dev *dev, loff_t pos, size_t count, void *buf);
ssize_t pci_write_vpd(struct pci_dev *dev, loff_t pos, size_t count, const void *buf);
+int pci_set_vpd_size(struct pci_dev *dev, size_t len);
/* Helper functions for low-level code (drivers/pci/setup-[bus,res].c) */
resource_size_t pcibios_retrieve_fw_addr(struct pci_dev *dev, int idx);
@@ -1164,6 +1163,9 @@ int __must_check pci_bus_alloc_resource(struct pci_bus *bus,
void *alignf_data);
+int pci_register_io_range(phys_addr_t addr, resource_size_t size);
+unsigned long pci_address_to_pio(phys_addr_t addr);
+phys_addr_t pci_pio_to_address(unsigned long pio);
int pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr);
static inline pci_bus_addr_t pci_bus_address(struct pci_dev *pdev, int bar)
@@ -1480,6 +1482,8 @@ static inline int pci_request_regions(struct pci_dev *dev, const char *res_name)
{ return -EIO; }
static inline void pci_release_regions(struct pci_dev *dev) { }
+static inline unsigned long pci_address_to_pio(phys_addr_t addr) { return -1; }
+
static inline void pci_block_cfg_access(struct pci_dev *dev) { }
static inline int pci_block_cfg_access_in_atomic(struct pci_dev *dev)
{ return 0; }
@@ -1663,7 +1667,7 @@ enum pci_fixup_pass {
#ifdef CONFIG_PCI_QUIRKS
void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev);
int pci_dev_specific_acs_enabled(struct pci_dev *dev, u16 acs_flags);
-void pci_dev_specific_enable_acs(struct pci_dev *dev);
+int pci_dev_specific_enable_acs(struct pci_dev *dev);
#else
static inline void pci_fixup_device(enum pci_fixup_pass pass,
struct pci_dev *dev) { }
@@ -1672,7 +1676,10 @@ static inline int pci_dev_specific_acs_enabled(struct pci_dev *dev,
{
return -ENOTTY;
}
-static inline void pci_dev_specific_enable_acs(struct pci_dev *dev) { }
+static inline int pci_dev_specific_enable_acs(struct pci_dev *dev)
+{
+ return -ENOTTY;
+}
#endif
void __iomem *pcim_iomap(struct pci_dev *pdev, int bar, unsigned long maxlen);
@@ -1988,6 +1995,8 @@ static inline struct eeh_dev *pci_dev_to_eeh_dev(struct pci_dev *pdev)
}
#endif
+void pci_add_dma_alias(struct pci_dev *dev, u8 devfn);
+bool pci_devs_are_dma_aliases(struct pci_dev *dev1, struct pci_dev *dev2);
int pci_for_each_dma_alias(struct pci_dev *pdev,
int (*fn)(struct pci_dev *pdev,
u16 alias, void *data), void *data);
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index 247da8c95860..c58752fe16c4 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -2604,6 +2604,24 @@
#define PCI_DEVICE_ID_INTEL_82441 0x1237
#define PCI_DEVICE_ID_INTEL_82380FB 0x124b
#define PCI_DEVICE_ID_INTEL_82439 0x1250
+#define PCI_DEVICE_ID_INTEL_LIGHT_RIDGE 0x1513 /* Tbt 1 Gen 1 */
+#define PCI_DEVICE_ID_INTEL_EAGLE_RIDGE 0x151a
+#define PCI_DEVICE_ID_INTEL_LIGHT_PEAK 0x151b
+#define PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C 0x1547 /* Tbt 1 Gen 2 */
+#define PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_2C 0x1548
+#define PCI_DEVICE_ID_INTEL_PORT_RIDGE 0x1549
+#define PCI_DEVICE_ID_INTEL_REDWOOD_RIDGE_2C_NHI 0x1566 /* Tbt 1 Gen 3 */
+#define PCI_DEVICE_ID_INTEL_REDWOOD_RIDGE_2C_BRIDGE 0x1567
+#define PCI_DEVICE_ID_INTEL_REDWOOD_RIDGE_4C_NHI 0x1568
+#define PCI_DEVICE_ID_INTEL_REDWOOD_RIDGE_4C_BRIDGE 0x1569
+#define PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_NHI 0x156a /* Thunderbolt 2 */
+#define PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_BRIDGE 0x156b
+#define PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_NHI 0x156c
+#define PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_BRIDGE 0x156d
+#define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_NHI 0x1575 /* Thunderbolt 3 */
+#define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_BRIDGE 0x1576
+#define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_NHI 0x1577
+#define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_BRIDGE 0x1578
#define PCI_DEVICE_ID_INTEL_80960_RP 0x1960
#define PCI_DEVICE_ID_INTEL_82840_HB 0x1a21
#define PCI_DEVICE_ID_INTEL_82845_HB 0x1a30
diff --git a/include/linux/pcieport_if.h b/include/linux/pcieport_if.h
index 4f1089f2cc98..afcd130ab3a9 100644
--- a/include/linux/pcieport_if.h
+++ b/include/linux/pcieport_if.h
@@ -21,6 +21,8 @@
#define PCIE_PORT_SERVICE_HP (1 << PCIE_PORT_SERVICE_HP_SHIFT)
#define PCIE_PORT_SERVICE_VC_SHIFT 3 /* Virtual Channel */
#define PCIE_PORT_SERVICE_VC (1 << PCIE_PORT_SERVICE_VC_SHIFT)
+#define PCIE_PORT_SERVICE_DPC_SHIFT 4 /* Downstream Port Containment */
+#define PCIE_PORT_SERVICE_DPC (1 << PCIE_PORT_SERVICE_DPC_SHIFT)
struct pcie_device {
int irq; /* Service IRQ/MSI/MSI-X Vector */
diff --git a/include/linux/perf/arm_pmu.h b/include/linux/perf/arm_pmu.h
index 4196c90a3c88..d28ac05c7f92 100644
--- a/include/linux/perf/arm_pmu.h
+++ b/include/linux/perf/arm_pmu.h
@@ -105,6 +105,8 @@ struct arm_pmu {
struct mutex reserve_mutex;
u64 max_period;
bool secure_access; /* 32-bit ARM only */
+#define ARMV8_PMUV3_MAX_COMMON_EVENTS 0x40
+ DECLARE_BITMAP(pmceid_bitmap, ARMV8_PMUV3_MAX_COMMON_EVENTS);
struct platform_device *plat_device;
struct pmu_hw_events __percpu *hw_events;
struct notifier_block hotplug_nb;
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index f291275ffd71..44f33834ad78 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -58,7 +58,7 @@ struct perf_guest_info_callbacks {
struct perf_callchain_entry {
__u64 nr;
- __u64 ip[PERF_MAX_STACK_DEPTH];
+ __u64 ip[0]; /* /proc/sys/kernel/perf_event_max_stack */
};
struct perf_raw_record {
@@ -151,6 +151,15 @@ struct hw_perf_event {
*/
struct task_struct *target;
+ /*
+ * The PMU stores its hardware filter configuration here.
+ */
+ void *addr_filters;
+
+ /* Last sync'ed generation of filters */
+ unsigned long addr_filters_gen;
+
/*
* hw_perf_event::state flags; used to track the PERF_EF_* state.
*/
@@ -216,6 +225,7 @@ struct perf_event;
#define PERF_PMU_CAP_AUX_SW_DOUBLEBUF 0x08
#define PERF_PMU_CAP_EXCLUSIVE 0x10
#define PERF_PMU_CAP_ITRACE 0x20
+#define PERF_PMU_CAP_HETEROGENEOUS_CPUS 0x40
/**
* struct pmu - generic performance monitoring unit
@@ -240,6 +250,9 @@ struct pmu {
int task_ctx_nr;
int hrtimer_interval_ms;
+ /* number of address filters this PMU can do */
+ unsigned int nr_addr_filters;
+
/*
* Fully disable/enable this PMU, can be used to protect from the PMI
* as well as for lazy/batch writing of the MSRs.
@@ -393,12 +406,71 @@ struct pmu {
void (*free_aux) (void *aux); /* optional */
/*
+ * Validate address range filters: make sure the HW supports the
+ * requested configuration and number of filters; return 0 if the
+ * supplied filters are valid, -errno otherwise.
+ *
+ * Runs in the context of the ioctl()ing process and is not serialized
+ * with the rest of the PMU callbacks.
+ */
+ int (*addr_filters_validate) (struct list_head *filters);
+ /* optional */
+
+ /*
+ * Synchronize address range filter configuration:
+ * translate hw-agnostic filters into hardware configuration in
+ * event::hw::addr_filters.
+ *
+ * Runs as part of the filter sync sequence that is done in the ->start()
+ * callback by calling perf_event_addr_filters_sync().
+ *
+ * May (and should) traverse event::addr_filters::list, for which its
+ * caller provides necessary serialization.
+ */
+ void (*addr_filters_sync) (struct perf_event *event);
+ /* optional */
+
+ /*
* Filter events for PMU-specific reasons.
*/
int (*filter_match) (struct perf_event *event); /* optional */
};
/**
+ * struct perf_addr_filter - address range filter definition
+ * @entry: event's filter list linkage
+ * @inode: object file's inode for file-based filters
+ * @offset: filter range offset
+ * @size: filter range size
+ * @range: 1: range, 0: address
+ * @filter: 1: filter/start, 0: stop
+ *
+ * This is a hardware-agnostic filter configuration as specified by the user.
+ */
+struct perf_addr_filter {
+ struct list_head entry;
+ struct inode *inode;
+ unsigned long offset;
+ unsigned long size;
+ unsigned int range : 1,
+ filter : 1;
+};
+
+/**
+ * struct perf_addr_filters_head - container for address range filters
+ * @list: list of filters for this event
+ * @lock: spinlock that serializes accesses to the @list and event's
+ * (and its children's) filter generations.
+ *
+ * A child event will use parent's @list (and therefore @lock), so they are
+ * bundled together; see perf_event_addr_filters().
+ */
+struct perf_addr_filters_head {
+ struct list_head list;
+ raw_spinlock_t lock;
+};
+
+/**
* enum perf_event_active_state - the states of a event
*/
enum perf_event_active_state {
@@ -566,6 +638,12 @@ struct perf_event {
atomic_t event_limit;
+ /* address range filters */
+ struct perf_addr_filters_head addr_filters;
+ /* vma address array for file-based filters */
+ unsigned long *addr_filters_offs;
+ unsigned long addr_filters_gen;
+
void (*destroy)(struct perf_event *);
struct rcu_head rcu_head;
@@ -834,9 +912,25 @@ extern int perf_event_overflow(struct perf_event *event,
struct perf_sample_data *data,
struct pt_regs *regs);
+extern void perf_event_output_forward(struct perf_event *event,
+ struct perf_sample_data *data,
+ struct pt_regs *regs);
+extern void perf_event_output_backward(struct perf_event *event,
+ struct perf_sample_data *data,
+ struct pt_regs *regs);
extern void perf_event_output(struct perf_event *event,
- struct perf_sample_data *data,
- struct pt_regs *regs);
+ struct perf_sample_data *data,
+ struct pt_regs *regs);
+
+static inline bool
+is_default_overflow_handler(struct perf_event *event)
+{
+ if (likely(event->overflow_handler == perf_event_output_forward))
+ return true;
+ if (unlikely(event->overflow_handler == perf_event_output_backward))
+ return true;
+ return false;
+}
extern void
perf_event_header__init_id(struct perf_event_header *header,
@@ -882,8 +976,6 @@ static inline void perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned lo
*/
static inline void perf_fetch_caller_regs(struct pt_regs *regs)
{
- memset(regs, 0, sizeof(*regs));
-
perf_arch_fetch_caller_regs(regs, CALLER_ADDR0);
}
@@ -977,9 +1069,11 @@ get_perf_callchain(struct pt_regs *regs, u32 init_nr, bool kernel, bool user,
extern int get_callchain_buffers(void);
extern void put_callchain_buffers(void);
+extern int sysctl_perf_event_max_stack;
+
static inline int perf_callchain_store(struct perf_callchain_entry *entry, u64 ip)
{
- if (entry->nr < PERF_MAX_STACK_DEPTH) {
+ if (entry->nr < sysctl_perf_event_max_stack) {
entry->ip[entry->nr++] = ip;
return 0;
} else {
@@ -1001,6 +1095,8 @@ extern int perf_cpu_time_max_percent_handler(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp,
loff_t *ppos);
+int perf_event_max_stack_handler(struct ctl_table *table, int write,
+ void __user *buffer, size_t *lenp, loff_t *ppos);
static inline bool perf_paranoid_tracepoint_raw(void)
{
@@ -1018,7 +1114,7 @@ static inline bool perf_paranoid_kernel(void)
}
extern void perf_event_init(void);
-extern void perf_tp_event(u64 addr, u64 count, void *record,
+extern void perf_tp_event(u16 event_type, u64 count, void *record,
int entry_size, struct pt_regs *regs,
struct hlist_head *head, int rctx,
struct task_struct *task);
@@ -1045,8 +1141,41 @@ static inline bool has_aux(struct perf_event *event)
return event->pmu->setup_aux;
}
+static inline bool is_write_backward(struct perf_event *event)
+{
+ return !!event->attr.write_backward;
+}
+
+static inline bool has_addr_filter(struct perf_event *event)
+{
+ return event->pmu->nr_addr_filters;
+}
+
+/*
+ * An inherited event uses parent's filters
+ */
+static inline struct perf_addr_filters_head *
+perf_event_addr_filters(struct perf_event *event)
+{
+ struct perf_addr_filters_head *ifh = &event->addr_filters;
+
+ if (event->parent)
+ ifh = &event->parent->addr_filters;
+
+ return ifh;
+}
+
+extern void perf_event_addr_filters_sync(struct perf_event *event);
+
extern int perf_output_begin(struct perf_output_handle *handle,
struct perf_event *event, unsigned int size);
+extern int perf_output_begin_forward(struct perf_output_handle *handle,
+ struct perf_event *event,
+ unsigned int size);
+extern int perf_output_begin_backward(struct perf_output_handle *handle,
+ struct perf_event *event,
+ unsigned int size);
+
extern void perf_output_end(struct perf_output_handle *handle);
extern unsigned int perf_output_copy(struct perf_output_handle *handle,
const void *buf, unsigned int len);
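
A hedged sketch of a PMU driver's ->addr_filters_sync() callback, translating the generic filter list into driver-private state kept in event->hw.addr_filters; the hypothetical_* names and the four-slot hardware layout are assumptions, not part of this patch:

#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/perf_event.h>

/* Hypothetical per-event hardware filter state installed in hw.addr_filters. */
struct hypothetical_hw_filters {
	unsigned long start[4];
	unsigned long size[4];
	unsigned int nr;
};

static void hypothetical_pmu_addr_filters_sync(struct perf_event *event)
{
	struct perf_addr_filters_head *head = perf_event_addr_filters(event);
	struct hypothetical_hw_filters *hw = event->hw.addr_filters;
	struct perf_addr_filter *filter;
	unsigned int i = 0;

	/* the caller (perf_event_addr_filters_sync()) serializes this list walk */
	list_for_each_entry(filter, &head->list, entry) {
		if (i >= ARRAY_SIZE(hw->start))
			break;
		hw->start[i] = filter->offset;
		hw->size[i] = filter->range ? filter->size : 0;
		i++;
	}
	hw->nr = i;
}
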
diff --git a/include/linux/phy.h b/include/linux/phy.h
index 2abd7918f64f..2d24b283aa2d 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -805,6 +805,10 @@ void phy_start_machine(struct phy_device *phydev);
void phy_stop_machine(struct phy_device *phydev);
int phy_ethtool_sset(struct phy_device *phydev, struct ethtool_cmd *cmd);
int phy_ethtool_gset(struct phy_device *phydev, struct ethtool_cmd *cmd);
+int phy_ethtool_ksettings_get(struct phy_device *phydev,
+ struct ethtool_link_ksettings *cmd);
+int phy_ethtool_ksettings_set(struct phy_device *phydev,
+ const struct ethtool_link_ksettings *cmd);
int phy_mii_ioctl(struct phy_device *phydev, struct ifreq *ifr, int cmd);
int phy_start_interrupts(struct phy_device *phydev);
void phy_print_status(struct phy_device *phydev);
@@ -825,6 +829,10 @@ int phy_ethtool_get_eee(struct phy_device *phydev, struct ethtool_eee *data);
int phy_ethtool_set_wol(struct phy_device *phydev, struct ethtool_wolinfo *wol);
void phy_ethtool_get_wol(struct phy_device *phydev,
struct ethtool_wolinfo *wol);
+int phy_ethtool_get_link_ksettings(struct net_device *ndev,
+ struct ethtool_link_ksettings *cmd);
+int phy_ethtool_set_link_ksettings(struct net_device *ndev,
+ const struct ethtool_link_ksettings *cmd);
int __init mdio_bus_init(void);
void mdio_bus_exit(void);
diff --git a/include/linux/phy/phy.h b/include/linux/phy/phy.h
index 8cf05e341cff..a810f2a18842 100644
--- a/include/linux/phy/phy.h
+++ b/include/linux/phy/phy.h
@@ -77,6 +77,7 @@ struct phy {
*/
struct phy_provider {
struct device *dev;
+ struct device_node *children;
struct module *owner;
struct list_head list;
struct phy * (*of_xlate)(struct device *dev,
@@ -93,10 +94,16 @@ struct phy_lookup {
#define to_phy(a) (container_of((a), struct phy, dev))
#define of_phy_provider_register(dev, xlate) \
- __of_phy_provider_register((dev), THIS_MODULE, (xlate))
+ __of_phy_provider_register((dev), NULL, THIS_MODULE, (xlate))
#define devm_of_phy_provider_register(dev, xlate) \
- __devm_of_phy_provider_register((dev), THIS_MODULE, (xlate))
+ __devm_of_phy_provider_register((dev), NULL, THIS_MODULE, (xlate))
+
+#define of_phy_provider_register_full(dev, children, xlate) \
+ __of_phy_provider_register(dev, children, THIS_MODULE, xlate)
+
+#define devm_of_phy_provider_register_full(dev, children, xlate) \
+ __devm_of_phy_provider_register(dev, children, THIS_MODULE, xlate)
static inline void phy_set_drvdata(struct phy *phy, void *data)
{
@@ -147,11 +154,13 @@ struct phy *devm_phy_create(struct device *dev, struct device_node *node,
void phy_destroy(struct phy *phy);
void devm_phy_destroy(struct device *dev, struct phy *phy);
struct phy_provider *__of_phy_provider_register(struct device *dev,
- struct module *owner, struct phy * (*of_xlate)(struct device *dev,
- struct of_phandle_args *args));
+ struct device_node *children, struct module *owner,
+ struct phy * (*of_xlate)(struct device *dev,
+ struct of_phandle_args *args));
struct phy_provider *__devm_of_phy_provider_register(struct device *dev,
- struct module *owner, struct phy * (*of_xlate)(struct device *dev,
- struct of_phandle_args *args));
+ struct device_node *children, struct module *owner,
+ struct phy * (*of_xlate)(struct device *dev,
+ struct of_phandle_args *args));
void of_phy_provider_unregister(struct phy_provider *phy_provider);
void devm_of_phy_provider_unregister(struct device *dev,
struct phy_provider *phy_provider);
@@ -312,15 +321,17 @@ static inline void devm_phy_destroy(struct device *dev, struct phy *phy)
}
static inline struct phy_provider *__of_phy_provider_register(
- struct device *dev, struct module *owner, struct phy * (*of_xlate)(
- struct device *dev, struct of_phandle_args *args))
+ struct device *dev, struct device_node *children, struct module *owner,
+ struct phy * (*of_xlate)(struct device *dev,
+ struct of_phandle_args *args))
{
return ERR_PTR(-ENOSYS);
}
static inline struct phy_provider *__devm_of_phy_provider_register(struct device
- *dev, struct module *owner, struct phy * (*of_xlate)(struct device *dev,
- struct of_phandle_args *args))
+ *dev, struct device_node *children, struct module *owner,
+ struct phy * (*of_xlate)(struct device *dev,
+ struct of_phandle_args *args))
{
return ERR_PTR(-ENOSYS);
}
diff --git a/include/linux/phy/tegra/xusb.h b/include/linux/phy/tegra/xusb.h
new file mode 100644
index 000000000000..8e1a57a78d9f
--- /dev/null
+++ b/include/linux/phy/tegra/xusb.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef PHY_TEGRA_XUSB_H
+#define PHY_TEGRA_XUSB_H
+
+struct tegra_xusb_padctl;
+struct device;
+
+struct tegra_xusb_padctl *tegra_xusb_padctl_get(struct device *dev);
+void tegra_xusb_padctl_put(struct tegra_xusb_padctl *padctl);
+
+int tegra_xusb_padctl_usb3_save_context(struct tegra_xusb_padctl *padctl,
+ unsigned int port);
+int tegra_xusb_padctl_hsic_set_idle(struct tegra_xusb_padctl *padctl,
+ unsigned int port, bool idle);
+int tegra_xusb_padctl_usb3_set_lfps_detect(struct tegra_xusb_padctl *padctl,
+ unsigned int port, bool enable);
+
+#endif /* PHY_TEGRA_XUSB_H */
diff --git a/include/linux/pinctrl/pinctrl.h b/include/linux/pinctrl/pinctrl.h
index 9ba59fcba549..a42e57da270d 100644
--- a/include/linux/pinctrl/pinctrl.h
+++ b/include/linux/pinctrl/pinctrl.h
@@ -144,6 +144,12 @@ struct pinctrl_desc {
extern struct pinctrl_dev *pinctrl_register(struct pinctrl_desc *pctldesc,
struct device *dev, void *driver_data);
extern void pinctrl_unregister(struct pinctrl_dev *pctldev);
+extern struct pinctrl_dev *devm_pinctrl_register(struct device *dev,
+ struct pinctrl_desc *pctldesc,
+ void *driver_data);
+extern void devm_pinctrl_unregister(struct device *dev,
+ struct pinctrl_dev *pctldev);
+
extern bool pin_is_valid(struct pinctrl_dev *pctldev, int pin);
extern void pinctrl_add_gpio_range(struct pinctrl_dev *pctldev,
struct pinctrl_gpio_range *range);
diff --git a/include/linux/platform_data/dma-dw.h b/include/linux/platform_data/dma-dw.h
index 03b6095d3b18..d15d8ba8cc24 100644
--- a/include/linux/platform_data/dma-dw.h
+++ b/include/linux/platform_data/dma-dw.h
@@ -21,15 +21,15 @@
* @dma_dev: required DMA master device
* @src_id: src request line
* @dst_id: dst request line
- * @src_master: src master for transfers on allocated channel.
- * @dst_master: dest master for transfers on allocated channel.
+ * @m_master: memory master for transfers on allocated channel
+ * @p_master: peripheral master for transfers on allocated channel
*/
struct dw_dma_slave {
struct device *dma_dev;
u8 src_id;
u8 dst_id;
- u8 src_master;
- u8 dst_master;
+ u8 m_master;
+ u8 p_master;
};
/**
@@ -43,7 +43,7 @@ struct dw_dma_slave {
* @block_size: Maximum block size supported by the controller
* @nr_masters: Number of AHB masters supported by the controller
* @data_width: Maximum data width supported by hardware per AHB master
- * (0 - 8bits, 1 - 16bits, ..., 5 - 256bits)
+ * (in bytes, power of 2)
*/
struct dw_dma_platform_data {
unsigned int nr_channels;
@@ -55,7 +55,7 @@ struct dw_dma_platform_data {
#define CHAN_PRIORITY_ASCENDING 0 /* chan0 highest */
#define CHAN_PRIORITY_DESCENDING 1 /* chan7 highest */
unsigned char chan_priority;
- unsigned short block_size;
+ unsigned int block_size;
unsigned char nr_masters;
unsigned char data_width[DW_DMA_MAX_NR_MASTERS];
};
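
For illustration, a board file might describe a slave channel with the renamed fields roughly as below (the controller device and request-line numbers are made up):

#include <linux/platform_data/dma-dw.h>
#include <linux/platform_device.h>

/* The DMA controller's platform device, defined elsewhere in the board file. */
extern struct platform_device hypothetical_dw_dmac;

static struct dw_dma_slave hypothetical_uart_tx_dma = {
	.dma_dev  = &hypothetical_dw_dmac.dev,
	.src_id   = 0,		/* memory side: no request line */
	.dst_id   = 5,		/* peripheral (UART TX) request line */
	.m_master = 0,		/* AHB master used for the memory side */
	.p_master = 1,		/* AHB master used for the peripheral side */
};
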
diff --git a/include/linux/platform_data/gpio-dwapb.h b/include/linux/platform_data/gpio-dwapb.h
index 28702c849af1..2dc7f4a8ab09 100644
--- a/include/linux/platform_data/gpio-dwapb.h
+++ b/include/linux/platform_data/gpio-dwapb.h
@@ -15,8 +15,7 @@
#define GPIO_DW_APB_H
struct dwapb_port_property {
- struct device_node *node;
- const char *name;
+ struct fwnode_handle *fwnode;
unsigned int idx;
unsigned int ngpio;
unsigned int gpio_base;
diff --git a/include/linux/platform_data/media/ir-rx51.h b/include/linux/platform_data/media/ir-rx51.h
index 104aa892f31b..3038120ca46e 100644
--- a/include/linux/platform_data/media/ir-rx51.h
+++ b/include/linux/platform_data/media/ir-rx51.h
@@ -5,6 +5,7 @@ struct lirc_rx51_platform_data {
int pwm_timer;
int(*set_max_mpu_wakeup_lat)(struct device *dev, long t);
+ struct pwm_omap_dmtimer_pdata *dmtimer;
};
#endif
diff --git a/include/linux/platform_data/pwm_omap_dmtimer.h b/include/linux/platform_data/pwm_omap_dmtimer.h
index 59384217208f..e7d521e48855 100644
--- a/include/linux/platform_data/pwm_omap_dmtimer.h
+++ b/include/linux/platform_data/pwm_omap_dmtimer.h
@@ -35,6 +35,16 @@
#ifndef __PWM_OMAP_DMTIMER_PDATA_H
#define __PWM_OMAP_DMTIMER_PDATA_H
+/* clock sources */
+#define PWM_OMAP_DMTIMER_SRC_SYS_CLK 0x00
+#define PWM_OMAP_DMTIMER_SRC_32_KHZ 0x01
+#define PWM_OMAP_DMTIMER_SRC_EXT_CLK 0x02
+
+/* timer interrupt enable bits */
+#define PWM_OMAP_DMTIMER_INT_CAPTURE (1 << 2)
+#define PWM_OMAP_DMTIMER_INT_OVERFLOW (1 << 1)
+#define PWM_OMAP_DMTIMER_INT_MATCH (1 << 0)
+
/* trigger types */
#define PWM_OMAP_DMTIMER_TRIGGER_NONE 0x00
#define PWM_OMAP_DMTIMER_TRIGGER_OVERFLOW 0x01
@@ -45,15 +55,23 @@ typedef struct omap_dm_timer pwm_omap_dmtimer;
struct pwm_omap_dmtimer_pdata {
pwm_omap_dmtimer *(*request_by_node)(struct device_node *np);
+ pwm_omap_dmtimer *(*request_specific)(int timer_id);
+ pwm_omap_dmtimer *(*request)(void);
+
int (*free)(pwm_omap_dmtimer *timer);
void (*enable)(pwm_omap_dmtimer *timer);
void (*disable)(pwm_omap_dmtimer *timer);
+ int (*get_irq)(pwm_omap_dmtimer *timer);
+ int (*set_int_enable)(pwm_omap_dmtimer *timer, unsigned int value);
+ int (*set_int_disable)(pwm_omap_dmtimer *timer, u32 mask);
+
struct clk *(*get_fclk)(pwm_omap_dmtimer *timer);
int (*start)(pwm_omap_dmtimer *timer);
int (*stop)(pwm_omap_dmtimer *timer);
+ int (*set_source)(pwm_omap_dmtimer *timer, int source);
int (*set_load)(pwm_omap_dmtimer *timer, int autoreload,
unsigned int value);
@@ -63,7 +81,10 @@ struct pwm_omap_dmtimer_pdata {
int toggle, int trigger);
int (*set_prescaler)(pwm_omap_dmtimer *timer, int prescaler);
+ unsigned int (*read_counter)(pwm_omap_dmtimer *timer);
int (*write_counter)(pwm_omap_dmtimer *timer, unsigned int value);
+ unsigned int (*read_status)(pwm_omap_dmtimer *timer);
+ int (*write_status)(pwm_omap_dmtimer *timer, unsigned int value);
};
#endif /* __PWM_OMAP_DMTIMER_PDATA_H */
diff --git a/include/linux/platform_device.h b/include/linux/platform_device.h
index 03b755521fd9..98c2a7c7108e 100644
--- a/include/linux/platform_device.h
+++ b/include/linux/platform_device.h
@@ -18,7 +18,7 @@
#define PLATFORM_DEVID_AUTO (-2)
struct mfd_cell;
-struct property_set;
+struct property_entry;
struct platform_device {
const char *name;
@@ -73,7 +73,7 @@ struct platform_device_info {
size_t size_data;
u64 dma_mask;
- const struct property_set *pset;
+ struct property_entry *properties;
};
extern struct platform_device *platform_device_register_full(
const struct platform_device_info *pdevinfo);
@@ -172,7 +172,7 @@ extern int platform_device_add_resources(struct platform_device *pdev,
extern int platform_device_add_data(struct platform_device *pdev,
const void *data, size_t size);
extern int platform_device_add_properties(struct platform_device *pdev,
- const struct property_set *pset);
+ struct property_entry *properties);
extern int platform_device_add(struct platform_device *pdev);
extern void platform_device_del(struct platform_device *pdev);
extern void platform_device_put(struct platform_device *pdev);
diff --git a/include/linux/pm.h b/include/linux/pm.h
index 6a5d654f4447..06eb353182ab 100644
--- a/include/linux/pm.h
+++ b/include/linux/pm.h
@@ -563,7 +563,6 @@ struct dev_pm_info {
bool is_suspended:1; /* Ditto */
bool is_noirq_suspended:1;
bool is_late_suspended:1;
- bool ignore_children:1;
bool early_init:1; /* Owned by the PM core */
bool direct_complete:1; /* Owned by the PM core */
spinlock_t lock;
@@ -591,6 +590,7 @@ struct dev_pm_info {
unsigned int deferred_resume:1;
unsigned int run_wake:1;
unsigned int runtime_auto:1;
+ bool ignore_children:1;
unsigned int no_callbacks:1;
unsigned int irq_safe:1;
unsigned int use_autosuspend:1;
diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h
index 49cd8890b873..39285c7bd3f5 100644
--- a/include/linux/pm_domain.h
+++ b/include/linux/pm_domain.h
@@ -28,14 +28,12 @@ enum gpd_status {
struct dev_power_governor {
bool (*power_down_ok)(struct dev_pm_domain *domain);
- bool (*stop_ok)(struct device *dev);
+ bool (*suspend_ok)(struct device *dev);
};
struct gpd_dev_ops {
int (*start)(struct device *dev);
int (*stop)(struct device *dev);
- int (*save_state)(struct device *dev);
- int (*restore_state)(struct device *dev);
bool (*active_wakeup)(struct device *dev);
};
@@ -94,7 +92,7 @@ struct gpd_timing_data {
s64 resume_latency_ns;
s64 effective_constraint_ns;
bool constraint_changed;
- bool cached_stop_ok;
+ bool cached_suspend_ok;
};
struct pm_domain_data {
diff --git a/include/linux/pm_opp.h b/include/linux/pm_opp.h
index cccaf4a29e9f..bca26157f5b6 100644
--- a/include/linux/pm_opp.h
+++ b/include/linux/pm_opp.h
@@ -65,6 +65,10 @@ void dev_pm_opp_put_prop_name(struct device *dev);
int dev_pm_opp_set_regulator(struct device *dev, const char *name);
void dev_pm_opp_put_regulator(struct device *dev);
int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq);
+int dev_pm_opp_set_sharing_cpus(struct device *cpu_dev, const struct cpumask *cpumask);
+int dev_pm_opp_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask);
+void dev_pm_opp_remove_table(struct device *dev);
+void dev_pm_opp_cpumask_remove_table(const struct cpumask *cpumask);
#else
static inline unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp)
{
@@ -109,25 +113,25 @@ static inline struct dev_pm_opp *dev_pm_opp_get_suspend_opp(struct device *dev)
static inline struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev,
unsigned long freq, bool available)
{
- return ERR_PTR(-EINVAL);
+ return ERR_PTR(-ENOTSUPP);
}
static inline struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev,
unsigned long *freq)
{
- return ERR_PTR(-EINVAL);
+ return ERR_PTR(-ENOTSUPP);
}
static inline struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev,
unsigned long *freq)
{
- return ERR_PTR(-EINVAL);
+ return ERR_PTR(-ENOTSUPP);
}
static inline int dev_pm_opp_add(struct device *dev, unsigned long freq,
unsigned long u_volt)
{
- return -EINVAL;
+ return -ENOTSUPP;
}
static inline void dev_pm_opp_remove(struct device *dev, unsigned long freq)
@@ -147,73 +151,85 @@ static inline int dev_pm_opp_disable(struct device *dev, unsigned long freq)
static inline struct srcu_notifier_head *dev_pm_opp_get_notifier(
struct device *dev)
{
- return ERR_PTR(-EINVAL);
+ return ERR_PTR(-ENOTSUPP);
}
static inline int dev_pm_opp_set_supported_hw(struct device *dev,
const u32 *versions,
unsigned int count)
{
- return -EINVAL;
+ return -ENOTSUPP;
}
static inline void dev_pm_opp_put_supported_hw(struct device *dev) {}
static inline int dev_pm_opp_set_prop_name(struct device *dev, const char *name)
{
- return -EINVAL;
+ return -ENOTSUPP;
}
static inline void dev_pm_opp_put_prop_name(struct device *dev) {}
static inline int dev_pm_opp_set_regulator(struct device *dev, const char *name)
{
- return -EINVAL;
+ return -ENOTSUPP;
}
static inline void dev_pm_opp_put_regulator(struct device *dev) {}
static inline int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq)
{
+ return -ENOTSUPP;
+}
+
+static inline int dev_pm_opp_set_sharing_cpus(struct device *cpu_dev, const struct cpumask *cpumask)
+{
+ return -ENOTSUPP;
+}
+
+static inline int dev_pm_opp_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask)
+{
return -EINVAL;
}
+static inline void dev_pm_opp_remove_table(struct device *dev)
+{
+}
+
+static inline void dev_pm_opp_cpumask_remove_table(const struct cpumask *cpumask)
+{
+}
+
#endif /* CONFIG_PM_OPP */
#if defined(CONFIG_PM_OPP) && defined(CONFIG_OF)
int dev_pm_opp_of_add_table(struct device *dev);
void dev_pm_opp_of_remove_table(struct device *dev);
-int dev_pm_opp_of_cpumask_add_table(cpumask_var_t cpumask);
-void dev_pm_opp_of_cpumask_remove_table(cpumask_var_t cpumask);
-int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev, cpumask_var_t cpumask);
-int dev_pm_opp_set_sharing_cpus(struct device *cpu_dev, cpumask_var_t cpumask);
+int dev_pm_opp_of_cpumask_add_table(const struct cpumask *cpumask);
+void dev_pm_opp_of_cpumask_remove_table(const struct cpumask *cpumask);
+int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask);
#else
static inline int dev_pm_opp_of_add_table(struct device *dev)
{
- return -EINVAL;
+ return -ENOTSUPP;
}
static inline void dev_pm_opp_of_remove_table(struct device *dev)
{
}
-static inline int dev_pm_opp_of_cpumask_add_table(cpumask_var_t cpumask)
-{
- return -ENOSYS;
-}
-
-static inline void dev_pm_opp_of_cpumask_remove_table(cpumask_var_t cpumask)
+static inline int dev_pm_opp_of_cpumask_add_table(const struct cpumask *cpumask)
{
+ return -ENOTSUPP;
}
-static inline int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev, cpumask_var_t cpumask)
+static inline void dev_pm_opp_of_cpumask_remove_table(const struct cpumask *cpumask)
{
- return -ENOSYS;
}
-static inline int dev_pm_opp_set_sharing_cpus(struct device *cpu_dev, cpumask_var_t cpumask)
+static inline int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask)
{
- return -ENOSYS;
+ return -ENOTSUPP;
}
#endif
diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h
index 7af093d6a4dd..2e14d2667b6c 100644
--- a/include/linux/pm_runtime.h
+++ b/include/linux/pm_runtime.h
@@ -56,6 +56,11 @@ extern void pm_runtime_update_max_time_suspended(struct device *dev,
s64 delta_ns);
extern void pm_runtime_set_memalloc_noio(struct device *dev, bool enable);
+static inline void pm_suspend_ignore_children(struct device *dev, bool enable)
+{
+ dev->power.ignore_children = enable;
+}
+
static inline bool pm_children_suspended(struct device *dev)
{
return dev->power.ignore_children
@@ -156,6 +161,7 @@ static inline void __pm_runtime_disable(struct device *dev, bool c) {}
static inline void pm_runtime_allow(struct device *dev) {}
static inline void pm_runtime_forbid(struct device *dev) {}
+static inline void pm_suspend_ignore_children(struct device *dev, bool enable) {}
static inline bool pm_children_suspended(struct device *dev) { return false; }
static inline void pm_runtime_get_noresume(struct device *dev) {}
static inline void pm_runtime_put_noidle(struct device *dev) {}
diff --git a/include/linux/pmem.h b/include/linux/pmem.h
index 3ec5309e29f3..57d146fe44dd 100644
--- a/include/linux/pmem.h
+++ b/include/linux/pmem.h
@@ -42,6 +42,13 @@ static inline void arch_memcpy_to_pmem(void __pmem *dst, const void *src,
BUG();
}
+static inline int arch_memcpy_from_pmem(void *dst, const void __pmem *src,
+ size_t n)
+{
+ BUG();
+ return -EFAULT;
+}
+
static inline size_t arch_copy_from_iter_pmem(void __pmem *addr, size_t bytes,
struct iov_iter *i)
{
@@ -65,20 +72,33 @@ static inline void arch_invalidate_pmem(void __pmem *addr, size_t size)
}
#endif
-/*
- * Architectures that define ARCH_HAS_PMEM_API must provide
- * implementations for arch_memcpy_to_pmem(), arch_wmb_pmem(),
- * arch_copy_from_iter_pmem(), arch_clear_pmem(), arch_wb_cache_pmem()
- * and arch_has_wmb_pmem().
- */
-static inline void memcpy_from_pmem(void *dst, void __pmem const *src, size_t size)
+static inline bool arch_has_pmem_api(void)
{
- memcpy(dst, (void __force const *) src, size);
+ return IS_ENABLED(CONFIG_ARCH_HAS_PMEM_API);
}
-static inline bool arch_has_pmem_api(void)
+static inline int default_memcpy_from_pmem(void *dst, void __pmem const *src,
+ size_t size)
{
- return IS_ENABLED(CONFIG_ARCH_HAS_PMEM_API);
+ memcpy(dst, (void __force *) src, size);
+ return 0;
+}
+
+/*
+ * memcpy_from_pmem - read from persistent memory with error handling
+ * @dst: destination buffer
+ * @src: source buffer
+ * @size: transfer length
+ *
+ * Returns 0 on success, negative error code on failure.
+ */
+static inline int memcpy_from_pmem(void *dst, void __pmem const *src,
+ size_t size)
+{
+ if (arch_has_pmem_api())
+ return arch_memcpy_from_pmem(dst, src, size);
+ else
+ return default_memcpy_from_pmem(dst, src, size);
}
/**
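
A short sketch of the new error-aware read path (names are placeholders; the return value is the negative error code described above):

#include <linux/pmem.h>

static int hypothetical_pmem_read(void *dst, void __pmem *src, size_t len)
{
	int rc = memcpy_from_pmem(dst, src, len);

	if (rc)
		return rc;	/* propagate the error reported by the pmem copy */

	return 0;
}
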
diff --git a/include/linux/pnp.h b/include/linux/pnp.h
index 5df733b8f704..2588ca6a9028 100644
--- a/include/linux/pnp.h
+++ b/include/linux/pnp.h
@@ -337,9 +337,11 @@ extern struct mutex pnp_res_mutex;
#ifdef CONFIG_PNPBIOS
extern struct pnp_protocol pnpbios_protocol;
+extern bool arch_pnpbios_disabled(void);
#define pnp_device_is_pnpbios(dev) ((dev)->protocol == (&pnpbios_protocol))
#else
#define pnp_device_is_pnpbios(dev) 0
+#define arch_pnpbios_disabled() false
#endif
#ifdef CONFIG_PNPACPI
diff --git a/include/linux/poll.h b/include/linux/poll.h
index 9fb4f40d9a26..37b057b63b46 100644
--- a/include/linux/poll.h
+++ b/include/linux/poll.h
@@ -96,7 +96,7 @@ extern void poll_initwait(struct poll_wqueues *pwq);
extern void poll_freewait(struct poll_wqueues *pwq);
extern int poll_schedule_timeout(struct poll_wqueues *pwq, int state,
ktime_t *expires, unsigned long slack);
-extern u64 select_estimate_accuracy(struct timespec *tv);
+extern u64 select_estimate_accuracy(struct timespec64 *tv);
static inline int poll_schedule(struct poll_wqueues *pwq, int state)
@@ -153,12 +153,13 @@ void zero_fd_set(unsigned long nr, unsigned long *fdset)
#define MAX_INT64_SECONDS (((s64)(~((u64)0)>>1)/HZ)-1)
-extern int do_select(int n, fd_set_bits *fds, struct timespec *end_time);
+extern int do_select(int n, fd_set_bits *fds, struct timespec64 *end_time);
extern int do_sys_poll(struct pollfd __user * ufds, unsigned int nfds,
- struct timespec *end_time);
+ struct timespec64 *end_time);
extern int core_sys_select(int n, fd_set __user *inp, fd_set __user *outp,
- fd_set __user *exp, struct timespec *end_time);
+ fd_set __user *exp, struct timespec64 *end_time);
-extern int poll_select_set_timeout(struct timespec *to, long sec, long nsec);
+extern int poll_select_set_timeout(struct timespec64 *to, time64_t sec,
+ long nsec);
#endif /* _LINUX_POLL_H */
diff --git a/include/linux/posix_acl.h b/include/linux/posix_acl.h
index 3e96a6a76103..5b5a80cc5926 100644
--- a/include/linux/posix_acl.h
+++ b/include/linux/posix_acl.h
@@ -99,7 +99,6 @@ extern int posix_acl_create(struct inode *, umode_t *, struct posix_acl **,
extern int simple_set_acl(struct inode *, struct posix_acl *, int);
extern int simple_acl_create(struct inode *, struct inode *);
-struct posix_acl **acl_by_type(struct inode *inode, int type);
struct posix_acl *get_cached_acl(struct inode *inode, int type);
struct posix_acl *get_cached_acl_rcu(struct inode *inode, int type);
void set_cached_acl(struct inode *inode, int type, struct posix_acl *acl);
diff --git a/include/linux/property.h b/include/linux/property.h
index b51fcd36d892..ecab11e40794 100644
--- a/include/linux/property.h
+++ b/include/linux/property.h
@@ -238,18 +238,9 @@ struct property_entry {
.name = _name_, \
}
-/**
- * struct property_set - Collection of "built-in" device properties.
- * @fwnode: Handle to be pointed to by the fwnode field of struct device.
- * @properties: Array of properties terminated with a null entry.
- */
-struct property_set {
- struct fwnode_handle fwnode;
- struct property_entry *properties;
-};
-
-int device_add_property_set(struct device *dev, const struct property_set *pset);
-void device_remove_property_set(struct device *dev);
+int device_add_properties(struct device *dev,
+ struct property_entry *properties);
+void device_remove_properties(struct device *dev);
bool device_dma_supported(struct device *dev);
diff --git a/include/linux/proportions.h b/include/linux/proportions.h
deleted file mode 100644
index 21221338ad18..000000000000
--- a/include/linux/proportions.h
+++ /dev/null
@@ -1,137 +0,0 @@
-/*
- * FLoating proportions
- *
- * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
- *
- * This file contains the public data structure and API definitions.
- */
-
-#ifndef _LINUX_PROPORTIONS_H
-#define _LINUX_PROPORTIONS_H
-
-#include <linux/percpu_counter.h>
-#include <linux/spinlock.h>
-#include <linux/mutex.h>
-#include <linux/gfp.h>
-
-struct prop_global {
- /*
- * The period over which we differentiate
- *
- * period = 2^shift
- */
- int shift;
- /*
- * The total event counter aka 'time'.
- *
- * Treated as an unsigned long; the lower 'shift - 1' bits are the
- * counter bits, the remaining upper bits the period counter.
- */
- struct percpu_counter events;
-};
-
-/*
- * global proportion descriptor
- *
- * this is needed to consistently flip prop_global structures.
- */
-struct prop_descriptor {
- int index;
- struct prop_global pg[2];
- struct mutex mutex; /* serialize the prop_global switch */
-};
-
-int prop_descriptor_init(struct prop_descriptor *pd, int shift, gfp_t gfp);
-void prop_change_shift(struct prop_descriptor *pd, int new_shift);
-
-/*
- * ----- PERCPU ------
- */
-
-struct prop_local_percpu {
- /*
- * the local events counter
- */
- struct percpu_counter events;
-
- /*
- * snapshot of the last seen global state
- */
- int shift;
- unsigned long period;
- raw_spinlock_t lock; /* protect the snapshot state */
-};
-
-int prop_local_init_percpu(struct prop_local_percpu *pl, gfp_t gfp);
-void prop_local_destroy_percpu(struct prop_local_percpu *pl);
-void __prop_inc_percpu(struct prop_descriptor *pd, struct prop_local_percpu *pl);
-void prop_fraction_percpu(struct prop_descriptor *pd, struct prop_local_percpu *pl,
- long *numerator, long *denominator);
-
-static inline
-void prop_inc_percpu(struct prop_descriptor *pd, struct prop_local_percpu *pl)
-{
- unsigned long flags;
-
- local_irq_save(flags);
- __prop_inc_percpu(pd, pl);
- local_irq_restore(flags);
-}
-
-/*
- * Limit the time part in order to ensure there are some bits left for the
- * cycle counter and fraction multiply.
- */
-#if BITS_PER_LONG == 32
-#define PROP_MAX_SHIFT (3*BITS_PER_LONG/4)
-#else
-#define PROP_MAX_SHIFT (BITS_PER_LONG/2)
-#endif
-
-#define PROP_FRAC_SHIFT (BITS_PER_LONG - PROP_MAX_SHIFT - 1)
-#define PROP_FRAC_BASE (1UL << PROP_FRAC_SHIFT)
-
-void __prop_inc_percpu_max(struct prop_descriptor *pd,
- struct prop_local_percpu *pl, long frac);
-
-
-/*
- * ----- SINGLE ------
- */
-
-struct prop_local_single {
- /*
- * the local events counter
- */
- unsigned long events;
-
- /*
- * snapshot of the last seen global state
- * and a lock protecting this state
- */
- unsigned long period;
- int shift;
- raw_spinlock_t lock; /* protect the snapshot state */
-};
-
-#define INIT_PROP_LOCAL_SINGLE(name) \
-{ .lock = __RAW_SPIN_LOCK_UNLOCKED(name.lock), \
-}
-
-int prop_local_init_single(struct prop_local_single *pl);
-void prop_local_destroy_single(struct prop_local_single *pl);
-void __prop_inc_single(struct prop_descriptor *pd, struct prop_local_single *pl);
-void prop_fraction_single(struct prop_descriptor *pd, struct prop_local_single *pl,
- long *numerator, long *denominator);
-
-static inline
-void prop_inc_single(struct prop_descriptor *pd, struct prop_local_single *pl)
-{
- unsigned long flags;
-
- local_irq_save(flags);
- __prop_inc_single(pd, pl);
- local_irq_restore(flags);
-}
-
-#endif /* _LINUX_PROPORTIONS_H */
diff --git a/include/linux/psci.h b/include/linux/psci.h
index 393efe2edf9a..bdea1cb5e1db 100644
--- a/include/linux/psci.h
+++ b/include/linux/psci.h
@@ -21,8 +21,6 @@
#define PSCI_POWER_STATE_TYPE_POWER_DOWN 1
bool psci_tos_resident_on(int cpu);
-bool psci_power_state_loses_context(u32 state);
-bool psci_power_state_is_valid(u32 state);
int psci_cpu_init_idle(unsigned int cpu);
int psci_cpu_suspend_enter(unsigned long index);
diff --git a/include/linux/pwm.h b/include/linux/pwm.h
index cfc3ed46cad2..b78d27c42629 100644
--- a/include/linux/pwm.h
+++ b/include/linux/pwm.h
@@ -74,6 +74,24 @@ enum pwm_polarity {
PWM_POLARITY_INVERSED,
};
+/**
+ * struct pwm_args - board-dependent PWM arguments
+ * @period: reference period
+ * @polarity: reference polarity
+ *
+ * This structure describes board-dependent arguments attached to a PWM
+ * device. These arguments are usually retrieved from the PWM lookup table or
+ * device tree.
+ *
+ * Do not confuse this with the PWM state: PWM arguments represent the initial
+ * configuration that users want to use on this PWM device rather than the
+ * current PWM hardware state.
+ */
+struct pwm_args {
+ unsigned int period;
+ enum pwm_polarity polarity;
+};
+
enum {
PWMF_REQUESTED = 1 << 0,
PWMF_ENABLED = 1 << 1,
@@ -92,6 +110,7 @@ enum {
* @period: period of the PWM signal (in nanoseconds)
* @duty_cycle: duty cycle of the PWM signal (in nanoseconds)
* @polarity: polarity of the PWM signal
+ * @args: PWM arguments
*/
struct pwm_device {
const char *label;
@@ -105,6 +124,8 @@ struct pwm_device {
unsigned int period;
unsigned int duty_cycle;
enum pwm_polarity polarity;
+
+ struct pwm_args args;
};
static inline bool pwm_is_enabled(const struct pwm_device *pwm)
@@ -144,6 +165,18 @@ static inline enum pwm_polarity pwm_get_polarity(const struct pwm_device *pwm)
return pwm ? pwm->polarity : PWM_POLARITY_NORMAL;
}
+static inline void pwm_get_args(const struct pwm_device *pwm,
+ struct pwm_args *args)
+{
+ *args = pwm->args;
+}
+
+static inline void pwm_apply_args(struct pwm_device *pwm)
+{
+ pwm_set_period(pwm, pwm->args.period);
+ pwm_set_polarity(pwm, pwm->args.polarity);
+}
+
/**
* struct pwm_ops - PWM controller operations
* @request: optional hook for requesting a PWM
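
As an aside (not part of the patch), a consumer would typically honour these board-dependent arguments right after requesting the PWM. A minimal sketch under that assumption — the device, the NULL con_id, and the 50% duty cycle are purely illustrative, and it relies on the legacy devm_pwm_get()/pwm_config() API already present in this header:

#include <linux/pwm.h>

/* Hypothetical probe fragment: pick up the reference period/polarity. */
static int example_pwm_probe(struct device *dev)
{
	struct pwm_device *pwm;
	struct pwm_args args;

	pwm = devm_pwm_get(dev, NULL);
	if (IS_ERR(pwm))
		return PTR_ERR(pwm);

	/* Copy the reference settings into the legacy period/polarity fields. */
	pwm_apply_args(pwm);

	/* Or read them explicitly, e.g. to derive an initial duty cycle. */
	pwm_get_args(pwm, &args);

	return pwm_config(pwm, args.period / 2, args.period);
}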
diff --git a/include/linux/qed/common_hsi.h b/include/linux/qed/common_hsi.h
index 53ecb37ae563..3f14c7efe68f 100644
--- a/include/linux/qed/common_hsi.h
+++ b/include/linux/qed/common_hsi.h
@@ -285,6 +285,63 @@
#define PXP_ILT_PAGE_SIZE_NUM_BITS_MIN 12
#define PXP_ILT_BLOCK_FACTOR_MULTIPLIER 1024
+#define PXP_VF_BAR0_START_IGU 0
+#define PXP_VF_BAR0_IGU_LENGTH 0x3000
+#define PXP_VF_BAR0_END_IGU (PXP_VF_BAR0_START_IGU + \
+ PXP_VF_BAR0_IGU_LENGTH - 1)
+
+#define PXP_VF_BAR0_START_DQ 0x3000
+#define PXP_VF_BAR0_DQ_LENGTH 0x200
+#define PXP_VF_BAR0_DQ_OPAQUE_OFFSET 0
+#define PXP_VF_BAR0_ME_OPAQUE_ADDRESS (PXP_VF_BAR0_START_DQ + \
+ PXP_VF_BAR0_DQ_OPAQUE_OFFSET)
+#define PXP_VF_BAR0_ME_CONCRETE_ADDRESS (PXP_VF_BAR0_ME_OPAQUE_ADDRESS \
+ + 4)
+#define PXP_VF_BAR0_END_DQ (PXP_VF_BAR0_START_DQ + \
+ PXP_VF_BAR0_DQ_LENGTH - 1)
+
+#define PXP_VF_BAR0_START_TSDM_ZONE_B 0x3200
+#define PXP_VF_BAR0_SDM_LENGTH_ZONE_B 0x200
+#define PXP_VF_BAR0_END_TSDM_ZONE_B (PXP_VF_BAR0_START_TSDM_ZONE_B \
+ + \
+ PXP_VF_BAR0_SDM_LENGTH_ZONE_B \
+ - 1)
+
+#define PXP_VF_BAR0_START_MSDM_ZONE_B 0x3400
+#define PXP_VF_BAR0_END_MSDM_ZONE_B (PXP_VF_BAR0_START_MSDM_ZONE_B \
+ + \
+ PXP_VF_BAR0_SDM_LENGTH_ZONE_B \
+ - 1)
+
+#define PXP_VF_BAR0_START_USDM_ZONE_B 0x3600
+#define PXP_VF_BAR0_END_USDM_ZONE_B (PXP_VF_BAR0_START_USDM_ZONE_B \
+ + \
+ PXP_VF_BAR0_SDM_LENGTH_ZONE_B \
+ - 1)
+
+#define PXP_VF_BAR0_START_XSDM_ZONE_B 0x3800
+#define PXP_VF_BAR0_END_XSDM_ZONE_B (PXP_VF_BAR0_START_XSDM_ZONE_B \
+ + \
+ PXP_VF_BAR0_SDM_LENGTH_ZONE_B \
+ - 1)
+
+#define PXP_VF_BAR0_START_YSDM_ZONE_B 0x3a00
+#define PXP_VF_BAR0_END_YSDM_ZONE_B (PXP_VF_BAR0_START_YSDM_ZONE_B \
+ + \
+ PXP_VF_BAR0_SDM_LENGTH_ZONE_B \
+ - 1)
+
+#define PXP_VF_BAR0_START_PSDM_ZONE_B 0x3c00
+#define PXP_VF_BAR0_END_PSDM_ZONE_B (PXP_VF_BAR0_START_PSDM_ZONE_B \
+ + \
+ PXP_VF_BAR0_SDM_LENGTH_ZONE_B \
+ - 1)
+
+#define PXP_VF_BAR0_START_SDM_ZONE_A 0x4000
+#define PXP_VF_BAR0_END_SDM_ZONE_A 0x10000
+
+#define PXP_VF_BAR0_GRC_WINDOW_LENGTH 32
+
/* ILT Records */
#define PXP_NUM_ILT_RECORDS_BB 7600
#define PXP_NUM_ILT_RECORDS_K2 11000
@@ -327,9 +384,14 @@ struct regpair {
__le32 hi;
};
+struct vf_pf_channel_eqe_data {
+ struct regpair msg_addr;
+};
+
/* Event Data Union */
union event_ring_data {
u8 bytes[8];
+ struct vf_pf_channel_eqe_data vf_pf_channel;
struct async_data async_info;
};
diff --git a/include/linux/qed/qed_eth_if.h b/include/linux/qed/qed_eth_if.h
index e1d69834a11f..6ae8cb4a61d3 100644
--- a/include/linux/qed/qed_eth_if.h
+++ b/include/linux/qed/qed_eth_if.h
@@ -13,6 +13,7 @@
#include <linux/if_link.h>
#include <linux/qed/eth_common.h>
#include <linux/qed/qed_if.h>
+#include <linux/qed/qed_iov_if.h>
struct qed_dev_eth_info {
struct qed_dev_info common;
@@ -27,12 +28,15 @@ struct qed_dev_eth_info {
struct qed_update_vport_rss_params {
u16 rss_ind_table[128];
u32 rss_key[10];
+ u8 rss_caps;
};
struct qed_update_vport_params {
u8 vport_id;
u8 update_vport_active_flg;
u8 vport_active_flg;
+ u8 update_tx_switching_flg;
+ u8 tx_switching_flg;
u8 update_accept_any_vlan_flg;
u8 accept_any_vlan;
u8 update_rss_flg;
@@ -111,12 +115,23 @@ struct qed_queue_start_common_params {
u16 sb_idx;
};
+struct qed_tunn_params {
+ u16 vxlan_port;
+ u8 update_vxlan_port;
+ u16 geneve_port;
+ u8 update_geneve_port;
+};
+
struct qed_eth_cb_ops {
struct qed_common_cb_ops common;
+ void (*force_mac) (void *dev, u8 *mac);
};
struct qed_eth_ops {
const struct qed_common_ops *common;
+#ifdef CONFIG_QED_SRIOV
+ const struct qed_iov_hv_ops *iov;
+#endif
int (*fill_dev_info)(struct qed_dev *cdev,
struct qed_dev_eth_info *info);
@@ -125,6 +140,8 @@ struct qed_eth_ops {
struct qed_eth_cb_ops *ops,
void *cookie);
+ bool(*check_mac) (struct qed_dev *cdev, u8 *mac);
+
int (*vport_start)(struct qed_dev *cdev,
struct qed_start_vport_params *params);
@@ -165,9 +182,12 @@ struct qed_eth_ops {
void (*get_vport_stats)(struct qed_dev *cdev,
struct qed_eth_stats *stats);
+
+ int (*tunn_config)(struct qed_dev *cdev,
+ struct qed_tunn_params *params);
};
-const struct qed_eth_ops *qed_get_eth_ops(u32 version);
+const struct qed_eth_ops *qed_get_eth_ops(void);
void qed_put_eth_ops(void);
#endif
diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h
index 1f7599c77cd4..4c29439f54bf 100644
--- a/include/linux/qed/qed_if.h
+++ b/include/linux/qed/qed_if.h
@@ -25,6 +25,15 @@
#include <linux/qed/common_hsi.h>
#include <linux/qed/qed_chain.h>
+enum dcbx_protocol_type {
+ DCBX_PROTOCOL_ISCSI,
+ DCBX_PROTOCOL_FCOE,
+ DCBX_PROTOCOL_ROCE,
+ DCBX_PROTOCOL_ROCE_V2,
+ DCBX_PROTOCOL_ETH,
+ DCBX_MAX_PROTOCOL_TYPE
+};
+
enum qed_led_mode {
QED_LED_MODE_OFF,
QED_LED_MODE_ON,
@@ -93,6 +102,7 @@ struct qed_dev_info {
u32 flash_size;
u8 mf_mode;
+ bool tx_switching;
};
enum qed_sb_type {
@@ -110,6 +120,7 @@ struct qed_link_params {
#define QED_LINK_OVERRIDE_SPEED_ADV_SPEEDS BIT(1)
#define QED_LINK_OVERRIDE_SPEED_FORCED_SPEED BIT(2)
#define QED_LINK_OVERRIDE_PAUSE_CONFIG BIT(3)
+#define QED_LINK_OVERRIDE_LOOPBACK_MODE BIT(4)
u32 override_flags;
bool autoneg;
u32 adv_speeds;
@@ -118,6 +129,12 @@ struct qed_link_params {
#define QED_LINK_PAUSE_RX_ENABLE BIT(1)
#define QED_LINK_PAUSE_TX_ENABLE BIT(2)
u32 pause_config;
+#define QED_LINK_LOOPBACK_NONE BIT(0)
+#define QED_LINK_LOOPBACK_INT_PHY BIT(1)
+#define QED_LINK_LOOPBACK_EXT_PHY BIT(2)
+#define QED_LINK_LOOPBACK_EXT BIT(3)
+#define QED_LINK_LOOPBACK_MAC BIT(4)
+ u32 loopback_mode;
};
struct qed_link_output {
@@ -133,6 +150,13 @@ struct qed_link_output {
u32 pause_config;
};
+struct qed_probe_params {
+ enum qed_protocol protocol;
+ u32 dp_module;
+ u8 dp_level;
+ bool is_vf;
+};
+
#define QED_DRV_VER_STR_SIZE 12
struct qed_slowpath_params {
u32 int_mode;
@@ -158,10 +182,49 @@ struct qed_common_cb_ops {
struct qed_link_output *link);
};
+struct qed_selftest_ops {
+/**
+ * @brief selftest_interrupt - Perform interrupt test
+ *
+ * @param cdev
+ *
+ * @return 0 on success, error otherwise.
+ */
+ int (*selftest_interrupt)(struct qed_dev *cdev);
+
+/**
+ * @brief selftest_memory - Perform memory test
+ *
+ * @param cdev
+ *
+ * @return 0 on success, error otherwise.
+ */
+ int (*selftest_memory)(struct qed_dev *cdev);
+
+/**
+ * @brief selftest_register - Perform register test
+ *
+ * @param cdev
+ *
+ * @return 0 on success, error otherwise.
+ */
+ int (*selftest_register)(struct qed_dev *cdev);
+
+/**
+ * @brief selftest_clock - Perform clock test
+ *
+ * @param cdev
+ *
+ * @return 0 on success, error otherwise.
+ */
+ int (*selftest_clock)(struct qed_dev *cdev);
+};
+
struct qed_common_ops {
+ struct qed_selftest_ops *selftest;
+
struct qed_dev* (*probe)(struct pci_dev *dev,
- enum qed_protocol protocol,
- u32 dp_module, u8 dp_level);
+ struct qed_probe_params *params);
void (*remove)(struct qed_dev *cdev);
@@ -211,6 +274,16 @@ struct qed_common_ops {
void (*simd_handler_clean)(struct qed_dev *cdev,
int index);
+
+/**
+ * @brief can_link_change - whether the instance can change the link
+ *
+ * @param cdev
+ *
+ * @return true if link-change is allowed, false otherwise.
+ */
+ bool (*can_link_change)(struct qed_dev *cdev);
+
/**
* @brief set_link - set links according to params
*
@@ -271,15 +344,6 @@ struct qed_common_ops {
enum qed_led_mode mode);
};
-/**
- * @brief qed_get_protocol_version
- *
- * @param protocol
- *
- * @return version supported by qed for given protocol driver
- */
-u32 qed_get_protocol_version(enum qed_protocol protocol);
-
#define MASK_FIELD(_name, _value) \
((_value) &= (_name ## _MASK))
@@ -393,16 +457,16 @@ struct qed_eth_stats {
/* port */
u64 rx_64_byte_packets;
- u64 rx_127_byte_packets;
- u64 rx_255_byte_packets;
- u64 rx_511_byte_packets;
- u64 rx_1023_byte_packets;
- u64 rx_1518_byte_packets;
- u64 rx_1522_byte_packets;
- u64 rx_2047_byte_packets;
- u64 rx_4095_byte_packets;
- u64 rx_9216_byte_packets;
- u64 rx_16383_byte_packets;
+ u64 rx_65_to_127_byte_packets;
+ u64 rx_128_to_255_byte_packets;
+ u64 rx_256_to_511_byte_packets;
+ u64 rx_512_to_1023_byte_packets;
+ u64 rx_1024_to_1518_byte_packets;
+ u64 rx_1519_to_1522_byte_packets;
+ u64 rx_1519_to_2047_byte_packets;
+ u64 rx_2048_to_4095_byte_packets;
+ u64 rx_4096_to_9216_byte_packets;
+ u64 rx_9217_to_16383_byte_packets;
u64 rx_crc_errors;
u64 rx_mac_crtl_frames;
u64 rx_pause_frames;
@@ -524,4 +588,15 @@ static inline void internal_ram_wr(void __iomem *addr,
__internal_ram_wr(NULL, addr, size, data);
}
+enum qed_rss_caps {
+ QED_RSS_IPV4 = 0x1,
+ QED_RSS_IPV6 = 0x2,
+ QED_RSS_IPV4_TCP = 0x4,
+ QED_RSS_IPV6_TCP = 0x8,
+ QED_RSS_IPV4_UDP = 0x10,
+ QED_RSS_IPV6_UDP = 0x20,
+};
+
+#define QED_RSS_IND_TABLE_SIZE 128
+#define QED_RSS_KEY_SIZE 10 /* size in 32b chunks */
#endif
diff --git a/include/linux/qed/qed_iov_if.h b/include/linux/qed/qed_iov_if.h
new file mode 100644
index 000000000000..5a4f8d0899e9
--- /dev/null
+++ b/include/linux/qed/qed_iov_if.h
@@ -0,0 +1,34 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#ifndef _QED_IOV_IF_H
+#define _QED_IOV_IF_H
+
+#include <linux/qed/qed_if.h>
+
+/* Structs used by PF to control and manipulate child VFs */
+struct qed_iov_hv_ops {
+ int (*configure)(struct qed_dev *cdev, int num_vfs_param);
+
+ int (*set_mac) (struct qed_dev *cdev, u8 *mac, int vfid);
+
+ int (*set_vlan) (struct qed_dev *cdev, u16 vid, int vfid);
+
+ int (*get_config) (struct qed_dev *cdev, int vf_id,
+ struct ifla_vf_info *ivi);
+
+ int (*set_link_state) (struct qed_dev *cdev, int vf_id,
+ int link_state);
+
+ int (*set_spoof) (struct qed_dev *cdev, int vfid, bool val);
+
+ int (*set_rate) (struct qed_dev *cdev, int vfid,
+ u32 min_rate, u32 max_rate);
+};
+
+#endif
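
For orientation only (not in the patch): an Ethernet PF driver that already holds the ops returned by qed_get_eth_ops() would reach these callbacks roughly as below; example_set_vf_mac() is a hypothetical helper.

#ifdef CONFIG_QED_SRIOV
/* Hypothetical ndo_set_vf_mac-style helper built on qed_iov_hv_ops. */
static int example_set_vf_mac(struct qed_dev *cdev,
			      const struct qed_eth_ops *ops,
			      int vfid, u8 *mac)
{
	if (!ops->iov)
		return -EOPNOTSUPP;

	return ops->iov->set_mac(cdev, mac, vfid);
}
#endif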
diff --git a/include/linux/rculist.h b/include/linux/rculist.h
index 17d4f849c65e..8beb98dcf14f 100644
--- a/include/linux/rculist.h
+++ b/include/linux/rculist.h
@@ -488,6 +488,42 @@ static inline void hlist_add_head_rcu(struct hlist_node *n,
}
/**
+ * hlist_add_tail_rcu
+ * @n: the element to add to the hash list.
+ * @h: the list to add to.
+ *
+ * Description:
+ * Adds the specified element to the end of the specified hlist,
+ * while permitting racing traversals.
+ *
+ * The caller must take whatever precautions are necessary
+ * (such as holding appropriate locks) to avoid racing
+ * with another list-mutation primitive, such as hlist_add_head_rcu()
+ * or hlist_del_rcu(), running on this same list.
+ * However, it is perfectly legal to run concurrently with
+ * the _rcu list-traversal primitives, such as
+ * hlist_for_each_entry_rcu(), used to prevent memory-consistency
+ * problems on Alpha CPUs. Regardless of the type of CPU, the
+ * list-traversal primitive must be guarded by rcu_read_lock().
+ */
+static inline void hlist_add_tail_rcu(struct hlist_node *n,
+ struct hlist_head *h)
+{
+ struct hlist_node *i, *last = NULL;
+
+ for (i = hlist_first_rcu(h); i; i = hlist_next_rcu(i))
+ last = i;
+
+ if (last) {
+ n->next = last->next;
+ n->pprev = &last->next;
+ rcu_assign_pointer(hlist_next_rcu(last), n);
+ } else {
+ hlist_add_head_rcu(n, h);
+ }
+}
+
+/**
* hlist_add_before_rcu
* @n: the new element to add to the hash list.
* @next: the existing element to add the new element before.
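
A short usage sketch (illustrative, not from the patch): writers serialize tail insertions with their own lock, while readers only need rcu_read_lock(); note the tail insert is O(n) because it walks the list.

struct example_item {
	struct hlist_node node;
	int key;
};

static HLIST_HEAD(example_list);
static DEFINE_SPINLOCK(example_lock);

static void example_add(struct example_item *item)
{
	spin_lock(&example_lock);
	hlist_add_tail_rcu(&item->node, &example_list);
	spin_unlock(&example_lock);
}

static bool example_contains(int key)
{
	struct example_item *item;
	bool found = false;

	rcu_read_lock();
	hlist_for_each_entry_rcu(item, &example_list, node) {
		if (item->key == key) {
			found = true;
			break;
		}
	}
	rcu_read_unlock();
	return found;
}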
diff --git a/include/linux/rculist_nulls.h b/include/linux/rculist_nulls.h
index 1c33dd7da4a7..4ae95f7e8597 100644
--- a/include/linux/rculist_nulls.h
+++ b/include/linux/rculist_nulls.h
@@ -98,6 +98,45 @@ static inline void hlist_nulls_add_head_rcu(struct hlist_nulls_node *n,
if (!is_a_nulls(first))
first->pprev = &n->next;
}
+
+/**
+ * hlist_nulls_add_tail_rcu
+ * @n: the element to add to the hash list.
+ * @h: the list to add to.
+ *
+ * Description:
+ * Adds the specified element to the end of the specified hlist_nulls,
+ * while permitting racing traversals. NOTE: tail insertion requires
+ * list traversal.
+ *
+ * The caller must take whatever precautions are necessary
+ * (such as holding appropriate locks) to avoid racing
+ * with another list-mutation primitive, such as hlist_nulls_add_head_rcu()
+ * or hlist_nulls_del_rcu(), running on this same list.
+ * However, it is perfectly legal to run concurrently with
+ * the _rcu list-traversal primitives, such as
+ * hlist_nulls_for_each_entry_rcu(), used to prevent memory-consistency
+ * problems on Alpha CPUs. Regardless of the type of CPU, the
+ * list-traversal primitive must be guarded by rcu_read_lock().
+ */
+static inline void hlist_nulls_add_tail_rcu(struct hlist_nulls_node *n,
+ struct hlist_nulls_head *h)
+{
+ struct hlist_nulls_node *i, *last = NULL;
+
+ for (i = hlist_nulls_first_rcu(h); !is_a_nulls(i);
+ i = hlist_nulls_next_rcu(i))
+ last = i;
+
+ if (last) {
+ n->next = last->next;
+ n->pprev = &last->next;
+ rcu_assign_pointer(hlist_nulls_next_rcu(last), n);
+ } else {
+ hlist_nulls_add_head_rcu(n, h);
+ }
+}
+
/**
* hlist_nulls_for_each_entry_rcu - iterate over rcu list of given type
* @tpos: the type * to use as a loop cursor.
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 2657aff2725b..5f1533e3d032 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -508,14 +508,7 @@ int rcu_read_lock_bh_held(void);
* CONFIG_DEBUG_LOCK_ALLOC, this assumes we are in an RCU-sched read-side
* critical section unless it can prove otherwise.
*/
-#ifdef CONFIG_PREEMPT_COUNT
int rcu_read_lock_sched_held(void);
-#else /* #ifdef CONFIG_PREEMPT_COUNT */
-static inline int rcu_read_lock_sched_held(void)
-{
- return 1;
-}
-#endif /* #else #ifdef CONFIG_PREEMPT_COUNT */
#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
@@ -532,18 +525,10 @@ static inline int rcu_read_lock_bh_held(void)
return 1;
}
-#ifdef CONFIG_PREEMPT_COUNT
static inline int rcu_read_lock_sched_held(void)
{
- return preempt_count() != 0 || irqs_disabled();
-}
-#else /* #ifdef CONFIG_PREEMPT_COUNT */
-static inline int rcu_read_lock_sched_held(void)
-{
- return 1;
+ return !preemptible();
}
-#endif /* #else #ifdef CONFIG_PREEMPT_COUNT */
-
#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */
#ifdef CONFIG_PROVE_RCU
@@ -1144,4 +1129,17 @@ static inline void rcu_sysidle_force_exit(void)
#endif /* #else #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */
+/*
+ * Dump the ftrace buffer, but only one time per callsite per boot.
+ */
+#define rcu_ftrace_dump(oops_dump_mode) \
+do { \
+ static atomic_t ___rfd_beenhere = ATOMIC_INIT(0); \
+ \
+ if (!atomic_read(&___rfd_beenhere) && \
+ !atomic_xchg(&___rfd_beenhere, 1)) \
+ ftrace_dump(oops_dump_mode); \
+} while (0)
+
+
#endif /* __LINUX_RCUPDATE_H */
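
Because the macro keeps a static atomic per call site, each call site dumps at most once per boot. An illustrative (hypothetical) use in a latency check:

/* Hypothetical debug helper: dump the ftrace buffer if a callback runs late. */
static void example_check_cb_latency(unsigned long delay_jiffies)
{
	if (delay_jiffies > HZ) {
		pr_warn("example: callback delayed by %lu jiffies\n",
			delay_jiffies);
		rcu_ftrace_dump(DUMP_ALL);	/* fires only once per boot here */
	}
}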
diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h
index 64809aea661c..93aea75029fb 100644
--- a/include/linux/rcutiny.h
+++ b/include/linux/rcutiny.h
@@ -149,6 +149,22 @@ static inline unsigned long rcu_batches_completed_sched(void)
return 0;
}
+/*
+ * Return the number of expedited grace periods completed.
+ */
+static inline unsigned long rcu_exp_batches_completed(void)
+{
+ return 0;
+}
+
+/*
+ * Return the number of expedited sched grace periods completed.
+ */
+static inline unsigned long rcu_exp_batches_completed_sched(void)
+{
+ return 0;
+}
+
static inline void rcu_force_quiescent_state(void)
{
}
diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h
index ad1eda9fa4da..5043cb823fb2 100644
--- a/include/linux/rcutree.h
+++ b/include/linux/rcutree.h
@@ -87,6 +87,8 @@ unsigned long rcu_batches_started_sched(void);
unsigned long rcu_batches_completed(void);
unsigned long rcu_batches_completed_bh(void);
unsigned long rcu_batches_completed_sched(void);
+unsigned long rcu_exp_batches_completed(void);
+unsigned long rcu_exp_batches_completed_sched(void);
void show_rcu_gp_kthreads(void);
void rcu_force_quiescent_state(void);
diff --git a/include/linux/regulator/act8865.h b/include/linux/regulator/act8865.h
index 2eb386017fa5..113d861a1e4c 100644
--- a/include/linux/regulator/act8865.h
+++ b/include/linux/regulator/act8865.h
@@ -69,11 +69,13 @@ enum {
* @id: regulator id
* @name: regulator name
* @init_data: regulator init data
+ * @of_node: device tree node (optional)
*/
struct act8865_regulator_data {
int id;
const char *name;
struct regulator_init_data *init_data;
+ struct device_node *of_node;
};
/**
diff --git a/include/linux/regulator/driver.h b/include/linux/regulator/driver.h
index cd271e89a7e6..fcfa40a6692c 100644
--- a/include/linux/regulator/driver.h
+++ b/include/linux/regulator/driver.h
@@ -93,6 +93,9 @@ struct regulator_linear_range {
* @get_current_limit: Get the configured limit for a current-limited regulator.
* @set_input_current_limit: Configure an input limit.
*
+ * @set_over_current_protection: Enable automatic shutdown when an
+ * over-current event is detected.
+ *
* @set_active_discharge: Set active discharge enable/disable of regulators.
*
* @set_mode: Set the configured operating mode for the regulator.
@@ -255,6 +258,8 @@ enum regulator_type {
*
* @vsel_reg: Register for selector when using regulator_regmap_X_voltage_
* @vsel_mask: Mask for register bitfield used for selector
+ * @csel_reg: Register for TPS65218 LS3 current regulator
+ * @csel_mask: Mask for TPS65218 LS3 current regulator
* @apply_reg: Register for initiate voltage change on the output when
* using regulator_set_voltage_sel_regmap
* @apply_bit: Register bitfield used for initiate voltage change on the
@@ -292,7 +297,7 @@ struct regulator_desc {
const struct regulator_desc *,
struct regulator_config *);
int id;
- bool continuous_voltage_range;
+ unsigned int continuous_voltage_range:1;
unsigned n_voltages;
const struct regulator_ops *ops;
int irq;
diff --git a/include/linux/regulator/machine.h b/include/linux/regulator/machine.h
index 5d627c83a630..ad3e5158e586 100644
--- a/include/linux/regulator/machine.h
+++ b/include/linux/regulator/machine.h
@@ -97,6 +97,7 @@ struct regulator_state {
* @ramp_disable: Disable ramp delay when initialising or when setting voltage.
* @soft_start: Enable soft start so that voltage ramps slowly.
* @pull_down: Enable pull down when regulator is disabled.
+ * @over_current_protection: Auto disable on over current event.
*
* @input_uV: Input voltage for regulator when supplied by another regulator.
*
diff --git a/include/linux/regulator/max8973-regulator.h b/include/linux/regulator/max8973-regulator.h
index f6a8a16a0d4d..2fcb9980262a 100644
--- a/include/linux/regulator/max8973-regulator.h
+++ b/include/linux/regulator/max8973-regulator.h
@@ -54,6 +54,10 @@
* @reg_init_data: The regulator init data.
* @control_flags: Control flags which are ORed value of above flags to
* configure device.
+ * @junction_temp_warning: Junction temperature in millicelsius at which the
+ * warning should be raised. Thermal functionality is
+ * only supported on MAX77621. The warning thresholds
+ * supported by MAX77621 are 120C and 140C.
* @enable_ext_control: Enable the voltage enable/disable through external
* control signal from EN input pin. If it is false then
* voltage output will be enabled/disabled through EN bit of
@@ -67,6 +71,7 @@
struct max8973_regulator_platform_data {
struct regulator_init_data *reg_init_data;
unsigned long control_flags;
+ unsigned long junction_temp_warning;
bool enable_ext_control;
int enable_gpio;
int dvs_gpio;
diff --git a/include/linux/remoteproc.h b/include/linux/remoteproc.h
index 9c4e1384f636..1c457a8dd5a6 100644
--- a/include/linux/remoteproc.h
+++ b/include/linux/remoteproc.h
@@ -365,6 +365,8 @@ enum rproc_state {
/**
* enum rproc_crash_type - remote processor crash types
* @RPROC_MMUFAULT: iommu fault
+ * @RPROC_WATCHDOG: watchdog bite
+ * @RPROC_FATAL_ERROR: fatal error
*
* Each element of the enum is used as an array index. So that, the value of
* the elements should be always something sane.
@@ -373,6 +375,8 @@ enum rproc_state {
*/
enum rproc_crash_type {
RPROC_MMUFAULT,
+ RPROC_WATCHDOG,
+ RPROC_FATAL_ERROR,
};
/**
diff --git a/include/linux/reset-controller.h b/include/linux/reset-controller.h
index a3a5bcdb1d02..b91ba932bbd4 100644
--- a/include/linux/reset-controller.h
+++ b/include/linux/reset-controller.h
@@ -31,6 +31,7 @@ struct of_phandle_args;
* @ops: a pointer to device specific struct reset_control_ops
* @owner: kernel module of the reset controller driver
* @list: internal list of reset controller devices
+ * @reset_control_head: head of internal list of requested reset controls
* @of_node: corresponding device tree node as phandle target
* @of_reset_n_cells: number of cells in reset line specifiers
* @of_xlate: translation function to translate from specifier as found in the
@@ -41,6 +42,7 @@ struct reset_controller_dev {
const struct reset_control_ops *ops;
struct module *owner;
struct list_head list;
+ struct list_head reset_control_head;
struct device_node *of_node;
int of_reset_n_cells;
int (*of_xlate)(struct reset_controller_dev *rcdev,
diff --git a/include/linux/reset.h b/include/linux/reset.h
index c4c097de0ba9..ec0306ce7b92 100644
--- a/include/linux/reset.h
+++ b/include/linux/reset.h
@@ -1,8 +1,8 @@
#ifndef _LINUX_RESET_H_
#define _LINUX_RESET_H_
-struct device;
-struct device_node;
+#include <linux/device.h>
+
struct reset_control;
#ifdef CONFIG_RESET_CONTROLLER
@@ -12,9 +12,11 @@ int reset_control_assert(struct reset_control *rstc);
int reset_control_deassert(struct reset_control *rstc);
int reset_control_status(struct reset_control *rstc);
-struct reset_control *reset_control_get(struct device *dev, const char *id);
+struct reset_control *__of_reset_control_get(struct device_node *node,
+ const char *id, int index, int shared);
void reset_control_put(struct reset_control *rstc);
-struct reset_control *devm_reset_control_get(struct device *dev, const char *id);
+struct reset_control *__devm_reset_control_get(struct device *dev,
+ const char *id, int index, int shared);
int __must_check device_reset(struct device *dev);
@@ -23,24 +25,6 @@ static inline int device_reset_optional(struct device *dev)
return device_reset(dev);
}
-static inline struct reset_control *reset_control_get_optional(
- struct device *dev, const char *id)
-{
- return reset_control_get(dev, id);
-}
-
-static inline struct reset_control *devm_reset_control_get_optional(
- struct device *dev, const char *id)
-{
- return devm_reset_control_get(dev, id);
-}
-
-struct reset_control *of_reset_control_get(struct device_node *node,
- const char *id);
-
-struct reset_control *of_reset_control_get_by_index(
- struct device_node *node, int index);
-
#else
static inline int reset_control_reset(struct reset_control *rstc)
@@ -72,49 +56,191 @@ static inline void reset_control_put(struct reset_control *rstc)
WARN_ON(1);
}
+static inline int __must_check device_reset(struct device *dev)
+{
+ WARN_ON(1);
+ return -ENOTSUPP;
+}
+
static inline int device_reset_optional(struct device *dev)
{
return -ENOTSUPP;
}
-static inline struct reset_control *__must_check reset_control_get(
- struct device *dev, const char *id)
+static inline struct reset_control *__of_reset_control_get(
+ struct device_node *node,
+ const char *id, int index, int shared)
{
- WARN_ON(1);
return ERR_PTR(-EINVAL);
}
-static inline struct reset_control *__must_check devm_reset_control_get(
+static inline struct reset_control *__devm_reset_control_get(
+ struct device *dev,
+ const char *id, int index, int shared)
+{
+ return ERR_PTR(-EINVAL);
+}
+
+#endif /* CONFIG_RESET_CONTROLLER */
+
+/**
+ * reset_control_get - Lookup and obtain an exclusive reference to a
+ * reset controller.
+ * @dev: device to be reset by the controller
+ * @id: reset line name
+ *
+ * Returns a struct reset_control or IS_ERR() condition containing errno.
+ * If this function is called more than once for the same reset_control it will
+ * return -EBUSY.
+ *
+ * See reset_control_get_shared for details on shared references to
+ * reset-controls.
+ *
+ * Use of id names is optional.
+ */
+static inline struct reset_control *__must_check reset_control_get(
struct device *dev, const char *id)
{
+#ifndef CONFIG_RESET_CONTROLLER
WARN_ON(1);
- return ERR_PTR(-EINVAL);
+#endif
+ return __of_reset_control_get(dev ? dev->of_node : NULL, id, 0, 0);
}
static inline struct reset_control *reset_control_get_optional(
struct device *dev, const char *id)
{
- return ERR_PTR(-ENOTSUPP);
+ return __of_reset_control_get(dev ? dev->of_node : NULL, id, 0, 0);
}
-static inline struct reset_control *devm_reset_control_get_optional(
+/**
+ * reset_control_get_shared - Lookup and obtain a shared reference to a
+ * reset controller.
+ * @dev: device to be reset by the controller
+ * @id: reset line name
+ *
+ * Returns a struct reset_control or IS_ERR() condition containing errno.
+ * This function is intended for use with reset-controls which are shared
+ * between hardware-blocks.
+ *
+ * When a reset-control is shared, the behavior of reset_control_assert /
+ * deassert is changed; the reset core keeps track of a deassert_count and
+ * only (re-)asserts the reset after reset_control_assert has been called as
+ * many times as reset_control_deassert was called. Also see the remark about
+ * shared reset-controls in the reset_control_assert docs.
+ *
+ * Calling reset_control_assert without first calling reset_control_deassert
+ * is not allowed on a shared reset control. Calling reset_control_reset is
+ * also not allowed on a shared reset control.
+ *
+ * Use of id names is optional.
+ */
+static inline struct reset_control *reset_control_get_shared(
struct device *dev, const char *id)
{
- return ERR_PTR(-ENOTSUPP);
+ return __of_reset_control_get(dev ? dev->of_node : NULL, id, 0, 1);
}
+/**
+ * of_reset_control_get - Lookup and obtain an exclusive reference to a
+ * reset controller.
+ * @node: device to be reset by the controller
+ * @id: reset line name
+ *
+ * Returns a struct reset_control or IS_ERR() condition containing errno.
+ *
+ * Use of id names is optional.
+ */
static inline struct reset_control *of_reset_control_get(
struct device_node *node, const char *id)
{
- return ERR_PTR(-ENOTSUPP);
+ return __of_reset_control_get(node, id, 0, 0);
}
+/**
+ * of_reset_control_get_by_index - Lookup and obtain an exclusive reference to
+ * a reset controller by index.
+ * @node: device to be reset by the controller
+ * @index: index of the reset controller
+ *
+ * This is used to perform a series of resets for a device or power domain,
+ * in any order. Returns a struct reset_control or IS_ERR() condition
+ * containing errno.
+ */
static inline struct reset_control *of_reset_control_get_by_index(
- struct device_node *node, int index)
+ struct device_node *node, int index)
{
- return ERR_PTR(-ENOTSUPP);
+ return __of_reset_control_get(node, NULL, index, 0);
}
-#endif /* CONFIG_RESET_CONTROLLER */
+/**
+ * devm_reset_control_get - resource managed reset_control_get()
+ * @dev: device to be reset by the controller
+ * @id: reset line name
+ *
+ * Managed reset_control_get(). For reset controllers returned from this
+ * function, reset_control_put() is called automatically on driver detach.
+ * See reset_control_get() for more information.
+ */
+static inline struct reset_control *__must_check devm_reset_control_get(
+ struct device *dev, const char *id)
+{
+#ifndef CONFIG_RESET_CONTROLLER
+ WARN_ON(1);
+#endif
+ return __devm_reset_control_get(dev, id, 0, 0);
+}
+
+static inline struct reset_control *devm_reset_control_get_optional(
+ struct device *dev, const char *id)
+{
+ return __devm_reset_control_get(dev, id, 0, 0);
+}
+
+/**
+ * devm_reset_control_get_by_index - resource managed reset_control_get
+ * @dev: device to be reset by the controller
+ * @index: index of the reset controller
+ *
+ * Managed reset_control_get(). For reset controllers returned from this
+ * function, reset_control_put() is called automatically on driver detach.
+ * See reset_control_get() for more information.
+ */
+static inline struct reset_control *devm_reset_control_get_by_index(
+ struct device *dev, int index)
+{
+ return __devm_reset_control_get(dev, NULL, index, 0);
+}
+
+/**
+ * devm_reset_control_get_shared - resource managed reset_control_get_shared()
+ * @dev: device to be reset by the controller
+ * @id: reset line name
+ *
+ * Managed reset_control_get_shared(). For reset controllers returned from
+ * this function, reset_control_put() is called automatically on driver detach.
+ * See reset_control_get_shared() for more information.
+ */
+static inline struct reset_control *devm_reset_control_get_shared(
+ struct device *dev, const char *id)
+{
+ return __devm_reset_control_get(dev, id, 0, 1);
+}
+
+/**
+ * devm_reset_control_get_shared_by_index - resource managed
+ * reset_control_get_shared
+ * @dev: device to be reset by the controller
+ * @index: index of the reset controller
+ *
+ * Managed reset_control_get_shared(). For reset controllers returned from
+ * this function, reset_control_put() is called automatically on driver detach.
+ * See reset_control_get_shared() for more information.
+ */
+static inline struct reset_control *devm_reset_control_get_shared_by_index(
+ struct device *dev, int index)
+{
+ return __devm_reset_control_get(dev, NULL, index, 1);
+}
#endif
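
To make the exclusive/shared distinction concrete, here is a hedged consumer sketch (the "core" and "bus" line names and the delay are hypothetical): the exclusive control may be pulsed, while the shared one may only see balanced deassert/assert calls.

static int example_reset_probe(struct device *dev)
{
	struct reset_control *rst;

	/* Exclusive control: this driver owns the line and may pulse it. */
	rst = devm_reset_control_get(dev, "core");
	if (IS_ERR(rst))
		return PTR_ERR(rst);

	reset_control_assert(rst);
	usleep_range(10, 20);
	reset_control_deassert(rst);

	/* Shared control: only balanced deassert/assert pairs are allowed. */
	rst = devm_reset_control_get_shared(dev, "bus");
	if (IS_ERR(rst))
		return PTR_ERR(rst);

	return reset_control_deassert(rst);
}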
diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h
index 63bd7601b6de..3eef0802a0cd 100644
--- a/include/linux/rhashtable.h
+++ b/include/linux/rhashtable.h
@@ -346,7 +346,8 @@ struct bucket_table *rhashtable_insert_slow(struct rhashtable *ht,
struct bucket_table *old_tbl);
int rhashtable_insert_rehash(struct rhashtable *ht, struct bucket_table *tbl);
-int rhashtable_walk_init(struct rhashtable *ht, struct rhashtable_iter *iter);
+int rhashtable_walk_init(struct rhashtable *ht, struct rhashtable_iter *iter,
+ gfp_t gfp);
void rhashtable_walk_exit(struct rhashtable_iter *iter);
int rhashtable_walk_start(struct rhashtable_iter *iter) __acquires(RCU);
void *rhashtable_walk_next(struct rhashtable_iter *iter);
diff --git a/include/linux/rpmsg.h b/include/linux/rpmsg.h
index 82a673905edb..ada50ff36da0 100644
--- a/include/linux/rpmsg.h
+++ b/include/linux/rpmsg.h
@@ -169,7 +169,7 @@ struct rpmsg_driver {
int register_rpmsg_device(struct rpmsg_channel *dev);
void unregister_rpmsg_device(struct rpmsg_channel *dev);
-int register_rpmsg_driver(struct rpmsg_driver *drv);
+int __register_rpmsg_driver(struct rpmsg_driver *drv, struct module *owner);
void unregister_rpmsg_driver(struct rpmsg_driver *drv);
void rpmsg_destroy_ept(struct rpmsg_endpoint *);
struct rpmsg_endpoint *rpmsg_create_ept(struct rpmsg_channel *,
@@ -177,6 +177,22 @@ struct rpmsg_endpoint *rpmsg_create_ept(struct rpmsg_channel *,
int
rpmsg_send_offchannel_raw(struct rpmsg_channel *, u32, u32, void *, int, bool);
+/* use a macro to avoid include chaining to get THIS_MODULE */
+#define register_rpmsg_driver(drv) \
+ __register_rpmsg_driver(drv, THIS_MODULE)
+
+/**
+ * module_rpmsg_driver() - Helper macro for registering an rpmsg driver
+ * @__rpmsg_driver: rpmsg_driver struct
+ *
+ * Helper macro for rpmsg drivers which do not do anything special in module
+ * init/exit. This eliminates a lot of boilerplate. Each module may only
+ * use this macro once, and calling it replaces module_init() and module_exit()
+ */
+#define module_rpmsg_driver(__rpmsg_driver) \
+ module_driver(__rpmsg_driver, register_rpmsg_driver, \
+ unregister_rpmsg_driver)
+
/**
* rpmsg_send() - send a message across to the remote processor
* @rpdev: the rpmsg channel
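
Putting register_rpmsg_driver() and module_rpmsg_driver() together, a minimal client might look like the sketch below; the channel name, driver name, and callback body are all made up for illustration.

static void example_rpmsg_cb(struct rpmsg_channel *rpdev, void *data, int len,
			     void *priv, u32 src)
{
	dev_info(&rpdev->dev, "received %d bytes from 0x%x\n", len, src);
}

static int example_rpmsg_probe(struct rpmsg_channel *rpdev)
{
	dev_info(&rpdev->dev, "channel: 0x%x -> 0x%x\n", rpdev->src, rpdev->dst);
	return 0;
}

static void example_rpmsg_remove(struct rpmsg_channel *rpdev)
{
}

static const struct rpmsg_device_id example_rpmsg_id_table[] = {
	{ .name = "example-channel" },
	{ },
};

static struct rpmsg_driver example_rpmsg_driver = {
	.drv.name	= KBUILD_MODNAME,
	.id_table	= example_rpmsg_id_table,
	.probe		= example_rpmsg_probe,
	.callback	= example_rpmsg_cb,
	.remove		= example_rpmsg_remove,
};

module_rpmsg_driver(example_rpmsg_driver);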
diff --git a/include/linux/rwsem-spinlock.h b/include/linux/rwsem-spinlock.h
index 561e8615528d..ae0528b834cd 100644
--- a/include/linux/rwsem-spinlock.h
+++ b/include/linux/rwsem-spinlock.h
@@ -34,7 +34,7 @@ struct rw_semaphore {
extern void __down_read(struct rw_semaphore *sem);
extern int __down_read_trylock(struct rw_semaphore *sem);
extern void __down_write(struct rw_semaphore *sem);
-extern void __down_write_nested(struct rw_semaphore *sem, int subclass);
+extern int __must_check __down_write_killable(struct rw_semaphore *sem);
extern int __down_write_trylock(struct rw_semaphore *sem);
extern void __up_read(struct rw_semaphore *sem);
extern void __up_write(struct rw_semaphore *sem);
diff --git a/include/linux/rwsem.h b/include/linux/rwsem.h
index 8f498cdde280..d1c12d160ace 100644
--- a/include/linux/rwsem.h
+++ b/include/linux/rwsem.h
@@ -14,6 +14,7 @@
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/atomic.h>
+#include <linux/err.h>
#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
#include <linux/osq_lock.h>
#endif
@@ -43,6 +44,7 @@ struct rw_semaphore {
extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem);
extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem);
+extern struct rw_semaphore *rwsem_down_write_failed_killable(struct rw_semaphore *sem);
extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *);
extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem);
@@ -116,6 +118,7 @@ extern int down_read_trylock(struct rw_semaphore *sem);
* lock for writing
*/
extern void down_write(struct rw_semaphore *sem);
+extern int __must_check down_write_killable(struct rw_semaphore *sem);
/*
* trylock for writing -- returns 1 if successful, 0 if contention
diff --git a/include/linux/scatterlist.h b/include/linux/scatterlist.h
index 556ec1ea2574..cb3c8fe6acd7 100644
--- a/include/linux/scatterlist.h
+++ b/include/linux/scatterlist.h
@@ -286,6 +286,31 @@ size_t sg_pcopy_to_buffer(struct scatterlist *sgl, unsigned int nents,
#define SG_MAX_SINGLE_ALLOC (PAGE_SIZE / sizeof(struct scatterlist))
/*
+ * The maximum number of SG segments that we will put inside a
+ * scatterlist (unless chaining is used). Should ideally fit inside a
+ * single page, to avoid a higher order allocation. We could define this
+ * to SG_MAX_SINGLE_ALLOC to pack correctly at the highest order. The
+ * minimum value is 32
+ */
+#define SG_CHUNK_SIZE 128
+
+/*
+ * Like SG_CHUNK_SIZE, but for archs that have sg chaining. This limit
+ * is totally arbitrary; a setting of 2048 will get you at least 8 MB I/Os.
+ */
+#ifdef CONFIG_ARCH_HAS_SG_CHAIN
+#define SG_MAX_SEGMENTS 2048
+#else
+#define SG_MAX_SEGMENTS SG_CHUNK_SIZE
+#endif
+
+#ifdef CONFIG_SG_POOL
+void sg_free_table_chained(struct sg_table *table, bool first_chunk);
+int sg_alloc_table_chained(struct sg_table *table, int nents,
+ struct scatterlist *first_chunk);
+#endif
+
+/*
* sg page iterator
*
* Iterates over sg entries page-by-page. On each successful iteration,
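
Under CONFIG_SG_POOL, a caller typically embeds an SG_CHUNK_SIZE first chunk and lets the pool allocator chain further chunks only when needed; a hedged sketch (struct example_cmd is hypothetical):

struct example_cmd {
	struct sg_table table;
	struct scatterlist first_chunk[SG_CHUNK_SIZE];
};

static int example_map(struct example_cmd *cmd, int nents)
{
	/* Uses first_chunk when nents fits, chains pool-allocated chunks otherwise. */
	if (sg_alloc_table_chained(&cmd->table, nents, cmd->first_chunk))
		return -ENOMEM;

	/* ... fill cmd->table.sgl and DMA-map it ... */
	return 0;
}

static void example_unmap(struct example_cmd *cmd)
{
	sg_free_table_chained(&cmd->table, true);
}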
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 60bba7e032dc..31bd0d97d178 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -40,7 +40,6 @@ struct sched_param {
#include <linux/pid.h>
#include <linux/percpu.h>
#include <linux/topology.h>
-#include <linux/proportions.h>
#include <linux/seccomp.h>
#include <linux/rcupdate.h>
#include <linux/rculist.h>
@@ -178,9 +177,11 @@ extern void get_iowait_load(unsigned long *nr_waiters, unsigned long *load);
extern void calc_global_load(unsigned long ticks);
#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
-extern void update_cpu_load_nohz(int active);
+extern void cpu_load_update_nohz_start(void);
+extern void cpu_load_update_nohz_stop(void);
#else
-static inline void update_cpu_load_nohz(int active) { }
+static inline void cpu_load_update_nohz_start(void) { }
+static inline void cpu_load_update_nohz_stop(void) { }
#endif
extern void dump_cpu_task(int cpu);
@@ -372,6 +373,15 @@ extern void cpu_init (void);
extern void trap_init(void);
extern void update_process_times(int user);
extern void scheduler_tick(void);
+extern int sched_cpu_starting(unsigned int cpu);
+extern int sched_cpu_activate(unsigned int cpu);
+extern int sched_cpu_deactivate(unsigned int cpu);
+
+#ifdef CONFIG_HOTPLUG_CPU
+extern int sched_cpu_dying(unsigned int cpu);
+#else
+# define sched_cpu_dying NULL
+#endif
extern void sched_show_task(struct task_struct *p);
@@ -720,7 +730,7 @@ struct signal_struct {
struct task_cputime cputime_expires;
#ifdef CONFIG_NO_HZ_FULL
- unsigned long tick_dep_mask;
+ atomic_t tick_dep_mask;
#endif
struct list_head cpu_timers[3];
@@ -935,9 +945,19 @@ enum cpu_idle_type {
};
/*
+ * Integer metrics need fixed point arithmetic, e.g., sched/fair
+ * has a few: load, load_avg, util_avg, freq, and capacity.
+ *
+ * We define a basic fixed point arithmetic range, and then formalize
+ * all these metrics based on that basic range.
+ */
+# define SCHED_FIXEDPOINT_SHIFT 10
+# define SCHED_FIXEDPOINT_SCALE (1L << SCHED_FIXEDPOINT_SHIFT)
+
+/*
* Increase resolution of cpu_capacity calculations
*/
-#define SCHED_CAPACITY_SHIFT 10
+#define SCHED_CAPACITY_SHIFT SCHED_FIXEDPOINT_SHIFT
#define SCHED_CAPACITY_SCALE (1L << SCHED_CAPACITY_SHIFT)
/*
@@ -1199,18 +1219,56 @@ struct load_weight {
};
/*
- * The load_avg/util_avg accumulates an infinite geometric series.
- * 1) load_avg factors frequency scaling into the amount of time that a
- * sched_entity is runnable on a rq into its weight. For cfs_rq, it is the
- * aggregated such weights of all runnable and blocked sched_entities.
- * 2) util_avg factors frequency and cpu scaling into the amount of time
- * that a sched_entity is running on a CPU, in the range [0..SCHED_LOAD_SCALE].
- * For cfs_rq, it is the aggregated such times of all runnable and
+ * The load_avg/util_avg accumulates an infinite geometric series
+ * (see __update_load_avg() in kernel/sched/fair.c).
+ *
+ * [load_avg definition]
+ *
+ * load_avg = runnable% * scale_load_down(load)
+ *
+ * where runnable% is the time ratio that a sched_entity is runnable.
+ * For cfs_rq, it is the aggregated load_avg of all runnable and
* blocked sched_entities.
- * The 64 bit load_sum can:
- * 1) for cfs_rq, afford 4353082796 (=2^64/47742/88761) entities with
- * the highest weight (=88761) always runnable, we should not overflow
- * 2) for entity, support any load.weight always runnable
+ *
+ * load_avg may also take frequency scaling into account:
+ *
+ * load_avg = runnable% * scale_load_down(load) * freq%
+ *
+ * where freq% is the CPU frequency normalized to the highest frequency.
+ *
+ * [util_avg definition]
+ *
+ * util_avg = running% * SCHED_CAPACITY_SCALE
+ *
+ * where running% is the time ratio that a sched_entity is running on
+ * a CPU. For cfs_rq, it is the aggregated util_avg of all runnable
+ * and blocked sched_entities.
+ *
+ * util_avg may also factor frequency scaling and CPU capacity scaling:
+ *
+ * util_avg = running% * SCHED_CAPACITY_SCALE * freq% * capacity%
+ *
+ * where freq% is the same as above, and capacity% is the CPU capacity
+ * normalized to the greatest capacity (due to uarch differences, etc).
+ *
+ * N.B., the above ratios (runnable%, running%, freq%, and capacity%)
+ * themselves are in the range of [0, 1]. To do fixed point arithmetic,
+ * we therefore scale them to as large a range as necessary. This is for
+ * example reflected by util_avg's SCHED_CAPACITY_SCALE.
+ *
+ * [Overflow issue]
+ *
+ * The 64-bit load_sum can have 4353082796 (=2^64/47742/88761) entities
+ * with the highest load (=88761), always runnable on a single cfs_rq,
+ * and should not overflow as the number already hits PID_MAX_LIMIT.
+ *
+ * For all other cases (including 32-bit kernels), struct load_weight's
+ * weight will overflow first before we do, because:
+ *
+ * Max(load_avg) <= Max(load.weight)
+ *
+ * Then it is the load_weight's responsibility to consider overflow
+ * issues.
*/
struct sched_avg {
u64 last_update_time, load_sum;
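
As an illustrative reading of those definitions (numbers invented for the example, not taken from the patch):

/*
 * With SCHED_FIXEDPOINT_SCALE == 1024, an entity with
 * scale_load_down(load) == 1024 that is runnable 50% of the time at 80%
 * of the maximum frequency ends up with
 *
 *   load_avg ~= (512 * 1024 * 819) >> (2 * SCHED_FIXEDPOINT_SHIFT) ~= 409
 *
 * and a task running 25% of the time on a CPU at full frequency and
 * capacity ends up with
 *
 *   util_avg ~= SCHED_CAPACITY_SCALE / 4 == 256
 */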
@@ -1549,7 +1607,7 @@ struct task_struct {
#endif
#ifdef CONFIG_NO_HZ_FULL
- unsigned long tick_dep_mask;
+ atomic_t tick_dep_mask;
#endif
unsigned long nvcsw, nivcsw; /* context switch counts */
u64 start_time; /* monotonic time in nsec */
@@ -1596,6 +1654,7 @@ struct task_struct {
unsigned long sas_ss_sp;
size_t sas_ss_size;
+ unsigned sas_ss_flags;
struct callback_head *task_works;
@@ -1871,6 +1930,11 @@ extern int arch_task_struct_size __read_mostly;
/* Future-safe accessor for struct task_struct's cpus_allowed. */
#define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
+static inline int tsk_nr_cpus_allowed(struct task_struct *p)
+{
+ return p->nr_cpus_allowed;
+}
+
#define TNF_MIGRATED 0x01
#define TNF_NO_GROUP 0x02
#define TNF_SHARED 0x04
@@ -2303,8 +2367,6 @@ extern unsigned long long notrace sched_clock(void);
/*
* See the comment in kernel/sched/clock.c
*/
-extern u64 cpu_clock(int cpu);
-extern u64 local_clock(void);
extern u64 running_clock(void);
extern u64 sched_clock_cpu(int cpu);
@@ -2323,6 +2385,16 @@ static inline void sched_clock_idle_sleep_event(void)
static inline void sched_clock_idle_wakeup_event(u64 delta_ns)
{
}
+
+static inline u64 cpu_clock(int cpu)
+{
+ return sched_clock();
+}
+
+static inline u64 local_clock(void)
+{
+ return sched_clock();
+}
#else
/*
* Architectures can set this to 1 if they have specified
@@ -2337,6 +2409,26 @@ extern void clear_sched_clock_stable(void);
extern void sched_clock_tick(void);
extern void sched_clock_idle_sleep_event(void);
extern void sched_clock_idle_wakeup_event(u64 delta_ns);
+
+/*
+ * As outlined in clock.c, provides a fast, high resolution, nanosecond
+ * time source that is monotonic per cpu argument and has bounded drift
+ * between cpus.
+ *
+ * ######################### BIG FAT WARNING ##########################
+ * # when comparing cpu_clock(i) to cpu_clock(j) for i != j, time can #
+ * # go backwards !! #
+ * ####################################################################
+ */
+static inline u64 cpu_clock(int cpu)
+{
+ return sched_clock_cpu(cpu);
+}
+
+static inline u64 local_clock(void)
+{
+ return sched_clock_cpu(raw_smp_processor_id());
+}
#endif
#ifdef CONFIG_IRQ_TIME_ACCOUNTING
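
In practice the common pattern is to take two local_clock() samples in the same context and only ever subtract them there, which sidesteps the cross-CPU caveat above; a small hypothetical helper:

static void example_time_section(void)
{
	u64 t0, t1;

	t0 = local_clock();
	/* ... the section being measured ... */
	t1 = local_clock();

	pr_debug("example: section took %llu ns\n", t1 - t0);
}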
@@ -2575,6 +2667,18 @@ static inline int kill_cad_pid(int sig, int priv)
*/
static inline int on_sig_stack(unsigned long sp)
{
+ /*
+ * If the signal stack is SS_AUTODISARM then, by construction, we
+ * can't be on the signal stack unless user code deliberately set
+ * SS_AUTODISARM when we were already on it.
+ *
+ * This improves reliability: if user state gets corrupted such that
+ * the stack pointer points very close to the end of the signal stack,
+ * then this check will enable the signal to be handled anyway.
+ */
+ if (current->sas_ss_flags & SS_AUTODISARM)
+ return 0;
+
#ifdef CONFIG_STACK_GROWSUP
return sp >= current->sas_ss_sp &&
sp - current->sas_ss_sp < current->sas_ss_size;
@@ -2592,6 +2696,13 @@ static inline int sas_ss_flags(unsigned long sp)
return on_sig_stack(sp) ? SS_ONSTACK : 0;
}
+static inline void sas_ss_reset(struct task_struct *p)
+{
+ p->sas_ss_sp = 0;
+ p->sas_ss_size = 0;
+ p->sas_ss_flags = SS_DISABLE;
+}
+
static inline unsigned long sigsp(unsigned long sp, struct ksignal *ksig)
{
if (unlikely((ksig->ka.sa.sa_flags & SA_ONSTACK)) && ! sas_ss_flags(sp))
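
From userspace, the flag this code checks would be requested via sigaltstack(); a hedged sketch, assuming a uapi/libc that already exposes SS_AUTODISARM:

/* Userspace sketch: an alternate signal stack that disarms itself on entry. */
#include <signal.h>
#include <stdlib.h>

static int example_setup_sigstack(void)
{
	stack_t ss = {
		.ss_sp = malloc(SIGSTKSZ),
		.ss_size = SIGSTKSZ,
		.ss_flags = SS_AUTODISARM,	/* kernel re-arms it on sigreturn */
	};

	if (!ss.ss_sp)
		return -1;

	return sigaltstack(&ss, NULL);
}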
@@ -3240,7 +3351,10 @@ struct update_util_data {
u64 time, unsigned long util, unsigned long max);
};
-void cpufreq_set_update_util_data(int cpu, struct update_util_data *data);
+void cpufreq_add_update_util_hook(int cpu, struct update_util_data *data,
+ void (*func)(struct update_util_data *data, u64 time,
+ unsigned long util, unsigned long max));
+void cpufreq_remove_update_util_hook(int cpu);
#endif /* CONFIG_CPU_FREQ */
#endif
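
A governor-style consumer would now pass its callback explicitly when registering, and unhook per CPU on stop; a rough sketch under those assumptions (struct example_gov_cpu and the helpers are hypothetical):

struct example_gov_cpu {
	struct update_util_data update_util;
	int cpu;
};

static DEFINE_PER_CPU(struct example_gov_cpu, example_gov_cpu);

static void example_update_util(struct update_util_data *data, u64 time,
				unsigned long util, unsigned long max)
{
	struct example_gov_cpu *egc =
		container_of(data, struct example_gov_cpu, update_util);

	/* e.g. kick a frequency re-evaluation when util/max crosses a limit */
	(void)egc;
}

static void example_gov_start(int cpu)
{
	struct example_gov_cpu *egc = &per_cpu(example_gov_cpu, cpu);

	egc->cpu = cpu;
	cpufreq_add_update_util_hook(cpu, &egc->update_util,
				     example_update_util);
}

static void example_gov_stop(int cpu)
{
	cpufreq_remove_update_util_hook(cpu);
	synchronize_sched();	/* wait for any in-flight callback */
}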
diff --git a/include/linux/sctp.h b/include/linux/sctp.h
index a9414fd49dc6..dacb5e711994 100644
--- a/include/linux/sctp.h
+++ b/include/linux/sctp.h
@@ -705,4 +705,71 @@ typedef struct sctp_auth_chunk {
sctp_authhdr_t auth_hdr;
} __packed sctp_auth_chunk_t;
+struct sctp_info {
+ __u32 sctpi_tag;
+ __u32 sctpi_state;
+ __u32 sctpi_rwnd;
+ __u16 sctpi_unackdata;
+ __u16 sctpi_penddata;
+ __u16 sctpi_instrms;
+ __u16 sctpi_outstrms;
+ __u32 sctpi_fragmentation_point;
+ __u32 sctpi_inqueue;
+ __u32 sctpi_outqueue;
+ __u32 sctpi_overall_error;
+ __u32 sctpi_max_burst;
+ __u32 sctpi_maxseg;
+ __u32 sctpi_peer_rwnd;
+ __u32 sctpi_peer_tag;
+ __u8 sctpi_peer_capable;
+ __u8 sctpi_peer_sack;
+ __u16 __reserved1;
+
+ /* assoc status info */
+ __u64 sctpi_isacks;
+ __u64 sctpi_osacks;
+ __u64 sctpi_opackets;
+ __u64 sctpi_ipackets;
+ __u64 sctpi_rtxchunks;
+ __u64 sctpi_outofseqtsns;
+ __u64 sctpi_idupchunks;
+ __u64 sctpi_gapcnt;
+ __u64 sctpi_ouodchunks;
+ __u64 sctpi_iuodchunks;
+ __u64 sctpi_oodchunks;
+ __u64 sctpi_iodchunks;
+ __u64 sctpi_octrlchunks;
+ __u64 sctpi_ictrlchunks;
+
+ /* primary transport info */
+ struct sockaddr_storage sctpi_p_address;
+ __s32 sctpi_p_state;
+ __u32 sctpi_p_cwnd;
+ __u32 sctpi_p_srtt;
+ __u32 sctpi_p_rto;
+ __u32 sctpi_p_hbinterval;
+ __u32 sctpi_p_pathmaxrxt;
+ __u32 sctpi_p_sackdelay;
+ __u32 sctpi_p_sackfreq;
+ __u32 sctpi_p_ssthresh;
+ __u32 sctpi_p_partial_bytes_acked;
+ __u32 sctpi_p_flight_size;
+ __u16 sctpi_p_error;
+ __u16 __reserved2;
+
+ /* sctp sock info */
+ __u32 sctpi_s_autoclose;
+ __u32 sctpi_s_adaptation_ind;
+ __u32 sctpi_s_pd_point;
+ __u8 sctpi_s_nodelay;
+ __u8 sctpi_s_disable_fragments;
+ __u8 sctpi_s_v4mapped;
+ __u8 sctpi_s_frag_interleave;
+};
+
+struct sctp_infox {
+ struct sctp_info *sctpinfo;
+ struct sctp_association *asoc;
+};
+
#endif /* __LINUX_SCTP_H__ */
diff --git a/include/linux/security.h b/include/linux/security.h
index 157f0cb1e4d2..14df373ff2ca 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -71,7 +71,7 @@ struct timezone;
/* These functions are in security/commoncap.c */
extern int cap_capable(const struct cred *cred, struct user_namespace *ns,
int cap, int audit);
-extern int cap_settime(const struct timespec *ts, const struct timezone *tz);
+extern int cap_settime(const struct timespec64 *ts, const struct timezone *tz);
extern int cap_ptrace_access_check(struct task_struct *child, unsigned int mode);
extern int cap_ptrace_traceme(struct task_struct *parent);
extern int cap_capget(struct task_struct *target, kernel_cap_t *effective, kernel_cap_t *inheritable, kernel_cap_t *permitted);
@@ -208,7 +208,13 @@ int security_capable_noaudit(const struct cred *cred, struct user_namespace *ns,
int security_quotactl(int cmds, int type, int id, struct super_block *sb);
int security_quota_on(struct dentry *dentry);
int security_syslog(int type);
-int security_settime(const struct timespec *ts, const struct timezone *tz);
+int security_settime64(const struct timespec64 *ts, const struct timezone *tz);
+static inline int security_settime(const struct timespec *ts, const struct timezone *tz)
+{
+ struct timespec64 ts64 = timespec_to_timespec64(*ts);
+
+ return security_settime64(&ts64, tz);
+}
int security_vm_enough_memory_mm(struct mm_struct *mm, long pages);
int security_bprm_set_creds(struct linux_binprm *bprm);
int security_bprm_check(struct linux_binprm *bprm);
@@ -222,10 +228,10 @@ int security_sb_remount(struct super_block *sb, void *data);
int security_sb_kern_mount(struct super_block *sb, int flags, void *data);
int security_sb_show_options(struct seq_file *m, struct super_block *sb);
int security_sb_statfs(struct dentry *dentry);
-int security_sb_mount(const char *dev_name, struct path *path,
+int security_sb_mount(const char *dev_name, const struct path *path,
const char *type, unsigned long flags, void *data);
int security_sb_umount(struct vfsmount *mnt, int flags);
-int security_sb_pivotroot(struct path *old_path, struct path *new_path);
+int security_sb_pivotroot(const struct path *old_path, const struct path *new_path);
int security_sb_set_mnt_opts(struct super_block *sb,
struct security_mnt_opts *opts,
unsigned long kern_flags,
@@ -462,10 +468,18 @@ static inline int security_syslog(int type)
return 0;
}
+static inline int security_settime64(const struct timespec64 *ts,
+ const struct timezone *tz)
+{
+ return cap_settime(ts, tz);
+}
+
static inline int security_settime(const struct timespec *ts,
const struct timezone *tz)
{
- return cap_settime(ts, tz);
+ struct timespec64 ts64 = timespec_to_timespec64(*ts);
+
+ return cap_settime(&ts64, tz);
}
static inline int security_vm_enough_memory_mm(struct mm_struct *mm, long pages)
@@ -530,7 +544,7 @@ static inline int security_sb_statfs(struct dentry *dentry)
return 0;
}
-static inline int security_sb_mount(const char *dev_name, struct path *path,
+static inline int security_sb_mount(const char *dev_name, const struct path *path,
const char *type, unsigned long flags,
void *data)
{
@@ -542,8 +556,8 @@ static inline int security_sb_umount(struct vfsmount *mnt, int flags)
return 0;
}
-static inline int security_sb_pivotroot(struct path *old_path,
- struct path *new_path)
+static inline int security_sb_pivotroot(const struct path *old_path,
+ const struct path *new_path)
{
return 0;
}
@@ -1442,83 +1456,83 @@ static inline void security_skb_classify_flow(struct sk_buff *skb, struct flowi
#endif /* CONFIG_SECURITY_NETWORK_XFRM */
#ifdef CONFIG_SECURITY_PATH
-int security_path_unlink(struct path *dir, struct dentry *dentry);
-int security_path_mkdir(struct path *dir, struct dentry *dentry, umode_t mode);
-int security_path_rmdir(struct path *dir, struct dentry *dentry);
-int security_path_mknod(struct path *dir, struct dentry *dentry, umode_t mode,
+int security_path_unlink(const struct path *dir, struct dentry *dentry);
+int security_path_mkdir(const struct path *dir, struct dentry *dentry, umode_t mode);
+int security_path_rmdir(const struct path *dir, struct dentry *dentry);
+int security_path_mknod(const struct path *dir, struct dentry *dentry, umode_t mode,
unsigned int dev);
-int security_path_truncate(struct path *path);
-int security_path_symlink(struct path *dir, struct dentry *dentry,
+int security_path_truncate(const struct path *path);
+int security_path_symlink(const struct path *dir, struct dentry *dentry,
const char *old_name);
-int security_path_link(struct dentry *old_dentry, struct path *new_dir,
+int security_path_link(struct dentry *old_dentry, const struct path *new_dir,
struct dentry *new_dentry);
-int security_path_rename(struct path *old_dir, struct dentry *old_dentry,
- struct path *new_dir, struct dentry *new_dentry,
+int security_path_rename(const struct path *old_dir, struct dentry *old_dentry,
+ const struct path *new_dir, struct dentry *new_dentry,
unsigned int flags);
-int security_path_chmod(struct path *path, umode_t mode);
-int security_path_chown(struct path *path, kuid_t uid, kgid_t gid);
-int security_path_chroot(struct path *path);
+int security_path_chmod(const struct path *path, umode_t mode);
+int security_path_chown(const struct path *path, kuid_t uid, kgid_t gid);
+int security_path_chroot(const struct path *path);
#else /* CONFIG_SECURITY_PATH */
-static inline int security_path_unlink(struct path *dir, struct dentry *dentry)
+static inline int security_path_unlink(const struct path *dir, struct dentry *dentry)
{
return 0;
}
-static inline int security_path_mkdir(struct path *dir, struct dentry *dentry,
+static inline int security_path_mkdir(const struct path *dir, struct dentry *dentry,
umode_t mode)
{
return 0;
}
-static inline int security_path_rmdir(struct path *dir, struct dentry *dentry)
+static inline int security_path_rmdir(const struct path *dir, struct dentry *dentry)
{
return 0;
}
-static inline int security_path_mknod(struct path *dir, struct dentry *dentry,
+static inline int security_path_mknod(const struct path *dir, struct dentry *dentry,
umode_t mode, unsigned int dev)
{
return 0;
}
-static inline int security_path_truncate(struct path *path)
+static inline int security_path_truncate(const struct path *path)
{
return 0;
}
-static inline int security_path_symlink(struct path *dir, struct dentry *dentry,
+static inline int security_path_symlink(const struct path *dir, struct dentry *dentry,
const char *old_name)
{
return 0;
}
static inline int security_path_link(struct dentry *old_dentry,
- struct path *new_dir,
+ const struct path *new_dir,
struct dentry *new_dentry)
{
return 0;
}
-static inline int security_path_rename(struct path *old_dir,
+static inline int security_path_rename(const struct path *old_dir,
struct dentry *old_dentry,
- struct path *new_dir,
+ const struct path *new_dir,
struct dentry *new_dentry,
unsigned int flags)
{
return 0;
}
-static inline int security_path_chmod(struct path *path, umode_t mode)
+static inline int security_path_chmod(const struct path *path, umode_t mode)
{
return 0;
}
-static inline int security_path_chown(struct path *path, kuid_t uid, kgid_t gid)
+static inline int security_path_chown(const struct path *path, kuid_t uid, kgid_t gid)
{
return 0;
}
-static inline int security_path_chroot(struct path *path)
+static inline int security_path_chroot(const struct path *path)
{
return 0;
}
diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h
index dde00defbaa5..f3d45dd42695 100644
--- a/include/linux/seq_file.h
+++ b/include/linux/seq_file.h
@@ -7,13 +7,10 @@
#include <linux/mutex.h>
#include <linux/cpumask.h>
#include <linux/nodemask.h>
+#include <linux/fs.h>
+#include <linux/cred.h>
struct seq_operations;
-struct file;
-struct path;
-struct inode;
-struct dentry;
-struct user_namespace;
struct seq_file {
char *buf;
@@ -27,9 +24,7 @@ struct seq_file {
struct mutex lock;
const struct seq_operations *op;
int poll_event;
-#ifdef CONFIG_USER_NS
- struct user_namespace *user_ns;
-#endif
+ const struct file *file;
void *private;
};
@@ -147,7 +142,7 @@ int seq_release_private(struct inode *, struct file *);
static inline struct user_namespace *seq_user_ns(struct seq_file *seq)
{
#ifdef CONFIG_USER_NS
- return seq->user_ns;
+ return seq->file->f_cred->user_ns;
#else
extern struct user_namespace init_user_ns;
return &init_user_ns;
diff --git a/include/linux/signal.h b/include/linux/signal.h
index 92557bbce7e7..639be264f5f9 100644
--- a/include/linux/signal.h
+++ b/include/linux/signal.h
@@ -28,6 +28,21 @@ struct sigpending {
sigset_t signal;
};
+#ifndef HAVE_ARCH_COPY_SIGINFO
+
+#include <linux/string.h>
+
+static inline void copy_siginfo(struct siginfo *to, struct siginfo *from)
+{
+ if (from->si_code < 0)
+ memcpy(to, from, sizeof(*to));
+ else
+ /* _sigchld is currently the largest known union member */
+ memcpy(to, from, __ARCH_SI_PREAMBLE_SIZE + sizeof(from->_sifields._sigchld));
+}
+
+#endif
+
/*
* Define some primitives to manipulate sigset_t.
*/
@@ -432,8 +447,10 @@ int __save_altstack(stack_t __user *, unsigned long);
stack_t __user *__uss = uss; \
struct task_struct *t = current; \
put_user_ex((void __user *)t->sas_ss_sp, &__uss->ss_sp); \
- put_user_ex(sas_ss_flags(sp), &__uss->ss_flags); \
+ put_user_ex(t->sas_ss_flags, &__uss->ss_flags); \
put_user_ex(t->sas_ss_size, &__uss->ss_size); \
+ if (t->sas_ss_flags & SS_AUTODISARM) \
+ sas_ss_reset(t); \
} while (0);
#ifdef CONFIG_PROC_FS
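
Illustrative sketch (not from this patch): the SS_AUTODISARM handling added to __save_altstack() is driven by userspace registering an alternate stack with that flag set. A hypothetical userspace example; the fallback define is only for older libc headers:

#include <signal.h>
#include <stdio.h>

#ifndef SS_AUTODISARM
#define SS_AUTODISARM (1U << 31)	/* matches the uapi value; fallback for old headers */
#endif

int main(void)
{
	static char stack[8192];
	stack_t ss = {
		.ss_sp = stack,
		.ss_size = sizeof(stack),
		.ss_flags = SS_AUTODISARM,
	};

	/* With SS_AUTODISARM set, the kernel records the flag in
	 * sas_ss_flags and __save_altstack() above resets the stack on
	 * signal delivery; older kernels reject the flag with EINVAL. */
	if (sigaltstack(&ss, NULL) == -1)
		perror("sigaltstack");
	return 0;
}
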
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 15d0df943466..ee38a4127475 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -382,14 +382,10 @@ enum {
/* generate software time stamp when entering packet scheduling */
SKBTX_SCHED_TSTAMP = 1 << 6,
-
- /* generate software timestamp on peer data acknowledgment */
- SKBTX_ACK_TSTAMP = 1 << 7,
};
#define SKBTX_ANY_SW_TSTAMP (SKBTX_SW_TSTAMP | \
- SKBTX_SCHED_TSTAMP | \
- SKBTX_ACK_TSTAMP)
+ SKBTX_SCHED_TSTAMP)
#define SKBTX_ANY_TSTAMP (SKBTX_HW_TSTAMP | SKBTX_ANY_SW_TSTAMP)
/*
@@ -465,23 +461,27 @@ enum {
/* This indicates the tcp segment has CWR set. */
SKB_GSO_TCP_ECN = 1 << 3,
- SKB_GSO_TCPV6 = 1 << 4,
+ SKB_GSO_TCP_FIXEDID = 1 << 4,
+
+ SKB_GSO_TCPV6 = 1 << 5,
- SKB_GSO_FCOE = 1 << 5,
+ SKB_GSO_FCOE = 1 << 6,
- SKB_GSO_GRE = 1 << 6,
+ SKB_GSO_GRE = 1 << 7,
- SKB_GSO_GRE_CSUM = 1 << 7,
+ SKB_GSO_GRE_CSUM = 1 << 8,
- SKB_GSO_IPIP = 1 << 8,
+ SKB_GSO_IPXIP4 = 1 << 9,
- SKB_GSO_SIT = 1 << 9,
+ SKB_GSO_IPXIP6 = 1 << 10,
- SKB_GSO_UDP_TUNNEL = 1 << 10,
+ SKB_GSO_UDP_TUNNEL = 1 << 11,
- SKB_GSO_UDP_TUNNEL_CSUM = 1 << 11,
+ SKB_GSO_UDP_TUNNEL_CSUM = 1 << 12,
- SKB_GSO_TUNNEL_REMCSUM = 1 << 12,
+ SKB_GSO_PARTIAL = 1 << 13,
+
+ SKB_GSO_TUNNEL_REMCSUM = 1 << 14,
};
#if BITS_PER_LONG > 32
@@ -1325,6 +1325,16 @@ static inline int skb_header_cloned(const struct sk_buff *skb)
return dataref != 1;
}
+static inline int skb_header_unclone(struct sk_buff *skb, gfp_t pri)
+{
+ might_sleep_if(gfpflags_allow_blocking(pri));
+
+ if (skb_header_cloned(skb))
+ return pskb_expand_head(skb, 0, 0, pri);
+
+ return 0;
+}
+
/**
* skb_header_release - release reference to header
* @skb: buffer to operate on
@@ -2457,7 +2467,7 @@ static inline struct page *__dev_alloc_pages(gfp_t gfp_mask,
static inline struct page *dev_alloc_pages(unsigned int order)
{
- return __dev_alloc_pages(GFP_ATOMIC, order);
+ return __dev_alloc_pages(GFP_ATOMIC | __GFP_NOWARN, order);
}
/**
@@ -2475,7 +2485,7 @@ static inline struct page *__dev_alloc_page(gfp_t gfp_mask)
static inline struct page *dev_alloc_page(void)
{
- return __dev_alloc_page(GFP_ATOMIC);
+ return dev_alloc_pages(0);
}
/**
@@ -2949,7 +2959,12 @@ int skb_copy_datagram_from_iter(struct sk_buff *skb, int offset,
struct iov_iter *from, int len);
int zerocopy_sg_from_iter(struct sk_buff *skb, struct iov_iter *frm);
void skb_free_datagram(struct sock *sk, struct sk_buff *skb);
-void skb_free_datagram_locked(struct sock *sk, struct sk_buff *skb);
+void __skb_free_datagram_locked(struct sock *sk, struct sk_buff *skb, int len);
+static inline void skb_free_datagram_locked(struct sock *sk,
+ struct sk_buff *skb)
+{
+ __skb_free_datagram_locked(sk, skb, 0);
+}
int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags);
int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len);
int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len);
@@ -2977,6 +2992,8 @@ struct sk_buff *skb_vlan_untag(struct sk_buff *skb);
int skb_ensure_writable(struct sk_buff *skb, int write_len);
int skb_vlan_pop(struct sk_buff *skb);
int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci);
+struct sk_buff *pskb_extract(struct sk_buff *skb, int off, int to_copy,
+ gfp_t gfp);
static inline int memcpy_from_msg(void *data, struct msghdr *msg, int len)
{
@@ -3584,7 +3601,10 @@ static inline struct sec_path *skb_sec_path(struct sk_buff *skb)
* Keeps track of level of encapsulation of network headers.
*/
struct skb_gso_cb {
- int mac_offset;
+ union {
+ int mac_offset;
+ int data_offset;
+ };
int encap_level;
__wsum csum;
__u16 csum_start;
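
Illustrative sketch (not from this patch): skb_header_unclone() is intended to be called before modifying a header in place, so a header area shared with a clone is unshared first. Hypothetical kernel-context caller:

#include <linux/errno.h>
#include <linux/ip.h>
#include <linux/skbuff.h>

/* Hypothetical example: decrement the IPv4 TTL in place (checksum
 * update omitted for brevity). skb_header_unclone() only expands the
 * head when the header is actually shared with a clone. */
static int example_decrement_ttl(struct sk_buff *skb)
{
	struct iphdr *iph;

	if (skb_header_unclone(skb, GFP_ATOMIC))
		return -ENOMEM;

	iph = ip_hdr(skb);
	if (iph->ttl)
		iph->ttl--;
	return 0;
}
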
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 508bd827e6dc..aeb3e6d00a66 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -315,8 +315,8 @@ static __always_inline int kmalloc_index(size_t size)
}
#endif /* !CONFIG_SLOB */
-void *__kmalloc(size_t size, gfp_t flags) __assume_kmalloc_alignment;
-void *kmem_cache_alloc(struct kmem_cache *, gfp_t flags) __assume_slab_alignment;
+void *__kmalloc(size_t size, gfp_t flags) __assume_kmalloc_alignment __malloc;
+void *kmem_cache_alloc(struct kmem_cache *, gfp_t flags) __assume_slab_alignment __malloc;
void kmem_cache_free(struct kmem_cache *, void *);
/*
@@ -339,8 +339,8 @@ static __always_inline void kfree_bulk(size_t size, void **p)
}
#ifdef CONFIG_NUMA
-void *__kmalloc_node(size_t size, gfp_t flags, int node) __assume_kmalloc_alignment;
-void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node) __assume_slab_alignment;
+void *__kmalloc_node(size_t size, gfp_t flags, int node) __assume_kmalloc_alignment __malloc;
+void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node) __assume_slab_alignment __malloc;
#else
static __always_inline void *__kmalloc_node(size_t size, gfp_t flags, int node)
{
@@ -354,12 +354,12 @@ static __always_inline void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t f
#endif
#ifdef CONFIG_TRACING
-extern void *kmem_cache_alloc_trace(struct kmem_cache *, gfp_t, size_t) __assume_slab_alignment;
+extern void *kmem_cache_alloc_trace(struct kmem_cache *, gfp_t, size_t) __assume_slab_alignment __malloc;
#ifdef CONFIG_NUMA
extern void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
gfp_t gfpflags,
- int node, size_t size) __assume_slab_alignment;
+ int node, size_t size) __assume_slab_alignment __malloc;
#else
static __always_inline void *
kmem_cache_alloc_node_trace(struct kmem_cache *s,
@@ -392,10 +392,10 @@ kmem_cache_alloc_node_trace(struct kmem_cache *s,
}
#endif /* CONFIG_TRACING */
-extern void *kmalloc_order(size_t size, gfp_t flags, unsigned int order) __assume_page_alignment;
+extern void *kmalloc_order(size_t size, gfp_t flags, unsigned int order) __assume_page_alignment __malloc;
#ifdef CONFIG_TRACING
-extern void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order) __assume_page_alignment;
+extern void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order) __assume_page_alignment __malloc;
#else
static __always_inline void *
kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
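
Illustrative sketch (not from this patch): __malloc expands to the compiler's malloc attribute, telling the optimizer that the returned pointer does not alias existing objects. A hypothetical allocation wrapper annotated the same way as the declarations above:

#include <linux/compiler.h>
#include <linux/slab.h>

struct example_item {
	int id;
	char name[32];
};

/* Hypothetical constructor; the attribute goes on the declaration,
 * mirroring how kmalloc and friends are annotated above. */
static struct example_item *example_item_new(gfp_t gfp) __malloc;

static struct example_item *example_item_new(gfp_t gfp)
{
	return kzalloc(sizeof(struct example_item), gfp);
}
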
diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
index 9edbbf352340..8694f7a5d92b 100644
--- a/include/linux/slab_def.h
+++ b/include/linux/slab_def.h
@@ -80,6 +80,10 @@ struct kmem_cache {
struct kasan_cache kasan_info;
#endif
+#ifdef CONFIG_SLAB_FREELIST_RANDOM
+ void *random_seq;
+#endif
+
struct kmem_cache_node *node[MAX_NUMNODES];
};
diff --git a/include/linux/soc/qcom/smd.h b/include/linux/soc/qcom/smd.h
index d0cb6d189a0a..cbb0f06c41b2 100644
--- a/include/linux/soc/qcom/smd.h
+++ b/include/linux/soc/qcom/smd.h
@@ -26,6 +26,8 @@ struct qcom_smd_device {
struct qcom_smd_channel *channel;
};
+typedef int (*qcom_smd_cb_t)(struct qcom_smd_channel *, const void *, size_t);
+
/**
* struct qcom_smd_driver - smd driver struct
* @driver: underlying device driver
@@ -42,16 +44,71 @@ struct qcom_smd_driver {
int (*probe)(struct qcom_smd_device *dev);
void (*remove)(struct qcom_smd_device *dev);
- int (*callback)(struct qcom_smd_device *, const void *, size_t);
+ qcom_smd_cb_t callback;
};
+#if IS_ENABLED(CONFIG_QCOM_SMD)
+
int qcom_smd_driver_register(struct qcom_smd_driver *drv);
void qcom_smd_driver_unregister(struct qcom_smd_driver *drv);
+struct qcom_smd_channel *qcom_smd_open_channel(struct qcom_smd_channel *channel,
+ const char *name,
+ qcom_smd_cb_t cb);
+void *qcom_smd_get_drvdata(struct qcom_smd_channel *channel);
+void qcom_smd_set_drvdata(struct qcom_smd_channel *channel, void *data);
+int qcom_smd_send(struct qcom_smd_channel *channel, const void *data, int len);
+
+
+#else
+
+static inline int qcom_smd_driver_register(struct qcom_smd_driver *drv)
+{
+ return -ENXIO;
+}
+
+static inline void qcom_smd_driver_unregister(struct qcom_smd_driver *drv)
+{
+ /* This shouldn't be possible */
+ WARN_ON(1);
+}
+
+static inline struct qcom_smd_channel *
+qcom_smd_open_channel(struct qcom_smd_channel *channel,
+ const char *name,
+ qcom_smd_cb_t cb)
+{
+ /* This shouldn't be possible */
+ WARN_ON(1);
+ return NULL;
+}
+
+static inline void *qcom_smd_get_drvdata(struct qcom_smd_channel *channel)
+{
+ /* This shouldn't be possible */
+ WARN_ON(1);
+ return NULL;
+}
+
+static inline void qcom_smd_set_drvdata(struct qcom_smd_channel *channel, void *data)
+{
+ /* This shouldn't be possible */
+ WARN_ON(1);
+}
+
+static inline int qcom_smd_send(struct qcom_smd_channel *channel,
+ const void *data, int len)
+{
+ /* This shouldn't be possible */
+ WARN_ON(1);
+ return -ENXIO;
+}
+
+#endif
+
#define module_qcom_smd_driver(__smd_driver) \
module_driver(__smd_driver, qcom_smd_driver_register, \
qcom_smd_driver_unregister)
-int qcom_smd_send(struct qcom_smd_channel *channel, const void *data, int len);
#endif
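
Illustrative sketch (not from this patch): with the callback retyped as qcom_smd_cb_t (taking the channel instead of the device), a client driver would look roughly like this hypothetical example, assuming the device's embedded struct device for devm allocation:

#include <linux/device.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/soc/qcom/smd.h>

struct example_priv {
	unsigned int rx_count;
};

/* Hypothetical receive callback matching the new qcom_smd_cb_t type. */
static int example_smd_callback(struct qcom_smd_channel *channel,
				const void *data, size_t count)
{
	struct example_priv *priv = qcom_smd_get_drvdata(channel);

	priv->rx_count++;
	return 0;
}

static int example_smd_probe(struct qcom_smd_device *sdev)
{
	struct example_priv *priv;

	priv = devm_kzalloc(&sdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	qcom_smd_set_drvdata(sdev->channel, priv);
	return 0;
}

static struct qcom_smd_driver example_smd_driver = {
	.probe = example_smd_probe,
	.callback = example_smd_callback,
	.driver = {
		.name = "example-smd",
	},
};
module_qcom_smd_driver(example_smd_driver);

MODULE_LICENSE("GPL v2");
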
diff --git a/include/linux/soc/qcom/smem_state.h b/include/linux/soc/qcom/smem_state.h
index f35e1512fcaa..7b88697929e9 100644
--- a/include/linux/soc/qcom/smem_state.h
+++ b/include/linux/soc/qcom/smem_state.h
@@ -1,12 +1,17 @@
#ifndef __QCOM_SMEM_STATE__
#define __QCOM_SMEM_STATE__
+#include <linux/errno.h>
+
+struct device_node;
struct qcom_smem_state;
struct qcom_smem_state_ops {
int (*update_bits)(void *, u32, u32);
};
+#ifdef CONFIG_QCOM_SMEM_STATE
+
struct qcom_smem_state *qcom_smem_state_get(struct device *dev, const char *con_id, unsigned *bit);
void qcom_smem_state_put(struct qcom_smem_state *);
@@ -15,4 +20,34 @@ int qcom_smem_state_update_bits(struct qcom_smem_state *state, u32 mask, u32 val
struct qcom_smem_state *qcom_smem_state_register(struct device_node *of_node, const struct qcom_smem_state_ops *ops, void *data);
void qcom_smem_state_unregister(struct qcom_smem_state *state);
+#else
+
+static inline struct qcom_smem_state *qcom_smem_state_get(struct device *dev,
+ const char *con_id, unsigned *bit)
+{
+ return ERR_PTR(-EINVAL);
+}
+
+static inline void qcom_smem_state_put(struct qcom_smem_state *state)
+{
+}
+
+static inline int qcom_smem_state_update_bits(struct qcom_smem_state *state,
+ u32 mask, u32 value)
+{
+ return -EINVAL;
+}
+
+static inline struct qcom_smem_state *qcom_smem_state_register(struct device_node *of_node,
+ const struct qcom_smem_state_ops *ops, void *data)
+{
+ return ERR_PTR(-EINVAL);
+}
+
+static inline void qcom_smem_state_unregister(struct qcom_smem_state *state)
+{
+}
+
+#endif
+
#endif
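
Illustrative sketch (not from this patch): a consumer acquires an smem state handle and toggles bits in it; the stubs above let such code compile with CONFIG_QCOM_SMEM_STATE disabled. Hypothetical example (the "stop" entry name is made up):

#include <linux/bitops.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/soc/qcom/smem_state.h>

/* Hypothetical consumer: assert the bit behind a state entry named
 * "stop" and report errors from either the lookup or the update. */
static int example_signal_stop(struct device *dev)
{
	struct qcom_smem_state *state;
	unsigned bit;
	int ret;

	state = qcom_smem_state_get(dev, "stop", &bit);
	if (IS_ERR(state))
		return PTR_ERR(state);

	ret = qcom_smem_state_update_bits(state, BIT(bit), BIT(bit));
	qcom_smem_state_put(state);
	return ret;
}
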
diff --git a/include/linux/soc/renesas/rcar-sysc.h b/include/linux/soc/renesas/rcar-sysc.h
new file mode 100644
index 000000000000..92fc613ab23d
--- /dev/null
+++ b/include/linux/soc/renesas/rcar-sysc.h
@@ -0,0 +1,16 @@
+#ifndef __LINUX_SOC_RENESAS_RCAR_SYSC_H__
+#define __LINUX_SOC_RENESAS_RCAR_SYSC_H__
+
+#include <linux/types.h>
+
+struct rcar_sysc_ch {
+ u16 chan_offs;
+ u8 chan_bit;
+ u8 isr_bit;
+};
+
+int rcar_sysc_power_down(const struct rcar_sysc_ch *sysc_ch);
+int rcar_sysc_power_up(const struct rcar_sysc_ch *sysc_ch);
+void __iomem *rcar_sysc_init(phys_addr_t base);
+
+#endif /* __LINUX_SOC_RENESAS_RCAR_SYSC_H__ */
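
Illustrative sketch (not from this new header): a platform user describes a power area with rcar_sysc_ch and drives it through the power_down/power_up calls. The offsets and bits below are placeholders, not real SoC values:

#include <linux/soc/renesas/rcar-sysc.h>

/* Hypothetical power area; chan_offs/chan_bit/isr_bit are placeholders. */
static const struct rcar_sysc_ch example_area = {
	.chan_offs = 0x40,
	.chan_bit = 2,
	.isr_bit = 10,
};

static int example_area_power_cycle(void)
{
	int ret;

	ret = rcar_sysc_power_down(&example_area);
	if (ret)
		return ret;
	return rcar_sysc_power_up(&example_area);
}
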
diff --git a/include/linux/socket.h b/include/linux/socket.h
index 73bf6c6a833b..b5cc5a6d7011 100644
--- a/include/linux/socket.h
+++ b/include/linux/socket.h
@@ -201,8 +201,9 @@ struct ucred {
#define AF_NFC 39 /* NFC sockets */
#define AF_VSOCK 40 /* vSockets */
#define AF_KCM 41 /* Kernel Connection Multiplexor*/
+#define AF_QIPCRTR 42 /* Qualcomm IPC Router */
-#define AF_MAX 42 /* For now.. */
+#define AF_MAX 43 /* For now.. */
/* Protocol families, same as address families. */
#define PF_UNSPEC AF_UNSPEC
@@ -249,6 +250,7 @@ struct ucred {
#define PF_NFC AF_NFC
#define PF_VSOCK AF_VSOCK
#define PF_KCM AF_KCM
+#define PF_QIPCRTR AF_QIPCRTR
#define PF_MAX AF_MAX
/* Maximum queue length specifiable by listen. */
diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h
index 4bcf5a61aada..ffdaca9c01af 100644
--- a/include/linux/stmmac.h
+++ b/include/linux/stmmac.h
@@ -108,7 +108,6 @@ struct stmmac_axi {
};
struct plat_stmmacenet_data {
- char *phy_bus_name;
int bus_id;
int phy_addr;
int interface;
@@ -138,5 +137,7 @@ struct plat_stmmacenet_data {
void (*exit)(struct platform_device *pdev, void *priv);
void *bsp_priv;
struct stmmac_axi *axi;
+ int has_gmac4;
+ bool tso_en;
};
#endif
diff --git a/include/linux/string.h b/include/linux/string.h
index d3993a79a325..26b6f6a66f83 100644
--- a/include/linux/string.h
+++ b/include/linux/string.h
@@ -119,7 +119,7 @@ char *strreplace(char *s, char old, char new);
extern void kfree_const(const void *x);
-extern char *kstrdup(const char *s, gfp_t gfp);
+extern char *kstrdup(const char *s, gfp_t gfp) __malloc;
extern const char *kstrdup_const(const char *s, gfp_t gfp);
extern char *kstrndup(const char *s, size_t len, gfp_t gfp);
extern void *kmemdup(const void *src, size_t len, gfp_t gfp);
diff --git a/include/linux/string_helpers.h b/include/linux/string_helpers.h
index dabe643eb5fa..5ce9538f290e 100644
--- a/include/linux/string_helpers.h
+++ b/include/linux/string_helpers.h
@@ -3,6 +3,8 @@
#include <linux/types.h>
+struct file;
+
/* Descriptions of the types of units to
* print in */
enum string_size_units {
@@ -68,4 +70,8 @@ static inline int string_escape_str_any_np(const char *src, char *dst,
return string_escape_str(src, dst, sz, ESCAPE_ANY_NP, only);
}
+char *kstrdup_quotable(const char *src, gfp_t gfp);
+char *kstrdup_quotable_cmdline(struct task_struct *task, gfp_t gfp);
+char *kstrdup_quotable_file(struct file *file, gfp_t gfp);
+
#endif
diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
index cc0fc712bb82..7ca44fb5b675 100644
--- a/include/linux/sunrpc/svc.h
+++ b/include/linux/sunrpc/svc.h
@@ -129,7 +129,7 @@ static inline void svc_get(struct svc_serv *serv)
*
* These happen to all be powers of 2, which is not strictly
* necessary but helps enforce the real limitation, which is
- * that they should be multiples of PAGE_CACHE_SIZE.
+ * that they should be multiples of PAGE_SIZE.
*
* For UDP transports, a block plus NFS,RPC, and UDP headers
* has to fit into the IP datagram limit of 64K. The largest
diff --git a/include/linux/swap.h b/include/linux/swap.h
index d18b65c53dbb..ad220359f1b0 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -418,7 +418,7 @@ extern sector_t swapdev_block(int, pgoff_t);
extern int page_swapcount(struct page *);
extern int swp_swapcount(swp_entry_t entry);
extern struct swap_info_struct *page_swap_info(struct page *);
-extern int reuse_swap_page(struct page *);
+extern bool reuse_swap_page(struct page *, int *);
extern int try_to_free_swap(struct page *);
struct backing_dev_info;
@@ -433,9 +433,9 @@ struct backing_dev_info;
#define si_swapinfo(val) \
do { (val)->freeswap = (val)->totalswap = 0; } while (0)
/* only sparc can not include linux/pagemap.h in this file
- * so leave page_cache_release and release_pages undeclared... */
+ * so leave put_page and release_pages undeclared... */
#define free_page_and_swap_cache(page) \
- page_cache_release(page)
+ put_page(page)
#define free_pages_and_swap_cache(pages, nr) \
release_pages((pages), (nr), false);
@@ -513,8 +513,8 @@ static inline int swp_swapcount(swp_entry_t entry)
return 0;
}
-#define reuse_swap_page(page) \
- (!PageTransCompound(page) && page_mapcount(page) == 1)
+#define reuse_swap_page(page, total_mapcount) \
+ (page_trans_huge_mapcount(page, total_mapcount) == 1)
static inline int try_to_free_swap(struct page *page)
{
@@ -533,6 +533,10 @@ static inline swp_entry_t get_swap_page(void)
#ifdef CONFIG_MEMCG
static inline int mem_cgroup_swappiness(struct mem_cgroup *memcg)
{
+ /* Cgroup2 doesn't have per-cgroup swappiness */
+ if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
+ return vm_swappiness;
+
/* root ? */
if (mem_cgroup_disabled() || !memcg->css.parent)
return vm_swappiness;
diff --git a/include/linux/thermal.h b/include/linux/thermal.h
index a55d0523f75d..1b8a5a7876ce 100644
--- a/include/linux/thermal.h
+++ b/include/linux/thermal.h
@@ -352,8 +352,8 @@ struct thermal_zone_of_device_ops {
struct thermal_trip {
struct device_node *np;
- unsigned long int temperature;
- unsigned long int hysteresis;
+ int temperature;
+ int hysteresis;
enum thermal_trip_type type;
};
diff --git a/include/linux/time64.h b/include/linux/time64.h
index 367d5af899e8..7e5d2fa9ac46 100644
--- a/include/linux/time64.h
+++ b/include/linux/time64.h
@@ -65,7 +65,6 @@ static inline struct itimerspec64 itimerspec_to_itimerspec64(struct itimerspec *
# define timespec64_equal timespec_equal
# define timespec64_compare timespec_compare
# define set_normalized_timespec64 set_normalized_timespec
-# define timespec64_add_safe timespec_add_safe
# define timespec64_add timespec_add
# define timespec64_sub timespec_sub
# define timespec64_valid timespec_valid
@@ -134,15 +133,6 @@ static inline int timespec64_compare(const struct timespec64 *lhs, const struct
extern void set_normalized_timespec64(struct timespec64 *ts, time64_t sec, s64 nsec);
-/*
- * timespec64_add_safe assumes both values are positive and checks for
- * overflow. It will return TIME_T_MAX if the returned value would be
- * smaller then either of the arguments.
- */
-extern struct timespec64 timespec64_add_safe(const struct timespec64 lhs,
- const struct timespec64 rhs);
-
-
static inline struct timespec64 timespec64_add(struct timespec64 lhs,
struct timespec64 rhs)
{
@@ -224,4 +214,11 @@ static __always_inline void timespec64_add_ns(struct timespec64 *a, u64 ns)
#endif
+/*
+ * timespec64_add_safe assumes both values are positive and checks for
+ * overflow. It will return TIME64_MAX in case of overflow.
+ */
+extern struct timespec64 timespec64_add_safe(const struct timespec64 lhs,
+ const struct timespec64 rhs);
+
#endif /* _LINUX_TIME64_H */
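
Illustrative sketch (not from this patch): timespec64_add_safe() saturates at TIME64_MAX instead of wrapping, so an unvalidated relative timeout can be added to the current time without extra overflow checks. Hypothetical kernel-context helper:

#include <linux/ktime.h>
#include <linux/time64.h>

/* Hypothetical helper: turn a user-supplied relative timeout into an
 * absolute expiry; an overflowing sum saturates rather than wrapping. */
static struct timespec64 example_expiry(const struct timespec64 *timeout)
{
	struct timespec64 now;

	ktime_get_ts64(&now);
	return timespec64_add_safe(now, *timeout);
}
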
diff --git a/include/linux/timekeeping.h b/include/linux/timekeeping.h
index 96f37bee3bc1..37dbacf84849 100644
--- a/include/linux/timekeeping.h
+++ b/include/linux/timekeeping.h
@@ -1,6 +1,8 @@
#ifndef _LINUX_TIMEKEEPING_H
#define _LINUX_TIMEKEEPING_H
+#include <asm-generic/errno-base.h>
+
/* Included from linux/ktime.h */
void timekeeping_init(void);
@@ -11,8 +13,19 @@ extern int timekeeping_suspended;
*/
extern void do_gettimeofday(struct timeval *tv);
extern int do_settimeofday64(const struct timespec64 *ts);
-extern int do_sys_settimeofday(const struct timespec *tv,
- const struct timezone *tz);
+extern int do_sys_settimeofday64(const struct timespec64 *tv,
+ const struct timezone *tz);
+static inline int do_sys_settimeofday(const struct timespec *tv,
+ const struct timezone *tz)
+{
+ struct timespec64 ts64;
+
+ if (!tv)
+ return -EINVAL;
+
+ ts64 = timespec_to_timespec64(*tv);
+ return do_sys_settimeofday64(&ts64, tz);
+}
/*
* Kernel time accessors
diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h
index 0810f81b6db2..be007610ceb0 100644
--- a/include/linux/trace_events.h
+++ b/include/linux/trace_events.h
@@ -154,21 +154,6 @@ trace_event_buffer_lock_reserve(struct ring_buffer **current_buffer,
struct trace_event_file *trace_file,
int type, unsigned long len,
unsigned long flags, int pc);
-struct ring_buffer_event *
-trace_current_buffer_lock_reserve(struct ring_buffer **current_buffer,
- int type, unsigned long len,
- unsigned long flags, int pc);
-void trace_buffer_unlock_commit(struct trace_array *tr,
- struct ring_buffer *buffer,
- struct ring_buffer_event *event,
- unsigned long flags, int pc);
-void trace_buffer_unlock_commit_regs(struct trace_array *tr,
- struct ring_buffer *buffer,
- struct ring_buffer_event *event,
- unsigned long flags, int pc,
- struct pt_regs *regs);
-void trace_current_buffer_discard_commit(struct ring_buffer *buffer,
- struct ring_buffer_event *event);
void tracing_record_cmdline(struct task_struct *tsk);
@@ -229,7 +214,6 @@ enum {
TRACE_EVENT_FL_NO_SET_FILTER_BIT,
TRACE_EVENT_FL_IGNORE_ENABLE_BIT,
TRACE_EVENT_FL_WAS_ENABLED_BIT,
- TRACE_EVENT_FL_USE_CALL_FILTER_BIT,
TRACE_EVENT_FL_TRACEPOINT_BIT,
TRACE_EVENT_FL_KPROBE_BIT,
TRACE_EVENT_FL_UPROBE_BIT,
@@ -244,7 +228,6 @@ enum {
* WAS_ENABLED - Set and stays set when an event was ever enabled
* (used for module unloading, if a module event is enabled,
* it is best to clear the buffers that used it).
- * USE_CALL_FILTER - For trace internal events, don't use file filter
* TRACEPOINT - Event is a tracepoint
* KPROBE - Event is a kprobe
* UPROBE - Event is a uprobe
@@ -255,7 +238,6 @@ enum {
TRACE_EVENT_FL_NO_SET_FILTER = (1 << TRACE_EVENT_FL_NO_SET_FILTER_BIT),
TRACE_EVENT_FL_IGNORE_ENABLE = (1 << TRACE_EVENT_FL_IGNORE_ENABLE_BIT),
TRACE_EVENT_FL_WAS_ENABLED = (1 << TRACE_EVENT_FL_WAS_ENABLED_BIT),
- TRACE_EVENT_FL_USE_CALL_FILTER = (1 << TRACE_EVENT_FL_USE_CALL_FILTER_BIT),
TRACE_EVENT_FL_TRACEPOINT = (1 << TRACE_EVENT_FL_TRACEPOINT_BIT),
TRACE_EVENT_FL_KPROBE = (1 << TRACE_EVENT_FL_KPROBE_BIT),
TRACE_EVENT_FL_UPROBE = (1 << TRACE_EVENT_FL_UPROBE_BIT),
@@ -407,16 +389,12 @@ enum event_trigger_type {
ETT_SNAPSHOT = (1 << 1),
ETT_STACKTRACE = (1 << 2),
ETT_EVENT_ENABLE = (1 << 3),
+ ETT_EVENT_HIST = (1 << 4),
+ ETT_HIST_ENABLE = (1 << 5),
};
extern int filter_match_preds(struct event_filter *filter, void *rec);
-extern int filter_check_discard(struct trace_event_file *file, void *rec,
- struct ring_buffer *buffer,
- struct ring_buffer_event *event);
-extern int call_filter_check_discard(struct trace_event_call *call, void *rec,
- struct ring_buffer *buffer,
- struct ring_buffer_event *event);
extern enum event_trigger_type event_triggers_call(struct trace_event_file *file,
void *rec);
extern void event_triggers_post_call(struct trace_event_file *file,
@@ -450,100 +428,6 @@ trace_trigger_soft_disabled(struct trace_event_file *file)
return false;
}
-/*
- * Helper function for event_trigger_unlock_commit{_regs}().
- * If there are event triggers attached to this event that requires
- * filtering against its fields, then they wil be called as the
- * entry already holds the field information of the current event.
- *
- * It also checks if the event should be discarded or not.
- * It is to be discarded if the event is soft disabled and the
- * event was only recorded to process triggers, or if the event
- * filter is active and this event did not match the filters.
- *
- * Returns true if the event is discarded, false otherwise.
- */
-static inline bool
-__event_trigger_test_discard(struct trace_event_file *file,
- struct ring_buffer *buffer,
- struct ring_buffer_event *event,
- void *entry,
- enum event_trigger_type *tt)
-{
- unsigned long eflags = file->flags;
-
- if (eflags & EVENT_FILE_FL_TRIGGER_COND)
- *tt = event_triggers_call(file, entry);
-
- if (test_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags))
- ring_buffer_discard_commit(buffer, event);
- else if (!filter_check_discard(file, entry, buffer, event))
- return false;
-
- return true;
-}
-
-/**
- * event_trigger_unlock_commit - handle triggers and finish event commit
- * @file: The file pointer assoctiated to the event
- * @buffer: The ring buffer that the event is being written to
- * @event: The event meta data in the ring buffer
- * @entry: The event itself
- * @irq_flags: The state of the interrupts at the start of the event
- * @pc: The state of the preempt count at the start of the event.
- *
- * This is a helper function to handle triggers that require data
- * from the event itself. It also tests the event against filters and
- * if the event is soft disabled and should be discarded.
- */
-static inline void
-event_trigger_unlock_commit(struct trace_event_file *file,
- struct ring_buffer *buffer,
- struct ring_buffer_event *event,
- void *entry, unsigned long irq_flags, int pc)
-{
- enum event_trigger_type tt = ETT_NONE;
-
- if (!__event_trigger_test_discard(file, buffer, event, entry, &tt))
- trace_buffer_unlock_commit(file->tr, buffer, event, irq_flags, pc);
-
- if (tt)
- event_triggers_post_call(file, tt, entry);
-}
-
-/**
- * event_trigger_unlock_commit_regs - handle triggers and finish event commit
- * @file: The file pointer assoctiated to the event
- * @buffer: The ring buffer that the event is being written to
- * @event: The event meta data in the ring buffer
- * @entry: The event itself
- * @irq_flags: The state of the interrupts at the start of the event
- * @pc: The state of the preempt count at the start of the event.
- *
- * This is a helper function to handle triggers that require data
- * from the event itself. It also tests the event against filters and
- * if the event is soft disabled and should be discarded.
- *
- * Same as event_trigger_unlock_commit() but calls
- * trace_buffer_unlock_commit_regs() instead of trace_buffer_unlock_commit().
- */
-static inline void
-event_trigger_unlock_commit_regs(struct trace_event_file *file,
- struct ring_buffer *buffer,
- struct ring_buffer_event *event,
- void *entry, unsigned long irq_flags, int pc,
- struct pt_regs *regs)
-{
- enum event_trigger_type tt = ETT_NONE;
-
- if (!__event_trigger_test_discard(file, buffer, event, entry, &tt))
- trace_buffer_unlock_commit_regs(file->tr, buffer, event,
- irq_flags, pc, regs);
-
- if (tt)
- event_triggers_post_call(file, tt, entry);
-}
-
#ifdef CONFIG_BPF_EVENTS
unsigned int trace_call_bpf(struct bpf_prog *prog, void *ctx);
#else
@@ -569,6 +453,7 @@ extern int trace_define_field(struct trace_event_call *call, const char *type,
int is_signed, int filter_type);
extern int trace_add_event_call(struct trace_event_call *call);
extern int trace_remove_event_call(struct trace_event_call *call);
+extern int trace_event_get_offsets(struct trace_event_call *call);
#define is_signed_type(type) (((type)(-1)) < (type)1)
@@ -605,15 +490,20 @@ extern void perf_trace_del(struct perf_event *event, int flags);
extern int ftrace_profile_set_filter(struct perf_event *event, int event_id,
char *filter_str);
extern void ftrace_profile_free_filter(struct perf_event *event);
-extern void *perf_trace_buf_prepare(int size, unsigned short type,
- struct pt_regs **regs, int *rctxp);
+void perf_trace_buf_update(void *record, u16 type);
+void *perf_trace_buf_alloc(int size, struct pt_regs **regs, int *rctxp);
+
+void perf_trace_run_bpf_submit(void *raw_data, int size, int rctx,
+ struct trace_event_call *call, u64 count,
+ struct pt_regs *regs, struct hlist_head *head,
+ struct task_struct *task);
static inline void
-perf_trace_buf_submit(void *raw_data, int size, int rctx, u64 addr,
+perf_trace_buf_submit(void *raw_data, int size, int rctx, u16 type,
u64 count, struct pt_regs *regs, void *head,
struct task_struct *task)
{
- perf_tp_event(addr, count, raw_data, size, regs, head, rctx, task);
+ perf_tp_event(type, count, raw_data, size, regs, head, rctx, task);
}
#endif
diff --git a/include/linux/tty.h b/include/linux/tty.h
index 3b09f235db66..17b247c94440 100644
--- a/include/linux/tty.h
+++ b/include/linux/tty.h
@@ -371,6 +371,7 @@ extern void proc_clear_tty(struct task_struct *p);
extern struct tty_struct *get_current_tty(void);
/* tty_io.c */
extern int __init tty_init(void);
+extern const char *tty_name(const struct tty_struct *tty);
#else
static inline void console_init(void)
{ }
@@ -391,6 +392,8 @@ static inline struct tty_struct *get_current_tty(void)
/* tty_io.c */
static inline int __init tty_init(void)
{ return 0; }
+static inline const char *tty_name(const struct tty_struct *tty)
+{ return "(none)"; }
#endif
extern struct ktermios tty_std_termios;
@@ -415,7 +418,6 @@ static inline struct tty_struct *tty_kref_get(struct tty_struct *tty)
return tty;
}
-extern const char *tty_name(const struct tty_struct *tty);
extern const char *tty_driver_name(const struct tty_struct *tty);
extern void tty_wait_until_sent(struct tty_struct *tty, long timeout);
extern int __tty_check_change(struct tty_struct *tty, int sig);
diff --git a/include/linux/tty_driver.h b/include/linux/tty_driver.h
index 161052477f77..b742b5e47cc2 100644
--- a/include/linux/tty_driver.h
+++ b/include/linux/tty_driver.h
@@ -7,7 +7,7 @@
* defined; unless noted otherwise, they are optional, and can be
* filled in with a null pointer.
*
- * struct tty_struct * (*lookup)(struct tty_driver *self, int idx)
+ * struct tty_struct * (*lookup)(struct tty_driver *self, struct file *, int idx)
*
* Return the tty device corresponding to idx, NULL if there is not
* one currently in use and an ERR_PTR value on error. Called under
@@ -250,7 +250,7 @@ struct serial_icounter_struct;
struct tty_operations {
struct tty_struct * (*lookup)(struct tty_driver *driver,
- struct inode *inode, int idx);
+ struct file *filp, int idx);
int (*install)(struct tty_driver *driver, struct tty_struct *tty);
void (*remove)(struct tty_driver *driver, struct tty_struct *tty);
int (*open)(struct tty_struct * tty, struct file * filp);
diff --git a/include/linux/u64_stats_sync.h b/include/linux/u64_stats_sync.h
index df89c9bcba7d..d3a2bb712af3 100644
--- a/include/linux/u64_stats_sync.h
+++ b/include/linux/u64_stats_sync.h
@@ -89,6 +89,20 @@ static inline void u64_stats_update_end(struct u64_stats_sync *syncp)
#endif
}
+static inline void u64_stats_update_begin_raw(struct u64_stats_sync *syncp)
+{
+#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
+ raw_write_seqcount_begin(&syncp->seq);
+#endif
+}
+
+static inline void u64_stats_update_end_raw(struct u64_stats_sync *syncp)
+{
+#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
+ raw_write_seqcount_end(&syncp->seq);
+#endif
+}
+
static inline unsigned int u64_stats_fetch_begin(const struct u64_stats_sync *syncp)
{
#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
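
Illustrative sketch (not from this patch): the new _raw variants use the raw seqcount write primitives, for callers that already provide their own serialization and do not want the extra annotations. Hypothetical per-CPU style counter update:

#include <linux/types.h>
#include <linux/u64_stats_sync.h>

struct example_stats {
	u64 packets;
	u64 bytes;
	struct u64_stats_sync syncp;
};

/* Hypothetical updater: the caller is assumed to already prevent
 * concurrent writers (e.g. per-CPU data with preemption disabled),
 * so the raw begin/end helpers are sufficient here. */
static void example_stats_add(struct example_stats *stats, unsigned int len)
{
	u64_stats_update_begin_raw(&stats->syncp);
	stats->packets++;
	stats->bytes += len;
	u64_stats_update_end_raw(&stats->syncp);
}
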
diff --git a/include/linux/udp.h b/include/linux/udp.h
index 87c094961bd5..d1fd8cd39478 100644
--- a/include/linux/udp.h
+++ b/include/linux/udp.h
@@ -71,6 +71,14 @@ struct udp_sock {
*/
int (*encap_rcv)(struct sock *sk, struct sk_buff *skb);
void (*encap_destroy)(struct sock *sk);
+
+ /* GRO functions for UDP socket */
+ struct sk_buff ** (*gro_receive)(struct sock *sk,
+ struct sk_buff **head,
+ struct sk_buff *skb);
+ int (*gro_complete)(struct sock *sk,
+ struct sk_buff *skb,
+ int nhoff);
};
static inline struct udp_sock *udp_sk(const struct sock *sk)
@@ -98,11 +106,11 @@ static inline bool udp_get_no_check6_rx(struct sock *sk)
return udp_sk(sk)->no_check6_rx;
}
-#define udp_portaddr_for_each_entry(__sk, node, list) \
- hlist_nulls_for_each_entry(__sk, node, list, __sk_common.skc_portaddr_node)
+#define udp_portaddr_for_each_entry(__sk, list) \
+ hlist_for_each_entry(__sk, list, __sk_common.skc_portaddr_node)
-#define udp_portaddr_for_each_entry_rcu(__sk, node, list) \
- hlist_nulls_for_each_entry_rcu(__sk, node, list, __sk_common.skc_portaddr_node)
+#define udp_portaddr_for_each_entry_rcu(__sk, list) \
+ hlist_for_each_entry_rcu(__sk, list, __sk_common.skc_portaddr_node)
#define IS_UDPLITE(__sk) (udp_sk(__sk)->pcflag)
diff --git a/include/linux/uio.h b/include/linux/uio.h
index fd9bcfedad42..1b5d1cd796e2 100644
--- a/include/linux/uio.h
+++ b/include/linux/uio.h
@@ -87,6 +87,7 @@ size_t copy_from_iter(void *addr, size_t bytes, struct iov_iter *i);
size_t copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i);
size_t iov_iter_zero(size_t bytes, struct iov_iter *);
unsigned long iov_iter_alignment(const struct iov_iter *i);
+unsigned long iov_iter_gap_alignment(const struct iov_iter *i);
void iov_iter_init(struct iov_iter *i, int direction, const struct iovec *iov,
unsigned long nr_segs, size_t count);
void iov_iter_kvec(struct iov_iter *i, int direction, const struct kvec *kvec,
diff --git a/include/linux/usb_usual.h b/include/linux/usb_usual.h
index 7f5f78bd15ad..245f57dbbb61 100644
--- a/include/linux/usb_usual.h
+++ b/include/linux/usb_usual.h
@@ -79,6 +79,8 @@
/* Cannot handle MI_REPORT_SUPPORTED_OPERATION_CODES */ \
US_FLAG(MAX_SECTORS_240, 0x08000000) \
/* Sets max_sectors to 240 */ \
+ US_FLAG(NO_REPORT_LUNS, 0x10000000) \
+ /* Cannot handle REPORT_LUNS */ \
#define US_FLAG(name, value) US_FL_##name = value ,
enum { US_DO_ALL_FLAGS };
diff --git a/include/linux/verification.h b/include/linux/verification.h
new file mode 100644
index 000000000000..a10549a6c7cd
--- /dev/null
+++ b/include/linux/verification.h
@@ -0,0 +1,49 @@
+/* Signature verification
+ *
+ * Copyright (C) 2014 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+
+#ifndef _LINUX_VERIFICATION_H
+#define _LINUX_VERIFICATION_H
+
+/*
+ * The use to which an asymmetric key is being put.
+ */
+enum key_being_used_for {
+ VERIFYING_MODULE_SIGNATURE,
+ VERIFYING_FIRMWARE_SIGNATURE,
+ VERIFYING_KEXEC_PE_SIGNATURE,
+ VERIFYING_KEY_SIGNATURE,
+ VERIFYING_KEY_SELF_SIGNATURE,
+ VERIFYING_UNSPECIFIED_SIGNATURE,
+ NR__KEY_BEING_USED_FOR
+};
+extern const char *const key_being_used_for[NR__KEY_BEING_USED_FOR];
+
+#ifdef CONFIG_SYSTEM_DATA_VERIFICATION
+
+struct key;
+
+extern int verify_pkcs7_signature(const void *data, size_t len,
+ const void *raw_pkcs7, size_t pkcs7_len,
+ struct key *trusted_keys,
+ enum key_being_used_for usage,
+ int (*view_content)(void *ctx,
+ const void *data, size_t len,
+ size_t asn1hdrlen),
+ void *ctx);
+
+#ifdef CONFIG_SIGNED_PE_FILE_VERIFICATION
+extern int verify_pefile_signature(const void *pebuf, unsigned pelen,
+ struct key *trusted_keys,
+ enum key_being_used_for usage);
+#endif
+
+#endif /* CONFIG_SYSTEM_DATA_VERIFICATION */
+#endif /* _LINUX_VERIFICATION_H */
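
Illustrative sketch (not from this new header): a caller of verify_pkcs7_signature() hands in the data, a detached PKCS#7 blob and a keyring; passing NULL keys is expected to fall back to the builtin system keyring. Hypothetical example assuming CONFIG_SYSTEM_DATA_VERIFICATION=y:

#include <linux/verification.h>

/* Hypothetical caller: verify a detached PKCS#7 signature over a blob.
 * No content viewer is supplied, so the signed data is not inspected. */
static int example_check_blob(const void *data, size_t len,
			      const void *sig, size_t sig_len)
{
	return verify_pkcs7_signature(data, len, sig, sig_len,
				      NULL, VERIFYING_UNSPECIFIED_SIGNATURE,
				      NULL, NULL);
}
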
diff --git a/include/linux/verify_pefile.h b/include/linux/verify_pefile.h
deleted file mode 100644
index da2049b5161c..000000000000
--- a/include/linux/verify_pefile.h
+++ /dev/null
@@ -1,22 +0,0 @@
-/* Signed PE file verification
- *
- * Copyright (C) 2014 Red Hat, Inc. All Rights Reserved.
- * Written by David Howells (dhowells@redhat.com)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public Licence
- * as published by the Free Software Foundation; either version
- * 2 of the Licence, or (at your option) any later version.
- */
-
-#ifndef _LINUX_VERIFY_PEFILE_H
-#define _LINUX_VERIFY_PEFILE_H
-
-#include <crypto/public_key.h>
-
-extern int verify_pefile_signature(const void *pebuf, unsigned pelen,
- struct key *trusted_keyring,
- enum key_being_used_for usage,
- bool *_trusted);
-
-#endif /* _LINUX_VERIFY_PEFILE_H */
diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
index 73fae8c4a5fb..d2da8e053210 100644
--- a/include/linux/vmstat.h
+++ b/include/linux/vmstat.h
@@ -163,12 +163,10 @@ static inline unsigned long zone_page_state_snapshot(struct zone *zone,
#ifdef CONFIG_NUMA
extern unsigned long node_page_state(int node, enum zone_stat_item item);
-extern void zone_statistics(struct zone *, struct zone *, gfp_t gfp);
#else
#define node_page_state(node, item) global_page_state(item)
-#define zone_statistics(_zl, _z, gfp) do { } while (0)
#endif /* CONFIG_NUMA */
@@ -193,6 +191,10 @@ void quiet_vmstat(void);
void cpu_vm_stats_fold(int cpu);
void refresh_zone_stat_thresholds(void);
+struct ctl_table;
+int vmstat_refresh(struct ctl_table *, int write,
+ void __user *buffer, size_t *lenp, loff_t *ppos);
+
void drain_zonestat(struct zone *zone, struct per_cpu_pageset *);
int calculate_pressure_threshold(struct zone *zone);
diff --git a/include/linux/xattr.h b/include/linux/xattr.h
index 4457541de3c9..1cc4c578deb9 100644
--- a/include/linux/xattr.h
+++ b/include/linux/xattr.h
@@ -30,7 +30,8 @@ struct xattr_handler {
int flags; /* fs private flags */
bool (*list)(struct dentry *dentry);
int (*get)(const struct xattr_handler *, struct dentry *dentry,
- const char *name, void *buffer, size_t size);
+ struct inode *inode, const char *name, void *buffer,
+ size_t size);
int (*set)(const struct xattr_handler *, struct dentry *dentry,
const char *name, const void *buffer, size_t size,
int flags);
@@ -51,7 +52,7 @@ int __vfs_setxattr_noperm(struct dentry *, const char *, const void *, size_t, i
int vfs_setxattr(struct dentry *, const char *, const void *, size_t, int);
int vfs_removexattr(struct dentry *, const char *);
-ssize_t generic_getxattr(struct dentry *dentry, const char *name, void *buffer, size_t size);
+ssize_t generic_getxattr(struct dentry *dentry, struct inode *inode, const char *name, void *buffer, size_t size);
ssize_t generic_listxattr(struct dentry *dentry, char *buffer, size_t buffer_size);
int generic_setxattr(struct dentry *dentry, const char *name, const void *value, size_t size, int flags);
int generic_removexattr(struct dentry *dentry, const char *name);
diff --git a/include/media/media-device.h b/include/media/media-device.h
index df74cfa7da4a..a9b33c47310d 100644
--- a/include/media/media-device.h
+++ b/include/media/media-device.h
@@ -25,7 +25,6 @@
#include <linux/list.h>
#include <linux/mutex.h>
-#include <linux/spinlock.h>
#include <media/media-devnode.h>
#include <media/media-entity.h>
@@ -304,8 +303,7 @@ struct media_entity_notify {
* @pads: List of registered pads
* @links: List of registered links
* @entity_notify: List of registered entity_notify callbacks
- * @lock: Entities list lock
- * @graph_mutex: Entities graph operation lock
+ * @graph_mutex: Protects access to struct media_device data
* @pm_count_walk: Graph walk for power state walk. Access serialised using
* graph_mutex.
*
@@ -313,7 +311,8 @@ struct media_entity_notify {
* @enable_source: Enable Source Handler function pointer
* @disable_source: Disable Source Handler function pointer
*
- * @link_notify: Link state change notification callback
+ * @link_notify: Link state change notification callback. This callback is
+ * called with the graph_mutex held.
*
* This structure represents an abstract high-level media device. It allows easy
* access to entities and provides basic media device-level support. The
@@ -357,7 +356,7 @@ struct media_device {
u32 hw_revision;
u32 driver_version;
- u32 topology_version;
+ u64 topology_version;
u32 id;
struct ida entity_internal_idx;
@@ -371,8 +370,6 @@ struct media_device {
/* notify callback list invoked when a new entity is registered */
struct list_head entity_notify;
- /* Protects the graph objects creation/removal */
- spinlock_t lock;
/* Serializes graph operations. */
struct mutex graph_mutex;
struct media_entity_graph pm_count_walk;
@@ -494,7 +491,7 @@ int __must_check __media_device_register(struct media_device *mdev,
#define media_device_register(mdev) __media_device_register(mdev, THIS_MODULE)
/**
- * __media_device_unregister() - Unegisters a media device element
+ * media_device_unregister() - Unregisters a media device element
*
* @mdev: pointer to struct &media_device
*
diff --git a/include/media/media-entity.h b/include/media/media-entity.h
index 6dc9e4e8cbd4..cbb266f7f2b5 100644
--- a/include/media/media-entity.h
+++ b/include/media/media-entity.h
@@ -179,6 +179,9 @@ struct media_pad {
* @link_validate: Return whether a link is valid from the entity point of
* view. The media_entity_pipeline_start() function
* validates all links by calling this operation. Optional.
+ *
+ * Note: These callbacks are called with the struct media_device.@graph_mutex
+ * mutex held.
*/
struct media_entity_operations {
int (*link_setup)(struct media_entity *entity,
@@ -188,10 +191,38 @@ struct media_entity_operations {
};
/**
+ * enum media_entity_type - Media entity type
+ *
+ * @MEDIA_ENTITY_TYPE_BASE:
+ * The entity isn't embedded in another subsystem structure.
+ * @MEDIA_ENTITY_TYPE_VIDEO_DEVICE:
+ * The entity is embedded in a struct video_device instance.
+ * @MEDIA_ENTITY_TYPE_V4L2_SUBDEV:
+ * The entity is embedded in a struct v4l2_subdev instance.
+ *
+ * Media entity objects are often not instantiated directly, but the media
+ * entity structure is inherited by (through embedding) other subsystem-specific
+ * structures. The media entity type identifies the type of the subclass
+ * structure that implements a media entity instance.
+ *
+ * This allows runtime type identification of media entities and safe casting to
+ * the correct object type. For instance, a media entity structure instance
+ * embedded in a v4l2_subdev structure instance will have the type
+ * MEDIA_ENTITY_TYPE_V4L2_SUBDEV and can safely be cast to a v4l2_subdev
+ * structure using the container_of() macro.
+ */
+enum media_entity_type {
+ MEDIA_ENTITY_TYPE_BASE,
+ MEDIA_ENTITY_TYPE_VIDEO_DEVICE,
+ MEDIA_ENTITY_TYPE_V4L2_SUBDEV,
+};
+
+/**
* struct media_entity - A media entity graph object.
*
* @graph_obj: Embedded structure containing the media object common data.
* @name: Entity name.
+ * @obj_type: Type of the object that implements the media_entity.
* @function: Entity main function, as defined in uapi/media.h
* (MEDIA_ENT_F_*)
* @flags: Entity flags, as defined in uapi/media.h (MEDIA_ENT_FL_*)
@@ -220,6 +251,7 @@ struct media_entity_operations {
struct media_entity {
struct media_gobj graph_obj; /* must be first field in struct */
const char *name;
+ enum media_entity_type obj_type;
u32 function;
unsigned long flags;
@@ -329,56 +361,29 @@ static inline u32 media_gobj_gen_id(enum media_gobj_type type, u64 local_id)
}
/**
- * is_media_entity_v4l2_io() - identify if the entity main function
- * is a V4L2 I/O
- *
+ * is_media_entity_v4l2_video_device() - Check if the entity is a video_device
* @entity: pointer to entity
*
- * Return: true if the entity main function is one of the V4L2 I/O types
- * (video, VBI or SDR radio); false otherwise.
+ * Return: true if the entity is an instance of a video_device object and can
+ * safely be cast to a struct video_device using the container_of() macro, or
+ * false otherwise.
*/
-static inline bool is_media_entity_v4l2_io(struct media_entity *entity)
+static inline bool is_media_entity_v4l2_video_device(struct media_entity *entity)
{
- if (!entity)
- return false;
-
- switch (entity->function) {
- case MEDIA_ENT_F_IO_V4L:
- case MEDIA_ENT_F_IO_VBI:
- case MEDIA_ENT_F_IO_SWRADIO:
- return true;
- default:
- return false;
- }
+ return entity && entity->obj_type == MEDIA_ENTITY_TYPE_VIDEO_DEVICE;
}
/**
- * is_media_entity_v4l2_subdev - return true if the entity main function is
- * associated with the V4L2 API subdev usage
- *
+ * is_media_entity_v4l2_subdev() - Check if the entity is a v4l2_subdev
* @entity: pointer to entity
*
- * This is an ancillary function used by subdev-based V4L2 drivers.
- * It checks if the entity function is one of functions used by a V4L2 subdev,
- * e. g. camera-relatef functions, analog TV decoder, TV tuner, V4L2 DSPs.
+ * Return: true if the entity is an instance of a v4l2_subdev object and can
+ * safely be cast to a struct v4l2_subdev using the container_of() macro, or
+ * false otherwise.
*/
static inline bool is_media_entity_v4l2_subdev(struct media_entity *entity)
{
- if (!entity)
- return false;
-
- switch (entity->function) {
- case MEDIA_ENT_F_V4L2_SUBDEV_UNKNOWN:
- case MEDIA_ENT_F_CAM_SENSOR:
- case MEDIA_ENT_F_FLASH:
- case MEDIA_ENT_F_LENS:
- case MEDIA_ENT_F_ATV_DECODER:
- case MEDIA_ENT_F_TUNER:
- return true;
-
- default:
- return false;
- }
+ return entity && entity->obj_type == MEDIA_ENTITY_TYPE_V4L2_SUBDEV;
}
/**
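
Illustrative sketch (not from this patch): with obj_type in place, runtime type checks and container_of() casts become safe. Hypothetical helper:

#include <linux/kernel.h>
#include <media/media-entity.h>
#include <media/v4l2-subdev.h>

/* Hypothetical helper: downcast an entity to the v4l2_subdev embedding
 * it, or return NULL when the entity is of a different type. */
static struct v4l2_subdev *example_entity_to_subdev(struct media_entity *entity)
{
	if (!is_media_entity_v4l2_subdev(entity))
		return NULL;
	return container_of(entity, struct v4l2_subdev, entity);
}
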
diff --git a/include/media/rc-core.h b/include/media/rc-core.h
index 0f77b3dffb37..b6586a91129c 100644
--- a/include/media/rc-core.h
+++ b/include/media/rc-core.h
@@ -215,12 +215,9 @@ enum raw_event_type {
struct ir_raw_event {
union {
u32 duration;
-
- struct {
- u32 carrier;
- u8 duty_cycle;
- };
+ u32 carrier;
};
+ u8 duty_cycle;
unsigned pulse:1;
unsigned reset:1;
@@ -228,13 +225,7 @@ struct ir_raw_event {
unsigned carrier_report:1;
};
-#define DEFINE_IR_RAW_EVENT(event) \
- struct ir_raw_event event = { \
- { .duration = 0 } , \
- .pulse = 0, \
- .reset = 0, \
- .timeout = 0, \
- .carrier_report = 0 }
+#define DEFINE_IR_RAW_EVENT(event) struct ir_raw_event event = {}
static inline void init_ir_raw_event(struct ir_raw_event *ev)
{
@@ -256,8 +247,7 @@ void ir_raw_event_set_idle(struct rc_dev *dev, bool idle);
static inline void ir_raw_event_reset(struct rc_dev *dev)
{
- DEFINE_IR_RAW_EVENT(ev);
- ev.reset = true;
+ struct ir_raw_event ev = { .reset = true };
ir_raw_event_store(dev, &ev);
ir_raw_event_handle(dev);
diff --git a/include/media/v4l2-dev.h b/include/media/v4l2-dev.h
index 76056ab5c5bd..25a3190308fb 100644
--- a/include/media/v4l2-dev.h
+++ b/include/media/v4l2-dev.h
@@ -92,6 +92,9 @@ struct video_device
/* device ops */
const struct v4l2_file_operations *fops;
+ /* device capabilities as used in v4l2_capabilities */
+ u32 device_caps;
+
/* sysfs */
struct device dev; /* v4l device */
struct cdev *cdev; /* character device */
diff --git a/include/media/v4l2-device.h b/include/media/v4l2-device.h
index 9c581578783f..d5d45a8d3998 100644
--- a/include/media/v4l2-device.h
+++ b/include/media/v4l2-device.h
@@ -196,11 +196,64 @@ static inline void v4l2_subdev_notify(struct v4l2_subdev *sd,
##args); \
})
-#define v4l2_device_has_op(v4l2_dev, o, f) \
+/*
+ * Call the specified callback for all subdevs where grp_id & grpmsk != 0
+ * (if grpmsk == 0, then match them all). Ignore any errors. Note that you
+ * cannot add or delete a subdev while walking the subdevs list.
+ */
+#define v4l2_device_mask_call_all(v4l2_dev, grpmsk, o, f, args...) \
+ do { \
+ struct v4l2_subdev *__sd; \
+ \
+ __v4l2_device_call_subdevs_p(v4l2_dev, __sd, \
+ !(grpmsk) || (__sd->grp_id & (grpmsk)), o, f , \
+ ##args); \
+ } while (0)
+
+/*
+ * Call the specified callback for all subdevs where grp_id & grpmsk != 0
+ * (if grpmsk == 0, then match them all). If the callback returns an error
+ * other than 0 or -ENOIOCTLCMD, then return with that error code. Note that
+ * you cannot add or delete a subdev while walking the subdevs list.
+ */
+#define v4l2_device_mask_call_until_err(v4l2_dev, grpmsk, o, f, args...) \
+({ \
+ struct v4l2_subdev *__sd; \
+ __v4l2_device_call_subdevs_until_err_p(v4l2_dev, __sd, \
+ !(grpmsk) || (__sd->grp_id & (grpmsk)), o, f , \
+ ##args); \
+})
+
+/*
+ * Does any subdev with matching grpid (or all if grpid == 0) have the given
+ * op?
+ */
+#define v4l2_device_has_op(v4l2_dev, grpid, o, f) \
+({ \
+ struct v4l2_subdev *__sd; \
+ bool __result = false; \
+ list_for_each_entry(__sd, &(v4l2_dev)->subdevs, list) { \
+ if ((grpid) && __sd->grp_id != (grpid)) \
+ continue; \
+ if (v4l2_subdev_has_op(__sd, o, f)) { \
+ __result = true; \
+ break; \
+ } \
+ } \
+ __result; \
+})
+
+/*
+ * Does any subdev with matching grpmsk (or all if grpmsk == 0) have the given
+ * op?
+ */
+#define v4l2_device_mask_has_op(v4l2_dev, grpmsk, o, f) \
({ \
struct v4l2_subdev *__sd; \
bool __result = false; \
list_for_each_entry(__sd, &(v4l2_dev)->subdevs, list) { \
+ if ((grpmsk) && !(__sd->grp_id & (grpmsk))) \
+ continue; \
if (v4l2_subdev_has_op(__sd, o, f)) { \
__result = true; \
break; \
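
Illustrative sketch (not from this patch): the mask based call macros let a bridge driver address only subdevs whose grp_id matches a driver-defined bitmask. Hypothetical example (group mask value made up):

#include <linux/bitops.h>
#include <media/v4l2-device.h>

#define EXAMPLE_GRP_SENSORS	BIT(0)	/* hypothetical driver-defined group */

/* Start streaming on every subdev whose grp_id has the sensor bit set;
 * all errors are ignored by the _call_all variant. */
static void example_start_sensors(struct v4l2_device *v4l2_dev)
{
	v4l2_device_mask_call_all(v4l2_dev, EXAMPLE_GRP_SENSORS,
				  video, s_stream, 1);
}
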
diff --git a/include/media/v4l2-rect.h b/include/media/v4l2-rect.h
new file mode 100644
index 000000000000..d2125f0cc7cd
--- /dev/null
+++ b/include/media/v4l2-rect.h
@@ -0,0 +1,173 @@
+/*
+ * v4l2-rect.h - v4l2_rect helper functions
+ *
+ * Copyright 2014 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef _V4L2_RECT_H_
+#define _V4L2_RECT_H_
+
+#include <linux/videodev2.h>
+
+/**
+ * v4l2_rect_set_size_to() - copy the width/height values.
+ * @r: rect whose width and height fields will be set
+ * @size: rect containing the width and height fields you need.
+ */
+static inline void v4l2_rect_set_size_to(struct v4l2_rect *r,
+ const struct v4l2_rect *size)
+{
+ r->width = size->width;
+ r->height = size->height;
+}
+
+/**
+ * v4l2_rect_set_min_size() - width and height of r should be >= min_size.
+ * @r: rect whose width and height will be modified
+ * @min_size: rect containing the minimal width and height
+ */
+static inline void v4l2_rect_set_min_size(struct v4l2_rect *r,
+ const struct v4l2_rect *min_size)
+{
+ if (r->width < min_size->width)
+ r->width = min_size->width;
+ if (r->height < min_size->height)
+ r->height = min_size->height;
+}
+
+/**
+ * v4l2_rect_set_max_size() - width and height of r should be <= max_size
+ * @r: rect whose width and height will be modified
+ * @max_size: rect containing the maximum width and height
+ */
+static inline void v4l2_rect_set_max_size(struct v4l2_rect *r,
+ const struct v4l2_rect *max_size)
+{
+ if (r->width > max_size->width)
+ r->width = max_size->width;
+ if (r->height > max_size->height)
+ r->height = max_size->height;
+}
+
+/**
+ * v4l2_rect_map_inside()- r should be inside boundary.
+ * @r: rect that will be modified
+ * @boundary: rect containing the boundary for @r
+ */
+static inline void v4l2_rect_map_inside(struct v4l2_rect *r,
+ const struct v4l2_rect *boundary)
+{
+ v4l2_rect_set_max_size(r, boundary);
+ if (r->left < boundary->left)
+ r->left = boundary->left;
+ if (r->top < boundary->top)
+ r->top = boundary->top;
+ if (r->left + r->width > boundary->width)
+ r->left = boundary->width - r->width;
+ if (r->top + r->height > boundary->height)
+ r->top = boundary->height - r->height;
+}
+
+/**
+ * v4l2_rect_same_size() - return true if r1 has the same size as r2
+ * @r1: rectangle.
+ * @r2: rectangle.
+ *
+ * Return true if both rectangles have the same size.
+ */
+static inline bool v4l2_rect_same_size(const struct v4l2_rect *r1,
+ const struct v4l2_rect *r2)
+{
+ return r1->width == r2->width && r1->height == r2->height;
+}
+
+/**
+ * v4l2_rect_intersect() - calculate the intersection of two rects.
+ * @r: intersection of @r1 and @r2.
+ * @r1: rectangle.
+ * @r2: rectangle.
+ */
+static inline void v4l2_rect_intersect(struct v4l2_rect *r,
+ const struct v4l2_rect *r1,
+ const struct v4l2_rect *r2)
+{
+ int right, bottom;
+
+ r->top = max(r1->top, r2->top);
+ r->left = max(r1->left, r2->left);
+ bottom = min(r1->top + r1->height, r2->top + r2->height);
+ right = min(r1->left + r1->width, r2->left + r2->width);
+ r->height = max(0, bottom - r->top);
+ r->width = max(0, right - r->left);
+}
+
+/**
+ * v4l2_rect_scale() - scale rect r by to/from
+ * @r: rect to be scaled.
+ * @from: from rectangle.
+ * @to: to rectangle.
+ *
+ * This scales rectangle @r horizontally by @to->width / @from->width and
+ * vertically by @to->height / @from->height.
+ *
+ * Typically @r is a rectangle inside @from and you want the rectangle as
+ * it would appear after scaling @from to @to. So the resulting @r will
+ * be the scaled rectangle inside @to.
+ */
+static inline void v4l2_rect_scale(struct v4l2_rect *r,
+ const struct v4l2_rect *from,
+ const struct v4l2_rect *to)
+{
+ if (from->width == 0 || from->height == 0) {
+ r->left = r->top = r->width = r->height = 0;
+ return;
+ }
+ r->left = (((r->left - from->left) * to->width) / from->width) & ~1;
+ r->width = ((r->width * to->width) / from->width) & ~1;
+ r->top = ((r->top - from->top) * to->height) / from->height;
+ r->height = (r->height * to->height) / from->height;
+}
+
+/**
+ * v4l2_rect_overlap() - do r1 and r2 overlap?
+ * @r1: rectangle.
+ * @r2: rectangle.
+ *
+ * Returns true if @r1 and @r2 overlap.
+ */
+static inline bool v4l2_rect_overlap(const struct v4l2_rect *r1,
+ const struct v4l2_rect *r2)
+{
+ /*
+ * IF the left side of r1 is to the right of the right side of r2 OR
+ * the left side of r2 is to the right of the right side of r1 THEN
+ * they do not overlap.
+ */
+ if (r1->left >= r2->left + r2->width ||
+ r2->left >= r1->left + r1->width)
+ return false;
+ /*
+ * IF the top side of r1 is below the bottom of r2 OR
+ * the top side of r2 is below the bottom of r1 THEN
+ * they do not overlap.
+ */
+ if (r1->top >= r2->top + r2->height ||
+ r2->top >= r1->top + r1->height)
+ return false;
+ return true;
+}
+
+#endif
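
Illustrative sketch (not from this new header): a typical use of these helpers is clamping a requested crop rectangle to the active frame. Hypothetical example:

#include <media/v4l2-rect.h>

/* Hypothetical crop validation: force the requested rectangle to at
 * least 2x2, at most the frame size, and map it inside the frame
 * (frame assumed to start at offset 0,0). */
static void example_clamp_crop(struct v4l2_rect *crop,
			       const struct v4l2_rect *frame)
{
	static const struct v4l2_rect min_rect = {
		.width = 2,
		.height = 2,
	};

	v4l2_rect_set_min_size(crop, &min_rect);
	v4l2_rect_set_max_size(crop, frame);
	v4l2_rect_map_inside(crop, frame);
}
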
diff --git a/include/media/v4l2-subdev.h b/include/media/v4l2-subdev.h
index 11e2dfec0198..32fc7a4beb5e 100644
--- a/include/media/v4l2-subdev.h
+++ b/include/media/v4l2-subdev.h
@@ -572,6 +572,7 @@ struct v4l2_subdev_pad_config {
/**
* struct v4l2_subdev_pad_ops - v4l2-subdev pad level operations
*
+ * @init_cfg: initialize the pad config to default values
* @enum_mbus_code: callback for VIDIOC_SUBDEV_ENUM_MBUS_CODE ioctl handler
* code.
* @enum_frame_size: callback for VIDIOC_SUBDEV_ENUM_FRAME_SIZE ioctl handler
@@ -607,6 +608,8 @@ struct v4l2_subdev_pad_config {
* may be adjusted by the subdev driver to device capabilities.
*/
struct v4l2_subdev_pad_ops {
+ int (*init_cfg)(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg);
int (*enum_mbus_code)(struct v4l2_subdev *sd,
struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_mbus_code_enum *code);
@@ -801,7 +804,12 @@ int v4l2_subdev_link_validate_default(struct v4l2_subdev *sd,
struct v4l2_subdev_format *source_fmt,
struct v4l2_subdev_format *sink_fmt);
int v4l2_subdev_link_validate(struct media_link *link);
+
+struct v4l2_subdev_pad_config *
+v4l2_subdev_alloc_pad_config(struct v4l2_subdev *sd);
+void v4l2_subdev_free_pad_config(struct v4l2_subdev_pad_config *cfg);
#endif /* CONFIG_MEDIA_CONTROLLER */
+
void v4l2_subdev_init(struct v4l2_subdev *sd,
const struct v4l2_subdev_ops *ops);
diff --git a/include/media/v4l2-tpg-colors.h b/include/media/v4l2-tpg-colors.h
new file mode 100644
index 000000000000..2a88d1fae0cd
--- /dev/null
+++ b/include/media/v4l2-tpg-colors.h
@@ -0,0 +1,68 @@
+/*
+ * v4l2-tpg-colors.h - Color definitions for the test pattern generator
+ *
+ * Copyright 2014 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef _V4L2_TPG_COLORS_H_
+#define _V4L2_TPG_COLORS_H_
+
+struct color {
+ unsigned char r, g, b;
+};
+
+struct color16 {
+ int r, g, b;
+};
+
+enum tpg_color {
+ TPG_COLOR_CSC_WHITE,
+ TPG_COLOR_CSC_YELLOW,
+ TPG_COLOR_CSC_CYAN,
+ TPG_COLOR_CSC_GREEN,
+ TPG_COLOR_CSC_MAGENTA,
+ TPG_COLOR_CSC_RED,
+ TPG_COLOR_CSC_BLUE,
+ TPG_COLOR_CSC_BLACK,
+ TPG_COLOR_75_YELLOW,
+ TPG_COLOR_75_CYAN,
+ TPG_COLOR_75_GREEN,
+ TPG_COLOR_75_MAGENTA,
+ TPG_COLOR_75_RED,
+ TPG_COLOR_75_BLUE,
+ TPG_COLOR_100_WHITE,
+ TPG_COLOR_100_YELLOW,
+ TPG_COLOR_100_CYAN,
+ TPG_COLOR_100_GREEN,
+ TPG_COLOR_100_MAGENTA,
+ TPG_COLOR_100_RED,
+ TPG_COLOR_100_BLUE,
+ TPG_COLOR_100_BLACK,
+ TPG_COLOR_TEXTFG,
+ TPG_COLOR_TEXTBG,
+ TPG_COLOR_RANDOM,
+ TPG_COLOR_RAMP,
+ TPG_COLOR_MAX = TPG_COLOR_RAMP + 256
+};
+
+extern const struct color tpg_colors[TPG_COLOR_MAX];
+extern const unsigned short tpg_rec709_to_linear[255 * 16 + 1];
+extern const unsigned short tpg_linear_to_rec709[255 * 16 + 1];
+extern const struct color16 tpg_csc_colors[V4L2_COLORSPACE_DCI_P3 + 1]
+ [V4L2_XFER_FUNC_SMPTE2084 + 1]
+ [TPG_COLOR_CSC_BLACK + 1];
+
+#endif
diff --git a/include/media/v4l2-tpg.h b/include/media/v4l2-tpg.h
new file mode 100644
index 000000000000..329bebfa930c
--- /dev/null
+++ b/include/media/v4l2-tpg.h
@@ -0,0 +1,597 @@
+/*
+ * v4l2-tpg.h - Test Pattern Generator
+ *
+ * Copyright 2014 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef _V4L2_TPG_H_
+#define _V4L2_TPG_H_
+
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/random.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/videodev2.h>
+#include <media/v4l2-tpg-colors.h>
+
+enum tpg_pattern {
+ TPG_PAT_75_COLORBAR,
+ TPG_PAT_100_COLORBAR,
+ TPG_PAT_CSC_COLORBAR,
+ TPG_PAT_100_HCOLORBAR,
+ TPG_PAT_100_COLORSQUARES,
+ TPG_PAT_BLACK,
+ TPG_PAT_WHITE,
+ TPG_PAT_RED,
+ TPG_PAT_GREEN,
+ TPG_PAT_BLUE,
+ TPG_PAT_CHECKERS_16X16,
+ TPG_PAT_CHECKERS_2X2,
+ TPG_PAT_CHECKERS_1X1,
+ TPG_PAT_COLOR_CHECKERS_2X2,
+ TPG_PAT_COLOR_CHECKERS_1X1,
+ TPG_PAT_ALTERNATING_HLINES,
+ TPG_PAT_ALTERNATING_VLINES,
+ TPG_PAT_CROSS_1_PIXEL,
+ TPG_PAT_CROSS_2_PIXELS,
+ TPG_PAT_CROSS_10_PIXELS,
+ TPG_PAT_GRAY_RAMP,
+
+ /* Must be the last pattern */
+ TPG_PAT_NOISE,
+};
+
+extern const char * const tpg_pattern_strings[];
+
+enum tpg_quality {
+ TPG_QUAL_COLOR,
+ TPG_QUAL_GRAY,
+ TPG_QUAL_NOISE
+};
+
+enum tpg_video_aspect {
+ TPG_VIDEO_ASPECT_IMAGE,
+ TPG_VIDEO_ASPECT_4X3,
+ TPG_VIDEO_ASPECT_14X9_CENTRE,
+ TPG_VIDEO_ASPECT_16X9_CENTRE,
+ TPG_VIDEO_ASPECT_16X9_ANAMORPHIC,
+};
+
+enum tpg_pixel_aspect {
+ TPG_PIXEL_ASPECT_SQUARE,
+ TPG_PIXEL_ASPECT_NTSC,
+ TPG_PIXEL_ASPECT_PAL,
+};
+
+enum tpg_move_mode {
+ TPG_MOVE_NEG_FAST,
+ TPG_MOVE_NEG,
+ TPG_MOVE_NEG_SLOW,
+ TPG_MOVE_NONE,
+ TPG_MOVE_POS_SLOW,
+ TPG_MOVE_POS,
+ TPG_MOVE_POS_FAST,
+};
+
+extern const char * const tpg_aspect_strings[];
+
+#define TPG_MAX_PLANES 3
+#define TPG_MAX_PAT_LINES 8
+
+struct tpg_data {
+ /* Source frame size */
+ unsigned src_width, src_height;
+ /* Buffer height */
+ unsigned buf_height;
+ /* Scaled output frame size */
+ unsigned scaled_width;
+ u32 field;
+ bool field_alternate;
+ /* crop coordinates are frame-based */
+ struct v4l2_rect crop;
+ /* compose coordinates are format-based */
+ struct v4l2_rect compose;
+ /* border and square coordinates are frame-based */
+ struct v4l2_rect border;
+ struct v4l2_rect square;
+
+ /* Color-related fields */
+ enum tpg_quality qual;
+ unsigned qual_offset;
+ u8 alpha_component;
+ bool alpha_red_only;
+ u8 brightness;
+ u8 contrast;
+ u8 saturation;
+ s16 hue;
+ u32 fourcc;
+ bool is_yuv;
+ u32 colorspace;
+ u32 xfer_func;
+ u32 ycbcr_enc;
+ /*
+ * Stores the actual transfer function, i.e. will never be
+ * V4L2_XFER_FUNC_DEFAULT.
+ */
+ u32 real_xfer_func;
+ /*
+ * Stores the actual Y'CbCr encoding, i.e. will never be
+ * V4L2_YCBCR_ENC_DEFAULT.
+ */
+ u32 real_ycbcr_enc;
+ u32 quantization;
+ /*
+ * Stores the actual quantization, i.e. will never be
+ * V4L2_QUANTIZATION_DEFAULT.
+ */
+ u32 real_quantization;
+ enum tpg_video_aspect vid_aspect;
+ enum tpg_pixel_aspect pix_aspect;
+ unsigned rgb_range;
+ unsigned real_rgb_range;
+ unsigned buffers;
+ unsigned planes;
+ bool interleaved;
+ u8 vdownsampling[TPG_MAX_PLANES];
+ u8 hdownsampling[TPG_MAX_PLANES];
+ /*
+ * horizontal positions must be ANDed with this value to enforce
+ * correct boundaries for packed YUYV values.
+ */
+ unsigned hmask[TPG_MAX_PLANES];
+ /* Used to store the colors in native format, either RGB or YUV */
+ u8 colors[TPG_COLOR_MAX][3];
+ u8 textfg[TPG_MAX_PLANES][8], textbg[TPG_MAX_PLANES][8];
+ /* size in bytes for two pixels in each plane */
+ unsigned twopixelsize[TPG_MAX_PLANES];
+ unsigned bytesperline[TPG_MAX_PLANES];
+
+ /* Configuration */
+ enum tpg_pattern pattern;
+ bool hflip;
+ bool vflip;
+ unsigned perc_fill;
+ bool perc_fill_blank;
+ bool show_border;
+ bool show_square;
+ bool insert_sav;
+ bool insert_eav;
+
+ /* Test pattern movement */
+ enum tpg_move_mode mv_hor_mode;
+ int mv_hor_count;
+ int mv_hor_step;
+ enum tpg_move_mode mv_vert_mode;
+ int mv_vert_count;
+ int mv_vert_step;
+
+ bool recalc_colors;
+ bool recalc_lines;
+ bool recalc_square_border;
+
+ /* Used to store TPG_MAX_PAT_LINES lines, each with up to two planes */
+ unsigned max_line_width;
+ u8 *lines[TPG_MAX_PAT_LINES][TPG_MAX_PLANES];
+ u8 *downsampled_lines[TPG_MAX_PAT_LINES][TPG_MAX_PLANES];
+ u8 *random_line[TPG_MAX_PLANES];
+ u8 *contrast_line[TPG_MAX_PLANES];
+ u8 *black_line[TPG_MAX_PLANES];
+};
+
+void tpg_init(struct tpg_data *tpg, unsigned w, unsigned h);
+int tpg_alloc(struct tpg_data *tpg, unsigned max_w);
+void tpg_free(struct tpg_data *tpg);
+void tpg_reset_source(struct tpg_data *tpg, unsigned width, unsigned height,
+ u32 field);
+void tpg_log_status(struct tpg_data *tpg);
+
+void tpg_set_font(const u8 *f);
+void tpg_gen_text(const struct tpg_data *tpg,
+ u8 *basep[TPG_MAX_PLANES][2], int y, int x, char *text);
+void tpg_calc_text_basep(struct tpg_data *tpg,
+ u8 *basep[TPG_MAX_PLANES][2], unsigned p, u8 *vbuf);
+unsigned tpg_g_interleaved_plane(const struct tpg_data *tpg, unsigned buf_line);
+void tpg_fill_plane_buffer(struct tpg_data *tpg, v4l2_std_id std,
+ unsigned p, u8 *vbuf);
+void tpg_fillbuffer(struct tpg_data *tpg, v4l2_std_id std,
+ unsigned p, u8 *vbuf);
+bool tpg_s_fourcc(struct tpg_data *tpg, u32 fourcc);
+void tpg_s_crop_compose(struct tpg_data *tpg, const struct v4l2_rect *crop,
+ const struct v4l2_rect *compose);
+
+static inline void tpg_s_pattern(struct tpg_data *tpg, enum tpg_pattern pattern)
+{
+ if (tpg->pattern == pattern)
+ return;
+ tpg->pattern = pattern;
+ tpg->recalc_colors = true;
+}
+
+static inline void tpg_s_quality(struct tpg_data *tpg,
+ enum tpg_quality qual, unsigned qual_offset)
+{
+ if (tpg->qual == qual && tpg->qual_offset == qual_offset)
+ return;
+ tpg->qual = qual;
+ tpg->qual_offset = qual_offset;
+ tpg->recalc_colors = true;
+}
+
+static inline enum tpg_quality tpg_g_quality(const struct tpg_data *tpg)
+{
+ return tpg->qual;
+}
+
+static inline void tpg_s_alpha_component(struct tpg_data *tpg,
+ u8 alpha_component)
+{
+ if (tpg->alpha_component == alpha_component)
+ return;
+ tpg->alpha_component = alpha_component;
+ tpg->recalc_colors = true;
+}
+
+static inline void tpg_s_alpha_mode(struct tpg_data *tpg,
+ bool red_only)
+{
+ if (tpg->alpha_red_only == red_only)
+ return;
+ tpg->alpha_red_only = red_only;
+ tpg->recalc_colors = true;
+}
+
+static inline void tpg_s_brightness(struct tpg_data *tpg,
+ u8 brightness)
+{
+ if (tpg->brightness == brightness)
+ return;
+ tpg->brightness = brightness;
+ tpg->recalc_colors = true;
+}
+
+static inline void tpg_s_contrast(struct tpg_data *tpg,
+ u8 contrast)
+{
+ if (tpg->contrast == contrast)
+ return;
+ tpg->contrast = contrast;
+ tpg->recalc_colors = true;
+}
+
+static inline void tpg_s_saturation(struct tpg_data *tpg,
+ u8 saturation)
+{
+ if (tpg->saturation == saturation)
+ return;
+ tpg->saturation = saturation;
+ tpg->recalc_colors = true;
+}
+
+static inline void tpg_s_hue(struct tpg_data *tpg,
+ s16 hue)
+{
+ if (tpg->hue == hue)
+ return;
+ tpg->hue = hue;
+ tpg->recalc_colors = true;
+}
+
+static inline void tpg_s_rgb_range(struct tpg_data *tpg,
+ unsigned rgb_range)
+{
+ if (tpg->rgb_range == rgb_range)
+ return;
+ tpg->rgb_range = rgb_range;
+ tpg->recalc_colors = true;
+}
+
+static inline void tpg_s_real_rgb_range(struct tpg_data *tpg,
+ unsigned rgb_range)
+{
+ if (tpg->real_rgb_range == rgb_range)
+ return;
+ tpg->real_rgb_range = rgb_range;
+ tpg->recalc_colors = true;
+}
+
+static inline void tpg_s_colorspace(struct tpg_data *tpg, u32 colorspace)
+{
+ if (tpg->colorspace == colorspace)
+ return;
+ tpg->colorspace = colorspace;
+ tpg->recalc_colors = true;
+}
+
+static inline u32 tpg_g_colorspace(const struct tpg_data *tpg)
+{
+ return tpg->colorspace;
+}
+
+static inline void tpg_s_ycbcr_enc(struct tpg_data *tpg, u32 ycbcr_enc)
+{
+ if (tpg->ycbcr_enc == ycbcr_enc)
+ return;
+ tpg->ycbcr_enc = ycbcr_enc;
+ tpg->recalc_colors = true;
+}
+
+static inline u32 tpg_g_ycbcr_enc(const struct tpg_data *tpg)
+{
+ return tpg->ycbcr_enc;
+}
+
+static inline void tpg_s_xfer_func(struct tpg_data *tpg, u32 xfer_func)
+{
+ if (tpg->xfer_func == xfer_func)
+ return;
+ tpg->xfer_func = xfer_func;
+ tpg->recalc_colors = true;
+}
+
+static inline u32 tpg_g_xfer_func(const struct tpg_data *tpg)
+{
+ return tpg->xfer_func;
+}
+
+static inline void tpg_s_quantization(struct tpg_data *tpg, u32 quantization)
+{
+ if (tpg->quantization == quantization)
+ return;
+ tpg->quantization = quantization;
+ tpg->recalc_colors = true;
+}
+
+static inline u32 tpg_g_quantization(const struct tpg_data *tpg)
+{
+ return tpg->quantization;
+}
+
+static inline unsigned tpg_g_buffers(const struct tpg_data *tpg)
+{
+ return tpg->buffers;
+}
+
+static inline unsigned tpg_g_planes(const struct tpg_data *tpg)
+{
+ return tpg->interleaved ? 1 : tpg->planes;
+}
+
+static inline bool tpg_g_interleaved(const struct tpg_data *tpg)
+{
+ return tpg->interleaved;
+}
+
+static inline unsigned tpg_g_twopixelsize(const struct tpg_data *tpg, unsigned plane)
+{
+ return tpg->twopixelsize[plane];
+}
+
+static inline unsigned tpg_hdiv(const struct tpg_data *tpg,
+ unsigned plane, unsigned x)
+{
+ return ((x / tpg->hdownsampling[plane]) & tpg->hmask[plane]) *
+ tpg->twopixelsize[plane] / 2;
+}
+
+static inline unsigned tpg_hscale(const struct tpg_data *tpg, unsigned x)
+{
+ return (x * tpg->scaled_width) / tpg->src_width;
+}
+
+static inline unsigned tpg_hscale_div(const struct tpg_data *tpg,
+ unsigned plane, unsigned x)
+{
+ return tpg_hdiv(tpg, plane, tpg_hscale(tpg, x));
+}
+
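
As a worked example of the scaling helpers above (all numbers are illustrative, not taken from a real configuration): with src_width = 1280 and scaled_width = 1920, tpg_hscale(tpg, 640) returns 640 * 1920 / 1280 = 960. For a packed YUYV plane with hdownsampling[0] = 1, hmask[0] = ~1 and twopixelsize[0] = 4, tpg_hdiv(tpg, 0, 960) then returns ((960 / 1) & ~1) * 4 / 2 = 1920, i.e. the byte offset of that pixel within the line; tpg_hscale_div() simply combines the two steps.
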
+static inline unsigned tpg_g_bytesperline(const struct tpg_data *tpg, unsigned plane)
+{
+ return tpg->bytesperline[plane];
+}
+
+static inline void tpg_s_bytesperline(struct tpg_data *tpg, unsigned plane, unsigned bpl)
+{
+ unsigned p;
+
+ if (tpg->buffers > 1) {
+ tpg->bytesperline[plane] = bpl;
+ return;
+ }
+
+ for (p = 0; p < tpg_g_planes(tpg); p++) {
+ unsigned plane_w = bpl * tpg->twopixelsize[p] / tpg->twopixelsize[0];
+
+ tpg->bytesperline[p] = plane_w / tpg->hdownsampling[p];
+ }
+ if (tpg_g_interleaved(tpg))
+ tpg->bytesperline[1] = tpg->bytesperline[0];
+}
+
+
+static inline unsigned tpg_g_line_width(const struct tpg_data *tpg, unsigned plane)
+{
+ unsigned w = 0;
+ unsigned p;
+
+ if (tpg->buffers > 1)
+ return tpg_g_bytesperline(tpg, plane);
+ for (p = 0; p < tpg_g_planes(tpg); p++) {
+ unsigned plane_w = tpg_g_bytesperline(tpg, p);
+
+ w += plane_w / tpg->vdownsampling[p];
+ }
+ return w;
+}
+
+static inline unsigned tpg_calc_line_width(const struct tpg_data *tpg,
+ unsigned plane, unsigned bpl)
+{
+ unsigned w = 0;
+ unsigned p;
+
+ if (tpg->buffers > 1)
+ return bpl;
+ for (p = 0; p < tpg_g_planes(tpg); p++) {
+ unsigned plane_w = bpl * tpg->twopixelsize[p] / tpg->twopixelsize[0];
+
+ plane_w /= tpg->hdownsampling[p];
+ w += plane_w / tpg->vdownsampling[p];
+ }
+ return w;
+}
+
+static inline unsigned tpg_calc_plane_size(const struct tpg_data *tpg, unsigned plane)
+{
+ if (plane >= tpg_g_planes(tpg))
+ return 0;
+
+ return tpg_g_bytesperline(tpg, plane) * tpg->buf_height /
+ tpg->vdownsampling[plane];
+}
+
+static inline void tpg_s_buf_height(struct tpg_data *tpg, unsigned h)
+{
+ tpg->buf_height = h;
+}
+
+static inline void tpg_s_field(struct tpg_data *tpg, unsigned field, bool alternate)
+{
+ tpg->field = field;
+ tpg->field_alternate = alternate;
+}
+
+static inline void tpg_s_perc_fill(struct tpg_data *tpg,
+ unsigned perc_fill)
+{
+ tpg->perc_fill = perc_fill;
+}
+
+static inline unsigned tpg_g_perc_fill(const struct tpg_data *tpg)
+{
+ return tpg->perc_fill;
+}
+
+static inline void tpg_s_perc_fill_blank(struct tpg_data *tpg,
+ bool perc_fill_blank)
+{
+ tpg->perc_fill_blank = perc_fill_blank;
+}
+
+static inline void tpg_s_video_aspect(struct tpg_data *tpg,
+ enum tpg_video_aspect vid_aspect)
+{
+ if (tpg->vid_aspect == vid_aspect)
+ return;
+ tpg->vid_aspect = vid_aspect;
+ tpg->recalc_square_border = true;
+}
+
+static inline enum tpg_video_aspect tpg_g_video_aspect(const struct tpg_data *tpg)
+{
+ return tpg->vid_aspect;
+}
+
+static inline void tpg_s_pixel_aspect(struct tpg_data *tpg,
+ enum tpg_pixel_aspect pix_aspect)
+{
+ if (tpg->pix_aspect == pix_aspect)
+ return;
+ tpg->pix_aspect = pix_aspect;
+ tpg->recalc_square_border = true;
+}
+
+static inline void tpg_s_show_border(struct tpg_data *tpg,
+ bool show_border)
+{
+ tpg->show_border = show_border;
+}
+
+static inline void tpg_s_show_square(struct tpg_data *tpg,
+ bool show_square)
+{
+ tpg->show_square = show_square;
+}
+
+static inline void tpg_s_insert_sav(struct tpg_data *tpg, bool insert_sav)
+{
+ tpg->insert_sav = insert_sav;
+}
+
+static inline void tpg_s_insert_eav(struct tpg_data *tpg, bool insert_eav)
+{
+ tpg->insert_eav = insert_eav;
+}
+
+void tpg_update_mv_step(struct tpg_data *tpg);
+
+static inline void tpg_s_mv_hor_mode(struct tpg_data *tpg,
+ enum tpg_move_mode mv_hor_mode)
+{
+ tpg->mv_hor_mode = mv_hor_mode;
+ tpg_update_mv_step(tpg);
+}
+
+static inline void tpg_s_mv_vert_mode(struct tpg_data *tpg,
+ enum tpg_move_mode mv_vert_mode)
+{
+ tpg->mv_vert_mode = mv_vert_mode;
+ tpg_update_mv_step(tpg);
+}
+
+static inline void tpg_init_mv_count(struct tpg_data *tpg)
+{
+ tpg->mv_hor_count = tpg->mv_vert_count = 0;
+}
+
+static inline void tpg_update_mv_count(struct tpg_data *tpg, bool frame_is_field)
+{
+ tpg->mv_hor_count += tpg->mv_hor_step * (frame_is_field ? 1 : 2);
+ tpg->mv_vert_count += tpg->mv_vert_step * (frame_is_field ? 1 : 2);
+}
+
+static inline void tpg_s_hflip(struct tpg_data *tpg, bool hflip)
+{
+ if (tpg->hflip == hflip)
+ return;
+ tpg->hflip = hflip;
+ tpg_update_mv_step(tpg);
+ tpg->recalc_lines = true;
+}
+
+static inline bool tpg_g_hflip(const struct tpg_data *tpg)
+{
+ return tpg->hflip;
+}
+
+static inline void tpg_s_vflip(struct tpg_data *tpg, bool vflip)
+{
+ tpg->vflip = vflip;
+}
+
+static inline bool tpg_g_vflip(const struct tpg_data *tpg)
+{
+ return tpg->vflip;
+}
+
+static inline bool tpg_pattern_is_static(const struct tpg_data *tpg)
+{
+ return tpg->pattern != TPG_PAT_NOISE &&
+ tpg->mv_hor_mode == TPG_MOVE_NONE &&
+ tpg->mv_vert_mode == TPG_MOVE_NONE;
+}
+
+#endif
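
For orientation, a minimal sketch of how the TPG API declared above fits together. The resolution, pixel format and error handling are illustrative assumptions, not code from an existing driver, and in real code the struct tpg_data would live in the driver's state rather than on the stack.

static int example_tpg_fill(u8 *vbuf, unsigned int bytesperline)
{
	struct tpg_data tpg;
	int ret;

	tpg_init(&tpg, 1280, 720);
	ret = tpg_alloc(&tpg, 1920);		/* widest line we will ever draw */
	if (ret)
		return ret;

	if (!tpg_s_fourcc(&tpg, V4L2_PIX_FMT_YUYV)) {
		ret = -EINVAL;			/* format not handled by the TPG */
		goto out;
	}

	tpg_s_pattern(&tpg, TPG_PAT_75_COLORBAR);
	tpg_s_colorspace(&tpg, V4L2_COLORSPACE_REC709);
	tpg_s_buf_height(&tpg, 720);
	tpg_s_bytesperline(&tpg, 0, bytesperline);

	/* render plane 0 of one frame into the caller's buffer */
	tpg_fill_plane_buffer(&tpg, 0, 0, vbuf);
out:
	tpg_free(&tpg);
	return ret;
}
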
diff --git a/include/media/videobuf2-core.h b/include/media/videobuf2-core.h
index 8a0f55b6c2ba..88e3ab496e8f 100644
--- a/include/media/videobuf2-core.h
+++ b/include/media/videobuf2-core.h
@@ -375,6 +375,9 @@ struct vb2_ops {
/**
* struct vb2_ops - driver-specific callbacks
*
+ * @verify_planes_array: Verify that a given user space structure contains
+ * enough planes for the buffer. This is called
+ * for each dequeued buffer.
* @fill_user_buffer: given a vb2_buffer fill in the userspace structure.
* For V4L2 this is a struct v4l2_buffer.
* @fill_vb2_buffer: given a userspace structure, fill in the vb2_buffer.
@@ -384,6 +387,7 @@ struct vb2_ops {
* the vb2_buffer struct.
*/
struct vb2_buf_ops {
+ int (*verify_planes_array)(struct vb2_buffer *vb, const void *pb);
void (*fill_user_buffer)(struct vb2_buffer *vb, void *pb);
int (*fill_vb2_buffer)(struct vb2_buffer *vb, const void *pb,
struct vb2_plane *planes);
@@ -400,6 +404,9 @@ struct vb2_buf_ops {
* @fileio_read_once: report EOF after reading the first buffer
* @fileio_write_immediately: queue buffer after each write() call
* @allow_zero_bytesused: allow bytesused == 0 to be passed to the driver
+ * @quirk_poll_must_check_waiting_for_buffers: Return POLLERR at poll when QBUF
+ * has not been called. This is a vb1 idiom that has also been
+ * adopted by vb2.
* @lock: pointer to a mutex that protects the vb2_queue struct. The
* driver can set this to a mutex to let the v4l2 core serialize
* the queuing ioctls. If the driver wants to handle locking
@@ -463,6 +470,7 @@ struct vb2_queue {
unsigned fileio_read_once:1;
unsigned fileio_write_immediately:1;
unsigned allow_zero_bytesused:1;
+ unsigned quirk_poll_must_check_waiting_for_buffers:1;
struct mutex *lock;
void *owner;
diff --git a/include/media/vsp1.h b/include/media/vsp1.h
index cc541753896f..3e654a0455bd 100644
--- a/include/media/vsp1.h
+++ b/include/media/vsp1.h
@@ -23,11 +23,22 @@ int vsp1_du_init(struct device *dev);
int vsp1_du_setup_lif(struct device *dev, unsigned int width,
unsigned int height);
-int vsp1_du_atomic_begin(struct device *dev);
-int vsp1_du_atomic_update(struct device *dev, unsigned int rpf, u32 pixelformat,
- unsigned int pitch, dma_addr_t mem[2],
- const struct v4l2_rect *src,
- const struct v4l2_rect *dst);
-int vsp1_du_atomic_flush(struct device *dev);
+void vsp1_du_atomic_begin(struct device *dev);
+int vsp1_du_atomic_update_ext(struct device *dev, unsigned int rpf,
+ u32 pixelformat, unsigned int pitch,
+ dma_addr_t mem[2], const struct v4l2_rect *src,
+ const struct v4l2_rect *dst, unsigned int alpha,
+ unsigned int zpos);
+void vsp1_du_atomic_flush(struct device *dev);
+
+static inline int vsp1_du_atomic_update(struct device *dev,
+ unsigned int rpf_index, u32 pixelformat,
+ unsigned int pitch, dma_addr_t mem[2],
+ const struct v4l2_rect *src,
+ const struct v4l2_rect *dst)
+{
+ return vsp1_du_atomic_update_ext(dev, rpf_index, pixelformat, pitch,
+ mem, src, dst, 255, 0);
+}
#endif /* __MEDIA_VSP1_H__ */
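
For context, a sketch of the atomic begin/update/flush sequence a DU (DRM) client is expected to follow with the reworked API above. The function and its parameters are hypothetical placeholders supplied by the caller; only the vsp1_du_* calls come from this header.

static int example_vsp1_commit_plane(struct device *vsp1, unsigned int rpf,
				     u32 pixelformat, unsigned int pitch,
				     dma_addr_t mem[2],
				     const struct v4l2_rect *src,
				     const struct v4l2_rect *dst)
{
	int ret;

	vsp1_du_atomic_begin(vsp1);
	/*
	 * Fully opaque, bottom of the stack; the vsp1_du_atomic_update()
	 * wrapper above is equivalent to passing alpha = 255, zpos = 0.
	 */
	ret = vsp1_du_atomic_update_ext(vsp1, rpf, pixelformat, pitch, mem,
					src, dst, 255, 0);
	vsp1_du_atomic_flush(vsp1);

	return ret;
}
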
diff --git a/include/misc/cxl.h b/include/misc/cxl.h
index 7d5e2613c7b8..56560c5781b4 100644
--- a/include/misc/cxl.h
+++ b/include/misc/cxl.h
@@ -127,6 +127,14 @@ int cxl_afu_reset(struct cxl_context *ctx);
void cxl_set_master(struct cxl_context *ctx);
/*
+ * Sets the context to use real mode memory accesses to operate with
+ * translation disabled. Note that this only makes sense for kernel contexts
+ * under bare metal, and will not work with virtualisation. May only be
+ * performed on stopped contexts.
+ */
+int cxl_set_translation_mode(struct cxl_context *ctx, bool real_mode);
+
+/*
* Map and unmap the AFU Problem Space area. The amount and location mapped
* depends on if this context is a master or slave.
*/
diff --git a/include/net/6lowpan.h b/include/net/6lowpan.h
index da3a77d25fcb..da84cf920b78 100644
--- a/include/net/6lowpan.h
+++ b/include/net/6lowpan.h
@@ -58,6 +58,9 @@
#include <net/ipv6.h>
#include <net/net_namespace.h>
+/* special link-layer handling */
+#include <net/mac802154.h>
+
#define EUI64_ADDR_LEN 8
#define LOWPAN_NHC_MAX_ID_LEN 1
@@ -93,7 +96,7 @@ static inline bool lowpan_is_iphc(u8 dispatch)
}
#define LOWPAN_PRIV_SIZE(llpriv_size) \
- (sizeof(struct lowpan_priv) + llpriv_size)
+ (sizeof(struct lowpan_dev) + llpriv_size)
enum lowpan_lltypes {
LOWPAN_LLTYPE_BTLE,
@@ -129,7 +132,7 @@ lowpan_iphc_ctx_is_compression(const struct lowpan_iphc_ctx *ctx)
return test_bit(LOWPAN_IPHC_CTX_FLAG_COMPRESSION, &ctx->flags);
}
-struct lowpan_priv {
+struct lowpan_dev {
enum lowpan_lltypes lltype;
struct dentry *iface_debugfs;
struct lowpan_iphc_ctx_table ctx;
@@ -139,11 +142,23 @@ struct lowpan_priv {
};
static inline
-struct lowpan_priv *lowpan_priv(const struct net_device *dev)
+struct lowpan_dev *lowpan_dev(const struct net_device *dev)
{
return netdev_priv(dev);
}
+/* private device info */
+struct lowpan_802154_dev {
+ struct net_device *wdev; /* wpan device ptr */
+ u16 fragment_tag;
+};
+
+static inline struct
+lowpan_802154_dev *lowpan_802154_dev(const struct net_device *dev)
+{
+ return (struct lowpan_802154_dev *)lowpan_dev(dev)->priv;
+}
+
struct lowpan_802154_cb {
u16 d_tag;
unsigned int d_size;
@@ -157,6 +172,22 @@ struct lowpan_802154_cb *lowpan_802154_cb(const struct sk_buff *skb)
return (struct lowpan_802154_cb *)skb->cb;
}
+static inline void lowpan_iphc_uncompress_eui64_lladdr(struct in6_addr *ipaddr,
+ const void *lladdr)
+{
+ /* fe80::XXXX:XXXX:XXXX:XXXX
+ * \_________________/
+ * hwaddr
+ */
+ ipaddr->s6_addr[0] = 0xFE;
+ ipaddr->s6_addr[1] = 0x80;
+ memcpy(&ipaddr->s6_addr[8], lladdr, EUI64_ADDR_LEN);
+ /* the second bit (Universal/Local bit) is flipped
+ * according to RFC 2464
+ */
+ ipaddr->s6_addr[8] ^= 0x02;
+}
+
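
A short illustration of the helper above, using a made-up link-layer address: 00:11:22:ff:fe:33:44:55 expands to fe80::211:22ff:fe33:4455, since byte 8 becomes 0x00 ^ 0x02 = 0x02 after the Universal/Local bit flip.

static void example_uncompress_lladdr(void)
{
	static const u8 lladdr[EUI64_ADDR_LEN] = {
		0x00, 0x11, 0x22, 0xff, 0xfe, 0x33, 0x44, 0x55
	};
	struct in6_addr addr;

	lowpan_iphc_uncompress_eui64_lladdr(&addr, lladdr);
	/* addr now holds fe80::211:22ff:fe33:4455 */
}
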
#ifdef DEBUG
/* print data in line */
static inline void raw_dump_inline(const char *caller, char *msg,
diff --git a/include/net/act_api.h b/include/net/act_api.h
index 2a19fe111c78..9a9a8edc138f 100644
--- a/include/net/act_api.h
+++ b/include/net/act_api.h
@@ -106,6 +106,7 @@ struct tc_action_ops {
int bind);
int (*walk)(struct net *, struct sk_buff *,
struct netlink_callback *, int, struct tc_action *);
+ void (*stats_update)(struct tc_action *, u64, u32, u64);
};
struct tc_action_net {
@@ -135,6 +136,7 @@ void tcf_hashinfo_destroy(const struct tc_action_ops *ops,
static inline void tc_action_net_exit(struct tc_action_net *tn)
{
tcf_hashinfo_destroy(tn->ops, tn->hinfo);
+ kfree(tn->hinfo);
}
int tcf_generic_walker(struct tc_action_net *tn, struct sk_buff *skb,
@@ -177,10 +179,21 @@ int tcf_action_copy_stats(struct sk_buff *, struct tc_action *, int);
#define tc_for_each_action(_a, _exts) \
list_for_each_entry(a, &(_exts)->actions, list)
+
+static inline void tcf_action_stats_update(struct tc_action *a, u64 bytes,
+ u64 packets, u64 lastuse)
+{
+ if (!a->ops->stats_update)
+ return;
+
+ a->ops->stats_update(a, bytes, packets, lastuse);
+}
+
#else /* CONFIG_NET_CLS_ACT */
#define tc_no_actions(_exts) true
-#define tc_for_each_action(_a, _exts) while (0)
+#define tc_for_each_action(_a, _exts) while ((void)(_a), 0)
+#define tcf_action_stats_update(a, bytes, packets, lastuse)
#endif /* CONFIG_NET_CLS_ACT */
#endif
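
A sketch of how a classifier's hardware-offload path is expected to feed counters back through the new stats_update hook. The function name and counter values are illustrative, struct tcf_exts is assumed from net/pkt_cls.h; only tc_for_each_action() and tcf_action_stats_update() come from this header.

static void example_report_hw_stats(struct tcf_exts *exts,
				    u64 bytes, u64 packets, u64 lastuse)
{
	struct tc_action *a;

	tc_for_each_action(a, exts)
		tcf_action_stats_update(a, bytes, packets, lastuse);
}
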
diff --git a/include/net/af_rxrpc.h b/include/net/af_rxrpc.h
index e797d45a5ae6..ac1bc3c49fbd 100644
--- a/include/net/af_rxrpc.h
+++ b/include/net/af_rxrpc.h
@@ -12,6 +12,7 @@
#ifndef _NET_RXRPC_H
#define _NET_RXRPC_H
+#include <linux/skbuff.h>
#include <linux/rxrpc.h>
struct rxrpc_call;
@@ -19,11 +20,12 @@ struct rxrpc_call;
/*
* the mark applied to socket buffers that may be intercepted
*/
-enum {
+enum rxrpc_skb_mark {
RXRPC_SKB_MARK_DATA, /* data message */
RXRPC_SKB_MARK_FINAL_ACK, /* final ACK received message */
RXRPC_SKB_MARK_BUSY, /* server busy message */
RXRPC_SKB_MARK_REMOTE_ABORT, /* remote abort message */
+ RXRPC_SKB_MARK_LOCAL_ABORT, /* local abort message */
RXRPC_SKB_MARK_NET_ERROR, /* network error message */
RXRPC_SKB_MARK_LOCAL_ERROR, /* local error message */
RXRPC_SKB_MARK_NEW_CALL, /* local error message */
diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h
index 5d38d980b89d..eefcf3e96421 100644
--- a/include/net/bluetooth/hci.h
+++ b/include/net/bluetooth/hci.h
@@ -61,6 +61,8 @@
#define HCI_RS232 4
#define HCI_PCI 5
#define HCI_SDIO 6
+#define HCI_SPI 7
+#define HCI_I2C 8
/* HCI controller types */
#define HCI_BREDR 0x00
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index 9e1b24c29f0c..63921672bed0 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -68,26 +68,6 @@ struct wiphy;
*/
/**
- * enum ieee80211_band - supported frequency bands
- *
- * The bands are assigned this way because the supported
- * bitrates differ in these bands.
- *
- * @IEEE80211_BAND_2GHZ: 2.4GHz ISM band
- * @IEEE80211_BAND_5GHZ: around 5GHz band (4.9-5.7)
- * @IEEE80211_BAND_60GHZ: around 60 GHz band (58.32 - 64.80 GHz)
- * @IEEE80211_NUM_BANDS: number of defined bands
- */
-enum ieee80211_band {
- IEEE80211_BAND_2GHZ = NL80211_BAND_2GHZ,
- IEEE80211_BAND_5GHZ = NL80211_BAND_5GHZ,
- IEEE80211_BAND_60GHZ = NL80211_BAND_60GHZ,
-
- /* keep last */
- IEEE80211_NUM_BANDS
-};
-
-/**
* enum ieee80211_channel_flags - channel flags
*
* Channel flags set by the regulatory control code.
@@ -167,7 +147,7 @@ enum ieee80211_channel_flags {
* @dfs_cac_ms: DFS CAC time in milliseconds, this is valid for DFS channels.
*/
struct ieee80211_channel {
- enum ieee80211_band band;
+ enum nl80211_band band;
u16 center_freq;
u16 hw_value;
u32 flags;
@@ -324,7 +304,7 @@ struct ieee80211_sta_vht_cap {
struct ieee80211_supported_band {
struct ieee80211_channel *channels;
struct ieee80211_rate *bitrates;
- enum ieee80211_band band;
+ enum nl80211_band band;
int n_channels;
int n_bitrates;
struct ieee80211_sta_ht_cap ht_cap;
@@ -816,6 +796,7 @@ enum station_parameters_apply_mask {
* @supported_oper_classes_len: number of supported operating classes
* @opmode_notif: operating mode field from Operating Mode Notification
* @opmode_notif_used: information if operating mode field is used
+ * @support_p2p_ps: information if station supports P2P PS mechanism
*/
struct station_parameters {
const u8 *supported_rates;
@@ -841,6 +822,7 @@ struct station_parameters {
u8 supported_oper_classes_len;
u8 opmode_notif;
bool opmode_notif_used;
+ int support_p2p_ps;
};
/**
@@ -1063,11 +1045,12 @@ struct cfg80211_tid_stats {
* @rx_beacon: number of beacons received from this peer
* @rx_beacon_signal_avg: signal strength average (in dBm) for beacons received
* from this peer
+ * @rx_duration: aggregate PPDU duration (usecs) for all the frames from a peer
* @pertid: per-TID statistics, see &struct cfg80211_tid_stats, using the last
* (IEEE80211_NUM_TIDS) index for MSDUs not encapsulated in QoS-MPDUs.
*/
struct station_info {
- u32 filled;
+ u64 filled;
u32 connected_time;
u32 inactive_time;
u64 rx_bytes;
@@ -1106,6 +1089,7 @@ struct station_info {
u32 expected_throughput;
u64 rx_beacon;
+ u64 rx_duration;
u8 rx_beacon_signal_avg;
struct cfg80211_tid_stats pertid[IEEE80211_NUM_TIDS + 1];
};
@@ -1368,7 +1352,7 @@ struct mesh_setup {
bool user_mpm;
u8 dtim_period;
u16 beacon_interval;
- int mcast_rate[IEEE80211_NUM_BANDS];
+ int mcast_rate[NUM_NL80211_BANDS];
u32 basic_rates;
};
@@ -1455,6 +1439,7 @@ struct cfg80211_ssid {
* @mac_addr_mask: MAC address mask used with randomisation, bits that
* are 0 in the mask should be randomised, bits that are 1 should
* be taken from the @mac_addr
+ * @bssid: BSSID to scan for (most commonly, the wildcard BSSID)
*/
struct cfg80211_scan_request {
struct cfg80211_ssid *ssids;
@@ -1465,12 +1450,13 @@ struct cfg80211_scan_request {
size_t ie_len;
u32 flags;
- u32 rates[IEEE80211_NUM_BANDS];
+ u32 rates[NUM_NL80211_BANDS];
struct wireless_dev *wdev;
u8 mac_addr[ETH_ALEN] __aligned(2);
u8 mac_addr_mask[ETH_ALEN] __aligned(2);
+ u8 bssid[ETH_ALEN] __aligned(2);
/* internal */
struct wiphy *wiphy;
@@ -1617,7 +1603,7 @@ struct cfg80211_inform_bss {
};
/**
- * struct cfg80211_bss_ie_data - BSS entry IE data
+ * struct cfg80211_bss_ies - BSS entry IE data
* @tsf: TSF contained in the frame that carried these IEs
* @rcu_head: internal use, for freeing
* @len: length of the IEs
@@ -1746,7 +1732,12 @@ enum cfg80211_assoc_req_flags {
* @ie_len: Length of ie buffer in octets
* @use_mfp: Use management frame protection (IEEE 802.11w) in this association
* @crypto: crypto settings
- * @prev_bssid: previous BSSID, if not %NULL use reassociate frame
+ * @prev_bssid: previous BSSID, if not %NULL use reassociate frame. This is used
+ * to indicate a request to reassociate within the ESS instead of a request
+ * to do the initial association with the ESS. When included, this is set to
+ * the BSSID of the current association, i.e., to the value that is
+ * included in the Current AP address field of the Reassociation Request
+ * frame.
* @flags: See &enum cfg80211_assoc_req_flags
* @ht_capa: HT Capabilities over-rides. Values set in ht_capa_mask
* will be used in ht_capa. Un-supported values will be ignored.
@@ -1851,12 +1842,39 @@ struct cfg80211_ibss_params {
bool privacy;
bool control_port;
bool userspace_handles_dfs;
- int mcast_rate[IEEE80211_NUM_BANDS];
+ int mcast_rate[NUM_NL80211_BANDS];
struct ieee80211_ht_cap ht_capa;
struct ieee80211_ht_cap ht_capa_mask;
};
/**
+ * struct cfg80211_bss_select_adjust - BSS selection with RSSI adjustment.
+ *
+ * @band: band of BSS which should match for RSSI level adjustment.
+ * @delta: value of RSSI level adjustment.
+ */
+struct cfg80211_bss_select_adjust {
+ enum nl80211_band band;
+ s8 delta;
+};
+
+/**
+ * struct cfg80211_bss_selection - connection parameters for BSS selection.
+ *
+ * @behaviour: requested BSS selection behaviour.
+ * @param: parameters for the requested behaviour.
+ * @band_pref: preferred band for %NL80211_BSS_SELECT_ATTR_BAND_PREF.
+ * @adjust: parameters for %NL80211_BSS_SELECT_ATTR_RSSI_ADJUST.
+ */
+struct cfg80211_bss_selection {
+ enum nl80211_bss_select_attr behaviour;
+ union {
+ enum nl80211_band band_pref;
+ struct cfg80211_bss_select_adjust adjust;
+ } param;
+};
+
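
Two illustrative initialisations of the new structure (the band and delta values are arbitrary); the attribute names are the ones referenced in the kernel-doc above.

/* prefer the 5 GHz band */
static const struct cfg80211_bss_selection example_band_pref = {
	.behaviour = NL80211_BSS_SELECT_ATTR_BAND_PREF,
	.param.band_pref = NL80211_BAND_5GHZ,
};

/* or: handicap 2.4 GHz BSSes by 10 dB when comparing RSSI */
static const struct cfg80211_bss_selection example_rssi_adjust = {
	.behaviour = NL80211_BSS_SELECT_ATTR_RSSI_ADJUST,
	.param.adjust = {
		.band = NL80211_BAND_2GHZ,
		.delta = -10,
	},
};
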
+/**
* struct cfg80211_connect_params - Connection parameters
*
* This structure provides information needed to complete IEEE 802.11
@@ -1893,6 +1911,13 @@ struct cfg80211_ibss_params {
* @vht_capa_mask: The bits of vht_capa which are to be used.
* @pbss: if set, connect to a PCP instead of AP. Valid for DMG
* networks.
+ * @bss_select: criteria to be used for BSS selection.
+ * @prev_bssid: previous BSSID, if not %NULL use reassociate frame. This is used
+ * to indicate a request to reassociate within the ESS instead of a request
+ * to do the initial association with the ESS. When included, this is set to
+ * the BSSID of the current association, i.e., to the value that is
+ * included in the Current AP address field of the Reassociation Request
+ * frame.
*/
struct cfg80211_connect_params {
struct ieee80211_channel *channel;
@@ -1916,6 +1941,8 @@ struct cfg80211_connect_params {
struct ieee80211_vht_cap vht_capa;
struct ieee80211_vht_cap vht_capa_mask;
bool pbss;
+ struct cfg80211_bss_selection bss_select;
+ const u8 *prev_bssid;
};
/**
@@ -1945,7 +1972,7 @@ struct cfg80211_bitrate_mask {
u8 ht_mcs[IEEE80211_HT_MCS_MASK_LEN];
u16 vht_mcs[NL80211_VHT_NSS_MAX];
enum nl80211_txrate_gi gi;
- } control[IEEE80211_NUM_BANDS];
+ } control[NUM_NL80211_BANDS];
};
/**
* struct cfg80211_pmksa - PMK Security Association
@@ -2342,7 +2369,17 @@ struct cfg80211_qos_map {
* @connect: Connect to the ESS with the specified parameters. When connected,
* call cfg80211_connect_result() with status code %WLAN_STATUS_SUCCESS.
* If the connection fails for some reason, call cfg80211_connect_result()
- * with the status from the AP.
+ * with the status from the AP. The driver is allowed to roam to other
+ * BSSes within the ESS when the other BSS matches the connect parameters.
+ * When such roaming is initiated by the driver, the driver is expected to
+ * verify that the target matches the configured security parameters and
+ * to use a Reassociation Request frame instead of an Association Request frame.
+ * The connect function can also be used to request the driver to perform
+ * a specific roam when connected to an ESS. In that case, the prev_bssid
+ * parameter is set to the BSSID of the currently associated BSS as an
+ * indication of requesting reassociation. In both cases, whether the roam
+ * was initiated by the driver or requested by a new connect() call, the
+ * result of roaming is indicated with a call to cfg80211_roamed() or
+ * cfg80211_roamed_bss().
* (invoked with the wireless_dev mutex held)
* @disconnect: Disconnect from the BSS/ESS.
* (invoked with the wireless_dev mutex held)
@@ -2622,7 +2659,7 @@ struct cfg80211_ops {
int (*leave_ibss)(struct wiphy *wiphy, struct net_device *dev);
int (*set_mcast_rate)(struct wiphy *wiphy, struct net_device *dev,
- int rate[IEEE80211_NUM_BANDS]);
+ int rate[NUM_NL80211_BANDS]);
int (*set_wiphy_params)(struct wiphy *wiphy, u32 changed);
@@ -3152,6 +3189,9 @@ struct wiphy_vendor_command {
* @vht_capa_mod_mask: Specify what VHT capabilities can be over-ridden.
* If null, then none can be over-ridden.
*
+ * @wdev_list: the list of associated (virtual) interfaces; this list must
+ * not be modified by the driver, but can be read with RTNL/RCU protection.
+ *
* @max_acl_mac_addrs: Maximum number of MAC addresses that the device
* supports for ACL.
*
@@ -3184,6 +3224,9 @@ struct wiphy_vendor_command {
* low rssi when a frame is heard on different channel, then it should set
* this variable to the maximal offset for which it can compensate.
* This value should be set in MHz.
+ * @bss_select_support: bitmask indicating the BSS selection criteria supported
+ * by the driver in the .connect() callback. The bit position maps to the
+ * attribute indices defined in &enum nl80211_bss_select_attr.
*/
struct wiphy {
/* assign these fields before you register the wiphy */
@@ -3265,7 +3308,7 @@ struct wiphy {
* help determine whether you own this wiphy or not. */
const void *privid;
- struct ieee80211_supported_band *bands[IEEE80211_NUM_BANDS];
+ struct ieee80211_supported_band *bands[NUM_NL80211_BANDS];
/* Lets us get back the wiphy on the callback */
void (*reg_notifier)(struct wiphy *wiphy,
@@ -3288,6 +3331,8 @@ struct wiphy {
const struct ieee80211_ht_cap *ht_capa_mod_mask;
const struct ieee80211_vht_cap *vht_capa_mod_mask;
+ struct list_head wdev_list;
+
/* the network namespace this phy lives in currently */
possible_net_t _net;
@@ -3306,6 +3351,8 @@ struct wiphy {
u8 max_num_csa_counters;
u8 max_adj_channel_rssi_comp;
+ u32 bss_select_support;
+
char priv[0] __aligned(NETDEV_ALIGN);
};
@@ -3598,7 +3645,7 @@ static inline void *wdev_priv(struct wireless_dev *wdev)
* @band: band, necessary due to channel number overlap
* Return: The corresponding frequency (in MHz), or 0 if the conversion failed.
*/
-int ieee80211_channel_to_frequency(int chan, enum ieee80211_band band);
+int ieee80211_channel_to_frequency(int chan, enum nl80211_band band);
/**
* ieee80211_frequency_to_channel - convert frequency to channel number
@@ -3851,7 +3898,7 @@ const u8 *cfg80211_find_ie(u8 eid, const u8 *ies, int len);
* cfg80211_find_vendor_ie - find vendor specific information element in data
*
* @oui: vendor OUI
- * @oui_type: vendor-specific OUI type
+ * @oui_type: vendor-specific OUI type (must be < 0xff), negative means any
* @ies: data consisting of IEs
* @len: length of data
*
@@ -3863,7 +3910,7 @@ const u8 *cfg80211_find_ie(u8 eid, const u8 *ies, int len);
* Note: There are no checks on the element length other than having to fit into
* the given data.
*/
-const u8 *cfg80211_find_vendor_ie(unsigned int oui, u8 oui_type,
+const u8 *cfg80211_find_vendor_ie(unsigned int oui, int oui_type,
const u8 *ies, int len);
/**
@@ -4610,6 +4657,32 @@ static inline void cfg80211_testmode_event(struct sk_buff *skb, gfp_t gfp)
#endif
/**
+ * cfg80211_connect_bss - notify cfg80211 of connection result
+ *
+ * @dev: network device
+ * @bssid: the BSSID of the AP
+ * @bss: entry of bss to which the STA got connected, can be obtained
+ * through cfg80211_get_bss (may be %NULL)
+ * @req_ie: association request IEs (may be %NULL)
+ * @req_ie_len: association request IEs length
+ * @resp_ie: association response IEs (may be %NULL)
+ * @resp_ie_len: association response IEs length
+ * @status: status code, 0 for successful connection, use
+ * %WLAN_STATUS_UNSPECIFIED_FAILURE if your device cannot give you
+ * the real status code for failures.
+ * @gfp: allocation flags
+ *
+ * It should be called by the underlying driver whenever connect() has
+ * succeeded. This is similar to cfg80211_connect_result(), but with the
+ * option of identifying the exact bss entry for the connection. Only one of
+ * these functions should be called.
+ */
+void cfg80211_connect_bss(struct net_device *dev, const u8 *bssid,
+ struct cfg80211_bss *bss, const u8 *req_ie,
+ size_t req_ie_len, const u8 *resp_ie,
+ size_t resp_ie_len, u16 status, gfp_t gfp);
+
+/**
* cfg80211_connect_result - notify cfg80211 of connection result
*
* @dev: network device
@@ -4626,10 +4699,15 @@ static inline void cfg80211_testmode_event(struct sk_buff *skb, gfp_t gfp)
* It should be called by the underlying driver whenever connect() has
* succeeded.
*/
-void cfg80211_connect_result(struct net_device *dev, const u8 *bssid,
- const u8 *req_ie, size_t req_ie_len,
- const u8 *resp_ie, size_t resp_ie_len,
- u16 status, gfp_t gfp);
+static inline void
+cfg80211_connect_result(struct net_device *dev, const u8 *bssid,
+ const u8 *req_ie, size_t req_ie_len,
+ const u8 *resp_ie, size_t resp_ie_len,
+ u16 status, gfp_t gfp)
+{
+ cfg80211_connect_bss(dev, bssid, NULL, req_ie, req_ie_len, resp_ie,
+ resp_ie_len, status, gfp);
+}
/**
* cfg80211_roamed - notify cfg80211 of roaming
@@ -5029,7 +5107,7 @@ void cfg80211_ch_switch_started_notify(struct net_device *dev,
* Returns %true if the conversion was successful, %false otherwise.
*/
bool ieee80211_operating_class_to_band(u8 operating_class,
- enum ieee80211_band *band);
+ enum nl80211_band *band);
/**
* ieee80211_chandef_to_operating_class - convert chandef to operation class
diff --git a/include/net/cls_cgroup.h b/include/net/cls_cgroup.h
index c0a92e2c286d..74c9693d4941 100644
--- a/include/net/cls_cgroup.h
+++ b/include/net/cls_cgroup.h
@@ -17,6 +17,7 @@
#include <linux/hardirq.h>
#include <linux/rcupdate.h>
#include <net/sock.h>
+#include <net/inet_sock.h>
#ifdef CONFIG_CGROUP_NET_CLASSID
struct cgroup_cls_state {
@@ -63,11 +64,13 @@ static inline u32 task_get_classid(const struct sk_buff *skb)
* softirqs always disables bh.
*/
if (in_serving_softirq()) {
+ struct sock *sk = skb_to_full_sk(skb);
+
/* If there is an sock_cgroup_classid we'll use that. */
- if (!skb->sk)
+ if (!sk || !sk_fullsock(sk))
return 0;
- classid = sock_cgroup_classid(&skb->sk->sk_cgrp_data);
+ classid = sock_cgroup_classid(&sk->sk_cgrp_data);
}
return classid;
diff --git a/include/net/codel.h b/include/net/codel.h
index d168aca115cc..a6e428f80135 100644
--- a/include/net/codel.h
+++ b/include/net/codel.h
@@ -87,27 +87,6 @@ static inline codel_time_t codel_get_time(void)
((s32)((a) - (b)) >= 0))
#define codel_time_before_eq(a, b) codel_time_after_eq(b, a)
-/* Qdiscs using codel plugin must use codel_skb_cb in their own cb[] */
-struct codel_skb_cb {
- codel_time_t enqueue_time;
-};
-
-static struct codel_skb_cb *get_codel_cb(const struct sk_buff *skb)
-{
- qdisc_cb_private_validate(skb, sizeof(struct codel_skb_cb));
- return (struct codel_skb_cb *)qdisc_skb_cb(skb)->data;
-}
-
-static codel_time_t codel_get_enqueue_time(const struct sk_buff *skb)
-{
- return get_codel_cb(skb)->enqueue_time;
-}
-
-static void codel_set_enqueue_time(struct sk_buff *skb)
-{
- get_codel_cb(skb)->enqueue_time = codel_get_time();
-}
-
static inline u32 codel_time_to_us(codel_time_t val)
{
u64 valns = ((u64)val << CODEL_SHIFT);
@@ -176,198 +155,10 @@ struct codel_stats {
#define CODEL_DISABLED_THRESHOLD INT_MAX
-static void codel_params_init(struct codel_params *params,
- const struct Qdisc *sch)
-{
- params->interval = MS2TIME(100);
- params->target = MS2TIME(5);
- params->mtu = psched_mtu(qdisc_dev(sch));
- params->ce_threshold = CODEL_DISABLED_THRESHOLD;
- params->ecn = false;
-}
-
-static void codel_vars_init(struct codel_vars *vars)
-{
- memset(vars, 0, sizeof(*vars));
-}
-
-static void codel_stats_init(struct codel_stats *stats)
-{
- stats->maxpacket = 0;
-}
-
-/*
- * http://en.wikipedia.org/wiki/Methods_of_computing_square_roots#Iterative_methods_for_reciprocal_square_roots
- * new_invsqrt = (invsqrt / 2) * (3 - count * invsqrt^2)
- *
- * Here, invsqrt is a fixed point number (< 1.0), 32bit mantissa, aka Q0.32
- */
-static void codel_Newton_step(struct codel_vars *vars)
-{
- u32 invsqrt = ((u32)vars->rec_inv_sqrt) << REC_INV_SQRT_SHIFT;
- u32 invsqrt2 = ((u64)invsqrt * invsqrt) >> 32;
- u64 val = (3LL << 32) - ((u64)vars->count * invsqrt2);
-
- val >>= 2; /* avoid overflow in following multiply */
- val = (val * invsqrt) >> (32 - 2 + 1);
-
- vars->rec_inv_sqrt = val >> REC_INV_SQRT_SHIFT;
-}
-
-/*
- * CoDel control_law is t + interval/sqrt(count)
- * We maintain in rec_inv_sqrt the reciprocal value of sqrt(count) to avoid
- * both sqrt() and divide operation.
- */
-static codel_time_t codel_control_law(codel_time_t t,
- codel_time_t interval,
- u32 rec_inv_sqrt)
-{
- return t + reciprocal_scale(interval, rec_inv_sqrt << REC_INV_SQRT_SHIFT);
-}
-
-static bool codel_should_drop(const struct sk_buff *skb,
- struct Qdisc *sch,
- struct codel_vars *vars,
- struct codel_params *params,
- struct codel_stats *stats,
- codel_time_t now)
-{
- bool ok_to_drop;
-
- if (!skb) {
- vars->first_above_time = 0;
- return false;
- }
-
- vars->ldelay = now - codel_get_enqueue_time(skb);
- sch->qstats.backlog -= qdisc_pkt_len(skb);
-
- if (unlikely(qdisc_pkt_len(skb) > stats->maxpacket))
- stats->maxpacket = qdisc_pkt_len(skb);
-
- if (codel_time_before(vars->ldelay, params->target) ||
- sch->qstats.backlog <= params->mtu) {
- /* went below - stay below for at least interval */
- vars->first_above_time = 0;
- return false;
- }
- ok_to_drop = false;
- if (vars->first_above_time == 0) {
- /* just went above from below. If we stay above
- * for at least interval we'll say it's ok to drop
- */
- vars->first_above_time = now + params->interval;
- } else if (codel_time_after(now, vars->first_above_time)) {
- ok_to_drop = true;
- }
- return ok_to_drop;
-}
-
+typedef u32 (*codel_skb_len_t)(const struct sk_buff *skb);
+typedef codel_time_t (*codel_skb_time_t)(const struct sk_buff *skb);
+typedef void (*codel_skb_drop_t)(struct sk_buff *skb, void *ctx);
typedef struct sk_buff * (*codel_skb_dequeue_t)(struct codel_vars *vars,
- struct Qdisc *sch);
+ void *ctx);
-static struct sk_buff *codel_dequeue(struct Qdisc *sch,
- struct codel_params *params,
- struct codel_vars *vars,
- struct codel_stats *stats,
- codel_skb_dequeue_t dequeue_func)
-{
- struct sk_buff *skb = dequeue_func(vars, sch);
- codel_time_t now;
- bool drop;
-
- if (!skb) {
- vars->dropping = false;
- return skb;
- }
- now = codel_get_time();
- drop = codel_should_drop(skb, sch, vars, params, stats, now);
- if (vars->dropping) {
- if (!drop) {
- /* sojourn time below target - leave dropping state */
- vars->dropping = false;
- } else if (codel_time_after_eq(now, vars->drop_next)) {
- /* It's time for the next drop. Drop the current
- * packet and dequeue the next. The dequeue might
- * take us out of dropping state.
- * If not, schedule the next drop.
- * A large backlog might result in drop rates so high
- * that the next drop should happen now,
- * hence the while loop.
- */
- while (vars->dropping &&
- codel_time_after_eq(now, vars->drop_next)) {
- vars->count++; /* dont care of possible wrap
- * since there is no more divide
- */
- codel_Newton_step(vars);
- if (params->ecn && INET_ECN_set_ce(skb)) {
- stats->ecn_mark++;
- vars->drop_next =
- codel_control_law(vars->drop_next,
- params->interval,
- vars->rec_inv_sqrt);
- goto end;
- }
- stats->drop_len += qdisc_pkt_len(skb);
- qdisc_drop(skb, sch);
- stats->drop_count++;
- skb = dequeue_func(vars, sch);
- if (!codel_should_drop(skb, sch,
- vars, params, stats, now)) {
- /* leave dropping state */
- vars->dropping = false;
- } else {
- /* and schedule the next drop */
- vars->drop_next =
- codel_control_law(vars->drop_next,
- params->interval,
- vars->rec_inv_sqrt);
- }
- }
- }
- } else if (drop) {
- u32 delta;
-
- if (params->ecn && INET_ECN_set_ce(skb)) {
- stats->ecn_mark++;
- } else {
- stats->drop_len += qdisc_pkt_len(skb);
- qdisc_drop(skb, sch);
- stats->drop_count++;
-
- skb = dequeue_func(vars, sch);
- drop = codel_should_drop(skb, sch, vars, params,
- stats, now);
- }
- vars->dropping = true;
- /* if min went above target close to when we last went below it
- * assume that the drop rate that controlled the queue on the
- * last cycle is a good starting point to control it now.
- */
- delta = vars->count - vars->lastcount;
- if (delta > 1 &&
- codel_time_before(now - vars->drop_next,
- 16 * params->interval)) {
- vars->count = delta;
- /* we dont care if rec_inv_sqrt approximation
- * is not very precise :
- * Next Newton steps will correct it quadratically.
- */
- codel_Newton_step(vars);
- } else {
- vars->count = 1;
- vars->rec_inv_sqrt = ~0U >> REC_INV_SQRT_SHIFT;
- }
- vars->lastcount = vars->count;
- vars->drop_next = codel_control_law(now, params->interval,
- vars->rec_inv_sqrt);
- }
-end:
- if (skb && codel_time_after(vars->ldelay, params->ce_threshold) &&
- INET_ECN_set_ce(skb))
- stats->ce_mark++;
- return skb;
-}
#endif
diff --git a/include/net/codel_impl.h b/include/net/codel_impl.h
new file mode 100644
index 000000000000..d289b91dcd65
--- /dev/null
+++ b/include/net/codel_impl.h
@@ -0,0 +1,255 @@
+#ifndef __NET_SCHED_CODEL_IMPL_H
+#define __NET_SCHED_CODEL_IMPL_H
+
+/*
+ * Codel - The Controlled-Delay Active Queue Management algorithm
+ *
+ * Copyright (C) 2011-2012 Kathleen Nichols <nichols@pollere.com>
+ * Copyright (C) 2011-2012 Van Jacobson <van@pollere.net>
+ * Copyright (C) 2012 Michael D. Taht <dave.taht@bufferbloat.net>
+ * Copyright (C) 2012,2015 Eric Dumazet <edumazet@google.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The names of the authors may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * Alternatively, provided that this notice is retained in full, this
+ * software may be distributed under the terms of the GNU General
+ * Public License ("GPL") version 2, in which case the provisions of the
+ * GPL apply INSTEAD OF those given above.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ *
+ */
+
+/* Controlling Queue Delay (CoDel) algorithm
+ * =========================================
+ * Source : Kathleen Nichols and Van Jacobson
+ * http://queue.acm.org/detail.cfm?id=2209336
+ *
+ * Implemented on linux by Dave Taht and Eric Dumazet
+ */
+
+static void codel_params_init(struct codel_params *params)
+{
+ params->interval = MS2TIME(100);
+ params->target = MS2TIME(5);
+ params->ce_threshold = CODEL_DISABLED_THRESHOLD;
+ params->ecn = false;
+}
+
+static void codel_vars_init(struct codel_vars *vars)
+{
+ memset(vars, 0, sizeof(*vars));
+}
+
+static void codel_stats_init(struct codel_stats *stats)
+{
+ stats->maxpacket = 0;
+}
+
+/*
+ * http://en.wikipedia.org/wiki/Methods_of_computing_square_roots#Iterative_methods_for_reciprocal_square_roots
+ * new_invsqrt = (invsqrt / 2) * (3 - count * invsqrt^2)
+ *
+ * Here, invsqrt is a fixed point number (< 1.0), 32bit mantissa, aka Q0.32
+ */
+static void codel_Newton_step(struct codel_vars *vars)
+{
+ u32 invsqrt = ((u32)vars->rec_inv_sqrt) << REC_INV_SQRT_SHIFT;
+ u32 invsqrt2 = ((u64)invsqrt * invsqrt) >> 32;
+ u64 val = (3LL << 32) - ((u64)vars->count * invsqrt2);
+
+ val >>= 2; /* avoid overflow in following multiply */
+ val = (val * invsqrt) >> (32 - 2 + 1);
+
+ vars->rec_inv_sqrt = val >> REC_INV_SQRT_SHIFT;
+}
+
+/*
+ * CoDel control_law is t + interval/sqrt(count)
+ * We maintain in rec_inv_sqrt the reciprocal value of sqrt(count) to avoid
+ * both sqrt() and divide operation.
+ */
+static codel_time_t codel_control_law(codel_time_t t,
+ codel_time_t interval,
+ u32 rec_inv_sqrt)
+{
+ return t + reciprocal_scale(interval, rec_inv_sqrt << REC_INV_SQRT_SHIFT);
+}
+
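
To make the Q0.32 arithmetic above concrete, here is a stand-alone user-space sketch (not kernel code) that replays the Newton step while count grows by one per drop, the way the dequeue loop below applies it. It keeps the full 32-bit value instead of the REC_INV_SQRT_SHIFT-truncated field; the printed values stay close to 1/sqrt(count) after the first couple of steps.

#include <stdio.h>
#include <stdint.h>
#include <math.h>

static uint32_t newton_step(uint32_t invsqrt, uint32_t count)
{
	uint32_t invsqrt2 = ((uint64_t)invsqrt * invsqrt) >> 32;
	uint64_t val = (3ULL << 32) - ((uint64_t)count * invsqrt2);

	val >>= 2;	/* avoid overflow in the following multiply */
	val = (val * invsqrt) >> (32 - 2 + 1);
	return (uint32_t)val;
}

int main(void)
{
	uint32_t invsqrt = ~0U;	/* count == 1: start at ~1.0 in Q0.32 */

	for (uint32_t count = 2; count <= 8; count++) {
		invsqrt = newton_step(invsqrt, count);
		printf("count %u: %.4f (exact %.4f)\n", count,
		       invsqrt / 4294967296.0, 1.0 / sqrt(count));
	}
	return 0;
}
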
+static bool codel_should_drop(const struct sk_buff *skb,
+ void *ctx,
+ struct codel_vars *vars,
+ struct codel_params *params,
+ struct codel_stats *stats,
+ codel_skb_len_t skb_len_func,
+ codel_skb_time_t skb_time_func,
+ u32 *backlog,
+ codel_time_t now)
+{
+ bool ok_to_drop;
+ u32 skb_len;
+
+ if (!skb) {
+ vars->first_above_time = 0;
+ return false;
+ }
+
+ skb_len = skb_len_func(skb);
+ vars->ldelay = now - skb_time_func(skb);
+
+ if (unlikely(skb_len > stats->maxpacket))
+ stats->maxpacket = skb_len;
+
+ if (codel_time_before(vars->ldelay, params->target) ||
+ *backlog <= params->mtu) {
+ /* went below - stay below for at least interval */
+ vars->first_above_time = 0;
+ return false;
+ }
+ ok_to_drop = false;
+ if (vars->first_above_time == 0) {
+ /* just went above from below. If we stay above
+ * for at least interval we'll say it's ok to drop
+ */
+ vars->first_above_time = now + params->interval;
+ } else if (codel_time_after(now, vars->first_above_time)) {
+ ok_to_drop = true;
+ }
+ return ok_to_drop;
+}
+
+static struct sk_buff *codel_dequeue(void *ctx,
+ u32 *backlog,
+ struct codel_params *params,
+ struct codel_vars *vars,
+ struct codel_stats *stats,
+ codel_skb_len_t skb_len_func,
+ codel_skb_time_t skb_time_func,
+ codel_skb_drop_t drop_func,
+ codel_skb_dequeue_t dequeue_func)
+{
+ struct sk_buff *skb = dequeue_func(vars, ctx);
+ codel_time_t now;
+ bool drop;
+
+ if (!skb) {
+ vars->dropping = false;
+ return skb;
+ }
+ now = codel_get_time();
+ drop = codel_should_drop(skb, ctx, vars, params, stats,
+ skb_len_func, skb_time_func, backlog, now);
+ if (vars->dropping) {
+ if (!drop) {
+ /* sojourn time below target - leave dropping state */
+ vars->dropping = false;
+ } else if (codel_time_after_eq(now, vars->drop_next)) {
+ /* It's time for the next drop. Drop the current
+ * packet and dequeue the next. The dequeue might
+ * take us out of dropping state.
+ * If not, schedule the next drop.
+ * A large backlog might result in drop rates so high
+ * that the next drop should happen now,
+ * hence the while loop.
+ */
+ while (vars->dropping &&
+ codel_time_after_eq(now, vars->drop_next)) {
+ vars->count++; /* don't care about a possible wrap
+ * since there is no divide any more
+ */
+ codel_Newton_step(vars);
+ if (params->ecn && INET_ECN_set_ce(skb)) {
+ stats->ecn_mark++;
+ vars->drop_next =
+ codel_control_law(vars->drop_next,
+ params->interval,
+ vars->rec_inv_sqrt);
+ goto end;
+ }
+ stats->drop_len += skb_len_func(skb);
+ drop_func(skb, ctx);
+ stats->drop_count++;
+ skb = dequeue_func(vars, ctx);
+ if (!codel_should_drop(skb, ctx,
+ vars, params, stats,
+ skb_len_func,
+ skb_time_func,
+ backlog, now)) {
+ /* leave dropping state */
+ vars->dropping = false;
+ } else {
+ /* and schedule the next drop */
+ vars->drop_next =
+ codel_control_law(vars->drop_next,
+ params->interval,
+ vars->rec_inv_sqrt);
+ }
+ }
+ }
+ } else if (drop) {
+ u32 delta;
+
+ if (params->ecn && INET_ECN_set_ce(skb)) {
+ stats->ecn_mark++;
+ } else {
+ stats->drop_len += skb_len_func(skb);
+ drop_func(skb, ctx);
+ stats->drop_count++;
+
+ skb = dequeue_func(vars, ctx);
+ drop = codel_should_drop(skb, ctx, vars, params,
+ stats, skb_len_func,
+ skb_time_func, backlog, now);
+ }
+ vars->dropping = true;
+ /* if min went above target close to when we last went below it
+ * assume that the drop rate that controlled the queue on the
+ * last cycle is a good starting point to control it now.
+ */
+ delta = vars->count - vars->lastcount;
+ if (delta > 1 &&
+ codel_time_before(now - vars->drop_next,
+ 16 * params->interval)) {
+ vars->count = delta;
+ /* we don't care if the rec_inv_sqrt approximation
+ * is not very precise:
+ * the next Newton steps will correct it quadratically.
+ */
+ codel_Newton_step(vars);
+ } else {
+ vars->count = 1;
+ vars->rec_inv_sqrt = ~0U >> REC_INV_SQRT_SHIFT;
+ }
+ vars->lastcount = vars->count;
+ vars->drop_next = codel_control_law(now, params->interval,
+ vars->rec_inv_sqrt);
+ }
+end:
+ if (skb && codel_time_after(vars->ldelay, params->ce_threshold) &&
+ INET_ECN_set_ce(skb))
+ stats->ce_mark++;
+ return skb;
+}
+
+#endif
diff --git a/include/net/codel_qdisc.h b/include/net/codel_qdisc.h
new file mode 100644
index 000000000000..8144d9cd2908
--- /dev/null
+++ b/include/net/codel_qdisc.h
@@ -0,0 +1,73 @@
+#ifndef __NET_SCHED_CODEL_QDISC_H
+#define __NET_SCHED_CODEL_QDISC_H
+
+/*
+ * Codel - The Controlled-Delay Active Queue Management algorithm
+ *
+ * Copyright (C) 2011-2012 Kathleen Nichols <nichols@pollere.com>
+ * Copyright (C) 2011-2012 Van Jacobson <van@pollere.net>
+ * Copyright (C) 2012 Michael D. Taht <dave.taht@bufferbloat.net>
+ * Copyright (C) 2012,2015 Eric Dumazet <edumazet@google.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The names of the authors may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * Alternatively, provided that this notice is retained in full, this
+ * software may be distributed under the terms of the GNU General
+ * Public License ("GPL") version 2, in which case the provisions of the
+ * GPL apply INSTEAD OF those given above.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ *
+ */
+
+/* Controlling Queue Delay (CoDel) algorithm
+ * =========================================
+ * Source : Kathleen Nichols and Van Jacobson
+ * http://queue.acm.org/detail.cfm?id=2209336
+ *
+ * Implemented on linux by Dave Taht and Eric Dumazet
+ */
+
+/* Qdiscs using codel plugin must use codel_skb_cb in their own cb[] */
+struct codel_skb_cb {
+ codel_time_t enqueue_time;
+};
+
+static struct codel_skb_cb *get_codel_cb(const struct sk_buff *skb)
+{
+ qdisc_cb_private_validate(skb, sizeof(struct codel_skb_cb));
+ return (struct codel_skb_cb *)qdisc_skb_cb(skb)->data;
+}
+
+static codel_time_t codel_get_enqueue_time(const struct sk_buff *skb)
+{
+ return get_codel_cb(skb)->enqueue_time;
+}
+
+static void codel_set_enqueue_time(struct sk_buff *skb)
+{
+ get_codel_cb(skb)->enqueue_time = codel_get_time();
+}
+
+#endif
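
For reference, a rough sketch of the glue a qdisc would supply to the relocated codel_dequeue(). struct example_sched_data and example_dequeue_skb() are hypothetical; qdisc_pkt_len(), qdisc_drop() and the backlog counter are the same helpers the old inline code used, qdisc_priv() comes from sch_generic.h, and codel_get_enqueue_time() is the accessor defined in this file.

/* hypothetical per-qdisc private data */
struct example_sched_data {
	struct codel_params	params;
	struct codel_vars	vars;
	struct codel_stats	stats;
};

static u32 example_skb_len(const struct sk_buff *skb)
{
	return qdisc_pkt_len(skb);
}

static codel_time_t example_skb_time(const struct sk_buff *skb)
{
	return codel_get_enqueue_time(skb);
}

static void example_drop(struct sk_buff *skb, void *ctx)
{
	qdisc_drop(skb, ctx);
}

static struct sk_buff *example_dequeue(struct codel_vars *vars, void *ctx)
{
	struct Qdisc *sch = ctx;

	return example_dequeue_skb(sch);	/* hypothetical helper */
}

static struct sk_buff *example_codel_dequeue(struct Qdisc *sch)
{
	struct example_sched_data *q = qdisc_priv(sch);

	return codel_dequeue(sch, &sch->qstats.backlog,
			     &q->params, &q->vars, &q->stats,
			     example_skb_len, example_skb_time,
			     example_drop, example_dequeue);
}
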
diff --git a/include/net/devlink.h b/include/net/devlink.h
index c37d257891d6..1d45b61cb320 100644
--- a/include/net/devlink.h
+++ b/include/net/devlink.h
@@ -24,6 +24,7 @@ struct devlink_ops;
struct devlink {
struct list_head list;
struct list_head port_list;
+ struct list_head sb_list;
const struct devlink_ops *ops;
struct device *dev;
possible_net_t _net;
@@ -42,6 +43,12 @@ struct devlink_port {
u32 split_group;
};
+struct devlink_sb_pool_info {
+ enum devlink_sb_pool_type pool_type;
+ u32 size;
+ enum devlink_sb_threshold_type threshold_type;
+};
+
struct devlink_ops {
size_t priv_size;
int (*port_type_set)(struct devlink_port *devlink_port,
@@ -49,6 +56,40 @@ struct devlink_ops {
int (*port_split)(struct devlink *devlink, unsigned int port_index,
unsigned int count);
int (*port_unsplit)(struct devlink *devlink, unsigned int port_index);
+ int (*sb_pool_get)(struct devlink *devlink, unsigned int sb_index,
+ u16 pool_index,
+ struct devlink_sb_pool_info *pool_info);
+ int (*sb_pool_set)(struct devlink *devlink, unsigned int sb_index,
+ u16 pool_index, u32 size,
+ enum devlink_sb_threshold_type threshold_type);
+ int (*sb_port_pool_get)(struct devlink_port *devlink_port,
+ unsigned int sb_index, u16 pool_index,
+ u32 *p_threshold);
+ int (*sb_port_pool_set)(struct devlink_port *devlink_port,
+ unsigned int sb_index, u16 pool_index,
+ u32 threshold);
+ int (*sb_tc_pool_bind_get)(struct devlink_port *devlink_port,
+ unsigned int sb_index,
+ u16 tc_index,
+ enum devlink_sb_pool_type pool_type,
+ u16 *p_pool_index, u32 *p_threshold);
+ int (*sb_tc_pool_bind_set)(struct devlink_port *devlink_port,
+ unsigned int sb_index,
+ u16 tc_index,
+ enum devlink_sb_pool_type pool_type,
+ u16 pool_index, u32 threshold);
+ int (*sb_occ_snapshot)(struct devlink *devlink,
+ unsigned int sb_index);
+ int (*sb_occ_max_clear)(struct devlink *devlink,
+ unsigned int sb_index);
+ int (*sb_occ_port_pool_get)(struct devlink_port *devlink_port,
+ unsigned int sb_index, u16 pool_index,
+ u32 *p_cur, u32 *p_max);
+ int (*sb_occ_tc_port_bind_get)(struct devlink_port *devlink_port,
+ unsigned int sb_index,
+ u16 tc_index,
+ enum devlink_sb_pool_type pool_type,
+ u32 *p_cur, u32 *p_max);
};
static inline void *devlink_priv(struct devlink *devlink)
@@ -82,6 +123,11 @@ void devlink_port_type_ib_set(struct devlink_port *devlink_port,
void devlink_port_type_clear(struct devlink_port *devlink_port);
void devlink_port_split_set(struct devlink_port *devlink_port,
u32 split_group);
+int devlink_sb_register(struct devlink *devlink, unsigned int sb_index,
+ u32 size, u16 ingress_pools_count,
+ u16 egress_pools_count, u16 ingress_tc_count,
+ u16 egress_tc_count);
+void devlink_sb_unregister(struct devlink *devlink, unsigned int sb_index);
#else
@@ -135,6 +181,21 @@ static inline void devlink_port_split_set(struct devlink_port *devlink_port,
{
}
+static inline int devlink_sb_register(struct devlink *devlink,
+ unsigned int sb_index, u32 size,
+ u16 ingress_pools_count,
+ u16 egress_pools_count,
+ u16 ingress_tc_count,
+ u16 egress_tc_count)
+{
+ return 0;
+}
+
+static inline void devlink_sb_unregister(struct devlink *devlink,
+ unsigned int sb_index)
+{
+}
+
#endif
#endif /* _NET_DEVLINK_H_ */
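
To make the new shared-buffer hooks concrete: a driver describes its buffer once after devlink_register() and then answers per-pool queries through the ops. The sketch below is hypothetical; the mydrv_* names, the 2+2 pool split and the 1 MB size are invented, while the DEVLINK_SB_* enum values are the pool/threshold types defined by the devlink uAPI:

static int mydrv_sb_pool_get(struct devlink *devlink, unsigned int sb_index,
			     u16 pool_index,
			     struct devlink_sb_pool_info *pool_info)
{
	/* pools 0-1 are ingress, 2-3 egress in this made-up layout */
	pool_info->pool_type = pool_index < 2 ? DEVLINK_SB_POOL_TYPE_INGRESS :
						DEVLINK_SB_POOL_TYPE_EGRESS;
	pool_info->size = (1024 * 1024) / 4;
	pool_info->threshold_type = DEVLINK_SB_THRESHOLD_TYPE_STATIC;
	return 0;
}

static const struct devlink_ops mydrv_devlink_ops = {
	.sb_pool_get = mydrv_sb_pool_get,
};

static int mydrv_devlink_sb_init(struct devlink *devlink)
{
	/* buffer index 0: 1 MB, 2 ingress + 2 egress pools, 8 + 8 TCs */
	return devlink_sb_register(devlink, 0, 1024 * 1024, 2, 2, 8, 8);
}
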
diff --git a/include/net/dsa.h b/include/net/dsa.h
index 6463bb2863ac..17c3d37b6779 100644
--- a/include/net/dsa.h
+++ b/include/net/dsa.h
@@ -16,7 +16,6 @@
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/of.h>
-#include <linux/of_gpio.h>
#include <linux/phy.h>
#include <linux/phy_fixed.h>
#include <linux/ethtool.h>
@@ -65,13 +64,6 @@ struct dsa_chip_data {
* NULL if there is only one switch chip.
*/
s8 *rtable;
-
- /*
- * A switch may have a GPIO line tied to its reset pin. Parse
- * this from the device tree, and use it before performing
- * switch soft reset.
- */
- struct gpio_desc *reset;
};
struct dsa_platform_data {
@@ -111,6 +103,11 @@ struct dsa_switch_tree {
enum dsa_tag_protocol tag_protocol;
/*
+ * Original copy of the master netdev ethtool_ops
+ */
+ struct ethtool_ops master_ethtool_ops;
+
+ /*
* The switch and port to which the CPU is attached.
*/
s8 cpu_switch;
@@ -123,6 +120,8 @@ struct dsa_switch_tree {
};
struct dsa_switch {
+ struct device *dev;
+
/*
* Parent switch tree, and switch index.
*/
@@ -130,25 +129,21 @@ struct dsa_switch {
int index;
/*
- * Tagging protocol understood by this switch
+ * Give the switch driver somewhere to hang its private data
+ * structure.
*/
- enum dsa_tag_protocol tag_protocol;
+ void *priv;
/*
* Configuration data for this switch.
*/
- struct dsa_chip_data *pd;
+ struct dsa_chip_data *cd;
/*
* The used switch driver.
*/
struct dsa_switch_driver *drv;
- /*
- * Reference to host device to use.
- */
- struct device *master_dev;
-
#ifdef CONFIG_NET_DSA_HWMON
/*
* Hardware monitoring information
@@ -161,7 +156,7 @@ struct dsa_switch {
* Slave mii_bus and devices for the individual ports.
*/
u32 dsa_port_mask;
- u32 phys_port_mask;
+ u32 enabled_port_mask;
u32 phys_mii_mask;
struct mii_bus *slave_mii_bus;
struct net_device *ports[DSA_MAX_PORTS];
@@ -179,7 +174,7 @@ static inline bool dsa_is_dsa_port(struct dsa_switch *ds, int p)
static inline bool dsa_is_port_initialized(struct dsa_switch *ds, int p)
{
- return ds->phys_port_mask & (1 << p) && ds->ports[p];
+ return ds->enabled_port_mask & (1 << p) && ds->ports[p];
}
static inline u8 dsa_upstream_port(struct dsa_switch *ds)
@@ -195,7 +190,7 @@ static inline u8 dsa_upstream_port(struct dsa_switch *ds)
if (dst->cpu_switch == ds->index)
return dst->cpu_port;
else
- return ds->pd->rtable[dst->cpu_switch];
+ return ds->cd->rtable[dst->cpu_switch];
}
struct switchdev_trans;
@@ -207,12 +202,13 @@ struct dsa_switch_driver {
struct list_head list;
enum dsa_tag_protocol tag_protocol;
- int priv_size;
/*
* Probing and setup.
*/
- char *(*probe)(struct device *host_dev, int sw_addr);
+ const char *(*probe)(struct device *dsa_dev,
+ struct device *host_dev, int sw_addr,
+ void **priv);
int (*setup)(struct dsa_switch *ds);
int (*set_addr)(struct dsa_switch *ds, u8 *addr);
u32 (*get_phy_flags)(struct dsa_switch *ds, int port);
@@ -299,8 +295,8 @@ struct dsa_switch_driver {
int (*port_bridge_join)(struct dsa_switch *ds, int port,
struct net_device *bridge);
void (*port_bridge_leave)(struct dsa_switch *ds, int port);
- int (*port_stp_update)(struct dsa_switch *ds, int port,
- u8 state);
+ void (*port_stp_state_set)(struct dsa_switch *ds, int port,
+ u8 state);
/*
* VLAN support
@@ -310,7 +306,7 @@ struct dsa_switch_driver {
int (*port_vlan_prepare)(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_vlan *vlan,
struct switchdev_trans *trans);
- int (*port_vlan_add)(struct dsa_switch *ds, int port,
+ void (*port_vlan_add)(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_vlan *vlan,
struct switchdev_trans *trans);
int (*port_vlan_del)(struct dsa_switch *ds, int port,
@@ -325,7 +321,7 @@ struct dsa_switch_driver {
int (*port_fdb_prepare)(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_fdb *fdb,
struct switchdev_trans *trans);
- int (*port_fdb_add)(struct dsa_switch *ds, int port,
+ void (*port_fdb_add)(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_fdb *fdb,
struct switchdev_trans *trans);
int (*port_fdb_del)(struct dsa_switch *ds, int port,
@@ -341,7 +337,7 @@ struct mii_bus *dsa_host_dev_to_mii_bus(struct device *dev);
static inline void *ds_to_priv(struct dsa_switch *ds)
{
- return (void *)(ds + 1);
+ return ds->priv;
}
static inline bool dsa_uses_tagged_protocol(struct dsa_switch_tree *dst)
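
Taken together, the probe() signature change and the new ds->priv pointer mean a switch driver now allocates and owns its private state instead of relying on the removed priv_size tail allocation. A hedged sketch of a driver-side probe and setup pair (mydrv_* and struct mydrv_priv are invented for the example):

struct mydrv_priv {
	struct mii_bus *bus;
	int sw_addr;
};

static const char *mydrv_probe(struct device *dsa_dev, struct device *host_dev,
			       int sw_addr, void **priv)
{
	struct mydrv_priv *ps;

	ps = devm_kzalloc(dsa_dev, sizeof(*ps), GFP_KERNEL);
	if (!ps)
		return NULL;

	ps->bus = dsa_host_dev_to_mii_bus(host_dev);
	ps->sw_addr = sw_addr;
	*priv = ps;			/* becomes ds->priv */

	return "mydrv-switch";		/* human-readable name of the probed chip */
}

static int mydrv_setup(struct dsa_switch *ds)
{
	struct mydrv_priv *ps = ds_to_priv(ds);	/* now simply returns ds->priv */

	return ps->bus ? 0 : -ENODEV;	/* real hardware init elided */
}
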
diff --git a/include/net/dst.h b/include/net/dst.h
index 5c98443c1c9e..6835d224d47b 100644
--- a/include/net/dst.h
+++ b/include/net/dst.h
@@ -85,12 +85,11 @@ struct dst_entry {
#endif
#ifdef CONFIG_64BIT
- struct lwtunnel_state *lwtstate;
/*
* Align __refcnt to a 64 bytes alignment
* (L1_CACHE_SIZE would be too much)
*/
- long __pad_to_align_refcnt[1];
+ long __pad_to_align_refcnt[2];
#endif
/*
* __refcnt wants to be on a different cache line from
@@ -99,9 +98,7 @@ struct dst_entry {
atomic_t __refcnt; /* client references */
int __use;
unsigned long lastuse;
-#ifndef CONFIG_64BIT
struct lwtunnel_state *lwtstate;
-#endif
union {
struct dst_entry *next;
struct rtable __rcu *rt_next;
diff --git a/include/net/fou.h b/include/net/fou.h
index 19b8a0c62a98..f5cc6910a27e 100644
--- a/include/net/fou.h
+++ b/include/net/fou.h
@@ -9,11 +9,11 @@
#include <net/udp.h>
size_t fou_encap_hlen(struct ip_tunnel_encap *e);
-static size_t gue_encap_hlen(struct ip_tunnel_encap *e);
+size_t gue_encap_hlen(struct ip_tunnel_encap *e);
-int fou_build_header(struct sk_buff *skb, struct ip_tunnel_encap *e,
- u8 *protocol, struct flowi4 *fl4);
-int gue_build_header(struct sk_buff *skb, struct ip_tunnel_encap *e,
- u8 *protocol, struct flowi4 *fl4);
+int __fou_build_header(struct sk_buff *skb, struct ip_tunnel_encap *e,
+ u8 *protocol, __be16 *sport, int type);
+int __gue_build_header(struct sk_buff *skb, struct ip_tunnel_encap *e,
+ u8 *protocol, __be16 *sport, int type);
#endif
diff --git a/include/net/fq.h b/include/net/fq.h
new file mode 100644
index 000000000000..268b49049c37
--- /dev/null
+++ b/include/net/fq.h
@@ -0,0 +1,95 @@
+/*
+ * Copyright (c) 2016 Qualcomm Atheros, Inc
+ *
+ * GPL v2
+ *
+ * Based on net/sched/sch_fq_codel.c
+ */
+#ifndef __NET_SCHED_FQ_H
+#define __NET_SCHED_FQ_H
+
+struct fq_tin;
+
+/**
+ * struct fq_flow - per traffic flow queue
+ *
+ * @tin: owner of this flow. Used to manage collisions, i.e. when a packet
+ * hashes to an index which points to a flow that is already owned by a
+ * tin different from the one the packet is destined to. In such a case
+ * the implementer must provide a fallback flow.
+ * @flowchain: can be linked to fq_tin's new_flows or old_flows. Used for DRR++
+ * (deficit round robin) based round robin queuing similar to the one
+ * found in net/sched/sch_fq_codel.c
+ * @backlogchain: can be linked to other fq_flow and fq. Used to keep track of
+ * fat flows and efficient head-dropping if packet limit is reached
+ * @queue: sk_buff queue to hold packets
+ * @backlog: number of bytes pending in the queue. The number of packets can be
+ * found in @queue.qlen
+ * @deficit: used for DRR++
+ */
+struct fq_flow {
+ struct fq_tin *tin;
+ struct list_head flowchain;
+ struct list_head backlogchain;
+ struct sk_buff_head queue;
+ u32 backlog;
+ int deficit;
+};
+
+/**
+ * struct fq_tin - a logical container of fq_flows
+ *
+ * Used to group fq_flows into a logical aggregate. DRR++ scheme is used to
+ * pull interleaved packets out of the associated flows.
+ *
+ * @new_flows: linked list of fq_flow
+ * @old_flows: linked list of fq_flow
+ */
+struct fq_tin {
+ struct list_head new_flows;
+ struct list_head old_flows;
+ u32 backlog_bytes;
+ u32 backlog_packets;
+ u32 overlimit;
+ u32 collisions;
+ u32 flows;
+ u32 tx_bytes;
+ u32 tx_packets;
+};
+
+/**
+ * struct fq - main container for fair queuing purposes
+ *
+ * @backlogs: linked to fq_flows. Used to maintain fat flows for efficient
+ * head-dropping when @backlog reaches @limit
+ * @limit: max number of packets that can be queued across all flows
+ * @backlog: number of packets queued across all flows
+ */
+struct fq {
+ struct fq_flow *flows;
+ struct list_head backlogs;
+ spinlock_t lock;
+ u32 flows_cnt;
+ u32 perturbation;
+ u32 limit;
+ u32 quantum;
+ u32 backlog;
+ u32 overlimit;
+ u32 collisions;
+};
+
+typedef struct sk_buff *fq_tin_dequeue_t(struct fq *,
+ struct fq_tin *,
+ struct fq_flow *flow);
+
+typedef void fq_skb_free_t(struct fq *,
+ struct fq_tin *,
+ struct fq_flow *,
+ struct sk_buff *);
+
+typedef struct fq_flow *fq_flow_get_default_t(struct fq *,
+ struct fq_tin *,
+ int idx,
+ struct sk_buff *);
+
+#endif
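
The three typedefs at the end are the hooks a user of this header (together with fq_impl.h below) is expected to provide. A hypothetical, minimal set of callbacks might look like the following; the my_* names and the single shared fallback flow are illustrative only:

static struct fq_flow my_fallback_flow;	/* must be fq_flow_init()ed elsewhere */

/* fq_tin_dequeue_t: plain FIFO pop; a real user could add CoDel here. */
static struct sk_buff *my_tin_dequeue(struct fq *fq, struct fq_tin *tin,
				      struct fq_flow *flow)
{
	return fq_flow_dequeue(fq, flow);
}

/* fq_skb_free_t: called for packets dropped on overlimit or reset. */
static void my_skb_free(struct fq *fq, struct fq_tin *tin,
			struct fq_flow *flow, struct sk_buff *skb)
{
	dev_kfree_skb_any(skb);
}

/* fq_flow_get_default_t: where hash collisions between tins end up. */
static struct fq_flow *my_flow_get_default(struct fq *fq, struct fq_tin *tin,
					   int idx, struct sk_buff *skb)
{
	return &my_fallback_flow;
}
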
diff --git a/include/net/fq_impl.h b/include/net/fq_impl.h
new file mode 100644
index 000000000000..163f3ed0f05a
--- /dev/null
+++ b/include/net/fq_impl.h
@@ -0,0 +1,277 @@
+/*
+ * Copyright (c) 2016 Qualcomm Atheros, Inc
+ *
+ * GPL v2
+ *
+ * Based on net/sched/sch_fq_codel.c
+ */
+#ifndef __NET_SCHED_FQ_IMPL_H
+#define __NET_SCHED_FQ_IMPL_H
+
+#include <net/fq.h>
+
+/* functions that are embedded into the including file */
+
+static struct sk_buff *fq_flow_dequeue(struct fq *fq,
+ struct fq_flow *flow)
+{
+ struct fq_tin *tin = flow->tin;
+ struct fq_flow *i;
+ struct sk_buff *skb;
+
+ lockdep_assert_held(&fq->lock);
+
+ skb = __skb_dequeue(&flow->queue);
+ if (!skb)
+ return NULL;
+
+ tin->backlog_bytes -= skb->len;
+ tin->backlog_packets--;
+ flow->backlog -= skb->len;
+ fq->backlog--;
+
+ if (flow->backlog == 0) {
+ list_del_init(&flow->backlogchain);
+ } else {
+ i = flow;
+
+ list_for_each_entry_continue(i, &fq->backlogs, backlogchain)
+ if (i->backlog < flow->backlog)
+ break;
+
+ list_move_tail(&flow->backlogchain,
+ &i->backlogchain);
+ }
+
+ return skb;
+}
+
+static struct sk_buff *fq_tin_dequeue(struct fq *fq,
+ struct fq_tin *tin,
+ fq_tin_dequeue_t dequeue_func)
+{
+ struct fq_flow *flow;
+ struct list_head *head;
+ struct sk_buff *skb;
+
+ lockdep_assert_held(&fq->lock);
+
+begin:
+ head = &tin->new_flows;
+ if (list_empty(head)) {
+ head = &tin->old_flows;
+ if (list_empty(head))
+ return NULL;
+ }
+
+ flow = list_first_entry(head, struct fq_flow, flowchain);
+
+ if (flow->deficit <= 0) {
+ flow->deficit += fq->quantum;
+ list_move_tail(&flow->flowchain,
+ &tin->old_flows);
+ goto begin;
+ }
+
+ skb = dequeue_func(fq, tin, flow);
+ if (!skb) {
+ /* force a pass through old_flows to prevent starvation */
+ if ((head == &tin->new_flows) &&
+ !list_empty(&tin->old_flows)) {
+ list_move_tail(&flow->flowchain, &tin->old_flows);
+ } else {
+ list_del_init(&flow->flowchain);
+ flow->tin = NULL;
+ }
+ goto begin;
+ }
+
+ flow->deficit -= skb->len;
+ tin->tx_bytes += skb->len;
+ tin->tx_packets++;
+
+ return skb;
+}
+
+static struct fq_flow *fq_flow_classify(struct fq *fq,
+ struct fq_tin *tin,
+ struct sk_buff *skb,
+ fq_flow_get_default_t get_default_func)
+{
+ struct fq_flow *flow;
+ u32 hash;
+ u32 idx;
+
+ lockdep_assert_held(&fq->lock);
+
+ hash = skb_get_hash_perturb(skb, fq->perturbation);
+ idx = reciprocal_scale(hash, fq->flows_cnt);
+ flow = &fq->flows[idx];
+
+ if (flow->tin && flow->tin != tin) {
+ flow = get_default_func(fq, tin, idx, skb);
+ tin->collisions++;
+ fq->collisions++;
+ }
+
+ if (!flow->tin)
+ tin->flows++;
+
+ return flow;
+}
+
+static void fq_recalc_backlog(struct fq *fq,
+ struct fq_tin *tin,
+ struct fq_flow *flow)
+{
+ struct fq_flow *i;
+
+ if (list_empty(&flow->backlogchain))
+ list_add_tail(&flow->backlogchain, &fq->backlogs);
+
+ i = flow;
+ list_for_each_entry_continue_reverse(i, &fq->backlogs,
+ backlogchain)
+ if (i->backlog > flow->backlog)
+ break;
+
+ list_move(&flow->backlogchain, &i->backlogchain);
+}
+
+static void fq_tin_enqueue(struct fq *fq,
+ struct fq_tin *tin,
+ struct sk_buff *skb,
+ fq_skb_free_t free_func,
+ fq_flow_get_default_t get_default_func)
+{
+ struct fq_flow *flow;
+
+ lockdep_assert_held(&fq->lock);
+
+ flow = fq_flow_classify(fq, tin, skb, get_default_func);
+
+ flow->tin = tin;
+ flow->backlog += skb->len;
+ tin->backlog_bytes += skb->len;
+ tin->backlog_packets++;
+ fq->backlog++;
+
+ fq_recalc_backlog(fq, tin, flow);
+
+ if (list_empty(&flow->flowchain)) {
+ flow->deficit = fq->quantum;
+ list_add_tail(&flow->flowchain,
+ &tin->new_flows);
+ }
+
+ __skb_queue_tail(&flow->queue, skb);
+
+ if (fq->backlog > fq->limit) {
+ flow = list_first_entry_or_null(&fq->backlogs,
+ struct fq_flow,
+ backlogchain);
+ if (!flow)
+ return;
+
+ skb = fq_flow_dequeue(fq, flow);
+ if (!skb)
+ return;
+
+ free_func(fq, flow->tin, flow, skb);
+
+ flow->tin->overlimit++;
+ fq->overlimit++;
+ }
+}
+
+static void fq_flow_reset(struct fq *fq,
+ struct fq_flow *flow,
+ fq_skb_free_t free_func)
+{
+ struct sk_buff *skb;
+
+ while ((skb = fq_flow_dequeue(fq, flow)))
+ free_func(fq, flow->tin, flow, skb);
+
+ if (!list_empty(&flow->flowchain))
+ list_del_init(&flow->flowchain);
+
+ if (!list_empty(&flow->backlogchain))
+ list_del_init(&flow->backlogchain);
+
+ flow->tin = NULL;
+
+ WARN_ON_ONCE(flow->backlog);
+}
+
+static void fq_tin_reset(struct fq *fq,
+ struct fq_tin *tin,
+ fq_skb_free_t free_func)
+{
+ struct list_head *head;
+ struct fq_flow *flow;
+
+ for (;;) {
+ head = &tin->new_flows;
+ if (list_empty(head)) {
+ head = &tin->old_flows;
+ if (list_empty(head))
+ break;
+ }
+
+ flow = list_first_entry(head, struct fq_flow, flowchain);
+ fq_flow_reset(fq, flow, free_func);
+ }
+
+ WARN_ON_ONCE(tin->backlog_bytes);
+ WARN_ON_ONCE(tin->backlog_packets);
+}
+
+static void fq_flow_init(struct fq_flow *flow)
+{
+ INIT_LIST_HEAD(&flow->flowchain);
+ INIT_LIST_HEAD(&flow->backlogchain);
+ __skb_queue_head_init(&flow->queue);
+}
+
+static void fq_tin_init(struct fq_tin *tin)
+{
+ INIT_LIST_HEAD(&tin->new_flows);
+ INIT_LIST_HEAD(&tin->old_flows);
+}
+
+static int fq_init(struct fq *fq, int flows_cnt)
+{
+ int i;
+
+ memset(fq, 0, sizeof(fq[0]));
+ INIT_LIST_HEAD(&fq->backlogs);
+ spin_lock_init(&fq->lock);
+ fq->flows_cnt = max_t(u32, flows_cnt, 1);
+ fq->perturbation = prandom_u32();
+ fq->quantum = 300;
+ fq->limit = 8192;
+
+ fq->flows = kcalloc(fq->flows_cnt, sizeof(fq->flows[0]), GFP_KERNEL);
+ if (!fq->flows)
+ return -ENOMEM;
+
+ for (i = 0; i < fq->flows_cnt; i++)
+ fq_flow_init(&fq->flows[i]);
+
+ return 0;
+}
+
+static void fq_reset(struct fq *fq,
+ fq_skb_free_t free_func)
+{
+ int i;
+
+ for (i = 0; i < fq->flows_cnt; i++)
+ fq_flow_reset(fq, &fq->flows[i], free_func);
+
+ kfree(fq->flows);
+ fq->flows = NULL;
+}
+
+#endif
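
Putting fq.h and fq_impl.h together: the including code owns one struct fq plus its tins, takes fq->lock around every operation (the helpers only lockdep-assert it), and supplies the callbacks sketched after fq.h above. A hedged usage outline with invented my_* names:

static struct fq my_fq;
static struct fq_tin my_tin;

static int my_init(void)
{
	int err = fq_init(&my_fq, 1024);	/* 1024 hashed flows */

	if (err)
		return err;
	fq_tin_init(&my_tin);
	return 0;
}

static void my_enqueue(struct sk_buff *skb)
{
	spin_lock_bh(&my_fq.lock);
	fq_tin_enqueue(&my_fq, &my_tin, skb, my_skb_free, my_flow_get_default);
	spin_unlock_bh(&my_fq.lock);
}

static struct sk_buff *my_dequeue(void)
{
	struct sk_buff *skb;

	spin_lock_bh(&my_fq.lock);
	skb = fq_tin_dequeue(&my_fq, &my_tin, my_tin_dequeue);
	spin_unlock_bh(&my_fq.lock);
	return skb;
}

static void my_teardown(void)
{
	spin_lock_bh(&my_fq.lock);
	fq_tin_reset(&my_fq, &my_tin, my_skb_free);
	fq_reset(&my_fq, my_skb_free);
	spin_unlock_bh(&my_fq.lock);
}
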
diff --git a/include/net/gen_stats.h b/include/net/gen_stats.h
index cbafa3768d48..610cd397890e 100644
--- a/include/net/gen_stats.h
+++ b/include/net/gen_stats.h
@@ -19,17 +19,19 @@ struct gnet_dump {
/* Backward compatibility */
int compat_tc_stats;
int compat_xstats;
+ int padattr;
void * xstats;
int xstats_len;
struct tc_stats tc_stats;
};
int gnet_stats_start_copy(struct sk_buff *skb, int type, spinlock_t *lock,
- struct gnet_dump *d);
+ struct gnet_dump *d, int padattr);
int gnet_stats_start_copy_compat(struct sk_buff *skb, int type,
int tc_stats_type, int xstats_type,
- spinlock_t *lock, struct gnet_dump *d);
+ spinlock_t *lock, struct gnet_dump *d,
+ int padattr);
int gnet_stats_copy_basic(struct gnet_dump *d,
struct gnet_stats_basic_cpu __percpu *cpu,
diff --git a/include/net/geneve.h b/include/net/geneve.h
index e6c23dc765f7..cb544a530146 100644
--- a/include/net/geneve.h
+++ b/include/net/geneve.h
@@ -62,13 +62,11 @@ struct genevehdr {
struct geneve_opt options[];
};
-#if IS_ENABLED(CONFIG_GENEVE)
-void geneve_get_rx_port(struct net_device *netdev);
-#else
static inline void geneve_get_rx_port(struct net_device *netdev)
{
+ ASSERT_RTNL();
+ call_netdevice_notifiers(NETDEV_OFFLOAD_PUSH_GENEVE, netdev);
}
-#endif
#ifdef CONFIG_INET
struct net_device *geneve_dev_create_fb(struct net *net, const char *name,
diff --git a/include/net/gre.h b/include/net/gre.h
index 97eafdc47eea..5dce30a6abe3 100644
--- a/include/net/gre.h
+++ b/include/net/gre.h
@@ -25,4 +25,108 @@ int gre_del_protocol(const struct gre_protocol *proto, u8 version);
struct net_device *gretap_fb_dev_create(struct net *net, const char *name,
u8 name_assign_type);
+int gre_parse_header(struct sk_buff *skb, struct tnl_ptk_info *tpi,
+ bool *csum_err, __be16 proto);
+
+static inline int gre_calc_hlen(__be16 o_flags)
+{
+ int addend = 4;
+
+ if (o_flags & TUNNEL_CSUM)
+ addend += 4;
+ if (o_flags & TUNNEL_KEY)
+ addend += 4;
+ if (o_flags & TUNNEL_SEQ)
+ addend += 4;
+ return addend;
+}
+
+static inline __be16 gre_flags_to_tnl_flags(__be16 flags)
+{
+ __be16 tflags = 0;
+
+ if (flags & GRE_CSUM)
+ tflags |= TUNNEL_CSUM;
+ if (flags & GRE_ROUTING)
+ tflags |= TUNNEL_ROUTING;
+ if (flags & GRE_KEY)
+ tflags |= TUNNEL_KEY;
+ if (flags & GRE_SEQ)
+ tflags |= TUNNEL_SEQ;
+ if (flags & GRE_STRICT)
+ tflags |= TUNNEL_STRICT;
+ if (flags & GRE_REC)
+ tflags |= TUNNEL_REC;
+ if (flags & GRE_VERSION)
+ tflags |= TUNNEL_VERSION;
+
+ return tflags;
+}
+
+static inline __be16 gre_tnl_flags_to_gre_flags(__be16 tflags)
+{
+ __be16 flags = 0;
+
+ if (tflags & TUNNEL_CSUM)
+ flags |= GRE_CSUM;
+ if (tflags & TUNNEL_ROUTING)
+ flags |= GRE_ROUTING;
+ if (tflags & TUNNEL_KEY)
+ flags |= GRE_KEY;
+ if (tflags & TUNNEL_SEQ)
+ flags |= GRE_SEQ;
+ if (tflags & TUNNEL_STRICT)
+ flags |= GRE_STRICT;
+ if (tflags & TUNNEL_REC)
+ flags |= GRE_REC;
+ if (tflags & TUNNEL_VERSION)
+ flags |= GRE_VERSION;
+
+ return flags;
+}
+
+static inline __sum16 gre_checksum(struct sk_buff *skb)
+{
+ __wsum csum;
+
+ if (skb->ip_summed == CHECKSUM_PARTIAL)
+ csum = lco_csum(skb);
+ else
+ csum = skb_checksum(skb, 0, skb->len, 0);
+ return csum_fold(csum);
+}
+
+static inline void gre_build_header(struct sk_buff *skb, int hdr_len,
+ __be16 flags, __be16 proto,
+ __be32 key, __be32 seq)
+{
+ struct gre_base_hdr *greh;
+
+ skb_push(skb, hdr_len);
+
+ skb_reset_transport_header(skb);
+ greh = (struct gre_base_hdr *)skb->data;
+ greh->flags = gre_tnl_flags_to_gre_flags(flags);
+ greh->protocol = proto;
+
+ if (flags & (TUNNEL_KEY | TUNNEL_CSUM | TUNNEL_SEQ)) {
+ __be32 *ptr = (__be32 *)(((u8 *)greh) + hdr_len - 4);
+
+ if (flags & TUNNEL_SEQ) {
+ *ptr = seq;
+ ptr--;
+ }
+ if (flags & TUNNEL_KEY) {
+ *ptr = key;
+ ptr--;
+ }
+ if (flags & TUNNEL_CSUM &&
+ !(skb_shinfo(skb)->gso_type &
+ (SKB_GSO_GRE | SKB_GSO_GRE_CSUM))) {
+ *ptr = 0;
+ *(__sum16 *)ptr = gre_checksum(skb);
+ }
+ }
+}
+
#endif
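
The new helpers pair naturally on transmit: gre_calc_hlen() reports how much headroom the outer GRE header needs for the chosen TUNNEL_* flags, and gre_build_header() fills it in. A hedged sketch of a caller (the wrapper function itself is hypothetical):

static int my_gre_push(struct sk_buff *skb, __be16 tnl_flags,
		       __be16 proto, __be32 key, __be32 seq)
{
	int hlen = gre_calc_hlen(tnl_flags);	/* 4 to 16 bytes */

	if (skb_cow_head(skb, hlen))
		return -ENOMEM;

	gre_build_header(skb, hlen, tnl_flags, proto, key, seq);
	return 0;
}
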
diff --git a/include/net/gtp.h b/include/net/gtp.h
new file mode 100644
index 000000000000..894a37b87d63
--- /dev/null
+++ b/include/net/gtp.h
@@ -0,0 +1,34 @@
+#ifndef _GTP_H_
+#define _GTP_H_
+
+/* General GTP protocol related definitions. */
+
+#define GTP0_PORT 3386
+#define GTP1U_PORT 2152
+
+#define GTP_TPDU 255
+
+struct gtp0_header { /* According to GSM TS 09.60. */
+ __u8 flags;
+ __u8 type;
+ __be16 length;
+ __be16 seq;
+ __be16 flow;
+ __u8 number;
+ __u8 spare[3];
+ __be64 tid;
+} __attribute__ ((packed));
+
+struct gtp1_header { /* According to 3GPP TS 29.060. */
+ __u8 flags;
+ __u8 type;
+ __be16 length;
+ __be32 tid;
+} __attribute__ ((packed));
+
+#define GTP1_F_NPDU 0x01
+#define GTP1_F_SEQ 0x02
+#define GTP1_F_EXTHDR 0x04
+#define GTP1_F_MASK 0x07
+
+#endif
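
A small sketch of how these definitions can be used to sanity-check a received GTPv1-U packet; the function is hypothetical and assumes the UDP payload starts at skb->data:

/* Return the TEID of a GTPv1-U T-PDU, or 0 if the packet is not one. */
static __be32 my_gtp1u_teid(struct sk_buff *skb)
{
	struct gtp1_header *hdr;

	if (!pskb_may_pull(skb, sizeof(*hdr)))
		return 0;

	hdr = (struct gtp1_header *)skb->data;

	/* the version lives in the top three bits of flags and must be 1 */
	if ((hdr->flags >> 5) != 1 || hdr->type != GTP_TPDU)
		return 0;

	return hdr->tid;
}
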
diff --git a/include/net/icmp.h b/include/net/icmp.h
index 970028e13382..3ef2743a8eec 100644
--- a/include/net/icmp.h
+++ b/include/net/icmp.h
@@ -30,9 +30,9 @@ struct icmp_err {
extern const struct icmp_err icmp_err_convert[];
#define ICMP_INC_STATS(net, field) SNMP_INC_STATS((net)->mib.icmp_statistics, field)
-#define ICMP_INC_STATS_BH(net, field) SNMP_INC_STATS_BH((net)->mib.icmp_statistics, field)
+#define __ICMP_INC_STATS(net, field) __SNMP_INC_STATS((net)->mib.icmp_statistics, field)
#define ICMPMSGOUT_INC_STATS(net, field) SNMP_INC_STATS_ATOMIC_LONG((net)->mib.icmpmsg_statistics, field+256)
-#define ICMPMSGIN_INC_STATS_BH(net, field) SNMP_INC_STATS_ATOMIC_LONG((net)->mib.icmpmsg_statistics, field)
+#define ICMPMSGIN_INC_STATS(net, field) SNMP_INC_STATS_ATOMIC_LONG((net)->mib.icmpmsg_statistics, field)
struct dst_entry;
struct net_proto_family;
diff --git a/include/net/inet6_hashtables.h b/include/net/inet6_hashtables.h
index 28332bdac333..b87becacd9d3 100644
--- a/include/net/inet6_hashtables.h
+++ b/include/net/inet6_hashtables.h
@@ -66,13 +66,15 @@ static inline struct sock *__inet6_lookup(struct net *net,
const __be16 sport,
const struct in6_addr *daddr,
const u16 hnum,
- const int dif)
+ const int dif,
+ bool *refcounted)
{
struct sock *sk = __inet6_lookup_established(net, hashinfo, saddr,
sport, daddr, hnum, dif);
+ *refcounted = true;
if (sk)
return sk;
-
+ *refcounted = false;
return inet6_lookup_listener(net, hashinfo, skb, doff, saddr, sport,
daddr, hnum, dif);
}
@@ -81,17 +83,19 @@ static inline struct sock *__inet6_lookup_skb(struct inet_hashinfo *hashinfo,
struct sk_buff *skb, int doff,
const __be16 sport,
const __be16 dport,
- int iif)
+ int iif,
+ bool *refcounted)
{
struct sock *sk = skb_steal_sock(skb);
+ *refcounted = true;
if (sk)
return sk;
return __inet6_lookup(dev_net(skb_dst(skb)->dev), hashinfo, skb,
doff, &ipv6_hdr(skb)->saddr, sport,
&ipv6_hdr(skb)->daddr, ntohs(dport),
- iif);
+ iif, refcounted);
}
struct sock *inet6_lookup(struct net *net, struct inet_hashinfo *hashinfo,
diff --git a/include/net/inet_common.h b/include/net/inet_common.h
index 109e3ee9108c..5d683428fced 100644
--- a/include/net/inet_common.h
+++ b/include/net/inet_common.h
@@ -39,6 +39,11 @@ int inet_ctl_sock_create(struct sock **sk, unsigned short family,
int inet_recv_error(struct sock *sk, struct msghdr *msg, int len,
int *addr_len);
+struct sk_buff **inet_gro_receive(struct sk_buff **head, struct sk_buff *skb);
+int inet_gro_complete(struct sk_buff *skb, int nhoff);
+struct sk_buff *inet_gso_segment(struct sk_buff *skb,
+ netdev_features_t features);
+
static inline void inet_ctl_sock_destroy(struct sock *sk)
{
if (sk)
diff --git a/include/net/inet_hashtables.h b/include/net/inet_hashtables.h
index 50f635c2c536..0574493e3899 100644
--- a/include/net/inet_hashtables.h
+++ b/include/net/inet_hashtables.h
@@ -100,14 +100,10 @@ struct inet_bind_hashbucket {
/*
* Sockets can be hashed in established or listening table
- * We must use different 'nulls' end-of-chain value for listening
- * hash table, or we might find a socket that was closed and
- * reallocated/inserted into established hash table
*/
-#define LISTENING_NULLS_BASE (1U << 29)
struct inet_listen_hashbucket {
spinlock_t lock;
- struct hlist_nulls_head head;
+ struct hlist_head head;
};
/* This is for listening sockets, thus all sockets which possess wildcards. */
@@ -280,11 +276,8 @@ static inline struct sock *inet_lookup_listener(struct net *net,
net_eq(sock_net(__sk), (__net)))
#endif /* 64-bit arch */
-/*
- * Sockets in TCP_CLOSE state are _always_ taken out of the hash, so we need
+/* Sockets in TCP_CLOSE state are _always_ taken out of the hash, so we need
* not check it for lookups anymore, thanks Alexey. -DaveM
- *
- * Local BH must be disabled here.
*/
struct sock *__inet_lookup_established(struct net *net,
struct inet_hashinfo *hashinfo,
@@ -307,14 +300,20 @@ static inline struct sock *__inet_lookup(struct net *net,
struct sk_buff *skb, int doff,
const __be32 saddr, const __be16 sport,
const __be32 daddr, const __be16 dport,
- const int dif)
+ const int dif,
+ bool *refcounted)
{
u16 hnum = ntohs(dport);
- struct sock *sk = __inet_lookup_established(net, hashinfo,
- saddr, sport, daddr, hnum, dif);
+ struct sock *sk;
- return sk ? : __inet_lookup_listener(net, hashinfo, skb, doff, saddr,
- sport, daddr, hnum, dif);
+ sk = __inet_lookup_established(net, hashinfo, saddr, sport,
+ daddr, hnum, dif);
+ *refcounted = true;
+ if (sk)
+ return sk;
+ *refcounted = false;
+ return __inet_lookup_listener(net, hashinfo, skb, doff, saddr,
+ sport, daddr, hnum, dif);
}
static inline struct sock *inet_lookup(struct net *net,
@@ -325,12 +324,13 @@ static inline struct sock *inet_lookup(struct net *net,
const int dif)
{
struct sock *sk;
+ bool refcounted;
- local_bh_disable();
sk = __inet_lookup(net, hashinfo, skb, doff, saddr, sport, daddr,
- dport, dif);
- local_bh_enable();
+ dport, dif, &refcounted);
+ if (sk && !refcounted && !atomic_inc_not_zero(&sk->sk_refcnt))
+ sk = NULL;
return sk;
}
@@ -338,17 +338,20 @@ static inline struct sock *__inet_lookup_skb(struct inet_hashinfo *hashinfo,
struct sk_buff *skb,
int doff,
const __be16 sport,
- const __be16 dport)
+ const __be16 dport,
+ bool *refcounted)
{
struct sock *sk = skb_steal_sock(skb);
const struct iphdr *iph = ip_hdr(skb);
+ *refcounted = true;
if (sk)
return sk;
- else
- return __inet_lookup(dev_net(skb_dst(skb)->dev), hashinfo, skb,
- doff, iph->saddr, sport,
- iph->daddr, dport, inet_iif(skb));
+
+ return __inet_lookup(dev_net(skb_dst(skb)->dev), hashinfo, skb,
+ doff, iph->saddr, sport,
+ iph->daddr, dport, inet_iif(skb),
+ refcounted);
}
u32 sk_ehashfn(const struct sock *sk);
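
The refcounted out-parameter tells the caller whether the returned socket already carries a reference (established sockets) or was found without one (listeners, now protected only by RCU in this path). A hedged sketch of the lookup/put pattern a receive path would follow:

static void my_rcv(struct inet_hashinfo *hashinfo, struct sk_buff *skb,
		   int doff, __be16 sport, __be16 dport)
{
	bool refcounted;
	struct sock *sk;

	sk = __inet_lookup_skb(hashinfo, skb, doff, sport, dport, &refcounted);
	if (!sk)
		return;

	/* ... process skb against sk ... */

	if (refcounted)
		sock_put(sk);	/* only drop a reference we actually own */
}
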
diff --git a/include/net/ip.h b/include/net/ip.h
index fad74d323bd6..37165fba3741 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -36,6 +36,7 @@
struct sock;
struct inet_skb_parm {
+ int iif;
struct ip_options opt; /* Compiled IP options */
unsigned char flags;
@@ -56,6 +57,7 @@ static inline unsigned int ip_hdrlen(const struct sk_buff *skb)
}
struct ipcm_cookie {
+ struct sockcm_cookie sockc;
__be32 addr;
int oif;
struct ip_options_rcu *opt;
@@ -186,17 +188,15 @@ void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb,
unsigned int len);
#define IP_INC_STATS(net, field) SNMP_INC_STATS64((net)->mib.ip_statistics, field)
-#define IP_INC_STATS_BH(net, field) SNMP_INC_STATS64_BH((net)->mib.ip_statistics, field)
+#define __IP_INC_STATS(net, field) __SNMP_INC_STATS64((net)->mib.ip_statistics, field)
#define IP_ADD_STATS(net, field, val) SNMP_ADD_STATS64((net)->mib.ip_statistics, field, val)
-#define IP_ADD_STATS_BH(net, field, val) SNMP_ADD_STATS64_BH((net)->mib.ip_statistics, field, val)
+#define __IP_ADD_STATS(net, field, val) __SNMP_ADD_STATS64((net)->mib.ip_statistics, field, val)
#define IP_UPD_PO_STATS(net, field, val) SNMP_UPD_PO_STATS64((net)->mib.ip_statistics, field, val)
-#define IP_UPD_PO_STATS_BH(net, field, val) SNMP_UPD_PO_STATS64_BH((net)->mib.ip_statistics, field, val)
+#define __IP_UPD_PO_STATS(net, field, val) __SNMP_UPD_PO_STATS64((net)->mib.ip_statistics, field, val)
#define NET_INC_STATS(net, field) SNMP_INC_STATS((net)->mib.net_statistics, field)
-#define NET_INC_STATS_BH(net, field) SNMP_INC_STATS_BH((net)->mib.net_statistics, field)
-#define NET_INC_STATS_USER(net, field) SNMP_INC_STATS_USER((net)->mib.net_statistics, field)
+#define __NET_INC_STATS(net, field) __SNMP_INC_STATS((net)->mib.net_statistics, field)
#define NET_ADD_STATS(net, field, adnd) SNMP_ADD_STATS((net)->mib.net_statistics, field, adnd)
-#define NET_ADD_STATS_BH(net, field, adnd) SNMP_ADD_STATS_BH((net)->mib.net_statistics, field, adnd)
-#define NET_ADD_STATS_USER(net, field, adnd) SNMP_ADD_STATS_USER((net)->mib.net_statistics, field, adnd)
+#define __NET_ADD_STATS(net, field, adnd) __SNMP_ADD_STATS((net)->mib.net_statistics, field, adnd)
u64 snmp_get_cpu_field(void __percpu *mib, int cpu, int offct);
unsigned long snmp_fold_field(void __percpu *mib, int offt);
@@ -550,7 +550,7 @@ int ip_options_rcv_srr(struct sk_buff *skb);
void ipv4_pktinfo_prepare(const struct sock *sk, struct sk_buff *skb);
void ip_cmsg_recv_offset(struct msghdr *msg, struct sk_buff *skb, int offset);
-int ip_cmsg_send(struct net *net, struct msghdr *msg,
+int ip_cmsg_send(struct sock *sk, struct msghdr *msg,
struct ipcm_cookie *ipc, bool allow_ipv6);
int ip_setsockopt(struct sock *sk, int level, int optname, char __user *optval,
unsigned int optlen);
diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h
index 295d291269e2..54c779416eec 100644
--- a/include/net/ip6_route.h
+++ b/include/net/ip6_route.h
@@ -101,6 +101,9 @@ void fib6_force_start_gc(struct net *net);
struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev,
const struct in6_addr *addr, bool anycast);
+struct rt6_info *ip6_dst_alloc(struct net *net, struct net_device *dev,
+ int flags);
+
/*
* support functions for ND
*
diff --git a/include/net/ip6_tunnel.h b/include/net/ip6_tunnel.h
index 499a707765ea..d325c81332e3 100644
--- a/include/net/ip6_tunnel.h
+++ b/include/net/ip6_tunnel.h
@@ -42,6 +42,7 @@ struct ip6_tnl {
struct __ip6_tnl_parm parms; /* tunnel configuration parameters */
struct flowi fl; /* flowi template for xmit */
struct dst_cache dst_cache; /* cached dst */
+ struct gro_cells gro_cells;
int err_count;
unsigned long err_time;
@@ -49,10 +50,70 @@ struct ip6_tnl {
/* These fields used only by GRE */
__u32 i_seqno; /* The last seen seqno */
__u32 o_seqno; /* The last output seqno */
- int hlen; /* Precalculated GRE header length */
+ int hlen; /* tun_hlen + encap_hlen */
+ int tun_hlen; /* Precalculated header length */
+ int encap_hlen; /* Encap header length (FOU,GUE) */
+ struct ip_tunnel_encap encap;
int mlink;
};
+struct ip6_tnl_encap_ops {
+ size_t (*encap_hlen)(struct ip_tunnel_encap *e);
+ int (*build_header)(struct sk_buff *skb, struct ip_tunnel_encap *e,
+ u8 *protocol, struct flowi6 *fl6);
+};
+
+extern const struct ip6_tnl_encap_ops __rcu *
+ ip6tun_encaps[MAX_IPTUN_ENCAP_OPS];
+
+int ip6_tnl_encap_add_ops(const struct ip6_tnl_encap_ops *ops,
+ unsigned int num);
+int ip6_tnl_encap_del_ops(const struct ip6_tnl_encap_ops *ops,
+ unsigned int num);
+int ip6_tnl_encap_setup(struct ip6_tnl *t,
+ struct ip_tunnel_encap *ipencap);
+
+static inline int ip6_encap_hlen(struct ip_tunnel_encap *e)
+{
+ const struct ip6_tnl_encap_ops *ops;
+ int hlen = -EINVAL;
+
+ if (e->type == TUNNEL_ENCAP_NONE)
+ return 0;
+
+ if (e->type >= MAX_IPTUN_ENCAP_OPS)
+ return -EINVAL;
+
+ rcu_read_lock();
+ ops = rcu_dereference(ip6tun_encaps[e->type]);
+ if (likely(ops && ops->encap_hlen))
+ hlen = ops->encap_hlen(e);
+ rcu_read_unlock();
+
+ return hlen;
+}
+
+static inline int ip6_tnl_encap(struct sk_buff *skb, struct ip6_tnl *t,
+ u8 *protocol, struct flowi6 *fl6)
+{
+ const struct ip6_tnl_encap_ops *ops;
+ int ret = -EINVAL;
+
+ if (t->encap.type == TUNNEL_ENCAP_NONE)
+ return 0;
+
+ if (t->encap.type >= MAX_IPTUN_ENCAP_OPS)
+ return -EINVAL;
+
+ rcu_read_lock();
+ ops = rcu_dereference(ip6tun_encaps[t->encap.type]);
+ if (likely(ops && ops->build_header))
+ ret = ops->build_header(skb, &t->encap, protocol, fl6);
+ rcu_read_unlock();
+
+ return ret;
+}
+
/* Tunnel encapsulation limit destination sub-option */
struct ipv6_tlv_tnl_enc_lim {
@@ -63,13 +124,19 @@ struct ipv6_tlv_tnl_enc_lim {
int ip6_tnl_rcv_ctl(struct ip6_tnl *t, const struct in6_addr *laddr,
const struct in6_addr *raddr);
+int ip6_tnl_rcv(struct ip6_tnl *tunnel, struct sk_buff *skb,
+ const struct tnl_ptk_info *tpi, struct metadata_dst *tun_dst,
+ bool log_ecn_error);
int ip6_tnl_xmit_ctl(struct ip6_tnl *t, const struct in6_addr *laddr,
const struct in6_addr *raddr);
+int ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev, __u8 dsfield,
+ struct flowi6 *fl6, int encap_limit, __u32 *pmtu, __u8 proto);
__u16 ip6_tnl_parse_tlv_enc_lim(struct sk_buff *skb, __u8 *raw);
__u32 ip6_tnl_get_cap(struct ip6_tnl *t, const struct in6_addr *laddr,
const struct in6_addr *raddr);
struct net *ip6_tnl_get_link_net(const struct net_device *dev);
int ip6_tnl_get_iflink(const struct net_device *dev);
+int ip6_tnl_change_mtu(struct net_device *dev, int new_mtu);
#ifdef CONFIG_INET
static inline void ip6tunnel_xmit(struct sock *sk, struct sk_buff *skb,
diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h
index 56050f913339..dbf444428437 100644
--- a/include/net/ip_tunnels.h
+++ b/include/net/ip_tunnels.h
@@ -105,24 +105,23 @@ struct ip_tunnel {
struct net_device *dev;
struct net *net; /* netns for packet i/o */
- int err_count; /* Number of arrived ICMP errors */
unsigned long err_time; /* Time when the last ICMP error
* arrived */
+ int err_count; /* Number of arrived ICMP errors */
/* These four fields used only by GRE */
u32 i_seqno; /* The last seen seqno */
u32 o_seqno; /* The last output seqno */
int tun_hlen; /* Precalculated header length */
- int mlink;
struct dst_cache dst_cache;
struct ip_tunnel_parm parms;
+ int mlink;
int encap_hlen; /* Encap header length (FOU,GUE) */
- struct ip_tunnel_encap encap;
-
int hlen; /* tun_hlen + encap_hlen */
+ struct ip_tunnel_encap encap;
/* for SIT */
#ifdef CONFIG_IPV6_SIT_6RD
@@ -161,6 +160,7 @@ struct tnl_ptk_info {
#define PACKET_RCVD 0
#define PACKET_REJECT 1
+#define PACKET_NEXT 2
#define IP_TNL_HASH_BITS 7
#define IP_TNL_HASH_SIZE (1 << IP_TNL_HASH_BITS)
@@ -171,22 +171,6 @@ struct ip_tunnel_net {
struct ip_tunnel __rcu *collect_md_tun;
};
-struct ip_tunnel_encap_ops {
- size_t (*encap_hlen)(struct ip_tunnel_encap *e);
- int (*build_header)(struct sk_buff *skb, struct ip_tunnel_encap *e,
- u8 *protocol, struct flowi4 *fl4);
-};
-
-#define MAX_IPTUN_ENCAP_OPS 8
-
-extern const struct ip_tunnel_encap_ops __rcu *
- iptun_encaps[MAX_IPTUN_ENCAP_OPS];
-
-int ip_tunnel_encap_add_ops(const struct ip_tunnel_encap_ops *op,
- unsigned int num);
-int ip_tunnel_encap_del_ops(const struct ip_tunnel_encap_ops *op,
- unsigned int num);
-
static inline void ip_tunnel_key_init(struct ip_tunnel_key *key,
__be32 saddr, __be32 daddr,
u8 tos, u8 ttl, __be32 label,
@@ -251,8 +235,6 @@ void ip_tunnel_delete_net(struct ip_tunnel_net *itn, struct rtnl_link_ops *ops);
void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
const struct iphdr *tnl_params, const u8 protocol);
int ip_tunnel_ioctl(struct net_device *dev, struct ip_tunnel_parm *p, int cmd);
-int ip_tunnel_encap(struct sk_buff *skb, struct ip_tunnel *t,
- u8 *protocol, struct flowi4 *fl4);
int __ip_tunnel_change_mtu(struct net_device *dev, int new_mtu, bool strict);
int ip_tunnel_change_mtu(struct net_device *dev, int new_mtu);
@@ -271,9 +253,67 @@ int ip_tunnel_changelink(struct net_device *dev, struct nlattr *tb[],
int ip_tunnel_newlink(struct net_device *dev, struct nlattr *tb[],
struct ip_tunnel_parm *p);
void ip_tunnel_setup(struct net_device *dev, int net_id);
+
+struct ip_tunnel_encap_ops {
+ size_t (*encap_hlen)(struct ip_tunnel_encap *e);
+ int (*build_header)(struct sk_buff *skb, struct ip_tunnel_encap *e,
+ u8 *protocol, struct flowi4 *fl4);
+};
+
+#define MAX_IPTUN_ENCAP_OPS 8
+
+extern const struct ip_tunnel_encap_ops __rcu *
+ iptun_encaps[MAX_IPTUN_ENCAP_OPS];
+
+int ip_tunnel_encap_add_ops(const struct ip_tunnel_encap_ops *op,
+ unsigned int num);
+int ip_tunnel_encap_del_ops(const struct ip_tunnel_encap_ops *op,
+ unsigned int num);
+
int ip_tunnel_encap_setup(struct ip_tunnel *t,
struct ip_tunnel_encap *ipencap);
+static inline int ip_encap_hlen(struct ip_tunnel_encap *e)
+{
+ const struct ip_tunnel_encap_ops *ops;
+ int hlen = -EINVAL;
+
+ if (e->type == TUNNEL_ENCAP_NONE)
+ return 0;
+
+ if (e->type >= MAX_IPTUN_ENCAP_OPS)
+ return -EINVAL;
+
+ rcu_read_lock();
+ ops = rcu_dereference(iptun_encaps[e->type]);
+ if (likely(ops && ops->encap_hlen))
+ hlen = ops->encap_hlen(e);
+ rcu_read_unlock();
+
+ return hlen;
+}
+
+static inline int ip_tunnel_encap(struct sk_buff *skb, struct ip_tunnel *t,
+ u8 *protocol, struct flowi4 *fl4)
+{
+ const struct ip_tunnel_encap_ops *ops;
+ int ret = -EINVAL;
+
+ if (t->encap.type == TUNNEL_ENCAP_NONE)
+ return 0;
+
+ if (t->encap.type >= MAX_IPTUN_ENCAP_OPS)
+ return -EINVAL;
+
+ rcu_read_lock();
+ ops = rcu_dereference(iptun_encaps[t->encap.type]);
+ if (likely(ops && ops->build_header))
+ ret = ops->build_header(skb, &t->encap, protocol, fl4);
+ rcu_read_unlock();
+
+ return ret;
+}
+
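+
+/* Illustrative only (not part of this patch): the two inline helpers above
+ * are the consumer side of the encapsulation ops. A tunnel transmit path
+ * first asks how much extra headroom the configured FOU/GUE (or other)
+ * encapsulation needs, then lets the registered ops build that header.
+ * A hedged caller-side sketch, with a hypothetical wrapper function:
+ *
+ *	static int my_tunnel_xmit_prepare(struct sk_buff *skb,
+ *					  struct ip_tunnel *t,
+ *					  u8 *protocol, struct flowi4 *fl4)
+ *	{
+ *		int hlen = ip_encap_hlen(&t->encap);
+ *
+ *		if (hlen < 0)
+ *			return hlen;		// unknown encap type
+ *		if (skb_cow_head(skb, hlen + t->tun_hlen))
+ *			return -ENOMEM;
+ *		return ip_tunnel_encap(skb, t, protocol, fl4); // 0 if NONE
+ *	}
+ */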
/* Extract dsfield from inner protocol */
static inline u8 ip_tunnel_get_dsfield(const struct iphdr *iph,
const struct sk_buff *skb)
@@ -295,15 +335,22 @@ static inline u8 ip_tunnel_ecn_encap(u8 tos, const struct iphdr *iph,
return INET_ECN_encapsulate(tos, inner);
}
-int iptunnel_pull_header(struct sk_buff *skb, int hdr_len, __be16 inner_proto,
- bool xnet);
+int __iptunnel_pull_header(struct sk_buff *skb, int hdr_len,
+ __be16 inner_proto, bool raw_proto, bool xnet);
+
+static inline int iptunnel_pull_header(struct sk_buff *skb, int hdr_len,
+ __be16 inner_proto, bool xnet)
+{
+ return __iptunnel_pull_header(skb, hdr_len, inner_proto, false, xnet);
+}
+
void iptunnel_xmit(struct sock *sk, struct rtable *rt, struct sk_buff *skb,
__be32 src, __be32 dst, u8 proto,
u8 tos, u8 ttl, __be16 df, bool xnet);
struct metadata_dst *iptunnel_metadata_reply(struct metadata_dst *md,
gfp_t flags);
-struct sk_buff *iptunnel_handle_offloads(struct sk_buff *skb, int gso_type_mask);
+int iptunnel_handle_offloads(struct sk_buff *skb, int gso_type_mask);
static inline int iptunnel_pull_offloads(struct sk_buff *skb)
{
diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
index a6cc576fd467..af4c10ebb241 100644
--- a/include/net/ip_vs.h
+++ b/include/net/ip_vs.h
@@ -731,6 +731,12 @@ struct ip_vs_pe {
u32 (*hashkey_raw)(const struct ip_vs_conn_param *p, u32 initval,
bool inverse);
int (*show_pe_data)(const struct ip_vs_conn *cp, char *buf);
+ /* create connections for real-server outgoing packets */
+ struct ip_vs_conn* (*conn_out)(struct ip_vs_service *svc,
+ struct ip_vs_dest *dest,
+ struct sk_buff *skb,
+ const struct ip_vs_iphdr *iph,
+ __be16 dport, __be16 cport);
};
/* The application module object (a.k.a. app incarnation) */
@@ -874,6 +880,7 @@ struct netns_ipvs {
/* Service counters */
atomic_t ftpsvc_counter;
atomic_t nullsvc_counter;
+ atomic_t conn_out_counter;
#ifdef CONFIG_SYSCTL
/* 1/rate drop and drop-entry variables */
@@ -1147,6 +1154,12 @@ static inline int sysctl_cache_bypass(struct netns_ipvs *ipvs)
*/
const char *ip_vs_proto_name(unsigned int proto);
void ip_vs_init_hash_table(struct list_head *table, int rows);
+struct ip_vs_conn *ip_vs_new_conn_out(struct ip_vs_service *svc,
+ struct ip_vs_dest *dest,
+ struct sk_buff *skb,
+ const struct ip_vs_iphdr *iph,
+ __be16 dport,
+ __be16 cport);
#define IP_VS_INIT_HASH_TABLE(t) ip_vs_init_hash_table((t), ARRAY_SIZE((t)))
#define IP_VS_APP_TYPE_FTP 1
@@ -1378,6 +1391,10 @@ ip_vs_service_find(struct netns_ipvs *ipvs, int af, __u32 fwmark, __u16 protocol
bool ip_vs_has_real_service(struct netns_ipvs *ipvs, int af, __u16 protocol,
const union nf_inet_addr *daddr, __be16 dport);
+struct ip_vs_dest *
+ip_vs_find_real_service(struct netns_ipvs *ipvs, int af, __u16 protocol,
+ const union nf_inet_addr *daddr, __be16 dport);
+
int ip_vs_use_count_inc(void);
void ip_vs_use_count_dec(void);
int ip_vs_register_nl_ioctl(void);
diff --git a/include/net/ipv6.h b/include/net/ipv6.h
index d0aeb97aec5d..11a045281948 100644
--- a/include/net/ipv6.h
+++ b/include/net/ipv6.h
@@ -121,21 +121,21 @@ struct frag_hdr {
extern int sysctl_mld_max_msf;
extern int sysctl_mld_qrv;
-#define _DEVINC(net, statname, modifier, idev, field) \
+#define _DEVINC(net, statname, mod, idev, field) \
({ \
struct inet6_dev *_idev = (idev); \
if (likely(_idev != NULL)) \
- SNMP_INC_STATS##modifier((_idev)->stats.statname, (field)); \
- SNMP_INC_STATS##modifier((net)->mib.statname##_statistics, (field));\
+ mod##SNMP_INC_STATS64((_idev)->stats.statname, (field));\
+ mod##SNMP_INC_STATS64((net)->mib.statname##_statistics, (field));\
})
/* per device counters are atomic_long_t */
-#define _DEVINCATOMIC(net, statname, modifier, idev, field) \
+#define _DEVINCATOMIC(net, statname, mod, idev, field) \
({ \
struct inet6_dev *_idev = (idev); \
if (likely(_idev != NULL)) \
SNMP_INC_STATS_ATOMIC_LONG((_idev)->stats.statname##dev, (field)); \
- SNMP_INC_STATS##modifier((net)->mib.statname##_statistics, (field));\
+ mod##SNMP_INC_STATS((net)->mib.statname##_statistics, (field));\
})
/* per device and per net counters are atomic_long_t */
@@ -147,46 +147,44 @@ extern int sysctl_mld_qrv;
SNMP_INC_STATS_ATOMIC_LONG((net)->mib.statname##_statistics, (field));\
})
-#define _DEVADD(net, statname, modifier, idev, field, val) \
+#define _DEVADD(net, statname, mod, idev, field, val) \
({ \
struct inet6_dev *_idev = (idev); \
if (likely(_idev != NULL)) \
- SNMP_ADD_STATS##modifier((_idev)->stats.statname, (field), (val)); \
- SNMP_ADD_STATS##modifier((net)->mib.statname##_statistics, (field), (val));\
+ mod##SNMP_ADD_STATS((_idev)->stats.statname, (field), (val)); \
+ mod##SNMP_ADD_STATS((net)->mib.statname##_statistics, (field), (val));\
})
-#define _DEVUPD(net, statname, modifier, idev, field, val) \
+#define _DEVUPD(net, statname, mod, idev, field, val) \
({ \
struct inet6_dev *_idev = (idev); \
if (likely(_idev != NULL)) \
- SNMP_UPD_PO_STATS##modifier((_idev)->stats.statname, field, (val)); \
- SNMP_UPD_PO_STATS##modifier((net)->mib.statname##_statistics, field, (val));\
+ mod##SNMP_UPD_PO_STATS((_idev)->stats.statname, field, (val)); \
+ mod##SNMP_UPD_PO_STATS((net)->mib.statname##_statistics, field, (val));\
})
/* MIBs */
#define IP6_INC_STATS(net, idev,field) \
- _DEVINC(net, ipv6, 64, idev, field)
-#define IP6_INC_STATS_BH(net, idev,field) \
- _DEVINC(net, ipv6, 64_BH, idev, field)
+ _DEVINC(net, ipv6, , idev, field)
+#define __IP6_INC_STATS(net, idev,field) \
+ _DEVINC(net, ipv6, __, idev, field)
#define IP6_ADD_STATS(net, idev,field,val) \
- _DEVADD(net, ipv6, 64, idev, field, val)
-#define IP6_ADD_STATS_BH(net, idev,field,val) \
- _DEVADD(net, ipv6, 64_BH, idev, field, val)
+ _DEVADD(net, ipv6, , idev, field, val)
+#define __IP6_ADD_STATS(net, idev,field,val) \
+ _DEVADD(net, ipv6, __, idev, field, val)
#define IP6_UPD_PO_STATS(net, idev,field,val) \
- _DEVUPD(net, ipv6, 64, idev, field, val)
-#define IP6_UPD_PO_STATS_BH(net, idev,field,val) \
- _DEVUPD(net, ipv6, 64_BH, idev, field, val)
+ _DEVUPD(net, ipv6, , idev, field, val)
+#define __IP6_UPD_PO_STATS(net, idev,field,val) \
+ _DEVUPD(net, ipv6, __, idev, field, val)
#define ICMP6_INC_STATS(net, idev, field) \
_DEVINCATOMIC(net, icmpv6, , idev, field)
-#define ICMP6_INC_STATS_BH(net, idev, field) \
- _DEVINCATOMIC(net, icmpv6, _BH, idev, field)
+#define __ICMP6_INC_STATS(net, idev, field) \
+ _DEVINCATOMIC(net, icmpv6, __, idev, field)
#define ICMP6MSGOUT_INC_STATS(net, idev, field) \
_DEVINC_ATOMIC_ATOMIC(net, icmpv6msg, idev, field +256)
-#define ICMP6MSGOUT_INC_STATS_BH(net, idev, field) \
- _DEVINC_ATOMIC_ATOMIC(net, icmpv6msg, idev, field +256)
-#define ICMP6MSGIN_INC_STATS_BH(net, idev, field) \
+#define ICMP6MSGIN_INC_STATS(net, idev, field) \
_DEVINC_ATOMIC_ATOMIC(net, icmpv6msg, idev, field)
struct ip6_ra_chain {
@@ -253,6 +251,13 @@ struct ipv6_fl_socklist {
struct rcu_head rcu;
};
+struct ipcm6_cookie {
+ __s16 hlimit;
+ __s16 tclass;
+ __s8 dontfrag;
+ struct ipv6_txoptions *opt;
+};
+
static inline struct ipv6_txoptions *txopt_get(const struct ipv6_pinfo *np)
{
struct ipv6_txoptions *opt;
@@ -865,9 +870,10 @@ int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr);
int ip6_append_data(struct sock *sk,
int getfrag(void *from, char *to, int offset, int len,
int odd, struct sk_buff *skb),
- void *from, int length, int transhdrlen, int hlimit,
- int tclass, struct ipv6_txoptions *opt, struct flowi6 *fl6,
- struct rt6_info *rt, unsigned int flags, int dontfrag);
+ void *from, int length, int transhdrlen,
+ struct ipcm6_cookie *ipc6, struct flowi6 *fl6,
+ struct rt6_info *rt, unsigned int flags,
+ const struct sockcm_cookie *sockc);
int ip6_push_pending_frames(struct sock *sk);
@@ -882,9 +888,9 @@ struct sk_buff *ip6_make_skb(struct sock *sk,
int getfrag(void *from, char *to, int offset,
int len, int odd, struct sk_buff *skb),
void *from, int length, int transhdrlen,
- int hlimit, int tclass, struct ipv6_txoptions *opt,
- struct flowi6 *fl6, struct rt6_info *rt,
- unsigned int flags, int dontfrag);
+ struct ipcm6_cookie *ipc6, struct flowi6 *fl6,
+ struct rt6_info *rt, unsigned int flags,
+ const struct sockcm_cookie *sockc);
static inline struct sk_buff *ip6_finish_skb(struct sock *sk)
{
@@ -959,6 +965,8 @@ int compat_ipv6_getsockopt(struct sock *sk, int level, int optname,
int ip6_datagram_connect(struct sock *sk, struct sockaddr *addr, int addr_len);
int ip6_datagram_connect_v6_only(struct sock *sk, struct sockaddr *addr,
int addr_len);
+int ip6_datagram_dst_update(struct sock *sk, bool fix_sk_saddr);
+void ip6_datagram_release_cb(struct sock *sk);
int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len,
int *addr_len);
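
The new ipcm6_cookie gathers the per-call hop limit, traffic class and dontfrag values that previously travelled as separate ip6_append_data() arguments. A hedged sketch of a datagram send path using the new signature; the wrapper is invented and the getfrag callback is the stock ip_generic_getfrag:

static int my_send(struct sock *sk, struct msghdr *msg, int len,
		   struct flowi6 *fl6, struct rt6_info *rt,
		   const struct sockcm_cookie *sockc)
{
	struct ipcm6_cookie ipc6;

	ipc6.hlimit = -1;		/* "unset": let route/socket defaults apply */
	ipc6.tclass = -1;
	ipc6.dontfrag = -1;
	ipc6.opt = NULL;

	return ip6_append_data(sk, ip_generic_getfrag, msg, len, 0,
			       &ipc6, fl6, rt, MSG_DONTWAIT, sockc);
}
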
diff --git a/include/net/l3mdev.h b/include/net/l3mdev.h
index c43a9c73de5e..374388dc01c8 100644
--- a/include/net/l3mdev.h
+++ b/include/net/l3mdev.h
@@ -25,6 +25,8 @@
struct l3mdev_ops {
u32 (*l3mdev_fib_table)(const struct net_device *dev);
+ struct sk_buff * (*l3mdev_l3_rcv)(struct net_device *dev,
+ struct sk_buff *skb, u16 proto);
/* IPv4 ops */
struct rtable * (*l3mdev_get_rtable)(const struct net_device *dev,
@@ -130,51 +132,36 @@ static inline bool netif_index_is_l3_master(struct net *net, int ifindex)
return rc;
}
-static inline int l3mdev_get_saddr(struct net *net, int ifindex,
- struct flowi4 *fl4)
-{
- struct net_device *dev;
- int rc = 0;
+int l3mdev_get_saddr(struct net *net, int ifindex, struct flowi4 *fl4);
- if (ifindex) {
+struct dst_entry *l3mdev_get_rt6_dst(struct net *net, const struct flowi6 *fl6);
- rcu_read_lock();
+static inline
+struct sk_buff *l3mdev_l3_rcv(struct sk_buff *skb, u16 proto)
+{
+ struct net_device *master = NULL;
- dev = dev_get_by_index_rcu(net, ifindex);
- if (dev && netif_is_l3_master(dev) &&
- dev->l3mdev_ops->l3mdev_get_saddr) {
- rc = dev->l3mdev_ops->l3mdev_get_saddr(dev, fl4);
- }
+ if (netif_is_l3_slave(skb->dev))
+ master = netdev_master_upper_dev_get_rcu(skb->dev);
+ else if (netif_is_l3_master(skb->dev))
+ master = skb->dev;
- rcu_read_unlock();
- }
+ if (master && master->l3mdev_ops->l3mdev_l3_rcv)
+ skb = master->l3mdev_ops->l3mdev_l3_rcv(master, skb, proto);
- return rc;
+ return skb;
}
-static inline struct dst_entry *l3mdev_get_rt6_dst(const struct net_device *dev,
- const struct flowi6 *fl6)
+static inline
+struct sk_buff *l3mdev_ip_rcv(struct sk_buff *skb)
{
- if (netif_is_l3_master(dev) && dev->l3mdev_ops->l3mdev_get_rt6_dst)
- return dev->l3mdev_ops->l3mdev_get_rt6_dst(dev, fl6);
-
- return NULL;
+ return l3mdev_l3_rcv(skb, AF_INET);
}
static inline
-struct dst_entry *l3mdev_rt6_dst_by_oif(struct net *net,
- const struct flowi6 *fl6)
+struct sk_buff *l3mdev_ip6_rcv(struct sk_buff *skb)
{
- struct dst_entry *dst = NULL;
- struct net_device *dev;
-
- dev = dev_get_by_index(net, fl6->flowi6_oif);
- if (dev) {
- dst = l3mdev_get_rt6_dst(dev, fl6);
- dev_put(dev);
- }
-
- return dst;
+ return l3mdev_l3_rcv(skb, AF_INET6);
}
#else
@@ -233,16 +220,21 @@ static inline int l3mdev_get_saddr(struct net *net, int ifindex,
}
static inline
-struct dst_entry *l3mdev_get_rt6_dst(const struct net_device *dev,
- const struct flowi6 *fl6)
+struct dst_entry *l3mdev_get_rt6_dst(struct net *net, const struct flowi6 *fl6)
{
return NULL;
}
+
static inline
-struct dst_entry *l3mdev_rt6_dst_by_oif(struct net *net,
- const struct flowi6 *fl6)
+struct sk_buff *l3mdev_ip_rcv(struct sk_buff *skb)
{
- return NULL;
+ return skb;
+}
+
+static inline
+struct sk_buff *l3mdev_ip6_rcv(struct sk_buff *skb)
+{
+ return skb;
}
#endif
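
l3mdev_ip_rcv()/l3mdev_ip6_rcv() are meant to be dropped into the protocol receive paths so an L3 master device (e.g. a VRF) can re-target or consume the skb before routing. A hedged sketch of the calling convention; the surrounding function and the delivery helper are hypothetical:

static int my_route_and_deliver(struct net *net, struct sock *sk,
				struct sk_buff *skb);	/* rest of the rx path */

static int my_ip_rcv_finish(struct net *net, struct sock *sk,
			    struct sk_buff *skb)
{
	/* May return a (possibly re-targeted) skb, or NULL if the master
	 * device consumed the packet, in which case we are done with it. */
	skb = l3mdev_ip_rcv(skb);
	if (!skb)
		return NET_RX_SUCCESS;

	return my_route_and_deliver(net, sk, skb);
}
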
diff --git a/include/net/mac80211.h b/include/net/mac80211.h
index 0c09da34b67a..be30b0549b88 100644
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -291,7 +291,7 @@ struct ieee80211_vif_chanctx_switch {
* @BSS_CHANGED_PS: PS changed for this BSS (STA mode)
* @BSS_CHANGED_TXPOWER: TX power setting changed for this interface
* @BSS_CHANGED_P2P_PS: P2P powersave settings (CTWindow, opportunistic PS)
- * changed (currently only in P2P client mode, GO mode will be later)
+ * changed
* @BSS_CHANGED_BEACON_INFO: Data from the AP's beacon became available:
* currently dtim_period only is under consideration.
* @BSS_CHANGED_BANDWIDTH: The bandwidth used by this interface changed,
@@ -526,6 +526,9 @@ struct ieee80211_mu_group_data {
* userspace), whereas TPC is disabled if %txpower_type is set to
* NL80211_TX_POWER_FIXED (use value configured from userspace)
* @p2p_noa_attr: P2P NoA attribute for P2P powersave
+ * @allow_p2p_go_ps: indicates, for an AP or P2P GO interface, whether it is
+ * allowed to use the P2P PS mechanism. An AP/P2P GO must not use P2P PS
+ * if it has associated clients without P2P PS support.
*/
struct ieee80211_bss_conf {
const u8 *bssid;
@@ -546,7 +549,7 @@ struct ieee80211_bss_conf {
u8 sync_dtim_count;
u32 basic_rates;
struct ieee80211_rate *beacon_rate;
- int mcast_rate[IEEE80211_NUM_BANDS];
+ int mcast_rate[NUM_NL80211_BANDS];
u16 ht_operation_mode;
s32 cqm_rssi_thold;
u32 cqm_rssi_hyst;
@@ -563,6 +566,7 @@ struct ieee80211_bss_conf {
int txpower;
enum nl80211_tx_power_setting txpower_type;
struct ieee80211_p2p_noa_attr p2p_noa_attr;
+ bool allow_p2p_go_ps;
};
/**
@@ -709,6 +713,7 @@ enum mac80211_tx_info_flags {
* @IEEE80211_TX_CTRL_PS_RESPONSE: This frame is a response to a poll
* frame (PS-Poll or uAPSD).
* @IEEE80211_TX_CTRL_RATE_INJECT: This frame is injected with rate information
+ * @IEEE80211_TX_CTRL_AMSDU: This frame is an A-MSDU frame
*
* These flags are used in tx_info->control.flags.
*/
@@ -716,6 +721,7 @@ enum mac80211_tx_control_flags {
IEEE80211_TX_CTRL_PORT_CTRL_PROTO = BIT(0),
IEEE80211_TX_CTRL_PS_RESPONSE = BIT(1),
IEEE80211_TX_CTRL_RATE_INJECT = BIT(2),
+ IEEE80211_TX_CTRL_AMSDU = BIT(3),
};
/*
@@ -932,8 +938,8 @@ struct ieee80211_tx_info {
* @common_ie_len: length of the common_ies
*/
struct ieee80211_scan_ies {
- const u8 *ies[IEEE80211_NUM_BANDS];
- size_t len[IEEE80211_NUM_BANDS];
+ const u8 *ies[NUM_NL80211_BANDS];
+ size_t len[NUM_NL80211_BANDS];
const u8 *common_ies;
size_t common_ie_len;
};
@@ -1001,6 +1007,8 @@ ieee80211_tx_info_clear_status(struct ieee80211_tx_info *info)
* flag indicates that the PN was verified for replay protection.
* Note that this flag is also currently only supported when a frame
* is also decrypted (ie. @RX_FLAG_DECRYPTED must be set)
+ * @RX_FLAG_DUP_VALIDATED: The driver should set this flag if it did
+ * de-duplication by itself.
* @RX_FLAG_FAILED_FCS_CRC: Set this flag if the FCS check failed on
* the frame.
* @RX_FLAG_FAILED_PLCP_CRC: Set this flag if the PCLP check failed on
@@ -1034,6 +1042,8 @@ ieee80211_tx_info_clear_status(struct ieee80211_tx_info *info)
* on this subframe
* @RX_FLAG_AMPDU_DELIM_CRC_KNOWN: The delimiter CRC field is known (the CRC
* is stored in the @ampdu_delimiter_crc field)
+ * @RX_FLAG_MIC_STRIPPED: The MIC was stripped from this packet; decryption
+ * was done by the hardware.
* @RX_FLAG_LDPC: LDPC was used
* @RX_FLAG_ONLY_MONITOR: Report frame only to monitor interfaces without
* processing it in any regular way.
@@ -1058,6 +1068,9 @@ ieee80211_tx_info_clear_status(struct ieee80211_tx_info *info)
* @RX_FLAG_RADIOTAP_VENDOR_DATA: This frame contains vendor-specific
* radiotap data in the skb->data (before the frame) as described by
* the &struct ieee80211_vendor_radiotap.
+ * @RX_FLAG_ALLOW_SAME_PN: Allow the same PN as in the previous packet.
+ * This is used for A-MSDU subframes, which can carry the same PN as
+ * the first subframe.
*/
enum mac80211_rx_flags {
RX_FLAG_MMIC_ERROR = BIT(0),
@@ -1091,6 +1104,8 @@ enum mac80211_rx_flags {
RX_FLAG_5MHZ = BIT(29),
RX_FLAG_AMSDU_MORE = BIT(30),
RX_FLAG_RADIOTAP_VENDOR_DATA = BIT(31),
+ RX_FLAG_MIC_STRIPPED = BIT_ULL(32),
+ RX_FLAG_ALLOW_SAME_PN = BIT_ULL(33),
};
#define RX_FLAG_STBC_SHIFT 26
@@ -1120,6 +1135,8 @@ enum mac80211_rx_vht_flags {
*
* @mactime: value in microseconds of the 64-bit Time Synchronization Function
* (TSF) timer when the first data symbol (MPDU) arrived at the hardware.
+ * @boottime_ns: CLOCK_BOOTTIME timestamp the frame was received at; this is
+ * needed only for beacons and probe responses that update the scan cache.
* @device_timestamp: arbitrary timestamp for the device, mac80211 doesn't use
* it but can store it and pass it back to the driver for synchronisation
* @band: the active band when this frame was received
@@ -1146,9 +1163,10 @@ enum mac80211_rx_vht_flags {
*/
struct ieee80211_rx_status {
u64 mactime;
+ u64 boottime_ns;
u32 device_timestamp;
u32 ampdu_reference;
- u32 flag;
+ u64 flag;
u16 freq;
u8 vht_flag;
u8 rate_idx;
@@ -1735,10 +1753,12 @@ struct ieee80211_sta_rates {
* size is min(max_amsdu_len, 7935) bytes.
* Both additional HT limits must be enforced by the low level driver.
* This is defined by the spec (IEEE 802.11-2012 section 8.3.2.2 NOTE 2).
+ * @support_p2p_ps: indicates whether the STA supports the P2P PS mechanism.
+ * @max_rc_amsdu_len: Maximum A-MSDU size in bytes recommended by rate control.
* @txq: per-TID data TX queues (if driver uses the TXQ abstraction)
*/
struct ieee80211_sta {
- u32 supp_rates[IEEE80211_NUM_BANDS];
+ u32 supp_rates[NUM_NL80211_BANDS];
u8 addr[ETH_ALEN];
u16 aid;
struct ieee80211_sta_ht_cap ht_cap;
@@ -1755,6 +1775,8 @@ struct ieee80211_sta {
bool mfp;
u8 max_amsdu_subframes;
u16 max_amsdu_len;
+ bool support_p2p_ps;
+ u16 max_rc_amsdu_len;
struct ieee80211_txq *txq[IEEE80211_NUM_TIDS];
@@ -1968,6 +1990,18 @@ struct ieee80211_txq {
* order and does not need to manage its own reorder buffer or BA session
* timeout.
*
+ * @IEEE80211_HW_USES_RSS: The device uses RSS and thus requires parallel RX,
+ * which implies using per-CPU station statistics.
+ *
+ * @IEEE80211_HW_TX_AMSDU: Hardware (or driver) supports software aggregated
+ * A-MSDU frames. Requires software tx queueing and fast-xmit support.
+ * When not using minstrel/minstrel_ht rate control, the driver must
+ * limit the maximum A-MSDU size based on the current tx rate by setting
+ * max_rc_amsdu_len in struct ieee80211_sta.
+ *
+ * @IEEE80211_HW_TX_FRAG_LIST: Hardware (or driver) supports sending frag_list
+ * skbs, needed for zero-copy software A-MSDU.
+ *
* @NUM_IEEE80211_HW_FLAGS: number of hardware flags, used for sizing arrays
*/
enum ieee80211_hw_flags {
@@ -2005,6 +2039,9 @@ enum ieee80211_hw_flags {
IEEE80211_HW_BEACON_TX_STATUS,
IEEE80211_HW_NEEDS_UNIQUE_STA_ADDR,
IEEE80211_HW_SUPPORTS_REORDERING_BUFFER,
+ IEEE80211_HW_USES_RSS,
+ IEEE80211_HW_TX_AMSDU,
+ IEEE80211_HW_TX_FRAG_LIST,
/* keep last, obviously */
NUM_IEEE80211_HW_FLAGS
@@ -2077,6 +2114,9 @@ enum ieee80211_hw_flags {
* size is smaller (an example is LinkSys WRT120N with FW v1.0.07
* build 002 Jun 18 2012).
*
+ * @max_tx_fragments: maximum number of tx buffers per (A)-MSDU, sum
+ * of 1 + skb_shinfo(skb)->nr_frags for each skb in the frag_list.
+ *
* @offchannel_tx_hw_queue: HW queue ID to use for offchannel TX
* (if %IEEE80211_HW_QUEUE_CONTROL is set)
*
@@ -2131,6 +2171,7 @@ struct ieee80211_hw {
u8 max_rate_tries;
u8 max_rx_aggregation_subframes;
u8 max_tx_aggregation_subframes;
+ u8 max_tx_fragments;
u8 offchannel_tx_hw_queue;
u8 radiotap_mcs_details;
u16 radiotap_vht_details;
@@ -3348,6 +3389,10 @@ enum ieee80211_reconfig_type {
* the function call.
*
* @wake_tx_queue: Called when new packets have been added to the queue.
+ * @sync_rx_queues: Process all pending frames in RSS queues. This is a
+ * synchronization point that is needed when the driver may still have,
+ * in its RSS queues, pending frames that were received prior to the
+ * control path action currently being taken (e.g. disassociation) but
+ * have not been processed yet.
*/
struct ieee80211_ops {
void (*tx)(struct ieee80211_hw *hw,
@@ -3585,6 +3630,7 @@ struct ieee80211_ops {
void (*wake_tx_queue)(struct ieee80211_hw *hw,
struct ieee80211_txq *txq);
+ void (*sync_rx_queues)(struct ieee80211_hw *hw);
};
/**
@@ -3838,11 +3884,12 @@ void ieee80211_restart_hw(struct ieee80211_hw *hw);
* This function must be called with BHs disabled.
*
* @hw: the hardware this frame came in on
+ * @sta: the station the frame was received from, or %NULL
* @skb: the buffer to receive, owned by mac80211 after this call
* @napi: the NAPI context
*/
-void ieee80211_rx_napi(struct ieee80211_hw *hw, struct sk_buff *skb,
- struct napi_struct *napi);
+void ieee80211_rx_napi(struct ieee80211_hw *hw, struct ieee80211_sta *sta,
+ struct sk_buff *skb, struct napi_struct *napi);
/**
* ieee80211_rx - receive frame
@@ -3866,7 +3913,7 @@ void ieee80211_rx_napi(struct ieee80211_hw *hw, struct sk_buff *skb,
*/
static inline void ieee80211_rx(struct ieee80211_hw *hw, struct sk_buff *skb)
{
- ieee80211_rx_napi(hw, skb, NULL);
+ ieee80211_rx_napi(hw, NULL, skb, NULL);
}
/**
@@ -3949,6 +3996,33 @@ static inline int ieee80211_sta_ps_transition_ni(struct ieee80211_sta *sta,
return ret;
}
+/**
+ * ieee80211_sta_pspoll - PS-Poll frame received
+ * @sta: currently connected station
+ *
+ * When operating in AP mode with the %IEEE80211_HW_AP_LINK_PS flag set,
+ * use this function to inform mac80211 that a PS-Poll frame from a
+ * connected station was received.
+ * This must be used in conjunction with ieee80211_sta_ps_transition()
+ * and possibly ieee80211_sta_uapsd_trigger(); calls to all three must
+ * be serialized.
+ */
+void ieee80211_sta_pspoll(struct ieee80211_sta *sta);
+
+/**
+ * ieee80211_sta_uapsd_trigger - (potential) U-APSD trigger frame received
+ * @sta: currently connected station
+ * @tid: TID of the received (potential) trigger frame
+ *
+ * When operating in AP mode with the %IEEE80211_HW_AP_LINK_PS flag set,
+ * use this function to inform mac80211 that a (potential) trigger frame
+ * from a connected station was received.
+ * This must be used in conjunction with ieee80211_sta_ps_transition()
+ * and possibly ieee80211_sta_pspoll(); calls to all three must be
+ * serialized.
+ */
+void ieee80211_sta_uapsd_trigger(struct ieee80211_sta *sta, u8 tid);
+
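A minimal sketch (not part of this patch) of how a driver advertising %IEEE80211_HW_AP_LINK_PS might feed the two new hooks together with ieee80211_sta_ps_transition(); struct my_rx_event, its fields and my_handle_ps_event() are hypothetical.

#include <net/mac80211.h>

/* Hypothetical descriptor for a PS-related event reported by the device. */
struct my_rx_event {
	bool is_pspoll;		/* PS-Poll frame was received */
	bool is_uapsd_trigger;	/* potential U-APSD trigger frame was received */
	bool sleeping;		/* new powersave state otherwise */
	u8 tid;			/* TID of the trigger frame */
};

/* The caller must serialize these calls per station, as documented above. */
static void my_handle_ps_event(struct ieee80211_sta *sta,
			       const struct my_rx_event *ev)
{
	if (ev->is_pspoll)
		ieee80211_sta_pspoll(sta);
	else if (ev->is_uapsd_trigger)
		ieee80211_sta_uapsd_trigger(sta, ev->tid);
	else
		ieee80211_sta_ps_transition(sta, ev->sleeping);	/* return value check elided */
}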
/*
* The TX headroom reserved by mac80211 for its own tx_status functions.
* This is enough for the radiotap header.
@@ -4361,7 +4435,7 @@ __le16 ieee80211_ctstoself_duration(struct ieee80211_hw *hw,
*/
__le16 ieee80211_generic_frame_duration(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
- enum ieee80211_band band,
+ enum nl80211_band band,
size_t frame_len,
struct ieee80211_rate *rate);
@@ -5314,7 +5388,7 @@ struct rate_control_ops {
};
static inline int rate_supported(struct ieee80211_sta *sta,
- enum ieee80211_band band,
+ enum nl80211_band band,
int index)
{
return (sta == NULL || sta->supp_rates[band] & BIT(index));
diff --git a/include/net/mac802154.h b/include/net/mac802154.h
index 6cd7a70706a9..e465c8551ac3 100644
--- a/include/net/mac802154.h
+++ b/include/net/mac802154.h
@@ -288,6 +288,16 @@ static inline void ieee802154_le16_to_be16(void *be16_dst, const void *le16_src)
}
/**
+ * ieee802154_be16_to_le16 - copies and converts be16 to le16
+ * @le16_dst: le16 destination pointer
+ * @be16_src: be16 source pointer
+ */
+static inline void ieee802154_be16_to_le16(void *le16_dst, const void *be16_src)
+{
+ put_unaligned_le16(get_unaligned_be16(be16_src), le16_dst);
+}
+
+/**
* ieee802154_alloc_hw - Allocate a new hardware device
*
* This must be called once for each hardware device. The returned pointer
diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h
index fde4068eec0b..dd78bea227c8 100644
--- a/include/net/netfilter/nf_conntrack.h
+++ b/include/net/netfilter/nf_conntrack.h
@@ -289,8 +289,6 @@ struct kernel_param;
int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp);
extern unsigned int nf_conntrack_htable_size;
extern unsigned int nf_conntrack_max;
-extern unsigned int nf_conntrack_hash_rnd;
-void init_nf_conntrack_hash_rnd(void);
struct nf_conn *nf_ct_tmpl_alloc(struct net *net,
const struct nf_conntrack_zone *zone,
diff --git a/include/net/netfilter/nf_conntrack_core.h b/include/net/netfilter/nf_conntrack_core.h
index 62e17d1319ff..3e2f3328945c 100644
--- a/include/net/netfilter/nf_conntrack_core.h
+++ b/include/net/netfilter/nf_conntrack_core.h
@@ -81,6 +81,7 @@ print_tuple(struct seq_file *s, const struct nf_conntrack_tuple *tuple,
#define CONNTRACK_LOCKS 1024
+extern struct hlist_nulls_head *nf_conntrack_hash;
extern spinlock_t nf_conntrack_locks[CONNTRACK_LOCKS];
void nf_conntrack_lock(spinlock_t *lock);
diff --git a/include/net/netfilter/nf_conntrack_ecache.h b/include/net/netfilter/nf_conntrack_ecache.h
index 57c880378443..fa36447371c6 100644
--- a/include/net/netfilter/nf_conntrack_ecache.h
+++ b/include/net/netfilter/nf_conntrack_ecache.h
@@ -73,6 +73,8 @@ void nf_conntrack_unregister_notifier(struct net *net,
struct nf_ct_event_notifier *nb);
void nf_ct_deliver_cached_events(struct nf_conn *ct);
+int nf_conntrack_eventmask_report(unsigned int eventmask, struct nf_conn *ct,
+ u32 portid, int report);
static inline void
nf_conntrack_event_cache(enum ip_conntrack_events event, struct nf_conn *ct)
@@ -91,69 +93,25 @@ nf_conntrack_event_cache(enum ip_conntrack_events event, struct nf_conn *ct)
}
static inline int
-nf_conntrack_eventmask_report(unsigned int eventmask,
- struct nf_conn *ct,
- u32 portid,
- int report)
-{
- int ret = 0;
- struct net *net = nf_ct_net(ct);
- struct nf_ct_event_notifier *notify;
- struct nf_conntrack_ecache *e;
-
- rcu_read_lock();
- notify = rcu_dereference(net->ct.nf_conntrack_event_cb);
- if (notify == NULL)
- goto out_unlock;
-
- e = nf_ct_ecache_find(ct);
- if (e == NULL)
- goto out_unlock;
-
- if (nf_ct_is_confirmed(ct) && !nf_ct_is_dying(ct)) {
- struct nf_ct_event item = {
- .ct = ct,
- .portid = e->portid ? e->portid : portid,
- .report = report
- };
- /* This is a resent of a destroy event? If so, skip missed */
- unsigned long missed = e->portid ? 0 : e->missed;
-
- if (!((eventmask | missed) & e->ctmask))
- goto out_unlock;
-
- ret = notify->fcn(eventmask | missed, &item);
- if (unlikely(ret < 0 || missed)) {
- spin_lock_bh(&ct->lock);
- if (ret < 0) {
- /* This is a destroy event that has been
- * triggered by a process, we store the PORTID
- * to include it in the retransmission. */
- if (eventmask & (1 << IPCT_DESTROY) &&
- e->portid == 0 && portid != 0)
- e->portid = portid;
- else
- e->missed |= eventmask;
- } else
- e->missed &= ~missed;
- spin_unlock_bh(&ct->lock);
- }
- }
-out_unlock:
- rcu_read_unlock();
- return ret;
-}
-
-static inline int
nf_conntrack_event_report(enum ip_conntrack_events event, struct nf_conn *ct,
u32 portid, int report)
{
+ const struct net *net = nf_ct_net(ct);
+
+ if (!rcu_access_pointer(net->ct.nf_conntrack_event_cb))
+ return 0;
+
return nf_conntrack_eventmask_report(1 << event, ct, portid, report);
}
static inline int
nf_conntrack_event(enum ip_conntrack_events event, struct nf_conn *ct)
{
+ const struct net *net = nf_ct_net(ct);
+
+ if (!rcu_access_pointer(net->ct.nf_conntrack_event_cb))
+ return 0;
+
return nf_conntrack_eventmask_report(1 << event, ct, 0, 0);
}
@@ -172,43 +130,9 @@ int nf_ct_expect_register_notifier(struct net *net,
void nf_ct_expect_unregister_notifier(struct net *net,
struct nf_exp_event_notifier *nb);
-static inline void
-nf_ct_expect_event_report(enum ip_conntrack_expect_events event,
- struct nf_conntrack_expect *exp,
- u32 portid,
- int report)
-{
- struct net *net = nf_ct_exp_net(exp);
- struct nf_exp_event_notifier *notify;
- struct nf_conntrack_ecache *e;
-
- rcu_read_lock();
- notify = rcu_dereference(net->ct.nf_expect_event_cb);
- if (notify == NULL)
- goto out_unlock;
-
- e = nf_ct_ecache_find(exp->master);
- if (e == NULL)
- goto out_unlock;
-
- if (e->expmask & (1 << event)) {
- struct nf_exp_event item = {
- .exp = exp,
- .portid = portid,
- .report = report
- };
- notify->fcn(1 << event, &item);
- }
-out_unlock:
- rcu_read_unlock();
-}
-
-static inline void
-nf_ct_expect_event(enum ip_conntrack_expect_events event,
- struct nf_conntrack_expect *exp)
-{
- nf_ct_expect_event_report(event, exp, 0, 0);
-}
+void nf_ct_expect_event_report(enum ip_conntrack_expect_events event,
+ struct nf_conntrack_expect *exp,
+ u32 portid, int report);
int nf_conntrack_ecache_pernet_init(struct net *net);
void nf_conntrack_ecache_pernet_fini(struct net *net);
@@ -245,8 +169,6 @@ static inline int nf_conntrack_event_report(enum ip_conntrack_events event,
u32 portid,
int report) { return 0; }
static inline void nf_ct_deliver_cached_events(const struct nf_conn *ct) {}
-static inline void nf_ct_expect_event(enum ip_conntrack_expect_events event,
- struct nf_conntrack_expect *exp) {}
static inline void nf_ct_expect_event_report(enum ip_conntrack_expect_events e,
struct nf_conntrack_expect *exp,
u32 portid,
diff --git a/include/net/netfilter/nf_conntrack_expect.h b/include/net/netfilter/nf_conntrack_expect.h
index dce56f09ac9a..5ed33ea4718e 100644
--- a/include/net/netfilter/nf_conntrack_expect.h
+++ b/include/net/netfilter/nf_conntrack_expect.h
@@ -10,6 +10,7 @@
extern unsigned int nf_ct_expect_hsize;
extern unsigned int nf_ct_expect_max;
+extern struct hlist_head *nf_ct_expect_hash;
struct nf_conntrack_expect {
/* Conntrack expectation list member */
diff --git a/include/net/netfilter/nf_conntrack_l4proto.h b/include/net/netfilter/nf_conntrack_l4proto.h
index 956d8a6ac069..1a5fb36f165f 100644
--- a/include/net/netfilter/nf_conntrack_l4proto.h
+++ b/include/net/netfilter/nf_conntrack_l4proto.h
@@ -23,6 +23,9 @@ struct nf_conntrack_l4proto {
/* L4 Protocol number. */
u_int8_t l4proto;
+ /* Resolve clashes on insertion races. */
+ bool allow_clash;
+
/* Try to fill in the third arg: dataoff is offset past network protocol
hdr. Return true if possible. */
bool (*pkt_to_tuple)(const struct sk_buff *skb, unsigned int dataoff,
diff --git a/include/net/netfilter/nf_conntrack_labels.h b/include/net/netfilter/nf_conntrack_labels.h
index 7e2b1d025f50..c5f8fc736b3d 100644
--- a/include/net/netfilter/nf_conntrack_labels.h
+++ b/include/net/netfilter/nf_conntrack_labels.h
@@ -45,7 +45,6 @@ static inline struct nf_conn_labels *nf_ct_labels_ext_add(struct nf_conn *ct)
#endif
}
-bool nf_connlabel_match(const struct nf_conn *ct, u16 bit);
int nf_connlabel_set(struct nf_conn *ct, u16 bit);
int nf_connlabels_replace(struct nf_conn *ct,
@@ -54,11 +53,11 @@ int nf_connlabels_replace(struct nf_conn *ct,
#ifdef CONFIG_NF_CONNTRACK_LABELS
int nf_conntrack_labels_init(void);
void nf_conntrack_labels_fini(void);
-int nf_connlabels_get(struct net *net, unsigned int n_bits);
+int nf_connlabels_get(struct net *net, unsigned int bit);
void nf_connlabels_put(struct net *net);
#else
static inline int nf_conntrack_labels_init(void) { return 0; }
static inline void nf_conntrack_labels_fini(void) {}
-static inline int nf_connlabels_get(struct net *net, unsigned int n_bits) { return 0; }
+static inline int nf_connlabels_get(struct net *net, unsigned int bit) { return 0; }
static inline void nf_connlabels_put(struct net *net) {}
#endif
diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
index f6b1daf2e698..092235458691 100644
--- a/include/net/netfilter/nf_tables.h
+++ b/include/net/netfilter/nf_tables.h
@@ -303,7 +303,7 @@ void nft_unregister_set(struct nft_set_ops *ops);
struct nft_set {
struct list_head list;
struct list_head bindings;
- char name[IFNAMSIZ];
+ char name[NFT_SET_MAXNAMELEN];
u32 ktype;
u32 dtype;
u32 size;
diff --git a/include/net/netlink.h b/include/net/netlink.h
index 0e3172751755..254a0fc01800 100644
--- a/include/net/netlink.h
+++ b/include/net/netlink.h
@@ -98,14 +98,17 @@
* nla_put_u8(skb, type, value) add u8 attribute to skb
* nla_put_u16(skb, type, value) add u16 attribute to skb
* nla_put_u32(skb, type, value) add u32 attribute to skb
- * nla_put_u64(skb, type, value) add u64 attribute to skb
+ * nla_put_u64_64bits(skb, type,
+ * value, padattr) add u64 attribute to skb
* nla_put_s8(skb, type, value) add s8 attribute to skb
* nla_put_s16(skb, type, value) add s16 attribute to skb
* nla_put_s32(skb, type, value) add s32 attribute to skb
- * nla_put_s64(skb, type, value) add s64 attribute to skb
+ * nla_put_s64(skb, type, value,
+ * padattr) add s64 attribute to skb
* nla_put_string(skb, type, str) add string attribute to skb
* nla_put_flag(skb, type) add flag attribute to skb
- * nla_put_msecs(skb, type, jiffies) add msecs attribute to skb
+ * nla_put_msecs(skb, type, jiffies,
+ * padattr) add msecs attribute to skb
* nla_put_in_addr(skb, type, addr) add IPv4 address attribute to skb
* nla_put_in6_addr(skb, type, addr) add IPv6 address attribute to skb
*
@@ -244,13 +247,21 @@ int nla_memcpy(void *dest, const struct nlattr *src, int count);
int nla_memcmp(const struct nlattr *nla, const void *data, size_t size);
int nla_strcmp(const struct nlattr *nla, const char *str);
struct nlattr *__nla_reserve(struct sk_buff *skb, int attrtype, int attrlen);
+struct nlattr *__nla_reserve_64bit(struct sk_buff *skb, int attrtype,
+ int attrlen, int padattr);
void *__nla_reserve_nohdr(struct sk_buff *skb, int attrlen);
struct nlattr *nla_reserve(struct sk_buff *skb, int attrtype, int attrlen);
+struct nlattr *nla_reserve_64bit(struct sk_buff *skb, int attrtype,
+ int attrlen, int padattr);
void *nla_reserve_nohdr(struct sk_buff *skb, int attrlen);
void __nla_put(struct sk_buff *skb, int attrtype, int attrlen,
const void *data);
+void __nla_put_64bit(struct sk_buff *skb, int attrtype, int attrlen,
+ const void *data, int padattr);
void __nla_put_nohdr(struct sk_buff *skb, int attrlen, const void *data);
int nla_put(struct sk_buff *skb, int attrtype, int attrlen, const void *data);
+int nla_put_64bit(struct sk_buff *skb, int attrtype, int attrlen,
+ const void *data, int padattr);
int nla_put_nohdr(struct sk_buff *skb, int attrlen, const void *data);
int nla_append(struct sk_buff *skb, int attrlen, const void *data);
@@ -837,47 +848,56 @@ static inline int nla_put_le32(struct sk_buff *skb, int attrtype, __le32 value)
}
/**
- * nla_put_u64 - Add a u64 netlink attribute to a socket buffer
+ * nla_put_u64_64bit - Add a u64 netlink attribute to a skb and align it
* @skb: socket buffer to add attribute to
* @attrtype: attribute type
* @value: numeric value
+ * @padattr: attribute type for the padding
*/
-static inline int nla_put_u64(struct sk_buff *skb, int attrtype, u64 value)
+static inline int nla_put_u64_64bit(struct sk_buff *skb, int attrtype,
+ u64 value, int padattr)
{
- return nla_put(skb, attrtype, sizeof(u64), &value);
+ return nla_put_64bit(skb, attrtype, sizeof(u64), &value, padattr);
}
/**
- * nla_put_be64 - Add a __be64 netlink attribute to a socket buffer
+ * nla_put_be64 - Add a __be64 netlink attribute to a socket buffer and align it
* @skb: socket buffer to add attribute to
* @attrtype: attribute type
* @value: numeric value
+ * @padattr: attribute type for the padding
*/
-static inline int nla_put_be64(struct sk_buff *skb, int attrtype, __be64 value)
+static inline int nla_put_be64(struct sk_buff *skb, int attrtype, __be64 value,
+ int padattr)
{
- return nla_put(skb, attrtype, sizeof(__be64), &value);
+ return nla_put_64bit(skb, attrtype, sizeof(__be64), &value, padattr);
}
/**
- * nla_put_net64 - Add 64-bit network byte order netlink attribute to a socket buffer
+ * nla_put_net64 - Add 64-bit network byte order nlattr to a skb and align it
* @skb: socket buffer to add attribute to
* @attrtype: attribute type
* @value: numeric value
+ * @padattr: attribute type for the padding
*/
-static inline int nla_put_net64(struct sk_buff *skb, int attrtype, __be64 value)
+static inline int nla_put_net64(struct sk_buff *skb, int attrtype, __be64 value,
+ int padattr)
{
- return nla_put_be64(skb, attrtype | NLA_F_NET_BYTEORDER, value);
+ return nla_put_be64(skb, attrtype | NLA_F_NET_BYTEORDER, value,
+ padattr);
}
/**
- * nla_put_le64 - Add a __le64 netlink attribute to a socket buffer
+ * nla_put_le64 - Add a __le64 netlink attribute to a socket buffer and align it
* @skb: socket buffer to add attribute to
* @attrtype: attribute type
* @value: numeric value
+ * @padattr: attribute type for the padding
*/
-static inline int nla_put_le64(struct sk_buff *skb, int attrtype, __le64 value)
+static inline int nla_put_le64(struct sk_buff *skb, int attrtype, __le64 value,
+ int padattr)
{
- return nla_put(skb, attrtype, sizeof(__le64), &value);
+ return nla_put_64bit(skb, attrtype, sizeof(__le64), &value, padattr);
}
/**
@@ -914,14 +934,16 @@ static inline int nla_put_s32(struct sk_buff *skb, int attrtype, s32 value)
}
/**
- * nla_put_s64 - Add a s64 netlink attribute to a socket buffer
+ * nla_put_s64 - Add a s64 netlink attribute to a socket buffer and align it
* @skb: socket buffer to add attribute to
* @attrtype: attribute type
* @value: numeric value
+ * @padattr: attribute type for the padding
*/
-static inline int nla_put_s64(struct sk_buff *skb, int attrtype, s64 value)
+static inline int nla_put_s64(struct sk_buff *skb, int attrtype, s64 value,
+ int padattr)
{
- return nla_put(skb, attrtype, sizeof(s64), &value);
+ return nla_put_64bit(skb, attrtype, sizeof(s64), &value, padattr);
}
/**
@@ -947,16 +969,18 @@ static inline int nla_put_flag(struct sk_buff *skb, int attrtype)
}
/**
- * nla_put_msecs - Add a msecs netlink attribute to a socket buffer
+ * nla_put_msecs - Add a msecs netlink attribute to a skb and align it
* @skb: socket buffer to add attribute to
* @attrtype: attribute type
* @njiffies: number of jiffies to convert to msecs
+ * @padattr: attribute type for the padding
*/
static inline int nla_put_msecs(struct sk_buff *skb, int attrtype,
- unsigned long njiffies)
+ unsigned long njiffies, int padattr)
{
u64 tmp = jiffies_to_msecs(njiffies);
- return nla_put(skb, attrtype, sizeof(u64), &tmp);
+
+ return nla_put_64bit(skb, attrtype, sizeof(u64), &tmp, padattr);
}
/**
@@ -1231,6 +1255,61 @@ static inline int nla_validate_nested(const struct nlattr *start, int maxtype,
}
/**
+ * nla_need_padding_for_64bit - test 64-bit alignment of the next attribute
+ * @skb: socket buffer the message is stored in
+ *
+ * Return true if padding is needed to align the next attribute (nla_data()) to
+ * a 64-bit aligned area.
+ */
+static inline bool nla_need_padding_for_64bit(struct sk_buff *skb)
+{
+#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+ /* The nlattr header is 4 bytes in size, that's why we test
+ * if the skb->data _is_ aligned. A NOP attribute, plus
+ * nlattr header for next attribute, will make nla_data()
+ * 8-byte aligned.
+ */
+ if (IS_ALIGNED((unsigned long)skb_tail_pointer(skb), 8))
+ return true;
+#endif
+ return false;
+}
+
+/**
+ * nla_align_64bit - 64-bit align the nla_data() of next attribute
+ * @skb: socket buffer the message is stored in
+ * @padattr: attribute type for the padding
+ *
+ * Conditionally emit a padding netlink attribute in order to make
+ * the next attribute we emit have a 64-bit aligned nla_data() area.
+ * This will only be done in architectures which do not have
+ * CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS defined.
+ *
+ * Returns zero on success or a negative error code.
+ */
+static inline int nla_align_64bit(struct sk_buff *skb, int padattr)
+{
+ if (nla_need_padding_for_64bit(skb) &&
+ !nla_reserve(skb, padattr, 0))
+ return -EMSGSIZE;
+
+ return 0;
+}
+
+/**
+ * nla_total_size_64bit - total length of attribute including padding
+ * @payload: length of payload
+ */
+static inline int nla_total_size_64bit(int payload)
+{
+ return NLA_ALIGN(nla_attr_size(payload))
+#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+ + NLA_ALIGN(nla_attr_size(0))
+#endif
+ ;
+}
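A hedged sketch (not part of this patch) of how a netlink dumper is expected to use the new 64-bit helpers; MYATTR_BYTES and MYATTR_PAD are made-up attribute types standing in for a family's real u64 attribute and its zero-length pad attribute.

#include <net/netlink.h>

enum {
	MYATTR_UNSPEC,
	MYATTR_BYTES,	/* u64 counter, wants a 64-bit aligned nla_data() */
	MYATTR_PAD,	/* zero-length attribute consumed by the pad helpers */
	__MYATTR_MAX,
};

static size_t my_stats_msg_size(void)
{
	/* Accounts for the extra pad attribute on architectures without
	 * efficient unaligned access.
	 */
	return nla_total_size_64bit(sizeof(u64));
}

static int my_fill_stats(struct sk_buff *skb, u64 bytes)
{
	/* Emits MYATTR_PAD first whenever the skb tail is not 64-bit
	 * aligned, then the u64 payload.
	 */
	return nla_put_u64_64bit(skb, MYATTR_BYTES, bytes, MYATTR_PAD);
}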
+
+/**
* nla_for_each_attr - iterate over a stream of attributes
* @pos: loop counter, set to current attribute
* @head: head of attribute stream
diff --git a/include/net/netns/conntrack.h b/include/net/netns/conntrack.h
index 723b61c82b3f..38b1a80517f0 100644
--- a/include/net/netns/conntrack.h
+++ b/include/net/netns/conntrack.h
@@ -84,7 +84,6 @@ struct netns_ct {
struct ctl_table_header *event_sysctl_header;
struct ctl_table_header *helper_sysctl_header;
#endif
- char *slabname;
unsigned int sysctl_log_invalid; /* Log invalid packets */
int sysctl_events;
int sysctl_acct;
@@ -93,11 +92,6 @@ struct netns_ct {
int sysctl_tstamp;
int sysctl_checksum;
- unsigned int htable_size;
- seqcount_t generation;
- struct kmem_cache *nf_conntrack_cachep;
- struct hlist_nulls_head *hash;
- struct hlist_head *expect_hash;
struct ct_pcpu __percpu *pcpu_lists;
struct ip_conntrack_stat __percpu *stat;
struct nf_ct_event_notifier __rcu *nf_conntrack_event_cb;
@@ -107,9 +101,5 @@ struct netns_ct {
unsigned int labels_used;
u8 label_words;
#endif
-#ifdef CONFIG_NF_NAT_NEEDED
- struct hlist_head *nat_bysource;
- unsigned int nat_htable_size;
-#endif
};
#endif
diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
index a69cde3ce460..d061ffeb1e71 100644
--- a/include/net/netns/ipv4.h
+++ b/include/net/netns/ipv4.h
@@ -133,6 +133,9 @@ struct netns_ipv4 {
struct fib_rules_ops *mr_rules_ops;
#endif
#endif
+#ifdef CONFIG_IP_ROUTE_MULTIPATH
+ int sysctl_fib_multipath_use_neigh;
+#endif
atomic_t rt_genid;
};
#endif
diff --git a/include/net/netns/xfrm.h b/include/net/netns/xfrm.h
index 730d82ad6ee5..24cd3949a9a4 100644
--- a/include/net/netns/xfrm.h
+++ b/include/net/netns/xfrm.h
@@ -80,6 +80,7 @@ struct netns_xfrm {
struct flow_cache flow_cache_global;
atomic_t flow_cache_genid;
struct list_head flow_cache_gc_list;
+ atomic_t flow_cache_gc_count;
spinlock_t flow_cache_gc_lock;
struct work_struct flow_cache_gc_work;
struct work_struct flow_cache_flush_work;
diff --git a/include/net/nfc/nci_core.h b/include/net/nfc/nci_core.h
index 57ce24fb0047..87499b6b35d6 100644
--- a/include/net/nfc/nci_core.h
+++ b/include/net/nfc/nci_core.h
@@ -109,7 +109,13 @@ struct nci_ops {
struct nci_conn_info {
struct list_head list;
- __u8 id; /* can be an RF Discovery ID or an NFCEE ID */
+ /* NCI specification 4.4.2 Connection Creation
+ * The combination of destination type and destination specific
+ * parameters shall uniquely identify a single destination for the
+ * Logical Connection
+ */
+ struct dest_spec_params *dest_params;
+ __u8 dest_type;
__u8 conn_id;
__u8 max_pkt_payload_len;
@@ -260,7 +266,9 @@ struct nci_dev {
__u32 manufact_specific_info;
/* Save RF Discovery ID or NFCEE ID under conn_create */
- __u8 cur_id;
+ struct dest_spec_params cur_params;
+ /* Save destination type under conn_create */
+ __u8 cur_dest_type;
/* stored during nci_data_exchange */
struct sk_buff *rx_data_reassembly;
@@ -298,6 +306,8 @@ int nci_core_conn_create(struct nci_dev *ndev, u8 destination_type,
size_t params_len,
struct core_conn_create_dest_spec_params *params);
int nci_core_conn_close(struct nci_dev *ndev, u8 conn_id);
+int nci_nfcc_loopback(struct nci_dev *ndev, void *data, size_t data_len,
+ struct sk_buff **resp);
struct nci_hci_dev *nci_hci_allocate(struct nci_dev *ndev);
int nci_hci_send_event(struct nci_dev *ndev, u8 gate, u8 event,
@@ -378,7 +388,8 @@ void nci_clear_target_list(struct nci_dev *ndev);
void nci_req_complete(struct nci_dev *ndev, int result);
struct nci_conn_info *nci_get_conn_info_by_conn_id(struct nci_dev *ndev,
int conn_id);
-int nci_get_conn_info_by_id(struct nci_dev *ndev, u8 id);
+int nci_get_conn_info_by_dest_type_params(struct nci_dev *ndev, u8 dest_type,
+ struct dest_spec_params *params);
/* ----- NCI status code ----- */
int nci_to_errno(__u8 code);
diff --git a/include/net/nl802154.h b/include/net/nl802154.h
index 32cb3e591e07..fcab4de49951 100644
--- a/include/net/nl802154.h
+++ b/include/net/nl802154.h
@@ -138,6 +138,8 @@ enum nl802154_attrs {
NL802154_ATTR_SEC_KEY,
#endif /* CONFIG_IEEE802154_NL802154_EXPERIMENTAL */
+ NL802154_ATTR_PAD,
+
__NL802154_ATTR_AFTER_LAST,
NL802154_ATTR_MAX = __NL802154_ATTR_AFTER_LAST - 1
};
@@ -295,6 +297,7 @@ enum nl802154_dev_addr_attrs {
NL802154_DEV_ADDR_ATTR_MODE,
NL802154_DEV_ADDR_ATTR_SHORT,
NL802154_DEV_ADDR_ATTR_EXTENDED,
+ NL802154_DEV_ADDR_ATTR_PAD,
/* keep last */
__NL802154_DEV_ADDR_ATTR_AFTER_LAST,
@@ -320,6 +323,7 @@ enum nl802154_key_id_attrs {
NL802154_KEY_ID_ATTR_IMPLICIT,
NL802154_KEY_ID_ATTR_SOURCE_SHORT,
NL802154_KEY_ID_ATTR_SOURCE_EXTENDED,
+ NL802154_KEY_ID_ATTR_PAD,
/* keep last */
__NL802154_KEY_ID_ATTR_AFTER_LAST,
@@ -402,6 +406,7 @@ enum nl802154_dev {
NL802154_DEV_ATTR_EXTENDED_ADDR,
NL802154_DEV_ATTR_SECLEVEL_EXEMPT,
NL802154_DEV_ATTR_KEY_MODE,
+ NL802154_DEV_ATTR_PAD,
/* keep last */
__NL802154_DEV_ATTR_AFTER_LAST,
@@ -414,6 +419,7 @@ enum nl802154_devkey {
NL802154_DEVKEY_ATTR_FRAME_COUNTER,
NL802154_DEVKEY_ATTR_EXTENDED_ADDR,
NL802154_DEVKEY_ATTR_ID,
+ NL802154_DEVKEY_ATTR_PAD,
/* keep last */
__NL802154_DEVKEY_ATTR_AFTER_LAST,
diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h
index caa5e18636df..0f7efa88f210 100644
--- a/include/net/pkt_cls.h
+++ b/include/net/pkt_cls.h
@@ -392,9 +392,6 @@ struct tc_cls_u32_offload {
};
};
-/* tca flags definitions */
-#define TCA_CLS_FLAGS_SKIP_HW 1
-
static inline bool tc_should_offload(struct net_device *dev, u32 flags)
{
if (!(dev->features & NETIF_F_HW_TC))
@@ -409,9 +406,27 @@ static inline bool tc_should_offload(struct net_device *dev, u32 flags)
return true;
}
+static inline bool tc_skip_sw(u32 flags)
+{
+ return (flags & TCA_CLS_FLAGS_SKIP_SW) ? true : false;
+}
+
+/* SKIP_HW and SKIP_SW are mutually exclusive flags. */
+static inline bool tc_flags_valid(u32 flags)
+{
+ if (flags & ~(TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW))
+ return false;
+
+ if (!(flags ^ (TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW)))
+ return false;
+
+ return true;
+}
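A hedged sketch (not part of this patch) of the intended call pattern in a classifier setup path; my_cls_change() and its error handling are illustrative and not taken from any in-tree classifier.

#include <linux/errno.h>
#include <net/pkt_cls.h>

static int my_cls_change(struct net_device *dev, u32 flags)
{
	/* Unknown bits, or SKIP_HW and SKIP_SW together, are rejected
	 * early: the latter would leave the filter with nowhere to run.
	 */
	if (!tc_flags_valid(flags))
		return -EINVAL;

	if (tc_should_offload(dev, flags)) {
		/* ... program the hardware here ... */
	} else if (tc_skip_sw(flags)) {
		/* SKIP_SW was requested but offloading is not possible */
		return -EINVAL;
	}

	return 0;
}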
+
enum tc_fl_command {
TC_CLSFLOWER_REPLACE,
TC_CLSFLOWER_DESTROY,
+ TC_CLSFLOWER_STATS,
};
struct tc_cls_flower_offload {
diff --git a/include/net/protocol.h b/include/net/protocol.h
index da689f5432de..bf36ca34af7a 100644
--- a/include/net/protocol.h
+++ b/include/net/protocol.h
@@ -107,9 +107,6 @@ int inet_del_offload(const struct net_offload *prot, unsigned char num);
void inet_register_protosw(struct inet_protosw *p);
void inet_unregister_protosw(struct inet_protosw *p);
-int udp_add_offload(struct net *net, struct udp_offload *prot);
-void udp_del_offload(struct udp_offload *prot);
-
#if IS_ENABLED(CONFIG_IPV6)
int inet6_add_protocol(const struct inet6_protocol *prot, unsigned char num);
int inet6_del_protocol(const struct inet6_protocol *prot, unsigned char num);
diff --git a/include/net/request_sock.h b/include/net/request_sock.h
index f49759decb28..6ebe13eb1c4c 100644
--- a/include/net/request_sock.h
+++ b/include/net/request_sock.h
@@ -85,24 +85,23 @@ reqsk_alloc(const struct request_sock_ops *ops, struct sock *sk_listener,
struct request_sock *req;
req = kmem_cache_alloc(ops->slab, GFP_ATOMIC | __GFP_NOWARN);
-
- if (req) {
- req->rsk_ops = ops;
- if (attach_listener) {
- sock_hold(sk_listener);
- req->rsk_listener = sk_listener;
- } else {
- req->rsk_listener = NULL;
+ if (!req)
+ return NULL;
+ req->rsk_listener = NULL;
+ if (attach_listener) {
+ if (unlikely(!atomic_inc_not_zero(&sk_listener->sk_refcnt))) {
+ kmem_cache_free(ops->slab, req);
+ return NULL;
}
- req_to_sk(req)->sk_prot = sk_listener->sk_prot;
- sk_node_init(&req_to_sk(req)->sk_node);
- sk_tx_queue_clear(req_to_sk(req));
- req->saved_syn = NULL;
- /* Following is temporary. It is coupled with debugging
- * helpers in reqsk_put() & reqsk_free()
- */
- atomic_set(&req->rsk_refcnt, 0);
+ req->rsk_listener = sk_listener;
}
+ req->rsk_ops = ops;
+ req_to_sk(req)->sk_prot = sk_listener->sk_prot;
+ sk_node_init(&req_to_sk(req)->sk_node);
+ sk_tx_queue_clear(req_to_sk(req));
+ req->saved_syn = NULL;
+ atomic_set(&req->rsk_refcnt, 0);
+
return req;
}
diff --git a/include/net/route.h b/include/net/route.h
index 9b0a523bb428..ad777d79af94 100644
--- a/include/net/route.h
+++ b/include/net/route.h
@@ -209,6 +209,9 @@ unsigned int inet_addr_type_dev_table(struct net *net,
void ip_rt_multicast_event(struct in_device *);
int ip_rt_ioctl(struct net *, unsigned int cmd, void __user *arg);
void ip_rt_get_source(u8 *src, struct sk_buff *skb, struct rtable *rt);
+struct rtable *rt_dst_alloc(struct net_device *dev,
+ unsigned int flags, u16 type,
+ bool nopolicy, bool noxfrm, bool will_cache);
struct in_ifaddr;
void fib_add_ifaddr(struct in_ifaddr *);
@@ -322,10 +325,11 @@ static inline struct rtable *ip_route_newports(struct flowi4 *fl4, struct rtable
static inline int inet_iif(const struct sk_buff *skb)
{
- int iif = skb_rtable(skb)->rt_iif;
+ struct rtable *rt = skb_rtable(skb);
+
+ if (rt && rt->rt_iif)
+ return rt->rt_iif;
- if (iif)
- return iif;
return skb->skb_iif;
}
diff --git a/include/net/rtnetlink.h b/include/net/rtnetlink.h
index 2f87c1ba13de..006a7b81d758 100644
--- a/include/net/rtnetlink.h
+++ b/include/net/rtnetlink.h
@@ -47,6 +47,9 @@ static inline int rtnl_msg_family(const struct nlmsghdr *nlh)
* @get_num_rx_queues: Function to determine number of receive queues
* to create when creating a new device.
* @get_link_net: Function to get the i/o netns of the device
+ * @get_linkxstats_size: Function to calculate the required room for
+ * dumping device-specific extended link stats
+ * @fill_linkxstats: Function to dump device-specific extended link stats
*/
struct rtnl_link_ops {
struct list_head list;
@@ -95,6 +98,10 @@ struct rtnl_link_ops {
const struct net_device *dev,
const struct net_device *slave_dev);
struct net *(*get_link_net)(const struct net_device *dev);
+ size_t (*get_linkxstats_size)(const struct net_device *dev);
+ int (*fill_linkxstats)(struct sk_buff *skb,
+ const struct net_device *dev,
+ int *prividx);
};
int __rtnl_link_register(struct rtnl_link_ops *ops);
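A hedged sketch (not part of this patch) of a driver advertising the two new extended-link-stats callbacks; all my_* names are hypothetical and the fill routine is left as a stub.

#include <net/netlink.h>
#include <net/rtnetlink.h>

static size_t my_get_linkxstats_size(const struct net_device *dev)
{
	return nla_total_size_64bit(sizeof(u64));	/* room for one u64 counter */
}

static int my_fill_linkxstats(struct sk_buff *skb,
			      const struct net_device *dev,
			      int *prividx)
{
	/* Device-specific attributes would be emitted with nla_put*() here;
	 * *prividx lets a dump that ran out of skb space resume later.
	 */
	return 0;
}

static struct rtnl_link_ops my_link_ops = {
	.kind			= "mydev",
	.get_linkxstats_size	= my_get_linkxstats_size,
	.fill_linkxstats	= my_fill_linkxstats,
};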
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index 46e55f0202a6..a1fd76c22a59 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -527,11 +527,27 @@ static inline bool qdisc_is_percpu_stats(const struct Qdisc *q)
return q->flags & TCQ_F_CPUSTATS;
}
+static inline void _bstats_update(struct gnet_stats_basic_packed *bstats,
+ __u64 bytes, __u32 packets)
+{
+ bstats->bytes += bytes;
+ bstats->packets += packets;
+}
+
static inline void bstats_update(struct gnet_stats_basic_packed *bstats,
const struct sk_buff *skb)
{
- bstats->bytes += qdisc_pkt_len(skb);
- bstats->packets += skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1;
+ _bstats_update(bstats,
+ qdisc_pkt_len(skb),
+ skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1);
+}
+
+static inline void _bstats_cpu_update(struct gnet_stats_basic_cpu *bstats,
+ __u64 bytes, __u32 packets)
+{
+ u64_stats_update_begin(&bstats->syncp);
+ _bstats_update(&bstats->bstats, bytes, packets);
+ u64_stats_update_end(&bstats->syncp);
}
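A hedged sketch (not part of this patch) of why the update helpers were split out: callers that only have raw counters, e.g. statistics read back from offloaded hardware, can now update qdisc stats without a struct sk_buff. my_update_hw_stats() is illustrative and assumes it runs with the current CPU pinned (e.g. under BH).

#include <net/sch_generic.h>

static void my_update_hw_stats(struct gnet_stats_basic_packed *bstats,
			       struct gnet_stats_basic_cpu __percpu *cpu_bstats,
			       u64 bytes, u32 packets)
{
	if (cpu_bstats)
		_bstats_cpu_update(this_cpu_ptr(cpu_bstats), bytes, packets);
	else
		_bstats_update(bstats, bytes, packets);
}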
static inline void bstats_cpu_update(struct gnet_stats_basic_cpu *bstats,
diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
index 65521cfdcade..b392ac8382f2 100644
--- a/include/net/sctp/sctp.h
+++ b/include/net/sctp/sctp.h
@@ -116,6 +116,22 @@ extern struct percpu_counter sctp_sockets_allocated;
int sctp_asconf_mgmt(struct sctp_sock *, struct sctp_sockaddr_entry *);
struct sk_buff *sctp_skb_recv_datagram(struct sock *, int, int, int *);
+int sctp_transport_walk_start(struct rhashtable_iter *iter);
+void sctp_transport_walk_stop(struct rhashtable_iter *iter);
+struct sctp_transport *sctp_transport_get_next(struct net *net,
+ struct rhashtable_iter *iter);
+struct sctp_transport *sctp_transport_get_idx(struct net *net,
+ struct rhashtable_iter *iter, int pos);
+int sctp_transport_lookup_process(int (*cb)(struct sctp_transport *, void *),
+ struct net *net,
+ const union sctp_addr *laddr,
+ const union sctp_addr *paddr, void *p);
+int sctp_for_each_transport(int (*cb)(struct sctp_transport *, void *),
+ struct net *net, int pos, void *p);
+int sctp_for_each_endpoint(int (*cb)(struct sctp_endpoint *, void *), void *p);
+int sctp_get_sctp_info(struct sock *sk, struct sctp_association *asoc,
+ struct sctp_info *info);
+
/*
* sctp/primitive.c
*/
@@ -189,10 +205,9 @@ extern int sysctl_sctp_wmem[3];
*/
/* SCTP SNMP MIB stats handlers */
-#define SCTP_INC_STATS(net, field) SNMP_INC_STATS((net)->sctp.sctp_statistics, field)
-#define SCTP_INC_STATS_BH(net, field) SNMP_INC_STATS_BH((net)->sctp.sctp_statistics, field)
-#define SCTP_INC_STATS_USER(net, field) SNMP_INC_STATS_USER((net)->sctp.sctp_statistics, field)
-#define SCTP_DEC_STATS(net, field) SNMP_DEC_STATS((net)->sctp.sctp_statistics, field)
+#define SCTP_INC_STATS(net, field) SNMP_INC_STATS((net)->sctp.sctp_statistics, field)
+#define __SCTP_INC_STATS(net, field) __SNMP_INC_STATS((net)->sctp.sctp_statistics, field)
+#define SCTP_DEC_STATS(net, field) SNMP_DEC_STATS((net)->sctp.sctp_statistics, field)
/* sctp mib definitions */
enum {
@@ -359,21 +374,6 @@ int sctp_do_peeloff(struct sock *sk, sctp_assoc_t id, struct socket **sockp);
#define sctp_skb_for_each(pos, head, tmp) \
skb_queue_walk_safe(head, pos, tmp)
-/* A helper to append an entire skb list (list) to another (head). */
-static inline void sctp_skb_list_tail(struct sk_buff_head *list,
- struct sk_buff_head *head)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&head->lock, flags);
- spin_lock(&list->lock);
-
- skb_queue_splice_tail_init(list, head);
-
- spin_unlock(&list->lock);
- spin_unlock_irqrestore(&head->lock, flags);
-}
-
/**
* sctp_list_dequeue - remove from the head of the queue
* @list: list to dequeue from
@@ -386,11 +386,9 @@ static inline struct list_head *sctp_list_dequeue(struct list_head *list)
{
struct list_head *result = NULL;
- if (list->next != list) {
+ if (!list_empty(list)) {
result = list->next;
- list->next = result->next;
- list->next->prev = list;
- INIT_LIST_HEAD(result);
+ list_del_init(result);
}
return result;
}
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
index 6df1ce7a411c..16b013a6191c 100644
--- a/include/net/sctp/structs.h
+++ b/include/net/sctp/structs.h
@@ -210,14 +210,15 @@ struct sctp_sock {
int user_frag;
__u32 autoclose;
- __u8 nodelay;
- __u8 disable_fragments;
- __u8 v4mapped;
- __u8 frag_interleave;
__u32 adaptation_ind;
__u32 pd_point;
- __u8 recvrcvinfo;
- __u8 recvnxtinfo;
+ __u16 nodelay:1,
+ disable_fragments:1,
+ v4mapped:1,
+ frag_interleave:1,
+ recvrcvinfo:1,
+ recvnxtinfo:1,
+ data_ready_signalled:1;
atomic_t pd_mode;
/* Receive to here while partial delivery is in effect. */
@@ -847,6 +848,11 @@ struct sctp_transport {
*/
ktime_t last_time_heard;
+ /* When was the last time that we sent a chunk using this
+ * transport? We use this to check for idle transports
+ */
+ unsigned long last_time_sent;
+
/* Last time(in jiffies) when cwnd is reduced due to the congestion
* indication based on ECNE chunk.
*/
@@ -952,7 +958,8 @@ void sctp_transport_route(struct sctp_transport *, union sctp_addr *,
struct sctp_sock *);
void sctp_transport_pmtu(struct sctp_transport *, struct sock *sk);
void sctp_transport_free(struct sctp_transport *);
-void sctp_transport_reset_timers(struct sctp_transport *);
+void sctp_transport_reset_t3_rtx(struct sctp_transport *);
+void sctp_transport_reset_hb_timer(struct sctp_transport *);
int sctp_transport_hold(struct sctp_transport *);
void sctp_transport_put(struct sctp_transport *);
void sctp_transport_update_rto(struct sctp_transport *, __u32);
diff --git a/include/net/snmp.h b/include/net/snmp.h
index 35512ac6dcfb..c9228ad7ee91 100644
--- a/include/net/snmp.h
+++ b/include/net/snmp.h
@@ -123,12 +123,9 @@ struct linux_xfrm_mib {
#define DECLARE_SNMP_STAT(type, name) \
extern __typeof__(type) __percpu *name
-#define SNMP_INC_STATS_BH(mib, field) \
+#define __SNMP_INC_STATS(mib, field) \
__this_cpu_inc(mib->mibs[field])
-#define SNMP_INC_STATS_USER(mib, field) \
- this_cpu_inc(mib->mibs[field])
-
#define SNMP_INC_STATS_ATOMIC_LONG(mib, field) \
atomic_long_inc(&mib->mibs[field])
@@ -138,12 +135,9 @@ struct linux_xfrm_mib {
#define SNMP_DEC_STATS(mib, field) \
this_cpu_dec(mib->mibs[field])
-#define SNMP_ADD_STATS_BH(mib, field, addend) \
+#define __SNMP_ADD_STATS(mib, field, addend) \
__this_cpu_add(mib->mibs[field], addend)
-#define SNMP_ADD_STATS_USER(mib, field, addend) \
- this_cpu_add(mib->mibs[field], addend)
-
#define SNMP_ADD_STATS(mib, field, addend) \
this_cpu_add(mib->mibs[field], addend)
#define SNMP_UPD_PO_STATS(mib, basefield, addend) \
@@ -152,7 +146,7 @@ struct linux_xfrm_mib {
this_cpu_inc(ptr[basefield##PKTS]); \
this_cpu_add(ptr[basefield##OCTETS], addend); \
} while (0)
-#define SNMP_UPD_PO_STATS_BH(mib, basefield, addend) \
+#define __SNMP_UPD_PO_STATS(mib, basefield, addend) \
do { \
__typeof__((mib->mibs) + 0) ptr = mib->mibs; \
__this_cpu_inc(ptr[basefield##PKTS]); \
@@ -162,7 +156,7 @@ struct linux_xfrm_mib {
#if BITS_PER_LONG==32
-#define SNMP_ADD_STATS64_BH(mib, field, addend) \
+#define __SNMP_ADD_STATS64(mib, field, addend) \
do { \
__typeof__(*mib) *ptr = raw_cpu_ptr(mib); \
u64_stats_update_begin(&ptr->syncp); \
@@ -170,20 +164,16 @@ struct linux_xfrm_mib {
u64_stats_update_end(&ptr->syncp); \
} while (0)
-#define SNMP_ADD_STATS64_USER(mib, field, addend) \
+#define SNMP_ADD_STATS64(mib, field, addend) \
do { \
local_bh_disable(); \
- SNMP_ADD_STATS64_BH(mib, field, addend); \
- local_bh_enable(); \
+ __SNMP_ADD_STATS64(mib, field, addend); \
+ local_bh_enable(); \
} while (0)
-#define SNMP_ADD_STATS64(mib, field, addend) \
- SNMP_ADD_STATS64_USER(mib, field, addend)
-
-#define SNMP_INC_STATS64_BH(mib, field) SNMP_ADD_STATS64_BH(mib, field, 1)
-#define SNMP_INC_STATS64_USER(mib, field) SNMP_ADD_STATS64_USER(mib, field, 1)
+#define __SNMP_INC_STATS64(mib, field) SNMP_ADD_STATS64(mib, field, 1)
#define SNMP_INC_STATS64(mib, field) SNMP_ADD_STATS64(mib, field, 1)
-#define SNMP_UPD_PO_STATS64_BH(mib, basefield, addend) \
+#define __SNMP_UPD_PO_STATS64(mib, basefield, addend) \
do { \
__typeof__(*mib) *ptr; \
ptr = raw_cpu_ptr((mib)); \
@@ -195,19 +185,17 @@ struct linux_xfrm_mib {
#define SNMP_UPD_PO_STATS64(mib, basefield, addend) \
do { \
local_bh_disable(); \
- SNMP_UPD_PO_STATS64_BH(mib, basefield, addend); \
- local_bh_enable(); \
+ __SNMP_UPD_PO_STATS64(mib, basefield, addend); \
+ local_bh_enable(); \
} while (0)
#else
-#define SNMP_INC_STATS64_BH(mib, field) SNMP_INC_STATS_BH(mib, field)
-#define SNMP_INC_STATS64_USER(mib, field) SNMP_INC_STATS_USER(mib, field)
+#define __SNMP_INC_STATS64(mib, field) __SNMP_INC_STATS(mib, field)
#define SNMP_INC_STATS64(mib, field) SNMP_INC_STATS(mib, field)
#define SNMP_DEC_STATS64(mib, field) SNMP_DEC_STATS(mib, field)
-#define SNMP_ADD_STATS64_BH(mib, field, addend) SNMP_ADD_STATS_BH(mib, field, addend)
-#define SNMP_ADD_STATS64_USER(mib, field, addend) SNMP_ADD_STATS_USER(mib, field, addend)
+#define __SNMP_ADD_STATS64(mib, field, addend) __SNMP_ADD_STATS(mib, field, addend)
#define SNMP_ADD_STATS64(mib, field, addend) SNMP_ADD_STATS(mib, field, addend)
#define SNMP_UPD_PO_STATS64(mib, basefield, addend) SNMP_UPD_PO_STATS(mib, basefield, addend)
-#define SNMP_UPD_PO_STATS64_BH(mib, basefield, addend) SNMP_UPD_PO_STATS_BH(mib, basefield, addend)
+#define __SNMP_UPD_PO_STATS64(mib, basefield, addend) __SNMP_UPD_PO_STATS(mib, basefield, addend)
#endif
#endif
diff --git a/include/net/sock.h b/include/net/sock.h
index 255d3e03727b..649d2a8c17fc 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -178,7 +178,7 @@ struct sock_common {
int skc_bound_dev_if;
union {
struct hlist_node skc_bind_node;
- struct hlist_nulls_node skc_portaddr_node;
+ struct hlist_node skc_portaddr_node;
};
struct proto *skc_prot;
possible_net_t skc_net;
@@ -382,8 +382,13 @@ struct sock {
atomic_t sk_omem_alloc;
int sk_sndbuf;
struct sk_buff_head sk_write_queue;
+
+ /*
+	 * Because these fields are not modified atomically, all
+	 * changes are protected by the socket lock.
+ */
kmemcheck_bitfield_begin(flags);
- unsigned int sk_shutdown : 2,
+ unsigned int sk_padding : 2,
sk_no_check_tx : 1,
sk_no_check_rx : 1,
sk_userlocks : 4,
@@ -391,6 +396,7 @@ struct sock {
sk_type : 16;
#define SK_PROTOCOL_MAX U8_MAX
kmemcheck_bitfield_end(flags);
+
int sk_wmem_queued;
gfp_t sk_allocation;
u32 sk_pacing_rate; /* bytes per second */
@@ -418,6 +424,7 @@ struct sock {
struct timer_list sk_timer;
ktime_t sk_stamp;
u16 sk_tsflags;
+ u8 sk_shutdown;
u32 sk_tskey;
struct socket *sk_socket;
void *sk_user_data;
@@ -438,6 +445,7 @@ struct sock {
struct sk_buff *skb);
void (*sk_destruct)(struct sock *sk);
struct sock_reuseport __rcu *sk_reuseport_cb;
+ struct rcu_head sk_rcu;
};
#define __sk_user_data(sk) ((*((void __rcu **)&(sk)->sk_user_data)))
@@ -456,28 +464,32 @@ struct sock {
#define SK_CAN_REUSE 1
#define SK_FORCE_REUSE 2
+int sk_set_peek_off(struct sock *sk, int val);
+
static inline int sk_peek_offset(struct sock *sk, int flags)
{
- if ((flags & MSG_PEEK) && (sk->sk_peek_off >= 0))
- return sk->sk_peek_off;
- else
- return 0;
+ if (unlikely(flags & MSG_PEEK)) {
+ s32 off = READ_ONCE(sk->sk_peek_off);
+ if (off >= 0)
+ return off;
+ }
+
+ return 0;
}
static inline void sk_peek_offset_bwd(struct sock *sk, int val)
{
- if (sk->sk_peek_off >= 0) {
- if (sk->sk_peek_off >= val)
- sk->sk_peek_off -= val;
- else
- sk->sk_peek_off = 0;
+ s32 off = READ_ONCE(sk->sk_peek_off);
+
+ if (unlikely(off >= 0)) {
+ off = max_t(s32, off - val, 0);
+ WRITE_ONCE(sk->sk_peek_off, off);
}
}
static inline void sk_peek_offset_fwd(struct sock *sk, int val)
{
- if (sk->sk_peek_off >= 0)
- sk->sk_peek_off += val;
+ sk_peek_offset_bwd(sk, -val);
}
/*
@@ -564,7 +576,7 @@ static inline bool __sk_del_node_init(struct sock *sk)
modifications.
*/
-static inline void sock_hold(struct sock *sk)
+static __always_inline void sock_hold(struct sock *sk)
{
atomic_inc(&sk->sk_refcnt);
}
@@ -572,7 +584,7 @@ static inline void sock_hold(struct sock *sk)
/* Ungrab socket in the context, which assumes that socket refcnt
cannot hit zero, f.e. it is true in context of any socketcall.
*/
-static inline void __sock_put(struct sock *sk)
+static __always_inline void __sock_put(struct sock *sk)
{
atomic_dec(&sk->sk_refcnt);
}
@@ -625,12 +637,20 @@ static inline void sk_add_node(struct sock *sk, struct hlist_head *list)
static inline void sk_add_node_rcu(struct sock *sk, struct hlist_head *list)
{
sock_hold(sk);
- hlist_add_head_rcu(&sk->sk_node, list);
+ if (IS_ENABLED(CONFIG_IPV6) && sk->sk_reuseport &&
+ sk->sk_family == AF_INET6)
+ hlist_add_tail_rcu(&sk->sk_node, list);
+ else
+ hlist_add_head_rcu(&sk->sk_node, list);
}
static inline void __sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list)
{
- hlist_nulls_add_head_rcu(&sk->sk_nulls_node, list);
+ if (IS_ENABLED(CONFIG_IPV6) && sk->sk_reuseport &&
+ sk->sk_family == AF_INET6)
+ hlist_nulls_add_tail_rcu(&sk->sk_nulls_node, list);
+ else
+ hlist_nulls_add_head_rcu(&sk->sk_nulls_node, list);
}
static inline void sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list)
@@ -669,18 +689,18 @@ static inline void sk_add_bind_node(struct sock *sk,
hlist_for_each_entry(__sk, list, sk_bind_node)
/**
- * sk_nulls_for_each_entry_offset - iterate over a list at a given struct offset
+ * sk_for_each_entry_offset_rcu - iterate over a list at a given struct offset
* @tpos: the type * to use as a loop cursor.
* @pos: the &struct hlist_node to use as a loop cursor.
* @head: the head for your list.
* @offset: offset of hlist_node within the struct.
*
*/
-#define sk_nulls_for_each_entry_offset(tpos, pos, head, offset) \
- for (pos = (head)->first; \
- (!is_a_nulls(pos)) && \
+#define sk_for_each_entry_offset_rcu(tpos, pos, head, offset) \
+ for (pos = rcu_dereference((head)->first); \
+ pos != NULL && \
({ tpos = (typeof(*tpos) *)((void *)pos - offset); 1;}); \
- pos = pos->next)
+ pos = rcu_dereference(pos->next))
static inline struct user_namespace *sk_user_ns(struct sock *sk)
{
@@ -720,6 +740,7 @@ enum sock_flags {
*/
SOCK_FILTER_LOCKED, /* Filter cannot be changed anymore */
SOCK_SELECT_ERR_QUEUE, /* Wake select on error queue */
+ SOCK_RCU_FREE, /* wait rcu grace period in sk_destruct() */
};
#define SK_FLAGS_TIMESTAMP ((1UL << SOCK_TIMESTAMP) | (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE))
@@ -912,6 +933,17 @@ void sk_stream_kill_queues(struct sock *sk);
void sk_set_memalloc(struct sock *sk);
void sk_clear_memalloc(struct sock *sk);
+void __sk_flush_backlog(struct sock *sk);
+
+static inline bool sk_flush_backlog(struct sock *sk)
+{
+ if (unlikely(READ_ONCE(sk->sk_backlog.tail))) {
+ __sk_flush_backlog(sk);
+ return true;
+ }
+ return false;
+}
+
int sk_wait_data(struct sock *sk, long *timeo, const struct sk_buff *skb);
struct request_sock_ops;
@@ -1310,24 +1342,14 @@ static inline void sk_wmem_free_skb(struct sock *sk, struct sk_buff *skb)
__kfree_skb(skb);
}
-/* Used by processes to "lock" a socket state, so that
- * interrupts and bottom half handlers won't change it
- * from under us. It essentially blocks any incoming
- * packets, so that we won't get any new data or any
- * packets that change the state of the socket.
- *
- * While locked, BH processing will add new packets to
- * the backlog queue. This queue is processed by the
- * owner of the socket lock right before it is released.
- *
- * Since ~2.3.5 it is also exclusive sleep lock serializing
- * accesses from user process context.
- */
-#define sock_owned_by_user(sk) ((sk)->sk_lock.owned)
-
static inline void sock_release_ownership(struct sock *sk)
{
- sk->sk_lock.owned = 0;
+ if (sk->sk_lock.owned) {
+ sk->sk_lock.owned = 0;
+
+ /* The sk_lock has mutex_unlock() semantics: */
+ mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
+ }
}
/*
@@ -1349,6 +1371,16 @@ do { \
lockdep_init_map(&(sk)->sk_lock.dep_map, (name), (key), 0); \
} while (0)
+#ifdef CONFIG_LOCKDEP
+static inline bool lockdep_sock_is_held(const struct sock *csk)
+{
+ struct sock *sk = (struct sock *)csk;
+
+ return lockdep_is_held(&sk->sk_lock) ||
+ lockdep_is_held(&sk->sk_lock.slock);
+}
+#endif
+
void lock_sock_nested(struct sock *sk, int subclass);
static inline void lock_sock(struct sock *sk)
@@ -1382,6 +1414,40 @@ static inline void unlock_sock_fast(struct sock *sk, bool slow)
spin_unlock_bh(&sk->sk_lock.slock);
}
+/* Used by processes to "lock" a socket state, so that
+ * interrupts and bottom half handlers won't change it
+ * from under us. It essentially blocks any incoming
+ * packets, so that we won't get any new data or any
+ * packets that change the state of the socket.
+ *
+ * While locked, BH processing will add new packets to
+ * the backlog queue. This queue is processed by the
+ * owner of the socket lock right before it is released.
+ *
+ * Since ~2.3.5 it is also exclusive sleep lock serializing
+ * accesses from user process context.
+ */
+
+static inline void sock_owned_by_me(const struct sock *sk)
+{
+#ifdef CONFIG_LOCKDEP
+ WARN_ON_ONCE(!lockdep_sock_is_held(sk) && debug_locks);
+#endif
+}
+
+static inline bool sock_owned_by_user(const struct sock *sk)
+{
+ sock_owned_by_me(sk);
+ return sk->sk_lock.owned;
+}
+
+/* no reclassification while locks are held */
+static inline bool sock_allow_reclassification(const struct sock *csk)
+{
+ struct sock *sk = (struct sock *)csk;
+
+ return !sk->sk_lock.owned && !spin_is_locked(&sk->sk_lock.slock);
+}
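A hedged sketch (not part of this patch) of how the new lockdep helpers are meant to be used, mirroring the __sk_dst_get() conversion below; struct my_state/my_cfg and the accessors are hypothetical socket-private state.

#include <linux/rcupdate.h>
#include <net/sock.h>

/* Hypothetical socket-private state protected by the socket lock. */
struct my_cfg { int val; };
struct my_state { struct my_cfg __rcu *cfg; };

static struct my_cfg *my_cfg_deref(struct sock *sk, struct my_state *st)
{
	/* Replaces open-coded "sock_owned_by_user() || slock held"
	 * conditions in rcu_dereference_protected()/_check() users.
	 */
	return rcu_dereference_protected(st->cfg, lockdep_sock_is_held(sk));
}

static void my_update_cfg(struct sock *sk, struct my_state *st, int val)
{
	struct my_cfg *cfg;

	sock_owned_by_me(sk);	/* lockdep-only assertion */

	cfg = my_cfg_deref(sk, st);
	if (cfg)
		cfg->val = val;
}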
struct sock *sk_alloc(struct net *net, int family, gfp_t priority,
struct proto *prot, int kern);
@@ -1391,6 +1457,7 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority);
struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force,
gfp_t priority);
+void __sock_wfree(struct sk_buff *skb);
void sock_wfree(struct sk_buff *skb);
void skb_orphan_partial(struct sk_buff *skb);
void sock_rfree(struct sk_buff *skb);
@@ -1418,8 +1485,11 @@ void sk_send_sigurg(struct sock *sk);
struct sockcm_cookie {
u32 mark;
+ u16 tsflags;
};
+int __sock_cmsg_send(struct sock *sk, struct msghdr *msg, struct cmsghdr *cmsg,
+ struct sockcm_cookie *sockc);
int sock_cmsg_send(struct sock *sk, struct msghdr *msg,
struct sockcm_cookie *sockc);
@@ -1584,8 +1654,8 @@ static inline void sk_rethink_txhash(struct sock *sk)
static inline struct dst_entry *
__sk_dst_get(struct sock *sk)
{
- return rcu_dereference_check(sk->sk_dst_cache, sock_owned_by_user(sk) ||
- lockdep_is_held(&sk->sk_lock.slock));
+ return rcu_dereference_check(sk->sk_dst_cache,
+ lockdep_sock_is_held(sk));
}
static inline struct dst_entry *
@@ -1857,6 +1927,7 @@ void sk_reset_timer(struct sock *sk, struct timer_list *timer,
void sk_stop_timer(struct sock *sk, struct timer_list *timer);
+int __sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb);
@@ -1893,11 +1964,19 @@ static inline unsigned long sock_wspace(struct sock *sk)
*/
static inline void sk_set_bit(int nr, struct sock *sk)
{
+ if ((nr == SOCKWQ_ASYNC_NOSPACE || nr == SOCKWQ_ASYNC_WAITDATA) &&
+ !sock_flag(sk, SOCK_FASYNC))
+ return;
+
set_bit(nr, &sk->sk_wq_raw->flags);
}
static inline void sk_clear_bit(int nr, struct sock *sk)
{
+ if ((nr == SOCKWQ_ASYNC_NOSPACE || nr == SOCKWQ_ASYNC_WAITDATA) &&
+ !sock_flag(sk, SOCK_FASYNC))
+ return;
+
clear_bit(nr, &sk->sk_wq_raw->flags);
}
@@ -2007,6 +2086,13 @@ sock_skb_set_dropcount(const struct sock *sk, struct sk_buff *skb)
SOCK_SKB_CB(skb)->dropcount = atomic_read(&sk->sk_drops);
}
+static inline void sk_drops_add(struct sock *sk, const struct sk_buff *skb)
+{
+ int segs = max_t(u16, 1, skb_shinfo(skb)->gso_segs);
+
+ atomic_add(segs, &sk->sk_drops);
+}
+
void __sock_recv_timestamp(struct msghdr *msg, struct sock *sk,
struct sk_buff *skb);
void __sock_recv_wifi_status(struct msghdr *msg, struct sock *sk,
@@ -2054,19 +2140,21 @@ static inline void sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk,
sk->sk_stamp = skb->tstamp;
}
-void __sock_tx_timestamp(const struct sock *sk, __u8 *tx_flags);
+void __sock_tx_timestamp(__u16 tsflags, __u8 *tx_flags);
/**
* sock_tx_timestamp - checks whether the outgoing packet is to be time stamped
* @sk: socket sending this packet
+ * @tsflags: timestamping flags to use
* @tx_flags: completed with instructions for time stamping
*
* Note : callers should take care of initial *tx_flags value (usually 0)
*/
-static inline void sock_tx_timestamp(const struct sock *sk, __u8 *tx_flags)
+static inline void sock_tx_timestamp(const struct sock *sk, __u16 tsflags,
+ __u8 *tx_flags)
{
- if (unlikely(sk->sk_tsflags))
- __sock_tx_timestamp(sk, tx_flags);
+ if (unlikely(tsflags))
+ __sock_tx_timestamp(tsflags, tx_flags);
if (unlikely(sock_flag(sk, SOCK_WIFI_STATUS)))
*tx_flags |= SKBTX_WIFI_STATUS;
}
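A hedged sketch (not part of this patch) of a sendmsg() path using the per-call tsflags now carried in struct sockcm_cookie; my_proto_sendmsg() is illustrative and error handling is trimmed.

#include <linux/socket.h>
#include <net/sock.h>

static int my_proto_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
{
	struct sockcm_cookie sockc = { .tsflags = sk->sk_tsflags };
	__u8 tx_flags = 0;
	int err;

	if (msg->msg_controllen) {
		/* SO_TIMESTAMPING passed as a cmsg can now override
		 * sk_tsflags for this call only.
		 */
		err = sock_cmsg_send(sk, msg, &sockc);
		if (err)
			return err;
	}

	sock_tx_timestamp(sk, sockc.tsflags, &tx_flags);
	/* ... allocate the skb, copy data, apply tx_flags to it ... */
	return 0;
}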
diff --git a/include/net/switchdev.h b/include/net/switchdev.h
index d451122e8404..985619a59323 100644
--- a/include/net/switchdev.h
+++ b/include/net/switchdev.h
@@ -54,6 +54,8 @@ struct switchdev_attr {
struct net_device *orig_dev;
enum switchdev_attr_id id;
u32 flags;
+ void *complete_priv;
+ void (*complete)(struct net_device *dev, int err, void *priv);
union {
struct netdev_phys_item_id ppid; /* PORT_PARENT_ID */
u8 stp_state; /* PORT_STP_STATE */
@@ -75,6 +77,8 @@ struct switchdev_obj {
struct net_device *orig_dev;
enum switchdev_obj_id id;
u32 flags;
+ void *complete_priv;
+ void (*complete)(struct net_device *dev, int err, void *priv);
};
/* SWITCHDEV_OBJ_ID_PORT_VLAN */
@@ -93,7 +97,7 @@ struct switchdev_obj_ipv4_fib {
struct switchdev_obj obj;
u32 dst;
int dst_len;
- struct fib_info fi;
+ struct fib_info *fi;
u8 tos;
u8 type;
u32 nlflags;
diff --git a/include/net/tc_act/tc_mirred.h b/include/net/tc_act/tc_mirred.h
index dae96bae1c19..e891835eb74e 100644
--- a/include/net/tc_act/tc_mirred.h
+++ b/include/net/tc_act/tc_mirred.h
@@ -2,6 +2,7 @@
#define __NET_TC_MIR_H
#include <net/act_api.h>
+#include <linux/tc_act/tc_mirred.h>
struct tcf_mirred {
struct tcf_common common;
@@ -14,4 +15,18 @@ struct tcf_mirred {
#define to_mirred(a) \
container_of(a->priv, struct tcf_mirred, common)
+static inline bool is_tcf_mirred_redirect(const struct tc_action *a)
+{
+#ifdef CONFIG_NET_CLS_ACT
+ if (a->ops && a->ops->type == TCA_ACT_MIRRED)
+ return to_mirred(a)->tcfm_eaction == TCA_EGRESS_REDIR;
+#endif
+ return false;
+}
+
+static inline int tcf_mirred_ifindex(const struct tc_action *a)
+{
+ return to_mirred(a)->tcfm_ifindex;
+}
+
#endif /* __NET_TC_MIR_H */
diff --git a/include/net/tcp.h b/include/net/tcp.h
index b91370f61be6..0bcc70f4e1fb 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -332,9 +332,8 @@ bool tcp_check_oom(struct sock *sk, int shift);
extern struct proto tcp_prot;
#define TCP_INC_STATS(net, field) SNMP_INC_STATS((net)->mib.tcp_statistics, field)
-#define TCP_INC_STATS_BH(net, field) SNMP_INC_STATS_BH((net)->mib.tcp_statistics, field)
+#define __TCP_INC_STATS(net, field) __SNMP_INC_STATS((net)->mib.tcp_statistics, field)
#define TCP_DEC_STATS(net, field) SNMP_DEC_STATS((net)->mib.tcp_statistics, field)
-#define TCP_ADD_STATS_USER(net, field, val) SNMP_ADD_STATS_USER((net)->mib.tcp_statistics, field, val)
#define TCP_ADD_STATS(net, field, val) SNMP_ADD_STATS((net)->mib.tcp_statistics, field, val)
void tcp_tasklet_init(void);
@@ -452,10 +451,15 @@ struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb);
int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len);
int tcp_connect(struct sock *sk);
+enum tcp_synack_type {
+ TCP_SYNACK_NORMAL,
+ TCP_SYNACK_FASTOPEN,
+ TCP_SYNACK_COOKIE,
+};
struct sk_buff *tcp_make_synack(const struct sock *sk, struct dst_entry *dst,
struct request_sock *req,
struct tcp_fastopen_cookie *foc,
- bool attach_req);
+ enum tcp_synack_type synack_type);
int tcp_disconnect(struct sock *sk, int flags);
void tcp_finish_connect(struct sock *sk, struct sk_buff *skb);
@@ -533,8 +537,8 @@ __u32 cookie_v6_init_sequence(const struct sk_buff *skb, __u16 *mss);
void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss,
int nonagle);
bool tcp_may_send_now(struct sock *sk);
-int __tcp_retransmit_skb(struct sock *, struct sk_buff *);
-int tcp_retransmit_skb(struct sock *, struct sk_buff *);
+int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs);
+int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs);
void tcp_retransmit_timer(struct sock *sk);
void tcp_xmit_retransmit_queue(struct sock *);
void tcp_simple_retransmit(struct sock *);
@@ -552,6 +556,8 @@ void tcp_send_ack(struct sock *sk);
void tcp_send_delayed_ack(struct sock *sk);
void tcp_send_loss_probe(struct sock *sk);
bool tcp_schedule_loss_probe(struct sock *sk);
+void tcp_skb_collapse_tstamp(struct sk_buff *skb,
+ const struct sk_buff *next_skb);
/* tcp_input.c */
void tcp_resume_early_retransmit(struct sock *sk);
@@ -754,14 +760,21 @@ struct tcp_skb_cb {
TCPCB_REPAIRED)
__u8 ip_dsfield; /* IPv4 tos or IPv6 dsfield */
- /* 1 byte hole */
+ __u8 txstamp_ack:1, /* Record TX timestamp for ack? */
+ eor:1, /* Is skb MSG_EOR marked? */
+ unused:6;
__u32 ack_seq; /* Sequence number ACK'd */
union {
- struct inet_skb_parm h4;
+ struct {
+ /* There is space for up to 20 bytes */
+ } tx; /* only used for outgoing skbs */
+ union {
+ struct inet_skb_parm h4;
#if IS_ENABLED(CONFIG_IPV6)
- struct inet6_skb_parm h6;
+ struct inet6_skb_parm h6;
#endif
- } header; /* For incoming frames */
+ } header; /* For incoming skbs */
+ };
};
#define TCP_SKB_CB(__skb) ((struct tcp_skb_cb *)&((__skb)->cb[0]))
@@ -773,7 +786,9 @@ struct tcp_skb_cb {
*/
static inline int tcp_v6_iif(const struct sk_buff *skb)
{
- return TCP_SKB_CB(skb)->header.h6.iif;
+ bool l3_slave = skb_l3mdev_slave(TCP_SKB_CB(skb)->header.h6.flags);
+
+ return l3_slave ? skb->skb_iif : TCP_SKB_CB(skb)->header.h6.iif;
}
#endif
@@ -801,6 +816,11 @@ static inline int tcp_skb_mss(const struct sk_buff *skb)
return TCP_SKB_CB(skb)->tcp_gso_size;
}
+static inline bool tcp_skb_can_collapse_to(const struct sk_buff *skb)
+{
+ return likely(!TCP_SKB_CB(skb)->eor);
+}
+
/* Events passed to congestion control interface */
enum tcp_ca_event {
CA_EVENT_TX_START, /* first transmit when no packets in flight */
@@ -836,6 +856,11 @@ enum tcp_ca_ack_event_flags {
union tcp_cc_info;
+struct ack_sample {
+ u32 pkts_acked;
+ s32 rtt_us;
+};
+
struct tcp_congestion_ops {
struct list_head list;
u32 key;
@@ -859,7 +884,7 @@ struct tcp_congestion_ops {
/* new value of cwnd after loss (optional) */
u32 (*undo_cwnd)(struct sock *sk);
/* hook for packet ack accounting (optional) */
- void (*pkts_acked)(struct sock *sk, u32 num_acked, s32 rtt_us);
+ void (*pkts_acked)(struct sock *sk, const struct ack_sample *sample);
/* get info for inet_diag (optional) */
size_t (*get_info)(struct sock *sk, u32 ext, int *attr,
union tcp_cc_info *info);
@@ -1039,17 +1064,6 @@ static inline __u32 tcp_max_tso_deferred_mss(const struct tcp_sock *tp)
return 3;
}
-/* Slow start with delack produces 3 packets of burst, so that
- * it is safe "de facto". This will be the default - same as
- * the default reordering threshold - but if reordering increases,
- * we must be able to allow cwnd to burst at least this much in order
- * to not pull it back when holes are filled.
- */
-static __inline__ __u32 tcp_max_burst(const struct tcp_sock *tp)
-{
- return tp->reordering;
-}
-
/* Returns end sequence number of the receiver's advertised window */
static inline u32 tcp_wnd_end(const struct tcp_sock *tp)
{
@@ -1301,10 +1315,10 @@ bool tcp_oow_rate_limited(struct net *net, const struct sk_buff *skb,
static inline void tcp_mib_init(struct net *net)
{
/* See RFC 2012 */
- TCP_ADD_STATS_USER(net, TCP_MIB_RTOALGORITHM, 1);
- TCP_ADD_STATS_USER(net, TCP_MIB_RTOMIN, TCP_RTO_MIN*1000/HZ);
- TCP_ADD_STATS_USER(net, TCP_MIB_RTOMAX, TCP_RTO_MAX*1000/HZ);
- TCP_ADD_STATS_USER(net, TCP_MIB_MAXCONN, -1);
+ TCP_ADD_STATS(net, TCP_MIB_RTOALGORITHM, 1);
+ TCP_ADD_STATS(net, TCP_MIB_RTOMIN, TCP_RTO_MIN*1000/HZ);
+ TCP_ADD_STATS(net, TCP_MIB_RTOMAX, TCP_RTO_MAX*1000/HZ);
+ TCP_ADD_STATS(net, TCP_MIB_MAXCONN, -1);
}
/* from STCP */
@@ -1738,7 +1752,7 @@ struct tcp_request_sock_ops {
int (*send_synack)(const struct sock *sk, struct dst_entry *dst,
struct flowi *fl, struct request_sock *req,
struct tcp_fastopen_cookie *foc,
- bool attach_req);
+ enum tcp_synack_type synack_type);
};
#ifdef CONFIG_SYN_COOKIES
@@ -1747,7 +1761,7 @@ static inline __u32 cookie_init_sequence(const struct tcp_request_sock_ops *ops,
__u16 *mss)
{
tcp_synq_overflow(sk);
- NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SYNCOOKIESSENT);
+ __NET_INC_STATS(sock_net(sk), LINUX_MIB_SYNCOOKIESSENT);
return ops->cookie_init_seq(skb, mss);
}
#else
@@ -1846,4 +1860,17 @@ static inline void tcp_segs_in(struct tcp_sock *tp, const struct sk_buff *skb)
tp->data_segs_in += segs_in;
}
+/*
+ * TCP listen path runs lockless.
+ * We forced "struct sock" to be const qualified to make sure
+ * we don't modify one of its fields by mistake.
+ * Here, we increment sk_drops which is an atomic_t, so we can safely
+ * make sock writable again.
+ */
+static inline void tcp_listendrop(const struct sock *sk)
+{
+ atomic_inc(&((struct sock *)sk)->sk_drops);
+ __NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENDROPS);
+}
+
#endif /* _TCP_H */
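
The expected call pattern for tcp_listendrop() is a lockless listen path that only holds a const pointer to the listener; the fragment below is a hypothetical illustration (sk_acceptq_is_full() also accepts a const socket, so no cast is needed at the call site):

/* Hypothetical lockless SYN-handling fragment. */
static bool example_accept_or_drop(const struct sock *sk, struct sk_buff *skb)
{
        if (sk_acceptq_is_full(sk)) {
                tcp_listendrop(sk);     /* bumps sk_drops and LINUX_MIB_LISTENDROPS */
                kfree_skb(skb);
                return false;
        }
        /* ... allocate and queue a request socket here ... */
        return true;
}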
diff --git a/include/net/transp_v6.h b/include/net/transp_v6.h
index b927413dde86..276f9760ab56 100644
--- a/include/net/transp_v6.h
+++ b/include/net/transp_v6.h
@@ -41,8 +41,8 @@ void ip6_datagram_recv_specific_ctl(struct sock *sk, struct msghdr *msg,
struct sk_buff *skb);
int ip6_datagram_send_ctl(struct net *net, struct sock *sk, struct msghdr *msg,
- struct flowi6 *fl6, struct ipv6_txoptions *opt,
- int *hlimit, int *tclass, int *dontfrag);
+ struct flowi6 *fl6, struct ipcm6_cookie *ipc6,
+ struct sockcm_cookie *sockc);
void ip6_dgram_sock_seq_show(struct seq_file *seq, struct sock *sp,
__u16 srcp, __u16 destp, int bucket);
diff --git a/include/net/udp.h b/include/net/udp.h
index 92927f729ac8..ae07f375370d 100644
--- a/include/net/udp.h
+++ b/include/net/udp.h
@@ -59,7 +59,7 @@ struct udp_skb_cb {
* @lock: spinlock protecting changes to head/count
*/
struct udp_hslot {
- struct hlist_nulls_head head;
+ struct hlist_head head;
int count;
spinlock_t lock;
} __attribute__((aligned(2 * sizeof(long))));
@@ -158,9 +158,21 @@ static inline __sum16 udp_v4_check(int len, __be32 saddr,
void udp_set_csum(bool nocheck, struct sk_buff *skb,
__be32 saddr, __be32 daddr, int len);
+static inline void udp_csum_pull_header(struct sk_buff *skb)
+{
+ if (skb->ip_summed == CHECKSUM_NONE)
+ skb->csum = csum_partial(udp_hdr(skb), sizeof(struct udphdr),
+ skb->csum);
+ skb_pull_rcsum(skb, sizeof(struct udphdr));
+ UDP_SKB_CB(skb)->cscov -= sizeof(struct udphdr);
+}
+
+typedef struct sock *(*udp_lookup_t)(struct sk_buff *skb, __be16 sport,
+ __be16 dport);
+
struct sk_buff **udp_gro_receive(struct sk_buff **head, struct sk_buff *skb,
- struct udphdr *uh);
-int udp_gro_complete(struct sk_buff *skb, int nhoff);
+ struct udphdr *uh, udp_lookup_t lookup);
+int udp_gro_complete(struct sk_buff *skb, int nhoff, udp_lookup_t lookup);
static inline struct udphdr *udp_gro_udphdr(struct sk_buff *skb)
{
@@ -260,6 +272,8 @@ struct sock *udp4_lib_lookup(struct net *net, __be32 saddr, __be16 sport,
struct sock *__udp4_lib_lookup(struct net *net, __be32 saddr, __be16 sport,
__be32 daddr, __be16 dport, int dif,
struct udp_table *tbl, struct sk_buff *skb);
+struct sock *udp4_lib_lookup_skb(struct sk_buff *skb,
+ __be16 sport, __be16 dport);
struct sock *udp6_lib_lookup(struct net *net,
const struct in6_addr *saddr, __be16 sport,
const struct in6_addr *daddr, __be16 dport,
@@ -269,36 +283,38 @@ struct sock *__udp6_lib_lookup(struct net *net,
const struct in6_addr *daddr, __be16 dport,
int dif, struct udp_table *tbl,
struct sk_buff *skb);
+struct sock *udp6_lib_lookup_skb(struct sk_buff *skb,
+ __be16 sport, __be16 dport);
/*
* SNMP statistics for UDP and UDP-Lite
*/
-#define UDP_INC_STATS_USER(net, field, is_udplite) do { \
- if (is_udplite) SNMP_INC_STATS_USER((net)->mib.udplite_statistics, field); \
- else SNMP_INC_STATS_USER((net)->mib.udp_statistics, field); } while(0)
-#define UDP_INC_STATS_BH(net, field, is_udplite) do { \
- if (is_udplite) SNMP_INC_STATS_BH((net)->mib.udplite_statistics, field); \
- else SNMP_INC_STATS_BH((net)->mib.udp_statistics, field); } while(0)
-
-#define UDP6_INC_STATS_BH(net, field, is_udplite) do { \
- if (is_udplite) SNMP_INC_STATS_BH((net)->mib.udplite_stats_in6, field);\
- else SNMP_INC_STATS_BH((net)->mib.udp_stats_in6, field); \
+#define UDP_INC_STATS(net, field, is_udplite) do { \
+ if (is_udplite) SNMP_INC_STATS((net)->mib.udplite_statistics, field); \
+ else SNMP_INC_STATS((net)->mib.udp_statistics, field); } while(0)
+#define __UDP_INC_STATS(net, field, is_udplite) do { \
+ if (is_udplite) __SNMP_INC_STATS((net)->mib.udplite_statistics, field); \
+ else __SNMP_INC_STATS((net)->mib.udp_statistics, field); } while(0)
+
+#define __UDP6_INC_STATS(net, field, is_udplite) do { \
+ if (is_udplite) __SNMP_INC_STATS((net)->mib.udplite_stats_in6, field);\
+ else __SNMP_INC_STATS((net)->mib.udp_stats_in6, field); \
} while(0)
-#define UDP6_INC_STATS_USER(net, field, __lite) do { \
- if (__lite) SNMP_INC_STATS_USER((net)->mib.udplite_stats_in6, field); \
- else SNMP_INC_STATS_USER((net)->mib.udp_stats_in6, field); \
+#define UDP6_INC_STATS(net, field, __lite) do { \
+ if (__lite) SNMP_INC_STATS((net)->mib.udplite_stats_in6, field); \
+ else SNMP_INC_STATS((net)->mib.udp_stats_in6, field); \
} while(0)
#if IS_ENABLED(CONFIG_IPV6)
-#define UDPX_INC_STATS_BH(sk, field) \
+#define __UDPX_INC_STATS(sk, field) \
do { \
if ((sk)->sk_family == AF_INET) \
- UDP_INC_STATS_BH(sock_net(sk), field, 0); \
+ __UDP_INC_STATS(sock_net(sk), field, 0); \
else \
- UDP6_INC_STATS_BH(sock_net(sk), field, 0); \
+ __UDP6_INC_STATS(sock_net(sk), field, 0); \
} while (0)
#else
-#define UDPX_INC_STATS_BH(sk, field) UDP_INC_STATS_BH(sock_net(sk), field, 0)
+#define __UDPX_INC_STATS(sk, field) __UDP_INC_STATS(sock_net(sk), field, 0)
#endif
/* /proc */
diff --git a/include/net/udp_tunnel.h b/include/net/udp_tunnel.h
index b83114077cee..9d14f707e534 100644
--- a/include/net/udp_tunnel.h
+++ b/include/net/udp_tunnel.h
@@ -64,6 +64,11 @@ static inline int udp_sock_create(struct net *net,
typedef int (*udp_tunnel_encap_rcv_t)(struct sock *sk, struct sk_buff *skb);
typedef void (*udp_tunnel_encap_destroy_t)(struct sock *sk);
+typedef struct sk_buff **(*udp_tunnel_gro_receive_t)(struct sock *sk,
+ struct sk_buff **head,
+ struct sk_buff *skb);
+typedef int (*udp_tunnel_gro_complete_t)(struct sock *sk, struct sk_buff *skb,
+ int nhoff);
struct udp_tunnel_sock_cfg {
void *sk_user_data; /* user data used by encap_rcv call back */
@@ -71,6 +76,8 @@ struct udp_tunnel_sock_cfg {
__u8 encap_type;
udp_tunnel_encap_rcv_t encap_rcv;
udp_tunnel_encap_destroy_t encap_destroy;
+ udp_tunnel_gro_receive_t gro_receive;
+ udp_tunnel_gro_complete_t gro_complete;
};
/* Setup the given (UDP) sock to receive UDP encapsulated packets */
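
With gro_receive/gro_complete now carried in udp_tunnel_sock_cfg, a tunnel driver can attach per-socket GRO handlers when it configures its encapsulating socket. A rough sketch, where every my_tun_* name, the encap_type value, and the callback bodies are placeholders rather than code from any in-tree driver:

/* Placeholder tunnel driver wiring GRO handlers into its UDP socket. */
static int my_tun_encap_rcv(struct sock *sk, struct sk_buff *skb)
{
        kfree_skb(skb);         /* a real driver would decapsulate here */
        return 0;               /* 0: skb consumed */
}

static struct sk_buff **my_tun_gro_receive(struct sock *sk,
                                           struct sk_buff **head,
                                           struct sk_buff *skb)
{
        /* inspect the tunnel header; return an skb ready to be flushed,
         * or NULL if nothing can be completed yet */
        return NULL;
}

static int my_tun_gro_complete(struct sock *sk, struct sk_buff *skb, int nhoff)
{
        return 0;
}

static void my_tun_setup_sock(struct net *net, struct socket *sock, void *priv)
{
        struct udp_tunnel_sock_cfg cfg = {
                .sk_user_data   = priv,
                .encap_type     = 1,    /* placeholder encapsulation type */
                .encap_rcv      = my_tun_encap_rcv,
                .gro_receive    = my_tun_gro_receive,
                .gro_complete   = my_tun_gro_complete,
        };

        setup_udp_tunnel_sock(net, sock, &cfg);
}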
@@ -98,23 +105,13 @@ struct metadata_dst *udp_tun_rx_dst(struct sk_buff *skb, unsigned short family,
__be16 flags, __be64 tunnel_id,
int md_size);
-static inline struct sk_buff *udp_tunnel_handle_offloads(struct sk_buff *skb,
- bool udp_csum)
+static inline int udp_tunnel_handle_offloads(struct sk_buff *skb, bool udp_csum)
{
int type = udp_csum ? SKB_GSO_UDP_TUNNEL_CSUM : SKB_GSO_UDP_TUNNEL;
return iptunnel_handle_offloads(skb, type);
}
-static inline void udp_tunnel_gro_complete(struct sk_buff *skb, int nhoff)
-{
- struct udphdr *uh;
-
- uh = (struct udphdr *)(skb->data + nhoff - sizeof(struct udphdr));
- skb_shinfo(skb)->gso_type |= uh->check ?
- SKB_GSO_UDP_TUNNEL_CSUM : SKB_GSO_UDP_TUNNEL;
-}
-
static inline void udp_tunnel_encap_enable(struct socket *sock)
{
#if IS_ENABLED(CONFIG_IPV6)
diff --git a/include/net/vxlan.h b/include/net/vxlan.h
index 73ed2e951c02..b8803165df91 100644
--- a/include/net/vxlan.h
+++ b/include/net/vxlan.h
@@ -119,6 +119,64 @@ struct vxlanhdr_gbp {
#define VXLAN_GBP_POLICY_APPLIED (BIT(3) << 16)
#define VXLAN_GBP_ID_MASK (0xFFFF)
+/*
+ * VXLAN Generic Protocol Extension (VXLAN_F_GPE):
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |R|R|Ver|I|P|R|O| Reserved |Next Protocol |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | VXLAN Network Identifier (VNI) | Reserved |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *
+ * Ver = Version. Indicates VXLAN GPE protocol version.
+ *
+ * P = Next Protocol Bit. The P bit is set to indicate that the
+ * Next Protocol field is present.
+ *
+ * O = OAM Flag Bit. The O bit is set to indicate that the packet
+ * is an OAM packet.
+ *
+ * Next Protocol = This 8 bit field indicates the protocol header
+ * immediately following the VXLAN GPE header.
+ *
+ * https://tools.ietf.org/html/draft-ietf-nvo3-vxlan-gpe-01
+ */
+
+struct vxlanhdr_gpe {
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ u8 oam_flag:1,
+ reserved_flags1:1,
+ np_applied:1,
+ instance_applied:1,
+ version:2,
+ reserved_flags2:2;
+#elif defined(__BIG_ENDIAN_BITFIELD)
+ u8 reserved_flags2:2,
+ version:2,
+ instance_applied:1,
+ np_applied:1,
+ reserved_flags1:1,
+ oam_flag:1;
+#endif
+ u8 reserved_flags3;
+ u8 reserved_flags4;
+ u8 next_protocol;
+ __be32 vx_vni;
+};
+
+/* VXLAN-GPE header flags. */
+#define VXLAN_HF_VER cpu_to_be32(BIT(29) | BIT(28))
+#define VXLAN_HF_NP cpu_to_be32(BIT(26))
+#define VXLAN_HF_OAM cpu_to_be32(BIT(24))
+
+#define VXLAN_GPE_USED_BITS (VXLAN_HF_VER | VXLAN_HF_NP | VXLAN_HF_OAM | \
+ cpu_to_be32(0xff))
+
+/* VXLAN-GPE header Next Protocol. */
+#define VXLAN_GPE_NP_IPV4 0x01
+#define VXLAN_GPE_NP_IPV6 0x02
+#define VXLAN_GPE_NP_ETHERNET 0x03
+#define VXLAN_GPE_NP_NSH 0x04
+
struct vxlan_metadata {
u32 gbp;
};
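
Given the vxlanhdr_gpe layout and the VXLAN_GPE_NP_* values above, a receive path can dispatch on the next-protocol field roughly as below; the helper name and the policy of rejecting OAM and NSH traffic are assumptions of this sketch, not necessarily the vxlan driver's behaviour:

/* Sketch: map a GPE next-protocol value to an inner ethertype.
 * Returns 0 for anything this sketch does not handle. */
static __be16 example_gpe_inner_proto(const struct vxlanhdr_gpe *gpe)
{
        if (gpe->oam_flag)
                return 0;               /* OAM frames are not data traffic */

        switch (gpe->next_protocol) {
        case VXLAN_GPE_NP_IPV4:
                return htons(ETH_P_IP);
        case VXLAN_GPE_NP_IPV6:
                return htons(ETH_P_IPV6);
        case VXLAN_GPE_NP_ETHERNET:
                return htons(ETH_P_TEB);
        default:
                return 0;               /* e.g. VXLAN_GPE_NP_NSH */
        }
}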
@@ -126,12 +184,9 @@ struct vxlan_metadata {
/* per UDP socket information */
struct vxlan_sock {
struct hlist_node hlist;
- struct work_struct del_work;
struct socket *sock;
- struct rcu_head rcu;
struct hlist_head vni_list[VNI_HASH_SIZE];
atomic_t refcnt;
- struct udp_offload udp_offloads;
u32 flags;
};
@@ -206,16 +261,26 @@ struct vxlan_dev {
#define VXLAN_F_GBP 0x800
#define VXLAN_F_REMCSUM_NOPARTIAL 0x1000
#define VXLAN_F_COLLECT_METADATA 0x2000
+#define VXLAN_F_GPE 0x4000
/* Flags that are used in the receive path. These flags must match in
* order for a socket to be shareable
*/
#define VXLAN_F_RCV_FLAGS (VXLAN_F_GBP | \
+ VXLAN_F_GPE | \
VXLAN_F_UDP_ZERO_CSUM6_RX | \
VXLAN_F_REMCSUM_RX | \
VXLAN_F_REMCSUM_NOPARTIAL | \
VXLAN_F_COLLECT_METADATA)
+/* Flags that can be set together with VXLAN_F_GPE. */
+#define VXLAN_F_ALLOWED_GPE (VXLAN_F_GPE | \
+ VXLAN_F_IPV6 | \
+ VXLAN_F_UDP_ZERO_CSUM_TX | \
+ VXLAN_F_UDP_ZERO_CSUM6_TX | \
+ VXLAN_F_UDP_ZERO_CSUM6_RX | \
+ VXLAN_F_COLLECT_METADATA)
+
struct net_device *vxlan_dev_create(struct net *net, const char *name,
u8 name_assign_type, struct vxlan_config *conf);
@@ -252,7 +317,9 @@ static inline netdev_features_t vxlan_features_check(struct sk_buff *skb,
(skb->inner_protocol_type != ENCAP_TYPE_ETHER ||
skb->inner_protocol != htons(ETH_P_TEB) ||
(skb_inner_mac_header(skb) - skb_transport_header(skb) !=
- sizeof(struct udphdr) + sizeof(struct vxlanhdr))))
+ sizeof(struct udphdr) + sizeof(struct vxlanhdr)) ||
+ (skb->ip_summed != CHECKSUM_NONE &&
+ !can_checksum_protocol(features, inner_eth_hdr(skb)->h_proto))))
return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
return features;
@@ -325,13 +392,11 @@ static inline __be32 vxlan_compute_rco(unsigned int start, unsigned int offset)
return vni_field;
}
-#if IS_ENABLED(CONFIG_VXLAN)
-void vxlan_get_rx_port(struct net_device *netdev);
-#else
static inline void vxlan_get_rx_port(struct net_device *netdev)
{
+ ASSERT_RTNL();
+ call_netdevice_notifiers(NETDEV_OFFLOAD_PUSH_VXLAN, netdev);
}
-#endif
static inline unsigned short vxlan_get_sk_family(struct vxlan_sock *vs)
{
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index d6f6e5006ee9..adfebd6f243c 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -45,12 +45,8 @@
#ifdef CONFIG_XFRM_STATISTICS
#define XFRM_INC_STATS(net, field) SNMP_INC_STATS((net)->mib.xfrm_statistics, field)
-#define XFRM_INC_STATS_BH(net, field) SNMP_INC_STATS_BH((net)->mib.xfrm_statistics, field)
-#define XFRM_INC_STATS_USER(net, field) SNMP_INC_STATS_USER((net)-mib.xfrm_statistics, field)
#else
#define XFRM_INC_STATS(net, field) ((void)(net))
-#define XFRM_INC_STATS_BH(net, field) ((void)(net))
-#define XFRM_INC_STATS_USER(net, field) ((void)(net))
#endif
diff --git a/include/rdma/ib.h b/include/rdma/ib.h
index cf8f9e700e48..a6b93706b0fc 100644
--- a/include/rdma/ib.h
+++ b/include/rdma/ib.h
@@ -34,6 +34,7 @@
#define _RDMA_IB_H
#include <linux/types.h>
+#include <linux/sched.h>
struct ib_addr {
union {
@@ -86,4 +87,19 @@ struct sockaddr_ib {
__u64 sib_scope_id;
};
+/*
+ * The IB interfaces that use write() as bi-directional ioctl() are
+ * fundamentally unsafe, since there are lots of ways to trigger "write()"
+ * calls from various contexts with elevated privileges. That includes the
+ * traditional suid executable error message writes, but also various kernel
+ * interfaces that can write to file descriptors.
+ *
+ * This function provides protection for the legacy API by restricting the
+ * calling context.
+ */
+static inline bool ib_safe_file_access(struct file *filp)
+{
+ return filp->f_cred == current_cred() && segment_eq(get_fs(), USER_DS);
+}
+
#endif /* _RDMA_IB_H */
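
The comment above describes the threat; the matching usage is to reject the write before any command parsing. A minimal sketch of a legacy write() handler using the helper (the handler itself and its error message are hypothetical):

/* Hypothetical legacy write()-as-command handler guarded by
 * ib_safe_file_access(). */
static ssize_t my_legacy_write(struct file *filp, const char __user *buf,
                               size_t count, loff_t *pos)
{
        if (!ib_safe_file_access(filp)) {
                pr_err_once("%s: process %d (%s) changed security contexts after opening file descriptor\n",
                            __func__, task_tgid_vnr(current), current->comm);
                return -EACCES;
        }

        /* ... copy in and execute the command as before ... */
        return count;
}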
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index fb2cef4e9747..fc0320c004a3 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -220,6 +220,7 @@ enum ib_device_cap_flags {
IB_DEVICE_ON_DEMAND_PAGING = (1 << 31),
IB_DEVICE_SG_GAPS_REG = (1ULL << 32),
IB_DEVICE_VIRTUAL_FUNCTION = ((u64)1 << 33),
+ IB_DEVICE_RAW_SCATTER_FCS = ((u64)1 << 34),
};
enum ib_signature_prot_cap {
@@ -931,6 +932,13 @@ struct ib_qp_cap {
u32 max_send_sge;
u32 max_recv_sge;
u32 max_inline_data;
+
+ /*
+ * Maximum number of rdma_rw_ctx structures in flight at a time.
+ * ib_create_qp() will calculate the right amount of needed WRs
+ * and MRs based on this.
+ */
+ u32 max_rdma_ctxs;
};
enum ib_sig_type {
@@ -981,6 +989,7 @@ enum ib_qp_create_flags {
IB_QP_CREATE_NETIF_QP = 1 << 5,
IB_QP_CREATE_SIGNATURE_EN = 1 << 6,
IB_QP_CREATE_USE_GFP_NOIO = 1 << 7,
+ IB_QP_CREATE_SCATTER_FCS = 1 << 8,
/* reserve bits 26-31 for low level drivers' internal use */
IB_QP_CREATE_RESERVED_START = 1 << 26,
IB_QP_CREATE_RESERVED_END = 1 << 31,
@@ -1002,7 +1011,11 @@ struct ib_qp_init_attr {
enum ib_sig_type sq_sig_type;
enum ib_qp_type qp_type;
enum ib_qp_create_flags create_flags;
- u8 port_num; /* special QP types only */
+
+ /*
+ * Only needed for special QP types, or when using the RW API.
+ */
+ u8 port_num;
};
struct ib_qp_open_attr {
@@ -1421,9 +1434,14 @@ struct ib_qp {
struct ib_pd *pd;
struct ib_cq *send_cq;
struct ib_cq *recv_cq;
+ spinlock_t mr_lock;
+ int mrs_used;
+ struct list_head rdma_mrs;
+ struct list_head sig_mrs;
struct ib_srq *srq;
struct ib_xrcd *xrcd; /* XRC TGT QPs only */
struct list_head xrcd_list;
+
/* count times opened, mcast attaches, flow attaches */
atomic_t usecnt;
struct list_head open_list;
@@ -1438,12 +1456,16 @@ struct ib_qp {
struct ib_mr {
struct ib_device *device;
struct ib_pd *pd;
- struct ib_uobject *uobject;
u32 lkey;
u32 rkey;
u64 iova;
u32 length;
unsigned int page_size;
+ bool need_inval;
+ union {
+ struct ib_uobject *uobject; /* user */
+ struct list_head qp_entry; /* FR */
+ };
};
struct ib_mw {
@@ -1827,7 +1849,8 @@ struct ib_device {
u32 max_num_sg);
int (*map_mr_sg)(struct ib_mr *mr,
struct scatterlist *sg,
- int sg_nents);
+ int sg_nents,
+ unsigned int *sg_offset);
struct ib_mw * (*alloc_mw)(struct ib_pd *pd,
enum ib_mw_type type,
struct ib_udata *udata);
@@ -2317,6 +2340,18 @@ static inline bool rdma_cap_roce_gid_table(const struct ib_device *device,
device->add_gid && device->del_gid;
}
+/*
+ * Check if the device supports READ W/ INVALIDATE.
+ */
+static inline bool rdma_cap_read_inv(struct ib_device *dev, u32 port_num)
+{
+ /*
+ * iWarp drivers must support READ W/ INVALIDATE. No other protocol
+ * has support for it yet.
+ */
+ return rdma_protocol_iwarp(dev, port_num);
+}
+
int ib_query_gid(struct ib_device *device,
u8 port_num, int index, union ib_gid *gid,
struct ib_gid_attr *attr);
@@ -3111,29 +3146,23 @@ struct net_device *ib_get_net_dev_by_params(struct ib_device *dev, u8 port,
u16 pkey, const union ib_gid *gid,
const struct sockaddr *addr);
-int ib_map_mr_sg(struct ib_mr *mr,
- struct scatterlist *sg,
- int sg_nents,
- unsigned int page_size);
+int ib_map_mr_sg(struct ib_mr *mr, struct scatterlist *sg, int sg_nents,
+ unsigned int *sg_offset, unsigned int page_size);
static inline int
-ib_map_mr_sg_zbva(struct ib_mr *mr,
- struct scatterlist *sg,
- int sg_nents,
- unsigned int page_size)
+ib_map_mr_sg_zbva(struct ib_mr *mr, struct scatterlist *sg, int sg_nents,
+ unsigned int *sg_offset, unsigned int page_size)
{
int n;
- n = ib_map_mr_sg(mr, sg, sg_nents, page_size);
+ n = ib_map_mr_sg(mr, sg, sg_nents, sg_offset, page_size);
mr->iova = 0;
return n;
}
-int ib_sg_to_pages(struct ib_mr *mr,
- struct scatterlist *sgl,
- int sg_nents,
- int (*set_page)(struct ib_mr *, u64));
+int ib_sg_to_pages(struct ib_mr *mr, struct scatterlist *sgl, int sg_nents,
+ unsigned int *sg_offset, int (*set_page)(struct ib_mr *, u64));
void ib_drain_rq(struct ib_qp *qp);
void ib_drain_sq(struct ib_qp *qp);
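
A minimal sketch of the reworked ib_map_mr_sg() call with its new in/out sg_offset argument; treating a short mapping as an error and using PAGE_SIZE granularity are choices of this example, not requirements of the API:

/* Sketch: map a scatterlist into an MR, starting 'offset' bytes into
 * the first entry. */
static int example_map_mr(struct ib_mr *mr, struct scatterlist *sg,
                          int sg_nents, unsigned int offset)
{
        unsigned int sg_offset = offset;
        int n;

        n = ib_map_mr_sg(mr, sg, sg_nents, &sg_offset, PAGE_SIZE);
        if (n < sg_nents)
                return n < 0 ? n : -EINVAL;     /* not everything was mapped */
        return 0;
}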
diff --git a/include/rdma/mr_pool.h b/include/rdma/mr_pool.h
new file mode 100644
index 000000000000..986010b812eb
--- /dev/null
+++ b/include/rdma/mr_pool.h
@@ -0,0 +1,25 @@
+/*
+ * Copyright (c) 2016 HGST, a Western Digital Company.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+#ifndef _RDMA_MR_POOL_H
+#define _RDMA_MR_POOL_H 1
+
+#include <rdma/ib_verbs.h>
+
+struct ib_mr *ib_mr_pool_get(struct ib_qp *qp, struct list_head *list);
+void ib_mr_pool_put(struct ib_qp *qp, struct list_head *list, struct ib_mr *mr);
+
+int ib_mr_pool_init(struct ib_qp *qp, struct list_head *list, int nr,
+ enum ib_mr_type type, u32 max_num_sg);
+void ib_mr_pool_destroy(struct ib_qp *qp, struct list_head *list);
+
+#endif /* _RDMA_MR_POOL_H */
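
The pool API above is intended to be sized once at queue-pair setup and then used per I/O; a hedged sketch of that pattern (the queue depth, error handling and the use of qp->rdma_mrs as the list head are assumptions of this example):

/* Illustrative MR pool consumer. */
static int example_setup_mrs(struct ib_qp *qp, int queue_depth, u32 max_sge)
{
        return ib_mr_pool_init(qp, &qp->rdma_mrs, queue_depth,
                               IB_MR_TYPE_MEM_REG, max_sge);
}

static int example_start_io(struct ib_qp *qp)
{
        struct ib_mr *mr;

        mr = ib_mr_pool_get(qp, &qp->rdma_mrs);
        if (!mr)
                return -EAGAIN;         /* pool exhausted; retry later */

        /* ... register pages against mr and post the work request ... */

        ib_mr_pool_put(qp, &qp->rdma_mrs, mr);
        return 0;
}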
diff --git a/include/rdma/rdma_vt.h b/include/rdma/rdma_vt.h
index a8696551abb1..d57ceee90d26 100644
--- a/include/rdma/rdma_vt.h
+++ b/include/rdma/rdma_vt.h
@@ -467,6 +467,7 @@ static inline struct rvt_qp *rvt_lookup_qpn(struct rvt_dev_info *rdi,
}
struct rvt_dev_info *rvt_alloc_device(size_t size, int nports);
+void rvt_dealloc_device(struct rvt_dev_info *rdi);
int rvt_register_device(struct rvt_dev_info *rvd);
void rvt_unregister_device(struct rvt_dev_info *rvd);
int rvt_check_ah(struct ib_device *ibdev, struct ib_ah_attr *ah_attr);
diff --git a/include/rdma/rdmavt_qp.h b/include/rdma/rdmavt_qp.h
index 497e59065c2c..0e1ff2abfe92 100644
--- a/include/rdma/rdmavt_qp.h
+++ b/include/rdma/rdmavt_qp.h
@@ -117,8 +117,9 @@
/*
* Wait flags that would prevent any packet type from being sent.
*/
-#define RVT_S_ANY_WAIT_IO (RVT_S_WAIT_PIO | RVT_S_WAIT_TX | \
- RVT_S_WAIT_DMA_DESC | RVT_S_WAIT_KMEM)
+#define RVT_S_ANY_WAIT_IO \
+ (RVT_S_WAIT_PIO | RVT_S_WAIT_PIO_DRAIN | RVT_S_WAIT_TX | \
+ RVT_S_WAIT_DMA_DESC | RVT_S_WAIT_KMEM)
/*
* Wait flags that would prevent send work requests from making progress.
diff --git a/include/rdma/rw.h b/include/rdma/rw.h
new file mode 100644
index 000000000000..377d865e506d
--- /dev/null
+++ b/include/rdma/rw.h
@@ -0,0 +1,88 @@
+/*
+ * Copyright (c) 2016 HGST, a Western Digital Company.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+#ifndef _RDMA_RW_H
+#define _RDMA_RW_H
+
+#include <linux/dma-mapping.h>
+#include <linux/scatterlist.h>
+#include <rdma/ib_verbs.h>
+#include <rdma/rdma_cm.h>
+#include <rdma/mr_pool.h>
+
+struct rdma_rw_ctx {
+ /* number of RDMA READ/WRITE WRs (not counting MR WRs) */
+ u32 nr_ops;
+
+ /* tag for the union below: */
+ u8 type;
+
+ union {
+ /* for mapping a single SGE: */
+ struct {
+ struct ib_sge sge;
+ struct ib_rdma_wr wr;
+ } single;
+
+ /* for mapping of multiple SGEs: */
+ struct {
+ struct ib_sge *sges;
+ struct ib_rdma_wr *wrs;
+ } map;
+
+ /* for registering multiple WRs: */
+ struct rdma_rw_reg_ctx {
+ struct ib_sge sge;
+ struct ib_rdma_wr wr;
+ struct ib_reg_wr reg_wr;
+ struct ib_send_wr inv_wr;
+ struct ib_mr *mr;
+ } *reg;
+
+ struct {
+ struct rdma_rw_reg_ctx data;
+ struct rdma_rw_reg_ctx prot;
+ struct ib_send_wr sig_inv_wr;
+ struct ib_mr *sig_mr;
+ struct ib_sge sig_sge;
+ struct ib_sig_handover_wr sig_wr;
+ } *sig;
+ };
+};
+
+int rdma_rw_ctx_init(struct rdma_rw_ctx *ctx, struct ib_qp *qp, u8 port_num,
+ struct scatterlist *sg, u32 sg_cnt, u32 sg_offset,
+ u64 remote_addr, u32 rkey, enum dma_data_direction dir);
+void rdma_rw_ctx_destroy(struct rdma_rw_ctx *ctx, struct ib_qp *qp, u8 port_num,
+ struct scatterlist *sg, u32 sg_cnt,
+ enum dma_data_direction dir);
+
+int rdma_rw_ctx_signature_init(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
+ u8 port_num, struct scatterlist *sg, u32 sg_cnt,
+ struct scatterlist *prot_sg, u32 prot_sg_cnt,
+ struct ib_sig_attrs *sig_attrs, u64 remote_addr, u32 rkey,
+ enum dma_data_direction dir);
+void rdma_rw_ctx_destroy_signature(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
+ u8 port_num, struct scatterlist *sg, u32 sg_cnt,
+ struct scatterlist *prot_sg, u32 prot_sg_cnt,
+ enum dma_data_direction dir);
+
+struct ib_send_wr *rdma_rw_ctx_wrs(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
+ u8 port_num, struct ib_cqe *cqe, struct ib_send_wr *chain_wr);
+int rdma_rw_ctx_post(struct rdma_rw_ctx *ctx, struct ib_qp *qp, u8 port_num,
+ struct ib_cqe *cqe, struct ib_send_wr *chain_wr);
+
+void rdma_rw_init_qp(struct ib_device *dev, struct ib_qp_init_attr *attr);
+int rdma_rw_init_mrs(struct ib_qp *qp, struct ib_qp_init_attr *attr);
+void rdma_rw_cleanup_mrs(struct ib_qp *qp);
+
+#endif /* _RDMA_RW_H */
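
The RW API above revolves around a per-I/O context: initialise it against a scatterlist, post the work requests it generated, and destroy it on completion. A hedged sketch of that lifecycle for a single RDMA READ (the surrounding queue-pair and completion plumbing are assumed, not shown):

/* Sketch: read a remote buffer into a local scatterlist. */
static int example_rdma_read(struct ib_qp *qp, u8 port_num,
                             struct scatterlist *sg, u32 sg_cnt,
                             u64 remote_addr, u32 rkey,
                             struct ib_cqe *done_cqe)
{
        struct rdma_rw_ctx ctx;
        int ret;

        ret = rdma_rw_ctx_init(&ctx, qp, port_num, sg, sg_cnt, 0,
                               remote_addr, rkey, DMA_FROM_DEVICE);
        if (ret < 0)
                return ret;

        /* posts the READ (plus any MR registration WRs), signalling done_cqe */
        ret = rdma_rw_ctx_post(&ctx, qp, port_num, done_cqe, NULL);
        if (ret)
                rdma_rw_ctx_destroy(&ctx, qp, port_num, sg, sg_cnt,
                                    DMA_FROM_DEVICE);
        /* on success, destroy the ctx from the completion handler instead */
        return ret;
}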
diff --git a/include/rxrpc/packet.h b/include/rxrpc/packet.h
index 9ebab3a8cf0a..b2017440b765 100644
--- a/include/rxrpc/packet.h
+++ b/include/rxrpc/packet.h
@@ -68,8 +68,6 @@ struct rxrpc_wire_header {
} __packed;
-extern const char *rxrpc_pkts[];
-
#define RXRPC_SUPPORTED_PACKET_TYPES ( \
(1 << RXRPC_PACKET_TYPE_DATA) | \
(1 << RXRPC_PACKET_TYPE_ACK) | \
diff --git a/include/scsi/scsi.h b/include/scsi/scsi.h
index e0a3398b1547..8ec7c30e35af 100644
--- a/include/scsi/scsi.h
+++ b/include/scsi/scsi.h
@@ -18,25 +18,6 @@ enum scsi_timeouts {
};
/*
- * The maximum number of SG segments that we will put inside a
- * scatterlist (unless chaining is used). Should ideally fit inside a
- * single page, to avoid a higher order allocation. We could define this
- * to SG_MAX_SINGLE_ALLOC to pack correctly at the highest order. The
- * minimum value is 32
- */
-#define SCSI_MAX_SG_SEGMENTS 128
-
-/*
- * Like SCSI_MAX_SG_SEGMENTS, but for archs that have sg chaining. This limit
- * is totally arbitrary, a setting of 2048 will get you at least 8mb ios.
- */
-#ifdef CONFIG_ARCH_HAS_SG_CHAIN
-#define SCSI_MAX_SG_CHAIN_SEGMENTS 2048
-#else
-#define SCSI_MAX_SG_CHAIN_SEGMENTS SCSI_MAX_SG_SEGMENTS
-#endif
-
-/*
* DIX-capable adapters effectively support infinite chaining for the
* protection information scatterlist
*/
diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
index c067019ed12a..a6c346df290d 100644
--- a/include/scsi/scsi_device.h
+++ b/include/scsi/scsi_device.h
@@ -50,6 +50,12 @@ enum scsi_device_state {
SDEV_CREATED_BLOCK, /* same as above but for created devices */
};
+enum scsi_scan_mode {
+ SCSI_SCAN_INITIAL = 0,
+ SCSI_SCAN_RESCAN,
+ SCSI_SCAN_MANUAL,
+};
+
enum scsi_device_event {
SDEV_EVT_MEDIA_CHANGE = 1, /* media has changed */
SDEV_EVT_INQUIRY_CHANGE_REPORTED, /* 3F 03 UA reported */
@@ -242,6 +248,7 @@ scmd_printk(const char *, const struct scsi_cmnd *, const char *, ...);
enum scsi_target_state {
STARGET_CREATED = 1,
STARGET_RUNNING,
+ STARGET_REMOVE,
STARGET_DEL,
};
@@ -391,7 +398,8 @@ extern void scsi_device_resume(struct scsi_device *sdev);
extern void scsi_target_quiesce(struct scsi_target *);
extern void scsi_target_resume(struct scsi_target *);
extern void scsi_scan_target(struct device *parent, unsigned int channel,
- unsigned int id, u64 lun, int rescan);
+ unsigned int id, u64 lun,
+ enum scsi_scan_mode rescan);
extern void scsi_target_reap(struct scsi_target *);
extern void scsi_target_block(struct device *);
extern void scsi_target_unblock(struct device *, enum scsi_device_state);
@@ -516,6 +524,31 @@ static inline int scsi_device_tpgs(struct scsi_device *sdev)
return sdev->inquiry ? (sdev->inquiry[5] >> 4) & 0x3 : 0;
}
+/**
+ * scsi_device_supports_vpd - test if a device supports VPD pages
+ * @sdev: the &struct scsi_device to test
+ *
+ * If the 'try_vpd_pages' flag is set it takes precedence.
+ * Otherwise we will assume VPD pages are supported if the
+ * SCSI level is at least SPC-3 and 'skip_vpd_pages' is not set.
+ */
+static inline int scsi_device_supports_vpd(struct scsi_device *sdev)
+{
+ /* Attempt VPD inquiry if the device blacklist explicitly calls
+ * for it.
+ */
+ if (sdev->try_vpd_pages)
+ return 1;
+ /*
+ * Although VPD inquiries can go to SCSI-2 type devices,
+ * some USB ones crash on receiving them, and the pages
+ * we currently ask for are mandatory for SPC-2 and beyond
+ */
+ if (sdev->scsi_level >= SCSI_SPC_2 && !sdev->skip_vpd_pages)
+ return 1;
+ return 0;
+}
+
#define MODULE_ALIAS_SCSI_DEVICE(type) \
MODULE_ALIAS("scsi:t-" __stringify(type) "*")
#define SCSI_DEVICE_MODALIAS_FMT "scsi:t-0x%02x"
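
scsi_device_supports_vpd() is naturally used to gate VPD inquiries; the fragment below is illustrative and assumes the caller only wants the unit serial number page (0x80):

/* Illustrative: read VPD page 0x80 only when VPD support is expected. */
static int example_read_unit_serial(struct scsi_device *sdev,
                                    unsigned char *buf, int len)
{
        if (!scsi_device_supports_vpd(sdev))
                return -EOPNOTSUPP;

        return scsi_get_vpd_page(sdev, 0x80, buf, len);
}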
diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h
index fcfa3d7f5e7e..76e9d278c334 100644
--- a/include/scsi/scsi_host.h
+++ b/include/scsi/scsi_host.h
@@ -37,7 +37,7 @@ struct blk_queue_tags;
* used in one scatter-gather request.
*/
#define SG_NONE 0
-#define SG_ALL SCSI_MAX_SG_SEGMENTS
+#define SG_ALL SG_CHUNK_SIZE
#define MODE_UNKNOWN 0x00
#define MODE_INITIATOR 0x01
diff --git a/include/scsi/scsi_proto.h b/include/scsi/scsi_proto.h
index c2ae21cbaa2c..d1defd1ebd95 100644
--- a/include/scsi/scsi_proto.h
+++ b/include/scsi/scsi_proto.h
@@ -115,6 +115,8 @@
#define VERIFY_16 0x8f
#define SYNCHRONIZE_CACHE_16 0x91
#define WRITE_SAME_16 0x93
+#define ZBC_OUT 0x94
+#define ZBC_IN 0x95
#define SERVICE_ACTION_BIDIRECTIONAL 0x9d
#define SERVICE_ACTION_IN_16 0x9e
#define SERVICE_ACTION_OUT_16 0x9f
@@ -143,6 +145,13 @@
#define MO_SET_PRIORITY 0x0e
#define MO_SET_TIMESTAMP 0x0f
#define MO_MANAGEMENT_PROTOCOL_OUT 0x10
+/* values for ZBC_IN */
+#define ZI_REPORT_ZONES 0x00
+/* values for ZBC_OUT */
+#define ZO_CLOSE_ZONE 0x01
+#define ZO_FINISH_ZONE 0x02
+#define ZO_OPEN_ZONE 0x03
+#define ZO_RESET_WRITE_POINTER 0x04
/* values for variable length command */
#define XDREAD_32 0x03
#define XDWRITE_32 0x04
diff --git a/include/soc/at91/atmel-sfr.h b/include/soc/at91/atmel-sfr.h
new file mode 100644
index 000000000000..2f9bb984a4df
--- /dev/null
+++ b/include/soc/at91/atmel-sfr.h
@@ -0,0 +1,18 @@
+/*
+ * Atmel SFR (Special Function Registers) register offsets and bit definitions.
+ *
+ * Copyright (C) 2016 Atmel
+ *
+ * Author: Ludovic Desroches <ludovic.desroches@atmel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _LINUX_MFD_SYSCON_ATMEL_SFR_H
+#define _LINUX_MFD_SYSCON_ATMEL_SFR_H
+
+#define AT91_SFR_I2SCLKSEL 0x90 /* I2SC Register */
+
+#endif /* _LINUX_MFD_SYSCON_ATMEL_SFR_H */
diff --git a/include/soc/nps/common.h b/include/soc/nps/common.h
new file mode 100644
index 000000000000..9b1d43d671a3
--- /dev/null
+++ b/include/soc/nps/common.h
@@ -0,0 +1,166 @@
+/*
+ * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef SOC_NPS_COMMON_H
+#define SOC_NPS_COMMON_H
+
+#ifdef CONFIG_SMP
+#define NPS_IPI_IRQ 5
+#endif
+
+#define NPS_HOST_REG_BASE 0xF6000000
+
+#define NPS_MSU_BLKID 0x018
+
+#define CTOP_INST_RSPI_GIC_0_R12 0x3C56117E
+#define CTOP_INST_MOV2B_FLIP_R3_B1_B2_INST 0x5B60
+#define CTOP_INST_MOV2B_FLIP_R3_B1_B2_LIMM 0x00010422
+
+#ifndef __ASSEMBLY__
+
+/* In order to increase compilation test coverage */
+#ifdef CONFIG_ARC
+static inline void nps_ack_gic(void)
+{
+ __asm__ __volatile__ (
+ " .word %0\n"
+ :
+ : "i"(CTOP_INST_RSPI_GIC_0_R12)
+ : "memory");
+}
+#else
+static inline void nps_ack_gic(void) { }
+#define write_aux_reg(r, v)
+#define read_aux_reg(r) 0
+#endif
+
+/* CPU global ID */
+struct global_id {
+ union {
+ struct {
+#ifdef CONFIG_EZNPS_MTM_EXT
+ u32 __reserved:20, cluster:4, core:4, thread:4;
+#else
+ u32 __reserved:24, cluster:4, core:4;
+#endif
+ };
+ u32 value;
+ };
+};
+
+/*
+ * Convert logical to physical CPU IDs
+ *
+ * The conversion swap bits 1 and 2 of cluster id (out of 4 bits)
+ * Now quad of logical clusters id's are adjacent physically,
+ * and not like the id's physically came with each cluster.
+ * Below table is 4x4 mesh of core clusters as it layout on chip.
+ * Cluster ids are in format: logical (physical)
+ *
+ * ----------------- ------------------
+ * 3 | 5 (3) 7 (7) | | 13 (11) 15 (15)|
+ *
+ * 2 | 4 (2) 6 (6) | | 12 (10) 14 (14)|
+ * ----------------- ------------------
+ * 1 | 1 (1) 3 (5) | | 9 (9) 11 (13)|
+ *
+ * 0 | 0 (0) 2 (4) | | 8 (8) 10 (12)|
+ * ----------------- ------------------
+ * 0 1 2 3
+ */
+static inline int nps_cluster_logic_to_phys(int cluster)
+{
+#ifdef __arc__
+ __asm__ __volatile__(
+ " mov r3,%0\n"
+ " .short %1\n"
+ " .word %2\n"
+ " mov %0,r3\n"
+ : "+r"(cluster)
+ : "i"(CTOP_INST_MOV2B_FLIP_R3_B1_B2_INST),
+ "i"(CTOP_INST_MOV2B_FLIP_R3_B1_B2_LIMM)
+ : "r3");
+#endif
+
+ return cluster;
+}
+
+#define NPS_CPU_TO_CLUSTER_NUM(cpu) \
+ ({ struct global_id gid; gid.value = cpu; \
+ nps_cluster_logic_to_phys(gid.cluster); })
+
+struct nps_host_reg_address {
+ union {
+ struct {
+ u32 base:8, cl_x:4, cl_y:4,
+ blkid:6, reg:8, __reserved:2;
+ };
+ u32 value;
+ };
+};
+
+struct nps_host_reg_address_non_cl {
+ union {
+ struct {
+ u32 base:7, blkid:11, reg:12, __reserved:2;
+ };
+ u32 value;
+ };
+};
+
+static inline void *nps_host_reg_non_cl(u32 blkid, u32 reg)
+{
+ struct nps_host_reg_address_non_cl reg_address;
+
+ reg_address.value = NPS_HOST_REG_BASE;
+ reg_address.blkid = blkid;
+ reg_address.reg = reg;
+
+ return (void *)reg_address.value;
+}
+
+static inline void *nps_host_reg(u32 cpu, u32 blkid, u32 reg)
+{
+ struct nps_host_reg_address reg_address;
+ u32 cl = NPS_CPU_TO_CLUSTER_NUM(cpu);
+
+ reg_address.value = NPS_HOST_REG_BASE;
+ reg_address.cl_x = (cl >> 2) & 0x3;
+ reg_address.cl_y = cl & 0x3;
+ reg_address.blkid = blkid;
+ reg_address.reg = reg;
+
+ return (void *)reg_address.value;
+}
+#endif /* __ASSEMBLY__ */
+
+#endif /* SOC_NPS_COMMON_H */
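
A short sketch of how the address-composition helpers above are meant to be used; the choice of the MSU block and the big-endian accessor are assumptions of this example:

/* Sketch: read one register from the MSU block of the cluster that a
 * given logical CPU belongs to. */
static u32 example_read_cpu_msu_reg(u32 cpu, u32 reg)
{
        void *addr = nps_host_reg(cpu, NPS_MSU_BLKID, reg);

        return ioread32be(addr);        /* endianness of the block is assumed */
}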
diff --git a/include/soc/tegra/fuse.h b/include/soc/tegra/fuse.h
index 961b821b6a46..b4c9219e7f95 100644
--- a/include/soc/tegra/fuse.h
+++ b/include/soc/tegra/fuse.h
@@ -26,6 +26,7 @@
#define TEGRA_FUSE_SKU_CALIB_0 0xf0
#define TEGRA30_FUSE_SATA_CALIB 0x124
+#define TEGRA_FUSE_USB_CALIB_EXT_0 0x250
#ifndef __ASSEMBLY__
diff --git a/include/soc/tegra/pmc.h b/include/soc/tegra/pmc.h
index d18efe402ff1..e9e53473a63e 100644
--- a/include/soc/tegra/pmc.h
+++ b/include/soc/tegra/pmc.h
@@ -33,9 +33,9 @@ void tegra_pmc_enter_suspend_mode(enum tegra_suspend_mode mode);
#endif /* CONFIG_PM_SLEEP */
#ifdef CONFIG_SMP
-bool tegra_pmc_cpu_is_powered(int cpuid);
-int tegra_pmc_cpu_power_on(int cpuid);
-int tegra_pmc_cpu_remove_clamping(int cpuid);
+bool tegra_pmc_cpu_is_powered(unsigned int cpuid);
+int tegra_pmc_cpu_power_on(unsigned int cpuid);
+int tegra_pmc_cpu_remove_clamping(unsigned int cpuid);
#endif /* CONFIG_SMP */
/*
@@ -72,6 +72,7 @@ int tegra_pmc_cpu_remove_clamping(int cpuid);
#define TEGRA_POWERGATE_AUD 27
#define TEGRA_POWERGATE_DFD 28
#define TEGRA_POWERGATE_VE2 29
+#define TEGRA_POWERGATE_MAX TEGRA_POWERGATE_VE2
#define TEGRA_POWERGATE_3D0 TEGRA_POWERGATE_3D
@@ -108,50 +109,51 @@ int tegra_pmc_cpu_remove_clamping(int cpuid);
#define TEGRA_IO_RAIL_SYS_DDC 58
#ifdef CONFIG_ARCH_TEGRA
-int tegra_powergate_is_powered(int id);
-int tegra_powergate_power_on(int id);
-int tegra_powergate_power_off(int id);
-int tegra_powergate_remove_clamping(int id);
+int tegra_powergate_is_powered(unsigned int id);
+int tegra_powergate_power_on(unsigned int id);
+int tegra_powergate_power_off(unsigned int id);
+int tegra_powergate_remove_clamping(unsigned int id);
/* Must be called with clk disabled, and returns with clk enabled */
-int tegra_powergate_sequence_power_up(int id, struct clk *clk,
+int tegra_powergate_sequence_power_up(unsigned int id, struct clk *clk,
struct reset_control *rst);
-int tegra_io_rail_power_on(int id);
-int tegra_io_rail_power_off(int id);
+int tegra_io_rail_power_on(unsigned int id);
+int tegra_io_rail_power_off(unsigned int id);
#else
-static inline int tegra_powergate_is_powered(int id)
+static inline int tegra_powergate_is_powered(unsigned int id)
{
return -ENOSYS;
}
-static inline int tegra_powergate_power_on(int id)
+static inline int tegra_powergate_power_on(unsigned int id)
{
return -ENOSYS;
}
-static inline int tegra_powergate_power_off(int id)
+static inline int tegra_powergate_power_off(unsigned int id)
{
return -ENOSYS;
}
-static inline int tegra_powergate_remove_clamping(int id)
+static inline int tegra_powergate_remove_clamping(unsigned int id)
{
return -ENOSYS;
}
-static inline int tegra_powergate_sequence_power_up(int id, struct clk *clk,
+static inline int tegra_powergate_sequence_power_up(unsigned int id,
+ struct clk *clk,
struct reset_control *rst)
{
return -ENOSYS;
}
-static inline int tegra_io_rail_power_on(int id)
+static inline int tegra_io_rail_power_on(unsigned int id)
{
return -ENOSYS;
}
-static inline int tegra_io_rail_power_off(int id)
+static inline int tegra_io_rail_power_off(unsigned int id)
{
return -ENOSYS;
}
diff --git a/include/sound/dmaengine_pcm.h b/include/sound/dmaengine_pcm.h
index f86ef5ea9b01..67be2445941a 100644
--- a/include/sound/dmaengine_pcm.h
+++ b/include/sound/dmaengine_pcm.h
@@ -51,6 +51,16 @@ struct dma_chan *snd_dmaengine_pcm_request_channel(dma_filter_fn filter_fn,
void *filter_data);
struct dma_chan *snd_dmaengine_pcm_get_chan(struct snd_pcm_substream *substream);
+/*
+ * The DAI supports packed transfers, e.g. 2 16-bit samples in a 32-bit word.
+ * If this flag is set, the dmaengine driver won't put any restriction on
+ * the supported sample formats and will leave the DMA transfer size undefined.
+ * The DAI driver is responsible for disabling any unsupported formats in its
+ * configuration and for catching corner cases that are not already handled in
+ * the ALSA core.
+ */
+#define SND_DMAENGINE_PCM_DAI_FLAG_PACK BIT(0)
+
/**
* struct snd_dmaengine_dai_dma_data - DAI DMA configuration data
* @addr: Address of the DAI data source or destination register.
@@ -63,6 +73,7 @@ struct dma_chan *snd_dmaengine_pcm_get_chan(struct snd_pcm_substream *substream)
* requesting the DMA channel.
* @chan_name: Custom channel name to use when requesting DMA channel.
* @fifo_size: FIFO size of the DAI controller in bytes
+ * @flags: PCM_DAI flags, only SND_DMAENGINE_PCM_DAI_FLAG_PACK for now
*/
struct snd_dmaengine_dai_dma_data {
dma_addr_t addr;
@@ -72,6 +83,7 @@ struct snd_dmaengine_dai_dma_data {
void *filter_data;
const char *chan_name;
unsigned int fifo_size;
+ unsigned int flags;
};
void snd_dmaengine_pcm_set_config_from_dai_data(
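
A cpu-DAI driver opts in to packed transfers by setting the new flag in its DMA data before handing it to the core; everything below (the private struct, register offset and probe fragment) is hypothetical:

/* Hypothetical cpu-DAI probe fragment advertising packed-transfer support. */
#define MY_DAI_TXFIFO   0x20            /* made-up FIFO register offset */

struct my_dai {
        resource_size_t phys_base;
        struct snd_dmaengine_dai_dma_data playback_dma_data;
};

static int my_dai_probe(struct snd_soc_dai *dai)
{
        struct my_dai *priv = snd_soc_dai_get_drvdata(dai);

        priv->playback_dma_data.addr = priv->phys_base + MY_DAI_TXFIFO;
        priv->playback_dma_data.addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
        priv->playback_dma_data.flags = SND_DMAENGINE_PCM_DAI_FLAG_PACK;

        snd_soc_dai_init_dma_data(dai, &priv->playback_dma_data, NULL);
        return 0;
}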
diff --git a/include/sound/hda_chmap.h b/include/sound/hda_chmap.h
index e20d219a0304..babd445c7505 100644
--- a/include/sound/hda_chmap.h
+++ b/include/sound/hda_chmap.h
@@ -36,6 +36,8 @@ struct hdac_chmap_ops {
int (*chmap_validate)(struct hdac_chmap *hchmap, int ca,
int channels, unsigned char *chmap);
+ int (*get_spk_alloc)(struct hdac_device *hdac, int pcm_idx);
+
void (*get_chmap)(struct hdac_device *hdac, int pcm_idx,
unsigned char *chmap);
void (*set_chmap)(struct hdac_device *hdac, int pcm_idx,
diff --git a/include/sound/hda_i915.h b/include/sound/hda_i915.h
index fa341fcb5829..796cabf6be5e 100644
--- a/include/sound/hda_i915.h
+++ b/include/sound/hda_i915.h
@@ -9,9 +9,9 @@
#ifdef CONFIG_SND_HDA_I915
int snd_hdac_set_codec_wakeup(struct hdac_bus *bus, bool enable);
int snd_hdac_display_power(struct hdac_bus *bus, bool enable);
-int snd_hdac_get_display_clk(struct hdac_bus *bus);
-int snd_hdac_sync_audio_rate(struct hdac_bus *bus, hda_nid_t nid, int rate);
-int snd_hdac_acomp_get_eld(struct hdac_bus *bus, hda_nid_t nid,
+void snd_hdac_i915_set_bclk(struct hdac_bus *bus);
+int snd_hdac_sync_audio_rate(struct hdac_device *codec, hda_nid_t nid, int rate);
+int snd_hdac_acomp_get_eld(struct hdac_device *codec, hda_nid_t nid,
bool *audio_enabled, char *buffer, int max_bytes);
int snd_hdac_i915_init(struct hdac_bus *bus);
int snd_hdac_i915_exit(struct hdac_bus *bus);
@@ -25,16 +25,15 @@ static inline int snd_hdac_display_power(struct hdac_bus *bus, bool enable)
{
return 0;
}
-static inline int snd_hdac_get_display_clk(struct hdac_bus *bus)
+static inline void snd_hdac_i915_set_bclk(struct hdac_bus *bus)
{
- return 0;
}
-static inline int snd_hdac_sync_audio_rate(struct hdac_bus *bus, hda_nid_t nid,
- int rate)
+static inline int snd_hdac_sync_audio_rate(struct hdac_device *codec,
+ hda_nid_t nid, int rate)
{
return 0;
}
-static inline int snd_hdac_acomp_get_eld(struct hdac_bus *bus, hda_nid_t nid,
+static inline int snd_hdac_acomp_get_eld(struct hdac_device *codec, hda_nid_t nid,
bool *audio_enabled, char *buffer,
int max_bytes)
{
diff --git a/include/sound/hda_regmap.h b/include/sound/hda_regmap.h
index 2767c55a641e..ca64f0f50b45 100644
--- a/include/sound/hda_regmap.h
+++ b/include/sound/hda_regmap.h
@@ -17,6 +17,8 @@ int snd_hdac_regmap_add_vendor_verb(struct hdac_device *codec,
unsigned int verb);
int snd_hdac_regmap_read_raw(struct hdac_device *codec, unsigned int reg,
unsigned int *val);
+int snd_hdac_regmap_read_raw_uncached(struct hdac_device *codec,
+ unsigned int reg, unsigned int *val);
int snd_hdac_regmap_write_raw(struct hdac_device *codec, unsigned int reg,
unsigned int val);
int snd_hdac_regmap_update_raw(struct hdac_device *codec, unsigned int reg,
diff --git a/include/sound/hdaudio_ext.h b/include/sound/hdaudio_ext.h
index 07fa59237feb..b9593b201599 100644
--- a/include/sound/hdaudio_ext.h
+++ b/include/sound/hdaudio_ext.h
@@ -14,6 +14,8 @@
* @gtscap: gts capabilities pointer
* @drsmcap: dma resume capabilities pointer
* @hlink_list: link list of HDA links
+ * @lock: lock for link mgmt
+ * @cmd_dma_state: state of cmd DMAs: CORB and RIRB
*/
struct hdac_ext_bus {
struct hdac_bus bus;
@@ -27,6 +29,9 @@ struct hdac_ext_bus {
void __iomem *drsmcap;
struct list_head hlink_list;
+
+ struct mutex lock;
+ bool cmd_dma_state;
};
int snd_hdac_ext_bus_init(struct hdac_ext_bus *sbus, struct device *dev,
@@ -142,6 +147,9 @@ struct hdac_ext_link {
void __iomem *ml_addr; /* link output stream reg pointer */
u32 lcaps; /* link capabilities */
u16 lsdiid; /* link sdi identifier */
+
+ int ref_count;
+
struct list_head list;
};
@@ -154,6 +162,11 @@ void snd_hdac_ext_link_set_stream_id(struct hdac_ext_link *link,
void snd_hdac_ext_link_clear_stream_id(struct hdac_ext_link *link,
int stream);
+int snd_hdac_ext_bus_link_get(struct hdac_ext_bus *ebus,
+ struct hdac_ext_link *link);
+int snd_hdac_ext_bus_link_put(struct hdac_ext_bus *ebus,
+ struct hdac_ext_link *link);
+
/* update register macro */
#define snd_hdac_updatel(addr, reg, mask, val) \
writel(((readl(addr + reg) & ~(mask)) | (val)), \
diff --git a/include/sound/hdmi-codec.h b/include/sound/hdmi-codec.h
new file mode 100644
index 000000000000..fc3a481ad91e
--- /dev/null
+++ b/include/sound/hdmi-codec.h
@@ -0,0 +1,100 @@
+/*
+ * hdmi-codec.h - HDMI Codec driver API
+ *
+ * Copyright (C) 2014 Texas Instruments Incorporated - http://www.ti.com
+ *
+ * Author: Jyri Sarha <jsarha@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+#ifndef __HDMI_CODEC_H__
+#define __HDMI_CODEC_H__
+
+#include <linux/hdmi.h>
+#include <drm/drm_edid.h>
+#include <sound/asoundef.h>
+#include <uapi/sound/asound.h>
+
+/*
+ * Protocol between ASoC cpu-dai and HDMI-encoder
+ */
+struct hdmi_codec_daifmt {
+ enum {
+ HDMI_I2S,
+ HDMI_RIGHT_J,
+ HDMI_LEFT_J,
+ HDMI_DSP_A,
+ HDMI_DSP_B,
+ HDMI_AC97,
+ HDMI_SPDIF,
+ } fmt;
+ int bit_clk_inv:1;
+ int frame_clk_inv:1;
+ int bit_clk_master:1;
+ int frame_clk_master:1;
+};
+
+/*
+ * HDMI audio parameters
+ */
+struct hdmi_codec_params {
+ struct hdmi_audio_infoframe cea;
+ struct snd_aes_iec958 iec;
+ int sample_rate;
+ int sample_width;
+ int channels;
+};
+
+struct hdmi_codec_ops {
+ /*
+ * Called when ASoC starts an audio stream setup.
+ * Optional
+ */
+ int (*audio_startup)(struct device *dev);
+
+ /*
+ * Configures HDMI-encoder for audio stream.
+ * Mandatory
+ */
+ int (*hw_params)(struct device *dev,
+ struct hdmi_codec_daifmt *fmt,
+ struct hdmi_codec_params *hparms);
+
+ /*
+ * Shuts down the audio stream.
+ * Mandatory
+ */
+ void (*audio_shutdown)(struct device *dev);
+
+ /*
+ * Mute/unmute HDMI audio stream.
+ * Optional
+ */
+ int (*digital_mute)(struct device *dev, bool enable);
+
+ /*
+ * Provides EDID-Like-Data from connected HDMI device.
+ * Optional
+ */
+ int (*get_eld)(struct device *dev, uint8_t *buf, size_t len);
+};
+
+/* HDMI codec initialization data */
+struct hdmi_codec_pdata {
+ const struct hdmi_codec_ops *ops;
+ uint i2s:1;
+ uint spdif:1;
+ int max_i2s_channels;
+};
+
+#define HDMI_CODEC_DRV_NAME "hdmi-audio-codec"
+
+#endif /* __HDMI_CODEC_H__ */
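
An encoder driver consumes this API by filling hdmi_codec_ops and passing them through hdmi_codec_pdata; the my_enc_* names and values below are placeholders. The pdata would then typically be attached to a platform device registered under HDMI_CODEC_DRV_NAME.

/* Placeholder encoder glue: the mandatory hooks plus get_eld(). */
static int my_enc_hw_params(struct device *dev, struct hdmi_codec_daifmt *fmt,
                            struct hdmi_codec_params *hparms)
{
        /* program the encoder for hparms->sample_rate / hparms->channels */
        return 0;
}

static void my_enc_audio_shutdown(struct device *dev)
{
}

static int my_enc_get_eld(struct device *dev, uint8_t *buf, size_t len)
{
        memset(buf, 0, len);    /* no connected sink in this sketch */
        return 0;
}

static const struct hdmi_codec_ops my_enc_codec_ops = {
        .hw_params      = my_enc_hw_params,
        .audio_shutdown = my_enc_audio_shutdown,
        .get_eld        = my_enc_get_eld,
};

static struct hdmi_codec_pdata my_enc_codec_pdata = {
        .ops                    = &my_enc_codec_ops,
        .i2s                    = 1,
        .max_i2s_channels       = 8,
};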
diff --git a/include/sound/pcm_iec958.h b/include/sound/pcm_iec958.h
index 0eed397aca8e..36f023acb201 100644
--- a/include/sound/pcm_iec958.h
+++ b/include/sound/pcm_iec958.h
@@ -6,4 +6,6 @@
int snd_pcm_create_iec958_consumer(struct snd_pcm_runtime *runtime, u8 *cs,
size_t len);
+int snd_pcm_create_iec958_consumer_hw_params(struct snd_pcm_hw_params *params,
+ u8 *cs, size_t len);
#endif
diff --git a/include/sound/soc-dapm.h b/include/sound/soc-dapm.h
index 97069466c38d..3101d53468aa 100644
--- a/include/sound/soc-dapm.h
+++ b/include/sound/soc-dapm.h
@@ -100,6 +100,7 @@ struct device;
{ .id = snd_soc_dapm_mixer_named_ctl, .name = wname, \
SND_SOC_DAPM_INIT_REG_VAL(wreg, wshift, winvert), \
.kcontrol_news = wcontrols, .num_kcontrols = wncontrols}
+/* DEPRECATED: use SND_SOC_DAPM_SUPPLY */
#define SND_SOC_DAPM_MICBIAS(wname, wreg, wshift, winvert) \
{ .id = snd_soc_dapm_micbias, .name = wname, \
SND_SOC_DAPM_INIT_REG_VAL(wreg, wshift, winvert), \
@@ -473,7 +474,7 @@ enum snd_soc_dapm_type {
snd_soc_dapm_out_drv, /* output driver */
snd_soc_dapm_adc, /* analog to digital converter */
snd_soc_dapm_dac, /* digital to analog converter */
- snd_soc_dapm_micbias, /* microphone bias (power) */
+ snd_soc_dapm_micbias, /* microphone bias (power) - DEPRECATED: use snd_soc_dapm_supply */
snd_soc_dapm_mic, /* microphone */
snd_soc_dapm_hp, /* headphones */
snd_soc_dapm_spk, /* speaker */
diff --git a/include/sound/soc.h b/include/sound/soc.h
index 02b4a215fd75..fd7b58a58d6f 100644
--- a/include/sound/soc.h
+++ b/include/sound/soc.h
@@ -1002,7 +1002,7 @@ struct snd_soc_dai_link {
*/
const char *platform_name;
struct device_node *platform_of_node;
- int be_id; /* optional ID for machine driver BE identification */
+ int id; /* optional ID for machine driver link identification */
const struct snd_soc_pcm_stream *params;
unsigned int num_params;
@@ -1683,6 +1683,9 @@ void snd_soc_remove_dai_link(struct snd_soc_card *card,
int snd_soc_register_dai(struct snd_soc_component *component,
struct snd_soc_dai_driver *dai_drv);
+struct snd_soc_dai *snd_soc_find_dai(
+ const struct snd_soc_dai_link_component *dlc);
+
#include <sound/soc-dai.h>
#ifdef CONFIG_DEBUG_FS
diff --git a/include/target/target_core_backend.h b/include/target/target_core_backend.h
index 28ee5c2e6bcd..d8ab5101fad5 100644
--- a/include/target/target_core_backend.h
+++ b/include/target/target_core_backend.h
@@ -85,7 +85,6 @@ extern struct configfs_attribute *passthrough_attrib_attrs[];
void *transport_kmap_data_sg(struct se_cmd *);
void transport_kunmap_data_sg(struct se_cmd *);
/* core helpers also used by xcopy during internal command setup */
-int target_alloc_sgl(struct scatterlist **, unsigned int *, u32, bool);
sense_reason_t transport_generic_map_mem_to_cmd(struct se_cmd *,
struct scatterlist *, u32, struct scatterlist *, u32);
diff --git a/include/target/target_core_fabric.h b/include/target/target_core_fabric.h
index 685a51aa98cc..78d88f03b296 100644
--- a/include/target/target_core_fabric.h
+++ b/include/target/target_core_fabric.h
@@ -76,6 +76,7 @@ struct target_core_fabric_ops {
struct se_wwn *(*fabric_make_wwn)(struct target_fabric_configfs *,
struct config_group *, const char *);
void (*fabric_drop_wwn)(struct se_wwn *);
+ void (*add_wwn_groups)(struct se_wwn *);
struct se_portal_group *(*fabric_make_tpg)(struct se_wwn *,
struct config_group *, const char *);
void (*fabric_drop_tpg)(struct se_portal_group *);
@@ -87,7 +88,6 @@ struct target_core_fabric_ops {
struct config_group *, const char *);
void (*fabric_drop_np)(struct se_tpg_np *);
int (*fabric_init_nodeacl)(struct se_node_acl *, const char *);
- void (*fabric_cleanup_nodeacl)(struct se_node_acl *);
struct configfs_attribute **tfc_discovery_attrs;
struct configfs_attribute **tfc_wwn_attrs;
@@ -185,6 +185,10 @@ int core_tpg_set_initiator_node_tag(struct se_portal_group *,
int core_tpg_register(struct se_wwn *, struct se_portal_group *, int);
int core_tpg_deregister(struct se_portal_group *);
+int target_alloc_sgl(struct scatterlist **sgl, unsigned int *nents,
+ u32 length, bool zero_page, bool chainable);
+void target_free_sgl(struct scatterlist *sgl, int nents);
+
/*
* The LIO target core uses DMA_TO_DEVICE to mean that data is going
* to the target (eg handling a WRITE) and DMA_FROM_DEVICE to mean
diff --git a/include/trace/events/btrfs.h b/include/trace/events/btrfs.h
index 677807f29a1c..e90e82ad6875 100644
--- a/include/trace/events/btrfs.h
+++ b/include/trace/events/btrfs.h
@@ -23,7 +23,7 @@ struct map_lookup;
struct extent_buffer;
struct btrfs_work;
struct __btrfs_workqueue;
-struct btrfs_qgroup_operation;
+struct btrfs_qgroup_extent_record;
#define show_ref_type(type) \
__print_symbolic(type, \
@@ -1231,6 +1231,93 @@ DEFINE_EVENT(btrfs__qgroup_delayed_ref, btrfs_qgroup_free_delayed_ref,
TP_ARGS(ref_root, reserved)
);
+
+DECLARE_EVENT_CLASS(btrfs_qgroup_extent,
+ TP_PROTO(struct btrfs_qgroup_extent_record *rec),
+
+ TP_ARGS(rec),
+
+ TP_STRUCT__entry(
+ __field( u64, bytenr )
+ __field( u64, num_bytes )
+ ),
+
+ TP_fast_assign(
+ __entry->bytenr = rec->bytenr,
+ __entry->num_bytes = rec->num_bytes;
+ ),
+
+ TP_printk("bytenr = %llu, num_bytes = %llu",
+ (unsigned long long)__entry->bytenr,
+ (unsigned long long)__entry->num_bytes)
+);
+
+DEFINE_EVENT(btrfs_qgroup_extent, btrfs_qgroup_account_extents,
+
+ TP_PROTO(struct btrfs_qgroup_extent_record *rec),
+
+ TP_ARGS(rec)
+);
+
+DEFINE_EVENT(btrfs_qgroup_extent, btrfs_qgroup_insert_dirty_extent,
+
+ TP_PROTO(struct btrfs_qgroup_extent_record *rec),
+
+ TP_ARGS(rec)
+);
+
+TRACE_EVENT(btrfs_qgroup_account_extent,
+
+ TP_PROTO(u64 bytenr, u64 num_bytes, u64 nr_old_roots, u64 nr_new_roots),
+
+ TP_ARGS(bytenr, num_bytes, nr_old_roots, nr_new_roots),
+
+ TP_STRUCT__entry(
+ __field( u64, bytenr )
+ __field( u64, num_bytes )
+ __field( u64, nr_old_roots )
+ __field( u64, nr_new_roots )
+ ),
+
+ TP_fast_assign(
+ __entry->bytenr = bytenr;
+ __entry->num_bytes = num_bytes;
+ __entry->nr_old_roots = nr_old_roots;
+ __entry->nr_new_roots = nr_new_roots;
+ ),
+
+ TP_printk("bytenr = %llu, num_bytes = %llu, nr_old_roots = %llu, "
+ "nr_new_roots = %llu",
+ __entry->bytenr,
+ __entry->num_bytes,
+ __entry->nr_old_roots,
+ __entry->nr_new_roots)
+);
+
+TRACE_EVENT(qgroup_update_counters,
+
+ TP_PROTO(u64 qgid, u64 cur_old_count, u64 cur_new_count),
+
+ TP_ARGS(qgid, cur_old_count, cur_new_count),
+
+ TP_STRUCT__entry(
+ __field( u64, qgid )
+ __field( u64, cur_old_count )
+ __field( u64, cur_new_count )
+ ),
+
+ TP_fast_assign(
+ __entry->qgid = qgid;
+ __entry->cur_old_count = cur_old_count;
+ __entry->cur_new_count = cur_new_count;
+ ),
+
+ TP_printk("qgid = %llu, cur_old_count = %llu, cur_new_count = %llu",
+ __entry->qgid,
+ __entry->cur_old_count,
+ __entry->cur_new_count)
+);
+
#endif /* _TRACE_BTRFS_H */
/* This part must be outside protection */
diff --git a/include/trace/events/ext4.h b/include/trace/events/ext4.h
index 4e4b2fa78609..09c71e9aaebf 100644
--- a/include/trace/events/ext4.h
+++ b/include/trace/events/ext4.h
@@ -872,7 +872,7 @@ TRACE_EVENT(ext4_sync_file_enter,
TP_fast_assign(
struct dentry *dentry = file->f_path.dentry;
- __entry->dev = d_inode(dentry)->i_sb->s_dev;
+ __entry->dev = dentry->d_sb->s_dev;
__entry->ino = d_inode(dentry)->i_ino;
__entry->datasync = datasync;
__entry->parent = d_inode(dentry->d_parent)->i_ino;
@@ -1451,7 +1451,7 @@ TRACE_EVENT(ext4_unlink_enter,
),
TP_fast_assign(
- __entry->dev = d_inode(dentry)->i_sb->s_dev;
+ __entry->dev = dentry->d_sb->s_dev;
__entry->ino = d_inode(dentry)->i_ino;
__entry->parent = parent->i_ino;
__entry->size = d_inode(dentry)->i_size;
@@ -1475,7 +1475,7 @@ TRACE_EVENT(ext4_unlink_exit,
),
TP_fast_assign(
- __entry->dev = d_inode(dentry)->i_sb->s_dev;
+ __entry->dev = dentry->d_sb->s_dev;
__entry->ino = d_inode(dentry)->i_ino;
__entry->ret = ret;
),
diff --git a/include/trace/events/kvm.h b/include/trace/events/kvm.h
index aa69253ecc7d..526fb3d2e43a 100644
--- a/include/trace/events/kvm.h
+++ b/include/trace/events/kvm.h
@@ -38,22 +38,25 @@ TRACE_EVENT(kvm_userspace_exit,
);
TRACE_EVENT(kvm_vcpu_wakeup,
- TP_PROTO(__u64 ns, bool waited),
- TP_ARGS(ns, waited),
+ TP_PROTO(__u64 ns, bool waited, bool valid),
+ TP_ARGS(ns, waited, valid),
TP_STRUCT__entry(
__field( __u64, ns )
__field( bool, waited )
+ __field( bool, valid )
),
TP_fast_assign(
__entry->ns = ns;
__entry->waited = waited;
+ __entry->valid = valid;
),
- TP_printk("%s time %lld ns",
+ TP_printk("%s time %lld ns, polling %s",
__entry->waited ? "wait" : "poll",
- __entry->ns)
+ __entry->ns,
+ __entry->valid ? "valid" : "invalid")
);
#if defined(CONFIG_HAVE_KVM_IRQFD)
diff --git a/include/trace/events/mmc.h b/include/trace/events/mmc.h
new file mode 100644
index 000000000000..a72f9b94c80b
--- /dev/null
+++ b/include/trace/events/mmc.h
@@ -0,0 +1,182 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM mmc
+
+#if !defined(_TRACE_MMC_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_MMC_H
+
+#include <linux/blkdev.h>
+#include <linux/mmc/core.h>
+#include <linux/mmc/host.h>
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(mmc_request_start,
+
+ TP_PROTO(struct mmc_host *host, struct mmc_request *mrq),
+
+ TP_ARGS(host, mrq),
+
+ TP_STRUCT__entry(
+ __field(u32, cmd_opcode)
+ __field(u32, cmd_arg)
+ __field(unsigned int, cmd_flags)
+ __field(unsigned int, cmd_retries)
+ __field(u32, stop_opcode)
+ __field(u32, stop_arg)
+ __field(unsigned int, stop_flags)
+ __field(unsigned int, stop_retries)
+ __field(u32, sbc_opcode)
+ __field(u32, sbc_arg)
+ __field(unsigned int, sbc_flags)
+ __field(unsigned int, sbc_retries)
+ __field(unsigned int, blocks)
+ __field(unsigned int, blksz)
+ __field(unsigned int, data_flags)
+ __field(unsigned int, can_retune)
+ __field(unsigned int, doing_retune)
+ __field(unsigned int, retune_now)
+ __field(int, need_retune)
+ __field(int, hold_retune)
+ __field(unsigned int, retune_period)
+ __field(struct mmc_request *, mrq)
+ __string(name, mmc_hostname(host))
+ ),
+
+ TP_fast_assign(
+ __entry->cmd_opcode = mrq->cmd->opcode;
+ __entry->cmd_arg = mrq->cmd->arg;
+ __entry->cmd_flags = mrq->cmd->flags;
+ __entry->cmd_retries = mrq->cmd->retries;
+ __entry->stop_opcode = mrq->stop ? mrq->stop->opcode : 0;
+ __entry->stop_arg = mrq->stop ? mrq->stop->arg : 0;
+ __entry->stop_flags = mrq->stop ? mrq->stop->flags : 0;
+ __entry->stop_retries = mrq->stop ? mrq->stop->retries : 0;
+ __entry->sbc_opcode = mrq->sbc ? mrq->sbc->opcode : 0;
+ __entry->sbc_arg = mrq->sbc ? mrq->sbc->arg : 0;
+ __entry->sbc_flags = mrq->sbc ? mrq->sbc->flags : 0;
+ __entry->sbc_retries = mrq->sbc ? mrq->sbc->retries : 0;
+ __entry->blksz = mrq->data ? mrq->data->blksz : 0;
+ __entry->blocks = mrq->data ? mrq->data->blocks : 0;
+ __entry->data_flags = mrq->data ? mrq->data->flags : 0;
+ __entry->can_retune = host->can_retune;
+ __entry->doing_retune = host->doing_retune;
+ __entry->retune_now = host->retune_now;
+ __entry->need_retune = host->need_retune;
+ __entry->hold_retune = host->hold_retune;
+ __entry->retune_period = host->retune_period;
+ __assign_str(name, mmc_hostname(host));
+ __entry->mrq = mrq;
+ ),
+
+ TP_printk("%s: start struct mmc_request[%p]: "
+ "cmd_opcode=%u cmd_arg=0x%x cmd_flags=0x%x cmd_retries=%u "
+ "stop_opcode=%u stop_arg=0x%x stop_flags=0x%x stop_retries=%u "
+ "sbc_opcode=%u sbc_arg=0x%x sbc_flags=0x%x sbc_retires=%u "
+ "blocks=%u block_size=%u data_flags=0x%x "
+ "can_retune=%u doing_retune=%u retune_now=%u "
+ "need_retune=%d hold_retune=%d retune_period=%u",
+ __get_str(name), __entry->mrq,
+ __entry->cmd_opcode, __entry->cmd_arg,
+ __entry->cmd_flags, __entry->cmd_retries,
+ __entry->stop_opcode, __entry->stop_arg,
+ __entry->stop_flags, __entry->stop_retries,
+ __entry->sbc_opcode, __entry->sbc_arg,
+ __entry->sbc_flags, __entry->sbc_retries,
+ __entry->blocks, __entry->blksz, __entry->data_flags,
+ __entry->can_retune, __entry->doing_retune,
+ __entry->retune_now, __entry->need_retune,
+ __entry->hold_retune, __entry->retune_period)
+);
+
+TRACE_EVENT(mmc_request_done,
+
+ TP_PROTO(struct mmc_host *host, struct mmc_request *mrq),
+
+ TP_ARGS(host, mrq),
+
+ TP_STRUCT__entry(
+ __field(u32, cmd_opcode)
+ __field(int, cmd_err)
+ __array(u32, cmd_resp, 4)
+ __field(unsigned int, cmd_retries)
+ __field(u32, stop_opcode)
+ __field(int, stop_err)
+ __array(u32, stop_resp, 4)
+ __field(unsigned int, stop_retries)
+ __field(u32, sbc_opcode)
+ __field(int, sbc_err)
+ __array(u32, sbc_resp, 4)
+ __field(unsigned int, sbc_retries)
+ __field(unsigned int, bytes_xfered)
+ __field(int, data_err)
+ __field(unsigned int, can_retune)
+ __field(unsigned int, doing_retune)
+ __field(unsigned int, retune_now)
+ __field(int, need_retune)
+ __field(int, hold_retune)
+ __field(unsigned int, retune_period)
+ __field(struct mmc_request *, mrq)
+ __string(name, mmc_hostname(host))
+ ),
+
+ TP_fast_assign(
+ __entry->cmd_opcode = mrq->cmd->opcode;
+ __entry->cmd_err = mrq->cmd->error;
+ memcpy(__entry->cmd_resp, mrq->cmd->resp, 4);
+ __entry->cmd_retries = mrq->cmd->retries;
+ __entry->stop_opcode = mrq->stop ? mrq->stop->opcode : 0;
+ __entry->stop_err = mrq->stop ? mrq->stop->error : 0;
+ __entry->stop_resp[0] = mrq->stop ? mrq->stop->resp[0] : 0;
+ __entry->stop_resp[1] = mrq->stop ? mrq->stop->resp[1] : 0;
+ __entry->stop_resp[2] = mrq->stop ? mrq->stop->resp[2] : 0;
+ __entry->stop_resp[3] = mrq->stop ? mrq->stop->resp[3] : 0;
+ __entry->stop_retries = mrq->stop ? mrq->stop->retries : 0;
+ __entry->sbc_opcode = mrq->sbc ? mrq->sbc->opcode : 0;
+ __entry->sbc_err = mrq->sbc ? mrq->sbc->error : 0;
+ __entry->sbc_resp[0] = mrq->sbc ? mrq->sbc->resp[0] : 0;
+ __entry->sbc_resp[1] = mrq->sbc ? mrq->sbc->resp[1] : 0;
+ __entry->sbc_resp[2] = mrq->sbc ? mrq->sbc->resp[2] : 0;
+ __entry->sbc_resp[3] = mrq->sbc ? mrq->sbc->resp[3] : 0;
+ __entry->sbc_retries = mrq->sbc ? mrq->sbc->retries : 0;
+ __entry->bytes_xfered = mrq->data ? mrq->data->bytes_xfered : 0;
+ __entry->data_err = mrq->data ? mrq->data->error : 0;
+ __entry->can_retune = host->can_retune;
+ __entry->doing_retune = host->doing_retune;
+ __entry->retune_now = host->retune_now;
+ __entry->need_retune = host->need_retune;
+ __entry->hold_retune = host->hold_retune;
+ __entry->retune_period = host->retune_period;
+ __assign_str(name, mmc_hostname(host));
+ __entry->mrq = mrq;
+ ),
+
+ TP_printk("%s: end struct mmc_request[%p]: "
+ "cmd_opcode=%u cmd_err=%d cmd_resp=0x%x 0x%x 0x%x 0x%x "
+ "cmd_retries=%u stop_opcode=%u stop_err=%d "
+ "stop_resp=0x%x 0x%x 0x%x 0x%x stop_retries=%u "
+ "sbc_opcode=%u sbc_err=%d sbc_resp=0x%x 0x%x 0x%x 0x%x "
+ "sbc_retries=%u bytes_xfered=%u data_err=%d "
+ "can_retune=%u doing_retune=%u retune_now=%u need_retune=%d "
+ "hold_retune=%d retune_period=%u",
+ __get_str(name), __entry->mrq,
+ __entry->cmd_opcode, __entry->cmd_err,
+ __entry->cmd_resp[0], __entry->cmd_resp[1],
+ __entry->cmd_resp[2], __entry->cmd_resp[3],
+ __entry->cmd_retries,
+ __entry->stop_opcode, __entry->stop_err,
+ __entry->stop_resp[0], __entry->stop_resp[1],
+ __entry->stop_resp[2], __entry->stop_resp[3],
+ __entry->stop_retries,
+ __entry->sbc_opcode, __entry->sbc_err,
+ __entry->sbc_resp[0], __entry->sbc_resp[1],
+ __entry->sbc_resp[2], __entry->sbc_resp[3],
+ __entry->sbc_retries,
+ __entry->bytes_xfered, __entry->data_err,
+ __entry->can_retune, __entry->doing_retune,
+ __entry->retune_now, __entry->need_retune,
+ __entry->hold_retune, __entry->retune_period)
+);
+
+#endif /* _TRACE_MMC_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/include/trace/events/page_isolation.h b/include/trace/events/page_isolation.h
index 6fb644029c80..8738a78e6bf4 100644
--- a/include/trace/events/page_isolation.h
+++ b/include/trace/events/page_isolation.h
@@ -29,7 +29,7 @@ TRACE_EVENT(test_pages_isolated,
TP_printk("start_pfn=0x%lx end_pfn=0x%lx fin_pfn=0x%lx ret=%s",
__entry->start_pfn, __entry->end_pfn, __entry->fin_pfn,
- __entry->end_pfn == __entry->fin_pfn ? "success" : "fail")
+ __entry->end_pfn <= __entry->fin_pfn ? "success" : "fail")
);
#endif /* _TRACE_PAGE_ISOLATION_H */
diff --git a/include/trace/events/rcu.h b/include/trace/events/rcu.h
index ef72c4aada56..d3e756539d44 100644
--- a/include/trace/events/rcu.h
+++ b/include/trace/events/rcu.h
@@ -172,6 +172,77 @@ TRACE_EVENT(rcu_grace_period_init,
);
/*
+ * Tracepoint for expedited grace-period events. Takes a string identifying
+ * the RCU flavor, the expedited grace-period sequence number, and a string
+ * identifying the grace-period-related event as follows:
+ *
+ * "snap": Captured snapshot of expedited grace period sequence number.
+ * "start": Started a real expedited grace period.
+ * "end": Ended a real expedited grace period.
+ * "endwake": Woke piggybackers up.
+ * "done": Someone else did the expedited grace period for us.
+ */
+TRACE_EVENT(rcu_exp_grace_period,
+
+ TP_PROTO(const char *rcuname, unsigned long gpseq, const char *gpevent),
+
+ TP_ARGS(rcuname, gpseq, gpevent),
+
+ TP_STRUCT__entry(
+ __field(const char *, rcuname)
+ __field(unsigned long, gpseq)
+ __field(const char *, gpevent)
+ ),
+
+ TP_fast_assign(
+ __entry->rcuname = rcuname;
+ __entry->gpseq = gpseq;
+ __entry->gpevent = gpevent;
+ ),
+
+ TP_printk("%s %lu %s",
+ __entry->rcuname, __entry->gpseq, __entry->gpevent)
+);
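
As a hypothetical call-site sketch (not taken from this patch), the tracepoint above is invoked with the flavor name, the expedited sequence number and one of the event strings listed in its comment, producing a trace line in the TP_printk() format:

	/* Kernel-context sketch; "rcu_sched" and the sequence value are
	 * illustrative only.
	 */
	unsigned long gpseq = 4;

	trace_rcu_exp_grace_period("rcu_sched", gpseq, "start");
	/* per the TP_printk() format above this emits:  rcu_sched 4 start */
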
+
+/*
+ * Tracepoint for expedited grace-period funnel-locking events. Takes a
+ * string identifying the RCU flavor, an integer identifying the rcu_node
+ * combining-tree level, another pair of integers identifying the lowest-
+ * and highest-numbered CPU associated with the current rcu_node structure,
+ * and a string identifying the grace-period-related event as follows:
+ *
+ * "nxtlvl": Advance to next level of rcu_node funnel
+ * "wait": Wait for someone else to do expedited GP
+ */
+TRACE_EVENT(rcu_exp_funnel_lock,
+
+ TP_PROTO(const char *rcuname, u8 level, int grplo, int grphi,
+ const char *gpevent),
+
+ TP_ARGS(rcuname, level, grplo, grphi, gpevent),
+
+ TP_STRUCT__entry(
+ __field(const char *, rcuname)
+ __field(u8, level)
+ __field(int, grplo)
+ __field(int, grphi)
+ __field(const char *, gpevent)
+ ),
+
+ TP_fast_assign(
+ __entry->rcuname = rcuname;
+ __entry->level = level;
+ __entry->grplo = grplo;
+ __entry->grphi = grphi;
+ __entry->gpevent = gpevent;
+ ),
+
+ TP_printk("%s %d %d %d %s",
+ __entry->rcuname, __entry->level, __entry->grplo,
+ __entry->grphi, __entry->gpevent)
+);
+
+/*
* Tracepoint for RCU no-CBs CPU callback handoffs. This event is intended
* to assist debugging of these handoffs.
*
@@ -704,11 +775,15 @@ TRACE_EVENT(rcu_barrier,
#else /* #ifdef CONFIG_RCU_TRACE */
#define trace_rcu_grace_period(rcuname, gpnum, gpevent) do { } while (0)
-#define trace_rcu_grace_period_init(rcuname, gpnum, level, grplo, grphi, \
- qsmask) do { } while (0)
#define trace_rcu_future_grace_period(rcuname, gpnum, completed, c, \
level, grplo, grphi, event) \
do { } while (0)
+#define trace_rcu_grace_period_init(rcuname, gpnum, level, grplo, grphi, \
+ qsmask) do { } while (0)
+#define trace_rcu_exp_grace_period(rcuname, gpseq, gpevent) \
+ do { } while (0)
+#define trace_rcu_exp_funnel_lock(rcuname, level, grplo, grphi, gpevent) \
+ do { } while (0)
#define trace_rcu_nocb_wake(rcuname, cpu, reason) do { } while (0)
#define trace_rcu_preempt_task(rcuname, pid, gpnum) do { } while (0)
#define trace_rcu_unlock_preempted_task(rcuname, gpnum, pid) do { } while (0)
diff --git a/include/trace/events/scsi.h b/include/trace/events/scsi.h
index 079bd10a01b4..9a9b3e2550af 100644
--- a/include/trace/events/scsi.h
+++ b/include/trace/events/scsi.h
@@ -94,11 +94,9 @@
scsi_opcode_name(WRITE_16), \
scsi_opcode_name(VERIFY_16), \
scsi_opcode_name(WRITE_SAME_16), \
+ scsi_opcode_name(ZBC_OUT), \
+ scsi_opcode_name(ZBC_IN), \
scsi_opcode_name(SERVICE_ACTION_IN_16), \
- scsi_opcode_name(SAI_READ_CAPACITY_16), \
- scsi_opcode_name(SAI_GET_LBA_STATUS), \
- scsi_opcode_name(MI_REPORT_TARGET_PGS), \
- scsi_opcode_name(MO_SET_TARGET_PGS), \
scsi_opcode_name(READ_32), \
scsi_opcode_name(WRITE_32), \
scsi_opcode_name(WRITE_SAME_32), \
diff --git a/include/trace/perf.h b/include/trace/perf.h
index 26486fcd74ce..88de5c205e86 100644
--- a/include/trace/perf.h
+++ b/include/trace/perf.h
@@ -20,9 +20,6 @@
#undef __get_bitmask
#define __get_bitmask(field) (char *)__get_dynamic_array(field)
-#undef __perf_addr
-#define __perf_addr(a) (__addr = (a))
-
#undef __perf_count
#define __perf_count(c) (__count = (c))
@@ -37,8 +34,9 @@ perf_trace_##call(void *__data, proto) \
struct trace_event_call *event_call = __data; \
struct trace_event_data_offsets_##call __maybe_unused __data_offsets;\
struct trace_event_raw_##call *entry; \
+ struct bpf_prog *prog = event_call->prog; \
struct pt_regs *__regs; \
- u64 __addr = 0, __count = 1; \
+ u64 __count = 1; \
struct task_struct *__task = NULL; \
struct hlist_head *head; \
int __entry_size; \
@@ -48,7 +46,7 @@ perf_trace_##call(void *__data, proto) \
__data_size = trace_event_get_offsets_##call(&__data_offsets, args); \
\
head = this_cpu_ptr(event_call->perf_events); \
- if (__builtin_constant_p(!__task) && !__task && \
+ if (!prog && __builtin_constant_p(!__task) && !__task && \
hlist_empty(head)) \
return; \
\
@@ -56,8 +54,7 @@ perf_trace_##call(void *__data, proto) \
sizeof(u64)); \
__entry_size -= sizeof(u32); \
\
- entry = perf_trace_buf_prepare(__entry_size, \
- event_call->event.type, &__regs, &rctx); \
+ entry = perf_trace_buf_alloc(__entry_size, &__regs, &rctx); \
if (!entry) \
return; \
\
@@ -67,8 +64,9 @@ perf_trace_##call(void *__data, proto) \
\
{ assign; } \
\
- perf_trace_buf_submit(entry, __entry_size, rctx, __addr, \
- __count, __regs, head, __task); \
+ perf_trace_run_bpf_submit(entry, __entry_size, rctx, \
+ event_call, __count, __regs, \
+ head, __task); \
}
/*
diff --git a/include/trace/trace_events.h b/include/trace/trace_events.h
index 170c93bbdbb7..80679a9fae65 100644
--- a/include/trace/trace_events.h
+++ b/include/trace/trace_events.h
@@ -652,9 +652,6 @@ static inline notrace int trace_event_get_offsets_##call( \
#undef TP_fast_assign
#define TP_fast_assign(args...) args
-#undef __perf_addr
-#define __perf_addr(a) (a)
-
#undef __perf_count
#define __perf_count(c) (c)
diff --git a/include/uapi/asm-generic/unistd.h b/include/uapi/asm-generic/unistd.h
index 2622b33fb2ec..c51afb71bfab 100644
--- a/include/uapi/asm-generic/unistd.h
+++ b/include/uapi/asm-generic/unistd.h
@@ -717,9 +717,13 @@ __SYSCALL(__NR_membarrier, sys_membarrier)
__SYSCALL(__NR_mlock2, sys_mlock2)
#define __NR_copy_file_range 285
__SYSCALL(__NR_copy_file_range, sys_copy_file_range)
+#define __NR_preadv2 286
+__SC_COMP(__NR_preadv2, sys_preadv2, compat_sys_preadv2)
+#define __NR_pwritev2 287
+__SC_COMP(__NR_pwritev2, sys_pwritev2, compat_sys_pwritev2)
#undef __NR_syscalls
-#define __NR_syscalls 286
+#define __NR_syscalls 288
/*
* All syscalls below here should go away really,
diff --git a/include/uapi/linux/Kbuild b/include/uapi/linux/Kbuild
index b71fd0b5cbad..8bdae34d1f9a 100644
--- a/include/uapi/linux/Kbuild
+++ b/include/uapi/linux/Kbuild
@@ -96,6 +96,7 @@ header-y += cyclades.h
header-y += cycx_cfm.h
header-y += dcbnl.h
header-y += dccp.h
+header-y += devlink.h
header-y += dlmconstants.h
header-y += dlm_device.h
header-y += dlm.h
@@ -140,6 +141,7 @@ header-y += gfs2_ondisk.h
header-y += gigaset_dev.h
header-y += gpio.h
header-y += gsmmux.h
+header-y += gtp.h
header-y += hdlcdrv.h
header-y += hdlc.h
header-y += hdreg.h
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index 924f537183fd..406459b935a2 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -92,6 +92,7 @@ enum bpf_prog_type {
BPF_PROG_TYPE_KPROBE,
BPF_PROG_TYPE_SCHED_CLS,
BPF_PROG_TYPE_SCHED_ACT,
+ BPF_PROG_TYPE_TRACEPOINT,
};
#define BPF_PSEUDO_MAP_FD 1
@@ -346,6 +347,10 @@ enum bpf_func_id {
#define BPF_F_ZERO_CSUM_TX (1ULL << 1)
#define BPF_F_DONT_FRAGMENT (1ULL << 2)
+/* BPF_FUNC_perf_event_output flags. */
+#define BPF_F_INDEX_MASK 0xffffffffULL
+#define BPF_F_CURRENT_CPU BPF_F_INDEX_MASK
+
/* user accessible mirror of in-kernel sk_buff.
* new fields can only be added to the end of this structure
*/
@@ -365,6 +370,8 @@ struct __sk_buff {
__u32 cb[5];
__u32 hash;
__u32 tc_classid;
+ __u32 data;
+ __u32 data_end;
};
struct bpf_tunnel_key {
@@ -375,6 +382,7 @@ struct bpf_tunnel_key {
};
__u8 tunnel_tos;
__u8 tunnel_ttl;
+ __u16 tunnel_ext;
__u32 tunnel_label;
};
diff --git a/include/uapi/linux/devlink.h b/include/uapi/linux/devlink.h
index c9fee5781eb1..ba0073b26fa6 100644
--- a/include/uapi/linux/devlink.h
+++ b/include/uapi/linux/devlink.h
@@ -33,6 +33,30 @@ enum devlink_command {
DEVLINK_CMD_PORT_SPLIT,
DEVLINK_CMD_PORT_UNSPLIT,
+ DEVLINK_CMD_SB_GET, /* can dump */
+ DEVLINK_CMD_SB_SET,
+ DEVLINK_CMD_SB_NEW,
+ DEVLINK_CMD_SB_DEL,
+
+ DEVLINK_CMD_SB_POOL_GET, /* can dump */
+ DEVLINK_CMD_SB_POOL_SET,
+ DEVLINK_CMD_SB_POOL_NEW,
+ DEVLINK_CMD_SB_POOL_DEL,
+
+ DEVLINK_CMD_SB_PORT_POOL_GET, /* can dump */
+ DEVLINK_CMD_SB_PORT_POOL_SET,
+ DEVLINK_CMD_SB_PORT_POOL_NEW,
+ DEVLINK_CMD_SB_PORT_POOL_DEL,
+
+ DEVLINK_CMD_SB_TC_POOL_BIND_GET, /* can dump */
+ DEVLINK_CMD_SB_TC_POOL_BIND_SET,
+ DEVLINK_CMD_SB_TC_POOL_BIND_NEW,
+ DEVLINK_CMD_SB_TC_POOL_BIND_DEL,
+
+ /* Shared buffer occupancy monitoring commands */
+ DEVLINK_CMD_SB_OCC_SNAPSHOT,
+ DEVLINK_CMD_SB_OCC_MAX_CLEAR,
+
/* add new commands above here */
__DEVLINK_CMD_MAX,
@@ -46,6 +70,31 @@ enum devlink_port_type {
DEVLINK_PORT_TYPE_IB,
};
+enum devlink_sb_pool_type {
+ DEVLINK_SB_POOL_TYPE_INGRESS,
+ DEVLINK_SB_POOL_TYPE_EGRESS,
+};
+
+/* static threshold - limiting the maximum number of bytes.
+ * dynamic threshold - limiting the maximum number of bytes
+ * based on the currently available free space in the shared buffer pool.
+ * In this mode, the maximum quota is calculated based
+ * on the following formula:
+ * max_quota = alpha / (1 + alpha) * Free_Buffer
+ * where Free_Buffer is the amount of non-occupied buffer associated with
+ * the relevant pool.
+ * The value that can be passed is in the range 0-20 and is used to
+ * compute alpha by the following formula:
+ * alpha = 2 ^ (passed_value - 10)
+ * (see the worked example below)
+ */
+
+enum devlink_sb_threshold_type {
+ DEVLINK_SB_THRESHOLD_TYPE_STATIC,
+ DEVLINK_SB_THRESHOLD_TYPE_DYNAMIC,
+};
+
+#define DEVLINK_SB_THRESHOLD_TO_ALPHA_MAX 20
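
A minimal userspace sketch of the arithmetic described in the comment above, assuming an illustrative threshold value of 14 and 1 MiB of currently free shared buffer (both values are made up for the example):

#include <math.h>
#include <stdio.h>

int main(void)
{
	unsigned int th = 14;		/* 0..DEVLINK_SB_THRESHOLD_TO_ALPHA_MAX */
	double alpha = pow(2.0, (double)th - 10.0);	/* alpha = 2 ^ (th - 10) */
	double free_buffer = 1 << 20;	/* free shared-buffer bytes, illustrative */
	double max_quota = alpha / (1.0 + alpha) * free_buffer;

	printf("alpha=%g max_quota=%.0f bytes\n", alpha, max_quota);
	return 0;
}

For th = 14 this gives alpha = 16, so the dynamic quota is 16/17 of the currently free buffer.
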
+
enum devlink_attr {
/* don't change the order or add anything between, this is ABI! */
DEVLINK_ATTR_UNSPEC,
@@ -62,6 +111,20 @@ enum devlink_attr {
DEVLINK_ATTR_PORT_IBDEV_NAME, /* string */
DEVLINK_ATTR_PORT_SPLIT_COUNT, /* u32 */
DEVLINK_ATTR_PORT_SPLIT_GROUP, /* u32 */
+ DEVLINK_ATTR_SB_INDEX, /* u32 */
+ DEVLINK_ATTR_SB_SIZE, /* u32 */
+ DEVLINK_ATTR_SB_INGRESS_POOL_COUNT, /* u16 */
+ DEVLINK_ATTR_SB_EGRESS_POOL_COUNT, /* u16 */
+ DEVLINK_ATTR_SB_INGRESS_TC_COUNT, /* u16 */
+ DEVLINK_ATTR_SB_EGRESS_TC_COUNT, /* u16 */
+ DEVLINK_ATTR_SB_POOL_INDEX, /* u16 */
+ DEVLINK_ATTR_SB_POOL_TYPE, /* u8 */
+ DEVLINK_ATTR_SB_POOL_SIZE, /* u32 */
+ DEVLINK_ATTR_SB_POOL_THRESHOLD_TYPE, /* u8 */
+ DEVLINK_ATTR_SB_THRESHOLD, /* u32 */
+ DEVLINK_ATTR_SB_TC_INDEX, /* u16 */
+ DEVLINK_ATTR_SB_OCC_CUR, /* u32 */
+ DEVLINK_ATTR_SB_OCC_MAX, /* u32 */
/* add new attributes above here, update the policy in devlink.c */
diff --git a/include/uapi/linux/elf.h b/include/uapi/linux/elf.h
index 71e1d0ed92f7..cb4a72f888d5 100644
--- a/include/uapi/linux/elf.h
+++ b/include/uapi/linux/elf.h
@@ -282,16 +282,18 @@ typedef struct elf64_phdr {
#define SHT_HIUSER 0xffffffff
/* sh_flags */
-#define SHF_WRITE 0x1
-#define SHF_ALLOC 0x2
-#define SHF_EXECINSTR 0x4
-#define SHF_MASKPROC 0xf0000000
+#define SHF_WRITE 0x1
+#define SHF_ALLOC 0x2
+#define SHF_EXECINSTR 0x4
+#define SHF_RELA_LIVEPATCH 0x00100000
+#define SHF_MASKPROC 0xf0000000
/* special section indexes */
#define SHN_UNDEF 0
#define SHN_LORESERVE 0xff00
#define SHN_LOPROC 0xff00
#define SHN_HIPROC 0xff1f
+#define SHN_LIVEPATCH 0xff20
#define SHN_ABS 0xfff1
#define SHN_COMMON 0xfff2
#define SHN_HIRESERVE 0xffff
diff --git a/include/uapi/linux/fib_rules.h b/include/uapi/linux/fib_rules.h
index 96161b8202b5..620c8a5ddc00 100644
--- a/include/uapi/linux/fib_rules.h
+++ b/include/uapi/linux/fib_rules.h
@@ -49,6 +49,7 @@ enum {
FRA_TABLE, /* Extended table id */
FRA_FWMASK, /* mask for netfilter mark */
FRA_OIFNAME,
+ FRA_PAD,
__FRA_MAX
};
diff --git a/include/uapi/linux/fs.h b/include/uapi/linux/fs.h
index a079d50376e1..e21fe04acc12 100644
--- a/include/uapi/linux/fs.h
+++ b/include/uapi/linux/fs.h
@@ -324,5 +324,7 @@ struct fscrypt_policy {
/* flags for preadv2/pwritev2: */
#define RWF_HIPRI 0x00000001 /* high priority request, poll if possible */
+#define RWF_DSYNC 0x00000002 /* per-IO O_DSYNC */
+#define RWF_SYNC 0x00000004 /* per-IO O_SYNC */
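
A short userspace sketch of the new per-IO flags, assuming a libc recent enough to provide the pwritev2() wrapper (older libcs need syscall(__NR_pwritev2, ...) instead); RWF_DSYNC gives just this one write O_DSYNC semantics without opening the file with O_DSYNC:

#define _GNU_SOURCE
#include <sys/types.h>
#include <sys/uio.h>
#include <unistd.h>

ssize_t write_record_dsync(int fd, const void *rec, size_t len, off_t off)
{
	struct iovec iov = { .iov_base = (void *)rec, .iov_len = len };

	/* per-IO data-sync: like O_DSYNC, but only for this request */
	return pwritev2(fd, &iov, 1, off, RWF_DSYNC);
}
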
#endif /* _UAPI_LINUX_FS_H */
diff --git a/include/uapi/linux/gen_stats.h b/include/uapi/linux/gen_stats.h
index 6487317ea619..52deccc2128e 100644
--- a/include/uapi/linux/gen_stats.h
+++ b/include/uapi/linux/gen_stats.h
@@ -10,6 +10,7 @@ enum {
TCA_STATS_QUEUE,
TCA_STATS_APP,
TCA_STATS_RATE_EST64,
+ TCA_STATS_PAD,
__TCA_STATS_MAX,
};
#define TCA_STATS_MAX (__TCA_STATS_MAX - 1)
diff --git a/include/uapi/linux/gtp.h b/include/uapi/linux/gtp.h
new file mode 100644
index 000000000000..ca1054dd8249
--- /dev/null
+++ b/include/uapi/linux/gtp.h
@@ -0,0 +1,33 @@
+#ifndef _UAPI_LINUX_GTP_H_
+#define _UAPI_LINUX_GTP_H_
+
+enum gtp_genl_cmds {
+ GTP_CMD_NEWPDP,
+ GTP_CMD_DELPDP,
+ GTP_CMD_GETPDP,
+
+ GTP_CMD_MAX,
+};
+
+enum gtp_version {
+ GTP_V0 = 0,
+ GTP_V1,
+};
+
+enum gtp_attrs {
+ GTPA_UNSPEC = 0,
+ GTPA_LINK,
+ GTPA_VERSION,
+ GTPA_TID, /* for GTPv0 only */
+ GTPA_SGSN_ADDRESS,
+ GTPA_MS_ADDRESS,
+ GTPA_FLOW,
+ GTPA_NET_NS_FD,
+ GTPA_I_TEI, /* for GTPv1 only */
+ GTPA_O_TEI, /* for GTPv1 only */
+ GTPA_PAD,
+ __GTPA_MAX,
+};
+#define GTPA_MAX (__GTPA_MAX - 1)
+
+#endif /* _UAPI_LINUX_GTP_H_ */
diff --git a/include/uapi/linux/i2c.h b/include/uapi/linux/i2c.h
index b0a7dd61eb35..adcbef4bff61 100644
--- a/include/uapi/linux/i2c.h
+++ b/include/uapi/linux/i2c.h
@@ -68,14 +68,15 @@
struct i2c_msg {
__u16 addr; /* slave address */
__u16 flags;
-#define I2C_M_TEN 0x0010 /* this is a ten bit chip address */
#define I2C_M_RD 0x0001 /* read data, from slave to master */
-#define I2C_M_STOP 0x8000 /* if I2C_FUNC_PROTOCOL_MANGLING */
-#define I2C_M_NOSTART 0x4000 /* if I2C_FUNC_NOSTART */
-#define I2C_M_REV_DIR_ADDR 0x2000 /* if I2C_FUNC_PROTOCOL_MANGLING */
-#define I2C_M_IGNORE_NAK 0x1000 /* if I2C_FUNC_PROTOCOL_MANGLING */
-#define I2C_M_NO_RD_ACK 0x0800 /* if I2C_FUNC_PROTOCOL_MANGLING */
+ /* I2C_M_RD is guaranteed to be 0x0001! */
+#define I2C_M_TEN 0x0010 /* this is a ten bit chip address */
#define I2C_M_RECV_LEN 0x0400 /* length will be first received byte */
+#define I2C_M_NO_RD_ACK 0x0800 /* if I2C_FUNC_PROTOCOL_MANGLING */
+#define I2C_M_IGNORE_NAK 0x1000 /* if I2C_FUNC_PROTOCOL_MANGLING */
+#define I2C_M_REV_DIR_ADDR 0x2000 /* if I2C_FUNC_PROTOCOL_MANGLING */
+#define I2C_M_NOSTART 0x4000 /* if I2C_FUNC_NOSTART */
+#define I2C_M_STOP 0x8000 /* if I2C_FUNC_PROTOCOL_MANGLING */
__u16 len; /* msg length */
__u8 *buf; /* pointer to msg data */
};
diff --git a/include/uapi/linux/if.h b/include/uapi/linux/if.h
index f80277569f24..e601c8c3bdc7 100644
--- a/include/uapi/linux/if.h
+++ b/include/uapi/linux/if.h
@@ -19,14 +19,20 @@
#ifndef _LINUX_IF_H
#define _LINUX_IF_H
+#include <linux/libc-compat.h> /* for compatibility with glibc */
#include <linux/types.h> /* for "__kernel_caddr_t" et al */
#include <linux/socket.h> /* for "struct sockaddr" et al */
#include <linux/compiler.h> /* for "__user" et al */
+#if __UAPI_DEF_IF_IFNAMSIZ
#define IFNAMSIZ 16
+#endif /* __UAPI_DEF_IF_IFNAMSIZ */
#define IFALIASZ 256
#include <linux/hdlc/ioctl.h>
+/* For glibc compatibility. An empty enum does not compile. */
+#if __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO != 0 && \
+ __UAPI_DEF_IF_NET_DEVICE_FLAGS != 0
/**
* enum net_device_flags - &struct net_device flags
*
@@ -68,6 +74,8 @@
* @IFF_ECHO: echo sent packets. Volatile.
*/
enum net_device_flags {
+/* for compatibility with glibc net/if.h */
+#if __UAPI_DEF_IF_NET_DEVICE_FLAGS
IFF_UP = 1<<0, /* sysfs */
IFF_BROADCAST = 1<<1, /* volatile */
IFF_DEBUG = 1<<2, /* sysfs */
@@ -84,11 +92,17 @@ enum net_device_flags {
IFF_PORTSEL = 1<<13, /* sysfs */
IFF_AUTOMEDIA = 1<<14, /* sysfs */
IFF_DYNAMIC = 1<<15, /* sysfs */
+#endif /* __UAPI_DEF_IF_NET_DEVICE_FLAGS */
+#if __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO
IFF_LOWER_UP = 1<<16, /* volatile */
IFF_DORMANT = 1<<17, /* volatile */
IFF_ECHO = 1<<18, /* volatile */
+#endif /* __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO */
};
+#endif /* __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO != 0 && __UAPI_DEF_IF_NET_DEVICE_FLAGS != 0 */
+/* for compatibility with glibc net/if.h */
+#if __UAPI_DEF_IF_NET_DEVICE_FLAGS
#define IFF_UP IFF_UP
#define IFF_BROADCAST IFF_BROADCAST
#define IFF_DEBUG IFF_DEBUG
@@ -105,9 +119,13 @@ enum net_device_flags {
#define IFF_PORTSEL IFF_PORTSEL
#define IFF_AUTOMEDIA IFF_AUTOMEDIA
#define IFF_DYNAMIC IFF_DYNAMIC
+#endif /* __UAPI_DEF_IF_NET_DEVICE_FLAGS */
+
+#if __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO
#define IFF_LOWER_UP IFF_LOWER_UP
#define IFF_DORMANT IFF_DORMANT
#define IFF_ECHO IFF_ECHO
+#endif /* __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO */
#define IFF_VOLATILE (IFF_LOOPBACK|IFF_POINTOPOINT|IFF_BROADCAST|IFF_ECHO|\
IFF_MASTER|IFF_SLAVE|IFF_RUNNING|IFF_LOWER_UP|IFF_DORMANT)
@@ -166,6 +184,8 @@ enum {
* being very small might be worth keeping for clean configuration.
*/
+/* for compatibility with glibc net/if.h */
+#if __UAPI_DEF_IF_IFMAP
struct ifmap {
unsigned long mem_start;
unsigned long mem_end;
@@ -175,6 +195,7 @@ struct ifmap {
unsigned char port;
/* 3 bytes spare */
};
+#endif /* __UAPI_DEF_IF_IFMAP */
struct if_settings {
unsigned int type; /* Type of physical device or protocol */
@@ -200,6 +221,8 @@ struct if_settings {
* remainder may be interface specific.
*/
+/* for compatibility with glibc net/if.h */
+#if __UAPI_DEF_IF_IFREQ
struct ifreq {
#define IFHWADDRLEN 6
union
@@ -223,6 +246,7 @@ struct ifreq {
struct if_settings ifru_settings;
} ifr_ifru;
};
+#endif /* __UAPI_DEF_IF_IFREQ */
#define ifr_name ifr_ifrn.ifrn_name /* interface name */
#define ifr_hwaddr ifr_ifru.ifru_hwaddr /* MAC address */
@@ -249,6 +273,8 @@ struct ifreq {
* must know all networks accessible).
*/
+/* for compatibility with glibc net/if.h */
+#if __UAPI_DEF_IF_IFCONF
struct ifconf {
int ifc_len; /* size of buffer */
union {
@@ -256,6 +282,8 @@ struct ifconf {
struct ifreq __user *ifcu_req;
} ifc_ifcu;
};
+#endif /* __UAPI_DEF_IF_IFCONF */
+
#define ifc_buf ifc_ifcu.ifcu_buf /* buffer address */
#define ifc_req ifc_ifcu.ifcu_req /* array of structures */
diff --git a/include/uapi/linux/if_bridge.h b/include/uapi/linux/if_bridge.h
index 0536eefff9bf..397d503fdedb 100644
--- a/include/uapi/linux/if_bridge.h
+++ b/include/uapi/linux/if_bridge.h
@@ -134,6 +134,16 @@ struct bridge_vlan_info {
__u16 vid;
};
+struct bridge_vlan_xstats {
+ __u64 rx_bytes;
+ __u64 rx_packets;
+ __u64 tx_bytes;
+ __u64 tx_packets;
+ __u16 vid;
+ __u16 pad1;
+ __u32 pad2;
+};
+
/* Bridge multicast database attributes
* [MDBA_MDB] = {
* [MDBA_MDB_ENTRY] = {
@@ -233,4 +243,12 @@ enum {
};
#define MDBA_SET_ENTRY_MAX (__MDBA_SET_ENTRY_MAX - 1)
+/* Embedded inside LINK_XSTATS_TYPE_BRIDGE */
+enum {
+ BRIDGE_XSTATS_UNSPEC,
+ BRIDGE_XSTATS_VLAN,
+ __BRIDGE_XSTATS_MAX
+};
+#define BRIDGE_XSTATS_MAX (__BRIDGE_XSTATS_MAX - 1)
+
#endif /* _UAPI_LINUX_IF_BRIDGE_H */
diff --git a/include/uapi/linux/if_ether.h b/include/uapi/linux/if_ether.h
index 4a93051c578c..cec849a239f6 100644
--- a/include/uapi/linux/if_ether.h
+++ b/include/uapi/linux/if_ether.h
@@ -92,6 +92,7 @@
#define ETH_P_TDLS 0x890D /* TDLS */
#define ETH_P_FIP 0x8914 /* FCoE Initialization Protocol */
#define ETH_P_80221 0x8917 /* IEEE 802.21 Media Independent Handover Protocol */
+#define ETH_P_HSR 0x892F /* IEC 62439-3 HSRv1 */
#define ETH_P_LOOPBACK 0x9000 /* Ethernet loopback packet, per IEEE 802.3 */
#define ETH_P_QINQ1 0x9100 /* deprecated QinQ VLAN [ NOT AN OFFICIALLY REGISTERED ID ] */
#define ETH_P_QINQ2 0x9200 /* deprecated QinQ VLAN [ NOT AN OFFICIALLY REGISTERED ID ] */
diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h
index c488066fb53a..bb36bd5675a7 100644
--- a/include/uapi/linux/if_link.h
+++ b/include/uapi/linux/if_link.h
@@ -155,6 +155,7 @@ enum {
IFLA_PROTO_DOWN,
IFLA_GSO_MAX_SEGS,
IFLA_GSO_MAX_SIZE,
+ IFLA_PAD,
__IFLA_MAX
};
@@ -270,6 +271,8 @@ enum {
IFLA_BR_NF_CALL_IP6TABLES,
IFLA_BR_NF_CALL_ARPTABLES,
IFLA_BR_VLAN_DEFAULT_PVID,
+ IFLA_BR_PAD,
+ IFLA_BR_VLAN_STATS_ENABLED,
__IFLA_BR_MAX,
};
@@ -312,6 +315,7 @@ enum {
IFLA_BRPORT_HOLD_TIMER,
IFLA_BRPORT_FLUSH,
IFLA_BRPORT_MULTICAST_ROUTER,
+ IFLA_BRPORT_PAD,
__IFLA_BRPORT_MAX
};
#define IFLA_BRPORT_MAX (__IFLA_BRPORT_MAX - 1)
@@ -431,6 +435,7 @@ enum {
IFLA_MACSEC_SCB,
IFLA_MACSEC_REPLAY_PROTECT,
IFLA_MACSEC_VALIDATION,
+ IFLA_MACSEC_PAD,
__IFLA_MACSEC_MAX,
};
@@ -488,6 +493,7 @@ enum {
IFLA_VXLAN_REMCSUM_NOPARTIAL,
IFLA_VXLAN_COLLECT_METADATA,
IFLA_VXLAN_LABEL,
+ IFLA_VXLAN_GPE,
__IFLA_VXLAN_MAX
};
#define IFLA_VXLAN_MAX (__IFLA_VXLAN_MAX - 1)
@@ -515,6 +521,24 @@ enum {
};
#define IFLA_GENEVE_MAX (__IFLA_GENEVE_MAX - 1)
+/* PPP section */
+enum {
+ IFLA_PPP_UNSPEC,
+ IFLA_PPP_DEV_FD,
+ __IFLA_PPP_MAX
+};
+#define IFLA_PPP_MAX (__IFLA_PPP_MAX - 1)
+
+/* GTP section */
+enum {
+ IFLA_GTP_UNSPEC,
+ IFLA_GTP_FD0,
+ IFLA_GTP_FD1,
+ IFLA_GTP_PDP_HASHSIZE,
+ __IFLA_GTP_MAX,
+};
+#define IFLA_GTP_MAX (__IFLA_GTP_MAX - 1)
+
/* Bonding section */
enum {
@@ -664,6 +688,7 @@ enum {
IFLA_VF_STATS_TX_BYTES,
IFLA_VF_STATS_BROADCAST,
IFLA_VF_STATS_MULTICAST,
+ IFLA_VF_STATS_PAD,
__IFLA_VF_STATS_MAX,
};
@@ -774,9 +799,46 @@ enum {
IFLA_HSR_MULTICAST_SPEC, /* Last byte of supervision addr */
IFLA_HSR_SUPERVISION_ADDR, /* Supervision frame multicast addr */
IFLA_HSR_SEQ_NR,
+ IFLA_HSR_VERSION, /* HSR version */
__IFLA_HSR_MAX,
};
#define IFLA_HSR_MAX (__IFLA_HSR_MAX - 1)
+/* STATS section */
+
+struct if_stats_msg {
+ __u8 family;
+ __u8 pad1;
+ __u16 pad2;
+ __u32 ifindex;
+ __u32 filter_mask;
+};
+
+/* A stats attribute can be netdev specific or a global stat.
+ * For netdev stats, let's use the prefix IFLA_STATS_LINK_*
+ */
+enum {
+ IFLA_STATS_UNSPEC, /* also used as 64bit pad attribute */
+ IFLA_STATS_LINK_64,
+ IFLA_STATS_LINK_XSTATS,
+ __IFLA_STATS_MAX,
+};
+
+#define IFLA_STATS_MAX (__IFLA_STATS_MAX - 1)
+
+#define IFLA_STATS_FILTER_BIT(ATTR) (1 << (ATTR - 1))
+
+/* These are embedded into IFLA_STATS_LINK_XSTATS:
+ * [IFLA_STATS_LINK_XSTATS]
+ * -> [LINK_XSTATS_TYPE_xxx]
+ * -> [rtnl link type specific attributes]
+ */
+enum {
+ LINK_XSTATS_TYPE_UNSPEC,
+ LINK_XSTATS_TYPE_BRIDGE,
+ __LINK_XSTATS_TYPE_MAX
+};
+#define LINK_XSTATS_TYPE_MAX (__LINK_XSTATS_TYPE_MAX - 1)
+
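As a sketch of how these definitions are meant to be used from userspace, the snippet below fills struct if_stats_msg for a link-stats dump; the request type is assumed to be RTM_GETSTATS (added alongside this uapi change) and ifindex 2 is illustrative:

#include <sys/socket.h>
#include <linux/rtnetlink.h>
#include <linux/if_link.h>

struct {
	struct nlmsghdr nlh;
	struct if_stats_msg ifsm;
} req = {
	.nlh = {
		.nlmsg_len   = NLMSG_LENGTH(sizeof(struct if_stats_msg)),
		.nlmsg_type  = RTM_GETSTATS,	/* assumed request type */
		.nlmsg_flags = NLM_F_REQUEST,
	},
	.ifsm = {
		.family      = AF_UNSPEC,
		.ifindex     = 2,		/* illustrative interface */
		/* ask only for the 64-bit link counters */
		.filter_mask = IFLA_STATS_FILTER_BIT(IFLA_STATS_LINK_64),
	},
};
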
#endif /* _UAPI_LINUX_IF_LINK_H */
diff --git a/include/uapi/linux/if_macsec.h b/include/uapi/linux/if_macsec.h
index 26b0d1e3e3e7..f7d4831a2cc7 100644
--- a/include/uapi/linux/if_macsec.h
+++ b/include/uapi/linux/if_macsec.h
@@ -19,8 +19,10 @@
#define MACSEC_MAX_KEY_LEN 128
-#define DEFAULT_CIPHER_ID 0x0080020001000001ULL
-#define DEFAULT_CIPHER_ALT 0x0080C20001000001ULL
+#define MACSEC_KEYID_LEN 16
+
+#define MACSEC_DEFAULT_CIPHER_ID 0x0080020001000001ULL
+#define MACSEC_DEFAULT_CIPHER_ALT 0x0080C20001000001ULL
#define MACSEC_MIN_ICV_LEN 8
#define MACSEC_MAX_ICV_LEN 32
@@ -55,6 +57,7 @@ enum macsec_secy_attrs {
MACSEC_SECY_ATTR_INC_SCI,
MACSEC_SECY_ATTR_ES,
MACSEC_SECY_ATTR_SCB,
+ MACSEC_SECY_ATTR_PAD,
__MACSEC_SECY_ATTR_END,
NUM_MACSEC_SECY_ATTR = __MACSEC_SECY_ATTR_END,
MACSEC_SECY_ATTR_MAX = __MACSEC_SECY_ATTR_END - 1,
@@ -66,6 +69,7 @@ enum macsec_rxsc_attrs {
MACSEC_RXSC_ATTR_ACTIVE, /* config/dump, u8 0..1 */
MACSEC_RXSC_ATTR_SA_LIST, /* dump, nested */
MACSEC_RXSC_ATTR_STATS, /* dump, nested, macsec_rxsc_stats_attr */
+ MACSEC_RXSC_ATTR_PAD,
__MACSEC_RXSC_ATTR_END,
NUM_MACSEC_RXSC_ATTR = __MACSEC_RXSC_ATTR_END,
MACSEC_RXSC_ATTR_MAX = __MACSEC_RXSC_ATTR_END - 1,
@@ -77,8 +81,9 @@ enum macsec_sa_attrs {
MACSEC_SA_ATTR_ACTIVE, /* config/dump, u8 0..1 */
MACSEC_SA_ATTR_PN, /* config/dump, u32 */
MACSEC_SA_ATTR_KEY, /* config, data */
- MACSEC_SA_ATTR_KEYID, /* config/dump, u64 */
+ MACSEC_SA_ATTR_KEYID, /* config/dump, 128-bit */
MACSEC_SA_ATTR_STATS, /* dump, nested, macsec_sa_stats_attr */
+ MACSEC_SA_ATTR_PAD,
__MACSEC_SA_ATTR_END,
NUM_MACSEC_SA_ATTR = __MACSEC_SA_ATTR_END,
MACSEC_SA_ATTR_MAX = __MACSEC_SA_ATTR_END - 1,
@@ -110,6 +115,7 @@ enum macsec_rxsc_stats_attr {
MACSEC_RXSC_STATS_ATTR_IN_PKTS_NOT_VALID,
MACSEC_RXSC_STATS_ATTR_IN_PKTS_NOT_USING_SA,
MACSEC_RXSC_STATS_ATTR_IN_PKTS_UNUSED_SA,
+ MACSEC_RXSC_STATS_ATTR_PAD,
__MACSEC_RXSC_STATS_ATTR_END,
NUM_MACSEC_RXSC_STATS_ATTR = __MACSEC_RXSC_STATS_ATTR_END,
MACSEC_RXSC_STATS_ATTR_MAX = __MACSEC_RXSC_STATS_ATTR_END - 1,
@@ -137,6 +143,7 @@ enum macsec_txsc_stats_attr {
MACSEC_TXSC_STATS_ATTR_OUT_PKTS_ENCRYPTED,
MACSEC_TXSC_STATS_ATTR_OUT_OCTETS_PROTECTED,
MACSEC_TXSC_STATS_ATTR_OUT_OCTETS_ENCRYPTED,
+ MACSEC_TXSC_STATS_ATTR_PAD,
__MACSEC_TXSC_STATS_ATTR_END,
NUM_MACSEC_TXSC_STATS_ATTR = __MACSEC_TXSC_STATS_ATTR_END,
MACSEC_TXSC_STATS_ATTR_MAX = __MACSEC_TXSC_STATS_ATTR_END - 1,
@@ -153,6 +160,7 @@ enum macsec_secy_stats_attr {
MACSEC_SECY_STATS_ATTR_IN_PKTS_UNKNOWN_SCI,
MACSEC_SECY_STATS_ATTR_IN_PKTS_NO_SCI,
MACSEC_SECY_STATS_ATTR_IN_PKTS_OVERRUN,
+ MACSEC_SECY_STATS_ATTR_PAD,
__MACSEC_SECY_STATS_ATTR_END,
NUM_MACSEC_SECY_STATS_ATTR = __MACSEC_SECY_STATS_ATTR_END,
MACSEC_SECY_STATS_ATTR_MAX = __MACSEC_SECY_STATS_ATTR_END - 1,
diff --git a/include/uapi/linux/ila.h b/include/uapi/linux/ila.h
index abde7bbd6f3b..948c0a91e11b 100644
--- a/include/uapi/linux/ila.h
+++ b/include/uapi/linux/ila.h
@@ -14,6 +14,8 @@ enum {
ILA_ATTR_LOCATOR_MATCH, /* u64 */
ILA_ATTR_IFINDEX, /* s32 */
ILA_ATTR_DIR, /* u32 */
+ ILA_ATTR_PAD,
+ ILA_ATTR_CSUM_MODE, /* u8 */
__ILA_ATTR_MAX,
};
@@ -34,4 +36,10 @@ enum {
#define ILA_DIR_IN (1 << 0)
#define ILA_DIR_OUT (1 << 1)
+enum {
+ ILA_CSUM_ADJUST_TRANSPORT,
+ ILA_CSUM_NEUTRAL_MAP,
+ ILA_CSUM_NO_ACTION,
+};
+
#endif /* _UAPI_LINUX_ILA_H */
diff --git a/include/uapi/linux/inet_diag.h b/include/uapi/linux/inet_diag.h
index 68a1f71fde9f..a16643705669 100644
--- a/include/uapi/linux/inet_diag.h
+++ b/include/uapi/linux/inet_diag.h
@@ -113,9 +113,13 @@ enum {
INET_DIAG_DCTCPINFO,
INET_DIAG_PROTOCOL, /* response attribute only */
INET_DIAG_SKV6ONLY,
+ INET_DIAG_LOCALS,
+ INET_DIAG_PEERS,
+ INET_DIAG_PAD,
+ __INET_DIAG_MAX,
};
-#define INET_DIAG_MAX INET_DIAG_SKV6ONLY
+#define INET_DIAG_MAX (__INET_DIAG_MAX - 1)
/* INET_DIAG_MEM */
diff --git a/include/uapi/linux/ip_vs.h b/include/uapi/linux/ip_vs.h
index 391395c06c7e..22d69894bc92 100644
--- a/include/uapi/linux/ip_vs.h
+++ b/include/uapi/linux/ip_vs.h
@@ -435,6 +435,7 @@ enum {
IPVS_STATS_ATTR_OUTPPS, /* current out packet rate */
IPVS_STATS_ATTR_INBPS, /* current in byte rate */
IPVS_STATS_ATTR_OUTBPS, /* current out byte rate */
+ IPVS_STATS_ATTR_PAD,
__IPVS_STATS_ATTR_MAX,
};
diff --git a/include/uapi/linux/keyctl.h b/include/uapi/linux/keyctl.h
index 840cb990abe2..86eddd6241f3 100644
--- a/include/uapi/linux/keyctl.h
+++ b/include/uapi/linux/keyctl.h
@@ -12,6 +12,8 @@
#ifndef _LINUX_KEYCTL_H
#define _LINUX_KEYCTL_H
+#include <linux/types.h>
+
/* special process keyring shortcut IDs */
#define KEY_SPEC_THREAD_KEYRING -1 /* - key ID for thread-specific keyring */
#define KEY_SPEC_PROCESS_KEYRING -2 /* - key ID for process-specific keyring */
@@ -57,5 +59,13 @@
#define KEYCTL_INSTANTIATE_IOV 20 /* instantiate a partially constructed key */
#define KEYCTL_INVALIDATE 21 /* invalidate a key */
#define KEYCTL_GET_PERSISTENT 22 /* get a user's persistent keyring */
+#define KEYCTL_DH_COMPUTE 23 /* Compute Diffie-Hellman values */
+
+/* keyctl structures */
+struct keyctl_dh_params {
+ __s32 private;
+ __s32 prime;
+ __s32 base;
+};
#endif /* _LINUX_KEYCTL_H */
diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
index a7f1f8032ec1..05ebf475104c 100644
--- a/include/uapi/linux/kvm.h
+++ b/include/uapi/linux/kvm.h
@@ -865,6 +865,7 @@ struct kvm_ppc_smmu_info {
#define KVM_CAP_SPAPR_TCE_64 125
#define KVM_CAP_ARM_PMU_V3 126
#define KVM_CAP_VCPU_ATTRIBUTES 127
+#define KVM_CAP_MAX_VCPU_ID 128
#ifdef KVM_CAP_IRQ_ROUTING
diff --git a/include/uapi/linux/l2tp.h b/include/uapi/linux/l2tp.h
index 347ef22a964e..4bd27d0270a2 100644
--- a/include/uapi/linux/l2tp.h
+++ b/include/uapi/linux/l2tp.h
@@ -126,6 +126,7 @@ enum {
L2TP_ATTR_IP6_DADDR, /* struct in6_addr */
L2TP_ATTR_UDP_ZERO_CSUM6_TX, /* u8 */
L2TP_ATTR_UDP_ZERO_CSUM6_RX, /* u8 */
+ L2TP_ATTR_PAD,
__L2TP_ATTR_MAX,
};
@@ -142,6 +143,7 @@ enum {
L2TP_ATTR_RX_SEQ_DISCARDS, /* u64 */
L2TP_ATTR_RX_OOS_PACKETS, /* u64 */
L2TP_ATTR_RX_ERRORS, /* u64 */
+ L2TP_ATTR_STATS_PAD,
__L2TP_ATTR_STATS_MAX,
};
diff --git a/include/uapi/linux/libc-compat.h b/include/uapi/linux/libc-compat.h
index 7d024ceb075d..e4f048ee7043 100644
--- a/include/uapi/linux/libc-compat.h
+++ b/include/uapi/linux/libc-compat.h
@@ -51,6 +51,40 @@
/* We have included glibc headers... */
#if defined(__GLIBC__)
+/* Coordinate with glibc net/if.h header. */
+#if defined(_NET_IF_H) && defined(__USE_MISC)
+
+/* GLIBC headers included first so don't define anything
+ * that would already be defined. */
+
+#define __UAPI_DEF_IF_IFCONF 0
+#define __UAPI_DEF_IF_IFMAP 0
+#define __UAPI_DEF_IF_IFNAMSIZ 0
+#define __UAPI_DEF_IF_IFREQ 0
+/* Everything up to IFF_DYNAMIC matches net/if.h until glibc 2.23 */
+#define __UAPI_DEF_IF_NET_DEVICE_FLAGS 0
+/* For the future if glibc adds IFF_LOWER_UP, IFF_DORMANT and IFF_ECHO */
+#ifndef __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO
+#define __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO 1
+#endif /* __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO */
+
+#else /* _NET_IF_H */
+
+/* Linux headers included first, and we must define everything
+ * we need. The expectation is that glibc will check the
+ * __UAPI_DEF_* defines and adjust appropriately. */
+
+#define __UAPI_DEF_IF_IFCONF 1
+#define __UAPI_DEF_IF_IFMAP 1
+#define __UAPI_DEF_IF_IFNAMSIZ 1
+#define __UAPI_DEF_IF_IFREQ 1
+/* Everything up to IFF_DYNAMIC matches net/if.h until glibc 2.23 */
+#define __UAPI_DEF_IF_NET_DEVICE_FLAGS 1
+/* For the future if glibc adds IFF_LOWER_UP, IFF_DORMANT and IFF_ECHO */
+#define __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO 1
+
+#endif /* _NET_IF_H */
+
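A small userspace sketch of the first case above: glibc's net/if.h is included before linux/if.h, so the kernel header suppresses its duplicate definitions through the __UAPI_DEF_IF_* guards (the reverse order additionally relies on glibc honouring these guards, which only newer glibc releases do); the interface name is illustrative:

#include <net/if.h>		/* glibc: struct ifreq, IFNAMSIZ, IFF_* */
#include <linux/if.h>		/* kernel: now only adds what glibc lacks */
#include <stdio.h>

int main(void)
{
	struct ifreq ifr = { .ifr_name = "eth0" };	/* illustrative name */

	printf("IFNAMSIZ=%d name=%s up-flag=0x%x\n",
	       IFNAMSIZ, ifr.ifr_name, IFF_UP);
	return 0;
}
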
/* Coordinate with glibc netinet/in.h header. */
#if defined(_NETINET_IN_H)
@@ -117,6 +151,16 @@
* that we need. */
#else /* !defined(__GLIBC__) */
+/* Definitions for if.h */
+#define __UAPI_DEF_IF_IFCONF 1
+#define __UAPI_DEF_IF_IFMAP 1
+#define __UAPI_DEF_IF_IFNAMSIZ 1
+#define __UAPI_DEF_IF_IFREQ 1
+/* Everything up to IFF_DYNAMIC matches net/if.h until glibc 2.23 */
+#define __UAPI_DEF_IF_NET_DEVICE_FLAGS 1
+/* For the future if glibc adds IFF_LOWER_UP, IFF_DORMANT and IFF_ECHO */
+#define __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO 1
+
/* Definitions for in.h */
#define __UAPI_DEF_IN_ADDR 1
#define __UAPI_DEF_IN_IPPROTO 1
diff --git a/include/uapi/linux/lwtunnel.h b/include/uapi/linux/lwtunnel.h
index f8b01887a495..a478fe80e203 100644
--- a/include/uapi/linux/lwtunnel.h
+++ b/include/uapi/linux/lwtunnel.h
@@ -22,6 +22,7 @@ enum lwtunnel_ip_t {
LWTUNNEL_IP_TTL,
LWTUNNEL_IP_TOS,
LWTUNNEL_IP_FLAGS,
+ LWTUNNEL_IP_PAD,
__LWTUNNEL_IP_MAX,
};
@@ -35,6 +36,7 @@ enum lwtunnel_ip6_t {
LWTUNNEL_IP6_HOPLIMIT,
LWTUNNEL_IP6_TC,
LWTUNNEL_IP6_FLAGS,
+ LWTUNNEL_IP6_PAD,
__LWTUNNEL_IP6_MAX,
};
diff --git a/include/uapi/linux/magic.h b/include/uapi/linux/magic.h
index 0de181ad73d5..546b38886e11 100644
--- a/include/uapi/linux/magic.h
+++ b/include/uapi/linux/magic.h
@@ -78,5 +78,7 @@
#define BTRFS_TEST_MAGIC 0x73727279
#define NSFS_MAGIC 0x6e736673
#define BPF_FS_MAGIC 0xcafe4a11
+/* Since UDF 2.01 is ISO 13346 based... */
+#define UDF_SUPER_MAGIC 0x15013346
#endif /* __LINUX_MAGIC_H__ */
diff --git a/include/uapi/linux/neighbour.h b/include/uapi/linux/neighbour.h
index 788655bfa0f3..bd99a8d80f36 100644
--- a/include/uapi/linux/neighbour.h
+++ b/include/uapi/linux/neighbour.h
@@ -128,6 +128,7 @@ enum {
NDTPA_LOCKTIME, /* u64, msecs */
NDTPA_QUEUE_LENBYTES, /* u32 */
NDTPA_MCAST_REPROBES, /* u32 */
+ NDTPA_PAD,
__NDTPA_MAX
};
#define NDTPA_MAX (__NDTPA_MAX - 1)
@@ -160,6 +161,7 @@ enum {
NDTA_PARMS, /* nested TLV NDTPA_* */
NDTA_STATS, /* struct ndt_stats, read-only */
NDTA_GC_INTERVAL, /* u64, msecs */
+ NDTA_PAD,
__NDTA_MAX
};
#define NDTA_MAX (__NDTA_MAX - 1)
diff --git a/include/uapi/linux/net_tstamp.h b/include/uapi/linux/net_tstamp.h
index 6d1abea9746e..264e515de16f 100644
--- a/include/uapi/linux/net_tstamp.h
+++ b/include/uapi/linux/net_tstamp.h
@@ -31,6 +31,16 @@ enum {
SOF_TIMESTAMPING_LAST
};
+/*
+ * SO_TIMESTAMPING flags are either for recording a packet timestamp or for
+ * reporting the timestamp to user space.
+ * Recording flags can be set both via socket options and control messages.
+ */
+#define SOF_TIMESTAMPING_TX_RECORD_MASK (SOF_TIMESTAMPING_TX_HARDWARE | \
+ SOF_TIMESTAMPING_TX_SOFTWARE | \
+ SOF_TIMESTAMPING_TX_SCHED | \
+ SOF_TIMESTAMPING_TX_ACK)
+
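A userspace sketch of the two paths this mask is about, assuming a connected socket fd and that SO_TIMESTAMPING is visible through <sys/socket.h> on the target libc: reporting flags are set once with setsockopt(), and a recording flag is attached to a single packet via a SO_TIMESTAMPING control message (only flags within SOF_TIMESTAMPING_TX_RECORD_MASK make sense on that path):

#include <string.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <linux/net_tstamp.h>

static void send_with_tx_timestamp(int fd, const void *buf, size_t len)
{
	/* reporting flags: socket-wide, via setsockopt() */
	unsigned int rep = SOF_TIMESTAMPING_SOFTWARE | SOF_TIMESTAMPING_OPT_ID;
	setsockopt(fd, SOL_SOCKET, SO_TIMESTAMPING, &rep, sizeof(rep));

	/* recording flag: per packet, via a control message */
	struct iovec iov = { .iov_base = (void *)buf, .iov_len = len };
	char cbuf[CMSG_SPACE(sizeof(unsigned int))];
	struct msghdr msg = {
		.msg_iov = &iov, .msg_iovlen = 1,
		.msg_control = cbuf, .msg_controllen = sizeof(cbuf),
	};
	struct cmsghdr *cm;

	memset(cbuf, 0, sizeof(cbuf));
	cm = CMSG_FIRSTHDR(&msg);
	cm->cmsg_level = SOL_SOCKET;
	cm->cmsg_type = SO_TIMESTAMPING;
	cm->cmsg_len = CMSG_LEN(sizeof(unsigned int));
	*(unsigned int *)CMSG_DATA(cm) = SOF_TIMESTAMPING_TX_SCHED;

	sendmsg(fd, &msg, 0);
}
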
/**
* struct hwtstamp_config - %SIOCGHWTSTAMP and %SIOCSHWTSTAMP parameter
*
diff --git a/include/uapi/linux/netfilter/ipset/ip_set.h b/include/uapi/linux/netfilter/ipset/ip_set.h
index 63b2e34f1b60..ebb5154976de 100644
--- a/include/uapi/linux/netfilter/ipset/ip_set.h
+++ b/include/uapi/linux/netfilter/ipset/ip_set.h
@@ -118,6 +118,7 @@ enum {
IPSET_ATTR_SKBMARK,
IPSET_ATTR_SKBPRIO,
IPSET_ATTR_SKBQUEUE,
+ IPSET_ATTR_PAD,
__IPSET_ATTR_ADT_MAX,
};
#define IPSET_ATTR_ADT_MAX (__IPSET_ATTR_ADT_MAX - 1)
diff --git a/include/uapi/linux/netfilter/nf_tables.h b/include/uapi/linux/netfilter/nf_tables.h
index eeffde196f80..6a4dbe04f09e 100644
--- a/include/uapi/linux/netfilter/nf_tables.h
+++ b/include/uapi/linux/netfilter/nf_tables.h
@@ -3,6 +3,7 @@
#define NFT_TABLE_MAXNAMELEN 32
#define NFT_CHAIN_MAXNAMELEN 32
+#define NFT_SET_MAXNAMELEN 32
#define NFT_USERDATA_MAXLEN 256
/**
@@ -182,6 +183,7 @@ enum nft_chain_attributes {
NFTA_CHAIN_USE,
NFTA_CHAIN_TYPE,
NFTA_CHAIN_COUNTERS,
+ NFTA_CHAIN_PAD,
__NFTA_CHAIN_MAX
};
#define NFTA_CHAIN_MAX (__NFTA_CHAIN_MAX - 1)
@@ -206,6 +208,7 @@ enum nft_rule_attributes {
NFTA_RULE_COMPAT,
NFTA_RULE_POSITION,
NFTA_RULE_USERDATA,
+ NFTA_RULE_PAD,
__NFTA_RULE_MAX
};
#define NFTA_RULE_MAX (__NFTA_RULE_MAX - 1)
@@ -308,6 +311,7 @@ enum nft_set_attributes {
NFTA_SET_TIMEOUT,
NFTA_SET_GC_INTERVAL,
NFTA_SET_USERDATA,
+ NFTA_SET_PAD,
__NFTA_SET_MAX
};
#define NFTA_SET_MAX (__NFTA_SET_MAX - 1)
@@ -341,6 +345,7 @@ enum nft_set_elem_attributes {
NFTA_SET_ELEM_EXPIRATION,
NFTA_SET_ELEM_USERDATA,
NFTA_SET_ELEM_EXPR,
+ NFTA_SET_ELEM_PAD,
__NFTA_SET_ELEM_MAX
};
#define NFTA_SET_ELEM_MAX (__NFTA_SET_ELEM_MAX - 1)
@@ -584,6 +589,7 @@ enum nft_dynset_attributes {
NFTA_DYNSET_SREG_DATA,
NFTA_DYNSET_TIMEOUT,
NFTA_DYNSET_EXPR,
+ NFTA_DYNSET_PAD,
__NFTA_DYNSET_MAX,
};
#define NFTA_DYNSET_MAX (__NFTA_DYNSET_MAX - 1)
@@ -806,6 +812,7 @@ enum nft_limit_attributes {
NFTA_LIMIT_BURST,
NFTA_LIMIT_TYPE,
NFTA_LIMIT_FLAGS,
+ NFTA_LIMIT_PAD,
__NFTA_LIMIT_MAX
};
#define NFTA_LIMIT_MAX (__NFTA_LIMIT_MAX - 1)
@@ -820,6 +827,7 @@ enum nft_counter_attributes {
NFTA_COUNTER_UNSPEC,
NFTA_COUNTER_BYTES,
NFTA_COUNTER_PACKETS,
+ NFTA_COUNTER_PAD,
__NFTA_COUNTER_MAX
};
#define NFTA_COUNTER_MAX (__NFTA_COUNTER_MAX - 1)
@@ -1055,6 +1063,7 @@ enum nft_trace_attibutes {
NFTA_TRACE_MARK,
NFTA_TRACE_NFPROTO,
NFTA_TRACE_POLICY,
+ NFTA_TRACE_PAD,
__NFTA_TRACE_MAX
};
#define NFTA_TRACE_MAX (__NFTA_TRACE_MAX - 1)
diff --git a/include/uapi/linux/netfilter/nfnetlink_acct.h b/include/uapi/linux/netfilter/nfnetlink_acct.h
index f3e34dbbf966..36047ec70f37 100644
--- a/include/uapi/linux/netfilter/nfnetlink_acct.h
+++ b/include/uapi/linux/netfilter/nfnetlink_acct.h
@@ -29,6 +29,7 @@ enum nfnl_acct_type {
NFACCT_FLAGS,
NFACCT_QUOTA,
NFACCT_FILTER,
+ NFACCT_PAD,
__NFACCT_MAX
};
#define NFACCT_MAX (__NFACCT_MAX - 1)
diff --git a/include/uapi/linux/netfilter/nfnetlink_conntrack.h b/include/uapi/linux/netfilter/nfnetlink_conntrack.h
index c1a4e1441a25..9df789709abe 100644
--- a/include/uapi/linux/netfilter/nfnetlink_conntrack.h
+++ b/include/uapi/linux/netfilter/nfnetlink_conntrack.h
@@ -116,6 +116,7 @@ enum ctattr_protoinfo_dccp {
CTA_PROTOINFO_DCCP_STATE,
CTA_PROTOINFO_DCCP_ROLE,
CTA_PROTOINFO_DCCP_HANDSHAKE_SEQ,
+ CTA_PROTOINFO_DCCP_PAD,
__CTA_PROTOINFO_DCCP_MAX,
};
#define CTA_PROTOINFO_DCCP_MAX (__CTA_PROTOINFO_DCCP_MAX - 1)
@@ -135,6 +136,7 @@ enum ctattr_counters {
CTA_COUNTERS_BYTES, /* 64bit counters */
CTA_COUNTERS32_PACKETS, /* old 32bit counters, unused */
CTA_COUNTERS32_BYTES, /* old 32bit counters, unused */
+ CTA_COUNTERS_PAD,
__CTA_COUNTERS_MAX
};
#define CTA_COUNTERS_MAX (__CTA_COUNTERS_MAX - 1)
@@ -143,6 +145,7 @@ enum ctattr_tstamp {
CTA_TIMESTAMP_UNSPEC,
CTA_TIMESTAMP_START,
CTA_TIMESTAMP_STOP,
+ CTA_TIMESTAMP_PAD,
__CTA_TIMESTAMP_MAX
};
#define CTA_TIMESTAMP_MAX (__CTA_TIMESTAMP_MAX - 1)
diff --git a/include/uapi/linux/netfilter/nfnetlink_queue.h b/include/uapi/linux/netfilter/nfnetlink_queue.h
index b67a853638ff..ae30841ff94e 100644
--- a/include/uapi/linux/netfilter/nfnetlink_queue.h
+++ b/include/uapi/linux/netfilter/nfnetlink_queue.h
@@ -30,6 +30,14 @@ struct nfqnl_msg_packet_timestamp {
__aligned_be64 usec;
};
+enum nfqnl_vlan_attr {
+ NFQA_VLAN_UNSPEC,
+ NFQA_VLAN_PROTO, /* __be16 skb vlan_proto */
+ NFQA_VLAN_TCI, /* __be16 skb htons(vlan_tci) */
+ __NFQA_VLAN_MAX,
+};
+#define NFQA_VLAN_MAX (__NFQA_VLAN_MAX - 1)
+
enum nfqnl_attr_type {
NFQA_UNSPEC,
NFQA_PACKET_HDR,
@@ -50,6 +58,8 @@ enum nfqnl_attr_type {
NFQA_UID, /* __u32 sk uid */
NFQA_GID, /* __u32 sk gid */
NFQA_SECCTX, /* security context string */
+ NFQA_VLAN, /* nested attribute: packet vlan info */
+ NFQA_L2HDR, /* full L2 header */
__NFQA_MAX
};
diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h
index 5a30a7563633..e23d78685a01 100644
--- a/include/uapi/linux/nl80211.h
+++ b/include/uapi/linux/nl80211.h
@@ -322,7 +322,9 @@
* @NL80211_CMD_GET_SCAN: get scan results
* @NL80211_CMD_TRIGGER_SCAN: trigger a new scan with the given parameters
* %NL80211_ATTR_TX_NO_CCK_RATE is used to decide whether to send the
- * probe requests at CCK rate or not.
+ * probe requests at CCK rate or not. %NL80211_ATTR_MAC can be used to
+ * specify a BSSID to scan for; if not included, the wildcard BSSID will
+ * be used.
* @NL80211_CMD_NEW_SCAN_RESULTS: scan notification (as a reply to
* NL80211_CMD_GET_SCAN and on the "scan" multicast group)
* @NL80211_CMD_SCAN_ABORTED: scan was aborted, for unspecified reasons,
@@ -427,7 +429,11 @@
* @NL80211_CMD_ASSOCIATE: association request and notification; like
* NL80211_CMD_AUTHENTICATE but for Association and Reassociation
* (similar to MLME-ASSOCIATE.request, MLME-REASSOCIATE.request,
- * MLME-ASSOCIATE.confirm or MLME-REASSOCIATE.confirm primitives).
+ * MLME-ASSOCIATE.confirm or MLME-REASSOCIATE.confirm primitives). The
+ * %NL80211_ATTR_PREV_BSSID attribute is used to specify whether the
+ * request is for the initial association to an ESS (that attribute not
+ * included) or for reassociation within the ESS (that attribute is
+ * included).
* @NL80211_CMD_DEAUTHENTICATE: deauthentication request and notification; like
* NL80211_CMD_AUTHENTICATE but for Deauthentication frames (similar to
* MLME-DEAUTHENTICATION.request and MLME-DEAUTHENTICATE.indication
@@ -477,6 +483,9 @@
* set of BSSID,frequency parameters is used (i.e., either the enforcing
* %NL80211_ATTR_MAC,%NL80211_ATTR_WIPHY_FREQ or the less strict
* %NL80211_ATTR_MAC_HINT and %NL80211_ATTR_WIPHY_FREQ_HINT).
+ * %NL80211_ATTR_PREV_BSSID can be used to request a reassociation within
+ * the ESS in case the device is already associated and an association with
+ * a different BSS is desired.
* Background scan period can optionally be
* specified in %NL80211_ATTR_BG_SCAN_PERIOD,
* if not specified default background scan configuration
@@ -1285,8 +1294,11 @@ enum nl80211_commands {
* @NL80211_ATTR_RESP_IE: (Re)association response information elements as
* sent by peer, for ROAM and successful CONNECT events.
*
- * @NL80211_ATTR_PREV_BSSID: previous BSSID, to be used by in ASSOCIATE
- * commands to specify using a reassociate frame
+ * @NL80211_ATTR_PREV_BSSID: previous BSSID, to be used in ASSOCIATE and CONNECT
+ * commands to specify a request to reassociate within an ESS, i.e., to use
+ * Reassociate Request frame (with the value of this attribute in the
+ * Current AP address field) instead of Association Request frame which is
+ * used for the initial association to an ESS.
*
* @NL80211_ATTR_KEY: key information in a nested attribute with
* %NL80211_KEY_* sub-attributes
@@ -1795,6 +1807,17 @@ enum nl80211_commands {
* in a PBSS. Specified in %NL80211_CMD_CONNECT to request
* connecting to a PCP, and in %NL80211_CMD_START_AP to start
* a PCP instead of AP. Relevant for DMG networks only.
+ * @NL80211_ATTR_BSS_SELECT: nested attribute for driver supporting the
+ * BSS selection feature. When used with %NL80211_CMD_GET_WIPHY it contains
+ * attributes according &enum nl80211_bss_select_attr to indicate what
+ * BSS selection behaviours are supported. When used with %NL80211_CMD_CONNECT
+ * it contains the behaviour-specific attribute containing the parameters for
+ * BSS selection to be done by driver and/or firmware.
+ *
+ * @NL80211_ATTR_STA_SUPPORT_P2P_PS: whether the P2P PS mechanism is supported
+ * or not. u8, one of the values of &enum nl80211_sta_p2p_ps_status
+ *
+ * @NL80211_ATTR_PAD: attribute used for padding for 64-bit alignment
*
* @NUM_NL80211_ATTR: total number of nl80211_attrs available
* @NL80211_ATTR_MAX: highest attribute number currently defined
@@ -2172,6 +2195,12 @@ enum nl80211_attrs {
NL80211_ATTR_PBSS,
+ NL80211_ATTR_BSS_SELECT,
+
+ NL80211_ATTR_STA_SUPPORT_P2P_PS,
+
+ NL80211_ATTR_PAD,
+
/* add attributes here, update the policy in nl80211.c */
__NL80211_ATTR_AFTER_LAST,
@@ -2315,6 +2344,20 @@ enum nl80211_sta_flags {
NL80211_STA_FLAG_MAX = __NL80211_STA_FLAG_AFTER_LAST - 1
};
+/**
+ * enum nl80211_sta_p2p_ps_status - station support of P2P PS
+ *
+ * @NL80211_P2P_PS_UNSUPPORTED: station doesn't support P2P PS mechanism
+ * @NL80211_P2P_PS_SUPPORTED: station supports P2P PS mechanism
+ * @NUM_NL80211_P2P_PS_STATUS: number of values
+ */
+enum nl80211_sta_p2p_ps_status {
+ NL80211_P2P_PS_UNSUPPORTED = 0,
+ NL80211_P2P_PS_SUPPORTED,
+
+ NUM_NL80211_P2P_PS_STATUS,
+};
+
#define NL80211_STA_FLAG_MAX_OLD_API NL80211_STA_FLAG_TDLS_PEER
/**
@@ -2472,6 +2515,9 @@ enum nl80211_sta_bss_param {
* TID+1 and the special TID 16 (i.e. value 17) is used for non-QoS frames;
* each one of those is again nested with &enum nl80211_tid_stats
* attributes carrying the actual values.
+ * @NL80211_STA_INFO_RX_DURATION: aggregate PPDU duration for all frames
+ * received from the station (u64, usec)
+ * @NL80211_STA_INFO_PAD: attribute used for padding for 64-bit alignment
* @__NL80211_STA_INFO_AFTER_LAST: internal
* @NL80211_STA_INFO_MAX: highest possible station info attribute
*/
@@ -2508,6 +2554,8 @@ enum nl80211_sta_info {
NL80211_STA_INFO_BEACON_RX,
NL80211_STA_INFO_BEACON_SIGNAL_AVG,
NL80211_STA_INFO_TID_STATS,
+ NL80211_STA_INFO_RX_DURATION,
+ NL80211_STA_INFO_PAD,
/* keep last */
__NL80211_STA_INFO_AFTER_LAST,
@@ -2524,6 +2572,7 @@ enum nl80211_sta_info {
* transmitted MSDUs (not counting the first attempt; u64)
* @NL80211_TID_STATS_TX_MSDU_FAILED: number of failed transmitted
* MSDUs (u64)
+ * @NL80211_TID_STATS_PAD: attribute used for padding for 64-bit alignment
* @NUM_NL80211_TID_STATS: number of attributes here
* @NL80211_TID_STATS_MAX: highest numbered attribute here
*/
@@ -2533,6 +2582,7 @@ enum nl80211_tid_stats {
NL80211_TID_STATS_TX_MSDU,
NL80211_TID_STATS_TX_MSDU_RETRIES,
NL80211_TID_STATS_TX_MSDU_FAILED,
+ NL80211_TID_STATS_PAD,
/* keep last */
NUM_NL80211_TID_STATS,
@@ -2969,6 +3019,7 @@ enum nl80211_user_reg_hint_type {
* transmitting data (on channel or globally)
* @NL80211_SURVEY_INFO_TIME_SCAN: time the radio spent for scan
* (on this channel or globally)
+ * @NL80211_SURVEY_INFO_PAD: attribute used for padding for 64-bit alignment
* @NL80211_SURVEY_INFO_MAX: highest survey info attribute number
* currently defined
* @__NL80211_SURVEY_INFO_AFTER_LAST: internal use
@@ -2984,6 +3035,7 @@ enum nl80211_survey_info {
NL80211_SURVEY_INFO_TIME_RX,
NL80211_SURVEY_INFO_TIME_TX,
NL80211_SURVEY_INFO_TIME_SCAN,
+ NL80211_SURVEY_INFO_PAD,
/* keep last */
__NL80211_SURVEY_INFO_AFTER_LAST,
@@ -3409,6 +3461,7 @@ enum nl80211_bss_scan_width {
* @NL80211_BSS_LAST_SEEN_BOOTTIME: CLOCK_BOOTTIME timestamp when this entry
* was last updated by a received frame. The value is expected to be
* accurate to about 10ms. (u64, nanoseconds)
+ * @NL80211_BSS_PAD: attribute used for padding for 64-bit alignment
* @__NL80211_BSS_AFTER_LAST: internal
* @NL80211_BSS_MAX: highest BSS attribute
*/
@@ -3429,6 +3482,7 @@ enum nl80211_bss {
NL80211_BSS_BEACON_TSF,
NL80211_BSS_PRESP_DATA,
NL80211_BSS_LAST_SEEN_BOOTTIME,
+ NL80211_BSS_PAD,
/* keep last */
__NL80211_BSS_AFTER_LAST,
@@ -3614,11 +3668,15 @@ enum nl80211_txrate_gi {
* @NL80211_BAND_2GHZ: 2.4 GHz ISM band
* @NL80211_BAND_5GHZ: around 5 GHz band (4.9 - 5.7 GHz)
* @NL80211_BAND_60GHZ: around 60 GHz band (58.32 - 64.80 GHz)
+ * @NUM_NL80211_BANDS: number of bands, avoid using this in userspace
+ * since newer kernel versions may support more bands
*/
enum nl80211_band {
NL80211_BAND_2GHZ,
NL80211_BAND_5GHZ,
NL80211_BAND_60GHZ,
+
+ NUM_NL80211_BANDS,
};
/**
@@ -4665,4 +4723,48 @@ enum nl80211_sched_scan_plan {
__NL80211_SCHED_SCAN_PLAN_AFTER_LAST - 1
};
+/**
+ * struct nl80211_bss_select_rssi_adjust - RSSI adjustment parameters.
+ *
+ * @band: band of BSS that must match for RSSI value adjustment.
+ * @delta: value used to adjust the RSSI value of matching BSS.
+ */
+struct nl80211_bss_select_rssi_adjust {
+ __u8 band;
+ __s8 delta;
+} __attribute__((packed));
+
+/**
+ * enum nl80211_bss_select_attr - attributes for bss selection.
+ *
+ * @__NL80211_BSS_SELECT_ATTR_INVALID: reserved.
+ * @NL80211_BSS_SELECT_ATTR_RSSI: Flag indicating only RSSI-based BSS selection
+ * is requested.
+ * @NL80211_BSS_SELECT_ATTR_BAND_PREF: attribute indicating BSS
+ * selection should be done such that the specified band is preferred.
+ * When there are multiple BSS-es in the preferred band, the driver
+ * shall use RSSI-based BSS selection as a second step. The value of
+ * this attribute is according to &enum nl80211_band (u32).
+ * @NL80211_BSS_SELECT_ATTR_RSSI_ADJUST: When present the RSSI level for
+ * BSS-es in the specified band is to be adjusted before doing
+ * RSSI-based BSS selection. The attribute value is a packed structure
+ * value as specified by &struct nl80211_bss_select_rssi_adjust.
+ * @NL80211_BSS_SELECT_ATTR_MAX: highest bss select attribute number.
+ * @__NL80211_BSS_SELECT_ATTR_AFTER_LAST: internal use.
+ *
+ * One and only one of these attributes is found within %NL80211_ATTR_BSS_SELECT
+ * for %NL80211_CMD_CONNECT. It specifies the required BSS selection behaviour
+ * which the driver shall use.
+ */
+enum nl80211_bss_select_attr {
+ __NL80211_BSS_SELECT_ATTR_INVALID,
+ NL80211_BSS_SELECT_ATTR_RSSI,
+ NL80211_BSS_SELECT_ATTR_BAND_PREF,
+ NL80211_BSS_SELECT_ATTR_RSSI_ADJUST,
+
+ /* keep last */
+ __NL80211_BSS_SELECT_ATTR_AFTER_LAST,
+ NL80211_BSS_SELECT_ATTR_MAX = __NL80211_BSS_SELECT_ATTR_AFTER_LAST - 1
+};
+
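A minimal userspace sketch of filling the packed structure documented above; nesting it as NL80211_BSS_SELECT_ATTR_RSSI_ADJUST inside NL80211_ATTR_BSS_SELECT on an NL80211_CMD_CONNECT request is only indicated in the comment, since the generic-netlink plumbing is out of scope here (the band and delta values are illustrative):

#include <linux/nl80211.h>

struct nl80211_bss_select_rssi_adjust adjust = {
	.band  = NL80211_BAND_5GHZ,
	.delta = 10,	/* treat 5 GHz BSSes as 10 dB stronger when comparing */
};
/* sizeof(adjust) == 2: the structure is packed (u8 band + s8 delta).
 * It would be sent as the payload of NL80211_BSS_SELECT_ATTR_RSSI_ADJUST,
 * nested inside NL80211_ATTR_BSS_SELECT of an NL80211_CMD_CONNECT message.
 */
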
#endif /* __LINUX_NL80211_H */
diff --git a/include/uapi/linux/openvswitch.h b/include/uapi/linux/openvswitch.h
index 616d04761730..bb0d515b7654 100644
--- a/include/uapi/linux/openvswitch.h
+++ b/include/uapi/linux/openvswitch.h
@@ -84,6 +84,7 @@ enum ovs_datapath_attr {
OVS_DP_ATTR_STATS, /* struct ovs_dp_stats */
OVS_DP_ATTR_MEGAFLOW_STATS, /* struct ovs_dp_megaflow_stats */
OVS_DP_ATTR_USER_FEATURES, /* OVS_DP_F_* */
+ OVS_DP_ATTR_PAD,
__OVS_DP_ATTR_MAX
};
@@ -253,6 +254,7 @@ enum ovs_vport_attr {
OVS_VPORT_ATTR_UPCALL_PID, /* array of u32 Netlink socket PIDs for */
/* receiving upcalls */
OVS_VPORT_ATTR_STATS, /* struct ovs_vport_stats */
+ OVS_VPORT_ATTR_PAD,
__OVS_VPORT_ATTR_MAX
};
@@ -351,6 +353,7 @@ enum ovs_tunnel_key_attr {
OVS_TUNNEL_KEY_ATTR_VXLAN_OPTS, /* Nested OVS_VXLAN_EXT_* */
OVS_TUNNEL_KEY_ATTR_IPV6_SRC, /* struct in6_addr src IPv6 address. */
OVS_TUNNEL_KEY_ATTR_IPV6_DST, /* struct in6_addr dst IPv6 address. */
+ OVS_TUNNEL_KEY_ATTR_PAD,
__OVS_TUNNEL_KEY_ATTR_MAX
};
@@ -518,6 +521,7 @@ enum ovs_flow_attr {
* logging should be suppressed. */
OVS_FLOW_ATTR_UFID, /* Variable length unique flow identifier. */
OVS_FLOW_ATTR_UFID_FLAGS,/* u32 of OVS_UFID_F_*. */
+ OVS_FLOW_ATTR_PAD,
__OVS_FLOW_ATTR_MAX
};
diff --git a/include/uapi/linux/pci_regs.h b/include/uapi/linux/pci_regs.h
index 1becea86c73c..404095124ae2 100644
--- a/include/uapi/linux/pci_regs.h
+++ b/include/uapi/linux/pci_regs.h
@@ -670,7 +670,8 @@
#define PCI_EXT_CAP_ID_SECPCI 0x19 /* Secondary PCIe Capability */
#define PCI_EXT_CAP_ID_PMUX 0x1A /* Protocol Multiplexing */
#define PCI_EXT_CAP_ID_PASID 0x1B /* Process Address Space ID */
-#define PCI_EXT_CAP_ID_MAX PCI_EXT_CAP_ID_PASID
+#define PCI_EXT_CAP_ID_DPC 0x1D /* Downstream Port Containment */
+#define PCI_EXT_CAP_ID_MAX PCI_EXT_CAP_ID_DPC
#define PCI_EXT_CAP_DSN_SIZEOF 12
#define PCI_EXT_CAP_MCAST_ENDPOINT_SIZEOF 40
@@ -946,4 +947,21 @@
#define PCI_TPH_CAP_ST_SHIFT 16 /* st table shift */
#define PCI_TPH_BASE_SIZEOF 12 /* size with no st table */
+/* Downstream Port Containment */
+#define PCI_EXP_DPC_CAP 4 /* DPC Capability */
+#define PCI_EXP_DPC_CAP_RP_EXT 0x20 /* Root Port Extensions for DPC */
+#define PCI_EXP_DPC_CAP_POISONED_TLP 0x40 /* Poisoned TLP Egress Blocking Supported */
+#define PCI_EXP_DPC_CAP_SW_TRIGGER 0x80 /* Software Triggering Supported */
+#define PCI_EXP_DPC_CAP_DL_ACTIVE 0x1000 /* ERR_COR signal on DL_Active supported */
+
+#define PCI_EXP_DPC_CTL 6 /* DPC control */
+#define PCI_EXP_DPC_CTL_EN_NONFATAL 0x02 /* Enable trigger on ERR_NONFATAL message */
+#define PCI_EXP_DPC_CTL_INT_EN 0x08 /* DPC Interrupt Enable */
+
+#define PCI_EXP_DPC_STATUS 8 /* DPC Status */
+#define PCI_EXP_DPC_STATUS_TRIGGER 0x01 /* Trigger Status */
+#define PCI_EXP_DPC_STATUS_INTERRUPT 0x08 /* Interrupt Status */
+
+#define PCI_EXP_DPC_SOURCE_ID 10 /* DPC Source Identifier */
+
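A driver-context sketch using these offsets, assuming pdev is a PCIe port device handled by some hypothetical driver; it locates the DPC extended capability and tests two of the capability bits defined above:

#include <linux/pci.h>

static void dpc_probe_caps(struct pci_dev *pdev)
{
	u16 cap;
	int pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_DPC);

	if (!pos)
		return;		/* no Downstream Port Containment capability */

	pci_read_config_word(pdev, pos + PCI_EXP_DPC_CAP, &cap);
	if (cap & PCI_EXP_DPC_CAP_SW_TRIGGER)
		dev_info(&pdev->dev, "DPC software triggering supported\n");
	if (cap & PCI_EXP_DPC_CAP_RP_EXT)
		dev_info(&pdev->dev, "DPC root port extensions present\n");
}
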
#endif /* LINUX_PCI_REGS_H */
diff --git a/include/uapi/linux/perf_event.h b/include/uapi/linux/perf_event.h
index 1afe9623c1a7..43fc8d213472 100644
--- a/include/uapi/linux/perf_event.h
+++ b/include/uapi/linux/perf_event.h
@@ -340,7 +340,8 @@ struct perf_event_attr {
comm_exec : 1, /* flag comm events that are due to an exec */
use_clockid : 1, /* use @clockid for time fields */
context_switch : 1, /* context switch data */
- __reserved_1 : 37;
+ write_backward : 1, /* Write ring buffer from end to beginning */
+ __reserved_1 : 36;
union {
__u32 wakeup_events; /* wakeup every n events */
@@ -401,6 +402,7 @@ struct perf_event_attr {
#define PERF_EVENT_IOC_SET_FILTER _IOW('$', 6, char *)
#define PERF_EVENT_IOC_ID _IOR('$', 7, __u64 *)
#define PERF_EVENT_IOC_SET_BPF _IOW('$', 8, __u32)
+#define PERF_EVENT_IOC_PAUSE_OUTPUT _IOW('$', 9, __u32)
enum perf_event_ioc_flags {
PERF_IOC_FLAG_GROUP = 1U << 0,
diff --git a/include/uapi/linux/pkt_cls.h b/include/uapi/linux/pkt_cls.h
index c43c5f78b9c4..eba5914ba5d1 100644
--- a/include/uapi/linux/pkt_cls.h
+++ b/include/uapi/linux/pkt_cls.h
@@ -66,6 +66,7 @@ enum {
TCA_ACT_OPTIONS,
TCA_ACT_INDEX,
TCA_ACT_STATS,
+ TCA_ACT_PAD,
__TCA_ACT_MAX
};
@@ -150,6 +151,10 @@ enum {
#define TCA_POLICE_MAX (__TCA_POLICE_MAX - 1)
+/* tca flags definitions */
+#define TCA_CLS_FLAGS_SKIP_HW (1 << 0)
+#define TCA_CLS_FLAGS_SKIP_SW (1 << 1)
+
/* U32 filters */
#define TC_U32_HTID(h) ((h)&0xFFF00000)
@@ -173,6 +178,7 @@ enum {
TCA_U32_PCNT,
TCA_U32_MARK,
TCA_U32_FLAGS,
+ TCA_U32_PAD,
__TCA_U32_MAX
};
diff --git a/include/uapi/linux/pkt_sched.h b/include/uapi/linux/pkt_sched.h
index 8cb18b44968e..2382eed50278 100644
--- a/include/uapi/linux/pkt_sched.h
+++ b/include/uapi/linux/pkt_sched.h
@@ -179,6 +179,7 @@ enum {
TCA_TBF_PRATE64,
TCA_TBF_BURST,
TCA_TBF_PBURST,
+ TCA_TBF_PAD,
__TCA_TBF_MAX,
};
@@ -368,6 +369,7 @@ enum {
TCA_HTB_DIRECT_QLEN,
TCA_HTB_RATE64,
TCA_HTB_CEIL64,
+ TCA_HTB_PAD,
__TCA_HTB_MAX,
};
@@ -531,6 +533,7 @@ enum {
TCA_NETEM_RATE,
TCA_NETEM_ECN,
TCA_NETEM_RATE64,
+ TCA_NETEM_PAD,
__TCA_NETEM_MAX,
};
@@ -715,6 +718,8 @@ enum {
TCA_FQ_CODEL_FLOWS,
TCA_FQ_CODEL_QUANTUM,
TCA_FQ_CODEL_CE_THRESHOLD,
+ TCA_FQ_CODEL_DROP_BATCH_SIZE,
+ TCA_FQ_CODEL_MEMORY_LIMIT,
__TCA_FQ_CODEL_MAX
};
@@ -739,6 +744,8 @@ struct tc_fq_codel_qd_stats {
__u32 new_flows_len; /* count of flows in new list */
__u32 old_flows_len; /* count of flows in old list */
__u32 ce_mark; /* packets above ce_threshold */
+ __u32 memory_usage; /* in bytes */
+ __u32 drop_overmemory;
};
struct tc_fq_codel_cl_stats {
diff --git a/include/uapi/linux/qrtr.h b/include/uapi/linux/qrtr.h
new file mode 100644
index 000000000000..66c0748d26e2
--- /dev/null
+++ b/include/uapi/linux/qrtr.h
@@ -0,0 +1,12 @@
+#ifndef _LINUX_QRTR_H
+#define _LINUX_QRTR_H
+
+#include <linux/socket.h>
+
+struct sockaddr_qrtr {
+ __kernel_sa_family_t sq_family;
+ __u32 sq_node;
+ __u32 sq_port;
+};
+
+#endif /* _LINUX_QRTR_H */
diff --git a/include/uapi/linux/quota.h b/include/uapi/linux/quota.h
index 38baddb807f5..4d2489ef6f10 100644
--- a/include/uapi/linux/quota.h
+++ b/include/uapi/linux/quota.h
@@ -191,6 +191,7 @@ enum {
QUOTA_NL_A_DEV_MAJOR,
QUOTA_NL_A_DEV_MINOR,
QUOTA_NL_A_CAUSED_ID,
+ QUOTA_NL_A_PAD,
__QUOTA_NL_A_MAX,
};
#define QUOTA_NL_A_MAX (__QUOTA_NL_A_MAX - 1)
diff --git a/include/linux/rio_mport_cdev.h b/include/uapi/linux/rio_mport_cdev.h
index b65d19df76d2..5796bf1d06ad 100644
--- a/include/linux/rio_mport_cdev.h
+++ b/include/uapi/linux/rio_mport_cdev.h
@@ -39,16 +39,16 @@
#ifndef _RIO_MPORT_CDEV_H_
#define _RIO_MPORT_CDEV_H_
-#ifndef __user
-#define __user
-#endif
+#include <linux/ioctl.h>
+#include <linux/types.h>
struct rio_mport_maint_io {
- uint32_t rioid; /* destID of remote device */
- uint32_t hopcount; /* hopcount to remote device */
- uint32_t offset; /* offset in register space */
- size_t length; /* length in bytes */
- void __user *buffer; /* data buffer */
+ __u16 rioid; /* destID of remote device */
+ __u8 hopcount; /* hopcount to remote device */
+ __u8 pad0[5];
+ __u32 offset; /* offset in register space */
+ __u32 length; /* length in bytes */
+ __u64 buffer; /* pointer to data buffer */
};
/*
@@ -66,22 +66,23 @@ struct rio_mport_maint_io {
#define RIO_CAP_MAP_INB (1 << 7)
struct rio_mport_properties {
- uint16_t hdid;
- uint8_t id; /* Physical port ID */
- uint8_t index;
- uint32_t flags;
- uint32_t sys_size; /* Default addressing size */
- uint8_t port_ok;
- uint8_t link_speed;
- uint8_t link_width;
- uint32_t dma_max_sge;
- uint32_t dma_max_size;
- uint32_t dma_align;
- uint32_t transfer_mode; /* Default transfer mode */
- uint32_t cap_sys_size; /* Capable system sizes */
- uint32_t cap_addr_size; /* Capable addressing sizes */
- uint32_t cap_transfer_mode; /* Capable transfer modes */
- uint32_t cap_mport; /* Mport capabilities */
+ __u16 hdid;
+ __u8 id; /* Physical port ID */
+ __u8 index;
+ __u32 flags;
+ __u32 sys_size; /* Default addressing size */
+ __u8 port_ok;
+ __u8 link_speed;
+ __u8 link_width;
+ __u8 pad0;
+ __u32 dma_max_sge;
+ __u32 dma_max_size;
+ __u32 dma_align;
+ __u32 transfer_mode; /* Default transfer mode */
+ __u32 cap_sys_size; /* Capable system sizes */
+ __u32 cap_addr_size; /* Capable addressing sizes */
+ __u32 cap_transfer_mode; /* Capable transfer modes */
+ __u32 cap_mport; /* Mport capabilities */
};
/*
@@ -93,54 +94,57 @@ struct rio_mport_properties {
#define RIO_PORTWRITE (1 << 1)
struct rio_doorbell {
- uint32_t rioid;
- uint16_t payload;
+ __u16 rioid;
+ __u16 payload;
};
struct rio_doorbell_filter {
- uint32_t rioid; /* 0xffffffff to match all ids */
- uint16_t low;
- uint16_t high;
+ __u16 rioid; /* Use RIO_INVALID_DESTID to match all ids */
+ __u16 low;
+ __u16 high;
+ __u16 pad0;
};
struct rio_portwrite {
- uint32_t payload[16];
+ __u32 payload[16];
};
struct rio_pw_filter {
- uint32_t mask;
- uint32_t low;
- uint32_t high;
+ __u32 mask;
+ __u32 low;
+ __u32 high;
+ __u32 pad0;
};
/* RapidIO base address for inbound requests set to value defined below
* indicates that no specific RIO-to-local address translation is requested
* and driver should use direct (one-to-one) address mapping.
*/
-#define RIO_MAP_ANY_ADDR (uint64_t)(~((uint64_t) 0))
+#define RIO_MAP_ANY_ADDR (__u64)(~((__u64) 0))
struct rio_mmap {
- uint32_t rioid;
- uint64_t rio_addr;
- uint64_t length;
- uint64_t handle;
- void *address;
+ __u16 rioid;
+ __u16 pad0[3];
+ __u64 rio_addr;
+ __u64 length;
+ __u64 handle;
+ __u64 address;
};
struct rio_dma_mem {
- uint64_t length; /* length of DMA memory */
- uint64_t dma_handle; /* handle associated with this memory */
- void *buffer; /* pointer to this memory */
+ __u64 length; /* length of DMA memory */
+ __u64 dma_handle; /* handle associated with this memory */
+ __u64 address;
};
-
struct rio_event {
- unsigned int header; /* event type RIO_DOORBELL or RIO_PORTWRITE */
+ __u32 header; /* event type RIO_DOORBELL or RIO_PORTWRITE */
union {
struct rio_doorbell doorbell; /* header for RIO_DOORBELL */
struct rio_portwrite portwrite; /* header for RIO_PORTWRITE */
} u;
+ __u32 pad0;
};
enum rio_transfer_sync {
@@ -184,35 +188,37 @@ enum rio_exchange {
};
struct rio_transfer_io {
- uint32_t rioid; /* Target destID */
- uint64_t rio_addr; /* Address in target's RIO mem space */
- enum rio_exchange method; /* Data exchange method */
- void __user *loc_addr;
- uint64_t handle;
- uint64_t offset; /* Offset in buffer */
- uint64_t length; /* Length in bytes */
- uint32_t completion_code; /* Completion code for this transfer */
+ __u64 rio_addr; /* Address in target's RIO mem space */
+ __u64 loc_addr;
+ __u64 handle;
+ __u64 offset; /* Offset in buffer */
+ __u64 length; /* Length in bytes */
+ __u16 rioid; /* Target destID */
+ __u16 method; /* Data exchange method, one of rio_exchange enum */
+ __u32 completion_code; /* Completion code for this transfer */
};
struct rio_transaction {
- uint32_t transfer_mode; /* Data transfer mode */
- enum rio_transfer_sync sync; /* Synchronization method */
- enum rio_transfer_dir dir; /* Transfer direction */
- size_t count; /* Number of transfers */
- struct rio_transfer_io __user *block; /* Array of <count> transfers */
+ __u64 block; /* Pointer to array of <count> transfers */
+ __u32 count; /* Number of transfers */
+ __u32 transfer_mode; /* Data transfer mode */
+ __u16 sync; /* Synch method, one of rio_transfer_sync enum */
+ __u16 dir; /* Transfer direction, one of rio_transfer_dir enum */
+ __u32 pad0;
};
struct rio_async_tx_wait {
- uint32_t token; /* DMA transaction ID token */
- uint32_t timeout; /* Wait timeout in msec, if 0 use default TO */
+ __u32 token; /* DMA transaction ID token */
+ __u32 timeout; /* Wait timeout in msec, if 0 use default TO */
};
#define RIO_MAX_DEVNAME_SZ 20
struct rio_rdev_info {
- uint32_t destid;
- uint8_t hopcount;
- uint32_t comptag;
+ __u16 destid;
+ __u8 hopcount;
+ __u8 pad0;
+ __u32 comptag;
char name[RIO_MAX_DEVNAME_SZ + 1];
};
@@ -220,11 +226,11 @@ struct rio_rdev_info {
#define RIO_MPORT_DRV_MAGIC 'm'
#define RIO_MPORT_MAINT_HDID_SET \
- _IOW(RIO_MPORT_DRV_MAGIC, 1, uint16_t)
+ _IOW(RIO_MPORT_DRV_MAGIC, 1, __u16)
#define RIO_MPORT_MAINT_COMPTAG_SET \
- _IOW(RIO_MPORT_DRV_MAGIC, 2, uint32_t)
+ _IOW(RIO_MPORT_DRV_MAGIC, 2, __u32)
#define RIO_MPORT_MAINT_PORT_IDX_GET \
- _IOR(RIO_MPORT_DRV_MAGIC, 3, uint32_t)
+ _IOR(RIO_MPORT_DRV_MAGIC, 3, __u32)
#define RIO_MPORT_GET_PROPERTIES \
_IOR(RIO_MPORT_DRV_MAGIC, 4, struct rio_mport_properties)
#define RIO_MPORT_MAINT_READ_LOCAL \
@@ -244,9 +250,9 @@ struct rio_rdev_info {
#define RIO_DISABLE_PORTWRITE_RANGE \
_IOW(RIO_MPORT_DRV_MAGIC, 12, struct rio_pw_filter)
#define RIO_SET_EVENT_MASK \
- _IOW(RIO_MPORT_DRV_MAGIC, 13, unsigned int)
+ _IOW(RIO_MPORT_DRV_MAGIC, 13, __u32)
#define RIO_GET_EVENT_MASK \
- _IOR(RIO_MPORT_DRV_MAGIC, 14, unsigned int)
+ _IOR(RIO_MPORT_DRV_MAGIC, 14, __u32)
#define RIO_MAP_OUTBOUND \
_IOWR(RIO_MPORT_DRV_MAGIC, 15, struct rio_mmap)
#define RIO_UNMAP_OUTBOUND \
@@ -254,11 +260,11 @@ struct rio_rdev_info {
#define RIO_MAP_INBOUND \
_IOWR(RIO_MPORT_DRV_MAGIC, 17, struct rio_mmap)
#define RIO_UNMAP_INBOUND \
- _IOW(RIO_MPORT_DRV_MAGIC, 18, uint64_t)
+ _IOW(RIO_MPORT_DRV_MAGIC, 18, __u64)
#define RIO_ALLOC_DMA \
_IOWR(RIO_MPORT_DRV_MAGIC, 19, struct rio_dma_mem)
#define RIO_FREE_DMA \
- _IOW(RIO_MPORT_DRV_MAGIC, 20, uint64_t)
+ _IOW(RIO_MPORT_DRV_MAGIC, 20, __u64)
#define RIO_TRANSFER \
_IOWR(RIO_MPORT_DRV_MAGIC, 21, struct rio_transaction)
#define RIO_WAIT_FOR_ASYNC \
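
With the structures switched to fixed-width __u types, explicit padding and u64-encoded pointers, the ioctl layout is now identical for 32-bit and 64-bit userspace. A hedged sketch of querying mport capabilities through the character device (the device node name is an assumption):

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/rio_mport_cdev.h>

int main(void)
{
	struct rio_mport_properties prop;
	int fd = open("/dev/rio_mport0", O_RDWR);

	if (fd < 0)
		return 1;
	if (ioctl(fd, RIO_MPORT_GET_PROPERTIES, &prop) == 0)
		printf("hdid %u, link width %u\n",
		       (unsigned)prop.hdid, (unsigned)prop.link_width);
	close(fd);
	return 0;
}
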
diff --git a/include/uapi/linux/rtnetlink.h b/include/uapi/linux/rtnetlink.h
index ca764b5da86d..262f0379d83a 100644
--- a/include/uapi/linux/rtnetlink.h
+++ b/include/uapi/linux/rtnetlink.h
@@ -139,6 +139,11 @@ enum {
RTM_GETNSID = 90,
#define RTM_GETNSID RTM_GETNSID
+ RTM_NEWSTATS = 92,
+#define RTM_NEWSTATS RTM_NEWSTATS
+ RTM_GETSTATS = 94,
+#define RTM_GETSTATS RTM_GETSTATS
+
__RTM_MAX,
#define RTM_MAX (((__RTM_MAX + 3) & ~3) - 1)
};
@@ -312,6 +317,7 @@ enum rtattr_type_t {
RTA_ENCAP_TYPE,
RTA_ENCAP,
RTA_EXPIRES,
+ RTA_PAD,
__RTA_MAX
};
@@ -536,6 +542,7 @@ enum {
TCA_FCNT,
TCA_STATS2,
TCA_STAB,
+ TCA_PAD,
__TCA_MAX
};
diff --git a/include/uapi/linux/serial_core.h b/include/uapi/linux/serial_core.h
index e513a4ee369b..24da334af7f1 100644
--- a/include/uapi/linux/serial_core.h
+++ b/include/uapi/linux/serial_core.h
@@ -264,4 +264,7 @@
/* MVEBU UART */
#define PORT_MVEBU 114
+/* Microchip PIC32 UART */
+#define PORT_PIC32 115
+
#endif /* _UAPILINUX_SERIAL_CORE_H */
diff --git a/include/uapi/linux/signal.h b/include/uapi/linux/signal.h
index e1bd50c29ded..cd0804b6bfa2 100644
--- a/include/uapi/linux/signal.h
+++ b/include/uapi/linux/signal.h
@@ -7,4 +7,9 @@
#define SS_ONSTACK 1
#define SS_DISABLE 2
+/* bit-flags */
+#define SS_AUTODISARM (1U << 31) /* disable sas during sighandling */
+/* mask for all SS_xxx flags */
+#define SS_FLAG_BITS SS_AUTODISARM
+
#endif /* _UAPI_LINUX_SIGNAL_H */
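
SS_AUTODISARM asks the kernel to clear the alternate signal stack for the duration of the handler, so swapcontext()-style user context switches started from a handler cannot land on a stack that is still marked in use. A hedged sketch; kernels without the flag reject unknown ss_flags with EINVAL, in which case the caller can retry with ss_flags = 0:

#include <signal.h>
#include <stdlib.h>

#ifndef SS_AUTODISARM
#define SS_AUTODISARM (1U << 31)
#endif

static int setup_sigstack(void)
{
	stack_t ss = {
		.ss_sp    = malloc(SIGSTKSZ),
		.ss_size  = SIGSTKSZ,
		.ss_flags = SS_AUTODISARM,
	};

	if (!ss.ss_sp)
		return -1;
	return sigaltstack(&ss, NULL);
}
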
diff --git a/include/uapi/linux/sock_diag.h b/include/uapi/linux/sock_diag.h
index bae2d80034d4..7ff505d8a47b 100644
--- a/include/uapi/linux/sock_diag.h
+++ b/include/uapi/linux/sock_diag.h
@@ -20,6 +20,7 @@ enum {
SK_MEMINFO_WMEM_QUEUED,
SK_MEMINFO_OPTMEM,
SK_MEMINFO_BACKLOG,
+ SK_MEMINFO_DROPS,
SK_MEMINFO_VARS,
};
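
SK_MEMINFO_DROPS extends the fixed __u32 array that sock_diag reports per socket (e.g. in the INET_DIAG_SKMEMINFO attribute), so the new counter is just one more index into that array. A hedged sketch of reading it from an already-parsed attribute payload:

#include <linux/sock_diag.h>
#include <linux/types.h>

/* meminfo points at the attribute payload: __u32 values indexed by the
 * SK_MEMINFO_* enum. Older kernels send fewer entries, so check the
 * payload length before indexing. */
static __u32 skmem_drops(const __u32 *meminfo, unsigned int len)
{
	if (len < (SK_MEMINFO_DROPS + 1) * sizeof(__u32))
		return 0;
	return meminfo[SK_MEMINFO_DROPS];
}
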
diff --git a/include/uapi/linux/stddef.h b/include/uapi/linux/stddef.h
index aa9f10428743..621fa8ac4425 100644
--- a/include/uapi/linux/stddef.h
+++ b/include/uapi/linux/stddef.h
@@ -1 +1,5 @@
#include <linux/compiler.h>
+
+#ifndef __always_inline
+#define __always_inline inline
+#endif
diff --git a/include/uapi/linux/swab.h b/include/uapi/linux/swab.h
index 3f10e5317b46..8f3a8f606fd9 100644
--- a/include/uapi/linux/swab.h
+++ b/include/uapi/linux/swab.h
@@ -45,9 +45,7 @@
static inline __attribute_const__ __u16 __fswab16(__u16 val)
{
-#ifdef __HAVE_BUILTIN_BSWAP16__
- return __builtin_bswap16(val);
-#elif defined (__arch_swab16)
+#if defined (__arch_swab16)
return __arch_swab16(val);
#else
return ___constant_swab16(val);
@@ -56,9 +54,7 @@ static inline __attribute_const__ __u16 __fswab16(__u16 val)
static inline __attribute_const__ __u32 __fswab32(__u32 val)
{
-#ifdef __HAVE_BUILTIN_BSWAP32__
- return __builtin_bswap32(val);
-#elif defined(__arch_swab32)
+#if defined(__arch_swab32)
return __arch_swab32(val);
#else
return ___constant_swab32(val);
@@ -67,9 +63,7 @@ static inline __attribute_const__ __u32 __fswab32(__u32 val)
static inline __attribute_const__ __u64 __fswab64(__u64 val)
{
-#ifdef __HAVE_BUILTIN_BSWAP64__
- return __builtin_bswap64(val);
-#elif defined (__arch_swab64)
+#if defined (__arch_swab64)
return __arch_swab64(val);
#elif defined(__SWAB_64_THRU_32__)
__u32 h = val >> 32;
@@ -102,28 +96,40 @@ static inline __attribute_const__ __u32 __fswahb32(__u32 val)
* __swab16 - return a byteswapped 16-bit value
* @x: value to byteswap
*/
+#ifdef __HAVE_BUILTIN_BSWAP16__
+#define __swab16(x) (__u16)__builtin_bswap16((__u16)(x))
+#else
#define __swab16(x) \
(__builtin_constant_p((__u16)(x)) ? \
___constant_swab16(x) : \
__fswab16(x))
+#endif
/**
* __swab32 - return a byteswapped 32-bit value
* @x: value to byteswap
*/
+#ifdef __HAVE_BUILTIN_BSWAP32__
+#define __swab32(x) (__u32)__builtin_bswap32((__u32)(x))
+#else
#define __swab32(x) \
(__builtin_constant_p((__u32)(x)) ? \
___constant_swab32(x) : \
__fswab32(x))
+#endif
/**
* __swab64 - return a byteswapped 64-bit value
* @x: value to byteswap
*/
+#ifdef __HAVE_BUILTIN_BSWAP64__
+#define __swab64(x) (__u64)__builtin_bswap64((__u64)(x))
+#else
#define __swab64(x) \
(__builtin_constant_p((__u64)(x)) ? \
___constant_swab64(x) : \
__fswab64(x))
+#endif
/**
* __swahw32 - return a word-swapped 32-bit value
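
With the builtin/arch selection moved from __fswab*() into the __swab*() macros, callers are unchanged: constants still fold at compile time, and variables go through __builtin_bswap*() where the compiler provides it. A small userspace sketch, assuming the exported header is usable directly:

#include <stdio.h>
#include <linux/swab.h>

int main(void)
{
	__u32 v = 0x12345678;

	printf("%#x -> %#x\n", v, __swab32(v));	/* prints 0x78563412 */
	return 0;
}
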
diff --git a/include/uapi/linux/tc_act/Kbuild b/include/uapi/linux/tc_act/Kbuild
index 242cf0c6e33d..e3969bd939e4 100644
--- a/include/uapi/linux/tc_act/Kbuild
+++ b/include/uapi/linux/tc_act/Kbuild
@@ -10,3 +10,4 @@ header-y += tc_skbedit.h
header-y += tc_vlan.h
header-y += tc_bpf.h
header-y += tc_connmark.h
+header-y += tc_ife.h
diff --git a/include/uapi/linux/tc_act/tc_bpf.h b/include/uapi/linux/tc_act/tc_bpf.h
index 07f17cc70bb3..063d9d465119 100644
--- a/include/uapi/linux/tc_act/tc_bpf.h
+++ b/include/uapi/linux/tc_act/tc_bpf.h
@@ -26,6 +26,7 @@ enum {
TCA_ACT_BPF_OPS,
TCA_ACT_BPF_FD,
TCA_ACT_BPF_NAME,
+ TCA_ACT_BPF_PAD,
__TCA_ACT_BPF_MAX,
};
#define TCA_ACT_BPF_MAX (__TCA_ACT_BPF_MAX - 1)
diff --git a/include/uapi/linux/tc_act/tc_connmark.h b/include/uapi/linux/tc_act/tc_connmark.h
index 994b0971bce2..62a5e944c554 100644
--- a/include/uapi/linux/tc_act/tc_connmark.h
+++ b/include/uapi/linux/tc_act/tc_connmark.h
@@ -15,6 +15,7 @@ enum {
TCA_CONNMARK_UNSPEC,
TCA_CONNMARK_PARMS,
TCA_CONNMARK_TM,
+ TCA_CONNMARK_PAD,
__TCA_CONNMARK_MAX
};
#define TCA_CONNMARK_MAX (__TCA_CONNMARK_MAX - 1)
diff --git a/include/uapi/linux/tc_act/tc_csum.h b/include/uapi/linux/tc_act/tc_csum.h
index a047c49a3153..8ac8041ab5f1 100644
--- a/include/uapi/linux/tc_act/tc_csum.h
+++ b/include/uapi/linux/tc_act/tc_csum.h
@@ -10,6 +10,7 @@ enum {
TCA_CSUM_UNSPEC,
TCA_CSUM_PARMS,
TCA_CSUM_TM,
+ TCA_CSUM_PAD,
__TCA_CSUM_MAX
};
#define TCA_CSUM_MAX (__TCA_CSUM_MAX - 1)
diff --git a/include/uapi/linux/tc_act/tc_defact.h b/include/uapi/linux/tc_act/tc_defact.h
index 17dddb40f740..d2a3abb77aeb 100644
--- a/include/uapi/linux/tc_act/tc_defact.h
+++ b/include/uapi/linux/tc_act/tc_defact.h
@@ -12,6 +12,7 @@ enum {
TCA_DEF_TM,
TCA_DEF_PARMS,
TCA_DEF_DATA,
+ TCA_DEF_PAD,
__TCA_DEF_MAX
};
#define TCA_DEF_MAX (__TCA_DEF_MAX - 1)
diff --git a/include/uapi/linux/tc_act/tc_gact.h b/include/uapi/linux/tc_act/tc_gact.h
index f7bf94eed510..70b536a8f8b2 100644
--- a/include/uapi/linux/tc_act/tc_gact.h
+++ b/include/uapi/linux/tc_act/tc_gact.h
@@ -25,6 +25,7 @@ enum {
TCA_GACT_TM,
TCA_GACT_PARMS,
TCA_GACT_PROB,
+ TCA_GACT_PAD,
__TCA_GACT_MAX
};
#define TCA_GACT_MAX (__TCA_GACT_MAX - 1)
diff --git a/include/uapi/linux/tc_act/tc_ife.h b/include/uapi/linux/tc_act/tc_ife.h
index d648ff66586f..4ece02a77b9a 100644
--- a/include/uapi/linux/tc_act/tc_ife.h
+++ b/include/uapi/linux/tc_act/tc_ife.h
@@ -23,6 +23,7 @@ enum {
TCA_IFE_SMAC,
TCA_IFE_TYPE,
TCA_IFE_METALST,
+ TCA_IFE_PAD,
__TCA_IFE_MAX
};
#define TCA_IFE_MAX (__TCA_IFE_MAX - 1)
diff --git a/include/uapi/linux/tc_act/tc_ipt.h b/include/uapi/linux/tc_act/tc_ipt.h
index 130aaadf6fac..7c6e155dd981 100644
--- a/include/uapi/linux/tc_act/tc_ipt.h
+++ b/include/uapi/linux/tc_act/tc_ipt.h
@@ -14,6 +14,7 @@ enum {
TCA_IPT_CNT,
TCA_IPT_TM,
TCA_IPT_TARG,
+ TCA_IPT_PAD,
__TCA_IPT_MAX
};
#define TCA_IPT_MAX (__TCA_IPT_MAX - 1)
diff --git a/include/uapi/linux/tc_act/tc_mirred.h b/include/uapi/linux/tc_act/tc_mirred.h
index 7561750e8fd6..3d7a2b352a62 100644
--- a/include/uapi/linux/tc_act/tc_mirred.h
+++ b/include/uapi/linux/tc_act/tc_mirred.h
@@ -20,6 +20,7 @@ enum {
TCA_MIRRED_UNSPEC,
TCA_MIRRED_TM,
TCA_MIRRED_PARMS,
+ TCA_MIRRED_PAD,
__TCA_MIRRED_MAX
};
#define TCA_MIRRED_MAX (__TCA_MIRRED_MAX - 1)
diff --git a/include/uapi/linux/tc_act/tc_nat.h b/include/uapi/linux/tc_act/tc_nat.h
index 6663aeba0b9a..923457c9ebf0 100644
--- a/include/uapi/linux/tc_act/tc_nat.h
+++ b/include/uapi/linux/tc_act/tc_nat.h
@@ -10,6 +10,7 @@ enum {
TCA_NAT_UNSPEC,
TCA_NAT_PARMS,
TCA_NAT_TM,
+ TCA_NAT_PAD,
__TCA_NAT_MAX
};
#define TCA_NAT_MAX (__TCA_NAT_MAX - 1)
diff --git a/include/uapi/linux/tc_act/tc_pedit.h b/include/uapi/linux/tc_act/tc_pedit.h
index 716cfabcd5b2..6389959a5157 100644
--- a/include/uapi/linux/tc_act/tc_pedit.h
+++ b/include/uapi/linux/tc_act/tc_pedit.h
@@ -10,6 +10,7 @@ enum {
TCA_PEDIT_UNSPEC,
TCA_PEDIT_TM,
TCA_PEDIT_PARMS,
+ TCA_PEDIT_PAD,
__TCA_PEDIT_MAX
};
#define TCA_PEDIT_MAX (__TCA_PEDIT_MAX - 1)
diff --git a/include/uapi/linux/tc_act/tc_skbedit.h b/include/uapi/linux/tc_act/tc_skbedit.h
index 7a2e910a5f08..fecb5cc48c40 100644
--- a/include/uapi/linux/tc_act/tc_skbedit.h
+++ b/include/uapi/linux/tc_act/tc_skbedit.h
@@ -39,6 +39,7 @@ enum {
TCA_SKBEDIT_PRIORITY,
TCA_SKBEDIT_QUEUE_MAPPING,
TCA_SKBEDIT_MARK,
+ TCA_SKBEDIT_PAD,
__TCA_SKBEDIT_MAX
};
#define TCA_SKBEDIT_MAX (__TCA_SKBEDIT_MAX - 1)
diff --git a/include/uapi/linux/tc_act/tc_vlan.h b/include/uapi/linux/tc_act/tc_vlan.h
index f7b8d448b960..31151ff6264f 100644
--- a/include/uapi/linux/tc_act/tc_vlan.h
+++ b/include/uapi/linux/tc_act/tc_vlan.h
@@ -28,6 +28,7 @@ enum {
TCA_VLAN_PARMS,
TCA_VLAN_PUSH_VLAN_ID,
TCA_VLAN_PUSH_VLAN_PROTOCOL,
+ TCA_VLAN_PAD,
__TCA_VLAN_MAX,
};
#define TCA_VLAN_MAX (__TCA_VLAN_MAX - 1)
diff --git a/include/uapi/linux/tcp_metrics.h b/include/uapi/linux/tcp_metrics.h
index 93533926035c..80ad90d0cfc2 100644
--- a/include/uapi/linux/tcp_metrics.h
+++ b/include/uapi/linux/tcp_metrics.h
@@ -40,6 +40,7 @@ enum {
TCP_METRICS_ATTR_FOPEN_COOKIE, /* binary */
TCP_METRICS_ATTR_SADDR_IPV4, /* u32 */
TCP_METRICS_ATTR_SADDR_IPV6, /* binary */
+ TCP_METRICS_ATTR_PAD,
__TCP_METRICS_ATTR_MAX,
};
diff --git a/include/uapi/linux/udp.h b/include/uapi/linux/udp.h
index 16574ea18f0c..2c8180f9156f 100644
--- a/include/uapi/linux/udp.h
+++ b/include/uapi/linux/udp.h
@@ -36,6 +36,7 @@ struct udphdr {
#define UDP_ENCAP_ESPINUDP_NON_IKE 1 /* draft-ietf-ipsec-nat-t-ike-00/01 */
#define UDP_ENCAP_ESPINUDP 2 /* draft-ietf-ipsec-udp-encaps-06 */
#define UDP_ENCAP_L2TPINUDP 3 /* rfc2661 */
-
+#define UDP_ENCAP_GTP0 4 /* GSM TS 09.60 */
+#define UDP_ENCAP_GTP1U 5 /* 3GPP TS 29.060 */
#endif /* _UAPI_LINUX_UDP_H */
diff --git a/include/uapi/linux/usb/ch9.h b/include/uapi/linux/usb/ch9.h
index 06d6c6228a7a..d5ce71607972 100644
--- a/include/uapi/linux/usb/ch9.h
+++ b/include/uapi/linux/usb/ch9.h
@@ -899,7 +899,7 @@ struct usb_ssp_cap_descriptor {
__le32 bmAttributes;
#define USB_SSP_SUBLINK_SPEED_ATTRIBS (0x1f << 0) /* sublink speed entries */
#define USB_SSP_SUBLINK_SPEED_IDS (0xf << 5) /* speed ID entries */
- __u16 wFunctionalitySupport;
+ __le16 wFunctionalitySupport;
#define USB_SSP_MIN_SUBLINK_SPEED_ATTRIBUTE_ID (0xf)
#define USB_SSP_MIN_RX_LANE_COUNT (0xf << 8)
#define USB_SSP_MIN_TX_LANE_COUNT (0xf << 12)
diff --git a/include/uapi/linux/v4l2-dv-timings.h b/include/uapi/linux/v4l2-dv-timings.h
index c039f1d68a09..086168e18ca8 100644
--- a/include/uapi/linux/v4l2-dv-timings.h
+++ b/include/uapi/linux/v4l2-dv-timings.h
@@ -183,7 +183,8 @@
#define V4L2_DV_BT_CEA_3840X2160P24 { \
.type = V4L2_DV_BT_656_1120, \
- V4L2_INIT_BT_TIMINGS(3840, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
+ V4L2_INIT_BT_TIMINGS(3840, 2160, 0, \
+ V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
297000000, 1276, 88, 296, 8, 10, 72, 0, 0, 0, \
V4L2_DV_BT_STD_CEA861, \
V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \
@@ -191,14 +192,16 @@
#define V4L2_DV_BT_CEA_3840X2160P25 { \
.type = V4L2_DV_BT_656_1120, \
- V4L2_INIT_BT_TIMINGS(3840, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
+ V4L2_INIT_BT_TIMINGS(3840, 2160, 0, \
+ V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
297000000, 1056, 88, 296, 8, 10, 72, 0, 0, 0, \
V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_IS_CE_VIDEO) \
}
#define V4L2_DV_BT_CEA_3840X2160P30 { \
.type = V4L2_DV_BT_656_1120, \
- V4L2_INIT_BT_TIMINGS(3840, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
+ V4L2_INIT_BT_TIMINGS(3840, 2160, 0, \
+ V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
297000000, 176, 88, 296, 8, 10, 72, 0, 0, 0, \
V4L2_DV_BT_STD_CEA861, \
V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \
@@ -206,14 +209,16 @@
#define V4L2_DV_BT_CEA_3840X2160P50 { \
.type = V4L2_DV_BT_656_1120, \
- V4L2_INIT_BT_TIMINGS(3840, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
+ V4L2_INIT_BT_TIMINGS(3840, 2160, 0, \
+ V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
594000000, 1056, 88, 296, 8, 10, 72, 0, 0, 0, \
V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_IS_CE_VIDEO) \
}
#define V4L2_DV_BT_CEA_3840X2160P60 { \
.type = V4L2_DV_BT_656_1120, \
- V4L2_INIT_BT_TIMINGS(3840, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
+ V4L2_INIT_BT_TIMINGS(3840, 2160, 0, \
+ V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
594000000, 176, 88, 296, 8, 10, 72, 0, 0, 0, \
V4L2_DV_BT_STD_CEA861, \
V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \
@@ -221,7 +226,8 @@
#define V4L2_DV_BT_CEA_4096X2160P24 { \
.type = V4L2_DV_BT_656_1120, \
- V4L2_INIT_BT_TIMINGS(4096, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
+ V4L2_INIT_BT_TIMINGS(4096, 2160, 0, \
+ V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
297000000, 1020, 88, 296, 8, 10, 72, 0, 0, 0, \
V4L2_DV_BT_STD_CEA861, \
V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \
@@ -229,14 +235,16 @@
#define V4L2_DV_BT_CEA_4096X2160P25 { \
.type = V4L2_DV_BT_656_1120, \
- V4L2_INIT_BT_TIMINGS(4096, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
+ V4L2_INIT_BT_TIMINGS(4096, 2160, 0, \
+ V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
297000000, 968, 88, 128, 8, 10, 72, 0, 0, 0, \
V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_IS_CE_VIDEO) \
}
#define V4L2_DV_BT_CEA_4096X2160P30 { \
.type = V4L2_DV_BT_656_1120, \
- V4L2_INIT_BT_TIMINGS(4096, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
+ V4L2_INIT_BT_TIMINGS(4096, 2160, 0, \
+ V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
297000000, 88, 88, 128, 8, 10, 72, 0, 0, 0, \
V4L2_DV_BT_STD_CEA861, \
V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \
@@ -244,14 +252,16 @@
#define V4L2_DV_BT_CEA_4096X2160P50 { \
.type = V4L2_DV_BT_656_1120, \
- V4L2_INIT_BT_TIMINGS(4096, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
+ V4L2_INIT_BT_TIMINGS(4096, 2160, 0, \
+ V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
594000000, 968, 88, 128, 8, 10, 72, 0, 0, 0, \
V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_IS_CE_VIDEO) \
}
#define V4L2_DV_BT_CEA_4096X2160P60 { \
.type = V4L2_DV_BT_656_1120, \
- V4L2_INIT_BT_TIMINGS(4096, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
+ V4L2_INIT_BT_TIMINGS(4096, 2160, 0, \
+ V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
594000000, 88, 88, 128, 8, 10, 72, 0, 0, 0, \
V4L2_DV_BT_STD_CEA861, \
V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \
diff --git a/include/uapi/linux/videodev2.h b/include/uapi/linux/videodev2.h
index e895975c5b0e..8f951917be74 100644
--- a/include/uapi/linux/videodev2.h
+++ b/include/uapi/linux/videodev2.h
@@ -138,10 +138,7 @@ enum v4l2_buf_type {
V4L2_BUF_TYPE_VBI_OUTPUT = 5,
V4L2_BUF_TYPE_SLICED_VBI_CAPTURE = 6,
V4L2_BUF_TYPE_SLICED_VBI_OUTPUT = 7,
-#if 1
- /* Experimental */
V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY = 8,
-#endif
V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE = 9,
V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE = 10,
V4L2_BUF_TYPE_SDR_CAPTURE = 11,
@@ -657,8 +654,7 @@ struct v4l2_fmtdesc {
#define V4L2_FMT_FLAG_COMPRESSED 0x0001
#define V4L2_FMT_FLAG_EMULATED 0x0002
-#if 1
- /* Experimental Frame Size and frame rate enumeration */
+ /* Frame Size and frame rate enumeration */
/*
* F R A M E S I Z E E N U M E R A T I O N
*/
@@ -724,7 +720,6 @@ struct v4l2_frmivalenum {
__u32 reserved[2]; /* Reserved space for future use */
};
-#endif
/*
* T I M E C O D E
@@ -1728,8 +1723,6 @@ struct v4l2_audioout {
/*
* M P E G S E R V I C E S
- *
- * NOTE: EXPERIMENTAL API
*/
#if 1
#define V4L2_ENC_IDX_FRAME_I (0)
@@ -2259,46 +2252,35 @@ struct v4l2_create_buffers {
#define VIDIOC_ENCODER_CMD _IOWR('V', 77, struct v4l2_encoder_cmd)
#define VIDIOC_TRY_ENCODER_CMD _IOWR('V', 78, struct v4l2_encoder_cmd)
-/* Experimental, meant for debugging, testing and internal use.
- Only implemented if CONFIG_VIDEO_ADV_DEBUG is defined.
- You must be root to use these ioctls. Never use these in applications! */
+/*
+ * Experimental, meant for debugging, testing and internal use.
+ * Only implemented if CONFIG_VIDEO_ADV_DEBUG is defined.
+ * You must be root to use these ioctls. Never use these in applications!
+ */
#define VIDIOC_DBG_S_REGISTER _IOW('V', 79, struct v4l2_dbg_register)
#define VIDIOC_DBG_G_REGISTER _IOWR('V', 80, struct v4l2_dbg_register)
#define VIDIOC_S_HW_FREQ_SEEK _IOW('V', 82, struct v4l2_hw_freq_seek)
-
#define VIDIOC_S_DV_TIMINGS _IOWR('V', 87, struct v4l2_dv_timings)
#define VIDIOC_G_DV_TIMINGS _IOWR('V', 88, struct v4l2_dv_timings)
#define VIDIOC_DQEVENT _IOR('V', 89, struct v4l2_event)
#define VIDIOC_SUBSCRIBE_EVENT _IOW('V', 90, struct v4l2_event_subscription)
#define VIDIOC_UNSUBSCRIBE_EVENT _IOW('V', 91, struct v4l2_event_subscription)
-
-/* Experimental, the below two ioctls may change over the next couple of kernel
- versions */
#define VIDIOC_CREATE_BUFS _IOWR('V', 92, struct v4l2_create_buffers)
#define VIDIOC_PREPARE_BUF _IOWR('V', 93, struct v4l2_buffer)
-
-/* Experimental selection API */
#define VIDIOC_G_SELECTION _IOWR('V', 94, struct v4l2_selection)
#define VIDIOC_S_SELECTION _IOWR('V', 95, struct v4l2_selection)
-
-/* Experimental, these two ioctls may change over the next couple of kernel
- versions. */
#define VIDIOC_DECODER_CMD _IOWR('V', 96, struct v4l2_decoder_cmd)
#define VIDIOC_TRY_DECODER_CMD _IOWR('V', 97, struct v4l2_decoder_cmd)
-
-/* Experimental, these three ioctls may change over the next couple of kernel
- versions. */
#define VIDIOC_ENUM_DV_TIMINGS _IOWR('V', 98, struct v4l2_enum_dv_timings)
#define VIDIOC_QUERY_DV_TIMINGS _IOR('V', 99, struct v4l2_dv_timings)
#define VIDIOC_DV_TIMINGS_CAP _IOWR('V', 100, struct v4l2_dv_timings_cap)
-
-/* Experimental, this ioctl may change over the next couple of kernel
- versions. */
#define VIDIOC_ENUM_FREQ_BANDS _IOWR('V', 101, struct v4l2_frequency_band)
-/* Experimental, meant for debugging, testing and internal use.
- Never use these in applications! */
+/*
+ * Experimental, meant for debugging, testing and internal use.
+ * Never use this in applications!
+ */
#define VIDIOC_DBG_G_CHIP_INFO _IOWR('V', 102, struct v4l2_dbg_chip_info)
#define VIDIOC_QUERY_EXT_CTRL _IOWR('V', 103, struct v4l2_query_ext_ctrl)
diff --git a/include/uapi/linux/virtio_config.h b/include/uapi/linux/virtio_config.h
index c18264df9504..4cb65bbfa654 100644
--- a/include/uapi/linux/virtio_config.h
+++ b/include/uapi/linux/virtio_config.h
@@ -40,6 +40,8 @@
#define VIRTIO_CONFIG_S_DRIVER_OK 4
/* Driver has finished configuring features */
#define VIRTIO_CONFIG_S_FEATURES_OK 8
+/* Device entered invalid state, driver must reset it */
+#define VIRTIO_CONFIG_S_NEEDS_RESET 0x40
/* We've given up on this device. */
#define VIRTIO_CONFIG_S_FAILED 0x80
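
VIRTIO_CONFIG_S_NEEDS_RESET is set by the device (not the driver) to signal that it has entered an error state and must be reset before further use. A minimal check, with the status byte assumed to have been read from whatever register the transport exposes it through:

#include <linux/virtio_config.h>

/* status is the raw device status byte as read from the transport. */
static int virtio_device_broken(unsigned char status)
{
	return (status & (VIRTIO_CONFIG_S_NEEDS_RESET |
			  VIRTIO_CONFIG_S_FAILED)) != 0;
}
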
diff --git a/include/uapi/linux/xfrm.h b/include/uapi/linux/xfrm.h
index 2cd9e608d0d1..143338978b48 100644
--- a/include/uapi/linux/xfrm.h
+++ b/include/uapi/linux/xfrm.h
@@ -302,6 +302,7 @@ enum xfrm_attr_type_t {
XFRMA_SA_EXTRA_FLAGS, /* __u32 */
XFRMA_PROTO, /* __u8 */
XFRMA_ADDRESS_FILTER, /* struct xfrm_address_filter */
+ XFRMA_PAD,
__XFRMA_MAX
#define XFRMA_MAX (__XFRMA_MAX - 1)
diff --git a/include/uapi/rdma/ib_user_verbs.h b/include/uapi/rdma/ib_user_verbs.h
index 8126c143a519..b6543d73d20a 100644
--- a/include/uapi/rdma/ib_user_verbs.h
+++ b/include/uapi/rdma/ib_user_verbs.h
@@ -226,6 +226,7 @@ struct ib_uverbs_ex_query_device_resp {
struct ib_uverbs_odp_caps odp_caps;
__u64 timestamp_mask;
__u64 hca_core_clock; /* in KHZ */
+ __u64 device_cap_flags_ex;
};
struct ib_uverbs_query_port {
diff --git a/include/uapi/sound/asound.h b/include/uapi/sound/asound.h
index 67bf49d8c944..609cadb8739d 100644
--- a/include/uapi/sound/asound.h
+++ b/include/uapi/sound/asound.h
@@ -672,7 +672,7 @@ enum {
/* global timers (device member) */
#define SNDRV_TIMER_GLOBAL_SYSTEM 0
-#define SNDRV_TIMER_GLOBAL_RTC 1
+#define SNDRV_TIMER_GLOBAL_RTC 1 /* unused */
#define SNDRV_TIMER_GLOBAL_HPET 2
#define SNDRV_TIMER_GLOBAL_HRTIMER 3
diff --git a/include/video/imx-ipu-v3.h b/include/video/imx-ipu-v3.h
index eeba75395f7d..ad66589f2ae6 100644
--- a/include/video/imx-ipu-v3.h
+++ b/include/video/imx-ipu-v3.h
@@ -194,8 +194,9 @@ int ipu_cpmem_set_format_rgb(struct ipuv3_channel *ch,
int ipu_cpmem_set_format_passthrough(struct ipuv3_channel *ch, int width);
void ipu_cpmem_set_yuv_interleaved(struct ipuv3_channel *ch, u32 pixel_format);
void ipu_cpmem_set_yuv_planar_full(struct ipuv3_channel *ch,
- u32 pixel_format, int stride,
- int u_offset, int v_offset);
+ unsigned int uv_stride,
+ unsigned int u_offset,
+ unsigned int v_offset);
void ipu_cpmem_set_yuv_planar(struct ipuv3_channel *ch,
u32 pixel_format, int stride, int height);
int ipu_cpmem_set_fmt(struct ipuv3_channel *ch, u32 drm_fourcc);
@@ -236,7 +237,7 @@ void ipu_dmfc_disable_channel(struct dmfc_channel *dmfc);
int ipu_dmfc_alloc_bandwidth(struct dmfc_channel *dmfc,
unsigned long bandwidth_mbs, int burstsize);
void ipu_dmfc_free_bandwidth(struct dmfc_channel *dmfc);
-int ipu_dmfc_init_channel(struct dmfc_channel *dmfc, int width);
+void ipu_dmfc_config_wait4eot(struct dmfc_channel *dmfc, int width);
struct dmfc_channel *ipu_dmfc_get(struct ipu_soc *ipu, int ipuv3_channel);
void ipu_dmfc_put(struct dmfc_channel *dmfc);
diff --git a/include/video/sh_mipi_dsi.h b/include/video/sh_mipi_dsi.h
deleted file mode 100644
index a01f197e6ac1..000000000000
--- a/include/video/sh_mipi_dsi.h
+++ /dev/null
@@ -1,59 +0,0 @@
-/*
- * Public SH-mobile MIPI DSI header
- *
- * Copyright (C) 2010 Guennadi Liakhovetski <g.liakhovetski@gmx.de>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#ifndef VIDEO_SH_MIPI_DSI_H
-#define VIDEO_SH_MIPI_DSI_H
-
-enum sh_mipi_dsi_data_fmt {
- MIPI_RGB888,
- MIPI_RGB565,
- MIPI_RGB666_LP,
- MIPI_RGB666,
- MIPI_BGR888,
- MIPI_BGR565,
- MIPI_BGR666_LP,
- MIPI_BGR666,
- MIPI_YUYV,
- MIPI_UYVY,
- MIPI_YUV420_L,
- MIPI_YUV420,
-};
-
-#define SH_MIPI_DSI_HSABM (1 << 0)
-#define SH_MIPI_DSI_HBPBM (1 << 1)
-#define SH_MIPI_DSI_HFPBM (1 << 2)
-#define SH_MIPI_DSI_BL2E (1 << 3)
-#define SH_MIPI_DSI_VSEE (1 << 4)
-#define SH_MIPI_DSI_HSEE (1 << 5)
-#define SH_MIPI_DSI_HSAE (1 << 6)
-
-#define SH_MIPI_DSI_HSbyteCLK (1 << 24)
-#define SH_MIPI_DSI_HS6divCLK (1 << 25)
-#define SH_MIPI_DSI_HS4divCLK (1 << 26)
-
-#define SH_MIPI_DSI_SYNC_PULSES_MODE (SH_MIPI_DSI_VSEE | \
- SH_MIPI_DSI_HSEE | \
- SH_MIPI_DSI_HSAE)
-#define SH_MIPI_DSI_SYNC_EVENTS_MODE (0)
-#define SH_MIPI_DSI_SYNC_BURST_MODE (SH_MIPI_DSI_BL2E)
-
-struct sh_mipi_dsi_info {
- enum sh_mipi_dsi_data_fmt data_format;
- int channel;
- int lane;
- unsigned long flags;
- u32 clksrc;
- u32 phyctrl; /* for extra setting */
- unsigned int vsynw_offset;
- int (*set_dot_clock)(struct platform_device *pdev,
- void __iomem *base,
- int enable);
-};
-
-#endif
diff --git a/include/xen/page.h b/include/xen/page.h
index 96294ac93755..9dc46cb8a0fd 100644
--- a/include/xen/page.h
+++ b/include/xen/page.h
@@ -15,9 +15,9 @@
*/
#define xen_pfn_to_page(xen_pfn) \
- ((pfn_to_page(((unsigned long)(xen_pfn) << XEN_PAGE_SHIFT) >> PAGE_SHIFT)))
+ (pfn_to_page((unsigned long)(xen_pfn) >> (PAGE_SHIFT - XEN_PAGE_SHIFT)))
#define page_to_xen_pfn(page) \
- (((page_to_pfn(page)) << PAGE_SHIFT) >> XEN_PAGE_SHIFT)
+ ((page_to_pfn(page)) << (PAGE_SHIFT - XEN_PAGE_SHIFT))
#define XEN_PFN_PER_PAGE (PAGE_SIZE / XEN_PAGE_SIZE)
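
The rewritten conversions stay in pfn space. With 64KiB kernel pages (PAGE_SHIFT = 16) and Xen's fixed 4KiB granule (XEN_PAGE_SHIFT = 12) they reduce to

	page_to_xen_pfn(page)   == page_to_pfn(page) << 4
	xen_pfn_to_page(xen_pfn) == pfn_to_page(xen_pfn >> 4)

whereas the old forms first shifted the pfn up by the full PAGE_SHIFT, producing an intermediate byte address that can overflow an unsigned long on 32-bit configurations with high physical addresses. (The shift values above are an illustrative assumption; when both shifts are 12 the macros are identity conversions either way.)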