Diffstat (limited to 'include')
-rw-r--r--  include/acpi/acpixf.h | 2
-rw-r--r--  include/acpi/actypes.h | 3
-rw-r--r--  include/asm-generic/atomic-instrumented.h | 711
-rw-r--r--  include/asm-generic/atomic-long.h | 331
-rw-r--r--  include/asm-generic/barrier.h | 16
-rw-r--r--  include/asm-generic/bitops/instrumented-atomic.h | 14
-rw-r--r--  include/asm-generic/bitops/instrumented-lock.h | 10
-rw-r--r--  include/asm-generic/bitops/instrumented-non-atomic.h | 16
-rw-r--r--  include/asm-generic/bug.h | 9
-rw-r--r--  include/asm-generic/cacheflush.h | 5
-rw-r--r--  include/asm-generic/hugetlb.h | 2
-rw-r--r--  include/asm-generic/io.h | 2
-rw-r--r--  include/asm-generic/pgtable-nopmd.h | 1
-rw-r--r--  include/asm-generic/pgtable-nopud.h | 1
-rw-r--r--  include/drm/drm_displayid.h | 2
-rw-r--r--  include/dt-bindings/clock/agilex-clock.h | 70
-rw-r--r--  include/dt-bindings/clock/at91.h | 4
-rw-r--r--  include/dt-bindings/clock/bt1-ccu.h | 48
-rw-r--r--  include/dt-bindings/clock/imx7ulp-clock.h | 5
-rw-r--r--  include/dt-bindings/clock/imx8mp-clock.h | 90
-rw-r--r--  include/dt-bindings/clock/intel,lgm-clk.h | 165
-rw-r--r--  include/dt-bindings/clock/marvell,mmp2-audio.h | 10
-rw-r--r--  include/dt-bindings/clock/marvell,mmp2.h | 3
-rw-r--r--  include/dt-bindings/clock/meson8b-clkc.h | 1
-rw-r--r--  include/dt-bindings/clock/mt6765-clk.h | 313
-rw-r--r--  include/dt-bindings/clock/qcom,gcc-msm8939.h | 206
-rw-r--r--  include/dt-bindings/clock/qcom,gcc-msm8998.h | 1
-rw-r--r--  include/dt-bindings/clock/qcom,gcc-sc7180.h | 1
-rw-r--r--  include/dt-bindings/clock/sprd,sc9863a-clk.h | 5
-rw-r--r--  include/dt-bindings/clock/tegra210-car.h | 4
-rw-r--r--  include/dt-bindings/clock/x1000-cgu.h | 64
-rw-r--r--  include/dt-bindings/clock/x1830-cgu.h | 55
-rw-r--r--  include/dt-bindings/mailbox/qcom-ipcc.h | 33
-rw-r--r--  include/dt-bindings/power/marvell,mmp2.h | 11
-rw-r--r--  include/dt-bindings/reset/bt1-ccu.h | 25
-rw-r--r--  include/dt-bindings/reset/qcom,gcc-msm8939.h | 110
-rw-r--r--  include/keys/encrypted-type.h | 2
-rw-r--r--  include/keys/rxrpc-type.h | 4
-rw-r--r--  include/kunit/test.h | 12
-rw-r--r--  include/linux/atomic-arch-fallback.h | 2291
-rw-r--r--  include/linux/atomic-fallback.h | 346
-rw-r--r--  include/linux/atomic.h | 11
-rw-r--r--  include/linux/bch.h | 11
-rw-r--r--  include/linux/bsearch.h | 26
-rw-r--r--  include/linux/cache.h | 10
-rw-r--r--  include/linux/can/skb.h | 2
-rw-r--r--  include/linux/cb710.h | 2
-rw-r--r--  include/linux/ceph/libceph.h | 4
-rw-r--r--  include/linux/clk/tegra.h | 27
-rw-r--r--  include/linux/compiler-clang.h | 11
-rw-r--r--  include/linux/compiler-gcc.h | 11
-rw-r--r--  include/linux/compiler.h | 157
-rw-r--r--  include/linux/compiler_types.h | 87
-rw-r--r--  include/linux/context_tracking.h | 6
-rw-r--r--  include/linux/context_tracking_state.h | 6
-rw-r--r--  include/linux/cpu_cooling.h | 12
-rw-r--r--  include/linux/cpufreq.h | 2
-rw-r--r--  include/linux/cpuhotplug.h | 1
-rw-r--r--  include/linux/crash_dump.h | 3
-rw-r--r--  include/linux/dax.h | 1
-rw-r--r--  include/linux/debug_locks.h | 2
-rw-r--r--  include/linux/dma-noncoherent.h | 2
-rw-r--r--  include/linux/dmaengine.h | 4
-rw-r--r--  include/linux/edac.h | 8
-rw-r--r--  include/linux/fs.h | 11
-rw-r--r--  include/linux/fscache-cache.h | 2
-rw-r--r--  include/linux/ftrace.h | 5
-rw-r--r--  include/linux/hardirq.h | 41
-rw-r--r--  include/linux/hmm.h | 2
-rw-r--r--  include/linux/host1x.h | 3
-rw-r--r--  include/linux/huge_mm.h | 2
-rw-r--r--  include/linux/hugetlb.h | 2
-rw-r--r--  include/linux/i2c-smbus.h | 8
-rw-r--r--  include/linux/i2c.h | 16
-rw-r--r--  include/linux/idle_inject.h | 4
-rw-r--r--  include/linux/input/gp2ap002a00f.h | 23
-rw-r--r--  include/linux/input/mt.h | 5
-rw-r--r--  include/linux/instrumented.h | 109
-rw-r--r--  include/linux/intel-iommu.h | 1
-rw-r--r--  include/linux/interconnect.h | 6
-rw-r--r--  include/linux/interrupt.h | 8
-rw-r--r--  include/linux/io-mapping.h | 2
-rw-r--r--  include/linux/irqflags.h | 4
-rw-r--r--  include/linux/jbd2.h | 6
-rw-r--r--  include/linux/kallsyms.h | 4
-rw-r--r--  include/linux/kasan.h | 2
-rw-r--r--  include/linux/kcsan-checks.h | 430
-rw-r--r--  include/linux/kcsan.h | 59
-rw-r--r--  include/linux/kexec.h | 2
-rw-r--r--  include/linux/key.h | 33
-rw-r--r--  include/linux/kgdb.h | 5
-rw-r--r--  include/linux/kobject.h | 2
-rw-r--r--  include/linux/kprobes.h | 6
-rw-r--r--  include/linux/kthread.h | 6
-rw-r--r--  include/linux/kvm_host.h | 10
-rw-r--r--  include/linux/libata.h | 9
-rw-r--r--  include/linux/lsm_audit.h | 1
-rw-r--r--  include/linux/lsm_hook_defs.h | 11
-rw-r--r--  include/linux/lsm_hooks.h | 23
-rw-r--r--  include/linux/memory_hotplug.h | 1
-rw-r--r--  include/linux/mempolicy.h | 2
-rw-r--r--  include/linux/mlx5/mlx5_ifc.h | 10
-rw-r--r--  include/linux/mm.h | 13
-rw-r--r--  include/linux/mm_types.h | 4
-rw-r--r--  include/linux/mmap_lock.h | 90
-rw-r--r--  include/linux/mmu_context.h | 5
-rw-r--r--  include/linux/mmu_notifier.h | 13
-rw-r--r--  include/linux/mmzone.h | 18
-rw-r--r--  include/linux/mount.h | 2
-rw-r--r--  include/linux/mtd/bbm.h | 2
-rw-r--r--  include/linux/mtd/cfi.h | 6
-rw-r--r--  include/linux/mtd/mtd.h | 7
-rw-r--r--  include/linux/mtd/partitions.h | 2
-rw-r--r--  include/linux/mtd/qinfo.h | 2
-rw-r--r--  include/linux/mtd/rawnand.h | 131
-rw-r--r--  include/linux/mtd/spi-nor.h | 24
-rw-r--r--  include/linux/netdevice.h | 14
-rw-r--r--  include/linux/netfilter_ipv4/ip_tables.h | 6
-rw-r--r--  include/linux/netfilter_ipv6/ip6_tables.h | 3
-rw-r--r--  include/linux/nfs4.h | 4
-rw-r--r--  include/linux/nfs_fs.h | 1
-rw-r--r--  include/linux/nfs_xdr.h | 2
-rw-r--r--  include/linux/overflow.h | 25
-rw-r--r--  include/linux/page-flags.h | 10
-rw-r--r--  include/linux/pagemap.h | 2
-rw-r--r--  include/linux/pci_ids.h | 1
-rw-r--r--  include/linux/pgtable.h (renamed from include/asm-generic/pgtable.h) | 135
-rw-r--r--  include/linux/phy.h | 1
-rw-r--r--  include/linux/pipe_fs_i.h | 27
-rw-r--r--  include/linux/platform_data/i2c-pxa.h | 48
-rw-r--r--  include/linux/platform_data/mtd-davinci.h | 2
-rw-r--r--  include/linux/platform_data/mtd-nand-s3c2410.h | 2
-rw-r--r--  include/linux/pm_opp.h | 18
-rw-r--r--  include/linux/power_supply.h | 13
-rw-r--r--  include/linux/psp-sev.h | 2
-rw-r--r--  include/linux/qed/qed_chain.h | 26
-rw-r--r--  include/linux/ras.h | 5
-rw-r--r--  include/linux/regset.h | 2
-rw-r--r--  include/linux/rmap.h | 2
-rw-r--r--  include/linux/sched.h | 8
-rw-r--r--  include/linux/sched/debug.h | 3
-rw-r--r--  include/linux/sched/mm.h | 10
-rw-r--r--  include/linux/sctp.h | 36
-rw-r--r--  include/linux/security.h | 39
-rw-r--r--  include/linux/seqlock.h | 51
-rw-r--r--  include/linux/set_memory.h | 2
-rw-r--r--  include/linux/stacktrace.h | 2
-rw-r--r--  include/linux/sunrpc/auth.h | 5
-rw-r--r--  include/linux/sunrpc/gss_api.h | 1
-rw-r--r--  include/linux/sunrpc/svc.h | 2
-rw-r--r--  include/linux/sunrpc/svc_rdma.h | 6
-rw-r--r--  include/linux/sunrpc/svc_xprt.h | 6
-rw-r--r--  include/linux/sunrpc/svcauth_gss.h | 3
-rw-r--r--  include/linux/sunrpc/svcsock.h | 6
-rw-r--r--  include/linux/swap.h | 1
-rw-r--r--  include/linux/thermal.h | 84
-rw-r--r--  include/linux/tifm.h | 2
-rw-r--r--  include/linux/uaccess.h | 89
-rw-r--r--  include/linux/vdpa.h | 16
-rw-r--r--  include/linux/vmalloc.h | 1
-rw-r--r--  include/linux/vringh.h | 6
-rw-r--r--  include/linux/watch_queue.h | 127
-rw-r--r--  include/media/videobuf2-dma-contig.h | 2
-rw-r--r--  include/net/cfg80211.h | 5
-rw-r--r--  include/net/flow_offload.h | 45
-rw-r--r--  include/net/gue.h | 2
-rw-r--r--  include/net/inet_hashtables.h | 6
-rw-r--r--  include/net/neighbour.h | 7
-rw-r--r--  include/net/netfilter/nf_flow_table.h | 49
-rw-r--r--  include/net/nexthop.h | 28
-rw-r--r--  include/net/sctp/constants.h | 8
-rw-r--r--  include/net/sock.h | 1
-rw-r--r--  include/net/tc_act/tc_ct.h | 11
-rw-r--r--  include/net/xfrm.h | 1
-rw-r--r--  include/sound/dmaengine_pcm.h | 11
-rw-r--r--  include/sound/soc.h | 6
-rw-r--r--  include/target/target_core_fabric.h | 9
-rw-r--r--  include/trace/events/block.h | 7
-rw-r--r--  include/trace/events/f2fs.h | 83
-rw-r--r--  include/trace/events/rpcgss.h | 89
-rw-r--r--  include/trace/events/rpcrdma.h | 146
-rw-r--r--  include/trace/events/rxrpc.h | 2
-rw-r--r--  include/trace/events/sunrpc.h | 748
-rw-r--r--  include/uapi/linux/bpf.h | 15
-rw-r--r--  include/uapi/linux/fb.h | 1
-rw-r--r--  include/uapi/linux/fs.h | 1
-rw-r--r--  include/uapi/linux/keyctl.h | 2
-rw-r--r--  include/uapi/linux/mrp_bridge.h | 1
-rw-r--r--  include/uapi/linux/ndctl.h | 1
-rw-r--r--  include/uapi/linux/nl80211.h | 2
-rw-r--r--  include/uapi/linux/rds.h | 4
-rw-r--r--  include/uapi/linux/spi/spidev.h | 4
-rw-r--r--  include/uapi/linux/vhost.h | 4
-rw-r--r--  include/uapi/linux/virtio_ids.h | 1
-rw-r--r--  include/uapi/linux/virtio_mem.h | 211
-rw-r--r--  include/uapi/linux/virtio_ring.h | 48
-rw-r--r--  include/uapi/linux/watch_queue.h | 104
-rw-r--r--  include/uapi/linux/xattr.h | 4
-rw-r--r--  include/uapi/mtd/mtd-abi.h | 1
-rw-r--r--  include/xen/arm/page.h | 2
-rw-r--r--  include/xen/events.h | 7
-rw-r--r--  include/xen/hvm.h | 2
-rw-r--r--  include/xen/interface/hvm/hvm_op.h | 2
-rw-r--r--  include/xen/xen-ops.h | 19
204 files changed, 7600 insertions, 1564 deletions
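
The bulk of this diff mechanically converts the instrumented atomic wrappers in asm-generic/atomic-instrumented.h from direct kasan_check_read()/kasan_check_write() calls to the new instrument_atomic_read()/instrument_atomic_write() helpers from <linux/instrumented.h>, and marks every wrapper __always_inline so the instrumentation can never end up behind an out-of-line call. A minimal sketch of what those helpers do, assuming the 5.8-era <linux/instrumented.h> with both KASAN and KCSAN hooks available:

/* Sketch: each instrumentation point now reports the access to both
 * sanitizers (KASAN for memory validity, KCSAN for data races) instead
 * of to KASAN alone. */
static __always_inline void instrument_atomic_read(const volatile void *v, size_t size)
{
	kasan_check_read(v, size);
	kcsan_check_atomic_read(v, size);
}

static __always_inline void instrument_atomic_write(const volatile void *v, size_t size)
{
	kasan_check_write(v, size);
	kcsan_check_atomic_write(v, size);
}

With the helpers factored out this way, every atomic_*() wrapper below stays a one-line change regardless of how many sanitizers are compiled in.
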
diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h
index 1dc8d262035b..459d6981ca96 100644
--- a/include/acpi/acpixf.h
+++ b/include/acpi/acpixf.h
@@ -12,7 +12,7 @@
/* Current ACPICA subsystem version in YYYYMMDD format */
-#define ACPI_CA_VERSION 0x20200430
+#define ACPI_CA_VERSION 0x20200528
#include <acpi/acconfig.h>
#include <acpi/actypes.h>
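
ACPI_CA_VERSION encodes the ACPICA release date as a hexadecimal YYYYMMDD literal, so this hunk moves the subsystem from the 2020-04-30 to the 2020-05-28 release. A standalone sketch of how the encoding reads (the macro value is copied from the patch; the program itself is illustrative, not kernel code):

#include <stdio.h>

#define ACPI_CA_VERSION 0x20200528	/* value after this patch */

int main(void)
{
	/* Printing the hex value verbatim yields the date string:
	 * 0x20200528 -> "20200528", i.e. the 2020-05-28 release. */
	printf("ACPICA release: %08x\n", ACPI_CA_VERSION);
	return 0;
}
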
diff --git a/include/acpi/actypes.h b/include/acpi/actypes.h
index 4defed58ea33..aa236b9e6f24 100644
--- a/include/acpi/actypes.h
+++ b/include/acpi/actypes.h
@@ -815,8 +815,9 @@ typedef u8 acpi_adr_space_type;
#define ACPI_ADR_SPACE_GPIO (acpi_adr_space_type) 8
#define ACPI_ADR_SPACE_GSBUS (acpi_adr_space_type) 9
#define ACPI_ADR_SPACE_PLATFORM_COMM (acpi_adr_space_type) 10
+#define ACPI_ADR_SPACE_PLATFORM_RT (acpi_adr_space_type) 11
-#define ACPI_NUM_PREDEFINED_REGIONS 11
+#define ACPI_NUM_PREDEFINED_REGIONS 12
/*
* Special Address Spaces
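
The two hunks above preserve the invariant that ACPI_NUM_PREDEFINED_REGIONS is one past the highest predefined address-space ID: adding ACPI_ADR_SPACE_PLATFORM_RT as ID 11 forces the count from 11 to 12. A hypothetical compile-time guard for that invariant (not part of the patch), using the kernel's <linux/build_bug.h>:

#include <linux/build_bug.h>

/* Hypothetical check: the predefined-region count must track the
 * highest predefined acpi_adr_space_type ID plus one. */
static_assert(ACPI_NUM_PREDEFINED_REGIONS ==
	      (int)ACPI_ADR_SPACE_PLATFORM_RT + 1);
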
diff --git a/include/asm-generic/atomic-instrumented.h b/include/asm-generic/atomic-instrumented.h
index e8730c6b9fe2..379986e40159 100644
--- a/include/asm-generic/atomic-instrumented.h
+++ b/include/asm-generic/atomic-instrumented.h
@@ -18,1623 +18,1624 @@
#define _ASM_GENERIC_ATOMIC_INSTRUMENTED_H
#include <linux/build_bug.h>
-#include <linux/kasan-checks.h>
+#include <linux/compiler.h>
+#include <linux/instrumented.h>
-static inline int
+static __always_inline int
atomic_read(const atomic_t *v)
{
- kasan_check_read(v, sizeof(*v));
+ instrument_atomic_read(v, sizeof(*v));
return arch_atomic_read(v);
}
#define atomic_read atomic_read
#if defined(arch_atomic_read_acquire)
-static inline int
+static __always_inline int
atomic_read_acquire(const atomic_t *v)
{
- kasan_check_read(v, sizeof(*v));
+ instrument_atomic_read(v, sizeof(*v));
return arch_atomic_read_acquire(v);
}
#define atomic_read_acquire atomic_read_acquire
#endif
-static inline void
+static __always_inline void
atomic_set(atomic_t *v, int i)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
arch_atomic_set(v, i);
}
#define atomic_set atomic_set
#if defined(arch_atomic_set_release)
-static inline void
+static __always_inline void
atomic_set_release(atomic_t *v, int i)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
arch_atomic_set_release(v, i);
}
#define atomic_set_release atomic_set_release
#endif
-static inline void
+static __always_inline void
atomic_add(int i, atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
arch_atomic_add(i, v);
}
#define atomic_add atomic_add
#if !defined(arch_atomic_add_return_relaxed) || defined(arch_atomic_add_return)
-static inline int
+static __always_inline int
atomic_add_return(int i, atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_add_return(i, v);
}
#define atomic_add_return atomic_add_return
#endif
#if defined(arch_atomic_add_return_acquire)
-static inline int
+static __always_inline int
atomic_add_return_acquire(int i, atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_add_return_acquire(i, v);
}
#define atomic_add_return_acquire atomic_add_return_acquire
#endif
#if defined(arch_atomic_add_return_release)
-static inline int
+static __always_inline int
atomic_add_return_release(int i, atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_add_return_release(i, v);
}
#define atomic_add_return_release atomic_add_return_release
#endif
#if defined(arch_atomic_add_return_relaxed)
-static inline int
+static __always_inline int
atomic_add_return_relaxed(int i, atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_add_return_relaxed(i, v);
}
#define atomic_add_return_relaxed atomic_add_return_relaxed
#endif
#if !defined(arch_atomic_fetch_add_relaxed) || defined(arch_atomic_fetch_add)
-static inline int
+static __always_inline int
atomic_fetch_add(int i, atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_fetch_add(i, v);
}
#define atomic_fetch_add atomic_fetch_add
#endif
#if defined(arch_atomic_fetch_add_acquire)
-static inline int
+static __always_inline int
atomic_fetch_add_acquire(int i, atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_fetch_add_acquire(i, v);
}
#define atomic_fetch_add_acquire atomic_fetch_add_acquire
#endif
#if defined(arch_atomic_fetch_add_release)
-static inline int
+static __always_inline int
atomic_fetch_add_release(int i, atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_fetch_add_release(i, v);
}
#define atomic_fetch_add_release atomic_fetch_add_release
#endif
#if defined(arch_atomic_fetch_add_relaxed)
-static inline int
+static __always_inline int
atomic_fetch_add_relaxed(int i, atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_fetch_add_relaxed(i, v);
}
#define atomic_fetch_add_relaxed atomic_fetch_add_relaxed
#endif
-static inline void
+static __always_inline void
atomic_sub(int i, atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
arch_atomic_sub(i, v);
}
#define atomic_sub atomic_sub
#if !defined(arch_atomic_sub_return_relaxed) || defined(arch_atomic_sub_return)
-static inline int
+static __always_inline int
atomic_sub_return(int i, atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_sub_return(i, v);
}
#define atomic_sub_return atomic_sub_return
#endif
#if defined(arch_atomic_sub_return_acquire)
-static inline int
+static __always_inline int
atomic_sub_return_acquire(int i, atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_sub_return_acquire(i, v);
}
#define atomic_sub_return_acquire atomic_sub_return_acquire
#endif
#if defined(arch_atomic_sub_return_release)
-static inline int
+static __always_inline int
atomic_sub_return_release(int i, atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_sub_return_release(i, v);
}
#define atomic_sub_return_release atomic_sub_return_release
#endif
#if defined(arch_atomic_sub_return_relaxed)
-static inline int
+static __always_inline int
atomic_sub_return_relaxed(int i, atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_sub_return_relaxed(i, v);
}
#define atomic_sub_return_relaxed atomic_sub_return_relaxed
#endif
#if !defined(arch_atomic_fetch_sub_relaxed) || defined(arch_atomic_fetch_sub)
-static inline int
+static __always_inline int
atomic_fetch_sub(int i, atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_fetch_sub(i, v);
}
#define atomic_fetch_sub atomic_fetch_sub
#endif
#if defined(arch_atomic_fetch_sub_acquire)
-static inline int
+static __always_inline int
atomic_fetch_sub_acquire(int i, atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_fetch_sub_acquire(i, v);
}
#define atomic_fetch_sub_acquire atomic_fetch_sub_acquire
#endif
#if defined(arch_atomic_fetch_sub_release)
-static inline int
+static __always_inline int
atomic_fetch_sub_release(int i, atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_fetch_sub_release(i, v);
}
#define atomic_fetch_sub_release atomic_fetch_sub_release
#endif
#if defined(arch_atomic_fetch_sub_relaxed)
-static inline int
+static __always_inline int
atomic_fetch_sub_relaxed(int i, atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_fetch_sub_relaxed(i, v);
}
#define atomic_fetch_sub_relaxed atomic_fetch_sub_relaxed
#endif
#if defined(arch_atomic_inc)
-static inline void
+static __always_inline void
atomic_inc(atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
arch_atomic_inc(v);
}
#define atomic_inc atomic_inc
#endif
#if defined(arch_atomic_inc_return)
-static inline int
+static __always_inline int
atomic_inc_return(atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_inc_return(v);
}
#define atomic_inc_return atomic_inc_return
#endif
#if defined(arch_atomic_inc_return_acquire)
-static inline int
+static __always_inline int
atomic_inc_return_acquire(atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_inc_return_acquire(v);
}
#define atomic_inc_return_acquire atomic_inc_return_acquire
#endif
#if defined(arch_atomic_inc_return_release)
-static inline int
+static __always_inline int
atomic_inc_return_release(atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_inc_return_release(v);
}
#define atomic_inc_return_release atomic_inc_return_release
#endif
#if defined(arch_atomic_inc_return_relaxed)
-static inline int
+static __always_inline int
atomic_inc_return_relaxed(atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_inc_return_relaxed(v);
}
#define atomic_inc_return_relaxed atomic_inc_return_relaxed
#endif
#if defined(arch_atomic_fetch_inc)
-static inline int
+static __always_inline int
atomic_fetch_inc(atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_fetch_inc(v);
}
#define atomic_fetch_inc atomic_fetch_inc
#endif
#if defined(arch_atomic_fetch_inc_acquire)
-static inline int
+static __always_inline int
atomic_fetch_inc_acquire(atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_fetch_inc_acquire(v);
}
#define atomic_fetch_inc_acquire atomic_fetch_inc_acquire
#endif
#if defined(arch_atomic_fetch_inc_release)
-static inline int
+static __always_inline int
atomic_fetch_inc_release(atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_fetch_inc_release(v);
}
#define atomic_fetch_inc_release atomic_fetch_inc_release
#endif
#if defined(arch_atomic_fetch_inc_relaxed)
-static inline int
+static __always_inline int
atomic_fetch_inc_relaxed(atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_fetch_inc_relaxed(v);
}
#define atomic_fetch_inc_relaxed atomic_fetch_inc_relaxed
#endif
#if defined(arch_atomic_dec)
-static inline void
+static __always_inline void
atomic_dec(atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
arch_atomic_dec(v);
}
#define atomic_dec atomic_dec
#endif
#if defined(arch_atomic_dec_return)
-static inline int
+static __always_inline int
atomic_dec_return(atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_dec_return(v);
}
#define atomic_dec_return atomic_dec_return
#endif
#if defined(arch_atomic_dec_return_acquire)
-static inline int
+static __always_inline int
atomic_dec_return_acquire(atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_dec_return_acquire(v);
}
#define atomic_dec_return_acquire atomic_dec_return_acquire
#endif
#if defined(arch_atomic_dec_return_release)
-static inline int
+static __always_inline int
atomic_dec_return_release(atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_dec_return_release(v);
}
#define atomic_dec_return_release atomic_dec_return_release
#endif
#if defined(arch_atomic_dec_return_relaxed)
-static inline int
+static __always_inline int
atomic_dec_return_relaxed(atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_dec_return_relaxed(v);
}
#define atomic_dec_return_relaxed atomic_dec_return_relaxed
#endif
#if defined(arch_atomic_fetch_dec)
-static inline int
+static __always_inline int
atomic_fetch_dec(atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_fetch_dec(v);
}
#define atomic_fetch_dec atomic_fetch_dec
#endif
#if defined(arch_atomic_fetch_dec_acquire)
-static inline int
+static __always_inline int
atomic_fetch_dec_acquire(atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_fetch_dec_acquire(v);
}
#define atomic_fetch_dec_acquire atomic_fetch_dec_acquire
#endif
#if defined(arch_atomic_fetch_dec_release)
-static inline int
+static __always_inline int
atomic_fetch_dec_release(atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_fetch_dec_release(v);
}
#define atomic_fetch_dec_release atomic_fetch_dec_release
#endif
#if defined(arch_atomic_fetch_dec_relaxed)
-static inline int
+static __always_inline int
atomic_fetch_dec_relaxed(atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_fetch_dec_relaxed(v);
}
#define atomic_fetch_dec_relaxed atomic_fetch_dec_relaxed
#endif
-static inline void
+static __always_inline void
atomic_and(int i, atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
arch_atomic_and(i, v);
}
#define atomic_and atomic_and
#if !defined(arch_atomic_fetch_and_relaxed) || defined(arch_atomic_fetch_and)
-static inline int
+static __always_inline int
atomic_fetch_and(int i, atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_fetch_and(i, v);
}
#define atomic_fetch_and atomic_fetch_and
#endif
#if defined(arch_atomic_fetch_and_acquire)
-static inline int
+static __always_inline int
atomic_fetch_and_acquire(int i, atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_fetch_and_acquire(i, v);
}
#define atomic_fetch_and_acquire atomic_fetch_and_acquire
#endif
#if defined(arch_atomic_fetch_and_release)
-static inline int
+static __always_inline int
atomic_fetch_and_release(int i, atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_fetch_and_release(i, v);
}
#define atomic_fetch_and_release atomic_fetch_and_release
#endif
#if defined(arch_atomic_fetch_and_relaxed)
-static inline int
+static __always_inline int
atomic_fetch_and_relaxed(int i, atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_fetch_and_relaxed(i, v);
}
#define atomic_fetch_and_relaxed atomic_fetch_and_relaxed
#endif
#if defined(arch_atomic_andnot)
-static inline void
+static __always_inline void
atomic_andnot(int i, atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
arch_atomic_andnot(i, v);
}
#define atomic_andnot atomic_andnot
#endif
#if defined(arch_atomic_fetch_andnot)
-static inline int
+static __always_inline int
atomic_fetch_andnot(int i, atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_fetch_andnot(i, v);
}
#define atomic_fetch_andnot atomic_fetch_andnot
#endif
#if defined(arch_atomic_fetch_andnot_acquire)
-static inline int
+static __always_inline int
atomic_fetch_andnot_acquire(int i, atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_fetch_andnot_acquire(i, v);
}
#define atomic_fetch_andnot_acquire atomic_fetch_andnot_acquire
#endif
#if defined(arch_atomic_fetch_andnot_release)
-static inline int
+static __always_inline int
atomic_fetch_andnot_release(int i, atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_fetch_andnot_release(i, v);
}
#define atomic_fetch_andnot_release atomic_fetch_andnot_release
#endif
#if defined(arch_atomic_fetch_andnot_relaxed)
-static inline int
+static __always_inline int
atomic_fetch_andnot_relaxed(int i, atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_fetch_andnot_relaxed(i, v);
}
#define atomic_fetch_andnot_relaxed atomic_fetch_andnot_relaxed
#endif
-static inline void
+static __always_inline void
atomic_or(int i, atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
arch_atomic_or(i, v);
}
#define atomic_or atomic_or
#if !defined(arch_atomic_fetch_or_relaxed) || defined(arch_atomic_fetch_or)
-static inline int
+static __always_inline int
atomic_fetch_or(int i, atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_fetch_or(i, v);
}
#define atomic_fetch_or atomic_fetch_or
#endif
#if defined(arch_atomic_fetch_or_acquire)
-static inline int
+static __always_inline int
atomic_fetch_or_acquire(int i, atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_fetch_or_acquire(i, v);
}
#define atomic_fetch_or_acquire atomic_fetch_or_acquire
#endif
#if defined(arch_atomic_fetch_or_release)
-static inline int
+static __always_inline int
atomic_fetch_or_release(int i, atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_fetch_or_release(i, v);
}
#define atomic_fetch_or_release atomic_fetch_or_release
#endif
#if defined(arch_atomic_fetch_or_relaxed)
-static inline int
+static __always_inline int
atomic_fetch_or_relaxed(int i, atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_fetch_or_relaxed(i, v);
}
#define atomic_fetch_or_relaxed atomic_fetch_or_relaxed
#endif
-static inline void
+static __always_inline void
atomic_xor(int i, atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
arch_atomic_xor(i, v);
}
#define atomic_xor atomic_xor
#if !defined(arch_atomic_fetch_xor_relaxed) || defined(arch_atomic_fetch_xor)
-static inline int
+static __always_inline int
atomic_fetch_xor(int i, atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_fetch_xor(i, v);
}
#define atomic_fetch_xor atomic_fetch_xor
#endif
#if defined(arch_atomic_fetch_xor_acquire)
-static inline int
+static __always_inline int
atomic_fetch_xor_acquire(int i, atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_fetch_xor_acquire(i, v);
}
#define atomic_fetch_xor_acquire atomic_fetch_xor_acquire
#endif
#if defined(arch_atomic_fetch_xor_release)
-static inline int
+static __always_inline int
atomic_fetch_xor_release(int i, atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_fetch_xor_release(i, v);
}
#define atomic_fetch_xor_release atomic_fetch_xor_release
#endif
#if defined(arch_atomic_fetch_xor_relaxed)
-static inline int
+static __always_inline int
atomic_fetch_xor_relaxed(int i, atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_fetch_xor_relaxed(i, v);
}
#define atomic_fetch_xor_relaxed atomic_fetch_xor_relaxed
#endif
#if !defined(arch_atomic_xchg_relaxed) || defined(arch_atomic_xchg)
-static inline int
+static __always_inline int
atomic_xchg(atomic_t *v, int i)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_xchg(v, i);
}
#define atomic_xchg atomic_xchg
#endif
#if defined(arch_atomic_xchg_acquire)
-static inline int
+static __always_inline int
atomic_xchg_acquire(atomic_t *v, int i)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_xchg_acquire(v, i);
}
#define atomic_xchg_acquire atomic_xchg_acquire
#endif
#if defined(arch_atomic_xchg_release)
-static inline int
+static __always_inline int
atomic_xchg_release(atomic_t *v, int i)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_xchg_release(v, i);
}
#define atomic_xchg_release atomic_xchg_release
#endif
#if defined(arch_atomic_xchg_relaxed)
-static inline int
+static __always_inline int
atomic_xchg_relaxed(atomic_t *v, int i)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_xchg_relaxed(v, i);
}
#define atomic_xchg_relaxed atomic_xchg_relaxed
#endif
#if !defined(arch_atomic_cmpxchg_relaxed) || defined(arch_atomic_cmpxchg)
-static inline int
+static __always_inline int
atomic_cmpxchg(atomic_t *v, int old, int new)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_cmpxchg(v, old, new);
}
#define atomic_cmpxchg atomic_cmpxchg
#endif
#if defined(arch_atomic_cmpxchg_acquire)
-static inline int
+static __always_inline int
atomic_cmpxchg_acquire(atomic_t *v, int old, int new)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_cmpxchg_acquire(v, old, new);
}
#define atomic_cmpxchg_acquire atomic_cmpxchg_acquire
#endif
#if defined(arch_atomic_cmpxchg_release)
-static inline int
+static __always_inline int
atomic_cmpxchg_release(atomic_t *v, int old, int new)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_cmpxchg_release(v, old, new);
}
#define atomic_cmpxchg_release atomic_cmpxchg_release
#endif
#if defined(arch_atomic_cmpxchg_relaxed)
-static inline int
+static __always_inline int
atomic_cmpxchg_relaxed(atomic_t *v, int old, int new)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_cmpxchg_relaxed(v, old, new);
}
#define atomic_cmpxchg_relaxed atomic_cmpxchg_relaxed
#endif
#if defined(arch_atomic_try_cmpxchg)
-static inline bool
+static __always_inline bool
atomic_try_cmpxchg(atomic_t *v, int *old, int new)
{
- kasan_check_write(v, sizeof(*v));
- kasan_check_write(old, sizeof(*old));
+ instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_write(old, sizeof(*old));
return arch_atomic_try_cmpxchg(v, old, new);
}
#define atomic_try_cmpxchg atomic_try_cmpxchg
#endif
#if defined(arch_atomic_try_cmpxchg_acquire)
-static inline bool
+static __always_inline bool
atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new)
{
- kasan_check_write(v, sizeof(*v));
- kasan_check_write(old, sizeof(*old));
+ instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_write(old, sizeof(*old));
return arch_atomic_try_cmpxchg_acquire(v, old, new);
}
#define atomic_try_cmpxchg_acquire atomic_try_cmpxchg_acquire
#endif
#if defined(arch_atomic_try_cmpxchg_release)
-static inline bool
+static __always_inline bool
atomic_try_cmpxchg_release(atomic_t *v, int *old, int new)
{
- kasan_check_write(v, sizeof(*v));
- kasan_check_write(old, sizeof(*old));
+ instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_write(old, sizeof(*old));
return arch_atomic_try_cmpxchg_release(v, old, new);
}
#define atomic_try_cmpxchg_release atomic_try_cmpxchg_release
#endif
#if defined(arch_atomic_try_cmpxchg_relaxed)
-static inline bool
+static __always_inline bool
atomic_try_cmpxchg_relaxed(atomic_t *v, int *old, int new)
{
- kasan_check_write(v, sizeof(*v));
- kasan_check_write(old, sizeof(*old));
+ instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_write(old, sizeof(*old));
return arch_atomic_try_cmpxchg_relaxed(v, old, new);
}
#define atomic_try_cmpxchg_relaxed atomic_try_cmpxchg_relaxed
#endif
#if defined(arch_atomic_sub_and_test)
-static inline bool
+static __always_inline bool
atomic_sub_and_test(int i, atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_sub_and_test(i, v);
}
#define atomic_sub_and_test atomic_sub_and_test
#endif
#if defined(arch_atomic_dec_and_test)
-static inline bool
+static __always_inline bool
atomic_dec_and_test(atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_dec_and_test(v);
}
#define atomic_dec_and_test atomic_dec_and_test
#endif
#if defined(arch_atomic_inc_and_test)
-static inline bool
+static __always_inline bool
atomic_inc_and_test(atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_inc_and_test(v);
}
#define atomic_inc_and_test atomic_inc_and_test
#endif
#if defined(arch_atomic_add_negative)
-static inline bool
+static __always_inline bool
atomic_add_negative(int i, atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_add_negative(i, v);
}
#define atomic_add_negative atomic_add_negative
#endif
#if defined(arch_atomic_fetch_add_unless)
-static inline int
+static __always_inline int
atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_fetch_add_unless(v, a, u);
}
#define atomic_fetch_add_unless atomic_fetch_add_unless
#endif
#if defined(arch_atomic_add_unless)
-static inline bool
+static __always_inline bool
atomic_add_unless(atomic_t *v, int a, int u)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_add_unless(v, a, u);
}
#define atomic_add_unless atomic_add_unless
#endif
#if defined(arch_atomic_inc_not_zero)
-static inline bool
+static __always_inline bool
atomic_inc_not_zero(atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_inc_not_zero(v);
}
#define atomic_inc_not_zero atomic_inc_not_zero
#endif
#if defined(arch_atomic_inc_unless_negative)
-static inline bool
+static __always_inline bool
atomic_inc_unless_negative(atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_inc_unless_negative(v);
}
#define atomic_inc_unless_negative atomic_inc_unless_negative
#endif
#if defined(arch_atomic_dec_unless_positive)
-static inline bool
+static __always_inline bool
atomic_dec_unless_positive(atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_dec_unless_positive(v);
}
#define atomic_dec_unless_positive atomic_dec_unless_positive
#endif
#if defined(arch_atomic_dec_if_positive)
-static inline int
+static __always_inline int
atomic_dec_if_positive(atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_dec_if_positive(v);
}
#define atomic_dec_if_positive atomic_dec_if_positive
#endif
-static inline s64
+static __always_inline s64
atomic64_read(const atomic64_t *v)
{
- kasan_check_read(v, sizeof(*v));
+ instrument_atomic_read(v, sizeof(*v));
return arch_atomic64_read(v);
}
#define atomic64_read atomic64_read
#if defined(arch_atomic64_read_acquire)
-static inline s64
+static __always_inline s64
atomic64_read_acquire(const atomic64_t *v)
{
- kasan_check_read(v, sizeof(*v));
+ instrument_atomic_read(v, sizeof(*v));
return arch_atomic64_read_acquire(v);
}
#define atomic64_read_acquire atomic64_read_acquire
#endif
-static inline void
+static __always_inline void
atomic64_set(atomic64_t *v, s64 i)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
arch_atomic64_set(v, i);
}
#define atomic64_set atomic64_set
#if defined(arch_atomic64_set_release)
-static inline void
+static __always_inline void
atomic64_set_release(atomic64_t *v, s64 i)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
arch_atomic64_set_release(v, i);
}
#define atomic64_set_release atomic64_set_release
#endif
-static inline void
+static __always_inline void
atomic64_add(s64 i, atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
arch_atomic64_add(i, v);
}
#define atomic64_add atomic64_add
#if !defined(arch_atomic64_add_return_relaxed) || defined(arch_atomic64_add_return)
-static inline s64
+static __always_inline s64
atomic64_add_return(s64 i, atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_add_return(i, v);
}
#define atomic64_add_return atomic64_add_return
#endif
#if defined(arch_atomic64_add_return_acquire)
-static inline s64
+static __always_inline s64
atomic64_add_return_acquire(s64 i, atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_add_return_acquire(i, v);
}
#define atomic64_add_return_acquire atomic64_add_return_acquire
#endif
#if defined(arch_atomic64_add_return_release)
-static inline s64
+static __always_inline s64
atomic64_add_return_release(s64 i, atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_add_return_release(i, v);
}
#define atomic64_add_return_release atomic64_add_return_release
#endif
#if defined(arch_atomic64_add_return_relaxed)
-static inline s64
+static __always_inline s64
atomic64_add_return_relaxed(s64 i, atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_add_return_relaxed(i, v);
}
#define atomic64_add_return_relaxed atomic64_add_return_relaxed
#endif
#if !defined(arch_atomic64_fetch_add_relaxed) || defined(arch_atomic64_fetch_add)
-static inline s64
+static __always_inline s64
atomic64_fetch_add(s64 i, atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_fetch_add(i, v);
}
#define atomic64_fetch_add atomic64_fetch_add
#endif
#if defined(arch_atomic64_fetch_add_acquire)
-static inline s64
+static __always_inline s64
atomic64_fetch_add_acquire(s64 i, atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_fetch_add_acquire(i, v);
}
#define atomic64_fetch_add_acquire atomic64_fetch_add_acquire
#endif
#if defined(arch_atomic64_fetch_add_release)
-static inline s64
+static __always_inline s64
atomic64_fetch_add_release(s64 i, atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_fetch_add_release(i, v);
}
#define atomic64_fetch_add_release atomic64_fetch_add_release
#endif
#if defined(arch_atomic64_fetch_add_relaxed)
-static inline s64
+static __always_inline s64
atomic64_fetch_add_relaxed(s64 i, atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_fetch_add_relaxed(i, v);
}
#define atomic64_fetch_add_relaxed atomic64_fetch_add_relaxed
#endif
-static inline void
+static __always_inline void
atomic64_sub(s64 i, atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
arch_atomic64_sub(i, v);
}
#define atomic64_sub atomic64_sub
#if !defined(arch_atomic64_sub_return_relaxed) || defined(arch_atomic64_sub_return)
-static inline s64
+static __always_inline s64
atomic64_sub_return(s64 i, atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_sub_return(i, v);
}
#define atomic64_sub_return atomic64_sub_return
#endif
#if defined(arch_atomic64_sub_return_acquire)
-static inline s64
+static __always_inline s64
atomic64_sub_return_acquire(s64 i, atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_sub_return_acquire(i, v);
}
#define atomic64_sub_return_acquire atomic64_sub_return_acquire
#endif
#if defined(arch_atomic64_sub_return_release)
-static inline s64
+static __always_inline s64
atomic64_sub_return_release(s64 i, atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_sub_return_release(i, v);
}
#define atomic64_sub_return_release atomic64_sub_return_release
#endif
#if defined(arch_atomic64_sub_return_relaxed)
-static inline s64
+static __always_inline s64
atomic64_sub_return_relaxed(s64 i, atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_sub_return_relaxed(i, v);
}
#define atomic64_sub_return_relaxed atomic64_sub_return_relaxed
#endif
#if !defined(arch_atomic64_fetch_sub_relaxed) || defined(arch_atomic64_fetch_sub)
-static inline s64
+static __always_inline s64
atomic64_fetch_sub(s64 i, atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_fetch_sub(i, v);
}
#define atomic64_fetch_sub atomic64_fetch_sub
#endif
#if defined(arch_atomic64_fetch_sub_acquire)
-static inline s64
+static __always_inline s64
atomic64_fetch_sub_acquire(s64 i, atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_fetch_sub_acquire(i, v);
}
#define atomic64_fetch_sub_acquire atomic64_fetch_sub_acquire
#endif
#if defined(arch_atomic64_fetch_sub_release)
-static inline s64
+static __always_inline s64
atomic64_fetch_sub_release(s64 i, atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_fetch_sub_release(i, v);
}
#define atomic64_fetch_sub_release atomic64_fetch_sub_release
#endif
#if defined(arch_atomic64_fetch_sub_relaxed)
-static inline s64
+static __always_inline s64
atomic64_fetch_sub_relaxed(s64 i, atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_fetch_sub_relaxed(i, v);
}
#define atomic64_fetch_sub_relaxed atomic64_fetch_sub_relaxed
#endif
#if defined(arch_atomic64_inc)
-static inline void
+static __always_inline void
atomic64_inc(atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
arch_atomic64_inc(v);
}
#define atomic64_inc atomic64_inc
#endif
#if defined(arch_atomic64_inc_return)
-static inline s64
+static __always_inline s64
atomic64_inc_return(atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_inc_return(v);
}
#define atomic64_inc_return atomic64_inc_return
#endif
#if defined(arch_atomic64_inc_return_acquire)
-static inline s64
+static __always_inline s64
atomic64_inc_return_acquire(atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_inc_return_acquire(v);
}
#define atomic64_inc_return_acquire atomic64_inc_return_acquire
#endif
#if defined(arch_atomic64_inc_return_release)
-static inline s64
+static __always_inline s64
atomic64_inc_return_release(atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_inc_return_release(v);
}
#define atomic64_inc_return_release atomic64_inc_return_release
#endif
#if defined(arch_atomic64_inc_return_relaxed)
-static inline s64
+static __always_inline s64
atomic64_inc_return_relaxed(atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_inc_return_relaxed(v);
}
#define atomic64_inc_return_relaxed atomic64_inc_return_relaxed
#endif
#if defined(arch_atomic64_fetch_inc)
-static inline s64
+static __always_inline s64
atomic64_fetch_inc(atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_fetch_inc(v);
}
#define atomic64_fetch_inc atomic64_fetch_inc
#endif
#if defined(arch_atomic64_fetch_inc_acquire)
-static inline s64
+static __always_inline s64
atomic64_fetch_inc_acquire(atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_fetch_inc_acquire(v);
}
#define atomic64_fetch_inc_acquire atomic64_fetch_inc_acquire
#endif
#if defined(arch_atomic64_fetch_inc_release)
-static inline s64
+static __always_inline s64
atomic64_fetch_inc_release(atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_fetch_inc_release(v);
}
#define atomic64_fetch_inc_release atomic64_fetch_inc_release
#endif
#if defined(arch_atomic64_fetch_inc_relaxed)
-static inline s64
+static __always_inline s64
atomic64_fetch_inc_relaxed(atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_fetch_inc_relaxed(v);
}
#define atomic64_fetch_inc_relaxed atomic64_fetch_inc_relaxed
#endif
#if defined(arch_atomic64_dec)
-static inline void
+static __always_inline void
atomic64_dec(atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
arch_atomic64_dec(v);
}
#define atomic64_dec atomic64_dec
#endif
#if defined(arch_atomic64_dec_return)
-static inline s64
+static __always_inline s64
atomic64_dec_return(atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_dec_return(v);
}
#define atomic64_dec_return atomic64_dec_return
#endif
#if defined(arch_atomic64_dec_return_acquire)
-static inline s64
+static __always_inline s64
atomic64_dec_return_acquire(atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_dec_return_acquire(v);
}
#define atomic64_dec_return_acquire atomic64_dec_return_acquire
#endif
#if defined(arch_atomic64_dec_return_release)
-static inline s64
+static __always_inline s64
atomic64_dec_return_release(atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_dec_return_release(v);
}
#define atomic64_dec_return_release atomic64_dec_return_release
#endif
#if defined(arch_atomic64_dec_return_relaxed)
-static inline s64
+static __always_inline s64
atomic64_dec_return_relaxed(atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_dec_return_relaxed(v);
}
#define atomic64_dec_return_relaxed atomic64_dec_return_relaxed
#endif
#if defined(arch_atomic64_fetch_dec)
-static inline s64
+static __always_inline s64
atomic64_fetch_dec(atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_fetch_dec(v);
}
#define atomic64_fetch_dec atomic64_fetch_dec
#endif
#if defined(arch_atomic64_fetch_dec_acquire)
-static inline s64
+static __always_inline s64
atomic64_fetch_dec_acquire(atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_fetch_dec_acquire(v);
}
#define atomic64_fetch_dec_acquire atomic64_fetch_dec_acquire
#endif
#if defined(arch_atomic64_fetch_dec_release)
-static inline s64
+static __always_inline s64
atomic64_fetch_dec_release(atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_fetch_dec_release(v);
}
#define atomic64_fetch_dec_release atomic64_fetch_dec_release
#endif
#if defined(arch_atomic64_fetch_dec_relaxed)
-static inline s64
+static __always_inline s64
atomic64_fetch_dec_relaxed(atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_fetch_dec_relaxed(v);
}
#define atomic64_fetch_dec_relaxed atomic64_fetch_dec_relaxed
#endif
-static inline void
+static __always_inline void
atomic64_and(s64 i, atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
arch_atomic64_and(i, v);
}
#define atomic64_and atomic64_and
#if !defined(arch_atomic64_fetch_and_relaxed) || defined(arch_atomic64_fetch_and)
-static inline s64
+static __always_inline s64
atomic64_fetch_and(s64 i, atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_fetch_and(i, v);
}
#define atomic64_fetch_and atomic64_fetch_and
#endif
#if defined(arch_atomic64_fetch_and_acquire)
-static inline s64
+static __always_inline s64
atomic64_fetch_and_acquire(s64 i, atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_fetch_and_acquire(i, v);
}
#define atomic64_fetch_and_acquire atomic64_fetch_and_acquire
#endif
#if defined(arch_atomic64_fetch_and_release)
-static inline s64
+static __always_inline s64
atomic64_fetch_and_release(s64 i, atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_fetch_and_release(i, v);
}
#define atomic64_fetch_and_release atomic64_fetch_and_release
#endif
#if defined(arch_atomic64_fetch_and_relaxed)
-static inline s64
+static __always_inline s64
atomic64_fetch_and_relaxed(s64 i, atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_fetch_and_relaxed(i, v);
}
#define atomic64_fetch_and_relaxed atomic64_fetch_and_relaxed
#endif
#if defined(arch_atomic64_andnot)
-static inline void
+static __always_inline void
atomic64_andnot(s64 i, atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
arch_atomic64_andnot(i, v);
}
#define atomic64_andnot atomic64_andnot
#endif
#if defined(arch_atomic64_fetch_andnot)
-static inline s64
+static __always_inline s64
atomic64_fetch_andnot(s64 i, atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_fetch_andnot(i, v);
}
#define atomic64_fetch_andnot atomic64_fetch_andnot
#endif
#if defined(arch_atomic64_fetch_andnot_acquire)
-static inline s64
+static __always_inline s64
atomic64_fetch_andnot_acquire(s64 i, atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_fetch_andnot_acquire(i, v);
}
#define atomic64_fetch_andnot_acquire atomic64_fetch_andnot_acquire
#endif
#if defined(arch_atomic64_fetch_andnot_release)
-static inline s64
+static __always_inline s64
atomic64_fetch_andnot_release(s64 i, atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_fetch_andnot_release(i, v);
}
#define atomic64_fetch_andnot_release atomic64_fetch_andnot_release
#endif
#if defined(arch_atomic64_fetch_andnot_relaxed)
-static inline s64
+static __always_inline s64
atomic64_fetch_andnot_relaxed(s64 i, atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_fetch_andnot_relaxed(i, v);
}
#define atomic64_fetch_andnot_relaxed atomic64_fetch_andnot_relaxed
#endif
-static inline void
+static __always_inline void
atomic64_or(s64 i, atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
arch_atomic64_or(i, v);
}
#define atomic64_or atomic64_or
#if !defined(arch_atomic64_fetch_or_relaxed) || defined(arch_atomic64_fetch_or)
-static inline s64
+static __always_inline s64
atomic64_fetch_or(s64 i, atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_fetch_or(i, v);
}
#define atomic64_fetch_or atomic64_fetch_or
#endif
#if defined(arch_atomic64_fetch_or_acquire)
-static inline s64
+static __always_inline s64
atomic64_fetch_or_acquire(s64 i, atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_fetch_or_acquire(i, v);
}
#define atomic64_fetch_or_acquire atomic64_fetch_or_acquire
#endif
#if defined(arch_atomic64_fetch_or_release)
-static inline s64
+static __always_inline s64
atomic64_fetch_or_release(s64 i, atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_fetch_or_release(i, v);
}
#define atomic64_fetch_or_release atomic64_fetch_or_release
#endif
#if defined(arch_atomic64_fetch_or_relaxed)
-static inline s64
+static __always_inline s64
atomic64_fetch_or_relaxed(s64 i, atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_fetch_or_relaxed(i, v);
}
#define atomic64_fetch_or_relaxed atomic64_fetch_or_relaxed
#endif
-static inline void
+static __always_inline void
atomic64_xor(s64 i, atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
arch_atomic64_xor(i, v);
}
#define atomic64_xor atomic64_xor
#if !defined(arch_atomic64_fetch_xor_relaxed) || defined(arch_atomic64_fetch_xor)
-static inline s64
+static __always_inline s64
atomic64_fetch_xor(s64 i, atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_fetch_xor(i, v);
}
#define atomic64_fetch_xor atomic64_fetch_xor
#endif
#if defined(arch_atomic64_fetch_xor_acquire)
-static inline s64
+static __always_inline s64
atomic64_fetch_xor_acquire(s64 i, atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_fetch_xor_acquire(i, v);
}
#define atomic64_fetch_xor_acquire atomic64_fetch_xor_acquire
#endif
#if defined(arch_atomic64_fetch_xor_release)
-static inline s64
+static __always_inline s64
atomic64_fetch_xor_release(s64 i, atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_fetch_xor_release(i, v);
}
#define atomic64_fetch_xor_release atomic64_fetch_xor_release
#endif
#if defined(arch_atomic64_fetch_xor_relaxed)
-static inline s64
+static __always_inline s64
atomic64_fetch_xor_relaxed(s64 i, atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_fetch_xor_relaxed(i, v);
}
#define atomic64_fetch_xor_relaxed atomic64_fetch_xor_relaxed
#endif
#if !defined(arch_atomic64_xchg_relaxed) || defined(arch_atomic64_xchg)
-static inline s64
+static __always_inline s64
atomic64_xchg(atomic64_t *v, s64 i)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_xchg(v, i);
}
#define atomic64_xchg atomic64_xchg
#endif
#if defined(arch_atomic64_xchg_acquire)
-static inline s64
+static __always_inline s64
atomic64_xchg_acquire(atomic64_t *v, s64 i)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_xchg_acquire(v, i);
}
#define atomic64_xchg_acquire atomic64_xchg_acquire
#endif
#if defined(arch_atomic64_xchg_release)
-static inline s64
+static __always_inline s64
atomic64_xchg_release(atomic64_t *v, s64 i)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_xchg_release(v, i);
}
#define atomic64_xchg_release atomic64_xchg_release
#endif
#if defined(arch_atomic64_xchg_relaxed)
-static inline s64
+static __always_inline s64
atomic64_xchg_relaxed(atomic64_t *v, s64 i)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_xchg_relaxed(v, i);
}
#define atomic64_xchg_relaxed atomic64_xchg_relaxed
#endif
#if !defined(arch_atomic64_cmpxchg_relaxed) || defined(arch_atomic64_cmpxchg)
-static inline s64
+static __always_inline s64
atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_cmpxchg(v, old, new);
}
#define atomic64_cmpxchg atomic64_cmpxchg
#endif
#if defined(arch_atomic64_cmpxchg_acquire)
-static inline s64
+static __always_inline s64
atomic64_cmpxchg_acquire(atomic64_t *v, s64 old, s64 new)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_cmpxchg_acquire(v, old, new);
}
#define atomic64_cmpxchg_acquire atomic64_cmpxchg_acquire
#endif
#if defined(arch_atomic64_cmpxchg_release)
-static inline s64
+static __always_inline s64
atomic64_cmpxchg_release(atomic64_t *v, s64 old, s64 new)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_cmpxchg_release(v, old, new);
}
#define atomic64_cmpxchg_release atomic64_cmpxchg_release
#endif
#if defined(arch_atomic64_cmpxchg_relaxed)
-static inline s64
+static __always_inline s64
atomic64_cmpxchg_relaxed(atomic64_t *v, s64 old, s64 new)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_cmpxchg_relaxed(v, old, new);
}
#define atomic64_cmpxchg_relaxed atomic64_cmpxchg_relaxed
#endif
#if defined(arch_atomic64_try_cmpxchg)
-static inline bool
+static __always_inline bool
atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new)
{
- kasan_check_write(v, sizeof(*v));
- kasan_check_write(old, sizeof(*old));
+ instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_write(old, sizeof(*old));
return arch_atomic64_try_cmpxchg(v, old, new);
}
#define atomic64_try_cmpxchg atomic64_try_cmpxchg
#endif
#if defined(arch_atomic64_try_cmpxchg_acquire)
-static inline bool
+static __always_inline bool
atomic64_try_cmpxchg_acquire(atomic64_t *v, s64 *old, s64 new)
{
- kasan_check_write(v, sizeof(*v));
- kasan_check_write(old, sizeof(*old));
+ instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_write(old, sizeof(*old));
return arch_atomic64_try_cmpxchg_acquire(v, old, new);
}
#define atomic64_try_cmpxchg_acquire atomic64_try_cmpxchg_acquire
#endif
#if defined(arch_atomic64_try_cmpxchg_release)
-static inline bool
+static __always_inline bool
atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new)
{
- kasan_check_write(v, sizeof(*v));
- kasan_check_write(old, sizeof(*old));
+ instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_write(old, sizeof(*old));
return arch_atomic64_try_cmpxchg_release(v, old, new);
}
#define atomic64_try_cmpxchg_release atomic64_try_cmpxchg_release
#endif
#if defined(arch_atomic64_try_cmpxchg_relaxed)
-static inline bool
+static __always_inline bool
atomic64_try_cmpxchg_relaxed(atomic64_t *v, s64 *old, s64 new)
{
- kasan_check_write(v, sizeof(*v));
- kasan_check_write(old, sizeof(*old));
+ instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_write(old, sizeof(*old));
return arch_atomic64_try_cmpxchg_relaxed(v, old, new);
}
#define atomic64_try_cmpxchg_relaxed atomic64_try_cmpxchg_relaxed
#endif
#if defined(arch_atomic64_sub_and_test)
-static inline bool
+static __always_inline bool
atomic64_sub_and_test(s64 i, atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_sub_and_test(i, v);
}
#define atomic64_sub_and_test atomic64_sub_and_test
#endif
#if defined(arch_atomic64_dec_and_test)
-static inline bool
+static __always_inline bool
atomic64_dec_and_test(atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_dec_and_test(v);
}
#define atomic64_dec_and_test atomic64_dec_and_test
#endif
#if defined(arch_atomic64_inc_and_test)
-static inline bool
+static __always_inline bool
atomic64_inc_and_test(atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_inc_and_test(v);
}
#define atomic64_inc_and_test atomic64_inc_and_test
#endif
#if defined(arch_atomic64_add_negative)
-static inline bool
+static __always_inline bool
atomic64_add_negative(s64 i, atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_add_negative(i, v);
}
#define atomic64_add_negative atomic64_add_negative
#endif
#if defined(arch_atomic64_fetch_add_unless)
-static inline s64
+static __always_inline s64
atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_fetch_add_unless(v, a, u);
}
#define atomic64_fetch_add_unless atomic64_fetch_add_unless
#endif
#if defined(arch_atomic64_add_unless)
-static inline bool
+static __always_inline bool
atomic64_add_unless(atomic64_t *v, s64 a, s64 u)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_add_unless(v, a, u);
}
#define atomic64_add_unless atomic64_add_unless
#endif
#if defined(arch_atomic64_inc_not_zero)
-static inline bool
+static __always_inline bool
atomic64_inc_not_zero(atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_inc_not_zero(v);
}
#define atomic64_inc_not_zero atomic64_inc_not_zero
#endif
#if defined(arch_atomic64_inc_unless_negative)
-static inline bool
+static __always_inline bool
atomic64_inc_unless_negative(atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_inc_unless_negative(v);
}
#define atomic64_inc_unless_negative atomic64_inc_unless_negative
#endif
#if defined(arch_atomic64_dec_unless_positive)
-static inline bool
+static __always_inline bool
atomic64_dec_unless_positive(atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_dec_unless_positive(v);
}
#define atomic64_dec_unless_positive atomic64_dec_unless_positive
#endif
#if defined(arch_atomic64_dec_if_positive)
-static inline s64
+static __always_inline s64
atomic64_dec_if_positive(atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_dec_if_positive(v);
}
#define atomic64_dec_if_positive atomic64_dec_if_positive
@@ -1644,7 +1645,7 @@ atomic64_dec_if_positive(atomic64_t *v)
#define xchg(ptr, ...) \
({ \
typeof(ptr) __ai_ptr = (ptr); \
- kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
arch_xchg(__ai_ptr, __VA_ARGS__); \
})
#endif
@@ -1653,7 +1654,7 @@ atomic64_dec_if_positive(atomic64_t *v)
#define xchg_acquire(ptr, ...) \
({ \
typeof(ptr) __ai_ptr = (ptr); \
- kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
arch_xchg_acquire(__ai_ptr, __VA_ARGS__); \
})
#endif
@@ -1662,7 +1663,7 @@ atomic64_dec_if_positive(atomic64_t *v)
#define xchg_release(ptr, ...) \
({ \
typeof(ptr) __ai_ptr = (ptr); \
- kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
arch_xchg_release(__ai_ptr, __VA_ARGS__); \
})
#endif
@@ -1671,7 +1672,7 @@ atomic64_dec_if_positive(atomic64_t *v)
#define xchg_relaxed(ptr, ...) \
({ \
typeof(ptr) __ai_ptr = (ptr); \
- kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
arch_xchg_relaxed(__ai_ptr, __VA_ARGS__); \
})
#endif
@@ -1680,7 +1681,7 @@ atomic64_dec_if_positive(atomic64_t *v)
#define cmpxchg(ptr, ...) \
({ \
typeof(ptr) __ai_ptr = (ptr); \
- kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
arch_cmpxchg(__ai_ptr, __VA_ARGS__); \
})
#endif
@@ -1689,7 +1690,7 @@ atomic64_dec_if_positive(atomic64_t *v)
#define cmpxchg_acquire(ptr, ...) \
({ \
typeof(ptr) __ai_ptr = (ptr); \
- kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
arch_cmpxchg_acquire(__ai_ptr, __VA_ARGS__); \
})
#endif
@@ -1698,7 +1699,7 @@ atomic64_dec_if_positive(atomic64_t *v)
#define cmpxchg_release(ptr, ...) \
({ \
typeof(ptr) __ai_ptr = (ptr); \
- kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
arch_cmpxchg_release(__ai_ptr, __VA_ARGS__); \
})
#endif
@@ -1707,7 +1708,7 @@ atomic64_dec_if_positive(atomic64_t *v)
#define cmpxchg_relaxed(ptr, ...) \
({ \
typeof(ptr) __ai_ptr = (ptr); \
- kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
arch_cmpxchg_relaxed(__ai_ptr, __VA_ARGS__); \
})
#endif
@@ -1716,7 +1717,7 @@ atomic64_dec_if_positive(atomic64_t *v)
#define cmpxchg64(ptr, ...) \
({ \
typeof(ptr) __ai_ptr = (ptr); \
- kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
arch_cmpxchg64(__ai_ptr, __VA_ARGS__); \
})
#endif
@@ -1725,7 +1726,7 @@ atomic64_dec_if_positive(atomic64_t *v)
#define cmpxchg64_acquire(ptr, ...) \
({ \
typeof(ptr) __ai_ptr = (ptr); \
- kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
arch_cmpxchg64_acquire(__ai_ptr, __VA_ARGS__); \
})
#endif
@@ -1734,7 +1735,7 @@ atomic64_dec_if_positive(atomic64_t *v)
#define cmpxchg64_release(ptr, ...) \
({ \
typeof(ptr) __ai_ptr = (ptr); \
- kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
arch_cmpxchg64_release(__ai_ptr, __VA_ARGS__); \
})
#endif
@@ -1743,7 +1744,7 @@ atomic64_dec_if_positive(atomic64_t *v)
#define cmpxchg64_relaxed(ptr, ...) \
({ \
typeof(ptr) __ai_ptr = (ptr); \
- kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
arch_cmpxchg64_relaxed(__ai_ptr, __VA_ARGS__); \
})
#endif
@@ -1751,28 +1752,28 @@ atomic64_dec_if_positive(atomic64_t *v)
#define cmpxchg_local(ptr, ...) \
({ \
typeof(ptr) __ai_ptr = (ptr); \
- kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
arch_cmpxchg_local(__ai_ptr, __VA_ARGS__); \
})
#define cmpxchg64_local(ptr, ...) \
({ \
typeof(ptr) __ai_ptr = (ptr); \
- kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
arch_cmpxchg64_local(__ai_ptr, __VA_ARGS__); \
})
#define sync_cmpxchg(ptr, ...) \
({ \
typeof(ptr) __ai_ptr = (ptr); \
- kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
arch_sync_cmpxchg(__ai_ptr, __VA_ARGS__); \
})
#define cmpxchg_double(ptr, ...) \
({ \
typeof(ptr) __ai_ptr = (ptr); \
- kasan_check_write(__ai_ptr, 2 * sizeof(*__ai_ptr)); \
+ instrument_atomic_write(__ai_ptr, 2 * sizeof(*__ai_ptr)); \
arch_cmpxchg_double(__ai_ptr, __VA_ARGS__); \
})
@@ -1780,9 +1781,9 @@ atomic64_dec_if_positive(atomic64_t *v)
#define cmpxchg_double_local(ptr, ...) \
({ \
typeof(ptr) __ai_ptr = (ptr); \
- kasan_check_write(__ai_ptr, 2 * sizeof(*__ai_ptr)); \
+ instrument_atomic_write(__ai_ptr, 2 * sizeof(*__ai_ptr)); \
arch_cmpxchg_double_local(__ai_ptr, __VA_ARGS__); \
})
#endif /* _ASM_GENERIC_ATOMIC_INSTRUMENTED_H */
-// b29b625d5de9280f680e42c7be859b55b15e5f6a
+// 89bf97f3a7509b740845e51ddf31055b48a81f40
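Taken together, the hunks above convert every kasan_check_write() in atomic-instrumented.h into instrument_atomic_write(), so a single annotation now fans out to every enabled checker instead of KASAN alone; the cmpxchg_double() variants pass 2 * sizeof(*__ai_ptr) because they operate on a pair of adjacent words. For orientation, a minimal sketch of the wrapper being switched to, assuming the <linux/instrumented.h> naming (the body is an approximation, not the verbatim upstream helper):

#include <linux/kasan-checks.h>
#include <linux/kcsan-checks.h>

/* Annotate an atomic write of @size bytes at @v for all checkers. */
static __always_inline void instrument_atomic_write(const volatile void *v,
						    size_t size)
{
	kasan_check_write(v, size);		/* KASAN: address validity */
	kcsan_check_atomic_write(v, size);	/* KCSAN: access is atomic */
}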
diff --git a/include/asm-generic/atomic-long.h b/include/asm-generic/atomic-long.h
index 881c7e27af28..073cf40f431b 100644
--- a/include/asm-generic/atomic-long.h
+++ b/include/asm-generic/atomic-long.h
@@ -6,6 +6,7 @@
#ifndef _ASM_GENERIC_ATOMIC_LONG_H
#define _ASM_GENERIC_ATOMIC_LONG_H
+#include <linux/compiler.h>
#include <asm/types.h>
#ifdef CONFIG_64BIT
@@ -22,493 +23,493 @@ typedef atomic_t atomic_long_t;
#ifdef CONFIG_64BIT
-static inline long
+static __always_inline long
atomic_long_read(const atomic_long_t *v)
{
return atomic64_read(v);
}
-static inline long
+static __always_inline long
atomic_long_read_acquire(const atomic_long_t *v)
{
return atomic64_read_acquire(v);
}
-static inline void
+static __always_inline void
atomic_long_set(atomic_long_t *v, long i)
{
atomic64_set(v, i);
}
-static inline void
+static __always_inline void
atomic_long_set_release(atomic_long_t *v, long i)
{
atomic64_set_release(v, i);
}
-static inline void
+static __always_inline void
atomic_long_add(long i, atomic_long_t *v)
{
atomic64_add(i, v);
}
-static inline long
+static __always_inline long
atomic_long_add_return(long i, atomic_long_t *v)
{
return atomic64_add_return(i, v);
}
-static inline long
+static __always_inline long
atomic_long_add_return_acquire(long i, atomic_long_t *v)
{
return atomic64_add_return_acquire(i, v);
}
-static inline long
+static __always_inline long
atomic_long_add_return_release(long i, atomic_long_t *v)
{
return atomic64_add_return_release(i, v);
}
-static inline long
+static __always_inline long
atomic_long_add_return_relaxed(long i, atomic_long_t *v)
{
return atomic64_add_return_relaxed(i, v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_add(long i, atomic_long_t *v)
{
return atomic64_fetch_add(i, v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_add_acquire(long i, atomic_long_t *v)
{
return atomic64_fetch_add_acquire(i, v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_add_release(long i, atomic_long_t *v)
{
return atomic64_fetch_add_release(i, v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_add_relaxed(long i, atomic_long_t *v)
{
return atomic64_fetch_add_relaxed(i, v);
}
-static inline void
+static __always_inline void
atomic_long_sub(long i, atomic_long_t *v)
{
atomic64_sub(i, v);
}
-static inline long
+static __always_inline long
atomic_long_sub_return(long i, atomic_long_t *v)
{
return atomic64_sub_return(i, v);
}
-static inline long
+static __always_inline long
atomic_long_sub_return_acquire(long i, atomic_long_t *v)
{
return atomic64_sub_return_acquire(i, v);
}
-static inline long
+static __always_inline long
atomic_long_sub_return_release(long i, atomic_long_t *v)
{
return atomic64_sub_return_release(i, v);
}
-static inline long
+static __always_inline long
atomic_long_sub_return_relaxed(long i, atomic_long_t *v)
{
return atomic64_sub_return_relaxed(i, v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_sub(long i, atomic_long_t *v)
{
return atomic64_fetch_sub(i, v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_sub_acquire(long i, atomic_long_t *v)
{
return atomic64_fetch_sub_acquire(i, v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_sub_release(long i, atomic_long_t *v)
{
return atomic64_fetch_sub_release(i, v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_sub_relaxed(long i, atomic_long_t *v)
{
return atomic64_fetch_sub_relaxed(i, v);
}
-static inline void
+static __always_inline void
atomic_long_inc(atomic_long_t *v)
{
atomic64_inc(v);
}
-static inline long
+static __always_inline long
atomic_long_inc_return(atomic_long_t *v)
{
return atomic64_inc_return(v);
}
-static inline long
+static __always_inline long
atomic_long_inc_return_acquire(atomic_long_t *v)
{
return atomic64_inc_return_acquire(v);
}
-static inline long
+static __always_inline long
atomic_long_inc_return_release(atomic_long_t *v)
{
return atomic64_inc_return_release(v);
}
-static inline long
+static __always_inline long
atomic_long_inc_return_relaxed(atomic_long_t *v)
{
return atomic64_inc_return_relaxed(v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_inc(atomic_long_t *v)
{
return atomic64_fetch_inc(v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_inc_acquire(atomic_long_t *v)
{
return atomic64_fetch_inc_acquire(v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_inc_release(atomic_long_t *v)
{
return atomic64_fetch_inc_release(v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_inc_relaxed(atomic_long_t *v)
{
return atomic64_fetch_inc_relaxed(v);
}
-static inline void
+static __always_inline void
atomic_long_dec(atomic_long_t *v)
{
atomic64_dec(v);
}
-static inline long
+static __always_inline long
atomic_long_dec_return(atomic_long_t *v)
{
return atomic64_dec_return(v);
}
-static inline long
+static __always_inline long
atomic_long_dec_return_acquire(atomic_long_t *v)
{
return atomic64_dec_return_acquire(v);
}
-static inline long
+static __always_inline long
atomic_long_dec_return_release(atomic_long_t *v)
{
return atomic64_dec_return_release(v);
}
-static inline long
+static __always_inline long
atomic_long_dec_return_relaxed(atomic_long_t *v)
{
return atomic64_dec_return_relaxed(v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_dec(atomic_long_t *v)
{
return atomic64_fetch_dec(v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_dec_acquire(atomic_long_t *v)
{
return atomic64_fetch_dec_acquire(v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_dec_release(atomic_long_t *v)
{
return atomic64_fetch_dec_release(v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_dec_relaxed(atomic_long_t *v)
{
return atomic64_fetch_dec_relaxed(v);
}
-static inline void
+static __always_inline void
atomic_long_and(long i, atomic_long_t *v)
{
atomic64_and(i, v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_and(long i, atomic_long_t *v)
{
return atomic64_fetch_and(i, v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_and_acquire(long i, atomic_long_t *v)
{
return atomic64_fetch_and_acquire(i, v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_and_release(long i, atomic_long_t *v)
{
return atomic64_fetch_and_release(i, v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_and_relaxed(long i, atomic_long_t *v)
{
return atomic64_fetch_and_relaxed(i, v);
}
-static inline void
+static __always_inline void
atomic_long_andnot(long i, atomic_long_t *v)
{
atomic64_andnot(i, v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_andnot(long i, atomic_long_t *v)
{
return atomic64_fetch_andnot(i, v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_andnot_acquire(long i, atomic_long_t *v)
{
return atomic64_fetch_andnot_acquire(i, v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_andnot_release(long i, atomic_long_t *v)
{
return atomic64_fetch_andnot_release(i, v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_andnot_relaxed(long i, atomic_long_t *v)
{
return atomic64_fetch_andnot_relaxed(i, v);
}
-static inline void
+static __always_inline void
atomic_long_or(long i, atomic_long_t *v)
{
atomic64_or(i, v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_or(long i, atomic_long_t *v)
{
return atomic64_fetch_or(i, v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_or_acquire(long i, atomic_long_t *v)
{
return atomic64_fetch_or_acquire(i, v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_or_release(long i, atomic_long_t *v)
{
return atomic64_fetch_or_release(i, v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_or_relaxed(long i, atomic_long_t *v)
{
return atomic64_fetch_or_relaxed(i, v);
}
-static inline void
+static __always_inline void
atomic_long_xor(long i, atomic_long_t *v)
{
atomic64_xor(i, v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_xor(long i, atomic_long_t *v)
{
return atomic64_fetch_xor(i, v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_xor_acquire(long i, atomic_long_t *v)
{
return atomic64_fetch_xor_acquire(i, v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_xor_release(long i, atomic_long_t *v)
{
return atomic64_fetch_xor_release(i, v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_xor_relaxed(long i, atomic_long_t *v)
{
return atomic64_fetch_xor_relaxed(i, v);
}
-static inline long
+static __always_inline long
atomic_long_xchg(atomic_long_t *v, long i)
{
return atomic64_xchg(v, i);
}
-static inline long
+static __always_inline long
atomic_long_xchg_acquire(atomic_long_t *v, long i)
{
return atomic64_xchg_acquire(v, i);
}
-static inline long
+static __always_inline long
atomic_long_xchg_release(atomic_long_t *v, long i)
{
return atomic64_xchg_release(v, i);
}
-static inline long
+static __always_inline long
atomic_long_xchg_relaxed(atomic_long_t *v, long i)
{
return atomic64_xchg_relaxed(v, i);
}
-static inline long
+static __always_inline long
atomic_long_cmpxchg(atomic_long_t *v, long old, long new)
{
return atomic64_cmpxchg(v, old, new);
}
-static inline long
+static __always_inline long
atomic_long_cmpxchg_acquire(atomic_long_t *v, long old, long new)
{
return atomic64_cmpxchg_acquire(v, old, new);
}
-static inline long
+static __always_inline long
atomic_long_cmpxchg_release(atomic_long_t *v, long old, long new)
{
return atomic64_cmpxchg_release(v, old, new);
}
-static inline long
+static __always_inline long
atomic_long_cmpxchg_relaxed(atomic_long_t *v, long old, long new)
{
return atomic64_cmpxchg_relaxed(v, old, new);
}
-static inline bool
+static __always_inline bool
atomic_long_try_cmpxchg(atomic_long_t *v, long *old, long new)
{
return atomic64_try_cmpxchg(v, (s64 *)old, new);
}
-static inline bool
+static __always_inline bool
atomic_long_try_cmpxchg_acquire(atomic_long_t *v, long *old, long new)
{
return atomic64_try_cmpxchg_acquire(v, (s64 *)old, new);
}
-static inline bool
+static __always_inline bool
atomic_long_try_cmpxchg_release(atomic_long_t *v, long *old, long new)
{
return atomic64_try_cmpxchg_release(v, (s64 *)old, new);
}
-static inline bool
+static __always_inline bool
atomic_long_try_cmpxchg_relaxed(atomic_long_t *v, long *old, long new)
{
return atomic64_try_cmpxchg_relaxed(v, (s64 *)old, new);
}
-static inline bool
+static __always_inline bool
atomic_long_sub_and_test(long i, atomic_long_t *v)
{
return atomic64_sub_and_test(i, v);
}
-static inline bool
+static __always_inline bool
atomic_long_dec_and_test(atomic_long_t *v)
{
return atomic64_dec_and_test(v);
}
-static inline bool
+static __always_inline bool
atomic_long_inc_and_test(atomic_long_t *v)
{
return atomic64_inc_and_test(v);
}
-static inline bool
+static __always_inline bool
atomic_long_add_negative(long i, atomic_long_t *v)
{
return atomic64_add_negative(i, v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_add_unless(atomic_long_t *v, long a, long u)
{
return atomic64_fetch_add_unless(v, a, u);
}
-static inline bool
+static __always_inline bool
atomic_long_add_unless(atomic_long_t *v, long a, long u)
{
return atomic64_add_unless(v, a, u);
}
-static inline bool
+static __always_inline bool
atomic_long_inc_not_zero(atomic_long_t *v)
{
return atomic64_inc_not_zero(v);
}
-static inline bool
+static __always_inline bool
atomic_long_inc_unless_negative(atomic_long_t *v)
{
return atomic64_inc_unless_negative(v);
}
-static inline bool
+static __always_inline bool
atomic_long_dec_unless_positive(atomic_long_t *v)
{
return atomic64_dec_unless_positive(v);
}
-static inline long
+static __always_inline long
atomic_long_dec_if_positive(atomic_long_t *v)
{
return atomic64_dec_if_positive(v);
@@ -516,493 +517,493 @@ atomic_long_dec_if_positive(atomic_long_t *v)
#else /* CONFIG_64BIT */
-static inline long
+static __always_inline long
atomic_long_read(const atomic_long_t *v)
{
return atomic_read(v);
}
-static inline long
+static __always_inline long
atomic_long_read_acquire(const atomic_long_t *v)
{
return atomic_read_acquire(v);
}
-static inline void
+static __always_inline void
atomic_long_set(atomic_long_t *v, long i)
{
atomic_set(v, i);
}
-static inline void
+static __always_inline void
atomic_long_set_release(atomic_long_t *v, long i)
{
atomic_set_release(v, i);
}
-static inline void
+static __always_inline void
atomic_long_add(long i, atomic_long_t *v)
{
atomic_add(i, v);
}
-static inline long
+static __always_inline long
atomic_long_add_return(long i, atomic_long_t *v)
{
return atomic_add_return(i, v);
}
-static inline long
+static __always_inline long
atomic_long_add_return_acquire(long i, atomic_long_t *v)
{
return atomic_add_return_acquire(i, v);
}
-static inline long
+static __always_inline long
atomic_long_add_return_release(long i, atomic_long_t *v)
{
return atomic_add_return_release(i, v);
}
-static inline long
+static __always_inline long
atomic_long_add_return_relaxed(long i, atomic_long_t *v)
{
return atomic_add_return_relaxed(i, v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_add(long i, atomic_long_t *v)
{
return atomic_fetch_add(i, v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_add_acquire(long i, atomic_long_t *v)
{
return atomic_fetch_add_acquire(i, v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_add_release(long i, atomic_long_t *v)
{
return atomic_fetch_add_release(i, v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_add_relaxed(long i, atomic_long_t *v)
{
return atomic_fetch_add_relaxed(i, v);
}
-static inline void
+static __always_inline void
atomic_long_sub(long i, atomic_long_t *v)
{
atomic_sub(i, v);
}
-static inline long
+static __always_inline long
atomic_long_sub_return(long i, atomic_long_t *v)
{
return atomic_sub_return(i, v);
}
-static inline long
+static __always_inline long
atomic_long_sub_return_acquire(long i, atomic_long_t *v)
{
return atomic_sub_return_acquire(i, v);
}
-static inline long
+static __always_inline long
atomic_long_sub_return_release(long i, atomic_long_t *v)
{
return atomic_sub_return_release(i, v);
}
-static inline long
+static __always_inline long
atomic_long_sub_return_relaxed(long i, atomic_long_t *v)
{
return atomic_sub_return_relaxed(i, v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_sub(long i, atomic_long_t *v)
{
return atomic_fetch_sub(i, v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_sub_acquire(long i, atomic_long_t *v)
{
return atomic_fetch_sub_acquire(i, v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_sub_release(long i, atomic_long_t *v)
{
return atomic_fetch_sub_release(i, v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_sub_relaxed(long i, atomic_long_t *v)
{
return atomic_fetch_sub_relaxed(i, v);
}
-static inline void
+static __always_inline void
atomic_long_inc(atomic_long_t *v)
{
atomic_inc(v);
}
-static inline long
+static __always_inline long
atomic_long_inc_return(atomic_long_t *v)
{
return atomic_inc_return(v);
}
-static inline long
+static __always_inline long
atomic_long_inc_return_acquire(atomic_long_t *v)
{
return atomic_inc_return_acquire(v);
}
-static inline long
+static __always_inline long
atomic_long_inc_return_release(atomic_long_t *v)
{
return atomic_inc_return_release(v);
}
-static inline long
+static __always_inline long
atomic_long_inc_return_relaxed(atomic_long_t *v)
{
return atomic_inc_return_relaxed(v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_inc(atomic_long_t *v)
{
return atomic_fetch_inc(v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_inc_acquire(atomic_long_t *v)
{
return atomic_fetch_inc_acquire(v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_inc_release(atomic_long_t *v)
{
return atomic_fetch_inc_release(v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_inc_relaxed(atomic_long_t *v)
{
return atomic_fetch_inc_relaxed(v);
}
-static inline void
+static __always_inline void
atomic_long_dec(atomic_long_t *v)
{
atomic_dec(v);
}
-static inline long
+static __always_inline long
atomic_long_dec_return(atomic_long_t *v)
{
return atomic_dec_return(v);
}
-static inline long
+static __always_inline long
atomic_long_dec_return_acquire(atomic_long_t *v)
{
return atomic_dec_return_acquire(v);
}
-static inline long
+static __always_inline long
atomic_long_dec_return_release(atomic_long_t *v)
{
return atomic_dec_return_release(v);
}
-static inline long
+static __always_inline long
atomic_long_dec_return_relaxed(atomic_long_t *v)
{
return atomic_dec_return_relaxed(v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_dec(atomic_long_t *v)
{
return atomic_fetch_dec(v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_dec_acquire(atomic_long_t *v)
{
return atomic_fetch_dec_acquire(v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_dec_release(atomic_long_t *v)
{
return atomic_fetch_dec_release(v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_dec_relaxed(atomic_long_t *v)
{
return atomic_fetch_dec_relaxed(v);
}
-static inline void
+static __always_inline void
atomic_long_and(long i, atomic_long_t *v)
{
atomic_and(i, v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_and(long i, atomic_long_t *v)
{
return atomic_fetch_and(i, v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_and_acquire(long i, atomic_long_t *v)
{
return atomic_fetch_and_acquire(i, v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_and_release(long i, atomic_long_t *v)
{
return atomic_fetch_and_release(i, v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_and_relaxed(long i, atomic_long_t *v)
{
return atomic_fetch_and_relaxed(i, v);
}
-static inline void
+static __always_inline void
atomic_long_andnot(long i, atomic_long_t *v)
{
atomic_andnot(i, v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_andnot(long i, atomic_long_t *v)
{
return atomic_fetch_andnot(i, v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_andnot_acquire(long i, atomic_long_t *v)
{
return atomic_fetch_andnot_acquire(i, v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_andnot_release(long i, atomic_long_t *v)
{
return atomic_fetch_andnot_release(i, v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_andnot_relaxed(long i, atomic_long_t *v)
{
return atomic_fetch_andnot_relaxed(i, v);
}
-static inline void
+static __always_inline void
atomic_long_or(long i, atomic_long_t *v)
{
atomic_or(i, v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_or(long i, atomic_long_t *v)
{
return atomic_fetch_or(i, v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_or_acquire(long i, atomic_long_t *v)
{
return atomic_fetch_or_acquire(i, v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_or_release(long i, atomic_long_t *v)
{
return atomic_fetch_or_release(i, v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_or_relaxed(long i, atomic_long_t *v)
{
return atomic_fetch_or_relaxed(i, v);
}
-static inline void
+static __always_inline void
atomic_long_xor(long i, atomic_long_t *v)
{
atomic_xor(i, v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_xor(long i, atomic_long_t *v)
{
return atomic_fetch_xor(i, v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_xor_acquire(long i, atomic_long_t *v)
{
return atomic_fetch_xor_acquire(i, v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_xor_release(long i, atomic_long_t *v)
{
return atomic_fetch_xor_release(i, v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_xor_relaxed(long i, atomic_long_t *v)
{
return atomic_fetch_xor_relaxed(i, v);
}
-static inline long
+static __always_inline long
atomic_long_xchg(atomic_long_t *v, long i)
{
return atomic_xchg(v, i);
}
-static inline long
+static __always_inline long
atomic_long_xchg_acquire(atomic_long_t *v, long i)
{
return atomic_xchg_acquire(v, i);
}
-static inline long
+static __always_inline long
atomic_long_xchg_release(atomic_long_t *v, long i)
{
return atomic_xchg_release(v, i);
}
-static inline long
+static __always_inline long
atomic_long_xchg_relaxed(atomic_long_t *v, long i)
{
return atomic_xchg_relaxed(v, i);
}
-static inline long
+static __always_inline long
atomic_long_cmpxchg(atomic_long_t *v, long old, long new)
{
return atomic_cmpxchg(v, old, new);
}
-static inline long
+static __always_inline long
atomic_long_cmpxchg_acquire(atomic_long_t *v, long old, long new)
{
return atomic_cmpxchg_acquire(v, old, new);
}
-static inline long
+static __always_inline long
atomic_long_cmpxchg_release(atomic_long_t *v, long old, long new)
{
return atomic_cmpxchg_release(v, old, new);
}
-static inline long
+static __always_inline long
atomic_long_cmpxchg_relaxed(atomic_long_t *v, long old, long new)
{
return atomic_cmpxchg_relaxed(v, old, new);
}
-static inline bool
+static __always_inline bool
atomic_long_try_cmpxchg(atomic_long_t *v, long *old, long new)
{
return atomic_try_cmpxchg(v, (int *)old, new);
}
-static inline bool
+static __always_inline bool
atomic_long_try_cmpxchg_acquire(atomic_long_t *v, long *old, long new)
{
return atomic_try_cmpxchg_acquire(v, (int *)old, new);
}
-static inline bool
+static __always_inline bool
atomic_long_try_cmpxchg_release(atomic_long_t *v, long *old, long new)
{
return atomic_try_cmpxchg_release(v, (int *)old, new);
}
-static inline bool
+static __always_inline bool
atomic_long_try_cmpxchg_relaxed(atomic_long_t *v, long *old, long new)
{
return atomic_try_cmpxchg_relaxed(v, (int *)old, new);
}
-static inline bool
+static __always_inline bool
atomic_long_sub_and_test(long i, atomic_long_t *v)
{
return atomic_sub_and_test(i, v);
}
-static inline bool
+static __always_inline bool
atomic_long_dec_and_test(atomic_long_t *v)
{
return atomic_dec_and_test(v);
}
-static inline bool
+static __always_inline bool
atomic_long_inc_and_test(atomic_long_t *v)
{
return atomic_inc_and_test(v);
}
-static inline bool
+static __always_inline bool
atomic_long_add_negative(long i, atomic_long_t *v)
{
return atomic_add_negative(i, v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_add_unless(atomic_long_t *v, long a, long u)
{
return atomic_fetch_add_unless(v, a, u);
}
-static inline bool
+static __always_inline bool
atomic_long_add_unless(atomic_long_t *v, long a, long u)
{
return atomic_add_unless(v, a, u);
}
-static inline bool
+static __always_inline bool
atomic_long_inc_not_zero(atomic_long_t *v)
{
return atomic_inc_not_zero(v);
}
-static inline bool
+static __always_inline bool
atomic_long_inc_unless_negative(atomic_long_t *v)
{
return atomic_inc_unless_negative(v);
}
-static inline bool
+static __always_inline bool
atomic_long_dec_unless_positive(atomic_long_t *v)
{
return atomic_dec_unless_positive(v);
}
-static inline long
+static __always_inline long
atomic_long_dec_if_positive(atomic_long_t *v)
{
return atomic_dec_if_positive(v);
@@ -1010,4 +1011,4 @@ atomic_long_dec_if_positive(atomic_long_t *v)
#endif /* CONFIG_64BIT */
#endif /* _ASM_GENERIC_ATOMIC_LONG_H */
-// 77558968132ce4f911ad53f6f52ce423006f6268
+// a624200981f552b2c6be4f32fe44da8289f30d87
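The blanket s/inline/__always_inline/ through atomic-long.h (together with the new #include <linux/compiler.h> that supplies the attribute) is about correctness, not optimization: plain inline is only a hint, and a compiler that declines it leaves a real, instrumentable function body behind, which skews instrumentation attribution and is illegal to call from noinstr sections. A sketch of the distinction, with hypothetical wrapper names and the usual attribute spelling:

/* Hint only: the compiler may still emit an out-of-line copy. */
static inline long wrapper_hinted(atomic_long_t *v)
{
	return atomic_long_read(v);
}

/* __always_inline expands to inline __attribute__((__always_inline__)),
 * forcing the body into every caller; no separate function is emitted. */
static __always_inline long wrapper_forced(atomic_long_t *v)
{
	return atomic_long_read(v);
}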
diff --git a/include/asm-generic/barrier.h b/include/asm-generic/barrier.h
index 85b28eb80b11..2eacaf7d62f6 100644
--- a/include/asm-generic/barrier.h
+++ b/include/asm-generic/barrier.h
@@ -128,10 +128,10 @@ do { \
#ifndef __smp_load_acquire
#define __smp_load_acquire(p) \
({ \
- typeof(*p) ___p1 = READ_ONCE(*p); \
+ __unqual_scalar_typeof(*p) ___p1 = READ_ONCE(*p); \
compiletime_assert_atomic_type(*p); \
__smp_mb(); \
- ___p1; \
+ (typeof(*p))___p1; \
})
#endif
@@ -183,10 +183,10 @@ do { \
#ifndef smp_load_acquire
#define smp_load_acquire(p) \
({ \
- typeof(*p) ___p1 = READ_ONCE(*p); \
+ __unqual_scalar_typeof(*p) ___p1 = READ_ONCE(*p); \
compiletime_assert_atomic_type(*p); \
barrier(); \
- ___p1; \
+ (typeof(*p))___p1; \
})
#endif
@@ -229,14 +229,14 @@ do { \
#ifndef smp_cond_load_relaxed
#define smp_cond_load_relaxed(ptr, cond_expr) ({ \
typeof(ptr) __PTR = (ptr); \
- typeof(*ptr) VAL; \
+ __unqual_scalar_typeof(*ptr) VAL; \
for (;;) { \
VAL = READ_ONCE(*__PTR); \
if (cond_expr) \
break; \
cpu_relax(); \
} \
- VAL; \
+ (typeof(*ptr))VAL; \
})
#endif
@@ -250,10 +250,10 @@ do { \
*/
#ifndef smp_cond_load_acquire
#define smp_cond_load_acquire(ptr, cond_expr) ({ \
- typeof(*ptr) _val; \
+ __unqual_scalar_typeof(*ptr) _val; \
_val = smp_cond_load_relaxed(ptr, cond_expr); \
smp_acquire__after_ctrl_dep(); \
- _val; \
+ (typeof(*ptr))_val; \
})
#endif
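All four barrier.h hunks share one pattern: the temporary is declared with __unqual_scalar_typeof() instead of typeof(), and the macro's result is cast back to typeof(*p) so callers see the same type as before. The point is that when *p is volatile-qualified, typeof(*p) makes the local itself volatile and forces a reload at every later use; stripping the qualifier confines the volatility to the READ_ONCE(). A hypothetical illustration:

volatile int ready;	/* hypothetical shared flag */

/* typeof(ready) is 'volatile int': a temporary of that type is
 * re-read from memory on each use.  __unqual_scalar_typeof(ready)
 * is plain 'int', so only the READ_ONCE() inside the macro is a
 * volatile access and the local is an ordinary value afterwards. */
int v = smp_load_acquire(&ready);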
diff --git a/include/asm-generic/bitops/instrumented-atomic.h b/include/asm-generic/bitops/instrumented-atomic.h
index 18ce3c9e8eec..fb2cb33a4013 100644
--- a/include/asm-generic/bitops/instrumented-atomic.h
+++ b/include/asm-generic/bitops/instrumented-atomic.h
@@ -11,7 +11,7 @@
#ifndef _ASM_GENERIC_BITOPS_INSTRUMENTED_ATOMIC_H
#define _ASM_GENERIC_BITOPS_INSTRUMENTED_ATOMIC_H
-#include <linux/kasan-checks.h>
+#include <linux/instrumented.h>
/**
* set_bit - Atomically set a bit in memory
@@ -25,7 +25,7 @@
*/
static inline void set_bit(long nr, volatile unsigned long *addr)
{
- kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
+ instrument_atomic_write(addr + BIT_WORD(nr), sizeof(long));
arch_set_bit(nr, addr);
}
@@ -38,7 +38,7 @@ static inline void set_bit(long nr, volatile unsigned long *addr)
*/
static inline void clear_bit(long nr, volatile unsigned long *addr)
{
- kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
+ instrument_atomic_write(addr + BIT_WORD(nr), sizeof(long));
arch_clear_bit(nr, addr);
}
@@ -54,7 +54,7 @@ static inline void clear_bit(long nr, volatile unsigned long *addr)
*/
static inline void change_bit(long nr, volatile unsigned long *addr)
{
- kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
+ instrument_atomic_write(addr + BIT_WORD(nr), sizeof(long));
arch_change_bit(nr, addr);
}
@@ -67,7 +67,7 @@ static inline void change_bit(long nr, volatile unsigned long *addr)
*/
static inline bool test_and_set_bit(long nr, volatile unsigned long *addr)
{
- kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
+ instrument_atomic_write(addr + BIT_WORD(nr), sizeof(long));
return arch_test_and_set_bit(nr, addr);
}
@@ -80,7 +80,7 @@ static inline bool test_and_set_bit(long nr, volatile unsigned long *addr)
*/
static inline bool test_and_clear_bit(long nr, volatile unsigned long *addr)
{
- kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
+ instrument_atomic_write(addr + BIT_WORD(nr), sizeof(long));
return arch_test_and_clear_bit(nr, addr);
}
@@ -93,7 +93,7 @@ static inline bool test_and_clear_bit(long nr, volatile unsigned long *addr)
*/
static inline bool test_and_change_bit(long nr, volatile unsigned long *addr)
{
- kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
+ instrument_atomic_write(addr + BIT_WORD(nr), sizeof(long));
return arch_test_and_change_bit(nr, addr);
}
diff --git a/include/asm-generic/bitops/instrumented-lock.h b/include/asm-generic/bitops/instrumented-lock.h
index ec53fdeea9ec..b9bec468ae03 100644
--- a/include/asm-generic/bitops/instrumented-lock.h
+++ b/include/asm-generic/bitops/instrumented-lock.h
@@ -11,7 +11,7 @@
#ifndef _ASM_GENERIC_BITOPS_INSTRUMENTED_LOCK_H
#define _ASM_GENERIC_BITOPS_INSTRUMENTED_LOCK_H
-#include <linux/kasan-checks.h>
+#include <linux/instrumented.h>
/**
* clear_bit_unlock - Clear a bit in memory, for unlock
@@ -22,7 +22,7 @@
*/
static inline void clear_bit_unlock(long nr, volatile unsigned long *addr)
{
- kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
+ instrument_atomic_write(addr + BIT_WORD(nr), sizeof(long));
arch_clear_bit_unlock(nr, addr);
}
@@ -37,7 +37,7 @@ static inline void clear_bit_unlock(long nr, volatile unsigned long *addr)
*/
static inline void __clear_bit_unlock(long nr, volatile unsigned long *addr)
{
- kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
+ instrument_write(addr + BIT_WORD(nr), sizeof(long));
arch___clear_bit_unlock(nr, addr);
}
@@ -52,7 +52,7 @@ static inline void __clear_bit_unlock(long nr, volatile unsigned long *addr)
*/
static inline bool test_and_set_bit_lock(long nr, volatile unsigned long *addr)
{
- kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
+ instrument_atomic_write(addr + BIT_WORD(nr), sizeof(long));
return arch_test_and_set_bit_lock(nr, addr);
}
@@ -71,7 +71,7 @@ static inline bool test_and_set_bit_lock(long nr, volatile unsigned long *addr)
static inline bool
clear_bit_unlock_is_negative_byte(long nr, volatile unsigned long *addr)
{
- kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
+ instrument_atomic_write(addr + BIT_WORD(nr), sizeof(long));
return arch_clear_bit_unlock_is_negative_byte(nr, addr);
}
/* Let everybody know we have it. */
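Note the deliberate asymmetry above: the atomic lock bitops (clear_bit_unlock(), test_and_set_bit_lock(), clear_bit_unlock_is_negative_byte()) become instrument_atomic_write(), while the non-atomic __clear_bit_unlock() becomes plain instrument_write(). Under KCSAN that is exactly the line between an allowed concurrent access and a reportable data race; roughly, with a hypothetical shared word:

unsigned long flags;	/* hypothetical word shared between CPUs */

set_bit(0, &flags);	/* atomic annotation: racing with other
			 * atomic bitops on 'flags' is not flagged */
__set_bit(1, &flags);	/* plain annotation: a concurrent access to
			 * 'flags' from another CPU is reported */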
diff --git a/include/asm-generic/bitops/instrumented-non-atomic.h b/include/asm-generic/bitops/instrumented-non-atomic.h
index 95ff28d128a1..20f788a25ef9 100644
--- a/include/asm-generic/bitops/instrumented-non-atomic.h
+++ b/include/asm-generic/bitops/instrumented-non-atomic.h
@@ -11,7 +11,7 @@
#ifndef _ASM_GENERIC_BITOPS_INSTRUMENTED_NON_ATOMIC_H
#define _ASM_GENERIC_BITOPS_INSTRUMENTED_NON_ATOMIC_H
-#include <linux/kasan-checks.h>
+#include <linux/instrumented.h>
/**
* __set_bit - Set a bit in memory
@@ -24,7 +24,7 @@
*/
static inline void __set_bit(long nr, volatile unsigned long *addr)
{
- kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
+ instrument_write(addr + BIT_WORD(nr), sizeof(long));
arch___set_bit(nr, addr);
}
@@ -39,7 +39,7 @@ static inline void __set_bit(long nr, volatile unsigned long *addr)
*/
static inline void __clear_bit(long nr, volatile unsigned long *addr)
{
- kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
+ instrument_write(addr + BIT_WORD(nr), sizeof(long));
arch___clear_bit(nr, addr);
}
@@ -54,7 +54,7 @@ static inline void __clear_bit(long nr, volatile unsigned long *addr)
*/
static inline void __change_bit(long nr, volatile unsigned long *addr)
{
- kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
+ instrument_write(addr + BIT_WORD(nr), sizeof(long));
arch___change_bit(nr, addr);
}
@@ -68,7 +68,7 @@ static inline void __change_bit(long nr, volatile unsigned long *addr)
*/
static inline bool __test_and_set_bit(long nr, volatile unsigned long *addr)
{
- kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
+ instrument_write(addr + BIT_WORD(nr), sizeof(long));
return arch___test_and_set_bit(nr, addr);
}
@@ -82,7 +82,7 @@ static inline bool __test_and_set_bit(long nr, volatile unsigned long *addr)
*/
static inline bool __test_and_clear_bit(long nr, volatile unsigned long *addr)
{
- kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
+ instrument_write(addr + BIT_WORD(nr), sizeof(long));
return arch___test_and_clear_bit(nr, addr);
}
@@ -96,7 +96,7 @@ static inline bool __test_and_clear_bit(long nr, volatile unsigned long *addr)
*/
static inline bool __test_and_change_bit(long nr, volatile unsigned long *addr)
{
- kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
+ instrument_write(addr + BIT_WORD(nr), sizeof(long));
return arch___test_and_change_bit(nr, addr);
}
@@ -107,7 +107,7 @@ static inline bool __test_and_change_bit(long nr, volatile unsigned long *addr)
*/
static inline bool test_bit(long nr, const volatile unsigned long *addr)
{
- kasan_check_read(addr + BIT_WORD(nr), sizeof(long));
+ instrument_atomic_read(addr + BIT_WORD(nr), sizeof(long));
return arch_test_bit(nr, addr);
}
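test_bit() is the one pure reader in these headers, and it is annotated as instrument_atomic_read() rather than a plain read: it is routinely used locklessly against bits that other CPUs flip with the atomic setters, and that pairing should stay silent under KCSAN. A sketch of the pattern this keeps quiet (bit number and state word are hypothetical):

#define WORK_READY	0		/* hypothetical bit */
static unsigned long state;		/* hypothetical, shared */

/* Lockless poll: because test_bit() is annotated atomic, racing
 * with a remote set_bit(WORK_READY, &state) is not reported. */
while (!test_bit(WORK_READY, &state))
	cpu_relax();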
diff --git a/include/asm-generic/bug.h b/include/asm-generic/bug.h
index 384b5c835ced..c94e33ae3e7b 100644
--- a/include/asm-generic/bug.h
+++ b/include/asm-generic/bug.h
@@ -83,14 +83,19 @@ extern __printf(4, 5)
void warn_slowpath_fmt(const char *file, const int line, unsigned taint,
const char *fmt, ...);
#define __WARN() __WARN_printf(TAINT_WARN, NULL)
-#define __WARN_printf(taint, arg...) \
- warn_slowpath_fmt(__FILE__, __LINE__, taint, arg)
+#define __WARN_printf(taint, arg...) do { \
+ instrumentation_begin(); \
+ warn_slowpath_fmt(__FILE__, __LINE__, taint, arg); \
+ instrumentation_end(); \
+ } while (0)
#else
extern __printf(1, 2) void __warn_printk(const char *fmt, ...);
#define __WARN() __WARN_FLAGS(BUGFLAG_TAINT(TAINT_WARN))
#define __WARN_printf(taint, arg...) do { \
+ instrumentation_begin(); \
__warn_printk(arg); \
__WARN_FLAGS(BUGFLAG_NO_CUT_HERE | BUGFLAG_TAINT(taint));\
+ instrumentation_end(); \
} while (0)
#define WARN_ON_ONCE(condition) ({ \
int __ret_warn_on = !!(condition); \
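Wrapping both flavors of __WARN_printf() in instrumentation_begin()/instrumentation_end() is what lets WARN fire from noinstr code: the pair marks a region in which calls into instrumentable code are intentional, so objtool's noinstr validation accepts the warn_slowpath machinery. A sketch of the calling pattern this enables (function name and condition are hypothetical):

extern bool state_is_sane;	/* hypothetical */

noinstr void enter_from_user_mode(void)
{
	/* ...code that objtool checks is free of instrumentation... */

	instrumentation_begin();
	WARN_ON_ONCE(!state_is_sane);	/* permitted inside the pair */
	instrumentation_end();
}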
diff --git a/include/asm-generic/cacheflush.h b/include/asm-generic/cacheflush.h
index 907fa5d16494..4a674db4e1fa 100644
--- a/include/asm-generic/cacheflush.h
+++ b/include/asm-generic/cacheflush.h
@@ -2,6 +2,11 @@
#ifndef _ASM_GENERIC_CACHEFLUSH_H
#define _ASM_GENERIC_CACHEFLUSH_H
+struct mm_struct;
+struct vm_area_struct;
+struct page;
+struct address_space;
+
/*
* The cache doesn't need to be flushed when TLB entries change when
* the cache is mapped to physical memory, not virtual memory
diff --git a/include/asm-generic/hugetlb.h b/include/asm-generic/hugetlb.h
index 40f85decc2ee..8e1e6244a89d 100644
--- a/include/asm-generic/hugetlb.h
+++ b/include/asm-generic/hugetlb.h
@@ -122,7 +122,7 @@ static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
#ifndef __HAVE_ARCH_HUGE_PTEP_GET
static inline pte_t huge_ptep_get(pte_t *ptep)
{
- return READ_ONCE(*ptep);
+ return ptep_get(ptep);
}
#endif
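Replacing the open-coded READ_ONCE(*ptep) with ptep_get() routes the dereference through an accessor that architectures can override, which matters where a PTE is wider than one machine word and cannot be read atomically. Absent an override, the generic accessor is expected to reduce to the same tearing-safe read; a sketch of that fallback shape:

#ifndef ptep_get
static inline pte_t ptep_get(pte_t *ptep)
{
	return READ_ONCE(*ptep);	/* single-word generic case */
}
#endif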
diff --git a/include/asm-generic/io.h b/include/asm-generic/io.h
index 3a7871130112..8b1e020e9a03 100644
--- a/include/asm-generic/io.h
+++ b/include/asm-generic/io.h
@@ -972,7 +972,7 @@ static inline void iounmap(void __iomem *addr)
}
#endif
#elif defined(CONFIG_GENERIC_IOREMAP)
-#include <asm/pgtable.h>
+#include <linux/pgtable.h>
void __iomem *ioremap_prot(phys_addr_t addr, size_t size, unsigned long prot);
void iounmap(volatile void __iomem *addr);
diff --git a/include/asm-generic/pgtable-nopmd.h b/include/asm-generic/pgtable-nopmd.h
index 0d9b28cba16d..3e13acd019ae 100644
--- a/include/asm-generic/pgtable-nopmd.h
+++ b/include/asm-generic/pgtable-nopmd.h
@@ -45,6 +45,7 @@ static inline pmd_t * pmd_offset(pud_t * pud, unsigned long address)
{
return (pmd_t *)pud;
}
+#define pmd_offset pmd_offset
#define pmd_val(x) (pud_val((x).pud))
#define __pmd(x) ((pmd_t) { __pud(x) } )
diff --git a/include/asm-generic/pgtable-nopud.h b/include/asm-generic/pgtable-nopud.h
index ad05c1684bfc..a9d751fbda9e 100644
--- a/include/asm-generic/pgtable-nopud.h
+++ b/include/asm-generic/pgtable-nopud.h
@@ -43,6 +43,7 @@ static inline pud_t *pud_offset(p4d_t *p4d, unsigned long address)
{
return (pud_t *)p4d;
}
+#define pud_offset pud_offset
#define pud_val(x) (p4d_val((x).p4d))
#define __pud(x) ((pud_t) { __p4d(x) })
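Both pgtable-nopmd.h and pgtable-nopud.h now use the kernel's define-the-symbol-to-itself idiom: following the static inline definition, #define pud_offset pud_offset makes the override visible to the preprocessor, so generic code can guard its fallback with a plain #ifndef. A sketch of the consuming side, under that assumption:

/* Generic code elsewhere can probe for an existing definition: */
#ifndef pmd_offset
static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
{
	return (pmd_t *)pud;	/* fallback, compiled only when no
				 * implementation #defined the name */
}
#define pmd_offset pmd_offset
#endif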
diff --git a/include/drm/drm_displayid.h b/include/drm/drm_displayid.h
index 27bdd273fc4e..77941efb5426 100644
--- a/include/drm/drm_displayid.h
+++ b/include/drm/drm_displayid.h
@@ -89,7 +89,7 @@ struct displayid_detailed_timings_1 {
struct displayid_detailed_timing_block {
struct displayid_block base;
- struct displayid_detailed_timings_1 timings[0];
+ struct displayid_detailed_timings_1 timings[];
};
#define for_each_displayid_db(displayid, block, idx, length) \
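The timings[0] to timings[] change converts a GNU zero-length array into a C99 flexible array member, which modern compilers and bounds checkers can reason about; sizeof the struct still excludes the trailing array. Allocation sizing for such structs typically goes through the struct_size() helper, sketched here with a hypothetical element count:

struct displayid_detailed_timing_block *blk;
size_t n = 4;	/* hypothetical number of detailed timings */

/* struct_size() computes sizeof(*blk) + n * sizeof(blk->timings[0])
 * with overflow checking. */
blk = kzalloc(struct_size(blk, timings, n), GFP_KERNEL);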
diff --git a/include/dt-bindings/clock/agilex-clock.h b/include/dt-bindings/clock/agilex-clock.h
new file mode 100644
index 000000000000..f19cf8ccbdd2
--- /dev/null
+++ b/include/dt-bindings/clock/agilex-clock.h
@@ -0,0 +1,70 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2019, Intel Corporation
+ */
+
+#ifndef __AGILEX_CLOCK_H
+#define __AGILEX_CLOCK_H
+
+/* fixed rate clocks */
+#define AGILEX_OSC1 0
+#define AGILEX_CB_INTOSC_HS_DIV2_CLK 1
+#define AGILEX_CB_INTOSC_LS_CLK 2
+#define AGILEX_L4_SYS_FREE_CLK 3
+#define AGILEX_F2S_FREE_CLK 4
+
+/* PLL clocks */
+#define AGILEX_MAIN_PLL_CLK 5
+#define AGILEX_MAIN_PLL_C0_CLK 6
+#define AGILEX_MAIN_PLL_C1_CLK 7
+#define AGILEX_MAIN_PLL_C2_CLK 8
+#define AGILEX_MAIN_PLL_C3_CLK 9
+#define AGILEX_PERIPH_PLL_CLK 10
+#define AGILEX_PERIPH_PLL_C0_CLK 11
+#define AGILEX_PERIPH_PLL_C1_CLK 12
+#define AGILEX_PERIPH_PLL_C2_CLK 13
+#define AGILEX_PERIPH_PLL_C3_CLK 14
+#define AGILEX_MPU_FREE_CLK 15
+#define AGILEX_MPU_CCU_CLK 16
+#define AGILEX_BOOT_CLK 17
+
+/* fixed factor clocks */
+#define AGILEX_L3_MAIN_FREE_CLK 18
+#define AGILEX_NOC_FREE_CLK 19
+#define AGILEX_S2F_USR0_CLK 20
+#define AGILEX_NOC_CLK 21
+#define AGILEX_EMAC_A_FREE_CLK 22
+#define AGILEX_EMAC_B_FREE_CLK 23
+#define AGILEX_EMAC_PTP_FREE_CLK 24
+#define AGILEX_GPIO_DB_FREE_CLK 25
+#define AGILEX_SDMMC_FREE_CLK 26
+#define AGILEX_S2F_USER0_FREE_CLK 27
+#define AGILEX_S2F_USER1_FREE_CLK 28
+#define AGILEX_PSI_REF_FREE_CLK 29
+
+/* Gate clocks */
+#define AGILEX_MPU_CLK 30
+#define AGILEX_MPU_L2RAM_CLK 31
+#define AGILEX_MPU_PERIPH_CLK 32
+#define AGILEX_L4_MAIN_CLK 33
+#define AGILEX_L4_MP_CLK 34
+#define AGILEX_L4_SP_CLK 35
+#define AGILEX_CS_AT_CLK 36
+#define AGILEX_CS_TRACE_CLK 37
+#define AGILEX_CS_PDBG_CLK 38
+#define AGILEX_CS_TIMER_CLK 39
+#define AGILEX_S2F_USER0_CLK 40
+#define AGILEX_EMAC0_CLK 41
+#define AGILEX_EMAC1_CLK 43
+#define AGILEX_EMAC2_CLK 44
+#define AGILEX_EMAC_PTP_CLK 45
+#define AGILEX_GPIO_DB_CLK 46
+#define AGILEX_NAND_CLK 47
+#define AGILEX_PSI_REF_CLK 48
+#define AGILEX_S2F_USER1_CLK 49
+#define AGILEX_SDMMC_CLK 50
+#define AGILEX_SPI_M_CLK 51
+#define AGILEX_USB_CLK 52
+#define AGILEX_NUM_CLKS 53
+
+#endif /* __AGILEX_CLOCK_H */
diff --git a/include/dt-bindings/clock/at91.h b/include/dt-bindings/clock/at91.h
index 38b5554153c8..eba17106608b 100644
--- a/include/dt-bindings/clock/at91.h
+++ b/include/dt-bindings/clock/at91.h
@@ -12,6 +12,7 @@
#define PMC_TYPE_SYSTEM 1
#define PMC_TYPE_PERIPHERAL 2
#define PMC_TYPE_GCK 3
+#define PMC_TYPE_PROGRAMMABLE 4
#define PMC_SLOW 0
#define PMC_MCK 1
@@ -20,6 +21,9 @@
#define PMC_MCK2 4
#define PMC_I2S0_MUX 5
#define PMC_I2S1_MUX 6
+#define PMC_PLLACK 7
+#define PMC_PLLBCK 8
+#define PMC_AUDIOPLLCK 9
#ifndef AT91_PMC_MOSCS
#define AT91_PMC_MOSCS 0 /* MOSCS Flag */
diff --git a/include/dt-bindings/clock/bt1-ccu.h b/include/dt-bindings/clock/bt1-ccu.h
new file mode 100644
index 000000000000..5f166d27a00a
--- /dev/null
+++ b/include/dt-bindings/clock/bt1-ccu.h
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2020 BAIKAL ELECTRONICS, JSC
+ *
+ * Baikal-T1 CCU clock indices
+ */
+#ifndef __DT_BINDINGS_CLOCK_BT1_CCU_H
+#define __DT_BINDINGS_CLOCK_BT1_CCU_H
+
+#define CCU_CPU_PLL 0
+#define CCU_SATA_PLL 1
+#define CCU_DDR_PLL 2
+#define CCU_PCIE_PLL 3
+#define CCU_ETH_PLL 4
+
+#define CCU_AXI_MAIN_CLK 0
+#define CCU_AXI_DDR_CLK 1
+#define CCU_AXI_SATA_CLK 2
+#define CCU_AXI_GMAC0_CLK 3
+#define CCU_AXI_GMAC1_CLK 4
+#define CCU_AXI_XGMAC_CLK 5
+#define CCU_AXI_PCIE_M_CLK 6
+#define CCU_AXI_PCIE_S_CLK 7
+#define CCU_AXI_USB_CLK 8
+#define CCU_AXI_HWA_CLK 9
+#define CCU_AXI_SRAM_CLK 10
+
+#define CCU_SYS_SATA_REF_CLK 0
+#define CCU_SYS_APB_CLK 1
+#define CCU_SYS_GMAC0_TX_CLK 2
+#define CCU_SYS_GMAC0_PTP_CLK 3
+#define CCU_SYS_GMAC1_TX_CLK 4
+#define CCU_SYS_GMAC1_PTP_CLK 5
+#define CCU_SYS_XGMAC_REF_CLK 6
+#define CCU_SYS_XGMAC_PTP_CLK 7
+#define CCU_SYS_USB_CLK 8
+#define CCU_SYS_PVT_CLK 9
+#define CCU_SYS_HWA_CLK 10
+#define CCU_SYS_UART_CLK 11
+#define CCU_SYS_I2C1_CLK 12
+#define CCU_SYS_I2C2_CLK 13
+#define CCU_SYS_GPIO_CLK 14
+#define CCU_SYS_TIMER0_CLK 15
+#define CCU_SYS_TIMER1_CLK 16
+#define CCU_SYS_TIMER2_CLK 17
+#define CCU_SYS_WDT_CLK 18
+
+#endif /* __DT_BINDINGS_CLOCK_BT1_CCU_H */
diff --git a/include/dt-bindings/clock/imx7ulp-clock.h b/include/dt-bindings/clock/imx7ulp-clock.h
index 38145bdcd975..b58370d146e2 100644
--- a/include/dt-bindings/clock/imx7ulp-clock.h
+++ b/include/dt-bindings/clock/imx7ulp-clock.h
@@ -58,7 +58,10 @@
#define IMX7ULP_CLK_HSRUN_SYS_SEL 44
#define IMX7ULP_CLK_HSRUN_CORE_DIV 45
-#define IMX7ULP_CLK_SCG1_END 46
+#define IMX7ULP_CLK_CORE 46
+#define IMX7ULP_CLK_HSRUN_CORE 47
+
+#define IMX7ULP_CLK_SCG1_END 48
/* PCC2 */
#define IMX7ULP_CLK_DMA1 0
diff --git a/include/dt-bindings/clock/imx8mp-clock.h b/include/dt-bindings/clock/imx8mp-clock.h
index 47ab082238b4..7a23f289b27f 100644
--- a/include/dt-bindings/clock/imx8mp-clock.h
+++ b/include/dt-bindings/clock/imx8mp-clock.h
@@ -296,6 +296,94 @@
#define IMX8MP_CLK_ARM 287
#define IMX8MP_CLK_A53_CORE 288
-#define IMX8MP_CLK_END 289
+#define IMX8MP_SYS_PLL1_40M_CG 289
+#define IMX8MP_SYS_PLL1_80M_CG 290
+#define IMX8MP_SYS_PLL1_100M_CG 291
+#define IMX8MP_SYS_PLL1_133M_CG 292
+#define IMX8MP_SYS_PLL1_160M_CG 293
+#define IMX8MP_SYS_PLL1_200M_CG 294
+#define IMX8MP_SYS_PLL1_266M_CG 295
+#define IMX8MP_SYS_PLL1_400M_CG 296
+#define IMX8MP_SYS_PLL2_50M_CG 297
+#define IMX8MP_SYS_PLL2_100M_CG 298
+#define IMX8MP_SYS_PLL2_125M_CG 299
+#define IMX8MP_SYS_PLL2_166M_CG 300
+#define IMX8MP_SYS_PLL2_200M_CG 301
+#define IMX8MP_SYS_PLL2_250M_CG 302
+#define IMX8MP_SYS_PLL2_333M_CG 303
+#define IMX8MP_SYS_PLL2_500M_CG 304
+
+#define IMX8MP_CLK_M7_CORE 305
+#define IMX8MP_CLK_ML_CORE 306
+#define IMX8MP_CLK_GPU3D_CORE 307
+#define IMX8MP_CLK_GPU3D_SHADER_CORE 308
+#define IMX8MP_CLK_GPU2D_CORE 309
+#define IMX8MP_CLK_AUDIO_AXI 310
+#define IMX8MP_CLK_HSIO_AXI 311
+#define IMX8MP_CLK_MEDIA_ISP 312
+
+#define IMX8MP_CLK_END 313
+
+#define IMX8MP_CLK_AUDIOMIX_SAI1_IPG 0
+#define IMX8MP_CLK_AUDIOMIX_SAI1_MCLK1 1
+#define IMX8MP_CLK_AUDIOMIX_SAI1_MCLK2 2
+#define IMX8MP_CLK_AUDIOMIX_SAI1_MCLK3 3
+#define IMX8MP_CLK_AUDIOMIX_SAI2_IPG 4
+#define IMX8MP_CLK_AUDIOMIX_SAI2_MCLK1 5
+#define IMX8MP_CLK_AUDIOMIX_SAI2_MCLK2 6
+#define IMX8MP_CLK_AUDIOMIX_SAI2_MCLK3 7
+#define IMX8MP_CLK_AUDIOMIX_SAI3_IPG 8
+#define IMX8MP_CLK_AUDIOMIX_SAI3_MCLK1 9
+#define IMX8MP_CLK_AUDIOMIX_SAI3_MCLK2 10
+#define IMX8MP_CLK_AUDIOMIX_SAI3_MCLK3 11
+#define IMX8MP_CLK_AUDIOMIX_SAI5_IPG 12
+#define IMX8MP_CLK_AUDIOMIX_SAI5_MCLK1 13
+#define IMX8MP_CLK_AUDIOMIX_SAI5_MCLK2 14
+#define IMX8MP_CLK_AUDIOMIX_SAI5_MCLK3 15
+#define IMX8MP_CLK_AUDIOMIX_SAI6_IPG 16
+#define IMX8MP_CLK_AUDIOMIX_SAI6_MCLK1 17
+#define IMX8MP_CLK_AUDIOMIX_SAI6_MCLK2 18
+#define IMX8MP_CLK_AUDIOMIX_SAI6_MCLK3 19
+#define IMX8MP_CLK_AUDIOMIX_SAI7_IPG 20
+#define IMX8MP_CLK_AUDIOMIX_SAI7_MCLK1 21
+#define IMX8MP_CLK_AUDIOMIX_SAI7_MCLK2 22
+#define IMX8MP_CLK_AUDIOMIX_SAI7_MCLK3 23
+#define IMX8MP_CLK_AUDIOMIX_ASRC_IPG 24
+#define IMX8MP_CLK_AUDIOMIX_PDM_IPG 25
+#define IMX8MP_CLK_AUDIOMIX_SDMA2_ROOT 26
+#define IMX8MP_CLK_AUDIOMIX_SDMA3_ROOT 27
+#define IMX8MP_CLK_AUDIOMIX_SPBA2_ROOT 28
+#define IMX8MP_CLK_AUDIOMIX_DSP_ROOT 29
+#define IMX8MP_CLK_AUDIOMIX_DSPDBG_ROOT 30
+#define IMX8MP_CLK_AUDIOMIX_EARC_IPG 31
+#define IMX8MP_CLK_AUDIOMIX_OCRAMA_IPG 32
+#define IMX8MP_CLK_AUDIOMIX_AUD2HTX_IPG 33
+#define IMX8MP_CLK_AUDIOMIX_EDMA_ROOT 34
+#define IMX8MP_CLK_AUDIOMIX_AUDPLL_ROOT 35
+#define IMX8MP_CLK_AUDIOMIX_MU2_ROOT 36
+#define IMX8MP_CLK_AUDIOMIX_MU3_ROOT 37
+#define IMX8MP_CLK_AUDIOMIX_EARC_PHY 38
+#define IMX8MP_CLK_AUDIOMIX_PDM_ROOT 39
+#define IMX8MP_CLK_AUDIOMIX_SAI1_MCLK1_SEL 40
+#define IMX8MP_CLK_AUDIOMIX_SAI1_MCLK2_SEL 41
+#define IMX8MP_CLK_AUDIOMIX_SAI2_MCLK1_SEL 42
+#define IMX8MP_CLK_AUDIOMIX_SAI2_MCLK2_SEL 43
+#define IMX8MP_CLK_AUDIOMIX_SAI3_MCLK1_SEL 44
+#define IMX8MP_CLK_AUDIOMIX_SAI3_MCLK2_SEL 45
+#define IMX8MP_CLK_AUDIOMIX_SAI4_MCLK1_SEL 46
+#define IMX8MP_CLK_AUDIOMIX_SAI4_MCLK2_SEL 47
+#define IMX8MP_CLK_AUDIOMIX_SAI5_MCLK1_SEL 48
+#define IMX8MP_CLK_AUDIOMIX_SAI5_MCLK2_SEL 49
+#define IMX8MP_CLK_AUDIOMIX_SAI6_MCLK1_SEL 50
+#define IMX8MP_CLK_AUDIOMIX_SAI6_MCLK2_SEL 51
+#define IMX8MP_CLK_AUDIOMIX_SAI7_MCLK1_SEL 52
+#define IMX8MP_CLK_AUDIOMIX_SAI7_MCLK2_SEL 53
+#define IMX8MP_CLK_AUDIOMIX_PDM_SEL 54
+#define IMX8MP_CLK_AUDIOMIX_SAI_PLL_REF_SEL 55
+#define IMX8MP_CLK_AUDIOMIX_SAI_PLL 56
+#define IMX8MP_CLK_AUDIOMIX_SAI_PLL_BYPASS 57
+#define IMX8MP_CLK_AUDIOMIX_SAI_PLL_OUT 58
+
+#define IMX8MP_CLK_AUDIOMIX_END 59
#endif
diff --git a/include/dt-bindings/clock/intel,lgm-clk.h b/include/dt-bindings/clock/intel,lgm-clk.h
new file mode 100644
index 000000000000..92f5be6490bb
--- /dev/null
+++ b/include/dt-bindings/clock/intel,lgm-clk.h
@@ -0,0 +1,165 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (C) 2020 Intel Corporation.
+ * Lei Chuanhua <Chuanhua.lei@intel.com>
+ * Zhu Yixin <Yixin.zhu@intel.com>
+ */
+#ifndef __INTEL_LGM_CLK_H
+#define __INTEL_LGM_CLK_H
+
+/* PLL clocks */
+#define LGM_CLK_OSC 1
+#define LGM_CLK_PLLPP 2
+#define LGM_CLK_PLL2 3
+#define LGM_CLK_PLL0CZ 4
+#define LGM_CLK_PLL0B 5
+#define LGM_CLK_PLL1 6
+#define LGM_CLK_LJPLL3 7
+#define LGM_CLK_LJPLL4 8
+#define LGM_CLK_PLL0CM0 9
+#define LGM_CLK_PLL0CM1 10
+
+/* clocks from PLLs */
+
+/* ROPLL clocks */
+#define LGM_CLK_PP_HW 15
+#define LGM_CLK_PP_UC 16
+#define LGM_CLK_PP_FXD 17
+#define LGM_CLK_PP_TBM 18
+
+/* PLL2 clocks */
+#define LGM_CLK_DDR 20
+
+/* PLL0CZ */
+#define LGM_CLK_CM 25
+#define LGM_CLK_IC 26
+#define LGM_CLK_SDXC3 27
+
+/* PLL0B */
+#define LGM_CLK_NGI 30
+#define LGM_CLK_NOC4 31
+#define LGM_CLK_SW 32
+#define LGM_CLK_QSPI 33
+#define LGM_CLK_CQEM LGM_CLK_SW
+#define LGM_CLK_EMMC5 LGM_CLK_NOC4
+
+/* PLL1 */
+#define LGM_CLK_CT 35
+#define LGM_CLK_DSP 36
+#define LGM_CLK_VIF 37
+
+/* LJPLL3 */
+#define LGM_CLK_CML 40
+#define LGM_CLK_SERDES 41
+#define LGM_CLK_POOL 42
+#define LGM_CLK_PTP 43
+
+/* LJPLL4 */
+#define LGM_CLK_PCIE 45
+#define LGM_CLK_SATA LGM_CLK_PCIE
+
+/* PLL0CM0 */
+#define LGM_CLK_CPU0 50
+
+/* PLL0CM1 */
+#define LGM_CLK_CPU1 55
+
+/* Miscellaneous clocks */
+#define LGM_CLK_EMMC4 60
+#define LGM_CLK_SDXC2 61
+#define LGM_CLK_EMMC 62
+#define LGM_CLK_SDXC 63
+#define LGM_CLK_SLIC 64
+#define LGM_CLK_DCL 65
+#define LGM_CLK_DOCSIS 66
+#define LGM_CLK_PCM 67
+#define LGM_CLK_DDR_PHY 68
+#define LGM_CLK_PONDEF 69
+#define LGM_CLK_PL25M 70
+#define LGM_CLK_PL10M 71
+#define LGM_CLK_PL1544K 72
+#define LGM_CLK_PL2048K 73
+#define LGM_CLK_PL8K 74
+#define LGM_CLK_PON_NTR 75
+#define LGM_CLK_SYNC0 76
+#define LGM_CLK_SYNC1 77
+#define LGM_CLK_PROGDIV 78
+#define LGM_CLK_OD0 79
+#define LGM_CLK_OD1 80
+#define LGM_CLK_CBPHY0 81
+#define LGM_CLK_CBPHY1 82
+#define LGM_CLK_CBPHY2 83
+#define LGM_CLK_CBPHY3 84
+
+/* Gate clocks */
+/* Gate CLK0 */
+#define LGM_GCLK_C55 100
+#define LGM_GCLK_QSPI 101
+#define LGM_GCLK_EIP197 102
+#define LGM_GCLK_VAULT 103
+#define LGM_GCLK_TOE 104
+#define LGM_GCLK_SDXC 105
+#define LGM_GCLK_EMMC 106
+#define LGM_GCLK_SPI_DBG 107
+#define LGM_GCLK_DMA3 108
+
+/* Gate CLK1 */
+#define LGM_GCLK_DMA0 120
+#define LGM_GCLK_LEDC0 121
+#define LGM_GCLK_LEDC1 122
+#define LGM_GCLK_I2S0 123
+#define LGM_GCLK_I2S1 124
+#define LGM_GCLK_EBU 125
+#define LGM_GCLK_PWM 126
+#define LGM_GCLK_I2C0 127
+#define LGM_GCLK_I2C1 128
+#define LGM_GCLK_I2C2 129
+#define LGM_GCLK_I2C3 130
+#define LGM_GCLK_SSC0 131
+#define LGM_GCLK_SSC1 132
+#define LGM_GCLK_SSC2 133
+#define LGM_GCLK_SSC3 134
+#define LGM_GCLK_GPTC0 135
+#define LGM_GCLK_GPTC1 136
+#define LGM_GCLK_GPTC2 137
+#define LGM_GCLK_GPTC3 138
+#define LGM_GCLK_ASC0 139
+#define LGM_GCLK_ASC1 140
+#define LGM_GCLK_ASC2 141
+#define LGM_GCLK_ASC3 142
+#define LGM_GCLK_PCM0 143
+#define LGM_GCLK_PCM1 144
+#define LGM_GCLK_PCM2 145
+
+/* Gate CLK2 */
+#define LGM_GCLK_PCIE10 150
+#define LGM_GCLK_PCIE11 151
+#define LGM_GCLK_PCIE30 152
+#define LGM_GCLK_PCIE31 153
+#define LGM_GCLK_PCIE20 154
+#define LGM_GCLK_PCIE21 155
+#define LGM_GCLK_PCIE40 156
+#define LGM_GCLK_PCIE41 157
+#define LGM_GCLK_XPCS0 158
+#define LGM_GCLK_XPCS1 159
+#define LGM_GCLK_XPCS2 160
+#define LGM_GCLK_XPCS3 161
+#define LGM_GCLK_SATA0 162
+#define LGM_GCLK_SATA1 163
+#define LGM_GCLK_SATA2 164
+#define LGM_GCLK_SATA3 165
+
+/* Gate CLK3 */
+#define LGM_GCLK_ARCEM4 170
+#define LGM_GCLK_IDMAR1 171
+#define LGM_GCLK_IDMAT0 172
+#define LGM_GCLK_IDMAT1 173
+#define LGM_GCLK_IDMAT2 174
+#define LGM_GCLK_PPV4 175
+#define LGM_GCLK_GSWIPO 176
+#define LGM_GCLK_CQEM 177
+#define LGM_GCLK_XPCS5 178
+#define LGM_GCLK_USB1 179
+#define LGM_GCLK_USB2 180
+
+#endif /* __INTEL_LGM_CLK_H */
diff --git a/include/dt-bindings/clock/marvell,mmp2-audio.h b/include/dt-bindings/clock/marvell,mmp2-audio.h
new file mode 100644
index 000000000000..20664776f497
--- /dev/null
+++ b/include/dt-bindings/clock/marvell,mmp2-audio.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-2-Clause) */
+#ifndef __DT_BINDINGS_CLOCK_MARVELL_MMP2_AUDIO_H
+#define __DT_BINDINGS_CLOCK_MARVELL_MMP2_AUDIO_H
+
+#define MMP2_CLK_AUDIO_SYSCLK 0
+#define MMP2_CLK_AUDIO_SSPA0 1
+#define MMP2_CLK_AUDIO_SSPA1 2
+
+#define MMP2_CLK_AUDIO_NR_CLKS 3
+#endif
diff --git a/include/dt-bindings/clock/marvell,mmp2.h b/include/dt-bindings/clock/marvell,mmp2.h
index 06bb7fe4c62f..87f5ad5df72f 100644
--- a/include/dt-bindings/clock/marvell,mmp2.h
+++ b/include/dt-bindings/clock/marvell,mmp2.h
@@ -29,6 +29,8 @@
#define MMP3_CLK_PLL1_P 28
#define MMP3_CLK_PLL2_P 29
#define MMP3_CLK_PLL3 30
+#define MMP2_CLK_I2S0 31
+#define MMP2_CLK_I2S1 32
/* apb peripherals */
#define MMP2_CLK_TWSI0 60
@@ -87,6 +89,7 @@
#define MMP3_CLK_GPU_3D MMP2_CLK_GPU_3D
#define MMP3_CLK_GPU_2D 125
#define MMP3_CLK_SDH4 126
+#define MMP2_CLK_AUDIO 127
#define MMP2_NR_CLKS 200
#endif
diff --git a/include/dt-bindings/clock/meson8b-clkc.h b/include/dt-bindings/clock/meson8b-clkc.h
index 68862aaf977e..4c5965ae1df4 100644
--- a/include/dt-bindings/clock/meson8b-clkc.h
+++ b/include/dt-bindings/clock/meson8b-clkc.h
@@ -107,6 +107,7 @@
#define CLKID_PERIPH 126
#define CLKID_AXI 128
#define CLKID_L2_DRAM 130
+#define CLKID_HDMI_SYS 174
#define CLKID_VPU 190
#define CLKID_VDEC_1 196
#define CLKID_VDEC_HCODEC 199
diff --git a/include/dt-bindings/clock/mt6765-clk.h b/include/dt-bindings/clock/mt6765-clk.h
new file mode 100644
index 000000000000..eb97e568518e
--- /dev/null
+++ b/include/dt-bindings/clock/mt6765-clk.h
@@ -0,0 +1,313 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _DT_BINDINGS_CLK_MT6765_H
+#define _DT_BINDINGS_CLK_MT6765_H
+
+/* FIX Clks */
+#define CLK_TOP_CLK26M 0
+
+/* APMIXEDSYS */
+#define CLK_APMIXED_ARMPLL_L 0
+#define CLK_APMIXED_ARMPLL 1
+#define CLK_APMIXED_CCIPLL 2
+#define CLK_APMIXED_MAINPLL 3
+#define CLK_APMIXED_MFGPLL 4
+#define CLK_APMIXED_MMPLL 5
+#define CLK_APMIXED_UNIV2PLL 6
+#define CLK_APMIXED_MSDCPLL 7
+#define CLK_APMIXED_APLL1 8
+#define CLK_APMIXED_MPLL 9
+#define CLK_APMIXED_ULPOSC1 10
+#define CLK_APMIXED_ULPOSC2 11
+#define CLK_APMIXED_SSUSB26M 12
+#define CLK_APMIXED_APPLL26M 13
+#define CLK_APMIXED_MIPIC0_26M 14
+#define CLK_APMIXED_MDPLLGP26M 15
+#define CLK_APMIXED_MMSYS_F26M 16
+#define CLK_APMIXED_UFS26M 17
+#define CLK_APMIXED_MIPIC1_26M 18
+#define CLK_APMIXED_MEMPLL26M 19
+#define CLK_APMIXED_CLKSQ_LVPLL_26M 20
+#define CLK_APMIXED_MIPID0_26M 21
+#define CLK_APMIXED_NR_CLK 22
+
+/* TOPCKGEN */
+#define CLK_TOP_SYSPLL 0
+#define CLK_TOP_SYSPLL_D2 1
+#define CLK_TOP_SYSPLL1_D2 2
+#define CLK_TOP_SYSPLL1_D4 3
+#define CLK_TOP_SYSPLL1_D8 4
+#define CLK_TOP_SYSPLL1_D16 5
+#define CLK_TOP_SYSPLL_D3 6
+#define CLK_TOP_SYSPLL2_D2 7
+#define CLK_TOP_SYSPLL2_D4 8
+#define CLK_TOP_SYSPLL2_D8 9
+#define CLK_TOP_SYSPLL_D5 10
+#define CLK_TOP_SYSPLL3_D2 11
+#define CLK_TOP_SYSPLL3_D4 12
+#define CLK_TOP_SYSPLL_D7 13
+#define CLK_TOP_SYSPLL4_D2 14
+#define CLK_TOP_SYSPLL4_D4 15
+#define CLK_TOP_USB20_192M 16
+#define CLK_TOP_USB20_192M_D4 17
+#define CLK_TOP_USB20_192M_D8 18
+#define CLK_TOP_USB20_192M_D16 19
+#define CLK_TOP_USB20_192M_D32 20
+#define CLK_TOP_UNIVPLL 21
+#define CLK_TOP_UNIVPLL_D2 22
+#define CLK_TOP_UNIVPLL1_D2 23
+#define CLK_TOP_UNIVPLL1_D4 24
+#define CLK_TOP_UNIVPLL_D3 25
+#define CLK_TOP_UNIVPLL2_D2 26
+#define CLK_TOP_UNIVPLL2_D4 27
+#define CLK_TOP_UNIVPLL2_D8 28
+#define CLK_TOP_UNIVPLL2_D32 29
+#define CLK_TOP_UNIVPLL_D5 30
+#define CLK_TOP_UNIVPLL3_D2 31
+#define CLK_TOP_UNIVPLL3_D4 32
+#define CLK_TOP_MMPLL 33
+#define CLK_TOP_MMPLL_D2 34
+#define CLK_TOP_MPLL 35
+#define CLK_TOP_DA_MPLL_104M_DIV 36
+#define CLK_TOP_DA_MPLL_52M_DIV 37
+#define CLK_TOP_MFGPLL 38
+#define CLK_TOP_MSDCPLL 39
+#define CLK_TOP_MSDCPLL_D2 40
+#define CLK_TOP_APLL1 41
+#define CLK_TOP_APLL1_D2 42
+#define CLK_TOP_APLL1_D4 43
+#define CLK_TOP_APLL1_D8 44
+#define CLK_TOP_ULPOSC1 45
+#define CLK_TOP_ULPOSC1_D2 46
+#define CLK_TOP_ULPOSC1_D4 47
+#define CLK_TOP_ULPOSC1_D8 48
+#define CLK_TOP_ULPOSC1_D16 49
+#define CLK_TOP_ULPOSC1_D32 50
+#define CLK_TOP_DMPLL 51
+#define CLK_TOP_F_FRTC 52
+#define CLK_TOP_F_F26M 53
+#define CLK_TOP_AXI 54
+#define CLK_TOP_MM 55
+#define CLK_TOP_SCP 56
+#define CLK_TOP_MFG 57
+#define CLK_TOP_F_FUART 58
+#define CLK_TOP_SPI 59
+#define CLK_TOP_MSDC50_0 60
+#define CLK_TOP_MSDC30_1 61
+#define CLK_TOP_AUDIO 62
+#define CLK_TOP_AUD_1 63
+#define CLK_TOP_AUD_ENGEN1 64
+#define CLK_TOP_F_FDISP_PWM 65
+#define CLK_TOP_SSPM 66
+#define CLK_TOP_DXCC 67
+#define CLK_TOP_I2C 68
+#define CLK_TOP_F_FPWM 69
+#define CLK_TOP_F_FSENINF 70
+#define CLK_TOP_AES_FDE 71
+#define CLK_TOP_F_BIST2FPC 72
+#define CLK_TOP_ARMPLL_DIVIDER_PLL0 73
+#define CLK_TOP_ARMPLL_DIVIDER_PLL1 74
+#define CLK_TOP_ARMPLL_DIVIDER_PLL2 75
+#define CLK_TOP_DA_USB20_48M_DIV 76
+#define CLK_TOP_DA_UNIV_48M_DIV 77
+#define CLK_TOP_APLL12_DIV0 78
+#define CLK_TOP_APLL12_DIV1 79
+#define CLK_TOP_APLL12_DIV2 80
+#define CLK_TOP_APLL12_DIV3 81
+#define CLK_TOP_ARMPLL_DIVIDER_PLL0_EN 82
+#define CLK_TOP_ARMPLL_DIVIDER_PLL1_EN 83
+#define CLK_TOP_ARMPLL_DIVIDER_PLL2_EN 84
+#define CLK_TOP_FMEM_OCC_DRC_EN 85
+#define CLK_TOP_USB20_48M_EN 86
+#define CLK_TOP_UNIVPLL_48M_EN 87
+#define CLK_TOP_MPLL_104M_EN 88
+#define CLK_TOP_MPLL_52M_EN 89
+#define CLK_TOP_F_UFS_MP_SAP_CFG_EN 90
+#define CLK_TOP_F_BIST2FPC_EN 91
+#define CLK_TOP_MD_32K 92
+#define CLK_TOP_MD_26M 93
+#define CLK_TOP_MD2_32K 94
+#define CLK_TOP_MD2_26M 95
+#define CLK_TOP_AXI_SEL 96
+#define CLK_TOP_MEM_SEL 97
+#define CLK_TOP_MM_SEL 98
+#define CLK_TOP_SCP_SEL 99
+#define CLK_TOP_MFG_SEL 100
+#define CLK_TOP_ATB_SEL 101
+#define CLK_TOP_CAMTG_SEL 102
+#define CLK_TOP_CAMTG1_SEL 103
+#define CLK_TOP_CAMTG2_SEL 104
+#define CLK_TOP_CAMTG3_SEL 105
+#define CLK_TOP_UART_SEL 106
+#define CLK_TOP_SPI_SEL 107
+#define CLK_TOP_MSDC50_0_HCLK_SEL 108
+#define CLK_TOP_MSDC50_0_SEL 109
+#define CLK_TOP_MSDC30_1_SEL 110
+#define CLK_TOP_AUDIO_SEL 111
+#define CLK_TOP_AUD_INTBUS_SEL 112
+#define CLK_TOP_AUD_1_SEL 113
+#define CLK_TOP_AUD_ENGEN1_SEL 114
+#define CLK_TOP_DISP_PWM_SEL 115
+#define CLK_TOP_SSPM_SEL 116
+#define CLK_TOP_DXCC_SEL 117
+#define CLK_TOP_USB_TOP_SEL 118
+#define CLK_TOP_SPM_SEL 119
+#define CLK_TOP_I2C_SEL 120
+#define CLK_TOP_PWM_SEL 121
+#define CLK_TOP_SENINF_SEL 122
+#define CLK_TOP_AES_FDE_SEL 123
+#define CLK_TOP_PWRAP_ULPOSC_SEL 124
+#define CLK_TOP_CAMTM_SEL 125
+#define CLK_TOP_NR_CLK 126
+
+/* INFRACFG */
+#define CLK_IFR_ICUSB 0
+#define CLK_IFR_GCE 1
+#define CLK_IFR_THERM 2
+#define CLK_IFR_I2C_AP 3
+#define CLK_IFR_I2C_CCU 4
+#define CLK_IFR_I2C_SSPM 5
+#define CLK_IFR_I2C_RSV 6
+#define CLK_IFR_PWM_HCLK 7
+#define CLK_IFR_PWM1 8
+#define CLK_IFR_PWM2 9
+#define CLK_IFR_PWM3 10
+#define CLK_IFR_PWM4 11
+#define CLK_IFR_PWM5 12
+#define CLK_IFR_PWM 13
+#define CLK_IFR_UART0 14
+#define CLK_IFR_UART1 15
+#define CLK_IFR_GCE_26M 16
+#define CLK_IFR_CQ_DMA_FPC 17
+#define CLK_IFR_BTIF 18
+#define CLK_IFR_SPI0 19
+#define CLK_IFR_MSDC0 20
+#define CLK_IFR_MSDC1 21
+#define CLK_IFR_TRNG 22
+#define CLK_IFR_AUXADC 23
+#define CLK_IFR_CCIF1_AP 24
+#define CLK_IFR_CCIF1_MD 25
+#define CLK_IFR_AUXADC_MD 26
+#define CLK_IFR_AP_DMA 27
+#define CLK_IFR_DEVICE_APC 28
+#define CLK_IFR_CCIF_AP 29
+#define CLK_IFR_AUDIO 30
+#define CLK_IFR_CCIF_MD 31
+#define CLK_IFR_RG_PWM_FBCLK6 32
+#define CLK_IFR_DISP_PWM 33
+#define CLK_IFR_CLDMA_BCLK 34
+#define CLK_IFR_AUDIO_26M_BCLK 35
+#define CLK_IFR_SPI1 36
+#define CLK_IFR_I2C4 37
+#define CLK_IFR_SPI2 38
+#define CLK_IFR_SPI3 39
+#define CLK_IFR_I2C5 40
+#define CLK_IFR_I2C5_ARBITER 41
+#define CLK_IFR_I2C5_IMM 42
+#define CLK_IFR_I2C1_ARBITER 43
+#define CLK_IFR_I2C1_IMM 44
+#define CLK_IFR_I2C2_ARBITER 45
+#define CLK_IFR_I2C2_IMM 46
+#define CLK_IFR_SPI4 47
+#define CLK_IFR_SPI5 48
+#define CLK_IFR_CQ_DMA 49
+#define CLK_IFR_FAES_FDE 50
+#define CLK_IFR_MSDC0_SELF 51
+#define CLK_IFR_MSDC1_SELF 52
+#define CLK_IFR_I2C6 53
+#define CLK_IFR_AP_MSDC0 54
+#define CLK_IFR_MD_MSDC0 55
+#define CLK_IFR_MSDC0_SRC 56
+#define CLK_IFR_MSDC1_SRC 57
+#define CLK_IFR_AES_TOP0_BCLK 58
+#define CLK_IFR_MCU_PM_BCLK 59
+#define CLK_IFR_CCIF2_AP 60
+#define CLK_IFR_CCIF2_MD 61
+#define CLK_IFR_CCIF3_AP 62
+#define CLK_IFR_CCIF3_MD 63
+#define CLK_IFR_NR_CLK 64
+
+/* AUDIO */
+#define CLK_AUDIO_AFE 0
+#define CLK_AUDIO_22M 1
+#define CLK_AUDIO_APLL_TUNER 2
+#define CLK_AUDIO_ADC 3
+#define CLK_AUDIO_DAC 4
+#define CLK_AUDIO_DAC_PREDIS 5
+#define CLK_AUDIO_TML 6
+#define CLK_AUDIO_I2S1_BCLK 7
+#define CLK_AUDIO_I2S2_BCLK 8
+#define CLK_AUDIO_I2S3_BCLK 9
+#define CLK_AUDIO_I2S4_BCLK 10
+#define CLK_AUDIO_NR_CLK 11
+
+/* MIPI_RX_ANA_CSI0A */
+
+#define CLK_MIPI0A_CSR_CSI_EN_0A 0
+#define CLK_MIPI0A_NR_CLK 1
+
+/* MMSYS_CONFIG */
+
+#define CLK_MM_MDP_RDMA0 0
+#define CLK_MM_MDP_CCORR0 1
+#define CLK_MM_MDP_RSZ0 2
+#define CLK_MM_MDP_RSZ1 3
+#define CLK_MM_MDP_TDSHP0 4
+#define CLK_MM_MDP_WROT0 5
+#define CLK_MM_MDP_WDMA0 6
+#define CLK_MM_DISP_OVL0 7
+#define CLK_MM_DISP_OVL0_2L 8
+#define CLK_MM_DISP_RSZ0 9
+#define CLK_MM_DISP_RDMA0 10
+#define CLK_MM_DISP_WDMA0 11
+#define CLK_MM_DISP_COLOR0 12
+#define CLK_MM_DISP_CCORR0 13
+#define CLK_MM_DISP_AAL0 14
+#define CLK_MM_DISP_GAMMA0 15
+#define CLK_MM_DISP_DITHER0 16
+#define CLK_MM_DSI0 17
+#define CLK_MM_FAKE_ENG 18
+#define CLK_MM_SMI_COMMON 19
+#define CLK_MM_SMI_LARB0 20
+#define CLK_MM_SMI_COMM0 21
+#define CLK_MM_SMI_COMM1 22
+#define CLK_MM_CAM_MDP 23
+#define CLK_MM_SMI_IMG 24
+#define CLK_MM_SMI_CAM 25
+#define CLK_MM_IMG_DL_RELAY 26
+#define CLK_MM_IMG_DL_ASYNC_TOP 27
+#define CLK_MM_DIG_DSI 28
+#define CLK_MM_F26M_HRTWT 29
+#define CLK_MM_NR_CLK 30
+
+/* IMGSYS */
+
+#define CLK_IMG_LARB2 0
+#define CLK_IMG_DIP 1
+#define CLK_IMG_FDVT 2
+#define CLK_IMG_DPE 3
+#define CLK_IMG_RSC 4
+#define CLK_IMG_NR_CLK 5
+
+/* VENCSYS */
+
+#define CLK_VENC_SET0_LARB 0
+#define CLK_VENC_SET1_VENC 1
+#define CLK_VENC_SET2_JPGENC 2
+#define CLK_VENC_SET3_VDEC 3
+#define CLK_VENC_NR_CLK 4
+
+/* CAMSYS */
+
+#define CLK_CAM_LARB3 0
+#define CLK_CAM_DFP_VAD 1
+#define CLK_CAM 2
+#define CLK_CAMTG 3
+#define CLK_CAM_SENINF 4
+#define CLK_CAMSV0 5
+#define CLK_CAMSV1 6
+#define CLK_CAMSV2 7
+#define CLK_CAM_CCU 8
+#define CLK_CAM_NR_CLK 9
+
+#endif /* _DT_BINDINGS_CLK_MT6765_H */
diff --git a/include/dt-bindings/clock/qcom,gcc-msm8939.h b/include/dt-bindings/clock/qcom,gcc-msm8939.h
new file mode 100644
index 000000000000..0634467c4ce5
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,gcc-msm8939.h
@@ -0,0 +1,206 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright 2020 Linaro Limited
+ */
+
+#ifndef _DT_BINDINGS_CLK_MSM_GCC_8939_H
+#define _DT_BINDINGS_CLK_MSM_GCC_8939_H
+
+#define GPLL0 0
+#define GPLL0_VOTE 1
+#define BIMC_PLL 2
+#define BIMC_PLL_VOTE 3
+#define GPLL1 4
+#define GPLL1_VOTE 5
+#define GPLL2 6
+#define GPLL2_VOTE 7
+#define PCNOC_BFDCD_CLK_SRC 8
+#define SYSTEM_NOC_BFDCD_CLK_SRC 9
+#define CAMSS_AHB_CLK_SRC 10
+#define APSS_AHB_CLK_SRC 11
+#define CSI0_CLK_SRC 12
+#define CSI1_CLK_SRC 13
+#define GFX3D_CLK_SRC 14
+#define VFE0_CLK_SRC 15
+#define BLSP1_QUP1_I2C_APPS_CLK_SRC 16
+#define BLSP1_QUP1_SPI_APPS_CLK_SRC 17
+#define BLSP1_QUP2_I2C_APPS_CLK_SRC 18
+#define BLSP1_QUP2_SPI_APPS_CLK_SRC 19
+#define BLSP1_QUP3_I2C_APPS_CLK_SRC 20
+#define BLSP1_QUP3_SPI_APPS_CLK_SRC 21
+#define BLSP1_QUP4_I2C_APPS_CLK_SRC 22
+#define BLSP1_QUP4_SPI_APPS_CLK_SRC 23
+#define BLSP1_QUP5_I2C_APPS_CLK_SRC 24
+#define BLSP1_QUP5_SPI_APPS_CLK_SRC 25
+#define BLSP1_QUP6_I2C_APPS_CLK_SRC 26
+#define BLSP1_QUP6_SPI_APPS_CLK_SRC 27
+#define BLSP1_UART1_APPS_CLK_SRC 28
+#define BLSP1_UART2_APPS_CLK_SRC 29
+#define CCI_CLK_SRC 30
+#define CAMSS_GP0_CLK_SRC 31
+#define CAMSS_GP1_CLK_SRC 32
+#define JPEG0_CLK_SRC 33
+#define MCLK0_CLK_SRC 34
+#define MCLK1_CLK_SRC 35
+#define CSI0PHYTIMER_CLK_SRC 36
+#define CSI1PHYTIMER_CLK_SRC 37
+#define CPP_CLK_SRC 38
+#define CRYPTO_CLK_SRC 39
+#define GP1_CLK_SRC 40
+#define GP2_CLK_SRC 41
+#define GP3_CLK_SRC 42
+#define BYTE0_CLK_SRC 43
+#define ESC0_CLK_SRC 44
+#define MDP_CLK_SRC 45
+#define PCLK0_CLK_SRC 46
+#define VSYNC_CLK_SRC 47
+#define PDM2_CLK_SRC 48
+#define SDCC1_APPS_CLK_SRC 49
+#define SDCC2_APPS_CLK_SRC 50
+#define APSS_TCU_CLK_SRC 51
+#define USB_HS_SYSTEM_CLK_SRC 52
+#define VCODEC0_CLK_SRC 53
+#define GCC_BLSP1_AHB_CLK 54
+#define GCC_BLSP1_SLEEP_CLK 55
+#define GCC_BLSP1_QUP1_I2C_APPS_CLK 56
+#define GCC_BLSP1_QUP1_SPI_APPS_CLK 57
+#define GCC_BLSP1_QUP2_I2C_APPS_CLK 58
+#define GCC_BLSP1_QUP2_SPI_APPS_CLK 59
+#define GCC_BLSP1_QUP3_I2C_APPS_CLK 60
+#define GCC_BLSP1_QUP3_SPI_APPS_CLK 61
+#define GCC_BLSP1_QUP4_I2C_APPS_CLK 62
+#define GCC_BLSP1_QUP4_SPI_APPS_CLK 63
+#define GCC_BLSP1_QUP5_I2C_APPS_CLK 64
+#define GCC_BLSP1_QUP5_SPI_APPS_CLK 65
+#define GCC_BLSP1_QUP6_I2C_APPS_CLK 66
+#define GCC_BLSP1_QUP6_SPI_APPS_CLK 67
+#define GCC_BLSP1_UART1_APPS_CLK 68
+#define GCC_BLSP1_UART2_APPS_CLK 69
+#define GCC_BOOT_ROM_AHB_CLK 70
+#define GCC_CAMSS_CCI_AHB_CLK 71
+#define GCC_CAMSS_CCI_CLK 72
+#define GCC_CAMSS_CSI0_AHB_CLK 73
+#define GCC_CAMSS_CSI0_CLK 74
+#define GCC_CAMSS_CSI0PHY_CLK 75
+#define GCC_CAMSS_CSI0PIX_CLK 76
+#define GCC_CAMSS_CSI0RDI_CLK 77
+#define GCC_CAMSS_CSI1_AHB_CLK 78
+#define GCC_CAMSS_CSI1_CLK 79
+#define GCC_CAMSS_CSI1PHY_CLK 80
+#define GCC_CAMSS_CSI1PIX_CLK 81
+#define GCC_CAMSS_CSI1RDI_CLK 82
+#define GCC_CAMSS_CSI_VFE0_CLK 83
+#define GCC_CAMSS_GP0_CLK 84
+#define GCC_CAMSS_GP1_CLK 85
+#define GCC_CAMSS_ISPIF_AHB_CLK 86
+#define GCC_CAMSS_JPEG0_CLK 87
+#define GCC_CAMSS_JPEG_AHB_CLK 88
+#define GCC_CAMSS_JPEG_AXI_CLK 89
+#define GCC_CAMSS_MCLK0_CLK 90
+#define GCC_CAMSS_MCLK1_CLK 91
+#define GCC_CAMSS_MICRO_AHB_CLK 92
+#define GCC_CAMSS_CSI0PHYTIMER_CLK 93
+#define GCC_CAMSS_CSI1PHYTIMER_CLK 94
+#define GCC_CAMSS_AHB_CLK 95
+#define GCC_CAMSS_TOP_AHB_CLK 96
+#define GCC_CAMSS_CPP_AHB_CLK 97
+#define GCC_CAMSS_CPP_CLK 98
+#define GCC_CAMSS_VFE0_CLK 99
+#define GCC_CAMSS_VFE_AHB_CLK 100
+#define GCC_CAMSS_VFE_AXI_CLK 101
+#define GCC_CRYPTO_AHB_CLK 102
+#define GCC_CRYPTO_AXI_CLK 103
+#define GCC_CRYPTO_CLK 104
+#define GCC_OXILI_GMEM_CLK 105
+#define GCC_GP1_CLK 106
+#define GCC_GP2_CLK 107
+#define GCC_GP3_CLK 108
+#define GCC_MDSS_AHB_CLK 109
+#define GCC_MDSS_AXI_CLK 110
+#define GCC_MDSS_BYTE0_CLK 111
+#define GCC_MDSS_ESC0_CLK 112
+#define GCC_MDSS_MDP_CLK 113
+#define GCC_MDSS_PCLK0_CLK 114
+#define GCC_MDSS_VSYNC_CLK 115
+#define GCC_MSS_CFG_AHB_CLK 116
+#define GCC_OXILI_AHB_CLK 117
+#define GCC_OXILI_GFX3D_CLK 118
+#define GCC_PDM2_CLK 119
+#define GCC_PDM_AHB_CLK 120
+#define GCC_PRNG_AHB_CLK 121
+#define GCC_SDCC1_AHB_CLK 122
+#define GCC_SDCC1_APPS_CLK 123
+#define GCC_SDCC2_AHB_CLK 124
+#define GCC_SDCC2_APPS_CLK 125
+#define GCC_GTCU_AHB_CLK 126
+#define GCC_JPEG_TBU_CLK 127
+#define GCC_MDP_TBU_CLK 128
+#define GCC_SMMU_CFG_CLK 129
+#define GCC_VENUS_TBU_CLK 130
+#define GCC_VFE_TBU_CLK 131
+#define GCC_USB2A_PHY_SLEEP_CLK 132
+#define GCC_USB_HS_AHB_CLK 133
+#define GCC_USB_HS_SYSTEM_CLK 134
+#define GCC_VENUS0_AHB_CLK 135
+#define GCC_VENUS0_AXI_CLK 136
+#define GCC_VENUS0_VCODEC0_CLK 137
+#define BIMC_DDR_CLK_SRC 138
+#define GCC_APSS_TCU_CLK 139
+#define GCC_GFX_TCU_CLK 140
+#define BIMC_GPU_CLK_SRC 141
+#define GCC_BIMC_GFX_CLK 142
+#define GCC_BIMC_GPU_CLK 143
+#define ULTAUDIO_LPAIF_PRI_I2S_CLK_SRC 144
+#define ULTAUDIO_LPAIF_SEC_I2S_CLK_SRC 145
+#define ULTAUDIO_LPAIF_AUX_I2S_CLK_SRC 146
+#define ULTAUDIO_XO_CLK_SRC 147
+#define ULTAUDIO_AHBFABRIC_CLK_SRC 148
+#define CODEC_DIGCODEC_CLK_SRC 149
+#define GCC_ULTAUDIO_PCNOC_MPORT_CLK 150
+#define GCC_ULTAUDIO_PCNOC_SWAY_CLK 151
+#define GCC_ULTAUDIO_AVSYNC_XO_CLK 152
+#define GCC_ULTAUDIO_STC_XO_CLK 153
+#define GCC_ULTAUDIO_AHBFABRIC_IXFABRIC_CLK 154
+#define GCC_ULTAUDIO_AHBFABRIC_IXFABRIC_LPM_CLK 155
+#define GCC_ULTAUDIO_LPAIF_PRI_I2S_CLK 156
+#define GCC_ULTAUDIO_LPAIF_SEC_I2S_CLK 157
+#define GCC_ULTAUDIO_LPAIF_AUX_I2S_CLK 158
+#define GCC_CODEC_DIGCODEC_CLK 159
+#define GCC_MSS_Q6_BIMC_AXI_CLK 160
+#define GPLL3 161
+#define GPLL3_VOTE 162
+#define GPLL4 163
+#define GPLL4_VOTE 164
+#define GPLL5 165
+#define GPLL5_VOTE 166
+#define GPLL6 167
+#define GPLL6_VOTE 168
+#define BYTE1_CLK_SRC 169
+#define GCC_MDSS_BYTE1_CLK 170
+#define ESC1_CLK_SRC 171
+#define GCC_MDSS_ESC1_CLK 172
+#define PCLK1_CLK_SRC 173
+#define GCC_MDSS_PCLK1_CLK 174
+#define GCC_GFX_TBU_CLK 175
+#define GCC_CPP_TBU_CLK 176
+#define GCC_MDP_RT_TBU_CLK 177
+#define USB_FS_SYSTEM_CLK_SRC 178
+#define USB_FS_IC_CLK_SRC 179
+#define GCC_USB_FS_AHB_CLK 180
+#define GCC_USB_FS_IC_CLK 181
+#define GCC_USB_FS_SYSTEM_CLK 182
+#define GCC_VENUS0_CORE0_VCODEC0_CLK 183
+#define GCC_VENUS0_CORE1_VCODEC0_CLK 184
+#define GCC_OXILI_TIMER_CLK 185
+
+/* Indexes for GDSCs */
+#define BIMC_GDSC 0
+#define VENUS_GDSC 1
+#define MDSS_GDSC 2
+#define JPEG_GDSC 3
+#define VFE_GDSC 4
+#define OXILI_GDSC 5
+#define VENUS_CORE0_GDSC 6
+#define VENUS_CORE1_GDSC 7
+
+#endif
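
These indices are consumed both by device trees (as the cell in a clocks = <&gcc ...> phandle) and by the provider driver, which exposes one clk per index. A minimal provider-side sketch of that pattern, assuming a hypothetical onecell table (gcc_data) already populated with clk_hw pointers; this illustrates the convention, not the actual msm8939 driver:

#include <linux/clk-provider.h>
#include <linux/of.h>

/* Hypothetical onecell table; .num would be GCC_OXILI_TIMER_CLK + 1 and
 * .hws[GCC_SDCC1_APPS_CLK] etc. filled in before registration. */
static struct clk_hw_onecell_data *gcc_data;

static int gcc_msm8939_register(struct device_node *np)
{
	/* A clocks = <&gcc GCC_SDCC1_APPS_CLK> consumer resolves to
	 * gcc_data->hws[GCC_SDCC1_APPS_CLK] through this provider. */
	return of_clk_add_hw_provider(np, of_clk_hw_onecell_get, gcc_data);
}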
diff --git a/include/dt-bindings/clock/qcom,gcc-msm8998.h b/include/dt-bindings/clock/qcom,gcc-msm8998.h
index 63e02dc32a0b..6a73a174f049 100644
--- a/include/dt-bindings/clock/qcom,gcc-msm8998.h
+++ b/include/dt-bindings/clock/qcom,gcc-msm8998.h
@@ -183,6 +183,7 @@
#define GCC_MSS_SNOC_AXI_CLK 174
#define GCC_MSS_MNOC_BIMC_AXI_CLK 175
#define GCC_BIMC_GFX_CLK 176
+#define UFS_UNIPRO_CORE_CLK_SRC 177
#define PCIE_0_GDSC 0
#define UFS_GDSC 1
diff --git a/include/dt-bindings/clock/qcom,gcc-sc7180.h b/include/dt-bindings/clock/qcom,gcc-sc7180.h
index 1258fd05db68..992b67b7e5e4 100644
--- a/include/dt-bindings/clock/qcom,gcc-sc7180.h
+++ b/include/dt-bindings/clock/qcom,gcc-sc7180.h
@@ -137,6 +137,7 @@
#define GCC_MSS_NAV_AXI_CLK 127
#define GCC_MSS_Q6_MEMNOC_AXI_CLK 128
#define GCC_MSS_SNOC_AXI_CLK 129
+#define GCC_SEC_CTRL_CLK_SRC 130
/* GCC resets */
#define GCC_QUSB2PHY_PRIM_BCR 0
diff --git a/include/dt-bindings/clock/sprd,sc9863a-clk.h b/include/dt-bindings/clock/sprd,sc9863a-clk.h
index 901ba59676c2..4e030421641f 100644
--- a/include/dt-bindings/clock/sprd,sc9863a-clk.h
+++ b/include/dt-bindings/clock/sprd,sc9863a-clk.h
@@ -308,6 +308,11 @@
#define CLK_MCPHY_CFG_EB 14
#define CLK_MM_GATE_NUM (CLK_MCPHY_CFG_EB + 1)
+#define CLK_MIPI_CSI 0
+#define CLK_MIPI_CSI_S 1
+#define CLK_MIPI_CSI_M 2
+#define CLK_MM_CLK_NUM (CLK_MIPI_CSI_M + 1)
+
#define CLK_SIM0_EB 0
#define CLK_IIS0_EB 1
#define CLK_IIS1_EB 2
diff --git a/include/dt-bindings/clock/tegra210-car.h b/include/dt-bindings/clock/tegra210-car.h
index ae62cd72da67..ab8b8a737a0a 100644
--- a/include/dt-bindings/clock/tegra210-car.h
+++ b/include/dt-bindings/clock/tegra210-car.h
@@ -351,8 +351,8 @@
#define TEGRA210_CLK_PLL_P_OUT_XUSB 317
#define TEGRA210_CLK_XUSB_SSP_SRC 318
#define TEGRA210_CLK_PLL_RE_OUT1 319
-/* 320 */
-/* 321 */
+#define TEGRA210_CLK_PLL_MB_UD 320
+#define TEGRA210_CLK_PLL_P_UD 321
#define TEGRA210_CLK_ISP 322
#define TEGRA210_CLK_PLL_A_OUT_ADSP 323
#define TEGRA210_CLK_PLL_A_OUT0_OUT_ADSP 324
diff --git a/include/dt-bindings/clock/x1000-cgu.h b/include/dt-bindings/clock/x1000-cgu.h
index bbaebaf7adb9..0367c8c02e16 100644
--- a/include/dt-bindings/clock/x1000-cgu.h
+++ b/include/dt-bindings/clock/x1000-cgu.h
@@ -12,33 +12,41 @@
#ifndef __DT_BINDINGS_CLOCK_X1000_CGU_H__
#define __DT_BINDINGS_CLOCK_X1000_CGU_H__
-#define X1000_CLK_EXCLK 0
-#define X1000_CLK_RTCLK 1
-#define X1000_CLK_APLL 2
-#define X1000_CLK_MPLL 3
-#define X1000_CLK_SCLKA 4
-#define X1000_CLK_CPUMUX 5
-#define X1000_CLK_CPU 6
-#define X1000_CLK_L2CACHE 7
-#define X1000_CLK_AHB0 8
-#define X1000_CLK_AHB2PMUX 9
-#define X1000_CLK_AHB2 10
-#define X1000_CLK_PCLK 11
-#define X1000_CLK_DDR 12
-#define X1000_CLK_MAC 13
-#define X1000_CLK_MSCMUX 14
-#define X1000_CLK_MSC0 15
-#define X1000_CLK_MSC1 16
-#define X1000_CLK_SSIPLL 17
-#define X1000_CLK_SSIMUX 18
-#define X1000_CLK_SFC 19
-#define X1000_CLK_I2C0 20
-#define X1000_CLK_I2C1 21
-#define X1000_CLK_I2C2 22
-#define X1000_CLK_UART0 23
-#define X1000_CLK_UART1 24
-#define X1000_CLK_UART2 25
-#define X1000_CLK_SSI 26
-#define X1000_CLK_PDMA 27
+#define X1000_CLK_EXCLK 0
+#define X1000_CLK_RTCLK 1
+#define X1000_CLK_APLL 2
+#define X1000_CLK_MPLL 3
+#define X1000_CLK_OTGPHY 4
+#define X1000_CLK_SCLKA 5
+#define X1000_CLK_CPUMUX 6
+#define X1000_CLK_CPU 7
+#define X1000_CLK_L2CACHE 8
+#define X1000_CLK_AHB0 9
+#define X1000_CLK_AHB2PMUX 10
+#define X1000_CLK_AHB2 11
+#define X1000_CLK_PCLK 12
+#define X1000_CLK_DDR 13
+#define X1000_CLK_MAC 14
+#define X1000_CLK_LCD 15
+#define X1000_CLK_MSCMUX 16
+#define X1000_CLK_MSC0 17
+#define X1000_CLK_MSC1 18
+#define X1000_CLK_OTG 19
+#define X1000_CLK_SSIPLL 20
+#define X1000_CLK_SSIPLL_DIV2 21
+#define X1000_CLK_SSIMUX 22
+#define X1000_CLK_EMC 23
+#define X1000_CLK_EFUSE 24
+#define X1000_CLK_SFC 25
+#define X1000_CLK_I2C0 26
+#define X1000_CLK_I2C1 27
+#define X1000_CLK_I2C2 28
+#define X1000_CLK_UART0 29
+#define X1000_CLK_UART1 30
+#define X1000_CLK_UART2 31
+#define X1000_CLK_TCU 32
+#define X1000_CLK_SSI 33
+#define X1000_CLK_OST 34
+#define X1000_CLK_PDMA 35
#endif /* __DT_BINDINGS_CLOCK_X1000_CGU_H__ */
diff --git a/include/dt-bindings/clock/x1830-cgu.h b/include/dt-bindings/clock/x1830-cgu.h
new file mode 100644
index 000000000000..801e1d09c881
--- /dev/null
+++ b/include/dt-bindings/clock/x1830-cgu.h
@@ -0,0 +1,55 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * This header provides clock numbers for the ingenic,x1830-cgu DT binding.
+ *
+ * They are roughly ordered as:
+ * - external clocks
+ * - PLLs
+ * - muxes/dividers in the order they appear in the x1830 programmer's manual
+ * - gates in order of their bit in the CLKGR* registers
+ */
+
+#ifndef __DT_BINDINGS_CLOCK_X1830_CGU_H__
+#define __DT_BINDINGS_CLOCK_X1830_CGU_H__
+
+#define X1830_CLK_EXCLK 0
+#define X1830_CLK_RTCLK 1
+#define X1830_CLK_APLL 2
+#define X1830_CLK_MPLL 3
+#define X1830_CLK_EPLL 4
+#define X1830_CLK_VPLL 5
+#define X1830_CLK_OTGPHY 6
+#define X1830_CLK_SCLKA 7
+#define X1830_CLK_CPUMUX 8
+#define X1830_CLK_CPU 9
+#define X1830_CLK_L2CACHE 10
+#define X1830_CLK_AHB0 11
+#define X1830_CLK_AHB2PMUX 12
+#define X1830_CLK_AHB2 13
+#define X1830_CLK_PCLK 14
+#define X1830_CLK_DDR 15
+#define X1830_CLK_MAC 16
+#define X1830_CLK_LCD 17
+#define X1830_CLK_MSCMUX 18
+#define X1830_CLK_MSC0 19
+#define X1830_CLK_MSC1 20
+#define X1830_CLK_SSIPLL 21
+#define X1830_CLK_SSIPLL_DIV2 22
+#define X1830_CLK_SSIMUX 23
+#define X1830_CLK_EMC 24
+#define X1830_CLK_EFUSE 25
+#define X1830_CLK_OTG 26
+#define X1830_CLK_SSI0 27
+#define X1830_CLK_SMB0 28
+#define X1830_CLK_SMB1 29
+#define X1830_CLK_SMB2 30
+#define X1830_CLK_UART0 31
+#define X1830_CLK_UART1 32
+#define X1830_CLK_SSI1 33
+#define X1830_CLK_SFC 34
+#define X1830_CLK_PDMA 35
+#define X1830_CLK_TCU 36
+#define X1830_CLK_DTRNG 37
+#define X1830_CLK_OST 38
+
+#endif /* __DT_BINDINGS_CLOCK_X1830_CGU_H__ */
diff --git a/include/dt-bindings/mailbox/qcom-ipcc.h b/include/dt-bindings/mailbox/qcom-ipcc.h
new file mode 100644
index 000000000000..4c23eefed5f3
--- /dev/null
+++ b/include/dt-bindings/mailbox/qcom-ipcc.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */
+/*
+ * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef __DT_BINDINGS_MAILBOX_IPCC_H
+#define __DT_BINDINGS_MAILBOX_IPCC_H
+
+/* Signal IDs for MPROC protocol */
+#define IPCC_MPROC_SIGNAL_GLINK_QMP 0
+#define IPCC_MPROC_SIGNAL_SMP2P 2
+#define IPCC_MPROC_SIGNAL_PING 3
+
+/* Client IDs */
+#define IPCC_CLIENT_AOP 0
+#define IPCC_CLIENT_TZ 1
+#define IPCC_CLIENT_MPSS 2
+#define IPCC_CLIENT_LPASS 3
+#define IPCC_CLIENT_SLPI 4
+#define IPCC_CLIENT_SDC 5
+#define IPCC_CLIENT_CDSP 6
+#define IPCC_CLIENT_NPU 7
+#define IPCC_CLIENT_APSS 8
+#define IPCC_CLIENT_GPU 9
+#define IPCC_CLIENT_CVP 10
+#define IPCC_CLIENT_CAM 11
+#define IPCC_CLIENT_VPU 12
+#define IPCC_CLIENT_PCIE0 13
+#define IPCC_CLIENT_PCIE1 14
+#define IPCC_CLIENT_PCIE2 15
+#define IPCC_CLIENT_SPSS 16
+
+#endif
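
In this binding the constants above form the two cells of a mailbox specifier, <client, signal>. A sketch of how a controller driver might decode them in its of_xlate hook; the flat channel layout and IPCC_NUM_SIGNALS are hypothetical, purely for illustration:

#include <linux/err.h>
#include <linux/mailbox_controller.h>
#include <linux/of.h>

#define IPCC_NUM_SIGNALS	4	/* hypothetical per-client signal count */

static struct mbox_chan *ipcc_mbox_xlate(struct mbox_controller *mbox,
					 const struct of_phandle_args *ph)
{
	unsigned int client = ph->args[0];	/* e.g. IPCC_CLIENT_MPSS */
	unsigned int signal = ph->args[1];	/* e.g. IPCC_MPROC_SIGNAL_GLINK_QMP */

	if (ph->args_count != 2 || client > IPCC_CLIENT_SPSS ||
	    signal >= IPCC_NUM_SIGNALS)
		return ERR_PTR(-EINVAL);

	/* one channel per (client, signal) pair in a flat array */
	return &mbox->chans[client * IPCC_NUM_SIGNALS + signal];
}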
diff --git a/include/dt-bindings/power/marvell,mmp2.h b/include/dt-bindings/power/marvell,mmp2.h
new file mode 100644
index 000000000000..c53d2b3e1057
--- /dev/null
+++ b/include/dt-bindings/power/marvell,mmp2.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __DTS_MARVELL_MMP2_POWER_H
+#define __DTS_MARVELL_MMP2_POWER_H
+
+#define MMP2_POWER_DOMAIN_GPU 0
+#define MMP2_POWER_DOMAIN_AUDIO 1
+#define MMP3_POWER_DOMAIN_CAMERA 2
+
+#define MMP2_NR_POWER_DOMAINS 3
+
+#endif
diff --git a/include/dt-bindings/reset/bt1-ccu.h b/include/dt-bindings/reset/bt1-ccu.h
new file mode 100644
index 000000000000..3578e83026bc
--- /dev/null
+++ b/include/dt-bindings/reset/bt1-ccu.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2020 BAIKAL ELECTRONICS, JSC
+ *
+ * Baikal-T1 CCU reset indices
+ */
+#ifndef __DT_BINDINGS_RESET_BT1_CCU_H
+#define __DT_BINDINGS_RESET_BT1_CCU_H
+
+#define CCU_AXI_MAIN_RST 0
+#define CCU_AXI_DDR_RST 1
+#define CCU_AXI_SATA_RST 2
+#define CCU_AXI_GMAC0_RST 3
+#define CCU_AXI_GMAC1_RST 4
+#define CCU_AXI_XGMAC_RST 5
+#define CCU_AXI_PCIE_M_RST 6
+#define CCU_AXI_PCIE_S_RST 7
+#define CCU_AXI_USB_RST 8
+#define CCU_AXI_HWA_RST 9
+#define CCU_AXI_SRAM_RST 10
+
+#define CCU_SYS_SATA_REF_RST 0
+#define CCU_SYS_APB_RST 1
+
+#endif /* __DT_BINDINGS_RESET_BT1_CCU_H */
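A consumer references one of these lines through the generic reset API; a minimal sketch, assuming a hypothetical peripheral whose resets/reset-names properties map "axi" to <&ccu_axi CCU_AXI_SATA_RST>:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/reset.h>

static int my_dev_hw_reset(struct device *dev)
{
	struct reset_control *rst;

	/* "axi" is a hypothetical reset-names entry for CCU_AXI_SATA_RST */
	rst = devm_reset_control_get_exclusive(dev, "axi");
	if (IS_ERR(rst))
		return PTR_ERR(rst);

	return reset_control_reset(rst);	/* full assert/deassert cycle */
}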
diff --git a/include/dt-bindings/reset/qcom,gcc-msm8939.h b/include/dt-bindings/reset/qcom,gcc-msm8939.h
new file mode 100644
index 000000000000..fa41ffeae7a2
--- /dev/null
+++ b/include/dt-bindings/reset/qcom,gcc-msm8939.h
@@ -0,0 +1,110 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright 2020 Linaro Limited
+ */
+
+#ifndef _DT_BINDINGS_RESET_MSM_GCC_8939_H
+#define _DT_BINDINGS_RESET_MSM_GCC_8939_H
+
+#define GCC_BLSP1_BCR 0
+#define GCC_BLSP1_QUP1_BCR 1
+#define GCC_BLSP1_UART1_BCR 2
+#define GCC_BLSP1_QUP2_BCR 3
+#define GCC_BLSP1_UART2_BCR 4
+#define GCC_BLSP1_QUP3_BCR 5
+#define GCC_BLSP1_QUP4_BCR 6
+#define GCC_BLSP1_QUP5_BCR 7
+#define GCC_BLSP1_QUP6_BCR 8
+#define GCC_IMEM_BCR 9
+#define GCC_SMMU_BCR 10
+#define GCC_APSS_TCU_BCR 11
+#define GCC_SMMU_XPU_BCR 12
+#define GCC_PCNOC_TBU_BCR 13
+#define GCC_PRNG_BCR 14
+#define GCC_BOOT_ROM_BCR 15
+#define GCC_CRYPTO_BCR 16
+#define GCC_SEC_CTRL_BCR 17
+#define GCC_AUDIO_CORE_BCR 18
+#define GCC_ULT_AUDIO_BCR 19
+#define GCC_DEHR_BCR 20
+#define GCC_SYSTEM_NOC_BCR 21
+#define GCC_PCNOC_BCR 22
+#define GCC_TCSR_BCR 23
+#define GCC_QDSS_BCR 24
+#define GCC_DCD_BCR 25
+#define GCC_MSG_RAM_BCR 26
+#define GCC_MPM_BCR 27
+#define GCC_SPMI_BCR 28
+#define GCC_SPDM_BCR 29
+#define GCC_MM_SPDM_BCR 30
+#define GCC_BIMC_BCR 31
+#define GCC_RBCPR_BCR 32
+#define GCC_TLMM_BCR 33
+#define GCC_USB_HS_BCR 34
+#define GCC_USB2A_PHY_BCR 35
+#define GCC_SDCC1_BCR 36
+#define GCC_SDCC2_BCR 37
+#define GCC_PDM_BCR 38
+#define GCC_SNOC_BUS_TIMEOUT0_BCR 39
+#define GCC_PCNOC_BUS_TIMEOUT0_BCR 40
+#define GCC_PCNOC_BUS_TIMEOUT1_BCR 41
+#define GCC_PCNOC_BUS_TIMEOUT2_BCR 42
+#define GCC_PCNOC_BUS_TIMEOUT3_BCR 43
+#define GCC_PCNOC_BUS_TIMEOUT4_BCR 44
+#define GCC_PCNOC_BUS_TIMEOUT5_BCR 45
+#define GCC_PCNOC_BUS_TIMEOUT6_BCR 46
+#define GCC_PCNOC_BUS_TIMEOUT7_BCR 47
+#define GCC_PCNOC_BUS_TIMEOUT8_BCR 48
+#define GCC_PCNOC_BUS_TIMEOUT9_BCR 49
+#define GCC_MMSS_BCR 50
+#define GCC_VENUS0_BCR 51
+#define GCC_MDSS_BCR 52
+#define GCC_CAMSS_PHY0_BCR 53
+#define GCC_CAMSS_CSI0_BCR 54
+#define GCC_CAMSS_CSI0PHY_BCR 55
+#define GCC_CAMSS_CSI0RDI_BCR 56
+#define GCC_CAMSS_CSI0PIX_BCR 57
+#define GCC_CAMSS_PHY1_BCR 58
+#define GCC_CAMSS_CSI1_BCR 59
+#define GCC_CAMSS_CSI1PHY_BCR 60
+#define GCC_CAMSS_CSI1RDI_BCR 61
+#define GCC_CAMSS_CSI1PIX_BCR 62
+#define GCC_CAMSS_ISPIF_BCR 63
+#define GCC_CAMSS_CCI_BCR 64
+#define GCC_CAMSS_MCLK0_BCR 65
+#define GCC_CAMSS_MCLK1_BCR 66
+#define GCC_CAMSS_GP0_BCR 67
+#define GCC_CAMSS_GP1_BCR 68
+#define GCC_CAMSS_TOP_BCR 69
+#define GCC_CAMSS_MICRO_BCR 70
+#define GCC_CAMSS_JPEG_BCR 71
+#define GCC_CAMSS_VFE_BCR 72
+#define GCC_CAMSS_CSI_VFE0_BCR 73
+#define GCC_OXILI_BCR 74
+#define GCC_GMEM_BCR 75
+#define GCC_CAMSS_AHB_BCR 76
+#define GCC_MDP_TBU_BCR 77
+#define GCC_GFX_TBU_BCR 78
+#define GCC_GFX_TCU_BCR 79
+#define GCC_MSS_TBU_AXI_BCR 80
+#define GCC_MSS_TBU_GSS_AXI_BCR 81
+#define GCC_MSS_TBU_Q6_AXI_BCR 82
+#define GCC_GTCU_AHB_BCR 83
+#define GCC_SMMU_CFG_BCR 84
+#define GCC_VFE_TBU_BCR 85
+#define GCC_VENUS_TBU_BCR 86
+#define GCC_JPEG_TBU_BCR 87
+#define GCC_PRONTO_TBU_BCR 88
+#define GCC_SMMU_CATS_BCR 89
+#define GCC_BLSP1_UART3_BCR 90
+#define GCC_CAMSS_CSI2_BCR 91
+#define GCC_CAMSS_CSI2PHY_BCR 92
+#define GCC_CAMSS_CSI2RDI_BCR 93
+#define GCC_CAMSS_CSI2PIX_BCR 94
+#define GCC_USB_FS_BCR 95
+#define GCC_BLSP1_QUP4_SPI_APPS_CBCR 96
+#define GCC_CAMSS_MCLK2_BCR 97
+#define GCC_CPP_TBU_BCR 98
+#define GCC_MDP_RT_TBU_BCR 99
+
+#endif
diff --git a/include/keys/encrypted-type.h b/include/keys/encrypted-type.h
index 9e9ccb20d586..38afb341c3f2 100644
--- a/include/keys/encrypted-type.h
+++ b/include/keys/encrypted-type.h
@@ -27,7 +27,7 @@ struct encrypted_key_payload {
unsigned short payload_datalen; /* payload data length */
unsigned short encrypted_key_format; /* encrypted key format */
u8 *decrypted_data; /* decrypted data */
- u8 payload_data[0]; /* payload data + datablob + hmac */
+ u8 payload_data[]; /* payload data + datablob + hmac */
};
extern struct key_type key_type_encrypted;
diff --git a/include/keys/rxrpc-type.h b/include/keys/rxrpc-type.h
index a183278c3e9e..2b0b15a71228 100644
--- a/include/keys/rxrpc-type.h
+++ b/include/keys/rxrpc-type.h
@@ -28,7 +28,7 @@ struct rxkad_key {
u8 primary_flag; /* T if key for primary cell for this user */
u16 ticket_len; /* length of ticket[] */
u8 session_key[8]; /* DES session key */
- u8 ticket[0]; /* the encrypted ticket */
+ u8 ticket[]; /* the encrypted ticket */
};
/*
@@ -100,7 +100,7 @@ struct rxrpc_key_data_v1 {
u32 expiry; /* time_t */
u32 kvno;
u8 session_key[8];
- u8 ticket[0];
+ u8 ticket[];
};
/*
diff --git a/include/kunit/test.h b/include/kunit/test.h
index 9b0c46a6ca1f..47e61e1d5337 100644
--- a/include/kunit/test.h
+++ b/include/kunit/test.h
@@ -175,7 +175,7 @@ struct kunit_suite {
void (*exit)(struct kunit *test);
struct kunit_case *test_cases;
- /* private - internal use only */
+ /* private: internal use only */
struct dentry *debugfs;
char *log;
};
@@ -232,12 +232,12 @@ void __kunit_test_suites_exit(struct kunit_suite **suites);
* kunit_test_suites() - used to register one or more &struct kunit_suite
* with KUnit.
*
- * @suites: a statically allocated list of &struct kunit_suite.
+ * @suites_list...: a statically allocated list of &struct kunit_suite.
*
- * Registers @suites with the test framework. See &struct kunit_suite for
+ * Registers @suites_list with the test framework. See &struct kunit_suite for
* more information.
*
- * When builtin, KUnit tests are all run as late_initcalls; this means
+ * When builtin, KUnit tests are all run as late_initcalls; this means
* that they cannot test anything where tests must run at a different init
* phase. One significant restriction resulting from this is that KUnit
* cannot reliably test anything that is initialized in the late_init phase;
@@ -253,8 +253,8 @@ void __kunit_test_suites_exit(struct kunit_suite **suites);
* tests from the same place, and at the very least to do so after
* everything else is definitely initialized.
*/
-#define kunit_test_suites(...) \
- static struct kunit_suite *suites[] = { __VA_ARGS__, NULL}; \
+#define kunit_test_suites(suites_list...) \
+ static struct kunit_suite *suites[] = {suites_list, NULL}; \
static int kunit_test_suites_init(void) \
{ \
return __kunit_test_suites_init(suites); \
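
For reference, a minimal self-contained suite registered through this macro looks like the following; the "example" suite and test body are illustrative only:

#include <kunit/test.h>

static void example_add_test(struct kunit *test)
{
	KUNIT_EXPECT_EQ(test, 3, 1 + 2);	/* fails the test on mismatch */
}

static struct kunit_case example_test_cases[] = {
	KUNIT_CASE(example_add_test),
	{}					/* sentinel */
};

static struct kunit_suite example_test_suite = {
	.name = "example",
	.test_cases = example_test_cases,
};

kunit_test_suites(&example_test_suite);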
diff --git a/include/linux/atomic-arch-fallback.h b/include/linux/atomic-arch-fallback.h
new file mode 100644
index 000000000000..bcb6aa27cfa6
--- /dev/null
+++ b/include/linux/atomic-arch-fallback.h
@@ -0,0 +1,2291 @@
+// SPDX-License-Identifier: GPL-2.0
+
+// Generated by scripts/atomic/gen-atomic-fallback.sh
+// DO NOT MODIFY THIS FILE DIRECTLY
+
+#ifndef _LINUX_ATOMIC_FALLBACK_H
+#define _LINUX_ATOMIC_FALLBACK_H
+
+#include <linux/compiler.h>
+
+#ifndef arch_xchg_relaxed
+#define arch_xchg_relaxed arch_xchg
+#define arch_xchg_acquire arch_xchg
+#define arch_xchg_release arch_xchg
+#else /* arch_xchg_relaxed */
+
+#ifndef arch_xchg_acquire
+#define arch_xchg_acquire(...) \
+ __atomic_op_acquire(arch_xchg, __VA_ARGS__)
+#endif
+
+#ifndef arch_xchg_release
+#define arch_xchg_release(...) \
+ __atomic_op_release(arch_xchg, __VA_ARGS__)
+#endif
+
+#ifndef arch_xchg
+#define arch_xchg(...) \
+ __atomic_op_fence(arch_xchg, __VA_ARGS__)
+#endif
+
+#endif /* arch_xchg_relaxed */
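
/*
 * The block above shows the file's uniform fallback pattern: when an
 * architecture supplies only the _relaxed form of an operation, the
 * acquire/release/full-fence variants are synthesized around it. The
 * __atomic_op_*() helpers live in include/linux/atomic.h;
 * __atomic_op_acquire(), for instance, expands along these lines:
 */
#define __atomic_op_acquire(op, args...)				\
({									\
	typeof(op##_relaxed(args)) __ret = op##_relaxed(args);		\
	__atomic_acquire_fence();					\
	__ret;								\
})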
+
+#ifndef arch_cmpxchg_relaxed
+#define arch_cmpxchg_relaxed arch_cmpxchg
+#define arch_cmpxchg_acquire arch_cmpxchg
+#define arch_cmpxchg_release arch_cmpxchg
+#else /* arch_cmpxchg_relaxed */
+
+#ifndef arch_cmpxchg_acquire
+#define arch_cmpxchg_acquire(...) \
+ __atomic_op_acquire(arch_cmpxchg, __VA_ARGS__)
+#endif
+
+#ifndef arch_cmpxchg_release
+#define arch_cmpxchg_release(...) \
+ __atomic_op_release(arch_cmpxchg, __VA_ARGS__)
+#endif
+
+#ifndef arch_cmpxchg
+#define arch_cmpxchg(...) \
+ __atomic_op_fence(arch_cmpxchg, __VA_ARGS__)
+#endif
+
+#endif /* arch_cmpxchg_relaxed */
+
+#ifndef arch_cmpxchg64_relaxed
+#define arch_cmpxchg64_relaxed arch_cmpxchg64
+#define arch_cmpxchg64_acquire arch_cmpxchg64
+#define arch_cmpxchg64_release arch_cmpxchg64
+#else /* arch_cmpxchg64_relaxed */
+
+#ifndef arch_cmpxchg64_acquire
+#define arch_cmpxchg64_acquire(...) \
+ __atomic_op_acquire(arch_cmpxchg64, __VA_ARGS__)
+#endif
+
+#ifndef arch_cmpxchg64_release
+#define arch_cmpxchg64_release(...) \
+ __atomic_op_release(arch_cmpxchg64, __VA_ARGS__)
+#endif
+
+#ifndef arch_cmpxchg64
+#define arch_cmpxchg64(...) \
+ __atomic_op_fence(arch_cmpxchg64, __VA_ARGS__)
+#endif
+
+#endif /* arch_cmpxchg64_relaxed */
+
+#ifndef arch_atomic_read_acquire
+static __always_inline int
+arch_atomic_read_acquire(const atomic_t *v)
+{
+ return smp_load_acquire(&(v)->counter);
+}
+#define arch_atomic_read_acquire arch_atomic_read_acquire
+#endif
+
+#ifndef arch_atomic_set_release
+static __always_inline void
+arch_atomic_set_release(atomic_t *v, int i)
+{
+ smp_store_release(&(v)->counter, i);
+}
+#define arch_atomic_set_release arch_atomic_set_release
+#endif
+
+#ifndef arch_atomic_add_return_relaxed
+#define arch_atomic_add_return_acquire arch_atomic_add_return
+#define arch_atomic_add_return_release arch_atomic_add_return
+#define arch_atomic_add_return_relaxed arch_atomic_add_return
+#else /* arch_atomic_add_return_relaxed */
+
+#ifndef arch_atomic_add_return_acquire
+static __always_inline int
+arch_atomic_add_return_acquire(int i, atomic_t *v)
+{
+ int ret = arch_atomic_add_return_relaxed(i, v);
+ __atomic_acquire_fence();
+ return ret;
+}
+#define arch_atomic_add_return_acquire arch_atomic_add_return_acquire
+#endif
+
+#ifndef arch_atomic_add_return_release
+static __always_inline int
+arch_atomic_add_return_release(int i, atomic_t *v)
+{
+ __atomic_release_fence();
+ return arch_atomic_add_return_relaxed(i, v);
+}
+#define arch_atomic_add_return_release arch_atomic_add_return_release
+#endif
+
+#ifndef arch_atomic_add_return
+static __always_inline int
+arch_atomic_add_return(int i, atomic_t *v)
+{
+ int ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic_add_return_relaxed(i, v);
+ __atomic_post_full_fence();
+ return ret;
+}
+#define arch_atomic_add_return arch_atomic_add_return
+#endif
+
+#endif /* arch_atomic_add_return_relaxed */
+
+#ifndef arch_atomic_fetch_add_relaxed
+#define arch_atomic_fetch_add_acquire arch_atomic_fetch_add
+#define arch_atomic_fetch_add_release arch_atomic_fetch_add
+#define arch_atomic_fetch_add_relaxed arch_atomic_fetch_add
+#else /* arch_atomic_fetch_add_relaxed */
+
+#ifndef arch_atomic_fetch_add_acquire
+static __always_inline int
+arch_atomic_fetch_add_acquire(int i, atomic_t *v)
+{
+ int ret = arch_atomic_fetch_add_relaxed(i, v);
+ __atomic_acquire_fence();
+ return ret;
+}
+#define arch_atomic_fetch_add_acquire arch_atomic_fetch_add_acquire
+#endif
+
+#ifndef arch_atomic_fetch_add_release
+static __always_inline int
+arch_atomic_fetch_add_release(int i, atomic_t *v)
+{
+ __atomic_release_fence();
+ return arch_atomic_fetch_add_relaxed(i, v);
+}
+#define arch_atomic_fetch_add_release arch_atomic_fetch_add_release
+#endif
+
+#ifndef arch_atomic_fetch_add
+static __always_inline int
+arch_atomic_fetch_add(int i, atomic_t *v)
+{
+ int ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic_fetch_add_relaxed(i, v);
+ __atomic_post_full_fence();
+ return ret;
+}
+#define arch_atomic_fetch_add arch_atomic_fetch_add
+#endif
+
+#endif /* arch_atomic_fetch_add_relaxed */
+
+#ifndef arch_atomic_sub_return_relaxed
+#define arch_atomic_sub_return_acquire arch_atomic_sub_return
+#define arch_atomic_sub_return_release arch_atomic_sub_return
+#define arch_atomic_sub_return_relaxed arch_atomic_sub_return
+#else /* arch_atomic_sub_return_relaxed */
+
+#ifndef arch_atomic_sub_return_acquire
+static __always_inline int
+arch_atomic_sub_return_acquire(int i, atomic_t *v)
+{
+ int ret = arch_atomic_sub_return_relaxed(i, v);
+ __atomic_acquire_fence();
+ return ret;
+}
+#define arch_atomic_sub_return_acquire arch_atomic_sub_return_acquire
+#endif
+
+#ifndef arch_atomic_sub_return_release
+static __always_inline int
+arch_atomic_sub_return_release(int i, atomic_t *v)
+{
+ __atomic_release_fence();
+ return arch_atomic_sub_return_relaxed(i, v);
+}
+#define arch_atomic_sub_return_release arch_atomic_sub_return_release
+#endif
+
+#ifndef arch_atomic_sub_return
+static __always_inline int
+arch_atomic_sub_return(int i, atomic_t *v)
+{
+ int ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic_sub_return_relaxed(i, v);
+ __atomic_post_full_fence();
+ return ret;
+}
+#define arch_atomic_sub_return arch_atomic_sub_return
+#endif
+
+#endif /* arch_atomic_sub_return_relaxed */
+
+#ifndef arch_atomic_fetch_sub_relaxed
+#define arch_atomic_fetch_sub_acquire arch_atomic_fetch_sub
+#define arch_atomic_fetch_sub_release arch_atomic_fetch_sub
+#define arch_atomic_fetch_sub_relaxed arch_atomic_fetch_sub
+#else /* arch_atomic_fetch_sub_relaxed */
+
+#ifndef arch_atomic_fetch_sub_acquire
+static __always_inline int
+arch_atomic_fetch_sub_acquire(int i, atomic_t *v)
+{
+ int ret = arch_atomic_fetch_sub_relaxed(i, v);
+ __atomic_acquire_fence();
+ return ret;
+}
+#define arch_atomic_fetch_sub_acquire arch_atomic_fetch_sub_acquire
+#endif
+
+#ifndef arch_atomic_fetch_sub_release
+static __always_inline int
+arch_atomic_fetch_sub_release(int i, atomic_t *v)
+{
+ __atomic_release_fence();
+ return arch_atomic_fetch_sub_relaxed(i, v);
+}
+#define arch_atomic_fetch_sub_release arch_atomic_fetch_sub_release
+#endif
+
+#ifndef arch_atomic_fetch_sub
+static __always_inline int
+arch_atomic_fetch_sub(int i, atomic_t *v)
+{
+ int ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic_fetch_sub_relaxed(i, v);
+ __atomic_post_full_fence();
+ return ret;
+}
+#define arch_atomic_fetch_sub arch_atomic_fetch_sub
+#endif
+
+#endif /* arch_atomic_fetch_sub_relaxed */
+
+#ifndef arch_atomic_inc
+static __always_inline void
+arch_atomic_inc(atomic_t *v)
+{
+ arch_atomic_add(1, v);
+}
+#define arch_atomic_inc arch_atomic_inc
+#endif
+
+#ifndef arch_atomic_inc_return_relaxed
+#ifdef arch_atomic_inc_return
+#define arch_atomic_inc_return_acquire arch_atomic_inc_return
+#define arch_atomic_inc_return_release arch_atomic_inc_return
+#define arch_atomic_inc_return_relaxed arch_atomic_inc_return
+#endif /* arch_atomic_inc_return */
+
+#ifndef arch_atomic_inc_return
+static __always_inline int
+arch_atomic_inc_return(atomic_t *v)
+{
+ return arch_atomic_add_return(1, v);
+}
+#define arch_atomic_inc_return arch_atomic_inc_return
+#endif
+
+#ifndef arch_atomic_inc_return_acquire
+static __always_inline int
+arch_atomic_inc_return_acquire(atomic_t *v)
+{
+ return arch_atomic_add_return_acquire(1, v);
+}
+#define arch_atomic_inc_return_acquire arch_atomic_inc_return_acquire
+#endif
+
+#ifndef arch_atomic_inc_return_release
+static __always_inline int
+arch_atomic_inc_return_release(atomic_t *v)
+{
+ return arch_atomic_add_return_release(1, v);
+}
+#define arch_atomic_inc_return_release arch_atomic_inc_return_release
+#endif
+
+#ifndef arch_atomic_inc_return_relaxed
+static __always_inline int
+arch_atomic_inc_return_relaxed(atomic_t *v)
+{
+ return arch_atomic_add_return_relaxed(1, v);
+}
+#define arch_atomic_inc_return_relaxed arch_atomic_inc_return_relaxed
+#endif
+
+#else /* arch_atomic_inc_return_relaxed */
+
+#ifndef arch_atomic_inc_return_acquire
+static __always_inline int
+arch_atomic_inc_return_acquire(atomic_t *v)
+{
+ int ret = arch_atomic_inc_return_relaxed(v);
+ __atomic_acquire_fence();
+ return ret;
+}
+#define arch_atomic_inc_return_acquire arch_atomic_inc_return_acquire
+#endif
+
+#ifndef arch_atomic_inc_return_release
+static __always_inline int
+arch_atomic_inc_return_release(atomic_t *v)
+{
+ __atomic_release_fence();
+ return arch_atomic_inc_return_relaxed(v);
+}
+#define arch_atomic_inc_return_release arch_atomic_inc_return_release
+#endif
+
+#ifndef arch_atomic_inc_return
+static __always_inline int
+arch_atomic_inc_return(atomic_t *v)
+{
+ int ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic_inc_return_relaxed(v);
+ __atomic_post_full_fence();
+ return ret;
+}
+#define arch_atomic_inc_return arch_atomic_inc_return
+#endif
+
+#endif /* arch_atomic_inc_return_relaxed */
+
+#ifndef arch_atomic_fetch_inc_relaxed
+#ifdef arch_atomic_fetch_inc
+#define arch_atomic_fetch_inc_acquire arch_atomic_fetch_inc
+#define arch_atomic_fetch_inc_release arch_atomic_fetch_inc
+#define arch_atomic_fetch_inc_relaxed arch_atomic_fetch_inc
+#endif /* arch_atomic_fetch_inc */
+
+#ifndef arch_atomic_fetch_inc
+static __always_inline int
+arch_atomic_fetch_inc(atomic_t *v)
+{
+ return arch_atomic_fetch_add(1, v);
+}
+#define arch_atomic_fetch_inc arch_atomic_fetch_inc
+#endif
+
+#ifndef arch_atomic_fetch_inc_acquire
+static __always_inline int
+arch_atomic_fetch_inc_acquire(atomic_t *v)
+{
+ return arch_atomic_fetch_add_acquire(1, v);
+}
+#define arch_atomic_fetch_inc_acquire arch_atomic_fetch_inc_acquire
+#endif
+
+#ifndef arch_atomic_fetch_inc_release
+static __always_inline int
+arch_atomic_fetch_inc_release(atomic_t *v)
+{
+ return arch_atomic_fetch_add_release(1, v);
+}
+#define arch_atomic_fetch_inc_release arch_atomic_fetch_inc_release
+#endif
+
+#ifndef arch_atomic_fetch_inc_relaxed
+static __always_inline int
+arch_atomic_fetch_inc_relaxed(atomic_t *v)
+{
+ return arch_atomic_fetch_add_relaxed(1, v);
+}
+#define arch_atomic_fetch_inc_relaxed arch_atomic_fetch_inc_relaxed
+#endif
+
+#else /* arch_atomic_fetch_inc_relaxed */
+
+#ifndef arch_atomic_fetch_inc_acquire
+static __always_inline int
+arch_atomic_fetch_inc_acquire(atomic_t *v)
+{
+ int ret = arch_atomic_fetch_inc_relaxed(v);
+ __atomic_acquire_fence();
+ return ret;
+}
+#define arch_atomic_fetch_inc_acquire arch_atomic_fetch_inc_acquire
+#endif
+
+#ifndef arch_atomic_fetch_inc_release
+static __always_inline int
+arch_atomic_fetch_inc_release(atomic_t *v)
+{
+ __atomic_release_fence();
+ return arch_atomic_fetch_inc_relaxed(v);
+}
+#define arch_atomic_fetch_inc_release arch_atomic_fetch_inc_release
+#endif
+
+#ifndef arch_atomic_fetch_inc
+static __always_inline int
+arch_atomic_fetch_inc(atomic_t *v)
+{
+ int ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic_fetch_inc_relaxed(v);
+ __atomic_post_full_fence();
+ return ret;
+}
+#define arch_atomic_fetch_inc arch_atomic_fetch_inc
+#endif
+
+#endif /* arch_atomic_fetch_inc_relaxed */
+
+#ifndef arch_atomic_dec
+static __always_inline void
+arch_atomic_dec(atomic_t *v)
+{
+ arch_atomic_sub(1, v);
+}
+#define arch_atomic_dec arch_atomic_dec
+#endif
+
+#ifndef arch_atomic_dec_return_relaxed
+#ifdef arch_atomic_dec_return
+#define arch_atomic_dec_return_acquire arch_atomic_dec_return
+#define arch_atomic_dec_return_release arch_atomic_dec_return
+#define arch_atomic_dec_return_relaxed arch_atomic_dec_return
+#endif /* arch_atomic_dec_return */
+
+#ifndef arch_atomic_dec_return
+static __always_inline int
+arch_atomic_dec_return(atomic_t *v)
+{
+ return arch_atomic_sub_return(1, v);
+}
+#define arch_atomic_dec_return arch_atomic_dec_return
+#endif
+
+#ifndef arch_atomic_dec_return_acquire
+static __always_inline int
+arch_atomic_dec_return_acquire(atomic_t *v)
+{
+ return arch_atomic_sub_return_acquire(1, v);
+}
+#define arch_atomic_dec_return_acquire arch_atomic_dec_return_acquire
+#endif
+
+#ifndef arch_atomic_dec_return_release
+static __always_inline int
+arch_atomic_dec_return_release(atomic_t *v)
+{
+ return arch_atomic_sub_return_release(1, v);
+}
+#define arch_atomic_dec_return_release arch_atomic_dec_return_release
+#endif
+
+#ifndef arch_atomic_dec_return_relaxed
+static __always_inline int
+arch_atomic_dec_return_relaxed(atomic_t *v)
+{
+ return arch_atomic_sub_return_relaxed(1, v);
+}
+#define arch_atomic_dec_return_relaxed arch_atomic_dec_return_relaxed
+#endif
+
+#else /* arch_atomic_dec_return_relaxed */
+
+#ifndef arch_atomic_dec_return_acquire
+static __always_inline int
+arch_atomic_dec_return_acquire(atomic_t *v)
+{
+ int ret = arch_atomic_dec_return_relaxed(v);
+ __atomic_acquire_fence();
+ return ret;
+}
+#define arch_atomic_dec_return_acquire arch_atomic_dec_return_acquire
+#endif
+
+#ifndef arch_atomic_dec_return_release
+static __always_inline int
+arch_atomic_dec_return_release(atomic_t *v)
+{
+ __atomic_release_fence();
+ return arch_atomic_dec_return_relaxed(v);
+}
+#define arch_atomic_dec_return_release arch_atomic_dec_return_release
+#endif
+
+#ifndef arch_atomic_dec_return
+static __always_inline int
+arch_atomic_dec_return(atomic_t *v)
+{
+ int ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic_dec_return_relaxed(v);
+ __atomic_post_full_fence();
+ return ret;
+}
+#define arch_atomic_dec_return arch_atomic_dec_return
+#endif
+
+#endif /* arch_atomic_dec_return_relaxed */
+
+#ifndef arch_atomic_fetch_dec_relaxed
+#ifdef arch_atomic_fetch_dec
+#define arch_atomic_fetch_dec_acquire arch_atomic_fetch_dec
+#define arch_atomic_fetch_dec_release arch_atomic_fetch_dec
+#define arch_atomic_fetch_dec_relaxed arch_atomic_fetch_dec
+#endif /* arch_atomic_fetch_dec */
+
+#ifndef arch_atomic_fetch_dec
+static __always_inline int
+arch_atomic_fetch_dec(atomic_t *v)
+{
+ return arch_atomic_fetch_sub(1, v);
+}
+#define arch_atomic_fetch_dec arch_atomic_fetch_dec
+#endif
+
+#ifndef arch_atomic_fetch_dec_acquire
+static __always_inline int
+arch_atomic_fetch_dec_acquire(atomic_t *v)
+{
+ return arch_atomic_fetch_sub_acquire(1, v);
+}
+#define arch_atomic_fetch_dec_acquire arch_atomic_fetch_dec_acquire
+#endif
+
+#ifndef arch_atomic_fetch_dec_release
+static __always_inline int
+arch_atomic_fetch_dec_release(atomic_t *v)
+{
+ return arch_atomic_fetch_sub_release(1, v);
+}
+#define arch_atomic_fetch_dec_release arch_atomic_fetch_dec_release
+#endif
+
+#ifndef arch_atomic_fetch_dec_relaxed
+static __always_inline int
+arch_atomic_fetch_dec_relaxed(atomic_t *v)
+{
+ return arch_atomic_fetch_sub_relaxed(1, v);
+}
+#define arch_atomic_fetch_dec_relaxed arch_atomic_fetch_dec_relaxed
+#endif
+
+#else /* arch_atomic_fetch_dec_relaxed */
+
+#ifndef arch_atomic_fetch_dec_acquire
+static __always_inline int
+arch_atomic_fetch_dec_acquire(atomic_t *v)
+{
+ int ret = arch_atomic_fetch_dec_relaxed(v);
+ __atomic_acquire_fence();
+ return ret;
+}
+#define arch_atomic_fetch_dec_acquire arch_atomic_fetch_dec_acquire
+#endif
+
+#ifndef arch_atomic_fetch_dec_release
+static __always_inline int
+arch_atomic_fetch_dec_release(atomic_t *v)
+{
+ __atomic_release_fence();
+ return arch_atomic_fetch_dec_relaxed(v);
+}
+#define arch_atomic_fetch_dec_release arch_atomic_fetch_dec_release
+#endif
+
+#ifndef arch_atomic_fetch_dec
+static __always_inline int
+arch_atomic_fetch_dec(atomic_t *v)
+{
+ int ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic_fetch_dec_relaxed(v);
+ __atomic_post_full_fence();
+ return ret;
+}
+#define arch_atomic_fetch_dec arch_atomic_fetch_dec
+#endif
+
+#endif /* arch_atomic_fetch_dec_relaxed */
+
+#ifndef arch_atomic_fetch_and_relaxed
+#define arch_atomic_fetch_and_acquire arch_atomic_fetch_and
+#define arch_atomic_fetch_and_release arch_atomic_fetch_and
+#define arch_atomic_fetch_and_relaxed arch_atomic_fetch_and
+#else /* arch_atomic_fetch_and_relaxed */
+
+#ifndef arch_atomic_fetch_and_acquire
+static __always_inline int
+arch_atomic_fetch_and_acquire(int i, atomic_t *v)
+{
+ int ret = arch_atomic_fetch_and_relaxed(i, v);
+ __atomic_acquire_fence();
+ return ret;
+}
+#define arch_atomic_fetch_and_acquire arch_atomic_fetch_and_acquire
+#endif
+
+#ifndef arch_atomic_fetch_and_release
+static __always_inline int
+arch_atomic_fetch_and_release(int i, atomic_t *v)
+{
+ __atomic_release_fence();
+ return arch_atomic_fetch_and_relaxed(i, v);
+}
+#define arch_atomic_fetch_and_release arch_atomic_fetch_and_release
+#endif
+
+#ifndef arch_atomic_fetch_and
+static __always_inline int
+arch_atomic_fetch_and(int i, atomic_t *v)
+{
+ int ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic_fetch_and_relaxed(i, v);
+ __atomic_post_full_fence();
+ return ret;
+}
+#define arch_atomic_fetch_and arch_atomic_fetch_and
+#endif
+
+#endif /* arch_atomic_fetch_and_relaxed */
+
+#ifndef arch_atomic_andnot
+static __always_inline void
+arch_atomic_andnot(int i, atomic_t *v)
+{
+ arch_atomic_and(~i, v);
+}
+#define arch_atomic_andnot arch_atomic_andnot
+#endif
+
+#ifndef arch_atomic_fetch_andnot_relaxed
+#ifdef arch_atomic_fetch_andnot
+#define arch_atomic_fetch_andnot_acquire arch_atomic_fetch_andnot
+#define arch_atomic_fetch_andnot_release arch_atomic_fetch_andnot
+#define arch_atomic_fetch_andnot_relaxed arch_atomic_fetch_andnot
+#endif /* arch_atomic_fetch_andnot */
+
+#ifndef arch_atomic_fetch_andnot
+static __always_inline int
+arch_atomic_fetch_andnot(int i, atomic_t *v)
+{
+ return arch_atomic_fetch_and(~i, v);
+}
+#define arch_atomic_fetch_andnot arch_atomic_fetch_andnot
+#endif
+
+#ifndef arch_atomic_fetch_andnot_acquire
+static __always_inline int
+arch_atomic_fetch_andnot_acquire(int i, atomic_t *v)
+{
+ return arch_atomic_fetch_and_acquire(~i, v);
+}
+#define arch_atomic_fetch_andnot_acquire arch_atomic_fetch_andnot_acquire
+#endif
+
+#ifndef arch_atomic_fetch_andnot_release
+static __always_inline int
+arch_atomic_fetch_andnot_release(int i, atomic_t *v)
+{
+ return arch_atomic_fetch_and_release(~i, v);
+}
+#define arch_atomic_fetch_andnot_release arch_atomic_fetch_andnot_release
+#endif
+
+#ifndef arch_atomic_fetch_andnot_relaxed
+static __always_inline int
+arch_atomic_fetch_andnot_relaxed(int i, atomic_t *v)
+{
+ return arch_atomic_fetch_and_relaxed(~i, v);
+}
+#define arch_atomic_fetch_andnot_relaxed arch_atomic_fetch_andnot_relaxed
+#endif
+
+#else /* arch_atomic_fetch_andnot_relaxed */
+
+#ifndef arch_atomic_fetch_andnot_acquire
+static __always_inline int
+arch_atomic_fetch_andnot_acquire(int i, atomic_t *v)
+{
+ int ret = arch_atomic_fetch_andnot_relaxed(i, v);
+ __atomic_acquire_fence();
+ return ret;
+}
+#define arch_atomic_fetch_andnot_acquire arch_atomic_fetch_andnot_acquire
+#endif
+
+#ifndef arch_atomic_fetch_andnot_release
+static __always_inline int
+arch_atomic_fetch_andnot_release(int i, atomic_t *v)
+{
+ __atomic_release_fence();
+ return arch_atomic_fetch_andnot_relaxed(i, v);
+}
+#define arch_atomic_fetch_andnot_release arch_atomic_fetch_andnot_release
+#endif
+
+#ifndef arch_atomic_fetch_andnot
+static __always_inline int
+arch_atomic_fetch_andnot(int i, atomic_t *v)
+{
+ int ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic_fetch_andnot_relaxed(i, v);
+ __atomic_post_full_fence();
+ return ret;
+}
+#define arch_atomic_fetch_andnot arch_atomic_fetch_andnot
+#endif
+
+#endif /* arch_atomic_fetch_andnot_relaxed */
+
+#ifndef arch_atomic_fetch_or_relaxed
+#define arch_atomic_fetch_or_acquire arch_atomic_fetch_or
+#define arch_atomic_fetch_or_release arch_atomic_fetch_or
+#define arch_atomic_fetch_or_relaxed arch_atomic_fetch_or
+#else /* arch_atomic_fetch_or_relaxed */
+
+#ifndef arch_atomic_fetch_or_acquire
+static __always_inline int
+arch_atomic_fetch_or_acquire(int i, atomic_t *v)
+{
+ int ret = arch_atomic_fetch_or_relaxed(i, v);
+ __atomic_acquire_fence();
+ return ret;
+}
+#define arch_atomic_fetch_or_acquire arch_atomic_fetch_or_acquire
+#endif
+
+#ifndef arch_atomic_fetch_or_release
+static __always_inline int
+arch_atomic_fetch_or_release(int i, atomic_t *v)
+{
+ __atomic_release_fence();
+ return arch_atomic_fetch_or_relaxed(i, v);
+}
+#define arch_atomic_fetch_or_release arch_atomic_fetch_or_release
+#endif
+
+#ifndef arch_atomic_fetch_or
+static __always_inline int
+arch_atomic_fetch_or(int i, atomic_t *v)
+{
+ int ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic_fetch_or_relaxed(i, v);
+ __atomic_post_full_fence();
+ return ret;
+}
+#define arch_atomic_fetch_or arch_atomic_fetch_or
+#endif
+
+#endif /* arch_atomic_fetch_or_relaxed */
+
+#ifndef arch_atomic_fetch_xor_relaxed
+#define arch_atomic_fetch_xor_acquire arch_atomic_fetch_xor
+#define arch_atomic_fetch_xor_release arch_atomic_fetch_xor
+#define arch_atomic_fetch_xor_relaxed arch_atomic_fetch_xor
+#else /* arch_atomic_fetch_xor_relaxed */
+
+#ifndef arch_atomic_fetch_xor_acquire
+static __always_inline int
+arch_atomic_fetch_xor_acquire(int i, atomic_t *v)
+{
+ int ret = arch_atomic_fetch_xor_relaxed(i, v);
+ __atomic_acquire_fence();
+ return ret;
+}
+#define arch_atomic_fetch_xor_acquire arch_atomic_fetch_xor_acquire
+#endif
+
+#ifndef arch_atomic_fetch_xor_release
+static __always_inline int
+arch_atomic_fetch_xor_release(int i, atomic_t *v)
+{
+ __atomic_release_fence();
+ return arch_atomic_fetch_xor_relaxed(i, v);
+}
+#define arch_atomic_fetch_xor_release arch_atomic_fetch_xor_release
+#endif
+
+#ifndef arch_atomic_fetch_xor
+static __always_inline int
+arch_atomic_fetch_xor(int i, atomic_t *v)
+{
+ int ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic_fetch_xor_relaxed(i, v);
+ __atomic_post_full_fence();
+ return ret;
+}
+#define arch_atomic_fetch_xor arch_atomic_fetch_xor
+#endif
+
+#endif /* arch_atomic_fetch_xor_relaxed */
+
+#ifndef arch_atomic_xchg_relaxed
+#define arch_atomic_xchg_acquire arch_atomic_xchg
+#define arch_atomic_xchg_release arch_atomic_xchg
+#define arch_atomic_xchg_relaxed arch_atomic_xchg
+#else /* arch_atomic_xchg_relaxed */
+
+#ifndef arch_atomic_xchg_acquire
+static __always_inline int
+arch_atomic_xchg_acquire(atomic_t *v, int i)
+{
+ int ret = arch_atomic_xchg_relaxed(v, i);
+ __atomic_acquire_fence();
+ return ret;
+}
+#define arch_atomic_xchg_acquire arch_atomic_xchg_acquire
+#endif
+
+#ifndef arch_atomic_xchg_release
+static __always_inline int
+arch_atomic_xchg_release(atomic_t *v, int i)
+{
+ __atomic_release_fence();
+ return arch_atomic_xchg_relaxed(v, i);
+}
+#define arch_atomic_xchg_release arch_atomic_xchg_release
+#endif
+
+#ifndef arch_atomic_xchg
+static __always_inline int
+arch_atomic_xchg(atomic_t *v, int i)
+{
+ int ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic_xchg_relaxed(v, i);
+ __atomic_post_full_fence();
+ return ret;
+}
+#define arch_atomic_xchg arch_atomic_xchg
+#endif
+
+#endif /* arch_atomic_xchg_relaxed */
+
+#ifndef arch_atomic_cmpxchg_relaxed
+#define arch_atomic_cmpxchg_acquire arch_atomic_cmpxchg
+#define arch_atomic_cmpxchg_release arch_atomic_cmpxchg
+#define arch_atomic_cmpxchg_relaxed arch_atomic_cmpxchg
+#else /* arch_atomic_cmpxchg_relaxed */
+
+#ifndef arch_atomic_cmpxchg_acquire
+static __always_inline int
+arch_atomic_cmpxchg_acquire(atomic_t *v, int old, int new)
+{
+ int ret = arch_atomic_cmpxchg_relaxed(v, old, new);
+ __atomic_acquire_fence();
+ return ret;
+}
+#define arch_atomic_cmpxchg_acquire arch_atomic_cmpxchg_acquire
+#endif
+
+#ifndef arch_atomic_cmpxchg_release
+static __always_inline int
+arch_atomic_cmpxchg_release(atomic_t *v, int old, int new)
+{
+ __atomic_release_fence();
+ return arch_atomic_cmpxchg_relaxed(v, old, new);
+}
+#define arch_atomic_cmpxchg_release arch_atomic_cmpxchg_release
+#endif
+
+#ifndef arch_atomic_cmpxchg
+static __always_inline int
+arch_atomic_cmpxchg(atomic_t *v, int old, int new)
+{
+ int ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic_cmpxchg_relaxed(v, old, new);
+ __atomic_post_full_fence();
+ return ret;
+}
+#define arch_atomic_cmpxchg arch_atomic_cmpxchg
+#endif
+
+#endif /* arch_atomic_cmpxchg_relaxed */
+
+#ifndef arch_atomic_try_cmpxchg_relaxed
+#ifdef arch_atomic_try_cmpxchg
+#define arch_atomic_try_cmpxchg_acquire arch_atomic_try_cmpxchg
+#define arch_atomic_try_cmpxchg_release arch_atomic_try_cmpxchg
+#define arch_atomic_try_cmpxchg_relaxed arch_atomic_try_cmpxchg
+#endif /* arch_atomic_try_cmpxchg */
+
+#ifndef arch_atomic_try_cmpxchg
+static __always_inline bool
+arch_atomic_try_cmpxchg(atomic_t *v, int *old, int new)
+{
+ int r, o = *old;
+ r = arch_atomic_cmpxchg(v, o, new);
+ if (unlikely(r != o))
+ *old = r;
+ return likely(r == o);
+}
+#define arch_atomic_try_cmpxchg arch_atomic_try_cmpxchg
+#endif
+
+#ifndef arch_atomic_try_cmpxchg_acquire
+static __always_inline bool
+arch_atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new)
+{
+ int r, o = *old;
+ r = arch_atomic_cmpxchg_acquire(v, o, new);
+ if (unlikely(r != o))
+ *old = r;
+ return likely(r == o);
+}
+#define arch_atomic_try_cmpxchg_acquire arch_atomic_try_cmpxchg_acquire
+#endif
+
+#ifndef arch_atomic_try_cmpxchg_release
+static __always_inline bool
+arch_atomic_try_cmpxchg_release(atomic_t *v, int *old, int new)
+{
+ int r, o = *old;
+ r = arch_atomic_cmpxchg_release(v, o, new);
+ if (unlikely(r != o))
+ *old = r;
+ return likely(r == o);
+}
+#define arch_atomic_try_cmpxchg_release arch_atomic_try_cmpxchg_release
+#endif
+
+#ifndef arch_atomic_try_cmpxchg_relaxed
+static __always_inline bool
+arch_atomic_try_cmpxchg_relaxed(atomic_t *v, int *old, int new)
+{
+ int r, o = *old;
+ r = arch_atomic_cmpxchg_relaxed(v, o, new);
+ if (unlikely(r != o))
+ *old = r;
+ return likely(r == o);
+}
+#define arch_atomic_try_cmpxchg_relaxed arch_atomic_try_cmpxchg_relaxed
+#endif
+
+#else /* arch_atomic_try_cmpxchg_relaxed */
+
+#ifndef arch_atomic_try_cmpxchg_acquire
+static __always_inline bool
+arch_atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new)
+{
+ bool ret = arch_atomic_try_cmpxchg_relaxed(v, old, new);
+ __atomic_acquire_fence();
+ return ret;
+}
+#define arch_atomic_try_cmpxchg_acquire arch_atomic_try_cmpxchg_acquire
+#endif
+
+#ifndef arch_atomic_try_cmpxchg_release
+static __always_inline bool
+arch_atomic_try_cmpxchg_release(atomic_t *v, int *old, int new)
+{
+ __atomic_release_fence();
+ return arch_atomic_try_cmpxchg_relaxed(v, old, new);
+}
+#define arch_atomic_try_cmpxchg_release arch_atomic_try_cmpxchg_release
+#endif
+
+#ifndef arch_atomic_try_cmpxchg
+static __always_inline bool
+arch_atomic_try_cmpxchg(atomic_t *v, int *old, int new)
+{
+ bool ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic_try_cmpxchg_relaxed(v, old, new);
+ __atomic_post_full_fence();
+ return ret;
+}
+#define arch_atomic_try_cmpxchg arch_atomic_try_cmpxchg
+#endif
+
+#endif /* arch_atomic_try_cmpxchg_relaxed */
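
  The try_cmpxchg() family above is what makes caller-side CAS loops cheap: on
  failure, *old is refreshed with the value actually observed, so the loop body
  needs no extra read per retry. A minimal illustrative caller (hypothetical
  function, not from this patch):

	#include <linux/atomic.h>

	/* Increment v, but never past max; returns the value seen before. */
	static int saturating_inc(atomic_t *v, int max)
	{
		int old = atomic_read(v);

		do {
			if (old == max)
				return old;	/* already saturated */
		} while (!atomic_try_cmpxchg(v, &old, old + 1));

		return old;
	}
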
+
+#ifndef arch_atomic_sub_and_test
+/**
+ * arch_atomic_sub_and_test - subtract value from variable and test result
+ * @i: integer value to subtract
+ * @v: pointer of type atomic_t
+ *
+ * Atomically subtracts @i from @v and returns
+ * true if the result is zero, or false for all
+ * other cases.
+ */
+static __always_inline bool
+arch_atomic_sub_and_test(int i, atomic_t *v)
+{
+ return arch_atomic_sub_return(i, v) == 0;
+}
+#define arch_atomic_sub_and_test arch_atomic_sub_and_test
+#endif
+
+#ifndef arch_atomic_dec_and_test
+/**
+ * arch_atomic_dec_and_test - decrement and test
+ * @v: pointer of type atomic_t
+ *
+ * Atomically decrements @v by 1 and
+ * returns true if the result is 0, or false for all other
+ * cases.
+ */
+static __always_inline bool
+arch_atomic_dec_and_test(atomic_t *v)
+{
+ return arch_atomic_dec_return(v) == 0;
+}
+#define arch_atomic_dec_and_test arch_atomic_dec_and_test
+#endif
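
  dec_and_test() returns true to exactly one caller, the one whose decrement
  reaches zero; that is the hook reference-count teardown hangs off. An
  illustrative sketch (hypothetical struct and names, not from this patch):

	#include <linux/atomic.h>
	#include <linux/slab.h>

	struct obj {
		atomic_t refcnt;
		/* payload */
	};

	static void obj_put(struct obj *o)
	{
		/* Only the final put observes the 1 -> 0 transition. */
		if (atomic_dec_and_test(&o->refcnt))
			kfree(o);
	}
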
+
+#ifndef arch_atomic_inc_and_test
+/**
+ * arch_atomic_inc_and_test - increment and test
+ * @v: pointer of type atomic_t
+ *
+ * Atomically increments @v by 1
+ * and returns true if the result is zero, or false for all
+ * other cases.
+ */
+static __always_inline bool
+arch_atomic_inc_and_test(atomic_t *v)
+{
+ return arch_atomic_inc_return(v) == 0;
+}
+#define arch_atomic_inc_and_test arch_atomic_inc_and_test
+#endif
+
+#ifndef arch_atomic_add_negative
+/**
+ * arch_atomic_add_negative - add and test if negative
+ * @i: integer value to add
+ * @v: pointer of type atomic_t
+ *
+ * Atomically adds @i to @v and returns true
+ * if the result is negative, or false when
+ * the result is greater than or equal to zero.

+ */
+static __always_inline bool
+arch_atomic_add_negative(int i, atomic_t *v)
+{
+ return arch_atomic_add_return(i, v) < 0;
+}
+#define arch_atomic_add_negative arch_atomic_add_negative
+#endif
+
+#ifndef arch_atomic_fetch_add_unless
+/**
+ * arch_atomic_fetch_add_unless - add unless the number is already a given value
+ * @v: pointer of type atomic_t
+ * @a: the amount to add to v...
+ * @u: ...unless v is equal to u.
+ *
+ * Atomically adds @a to @v, so long as @v was not already @u.
+ * Returns the original value of @v.
+ */
+static __always_inline int
+arch_atomic_fetch_add_unless(atomic_t *v, int a, int u)
+{
+ int c = arch_atomic_read(v);
+
+ do {
+ if (unlikely(c == u))
+ break;
+ } while (!arch_atomic_try_cmpxchg(v, &c, c + a));
+
+ return c;
+}
+#define arch_atomic_fetch_add_unless arch_atomic_fetch_add_unless
+#endif
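
  Note how the fallback above reads @v once and then lets try_cmpxchg() refresh
  c on contention, so each retry costs a single CAS rather than a read plus a
  CAS. An illustrative use with a non-zero sentinel (hypothetical names):

	#include <linux/atomic.h>

	#define COUNT_DEAD	(-1)	/* sentinel: object being torn down */

	static bool usage_tryget(atomic_t *count)
	{
		return atomic_fetch_add_unless(count, 1, COUNT_DEAD) != COUNT_DEAD;
	}
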
+
+#ifndef arch_atomic_add_unless
+/**
+ * arch_atomic_add_unless - add unless the number is already a given value
+ * @v: pointer of type atomic_t
+ * @a: the amount to add to v...
+ * @u: ...unless v is equal to u.
+ *
+ * Atomically adds @a to @v, if @v was not already @u.
+ * Returns true if the addition was done.
+ */
+static __always_inline bool
+arch_atomic_add_unless(atomic_t *v, int a, int u)
+{
+ return arch_atomic_fetch_add_unless(v, a, u) != u;
+}
+#define arch_atomic_add_unless arch_atomic_add_unless
+#endif
+
+#ifndef arch_atomic_inc_not_zero
+/**
+ * arch_atomic_inc_not_zero - increment unless the number is zero
+ * @v: pointer of type atomic_t
+ *
+ * Atomically increments @v by 1, if @v is non-zero.
+ * Returns true if the increment was done.
+ */
+static __always_inline bool
+arch_atomic_inc_not_zero(atomic_t *v)
+{
+ return arch_atomic_add_unless(v, 1, 0);
+}
+#define arch_atomic_inc_not_zero arch_atomic_inc_not_zero
+#endif
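
  inc_not_zero() is the standard way to take a reference on an object reached
  through a lockless lookup, where the final put may race with the lookup;
  failure means the object was already dying. Illustrative sketch, reusing the
  hypothetical obj type from the earlier note:

	static struct obj *obj_tryget(struct obj *o)
	{
		/* Fails iff refcnt was 0, i.e. we lost to the final put. */
		return atomic_inc_not_zero(&o->refcnt) ? o : NULL;
	}
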
+
+#ifndef arch_atomic_inc_unless_negative
+static __always_inline bool
+arch_atomic_inc_unless_negative(atomic_t *v)
+{
+ int c = arch_atomic_read(v);
+
+ do {
+ if (unlikely(c < 0))
+ return false;
+ } while (!arch_atomic_try_cmpxchg(v, &c, c + 1));
+
+ return true;
+}
+#define arch_atomic_inc_unless_negative arch_atomic_inc_unless_negative
+#endif
+
+#ifndef arch_atomic_dec_unless_positive
+static __always_inline bool
+arch_atomic_dec_unless_positive(atomic_t *v)
+{
+ int c = arch_atomic_read(v);
+
+ do {
+ if (unlikely(c > 0))
+ return false;
+ } while (!arch_atomic_try_cmpxchg(v, &c, c - 1));
+
+ return true;
+}
+#define arch_atomic_dec_unless_positive arch_atomic_dec_unless_positive
+#endif
+
+#ifndef arch_atomic_dec_if_positive
+static __always_inline int
+arch_atomic_dec_if_positive(atomic_t *v)
+{
+ int dec, c = arch_atomic_read(v);
+
+ do {
+ dec = c - 1;
+ if (unlikely(dec < 0))
+ break;
+ } while (!arch_atomic_try_cmpxchg(v, &c, dec));
+
+ return dec;
+}
+#define arch_atomic_dec_if_positive arch_atomic_dec_if_positive
+#endif
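
  dec_if_positive() returns the decremented value, which is negative exactly
  when no decrement happened; that makes it a natural trylock-style primitive
  for counted resources. A hedged sketch (hypothetical names):

	#include <linux/atomic.h>

	/* Take one permit if any remain; never drives the count below 0. */
	static bool permit_tryget(atomic_t *permits)
	{
		return atomic_dec_if_positive(permits) >= 0;
	}
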
+
+#ifdef CONFIG_GENERIC_ATOMIC64
+#include <asm-generic/atomic64.h>
+#endif
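
  On 32-bit configurations without native 64-bit atomics, CONFIG_GENERIC_ATOMIC64
  pulls in spinlock-backed atomic64_t operations, which the fallbacks below then
  decorate with ordering variants. Roughly the shape of one such op, per
  lib/atomic64.c (simplified from memory; treat the details as an assumption):

	s64 generic_atomic64_add_return(s64 a, atomic64_t *v)
	{
		unsigned long flags;
		raw_spinlock_t *lock = lock_addr(v);	/* hashed per-address lock */
		s64 val;

		raw_spin_lock_irqsave(lock, flags);
		val = (v->counter += a);
		raw_spin_unlock_irqrestore(lock, flags);
		return val;
	}
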
+
+#ifndef arch_atomic64_read_acquire
+static __always_inline s64
+arch_atomic64_read_acquire(const atomic64_t *v)
+{
+ return smp_load_acquire(&(v)->counter);
+}
+#define arch_atomic64_read_acquire arch_atomic64_read_acquire
+#endif
+
+#ifndef arch_atomic64_set_release
+static __always_inline void
+arch_atomic64_set_release(atomic64_t *v, s64 i)
+{
+ smp_store_release(&(v)->counter, i);
+}
+#define arch_atomic64_set_release arch_atomic64_set_release
+#endif
+
+#ifndef arch_atomic64_add_return_relaxed
+#define arch_atomic64_add_return_acquire arch_atomic64_add_return
+#define arch_atomic64_add_return_release arch_atomic64_add_return
+#define arch_atomic64_add_return_relaxed arch_atomic64_add_return
+#else /* arch_atomic64_add_return_relaxed */
+
+#ifndef arch_atomic64_add_return_acquire
+static __always_inline s64
+arch_atomic64_add_return_acquire(s64 i, atomic64_t *v)
+{
+ s64 ret = arch_atomic64_add_return_relaxed(i, v);
+ __atomic_acquire_fence();
+ return ret;
+}
+#define arch_atomic64_add_return_acquire arch_atomic64_add_return_acquire
+#endif
+
+#ifndef arch_atomic64_add_return_release
+static __always_inline s64
+arch_atomic64_add_return_release(s64 i, atomic64_t *v)
+{
+ __atomic_release_fence();
+ return arch_atomic64_add_return_relaxed(i, v);
+}
+#define arch_atomic64_add_return_release arch_atomic64_add_return_release
+#endif
+
+#ifndef arch_atomic64_add_return
+static __always_inline s64
+arch_atomic64_add_return(s64 i, atomic64_t *v)
+{
+ s64 ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic64_add_return_relaxed(i, v);
+ __atomic_post_full_fence();
+ return ret;
+}
+#define arch_atomic64_add_return arch_atomic64_add_return
+#endif
+
+#endif /* arch_atomic64_add_return_relaxed */
+
+#ifndef arch_atomic64_fetch_add_relaxed
+#define arch_atomic64_fetch_add_acquire arch_atomic64_fetch_add
+#define arch_atomic64_fetch_add_release arch_atomic64_fetch_add
+#define arch_atomic64_fetch_add_relaxed arch_atomic64_fetch_add
+#else /* arch_atomic64_fetch_add_relaxed */
+
+#ifndef arch_atomic64_fetch_add_acquire
+static __always_inline s64
+arch_atomic64_fetch_add_acquire(s64 i, atomic64_t *v)
+{
+ s64 ret = arch_atomic64_fetch_add_relaxed(i, v);
+ __atomic_acquire_fence();
+ return ret;
+}
+#define arch_atomic64_fetch_add_acquire arch_atomic64_fetch_add_acquire
+#endif
+
+#ifndef arch_atomic64_fetch_add_release
+static __always_inline s64
+arch_atomic64_fetch_add_release(s64 i, atomic64_t *v)
+{
+ __atomic_release_fence();
+ return arch_atomic64_fetch_add_relaxed(i, v);
+}
+#define arch_atomic64_fetch_add_release arch_atomic64_fetch_add_release
+#endif
+
+#ifndef arch_atomic64_fetch_add
+static __always_inline s64
+arch_atomic64_fetch_add(s64 i, atomic64_t *v)
+{
+ s64 ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic64_fetch_add_relaxed(i, v);
+ __atomic_post_full_fence();
+ return ret;
+}
+#define arch_atomic64_fetch_add arch_atomic64_fetch_add
+#endif
+
+#endif /* arch_atomic64_fetch_add_relaxed */
+
+#ifndef arch_atomic64_sub_return_relaxed
+#define arch_atomic64_sub_return_acquire arch_atomic64_sub_return
+#define arch_atomic64_sub_return_release arch_atomic64_sub_return
+#define arch_atomic64_sub_return_relaxed arch_atomic64_sub_return
+#else /* arch_atomic64_sub_return_relaxed */
+
+#ifndef arch_atomic64_sub_return_acquire
+static __always_inline s64
+arch_atomic64_sub_return_acquire(s64 i, atomic64_t *v)
+{
+ s64 ret = arch_atomic64_sub_return_relaxed(i, v);
+ __atomic_acquire_fence();
+ return ret;
+}
+#define arch_atomic64_sub_return_acquire arch_atomic64_sub_return_acquire
+#endif
+
+#ifndef arch_atomic64_sub_return_release
+static __always_inline s64
+arch_atomic64_sub_return_release(s64 i, atomic64_t *v)
+{
+ __atomic_release_fence();
+ return arch_atomic64_sub_return_relaxed(i, v);
+}
+#define arch_atomic64_sub_return_release arch_atomic64_sub_return_release
+#endif
+
+#ifndef arch_atomic64_sub_return
+static __always_inline s64
+arch_atomic64_sub_return(s64 i, atomic64_t *v)
+{
+ s64 ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic64_sub_return_relaxed(i, v);
+ __atomic_post_full_fence();
+ return ret;
+}
+#define arch_atomic64_sub_return arch_atomic64_sub_return
+#endif
+
+#endif /* arch_atomic64_sub_return_relaxed */
+
+#ifndef arch_atomic64_fetch_sub_relaxed
+#define arch_atomic64_fetch_sub_acquire arch_atomic64_fetch_sub
+#define arch_atomic64_fetch_sub_release arch_atomic64_fetch_sub
+#define arch_atomic64_fetch_sub_relaxed arch_atomic64_fetch_sub
+#else /* arch_atomic64_fetch_sub_relaxed */
+
+#ifndef arch_atomic64_fetch_sub_acquire
+static __always_inline s64
+arch_atomic64_fetch_sub_acquire(s64 i, atomic64_t *v)
+{
+ s64 ret = arch_atomic64_fetch_sub_relaxed(i, v);
+ __atomic_acquire_fence();
+ return ret;
+}
+#define arch_atomic64_fetch_sub_acquire arch_atomic64_fetch_sub_acquire
+#endif
+
+#ifndef arch_atomic64_fetch_sub_release
+static __always_inline s64
+arch_atomic64_fetch_sub_release(s64 i, atomic64_t *v)
+{
+ __atomic_release_fence();
+ return arch_atomic64_fetch_sub_relaxed(i, v);
+}
+#define arch_atomic64_fetch_sub_release arch_atomic64_fetch_sub_release
+#endif
+
+#ifndef arch_atomic64_fetch_sub
+static __always_inline s64
+arch_atomic64_fetch_sub(s64 i, atomic64_t *v)
+{
+ s64 ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic64_fetch_sub_relaxed(i, v);
+ __atomic_post_full_fence();
+ return ret;
+}
+#define arch_atomic64_fetch_sub arch_atomic64_fetch_sub
+#endif
+
+#endif /* arch_atomic64_fetch_sub_relaxed */
+
+#ifndef arch_atomic64_inc
+static __always_inline void
+arch_atomic64_inc(atomic64_t *v)
+{
+ arch_atomic64_add(1, v);
+}
+#define arch_atomic64_inc arch_atomic64_inc
+#endif
+
+#ifndef arch_atomic64_inc_return_relaxed
+#ifdef arch_atomic64_inc_return
+#define arch_atomic64_inc_return_acquire arch_atomic64_inc_return
+#define arch_atomic64_inc_return_release arch_atomic64_inc_return
+#define arch_atomic64_inc_return_relaxed arch_atomic64_inc_return
+#endif /* arch_atomic64_inc_return */
+
+#ifndef arch_atomic64_inc_return
+static __always_inline s64
+arch_atomic64_inc_return(atomic64_t *v)
+{
+ return arch_atomic64_add_return(1, v);
+}
+#define arch_atomic64_inc_return arch_atomic64_inc_return
+#endif
+
+#ifndef arch_atomic64_inc_return_acquire
+static __always_inline s64
+arch_atomic64_inc_return_acquire(atomic64_t *v)
+{
+ return arch_atomic64_add_return_acquire(1, v);
+}
+#define arch_atomic64_inc_return_acquire arch_atomic64_inc_return_acquire
+#endif
+
+#ifndef arch_atomic64_inc_return_release
+static __always_inline s64
+arch_atomic64_inc_return_release(atomic64_t *v)
+{
+ return arch_atomic64_add_return_release(1, v);
+}
+#define arch_atomic64_inc_return_release arch_atomic64_inc_return_release
+#endif
+
+#ifndef arch_atomic64_inc_return_relaxed
+static __always_inline s64
+arch_atomic64_inc_return_relaxed(atomic64_t *v)
+{
+ return arch_atomic64_add_return_relaxed(1, v);
+}
+#define arch_atomic64_inc_return_relaxed arch_atomic64_inc_return_relaxed
+#endif
+
+#else /* arch_atomic64_inc_return_relaxed */
+
+#ifndef arch_atomic64_inc_return_acquire
+static __always_inline s64
+arch_atomic64_inc_return_acquire(atomic64_t *v)
+{
+ s64 ret = arch_atomic64_inc_return_relaxed(v);
+ __atomic_acquire_fence();
+ return ret;
+}
+#define arch_atomic64_inc_return_acquire arch_atomic64_inc_return_acquire
+#endif
+
+#ifndef arch_atomic64_inc_return_release
+static __always_inline s64
+arch_atomic64_inc_return_release(atomic64_t *v)
+{
+ __atomic_release_fence();
+ return arch_atomic64_inc_return_relaxed(v);
+}
+#define arch_atomic64_inc_return_release arch_atomic64_inc_return_release
+#endif
+
+#ifndef arch_atomic64_inc_return
+static __always_inline s64
+arch_atomic64_inc_return(atomic64_t *v)
+{
+ s64 ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic64_inc_return_relaxed(v);
+ __atomic_post_full_fence();
+ return ret;
+}
+#define arch_atomic64_inc_return arch_atomic64_inc_return
+#endif
+
+#endif /* arch_atomic64_inc_return_relaxed */
+
+#ifndef arch_atomic64_fetch_inc_relaxed
+#ifdef arch_atomic64_fetch_inc
+#define arch_atomic64_fetch_inc_acquire arch_atomic64_fetch_inc
+#define arch_atomic64_fetch_inc_release arch_atomic64_fetch_inc
+#define arch_atomic64_fetch_inc_relaxed arch_atomic64_fetch_inc
+#endif /* arch_atomic64_fetch_inc */
+
+#ifndef arch_atomic64_fetch_inc
+static __always_inline s64
+arch_atomic64_fetch_inc(atomic64_t *v)
+{
+ return arch_atomic64_fetch_add(1, v);
+}
+#define arch_atomic64_fetch_inc arch_atomic64_fetch_inc
+#endif
+
+#ifndef arch_atomic64_fetch_inc_acquire
+static __always_inline s64
+arch_atomic64_fetch_inc_acquire(atomic64_t *v)
+{
+ return arch_atomic64_fetch_add_acquire(1, v);
+}
+#define arch_atomic64_fetch_inc_acquire arch_atomic64_fetch_inc_acquire
+#endif
+
+#ifndef arch_atomic64_fetch_inc_release
+static __always_inline s64
+arch_atomic64_fetch_inc_release(atomic64_t *v)
+{
+ return arch_atomic64_fetch_add_release(1, v);
+}
+#define arch_atomic64_fetch_inc_release arch_atomic64_fetch_inc_release
+#endif
+
+#ifndef arch_atomic64_fetch_inc_relaxed
+static __always_inline s64
+arch_atomic64_fetch_inc_relaxed(atomic64_t *v)
+{
+ return arch_atomic64_fetch_add_relaxed(1, v);
+}
+#define arch_atomic64_fetch_inc_relaxed arch_atomic64_fetch_inc_relaxed
+#endif
+
+#else /* arch_atomic64_fetch_inc_relaxed */
+
+#ifndef arch_atomic64_fetch_inc_acquire
+static __always_inline s64
+arch_atomic64_fetch_inc_acquire(atomic64_t *v)
+{
+ s64 ret = arch_atomic64_fetch_inc_relaxed(v);
+ __atomic_acquire_fence();
+ return ret;
+}
+#define arch_atomic64_fetch_inc_acquire arch_atomic64_fetch_inc_acquire
+#endif
+
+#ifndef arch_atomic64_fetch_inc_release
+static __always_inline s64
+arch_atomic64_fetch_inc_release(atomic64_t *v)
+{
+ __atomic_release_fence();
+ return arch_atomic64_fetch_inc_relaxed(v);
+}
+#define arch_atomic64_fetch_inc_release arch_atomic64_fetch_inc_release
+#endif
+
+#ifndef arch_atomic64_fetch_inc
+static __always_inline s64
+arch_atomic64_fetch_inc(atomic64_t *v)
+{
+ s64 ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic64_fetch_inc_relaxed(v);
+ __atomic_post_full_fence();
+ return ret;
+}
+#define arch_atomic64_fetch_inc arch_atomic64_fetch_inc
+#endif
+
+#endif /* arch_atomic64_fetch_inc_relaxed */
+
+#ifndef arch_atomic64_dec
+static __always_inline void
+arch_atomic64_dec(atomic64_t *v)
+{
+ arch_atomic64_sub(1, v);
+}
+#define arch_atomic64_dec arch_atomic64_dec
+#endif
+
+#ifndef arch_atomic64_dec_return_relaxed
+#ifdef arch_atomic64_dec_return
+#define arch_atomic64_dec_return_acquire arch_atomic64_dec_return
+#define arch_atomic64_dec_return_release arch_atomic64_dec_return
+#define arch_atomic64_dec_return_relaxed arch_atomic64_dec_return
+#endif /* arch_atomic64_dec_return */
+
+#ifndef arch_atomic64_dec_return
+static __always_inline s64
+arch_atomic64_dec_return(atomic64_t *v)
+{
+ return arch_atomic64_sub_return(1, v);
+}
+#define arch_atomic64_dec_return arch_atomic64_dec_return
+#endif
+
+#ifndef arch_atomic64_dec_return_acquire
+static __always_inline s64
+arch_atomic64_dec_return_acquire(atomic64_t *v)
+{
+ return arch_atomic64_sub_return_acquire(1, v);
+}
+#define arch_atomic64_dec_return_acquire arch_atomic64_dec_return_acquire
+#endif
+
+#ifndef arch_atomic64_dec_return_release
+static __always_inline s64
+arch_atomic64_dec_return_release(atomic64_t *v)
+{
+ return arch_atomic64_sub_return_release(1, v);
+}
+#define arch_atomic64_dec_return_release arch_atomic64_dec_return_release
+#endif
+
+#ifndef arch_atomic64_dec_return_relaxed
+static __always_inline s64
+arch_atomic64_dec_return_relaxed(atomic64_t *v)
+{
+ return arch_atomic64_sub_return_relaxed(1, v);
+}
+#define arch_atomic64_dec_return_relaxed arch_atomic64_dec_return_relaxed
+#endif
+
+#else /* arch_atomic64_dec_return_relaxed */
+
+#ifndef arch_atomic64_dec_return_acquire
+static __always_inline s64
+arch_atomic64_dec_return_acquire(atomic64_t *v)
+{
+ s64 ret = arch_atomic64_dec_return_relaxed(v);
+ __atomic_acquire_fence();
+ return ret;
+}
+#define arch_atomic64_dec_return_acquire arch_atomic64_dec_return_acquire
+#endif
+
+#ifndef arch_atomic64_dec_return_release
+static __always_inline s64
+arch_atomic64_dec_return_release(atomic64_t *v)
+{
+ __atomic_release_fence();
+ return arch_atomic64_dec_return_relaxed(v);
+}
+#define arch_atomic64_dec_return_release arch_atomic64_dec_return_release
+#endif
+
+#ifndef arch_atomic64_dec_return
+static __always_inline s64
+arch_atomic64_dec_return(atomic64_t *v)
+{
+ s64 ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic64_dec_return_relaxed(v);
+ __atomic_post_full_fence();
+ return ret;
+}
+#define arch_atomic64_dec_return arch_atomic64_dec_return
+#endif
+
+#endif /* arch_atomic64_dec_return_relaxed */
+
+#ifndef arch_atomic64_fetch_dec_relaxed
+#ifdef arch_atomic64_fetch_dec
+#define arch_atomic64_fetch_dec_acquire arch_atomic64_fetch_dec
+#define arch_atomic64_fetch_dec_release arch_atomic64_fetch_dec
+#define arch_atomic64_fetch_dec_relaxed arch_atomic64_fetch_dec
+#endif /* arch_atomic64_fetch_dec */
+
+#ifndef arch_atomic64_fetch_dec
+static __always_inline s64
+arch_atomic64_fetch_dec(atomic64_t *v)
+{
+ return arch_atomic64_fetch_sub(1, v);
+}
+#define arch_atomic64_fetch_dec arch_atomic64_fetch_dec
+#endif
+
+#ifndef arch_atomic64_fetch_dec_acquire
+static __always_inline s64
+arch_atomic64_fetch_dec_acquire(atomic64_t *v)
+{
+ return arch_atomic64_fetch_sub_acquire(1, v);
+}
+#define arch_atomic64_fetch_dec_acquire arch_atomic64_fetch_dec_acquire
+#endif
+
+#ifndef arch_atomic64_fetch_dec_release
+static __always_inline s64
+arch_atomic64_fetch_dec_release(atomic64_t *v)
+{
+ return arch_atomic64_fetch_sub_release(1, v);
+}
+#define arch_atomic64_fetch_dec_release arch_atomic64_fetch_dec_release
+#endif
+
+#ifndef arch_atomic64_fetch_dec_relaxed
+static __always_inline s64
+arch_atomic64_fetch_dec_relaxed(atomic64_t *v)
+{
+ return arch_atomic64_fetch_sub_relaxed(1, v);
+}
+#define arch_atomic64_fetch_dec_relaxed arch_atomic64_fetch_dec_relaxed
+#endif
+
+#else /* arch_atomic64_fetch_dec_relaxed */
+
+#ifndef arch_atomic64_fetch_dec_acquire
+static __always_inline s64
+arch_atomic64_fetch_dec_acquire(atomic64_t *v)
+{
+ s64 ret = arch_atomic64_fetch_dec_relaxed(v);
+ __atomic_acquire_fence();
+ return ret;
+}
+#define arch_atomic64_fetch_dec_acquire arch_atomic64_fetch_dec_acquire
+#endif
+
+#ifndef arch_atomic64_fetch_dec_release
+static __always_inline s64
+arch_atomic64_fetch_dec_release(atomic64_t *v)
+{
+ __atomic_release_fence();
+ return arch_atomic64_fetch_dec_relaxed(v);
+}
+#define arch_atomic64_fetch_dec_release arch_atomic64_fetch_dec_release
+#endif
+
+#ifndef arch_atomic64_fetch_dec
+static __always_inline s64
+arch_atomic64_fetch_dec(atomic64_t *v)
+{
+ s64 ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic64_fetch_dec_relaxed(v);
+ __atomic_post_full_fence();
+ return ret;
+}
+#define arch_atomic64_fetch_dec arch_atomic64_fetch_dec
+#endif
+
+#endif /* arch_atomic64_fetch_dec_relaxed */
+
+#ifndef arch_atomic64_fetch_and_relaxed
+#define arch_atomic64_fetch_and_acquire arch_atomic64_fetch_and
+#define arch_atomic64_fetch_and_release arch_atomic64_fetch_and
+#define arch_atomic64_fetch_and_relaxed arch_atomic64_fetch_and
+#else /* arch_atomic64_fetch_and_relaxed */
+
+#ifndef arch_atomic64_fetch_and_acquire
+static __always_inline s64
+arch_atomic64_fetch_and_acquire(s64 i, atomic64_t *v)
+{
+ s64 ret = arch_atomic64_fetch_and_relaxed(i, v);
+ __atomic_acquire_fence();
+ return ret;
+}
+#define arch_atomic64_fetch_and_acquire arch_atomic64_fetch_and_acquire
+#endif
+
+#ifndef arch_atomic64_fetch_and_release
+static __always_inline s64
+arch_atomic64_fetch_and_release(s64 i, atomic64_t *v)
+{
+ __atomic_release_fence();
+ return arch_atomic64_fetch_and_relaxed(i, v);
+}
+#define arch_atomic64_fetch_and_release arch_atomic64_fetch_and_release
+#endif
+
+#ifndef arch_atomic64_fetch_and
+static __always_inline s64
+arch_atomic64_fetch_and(s64 i, atomic64_t *v)
+{
+ s64 ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic64_fetch_and_relaxed(i, v);
+ __atomic_post_full_fence();
+ return ret;
+}
+#define arch_atomic64_fetch_and arch_atomic64_fetch_and
+#endif
+
+#endif /* arch_atomic64_fetch_and_relaxed */
+
+#ifndef arch_atomic64_andnot
+static __always_inline void
+arch_atomic64_andnot(s64 i, atomic64_t *v)
+{
+ arch_atomic64_and(~i, v);
+}
+#define arch_atomic64_andnot arch_atomic64_andnot
+#endif
+
+#ifndef arch_atomic64_fetch_andnot_relaxed
+#ifdef arch_atomic64_fetch_andnot
+#define arch_atomic64_fetch_andnot_acquire arch_atomic64_fetch_andnot
+#define arch_atomic64_fetch_andnot_release arch_atomic64_fetch_andnot
+#define arch_atomic64_fetch_andnot_relaxed arch_atomic64_fetch_andnot
+#endif /* arch_atomic64_fetch_andnot */
+
+#ifndef arch_atomic64_fetch_andnot
+static __always_inline s64
+arch_atomic64_fetch_andnot(s64 i, atomic64_t *v)
+{
+ return arch_atomic64_fetch_and(~i, v);
+}
+#define arch_atomic64_fetch_andnot arch_atomic64_fetch_andnot
+#endif
+
+#ifndef arch_atomic64_fetch_andnot_acquire
+static __always_inline s64
+arch_atomic64_fetch_andnot_acquire(s64 i, atomic64_t *v)
+{
+ return arch_atomic64_fetch_and_acquire(~i, v);
+}
+#define arch_atomic64_fetch_andnot_acquire arch_atomic64_fetch_andnot_acquire
+#endif
+
+#ifndef arch_atomic64_fetch_andnot_release
+static __always_inline s64
+arch_atomic64_fetch_andnot_release(s64 i, atomic64_t *v)
+{
+ return arch_atomic64_fetch_and_release(~i, v);
+}
+#define arch_atomic64_fetch_andnot_release arch_atomic64_fetch_andnot_release
+#endif
+
+#ifndef arch_atomic64_fetch_andnot_relaxed
+static __always_inline s64
+arch_atomic64_fetch_andnot_relaxed(s64 i, atomic64_t *v)
+{
+ return arch_atomic64_fetch_and_relaxed(~i, v);
+}
+#define arch_atomic64_fetch_andnot_relaxed arch_atomic64_fetch_andnot_relaxed
+#endif
+
+#else /* arch_atomic64_fetch_andnot_relaxed */
+
+#ifndef arch_atomic64_fetch_andnot_acquire
+static __always_inline s64
+arch_atomic64_fetch_andnot_acquire(s64 i, atomic64_t *v)
+{
+ s64 ret = arch_atomic64_fetch_andnot_relaxed(i, v);
+ __atomic_acquire_fence();
+ return ret;
+}
+#define arch_atomic64_fetch_andnot_acquire arch_atomic64_fetch_andnot_acquire
+#endif
+
+#ifndef arch_atomic64_fetch_andnot_release
+static __always_inline s64
+arch_atomic64_fetch_andnot_release(s64 i, atomic64_t *v)
+{
+ __atomic_release_fence();
+ return arch_atomic64_fetch_andnot_relaxed(i, v);
+}
+#define arch_atomic64_fetch_andnot_release arch_atomic64_fetch_andnot_release
+#endif
+
+#ifndef arch_atomic64_fetch_andnot
+static __always_inline s64
+arch_atomic64_fetch_andnot(s64 i, atomic64_t *v)
+{
+ s64 ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic64_fetch_andnot_relaxed(i, v);
+ __atomic_post_full_fence();
+ return ret;
+}
+#define arch_atomic64_fetch_andnot arch_atomic64_fetch_andnot
+#endif
+
+#endif /* arch_atomic64_fetch_andnot_relaxed */
+
+#ifndef arch_atomic64_fetch_or_relaxed
+#define arch_atomic64_fetch_or_acquire arch_atomic64_fetch_or
+#define arch_atomic64_fetch_or_release arch_atomic64_fetch_or
+#define arch_atomic64_fetch_or_relaxed arch_atomic64_fetch_or
+#else /* arch_atomic64_fetch_or_relaxed */
+
+#ifndef arch_atomic64_fetch_or_acquire
+static __always_inline s64
+arch_atomic64_fetch_or_acquire(s64 i, atomic64_t *v)
+{
+ s64 ret = arch_atomic64_fetch_or_relaxed(i, v);
+ __atomic_acquire_fence();
+ return ret;
+}
+#define arch_atomic64_fetch_or_acquire arch_atomic64_fetch_or_acquire
+#endif
+
+#ifndef arch_atomic64_fetch_or_release
+static __always_inline s64
+arch_atomic64_fetch_or_release(s64 i, atomic64_t *v)
+{
+ __atomic_release_fence();
+ return arch_atomic64_fetch_or_relaxed(i, v);
+}
+#define arch_atomic64_fetch_or_release arch_atomic64_fetch_or_release
+#endif
+
+#ifndef arch_atomic64_fetch_or
+static __always_inline s64
+arch_atomic64_fetch_or(s64 i, atomic64_t *v)
+{
+ s64 ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic64_fetch_or_relaxed(i, v);
+ __atomic_post_full_fence();
+ return ret;
+}
+#define arch_atomic64_fetch_or arch_atomic64_fetch_or
+#endif
+
+#endif /* arch_atomic64_fetch_or_relaxed */
+
+#ifndef arch_atomic64_fetch_xor_relaxed
+#define arch_atomic64_fetch_xor_acquire arch_atomic64_fetch_xor
+#define arch_atomic64_fetch_xor_release arch_atomic64_fetch_xor
+#define arch_atomic64_fetch_xor_relaxed arch_atomic64_fetch_xor
+#else /* arch_atomic64_fetch_xor_relaxed */
+
+#ifndef arch_atomic64_fetch_xor_acquire
+static __always_inline s64
+arch_atomic64_fetch_xor_acquire(s64 i, atomic64_t *v)
+{
+ s64 ret = arch_atomic64_fetch_xor_relaxed(i, v);
+ __atomic_acquire_fence();
+ return ret;
+}
+#define arch_atomic64_fetch_xor_acquire arch_atomic64_fetch_xor_acquire
+#endif
+
+#ifndef arch_atomic64_fetch_xor_release
+static __always_inline s64
+arch_atomic64_fetch_xor_release(s64 i, atomic64_t *v)
+{
+ __atomic_release_fence();
+ return arch_atomic64_fetch_xor_relaxed(i, v);
+}
+#define arch_atomic64_fetch_xor_release arch_atomic64_fetch_xor_release
+#endif
+
+#ifndef arch_atomic64_fetch_xor
+static __always_inline s64
+arch_atomic64_fetch_xor(s64 i, atomic64_t *v)
+{
+ s64 ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic64_fetch_xor_relaxed(i, v);
+ __atomic_post_full_fence();
+ return ret;
+}
+#define arch_atomic64_fetch_xor arch_atomic64_fetch_xor
+#endif
+
+#endif /* arch_atomic64_fetch_xor_relaxed */
+
+#ifndef arch_atomic64_xchg_relaxed
+#define arch_atomic64_xchg_acquire arch_atomic64_xchg
+#define arch_atomic64_xchg_release arch_atomic64_xchg
+#define arch_atomic64_xchg_relaxed arch_atomic64_xchg
+#else /* arch_atomic64_xchg_relaxed */
+
+#ifndef arch_atomic64_xchg_acquire
+static __always_inline s64
+arch_atomic64_xchg_acquire(atomic64_t *v, s64 i)
+{
+ s64 ret = arch_atomic64_xchg_relaxed(v, i);
+ __atomic_acquire_fence();
+ return ret;
+}
+#define arch_atomic64_xchg_acquire arch_atomic64_xchg_acquire
+#endif
+
+#ifndef arch_atomic64_xchg_release
+static __always_inline s64
+arch_atomic64_xchg_release(atomic64_t *v, s64 i)
+{
+ __atomic_release_fence();
+ return arch_atomic64_xchg_relaxed(v, i);
+}
+#define arch_atomic64_xchg_release arch_atomic64_xchg_release
+#endif
+
+#ifndef arch_atomic64_xchg
+static __always_inline s64
+arch_atomic64_xchg(atomic64_t *v, s64 i)
+{
+ s64 ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic64_xchg_relaxed(v, i);
+ __atomic_post_full_fence();
+ return ret;
+}
+#define arch_atomic64_xchg arch_atomic64_xchg
+#endif
+
+#endif /* arch_atomic64_xchg_relaxed */
+
+#ifndef arch_atomic64_cmpxchg_relaxed
+#define arch_atomic64_cmpxchg_acquire arch_atomic64_cmpxchg
+#define arch_atomic64_cmpxchg_release arch_atomic64_cmpxchg
+#define arch_atomic64_cmpxchg_relaxed arch_atomic64_cmpxchg
+#else /* arch_atomic64_cmpxchg_relaxed */
+
+#ifndef arch_atomic64_cmpxchg_acquire
+static __always_inline s64
+arch_atomic64_cmpxchg_acquire(atomic64_t *v, s64 old, s64 new)
+{
+ s64 ret = arch_atomic64_cmpxchg_relaxed(v, old, new);
+ __atomic_acquire_fence();
+ return ret;
+}
+#define arch_atomic64_cmpxchg_acquire arch_atomic64_cmpxchg_acquire
+#endif
+
+#ifndef arch_atomic64_cmpxchg_release
+static __always_inline s64
+arch_atomic64_cmpxchg_release(atomic64_t *v, s64 old, s64 new)
+{
+ __atomic_release_fence();
+ return arch_atomic64_cmpxchg_relaxed(v, old, new);
+}
+#define arch_atomic64_cmpxchg_release arch_atomic64_cmpxchg_release
+#endif
+
+#ifndef arch_atomic64_cmpxchg
+static __always_inline s64
+arch_atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new)
+{
+ s64 ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic64_cmpxchg_relaxed(v, old, new);
+ __atomic_post_full_fence();
+ return ret;
+}
+#define arch_atomic64_cmpxchg arch_atomic64_cmpxchg
+#endif
+
+#endif /* arch_atomic64_cmpxchg_relaxed */
+
+#ifndef arch_atomic64_try_cmpxchg_relaxed
+#ifdef arch_atomic64_try_cmpxchg
+#define arch_atomic64_try_cmpxchg_acquire arch_atomic64_try_cmpxchg
+#define arch_atomic64_try_cmpxchg_release arch_atomic64_try_cmpxchg
+#define arch_atomic64_try_cmpxchg_relaxed arch_atomic64_try_cmpxchg
+#endif /* arch_atomic64_try_cmpxchg */
+
+#ifndef arch_atomic64_try_cmpxchg
+static __always_inline bool
+arch_atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new)
+{
+ s64 r, o = *old;
+ r = arch_atomic64_cmpxchg(v, o, new);
+ if (unlikely(r != o))
+ *old = r;
+ return likely(r == o);
+}
+#define arch_atomic64_try_cmpxchg arch_atomic64_try_cmpxchg
+#endif
+
+#ifndef arch_atomic64_try_cmpxchg_acquire
+static __always_inline bool
+arch_atomic64_try_cmpxchg_acquire(atomic64_t *v, s64 *old, s64 new)
+{
+ s64 r, o = *old;
+ r = arch_atomic64_cmpxchg_acquire(v, o, new);
+ if (unlikely(r != o))
+ *old = r;
+ return likely(r == o);
+}
+#define arch_atomic64_try_cmpxchg_acquire arch_atomic64_try_cmpxchg_acquire
+#endif
+
+#ifndef arch_atomic64_try_cmpxchg_release
+static __always_inline bool
+arch_atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new)
+{
+ s64 r, o = *old;
+ r = arch_atomic64_cmpxchg_release(v, o, new);
+ if (unlikely(r != o))
+ *old = r;
+ return likely(r == o);
+}
+#define arch_atomic64_try_cmpxchg_release arch_atomic64_try_cmpxchg_release
+#endif
+
+#ifndef arch_atomic64_try_cmpxchg_relaxed
+static __always_inline bool
+arch_atomic64_try_cmpxchg_relaxed(atomic64_t *v, s64 *old, s64 new)
+{
+ s64 r, o = *old;
+ r = arch_atomic64_cmpxchg_relaxed(v, o, new);
+ if (unlikely(r != o))
+ *old = r;
+ return likely(r == o);
+}
+#define arch_atomic64_try_cmpxchg_relaxed arch_atomic64_try_cmpxchg_relaxed
+#endif
+
+#else /* arch_atomic64_try_cmpxchg_relaxed */
+
+#ifndef arch_atomic64_try_cmpxchg_acquire
+static __always_inline bool
+arch_atomic64_try_cmpxchg_acquire(atomic64_t *v, s64 *old, s64 new)
+{
+ bool ret = arch_atomic64_try_cmpxchg_relaxed(v, old, new);
+ __atomic_acquire_fence();
+ return ret;
+}
+#define arch_atomic64_try_cmpxchg_acquire arch_atomic64_try_cmpxchg_acquire
+#endif
+
+#ifndef arch_atomic64_try_cmpxchg_release
+static __always_inline bool
+arch_atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new)
+{
+ __atomic_release_fence();
+ return arch_atomic64_try_cmpxchg_relaxed(v, old, new);
+}
+#define arch_atomic64_try_cmpxchg_release arch_atomic64_try_cmpxchg_release
+#endif
+
+#ifndef arch_atomic64_try_cmpxchg
+static __always_inline bool
+arch_atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new)
+{
+ bool ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic64_try_cmpxchg_relaxed(v, old, new);
+ __atomic_post_full_fence();
+ return ret;
+}
+#define arch_atomic64_try_cmpxchg arch_atomic64_try_cmpxchg
+#endif
+
+#endif /* arch_atomic64_try_cmpxchg_relaxed */
+
+#ifndef arch_atomic64_sub_and_test
+/**
+ * arch_atomic64_sub_and_test - subtract value from variable and test result
+ * @i: integer value to subtract
+ * @v: pointer of type atomic64_t
+ *
+ * Atomically subtracts @i from @v and returns
+ * true if the result is zero, or false for all
+ * other cases.
+ */
+static __always_inline bool
+arch_atomic64_sub_and_test(s64 i, atomic64_t *v)
+{
+ return arch_atomic64_sub_return(i, v) == 0;
+}
+#define arch_atomic64_sub_and_test arch_atomic64_sub_and_test
+#endif
+
+#ifndef arch_atomic64_dec_and_test
+/**
+ * arch_atomic64_dec_and_test - decrement and test
+ * @v: pointer of type atomic64_t
+ *
+ * Atomically decrements @v by 1 and
+ * returns true if the result is 0, or false for all other
+ * cases.
+ */
+static __always_inline bool
+arch_atomic64_dec_and_test(atomic64_t *v)
+{
+ return arch_atomic64_dec_return(v) == 0;
+}
+#define arch_atomic64_dec_and_test arch_atomic64_dec_and_test
+#endif
+
+#ifndef arch_atomic64_inc_and_test
+/**
+ * arch_atomic64_inc_and_test - increment and test
+ * @v: pointer of type atomic64_t
+ *
+ * Atomically increments @v by 1
+ * and returns true if the result is zero, or false for all
+ * other cases.
+ */
+static __always_inline bool
+arch_atomic64_inc_and_test(atomic64_t *v)
+{
+ return arch_atomic64_inc_return(v) == 0;
+}
+#define arch_atomic64_inc_and_test arch_atomic64_inc_and_test
+#endif
+
+#ifndef arch_atomic64_add_negative
+/**
+ * arch_atomic64_add_negative - add and test if negative
+ * @i: integer value to add
+ * @v: pointer of type atomic64_t
+ *
+ * Atomically adds @i to @v and returns true
+ * if the result is negative, or false when
+ * the result is greater than or equal to zero.
+ */
+static __always_inline bool
+arch_atomic64_add_negative(s64 i, atomic64_t *v)
+{
+ return arch_atomic64_add_return(i, v) < 0;
+}
+#define arch_atomic64_add_negative arch_atomic64_add_negative
+#endif
+
+#ifndef arch_atomic64_fetch_add_unless
+/**
+ * arch_atomic64_fetch_add_unless - add unless the number is already a given value
+ * @v: pointer of type atomic64_t
+ * @a: the amount to add to v...
+ * @u: ...unless v is equal to u.
+ *
+ * Atomically adds @a to @v, so long as @v was not already @u.
+ * Returns the original value of @v.
+ */
+static __always_inline s64
+arch_atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
+{
+ s64 c = arch_atomic64_read(v);
+
+ do {
+ if (unlikely(c == u))
+ break;
+ } while (!arch_atomic64_try_cmpxchg(v, &c, c + a));
+
+ return c;
+}
+#define arch_atomic64_fetch_add_unless arch_atomic64_fetch_add_unless
+#endif
+
+#ifndef arch_atomic64_add_unless
+/**
+ * arch_atomic64_add_unless - add unless the number is already a given value
+ * @v: pointer of type atomic64_t
+ * @a: the amount to add to v...
+ * @u: ...unless v is equal to u.
+ *
+ * Atomically adds @a to @v, if @v was not already @u.
+ * Returns true if the addition was done.
+ */
+static __always_inline bool
+arch_atomic64_add_unless(atomic64_t *v, s64 a, s64 u)
+{
+ return arch_atomic64_fetch_add_unless(v, a, u) != u;
+}
+#define arch_atomic64_add_unless arch_atomic64_add_unless
+#endif
+
+#ifndef arch_atomic64_inc_not_zero
+/**
+ * arch_atomic64_inc_not_zero - increment unless the number is zero
+ * @v: pointer of type atomic64_t
+ *
+ * Atomically increments @v by 1, if @v is non-zero.
+ * Returns true if the increment was done.
+ */
+static __always_inline bool
+arch_atomic64_inc_not_zero(atomic64_t *v)
+{
+ return arch_atomic64_add_unless(v, 1, 0);
+}
+#define arch_atomic64_inc_not_zero arch_atomic64_inc_not_zero
+#endif
+
+#ifndef arch_atomic64_inc_unless_negative
+static __always_inline bool
+arch_atomic64_inc_unless_negative(atomic64_t *v)
+{
+ s64 c = arch_atomic64_read(v);
+
+ do {
+ if (unlikely(c < 0))
+ return false;
+ } while (!arch_atomic64_try_cmpxchg(v, &c, c + 1));
+
+ return true;
+}
+#define arch_atomic64_inc_unless_negative arch_atomic64_inc_unless_negative
+#endif
+
+#ifndef arch_atomic64_dec_unless_positive
+static __always_inline bool
+arch_atomic64_dec_unless_positive(atomic64_t *v)
+{
+ s64 c = arch_atomic64_read(v);
+
+ do {
+ if (unlikely(c > 0))
+ return false;
+ } while (!arch_atomic64_try_cmpxchg(v, &c, c - 1));
+
+ return true;
+}
+#define arch_atomic64_dec_unless_positive arch_atomic64_dec_unless_positive
+#endif
+
+#ifndef arch_atomic64_dec_if_positive
+static __always_inline s64
+arch_atomic64_dec_if_positive(atomic64_t *v)
+{
+ s64 dec, c = arch_atomic64_read(v);
+
+ do {
+ dec = c - 1;
+ if (unlikely(dec < 0))
+ break;
+ } while (!arch_atomic64_try_cmpxchg(v, &c, dec));
+
+ return dec;
+}
+#define arch_atomic64_dec_if_positive arch_atomic64_dec_if_positive
+#endif
+
+#endif /* _LINUX_ATOMIC_FALLBACK_H */
+// 90cd26cfd69d2250303d654955a0cc12620fb91b
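
  The arch_atomic_*() namespace built above exists so that an instrumented layer
  can sit in front of it: asm-generic/atomic-instrumented.h (also touched in
  this series, per the diffstat) emits wrappers that run a sanitizer check and
  then defer to the raw arch op. A sketch of that shape (the exact
  instrumentation hook name is an assumption here):

	static __always_inline int
	atomic_add_return(int i, atomic_t *v)
	{
		instrument_atomic_write(v, sizeof(*v));	/* KASAN/KCSAN hook */
		return arch_atomic_add_return(i, v);
	}
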
diff --git a/include/linux/atomic-fallback.h b/include/linux/atomic-fallback.h
index a7d240e465c0..2c4927bf7b8d 100644
--- a/include/linux/atomic-fallback.h
+++ b/include/linux/atomic-fallback.h
@@ -6,6 +6,8 @@
#ifndef _LINUX_ATOMIC_FALLBACK_H
#define _LINUX_ATOMIC_FALLBACK_H
+#include <linux/compiler.h>
+
#ifndef xchg_relaxed
#define xchg_relaxed xchg
#define xchg_acquire xchg
@@ -76,7 +78,7 @@
#endif /* cmpxchg64_relaxed */
#ifndef atomic_read_acquire
-static inline int
+static __always_inline int
atomic_read_acquire(const atomic_t *v)
{
return smp_load_acquire(&(v)->counter);
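
  The mechanical change running through the rest of this file, `static inline`
  becoming `static __always_inline`, matters because an instrumenting compiler
  is otherwise free to emit these helpers out of line, where they become visible
  to tracing and instrumentation and break the assumptions of noinstr callers.
  For reference, __always_inline expands roughly to (per
  include/linux/compiler_types.h):

	#define __always_inline inline __attribute__((__always_inline__))
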
@@ -85,7 +87,7 @@ atomic_read_acquire(const atomic_t *v)
#endif
#ifndef atomic_set_release
-static inline void
+static __always_inline void
atomic_set_release(atomic_t *v, int i)
{
smp_store_release(&(v)->counter, i);
@@ -100,7 +102,7 @@ atomic_set_release(atomic_t *v, int i)
#else /* atomic_add_return_relaxed */
#ifndef atomic_add_return_acquire
-static inline int
+static __always_inline int
atomic_add_return_acquire(int i, atomic_t *v)
{
int ret = atomic_add_return_relaxed(i, v);
@@ -111,7 +113,7 @@ atomic_add_return_acquire(int i, atomic_t *v)
#endif
#ifndef atomic_add_return_release
-static inline int
+static __always_inline int
atomic_add_return_release(int i, atomic_t *v)
{
__atomic_release_fence();
@@ -121,7 +123,7 @@ atomic_add_return_release(int i, atomic_t *v)
#endif
#ifndef atomic_add_return
-static inline int
+static __always_inline int
atomic_add_return(int i, atomic_t *v)
{
int ret;
@@ -142,7 +144,7 @@ atomic_add_return(int i, atomic_t *v)
#else /* atomic_fetch_add_relaxed */
#ifndef atomic_fetch_add_acquire
-static inline int
+static __always_inline int
atomic_fetch_add_acquire(int i, atomic_t *v)
{
int ret = atomic_fetch_add_relaxed(i, v);
@@ -153,7 +155,7 @@ atomic_fetch_add_acquire(int i, atomic_t *v)
#endif
#ifndef atomic_fetch_add_release
-static inline int
+static __always_inline int
atomic_fetch_add_release(int i, atomic_t *v)
{
__atomic_release_fence();
@@ -163,7 +165,7 @@ atomic_fetch_add_release(int i, atomic_t *v)
#endif
#ifndef atomic_fetch_add
-static inline int
+static __always_inline int
atomic_fetch_add(int i, atomic_t *v)
{
int ret;
@@ -184,7 +186,7 @@ atomic_fetch_add(int i, atomic_t *v)
#else /* atomic_sub_return_relaxed */
#ifndef atomic_sub_return_acquire
-static inline int
+static __always_inline int
atomic_sub_return_acquire(int i, atomic_t *v)
{
int ret = atomic_sub_return_relaxed(i, v);
@@ -195,7 +197,7 @@ atomic_sub_return_acquire(int i, atomic_t *v)
#endif
#ifndef atomic_sub_return_release
-static inline int
+static __always_inline int
atomic_sub_return_release(int i, atomic_t *v)
{
__atomic_release_fence();
@@ -205,7 +207,7 @@ atomic_sub_return_release(int i, atomic_t *v)
#endif
#ifndef atomic_sub_return
-static inline int
+static __always_inline int
atomic_sub_return(int i, atomic_t *v)
{
int ret;
@@ -226,7 +228,7 @@ atomic_sub_return(int i, atomic_t *v)
#else /* atomic_fetch_sub_relaxed */
#ifndef atomic_fetch_sub_acquire
-static inline int
+static __always_inline int
atomic_fetch_sub_acquire(int i, atomic_t *v)
{
int ret = atomic_fetch_sub_relaxed(i, v);
@@ -237,7 +239,7 @@ atomic_fetch_sub_acquire(int i, atomic_t *v)
#endif
#ifndef atomic_fetch_sub_release
-static inline int
+static __always_inline int
atomic_fetch_sub_release(int i, atomic_t *v)
{
__atomic_release_fence();
@@ -247,7 +249,7 @@ atomic_fetch_sub_release(int i, atomic_t *v)
#endif
#ifndef atomic_fetch_sub
-static inline int
+static __always_inline int
atomic_fetch_sub(int i, atomic_t *v)
{
int ret;
@@ -262,7 +264,7 @@ atomic_fetch_sub(int i, atomic_t *v)
#endif /* atomic_fetch_sub_relaxed */
#ifndef atomic_inc
-static inline void
+static __always_inline void
atomic_inc(atomic_t *v)
{
atomic_add(1, v);
@@ -278,7 +280,7 @@ atomic_inc(atomic_t *v)
#endif /* atomic_inc_return */
#ifndef atomic_inc_return
-static inline int
+static __always_inline int
atomic_inc_return(atomic_t *v)
{
return atomic_add_return(1, v);
@@ -287,7 +289,7 @@ atomic_inc_return(atomic_t *v)
#endif
#ifndef atomic_inc_return_acquire
-static inline int
+static __always_inline int
atomic_inc_return_acquire(atomic_t *v)
{
return atomic_add_return_acquire(1, v);
@@ -296,7 +298,7 @@ atomic_inc_return_acquire(atomic_t *v)
#endif
#ifndef atomic_inc_return_release
-static inline int
+static __always_inline int
atomic_inc_return_release(atomic_t *v)
{
return atomic_add_return_release(1, v);
@@ -305,7 +307,7 @@ atomic_inc_return_release(atomic_t *v)
#endif
#ifndef atomic_inc_return_relaxed
-static inline int
+static __always_inline int
atomic_inc_return_relaxed(atomic_t *v)
{
return atomic_add_return_relaxed(1, v);
@@ -316,7 +318,7 @@ atomic_inc_return_relaxed(atomic_t *v)
#else /* atomic_inc_return_relaxed */
#ifndef atomic_inc_return_acquire
-static inline int
+static __always_inline int
atomic_inc_return_acquire(atomic_t *v)
{
int ret = atomic_inc_return_relaxed(v);
@@ -327,7 +329,7 @@ atomic_inc_return_acquire(atomic_t *v)
#endif
#ifndef atomic_inc_return_release
-static inline int
+static __always_inline int
atomic_inc_return_release(atomic_t *v)
{
__atomic_release_fence();
@@ -337,7 +339,7 @@ atomic_inc_return_release(atomic_t *v)
#endif
#ifndef atomic_inc_return
-static inline int
+static __always_inline int
atomic_inc_return(atomic_t *v)
{
int ret;
@@ -359,7 +361,7 @@ atomic_inc_return(atomic_t *v)
#endif /* atomic_fetch_inc */
#ifndef atomic_fetch_inc
-static inline int
+static __always_inline int
atomic_fetch_inc(atomic_t *v)
{
return atomic_fetch_add(1, v);
@@ -368,7 +370,7 @@ atomic_fetch_inc(atomic_t *v)
#endif
#ifndef atomic_fetch_inc_acquire
-static inline int
+static __always_inline int
atomic_fetch_inc_acquire(atomic_t *v)
{
return atomic_fetch_add_acquire(1, v);
@@ -377,7 +379,7 @@ atomic_fetch_inc_acquire(atomic_t *v)
#endif
#ifndef atomic_fetch_inc_release
-static inline int
+static __always_inline int
atomic_fetch_inc_release(atomic_t *v)
{
return atomic_fetch_add_release(1, v);
@@ -386,7 +388,7 @@ atomic_fetch_inc_release(atomic_t *v)
#endif
#ifndef atomic_fetch_inc_relaxed
-static inline int
+static __always_inline int
atomic_fetch_inc_relaxed(atomic_t *v)
{
return atomic_fetch_add_relaxed(1, v);
@@ -397,7 +399,7 @@ atomic_fetch_inc_relaxed(atomic_t *v)
#else /* atomic_fetch_inc_relaxed */
#ifndef atomic_fetch_inc_acquire
-static inline int
+static __always_inline int
atomic_fetch_inc_acquire(atomic_t *v)
{
int ret = atomic_fetch_inc_relaxed(v);
@@ -408,7 +410,7 @@ atomic_fetch_inc_acquire(atomic_t *v)
#endif
#ifndef atomic_fetch_inc_release
-static inline int
+static __always_inline int
atomic_fetch_inc_release(atomic_t *v)
{
__atomic_release_fence();
@@ -418,7 +420,7 @@ atomic_fetch_inc_release(atomic_t *v)
#endif
#ifndef atomic_fetch_inc
-static inline int
+static __always_inline int
atomic_fetch_inc(atomic_t *v)
{
int ret;
@@ -433,7 +435,7 @@ atomic_fetch_inc(atomic_t *v)
#endif /* atomic_fetch_inc_relaxed */
#ifndef atomic_dec
-static inline void
+static __always_inline void
atomic_dec(atomic_t *v)
{
atomic_sub(1, v);
@@ -449,7 +451,7 @@ atomic_dec(atomic_t *v)
#endif /* atomic_dec_return */
#ifndef atomic_dec_return
-static inline int
+static __always_inline int
atomic_dec_return(atomic_t *v)
{
return atomic_sub_return(1, v);
@@ -458,7 +460,7 @@ atomic_dec_return(atomic_t *v)
#endif
#ifndef atomic_dec_return_acquire
-static inline int
+static __always_inline int
atomic_dec_return_acquire(atomic_t *v)
{
return atomic_sub_return_acquire(1, v);
@@ -467,7 +469,7 @@ atomic_dec_return_acquire(atomic_t *v)
#endif
#ifndef atomic_dec_return_release
-static inline int
+static __always_inline int
atomic_dec_return_release(atomic_t *v)
{
return atomic_sub_return_release(1, v);
@@ -476,7 +478,7 @@ atomic_dec_return_release(atomic_t *v)
#endif
#ifndef atomic_dec_return_relaxed
-static inline int
+static __always_inline int
atomic_dec_return_relaxed(atomic_t *v)
{
return atomic_sub_return_relaxed(1, v);
@@ -487,7 +489,7 @@ atomic_dec_return_relaxed(atomic_t *v)
#else /* atomic_dec_return_relaxed */
#ifndef atomic_dec_return_acquire
-static inline int
+static __always_inline int
atomic_dec_return_acquire(atomic_t *v)
{
int ret = atomic_dec_return_relaxed(v);
@@ -498,7 +500,7 @@ atomic_dec_return_acquire(atomic_t *v)
#endif
#ifndef atomic_dec_return_release
-static inline int
+static __always_inline int
atomic_dec_return_release(atomic_t *v)
{
__atomic_release_fence();
@@ -508,7 +510,7 @@ atomic_dec_return_release(atomic_t *v)
#endif
#ifndef atomic_dec_return
-static inline int
+static __always_inline int
atomic_dec_return(atomic_t *v)
{
int ret;
@@ -530,7 +532,7 @@ atomic_dec_return(atomic_t *v)
#endif /* atomic_fetch_dec */
#ifndef atomic_fetch_dec
-static inline int
+static __always_inline int
atomic_fetch_dec(atomic_t *v)
{
return atomic_fetch_sub(1, v);
@@ -539,7 +541,7 @@ atomic_fetch_dec(atomic_t *v)
#endif
#ifndef atomic_fetch_dec_acquire
-static inline int
+static __always_inline int
atomic_fetch_dec_acquire(atomic_t *v)
{
return atomic_fetch_sub_acquire(1, v);
@@ -548,7 +550,7 @@ atomic_fetch_dec_acquire(atomic_t *v)
#endif
#ifndef atomic_fetch_dec_release
-static inline int
+static __always_inline int
atomic_fetch_dec_release(atomic_t *v)
{
return atomic_fetch_sub_release(1, v);
@@ -557,7 +559,7 @@ atomic_fetch_dec_release(atomic_t *v)
#endif
#ifndef atomic_fetch_dec_relaxed
-static inline int
+static __always_inline int
atomic_fetch_dec_relaxed(atomic_t *v)
{
return atomic_fetch_sub_relaxed(1, v);
@@ -568,7 +570,7 @@ atomic_fetch_dec_relaxed(atomic_t *v)
#else /* atomic_fetch_dec_relaxed */
#ifndef atomic_fetch_dec_acquire
-static inline int
+static __always_inline int
atomic_fetch_dec_acquire(atomic_t *v)
{
int ret = atomic_fetch_dec_relaxed(v);
@@ -579,7 +581,7 @@ atomic_fetch_dec_acquire(atomic_t *v)
#endif
#ifndef atomic_fetch_dec_release
-static inline int
+static __always_inline int
atomic_fetch_dec_release(atomic_t *v)
{
__atomic_release_fence();
@@ -589,7 +591,7 @@ atomic_fetch_dec_release(atomic_t *v)
#endif
#ifndef atomic_fetch_dec
-static inline int
+static __always_inline int
atomic_fetch_dec(atomic_t *v)
{
int ret;
@@ -610,7 +612,7 @@ atomic_fetch_dec(atomic_t *v)
#else /* atomic_fetch_and_relaxed */
#ifndef atomic_fetch_and_acquire
-static inline int
+static __always_inline int
atomic_fetch_and_acquire(int i, atomic_t *v)
{
int ret = atomic_fetch_and_relaxed(i, v);
@@ -621,7 +623,7 @@ atomic_fetch_and_acquire(int i, atomic_t *v)
#endif
#ifndef atomic_fetch_and_release
-static inline int
+static __always_inline int
atomic_fetch_and_release(int i, atomic_t *v)
{
__atomic_release_fence();
@@ -631,7 +633,7 @@ atomic_fetch_and_release(int i, atomic_t *v)
#endif
#ifndef atomic_fetch_and
-static inline int
+static __always_inline int
atomic_fetch_and(int i, atomic_t *v)
{
int ret;
@@ -646,7 +648,7 @@ atomic_fetch_and(int i, atomic_t *v)
#endif /* atomic_fetch_and_relaxed */
#ifndef atomic_andnot
-static inline void
+static __always_inline void
atomic_andnot(int i, atomic_t *v)
{
atomic_and(~i, v);
@@ -662,7 +664,7 @@ atomic_andnot(int i, atomic_t *v)
#endif /* atomic_fetch_andnot */
#ifndef atomic_fetch_andnot
-static inline int
+static __always_inline int
atomic_fetch_andnot(int i, atomic_t *v)
{
return atomic_fetch_and(~i, v);
@@ -671,7 +673,7 @@ atomic_fetch_andnot(int i, atomic_t *v)
#endif
#ifndef atomic_fetch_andnot_acquire
-static inline int
+static __always_inline int
atomic_fetch_andnot_acquire(int i, atomic_t *v)
{
return atomic_fetch_and_acquire(~i, v);
@@ -680,7 +682,7 @@ atomic_fetch_andnot_acquire(int i, atomic_t *v)
#endif
#ifndef atomic_fetch_andnot_release
-static inline int
+static __always_inline int
atomic_fetch_andnot_release(int i, atomic_t *v)
{
return atomic_fetch_and_release(~i, v);
@@ -689,7 +691,7 @@ atomic_fetch_andnot_release(int i, atomic_t *v)
#endif
#ifndef atomic_fetch_andnot_relaxed
-static inline int
+static __always_inline int
atomic_fetch_andnot_relaxed(int i, atomic_t *v)
{
return atomic_fetch_and_relaxed(~i, v);
@@ -700,7 +702,7 @@ atomic_fetch_andnot_relaxed(int i, atomic_t *v)
#else /* atomic_fetch_andnot_relaxed */
#ifndef atomic_fetch_andnot_acquire
-static inline int
+static __always_inline int
atomic_fetch_andnot_acquire(int i, atomic_t *v)
{
int ret = atomic_fetch_andnot_relaxed(i, v);
@@ -711,7 +713,7 @@ atomic_fetch_andnot_acquire(int i, atomic_t *v)
#endif
#ifndef atomic_fetch_andnot_release
-static inline int
+static __always_inline int
atomic_fetch_andnot_release(int i, atomic_t *v)
{
__atomic_release_fence();
@@ -721,7 +723,7 @@ atomic_fetch_andnot_release(int i, atomic_t *v)
#endif
#ifndef atomic_fetch_andnot
-static inline int
+static __always_inline int
atomic_fetch_andnot(int i, atomic_t *v)
{
int ret;
@@ -742,7 +744,7 @@ atomic_fetch_andnot(int i, atomic_t *v)
#else /* atomic_fetch_or_relaxed */
#ifndef atomic_fetch_or_acquire
-static inline int
+static __always_inline int
atomic_fetch_or_acquire(int i, atomic_t *v)
{
int ret = atomic_fetch_or_relaxed(i, v);
@@ -753,7 +755,7 @@ atomic_fetch_or_acquire(int i, atomic_t *v)
#endif
#ifndef atomic_fetch_or_release
-static inline int
+static __always_inline int
atomic_fetch_or_release(int i, atomic_t *v)
{
__atomic_release_fence();
@@ -763,7 +765,7 @@ atomic_fetch_or_release(int i, atomic_t *v)
#endif
#ifndef atomic_fetch_or
-static inline int
+static __always_inline int
atomic_fetch_or(int i, atomic_t *v)
{
int ret;
@@ -784,7 +786,7 @@ atomic_fetch_or(int i, atomic_t *v)
#else /* atomic_fetch_xor_relaxed */
#ifndef atomic_fetch_xor_acquire
-static inline int
+static __always_inline int
atomic_fetch_xor_acquire(int i, atomic_t *v)
{
int ret = atomic_fetch_xor_relaxed(i, v);
@@ -795,7 +797,7 @@ atomic_fetch_xor_acquire(int i, atomic_t *v)
#endif
#ifndef atomic_fetch_xor_release
-static inline int
+static __always_inline int
atomic_fetch_xor_release(int i, atomic_t *v)
{
__atomic_release_fence();
@@ -805,7 +807,7 @@ atomic_fetch_xor_release(int i, atomic_t *v)
#endif
#ifndef atomic_fetch_xor
-static inline int
+static __always_inline int
atomic_fetch_xor(int i, atomic_t *v)
{
int ret;
@@ -826,7 +828,7 @@ atomic_fetch_xor(int i, atomic_t *v)
#else /* atomic_xchg_relaxed */
#ifndef atomic_xchg_acquire
-static inline int
+static __always_inline int
atomic_xchg_acquire(atomic_t *v, int i)
{
int ret = atomic_xchg_relaxed(v, i);
@@ -837,7 +839,7 @@ atomic_xchg_acquire(atomic_t *v, int i)
#endif
#ifndef atomic_xchg_release
-static inline int
+static __always_inline int
atomic_xchg_release(atomic_t *v, int i)
{
__atomic_release_fence();
@@ -847,7 +849,7 @@ atomic_xchg_release(atomic_t *v, int i)
#endif
#ifndef atomic_xchg
-static inline int
+static __always_inline int
atomic_xchg(atomic_t *v, int i)
{
int ret;
@@ -868,7 +870,7 @@ atomic_xchg(atomic_t *v, int i)
#else /* atomic_cmpxchg_relaxed */
#ifndef atomic_cmpxchg_acquire
-static inline int
+static __always_inline int
atomic_cmpxchg_acquire(atomic_t *v, int old, int new)
{
int ret = atomic_cmpxchg_relaxed(v, old, new);
@@ -879,7 +881,7 @@ atomic_cmpxchg_acquire(atomic_t *v, int old, int new)
#endif
#ifndef atomic_cmpxchg_release
-static inline int
+static __always_inline int
atomic_cmpxchg_release(atomic_t *v, int old, int new)
{
__atomic_release_fence();
@@ -889,7 +891,7 @@ atomic_cmpxchg_release(atomic_t *v, int old, int new)
#endif
#ifndef atomic_cmpxchg
-static inline int
+static __always_inline int
atomic_cmpxchg(atomic_t *v, int old, int new)
{
int ret;
@@ -911,7 +913,7 @@ atomic_cmpxchg(atomic_t *v, int old, int new)
#endif /* atomic_try_cmpxchg */
#ifndef atomic_try_cmpxchg
-static inline bool
+static __always_inline bool
atomic_try_cmpxchg(atomic_t *v, int *old, int new)
{
int r, o = *old;
@@ -924,7 +926,7 @@ atomic_try_cmpxchg(atomic_t *v, int *old, int new)
#endif
#ifndef atomic_try_cmpxchg_acquire
-static inline bool
+static __always_inline bool
atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new)
{
int r, o = *old;
@@ -937,7 +939,7 @@ atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new)
#endif
#ifndef atomic_try_cmpxchg_release
-static inline bool
+static __always_inline bool
atomic_try_cmpxchg_release(atomic_t *v, int *old, int new)
{
int r, o = *old;
@@ -950,7 +952,7 @@ atomic_try_cmpxchg_release(atomic_t *v, int *old, int new)
#endif
#ifndef atomic_try_cmpxchg_relaxed
-static inline bool
+static __always_inline bool
atomic_try_cmpxchg_relaxed(atomic_t *v, int *old, int new)
{
int r, o = *old;
@@ -965,7 +967,7 @@ atomic_try_cmpxchg_relaxed(atomic_t *v, int *old, int new)
#else /* atomic_try_cmpxchg_relaxed */
#ifndef atomic_try_cmpxchg_acquire
-static inline bool
+static __always_inline bool
atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new)
{
bool ret = atomic_try_cmpxchg_relaxed(v, old, new);
@@ -976,7 +978,7 @@ atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new)
#endif
#ifndef atomic_try_cmpxchg_release
-static inline bool
+static __always_inline bool
atomic_try_cmpxchg_release(atomic_t *v, int *old, int new)
{
__atomic_release_fence();
@@ -986,7 +988,7 @@ atomic_try_cmpxchg_release(atomic_t *v, int *old, int new)
#endif
#ifndef atomic_try_cmpxchg
-static inline bool
+static __always_inline bool
atomic_try_cmpxchg(atomic_t *v, int *old, int new)
{
bool ret;
@@ -1010,7 +1012,7 @@ atomic_try_cmpxchg(atomic_t *v, int *old, int new)
* true if the result is zero, or false for all
* other cases.
*/
-static inline bool
+static __always_inline bool
atomic_sub_and_test(int i, atomic_t *v)
{
return atomic_sub_return(i, v) == 0;
@@ -1027,7 +1029,7 @@ atomic_sub_and_test(int i, atomic_t *v)
* returns true if the result is 0, or false for all other
* cases.
*/
-static inline bool
+static __always_inline bool
atomic_dec_and_test(atomic_t *v)
{
return atomic_dec_return(v) == 0;
@@ -1044,7 +1046,7 @@ atomic_dec_and_test(atomic_t *v)
* and returns true if the result is zero, or false for all
* other cases.
*/
-static inline bool
+static __always_inline bool
atomic_inc_and_test(atomic_t *v)
{
return atomic_inc_return(v) == 0;
@@ -1062,7 +1064,7 @@ atomic_inc_and_test(atomic_t *v)
* if the result is negative, or false when
* result is greater than or equal to zero.
*/
-static inline bool
+static __always_inline bool
atomic_add_negative(int i, atomic_t *v)
{
return atomic_add_return(i, v) < 0;
@@ -1080,7 +1082,7 @@ atomic_add_negative(int i, atomic_t *v)
* Atomically adds @a to @v, so long as @v was not already @u.
* Returns original value of @v
*/
-static inline int
+static __always_inline int
atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
int c = atomic_read(v);
@@ -1105,7 +1107,7 @@ atomic_fetch_add_unless(atomic_t *v, int a, int u)
* Atomically adds @a to @v, if @v was not already @u.
* Returns true if the addition was done.
*/
-static inline bool
+static __always_inline bool
atomic_add_unless(atomic_t *v, int a, int u)
{
return atomic_fetch_add_unless(v, a, u) != u;
@@ -1121,7 +1123,7 @@ atomic_add_unless(atomic_t *v, int a, int u)
* Atomically increments @v by 1, if @v is non-zero.
* Returns true if the increment was done.
*/
-static inline bool
+static __always_inline bool
atomic_inc_not_zero(atomic_t *v)
{
return atomic_add_unless(v, 1, 0);
@@ -1130,7 +1132,7 @@ atomic_inc_not_zero(atomic_t *v)
#endif
#ifndef atomic_inc_unless_negative
-static inline bool
+static __always_inline bool
atomic_inc_unless_negative(atomic_t *v)
{
int c = atomic_read(v);
@@ -1146,7 +1148,7 @@ atomic_inc_unless_negative(atomic_t *v)
#endif
#ifndef atomic_dec_unless_positive
-static inline bool
+static __always_inline bool
atomic_dec_unless_positive(atomic_t *v)
{
int c = atomic_read(v);
@@ -1162,7 +1164,7 @@ atomic_dec_unless_positive(atomic_t *v)
#endif
#ifndef atomic_dec_if_positive
-static inline int
+static __always_inline int
atomic_dec_if_positive(atomic_t *v)
{
int dec, c = atomic_read(v);
@@ -1178,15 +1180,12 @@ atomic_dec_if_positive(atomic_t *v)
#define atomic_dec_if_positive atomic_dec_if_positive
#endif
-#define atomic_cond_read_acquire(v, c) smp_cond_load_acquire(&(v)->counter, (c))
-#define atomic_cond_read_relaxed(v, c) smp_cond_load_relaxed(&(v)->counter, (c))
-
#ifdef CONFIG_GENERIC_ATOMIC64
#include <asm-generic/atomic64.h>
#endif
#ifndef atomic64_read_acquire
-static inline s64
+static __always_inline s64
atomic64_read_acquire(const atomic64_t *v)
{
return smp_load_acquire(&(v)->counter);
@@ -1195,7 +1194,7 @@ atomic64_read_acquire(const atomic64_t *v)
#endif
#ifndef atomic64_set_release
-static inline void
+static __always_inline void
atomic64_set_release(atomic64_t *v, s64 i)
{
smp_store_release(&(v)->counter, i);
@@ -1210,7 +1209,7 @@ atomic64_set_release(atomic64_t *v, s64 i)
#else /* atomic64_add_return_relaxed */
#ifndef atomic64_add_return_acquire
-static inline s64
+static __always_inline s64
atomic64_add_return_acquire(s64 i, atomic64_t *v)
{
s64 ret = atomic64_add_return_relaxed(i, v);
@@ -1221,7 +1220,7 @@ atomic64_add_return_acquire(s64 i, atomic64_t *v)
#endif
#ifndef atomic64_add_return_release
-static inline s64
+static __always_inline s64
atomic64_add_return_release(s64 i, atomic64_t *v)
{
__atomic_release_fence();
@@ -1231,7 +1230,7 @@ atomic64_add_return_release(s64 i, atomic64_t *v)
#endif
#ifndef atomic64_add_return
-static inline s64
+static __always_inline s64
atomic64_add_return(s64 i, atomic64_t *v)
{
s64 ret;
@@ -1252,7 +1251,7 @@ atomic64_add_return(s64 i, atomic64_t *v)
#else /* atomic64_fetch_add_relaxed */
#ifndef atomic64_fetch_add_acquire
-static inline s64
+static __always_inline s64
atomic64_fetch_add_acquire(s64 i, atomic64_t *v)
{
s64 ret = atomic64_fetch_add_relaxed(i, v);
@@ -1263,7 +1262,7 @@ atomic64_fetch_add_acquire(s64 i, atomic64_t *v)
#endif
#ifndef atomic64_fetch_add_release
-static inline s64
+static __always_inline s64
atomic64_fetch_add_release(s64 i, atomic64_t *v)
{
__atomic_release_fence();
@@ -1273,7 +1272,7 @@ atomic64_fetch_add_release(s64 i, atomic64_t *v)
#endif
#ifndef atomic64_fetch_add
-static inline s64
+static __always_inline s64
atomic64_fetch_add(s64 i, atomic64_t *v)
{
s64 ret;
@@ -1294,7 +1293,7 @@ atomic64_fetch_add(s64 i, atomic64_t *v)
#else /* atomic64_sub_return_relaxed */
#ifndef atomic64_sub_return_acquire
-static inline s64
+static __always_inline s64
atomic64_sub_return_acquire(s64 i, atomic64_t *v)
{
s64 ret = atomic64_sub_return_relaxed(i, v);
@@ -1305,7 +1304,7 @@ atomic64_sub_return_acquire(s64 i, atomic64_t *v)
#endif
#ifndef atomic64_sub_return_release
-static inline s64
+static __always_inline s64
atomic64_sub_return_release(s64 i, atomic64_t *v)
{
__atomic_release_fence();
@@ -1315,7 +1314,7 @@ atomic64_sub_return_release(s64 i, atomic64_t *v)
#endif
#ifndef atomic64_sub_return
-static inline s64
+static __always_inline s64
atomic64_sub_return(s64 i, atomic64_t *v)
{
s64 ret;
@@ -1336,7 +1335,7 @@ atomic64_sub_return(s64 i, atomic64_t *v)
#else /* atomic64_fetch_sub_relaxed */
#ifndef atomic64_fetch_sub_acquire
-static inline s64
+static __always_inline s64
atomic64_fetch_sub_acquire(s64 i, atomic64_t *v)
{
s64 ret = atomic64_fetch_sub_relaxed(i, v);
@@ -1347,7 +1346,7 @@ atomic64_fetch_sub_acquire(s64 i, atomic64_t *v)
#endif
#ifndef atomic64_fetch_sub_release
-static inline s64
+static __always_inline s64
atomic64_fetch_sub_release(s64 i, atomic64_t *v)
{
__atomic_release_fence();
@@ -1357,7 +1356,7 @@ atomic64_fetch_sub_release(s64 i, atomic64_t *v)
#endif
#ifndef atomic64_fetch_sub
-static inline s64
+static __always_inline s64
atomic64_fetch_sub(s64 i, atomic64_t *v)
{
s64 ret;
@@ -1372,7 +1371,7 @@ atomic64_fetch_sub(s64 i, atomic64_t *v)
#endif /* atomic64_fetch_sub_relaxed */
#ifndef atomic64_inc
-static inline void
+static __always_inline void
atomic64_inc(atomic64_t *v)
{
atomic64_add(1, v);
@@ -1388,7 +1387,7 @@ atomic64_inc(atomic64_t *v)
#endif /* atomic64_inc_return */
#ifndef atomic64_inc_return
-static inline s64
+static __always_inline s64
atomic64_inc_return(atomic64_t *v)
{
return atomic64_add_return(1, v);
@@ -1397,7 +1396,7 @@ atomic64_inc_return(atomic64_t *v)
#endif
#ifndef atomic64_inc_return_acquire
-static inline s64
+static __always_inline s64
atomic64_inc_return_acquire(atomic64_t *v)
{
return atomic64_add_return_acquire(1, v);
@@ -1406,7 +1405,7 @@ atomic64_inc_return_acquire(atomic64_t *v)
#endif
#ifndef atomic64_inc_return_release
-static inline s64
+static __always_inline s64
atomic64_inc_return_release(atomic64_t *v)
{
return atomic64_add_return_release(1, v);
@@ -1415,7 +1414,7 @@ atomic64_inc_return_release(atomic64_t *v)
#endif
#ifndef atomic64_inc_return_relaxed
-static inline s64
+static __always_inline s64
atomic64_inc_return_relaxed(atomic64_t *v)
{
return atomic64_add_return_relaxed(1, v);
@@ -1426,7 +1425,7 @@ atomic64_inc_return_relaxed(atomic64_t *v)
#else /* atomic64_inc_return_relaxed */
#ifndef atomic64_inc_return_acquire
-static inline s64
+static __always_inline s64
atomic64_inc_return_acquire(atomic64_t *v)
{
s64 ret = atomic64_inc_return_relaxed(v);
@@ -1437,7 +1436,7 @@ atomic64_inc_return_acquire(atomic64_t *v)
#endif
#ifndef atomic64_inc_return_release
-static inline s64
+static __always_inline s64
atomic64_inc_return_release(atomic64_t *v)
{
__atomic_release_fence();
@@ -1447,7 +1446,7 @@ atomic64_inc_return_release(atomic64_t *v)
#endif
#ifndef atomic64_inc_return
-static inline s64
+static __always_inline s64
atomic64_inc_return(atomic64_t *v)
{
s64 ret;
@@ -1469,7 +1468,7 @@ atomic64_inc_return(atomic64_t *v)
#endif /* atomic64_fetch_inc */
#ifndef atomic64_fetch_inc
-static inline s64
+static __always_inline s64
atomic64_fetch_inc(atomic64_t *v)
{
return atomic64_fetch_add(1, v);
@@ -1478,7 +1477,7 @@ atomic64_fetch_inc(atomic64_t *v)
#endif
#ifndef atomic64_fetch_inc_acquire
-static inline s64
+static __always_inline s64
atomic64_fetch_inc_acquire(atomic64_t *v)
{
return atomic64_fetch_add_acquire(1, v);
@@ -1487,7 +1486,7 @@ atomic64_fetch_inc_acquire(atomic64_t *v)
#endif
#ifndef atomic64_fetch_inc_release
-static inline s64
+static __always_inline s64
atomic64_fetch_inc_release(atomic64_t *v)
{
return atomic64_fetch_add_release(1, v);
@@ -1496,7 +1495,7 @@ atomic64_fetch_inc_release(atomic64_t *v)
#endif
#ifndef atomic64_fetch_inc_relaxed
-static inline s64
+static __always_inline s64
atomic64_fetch_inc_relaxed(atomic64_t *v)
{
return atomic64_fetch_add_relaxed(1, v);
@@ -1507,7 +1506,7 @@ atomic64_fetch_inc_relaxed(atomic64_t *v)
#else /* atomic64_fetch_inc_relaxed */
#ifndef atomic64_fetch_inc_acquire
-static inline s64
+static __always_inline s64
atomic64_fetch_inc_acquire(atomic64_t *v)
{
s64 ret = atomic64_fetch_inc_relaxed(v);
@@ -1518,7 +1517,7 @@ atomic64_fetch_inc_acquire(atomic64_t *v)
#endif
#ifndef atomic64_fetch_inc_release
-static inline s64
+static __always_inline s64
atomic64_fetch_inc_release(atomic64_t *v)
{
__atomic_release_fence();
@@ -1528,7 +1527,7 @@ atomic64_fetch_inc_release(atomic64_t *v)
#endif
#ifndef atomic64_fetch_inc
-static inline s64
+static __always_inline s64
atomic64_fetch_inc(atomic64_t *v)
{
s64 ret;
@@ -1543,7 +1542,7 @@ atomic64_fetch_inc(atomic64_t *v)
#endif /* atomic64_fetch_inc_relaxed */
#ifndef atomic64_dec
-static inline void
+static __always_inline void
atomic64_dec(atomic64_t *v)
{
atomic64_sub(1, v);
@@ -1559,7 +1558,7 @@ atomic64_dec(atomic64_t *v)
#endif /* atomic64_dec_return */
#ifndef atomic64_dec_return
-static inline s64
+static __always_inline s64
atomic64_dec_return(atomic64_t *v)
{
return atomic64_sub_return(1, v);
@@ -1568,7 +1567,7 @@ atomic64_dec_return(atomic64_t *v)
#endif
#ifndef atomic64_dec_return_acquire
-static inline s64
+static __always_inline s64
atomic64_dec_return_acquire(atomic64_t *v)
{
return atomic64_sub_return_acquire(1, v);
@@ -1577,7 +1576,7 @@ atomic64_dec_return_acquire(atomic64_t *v)
#endif
#ifndef atomic64_dec_return_release
-static inline s64
+static __always_inline s64
atomic64_dec_return_release(atomic64_t *v)
{
return atomic64_sub_return_release(1, v);
@@ -1586,7 +1585,7 @@ atomic64_dec_return_release(atomic64_t *v)
#endif
#ifndef atomic64_dec_return_relaxed
-static inline s64
+static __always_inline s64
atomic64_dec_return_relaxed(atomic64_t *v)
{
return atomic64_sub_return_relaxed(1, v);
@@ -1597,7 +1596,7 @@ atomic64_dec_return_relaxed(atomic64_t *v)
#else /* atomic64_dec_return_relaxed */
#ifndef atomic64_dec_return_acquire
-static inline s64
+static __always_inline s64
atomic64_dec_return_acquire(atomic64_t *v)
{
s64 ret = atomic64_dec_return_relaxed(v);
@@ -1608,7 +1607,7 @@ atomic64_dec_return_acquire(atomic64_t *v)
#endif
#ifndef atomic64_dec_return_release
-static inline s64
+static __always_inline s64
atomic64_dec_return_release(atomic64_t *v)
{
__atomic_release_fence();
@@ -1618,7 +1617,7 @@ atomic64_dec_return_release(atomic64_t *v)
#endif
#ifndef atomic64_dec_return
-static inline s64
+static __always_inline s64
atomic64_dec_return(atomic64_t *v)
{
s64 ret;
@@ -1640,7 +1639,7 @@ atomic64_dec_return(atomic64_t *v)
#endif /* atomic64_fetch_dec */
#ifndef atomic64_fetch_dec
-static inline s64
+static __always_inline s64
atomic64_fetch_dec(atomic64_t *v)
{
return atomic64_fetch_sub(1, v);
@@ -1649,7 +1648,7 @@ atomic64_fetch_dec(atomic64_t *v)
#endif
#ifndef atomic64_fetch_dec_acquire
-static inline s64
+static __always_inline s64
atomic64_fetch_dec_acquire(atomic64_t *v)
{
return atomic64_fetch_sub_acquire(1, v);
@@ -1658,7 +1657,7 @@ atomic64_fetch_dec_acquire(atomic64_t *v)
#endif
#ifndef atomic64_fetch_dec_release
-static inline s64
+static __always_inline s64
atomic64_fetch_dec_release(atomic64_t *v)
{
return atomic64_fetch_sub_release(1, v);
@@ -1667,7 +1666,7 @@ atomic64_fetch_dec_release(atomic64_t *v)
#endif
#ifndef atomic64_fetch_dec_relaxed
-static inline s64
+static __always_inline s64
atomic64_fetch_dec_relaxed(atomic64_t *v)
{
return atomic64_fetch_sub_relaxed(1, v);
@@ -1678,7 +1677,7 @@ atomic64_fetch_dec_relaxed(atomic64_t *v)
#else /* atomic64_fetch_dec_relaxed */
#ifndef atomic64_fetch_dec_acquire
-static inline s64
+static __always_inline s64
atomic64_fetch_dec_acquire(atomic64_t *v)
{
s64 ret = atomic64_fetch_dec_relaxed(v);
@@ -1689,7 +1688,7 @@ atomic64_fetch_dec_acquire(atomic64_t *v)
#endif
#ifndef atomic64_fetch_dec_release
-static inline s64
+static __always_inline s64
atomic64_fetch_dec_release(atomic64_t *v)
{
__atomic_release_fence();
@@ -1699,7 +1698,7 @@ atomic64_fetch_dec_release(atomic64_t *v)
#endif
#ifndef atomic64_fetch_dec
-static inline s64
+static __always_inline s64
atomic64_fetch_dec(atomic64_t *v)
{
s64 ret;
@@ -1720,7 +1719,7 @@ atomic64_fetch_dec(atomic64_t *v)
#else /* atomic64_fetch_and_relaxed */
#ifndef atomic64_fetch_and_acquire
-static inline s64
+static __always_inline s64
atomic64_fetch_and_acquire(s64 i, atomic64_t *v)
{
s64 ret = atomic64_fetch_and_relaxed(i, v);
@@ -1731,7 +1730,7 @@ atomic64_fetch_and_acquire(s64 i, atomic64_t *v)
#endif
#ifndef atomic64_fetch_and_release
-static inline s64
+static __always_inline s64
atomic64_fetch_and_release(s64 i, atomic64_t *v)
{
__atomic_release_fence();
@@ -1741,7 +1740,7 @@ atomic64_fetch_and_release(s64 i, atomic64_t *v)
#endif
#ifndef atomic64_fetch_and
-static inline s64
+static __always_inline s64
atomic64_fetch_and(s64 i, atomic64_t *v)
{
s64 ret;
@@ -1756,7 +1755,7 @@ atomic64_fetch_and(s64 i, atomic64_t *v)
#endif /* atomic64_fetch_and_relaxed */
#ifndef atomic64_andnot
-static inline void
+static __always_inline void
atomic64_andnot(s64 i, atomic64_t *v)
{
atomic64_and(~i, v);
@@ -1772,7 +1771,7 @@ atomic64_andnot(s64 i, atomic64_t *v)
#endif /* atomic64_fetch_andnot */
#ifndef atomic64_fetch_andnot
-static inline s64
+static __always_inline s64
atomic64_fetch_andnot(s64 i, atomic64_t *v)
{
return atomic64_fetch_and(~i, v);
@@ -1781,7 +1780,7 @@ atomic64_fetch_andnot(s64 i, atomic64_t *v)
#endif
#ifndef atomic64_fetch_andnot_acquire
-static inline s64
+static __always_inline s64
atomic64_fetch_andnot_acquire(s64 i, atomic64_t *v)
{
return atomic64_fetch_and_acquire(~i, v);
@@ -1790,7 +1789,7 @@ atomic64_fetch_andnot_acquire(s64 i, atomic64_t *v)
#endif
#ifndef atomic64_fetch_andnot_release
-static inline s64
+static __always_inline s64
atomic64_fetch_andnot_release(s64 i, atomic64_t *v)
{
return atomic64_fetch_and_release(~i, v);
@@ -1799,7 +1798,7 @@ atomic64_fetch_andnot_release(s64 i, atomic64_t *v)
#endif
#ifndef atomic64_fetch_andnot_relaxed
-static inline s64
+static __always_inline s64
atomic64_fetch_andnot_relaxed(s64 i, atomic64_t *v)
{
return atomic64_fetch_and_relaxed(~i, v);
@@ -1810,7 +1809,7 @@ atomic64_fetch_andnot_relaxed(s64 i, atomic64_t *v)
#else /* atomic64_fetch_andnot_relaxed */
#ifndef atomic64_fetch_andnot_acquire
-static inline s64
+static __always_inline s64
atomic64_fetch_andnot_acquire(s64 i, atomic64_t *v)
{
s64 ret = atomic64_fetch_andnot_relaxed(i, v);
@@ -1821,7 +1820,7 @@ atomic64_fetch_andnot_acquire(s64 i, atomic64_t *v)
#endif
#ifndef atomic64_fetch_andnot_release
-static inline s64
+static __always_inline s64
atomic64_fetch_andnot_release(s64 i, atomic64_t *v)
{
__atomic_release_fence();
@@ -1831,7 +1830,7 @@ atomic64_fetch_andnot_release(s64 i, atomic64_t *v)
#endif
#ifndef atomic64_fetch_andnot
-static inline s64
+static __always_inline s64
atomic64_fetch_andnot(s64 i, atomic64_t *v)
{
s64 ret;
@@ -1852,7 +1851,7 @@ atomic64_fetch_andnot(s64 i, atomic64_t *v)
#else /* atomic64_fetch_or_relaxed */
#ifndef atomic64_fetch_or_acquire
-static inline s64
+static __always_inline s64
atomic64_fetch_or_acquire(s64 i, atomic64_t *v)
{
s64 ret = atomic64_fetch_or_relaxed(i, v);
@@ -1863,7 +1862,7 @@ atomic64_fetch_or_acquire(s64 i, atomic64_t *v)
#endif
#ifndef atomic64_fetch_or_release
-static inline s64
+static __always_inline s64
atomic64_fetch_or_release(s64 i, atomic64_t *v)
{
__atomic_release_fence();
@@ -1873,7 +1872,7 @@ atomic64_fetch_or_release(s64 i, atomic64_t *v)
#endif
#ifndef atomic64_fetch_or
-static inline s64
+static __always_inline s64
atomic64_fetch_or(s64 i, atomic64_t *v)
{
s64 ret;
@@ -1894,7 +1893,7 @@ atomic64_fetch_or(s64 i, atomic64_t *v)
#else /* atomic64_fetch_xor_relaxed */
#ifndef atomic64_fetch_xor_acquire
-static inline s64
+static __always_inline s64
atomic64_fetch_xor_acquire(s64 i, atomic64_t *v)
{
s64 ret = atomic64_fetch_xor_relaxed(i, v);
@@ -1905,7 +1904,7 @@ atomic64_fetch_xor_acquire(s64 i, atomic64_t *v)
#endif
#ifndef atomic64_fetch_xor_release
-static inline s64
+static __always_inline s64
atomic64_fetch_xor_release(s64 i, atomic64_t *v)
{
__atomic_release_fence();
@@ -1915,7 +1914,7 @@ atomic64_fetch_xor_release(s64 i, atomic64_t *v)
#endif
#ifndef atomic64_fetch_xor
-static inline s64
+static __always_inline s64
atomic64_fetch_xor(s64 i, atomic64_t *v)
{
s64 ret;
@@ -1936,7 +1935,7 @@ atomic64_fetch_xor(s64 i, atomic64_t *v)
#else /* atomic64_xchg_relaxed */
#ifndef atomic64_xchg_acquire
-static inline s64
+static __always_inline s64
atomic64_xchg_acquire(atomic64_t *v, s64 i)
{
s64 ret = atomic64_xchg_relaxed(v, i);
@@ -1947,7 +1946,7 @@ atomic64_xchg_acquire(atomic64_t *v, s64 i)
#endif
#ifndef atomic64_xchg_release
-static inline s64
+static __always_inline s64
atomic64_xchg_release(atomic64_t *v, s64 i)
{
__atomic_release_fence();
@@ -1957,7 +1956,7 @@ atomic64_xchg_release(atomic64_t *v, s64 i)
#endif
#ifndef atomic64_xchg
-static inline s64
+static __always_inline s64
atomic64_xchg(atomic64_t *v, s64 i)
{
s64 ret;
@@ -1978,7 +1977,7 @@ atomic64_xchg(atomic64_t *v, s64 i)
#else /* atomic64_cmpxchg_relaxed */
#ifndef atomic64_cmpxchg_acquire
-static inline s64
+static __always_inline s64
atomic64_cmpxchg_acquire(atomic64_t *v, s64 old, s64 new)
{
s64 ret = atomic64_cmpxchg_relaxed(v, old, new);
@@ -1989,7 +1988,7 @@ atomic64_cmpxchg_acquire(atomic64_t *v, s64 old, s64 new)
#endif
#ifndef atomic64_cmpxchg_release
-static inline s64
+static __always_inline s64
atomic64_cmpxchg_release(atomic64_t *v, s64 old, s64 new)
{
__atomic_release_fence();
@@ -1999,7 +1998,7 @@ atomic64_cmpxchg_release(atomic64_t *v, s64 old, s64 new)
#endif
#ifndef atomic64_cmpxchg
-static inline s64
+static __always_inline s64
atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new)
{
s64 ret;
@@ -2021,7 +2020,7 @@ atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new)
#endif /* atomic64_try_cmpxchg */
#ifndef atomic64_try_cmpxchg
-static inline bool
+static __always_inline bool
atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new)
{
s64 r, o = *old;
@@ -2034,7 +2033,7 @@ atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new)
#endif
#ifndef atomic64_try_cmpxchg_acquire
-static inline bool
+static __always_inline bool
atomic64_try_cmpxchg_acquire(atomic64_t *v, s64 *old, s64 new)
{
s64 r, o = *old;
@@ -2047,7 +2046,7 @@ atomic64_try_cmpxchg_acquire(atomic64_t *v, s64 *old, s64 new)
#endif
#ifndef atomic64_try_cmpxchg_release
-static inline bool
+static __always_inline bool
atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new)
{
s64 r, o = *old;
@@ -2060,7 +2059,7 @@ atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new)
#endif
#ifndef atomic64_try_cmpxchg_relaxed
-static inline bool
+static __always_inline bool
atomic64_try_cmpxchg_relaxed(atomic64_t *v, s64 *old, s64 new)
{
s64 r, o = *old;
@@ -2075,7 +2074,7 @@ atomic64_try_cmpxchg_relaxed(atomic64_t *v, s64 *old, s64 new)
#else /* atomic64_try_cmpxchg_relaxed */
#ifndef atomic64_try_cmpxchg_acquire
-static inline bool
+static __always_inline bool
atomic64_try_cmpxchg_acquire(atomic64_t *v, s64 *old, s64 new)
{
bool ret = atomic64_try_cmpxchg_relaxed(v, old, new);
@@ -2086,7 +2085,7 @@ atomic64_try_cmpxchg_acquire(atomic64_t *v, s64 *old, s64 new)
#endif
#ifndef atomic64_try_cmpxchg_release
-static inline bool
+static __always_inline bool
atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new)
{
__atomic_release_fence();
@@ -2096,7 +2095,7 @@ atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new)
#endif
#ifndef atomic64_try_cmpxchg
-static inline bool
+static __always_inline bool
atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new)
{
bool ret;
@@ -2120,7 +2119,7 @@ atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new)
* true if the result is zero, or false for all
* other cases.
*/
-static inline bool
+static __always_inline bool
atomic64_sub_and_test(s64 i, atomic64_t *v)
{
return atomic64_sub_return(i, v) == 0;
@@ -2137,7 +2136,7 @@ atomic64_sub_and_test(s64 i, atomic64_t *v)
* returns true if the result is 0, or false for all other
* cases.
*/
-static inline bool
+static __always_inline bool
atomic64_dec_and_test(atomic64_t *v)
{
return atomic64_dec_return(v) == 0;
@@ -2154,7 +2153,7 @@ atomic64_dec_and_test(atomic64_t *v)
* and returns true if the result is zero, or false for all
* other cases.
*/
-static inline bool
+static __always_inline bool
atomic64_inc_and_test(atomic64_t *v)
{
return atomic64_inc_return(v) == 0;
@@ -2172,7 +2171,7 @@ atomic64_inc_and_test(atomic64_t *v)
* if the result is negative, or false when
* result is greater than or equal to zero.
*/
-static inline bool
+static __always_inline bool
atomic64_add_negative(s64 i, atomic64_t *v)
{
return atomic64_add_return(i, v) < 0;
@@ -2190,7 +2189,7 @@ atomic64_add_negative(s64 i, atomic64_t *v)
* Atomically adds @a to @v, so long as @v was not already @u.
* Returns original value of @v
*/
-static inline s64
+static __always_inline s64
atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
{
s64 c = atomic64_read(v);
@@ -2215,7 +2214,7 @@ atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
* Atomically adds @a to @v, if @v was not already @u.
* Returns true if the addition was done.
*/
-static inline bool
+static __always_inline bool
atomic64_add_unless(atomic64_t *v, s64 a, s64 u)
{
return atomic64_fetch_add_unless(v, a, u) != u;
@@ -2231,7 +2230,7 @@ atomic64_add_unless(atomic64_t *v, s64 a, s64 u)
* Atomically increments @v by 1, if @v is non-zero.
* Returns true if the increment was done.
*/
-static inline bool
+static __always_inline bool
atomic64_inc_not_zero(atomic64_t *v)
{
return atomic64_add_unless(v, 1, 0);
@@ -2240,7 +2239,7 @@ atomic64_inc_not_zero(atomic64_t *v)
#endif
#ifndef atomic64_inc_unless_negative
-static inline bool
+static __always_inline bool
atomic64_inc_unless_negative(atomic64_t *v)
{
s64 c = atomic64_read(v);
@@ -2256,7 +2255,7 @@ atomic64_inc_unless_negative(atomic64_t *v)
#endif
#ifndef atomic64_dec_unless_positive
-static inline bool
+static __always_inline bool
atomic64_dec_unless_positive(atomic64_t *v)
{
s64 c = atomic64_read(v);
@@ -2272,7 +2271,7 @@ atomic64_dec_unless_positive(atomic64_t *v)
#endif
#ifndef atomic64_dec_if_positive
-static inline s64
+static __always_inline s64
atomic64_dec_if_positive(atomic64_t *v)
{
s64 dec, c = atomic64_read(v);
@@ -2288,8 +2287,5 @@ atomic64_dec_if_positive(atomic64_t *v)
#define atomic64_dec_if_positive atomic64_dec_if_positive
#endif
-#define atomic64_cond_read_acquire(v, c) smp_cond_load_acquire(&(v)->counter, (c))
-#define atomic64_cond_read_relaxed(v, c) smp_cond_load_relaxed(&(v)->counter, (c))
-
#endif /* _LINUX_ATOMIC_FALLBACK_H */
-// 25de4a2804d70f57e994fe3b419148658bb5378a
+// 1fac0941c79bf0ae100723cc2ac9b94061f0b67a
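The fallbacks above all follow one construction: ordered variants are built from the relaxed primitive plus an explicit fence, and everything is marked __always_inline so callers sensitive to instrumentation never see an out-of-line copy. A minimal sketch of that pattern, assuming a hypothetical my_fetch_op_relaxed() primitive:

        static __always_inline int
        my_fetch_op_acquire(int i, atomic_t *v)
        {
                int ret = my_fetch_op_relaxed(i, v);    /* hypothetical relaxed op */
                __atomic_acquire_fence();               /* upgrade to acquire */
                return ret;
        }

        static __always_inline int
        my_fetch_op_release(int i, atomic_t *v)
        {
                __atomic_release_fence();               /* order prior accesses */
                return my_fetch_op_relaxed(i, v);
        }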
diff --git a/include/linux/atomic.h b/include/linux/atomic.h
index 4c0d009a46f0..571a11008ab5 100644
--- a/include/linux/atomic.h
+++ b/include/linux/atomic.h
@@ -25,6 +25,12 @@
* See Documentation/memory-barriers.txt for ACQUIRE/RELEASE definitions.
*/
+#define atomic_cond_read_acquire(v, c) smp_cond_load_acquire(&(v)->counter, (c))
+#define atomic_cond_read_relaxed(v, c) smp_cond_load_relaxed(&(v)->counter, (c))
+
+#define atomic64_cond_read_acquire(v, c) smp_cond_load_acquire(&(v)->counter, (c))
+#define atomic64_cond_read_relaxed(v, c) smp_cond_load_relaxed(&(v)->counter, (c))
+
/*
* The idea here is to build acquire/release variants by adding explicit
* barriers on top of the relaxed variant. In the case where the relaxed
@@ -71,7 +77,12 @@
__ret; \
})
+#ifdef ARCH_ATOMIC
+#include <linux/atomic-arch-fallback.h>
+#include <asm-generic/atomic-instrumented.h>
+#else
#include <linux/atomic-fallback.h>
+#endif
#include <asm-generic/atomic-long.h>
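The cond_read helpers moved into atomic.h spin until a condition on the atomic's value holds, using the smp_cond_load_acquire() convention where VAL names the freshly loaded value inside the condition. A hedged usage sketch, assuming a lock word with a hypothetical LOCKED bit:

        /* Wait with acquire ordering until the LOCKED bit clears. */
        atomic_cond_read_acquire(&lock->val, !(VAL & LOCKED));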
diff --git a/include/linux/bch.h b/include/linux/bch.h
index aa765af85c38..85fdce83d4e2 100644
--- a/include/linux/bch.h
+++ b/include/linux/bch.h
@@ -33,6 +33,7 @@
* @cache: log-based polynomial representation buffer
* @elp: error locator polynomial
* @poly_2t: temporary polynomials of degree 2t
+ * @swap_bits: swap bits within data and syndrome bytes
*/
struct bch_control {
unsigned int m;
@@ -51,16 +52,18 @@ struct bch_control {
int *cache;
struct gf_poly *elp;
struct gf_poly *poly_2t[4];
+ bool swap_bits;
};
-struct bch_control *init_bch(int m, int t, unsigned int prim_poly);
+struct bch_control *bch_init(int m, int t, unsigned int prim_poly,
+ bool swap_bits);
-void free_bch(struct bch_control *bch);
+void bch_free(struct bch_control *bch);
-void encode_bch(struct bch_control *bch, const uint8_t *data,
+void bch_encode(struct bch_control *bch, const uint8_t *data,
unsigned int len, uint8_t *ecc);
-int decode_bch(struct bch_control *bch, const uint8_t *data, unsigned int len,
+int bch_decode(struct bch_control *bch, const uint8_t *data, unsigned int len,
const uint8_t *recv_ecc, const uint8_t *calc_ecc,
const unsigned int *syn, unsigned int *errloc);
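The bch_*() renames keep the old semantics; only the namespace and the new swap_bits flag change. A hedged usage sketch (parameters are illustrative; prim_poly == 0 selects the default primitive polynomial):

        struct bch_control *bch = bch_init(13, 4, 0, false);

        if (bch) {
                bch_encode(bch, data, len, ecc);  /* compute ECC over data[0..len) */
                bch_free(bch);
        }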
diff --git a/include/linux/bsearch.h b/include/linux/bsearch.h
index 8ed53d7524ea..e66b711d091e 100644
--- a/include/linux/bsearch.h
+++ b/include/linux/bsearch.h
@@ -4,7 +4,29 @@
#include <linux/types.h>
-void *bsearch(const void *key, const void *base, size_t num, size_t size,
- cmp_func_t cmp);
+static __always_inline
+void *__inline_bsearch(const void *key, const void *base, size_t num, size_t size, cmp_func_t cmp)
+{
+ const char *pivot;
+ int result;
+
+ while (num > 0) {
+ pivot = base + (num >> 1) * size;
+ result = cmp(key, pivot);
+
+ if (result == 0)
+ return (void *)pivot;
+
+ if (result > 0) {
+ base = pivot + size;
+ num--;
+ }
+ num >>= 1;
+ }
+
+ return NULL;
+}
+
+extern void *bsearch(const void *key, const void *base, size_t num, size_t size, cmp_func_t cmp);
#endif /* _LINUX_BSEARCH_H */
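__inline_bsearch() takes the same arguments as bsearch(); callers that cannot tolerate an out-of-line call (for instance noinstr code) can use the inline variant directly. An illustrative comparator and call:

        static int cmp_u32(const void *key, const void *elt)
        {
                u32 k = *(const u32 *)key, e = *(const u32 *)elt;

                return k < e ? -1 : (k > e ? 1 : 0);
        }

        u32 needle = 42;
        u32 *hit = __inline_bsearch(&needle, table, nelems,
                                    sizeof(*table), cmp_u32);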
diff --git a/include/linux/cache.h b/include/linux/cache.h
index 750621e41d1c..1aa8009f6d06 100644
--- a/include/linux/cache.h
+++ b/include/linux/cache.h
@@ -15,8 +15,14 @@
/*
* __read_mostly is used to keep rarely changing variables out of frequently
- * updated cachelines. If an architecture doesn't support it, ignore the
- * hint.
+ * updated cachelines. Its use should be reserved for data that is used
+ * frequently in hot paths. Performance traces can help decide when to use
+ * this. You want __read_mostly data to be tightly packed, so that in the
+ * best case multiple frequently read variables for a hot path will be next
+ * to each other in order to reduce the number of cachelines needed to
+ * execute a critical path. We should be mindful and selective of its use.
+ * ie: if you're going to use it please supply a *good* justification in your
+ * commit log
*/
#ifndef __read_mostly
#define __read_mostly
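A typical use per the guidance above is a tunable that hot paths read constantly but that only changes at init or via sysctl:

        static unsigned int poll_interval __read_mostly = 100;  /* illustrative */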
diff --git a/include/linux/can/skb.h b/include/linux/can/skb.h
index a954def26c0d..900b9f4e0605 100644
--- a/include/linux/can/skb.h
+++ b/include/linux/can/skb.h
@@ -34,7 +34,7 @@
struct can_skb_priv {
int ifindex;
int skbcnt;
- struct can_frame cf[0];
+ struct can_frame cf[];
};
static inline struct can_skb_priv *can_skb_prv(struct sk_buff *skb)
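The cf[0] -> cf[] change is part of the treewide move to C99 flexible array members, which lets the compiler and fortified allocators see the real bounds. Allocations then size the trailing array with struct_size() from <linux/overflow.h>; a sketch with a hypothetical container:

        struct frames {
                int n;
                struct can_frame cf[];          /* flexible array member */
        };

        struct frames *f = kzalloc(struct_size(f, cf, n), GFP_KERNEL);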
diff --git a/include/linux/cb710.h b/include/linux/cb710.h
index 60de3fedd3a7..405657a9a0d5 100644
--- a/include/linux/cb710.h
+++ b/include/linux/cb710.h
@@ -36,7 +36,7 @@ struct cb710_chip {
unsigned slot_mask;
unsigned slots;
spinlock_t irq_lock;
- struct cb710_slot slot[0];
+ struct cb710_slot slot[];
};
/* NOTE: cb710_chip.slots is modified only during device init/exit and
diff --git a/include/linux/ceph/libceph.h b/include/linux/ceph/libceph.h
index 2247e71beb83..e5ed1c541e7f 100644
--- a/include/linux/ceph/libceph.h
+++ b/include/linux/ceph/libceph.h
@@ -52,8 +52,7 @@ struct ceph_options {
unsigned long osd_idle_ttl; /* jiffies */
unsigned long osd_keepalive_timeout; /* jiffies */
unsigned long osd_request_timeout; /* jiffies */
-
- u32 osd_req_flags; /* CEPH_OSD_FLAG_*, applied to each OSD request */
+ u32 read_from_replica; /* CEPH_OSD_FLAG_BALANCE/LOCALIZE_READS */
/*
* any type that can't be simply compared or doesn't need
@@ -76,6 +75,7 @@ struct ceph_options {
#define CEPH_OSD_KEEPALIVE_DEFAULT msecs_to_jiffies(5 * 1000)
#define CEPH_OSD_IDLE_TTL_DEFAULT msecs_to_jiffies(60 * 1000)
#define CEPH_OSD_REQUEST_TIMEOUT_DEFAULT 0 /* no timeout */
+#define CEPH_READ_FROM_REPLICA_DEFAULT 0 /* read from primary */
#define CEPH_MONC_HUNT_INTERVAL msecs_to_jiffies(3 * 1000)
#define CEPH_MONC_PING_INTERVAL msecs_to_jiffies(10 * 1000)
diff --git a/include/linux/clk/tegra.h b/include/linux/clk/tegra.h
index 2b1b35240074..3f01d43f0598 100644
--- a/include/linux/clk/tegra.h
+++ b/include/linux/clk/tegra.h
@@ -131,6 +131,9 @@ extern void tegra210_set_sata_pll_seq_sw(bool state);
extern void tegra210_put_utmipll_in_iddq(void);
extern void tegra210_put_utmipll_out_iddq(void);
extern int tegra210_clk_handle_mbist_war(unsigned int id);
+extern void tegra210_clk_emc_dll_enable(bool flag);
+extern void tegra210_clk_emc_dll_update_setting(u32 emc_dll_src_value);
+extern void tegra210_clk_emc_update_setting(u32 emc_src_value);
struct clk;
@@ -143,4 +146,28 @@ void tegra20_clk_set_emc_round_callback(tegra20_clk_emc_round_cb *round_cb,
void *cb_arg);
int tegra20_clk_prepare_emc_mc_same_freq(struct clk *emc_clk, bool same);
+struct tegra210_clk_emc_config {
+ unsigned long rate;
+ bool same_freq;
+ u32 value;
+
+ unsigned long parent_rate;
+ u8 parent;
+};
+
+struct tegra210_clk_emc_provider {
+ struct module *owner;
+ struct device *dev;
+
+ struct tegra210_clk_emc_config *configs;
+ unsigned int num_configs;
+
+ int (*set_rate)(struct device *dev,
+ const struct tegra210_clk_emc_config *config);
+};
+
+int tegra210_clk_emc_attach(struct clk *clk,
+ struct tegra210_clk_emc_provider *provider);
+void tegra210_clk_emc_detach(struct clk *clk);
+
#endif /* __LINUX_CLK_TEGRA_H_ */
diff --git a/include/linux/compiler-clang.h b/include/linux/compiler-clang.h
index 790c0c6b8552..ee37256ec8bd 100644
--- a/include/linux/compiler-clang.h
+++ b/include/linux/compiler-clang.h
@@ -16,7 +16,7 @@
#define KASAN_ABI_VERSION 5
#if __has_feature(address_sanitizer) || __has_feature(hwaddress_sanitizer)
-/* emulate gcc's __SANITIZE_ADDRESS__ flag */
+/* Emulate GCC's __SANITIZE_ADDRESS__ flag */
#define __SANITIZE_ADDRESS__
#define __no_sanitize_address \
__attribute__((no_sanitize("address", "hwaddress")))
@@ -24,6 +24,15 @@
#define __no_sanitize_address
#endif
+#if __has_feature(thread_sanitizer)
+/* emulate gcc's __SANITIZE_THREAD__ flag */
+#define __SANITIZE_THREAD__
+#define __no_sanitize_thread \
+ __attribute__((no_sanitize("thread")))
+#else
+#define __no_sanitize_thread
+#endif
+
/*
 * Not all versions of clang implement the type-generic versions
* of the builtin overflow checkers. Fortunately, clang implements
diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
index d7ee4c6bad48..7dd4e0349ef3 100644
--- a/include/linux/compiler-gcc.h
+++ b/include/linux/compiler-gcc.h
@@ -10,7 +10,8 @@
+ __GNUC_MINOR__ * 100 \
+ __GNUC_PATCHLEVEL__)
-#if GCC_VERSION < 40600
+/* https://gcc.gnu.org/bugzilla/show_bug.cgi?id=58145 */
+#if GCC_VERSION < 40800
# error Sorry, your compiler is too old - please upgrade it.
#endif
@@ -126,9 +127,7 @@
#if defined(CONFIG_ARCH_USE_BUILTIN_BSWAP) && !defined(__CHECKER__)
#define __HAVE_BUILTIN_BSWAP32__
#define __HAVE_BUILTIN_BSWAP64__
-#if GCC_VERSION >= 40800
#define __HAVE_BUILTIN_BSWAP16__
-#endif
#endif /* CONFIG_ARCH_USE_BUILTIN_BSWAP && !__CHECKER__ */
#if GCC_VERSION >= 70000
@@ -145,6 +144,12 @@
#define __no_sanitize_address
#endif
+#if defined(__SANITIZE_THREAD__) && __has_attribute(__no_sanitize_thread__)
+#define __no_sanitize_thread __attribute__((no_sanitize_thread))
+#else
+#define __no_sanitize_thread
+#endif
+
#if GCC_VERSION >= 50100
#define COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW 1
#endif
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index 6325d64e3c3b..30827f82ad62 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -230,60 +230,6 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
# define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __LINE__)
#endif
-#include <uapi/linux/types.h>
-
-#define __READ_ONCE_SIZE \
-({ \
- switch (size) { \
- case 1: *(__u8 *)res = *(volatile __u8 *)p; break; \
- case 2: *(__u16 *)res = *(volatile __u16 *)p; break; \
- case 4: *(__u32 *)res = *(volatile __u32 *)p; break; \
- case 8: *(__u64 *)res = *(volatile __u64 *)p; break; \
- default: \
- barrier(); \
- __builtin_memcpy((void *)res, (const void *)p, size); \
- barrier(); \
- } \
-})
-
-static __always_inline
-void __read_once_size(const volatile void *p, void *res, int size)
-{
- __READ_ONCE_SIZE;
-}
-
-#ifdef CONFIG_KASAN
-/*
- * We can't declare function 'inline' because __no_sanitize_address confilcts
- * with inlining. Attempt to inline it may cause a build failure.
- * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=67368
- * '__maybe_unused' allows us to avoid defined-but-not-used warnings.
- */
-# define __no_kasan_or_inline __no_sanitize_address notrace __maybe_unused
-#else
-# define __no_kasan_or_inline __always_inline
-#endif
-
-static __no_kasan_or_inline
-void __read_once_size_nocheck(const volatile void *p, void *res, int size)
-{
- __READ_ONCE_SIZE;
-}
-
-static __always_inline void __write_once_size(volatile void *p, void *res, int size)
-{
- switch (size) {
- case 1: *(volatile __u8 *)p = *(__u8 *)res; break;
- case 2: *(volatile __u16 *)p = *(__u16 *)res; break;
- case 4: *(volatile __u32 *)p = *(__u32 *)res; break;
- case 8: *(volatile __u64 *)p = *(__u64 *)res; break;
- default:
- barrier();
- __builtin_memcpy((void *)p, (const void *)res, size);
- barrier();
- }
-}
-
/*
* Prevent the compiler from merging or refetching reads or writes. The
* compiler is also forbidden from reordering successive instances of
@@ -293,11 +239,7 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
* statements.
*
* These two macros will also work on aggregate data types like structs or
- * unions. If the size of the accessed data type exceeds the word size of
- * the machine (e.g., 32 bits or 64 bits) READ_ONCE() and WRITE_ONCE() will
- * fall back to memcpy(). There's at least two memcpy()s: one for the
- * __builtin_memcpy() and then one for the macro doing the copy of variable
- * - '__u' allocated on the stack.
+ * unions.
*
* Their two major use cases are: (1) Mediating communication between
* process-level code and irq/NMI handlers, all running on the same CPU,
@@ -308,24 +250,79 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
*/
#include <asm/barrier.h>
#include <linux/kasan-checks.h>
+#include <linux/kcsan-checks.h>
+
+/**
+ * data_race - mark an expression as containing intentional data races
+ *
+ * This data_race() macro is useful for situations in which data races
+ * should be forgiven. One example is diagnostic code that accesses
+ * shared variables but is not a part of the core synchronization design.
+ *
+ * This macro *does not* affect normal code generation, but is a hint
+ * to tooling that data races here are to be ignored.
+ */
+#define data_race(expr) \
+({ \
+ __unqual_scalar_typeof(({ expr; })) __v = ({ \
+ __kcsan_disable_current(); \
+ expr; \
+ }); \
+ __kcsan_enable_current(); \
+ __v; \
+})
+
+/*
+ * Use __READ_ONCE() instead of READ_ONCE() if you do not require any
+ * atomicity or dependency ordering guarantees. Note that this may result
+ * in tears!
+ */
+#define __READ_ONCE(x) (*(const volatile __unqual_scalar_typeof(x) *)&(x))
-#define __READ_ONCE(x, check) \
+#define __READ_ONCE_SCALAR(x) \
({ \
- union { typeof(x) __val; char __c[1]; } __u; \
- if (check) \
- __read_once_size(&(x), __u.__c, sizeof(x)); \
- else \
- __read_once_size_nocheck(&(x), __u.__c, sizeof(x)); \
- smp_read_barrier_depends(); /* Enforce dependency ordering from x */ \
- __u.__val; \
+ __unqual_scalar_typeof(x) __x = __READ_ONCE(x); \
+ smp_read_barrier_depends(); \
+ (typeof(x))__x; \
})
-#define READ_ONCE(x) __READ_ONCE(x, 1)
+
+#define READ_ONCE(x) \
+({ \
+ compiletime_assert_rwonce_type(x); \
+ __READ_ONCE_SCALAR(x); \
+})
+
+#define __WRITE_ONCE(x, val) \
+do { \
+ *(volatile typeof(x) *)&(x) = (val); \
+} while (0)
+
+#define WRITE_ONCE(x, val) \
+do { \
+ compiletime_assert_rwonce_type(x); \
+ __WRITE_ONCE(x, val); \
+} while (0)
+
+static __no_sanitize_or_inline
+unsigned long __read_once_word_nocheck(const void *addr)
+{
+ return __READ_ONCE(*(unsigned long *)addr);
+}
/*
- * Use READ_ONCE_NOCHECK() instead of READ_ONCE() if you need
- * to hide memory access from KASAN.
+ * Use READ_ONCE_NOCHECK() instead of READ_ONCE() if you need to load a
+ * word from memory atomically but without telling KASAN/KCSAN. This is
+ * usually used by unwinding code when walking the stack of a running process.
*/
-#define READ_ONCE_NOCHECK(x) __READ_ONCE(x, 0)
+#define READ_ONCE_NOCHECK(x) \
+({ \
+ unsigned long __x; \
+ compiletime_assert(sizeof(x) == sizeof(__x), \
+ "Unsupported access size for READ_ONCE_NOCHECK()."); \
+ __x = __read_once_word_nocheck(&(x)); \
+ smp_read_barrier_depends(); \
+ (typeof(x))__x; \
+})
static __no_kasan_or_inline
unsigned long read_word_at_a_time(const void *addr)
@@ -334,14 +331,6 @@ unsigned long read_word_at_a_time(const void *addr)
return *(unsigned long *)addr;
}
-#define WRITE_ONCE(x, val) \
-({ \
- union { typeof(x) __val; char __c[1]; } __u = \
- { .__val = (__force typeof(x)) (val) }; \
- __write_once_size(&(x), __u.__c, sizeof(x)); \
- __u.__val; \
-})
-
#endif /* __KERNEL__ */
/*
@@ -406,6 +395,16 @@ static inline void *offset_to_ptr(const int *off)
compiletime_assert(__native_word(t), \
"Need native word sized stores/loads for atomicity.")
+/*
+ * Yes, this permits 64-bit accesses on 32-bit architectures. These will
+ * actually be atomic in some cases (namely Armv7 + LPAE), but for others we
+ * rely on the access being split into 2x32-bit accesses for a 32-bit quantity
+ * (e.g. a virtual address) and a strong prevailing wind.
+ */
+#define compiletime_assert_rwonce_type(t) \
+ compiletime_assert(__native_word(t) || sizeof(t) == sizeof(long long), \
+ "Unsupported access size for {READ,WRITE}_ONCE().")
+
/* &a[0] degrades to a pointer: a different type from an array */
#define __must_be_array(a) BUILD_BUG_ON_ZERO(__same_type((a), &(a)[0]))
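Putting the new pieces together: READ_ONCE()/WRITE_ONCE() now reject accesses wider than a machine word (modulo the 64-bit exception above), and data_race() annotates intentional races for KCSAN. Illustrative uses:

        /* Diagnostic-only read of a racy counter; KCSAN stays quiet. */
        pr_debug("count=%lu\n", data_race(counter));

        /* Tear-free scalar accesses with dependency ordering. */
        seq = READ_ONCE(s->sequence);
        WRITE_ONCE(s->sequence, seq + 1);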
diff --git a/include/linux/compiler_types.h b/include/linux/compiler_types.h
index 6fcf73200b67..e368384445b6 100644
--- a/include/linux/compiler_types.h
+++ b/include/linux/compiler_types.h
@@ -5,20 +5,20 @@
#ifndef __ASSEMBLY__
#ifdef __CHECKER__
-# define __user __attribute__((noderef, address_space(1)))
# define __kernel __attribute__((address_space(0)))
+# define __user __attribute__((noderef, address_space(__user)))
# define __safe __attribute__((safe))
# define __force __attribute__((force))
# define __nocast __attribute__((nocast))
-# define __iomem __attribute__((noderef, address_space(2)))
+# define __iomem __attribute__((noderef, address_space(__iomem)))
# define __must_hold(x) __attribute__((context(x,1,1)))
# define __acquires(x) __attribute__((context(x,0,1)))
# define __releases(x) __attribute__((context(x,1,0)))
# define __acquire(x) __context__(x,1)
# define __release(x) __context__(x,-1)
# define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0)
-# define __percpu __attribute__((noderef, address_space(3)))
-# define __rcu __attribute__((noderef, address_space(4)))
+# define __percpu __attribute__((noderef, address_space(__percpu)))
+# define __rcu __attribute__((noderef, address_space(__rcu)))
# define __private __attribute__((noderef))
extern void __chk_user_ptr(const volatile void __user *);
extern void __chk_io_ptr(const volatile void __iomem *);
@@ -171,6 +171,38 @@ struct ftrace_likely_data {
*/
#define noinline_for_stack noinline
+/*
+ * Sanitizer helper attributes: Because using __always_inline and
+ * __no_sanitize_* conflict, provide helper attributes that will either expand
+ * to __no_sanitize_* in compilation units where instrumentation is enabled
+ * (__SANITIZE_*__), or __always_inline in compilation units without
+ * instrumentation (__SANITIZE_*__ undefined).
+ */
+#ifdef __SANITIZE_ADDRESS__
+/*
+ * We can't declare function 'inline' because __no_sanitize_address conflicts
+ * with inlining. Attempt to inline it may cause a build failure.
+ * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=67368
+ * '__maybe_unused' allows us to avoid defined-but-not-used warnings.
+ */
+# define __no_kasan_or_inline __no_sanitize_address notrace __maybe_unused
+# define __no_sanitize_or_inline __no_kasan_or_inline
+#else
+# define __no_kasan_or_inline __always_inline
+#endif
+
+#define __no_kcsan __no_sanitize_thread
+#ifdef __SANITIZE_THREAD__
+# define __no_kcsan_or_inline __no_kcsan notrace __maybe_unused
+# define __no_sanitize_or_inline __no_kcsan_or_inline
+#else
+# define __no_kcsan_or_inline __always_inline
+#endif
+
+#ifndef __no_sanitize_or_inline
+#define __no_sanitize_or_inline __always_inline
+#endif
+
#endif /* __KERNEL__ */
#endif /* __ASSEMBLY__ */
@@ -218,6 +250,53 @@ struct ftrace_likely_data {
/* Are two types/vars the same type (ignoring qualifiers)? */
#define __same_type(a, b) __builtin_types_compatible_p(typeof(a), typeof(b))
+/*
+ * __unqual_scalar_typeof(x) - Declare an unqualified scalar type, leaving
+ * non-scalar types unchanged.
+ */
+#if (defined(CONFIG_CC_IS_GCC) && CONFIG_GCC_VERSION < 40900) || defined(__CHECKER__)
+/*
+ * We build this out of a couple of helper macros in a vain attempt to
+ * help you keep your lunch down while reading it.
+ */
+#define __pick_scalar_type(x, type, otherwise) \
+ __builtin_choose_expr(__same_type(x, type), (type)0, otherwise)
+
+/*
+ * 'char' is not type-compatible with either 'signed char' or 'unsigned char',
+ * so we include the naked type here as well as the signed/unsigned variants.
+ */
+#define __pick_integer_type(x, type, otherwise) \
+ __pick_scalar_type(x, type, \
+ __pick_scalar_type(x, unsigned type, \
+ __pick_scalar_type(x, signed type, otherwise)))
+
+#define __unqual_scalar_typeof(x) typeof( \
+ __pick_integer_type(x, char, \
+ __pick_integer_type(x, short, \
+ __pick_integer_type(x, int, \
+ __pick_integer_type(x, long, \
+ __pick_integer_type(x, long long, x))))))
+#else
+/*
+ * If supported, prefer C11 _Generic for better compile-times. As above, 'char'
+ * is not type-compatible with 'signed char', and we define a separate case.
+ */
+#define __scalar_type_to_expr_cases(type) \
+ unsigned type: (unsigned type)0, \
+ signed type: (signed type)0
+
+#define __unqual_scalar_typeof(x) typeof( \
+ _Generic((x), \
+ char: (char)0, \
+ __scalar_type_to_expr_cases(char), \
+ __scalar_type_to_expr_cases(short), \
+ __scalar_type_to_expr_cases(int), \
+ __scalar_type_to_expr_cases(long), \
+ __scalar_type_to_expr_cases(long long), \
+ default: (x)))
+#endif
+
/* Is this type a native word size -- useful for atomic operations */
#define __native_word(t) \
(sizeof(t) == sizeof(char) || sizeof(t) == sizeof(short) || \
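The effect of __unqual_scalar_typeof() is easiest to see on a qualified scalar: the qualifiers drop, while non-scalar types pass through unchanged. For example:

        const volatile int x;
        __unqual_scalar_typeof(x) tmp;  /* tmp is a plain 'int' */

This is what lets READ_ONCE() return a value the caller can assign normally even when the source object is volatile.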
diff --git a/include/linux/context_tracking.h b/include/linux/context_tracking.h
index 8cac62ee6add..981b880d5b60 100644
--- a/include/linux/context_tracking.h
+++ b/include/linux/context_tracking.h
@@ -33,13 +33,13 @@ static inline void user_exit(void)
}
/* Called with interrupts disabled. */
-static inline void user_enter_irqoff(void)
+static __always_inline void user_enter_irqoff(void)
{
if (context_tracking_enabled())
__context_tracking_enter(CONTEXT_USER);
}
-static inline void user_exit_irqoff(void)
+static __always_inline void user_exit_irqoff(void)
{
if (context_tracking_enabled())
__context_tracking_exit(CONTEXT_USER);
@@ -75,7 +75,7 @@ static inline void exception_exit(enum ctx_state prev_ctx)
* is enabled. If context tracking is disabled, returns
* CONTEXT_DISABLED. This should be used primarily for debugging.
*/
-static inline enum ctx_state ct_state(void)
+static __always_inline enum ctx_state ct_state(void)
{
return context_tracking_enabled() ?
this_cpu_read(context_tracking.state) : CONTEXT_DISABLED;
diff --git a/include/linux/context_tracking_state.h b/include/linux/context_tracking_state.h
index e7fe6678b7ad..65a60d3313b0 100644
--- a/include/linux/context_tracking_state.h
+++ b/include/linux/context_tracking_state.h
@@ -26,12 +26,12 @@ struct context_tracking {
extern struct static_key_false context_tracking_key;
DECLARE_PER_CPU(struct context_tracking, context_tracking);
-static inline bool context_tracking_enabled(void)
+static __always_inline bool context_tracking_enabled(void)
{
return static_branch_unlikely(&context_tracking_key);
}
-static inline bool context_tracking_enabled_cpu(int cpu)
+static __always_inline bool context_tracking_enabled_cpu(int cpu)
{
return context_tracking_enabled() && per_cpu(context_tracking.active, cpu);
}
@@ -41,7 +41,7 @@ static inline bool context_tracking_enabled_this_cpu(void)
return context_tracking_enabled() && __this_cpu_read(context_tracking.active);
}
-static inline bool context_tracking_in_user(void)
+static __always_inline bool context_tracking_in_user(void)
{
return __this_cpu_read(context_tracking.state) == CONTEXT_USER;
}
diff --git a/include/linux/cpu_cooling.h b/include/linux/cpu_cooling.h
index 65501d8f9778..a3bdc8a98f2c 100644
--- a/include/linux/cpu_cooling.h
+++ b/include/linux/cpu_cooling.h
@@ -63,18 +63,10 @@ of_cpufreq_cooling_register(struct cpufreq_policy *policy)
struct cpuidle_driver;
#ifdef CONFIG_CPU_IDLE_THERMAL
-int cpuidle_cooling_register(struct cpuidle_driver *drv);
-int cpuidle_of_cooling_register(struct device_node *np,
- struct cpuidle_driver *drv);
+void cpuidle_cooling_register(struct cpuidle_driver *drv);
#else /* CONFIG_CPU_IDLE_THERMAL */
-static inline int cpuidle_cooling_register(struct cpuidle_driver *drv)
+static inline void cpuidle_cooling_register(struct cpuidle_driver *drv)
{
- return 0;
-}
-static inline int cpuidle_of_cooling_register(struct device_node *np,
- struct cpuidle_driver *drv)
-{
- return 0;
}
#endif /* CONFIG_CPU_IDLE_THERMAL */
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index 67d5950bd878..3494f6763597 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -367,7 +367,7 @@ struct cpufreq_driver {
/* platform specific boost support code */
bool boost_enabled;
- int (*set_boost)(int state);
+ int (*set_boost)(struct cpufreq_policy *policy, int state);
};
/* flags */
diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
index 8377afef8806..191772d4a4d7 100644
--- a/include/linux/cpuhotplug.h
+++ b/include/linux/cpuhotplug.h
@@ -102,6 +102,7 @@ enum cpuhp_state {
CPUHP_AP_IRQ_ARMADA_XP_STARTING,
CPUHP_AP_IRQ_BCM2836_STARTING,
CPUHP_AP_IRQ_MIPS_GIC_STARTING,
+ CPUHP_AP_IRQ_RISCV_STARTING,
CPUHP_AP_IRQ_SIFIVE_PLIC_STARTING,
CPUHP_AP_ARM_MVEBU_COHERENCY,
CPUHP_AP_MICROCODE_LOADER,
diff --git a/include/linux/crash_dump.h b/include/linux/crash_dump.h
index bc156285d097..a5192b718dbe 100644
--- a/include/linux/crash_dump.h
+++ b/include/linux/crash_dump.h
@@ -5,9 +5,10 @@
#include <linux/kexec.h>
#include <linux/proc_fs.h>
#include <linux/elf.h>
+#include <linux/pgtable.h>
#include <uapi/linux/vmcore.h>
-#include <asm/pgtable.h> /* for pgprot_t */
+#include <linux/pgtable.h> /* for pgprot_t */
#ifdef CONFIG_CRASH_DUMP
#define ELFCORE_ADDR_MAX (-1ULL)
diff --git a/include/linux/dax.h b/include/linux/dax.h
index d7af5d243f24..6904d4e0b2e0 100644
--- a/include/linux/dax.h
+++ b/include/linux/dax.h
@@ -5,7 +5,6 @@
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/radix-tree.h>
-#include <asm/pgtable.h>
/* Flag for synchronous flush */
#define DAXDEV_F_SYNC (1UL << 0)
diff --git a/include/linux/debug_locks.h b/include/linux/debug_locks.h
index 257ab3c92cb8..e7e45f0cc7da 100644
--- a/include/linux/debug_locks.h
+++ b/include/linux/debug_locks.h
@@ -12,7 +12,7 @@ extern int debug_locks __read_mostly;
extern int debug_locks_silent __read_mostly;
-static inline int __debug_locks_off(void)
+static __always_inline int __debug_locks_off(void)
{
return xchg(&debug_locks, 0);
}
diff --git a/include/linux/dma-noncoherent.h b/include/linux/dma-noncoherent.h
index b59f1b6be3e9..ca09a4e07d2d 100644
--- a/include/linux/dma-noncoherent.h
+++ b/include/linux/dma-noncoherent.h
@@ -3,7 +3,7 @@
#define _LINUX_DMA_NONCOHERENT_H 1
#include <linux/dma-mapping.h>
-#include <asm/pgtable.h>
+#include <linux/pgtable.h>
#ifdef CONFIG_ARCH_HAS_DMA_COHERENCE_H
#include <asm/dma-coherence.h>
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index e1c03339918f..6283917edd90 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -153,7 +153,7 @@ struct dma_interleaved_template {
bool dst_sgl;
size_t numf;
size_t frame_size;
- struct data_chunk sgl[0];
+ struct data_chunk sgl[];
};
/**
@@ -535,7 +535,7 @@ struct dmaengine_unmap_data {
struct device *dev;
struct kref kref;
size_t len;
- dma_addr_t addr[0];
+ dma_addr_t addr[];
};
struct dma_async_tx_descriptor;
diff --git a/include/linux/edac.h b/include/linux/edac.h
index 0f20b986b0ab..6eb7d55d7c3d 100644
--- a/include/linux/edac.h
+++ b/include/linux/edac.h
@@ -31,14 +31,6 @@ struct device;
extern int edac_op_state;
struct bus_type *edac_get_sysfs_subsys(void);
-int edac_get_report_status(void);
-void edac_set_report_status(int new);
-
-enum {
- EDAC_REPORTING_ENABLED,
- EDAC_REPORTING_DISABLED,
- EDAC_REPORTING_FORCE
-};
static inline void opstate_init(void)
{
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 01f5d296f9bb..3f881a892ea7 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -1048,6 +1048,7 @@ struct lock_manager_operations {
bool (*lm_break)(struct file_lock *);
int (*lm_change)(struct file_lock *, int, struct list_head *);
void (*lm_setup)(struct file_lock *, void **);
+ bool (*lm_breaker_owns_lease)(struct file_lock *);
};
struct lock_manager {
@@ -1412,6 +1413,8 @@ extern int send_sigurg(struct fown_struct *fown);
#define SB_I_IMA_UNVERIFIABLE_SIGNATURE 0x00000020
#define SB_I_UNTRUSTED_MOUNTER 0x00000040
+#define SB_I_SKIP_SYNC 0x00000100 /* Skip superblock at global sync */
+
/* Possible states of 'frozen' field */
enum {
SB_UNFROZEN = 0, /* FS is unfrozen */
@@ -1679,10 +1682,10 @@ static inline int sb_start_write_trylock(struct super_block *sb)
*
* Since page fault freeze protection behaves as a lock, users have to preserve
* ordering of freeze protection and other filesystem locks. It is advised to
- * put sb_start_pagefault() close to mmap_sem in lock ordering. Page fault
+ * put sb_start_pagefault() close to mmap_lock in lock ordering. Page fault
* handling code implies lock dependency:
*
- * mmap_sem
+ * mmap_lock
* -> sb_start_pagefault
*/
static inline void sb_start_pagefault(struct super_block *sb)
@@ -2589,7 +2592,6 @@ extern void bdput(struct block_device *);
extern void invalidate_bdev(struct block_device *);
extern void iterate_bdevs(void (*)(struct block_device *, void *), void *);
extern int sync_blockdev(struct block_device *bdev);
-extern void kill_bdev(struct block_device *);
extern struct super_block *freeze_bdev(struct block_device *);
extern void emergency_thaw_all(void);
extern void emergency_thaw_bdev(struct super_block *sb);
@@ -2605,7 +2607,6 @@ static inline bool sb_is_blkdev_sb(struct super_block *sb)
#else
static inline void bd_forget(struct inode *inode) {}
static inline int sync_blockdev(struct block_device *bdev) { return 0; }
-static inline void kill_bdev(struct block_device *bdev) {}
static inline void invalidate_bdev(struct block_device *bdev) {}
static inline struct super_block *freeze_bdev(struct block_device *sb)
@@ -3201,6 +3202,8 @@ enum {
DIO_SKIP_HOLES = 0x02,
};
+void dio_end_io(struct bio *bio);
+
ssize_t __blockdev_direct_IO(struct kiocb *iocb, struct inode *inode,
struct block_device *bdev, struct iov_iter *iter,
get_block_t get_block,
diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h
index ce0b5fbf239d..3f0b19dcfae7 100644
--- a/include/linux/fscache-cache.h
+++ b/include/linux/fscache-cache.h
@@ -46,7 +46,7 @@ struct fscache_cache_tag {
unsigned long flags;
#define FSCACHE_TAG_RESERVED 0 /* T if tag is reserved for a cache */
atomic_t usage;
- char name[0]; /* tag name */
+ char name[]; /* tag name */
};
/*
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index dfbbf7a7208b..e339dac91ee6 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -342,9 +342,8 @@ static inline void arch_ftrace_set_direct_caller(struct pt_regs *regs,
extern int stack_tracer_enabled;
-int stack_trace_sysctl(struct ctl_table *table, int write,
- void __user *buffer, size_t *lenp,
- loff_t *ppos);
+int stack_trace_sysctl(struct ctl_table *table, int write, void *buffer,
+ size_t *lenp, loff_t *ppos);
/* DO NOT MODIFY THIS VARIABLE DIRECTLY! */
DECLARE_PER_CPU(int, disable_stack_tracer);
diff --git a/include/linux/hardirq.h b/include/linux/hardirq.h
index e07cf853aa16..03c9fece7d43 100644
--- a/include/linux/hardirq.h
+++ b/include/linux/hardirq.h
@@ -38,9 +38,24 @@ static __always_inline void rcu_irq_enter_check_tick(void)
} while (0)
/*
+ * Like __irq_enter() without time accounting for fast
+ * interrupts, e.g. reschedule IPI where time accounting
+ * is more expensive than the actual interrupt.
+ */
+#define __irq_enter_raw() \
+ do { \
+ preempt_count_add(HARDIRQ_OFFSET); \
+ lockdep_hardirq_enter(); \
+ } while (0)
+
+/*
* Enter irq context (on NO_HZ, update jiffies):
*/
-extern void irq_enter(void);
+void irq_enter(void);
+/*
+ * Like irq_enter(), but RCU is already watching.
+ */
+void irq_enter_rcu(void);
/*
* Exit irq context without processing softirqs:
@@ -53,9 +68,23 @@ extern void irq_enter(void);
} while (0)
/*
+ * Like __irq_exit() without time accounting
+ */
+#define __irq_exit_raw() \
+ do { \
+ lockdep_hardirq_exit(); \
+ preempt_count_sub(HARDIRQ_OFFSET); \
+ } while (0)
+
+/*
* Exit irq context and process softirqs if needed:
*/
-extern void irq_exit(void);
+void irq_exit(void);
+
+/*
+ * Like irq_exit(), but return with RCU watching.
+ */
+void irq_exit_rcu(void);
#ifndef arch_nmi_enter
#define arch_nmi_enter() do { } while (0)
@@ -87,20 +116,24 @@ extern void rcu_nmi_exit(void);
arch_nmi_enter(); \
printk_nmi_enter(); \
lockdep_off(); \
- ftrace_nmi_enter(); \
BUG_ON(in_nmi() == NMI_MASK); \
__preempt_count_add(NMI_OFFSET + HARDIRQ_OFFSET); \
rcu_nmi_enter(); \
lockdep_hardirq_enter(); \
+ instrumentation_begin(); \
+ ftrace_nmi_enter(); \
+ instrumentation_end(); \
} while (0)
#define nmi_exit() \
do { \
+ instrumentation_begin(); \
+ ftrace_nmi_exit(); \
+ instrumentation_end(); \
lockdep_hardirq_exit(); \
rcu_nmi_exit(); \
BUG_ON(!in_nmi()); \
__preempt_count_sub(NMI_OFFSET + HARDIRQ_OFFSET); \
- ftrace_nmi_exit(); \
lockdep_on(); \
printk_nmi_exit(); \
arch_nmi_exit(); \
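The _raw variants bracket a handler exactly like __irq_enter()/__irq_exit() minus the time accounting, for interrupts where the accounting would dominate the cost. A hedged skeleton:

        __irq_enter_raw();
        handle_reschedule_ipi();        /* hypothetical fast handler */
        __irq_exit_raw();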
diff --git a/include/linux/hmm.h b/include/linux/hmm.h
index e912b9dc4633..f4a09ed223ac 100644
--- a/include/linux/hmm.h
+++ b/include/linux/hmm.h
@@ -10,7 +10,7 @@
#define LINUX_HMM_H
#include <linux/kconfig.h>
-#include <asm/pgtable.h>
+#include <linux/pgtable.h>
#include <linux/device.h>
#include <linux/migrate.h>
diff --git a/include/linux/host1x.h b/include/linux/host1x.h
index c230b4e70d75..a3a568bf9686 100644
--- a/include/linux/host1x.h
+++ b/include/linux/host1x.h
@@ -48,6 +48,9 @@ struct host1x_client_ops {
* @channel: host1x channel associated with this client
* @syncpts: array of syncpoints requested for this client
* @num_syncpts: number of syncpoints requested for this client
+ * @parent: pointer to parent structure
+ * @usecount: reference count for this structure
+ * @lock: mutex for mutually exclusive concurrency
*/
struct host1x_client {
struct list_head list;
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index cfbb0a87c5f0..71f20776b06c 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -248,7 +248,7 @@ static inline int is_swap_pmd(pmd_t pmd)
return !pmd_none(pmd) && !pmd_present(pmd);
}
-/* mmap_sem must be held on entry */
+/* mmap_lock must be held on entry */
static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
struct vm_area_struct *vma)
{
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 0cced410e0bd..50650d0d01b9 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -9,7 +9,7 @@
#include <linux/cgroup.h>
#include <linux/list.h>
#include <linux/kref.h>
-#include <asm/pgtable.h>
+#include <linux/pgtable.h>
struct ctl_table;
struct user_struct;
diff --git a/include/linux/i2c-smbus.h b/include/linux/i2c-smbus.h
index 8c5459034f92..1e4e0de4ef8b 100644
--- a/include/linux/i2c-smbus.h
+++ b/include/linux/i2c-smbus.h
@@ -2,7 +2,7 @@
/*
* i2c-smbus.h - SMBus extensions to the I2C protocol
*
- * Copyright (C) 2010 Jean Delvare <jdelvare@suse.de>
+ * Copyright (C) 2010-2019 Jean Delvare <jdelvare@suse.de>
*/
#ifndef _LINUX_I2C_SMBUS_H
@@ -39,4 +39,10 @@ static inline int of_i2c_setup_smbus_alert(struct i2c_adapter *adap)
}
#endif
+#if IS_ENABLED(CONFIG_I2C_SMBUS) && IS_ENABLED(CONFIG_DMI)
+void i2c_register_spd(struct i2c_adapter *adap);
+#else
+static inline void i2c_register_spd(struct i2c_adapter *adap) { }
+#endif
+
#endif /* _LINUX_I2C_SMBUS_H */
diff --git a/include/linux/i2c.h b/include/linux/i2c.h
index 49d29054e657..b8b8963f8bb9 100644
--- a/include/linux/i2c.h
+++ b/include/linux/i2c.h
@@ -351,14 +351,14 @@ static inline struct i2c_client *kobj_to_i2c_client(struct kobject *kobj)
return to_i2c_client(dev);
}
-static inline void *i2c_get_clientdata(const struct i2c_client *dev)
+static inline void *i2c_get_clientdata(const struct i2c_client *client)
{
- return dev_get_drvdata(&dev->dev);
+ return dev_get_drvdata(&client->dev);
}
-static inline void i2c_set_clientdata(struct i2c_client *dev, void *data)
+static inline void i2c_set_clientdata(struct i2c_client *client, void *data)
{
- dev_set_drvdata(&dev->dev, data);
+ dev_set_drvdata(&client->dev, data);
}
/* I2C slave support */
@@ -408,7 +408,7 @@ static inline bool i2c_detect_slave_mode(struct device *dev) { return false; }
* that are present. This information is used to grow the driver model tree.
* For mainboards this is done statically using i2c_register_board_info();
* bus numbers identify adapters that aren't yet available. For add-on boards,
- * i2c_new_device() does this dynamically with the adapter already known.
+ * i2c_new_client_device() does this dynamically with the adapter already known.
*/
struct i2c_board_info {
char type[I2C_NAME_SIZE];
@@ -439,14 +439,12 @@ struct i2c_board_info {
#if IS_ENABLED(CONFIG_I2C)
-/* Add-on boards should register/unregister their devices; e.g. a board
+/*
+ * Add-on boards should register/unregister their devices; e.g. a board
* with integrated I2C, a config eeprom, sensors, and a codec that's
* used in conjunction with the primary hardware.
*/
struct i2c_client *
-i2c_new_device(struct i2c_adapter *adap, struct i2c_board_info const *info);
-
-struct i2c_client *
i2c_new_client_device(struct i2c_adapter *adap, struct i2c_board_info const *info);
/* If you don't know the exact address of an I2C device, use this variant
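
The renamed parameter reads naturally at call sites; a minimal driver sketch, where foo_probe()/foo_remove() and struct foo_data are illustrative names only:

	#include <linux/i2c.h>
	#include <linux/slab.h>

	struct foo_data {
		int mode;
	};

	static int foo_probe(struct i2c_client *client,
			     const struct i2c_device_id *id)
	{
		struct foo_data *data;

		data = devm_kzalloc(&client->dev, sizeof(*data), GFP_KERNEL);
		if (!data)
			return -ENOMEM;
		i2c_set_clientdata(client, data);	/* stash per-client state */
		return 0;
	}

	static int foo_remove(struct i2c_client *client)
	{
		struct foo_data *data = i2c_get_clientdata(client);

		data->mode = 0;		/* illustrative teardown */
		return 0;
	}
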
diff --git a/include/linux/idle_inject.h b/include/linux/idle_inject.h
index a445cd1a36c5..91a8612b8bf9 100644
--- a/include/linux/idle_inject.h
+++ b/include/linux/idle_inject.h
@@ -26,4 +26,8 @@ void idle_inject_set_duration(struct idle_inject_device *ii_dev,
void idle_inject_get_duration(struct idle_inject_device *ii_dev,
unsigned int *run_duration_us,
unsigned int *idle_duration_us);
+
+void idle_inject_set_latency(struct idle_inject_device *ii_dev,
+ unsigned int latency_ns);
+
#endif /* __IDLE_INJECT_H__ */
diff --git a/include/linux/input/gp2ap002a00f.h b/include/linux/input/gp2ap002a00f.h
deleted file mode 100644
index 3614a13a8297..000000000000
--- a/include/linux/input/gp2ap002a00f.h
+++ /dev/null
@@ -1,23 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _GP2AP002A00F_H_
-#define _GP2AP002A00F_H_
-
-#include <linux/i2c.h>
-
-#define GP2A_I2C_NAME "gp2ap002a00f"
-
-/**
- * struct gp2a_platform_data - Sharp gp2ap002a00f proximity platform data
- * @vout_gpio: The gpio connected to the object detected pin (VOUT)
- * @wakeup: Set to true if the proximity can wake the device from suspend
- * @hw_setup: Callback for setting up hardware such as gpios and vregs
- * @hw_shutdown: Callback for properly shutting down hardware
- */
-struct gp2a_platform_data {
- int vout_gpio;
- bool wakeup;
- int (*hw_setup)(struct i2c_client *client);
- int (*hw_shutdown)(struct i2c_client *client);
-};
-
-#endif
diff --git a/include/linux/input/mt.h b/include/linux/input/mt.h
index 9e409bb13642..3b8580bd33c1 100644
--- a/include/linux/input/mt.h
+++ b/include/linux/input/mt.h
@@ -100,6 +100,11 @@ static inline bool input_is_mt_axis(int axis)
bool input_mt_report_slot_state(struct input_dev *dev,
unsigned int tool_type, bool active);
+static inline void input_mt_report_slot_inactive(struct input_dev *dev)
+{
+ input_mt_report_slot_state(dev, 0, false);
+}
+
void input_mt_report_finger_count(struct input_dev *dev, int count);
void input_mt_report_pointer_emulation(struct input_dev *dev, bool use_count);
void input_mt_drop_unused(struct input_dev *dev);
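
A short sketch of the new helper in a contact-report path; the function and its parameters are illustrative, not part of this patch:

	#include <linux/input/mt.h>

	static void example_report_contact(struct input_dev *dev, int slot,
					   bool touching, int x, int y)
	{
		input_mt_slot(dev, slot);
		if (touching) {
			input_mt_report_slot_state(dev, MT_TOOL_FINGER, true);
			input_report_abs(dev, ABS_MT_POSITION_X, x);
			input_report_abs(dev, ABS_MT_POSITION_Y, y);
		} else {
			/* Replaces open-coded (dev, 0, false) calls. */
			input_mt_report_slot_inactive(dev);
		}
		input_sync(dev);
	}
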
diff --git a/include/linux/instrumented.h b/include/linux/instrumented.h
new file mode 100644
index 000000000000..43e6ea591975
--- /dev/null
+++ b/include/linux/instrumented.h
@@ -0,0 +1,109 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/*
+ * This header provides generic wrappers for memory access instrumentation that
+ * the compiler cannot emit for: KASAN, KCSAN.
+ */
+#ifndef _LINUX_INSTRUMENTED_H
+#define _LINUX_INSTRUMENTED_H
+
+#include <linux/compiler.h>
+#include <linux/kasan-checks.h>
+#include <linux/kcsan-checks.h>
+#include <linux/types.h>
+
+/**
+ * instrument_read - instrument regular read access
+ *
+ * Instrument a regular read access. The instrumentation should be inserted
+ * before the actual read happens.
+ *
+ * @ptr: address of access
+ * @size: size of access
+ */
+static __always_inline void instrument_read(const volatile void *ptr, size_t size)
+{
+	kasan_check_read(ptr, size);
+	kcsan_check_read(ptr, size);
+}
+
+/**
+ * instrument_write - instrument regular write access
+ *
+ * Instrument a regular write access. The instrumentation should be inserted
+ * before the actual write happens.
+ *
+ * @ptr: address of access
+ * @size: size of access
+ */
+static __always_inline void instrument_write(const volatile void *ptr, size_t size)
+{
+	kasan_check_write(ptr, size);
+	kcsan_check_write(ptr, size);
+}
+
+/**
+ * instrument_atomic_read - instrument atomic read access
+ *
+ * Instrument an atomic read access. The instrumentation should be inserted
+ * before the actual read happens.
+ *
+ * @ptr: address of access
+ * @size: size of access
+ */
+static __always_inline void instrument_atomic_read(const volatile void *ptr, size_t size)
+{
+	kasan_check_read(ptr, size);
+	kcsan_check_atomic_read(ptr, size);
+}
+
+/**
+ * instrument_atomic_write - instrument atomic write access
+ *
+ * Instrument an atomic write access. The instrumentation should be inserted
+ * before the actual write happens.
+ *
+ * @ptr: address of access
+ * @size: size of access
+ */
+static __always_inline void instrument_atomic_write(const volatile void *ptr, size_t size)
+{
+	kasan_check_write(ptr, size);
+	kcsan_check_atomic_write(ptr, size);
+}
+
+/**
+ * instrument_copy_to_user - instrument reads of copy_to_user
+ *
+ * Instrument reads from kernel memory that are due to copy_to_user() (and
+ * variants). The instrumentation must be inserted before the accesses.
+ *
+ * @to: destination address
+ * @from: source address
+ * @n: number of bytes to copy
+ */
+static __always_inline void
+instrument_copy_to_user(void __user *to, const void *from, unsigned long n)
+{
+ kasan_check_read(from, n);
+ kcsan_check_read(from, n);
+}
+
+/**
+ * instrument_copy_from_user - instrument writes of copy_from_user
+ *
+ * Instrument writes to kernel memory that are due to copy_from_user() (and
+ * variants). The instrumentation should be inserted before the accesses.
+ *
+ * @to: destination address
+ * @from: source address
+ * @n: number of bytes to copy
+ */
+static __always_inline void
+instrument_copy_from_user(const void *to, const void __user *from, unsigned long n)
+{
+ kasan_check_write(to, n);
+ kcsan_check_write(to, n);
+}
+
+#endif /* _LINUX_INSTRUMENTED_H */
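
These helpers are consumed by generated wrappers such as those in asm-generic/atomic-instrumented.h; a condensed sketch of that pattern, assuming an architecture that provides arch_atomic_read():

	#include <linux/instrumented.h>

	static __always_inline int example_atomic_read(const atomic_t *v)
	{
		/* Check the access first, then perform the real
		 * (uninstrumented) arch-level operation.
		 */
		instrument_atomic_read(v, sizeof(*v));
		return arch_atomic_read(v);
	}
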
diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
index 4100bd224f5c..3e8fa1c7a1e6 100644
--- a/include/linux/intel-iommu.h
+++ b/include/linux/intel-iommu.h
@@ -41,6 +41,7 @@
#define DMA_PTE_SNP BIT_ULL(11)
#define DMA_FL_PTE_PRESENT BIT_ULL(0)
+#define DMA_FL_PTE_US BIT_ULL(2)
#define DMA_FL_PTE_XD BIT_ULL(63)
#define ADDR_WIDTH_5LEVEL (57)
diff --git a/include/linux/interconnect.h b/include/linux/interconnect.h
index d8c29049f066..3a63d98613fc 100644
--- a/include/linux/interconnect.h
+++ b/include/linux/interconnect.h
@@ -35,6 +35,7 @@ int icc_enable(struct icc_path *path);
int icc_disable(struct icc_path *path);
int icc_set_bw(struct icc_path *path, u32 avg_bw, u32 peak_bw);
void icc_set_tag(struct icc_path *path, u32 tag);
+const char *icc_get_name(struct icc_path *path);
#else
@@ -84,6 +85,11 @@ static inline void icc_set_tag(struct icc_path *path, u32 tag)
{
}
+static inline const char *icc_get_name(struct icc_path *path)
+{
+ return NULL;
+}
+
#endif /* CONFIG_INTERCONNECT */
#endif /* __LINUX_INTERCONNECT_H */
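
A small consumer-side sketch; the "memory" path name is an assumption for illustration, and since the !CONFIG_INTERCONNECT stub returns NULL, callers must tolerate a NULL name:

	#include <linux/device.h>
	#include <linux/err.h>
	#include <linux/interconnect.h>

	static int example_log_icc_path(struct device *dev)
	{
		struct icc_path *path = of_icc_get(dev, "memory");
		const char *name;

		if (IS_ERR_OR_NULL(path))
			return PTR_ERR(path);

		name = icc_get_name(path);
		if (name)
			dev_dbg(dev, "using interconnect path %s\n", name);

		icc_put(path);
		return 0;
	}
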
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index 80f637c3a6f3..5db970b6615a 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -760,8 +760,10 @@ extern int arch_early_irq_init(void);
/*
* We want to know which function is an entrypoint of a hardirq or a softirq.
*/
-#define __irq_entry __attribute__((__section__(".irqentry.text")))
-#define __softirq_entry \
- __attribute__((__section__(".softirqentry.text")))
+#ifndef __irq_entry
+# define __irq_entry __attribute__((__section__(".irqentry.text")))
+#endif
+
+#define __softirq_entry __attribute__((__section__(".softirqentry.text")))
#endif
diff --git a/include/linux/io-mapping.h b/include/linux/io-mapping.h
index b336622612f3..0beaa3eba155 100644
--- a/include/linux/io-mapping.h
+++ b/include/linux/io-mapping.h
@@ -10,6 +10,7 @@
#include <linux/slab.h>
#include <linux/bug.h>
#include <linux/io.h>
+#include <linux/pgtable.h>
#include <asm/page.h>
/*
@@ -99,7 +100,6 @@ io_mapping_unmap(void __iomem *vaddr)
#else
#include <linux/uaccess.h>
-#include <asm/pgtable.h>
/* Create the io_mapping object*/
static inline struct io_mapping *
diff --git a/include/linux/irqflags.h b/include/linux/irqflags.h
index d7f7e436c3af..6384d2813ded 100644
--- a/include/linux/irqflags.h
+++ b/include/linux/irqflags.h
@@ -32,7 +32,7 @@
#ifdef CONFIG_TRACE_IRQFLAGS
extern void trace_hardirqs_on_prepare(void);
- extern void trace_hardirqs_off_prepare(void);
+ extern void trace_hardirqs_off_finish(void);
extern void trace_hardirqs_on(void);
extern void trace_hardirqs_off(void);
# define lockdep_hardirq_context(p) ((p)->hardirq_context)
@@ -101,7 +101,7 @@ do { \
#else
# define trace_hardirqs_on_prepare() do { } while (0)
-# define trace_hardirqs_off_prepare() do { } while (0)
+# define trace_hardirqs_off_finish() do { } while (0)
# define trace_hardirqs_on() do { } while (0)
# define trace_hardirqs_off() do { } while (0)
# define lockdep_hardirq_context(p) 0
diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h
index f613d8529863..d56128df2aff 100644
--- a/include/linux/jbd2.h
+++ b/include/linux/jbd2.h
@@ -766,6 +766,11 @@ struct journal_s
int j_errno;
/**
+ * @j_abort_mutex: Lock the whole aborting procedure.
+ */
+ struct mutex j_abort_mutex;
+
+ /**
* @j_sb_buffer: The first part of the superblock buffer.
*/
struct buffer_head *j_sb_buffer;
@@ -1247,7 +1252,6 @@ JBD2_FEATURE_INCOMPAT_FUNCS(csum3, CSUM_V3)
#define JBD2_ABORT_ON_SYNCDATA_ERR 0x040 /* Abort the journal on file
* data write error in ordered
* mode */
-#define JBD2_REC_ERR 0x080 /* The errno in the sb has been recorded */
/*
* Function declarations for the journaling transaction and buffer
diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h
index 657a83b943f0..98338dc6b5d2 100644
--- a/include/linux/kallsyms.h
+++ b/include/linux/kallsyms.h
@@ -165,9 +165,9 @@ static inline int kallsyms_show_value(void)
#endif /*CONFIG_KALLSYMS*/
-static inline void print_ip_sym(unsigned long ip)
+static inline void print_ip_sym(const char *loglvl, unsigned long ip)
{
- printk("[<%px>] %pS\n", (void *) ip, (void *) ip);
+ printk("%s[<%px>] %pS\n", loglvl, (void *) ip, (void *) ip);
}
#endif /*_LINUX_KALLSYMS_H*/
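
Callers now choose the log level explicitly at the call site; a one-line sketch (the wrapper function is hypothetical):

	static void example_dump_ip(unsigned long ip)
	{
		print_ip_sym(KERN_DEBUG, ip);	/* was: print_ip_sym(ip) */
	}
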
diff --git a/include/linux/kasan.h b/include/linux/kasan.h
index 31314ca7c635..82522e996c76 100644
--- a/include/linux/kasan.h
+++ b/include/linux/kasan.h
@@ -11,8 +11,8 @@ struct task_struct;
#ifdef CONFIG_KASAN
+#include <linux/pgtable.h>
#include <asm/kasan.h>
-#include <asm/pgtable.h>
extern unsigned char kasan_early_shadow_page[PAGE_SIZE];
extern pte_t kasan_early_shadow_pte[PTRS_PER_PTE];
diff --git a/include/linux/kcsan-checks.h b/include/linux/kcsan-checks.h
new file mode 100644
index 000000000000..7b0b9c44f5f3
--- /dev/null
+++ b/include/linux/kcsan-checks.h
@@ -0,0 +1,430 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _LINUX_KCSAN_CHECKS_H
+#define _LINUX_KCSAN_CHECKS_H
+
+/* Note: Only include what is already included by compiler.h. */
+#include <linux/compiler_attributes.h>
+#include <linux/types.h>
+
+/*
+ * ACCESS TYPE MODIFIERS
+ *
+ * <none>: normal read access;
+ * WRITE : write access;
+ * ATOMIC: access is atomic;
+ * ASSERT: access is not a regular access, but an assertion;
+ * SCOPED: access is a scoped access;
+ */
+#define KCSAN_ACCESS_WRITE 0x1
+#define KCSAN_ACCESS_ATOMIC 0x2
+#define KCSAN_ACCESS_ASSERT 0x4
+#define KCSAN_ACCESS_SCOPED 0x8
+
+/*
+ * __kcsan_*: Always calls into the runtime when KCSAN is enabled. This may be
+ * used even in compilation units that selectively disable KCSAN, but still
+ * need KCSAN to validate accesses to an address. Never use these in header
+ * files!
+ */
+#ifdef CONFIG_KCSAN
+/**
+ * __kcsan_check_access - check generic access for races
+ *
+ * @ptr: address of access
+ * @size: size of access
+ * @type: access type modifier
+ */
+void __kcsan_check_access(const volatile void *ptr, size_t size, int type);
+
+/**
+ * kcsan_disable_current - disable KCSAN for the current context
+ *
+ * Supports nesting.
+ */
+void kcsan_disable_current(void);
+
+/**
+ * kcsan_enable_current - re-enable KCSAN for the current context
+ *
+ * Supports nesting.
+ */
+void kcsan_enable_current(void);
+void kcsan_enable_current_nowarn(void); /* Safe in uaccess regions. */
+
+/**
+ * kcsan_nestable_atomic_begin - begin nestable atomic region
+ *
+ * Accesses within the atomic region may appear to race with other accesses but
+ * should be considered atomic.
+ */
+void kcsan_nestable_atomic_begin(void);
+
+/**
+ * kcsan_nestable_atomic_end - end nestable atomic region
+ */
+void kcsan_nestable_atomic_end(void);
+
+/**
+ * kcsan_flat_atomic_begin - begin flat atomic region
+ *
+ * Accesses within the atomic region may appear to race with other accesses but
+ * should be considered atomic.
+ */
+void kcsan_flat_atomic_begin(void);
+
+/**
+ * kcsan_flat_atomic_end - end flat atomic region
+ */
+void kcsan_flat_atomic_end(void);
+
+/**
+ * kcsan_atomic_next - consider following accesses as atomic
+ *
+ * Force treating the next n memory accesses for the current context as atomic
+ * operations.
+ *
+ * @n: number of following memory accesses to treat as atomic.
+ */
+void kcsan_atomic_next(int n);
+
+/**
+ * kcsan_set_access_mask - set access mask
+ *
+ * Set the access mask for all accesses for the current context if non-zero.
+ * Only value changes to bits set in the mask will be reported.
+ *
+ * @mask: bitmask
+ */
+void kcsan_set_access_mask(unsigned long mask);
+
+/* Scoped access information. */
+struct kcsan_scoped_access {
+ struct list_head list;
+ const volatile void *ptr;
+ size_t size;
+ int type;
+};
+/*
+ * Automatically call kcsan_end_scoped_access() when kcsan_scoped_access goes
+ * out of scope; relies on attribute "cleanup", which is supported by all
+ * compilers that support KCSAN.
+ */
+#define __kcsan_cleanup_scoped \
+ __maybe_unused __attribute__((__cleanup__(kcsan_end_scoped_access)))
+
+/**
+ * kcsan_begin_scoped_access - begin scoped access
+ *
+ * Begin scoped access and initialize @sa, which will cause KCSAN to
+ * continuously check the memory range in the current thread until
+ * kcsan_end_scoped_access() is called for @sa.
+ *
+ * Scoped accesses are implemented by appending @sa to an internal list for the
+ * current execution context, and then checked on every call into the KCSAN
+ * runtime.
+ *
+ * @ptr: address of access
+ * @size: size of access
+ * @type: access type modifier
+ * @sa: struct kcsan_scoped_access to use for the scope of the access
+ */
+struct kcsan_scoped_access *
+kcsan_begin_scoped_access(const volatile void *ptr, size_t size, int type,
+ struct kcsan_scoped_access *sa);
+
+/**
+ * kcsan_end_scoped_access - end scoped access
+ *
+ * End a scoped access, which will stop KCSAN checking the memory range.
+ * Requires that kcsan_begin_scoped_access() was previously called once for @sa.
+ *
+ * @sa: a previously initialized struct kcsan_scoped_access
+ */
+void kcsan_end_scoped_access(struct kcsan_scoped_access *sa);
+
+
+#else /* CONFIG_KCSAN */
+
+static inline void __kcsan_check_access(const volatile void *ptr, size_t size,
+ int type) { }
+
+static inline void kcsan_disable_current(void) { }
+static inline void kcsan_enable_current(void) { }
+static inline void kcsan_enable_current_nowarn(void) { }
+static inline void kcsan_nestable_atomic_begin(void) { }
+static inline void kcsan_nestable_atomic_end(void) { }
+static inline void kcsan_flat_atomic_begin(void) { }
+static inline void kcsan_flat_atomic_end(void) { }
+static inline void kcsan_atomic_next(int n) { }
+static inline void kcsan_set_access_mask(unsigned long mask) { }
+
+struct kcsan_scoped_access { };
+#define __kcsan_cleanup_scoped __maybe_unused
+static inline struct kcsan_scoped_access *
+kcsan_begin_scoped_access(const volatile void *ptr, size_t size, int type,
+ struct kcsan_scoped_access *sa) { return sa; }
+static inline void kcsan_end_scoped_access(struct kcsan_scoped_access *sa) { }
+
+#endif /* CONFIG_KCSAN */
+
+#ifdef __SANITIZE_THREAD__
+/*
+ * Only calls into the runtime when the particular compilation unit has KCSAN
+ * instrumentation enabled. May be used in header files.
+ */
+#define kcsan_check_access __kcsan_check_access
+
+/*
+ * Only use these to disable KCSAN for accesses in the current compilation unit;
+ * calls into libraries may still perform KCSAN checks.
+ */
+#define __kcsan_disable_current kcsan_disable_current
+#define __kcsan_enable_current kcsan_enable_current_nowarn
+#else
+static inline void kcsan_check_access(const volatile void *ptr, size_t size,
+ int type) { }
+static inline void __kcsan_enable_current(void) { }
+static inline void __kcsan_disable_current(void) { }
+#endif
+
+/**
+ * __kcsan_check_read - check regular read access for races
+ *
+ * @ptr: address of access
+ * @size: size of access
+ */
+#define __kcsan_check_read(ptr, size) __kcsan_check_access(ptr, size, 0)
+
+/**
+ * __kcsan_check_write - check regular write access for races
+ *
+ * @ptr: address of access
+ * @size: size of access
+ */
+#define __kcsan_check_write(ptr, size) \
+ __kcsan_check_access(ptr, size, KCSAN_ACCESS_WRITE)
+
+/**
+ * kcsan_check_read - check regular read access for races
+ *
+ * @ptr: address of access
+ * @size: size of access
+ */
+#define kcsan_check_read(ptr, size) kcsan_check_access(ptr, size, 0)
+
+/**
+ * kcsan_check_write - check regular write access for races
+ *
+ * @ptr: address of access
+ * @size: size of access
+ */
+#define kcsan_check_write(ptr, size) \
+ kcsan_check_access(ptr, size, KCSAN_ACCESS_WRITE)
+
+/*
+ * Check for atomic accesses: if atomic accesses are not ignored, this simply
+ * aliases to kcsan_check_access(), otherwise becomes a no-op.
+ */
+#ifdef CONFIG_KCSAN_IGNORE_ATOMICS
+#define kcsan_check_atomic_read(...) do { } while (0)
+#define kcsan_check_atomic_write(...) do { } while (0)
+#else
+#define kcsan_check_atomic_read(ptr, size) \
+ kcsan_check_access(ptr, size, KCSAN_ACCESS_ATOMIC)
+#define kcsan_check_atomic_write(ptr, size) \
+ kcsan_check_access(ptr, size, KCSAN_ACCESS_ATOMIC | KCSAN_ACCESS_WRITE)
+#endif
+
+/**
+ * ASSERT_EXCLUSIVE_WRITER - assert no concurrent writes to @var
+ *
+ * Assert that there are no concurrent writes to @var; other readers are
+ * allowed. This assertion can be used to specify properties of concurrent code,
+ * where violation cannot be detected as a normal data race.
+ *
+ * For example, if we only have a single writer, but multiple concurrent
+ * readers, to avoid data races, all these accesses must be marked; even
+ * concurrent marked writes racing with the single writer are bugs.
+ * Unfortunately, due to being marked, they are no longer data races. For cases
+ * like these, we can use the macro as follows:
+ *
+ * .. code-block:: c
+ *
+ * void writer(void) {
+ * spin_lock(&update_foo_lock);
+ * ASSERT_EXCLUSIVE_WRITER(shared_foo);
+ * WRITE_ONCE(shared_foo, ...);
+ * spin_unlock(&update_foo_lock);
+ * }
+ * void reader(void) {
+ * // update_foo_lock does not need to be held!
+ * ... = READ_ONCE(shared_foo);
+ * }
+ *
+ * Note: ASSERT_EXCLUSIVE_WRITER_SCOPED(), if applicable, performs more
+ * thorough checking when a clear scope exists in which no concurrent writes
+ * are expected.
+ *
+ * @var: variable to assert on
+ */
+#define ASSERT_EXCLUSIVE_WRITER(var) \
+ __kcsan_check_access(&(var), sizeof(var), KCSAN_ACCESS_ASSERT)
+
+/*
+ * Helper macros for the implementation of ASSERT_EXCLUSIVE_*_SCOPED(). @id is
+ * expected to be unique for the scope in which instances of kcsan_scoped_access
+ * are declared.
+ */
+#define __kcsan_scoped_name(c, suffix) __kcsan_scoped_##c##suffix
+#define __ASSERT_EXCLUSIVE_SCOPED(var, type, id) \
+ struct kcsan_scoped_access __kcsan_scoped_name(id, _) \
+ __kcsan_cleanup_scoped; \
+ struct kcsan_scoped_access *__kcsan_scoped_name(id, _dummy_p) \
+ __maybe_unused = kcsan_begin_scoped_access( \
+ &(var), sizeof(var), KCSAN_ACCESS_SCOPED | (type), \
+ &__kcsan_scoped_name(id, _))
+
+/**
+ * ASSERT_EXCLUSIVE_WRITER_SCOPED - assert no concurrent writes to @var in scope
+ *
+ * Scoped variant of ASSERT_EXCLUSIVE_WRITER().
+ *
+ * Assert that there are no concurrent writes to @var for the duration of the
+ * scope in which it is introduced. This provides a better way to fully cover
+ * the enclosing scope, compared to multiple ASSERT_EXCLUSIVE_WRITER(), and
+ * increases the likelihood for KCSAN to detect racing accesses.
+ *
+ * For example, it allows finding race-condition bugs that only occur due to
+ * state changes within the scope itself:
+ *
+ * .. code-block:: c
+ *
+ * void writer(void) {
+ * spin_lock(&update_foo_lock);
+ * {
+ * ASSERT_EXCLUSIVE_WRITER_SCOPED(shared_foo);
+ * WRITE_ONCE(shared_foo, 42);
+ * ...
+ * // shared_foo should still be 42 here!
+ * }
+ * spin_unlock(&update_foo_lock);
+ * }
+ * void buggy(void) {
+ * if (READ_ONCE(shared_foo) == 42)
+ * WRITE_ONCE(shared_foo, 1); // bug!
+ * }
+ *
+ * @var: variable to assert on
+ */
+#define ASSERT_EXCLUSIVE_WRITER_SCOPED(var) \
+ __ASSERT_EXCLUSIVE_SCOPED(var, KCSAN_ACCESS_ASSERT, __COUNTER__)
+
+/**
+ * ASSERT_EXCLUSIVE_ACCESS - assert no concurrent accesses to @var
+ *
+ * Assert that there are no concurrent accesses to @var (no readers nor
+ * writers). This assertion can be used to specify properties of concurrent
+ * code, where violation cannot be detected as a normal data race.
+ *
+ * For example, where exclusive access is expected after determining no other
+ * users of an object are left, but the object is not actually freed. We can
+ * check that this property actually holds as follows:
+ *
+ * .. code-block:: c
+ *
+ * if (refcount_dec_and_test(&obj->refcnt)) {
+ * ASSERT_EXCLUSIVE_ACCESS(*obj);
+ * do_some_cleanup(obj);
+ * release_for_reuse(obj);
+ * }
+ *
+ * Note: ASSERT_EXCLUSIVE_ACCESS_SCOPED(), if applicable, performs more
+ * thorough checking when a clear scope exists in which no concurrent accesses
+ * are expected.
+ *
+ * Note: For cases where the object is freed, `KASAN <kasan.html>`_ is a better
+ * fit to detect use-after-free bugs.
+ *
+ * @var: variable to assert on
+ */
+#define ASSERT_EXCLUSIVE_ACCESS(var) \
+ __kcsan_check_access(&(var), sizeof(var), KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ASSERT)
+
+/**
+ * ASSERT_EXCLUSIVE_ACCESS_SCOPED - assert no concurrent accesses to @var in scope
+ *
+ * Scoped variant of ASSERT_EXCLUSIVE_ACCESS().
+ *
+ * Assert that there are no concurrent accesses to @var (no readers nor writers)
+ * for the entire duration of the scope in which it is introduced. This provides
+ * a better way to fully cover the enclosing scope, compared to multiple
+ * ASSERT_EXCLUSIVE_ACCESS(), and increases the likelihood for KCSAN to detect
+ * racing accesses.
+ *
+ * @var: variable to assert on
+ */
+#define ASSERT_EXCLUSIVE_ACCESS_SCOPED(var) \
+ __ASSERT_EXCLUSIVE_SCOPED(var, KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ASSERT, __COUNTER__)
+
+/**
+ * ASSERT_EXCLUSIVE_BITS - assert no concurrent writes to subset of bits in @var
+ *
+ * Bit-granular variant of ASSERT_EXCLUSIVE_WRITER().
+ *
+ * Assert that there are no concurrent writes to a subset of bits in @var;
+ * concurrent readers are permitted. This assertion captures more detailed
+ * bit-level properties, compared to the other (word granularity) assertions.
+ * Only the bits set in @mask are checked for concurrent modifications, while
+ * ignoring the remaining bits, i.e. concurrent writes (or reads) to ~mask bits
+ * are ignored.
+ *
+ * Use this for variables, where some bits must not be modified concurrently,
+ * yet other bits are expected to be modified concurrently.
+ *
+ * For example, variables where, after initialization, some bits are read-only,
+ * but other bits may still be modified concurrently. A reader may wish to
+ * assert that this is true as follows:
+ *
+ * .. code-block:: c
+ *
+ * ASSERT_EXCLUSIVE_BITS(flags, READ_ONLY_MASK);
+ * foo = (READ_ONCE(flags) & READ_ONLY_MASK) >> READ_ONLY_SHIFT;
+ *
+ * Note: The access that immediately follows ASSERT_EXCLUSIVE_BITS() is assumed
+ * to access the masked bits only, and KCSAN optimistically assumes it is
+ * therefore safe, even in the presence of data races, and marking it with
+ * READ_ONCE() is optional from KCSAN's point-of-view. We caution, however, that
+ * it may still be advisable to do so, since we cannot reason about all compiler
+ * optimizations when it comes to bit manipulations (on the reader and writer
+ * side). If you are sure nothing can go wrong, we can write the above simply
+ * as:
+ *
+ * .. code-block:: c
+ *
+ * ASSERT_EXCLUSIVE_BITS(flags, READ_ONLY_MASK);
+ * foo = (flags & READ_ONLY_MASK) >> READ_ONLY_SHIFT;
+ *
+ * Another example, where this may be used, is when certain bits of @var may
+ * only be modified when holding the appropriate lock, but other bits may still
+ * be modified concurrently. Writers, where other bits may change concurrently,
+ * could use the assertion as follows:
+ *
+ * .. code-block:: c
+ *
+ * spin_lock(&foo_lock);
+ * ASSERT_EXCLUSIVE_BITS(flags, FOO_MASK);
+ * old_flags = flags;
+ * new_flags = (old_flags & ~FOO_MASK) | (new_foo << FOO_SHIFT);
+ * if (cmpxchg(&flags, old_flags, new_flags) != old_flags) { ... }
+ * spin_unlock(&foo_lock);
+ *
+ * @var: variable to assert on
+ * @mask: only check for modifications to bits set in @mask
+ */
+#define ASSERT_EXCLUSIVE_BITS(var, mask) \
+ do { \
+ kcsan_set_access_mask(mask); \
+ __kcsan_check_access(&(var), sizeof(var), KCSAN_ACCESS_ASSERT);\
+ kcsan_set_access_mask(0); \
+ kcsan_atomic_next(1); \
+ } while (0)
+
+#endif /* _LINUX_KCSAN_CHECKS_H */
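
Outside comment form, a compact sketch combining the pieces above; the flags word and its mask are invented for illustration:

	#include <linux/bits.h>
	#include <linux/compiler.h>
	#include <linux/kcsan-checks.h>

	#define EXAMPLE_RO_MASK		GENMASK(7, 0)	/* assumed read-only after init */

	static unsigned long example_flags;

	static unsigned long example_read_ro_bits(void)
	{
		/* Report only concurrent changes to the masked bits. */
		ASSERT_EXCLUSIVE_BITS(example_flags, EXAMPLE_RO_MASK);
		return READ_ONCE(example_flags) & EXAMPLE_RO_MASK;
	}
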
diff --git a/include/linux/kcsan.h b/include/linux/kcsan.h
new file mode 100644
index 000000000000..53340d8789f9
--- /dev/null
+++ b/include/linux/kcsan.h
@@ -0,0 +1,59 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _LINUX_KCSAN_H
+#define _LINUX_KCSAN_H
+
+#include <linux/kcsan-checks.h>
+#include <linux/types.h>
+
+#ifdef CONFIG_KCSAN
+
+/*
+ * Context for each thread of execution: for tasks, this is stored in
+ * task_struct, and interrupts access internal per-CPU storage.
+ */
+struct kcsan_ctx {
+ int disable_count; /* disable counter */
+ int atomic_next; /* number of following atomic ops */
+
+ /*
+ * We distinguish between: (a) nestable atomic regions that may contain
+ * other nestable regions; and (b) flat atomic regions that do not keep
+ * track of nesting. Both (a) and (b) are entirely independent of each
+ * other, and a flat region may be started in a nestable region or
+ * vice-versa.
+ *
+ * This is required because, for example, in the annotations for
+ * seqlocks, we declare seqlock writer critical sections as (a) nestable
+ * atomic regions, but reader critical sections as (b) flat atomic
+ * regions, but have encountered cases where seqlock reader critical
+ * sections are contained within writer critical sections (the opposite
+ * may be possible, too).
+ *
+ * To support these cases, we independently track the depth of nesting
+ * for (a), and whether the leaf level is flat for (b).
+ */
+ int atomic_nest_count;
+ bool in_flat_atomic;
+
+ /*
+ * Access mask for all accesses if non-zero.
+ */
+ unsigned long access_mask;
+
+ /* List of scoped accesses. */
+ struct list_head scoped_accesses;
+};
+
+/**
+ * kcsan_init - initialize KCSAN runtime
+ */
+void kcsan_init(void);
+
+#else /* CONFIG_KCSAN */
+
+static inline void kcsan_init(void) { }
+
+#endif /* CONFIG_KCSAN */
+
+#endif /* _LINUX_KCSAN_H */
diff --git a/include/linux/kexec.h b/include/linux/kexec.h
index 1776eb2e43a4..ea67910ae6b7 100644
--- a/include/linux/kexec.h
+++ b/include/linux/kexec.h
@@ -208,7 +208,7 @@ struct crash_mem_range {
struct crash_mem {
unsigned int max_nr_ranges;
unsigned int nr_ranges;
- struct crash_mem_range ranges[0];
+ struct crash_mem_range ranges[];
};
extern int crash_exclude_mem_range(struct crash_mem *mem,
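
The [0] to [] change converts ranges into a C99 flexible array member; allocation sites typically pair this with struct_size() from linux/overflow.h. A sketch, with the helper name invented:

	#include <linux/kexec.h>
	#include <linux/overflow.h>
	#include <linux/slab.h>

	static struct crash_mem *example_alloc_crash_mem(unsigned int nr)
	{
		struct crash_mem *mem;

		/* One allocation covering the header plus nr ranges. */
		mem = kzalloc(struct_size(mem, ranges, nr), GFP_KERNEL);
		if (mem)
			mem->max_nr_ranges = nr;
		return mem;
	}
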
diff --git a/include/linux/key.h b/include/linux/key.h
index 6cf8e71cf8b7..0f2e24f13c2b 100644
--- a/include/linux/key.h
+++ b/include/linux/key.h
@@ -71,6 +71,23 @@ struct net;
#define KEY_PERM_UNDEF 0xffffffff
+/*
+ * The permissions required on a key that we're looking up.
+ */
+enum key_need_perm {
+ KEY_NEED_UNSPECIFIED, /* Needed permission unspecified */
+ KEY_NEED_VIEW, /* Require permission to view attributes */
+ KEY_NEED_READ, /* Require permission to read content */
+ KEY_NEED_WRITE, /* Require permission to update / modify */
+ KEY_NEED_SEARCH, /* Require permission to search (keyring) or find (key) */
+ KEY_NEED_LINK, /* Require permission to link */
+ KEY_NEED_SETATTR, /* Require permission to change attributes */
+ KEY_NEED_UNLINK, /* Require permission to unlink key */
+ KEY_SYSADMIN_OVERRIDE, /* Special: override by CAP_SYS_ADMIN */
+ KEY_AUTHTOKEN_OVERRIDE, /* Special: override by possession of auth token */
+ KEY_DEFER_PERM_CHECK, /* Special: permission check is deferred */
+};
+
struct seq_file;
struct user_struct;
struct signal_struct;
@@ -176,6 +193,9 @@ struct key {
struct list_head graveyard_link;
struct rb_node serial_node;
};
+#ifdef CONFIG_KEY_NOTIFICATIONS
+ struct watch_list *watchers; /* Entities watching this key for changes */
+#endif
struct rw_semaphore sem; /* change vs change sem */
struct key_user *user; /* owner of this key */
void *security; /* security data for this key */
@@ -417,20 +437,9 @@ static inline key_serial_t key_serial(const struct key *key)
extern void key_set_timeout(struct key *, unsigned);
extern key_ref_t lookup_user_key(key_serial_t id, unsigned long flags,
- key_perm_t perm);
+ enum key_need_perm need_perm);
extern void key_free_user_ns(struct user_namespace *);
-/*
- * The permissions required on a key that we're looking up.
- */
-#define KEY_NEED_VIEW 0x01 /* Require permission to view attributes */
-#define KEY_NEED_READ 0x02 /* Require permission to read content */
-#define KEY_NEED_WRITE 0x04 /* Require permission to update / modify */
-#define KEY_NEED_SEARCH 0x08 /* Require permission to search (keyring) or find (key) */
-#define KEY_NEED_LINK 0x10 /* Require permission to link */
-#define KEY_NEED_SETATTR 0x20 /* Require permission to change attributes */
-#define KEY_NEED_ALL 0x3f /* All the above permissions */
-
static inline short key_read_state(const struct key *key)
{
/* Barrier versus mark_key_instantiated(). */
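
With the enum in place, call sites name the permission they need; a minimal sketch (the function is hypothetical, and 0 means no special lookup flags):

	#include <linux/err.h>
	#include <linux/key.h>

	static long example_check_key_readable(key_serial_t id)
	{
		key_ref_t ref = lookup_user_key(id, 0, KEY_NEED_READ);

		if (IS_ERR(ref))
			return PTR_ERR(ref);
		key_ref_put(ref);
		return 0;
	}
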
diff --git a/include/linux/kgdb.h b/include/linux/kgdb.h
index c62d76478adc..529116b0cabe 100644
--- a/include/linux/kgdb.h
+++ b/include/linux/kgdb.h
@@ -276,8 +276,7 @@ struct kgdb_arch {
* the I/O driver.
* @post_exception: Pointer to a function that will do any cleanup work
* for the I/O driver.
- * @is_console: 1 if the end device is a console 0 if the I/O device is
- * not a console
+ * @cons: valid if the I/O device is a console; else NULL.
*/
struct kgdb_io {
const char *name;
@@ -288,7 +287,7 @@ struct kgdb_io {
void (*deinit) (void);
void (*pre_exception) (void);
void (*post_exception) (void);
- int is_console;
+ struct console *cons;
};
extern const struct kgdb_arch arch_kgdb_ops;
diff --git a/include/linux/kobject.h b/include/linux/kobject.h
index fc8d83e91379..6cba088bee24 100644
--- a/include/linux/kobject.h
+++ b/include/linux/kobject.h
@@ -29,7 +29,7 @@
#include <linux/uidgid.h>
#define UEVENT_HELPER_PATH_LEN 256
-#define UEVENT_NUM_ENVP 32 /* number of env pointers */
+#define UEVENT_NUM_ENVP 64 /* number of env pointers */
#define UEVENT_BUFFER_SIZE 2048 /* buffer for the variables */
#ifdef CONFIG_UEVENT_HELPER
diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h
index 594265bfd390..6adf90f248d7 100644
--- a/include/linux/kprobes.h
+++ b/include/linux/kprobes.h
@@ -161,7 +161,7 @@ struct kretprobe_instance {
kprobe_opcode_t *ret_addr;
struct task_struct *task;
void *fp;
- char data[0];
+ char data[];
};
struct kretprobe_blackpoint {
@@ -350,6 +350,10 @@ static inline struct kprobe_ctlblk *get_kprobe_ctlblk(void)
return this_cpu_ptr(&kprobe_ctlblk);
}
+extern struct kprobe kprobe_busy;
+void kprobe_busy_begin(void);
+void kprobe_busy_end(void);
+
kprobe_opcode_t *kprobe_lookup_name(const char *name, unsigned int offset);
int register_kprobe(struct kprobe *p);
void unregister_kprobe(struct kprobe *p);
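
A sketch of the new busy markers, which make the current context appear to be handling a kprobe so that handlers do not recurse while unrelated code touches state they share; the function and its contents are illustrative:

	#include <linux/kprobes.h>

	static void example_touch_shared_state(void)
	{
		kprobe_busy_begin();
		/* ... operate on state also reached from kprobe handlers ... */
		kprobe_busy_end();
	}
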
diff --git a/include/linux/kthread.h b/include/linux/kthread.h
index 8bbcaad7ef0f..65b81e0c494d 100644
--- a/include/linux/kthread.h
+++ b/include/linux/kthread.h
@@ -5,6 +5,8 @@
#include <linux/err.h>
#include <linux/sched.h>
+struct mm_struct;
+
__printf(4, 5)
struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
void *data,
@@ -57,6 +59,7 @@ bool kthread_should_stop(void);
bool kthread_should_park(void);
bool __kthread_should_park(struct task_struct *k);
bool kthread_freezable_should_stop(bool *was_frozen);
+void *kthread_func(struct task_struct *k);
void *kthread_data(struct task_struct *k);
void *kthread_probe_data(struct task_struct *k);
int kthread_park(struct task_struct *k);
@@ -198,6 +201,9 @@ bool kthread_cancel_delayed_work_sync(struct kthread_delayed_work *work);
void kthread_destroy_worker(struct kthread_worker *worker);
+void kthread_use_mm(struct mm_struct *mm);
+void kthread_unuse_mm(struct mm_struct *mm);
+
struct cgroup_subsys_state;
#ifdef CONFIG_BLK_CGROUP
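
These mm-borrowing helpers replace use_mm()/unuse_mm() from mmu_context.h (removed later in this diff); a minimal worker sketch, with the thread function and the caller-holds-a-reference convention assumed:

	#include <linux/kthread.h>

	static int example_worker(void *data)
	{
		struct mm_struct *mm = data;	/* caller holds a reference */

		kthread_use_mm(mm);
		/* ... work against the user address space ... */
		kthread_unuse_mm(mm);
		return 0;
	}
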
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index f43b59b1294c..d564855243d8 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -206,6 +206,7 @@ struct kvm_async_pf {
unsigned long addr;
struct kvm_arch_async_pf arch;
bool wakeup_all;
+ bool notpresent_injected;
};
void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu);
@@ -318,7 +319,6 @@ struct kvm_vcpu {
bool preempted;
bool ready;
struct kvm_vcpu_arch arch;
- struct dentry *debugfs_dentry;
};
static inline int kvm_vcpu_exiting_guest_mode(struct kvm_vcpu *vcpu)
@@ -409,7 +409,7 @@ struct kvm_irq_routing_table {
* Array indexed by gsi. Each entry contains list of irq chips
* the gsi is connected to.
*/
- struct hlist_head map[0];
+ struct hlist_head map[];
};
#endif
@@ -888,7 +888,7 @@ void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu);
#ifdef __KVM_HAVE_ARCH_VCPU_DEBUGFS
-void kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu);
+void kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu, struct dentry *debugfs_dentry);
#endif
int kvm_arch_hardware_enable(void);
@@ -1421,8 +1421,8 @@ static inline long kvm_arch_vcpu_async_ioctl(struct file *filp,
}
#endif /* CONFIG_HAVE_KVM_VCPU_ASYNC_IOCTL */
-int kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
- unsigned long start, unsigned long end, bool blockable);
+void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
+ unsigned long start, unsigned long end);
#ifdef CONFIG_HAVE_KVM_VCPU_RUN_PID_CHANGE
int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu);
diff --git a/include/linux/libata.h b/include/linux/libata.h
index af832852e620..e7e5256817dc 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -22,6 +22,7 @@
#include <linux/acpi.h>
#include <linux/cdrom.h>
#include <linux/sched.h>
+#include <linux/async.h>
/*
* Define if arch has non-standard setup. This is a _PCI_ standard
@@ -609,7 +610,7 @@ struct ata_host {
struct task_struct *eh_owner;
struct ata_port *simplex_claimed; /* channel owning the DMA */
- struct ata_port *ports[0];
+ struct ata_port *ports[];
};
struct ata_queued_cmd {
@@ -872,6 +873,8 @@ struct ata_port {
struct timer_list fastdrain_timer;
unsigned long fastdrain_cnt;
+ async_cookie_t cookie;
+
int em_message_type;
void *private_data;
@@ -1092,7 +1095,11 @@ extern int ata_scsi_ioctl(struct scsi_device *dev, unsigned int cmd,
#define ATA_SCSI_COMPAT_IOCTL /* empty */
#endif
extern int ata_scsi_queuecmd(struct Scsi_Host *h, struct scsi_cmnd *cmd);
+#if IS_ENABLED(CONFIG_ATA)
bool ata_scsi_dma_need_drain(struct request *rq);
+#else
+#define ata_scsi_dma_need_drain NULL
+#endif
extern int ata_sas_scsi_ioctl(struct ata_port *ap, struct scsi_device *dev,
unsigned int cmd, void __user *arg);
extern bool ata_link_online(struct ata_link *link);
diff --git a/include/linux/lsm_audit.h b/include/linux/lsm_audit.h
index 99d629fd9944..28f23b341c1c 100644
--- a/include/linux/lsm_audit.h
+++ b/include/linux/lsm_audit.h
@@ -75,6 +75,7 @@ struct common_audit_data {
#define LSM_AUDIT_DATA_IBPKEY 13
#define LSM_AUDIT_DATA_IBENDPORT 14
#define LSM_AUDIT_DATA_LOCKDOWN 15
+#define LSM_AUDIT_DATA_NOTIFICATION 16
union {
struct path path;
struct dentry *dentry;
diff --git a/include/linux/lsm_hook_defs.h b/include/linux/lsm_hook_defs.h
index fb3ce6cec997..6791813cd439 100644
--- a/include/linux/lsm_hook_defs.h
+++ b/include/linux/lsm_hook_defs.h
@@ -191,6 +191,8 @@ LSM_HOOK(int, 0, kernel_post_read_file, struct file *file, char *buf,
loff_t size, enum kernel_read_file_id id)
LSM_HOOK(int, 0, task_fix_setuid, struct cred *new, const struct cred *old,
int flags)
+LSM_HOOK(int, 0, task_fix_setgid, struct cred *new, const struct cred * old,
+ int flags)
LSM_HOOK(int, 0, task_setpgid, struct task_struct *p, pid_t pgid)
LSM_HOOK(int, 0, task_getpgid, struct task_struct *p)
LSM_HOOK(int, 0, task_getsid, struct task_struct *p)
@@ -254,6 +256,15 @@ LSM_HOOK(int, 0, inode_setsecctx, struct dentry *dentry, void *ctx, u32 ctxlen)
LSM_HOOK(int, 0, inode_getsecctx, struct inode *inode, void **ctx,
u32 *ctxlen)
+#if defined(CONFIG_SECURITY) && defined(CONFIG_WATCH_QUEUE)
+LSM_HOOK(int, 0, post_notification, const struct cred *w_cred,
+ const struct cred *cred, struct watch_notification *n)
+#endif /* CONFIG_SECURITY && CONFIG_WATCH_QUEUE */
+
+#if defined(CONFIG_SECURITY) && defined(CONFIG_KEY_NOTIFICATIONS)
+LSM_HOOK(int, 0, watch_key, struct key *key)
+#endif /* CONFIG_SECURITY && CONFIG_KEY_NOTIFICATIONS */
+
#ifdef CONFIG_SECURITY_NETWORK
LSM_HOOK(int, 0, unix_stream_connect, struct sock *sock, struct sock *other,
struct sock *newsk)
diff --git a/include/linux/lsm_hooks.h b/include/linux/lsm_hooks.h
index 3e62dab77699..95b7c1d32062 100644
--- a/include/linux/lsm_hooks.h
+++ b/include/linux/lsm_hooks.h
@@ -659,6 +659,15 @@
* @old is the set of credentials that are being replaced
* @flags contains one of the LSM_SETID_* values.
* Return 0 on success.
+ * @task_fix_setgid:
+ * Update the module's state after setting one or more of the group
+ * identity attributes of the current process. The @flags parameter
+ * indicates which of the set*gid system calls invoked this hook.
+ * @new is the set of credentials that will be installed. Modifications
+ * should be made to this rather than to @current->cred.
+ * @old is the set of credentials that are being replaced.
+ * @flags contains one of the LSM_SETID_* values.
+ * Return 0 on success.
* @task_setpgid:
* Check permission before setting the process group identifier of the
* process @p to @pgid.
@@ -1445,6 +1454,20 @@
* @ctx is a pointer in which to place the allocated security context.
* @ctxlen points to the place to put the length of @ctx.
*
+ * Security hooks for the general notification queue:
+ *
+ * @post_notification:
+ * Check to see if a watch notification can be posted to a particular
+ * queue.
+ * @w_cred: The credentials of whoever set the watch.
+ * @cred: The credentials of the event triggerer.
+ * @n: The notification being posted
+ *
+ * @watch_key:
+ * Check to see if a process is allowed to watch for event notifications
+ * from a key or keyring.
+ * @key: The key to watch.
+ *
* Security hooks for using the eBPF maps and programs functionalities through
* eBPF syscalls.
*
diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h
index fee7fab5d706..375515803cd8 100644
--- a/include/linux/memory_hotplug.h
+++ b/include/linux/memory_hotplug.h
@@ -318,6 +318,7 @@ extern void try_offline_node(int nid);
extern int offline_pages(unsigned long start_pfn, unsigned long nr_pages);
extern int remove_memory(int nid, u64 start, u64 size);
extern void __remove_memory(int nid, u64 start, u64 size);
+extern int offline_and_remove_memory(int nid, u64 start, u64 size);
#else
static inline void try_offline_node(int nid) {}
diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h
index 8165278c348a..ea9c15b60a96 100644
--- a/include/linux/mempolicy.h
+++ b/include/linux/mempolicy.h
@@ -31,7 +31,7 @@ struct mm_struct;
* Locking policy for interleave:
* In process context there is no locking because only the process accesses
* its own state. All vma manipulation is somewhat protected by a down_read on
- * mmap_sem.
+ * mmap_lock.
*
* Freeing policy:
* Mempolicy objects are reference counted. A mempolicy will be freed when
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index 116bd9bb347f..ca1887dd0423 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -4283,7 +4283,8 @@ struct mlx5_ifc_rst2init_qp_out_bits {
u8 syndrome[0x20];
- u8 reserved_at_40[0x40];
+ u8 reserved_at_40[0x20];
+ u8 ece[0x20];
};
struct mlx5_ifc_rst2init_qp_in_bits {
@@ -4300,7 +4301,7 @@ struct mlx5_ifc_rst2init_qp_in_bits {
u8 opt_param_mask[0x20];
- u8 reserved_at_a0[0x20];
+ u8 ece[0x20];
struct mlx5_ifc_qpc_bits qpc;
@@ -6619,7 +6620,8 @@ struct mlx5_ifc_init2init_qp_out_bits {
u8 syndrome[0x20];
- u8 reserved_at_40[0x40];
+ u8 reserved_at_40[0x20];
+ u8 ece[0x20];
};
struct mlx5_ifc_init2init_qp_in_bits {
@@ -6636,7 +6638,7 @@ struct mlx5_ifc_init2init_qp_in_bits {
u8 opt_param_mask[0x20];
- u8 reserved_at_a0[0x20];
+ u8 ece[0x20];
struct mlx5_ifc_qpc_bits qpc;
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 9d6042178ca7..dc7b87310c10 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -15,6 +15,7 @@
#include <linux/atomic.h>
#include <linux/debug_locks.h>
#include <linux/mm_types.h>
+#include <linux/mmap_lock.h>
#include <linux/range.h>
#include <linux/pfn.h>
#include <linux/percpu-refcount.h>
@@ -28,6 +29,7 @@
#include <linux/overflow.h>
#include <linux/sizes.h>
#include <linux/sched.h>
+#include <linux/pgtable.h>
struct mempolicy;
struct anon_vma;
@@ -92,7 +94,6 @@ extern int mmap_rnd_compat_bits __read_mostly;
#endif
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/processor.h>
/*
@@ -401,7 +402,7 @@ extern pgprot_t protection_map[16];
* @FAULT_FLAG_WRITE: Fault was a write fault.
* @FAULT_FLAG_MKWRITE: Fault was mkwrite of existing PTE.
* @FAULT_FLAG_ALLOW_RETRY: Allow to retry the fault if blocked.
- * @FAULT_FLAG_RETRY_NOWAIT: Don't drop mmap_sem and wait when retrying.
+ * @FAULT_FLAG_RETRY_NOWAIT: Don't drop mmap_lock and wait when retrying.
* @FAULT_FLAG_KILLABLE: The fault task is in SIGKILL killable region.
* @FAULT_FLAG_TRIED: The fault has been tried once.
* @FAULT_FLAG_USER: The fault originated in userspace.
@@ -451,10 +452,10 @@ extern pgprot_t protection_map[16];
* fault_flag_allow_retry_first - check ALLOW_RETRY the first time
*
* This is mostly used for places where we want to try to avoid taking
- * the mmap_sem for too long a time when waiting for another condition
+ * the mmap_lock for too long a time when waiting for another condition
* to change, in which case we can try to be polite to release the
- * mmap_sem in the first round to avoid potential starvation of other
- * processes that would also want the mmap_sem.
+ * mmap_lock in the first round to avoid potential starvation of other
+ * processes that would also want the mmap_lock.
*
* Return: true if the page fault allows retry and this is the first
* attempt of the fault handling; false otherwise.
@@ -581,7 +582,7 @@ struct vm_operations_struct {
* (vma,addr) marked as MPOL_SHARED. The shared policy infrastructure
* in mm/mempolicy.c will do this automatically.
* get_policy() must NOT add a ref if the policy at (vma,addr) is not
- * marked as MPOL_SHARED. vma policies are protected by the mmap_sem.
+ * marked as MPOL_SHARED. vma policies are protected by the mmap_lock.
* If no [shared/vma] mempolicy exists at the addr, get_policy() op
* must return NULL--i.e., do not "fallback" to task or system default
* policy.
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index ef6d3aface8a..64ede5f150dc 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -344,7 +344,7 @@ struct vm_area_struct {
* can only be in the i_mmap tree. An anonymous MAP_PRIVATE, stack
* or brk vma (with NULL file) can only be in an anon_vma list.
*/
- struct list_head anon_vma_chain; /* Serialized by mmap_sem &
+ struct list_head anon_vma_chain; /* Serialized by mmap_lock &
* page_table_lock */
struct anon_vma *anon_vma; /* Serialized by page_table_lock */
@@ -440,7 +440,7 @@ struct mm_struct {
spinlock_t page_table_lock; /* Protects page tables and some
* counters
*/
- struct rw_semaphore mmap_sem;
+ struct rw_semaphore mmap_lock;
struct list_head mmlist; /* List of maybe swapped mm's. These
* are globally strung together off
diff --git a/include/linux/mmap_lock.h b/include/linux/mmap_lock.h
new file mode 100644
index 000000000000..0707671851a8
--- /dev/null
+++ b/include/linux/mmap_lock.h
@@ -0,0 +1,90 @@
+#ifndef _LINUX_MMAP_LOCK_H
+#define _LINUX_MMAP_LOCK_H
+
+#include <linux/mmdebug.h>
+
+#define MMAP_LOCK_INITIALIZER(name) \
+ .mmap_lock = __RWSEM_INITIALIZER((name).mmap_lock),
+
+static inline void mmap_init_lock(struct mm_struct *mm)
+{
+ init_rwsem(&mm->mmap_lock);
+}
+
+static inline void mmap_write_lock(struct mm_struct *mm)
+{
+ down_write(&mm->mmap_lock);
+}
+
+static inline void mmap_write_lock_nested(struct mm_struct *mm, int subclass)
+{
+ down_write_nested(&mm->mmap_lock, subclass);
+}
+
+static inline int mmap_write_lock_killable(struct mm_struct *mm)
+{
+ return down_write_killable(&mm->mmap_lock);
+}
+
+static inline bool mmap_write_trylock(struct mm_struct *mm)
+{
+ return down_write_trylock(&mm->mmap_lock) != 0;
+}
+
+static inline void mmap_write_unlock(struct mm_struct *mm)
+{
+ up_write(&mm->mmap_lock);
+}
+
+static inline void mmap_write_downgrade(struct mm_struct *mm)
+{
+ downgrade_write(&mm->mmap_lock);
+}
+
+static inline void mmap_read_lock(struct mm_struct *mm)
+{
+ down_read(&mm->mmap_lock);
+}
+
+static inline int mmap_read_lock_killable(struct mm_struct *mm)
+{
+ return down_read_killable(&mm->mmap_lock);
+}
+
+static inline bool mmap_read_trylock(struct mm_struct *mm)
+{
+ return down_read_trylock(&mm->mmap_lock) != 0;
+}
+
+static inline void mmap_read_unlock(struct mm_struct *mm)
+{
+ up_read(&mm->mmap_lock);
+}
+
+static inline bool mmap_read_trylock_non_owner(struct mm_struct *mm)
+{
+ if (down_read_trylock(&mm->mmap_lock)) {
+ rwsem_release(&mm->mmap_lock.dep_map, _RET_IP_);
+ return true;
+ }
+ return false;
+}
+
+static inline void mmap_read_unlock_non_owner(struct mm_struct *mm)
+{
+ up_read_non_owner(&mm->mmap_lock);
+}
+
+static inline void mmap_assert_locked(struct mm_struct *mm)
+{
+ lockdep_assert_held(&mm->mmap_lock);
+ VM_BUG_ON_MM(!rwsem_is_locked(&mm->mmap_lock), mm);
+}
+
+static inline void mmap_assert_write_locked(struct mm_struct *mm)
+{
+ lockdep_assert_held_write(&mm->mmap_lock);
+ VM_BUG_ON_MM(!rwsem_is_locked(&mm->mmap_lock), mm);
+}
+
+#endif /* _LINUX_MMAP_LOCK_H */
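
Conversions elsewhere in this series (e.g. the mmu_notifier.h hunk below) all follow one pattern; a sketch of the same change in a typical lookup, with the helper function invented for illustration:

	#include <linux/mm.h>

	static bool example_addr_is_mapped(struct mm_struct *mm,
					   unsigned long addr)
	{
		struct vm_area_struct *vma;
		bool mapped;

		mmap_read_lock(mm);	/* was: down_read(&mm->mmap_sem) */
		vma = find_vma(mm, addr);
		mapped = vma && addr >= vma->vm_start;
		mmap_read_unlock(mm);	/* was: up_read(&mm->mmap_sem) */
		return mapped;
	}
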
diff --git a/include/linux/mmu_context.h b/include/linux/mmu_context.h
index d9a543a9e1cc..c51a84132d7c 100644
--- a/include/linux/mmu_context.h
+++ b/include/linux/mmu_context.h
@@ -4,11 +4,6 @@
#include <asm/mmu_context.h>
-struct mm_struct;
-
-void use_mm(struct mm_struct *mm);
-void unuse_mm(struct mm_struct *mm);
-
/* Architectures that care about IRQ state in switch_mm can override this. */
#ifndef switch_mm_irqs_off
# define switch_mm_irqs_off switch_mm
diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h
index 736f6918335e..fc68f3570e19 100644
--- a/include/linux/mmu_notifier.h
+++ b/include/linux/mmu_notifier.h
@@ -5,6 +5,7 @@
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/mm_types.h>
+#include <linux/mmap_lock.h>
#include <linux/srcu.h>
#include <linux/interval_tree.h>
@@ -121,7 +122,7 @@ struct mmu_notifier_ops {
/*
* invalidate_range_start() and invalidate_range_end() must be
- * paired and are called only when the mmap_sem and/or the
+ * paired and are called only when the mmap_lock and/or the
* locks protecting the reverse maps are held. If the subsystem
* can't guarantee that no additional references are taken to
* the pages in the range, it has to implement the
@@ -212,13 +213,13 @@ struct mmu_notifier_ops {
};
/*
- * The notifier chains are protected by mmap_sem and/or the reverse map
+ * The notifier chains are protected by mmap_lock and/or the reverse map
* semaphores. Notifier chains are only changed when all reverse maps and
- * the mmap_sem locks are taken.
+ * the mmap_lock locks are taken.
*
* Therefore notifier chains can only be traversed when either
*
- * 1. mmap_sem is held.
+ * 1. mmap_lock is held.
* 2. One of the reverse map locks is held (i_mmap_rwsem or anon_vma->rwsem).
* 3. No other concurrent thread can access the list (release)
*/
@@ -277,9 +278,9 @@ mmu_notifier_get(const struct mmu_notifier_ops *ops, struct mm_struct *mm)
{
struct mmu_notifier *ret;
- down_write(&mm->mmap_sem);
+ mmap_write_lock(mm);
ret = mmu_notifier_get_locked(ops, mm);
- up_write(&mm->mmap_sem);
+ mmap_write_unlock(mm);
return ret;
}
void mmu_notifier_put(struct mmu_notifier *subscription);
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index df1f08486d81..f6f884970511 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -257,8 +257,8 @@ struct lruvec {
*/
unsigned long anon_cost;
unsigned long file_cost;
- /* Evictions & activations on the inactive file list */
- atomic_long_t inactive_age;
+ /* Non-resident age, driven by LRU movement */
+ atomic_long_t nonresident_age;
/* Refaults at the time of last reclaim cycle */
unsigned long refaults;
/* Various lruvec state flags (enum lruvec_flags) */
@@ -660,9 +660,21 @@ struct deferred_split {
* per-zone basis.
*/
typedef struct pglist_data {
+ /*
+ * node_zones contains just the zones for THIS node. Not all of the
+ * zones may be populated, but it is the full list. It is referenced by
+ * this node's node_zonelists as well as other node's node_zonelists.
+ */
struct zone node_zones[MAX_NR_ZONES];
+
+ /*
+ * node_zonelists contains references to all zones in all nodes.
+ * Generally the first zones will be references to this node's
+ * node_zones.
+ */
struct zonelist node_zonelists[MAX_ZONELISTS];
- int nr_zones;
+
+ int nr_zones; /* number of populated zones in this node */
#ifdef CONFIG_FLAT_NODE_MEM_MAP /* means !SPARSEMEM */
struct page *node_mem_map;
#ifdef CONFIG_PAGE_EXTENSION
diff --git a/include/linux/mount.h b/include/linux/mount.h
index 7edac8c7a9c1..de657bd211fa 100644
--- a/include/linux/mount.h
+++ b/include/linux/mount.h
@@ -111,4 +111,6 @@ extern unsigned int sysctl_mount_max;
extern bool path_is_mountpoint(const struct path *path);
+extern void kern_unmount_array(struct vfsmount *mnt[], unsigned int num);
+
#endif /* _LINUX_MOUNT_H */
diff --git a/include/linux/mtd/bbm.h b/include/linux/mtd/bbm.h
index 886e30441c90..d890805f5494 100644
--- a/include/linux/mtd/bbm.h
+++ b/include/linux/mtd/bbm.h
@@ -98,7 +98,7 @@ struct nand_bbt_descr {
/*
* Flag set by nand_create_default_bbt_descr(), marking that the nand_bbt_descr
- * was allocated dynamicaly and must be freed in nand_release(). Has no meaning
+ * was allocated dynamically and must be freed in nand_cleanup(). Has no meaning
* in nand_chip.bbt_options.
*/
#define NAND_BBT_DYNAMICSTRUCT 0x80000000
diff --git a/include/linux/mtd/cfi.h b/include/linux/mtd/cfi.h
index c98a21108688..fd1ecb821106 100644
--- a/include/linux/mtd/cfi.h
+++ b/include/linux/mtd/cfi.h
@@ -138,7 +138,7 @@ struct cfi_ident {
uint16_t InterfaceDesc;
uint16_t MaxBufWriteSize;
uint8_t NumEraseRegions;
- uint32_t EraseRegionInfo[0]; /* Not host ordered */
+ uint32_t EraseRegionInfo[]; /* Not host ordered */
} __packed;
/* Extended Query Structure for both PRI and ALT */
@@ -165,7 +165,7 @@ struct cfi_pri_intelext {
uint16_t ProtRegAddr;
uint8_t FactProtRegSize;
uint8_t UserProtRegSize;
- uint8_t extra[0];
+ uint8_t extra[];
} __packed;
struct cfi_intelext_otpinfo {
@@ -286,7 +286,7 @@ struct cfi_private {
map_word sector_erase_cmd;
unsigned long chipshift; /* Because they're of the same type */
const char *im_name; /* inter_module name for cmdset_setup */
- struct flchip chips[0]; /* per-chip data structure for each chip */
+ struct flchip chips[]; /* per-chip data structure for each chip */
};
uint32_t cfi_build_cmd_addr(uint32_t cmd_ofs,
diff --git a/include/linux/mtd/mtd.h b/include/linux/mtd/mtd.h
index 2d1f4a61f4ac..157357ec1441 100644
--- a/include/linux/mtd/mtd.h
+++ b/include/linux/mtd/mtd.h
@@ -200,6 +200,8 @@ struct mtd_debug_info {
*
* @node: list node used to add an MTD partition to the parent partition list
* @offset: offset of the partition relatively to the parent offset
+ * @size: partition size. Should be equal to mtd->size unless
+ * MTD_SLC_ON_MLC_EMULATION is set
* @flags: original flags (before the mtdpart logic decided to tweak them based
* on flash constraints, like eraseblock/pagesize alignment)
*
@@ -209,6 +211,7 @@ struct mtd_debug_info {
struct mtd_part {
struct list_head node;
u64 offset;
+ u64 size;
u32 flags;
};
@@ -622,7 +625,9 @@ static inline uint32_t mtd_mod_by_ws(uint64_t sz, struct mtd_info *mtd)
static inline int mtd_wunit_per_eb(struct mtd_info *mtd)
{
- return mtd->erasesize / mtd->writesize;
+ struct mtd_info *master = mtd_get_master(mtd);
+
+ return master->erasesize / mtd->writesize;
}
static inline int mtd_offset_to_wunit(struct mtd_info *mtd, loff_t offs)
diff --git a/include/linux/mtd/partitions.h b/include/linux/mtd/partitions.h
index e545c050d3e8..b74a539ec581 100644
--- a/include/linux/mtd/partitions.h
+++ b/include/linux/mtd/partitions.h
@@ -37,6 +37,7 @@
* master MTD flag set for the corresponding MTD partition.
* For example, to force a read-only partition, simply adding
* MTD_WRITEABLE to the mask_flags will do the trick.
+ * add_flags: contains flags to add to the parent flags
*
* Note: writeable partitions require their size and offset be
* erasesize aligned (e.g. use MTDPART_OFS_NEXTBLK).
@@ -48,6 +49,7 @@ struct mtd_partition {
uint64_t size; /* partition size */
uint64_t offset; /* offset within the master MTD space */
uint32_t mask_flags; /* master MTD flags to mask out for this partition */
+ uint32_t add_flags; /* flags to add to the partition */
struct device_node *of_node;
};
diff --git a/include/linux/mtd/qinfo.h b/include/linux/mtd/qinfo.h
index df5b9fddea16..2e3f43788d48 100644
--- a/include/linux/mtd/qinfo.h
+++ b/include/linux/mtd/qinfo.h
@@ -24,7 +24,7 @@ struct lpddr_private {
struct qinfo_chip *qinfo;
int numchips;
unsigned long chipshift;
- struct flchip chips[0];
+ struct flchip chips[];
};
/* qinfo_query_info structure contains request information for
diff --git a/include/linux/mtd/rawnand.h b/include/linux/mtd/rawnand.h
index 1e76196f9829..65b1c1c18b41 100644
--- a/include/linux/mtd/rawnand.h
+++ b/include/linux/mtd/rawnand.h
@@ -83,14 +83,14 @@ struct nand_chip;
/*
* Constants for ECC_MODES
*/
-typedef enum {
+enum nand_ecc_mode {
+ NAND_ECC_INVALID,
NAND_ECC_NONE,
NAND_ECC_SOFT,
NAND_ECC_HW,
NAND_ECC_HW_SYNDROME,
- NAND_ECC_HW_OOB_FIRST,
NAND_ECC_ON_DIE,
-} nand_ecc_modes_t;
+};
enum nand_ecc_algo {
NAND_ECC_UNKNOWN,
@@ -119,85 +119,73 @@ enum nand_ecc_algo {
#define NAND_ECC_MAXIMIZE BIT(1)
/*
+ * Option constants for bizarre dysfunctionality and real
+ * features.
+ */
+
+/* Buswidth is 16 bit */
+#define NAND_BUSWIDTH_16 BIT(1)
+
+/*
* When using software implementation of Hamming, we can specify which byte
* ordering should be used.
*/
#define NAND_ECC_SOFT_HAMMING_SM_ORDER BIT(2)
-/*
- * Option constants for bizarre disfunctionality and real
- * features.
- */
-/* Buswidth is 16 bit */
-#define NAND_BUSWIDTH_16 0x00000002
/* Chip has cache program function */
-#define NAND_CACHEPRG 0x00000008
+#define NAND_CACHEPRG BIT(3)
+/* Options valid for Samsung large page devices */
+#define NAND_SAMSUNG_LP_OPTIONS NAND_CACHEPRG
+
/*
* Chip requires ready check on read (for auto-incremented sequential read).
* True only for small page devices; large page devices do not support
* autoincrement.
*/
-#define NAND_NEED_READRDY 0x00000100
+#define NAND_NEED_READRDY BIT(8)
/* Chip does not allow subpage writes */
-#define NAND_NO_SUBPAGE_WRITE 0x00000200
+#define NAND_NO_SUBPAGE_WRITE BIT(9)
/* Device is one of 'new' xD cards that expose fake nand command set */
-#define NAND_BROKEN_XD 0x00000400
+#define NAND_BROKEN_XD BIT(10)
/* Device behaves just like nand, but is readonly */
-#define NAND_ROM 0x00000800
+#define NAND_ROM BIT(11)
/* Device supports subpage reads */
-#define NAND_SUBPAGE_READ 0x00001000
+#define NAND_SUBPAGE_READ BIT(12)
+/* Macros to identify the above */
+#define NAND_HAS_SUBPAGE_READ(chip) ((chip->options & NAND_SUBPAGE_READ))
/*
* Some MLC NANDs need data scrambling to limit bitflips caused by repeated
* patterns.
*/
-#define NAND_NEED_SCRAMBLING 0x00002000
+#define NAND_NEED_SCRAMBLING BIT(13)
/* Device needs 3rd row address cycle */
-#define NAND_ROW_ADDR_3 0x00004000
-
-/* Options valid for Samsung large page devices */
-#define NAND_SAMSUNG_LP_OPTIONS NAND_CACHEPRG
-
-/* Macros to identify the above */
-#define NAND_HAS_SUBPAGE_READ(chip) ((chip->options & NAND_SUBPAGE_READ))
-
-/*
- * There are different places where the manufacturer stores the factory bad
- * block markers.
- *
- * Position within the block: Each of these pages needs to be checked for a
- * bad block marking pattern.
- */
-#define NAND_BBM_FIRSTPAGE 0x01000000
-#define NAND_BBM_SECONDPAGE 0x02000000
-#define NAND_BBM_LASTPAGE 0x04000000
-
-/* Position within the OOB data of the page */
-#define NAND_BBM_POS_SMALL 5
-#define NAND_BBM_POS_LARGE 0
+#define NAND_ROW_ADDR_3 BIT(14)
/* Non chip related options */
/* This option skips the bbt scan during initialization. */
-#define NAND_SKIP_BBTSCAN 0x00010000
+#define NAND_SKIP_BBTSCAN BIT(16)
/* Chip may not exist, so silence any errors in scan */
-#define NAND_SCAN_SILENT_NODEV 0x00040000
+#define NAND_SCAN_SILENT_NODEV BIT(18)
+
/*
* Autodetect nand buswidth with readid/onfi.
* This assumes the driver will configure the hardware in 8-bit mode
* when calling nand_scan_ident, and update its configuration
* before calling nand_scan_tail.
*/
-#define NAND_BUSWIDTH_AUTO 0x00080000
+#define NAND_BUSWIDTH_AUTO BIT(19)
+
/*
* This option could be defined by controller drivers to protect against
* kmap'ed, vmalloc'ed highmem buffers being passed from upper layers
*/
-#define NAND_USE_BOUNCE_BUFFER 0x00100000
+#define NAND_USES_DMA BIT(20)
/*
* In case your controller is implementing ->legacy.cmd_ctrl() and is relying
@@ -207,26 +195,49 @@ enum nand_ecc_algo {
* If your controller already takes care of this delay, you don't need to set
* this flag.
*/
-#define NAND_WAIT_TCCS 0x00200000
+#define NAND_WAIT_TCCS BIT(21)
/*
* Whether the NAND chip is a boot medium. Drivers might use this information
* to select ECC algorithms supported by the boot ROM or similar restrictions.
*/
-#define NAND_IS_BOOT_MEDIUM 0x00400000
+#define NAND_IS_BOOT_MEDIUM BIT(22)
/*
* Do not try to tweak the timings at runtime. This is needed when the
* controller initializes the timings on itself or when it relies on
* configuration done by the bootloader.
*/
-#define NAND_KEEP_TIMINGS 0x00800000
+#define NAND_KEEP_TIMINGS BIT(23)
+
+/*
+ * There are different places where the manufacturer stores the factory bad
+ * block markers.
+ *
+ * Position within the block: Each of these pages needs to be checked for a
+ * bad block marking pattern.
+ */
+#define NAND_BBM_FIRSTPAGE BIT(24)
+#define NAND_BBM_SECONDPAGE BIT(25)
+#define NAND_BBM_LASTPAGE BIT(26)
+
+/*
+ * Some controllers with pipelined ECC engines override the BBM marker with
+ * data or ECC bytes, thus making bad block detection through bad block marker
+ * impossible. Let's flag those chips so the core knows it shouldn't check the
+ * BBM and consider all blocks good.
+ */
+#define NAND_NO_BBM_QUIRK BIT(27)
/* Cell info constants */
#define NAND_CI_CHIPNR_MSK 0x03
#define NAND_CI_CELLTYPE_MSK 0x0C
#define NAND_CI_CELLTYPE_SHIFT 2
+/* Position within the OOB data of the page */
+#define NAND_BBM_POS_SMALL 5
+#define NAND_BBM_POS_LARGE 0
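The flag renumbering above swaps hand-written hex literals for the kernel's BIT(n) macro, which expands to (1UL << (n)) and makes each bit position explicit. A quick equivalence check, with a stand-in macro so it builds outside the kernel:

#include <assert.h>

#define BIT(n)	(1UL << (n))	/* stand-in for the linux/bits.h macro */

int main(void)
{
	/* The old literals and the new forms name the same bits. */
	assert(BIT(1)  == 0x00000002UL);	/* NAND_BUSWIDTH_16 */
	assert(BIT(3)  == 0x00000008UL);	/* NAND_CACHEPRG */
	assert(BIT(20) == 0x00100000UL);	/* NAND_USES_DMA */
	return 0;
}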
+
/**
* struct nand_parameters - NAND generic parameters from the parameter page
* @model: Model name
@@ -351,7 +362,7 @@ static const struct nand_ecc_caps __name = { \
* @write_oob: function to write chip OOB data
*/
struct nand_ecc_ctrl {
- nand_ecc_modes_t mode;
+ enum nand_ecc_mode mode;
enum nand_ecc_algo algo;
int steps;
int size;
@@ -491,13 +502,17 @@ enum nand_data_interface_type {
/**
* struct nand_data_interface - NAND interface timing
* @type: type of the timing
- * @timings: The timing, type according to @type
+ * @timings: The timing information
+ * @timings.mode: Timing mode as defined in the specification
* @timings.sdr: Use it when @type is %NAND_SDR_IFACE.
*/
struct nand_data_interface {
enum nand_data_interface_type type;
- union {
- struct nand_sdr_timings sdr;
+ struct nand_timings {
+ unsigned int mode;
+ union {
+ struct nand_sdr_timings sdr;
+ };
} timings;
};
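Wrapping the union in a named struct lets the core carry the negotiated timing mode alongside the timing numbers themselves. A sketch of how a controller driver might consume the new layout — the function is invented, and it assumes the usual nand_sdr_timings fields such as tCCS_min:

/* Illustrative only: reading the reworked timing fields. */
static void show_iface(const struct nand_data_interface *iface)
{
	if (iface->type == NAND_SDR_IFACE)
		pr_debug("SDR timing mode %u, tCCS %u ps\n",
			 iface->timings.mode,
			 iface->timings.sdr.tCCS_min);
}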
@@ -694,6 +709,7 @@ struct nand_op_instr {
/**
* struct nand_subop - a sub operation
+ * @cs: the CS line to select for this NAND sub-operation
* @instrs: array of instructions
* @ninstrs: length of the @instrs array
* @first_instr_start_off: offset to start from for the first instruction
@@ -709,6 +725,7 @@ struct nand_op_instr {
* controller driver.
*/
struct nand_subop {
+ unsigned int cs;
const struct nand_op_instr *instrs;
unsigned int ninstrs;
unsigned int first_instr_start_off;
@@ -1321,13 +1338,17 @@ int nand_read_oob_std(struct nand_chip *chip, int page);
int nand_get_set_features_notsupp(struct nand_chip *chip, int addr,
u8 *subfeature_param);
-/* Default read_page_raw implementation */
+/* read_page_raw implementations */
int nand_read_page_raw(struct nand_chip *chip, uint8_t *buf, int oob_required,
int page);
+int nand_monolithic_read_page_raw(struct nand_chip *chip, uint8_t *buf,
+ int oob_required, int page);
-/* Default write_page_raw implementation */
+/* write_page_raw implementations */
int nand_write_page_raw(struct nand_chip *chip, const uint8_t *buf,
int oob_required, int page);
+int nand_monolithic_write_page_raw(struct nand_chip *chip, const uint8_t *buf,
+ int oob_required, int page);
/* Reset and initialize a NAND device */
int nand_reset(struct nand_chip *chip, int chipnr);
@@ -1356,7 +1377,7 @@ int nand_change_write_column_op(struct nand_chip *chip,
unsigned int offset_in_page, const void *buf,
unsigned int len, bool force_8bit);
int nand_read_data_op(struct nand_chip *chip, void *buf, unsigned int len,
- bool force_8bit);
+ bool force_8bit, bool check_only);
int nand_write_data_op(struct nand_chip *chip, const void *buf,
unsigned int len, bool force_8bit);
@@ -1377,8 +1398,6 @@ void nand_wait_ready(struct nand_chip *chip);
* successful nand_scan().
*/
void nand_cleanup(struct nand_chip *chip);
-/* Unregister the MTD device and calls nand_cleanup() */
-void nand_release(struct nand_chip *chip);
/*
* External helper for controller drivers that have to implement the WAITRDY
@@ -1393,6 +1412,10 @@ int nand_gpio_waitrdy(struct nand_chip *chip, struct gpio_desc *gpiod,
void nand_select_target(struct nand_chip *chip, unsigned int cs);
void nand_deselect_target(struct nand_chip *chip);
+/* Bitops */
+void nand_extract_bits(u8 *dst, unsigned int dst_off, const u8 *src,
+ unsigned int src_off, unsigned int nbits);
+
/**
* nand_get_data_buf() - Get the internal page buffer
* @chip: NAND chip object
diff --git a/include/linux/mtd/spi-nor.h b/include/linux/mtd/spi-nor.h
index 1e2af0ec1f03..60bac2c0ec45 100644
--- a/include/linux/mtd/spi-nor.h
+++ b/include/linux/mtd/spi-nor.h
@@ -20,6 +20,7 @@
*/
/* Flash opcodes. */
+#define SPINOR_OP_WRDI 0x04 /* Write disable */
#define SPINOR_OP_WREN 0x06 /* Write enable */
#define SPINOR_OP_RDSR 0x05 /* Read status register */
#define SPINOR_OP_WRSR 0x01 /* Write status register 1 byte */
@@ -80,7 +81,6 @@
/* Used for SST flashes only. */
#define SPINOR_OP_BP 0x02 /* Byte program */
-#define SPINOR_OP_WRDI 0x04 /* Write disable */
#define SPINOR_OP_AAI_WP 0xad /* Auto address increment word program */
/* Used for S3AN flashes only */
@@ -302,7 +302,7 @@ struct spi_nor;
* @read: read data from the SPI NOR.
* @write: write data to the SPI NOR.
* @erase: erase a sector of the SPI NOR at the offset @offs; if
- * not provided by the driver, spi-nor will send the erase
+ * not provided by the driver, SPI NOR will send the erase
* opcode via write_reg().
*/
struct spi_nor_controller_ops {
@@ -327,16 +327,16 @@ struct spi_nor_manufacturer;
struct spi_nor_flash_parameter;
/**
- * struct spi_nor - Structure for defining a the SPI NOR layer
- * @mtd: point to a mtd_info structure
+ * struct spi_nor - Structure for defining the SPI NOR layer
+ * @mtd: an mtd_info structure
* @lock: the lock for the read/write/erase/lock/unlock operations
- * @dev: point to a spi device, or a spi nor controller device.
- * @spimem: point to the spi mem device
+ * @dev: pointer to an SPI device or an SPI NOR controller device
+ * @spimem: pointer to the SPI memory device
* @bouncebuf: bounce buffer used when the buffer passed by the MTD
* layer is not DMA-able
* @bouncebuf_size: size of the bounce buffer
- * @info: spi-nor part JDEC MFR id and other info
- * @manufacturer: spi-nor manufacturer
+ * @info: SPI NOR part JEDEC MFR ID and other info
+ * @manufacturer: SPI NOR manufacturer
* @page_size: the page size of the SPI NOR
* @addr_width: number of address bytes
* @erase_opcode: the opcode for erasing a sector
@@ -344,17 +344,17 @@ struct spi_nor_flash_parameter;
* @read_dummy: the dummy needed by the read operation
* @program_opcode: the program opcode
* @sst_write_second: used by the SST write operation
- * @flags: flag options for the current SPI-NOR (SNOR_F_*)
+ * @flags: flag options for the current SPI NOR (SNOR_F_*)
* @read_proto: the SPI protocol for read operations
* @write_proto: the SPI protocol for write operations
- * @reg_proto the SPI protocol for read_reg/write_reg/erase operations
+ * @reg_proto: the SPI protocol for read_reg/write_reg/erase operations
* @controller_ops: SPI NOR controller driver specific operations.
- * @params: [FLASH-SPECIFIC] SPI-NOR flash parameters and settings.
+ * @params: [FLASH-SPECIFIC] SPI NOR flash parameters and settings.
* The structure includes legacy flash parameters and
* settings that can be overwritten by the spi_nor_fixups
* hooks, or dynamically when parsing the SFDP tables.
* @dirmap: pointers to struct spi_mem_dirmap_desc for reads/writes.
- * @priv: the private data
+ * @priv: pointer to the private data
*/
struct spi_nor {
struct mtd_info mtd;
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 5b364a2e0006..39e28e11863c 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -1821,8 +1821,6 @@ enum netdev_priv_flags {
* for hardware timestamping
* @sfp_bus: attached &struct sfp_bus structure.
*
- * @addr_list_lock_key: lockdep class annotating
- * net_device->addr_list_lock spinlock
* @qdisc_tx_busylock: lockdep class annotating Qdisc->busylock spinlock
* @qdisc_running_key: lockdep class annotating Qdisc->running seqcount
*
@@ -2125,7 +2123,6 @@ struct net_device {
#endif
struct phy_device *phydev;
struct sfp_bus *sfp_bus;
- struct lock_class_key addr_list_lock_key;
struct lock_class_key *qdisc_tx_busylock;
struct lock_class_key *qdisc_running_key;
bool proto_down;
@@ -2217,10 +2214,13 @@ static inline void netdev_for_each_tx_queue(struct net_device *dev,
static struct lock_class_key qdisc_tx_busylock_key; \
static struct lock_class_key qdisc_running_key; \
static struct lock_class_key qdisc_xmit_lock_key; \
+ static struct lock_class_key dev_addr_list_lock_key; \
unsigned int i; \
\
(dev)->qdisc_tx_busylock = &qdisc_tx_busylock_key; \
(dev)->qdisc_running_key = &qdisc_running_key; \
+ lockdep_set_class(&(dev)->addr_list_lock, \
+ &dev_addr_list_lock_key); \
for (i = 0; i < (dev)->num_tx_queues; i++) \
lockdep_set_class(&(dev)->_tx[i]._xmit_lock, \
&qdisc_xmit_lock_key); \
@@ -3157,7 +3157,7 @@ static inline int dev_recursion_level(void)
return this_cpu_read(softnet_data.xmit.recursion);
}
-#define XMIT_RECURSION_LIMIT 10
+#define XMIT_RECURSION_LIMIT 8
static inline bool dev_xmit_recursion(void)
{
return unlikely(__this_cpu_read(softnet_data.xmit.recursion) >
@@ -3253,7 +3253,6 @@ static inline void netif_stop_queue(struct net_device *dev)
}
void netif_tx_stop_all_queues(struct net_device *dev);
-void netdev_update_lockdep_key(struct net_device *dev);
static inline bool netif_tx_queue_stopped(const struct netdev_queue *dev_queue)
{
@@ -4239,6 +4238,11 @@ static inline void netif_addr_lock(struct net_device *dev)
spin_lock(&dev->addr_list_lock);
}
+static inline void netif_addr_lock_nested(struct net_device *dev)
+{
+ spin_lock_nested(&dev->addr_list_lock, dev->lower_level);
+}
+
static inline void netif_addr_lock_bh(struct net_device *dev)
{
spin_lock_bh(&dev->addr_list_lock);
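netif_addr_lock_nested() passes the device's stacking depth (dev->lower_level) as the lockdep subclass, so address-list locks of stacked devices can be held together without false recursive-lock reports now that all devices share one static key. A simplified sketch of the pattern this enables — the function and locking order are illustrative, loosely mirroring how address lists propagate down a device stack:

/* Illustrative: an upper device's list is pushed to its lower device. */
static void sync_addr_lists(struct net_device *upper,
			    struct net_device *lower)
{
	netif_addr_lock_bh(upper);
	/* Distinct subclass (lower->lower_level), so lockdep stays quiet. */
	netif_addr_lock_nested(lower);
	/* ... copy upper's address list into lower ... */
	netif_addr_unlock(lower);
	netif_addr_unlock_bh(upper);
}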
diff --git a/include/linux/netfilter_ipv4/ip_tables.h b/include/linux/netfilter_ipv4/ip_tables.h
index b394bd4f68a3..c4676d6feeff 100644
--- a/include/linux/netfilter_ipv4/ip_tables.h
+++ b/include/linux/netfilter_ipv4/ip_tables.h
@@ -25,6 +25,12 @@
int ipt_register_table(struct net *net, const struct xt_table *table,
const struct ipt_replace *repl,
const struct nf_hook_ops *ops, struct xt_table **res);
+
+void ipt_unregister_table_pre_exit(struct net *net, struct xt_table *table,
+ const struct nf_hook_ops *ops);
+
+void ipt_unregister_table_exit(struct net *net, struct xt_table *table);
+
void ipt_unregister_table(struct net *net, struct xt_table *table,
const struct nf_hook_ops *ops);
diff --git a/include/linux/netfilter_ipv6/ip6_tables.h b/include/linux/netfilter_ipv6/ip6_tables.h
index 8225f7821a29..1547d5f9ae06 100644
--- a/include/linux/netfilter_ipv6/ip6_tables.h
+++ b/include/linux/netfilter_ipv6/ip6_tables.h
@@ -29,6 +29,9 @@ int ip6t_register_table(struct net *net, const struct xt_table *table,
const struct nf_hook_ops *ops, struct xt_table **res);
void ip6t_unregister_table(struct net *net, struct xt_table *table,
const struct nf_hook_ops *ops);
+void ip6t_unregister_table_pre_exit(struct net *net, struct xt_table *table,
+ const struct nf_hook_ops *ops);
+void ip6t_unregister_table_exit(struct net *net, struct xt_table *table);
extern unsigned int ip6t_do_table(struct sk_buff *skb,
const struct nf_hook_state *state,
struct xt_table *table);
diff --git a/include/linux/nfs4.h b/include/linux/nfs4.h
index 82d8fb422092..4dba3c948932 100644
--- a/include/linux/nfs4.h
+++ b/include/linux/nfs4.h
@@ -38,7 +38,7 @@ struct nfs4_ace {
struct nfs4_acl {
uint32_t naces;
- struct nfs4_ace aces[0];
+ struct nfs4_ace aces[];
};
#define NFS4_MAXLABELLEN 2048
@@ -295,7 +295,7 @@ static inline bool seqid_mutating_err(u32 err)
case NFS4ERR_NOFILEHANDLE:
case NFS4ERR_MOVED:
return false;
- };
+ }
return true;
}
diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h
index 73eda45f1cfd..6ee9119acc5d 100644
--- a/include/linux/nfs_fs.h
+++ b/include/linux/nfs_fs.h
@@ -230,6 +230,7 @@ struct nfs4_copy_state {
#define NFS_INO_INVALID_OTHER BIT(12) /* other attrs are invalid */
#define NFS_INO_DATA_INVAL_DEFER \
BIT(13) /* Deferred cache invalidation */
+#define NFS_INO_INVALID_BLOCKS BIT(14) /* cached blocks are invalid */
#define NFS_INO_INVALID_ATTR (NFS_INO_INVALID_CHANGE \
| NFS_INO_INVALID_CTIME \
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
index e5f3e7d8d3d5..5fd0a9ef425f 100644
--- a/include/linux/nfs_xdr.h
+++ b/include/linux/nfs_xdr.h
@@ -1227,7 +1227,7 @@ struct nfs4_secinfo4 {
struct nfs4_secinfo_flavors {
unsigned int num_flavors;
- struct nfs4_secinfo4 flavors[0];
+ struct nfs4_secinfo4 flavors[];
};
struct nfs4_secinfo_arg {
diff --git a/include/linux/overflow.h b/include/linux/overflow.h
index 659045046468..93fcef105061 100644
--- a/include/linux/overflow.h
+++ b/include/linux/overflow.h
@@ -304,16 +304,33 @@ static inline __must_check size_t __ab_c_size(size_t a, size_t b, size_t c)
* struct_size() - Calculate size of structure with trailing array.
* @p: Pointer to the structure.
* @member: Name of the array member.
- * @n: Number of elements in the array.
+ * @count: Number of elements in the array.
*
* Calculates size of memory needed for structure @p followed by an
- * array of @n @member elements.
+ * array of @count number of @member elements.
*
* Return: number of bytes needed or SIZE_MAX on overflow.
*/
-#define struct_size(p, member, n) \
- __ab_c_size(n, \
+#define struct_size(p, member, count) \
+ __ab_c_size(count, \
sizeof(*(p)->member) + __must_be_array((p)->member),\
sizeof(*(p)))
+/**
+ * flex_array_size() - Calculate size of a flexible array member
+ * within an enclosing structure.
+ *
+ * @p: Pointer to the structure.
+ * @member: Name of the flexible array member.
+ * @count: Number of elements in the array.
+ *
+ * Calculates size of a flexible array of @count number of @member
+ * elements, at the end of structure @p.
+ *
+ * Return: number of bytes needed or SIZE_MAX on overflow.
+ */
+#define flex_array_size(p, member, count) \
+ array_size(count, \
+ sizeof(*(p)->member) + __must_be_array((p)->member))
+
#endif /* __LINUX_OVERFLOW_H */
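flex_array_size() complements struct_size(): the latter sizes header plus trailing array for an allocation, the former sizes just the trailing array, e.g. for a memcpy(). A hedged usage sketch with an invented structure:

struct report {			/* invented example structure */
	size_t count;
	u32 samples[];
};

static struct report *copy_report(const u32 *src, size_t count)
{
	/* Both helpers saturate to SIZE_MAX on overflow. */
	struct report *r = kmalloc(struct_size(r, samples, count),
				   GFP_KERNEL);

	if (!r)
		return NULL;
	r->count = count;
	memcpy(r->samples, src, flex_array_size(r, samples, count));
	return r;
}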
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index 222f6f7b2bb3..6be1aa559b1e 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -777,6 +777,16 @@ PAGE_TYPE_OPS(Buddy, buddy)
* not onlined when onlining the section).
* The content of these pages is effectively stale. Such pages should not
* be touched (read/write/dump/save) except by their owner.
+ *
+ * If a driver wants to allow offlining unmovable PageOffline() pages without
+ * putting them back to the buddy, it can do so via the memory notifier by
+ * decrementing the reference count in MEM_GOING_OFFLINE and incrementing the
+ * reference count in MEM_CANCEL_OFFLINE. When offlining, the PageOffline()
+ * pages (now with a reference count of zero) are treated like free pages,
+ * allowing the containing memory block to get offlined. A driver that
+ * relies on this feature is aware that re-onlining the memory block will
+ * require setting the pages PageOffline() again and not giving them to
+ * the buddy via online_page_callback_t.
*/
PAGE_TYPE_OPS(Offline, offline)
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 8e085713150c..cf2468da68e9 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -538,7 +538,7 @@ static inline int lock_page_killable(struct page *page)
* lock_page_or_retry - Lock the page, unless this would block and the
* caller indicated that it can handle a retry.
*
- * Return value and mmap_sem implications depend on flags; see
+ * Return value and mmap_lock implications depend on flags; see
* __lock_page_or_retry().
*/
static inline int lock_page_or_retry(struct page *page, struct mm_struct *mm,
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index 9a57e6717e5c..0ad57693f392 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -550,6 +550,7 @@
#define PCI_DEVICE_ID_AMD_17H_DF_F3 0x1463
#define PCI_DEVICE_ID_AMD_17H_M10H_DF_F3 0x15eb
#define PCI_DEVICE_ID_AMD_17H_M30H_DF_F3 0x1493
+#define PCI_DEVICE_ID_AMD_17H_M60H_DF_F3 0x144b
#define PCI_DEVICE_ID_AMD_17H_M70H_DF_F3 0x1443
#define PCI_DEVICE_ID_AMD_19H_DF_F3 0x1653
#define PCI_DEVICE_ID_AMD_CNB17H_F3 0x1703
diff --git a/include/asm-generic/pgtable.h b/include/linux/pgtable.h
index 0a9329656ae6..56c1e8eb7bb0 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/linux/pgtable.h
@@ -1,8 +1,9 @@
/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_GENERIC_PGTABLE_H
-#define _ASM_GENERIC_PGTABLE_H
+#ifndef _LINUX_PGTABLE_H
+#define _LINUX_PGTABLE_H
#include <linux/pfn.h>
+#include <asm/pgtable.h>
#ifndef __ASSEMBLY__
#ifdef CONFIG_MMU
@@ -27,6 +28,121 @@
#define USER_PGTABLES_CEILING 0UL
#endif
+/*
+ * A page table page can be thought of as an array like this: pXd_t[PTRS_PER_PxD]
+ *
+ * The pXx_index() functions return the index of the entry in the page
+ * table page which would control the given virtual address.
+ *
+ * As these functions may be used by the same code for different levels of
+ * the page table folding, they are always available, regardless of
+ * the CONFIG_PGTABLE_LEVELS value. For the folded levels they simply return 0
+ * because in such cases PTRS_PER_PxD equals 1.
+ */
+
+static inline unsigned long pte_index(unsigned long address)
+{
+ return (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
+}
+
+#ifndef pmd_index
+static inline unsigned long pmd_index(unsigned long address)
+{
+ return (address >> PMD_SHIFT) & (PTRS_PER_PMD - 1);
+}
+#define pmd_index pmd_index
+#endif
+
+#ifndef pud_index
+static inline unsigned long pud_index(unsigned long address)
+{
+ return (address >> PUD_SHIFT) & (PTRS_PER_PUD - 1);
+}
+#define pud_index pud_index
+#endif
+
+#ifndef pgd_index
+/* Must be a compile-time constant, so implement it as a macro */
+#define pgd_index(a) (((a) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))
+#endif
+
+#ifndef pte_offset_kernel
+static inline pte_t *pte_offset_kernel(pmd_t *pmd, unsigned long address)
+{
+ return (pte_t *)pmd_page_vaddr(*pmd) + pte_index(address);
+}
+#define pte_offset_kernel pte_offset_kernel
+#endif
+
+#if defined(CONFIG_HIGHPTE)
+#define pte_offset_map(dir, address) \
+ ((pte_t *)kmap_atomic(pmd_page(*(dir))) + \
+ pte_index((address)))
+#define pte_unmap(pte) kunmap_atomic((pte))
+#else
+#define pte_offset_map(dir, address) pte_offset_kernel((dir), (address))
+#define pte_unmap(pte) ((void)(pte)) /* NOP */
+#endif
+
+/* Find an entry in the second-level page table. */
+#ifndef pmd_offset
+static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
+{
+ return (pmd_t *)pud_page_vaddr(*pud) + pmd_index(address);
+}
+#define pmd_offset pmd_offset
+#endif
+
+#ifndef pud_offset
+static inline pud_t *pud_offset(p4d_t *p4d, unsigned long address)
+{
+ return (pud_t *)p4d_page_vaddr(*p4d) + pud_index(address);
+}
+#define pud_offset pud_offset
+#endif
+
+static inline pgd_t *pgd_offset_pgd(pgd_t *pgd, unsigned long address)
+{
+ return (pgd + pgd_index(address));
+}
+
+/*
+ * a shortcut to get a pgd_t in a given mm
+ */
+#ifndef pgd_offset
+#define pgd_offset(mm, address) pgd_offset_pgd((mm)->pgd, (address))
+#endif
+
+/*
+ * a shortcut which implies the use of the kernel's pgd, instead
+ * of a process's
+ */
+#define pgd_offset_k(address) pgd_offset(&init_mm, (address))
+
+/*
+ * In many cases it is known that a virtual address is mapped at PMD or PTE
+ * level, so instead of traversing all the page table levels, we can get a
+ * pointer to the PMD entry in a user or kernel page table, or translate a
+ * virtual address to a pointer to the PTE in the kernel page tables, with
+ * simple helpers.
+ */
+static inline pmd_t *pmd_off(struct mm_struct *mm, unsigned long va)
+{
+ return pmd_offset(pud_offset(p4d_offset(pgd_offset(mm, va), va), va), va);
+}
+
+static inline pmd_t *pmd_off_k(unsigned long va)
+{
+ return pmd_offset(pud_offset(p4d_offset(pgd_offset_k(va), va), va), va);
+}
+
+static inline pte_t *virt_to_kpte(unsigned long vaddr)
+{
+ pmd_t *pmd = pmd_off_k(vaddr);
+
+ return pmd_none(*pmd) ? NULL : pte_offset_kernel(pmd, vaddr);
+}
+
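The pXd_index() helpers above just slice bit fields out of a virtual address. A standalone sketch of the decomposition using typical x86-64 4-level constants (PAGE_SHIFT 12 and 9 bits per level are assumptions for illustration, not values from this patch):

#include <stdio.h>

#define PAGE_SHIFT	12
#define PMD_SHIFT	21	/* PAGE_SHIFT + 9 */
#define PUD_SHIFT	30
#define PGDIR_SHIFT	39
#define PTRS		512	/* entries per table at each level */

int main(void)
{
	unsigned long addr = 0x7f1234567000UL;	/* arbitrary address */

	printf("pgd %lu pud %lu pmd %lu pte %lu\n",
	       (addr >> PGDIR_SHIFT) & (PTRS - 1),
	       (addr >> PUD_SHIFT) & (PTRS - 1),
	       (addr >> PMD_SHIFT) & (PTRS - 1),
	       (addr >> PAGE_SHIFT) & (PTRS - 1));
	return 0;
}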
#ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
extern int ptep_set_access_flags(struct vm_area_struct *vma,
unsigned long address, pte_t *ptep,
@@ -133,6 +249,13 @@ static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
}
#endif
+#ifndef __HAVE_ARCH_PTEP_GET
+static inline pte_t ptep_get(pte_t *ptep)
+{
+ return READ_ONCE(*ptep);
+}
+#endif
+
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#ifndef __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
@@ -1018,11 +1141,11 @@ static inline pmd_t pmd_read_atomic(pmd_t *pmdp)
#endif
/*
* This function is meant to be used by sites walking pagetables with
- * the mmap_sem hold in read mode to protect against MADV_DONTNEED and
+ * the mmap_lock held in read mode to protect against MADV_DONTNEED and
* transhuge page faults. MADV_DONTNEED can convert a transhuge pmd
* into a null pmd and the transhuge page fault can convert a null pmd
* into an hugepmd or into a regular pmd (if the hugepage allocation
- * fails). While holding the mmap_sem in read mode the pmd becomes
+ * fails). While holding the mmap_lock in read mode the pmd becomes
* stable and stops changing under us only if it's not null and not a
* transhuge pmd. When those races occur and this function makes a
* difference vs the standard pmd_none_or_clear_bad, the result is
@@ -1032,7 +1155,7 @@ static inline pmd_t pmd_read_atomic(pmd_t *pmdp)
*
* For 32bit kernels with a 64bit large pmd_t this automatically takes
* care of reading the pmd atomically to avoid SMP race conditions
- * against pmd_populate() when the mmap_sem is hold for reading by the
+ * against pmd_populate() when the mmap_lock is held for reading by the
* caller (a special atomic read not done by "gcc" as in the generic
* version above, is also needed when THP is disabled because the page
* fault can populate the pmd from under us).
@@ -1319,4 +1442,4 @@ typedef unsigned int pgtbl_mod_mask;
#define pmd_leaf(x) 0
#endif
-#endif /* _ASM_GENERIC_PGTABLE_H */
+#endif /* _LINUX_PGTABLE_H */
diff --git a/include/linux/phy.h b/include/linux/phy.h
index 8c05d0fb5c00..b693b609b2f5 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -1416,6 +1416,7 @@ int phy_ethtool_ksettings_set(struct phy_device *phydev,
int phy_mii_ioctl(struct phy_device *phydev, struct ifreq *ifr, int cmd);
int phy_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
int phy_do_ioctl_running(struct net_device *dev, struct ifreq *ifr, int cmd);
+int phy_disable_interrupts(struct phy_device *phydev);
void phy_request_interrupt(struct phy_device *phydev);
void phy_free_interrupt(struct phy_device *phydev);
void phy_print_status(struct phy_device *phydev);
diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
index 0c31b9461262..50afd0d0084c 100644
--- a/include/linux/pipe_fs_i.h
+++ b/include/linux/pipe_fs_i.h
@@ -9,6 +9,10 @@
#define PIPE_BUF_FLAG_GIFT 0x04 /* page is a gift */
#define PIPE_BUF_FLAG_PACKET 0x08 /* read() as a packet */
#define PIPE_BUF_FLAG_CAN_MERGE 0x10 /* can merge buffers */
+#define PIPE_BUF_FLAG_WHOLE 0x20 /* read() must return entire buffer or error */
+#ifdef CONFIG_WATCH_QUEUE
+#define PIPE_BUF_FLAG_LOSS 0x40 /* Message loss happened after this buffer */
+#endif
/**
* struct pipe_buffer - a linux kernel pipe buffer
@@ -34,8 +38,10 @@ struct pipe_buffer {
* @wr_wait: writer wait point in case of full pipe
* @head: The point of buffer production
* @tail: The point of buffer consumption
+ * @note_loss: The next read() should insert a data-lost message
* @max_usage: The maximum number of slots that may be used in the ring
* @ring_size: total number of buffers (should be a power of 2)
+ * @nr_accounted: The amount this pipe accounts for in user->pipe_bufs
* @tmp_page: cached released page
* @readers: number of current readers of this pipe
* @writers: number of current writers of this pipe
@@ -46,6 +52,7 @@ struct pipe_buffer {
* @fasync_writers: writer side fasync
* @bufs: the circular array of pipe buffers
* @user: the user who created this pipe
+ * @watch_queue: If this pipe is a watch_queue, this is the stuff for that
**/
struct pipe_inode_info {
struct mutex mutex;
@@ -54,6 +61,10 @@ struct pipe_inode_info {
unsigned int tail;
unsigned int max_usage;
unsigned int ring_size;
+#ifdef CONFIG_WATCH_QUEUE
+ bool note_loss;
+#endif
+ unsigned int nr_accounted;
unsigned int readers;
unsigned int writers;
unsigned int files;
@@ -64,6 +75,9 @@ struct pipe_inode_info {
struct fasync_struct *fasync_writers;
struct pipe_buffer *bufs;
struct user_struct *user;
+#ifdef CONFIG_WATCH_QUEUE
+ struct watch_queue *watch_queue;
+#endif
};
/*
@@ -239,9 +253,20 @@ void generic_pipe_buf_release(struct pipe_inode_info *, struct pipe_buffer *);
extern const struct pipe_buf_operations nosteal_pipe_buf_ops;
+#ifdef CONFIG_WATCH_QUEUE
+unsigned long account_pipe_buffers(struct user_struct *user,
+ unsigned long old, unsigned long new);
+bool too_many_pipe_buffers_soft(unsigned long user_bufs);
+bool too_many_pipe_buffers_hard(unsigned long user_bufs);
+bool pipe_is_unprivileged_user(void);
+#endif
+
/* for F_SETPIPE_SZ and F_GETPIPE_SZ */
+#ifdef CONFIG_WATCH_QUEUE
+int pipe_resize_ring(struct pipe_inode_info *pipe, unsigned int nr_slots);
+#endif
long pipe_fcntl(struct file *, unsigned int, unsigned long arg);
-struct pipe_inode_info *get_pipe_info(struct file *file);
+struct pipe_inode_info *get_pipe_info(struct file *file, bool for_splice);
int create_pipe_files(struct file **, int);
unsigned int round_pipe_size(unsigned long size);
diff --git a/include/linux/platform_data/i2c-pxa.h b/include/linux/platform_data/i2c-pxa.h
index 6a9b28399b39..24953981bd9f 100644
--- a/include/linux/platform_data/i2c-pxa.h
+++ b/include/linux/platform_data/i2c-pxa.h
@@ -7,54 +7,6 @@
#ifndef _I2C_PXA_H_
#define _I2C_PXA_H_
-#if 0
-#define DEF_TIMEOUT 3
-#else
-/* need a longer timeout if we're dealing with the fact we may well be
- * looking at a multi-master environment
-*/
-#define DEF_TIMEOUT 32
-#endif
-
-#define BUS_ERROR (-EREMOTEIO)
-#define XFER_NAKED (-ECONNREFUSED)
-#define I2C_RETRY (-2000) /* an error has occurred retry transmit */
-
-/* ICR initialize bit values
-*
-* 15. FM 0 (100 Khz operation)
-* 14. UR 0 (No unit reset)
-* 13. SADIE 0 (Disables the unit from interrupting on slave addresses
-* matching its slave address)
-* 12. ALDIE 0 (Disables the unit from interrupt when it loses arbitration
-* in master mode)
-* 11. SSDIE 0 (Disables interrupts from a slave stop detected, in slave mode)
-* 10. BEIE 1 (Enable interrupts from detected bus errors, no ACK sent)
-* 9. IRFIE 1 (Enable interrupts from full buffer received)
-* 8. ITEIE 1 (Enables the I2C unit to interrupt when transmit buffer empty)
-* 7. GCD 1 (Disables i2c unit response to general call messages as a slave)
-* 6. IUE 0 (Disable unit until we change settings)
-* 5. SCLE 1 (Enables the i2c clock output for master mode (drives SCL)
-* 4. MA 0 (Only send stop with the ICR stop bit)
-* 3. TB 0 (We are not transmitting a byte initially)
-* 2. ACKNAK 0 (Send an ACK after the unit receives a byte)
-* 1. STOP 0 (Do not send a STOP)
-* 0. START 0 (Do not send a START)
-*
-*/
-#define I2C_ICR_INIT (ICR_BEIE | ICR_IRFIE | ICR_ITEIE | ICR_GCD | ICR_SCLE)
-
-/* I2C status register init values
- *
- * 10. BED 1 (Clear bus error detected)
- * 9. SAD 1 (Clear slave address detected)
- * 7. IRF 1 (Clear IDBR Receive Full)
- * 6. ITE 1 (Clear IDBR Transmit Empty)
- * 5. ALD 1 (Clear Arbitration Loss Detected)
- * 4. SSD 1 (Clear Slave Stop Detected)
- */
-#define I2C_ISR_INIT 0x7FF /* status register init */
-
struct i2c_pxa_platform_data {
unsigned int class;
unsigned int use_pio :1;
diff --git a/include/linux/platform_data/mtd-davinci.h b/include/linux/platform_data/mtd-davinci.h
index 08e639e047e5..03e92c71b3fa 100644
--- a/include/linux/platform_data/mtd-davinci.h
+++ b/include/linux/platform_data/mtd-davinci.h
@@ -68,7 +68,7 @@ struct davinci_nand_pdata { /* platform_data */
* Newer ones also support 4-bit ECC, but it is awkward
* to use with large page chips.
*/
- nand_ecc_modes_t ecc_mode;
+ enum nand_ecc_mode ecc_mode;
u8 ecc_bits;
/* e.g. NAND_BUSWIDTH_16 */
diff --git a/include/linux/platform_data/mtd-nand-s3c2410.h b/include/linux/platform_data/mtd-nand-s3c2410.h
index deb849bcf0ec..08675b16f9e1 100644
--- a/include/linux/platform_data/mtd-nand-s3c2410.h
+++ b/include/linux/platform_data/mtd-nand-s3c2410.h
@@ -49,7 +49,7 @@ struct s3c2410_platform_nand {
unsigned int ignore_unset_ecc:1;
- nand_ecc_modes_t ecc_mode;
+ enum nand_ecc_mode ecc_mode;
int nr_sets;
struct s3c2410_nand_set *sets;
diff --git a/include/linux/pm_opp.h b/include/linux/pm_opp.h
index 747861816f4f..d5c4a329321d 100644
--- a/include/linux/pm_opp.h
+++ b/include/linux/pm_opp.h
@@ -42,6 +42,18 @@ struct dev_pm_opp_supply {
};
/**
+ * struct dev_pm_opp_icc_bw - Interconnect bandwidth values
+ * @avg: Average bandwidth corresponding to this OPP (in icc units)
+ * @peak: Peak bandwidth corresponding to this OPP (in icc units)
+ *
+ * This structure stores the bandwidth values for a single interconnect path.
+ */
+struct dev_pm_opp_icc_bw {
+ u32 avg;
+ u32 peak;
+};
+
+/**
* struct dev_pm_opp_info - OPP freq/voltage/current values
* @rate: Target clk rate in hz
* @supplies: Array of voltage/current values for all power supplies
@@ -360,6 +372,7 @@ int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpuma
struct device_node *dev_pm_opp_of_get_opp_desc_node(struct device *dev);
struct device_node *dev_pm_opp_get_of_node(struct dev_pm_opp *opp);
int of_get_required_opp_performance_state(struct device_node *np, int index);
+int dev_pm_opp_of_find_icc_paths(struct device *dev, struct opp_table *opp_table);
void dev_pm_opp_of_register_em(struct cpumask *cpus);
#else
static inline int dev_pm_opp_of_add_table(struct device *dev)
@@ -408,6 +421,11 @@ static inline int of_get_required_opp_performance_state(struct device_node *np,
{
return -ENOTSUPP;
}
+
+static inline int dev_pm_opp_of_find_icc_paths(struct device *dev, struct opp_table *opp_table)
+{
+ return -ENOTSUPP;
+}
#endif
#endif /* __LINUX_OPP_H__ */
diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h
index dcd5a71e6c67..ac1345a48ad0 100644
--- a/include/linux/power_supply.h
+++ b/include/linux/power_supply.h
@@ -61,6 +61,7 @@ enum {
POWER_SUPPLY_HEALTH_WATCHDOG_TIMER_EXPIRE,
POWER_SUPPLY_HEALTH_SAFETY_TIMER_EXPIRE,
POWER_SUPPLY_HEALTH_OVERCURRENT,
+ POWER_SUPPLY_HEALTH_CALIBRATION_REQUIRED,
};
enum {
@@ -139,6 +140,7 @@ enum power_supply_property {
POWER_SUPPLY_PROP_CAPACITY, /* in percents! */
POWER_SUPPLY_PROP_CAPACITY_ALERT_MIN, /* in percents! */
POWER_SUPPLY_PROP_CAPACITY_ALERT_MAX, /* in percents! */
+ POWER_SUPPLY_PROP_CAPACITY_ERROR_MARGIN, /* in percents! */
POWER_SUPPLY_PROP_CAPACITY_LEVEL,
POWER_SUPPLY_PROP_TEMP,
POWER_SUPPLY_PROP_TEMP_MAX,
@@ -158,6 +160,9 @@ enum power_supply_property {
POWER_SUPPLY_PROP_PRECHARGE_CURRENT,
POWER_SUPPLY_PROP_CHARGE_TERM_CURRENT,
POWER_SUPPLY_PROP_CALIBRATE,
+ POWER_SUPPLY_PROP_MANUFACTURE_YEAR,
+ POWER_SUPPLY_PROP_MANUFACTURE_MONTH,
+ POWER_SUPPLY_PROP_MANUFACTURE_DAY,
/* Properties of type `const char *' */
POWER_SUPPLY_PROP_MODEL_NAME,
POWER_SUPPLY_PROP_MANUFACTURER,
@@ -223,9 +228,9 @@ struct power_supply_config {
struct power_supply_desc {
const char *name;
enum power_supply_type type;
- enum power_supply_usb_type *usb_types;
+ const enum power_supply_usb_type *usb_types;
size_t num_usb_types;
- enum power_supply_property *properties;
+ const enum power_supply_property *properties;
size_t num_properties;
/*
@@ -346,8 +351,12 @@ struct power_supply_battery_info {
int charge_full_design_uah; /* microAmp-hours */
int voltage_min_design_uv; /* microVolts */
int voltage_max_design_uv; /* microVolts */
+ int tricklecharge_current_ua; /* microAmps */
int precharge_current_ua; /* microAmps */
+ int precharge_voltage_max_uv; /* microVolts */
int charge_term_current_ua; /* microAmps */
+ int charge_restart_voltage_uv; /* microVolts */
+ int overvoltage_limit_uv; /* microVolts */
int constant_charge_current_max_ua; /* microAmps */
int constant_charge_voltage_max_uv; /* microVolts */
int factory_internal_resistance_uohm; /* microOhms */
diff --git a/include/linux/psp-sev.h b/include/linux/psp-sev.h
index 7fbc8679145c..49d155cd2dfe 100644
--- a/include/linux/psp-sev.h
+++ b/include/linux/psp-sev.h
@@ -597,7 +597,7 @@ int sev_guest_df_flush(int *error);
*/
int sev_guest_decommission(struct sev_data_decommission *data, int *error);
-void *psp_copy_user_blob(u64 __user uaddr, u32 len);
+void *psp_copy_user_blob(u64 uaddr, u32 len);
#else /* !CONFIG_CRYPTO_DEV_SP_PSP */
diff --git a/include/linux/qed/qed_chain.h b/include/linux/qed/qed_chain.h
index 733fad7dfbed..6d15040c642c 100644
--- a/include/linux/qed/qed_chain.h
+++ b/include/linux/qed/qed_chain.h
@@ -207,28 +207,34 @@ static inline u32 qed_chain_get_cons_idx_u32(struct qed_chain *p_chain)
static inline u16 qed_chain_get_elem_left(struct qed_chain *p_chain)
{
+ u16 elem_per_page = p_chain->elem_per_page;
+ u32 prod = p_chain->u.chain16.prod_idx;
+ u32 cons = p_chain->u.chain16.cons_idx;
u16 used;
- used = (u16) (((u32)0x10000 +
- (u32)p_chain->u.chain16.prod_idx) -
- (u32)p_chain->u.chain16.cons_idx);
+ if (prod < cons)
+ prod += (u32)U16_MAX + 1;
+
+ used = (u16)(prod - cons);
if (p_chain->mode == QED_CHAIN_MODE_NEXT_PTR)
- used -= p_chain->u.chain16.prod_idx / p_chain->elem_per_page -
- p_chain->u.chain16.cons_idx / p_chain->elem_per_page;
+ used -= prod / elem_per_page - cons / elem_per_page;
return (u16)(p_chain->capacity - used);
}
static inline u32 qed_chain_get_elem_left_u32(struct qed_chain *p_chain)
{
+ u16 elem_per_page = p_chain->elem_per_page;
+ u64 prod = p_chain->u.chain32.prod_idx;
+ u64 cons = p_chain->u.chain32.cons_idx;
u32 used;
- used = (u32) (((u64)0x100000000ULL +
- (u64)p_chain->u.chain32.prod_idx) -
- (u64)p_chain->u.chain32.cons_idx);
+ if (prod < cons)
+ prod += (u64)U32_MAX + 1;
+
+ used = (u32)(prod - cons);
if (p_chain->mode == QED_CHAIN_MODE_NEXT_PTR)
- used -= p_chain->u.chain32.prod_idx / p_chain->elem_per_page -
- p_chain->u.chain32.cons_idx / p_chain->elem_per_page;
+ used -= (u32)(prod / elem_per_page - cons / elem_per_page);
return p_chain->capacity - used;
}
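The rewrite makes the index wraparound explicit: the producer is widened and advanced past the consumer only when it has actually wrapped, instead of unconditionally adding 0x10000 (or 2^32) and relying on truncation. A standalone check of the 16-bit arithmetic with invented index values:

#include <assert.h>
#include <stdint.h>

static uint16_t used_elems(uint16_t prod_idx, uint16_t cons_idx)
{
	uint32_t prod = prod_idx, cons = cons_idx;

	if (prod < cons)			/* producer wrapped past 0xffff */
		prod += (uint32_t)UINT16_MAX + 1;
	return (uint16_t)(prod - cons);
}

int main(void)
{
	assert(used_elems(10, 5) == 5);		/* no wrap */
	assert(used_elems(3, 0xfffe) == 5);	/* wrapped producer */
	return 0;
}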
diff --git a/include/linux/ras.h b/include/linux/ras.h
index 7c3debb47c87..1f4048bf2674 100644
--- a/include/linux/ras.h
+++ b/include/linux/ras.h
@@ -17,12 +17,7 @@ static inline int ras_add_daemon_trace(void) { return 0; }
#endif
#ifdef CONFIG_RAS_CEC
-void __init cec_init(void);
int __init parse_cec_param(char *str);
-int cec_add_elem(u64 pfn);
-#else
-static inline void __init cec_init(void) { }
-static inline int cec_add_elem(u64 pfn) { return -ENODEV; }
#endif
#ifdef CONFIG_RAS
diff --git a/include/linux/regset.h b/include/linux/regset.h
index bf0243779738..46d6ae68c455 100644
--- a/include/linux/regset.h
+++ b/include/linux/regset.h
@@ -320,7 +320,7 @@ static inline int user_regset_copyout_zero(unsigned int *pos,
if (*kbuf) {
memset(*kbuf, 0, copy);
*kbuf += copy;
- } else if (__clear_user(*ubuf, copy))
+ } else if (clear_user(*ubuf, copy))
return -EFAULT;
else
*ubuf += copy;
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index 988d176472df..3a6adfa70fb0 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -77,7 +77,7 @@ struct anon_vma {
struct anon_vma_chain {
struct vm_area_struct *vma;
struct anon_vma *anon_vma;
- struct list_head same_vma; /* locked by mmap_sem & page_table_lock */
+ struct list_head same_vma; /* locked by mmap_lock & page_table_lock */
struct rb_node rb; /* locked by anon_vma->rwsem */
unsigned long rb_subtree_last;
#ifdef CONFIG_DEBUG_VM_RB
diff --git a/include/linux/sched.h b/include/linux/sched.h
index c5d96e3e7fff..b62e6aaf28f0 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -31,6 +31,7 @@
#include <linux/task_io_accounting.h>
#include <linux/posix-timers.h>
#include <linux/rseq.h>
+#include <linux/kcsan.h>
/* task_struct member predeclarations (sorted alphabetically): */
struct audit_context;
@@ -1197,6 +1198,9 @@ struct task_struct {
#ifdef CONFIG_KASAN
unsigned int kasan_depth;
#endif
+#ifdef CONFIG_KCSAN
+ struct kcsan_ctx kcsan_ctx;
+#endif
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/* Index of current stored address in ret_stack: */
@@ -1304,7 +1308,9 @@ struct task_struct {
#ifdef CONFIG_X86_MCE
u64 mce_addr;
- u64 mce_status;
+ __u64 mce_ripv : 1,
+ mce_whole_page : 1,
+ __mce_reserved : 62;
struct callback_head mce_kill_me;
#endif
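Replacing the raw mce_status word with named one-bit fields lets consumers test the conditions directly instead of masking architectural status bits. A sketch of a consumer; the function and message are invented for illustration:

/* Illustrative only: accessing the new bitfields. */
static void mce_report(const struct task_struct *t)
{
	pr_info("MCE: ripv=%d whole_page=%d\n",
		(int)t->mce_ripv, (int)t->mce_whole_page);
}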
diff --git a/include/linux/sched/debug.h b/include/linux/sched/debug.h
index 95fb9e025247..00c45a0e6abe 100644
--- a/include/linux/sched/debug.h
+++ b/include/linux/sched/debug.h
@@ -30,7 +30,8 @@ extern void show_regs(struct pt_regs *);
* task), SP is the stack pointer of the first frame that should be shown in the back
* trace (or NULL if the entire call-chain of the task should be shown).
*/
-extern void show_stack(struct task_struct *task, unsigned long *sp);
+extern void show_stack(struct task_struct *task, unsigned long *sp,
+ const char *loglvl);
extern void sched_show_task(struct task_struct *p);
diff --git a/include/linux/sched/mm.h b/include/linux/sched/mm.h
index a132d875d351..480a4d1b7dd8 100644
--- a/include/linux/sched/mm.h
+++ b/include/linux/sched/mm.h
@@ -53,7 +53,7 @@ void mmdrop(struct mm_struct *mm);
/*
* This has to be called after a get_task_mm()/mmget_not_zero()
- * followed by taking the mmap_sem for writing before modifying the
+ * followed by taking the mmap_lock for writing before modifying the
* vmas or anything the coredump pretends not to change from under it.
*
* It also has to be called when mmgrab() is used in the context of
@@ -61,14 +61,14 @@ void mmdrop(struct mm_struct *mm);
* the context of the process to run down_write() on that pinned mm.
*
* NOTE: find_extend_vma() called from GUP context is the only place
- * that can modify the "mm" (notably the vm_start/end) under mmap_sem
+ * that can modify the "mm" (notably the vm_start/end) under mmap_lock
* for reading and outside the context of the process, so it is also
- * the only case that holds the mmap_sem for reading that must call
- * this function. Generally if the mmap_sem is hold for reading
+ * the only case that holds the mmap_lock for reading that must call
+ * this function. Generally if the mmap_lock is held for reading
* there's no need of this check after get_task_mm()/mmget_not_zero().
*
* This function can be obsoleted and the check can be removed, after
- * the coredump code will hold the mmap_sem for writing before
+ * the coredump code will hold the mmap_lock for writing before
* invoking the ->core_dump methods.
*/
static inline bool mmget_still_valid(struct mm_struct *mm)
diff --git a/include/linux/sctp.h b/include/linux/sctp.h
index 8ccd82105de8..76731230bbc5 100644
--- a/include/linux/sctp.h
+++ b/include/linux/sctp.h
@@ -221,7 +221,7 @@ struct sctp_datahdr {
__be16 stream;
__be16 ssn;
__u32 ppid;
- __u8 payload[0];
+ __u8 payload[];
};
struct sctp_data_chunk {
@@ -269,7 +269,7 @@ struct sctp_inithdr {
__be16 num_outbound_streams;
__be16 num_inbound_streams;
__be32 initial_tsn;
- __u8 params[0];
+ __u8 params[];
};
struct sctp_init_chunk {
@@ -299,13 +299,13 @@ struct sctp_cookie_preserve_param {
/* Section 3.3.2.1 Host Name Address (11) */
struct sctp_hostname_param {
struct sctp_paramhdr param_hdr;
- uint8_t hostname[0];
+ uint8_t hostname[];
};
/* Section 3.3.2.1 Supported Address Types (12) */
struct sctp_supported_addrs_param {
struct sctp_paramhdr param_hdr;
- __be16 types[0];
+ __be16 types[];
};
/* ADDIP Section 3.2.6 Adaptation Layer Indication */
@@ -317,25 +317,25 @@ struct sctp_adaptation_ind_param {
/* ADDIP Section 4.2.7 Supported Extensions Parameter */
struct sctp_supported_ext_param {
struct sctp_paramhdr param_hdr;
- __u8 chunks[0];
+ __u8 chunks[];
};
/* AUTH Section 3.1 Random */
struct sctp_random_param {
struct sctp_paramhdr param_hdr;
- __u8 random_val[0];
+ __u8 random_val[];
};
/* AUTH Section 3.2 Chunk List */
struct sctp_chunks_param {
struct sctp_paramhdr param_hdr;
- __u8 chunks[0];
+ __u8 chunks[];
};
/* AUTH Section 3.3 HMAC Algorithm */
struct sctp_hmac_algo_param {
struct sctp_paramhdr param_hdr;
- __be16 hmac_ids[0];
+ __be16 hmac_ids[];
};
/* RFC 2960. Section 3.3.3 Initiation Acknowledgement (INIT ACK) (2):
@@ -350,7 +350,7 @@ struct sctp_initack_chunk {
/* Section 3.3.3.1 State Cookie (7) */
struct sctp_cookie_param {
struct sctp_paramhdr p;
- __u8 body[0];
+ __u8 body[];
};
/* Section 3.3.3.1 Unrecognized Parameters (8) */
@@ -384,7 +384,7 @@ struct sctp_sackhdr {
__be32 a_rwnd;
__be16 num_gap_ack_blocks;
__be16 num_dup_tsns;
- union sctp_sack_variable variable[0];
+ union sctp_sack_variable variable[];
};
struct sctp_sack_chunk {
@@ -436,7 +436,7 @@ struct sctp_shutdown_chunk {
struct sctp_errhdr {
__be16 cause;
__be16 length;
- __u8 variable[0];
+ __u8 variable[];
};
struct sctp_operr_chunk {
@@ -594,7 +594,7 @@ struct sctp_fwdtsn_skip {
struct sctp_fwdtsn_hdr {
__be32 new_cum_tsn;
- struct sctp_fwdtsn_skip skip[0];
+ struct sctp_fwdtsn_skip skip[];
};
struct sctp_fwdtsn_chunk {
@@ -611,7 +611,7 @@ struct sctp_ifwdtsn_skip {
struct sctp_ifwdtsn_hdr {
__be32 new_cum_tsn;
- struct sctp_ifwdtsn_skip skip[0];
+ struct sctp_ifwdtsn_skip skip[];
};
struct sctp_ifwdtsn_chunk {
@@ -658,7 +658,7 @@ struct sctp_addip_param {
struct sctp_addiphdr {
__be32 serial;
- __u8 params[0];
+ __u8 params[];
};
struct sctp_addip_chunk {
@@ -718,7 +718,7 @@ struct sctp_addip_chunk {
struct sctp_authhdr {
__be16 shkey_id;
__be16 hmac_id;
- __u8 hmac[0];
+ __u8 hmac[];
};
struct sctp_auth_chunk {
@@ -733,7 +733,7 @@ struct sctp_infox {
struct sctp_reconf_chunk {
struct sctp_chunkhdr chunk_hdr;
- __u8 params[0];
+ __u8 params[];
};
struct sctp_strreset_outreq {
@@ -741,13 +741,13 @@ struct sctp_strreset_outreq {
__be32 request_seq;
__be32 response_seq;
__be32 send_reset_at_tsn;
- __be16 list_of_streams[0];
+ __be16 list_of_streams[];
};
struct sctp_strreset_inreq {
struct sctp_paramhdr param_hdr;
__be32 request_seq;
- __be16 list_of_streams[0];
+ __be16 list_of_streams[];
};
struct sctp_strreset_tsnreq {
diff --git a/include/linux/security.h b/include/linux/security.h
index b3f2cb21b4f2..0a0a03b36a3b 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -56,6 +56,8 @@ struct mm_struct;
struct fs_context;
struct fs_parameter;
enum fs_value_type;
+struct watch;
+struct watch_notification;
/* Default (no) options for the capable function */
#define CAP_OPT_NONE 0x0
@@ -390,6 +392,8 @@ int security_kernel_post_read_file(struct file *file, char *buf, loff_t size,
enum kernel_read_file_id id);
int security_task_fix_setuid(struct cred *new, const struct cred *old,
int flags);
+int security_task_fix_setgid(struct cred *new, const struct cred *old,
+ int flags);
int security_task_setpgid(struct task_struct *p, pid_t pgid);
int security_task_getpgid(struct task_struct *p);
int security_task_getsid(struct task_struct *p);
@@ -1034,6 +1038,13 @@ static inline int security_task_fix_setuid(struct cred *new,
return cap_task_fix_setuid(new, old, flags);
}
+static inline int security_task_fix_setgid(struct cred *new,
+ const struct cred *old,
+ int flags)
+{
+ return 0;
+}
+
static inline int security_task_setpgid(struct task_struct *p, pid_t pgid)
{
return 0;
@@ -1282,6 +1293,28 @@ static inline int security_locked_down(enum lockdown_reason what)
}
#endif /* CONFIG_SECURITY */
+#if defined(CONFIG_SECURITY) && defined(CONFIG_WATCH_QUEUE)
+int security_post_notification(const struct cred *w_cred,
+ const struct cred *cred,
+ struct watch_notification *n);
+#else
+static inline int security_post_notification(const struct cred *w_cred,
+ const struct cred *cred,
+ struct watch_notification *n)
+{
+ return 0;
+}
+#endif
+
+#if defined(CONFIG_SECURITY) && defined(CONFIG_KEY_NOTIFICATIONS)
+int security_watch_key(struct key *key);
+#else
+static inline int security_watch_key(struct key *key)
+{
+ return 0;
+}
+#endif
+
#ifdef CONFIG_SECURITY_NETWORK
int security_unix_stream_connect(struct sock *sock, struct sock *other, struct sock *newsk);
@@ -1750,8 +1783,8 @@ static inline int security_path_chroot(const struct path *path)
int security_key_alloc(struct key *key, const struct cred *cred, unsigned long flags);
void security_key_free(struct key *key);
-int security_key_permission(key_ref_t key_ref,
- const struct cred *cred, unsigned perm);
+int security_key_permission(key_ref_t key_ref, const struct cred *cred,
+ enum key_need_perm need_perm);
int security_key_getsecurity(struct key *key, char **_buffer);
#else
@@ -1769,7 +1802,7 @@ static inline void security_key_free(struct key *key)
static inline int security_key_permission(key_ref_t key_ref,
const struct cred *cred,
- unsigned perm)
+ enum key_need_perm need_perm)
{
return 0;
}
diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h
index 0491d963d47e..8b97204f35a7 100644
--- a/include/linux/seqlock.h
+++ b/include/linux/seqlock.h
@@ -37,9 +37,25 @@
#include <linux/preempt.h>
#include <linux/lockdep.h>
#include <linux/compiler.h>
+#include <linux/kcsan-checks.h>
#include <asm/processor.h>
/*
+ * The seqlock interface does not prescribe a precise sequence of read
+ * begin/retry/end. For readers, typically there is a call to
+ * read_seqcount_begin() and read_seqcount_retry(), however, there are more
+ * esoteric cases which do not follow this pattern.
+ *
+ * As a consequence, we take the following best-effort approach for raw usage
+ * via seqcount_t under KCSAN: upon beginning a seq-reader critical section,
+ * pessimistically mark the next KCSAN_SEQLOCK_REGION_MAX memory accesses as
+ * atomics; if there is a matching read_seqcount_retry() call, no following
+ * memory operations are considered atomic. Usage of seqlocks via seqlock_t
+ * interface is not affected.
+ */
+#define KCSAN_SEQLOCK_REGION_MAX 1000
+
+/*
* Version using sequence counter only.
* This can be used when code has its own mutex protecting the
* updating starting before the write_seqcount_begin() and ending
@@ -115,6 +131,7 @@ repeat:
cpu_relax();
goto repeat;
}
+ kcsan_atomic_next(KCSAN_SEQLOCK_REGION_MAX);
return ret;
}
@@ -131,6 +148,7 @@ static inline unsigned raw_read_seqcount(const seqcount_t *s)
{
unsigned ret = READ_ONCE(s->sequence);
smp_rmb();
+ kcsan_atomic_next(KCSAN_SEQLOCK_REGION_MAX);
return ret;
}
@@ -183,6 +201,7 @@ static inline unsigned raw_seqcount_begin(const seqcount_t *s)
{
unsigned ret = READ_ONCE(s->sequence);
smp_rmb();
+ kcsan_atomic_next(KCSAN_SEQLOCK_REGION_MAX);
return ret & ~1;
}
@@ -202,7 +221,8 @@ static inline unsigned raw_seqcount_begin(const seqcount_t *s)
*/
static inline int __read_seqcount_retry(const seqcount_t *s, unsigned start)
{
- return unlikely(s->sequence != start);
+ kcsan_atomic_next(0);
+ return unlikely(READ_ONCE(s->sequence) != start);
}
/**
@@ -225,6 +245,7 @@ static inline int read_seqcount_retry(const seqcount_t *s, unsigned start)
static inline void raw_write_seqcount_begin(seqcount_t *s)
{
+ kcsan_nestable_atomic_begin();
s->sequence++;
smp_wmb();
}
@@ -233,6 +254,7 @@ static inline void raw_write_seqcount_end(seqcount_t *s)
{
smp_wmb();
s->sequence++;
+ kcsan_nestable_atomic_end();
}
/**
@@ -243,6 +265,13 @@ static inline void raw_write_seqcount_end(seqcount_t *s)
* usual consistency guarantee. It is one wmb cheaper, because we can
* collapse the two back-to-back wmb()s.
*
+ * Note that writes surrounding the barrier should be declared atomic (e.g.
+ * via WRITE_ONCE): a) to ensure the writes become visible to other threads
+ * atomically, avoiding compiler optimizations; b) to document which writes are
+ * meant to propagate to the reader critical section. This is necessary because
+ * neither the writes before nor after the barrier are enclosed in a seq-writer
+ * critical section that would ensure readers are aware of ongoing writes.
+ *
* seqcount_t seq;
* bool X = true, Y = false;
*
@@ -262,18 +291,20 @@ static inline void raw_write_seqcount_end(seqcount_t *s)
*
* void write(void)
* {
- * Y = true;
+ * WRITE_ONCE(Y, true);
*
* raw_write_seqcount_barrier(seq);
*
- * X = false;
+ * WRITE_ONCE(X, false);
* }
*/
static inline void raw_write_seqcount_barrier(seqcount_t *s)
{
+ kcsan_nestable_atomic_begin();
s->sequence++;
smp_wmb();
s->sequence++;
+ kcsan_nestable_atomic_end();
}
static inline int raw_read_seqcount_latch(seqcount_t *s)
@@ -398,7 +429,9 @@ static inline void write_seqcount_end(seqcount_t *s)
static inline void write_seqcount_invalidate(seqcount_t *s)
{
smp_wmb();
+ kcsan_nestable_atomic_begin();
s->sequence+=2;
+ kcsan_nestable_atomic_end();
}
typedef struct {
@@ -430,11 +463,21 @@ typedef struct {
*/
static inline unsigned read_seqbegin(const seqlock_t *sl)
{
- return read_seqcount_begin(&sl->seqcount);
+ unsigned ret = read_seqcount_begin(&sl->seqcount);
+
+ kcsan_atomic_next(0); /* non-raw usage, assume closing read_seqretry() */
+ kcsan_flat_atomic_begin();
+ return ret;
}
static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start)
{
+ /*
+ * Assume not nested: read_seqretry() may be called multiple times when
+ * completing a read critical section.
+ */
+ kcsan_flat_atomic_end();
+
return read_seqcount_retry(&sl->seqcount, start);
}
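These hooks instrument the canonical seqlock reader pattern: begin, read speculatively, retry until the sequence is stable. For reference, the consumer loop that the kcsan_flat_atomic_begin()/end() pair now brackets looks like this (the shared counter is invented):

/* Illustrative seqlock reader: retries while a writer is active. */
static u64 read_counter(seqlock_t *lock, const u64 *counter)
{
	unsigned int seq;
	u64 val;

	do {
		seq = read_seqbegin(lock);	/* KCSAN: atomic region begins */
		val = *counter;
	} while (read_seqretry(lock, seq));	/* KCSAN: atomic region ends */

	return val;
}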
diff --git a/include/linux/set_memory.h b/include/linux/set_memory.h
index 86281ac7c305..860e0f843c12 100644
--- a/include/linux/set_memory.h
+++ b/include/linux/set_memory.h
@@ -26,7 +26,7 @@ static inline int set_direct_map_default_noflush(struct page *page)
#endif
#ifndef set_mce_nospec
-static inline int set_mce_nospec(unsigned long pfn)
+static inline int set_mce_nospec(unsigned long pfn, bool unmap)
{
return 0;
}
diff --git a/include/linux/stacktrace.h b/include/linux/stacktrace.h
index 83bd8cb475d7..b7af8cc13eda 100644
--- a/include/linux/stacktrace.h
+++ b/include/linux/stacktrace.h
@@ -64,7 +64,7 @@ void arch_stack_walk_user(stack_trace_consume_fn consume_entry, void *cookie,
struct stack_trace {
unsigned int nr_entries, max_entries;
unsigned long *entries;
- int skip; /* input argument: How many entries to skip */
+ unsigned int skip; /* input argument: How many entries to skip */
};
extern void save_stack_trace(struct stack_trace *trace);
diff --git a/include/linux/sunrpc/auth.h b/include/linux/sunrpc/auth.h
index 4f6b28487f28..98da816b5fc2 100644
--- a/include/linux/sunrpc/auth.h
+++ b/include/linux/sunrpc/auth.h
@@ -76,7 +76,7 @@ struct rpc_auth {
unsigned int au_verfsize; /* size of reply verifier */
unsigned int au_ralign; /* words before UL header */
- unsigned int au_flags;
+ unsigned long au_flags;
const struct rpc_authops *au_ops;
rpc_authflavor_t au_flavor; /* pseudoflavor (note may
* differ from the flavor in
@@ -89,7 +89,8 @@ struct rpc_auth {
};
/* rpc_auth au_flags */
-#define RPCAUTH_AUTH_DATATOUCH 0x00000002
+#define RPCAUTH_AUTH_DATATOUCH (1)
+#define RPCAUTH_AUTH_UPDATE_SLACK (2)
struct rpc_auth_create_args {
rpc_authflavor_t pseudoflavor;
diff --git a/include/linux/sunrpc/gss_api.h b/include/linux/sunrpc/gss_api.h
index bc07e51f20d1..bf4ac8a0268c 100644
--- a/include/linux/sunrpc/gss_api.h
+++ b/include/linux/sunrpc/gss_api.h
@@ -84,6 +84,7 @@ struct pf_desc {
u32 service;
char *name;
char *auth_domain_name;
+ struct auth_domain *domain;
bool datatouch;
};
diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
index fd390894a584..386628b36bc7 100644
--- a/include/linux/sunrpc/svc.h
+++ b/include/linux/sunrpc/svc.h
@@ -254,6 +254,7 @@ struct svc_rqst {
struct page * *rq_page_end; /* one past the last page */
struct kvec rq_vec[RPCSVC_MAXPAGES]; /* generally useful.. */
+ struct bio_vec rq_bvec[RPCSVC_MAXPAGES];
__be32 rq_xid; /* transmission id */
u32 rq_prog; /* program number */
@@ -299,6 +300,7 @@ struct svc_rqst {
struct net *rq_bc_net; /* pointer to backchannel's
* net namespace
*/
+ void ** rq_lease_breaker; /* The v4 client breaking a lease */
};
#define SVC_NET(rqst) (rqst->rq_xprt ? rqst->rq_xprt->xpt_net : rqst->rq_bc_net)
diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
index cbcfbd0521e3..7ed82625dc0b 100644
--- a/include/linux/sunrpc/svc_rdma.h
+++ b/include/linux/sunrpc/svc_rdma.h
@@ -48,7 +48,6 @@
#include <linux/sunrpc/rpc_rdma.h>
#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
-#define SVCRDMA_DEBUG
/* Default and maximum inline threshold sizes */
enum {
@@ -160,9 +159,8 @@ struct svc_rdma_send_ctxt {
};
/* svc_rdma_backchannel.c */
-extern int svc_rdma_handle_bc_reply(struct rpc_xprt *xprt,
- __be32 *rdma_resp,
- struct xdr_buf *rcvbuf);
+extern void svc_rdma_handle_bc_reply(struct svc_rqst *rqstp,
+ struct svc_rdma_recv_ctxt *rctxt);
/* svc_rdma_recvfrom.c */
extern void svc_rdma_recv_ctxts_destroy(struct svcxprt_rdma *rdma);
diff --git a/include/linux/sunrpc/svc_xprt.h b/include/linux/sunrpc/svc_xprt.h
index 9e1e046de176..aca35ab5cff2 100644
--- a/include/linux/sunrpc/svc_xprt.h
+++ b/include/linux/sunrpc/svc_xprt.h
@@ -117,6 +117,12 @@ static inline int register_xpt_user(struct svc_xprt *xpt, struct svc_xpt_user *u
return 0;
}
+static inline bool svc_xprt_is_dead(const struct svc_xprt *xprt)
+{
+ return (test_bit(XPT_DEAD, &xprt->xpt_flags) != 0) ||
+ (test_bit(XPT_CLOSE, &xprt->xpt_flags) != 0);
+}
+
int svc_reg_xprt_class(struct svc_xprt_class *);
void svc_unreg_xprt_class(struct svc_xprt_class *);
void svc_xprt_init(struct net *, struct svc_xprt_class *, struct svc_xprt *,
diff --git a/include/linux/sunrpc/svcauth_gss.h b/include/linux/sunrpc/svcauth_gss.h
index ca39a388dc22..f09c82b0a7ae 100644
--- a/include/linux/sunrpc/svcauth_gss.h
+++ b/include/linux/sunrpc/svcauth_gss.h
@@ -20,7 +20,8 @@ int gss_svc_init(void);
void gss_svc_shutdown(void);
int gss_svc_init_net(struct net *net);
void gss_svc_shutdown_net(struct net *net);
-int svcauth_gss_register_pseudoflavor(u32 pseudoflavor, char * name);
+struct auth_domain *svcauth_gss_register_pseudoflavor(u32 pseudoflavor,
+ char *name);
u32 svcauth_gss_flavor(struct auth_domain *dom);
#endif /* _LINUX_SUNRPC_SVCAUTH_GSS_H */
diff --git a/include/linux/sunrpc/svcsock.h b/include/linux/sunrpc/svcsock.h
index 771baadaee9d..b7ac7fe68306 100644
--- a/include/linux/sunrpc/svcsock.h
+++ b/include/linux/sunrpc/svcsock.h
@@ -28,7 +28,7 @@ struct svc_sock {
/* private TCP part */
/* On-the-wire fragment header: */
- __be32 sk_reclen;
+ __be32 sk_marker;
/* As we receive a record, this includes the length received so
* far (including the fragment header): */
u32 sk_tcplen;
@@ -41,12 +41,12 @@ struct svc_sock {
static inline u32 svc_sock_reclen(struct svc_sock *svsk)
{
- return ntohl(svsk->sk_reclen) & RPC_FRAGMENT_SIZE_MASK;
+ return be32_to_cpu(svsk->sk_marker) & RPC_FRAGMENT_SIZE_MASK;
}
static inline u32 svc_sock_final_rec(struct svc_sock *svsk)
{
- return ntohl(svsk->sk_reclen) & RPC_LAST_STREAM_FRAGMENT;
+ return be32_to_cpu(svsk->sk_marker) & RPC_LAST_STREAM_FRAGMENT;
}
/*
diff --git a/include/linux/swap.h b/include/linux/swap.h
index 4c5974bb9ba9..5b3216ba39a9 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -313,6 +313,7 @@ struct vma_swap_readahead {
};
/* linux/mm/workingset.c */
+void workingset_age_nonresident(struct lruvec *lruvec, unsigned long nr_pages);
void *workingset_eviction(struct page *page, struct mem_cgroup *target_memcg);
void workingset_refault(struct page *page, void *shadow);
void workingset_activation(struct page *page);
diff --git a/include/linux/thermal.h b/include/linux/thermal.h
index c91b1e344d56..216185bb3014 100644
--- a/include/linux/thermal.h
+++ b/include/linux/thermal.h
@@ -32,20 +32,10 @@
/* use value, which < 0K, to indicate an invalid/uninitialized temperature */
#define THERMAL_TEMP_INVALID -274000
-/* Default Thermal Governor */
-#if defined(CONFIG_THERMAL_DEFAULT_GOV_STEP_WISE)
-#define DEFAULT_THERMAL_GOVERNOR "step_wise"
-#elif defined(CONFIG_THERMAL_DEFAULT_GOV_FAIR_SHARE)
-#define DEFAULT_THERMAL_GOVERNOR "fair_share"
-#elif defined(CONFIG_THERMAL_DEFAULT_GOV_USER_SPACE)
-#define DEFAULT_THERMAL_GOVERNOR "user_space"
-#elif defined(CONFIG_THERMAL_DEFAULT_GOV_POWER_ALLOCATOR)
-#define DEFAULT_THERMAL_GOVERNOR "power_allocator"
-#endif
-
struct thermal_zone_device;
struct thermal_cooling_device;
struct thermal_instance;
+struct thermal_attr;
enum thermal_device_mode {
THERMAL_DEVICE_DISABLED = 0,
@@ -130,11 +120,6 @@ struct thermal_cooling_device {
struct list_head node;
};
-struct thermal_attr {
- struct device_attribute attr;
- char name[THERMAL_NAME_LENGTH];
-};
-
/**
* struct thermal_zone_device - structure for a thermal zone
* @id: unique id number for each thermal zone
@@ -347,21 +332,6 @@ struct thermal_zone_of_device_ops {
int (*set_trip_temp)(void *, int, int);
};
-/**
- * struct thermal_trip - representation of a point in temperature domain
- * @np: pointer to struct device_node that this trip point was created from
- * @temperature: temperature value in miliCelsius
- * @hysteresis: relative hysteresis in miliCelsius
- * @type: trip point type
- */
-
-struct thermal_trip {
- struct device_node *np;
- int temperature;
- int hysteresis;
- enum thermal_trip_type type;
-};
-
/* Function declarations */
#ifdef CONFIG_THERMAL_OF
int thermal_zone_of_get_sensor_id(struct device_node *tz_np,
@@ -413,19 +383,7 @@ void devm_thermal_zone_of_sensor_unregister(struct device *dev,
#endif
-#if IS_ENABLED(CONFIG_THERMAL)
-static inline bool cdev_is_power_actor(struct thermal_cooling_device *cdev)
-{
- return cdev->ops->get_requested_power && cdev->ops->state2power &&
- cdev->ops->power2state;
-}
-
-int power_actor_get_max_power(struct thermal_cooling_device *,
- struct thermal_zone_device *tz, u32 *max_power);
-int power_actor_get_min_power(struct thermal_cooling_device *,
- struct thermal_zone_device *tz, u32 *min_power);
-int power_actor_set_power(struct thermal_cooling_device *,
- struct thermal_instance *, u32);
+#ifdef CONFIG_THERMAL
struct thermal_zone_device *thermal_zone_device_register(const char *, int, int,
void *, struct thermal_zone_device_ops *,
struct thermal_zone_params *, int, int);
@@ -439,7 +397,6 @@ int thermal_zone_unbind_cooling_device(struct thermal_zone_device *, int,
struct thermal_cooling_device *);
void thermal_zone_device_update(struct thermal_zone_device *,
enum thermal_notify_event);
-void thermal_zone_set_trips(struct thermal_zone_device *);
struct thermal_cooling_device *thermal_cooling_device_register(const char *,
void *, const struct thermal_cooling_device_ops *);
@@ -457,24 +414,9 @@ int thermal_zone_get_temp(struct thermal_zone_device *tz, int *temp);
int thermal_zone_get_slope(struct thermal_zone_device *tz);
int thermal_zone_get_offset(struct thermal_zone_device *tz);
-int get_tz_trend(struct thermal_zone_device *, int);
-struct thermal_instance *get_thermal_instance(struct thermal_zone_device *,
- struct thermal_cooling_device *, int);
void thermal_cdev_update(struct thermal_cooling_device *);
void thermal_notify_framework(struct thermal_zone_device *, int);
#else
-static inline bool cdev_is_power_actor(struct thermal_cooling_device *cdev)
-{ return false; }
-static inline int power_actor_get_max_power(struct thermal_cooling_device *cdev,
- struct thermal_zone_device *tz, u32 *max_power)
-{ return 0; }
-static inline int power_actor_get_min_power(struct thermal_cooling_device *cdev,
- struct thermal_zone_device *tz,
- u32 *min_power)
-{ return -ENODEV; }
-static inline int power_actor_set_power(struct thermal_cooling_device *cdev,
- struct thermal_instance *tz, u32 power)
-{ return 0; }
static inline struct thermal_zone_device *thermal_zone_device_register(
const char *type, int trips, int mask, void *devdata,
struct thermal_zone_device_ops *ops,
@@ -484,21 +426,6 @@ static inline struct thermal_zone_device *thermal_zone_device_register(
static inline void thermal_zone_device_unregister(
struct thermal_zone_device *tz)
{ }
-static inline int thermal_zone_bind_cooling_device(
- struct thermal_zone_device *tz, int trip,
- struct thermal_cooling_device *cdev,
- unsigned long upper, unsigned long lower,
- unsigned int weight)
-{ return -ENODEV; }
-static inline int thermal_zone_unbind_cooling_device(
- struct thermal_zone_device *tz, int trip,
- struct thermal_cooling_device *cdev)
-{ return -ENODEV; }
-static inline void thermal_zone_device_update(struct thermal_zone_device *tz,
- enum thermal_notify_event event)
-{ }
-static inline void thermal_zone_set_trips(struct thermal_zone_device *tz)
-{ }
static inline struct thermal_cooling_device *
thermal_cooling_device_register(char *type, void *devdata,
const struct thermal_cooling_device_ops *ops)
@@ -530,12 +457,7 @@ static inline int thermal_zone_get_slope(
static inline int thermal_zone_get_offset(
struct thermal_zone_device *tz)
{ return -ENODEV; }
-static inline int get_tz_trend(struct thermal_zone_device *tz, int trip)
-{ return -ENODEV; }
-static inline struct thermal_instance *
-get_thermal_instance(struct thermal_zone_device *tz,
- struct thermal_cooling_device *cdev, int trip)
-{ return ERR_PTR(-ENODEV); }
+
static inline void thermal_cdev_update(struct thermal_cooling_device *cdev)
{ }
static inline void thermal_notify_framework(struct thermal_zone_device *tz,
diff --git a/include/linux/tifm.h b/include/linux/tifm.h
index 299cbb8c63bb..44073d06710f 100644
--- a/include/linux/tifm.h
+++ b/include/linux/tifm.h
@@ -124,7 +124,7 @@ struct tifm_adapter {
int (*has_ms_pif)(struct tifm_adapter *fm,
struct tifm_dev *sock);
- struct tifm_dev *sockets[0];
+ struct tifm_dev *sockets[];
};
struct tifm_adapter *tifm_alloc_adapter(unsigned int num_sockets,
diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
index 9861c89f93be..0a76ddc07d59 100644
--- a/include/linux/uaccess.h
+++ b/include/linux/uaccess.h
@@ -2,9 +2,9 @@
#ifndef __LINUX_UACCESS_H__
#define __LINUX_UACCESS_H__
+#include <linux/instrumented.h>
#include <linux/sched.h>
#include <linux/thread_info.h>
-#include <linux/kasan-checks.h>
#define uaccess_kernel() segment_eq(get_fs(), KERNEL_DS)
@@ -58,7 +58,7 @@
static __always_inline __must_check unsigned long
__copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
{
- kasan_check_write(to, n);
+ instrument_copy_from_user(to, from, n);
check_object_size(to, n, false);
return raw_copy_from_user(to, from, n);
}
@@ -67,7 +67,7 @@ static __always_inline __must_check unsigned long
__copy_from_user(void *to, const void __user *from, unsigned long n)
{
might_fault();
- kasan_check_write(to, n);
+ instrument_copy_from_user(to, from, n);
check_object_size(to, n, false);
return raw_copy_from_user(to, from, n);
}
@@ -88,7 +88,7 @@ __copy_from_user(void *to, const void __user *from, unsigned long n)
static __always_inline __must_check unsigned long
__copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
{
- kasan_check_read(from, n);
+ instrument_copy_to_user(to, from, n);
check_object_size(from, n, true);
return raw_copy_to_user(to, from, n);
}
@@ -97,7 +97,7 @@ static __always_inline __must_check unsigned long
__copy_to_user(void __user *to, const void *from, unsigned long n)
{
might_fault();
- kasan_check_read(from, n);
+ instrument_copy_to_user(to, from, n);
check_object_size(from, n, true);
return raw_copy_to_user(to, from, n);
}
@@ -109,7 +109,7 @@ _copy_from_user(void *to, const void __user *from, unsigned long n)
unsigned long res = n;
might_fault();
if (likely(access_ok(from, n))) {
- kasan_check_write(to, n);
+ instrument_copy_from_user(to, from, n);
res = raw_copy_from_user(to, from, n);
}
if (unlikely(res))
@@ -127,7 +127,7 @@ _copy_to_user(void __user *to, const void *from, unsigned long n)
{
might_fault();
if (access_ok(to, n)) {
- kasan_check_read(from, n);
+ instrument_copy_to_user(to, from, n);
n = raw_copy_to_user(to, from, n);
}
return n;
@@ -301,72 +301,33 @@ copy_struct_from_user(void *dst, size_t ksize, const void __user *src,
return 0;
}
-/*
- * probe_kernel_read(): safely attempt to read from a location
- * @dst: pointer to the buffer that shall take the data
- * @src: address to read from
- * @size: size of the data chunk
- *
- * Safely read from address @src to the buffer at @dst. If a kernel fault
- * happens, handle that and return -EFAULT.
- */
-extern long probe_kernel_read(void *dst, const void *src, size_t size);
-extern long probe_kernel_read_strict(void *dst, const void *src, size_t size);
-extern long __probe_kernel_read(void *dst, const void *src, size_t size);
+bool copy_from_kernel_nofault_allowed(const void *unsafe_src, size_t size);
-/*
- * probe_user_read(): safely attempt to read from a location in user space
- * @dst: pointer to the buffer that shall take the data
- * @src: address to read from
- * @size: size of the data chunk
- *
- * Safely read from address @src to the buffer at @dst. If a kernel fault
- * happens, handle that and return -EFAULT.
- */
-extern long probe_user_read(void *dst, const void __user *src, size_t size);
-extern long __probe_user_read(void *dst, const void __user *src, size_t size);
+long copy_from_kernel_nofault(void *dst, const void *src, size_t size);
+long notrace copy_to_kernel_nofault(void *dst, const void *src, size_t size);
-/*
- * probe_kernel_write(): safely attempt to write to a location
- * @dst: address to write to
- * @src: pointer to the data that shall be written
- * @size: size of the data chunk
- *
- * Safely write to address @dst from the buffer at @src. If a kernel fault
- * happens, handle that and return -EFAULT.
- */
-extern long notrace probe_kernel_write(void *dst, const void *src, size_t size);
-extern long notrace __probe_kernel_write(void *dst, const void *src, size_t size);
+long copy_from_user_nofault(void *dst, const void __user *src, size_t size);
+long notrace copy_to_user_nofault(void __user *dst, const void *src,
+ size_t size);
-/*
- * probe_user_write(): safely attempt to write to a location in user space
- * @dst: address to write to
- * @src: pointer to the data that shall be written
- * @size: size of the data chunk
- *
- * Safely write to address @dst from the buffer at @src. If a kernel fault
- * happens, handle that and return -EFAULT.
- */
-extern long notrace probe_user_write(void __user *dst, const void *src, size_t size);
-extern long notrace __probe_user_write(void __user *dst, const void *src, size_t size);
+long strncpy_from_kernel_nofault(char *dst, const void *unsafe_addr,
+ long count);
-extern long strncpy_from_unsafe(char *dst, const void *unsafe_addr, long count);
-extern long strncpy_from_unsafe_strict(char *dst, const void *unsafe_addr,
- long count);
-extern long __strncpy_from_unsafe(char *dst, const void *unsafe_addr, long count);
-extern long strncpy_from_unsafe_user(char *dst, const void __user *unsafe_addr,
- long count);
-extern long strnlen_unsafe_user(const void __user *unsafe_addr, long count);
+long strncpy_from_user_nofault(char *dst, const void __user *unsafe_addr,
+ long count);
+long strnlen_user_nofault(const void __user *unsafe_addr, long count);
/**
- * probe_kernel_address(): safely attempt to read from a location
- * @addr: address to read from
- * @retval: read into this variable
+ * get_kernel_nofault(): safely attempt to read from a location
+ * @val: read into this variable
+ * @ptr: address to read from
*
* Returns 0 on success, or -EFAULT.
*/
-#define probe_kernel_address(addr, retval) \
- probe_kernel_read(&retval, addr, sizeof(retval))
+#define get_kernel_nofault(val, ptr) ({ \
+ const typeof(val) *__gk_ptr = (ptr); \
+ copy_from_kernel_nofault(&(val), __gk_ptr, sizeof(val));\
+})
#ifndef user_access_begin
#define user_access_begin(ptr,len) access_ok(ptr, len)
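The probe_kernel_*() helpers above are renamed to the *_nofault() family. A minimal caller sketch for the new get_kernel_nofault() macro (dump_word() is a hypothetical example, not part of the API):

static long dump_word(const unsigned long *addr)
{
	unsigned long val;

	/* returns -EFAULT instead of faulting if addr is bad */
	if (get_kernel_nofault(val, addr))
		return -EFAULT;
	pr_info("word at %px = %#lx\n", addr, val);
	return 0;
}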
diff --git a/include/linux/vdpa.h b/include/linux/vdpa.h
index 5453af87a33e..239db794357c 100644
--- a/include/linux/vdpa.h
+++ b/include/linux/vdpa.h
@@ -18,6 +18,16 @@ struct vdpa_callback {
};
/**
+ * vDPA notification area
+ * @addr: base address of the notification area
+ * @size: size of the notification area
+ */
+struct vdpa_notification_area {
+ resource_size_t addr;
+ resource_size_t size;
+};
+
+/**
* vDPA device - representation of a vDPA device
* @dev: underlying device
* @dma_dev: the actual device that is performing DMA
@@ -73,6 +83,10 @@ struct vdpa_device {
* @vdev: vdpa device
* @idx: virtqueue index
* Returns virtqueue state (last_avail_idx)
+ * @get_vq_notification: Get the notification area for a virtqueue
+ * @vdev: vdpa device
+ * @idx: virtqueue index
+ * Returns the notification area
* @get_vq_align: Get the virtqueue align requirement
* for the device
* @vdev: vdpa device
@@ -162,6 +176,8 @@ struct vdpa_config_ops {
bool (*get_vq_ready)(struct vdpa_device *vdev, u16 idx);
int (*set_vq_state)(struct vdpa_device *vdev, u16 idx, u64 state);
u64 (*get_vq_state)(struct vdpa_device *vdev, u16 idx);
+ struct vdpa_notification_area
+ (*get_vq_notification)(struct vdpa_device *vdev, u16 idx);
/* Device ops */
u32 (*get_vq_align)(struct vdpa_device *vdev);
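A parent driver would implement the new op roughly as below; struct my_vdpa and its doorbell layout are assumptions for illustration, not taken from the patch:

static struct vdpa_notification_area
my_get_vq_notification(struct vdpa_device *vdev, u16 idx)
{
	struct my_vdpa *mv = container_of(vdev, struct my_vdpa, vdpa);
	struct vdpa_notification_area area = {
		/* assumed layout: one page-aligned doorbell per virtqueue */
		.addr = mv->doorbell_base + idx * PAGE_SIZE,
		.size = PAGE_SIZE,
	};

	return area;
}

Returning the area by value keeps the op allocation-free, and page alignment lets the caller hand the range directly to an mmap() implementation.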
diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
index 48bb681e6c2a..0221f852a7e1 100644
--- a/include/linux/vmalloc.h
+++ b/include/linux/vmalloc.h
@@ -106,7 +106,6 @@ extern void *vzalloc(unsigned long size);
extern void *vmalloc_user(unsigned long size);
extern void *vmalloc_node(unsigned long size, int node);
extern void *vzalloc_node(unsigned long size, int node);
-extern void *vmalloc_exec(unsigned long size);
extern void *vmalloc_32(unsigned long size);
extern void *vmalloc_32_user(unsigned long size);
extern void *__vmalloc(unsigned long size, gfp_t gfp_mask);
diff --git a/include/linux/vringh.h b/include/linux/vringh.h
index 9e2763d7c159..59bd50f99291 100644
--- a/include/linux/vringh.h
+++ b/include/linux/vringh.h
@@ -105,9 +105,9 @@ struct vringh_kiov {
/* Helpers for userspace vrings. */
int vringh_init_user(struct vringh *vrh, u64 features,
unsigned int num, bool weak_barriers,
- struct vring_desc __user *desc,
- struct vring_avail __user *avail,
- struct vring_used __user *used);
+ vring_desc_t __user *desc,
+ vring_avail_t __user *avail,
+ vring_used_t __user *used);
static inline void vringh_iov_init(struct vringh_iov *iov,
struct iovec *iovec, unsigned num)
diff --git a/include/linux/watch_queue.h b/include/linux/watch_queue.h
new file mode 100644
index 000000000000..5e08db2adc31
--- /dev/null
+++ b/include/linux/watch_queue.h
@@ -0,0 +1,127 @@
+// SPDX-License-Identifier: GPL-2.0
+/* User-mappable watch queue
+ *
+ * Copyright (C) 2020 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * See Documentation/watch_queue.rst
+ */
+
+#ifndef _LINUX_WATCH_QUEUE_H
+#define _LINUX_WATCH_QUEUE_H
+
+#include <uapi/linux/watch_queue.h>
+#include <linux/kref.h>
+#include <linux/rcupdate.h>
+
+#ifdef CONFIG_WATCH_QUEUE
+
+struct cred;
+
+struct watch_type_filter {
+ enum watch_notification_type type;
+ __u32 subtype_filter[1]; /* Bitmask of subtypes to filter on */
+ __u32 info_filter; /* Filter on watch_notification::info */
+ __u32 info_mask; /* Mask of relevant bits in info_filter */
+};
+
+struct watch_filter {
+ union {
+ struct rcu_head rcu;
+ unsigned long type_filter[2]; /* Bitmask of accepted types */
+ };
+ u32 nr_filters; /* Number of filters */
+ struct watch_type_filter filters[];
+};
+
+struct watch_queue {
+ struct rcu_head rcu;
+ struct watch_filter __rcu *filter;
+ struct pipe_inode_info *pipe; /* The pipe we're using as a buffer */
+ struct hlist_head watches; /* Contributory watches */
+ struct page **notes; /* Preallocated notifications */
+ unsigned long *notes_bitmap; /* Allocation bitmap for notes */
+ struct kref usage; /* Object usage count */
+ spinlock_t lock;
+ unsigned int nr_notes; /* Number of notes */
+ unsigned int nr_pages; /* Number of pages in notes[] */
+ bool defunct; /* T when queues closed */
+};
+
+/*
+ * Representation of a watch on an object.
+ */
+struct watch {
+ union {
+ struct rcu_head rcu;
+ u32 info_id; /* ID to be OR'd in to info field */
+ };
+ struct watch_queue __rcu *queue; /* Queue to post events to */
+ struct hlist_node queue_node; /* Link in queue->watches */
+ struct watch_list __rcu *watch_list;
+ struct hlist_node list_node; /* Link in watch_list->watchers */
+ const struct cred *cred; /* Creds of the owner of the watch */
+ void *private; /* Private data for the watched object */
+ u64 id; /* Internal identifier */
+ struct kref usage; /* Object usage count */
+};
+
+/*
+ * List of watches on an object.
+ */
+struct watch_list {
+ struct rcu_head rcu;
+ struct hlist_head watchers;
+ void (*release_watch)(struct watch *);
+ spinlock_t lock;
+};
+
+extern void __post_watch_notification(struct watch_list *,
+ struct watch_notification *,
+ const struct cred *,
+ u64);
+extern struct watch_queue *get_watch_queue(int);
+extern void put_watch_queue(struct watch_queue *);
+extern void init_watch(struct watch *, struct watch_queue *);
+extern int add_watch_to_object(struct watch *, struct watch_list *);
+extern int remove_watch_from_object(struct watch_list *, struct watch_queue *, u64, bool);
+extern long watch_queue_set_size(struct pipe_inode_info *, unsigned int);
+extern long watch_queue_set_filter(struct pipe_inode_info *,
+ struct watch_notification_filter __user *);
+extern int watch_queue_init(struct pipe_inode_info *);
+extern void watch_queue_clear(struct watch_queue *);
+
+static inline void init_watch_list(struct watch_list *wlist,
+ void (*release_watch)(struct watch *))
+{
+ INIT_HLIST_HEAD(&wlist->watchers);
+ spin_lock_init(&wlist->lock);
+ wlist->release_watch = release_watch;
+}
+
+static inline void post_watch_notification(struct watch_list *wlist,
+ struct watch_notification *n,
+ const struct cred *cred,
+ u64 id)
+{
+ if (unlikely(wlist))
+ __post_watch_notification(wlist, n, cred, id);
+}
+
+static inline void remove_watch_list(struct watch_list *wlist, u64 id)
+{
+ if (wlist) {
+ remove_watch_from_object(wlist, NULL, id, true);
+ kfree_rcu(wlist, rcu);
+ }
+}
+
+/**
+ * watch_sizeof - Calculate the information part of the size of a watch record,
+ * given the structure size.
+ */
+#define watch_sizeof(STRUCT) (sizeof(STRUCT) << WATCH_INFO_LENGTH__SHIFT)
+
+#endif
+
+#endif /* _LINUX_WATCH_QUEUE_H */
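A sketch of how an object might publish this interface; my_object, my_release_watch() and the kfree() of watch->private are illustrative assumptions, not part of the header:

struct my_object {
	struct watch_list *watchers;
};

static void my_release_watch(struct watch *watch)
{
	kfree(watch->private);
}

static int my_object_init(struct my_object *obj)
{
	obj->watchers = kzalloc(sizeof(*obj->watchers), GFP_KERNEL);
	if (!obj->watchers)
		return -ENOMEM;
	init_watch_list(obj->watchers, my_release_watch);
	return 0;
}

static void my_object_notify(struct my_object *obj, u64 id)
{
	struct watch_notification n = {
		.type = WATCH_TYPE_META,	/* placeholder type for illustration */
		.subtype = 0,
		.info = watch_sizeof(struct watch_notification),
	};

	/* no-op when the watch_list pointer is NULL */
	post_watch_notification(obj->watchers, &n, current_cred(), id);
}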
diff --git a/include/media/videobuf2-dma-contig.h b/include/media/videobuf2-dma-contig.h
index 5604818d137e..5be313cbf7d7 100644
--- a/include/media/videobuf2-dma-contig.h
+++ b/include/media/videobuf2-dma-contig.h
@@ -25,7 +25,7 @@ vb2_dma_contig_plane_dma_addr(struct vb2_buffer *vb, unsigned int plane_no)
}
int vb2_dma_contig_set_max_seg_size(struct device *dev, unsigned int size);
-void vb2_dma_contig_clear_max_seg_size(struct device *dev);
+static inline void vb2_dma_contig_clear_max_seg_size(struct device *dev) { }
extern const struct vb2_mem_ops vb2_dma_contig_memops;
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index b58ad1a3f695..fc7e8807838d 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -5075,7 +5075,8 @@ struct cfg80211_cqm_config;
* by cfg80211 on change_interface
* @mgmt_registrations: list of registrations for management frames
* @mgmt_registrations_lock: lock for the list
- * @mgmt_registrations_update_wk: update work to defer from atomic context
+ * @mgmt_registrations_need_update: mgmt registrations were updated,
+ * need to propagate the update to the driver
* @mtx: mutex used to lock data in this struct, may be used by drivers
* and some API functions require it held
* @beacon_interval: beacon interval used on this device for transmitting
@@ -5121,7 +5122,7 @@ struct wireless_dev {
struct list_head mgmt_registrations;
spinlock_t mgmt_registrations_lock;
- struct work_struct mgmt_registrations_update_wk;
+ u8 mgmt_registrations_need_update:1;
struct mutex mtx;
diff --git a/include/net/flow_offload.h b/include/net/flow_offload.h
index 69e13c8b6b3a..6315324b9dc2 100644
--- a/include/net/flow_offload.h
+++ b/include/net/flow_offload.h
@@ -450,6 +450,7 @@ struct flow_block_indr {
struct net_device *dev;
enum flow_block_binder_type binder_type;
void *data;
+ void *cb_priv;
void (*cleanup)(struct flow_block_cb *block_cb);
};
@@ -467,6 +468,13 @@ struct flow_block_cb {
struct flow_block_cb *flow_block_cb_alloc(flow_setup_cb_t *cb,
void *cb_ident, void *cb_priv,
void (*release)(void *cb_priv));
+struct flow_block_cb *flow_indr_block_cb_alloc(flow_setup_cb_t *cb,
+ void *cb_ident, void *cb_priv,
+ void (*release)(void *cb_priv),
+ struct flow_block_offload *bo,
+ struct net_device *dev, void *data,
+ void *indr_cb_priv,
+ void (*cleanup)(struct flow_block_cb *block_cb));
void flow_block_cb_free(struct flow_block_cb *block_cb);
struct flow_block_cb *flow_block_cb_lookup(struct flow_block *block,
@@ -488,6 +496,13 @@ static inline void flow_block_cb_remove(struct flow_block_cb *block_cb,
list_move(&block_cb->list, &offload->cb_list);
}
+static inline void flow_indr_block_cb_remove(struct flow_block_cb *block_cb,
+ struct flow_block_offload *offload)
+{
+ list_del(&block_cb->indr.list);
+ list_move(&block_cb->list, &offload->cb_list);
+}
+
bool flow_block_cb_is_busy(flow_setup_cb_t *cb, void *cb_ident,
struct list_head *driver_block_list);
@@ -532,38 +547,16 @@ static inline void flow_block_init(struct flow_block *flow_block)
}
typedef int flow_indr_block_bind_cb_t(struct net_device *dev, void *cb_priv,
- enum tc_setup_type type, void *type_data);
+ enum tc_setup_type type, void *type_data,
+ void *data,
+ void (*cleanup)(struct flow_block_cb *block_cb));
int flow_indr_dev_register(flow_indr_block_bind_cb_t *cb, void *cb_priv);
void flow_indr_dev_unregister(flow_indr_block_bind_cb_t *cb, void *cb_priv,
- flow_setup_cb_t *setup_cb);
+ void (*release)(void *cb_priv));
int flow_indr_dev_setup_offload(struct net_device *dev,
enum tc_setup_type type, void *data,
struct flow_block_offload *bo,
void (*cleanup)(struct flow_block_cb *block_cb));
-typedef void flow_indr_block_cmd_t(struct net_device *dev,
- flow_indr_block_bind_cb_t *cb, void *cb_priv,
- enum flow_block_command command);
-
-int __flow_indr_block_cb_register(struct net_device *dev, void *cb_priv,
- flow_indr_block_bind_cb_t *cb,
- void *cb_ident);
-
-void __flow_indr_block_cb_unregister(struct net_device *dev,
- flow_indr_block_bind_cb_t *cb,
- void *cb_ident);
-
-int flow_indr_block_cb_register(struct net_device *dev, void *cb_priv,
- flow_indr_block_bind_cb_t *cb, void *cb_ident);
-
-void flow_indr_block_cb_unregister(struct net_device *dev,
- flow_indr_block_bind_cb_t *cb,
- void *cb_ident);
-
-void flow_indr_block_call(struct net_device *dev,
- struct flow_block_offload *bo,
- enum flow_block_command command,
- enum tc_setup_type type);
-
#endif /* _NET_FLOW_OFFLOAD_H */
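Drivers now provide a single bind callback with the extended signature; a sketch of the expected shape, with all my_* names as placeholders:

static void my_release(void *cb_priv)
{
	/* drop whatever my_indr_setup_cb attached to outstanding blocks */
}

static int my_indr_setup_cb(struct net_device *dev, void *cb_priv,
			    enum tc_setup_type type, void *type_data,
			    void *data,
			    void (*cleanup)(struct flow_block_cb *block_cb))
{
	if (type != TC_SETUP_BLOCK)
		return -EOPNOTSUPP;
	/*
	 * Allocate with flow_indr_block_cb_alloc(), which records dev,
	 * data, indr_cb_priv and cleanup in block_cb->indr so teardown
	 * can go through flow_indr_block_cb_remove().
	 */
	return 0;
}

static int __init my_driver_init(void)
{
	return flow_indr_dev_register(my_indr_setup_cb, NULL);
}

static void __exit my_driver_exit(void)
{
	flow_indr_dev_unregister(my_indr_setup_cb, NULL, my_release);
}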
diff --git a/include/net/gue.h b/include/net/gue.h
index 3a6595bfa641..e42402f180b7 100644
--- a/include/net/gue.h
+++ b/include/net/gue.h
@@ -21,7 +21,7 @@
* | |
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
*
- * C bit indicates contol message when set, data message when unset.
+ * C bit indicates control message when set, data message when unset.
* For a control message, proto/ctype is interpreted as a type of
* control message. For data messages, proto/ctype is the IP protocol
* of the next header.
diff --git a/include/net/inet_hashtables.h b/include/net/inet_hashtables.h
index ad64ba6a057f..92560974ea67 100644
--- a/include/net/inet_hashtables.h
+++ b/include/net/inet_hashtables.h
@@ -185,6 +185,12 @@ static inline spinlock_t *inet_ehash_lockp(
int inet_ehash_locks_alloc(struct inet_hashinfo *hashinfo);
+static inline void inet_hashinfo2_free_mod(struct inet_hashinfo *h)
+{
+ kfree(h->lhash2);
+ h->lhash2 = NULL;
+}
+
static inline void inet_ehash_locks_free(struct inet_hashinfo *hashinfo)
{
kvfree(hashinfo->ehash_locks);
diff --git a/include/net/neighbour.h b/include/net/neighbour.h
index e1476775769c..81ee17594c32 100644
--- a/include/net/neighbour.h
+++ b/include/net/neighbour.h
@@ -392,13 +392,12 @@ void *neigh_seq_next(struct seq_file *, void *, loff_t *);
void neigh_seq_stop(struct seq_file *, void *);
int neigh_proc_dointvec(struct ctl_table *ctl, int write,
- void __user *buffer, size_t *lenp, loff_t *ppos);
+ void *buffer, size_t *lenp, loff_t *ppos);
int neigh_proc_dointvec_jiffies(struct ctl_table *ctl, int write,
- void __user *buffer,
+ void *buffer,
size_t *lenp, loff_t *ppos);
int neigh_proc_dointvec_ms_jiffies(struct ctl_table *ctl, int write,
- void __user *buffer,
- size_t *lenp, loff_t *ppos);
+ void *buffer, size_t *lenp, loff_t *ppos);
int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p,
proc_handler *proc_handler);
diff --git a/include/net/netfilter/nf_flow_table.h b/include/net/netfilter/nf_flow_table.h
index d7338bfd7b0f..16e8b2f8d006 100644
--- a/include/net/netfilter/nf_flow_table.h
+++ b/include/net/netfilter/nf_flow_table.h
@@ -161,10 +161,51 @@ struct nf_flow_route {
struct flow_offload *flow_offload_alloc(struct nf_conn *ct);
void flow_offload_free(struct flow_offload *flow);
-int nf_flow_table_offload_add_cb(struct nf_flowtable *flow_table,
- flow_setup_cb_t *cb, void *cb_priv);
-void nf_flow_table_offload_del_cb(struct nf_flowtable *flow_table,
- flow_setup_cb_t *cb, void *cb_priv);
+static inline int
+nf_flow_table_offload_add_cb(struct nf_flowtable *flow_table,
+ flow_setup_cb_t *cb, void *cb_priv)
+{
+ struct flow_block *block = &flow_table->flow_block;
+ struct flow_block_cb *block_cb;
+ int err = 0;
+
+ down_write(&flow_table->flow_block_lock);
+ block_cb = flow_block_cb_lookup(block, cb, cb_priv);
+ if (block_cb) {
+ err = -EEXIST;
+ goto unlock;
+ }
+
+ block_cb = flow_block_cb_alloc(cb, cb_priv, cb_priv, NULL);
+ if (IS_ERR(block_cb)) {
+ err = PTR_ERR(block_cb);
+ goto unlock;
+ }
+
+ list_add_tail(&block_cb->list, &block->cb_list);
+
+unlock:
+ up_write(&flow_table->flow_block_lock);
+ return err;
+}
+
+static inline void
+nf_flow_table_offload_del_cb(struct nf_flowtable *flow_table,
+ flow_setup_cb_t *cb, void *cb_priv)
+{
+ struct flow_block *block = &flow_table->flow_block;
+ struct flow_block_cb *block_cb;
+
+ down_write(&flow_table->flow_block_lock);
+ block_cb = flow_block_cb_lookup(block, cb, cb_priv);
+ if (block_cb) {
+ list_del(&block_cb->list);
+ flow_block_cb_free(block_cb);
+ } else {
+ WARN_ON(true);
+ }
+ up_write(&flow_table->flow_block_lock);
+}
int flow_offload_route_init(struct flow_offload *flow,
const struct nf_flow_route *route);
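Because the helpers are now inline, a driver can attach to a flowtable block without a hard module dependency on nf_flow_table. Minimal pairing sketch (my_flow_setup_cb and priv are placeholders):

static int my_bind(struct nf_flowtable *ft, void *priv)
{
	/* -EEXIST if this (cb, priv) pair is already on the block list */
	return nf_flow_table_offload_add_cb(ft, my_flow_setup_cb, priv);
}

static void my_unbind(struct nf_flowtable *ft, void *priv)
{
	nf_flow_table_offload_del_cb(ft, my_flow_setup_cb, priv);
}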
diff --git a/include/net/nexthop.h b/include/net/nexthop.h
index e4b55b43e907..3a4f9e3b91a5 100644
--- a/include/net/nexthop.h
+++ b/include/net/nexthop.h
@@ -76,6 +76,7 @@ struct nh_group {
struct nh_group *spare; /* spare group for removals */
u16 num_nh;
bool mpath;
+ bool fdb_nh;
bool has_v4;
struct nh_grp_entry nh_entries[];
};
@@ -93,7 +94,6 @@ struct nexthop {
u8 protocol; /* app managing this nh */
u8 nh_flags;
bool is_group;
- bool is_fdb_nh;
refcount_t refcnt;
struct rcu_head rcu;
@@ -136,6 +136,32 @@ static inline bool nexthop_cmp(const struct nexthop *nh1,
return nh1 == nh2;
}
+static inline bool nexthop_is_fdb(const struct nexthop *nh)
+{
+ if (nh->is_group) {
+ const struct nh_group *nh_grp;
+
+ nh_grp = rcu_dereference_rtnl(nh->nh_grp);
+ return nh_grp->fdb_nh;
+ } else {
+ const struct nh_info *nhi;
+
+ nhi = rcu_dereference_rtnl(nh->nh_info);
+ return nhi->fdb_nh;
+ }
+}
+
+static inline bool nexthop_has_v4(const struct nexthop *nh)
+{
+ if (nh->is_group) {
+ struct nh_group *nh_grp;
+
+ nh_grp = rcu_dereference_rtnl(nh->nh_grp);
+ return nh_grp->has_v4;
+ }
+ return false;
+}
+
static inline bool nexthop_is_multipath(const struct nexthop *nh)
{
if (nh->is_group) {
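Both new helpers rely on rcu_dereference_rtnl(), so callers must hold either RTNL or the RCU read lock. An illustrative caller (the policy check itself is hypothetical):

static bool my_fdb_nexthop_ok(const struct nexthop *nh)
{
	ASSERT_RTNL();
	/* example policy: FDB nexthops must not carry IPv4 gateways */
	return nexthop_is_fdb(nh) && !nexthop_has_v4(nh);
}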
diff --git a/include/net/sctp/constants.h b/include/net/sctp/constants.h
index 15b4d9aec7ff..122d9e2d8dfd 100644
--- a/include/net/sctp/constants.h
+++ b/include/net/sctp/constants.h
@@ -353,11 +353,13 @@ enum {
ipv4_is_anycast_6to4(a))
/* Flags used for the bind address copy functions. */
-#define SCTP_ADDR6_ALLOWED 0x00000001 /* IPv6 address is allowed by
+#define SCTP_ADDR4_ALLOWED 0x00000001 /* IPv4 address is allowed by
local sock family */
-#define SCTP_ADDR4_PEERSUPP 0x00000002 /* IPv4 address is supported by
+#define SCTP_ADDR6_ALLOWED 0x00000002 /* IPv6 address is allowed by
+ local sock family */
+#define SCTP_ADDR4_PEERSUPP 0x00000004 /* IPv4 address is supported by
peer */
-#define SCTP_ADDR6_PEERSUPP 0x00000004 /* IPv6 address is supported by
+#define SCTP_ADDR6_PEERSUPP 0x00000008 /* IPv6 address is supported by
peer */
/* Reasons to retransmit. */
diff --git a/include/net/sock.h b/include/net/sock.h
index c53cc42b5ab9..3428619faae4 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -1848,7 +1848,6 @@ static inline int sk_rx_queue_get(const struct sock *sk)
static inline void sk_set_socket(struct sock *sk, struct socket *sock)
{
- sk_tx_queue_clear(sk);
sk->sk_socket = sock;
}
diff --git a/include/net/tc_act/tc_ct.h b/include/net/tc_act/tc_ct.h
index 79654bcb9a29..8250d6f0a462 100644
--- a/include/net/tc_act/tc_ct.h
+++ b/include/net/tc_act/tc_ct.h
@@ -66,7 +66,16 @@ static inline struct nf_flowtable *tcf_ct_ft(const struct tc_action *a)
#endif /* CONFIG_NF_CONNTRACK */
#if IS_ENABLED(CONFIG_NET_ACT_CT)
-void tcf_ct_flow_table_restore_skb(struct sk_buff *skb, unsigned long cookie);
+static inline void
+tcf_ct_flow_table_restore_skb(struct sk_buff *skb, unsigned long cookie)
+{
+ enum ip_conntrack_info ctinfo = cookie & NFCT_INFOMASK;
+ struct nf_conn *ct;
+
+ ct = (struct nf_conn *)(cookie & NFCT_PTRMASK);
+ nf_conntrack_get(&ct->ct_general);
+ nf_ct_set(skb, ct, ctinfo);
+}
#else
static inline void
tcf_ct_flow_table_restore_skb(struct sk_buff *skb, unsigned long cookie) { }
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index 094fe682f5d7..c7d213c9f9d8 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -1008,6 +1008,7 @@ struct xfrm_offload {
#define XFRM_GRO 32
#define XFRM_ESP_NO_TRAILER 64
#define XFRM_DEV_RESUME 128
+#define XFRM_XMIT 256
__u32 status;
#define CRYPTO_SUCCESS 1
diff --git a/include/sound/dmaengine_pcm.h b/include/sound/dmaengine_pcm.h
index b65220685920..8c5e38180fb0 100644
--- a/include/sound/dmaengine_pcm.h
+++ b/include/sound/dmaengine_pcm.h
@@ -161,4 +161,15 @@ int snd_dmaengine_pcm_prepare_slave_config(struct snd_pcm_substream *substream,
#define SND_DMAENGINE_PCM_DRV_NAME "snd_dmaengine_pcm"
+struct dmaengine_pcm {
+ struct dma_chan *chan[SNDRV_PCM_STREAM_LAST + 1];
+ const struct snd_dmaengine_pcm_config *config;
+ struct snd_soc_component component;
+ unsigned int flags;
+};
+
+static inline struct dmaengine_pcm *soc_component_to_pcm(struct snd_soc_component *p)
+{
+ return container_of(p, struct dmaengine_pcm, component);
+}
#endif
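With the struct now public, component callbacks can recover their dmaengine_pcm via container_of(); a sketch, where my_component_open() is illustrative:

static int my_component_open(struct snd_soc_component *component,
			     struct snd_pcm_substream *substream)
{
	struct dmaengine_pcm *pcm = soc_component_to_pcm(component);

	/* one DMA channel per stream direction */
	return snd_dmaengine_pcm_open(substream,
				      pcm->chan[substream->stream]);
}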
diff --git a/include/sound/soc.h b/include/sound/soc.h
index ef5dd28e10a9..2756f9bcac3e 100644
--- a/include/sound/soc.h
+++ b/include/sound/soc.h
@@ -444,6 +444,8 @@ int devm_snd_soc_register_component(struct device *dev,
const struct snd_soc_component_driver *component_driver,
struct snd_soc_dai_driver *dai_drv, int num_dai);
void snd_soc_unregister_component(struct device *dev);
+struct snd_soc_component *snd_soc_lookup_component_nolocked(struct device *dev,
+ const char *driver_name);
struct snd_soc_component *snd_soc_lookup_component(struct device *dev,
const char *driver_name);
@@ -1361,6 +1363,10 @@ void snd_soc_remove_pcm_runtime(struct snd_soc_card *card,
struct snd_soc_dai *snd_soc_register_dai(struct snd_soc_component *component,
struct snd_soc_dai_driver *dai_drv,
bool legacy_dai_naming);
+struct snd_soc_dai *devm_snd_soc_register_dai(struct device *dev,
+ struct snd_soc_component *component,
+ struct snd_soc_dai_driver *dai_drv,
+ bool legacy_dai_naming);
void snd_soc_unregister_dai(struct snd_soc_dai *dai);
struct snd_soc_dai *snd_soc_find_dai(
diff --git a/include/target/target_core_fabric.h b/include/target/target_core_fabric.h
index 063f133e47c2..6adf4d71acf6 100644
--- a/include/target/target_core_fabric.h
+++ b/include/target/target_core_fabric.h
@@ -150,9 +150,10 @@ void transport_deregister_session(struct se_session *);
void transport_init_se_cmd(struct se_cmd *,
const struct target_core_fabric_ops *,
- struct se_session *, u32, int, int, unsigned char *);
-sense_reason_t transport_lookup_cmd_lun(struct se_cmd *, u64);
-sense_reason_t target_setup_cmd_from_cdb(struct se_cmd *, unsigned char *);
+ struct se_session *, u32, int, int, unsigned char *, u64);
+sense_reason_t transport_lookup_cmd_lun(struct se_cmd *);
+sense_reason_t target_cmd_init_cdb(struct se_cmd *, unsigned char *);
+sense_reason_t target_cmd_parse_cdb(struct se_cmd *);
int target_submit_cmd_map_sgls(struct se_cmd *, struct se_session *,
unsigned char *, unsigned char *, u64, u32, int, int, int,
struct scatterlist *, u32, struct scatterlist *, u32,
@@ -187,7 +188,7 @@ int core_tmr_alloc_req(struct se_cmd *, void *, u8, gfp_t);
void core_tmr_release_req(struct se_tmr_req *);
int transport_generic_handle_tmr(struct se_cmd *);
void transport_generic_request_failure(struct se_cmd *, sense_reason_t);
-int transport_lookup_tmr_lun(struct se_cmd *, u64);
+int transport_lookup_tmr_lun(struct se_cmd *);
void core_allocate_nexus_loss_ua(struct se_node_acl *acl);
struct se_node_acl *core_tpg_get_initiator_node_acl(struct se_portal_group *tpg,
diff --git a/include/trace/events/block.h b/include/trace/events/block.h
index 81b43f5bdf23..93b114226af8 100644
--- a/include/trace/events/block.h
+++ b/include/trace/events/block.h
@@ -254,16 +254,15 @@ TRACE_EVENT(block_bio_bounce,
* block_bio_complete - completed all work on the block operation
* @q: queue holding the block operation
* @bio: block operation completed
- * @error: io error value
*
* This tracepoint indicates there is no further work to do on this
* block IO operation @bio.
*/
TRACE_EVENT(block_bio_complete,
- TP_PROTO(struct request_queue *q, struct bio *bio, int error),
+ TP_PROTO(struct request_queue *q, struct bio *bio),
- TP_ARGS(q, bio, error),
+ TP_ARGS(q, bio),
TP_STRUCT__entry(
__field( dev_t, dev )
@@ -277,7 +276,7 @@ TRACE_EVENT(block_bio_complete,
__entry->dev = bio_dev(bio);
__entry->sector = bio->bi_iter.bi_sector;
__entry->nr_sector = bio_sectors(bio);
- __entry->error = error;
+ __entry->error = blk_status_to_errno(bio->bi_status);
blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size);
),
diff --git a/include/trace/events/f2fs.h b/include/trace/events/f2fs.h
index 24c2557c37f0..8639ab962a71 100644
--- a/include/trace/events/f2fs.h
+++ b/include/trace/events/f2fs.h
@@ -50,6 +50,7 @@ TRACE_DEFINE_ENUM(CP_RECOVERY);
TRACE_DEFINE_ENUM(CP_DISCARD);
TRACE_DEFINE_ENUM(CP_TRIMMED);
TRACE_DEFINE_ENUM(CP_PAUSE);
+TRACE_DEFINE_ENUM(CP_RESIZE);
#define show_block_type(type) \
__print_symbolic(type, \
@@ -126,7 +127,8 @@ TRACE_DEFINE_ENUM(CP_PAUSE);
{ CP_RECOVERY, "Recovery" }, \
{ CP_DISCARD, "Discard" }, \
{ CP_PAUSE, "Pause" }, \
- { CP_TRIMMED, "Trimmed" })
+ { CP_TRIMMED, "Trimmed" }, \
+ { CP_RESIZE, "Resize" })
#define show_fsync_cpreason(type) \
__print_symbolic(type, \
@@ -154,7 +156,8 @@ TRACE_DEFINE_ENUM(CP_PAUSE);
__print_symbolic(type, \
{ COMPRESS_LZO, "LZO" }, \
{ COMPRESS_LZ4, "LZ4" }, \
- { COMPRESS_ZSTD, "ZSTD" })
+ { COMPRESS_ZSTD, "ZSTD" }, \
+ { COMPRESS_LZORLE, "LZO-RLE" })
struct f2fs_sb_info;
struct f2fs_io_info;
@@ -1812,6 +1815,82 @@ DEFINE_EVENT(f2fs_zip_end, f2fs_decompress_pages_end,
TP_ARGS(inode, cluster_idx, compressed_size, ret)
);
+TRACE_EVENT(f2fs_iostat,
+
+ TP_PROTO(struct f2fs_sb_info *sbi, unsigned long long *iostat),
+
+ TP_ARGS(sbi, iostat),
+
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(unsigned long long, app_dio)
+ __field(unsigned long long, app_bio)
+ __field(unsigned long long, app_wio)
+ __field(unsigned long long, app_mio)
+ __field(unsigned long long, fs_dio)
+ __field(unsigned long long, fs_nio)
+ __field(unsigned long long, fs_mio)
+ __field(unsigned long long, fs_gc_dio)
+ __field(unsigned long long, fs_gc_nio)
+ __field(unsigned long long, fs_cp_dio)
+ __field(unsigned long long, fs_cp_nio)
+ __field(unsigned long long, fs_cp_mio)
+ __field(unsigned long long, app_drio)
+ __field(unsigned long long, app_brio)
+ __field(unsigned long long, app_rio)
+ __field(unsigned long long, app_mrio)
+ __field(unsigned long long, fs_drio)
+ __field(unsigned long long, fs_gdrio)
+ __field(unsigned long long, fs_cdrio)
+ __field(unsigned long long, fs_nrio)
+ __field(unsigned long long, fs_mrio)
+ __field(unsigned long long, fs_discard)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = sbi->sb->s_dev;
+ __entry->app_dio = iostat[APP_DIRECT_IO];
+ __entry->app_bio = iostat[APP_BUFFERED_IO];
+ __entry->app_wio = iostat[APP_WRITE_IO];
+ __entry->app_mio = iostat[APP_MAPPED_IO];
+ __entry->fs_dio = iostat[FS_DATA_IO];
+ __entry->fs_nio = iostat[FS_NODE_IO];
+ __entry->fs_mio = iostat[FS_META_IO];
+ __entry->fs_gc_dio = iostat[FS_GC_DATA_IO];
+ __entry->fs_gc_nio = iostat[FS_GC_NODE_IO];
+ __entry->fs_cp_dio = iostat[FS_CP_DATA_IO];
+ __entry->fs_cp_nio = iostat[FS_CP_NODE_IO];
+ __entry->fs_cp_mio = iostat[FS_CP_META_IO];
+ __entry->app_drio = iostat[APP_DIRECT_READ_IO];
+ __entry->app_brio = iostat[APP_BUFFERED_READ_IO];
+ __entry->app_rio = iostat[APP_READ_IO];
+ __entry->app_mrio = iostat[APP_MAPPED_READ_IO];
+ __entry->fs_drio = iostat[FS_DATA_READ_IO];
+ __entry->fs_gdrio = iostat[FS_GDATA_READ_IO];
+ __entry->fs_cdrio = iostat[FS_CDATA_READ_IO];
+ __entry->fs_nrio = iostat[FS_NODE_READ_IO];
+ __entry->fs_mrio = iostat[FS_META_READ_IO];
+ __entry->fs_discard = iostat[FS_DISCARD];
+ ),
+
+ TP_printk("dev = (%d,%d), "
+ "app [write=%llu (direct=%llu, buffered=%llu), mapped=%llu], "
+ "fs [data=%llu, node=%llu, meta=%llu, discard=%llu], "
+ "gc [data=%llu, node=%llu], "
+ "cp [data=%llu, node=%llu, meta=%llu], "
+ "app [read=%llu (direct=%llu, buffered=%llu), mapped=%llu], "
+ "fs [data=%llu, (gc_data=%llu, compr_data=%llu), "
+ "node=%llu, meta=%llu]",
+ show_dev(__entry->dev), __entry->app_wio, __entry->app_dio,
+ __entry->app_bio, __entry->app_mio, __entry->fs_dio,
+ __entry->fs_nio, __entry->fs_mio, __entry->fs_discard,
+ __entry->fs_gc_dio, __entry->fs_gc_nio, __entry->fs_cp_dio,
+ __entry->fs_cp_nio, __entry->fs_cp_mio,
+ __entry->app_rio, __entry->app_drio, __entry->app_brio,
+ __entry->app_mrio, __entry->fs_drio, __entry->fs_gdrio,
+ __entry->fs_cdrio, __entry->fs_nrio, __entry->fs_mrio)
+);
+
#endif /* _TRACE_F2FS_H */
/* This part must be outside protection */
diff --git a/include/trace/events/rpcgss.h b/include/trace/events/rpcgss.h
index 32d88c4fb063..b9b51a4b1db1 100644
--- a/include/trace/events/rpcgss.h
+++ b/include/trace/events/rpcgss.h
@@ -17,6 +17,16 @@
** GSS-API related trace events
**/
+TRACE_DEFINE_ENUM(RPC_GSS_SVC_NONE);
+TRACE_DEFINE_ENUM(RPC_GSS_SVC_INTEGRITY);
+TRACE_DEFINE_ENUM(RPC_GSS_SVC_PRIVACY);
+
+#define show_gss_service(x) \
+ __print_symbolic(x, \
+ { RPC_GSS_SVC_NONE, "none" }, \
+ { RPC_GSS_SVC_INTEGRITY, "integrity" }, \
+ { RPC_GSS_SVC_PRIVACY, "privacy" })
+
TRACE_DEFINE_ENUM(GSS_S_BAD_MECH);
TRACE_DEFINE_ENUM(GSS_S_BAD_NAME);
TRACE_DEFINE_ENUM(GSS_S_BAD_NAMETYPE);
@@ -126,6 +136,40 @@ DEFINE_GSSAPI_EVENT(verify_mic);
DEFINE_GSSAPI_EVENT(wrap);
DEFINE_GSSAPI_EVENT(unwrap);
+DECLARE_EVENT_CLASS(rpcgss_ctx_class,
+ TP_PROTO(
+ const struct gss_cred *gc
+ ),
+
+ TP_ARGS(gc),
+
+ TP_STRUCT__entry(
+ __field(const void *, cred)
+ __field(unsigned long, service)
+ __string(principal, gc->gc_principal)
+ ),
+
+ TP_fast_assign(
+ __entry->cred = gc;
+ __entry->service = gc->gc_service;
+ __assign_str(principal, gc->gc_principal)
+ ),
+
+ TP_printk("cred=%p service=%s principal='%s'",
+ __entry->cred, show_gss_service(__entry->service),
+ __get_str(principal))
+);
+
+#define DEFINE_CTX_EVENT(name) \
+ DEFINE_EVENT(rpcgss_ctx_class, rpcgss_ctx_##name, \
+ TP_PROTO( \
+ const struct gss_cred *gc \
+ ), \
+ TP_ARGS(gc))
+
+DEFINE_CTX_EVENT(init);
+DEFINE_CTX_EVENT(destroy);
+
TRACE_EVENT(rpcgss_svc_accept_upcall,
TP_PROTO(
__be32 xid,
@@ -291,6 +335,40 @@ TRACE_EVENT(rpcgss_need_reencode,
__entry->ret ? "" : "un")
);
+TRACE_EVENT(rpcgss_update_slack,
+ TP_PROTO(
+ const struct rpc_task *task,
+ const struct rpc_auth *auth
+ ),
+
+ TP_ARGS(task, auth),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, task_id)
+ __field(unsigned int, client_id)
+ __field(u32, xid)
+ __field(const void *, auth)
+ __field(unsigned int, rslack)
+ __field(unsigned int, ralign)
+ __field(unsigned int, verfsize)
+ ),
+
+ TP_fast_assign(
+ __entry->task_id = task->tk_pid;
+ __entry->client_id = task->tk_client->cl_clid;
+ __entry->xid = be32_to_cpu(task->tk_rqstp->rq_xid);
+ __entry->auth = auth;
+ __entry->rslack = auth->au_rslack;
+ __entry->ralign = auth->au_ralign;
+ __entry->verfsize = auth->au_verfsize;
+ ),
+
+ TP_printk("task:%u@%u xid=0x%08x auth=%p rslack=%u ralign=%u verfsize=%u\n",
+ __entry->task_id, __entry->client_id, __entry->xid,
+ __entry->auth, __entry->rslack, __entry->ralign,
+ __entry->verfsize)
+);
+
DECLARE_EVENT_CLASS(rpcgss_svc_seqno_class,
TP_PROTO(
__be32 xid,
@@ -371,6 +449,7 @@ TRACE_EVENT(rpcgss_upcall_result,
TRACE_EVENT(rpcgss_context,
TP_PROTO(
+ u32 window_size,
unsigned long expiry,
unsigned long now,
unsigned int timeout,
@@ -378,12 +457,13 @@ TRACE_EVENT(rpcgss_context,
const u8 *data
),
- TP_ARGS(expiry, now, timeout, len, data),
+ TP_ARGS(window_size, expiry, now, timeout, len, data),
TP_STRUCT__entry(
__field(unsigned long, expiry)
__field(unsigned long, now)
__field(unsigned int, timeout)
+ __field(u32, window_size)
__field(int, len)
__string(acceptor, data)
),
@@ -392,13 +472,14 @@ TRACE_EVENT(rpcgss_context,
__entry->expiry = expiry;
__entry->now = now;
__entry->timeout = timeout;
+ __entry->window_size = window_size;
__entry->len = len;
strncpy(__get_str(acceptor), data, len);
),
- TP_printk("gc_expiry=%lu now=%lu timeout=%u acceptor=%.*s",
- __entry->expiry, __entry->now, __entry->timeout,
- __entry->len, __get_str(acceptor))
+ TP_printk("win_size=%u expiry=%lu now=%lu timeout=%u acceptor=%.*s",
+ __entry->window_size, __entry->expiry, __entry->now,
+ __entry->timeout, __entry->len, __get_str(acceptor))
);
diff --git a/include/trace/events/rpcrdma.h b/include/trace/events/rpcrdma.h
index 132c3c778a43..0f05a6e2b9cb 100644
--- a/include/trace/events/rpcrdma.h
+++ b/include/trace/events/rpcrdma.h
@@ -380,12 +380,8 @@ TRACE_EVENT(xprtrdma_inline_thresh,
DEFINE_CONN_EVENT(connect);
DEFINE_CONN_EVENT(disconnect);
-DEFINE_CONN_EVENT(flush_dct);
-DEFINE_RXPRT_EVENT(xprtrdma_create);
-DEFINE_RXPRT_EVENT(xprtrdma_op_destroy);
DEFINE_RXPRT_EVENT(xprtrdma_op_inject_dsc);
-DEFINE_RXPRT_EVENT(xprtrdma_op_close);
DEFINE_RXPRT_EVENT(xprtrdma_op_setport);
TRACE_EVENT(xprtrdma_op_connect,
@@ -1279,38 +1275,42 @@ TRACE_EVENT(xprtrdma_leaked_rep,
** Server-side RPC/RDMA events
**/
-DECLARE_EVENT_CLASS(svcrdma_xprt_event,
+DECLARE_EVENT_CLASS(svcrdma_accept_class,
TP_PROTO(
- const struct svc_xprt *xprt
+ const struct svcxprt_rdma *rdma,
+ long status
),
- TP_ARGS(xprt),
+ TP_ARGS(rdma, status),
TP_STRUCT__entry(
- __field(const void *, xprt)
- __string(addr, xprt->xpt_remotebuf)
+ __field(long, status)
+ __string(addr, rdma->sc_xprt.xpt_remotebuf)
),
TP_fast_assign(
- __entry->xprt = xprt;
- __assign_str(addr, xprt->xpt_remotebuf);
+ __entry->status = status;
+ __assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
),
- TP_printk("xprt=%p addr=%s",
- __entry->xprt, __get_str(addr)
+ TP_printk("addr=%s status=%ld",
+ __get_str(addr), __entry->status
)
);
-#define DEFINE_XPRT_EVENT(name) \
- DEFINE_EVENT(svcrdma_xprt_event, svcrdma_xprt_##name, \
- TP_PROTO( \
- const struct svc_xprt *xprt \
- ), \
- TP_ARGS(xprt))
+#define DEFINE_ACCEPT_EVENT(name) \
+ DEFINE_EVENT(svcrdma_accept_class, svcrdma_##name##_err, \
+ TP_PROTO( \
+ const struct svcxprt_rdma *rdma, \
+ long status \
+ ), \
+ TP_ARGS(rdma, status))
-DEFINE_XPRT_EVENT(accept);
-DEFINE_XPRT_EVENT(fail);
-DEFINE_XPRT_EVENT(free);
+DEFINE_ACCEPT_EVENT(pd);
+DEFINE_ACCEPT_EVENT(qp);
+DEFINE_ACCEPT_EVENT(fabric);
+DEFINE_ACCEPT_EVENT(initdepth);
+DEFINE_ACCEPT_EVENT(accept);
TRACE_DEFINE_ENUM(RDMA_MSG);
TRACE_DEFINE_ENUM(RDMA_NOMSG);
@@ -1355,7 +1355,7 @@ TRACE_EVENT(svcrdma_decode_rqst,
show_rpcrdma_proc(__entry->proc), __entry->hdrlen)
);
-TRACE_EVENT(svcrdma_decode_short,
+TRACE_EVENT(svcrdma_decode_short_err,
TP_PROTO(
unsigned int hdrlen
),
@@ -1399,7 +1399,8 @@ DECLARE_EVENT_CLASS(svcrdma_badreq_event,
);
#define DEFINE_BADREQ_EVENT(name) \
- DEFINE_EVENT(svcrdma_badreq_event, svcrdma_decode_##name,\
+ DEFINE_EVENT(svcrdma_badreq_event, \
+ svcrdma_decode_##name##_err, \
TP_PROTO( \
__be32 *p \
), \
@@ -1583,28 +1584,117 @@ DECLARE_EVENT_CLASS(svcrdma_dma_map_class,
DEFINE_SVC_DMA_EVENT(dma_map_page);
DEFINE_SVC_DMA_EVENT(dma_unmap_page);
-TRACE_EVENT(svcrdma_dma_map_rwctx,
+TRACE_EVENT(svcrdma_dma_map_rw_err,
TP_PROTO(
const struct svcxprt_rdma *rdma,
+ unsigned int nents,
int status
),
- TP_ARGS(rdma, status),
+ TP_ARGS(rdma, nents, status),
TP_STRUCT__entry(
__field(int, status)
+ __field(unsigned int, nents)
__string(device, rdma->sc_cm_id->device->name)
__string(addr, rdma->sc_xprt.xpt_remotebuf)
),
TP_fast_assign(
__entry->status = status;
+ __entry->nents = nents;
+ __assign_str(device, rdma->sc_cm_id->device->name);
+ __assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
+ ),
+
+ TP_printk("addr=%s device=%s nents=%u status=%d",
+ __get_str(addr), __get_str(device), __entry->nents,
+ __entry->status
+ )
+);
+
+TRACE_EVENT(svcrdma_no_rwctx_err,
+ TP_PROTO(
+ const struct svcxprt_rdma *rdma,
+ unsigned int num_sges
+ ),
+
+ TP_ARGS(rdma, num_sges),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, num_sges)
+ __string(device, rdma->sc_cm_id->device->name)
+ __string(addr, rdma->sc_xprt.xpt_remotebuf)
+ ),
+
+ TP_fast_assign(
+ __entry->num_sges = num_sges;
+ __assign_str(device, rdma->sc_cm_id->device->name);
+ __assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
+ ),
+
+ TP_printk("addr=%s device=%s num_sges=%d",
+ __get_str(addr), __get_str(device), __entry->num_sges
+ )
+);
+
+TRACE_EVENT(svcrdma_page_overrun_err,
+ TP_PROTO(
+ const struct svcxprt_rdma *rdma,
+ const struct svc_rqst *rqst,
+ unsigned int pageno
+ ),
+
+ TP_ARGS(rdma, rqst, pageno),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, pageno)
+ __field(u32, xid)
+ __string(device, rdma->sc_cm_id->device->name)
+ __string(addr, rdma->sc_xprt.xpt_remotebuf)
+ ),
+
+ TP_fast_assign(
+ __entry->pageno = pageno;
+ __entry->xid = __be32_to_cpu(rqst->rq_xid);
+ __assign_str(device, rdma->sc_cm_id->device->name);
+ __assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
+ ),
+
+ TP_printk("addr=%s device=%s xid=0x%08x pageno=%u", __get_str(addr),
+ __get_str(device), __entry->xid, __entry->pageno
+ )
+);
+
+TRACE_EVENT(svcrdma_small_wrch_err,
+ TP_PROTO(
+ const struct svcxprt_rdma *rdma,
+ unsigned int remaining,
+ unsigned int seg_no,
+ unsigned int num_segs
+ ),
+
+ TP_ARGS(rdma, remaining, seg_no, num_segs),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, remaining)
+ __field(unsigned int, seg_no)
+ __field(unsigned int, num_segs)
+ __string(device, rdma->sc_cm_id->device->name)
+ __string(addr, rdma->sc_xprt.xpt_remotebuf)
+ ),
+
+ TP_fast_assign(
+ __entry->remaining = remaining;
+ __entry->seg_no = seg_no;
+ __entry->num_segs = num_segs;
__assign_str(device, rdma->sc_cm_id->device->name);
__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
),
- TP_printk("addr=%s device=%s status=%d",
- __get_str(addr), __get_str(device), __entry->status
+ TP_printk("addr=%s device=%s remaining=%u seg_no=%u num_segs=%u",
+ __get_str(addr), __get_str(device), __entry->remaining,
+ __entry->seg_no, __entry->num_segs
)
);
diff --git a/include/trace/events/rxrpc.h b/include/trace/events/rxrpc.h
index ba9efdc848f9..059b6e45a028 100644
--- a/include/trace/events/rxrpc.h
+++ b/include/trace/events/rxrpc.h
@@ -400,7 +400,7 @@ enum rxrpc_tx_point {
EM(rxrpc_cong_begin_retransmission, " Retrans") \
EM(rxrpc_cong_cleared_nacks, " Cleared") \
EM(rxrpc_cong_new_low_nack, " NewLowN") \
- EM(rxrpc_cong_no_change, "") \
+ EM(rxrpc_cong_no_change, " -") \
EM(rxrpc_cong_progress, " Progres") \
EM(rxrpc_cong_retransmit_again, " ReTxAgn") \
EM(rxrpc_cong_rtt_window_end, " RttWinE") \
diff --git a/include/trace/events/sunrpc.h b/include/trace/events/sunrpc.h
index ffd2215950dc..6a12935b8b14 100644
--- a/include/trace/events/sunrpc.h
+++ b/include/trace/events/sunrpc.h
@@ -14,14 +14,50 @@
#include <linux/net.h>
#include <linux/tracepoint.h>
-DECLARE_EVENT_CLASS(xdr_buf_class,
+TRACE_DEFINE_ENUM(SOCK_STREAM);
+TRACE_DEFINE_ENUM(SOCK_DGRAM);
+TRACE_DEFINE_ENUM(SOCK_RAW);
+TRACE_DEFINE_ENUM(SOCK_RDM);
+TRACE_DEFINE_ENUM(SOCK_SEQPACKET);
+TRACE_DEFINE_ENUM(SOCK_DCCP);
+TRACE_DEFINE_ENUM(SOCK_PACKET);
+
+#define show_socket_type(type) \
+ __print_symbolic(type, \
+ { SOCK_STREAM, "STREAM" }, \
+ { SOCK_DGRAM, "DGRAM" }, \
+ { SOCK_RAW, "RAW" }, \
+ { SOCK_RDM, "RDM" }, \
+ { SOCK_SEQPACKET, "SEQPACKET" }, \
+ { SOCK_DCCP, "DCCP" }, \
+ { SOCK_PACKET, "PACKET" })
+
+/* This list is known to be incomplete, add new enums as needed. */
+TRACE_DEFINE_ENUM(AF_UNSPEC);
+TRACE_DEFINE_ENUM(AF_UNIX);
+TRACE_DEFINE_ENUM(AF_LOCAL);
+TRACE_DEFINE_ENUM(AF_INET);
+TRACE_DEFINE_ENUM(AF_INET6);
+
+#define rpc_show_address_family(family) \
+ __print_symbolic(family, \
+ { AF_UNSPEC, "AF_UNSPEC" }, \
+ { AF_UNIX, "AF_UNIX" }, \
+ { AF_LOCAL, "AF_LOCAL" }, \
+ { AF_INET, "AF_INET" }, \
+ { AF_INET6, "AF_INET6" })
+
+DECLARE_EVENT_CLASS(rpc_xdr_buf_class,
TP_PROTO(
+ const struct rpc_task *task,
const struct xdr_buf *xdr
),
- TP_ARGS(xdr),
+ TP_ARGS(task, xdr),
TP_STRUCT__entry(
+ __field(unsigned int, task_id)
+ __field(unsigned int, client_id)
__field(const void *, head_base)
__field(size_t, head_len)
__field(const void *, tail_base)
@@ -31,6 +67,8 @@ DECLARE_EVENT_CLASS(xdr_buf_class,
),
TP_fast_assign(
+ __entry->task_id = task->tk_pid;
+ __entry->client_id = task->tk_client->cl_clid;
__entry->head_base = xdr->head[0].iov_base;
__entry->head_len = xdr->head[0].iov_len;
__entry->tail_base = xdr->tail[0].iov_base;
@@ -39,23 +77,137 @@ DECLARE_EVENT_CLASS(xdr_buf_class,
__entry->msg_len = xdr->len;
),
- TP_printk("head=[%p,%zu] page=%u tail=[%p,%zu] len=%u",
+ TP_printk("task:%u@%u head=[%p,%zu] page=%u tail=[%p,%zu] len=%u",
+ __entry->task_id, __entry->client_id,
__entry->head_base, __entry->head_len, __entry->page_len,
__entry->tail_base, __entry->tail_len, __entry->msg_len
)
);
-#define DEFINE_XDRBUF_EVENT(name) \
- DEFINE_EVENT(xdr_buf_class, name, \
+#define DEFINE_RPCXDRBUF_EVENT(name) \
+ DEFINE_EVENT(rpc_xdr_buf_class, \
+ rpc_xdr_##name, \
TP_PROTO( \
+ const struct rpc_task *task, \
const struct xdr_buf *xdr \
), \
- TP_ARGS(xdr))
+ TP_ARGS(task, xdr))
+
+DEFINE_RPCXDRBUF_EVENT(sendto);
+DEFINE_RPCXDRBUF_EVENT(recvfrom);
+DEFINE_RPCXDRBUF_EVENT(reply_pages);
+
+
+DECLARE_EVENT_CLASS(rpc_clnt_class,
+ TP_PROTO(
+ const struct rpc_clnt *clnt
+ ),
+
+ TP_ARGS(clnt),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, client_id)
+ ),
+
+ TP_fast_assign(
+ __entry->client_id = clnt->cl_clid;
+ ),
+
+ TP_printk("clid=%u", __entry->client_id)
+);
+
+#define DEFINE_RPC_CLNT_EVENT(name) \
+ DEFINE_EVENT(rpc_clnt_class, \
+ rpc_clnt_##name, \
+ TP_PROTO( \
+ const struct rpc_clnt *clnt \
+ ), \
+ TP_ARGS(clnt))
+
+DEFINE_RPC_CLNT_EVENT(free);
+DEFINE_RPC_CLNT_EVENT(killall);
+DEFINE_RPC_CLNT_EVENT(shutdown);
+DEFINE_RPC_CLNT_EVENT(release);
+DEFINE_RPC_CLNT_EVENT(replace_xprt);
+DEFINE_RPC_CLNT_EVENT(replace_xprt_err);
+
+TRACE_EVENT(rpc_clnt_new,
+ TP_PROTO(
+ const struct rpc_clnt *clnt,
+ const struct rpc_xprt *xprt,
+ const char *program,
+ const char *server
+ ),
+
+ TP_ARGS(clnt, xprt, program, server),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, client_id)
+ __string(addr, xprt->address_strings[RPC_DISPLAY_ADDR])
+ __string(port, xprt->address_strings[RPC_DISPLAY_PORT])
+ __string(program, program)
+ __string(server, server)
+ ),
+
+ TP_fast_assign(
+ __entry->client_id = clnt->cl_clid;
+ __assign_str(addr, xprt->address_strings[RPC_DISPLAY_ADDR]);
+ __assign_str(port, xprt->address_strings[RPC_DISPLAY_PORT]);
+ __assign_str(program, program)
+ __assign_str(server, server)
+ ),
+
+ TP_printk("client=%u peer=[%s]:%s program=%s server=%s",
+ __entry->client_id, __get_str(addr), __get_str(port),
+ __get_str(program), __get_str(server))
+);
+
+TRACE_EVENT(rpc_clnt_new_err,
+ TP_PROTO(
+ const char *program,
+ const char *server,
+ int error
+ ),
+
+ TP_ARGS(program, server, error),
+
+ TP_STRUCT__entry(
+ __field(int, error)
+ __string(program, program)
+ __string(server, server)
+ ),
+
+ TP_fast_assign(
+ __entry->error = error;
+ __assign_str(program, program)
+ __assign_str(server, server)
+ ),
+
+ TP_printk("program=%s server=%s error=%d",
+ __get_str(program), __get_str(server), __entry->error)
+);
+
+TRACE_EVENT(rpc_clnt_clone_err,
+ TP_PROTO(
+ const struct rpc_clnt *clnt,
+ int error
+ ),
+
+ TP_ARGS(clnt, error),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, client_id)
+ __field(int, error)
+ ),
+
+ TP_fast_assign(
+ __entry->client_id = clnt->cl_clid;
+ __entry->error = error;
+ ),
+
+ TP_printk("client=%u error=%d", __entry->client_id, __entry->error)
+);
-DEFINE_XDRBUF_EVENT(xprt_sendto);
-DEFINE_XDRBUF_EVENT(xprt_recvfrom);
-DEFINE_XDRBUF_EVENT(svc_recvfrom);
-DEFINE_XDRBUF_EVENT(svc_sendto);
TRACE_DEFINE_ENUM(RPC_AUTH_OK);
TRACE_DEFINE_ENUM(RPC_AUTH_BADCRED);
@@ -142,29 +294,35 @@ TRACE_EVENT(rpc_request,
TRACE_DEFINE_ENUM(RPC_TASK_ASYNC);
TRACE_DEFINE_ENUM(RPC_TASK_SWAPPER);
+TRACE_DEFINE_ENUM(RPC_TASK_NULLCREDS);
TRACE_DEFINE_ENUM(RPC_CALL_MAJORSEEN);
TRACE_DEFINE_ENUM(RPC_TASK_ROOTCREDS);
TRACE_DEFINE_ENUM(RPC_TASK_DYNAMIC);
+TRACE_DEFINE_ENUM(RPC_TASK_NO_ROUND_ROBIN);
TRACE_DEFINE_ENUM(RPC_TASK_SOFT);
TRACE_DEFINE_ENUM(RPC_TASK_SOFTCONN);
TRACE_DEFINE_ENUM(RPC_TASK_SENT);
TRACE_DEFINE_ENUM(RPC_TASK_TIMEOUT);
TRACE_DEFINE_ENUM(RPC_TASK_NOCONNECT);
TRACE_DEFINE_ENUM(RPC_TASK_NO_RETRANS_TIMEOUT);
+TRACE_DEFINE_ENUM(RPC_TASK_CRED_NOREF);
#define rpc_show_task_flags(flags) \
__print_flags(flags, "|", \
{ RPC_TASK_ASYNC, "ASYNC" }, \
{ RPC_TASK_SWAPPER, "SWAPPER" }, \
+ { RPC_TASK_NULLCREDS, "NULLCREDS" }, \
{ RPC_CALL_MAJORSEEN, "MAJORSEEN" }, \
{ RPC_TASK_ROOTCREDS, "ROOTCREDS" }, \
{ RPC_TASK_DYNAMIC, "DYNAMIC" }, \
+ { RPC_TASK_NO_ROUND_ROBIN, "NO_ROUND_ROBIN" }, \
{ RPC_TASK_SOFT, "SOFT" }, \
{ RPC_TASK_SOFTCONN, "SOFTCONN" }, \
{ RPC_TASK_SENT, "SENT" }, \
{ RPC_TASK_TIMEOUT, "TIMEOUT" }, \
{ RPC_TASK_NOCONNECT, "NOCONNECT" }, \
- { RPC_TASK_NO_RETRANS_TIMEOUT, "NORTO" })
+ { RPC_TASK_NO_RETRANS_TIMEOUT, "NORTO" }, \
+ { RPC_TASK_CRED_NOREF, "CRED_NOREF" })
TRACE_DEFINE_ENUM(RPC_TASK_RUNNING);
TRACE_DEFINE_ENUM(RPC_TASK_QUEUED);
@@ -359,6 +517,34 @@ DEFINE_RPC_REPLY_EVENT(stale_creds);
DEFINE_RPC_REPLY_EVENT(bad_creds);
DEFINE_RPC_REPLY_EVENT(auth_tooweak);
+TRACE_EVENT(rpc_call_rpcerror,
+ TP_PROTO(
+ const struct rpc_task *task,
+ int tk_status,
+ int rpc_status
+ ),
+
+ TP_ARGS(task, tk_status, rpc_status),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, task_id)
+ __field(unsigned int, client_id)
+ __field(int, tk_status)
+ __field(int, rpc_status)
+ ),
+
+ TP_fast_assign(
+ __entry->client_id = task->tk_client->cl_clid;
+ __entry->task_id = task->tk_pid;
+ __entry->tk_status = tk_status;
+ __entry->rpc_status = rpc_status;
+ ),
+
+ TP_printk("task:%u@%u tk_status=%d rpc_status=%d",
+ __entry->task_id, __entry->client_id,
+ __entry->tk_status, __entry->rpc_status)
+);
+
TRACE_EVENT(rpc_stats_latency,
TP_PROTO(
@@ -526,43 +712,6 @@ TRACE_EVENT(rpc_xdr_alignment,
)
);
-TRACE_EVENT(rpc_reply_pages,
- TP_PROTO(
- const struct rpc_rqst *req
- ),
-
- TP_ARGS(req),
-
- TP_STRUCT__entry(
- __field(unsigned int, task_id)
- __field(unsigned int, client_id)
- __field(const void *, head_base)
- __field(size_t, head_len)
- __field(const void *, tail_base)
- __field(size_t, tail_len)
- __field(unsigned int, page_len)
- ),
-
- TP_fast_assign(
- __entry->task_id = req->rq_task->tk_pid;
- __entry->client_id = req->rq_task->tk_client->cl_clid;
-
- __entry->head_base = req->rq_rcv_buf.head[0].iov_base;
- __entry->head_len = req->rq_rcv_buf.head[0].iov_len;
- __entry->page_len = req->rq_rcv_buf.page_len;
- __entry->tail_base = req->rq_rcv_buf.tail[0].iov_base;
- __entry->tail_len = req->rq_rcv_buf.tail[0].iov_len;
- ),
-
- TP_printk(
- "task:%u@%u xdr=[%p,%zu]/%u/[%p,%zu]\n",
- __entry->task_id, __entry->client_id,
- __entry->head_base, __entry->head_len,
- __entry->page_len,
- __entry->tail_base, __entry->tail_len
- )
-);
-
/*
* First define the enums in the below macros to be exported to userspace
* via TRACE_DEFINE_ENUM().
@@ -575,9 +724,9 @@ TRACE_EVENT(rpc_reply_pages,
#define RPC_SHOW_SOCKET \
EM( SS_FREE, "FREE" ) \
EM( SS_UNCONNECTED, "UNCONNECTED" ) \
- EM( SS_CONNECTING, "CONNECTING," ) \
- EM( SS_CONNECTED, "CONNECTED," ) \
- EMe(SS_DISCONNECTING, "DISCONNECTING" )
+ EM( SS_CONNECTING, "CONNECTING" ) \
+ EM( SS_CONNECTED, "CONNECTED" ) \
+ EMe( SS_DISCONNECTING, "DISCONNECTING" )
#define rpc_show_socket_state(state) \
__print_symbolic(state, RPC_SHOW_SOCKET)
@@ -719,6 +868,69 @@ DEFINE_RPC_SOCKET_EVENT_DONE(rpc_socket_reset_connection);
DEFINE_RPC_SOCKET_EVENT(rpc_socket_close);
DEFINE_RPC_SOCKET_EVENT(rpc_socket_shutdown);
+TRACE_DEFINE_ENUM(XPRT_LOCKED);
+TRACE_DEFINE_ENUM(XPRT_CONNECTED);
+TRACE_DEFINE_ENUM(XPRT_CONNECTING);
+TRACE_DEFINE_ENUM(XPRT_CLOSE_WAIT);
+TRACE_DEFINE_ENUM(XPRT_BOUND);
+TRACE_DEFINE_ENUM(XPRT_BINDING);
+TRACE_DEFINE_ENUM(XPRT_CLOSING);
+TRACE_DEFINE_ENUM(XPRT_CONGESTED);
+TRACE_DEFINE_ENUM(XPRT_CWND_WAIT);
+TRACE_DEFINE_ENUM(XPRT_WRITE_SPACE);
+
+#define rpc_show_xprt_state(x) \
+ __print_flags(x, "|", \
+ { (1UL << XPRT_LOCKED), "LOCKED"}, \
+ { (1UL << XPRT_CONNECTED), "CONNECTED"}, \
+ { (1UL << XPRT_CONNECTING), "CONNECTING"}, \
+ { (1UL << XPRT_CLOSE_WAIT), "CLOSE_WAIT"}, \
+ { (1UL << XPRT_BOUND), "BOUND"}, \
+ { (1UL << XPRT_BINDING), "BINDING"}, \
+ { (1UL << XPRT_CLOSING), "CLOSING"}, \
+ { (1UL << XPRT_CONGESTED), "CONGESTED"}, \
+ { (1UL << XPRT_CWND_WAIT), "CWND_WAIT"}, \
+ { (1UL << XPRT_WRITE_SPACE), "WRITE_SPACE"})
+
+DECLARE_EVENT_CLASS(rpc_xprt_lifetime_class,
+ TP_PROTO(
+ const struct rpc_xprt *xprt
+ ),
+
+ TP_ARGS(xprt),
+
+ TP_STRUCT__entry(
+ __field(unsigned long, state)
+ __string(addr, xprt->address_strings[RPC_DISPLAY_ADDR])
+ __string(port, xprt->address_strings[RPC_DISPLAY_PORT])
+ ),
+
+ TP_fast_assign(
+ __entry->state = xprt->state;
+ __assign_str(addr, xprt->address_strings[RPC_DISPLAY_ADDR]);
+ __assign_str(port, xprt->address_strings[RPC_DISPLAY_PORT]);
+ ),
+
+ TP_printk("peer=[%s]:%s state=%s",
+ __get_str(addr), __get_str(port),
+ rpc_show_xprt_state(__entry->state))
+);
+
+#define DEFINE_RPC_XPRT_LIFETIME_EVENT(name) \
+ DEFINE_EVENT(rpc_xprt_lifetime_class, \
+ xprt_##name, \
+ TP_PROTO( \
+ const struct rpc_xprt *xprt \
+ ), \
+ TP_ARGS(xprt))
+
+DEFINE_RPC_XPRT_LIFETIME_EVENT(create);
+DEFINE_RPC_XPRT_LIFETIME_EVENT(disconnect_auto);
+DEFINE_RPC_XPRT_LIFETIME_EVENT(disconnect_done);
+DEFINE_RPC_XPRT_LIFETIME_EVENT(disconnect_force);
+DEFINE_RPC_XPRT_LIFETIME_EVENT(disconnect_cleanup);
+DEFINE_RPC_XPRT_LIFETIME_EVENT(destroy);
+
DECLARE_EVENT_CLASS(rpc_xprt_event,
TP_PROTO(
const struct rpc_xprt *xprt,
@@ -990,6 +1202,54 @@ TRACE_EVENT(xs_stream_read_request,
__entry->copied, __entry->reclen, __entry->offset)
);
+
+DECLARE_EVENT_CLASS(svc_xdr_buf_class,
+ TP_PROTO(
+ const struct svc_rqst *rqst,
+ const struct xdr_buf *xdr
+ ),
+
+ TP_ARGS(rqst, xdr),
+
+ TP_STRUCT__entry(
+ __field(u32, xid)
+ __field(const void *, head_base)
+ __field(size_t, head_len)
+ __field(const void *, tail_base)
+ __field(size_t, tail_len)
+ __field(unsigned int, page_len)
+ __field(unsigned int, msg_len)
+ ),
+
+ TP_fast_assign(
+ __entry->xid = be32_to_cpu(rqst->rq_xid);
+ __entry->head_base = xdr->head[0].iov_base;
+ __entry->head_len = xdr->head[0].iov_len;
+ __entry->tail_base = xdr->tail[0].iov_base;
+ __entry->tail_len = xdr->tail[0].iov_len;
+ __entry->page_len = xdr->page_len;
+ __entry->msg_len = xdr->len;
+ ),
+
+ TP_printk("xid=0x%08x head=[%p,%zu] page=%u tail=[%p,%zu] len=%u",
+ __entry->xid,
+ __entry->head_base, __entry->head_len, __entry->page_len,
+ __entry->tail_base, __entry->tail_len, __entry->msg_len
+ )
+);
+
+#define DEFINE_SVCXDRBUF_EVENT(name) \
+ DEFINE_EVENT(svc_xdr_buf_class, \
+ svc_xdr_##name, \
+ TP_PROTO( \
+ const struct svc_rqst *rqst, \
+ const struct xdr_buf *xdr \
+ ), \
+ TP_ARGS(rqst, xdr))
+
+DEFINE_SVCXDRBUF_EVENT(recvfrom);
+DEFINE_SVCXDRBUF_EVENT(sendto);
+
#define show_rqstp_flags(flags) \
__print_flags(flags, "|", \
{ (1UL << RQ_SECURE), "RQ_SECURE"}, \
@@ -1024,6 +1284,17 @@ TRACE_EVENT(svc_recv,
show_rqstp_flags(__entry->flags))
);
+TRACE_DEFINE_ENUM(SVC_GARBAGE);
+TRACE_DEFINE_ENUM(SVC_SYSERR);
+TRACE_DEFINE_ENUM(SVC_VALID);
+TRACE_DEFINE_ENUM(SVC_NEGATIVE);
+TRACE_DEFINE_ENUM(SVC_OK);
+TRACE_DEFINE_ENUM(SVC_DROP);
+TRACE_DEFINE_ENUM(SVC_CLOSE);
+TRACE_DEFINE_ENUM(SVC_DENIED);
+TRACE_DEFINE_ENUM(SVC_PENDING);
+TRACE_DEFINE_ENUM(SVC_COMPLETE);
+
#define svc_show_status(status) \
__print_symbolic(status, \
{ SVC_GARBAGE, "SVC_GARBAGE" }, \
@@ -1167,28 +1438,54 @@ DEFINE_EVENT(svc_rqst_status, svc_send,
{ (1UL << XPT_KILL_TEMP), "XPT_KILL_TEMP"}, \
{ (1UL << XPT_CONG_CTRL), "XPT_CONG_CTRL"})
+TRACE_EVENT(svc_xprt_create_err,
+ TP_PROTO(
+ const char *program,
+ const char *protocol,
+ struct sockaddr *sap,
+ const struct svc_xprt *xprt
+ ),
+
+ TP_ARGS(program, protocol, sap, xprt),
+
+ TP_STRUCT__entry(
+ __field(long, error)
+ __string(program, program)
+ __string(protocol, protocol)
+ __array(unsigned char, addr, sizeof(struct sockaddr_in6))
+ ),
+
+ TP_fast_assign(
+ __entry->error = PTR_ERR(xprt);
+ __assign_str(program, program);
+ __assign_str(protocol, protocol);
+ memcpy(__entry->addr, sap, sizeof(__entry->addr));
+ ),
+
+ TP_printk("addr=%pISpc program=%s protocol=%s error=%ld",
+ __entry->addr, __get_str(program), __get_str(protocol),
+ __entry->error)
+);
+
TRACE_EVENT(svc_xprt_do_enqueue,
TP_PROTO(struct svc_xprt *xprt, struct svc_rqst *rqst),
TP_ARGS(xprt, rqst),
TP_STRUCT__entry(
- __field(struct svc_xprt *, xprt)
__field(int, pid)
__field(unsigned long, flags)
__string(addr, xprt->xpt_remotebuf)
),
TP_fast_assign(
- __entry->xprt = xprt;
__entry->pid = rqst? rqst->rq_task->pid : 0;
__entry->flags = xprt->xpt_flags;
__assign_str(addr, xprt->xpt_remotebuf);
),
- TP_printk("xprt=%p addr=%s pid=%d flags=%s",
- __entry->xprt, __get_str(addr),
- __entry->pid, show_svc_xprt_flags(__entry->flags))
+ TP_printk("addr=%s pid=%d flags=%s", __get_str(addr),
+ __entry->pid, show_svc_xprt_flags(__entry->flags))
);
DECLARE_EVENT_CLASS(svc_xprt_event,
@@ -1197,25 +1494,55 @@ DECLARE_EVENT_CLASS(svc_xprt_event,
TP_ARGS(xprt),
TP_STRUCT__entry(
- __field(struct svc_xprt *, xprt)
__field(unsigned long, flags)
__string(addr, xprt->xpt_remotebuf)
),
TP_fast_assign(
- __entry->xprt = xprt;
__entry->flags = xprt->xpt_flags;
__assign_str(addr, xprt->xpt_remotebuf);
),
- TP_printk("xprt=%p addr=%s flags=%s",
- __entry->xprt, __get_str(addr),
- show_svc_xprt_flags(__entry->flags))
+ TP_printk("addr=%s flags=%s", __get_str(addr),
+ show_svc_xprt_flags(__entry->flags))
);
-DEFINE_EVENT(svc_xprt_event, svc_xprt_no_write_space,
- TP_PROTO(struct svc_xprt *xprt),
- TP_ARGS(xprt));
+#define DEFINE_SVC_XPRT_EVENT(name) \
+ DEFINE_EVENT(svc_xprt_event, svc_xprt_##name, \
+ TP_PROTO( \
+ struct svc_xprt *xprt \
+ ), \
+ TP_ARGS(xprt))
+
+DEFINE_SVC_XPRT_EVENT(no_write_space);
+DEFINE_SVC_XPRT_EVENT(close);
+DEFINE_SVC_XPRT_EVENT(detach);
+DEFINE_SVC_XPRT_EVENT(free);
+
+TRACE_EVENT(svc_xprt_accept,
+ TP_PROTO(
+ const struct svc_xprt *xprt,
+ const char *service
+ ),
+
+ TP_ARGS(xprt, service),
+
+ TP_STRUCT__entry(
+ __string(addr, xprt->xpt_remotebuf)
+ __string(protocol, xprt->xpt_class->xcl_name)
+ __string(service, service)
+ ),
+
+ TP_fast_assign(
+ __assign_str(addr, xprt->xpt_remotebuf);
+		__assign_str(protocol, xprt->xpt_class->xcl_name);
+ __assign_str(service, service);
+ ),
+
+ TP_printk("addr=%s protocol=%s service=%s",
+ __get_str(addr), __get_str(protocol), __get_str(service)
+ )
+);
TRACE_EVENT(svc_xprt_dequeue,
TP_PROTO(struct svc_rqst *rqst),
@@ -1223,24 +1550,20 @@ TRACE_EVENT(svc_xprt_dequeue,
TP_ARGS(rqst),
TP_STRUCT__entry(
- __field(struct svc_xprt *, xprt)
__field(unsigned long, flags)
__field(unsigned long, wakeup)
__string(addr, rqst->rq_xprt->xpt_remotebuf)
),
TP_fast_assign(
- __entry->xprt = rqst->rq_xprt;
__entry->flags = rqst->rq_xprt->xpt_flags;
__entry->wakeup = ktime_to_us(ktime_sub(ktime_get(),
rqst->rq_qtime));
__assign_str(addr, rqst->rq_xprt->xpt_remotebuf);
),
- TP_printk("xprt=%p addr=%s flags=%s wakeup-us=%lu",
- __entry->xprt, __get_str(addr),
- show_svc_xprt_flags(__entry->flags),
- __entry->wakeup)
+ TP_printk("addr=%s flags=%s wakeup-us=%lu", __get_str(addr),
+ show_svc_xprt_flags(__entry->flags), __entry->wakeup)
);
TRACE_EVENT(svc_wake_up,
@@ -1265,21 +1588,18 @@ TRACE_EVENT(svc_handle_xprt,
TP_ARGS(xprt, len),
TP_STRUCT__entry(
- __field(struct svc_xprt *, xprt)
__field(int, len)
__field(unsigned long, flags)
__string(addr, xprt->xpt_remotebuf)
),
TP_fast_assign(
- __entry->xprt = xprt;
__entry->len = len;
__entry->flags = xprt->xpt_flags;
__assign_str(addr, xprt->xpt_remotebuf);
),
- TP_printk("xprt=%p addr=%s len=%d flags=%s",
- __entry->xprt, __get_str(addr),
+ TP_printk("addr=%s len=%d flags=%s", __get_str(addr),
__entry->len, show_svc_xprt_flags(__entry->flags))
);
@@ -1313,27 +1633,221 @@ DECLARE_EVENT_CLASS(svc_deferred_event,
TP_ARGS(dr),
TP_STRUCT__entry(
+ __field(const void *, dr)
__field(u32, xid)
__string(addr, dr->xprt->xpt_remotebuf)
),
TP_fast_assign(
+ __entry->dr = dr;
__entry->xid = be32_to_cpu(*(__be32 *)(dr->args +
(dr->xprt_hlen>>2)));
__assign_str(addr, dr->xprt->xpt_remotebuf);
),
- TP_printk("addr=%s xid=0x%08x", __get_str(addr), __entry->xid)
+ TP_printk("addr=%s dr=%p xid=0x%08x", __get_str(addr), __entry->dr,
+ __entry->xid)
);
+
#define DEFINE_SVC_DEFERRED_EVENT(name) \
- DEFINE_EVENT(svc_deferred_event, svc_##name##_deferred, \
+ DEFINE_EVENT(svc_deferred_event, svc_defer_##name, \
TP_PROTO( \
const struct svc_deferred_req *dr \
), \
TP_ARGS(dr))
DEFINE_SVC_DEFERRED_EVENT(drop);
-DEFINE_SVC_DEFERRED_EVENT(revisit);
+DEFINE_SVC_DEFERRED_EVENT(queue);
+DEFINE_SVC_DEFERRED_EVENT(recv);
+
+TRACE_EVENT(svcsock_new_socket,
+ TP_PROTO(
+ const struct socket *socket
+ ),
+
+ TP_ARGS(socket),
+
+ TP_STRUCT__entry(
+ __field(unsigned long, type)
+ __field(unsigned long, family)
+ __field(bool, listener)
+ ),
+
+ TP_fast_assign(
+ __entry->type = socket->type;
+ __entry->family = socket->sk->sk_family;
+ __entry->listener = (socket->sk->sk_state == TCP_LISTEN);
+ ),
+
+ TP_printk("type=%s family=%s%s",
+ show_socket_type(__entry->type),
+ rpc_show_address_family(__entry->family),
+ __entry->listener ? " (listener)" : ""
+ )
+);
+
+TRACE_EVENT(svcsock_marker,
+ TP_PROTO(
+ const struct svc_xprt *xprt,
+ __be32 marker
+ ),
+
+ TP_ARGS(xprt, marker),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, length)
+ __field(bool, last)
+ __string(addr, xprt->xpt_remotebuf)
+ ),
+
+ TP_fast_assign(
+ __entry->length = be32_to_cpu(marker) & RPC_FRAGMENT_SIZE_MASK;
+ __entry->last = be32_to_cpu(marker) & RPC_LAST_STREAM_FRAGMENT;
+ __assign_str(addr, xprt->xpt_remotebuf);
+ ),
+
+ TP_printk("addr=%s length=%u%s", __get_str(addr),
+ __entry->length, __entry->last ? " (last)" : "")
+);
+
+DECLARE_EVENT_CLASS(svcsock_class,
+ TP_PROTO(
+ const struct svc_xprt *xprt,
+ ssize_t result
+ ),
+
+ TP_ARGS(xprt, result),
+
+ TP_STRUCT__entry(
+ __field(ssize_t, result)
+ __field(unsigned long, flags)
+ __string(addr, xprt->xpt_remotebuf)
+ ),
+
+ TP_fast_assign(
+ __entry->result = result;
+ __entry->flags = xprt->xpt_flags;
+ __assign_str(addr, xprt->xpt_remotebuf);
+ ),
+
+ TP_printk("addr=%s result=%zd flags=%s", __get_str(addr),
+ __entry->result, show_svc_xprt_flags(__entry->flags)
+ )
+);
+
+#define DEFINE_SVCSOCK_EVENT(name) \
+ DEFINE_EVENT(svcsock_class, svcsock_##name, \
+ TP_PROTO( \
+ const struct svc_xprt *xprt, \
+ ssize_t result \
+ ), \
+ TP_ARGS(xprt, result))
+
+DEFINE_SVCSOCK_EVENT(udp_send);
+DEFINE_SVCSOCK_EVENT(udp_recv);
+DEFINE_SVCSOCK_EVENT(udp_recv_err);
+DEFINE_SVCSOCK_EVENT(tcp_send);
+DEFINE_SVCSOCK_EVENT(tcp_recv);
+DEFINE_SVCSOCK_EVENT(tcp_recv_eagain);
+DEFINE_SVCSOCK_EVENT(tcp_recv_err);
+DEFINE_SVCSOCK_EVENT(data_ready);
+DEFINE_SVCSOCK_EVENT(write_space);
+
+TRACE_EVENT(svcsock_tcp_recv_short,
+ TP_PROTO(
+ const struct svc_xprt *xprt,
+ u32 expected,
+ u32 received
+ ),
+
+ TP_ARGS(xprt, expected, received),
+
+ TP_STRUCT__entry(
+ __field(u32, expected)
+ __field(u32, received)
+ __field(unsigned long, flags)
+ __string(addr, xprt->xpt_remotebuf)
+ ),
+
+ TP_fast_assign(
+ __entry->expected = expected;
+ __entry->received = received;
+ __entry->flags = xprt->xpt_flags;
+ __assign_str(addr, xprt->xpt_remotebuf);
+ ),
+
+ TP_printk("addr=%s flags=%s expected=%u received=%u",
+ __get_str(addr), show_svc_xprt_flags(__entry->flags),
+ __entry->expected, __entry->received
+ )
+);
+
+TRACE_EVENT(svcsock_tcp_state,
+ TP_PROTO(
+ const struct svc_xprt *xprt,
+ const struct socket *socket
+ ),
+
+ TP_ARGS(xprt, socket),
+
+ TP_STRUCT__entry(
+ __field(unsigned long, socket_state)
+ __field(unsigned long, sock_state)
+ __field(unsigned long, flags)
+ __string(addr, xprt->xpt_remotebuf)
+ ),
+
+ TP_fast_assign(
+ __entry->socket_state = socket->state;
+ __entry->sock_state = socket->sk->sk_state;
+ __entry->flags = xprt->xpt_flags;
+ __assign_str(addr, xprt->xpt_remotebuf);
+ ),
+
+ TP_printk("addr=%s state=%s sk_state=%s flags=%s", __get_str(addr),
+ rpc_show_socket_state(__entry->socket_state),
+ rpc_show_sock_state(__entry->sock_state),
+ show_svc_xprt_flags(__entry->flags)
+ )
+);
+
+DECLARE_EVENT_CLASS(svcsock_accept_class,
+ TP_PROTO(
+ const struct svc_xprt *xprt,
+ const char *service,
+ long status
+ ),
+
+ TP_ARGS(xprt, service, status),
+
+ TP_STRUCT__entry(
+ __field(long, status)
+ __string(service, service)
+ __array(unsigned char, addr, sizeof(struct sockaddr_in6))
+ ),
+
+ TP_fast_assign(
+ __entry->status = status;
+ __assign_str(service, service);
+ memcpy(__entry->addr, &xprt->xpt_local, sizeof(__entry->addr));
+ ),
+
+ TP_printk("listener=%pISpc service=%s status=%ld",
+ __entry->addr, __get_str(service), __entry->status
+ )
+);
+
+#define DEFINE_ACCEPT_EVENT(name) \
+ DEFINE_EVENT(svcsock_accept_class, svcsock_##name##_err, \
+ TP_PROTO( \
+ const struct svc_xprt *xprt, \
+ const char *service, \
+ long status \
+ ), \
+ TP_ARGS(xprt, service, status))
+
+DEFINE_ACCEPT_EVENT(accept);
+DEFINE_ACCEPT_EVENT(getpeername);
DECLARE_EVENT_CLASS(cache_event,
TP_PROTO(
@@ -1368,6 +1882,86 @@ DEFINE_CACHE_EVENT(cache_entry_update);
DEFINE_CACHE_EVENT(cache_entry_make_negative);
DEFINE_CACHE_EVENT(cache_entry_no_listener);
+DECLARE_EVENT_CLASS(register_class,
+ TP_PROTO(
+ const char *program,
+ const u32 version,
+ const int family,
+ const unsigned short protocol,
+ const unsigned short port,
+ int error
+ ),
+
+ TP_ARGS(program, version, family, protocol, port, error),
+
+ TP_STRUCT__entry(
+ __field(u32, version)
+ __field(unsigned long, family)
+ __field(unsigned short, protocol)
+ __field(unsigned short, port)
+ __field(int, error)
+ __string(program, program)
+ ),
+
+ TP_fast_assign(
+ __entry->version = version;
+ __entry->family = family;
+ __entry->protocol = protocol;
+ __entry->port = port;
+ __entry->error = error;
+ __assign_str(program, program);
+ ),
+
+ TP_printk("program=%sv%u proto=%s port=%u family=%s error=%d",
+ __get_str(program), __entry->version,
+ __entry->protocol == IPPROTO_UDP ? "udp" : "tcp",
+ __entry->port, rpc_show_address_family(__entry->family),
+ __entry->error
+ )
+);
+
+#define DEFINE_REGISTER_EVENT(name) \
+ DEFINE_EVENT(register_class, svc_##name, \
+ TP_PROTO( \
+ const char *program, \
+ const u32 version, \
+ const int family, \
+ const unsigned short protocol, \
+ const unsigned short port, \
+ int error \
+ ), \
+ TP_ARGS(program, version, family, protocol, \
+ port, error))
+
+DEFINE_REGISTER_EVENT(register);
+DEFINE_REGISTER_EVENT(noregister);
+
+TRACE_EVENT(svc_unregister,
+ TP_PROTO(
+ const char *program,
+ const u32 version,
+ int error
+ ),
+
+ TP_ARGS(program, version, error),
+
+ TP_STRUCT__entry(
+ __field(u32, version)
+ __field(int, error)
+ __string(program, program)
+ ),
+
+ TP_fast_assign(
+ __entry->version = version;
+ __entry->error = error;
+ __assign_str(program, program);
+ ),
+
+ TP_printk("program=%sv%u error=%d",
+ __get_str(program), __entry->version, __entry->error
+ )
+);
+
#endif /* _TRACE_SUNRPC_H */
#include <trace/define_trace.h>
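
The svcsock_marker event above decodes the 4-byte RPC-over-TCP record marker:
the top bit flags the last fragment and the low 31 bits carry the fragment
length. A minimal userspace sketch of the same decoding (the two constants are
restated here; in the kernel they come from linux/sunrpc/msg_prot.h):

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <arpa/inet.h>	/* ntohl() */

	#define RPC_LAST_STREAM_FRAGMENT	(1U << 31)
	#define RPC_FRAGMENT_SIZE_MASK		(~RPC_LAST_STREAM_FRAGMENT)

	/* Decode a record marker as received off the wire (big-endian). */
	static void decode_marker(uint32_t wire_marker)
	{
		uint32_t marker = ntohl(wire_marker);
		uint32_t length = marker & RPC_FRAGMENT_SIZE_MASK;
		bool last = marker & RPC_LAST_STREAM_FRAGMENT;

		printf("length=%u%s\n", length, last ? " (last)" : "");
	}
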
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index c65b374a5090..974a71342aea 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -3168,7 +3168,7 @@ union bpf_attr {
* Return
* The id is returned or 0 in case the id could not be retrieved.
*
- * void *bpf_ringbuf_output(void *ringbuf, void *data, u64 size, u64 flags)
+ * int bpf_ringbuf_output(void *ringbuf, void *data, u64 size, u64 flags)
* Description
* Copy *size* bytes from *data* into a ring buffer *ringbuf*.
* If BPF_RB_NO_WAKEUP is specified in *flags*, no notification of
@@ -3761,6 +3761,19 @@ struct xdp_md {
__u32 egress_ifindex; /* txq->dev->ifindex */
};
+/* DEVMAP map-value layout
+ *
+ * The struct data-layout of the map value is a configuration interface.
+ * New members can only be added to the end of this structure.
+ */
+struct bpf_devmap_val {
+ __u32 ifindex; /* device index */
+ union {
+ int fd; /* prog fd on map write */
+ __u32 id; /* prog id on map read */
+ } bpf_prog;
+};
+
enum sk_action {
SK_DROP = 0,
SK_PASS,
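
With the bpf_devmap_val layout above, a DEVMAP update may carry a program fd
in bpf_prog.fd, and reads hand back the program id instead. A hypothetical
userspace sketch using libbpf's bpf_map_update_elem() (map_fd and prog_fd are
assumed to have been created elsewhere; the ifindex is illustrative):

	#include <bpf/bpf.h>	/* bpf_map_update_elem() */
	#include <linux/bpf.h>	/* struct bpf_devmap_val */

	/* Redirect slot 0 to ifindex 2, attaching prog_fd to run on egress. */
	static int add_devmap_entry(int map_fd, int prog_fd)
	{
		struct bpf_devmap_val val = {
			.ifindex = 2,		/* illustrative device index */
			.bpf_prog.fd = prog_fd,	/* fd on write; id on read */
		};
		__u32 key = 0;

		return bpf_map_update_elem(map_fd, &key, &val, BPF_ANY);
	}
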
diff --git a/include/uapi/linux/fb.h b/include/uapi/linux/fb.h
index b6aac7ee1f67..4c14e8be7267 100644
--- a/include/uapi/linux/fb.h
+++ b/include/uapi/linux/fb.h
@@ -205,6 +205,7 @@ struct fb_bitfield {
#define FB_ACTIVATE_ALL 64 /* change all VCs on this fb */
#define FB_ACTIVATE_FORCE 128 /* force apply even when no change*/
#define FB_ACTIVATE_INV_MODE 256 /* invalidate videomode */
+#define FB_ACTIVATE_KD_TEXT 512 /* for KDSET vt ioctl */
#define FB_ACCELF_TEXT 1 /* (OBSOLETE) see fb_info.flags and vc_mode */
diff --git a/include/uapi/linux/fs.h b/include/uapi/linux/fs.h
index 379a612f8f1d..f44eb0a04afd 100644
--- a/include/uapi/linux/fs.h
+++ b/include/uapi/linux/fs.h
@@ -262,6 +262,7 @@ struct fsxattr {
#define FS_EA_INODE_FL 0x00200000 /* Inode used for large EA */
#define FS_EOFBLOCKS_FL 0x00400000 /* Reserved for ext4 */
#define FS_NOCOW_FL 0x00800000 /* Do not cow file */
+#define FS_DAX_FL 0x02000000 /* Inode is DAX */
#define FS_INLINE_DATA_FL 0x10000000 /* Reserved for ext4 */
#define FS_PROJINHERIT_FL 0x20000000 /* Create with parents projid */
#define FS_CASEFOLD_FL 0x40000000 /* Folder is case insensitive */
diff --git a/include/uapi/linux/keyctl.h b/include/uapi/linux/keyctl.h
index ed3d5893830d..4c8884eea808 100644
--- a/include/uapi/linux/keyctl.h
+++ b/include/uapi/linux/keyctl.h
@@ -69,6 +69,7 @@
#define KEYCTL_RESTRICT_KEYRING 29 /* Restrict keys allowed to link to a keyring */
#define KEYCTL_MOVE 30 /* Move keys between keyrings */
#define KEYCTL_CAPABILITIES 31 /* Find capabilities of keyrings subsystem */
+#define KEYCTL_WATCH_KEY 32 /* Watch a key or ring of keys for changes */
/* keyctl structures */
struct keyctl_dh_params {
@@ -130,5 +131,6 @@ struct keyctl_pkey_params {
#define KEYCTL_CAPS0_MOVE 0x80 /* KEYCTL_MOVE supported */
#define KEYCTL_CAPS1_NS_KEYRING_NAME 0x01 /* Keyring names are per-user_namespace */
#define KEYCTL_CAPS1_NS_KEY_TAG 0x02 /* Key indexing can include a namespace tag */
+#define KEYCTL_CAPS1_NOTIFICATIONS 0x04 /* Keys generate watchable notifications */
#endif /* _LINUX_KEYCTL_H */
diff --git a/include/uapi/linux/mrp_bridge.h b/include/uapi/linux/mrp_bridge.h
index 84f15f48a7cb..bee366540212 100644
--- a/include/uapi/linux/mrp_bridge.h
+++ b/include/uapi/linux/mrp_bridge.h
@@ -36,7 +36,6 @@ enum br_mrp_port_state_type {
enum br_mrp_port_role_type {
BR_MRP_PORT_ROLE_PRIMARY,
BR_MRP_PORT_ROLE_SECONDARY,
- BR_MRP_PORT_ROLE_NONE,
};
enum br_mrp_tlv_header_type {
diff --git a/include/uapi/linux/ndctl.h b/include/uapi/linux/ndctl.h
index de5d90212409..0e09dc5cec19 100644
--- a/include/uapi/linux/ndctl.h
+++ b/include/uapi/linux/ndctl.h
@@ -244,6 +244,7 @@ struct nd_cmd_pkg {
#define NVDIMM_FAMILY_HPE2 2
#define NVDIMM_FAMILY_MSFT 3
#define NVDIMM_FAMILY_HYPERV 4
+#define NVDIMM_FAMILY_PAPR 5
#define ND_IOCTL_CALL _IOWR(ND_IOCTL, ND_CMD_CALL,\
struct nd_cmd_pkg)
diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h
index dad8c8f8581f..4e6339ab1fce 100644
--- a/include/uapi/linux/nl80211.h
+++ b/include/uapi/linux/nl80211.h
@@ -794,7 +794,7 @@
* various triggers. These triggers can be configured through this
* command with the %NL80211_ATTR_WOWLAN_TRIGGERS attribute. For
* more background information, see
- * http://wireless.kernel.org/en/users/Documentation/WoWLAN.
+ * https://wireless.wiki.kernel.org/en/users/Documentation/WoWLAN.
* The @NL80211_CMD_SET_WOWLAN command can also be used as a notification
* from the driver reporting the wakeup reason. In this case, the
* @NL80211_ATTR_WOWLAN_TRIGGERS attribute will contain the reason
diff --git a/include/uapi/linux/rds.h b/include/uapi/linux/rds.h
index cba368e55863..c21edb966c19 100644
--- a/include/uapi/linux/rds.h
+++ b/include/uapi/linux/rds.h
@@ -64,10 +64,12 @@
/* supported values for SO_RDS_TRANSPORT */
#define RDS_TRANS_IB 0
-#define RDS_TRANS_IWARP 1
+#define RDS_TRANS_GAP 1
#define RDS_TRANS_TCP 2
#define RDS_TRANS_COUNT 3
#define RDS_TRANS_NONE (~0)
+/* don't use RDS_TRANS_IWARP - it is deprecated */
+#define RDS_TRANS_IWARP RDS_TRANS_GAP
/* IOCTLS commands for SOL_RDS */
#define SIOCRDSSETTOS (SIOCPROTOPRIVATE)
diff --git a/include/uapi/linux/spi/spidev.h b/include/uapi/linux/spi/spidev.h
index ee0f2460bff6..d56427c0b3e0 100644
--- a/include/uapi/linux/spi/spidev.h
+++ b/include/uapi/linux/spi/spidev.h
@@ -48,6 +48,10 @@
#define SPI_TX_QUAD 0x200
#define SPI_RX_DUAL 0x400
#define SPI_RX_QUAD 0x800
+#define SPI_CS_WORD 0x1000
+#define SPI_TX_OCTAL 0x2000
+#define SPI_RX_OCTAL 0x4000
+#define SPI_3WIRE_HIZ 0x8000
/*---------------------------------------------------------------------------*/
diff --git a/include/uapi/linux/vhost.h b/include/uapi/linux/vhost.h
index 9fe72e4b1373..0c2349612e77 100644
--- a/include/uapi/linux/vhost.h
+++ b/include/uapi/linux/vhost.h
@@ -15,6 +15,8 @@
#include <linux/types.h>
#include <linux/ioctl.h>
+#define VHOST_FILE_UNBIND -1
+
/* ioctls */
#define VHOST_VIRTIO 0xAF
@@ -140,4 +142,6 @@
/* Get the max ring size. */
#define VHOST_VDPA_GET_VRING_NUM _IOR(VHOST_VIRTIO, 0x76, __u16)
+/* Set event fd for config interrupt */
+#define VHOST_VDPA_SET_CONFIG_CALL _IOW(VHOST_VIRTIO, 0x77, int)
#endif
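
VHOST_VDPA_SET_CONFIG_CALL takes a pointer to an int holding an eventfd, and
passing VHOST_FILE_UNBIND detaches it again. A rough sketch under those
assumptions:

	#include <sys/eventfd.h>
	#include <sys/ioctl.h>
	#include <linux/vhost.h>

	/* Ask a vhost-vdpa device to signal config changes via an eventfd. */
	static int set_config_call(int vdpa_fd)
	{
		int efd = eventfd(0, 0);

		if (efd < 0)
			return -1;
		if (ioctl(vdpa_fd, VHOST_VDPA_SET_CONFIG_CALL, &efd) < 0)
			return -1;
		return efd;
	}

	/* To detach later:
	 *	int unbind = VHOST_FILE_UNBIND;
	 *	ioctl(vdpa_fd, VHOST_VDPA_SET_CONFIG_CALL, &unbind);
	 */
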
diff --git a/include/uapi/linux/virtio_ids.h b/include/uapi/linux/virtio_ids.h
index ecc27a17401a..b052355ac7a3 100644
--- a/include/uapi/linux/virtio_ids.h
+++ b/include/uapi/linux/virtio_ids.h
@@ -44,6 +44,7 @@
#define VIRTIO_ID_VSOCK 19 /* virtio vsock transport */
#define VIRTIO_ID_CRYPTO 20 /* virtio crypto */
#define VIRTIO_ID_IOMMU 23 /* virtio IOMMU */
+#define VIRTIO_ID_MEM 24 /* virtio mem */
#define VIRTIO_ID_FS 26 /* virtio filesystem */
#define VIRTIO_ID_PMEM 27 /* virtio pmem */
#define VIRTIO_ID_MAC80211_HWSIM 29 /* virtio mac80211-hwsim */
diff --git a/include/uapi/linux/virtio_mem.h b/include/uapi/linux/virtio_mem.h
new file mode 100644
index 000000000000..a9ffe041843c
--- /dev/null
+++ b/include/uapi/linux/virtio_mem.h
@@ -0,0 +1,211 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
+/*
+ * Virtio Mem Device
+ *
+ * Copyright Red Hat, Inc. 2020
+ *
+ * Authors:
+ * David Hildenbrand <david@redhat.com>
+ *
+ * This header is BSD licensed so anyone can use the definitions
+ * to implement compatible drivers/servers:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of IBM nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL IBM OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _LINUX_VIRTIO_MEM_H
+#define _LINUX_VIRTIO_MEM_H
+
+#include <linux/types.h>
+#include <linux/virtio_types.h>
+#include <linux/virtio_ids.h>
+#include <linux/virtio_config.h>
+
+/*
+ * Each virtio-mem device manages a dedicated region in physical address
+ * space. Each device can belong to a single NUMA node, multiple devices
+ * for a single NUMA node are possible. A virtio-mem device is like a
+ * "resizable DIMM" consisting of small memory blocks that can be plugged
+ * or unplugged. The device driver is responsible for (un)plugging memory
+ * blocks on demand.
+ *
+ * Virtio-mem devices can only operate on their assigned memory region in
+ * order to (un)plug memory. A device cannot (un)plug memory belonging to
+ * other devices.
+ *
+ * The "region_size" corresponds to the maximum amount of memory that can
+ * be provided by a device. The "size" corresponds to the amount of memory
+ * that is currently plugged. "requested_size" corresponds to a request
+ * from the device to the device driver to (un)plug blocks. The
+ * device driver should try to (un)plug blocks in order to reach the
+ * "requested_size". It is impossible to plug more memory than requested.
+ *
+ * The "usable_region_size" represents the memory region that can actually
+ * be used to (un)plug memory. It is always at least as big as the
+ * "requested_size" and will grow dynamically. It will only shrink when
+ * explicitly triggered (VIRTIO_MEM_REQ_UNPLUG).
+ *
+ * There are no guarantees what will happen if unplugged memory is
+ * read/written. Such memory should, in general, not be touched. E.g.,
+ * even writing might succeed, but the values will simply be discarded at
+ * random points in time.
+ *
+ * It can happen that the device cannot process a request, because it is
+ * busy. The device driver has to retry later.
+ *
+ * Usually, during system resets all memory will get unplugged, so the
+ * device driver can start with a clean state. However, in specific
+ * scenarios (if the device is busy) it can happen that the device still
+ * has memory plugged. The device driver can request to unplug all memory
+ * (VIRTIO_MEM_REQ_UNPLUG) - which might take a while to succeed if the
+ * device is busy.
+ */
+
+/* --- virtio-mem: feature bits --- */
+
+/* node_id is an ACPI PXM and is valid */
+#define VIRTIO_MEM_F_ACPI_PXM 0
+
+
+/* --- virtio-mem: guest -> host requests --- */
+
+/* request to plug memory blocks */
+#define VIRTIO_MEM_REQ_PLUG 0
+/* request to unplug memory blocks */
+#define VIRTIO_MEM_REQ_UNPLUG 1
+/* request to unplug all blocks and shrink the usable size */
+#define VIRTIO_MEM_REQ_UNPLUG_ALL 2
+/* request information about the plugged state of memory blocks */
+#define VIRTIO_MEM_REQ_STATE 3
+
+struct virtio_mem_req_plug {
+ __virtio64 addr;
+ __virtio16 nb_blocks;
+ __virtio16 padding[3];
+};
+
+struct virtio_mem_req_unplug {
+ __virtio64 addr;
+ __virtio16 nb_blocks;
+ __virtio16 padding[3];
+};
+
+struct virtio_mem_req_state {
+ __virtio64 addr;
+ __virtio16 nb_blocks;
+ __virtio16 padding[3];
+};
+
+struct virtio_mem_req {
+ __virtio16 type;
+ __virtio16 padding[3];
+
+ union {
+ struct virtio_mem_req_plug plug;
+ struct virtio_mem_req_unplug unplug;
+ struct virtio_mem_req_state state;
+ } u;
+};
+
+
+/* --- virtio-mem: host -> guest response --- */
+
+/*
+ * Request processed successfully, applicable for
+ * - VIRTIO_MEM_REQ_PLUG
+ * - VIRTIO_MEM_REQ_UNPLUG
+ * - VIRTIO_MEM_REQ_UNPLUG_ALL
+ * - VIRTIO_MEM_REQ_STATE
+ */
+#define VIRTIO_MEM_RESP_ACK 0
+/*
+ * Request denied - e.g. trying to plug more than requested, applicable for
+ * - VIRTIO_MEM_REQ_PLUG
+ */
+#define VIRTIO_MEM_RESP_NACK 1
+/*
+ * Request cannot be processed right now, try again later, applicable for
+ * - VIRTIO_MEM_REQ_PLUG
+ * - VIRTIO_MEM_REQ_UNPLUG
+ * - VIRTIO_MEM_REQ_UNPLUG_ALL
+ */
+#define VIRTIO_MEM_RESP_BUSY 2
+/*
+ * Error in request (e.g. addresses/alignment), applicable for
+ * - VIRTIO_MEM_REQ_PLUG
+ * - VIRTIO_MEM_REQ_UNPLUG
+ * - VIRTIO_MEM_REQ_STATE
+ */
+#define VIRTIO_MEM_RESP_ERROR 3
+
+
+/* State of memory blocks is "plugged" */
+#define VIRTIO_MEM_STATE_PLUGGED 0
+/* State of memory blocks is "unplugged" */
+#define VIRTIO_MEM_STATE_UNPLUGGED 1
+/* State of memory blocks is "mixed" */
+#define VIRTIO_MEM_STATE_MIXED 2
+
+struct virtio_mem_resp_state {
+ __virtio16 state;
+};
+
+struct virtio_mem_resp {
+ __virtio16 type;
+ __virtio16 padding[3];
+
+ union {
+ struct virtio_mem_resp_state state;
+ } u;
+};
+
+/* --- virtio-mem: configuration --- */
+
+struct virtio_mem_config {
+ /* Block size and alignment. Cannot change. */
+ __u64 block_size;
+ /* Valid with VIRTIO_MEM_F_ACPI_PXM. Cannot change. */
+ __u16 node_id;
+ __u8 padding[6];
+ /* Start address of the memory region. Cannot change. */
+ __u64 addr;
+ /* Region size (maximum). Cannot change. */
+ __u64 region_size;
+ /*
+ * Currently usable region size. Can grow up to region_size. Can
+ * shrink due to VIRTIO_MEM_REQ_UNPLUG_ALL (in which case no config
+ * update will be sent).
+ */
+ __u64 usable_region_size;
+ /*
+ * Currently used size. Changes due to plug/unplug requests, but no
+ * config updates will be sent.
+ */
+ __u64 plugged_size;
+ /* Requested size. New plug requests cannot exceed it. Can change. */
+ __u64 requested_size;
+};
+
+#endif /* _LINUX_VIRTIO_MEM_H */
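
The structures above make the guest->host flow concrete: the driver fills a
virtio_mem_req, queues it to the device, and inspects the virtio_mem_resp
type. A simplified sketch of preparing a plug request (endianness conversion
through the __virtio* helpers is elided; the trailing comment restates the
response semantics defined above):

	/* Plug nb_blocks blocks starting at addr (block_size granularity). */
	static void virtio_mem_prepare_plug(struct virtio_mem_req *req,
					    __virtio64 addr,
					    __virtio16 nb_blocks)
	{
		req->type = VIRTIO_MEM_REQ_PLUG;
		req->u.plug.addr = addr;
		req->u.plug.nb_blocks = nb_blocks;
	}

	/* The device answers with a virtio_mem_resp: VIRTIO_MEM_RESP_ACK on
	 * success, VIRTIO_MEM_RESP_NACK if plugging would exceed
	 * requested_size, or VIRTIO_MEM_RESP_BUSY to signal a retry later.
	 */
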
diff --git a/include/uapi/linux/virtio_ring.h b/include/uapi/linux/virtio_ring.h
index 559f42e73315..476d3e5c0fe7 100644
--- a/include/uapi/linux/virtio_ring.h
+++ b/include/uapi/linux/virtio_ring.h
@@ -86,6 +86,13 @@
* at the end of the used ring. Guest should ignore the used->flags field. */
#define VIRTIO_RING_F_EVENT_IDX 29
+/* Alignment requirements for vring elements.
+ * When using pre-virtio 1.0 layout, these fall out naturally.
+ */
+#define VRING_AVAIL_ALIGN_SIZE 2
+#define VRING_USED_ALIGN_SIZE 4
+#define VRING_DESC_ALIGN_SIZE 16
+
/* Virtio ring descriptors: 16 bytes. These can chain together via "next". */
struct vring_desc {
/* Address (guest-physical). */
@@ -112,28 +119,47 @@ struct vring_used_elem {
__virtio32 len;
};
+typedef struct vring_used_elem __attribute__((aligned(VRING_USED_ALIGN_SIZE)))
+ vring_used_elem_t;
+
struct vring_used {
__virtio16 flags;
__virtio16 idx;
- struct vring_used_elem ring[];
+ vring_used_elem_t ring[];
};
+/*
+ * The ring element addresses are passed between components with different
+ * alignment assumptions. Thus, we might need to decrease the compiler-selected
+ * alignment, and so must use a typedef to make sure the aligned attribute
+ * actually takes hold:
+ *
+ * https://gcc.gnu.org/onlinedocs//gcc/Common-Type-Attributes.html#Common-Type-Attributes
+ *
+ * When used on a struct, or struct member, the aligned attribute can only
+ * increase the alignment; in order to decrease it, the packed attribute must
+ * be specified as well. When used as part of a typedef, the aligned attribute
+ * can both increase and decrease alignment, and specifying the packed
+ * attribute generates a warning.
+ */
+typedef struct vring_desc __attribute__((aligned(VRING_DESC_ALIGN_SIZE)))
+ vring_desc_t;
+typedef struct vring_avail __attribute__((aligned(VRING_AVAIL_ALIGN_SIZE)))
+ vring_avail_t;
+typedef struct vring_used __attribute__((aligned(VRING_USED_ALIGN_SIZE)))
+ vring_used_t;
+
struct vring {
unsigned int num;
- struct vring_desc *desc;
+ vring_desc_t *desc;
- struct vring_avail *avail;
+ vring_avail_t *avail;
- struct vring_used *used;
+ vring_used_t *used;
};
-/* Alignment requirements for vring elements.
- * When using pre-virtio 1.0 layout, these fall out naturally.
- */
-#define VRING_AVAIL_ALIGN_SIZE 2
-#define VRING_USED_ALIGN_SIZE 4
-#define VRING_DESC_ALIGN_SIZE 16
+#ifndef VIRTIO_RING_NO_LEGACY
/* The standard layout for the ring is a continuous chunk of memory which looks
* like this. We assume num is a power of 2.
@@ -181,6 +207,8 @@ static inline unsigned vring_size(unsigned int num, unsigned long align)
+ sizeof(__virtio16) * 3 + sizeof(struct vring_used_elem) * num;
}
+#endif /* VIRTIO_RING_NO_LEGACY */
+
/* The following is used with USED_EVENT_IDX and AVAIL_EVENT_IDX */
/* Assuming a given event_idx value from the other side, if
* we have just incremented index from old to new_idx,
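
The typedef rule the new comment cites is easy to verify: on a struct the
aligned attribute may only raise alignment, while on a typedef it may also
lower it, which is exactly what the vring_*_t typedefs rely on. A GCC/Clang
sketch (the _Static_assert values assume those compilers honor the attribute
as documented):

	#include <stdalign.h>

	struct elem { unsigned long long x; };

	/* On a struct, aligned() can only increase alignment. */
	struct elem_raised {
		unsigned long long x;
	} __attribute__((aligned(16)));

	/* On a typedef, aligned() may also decrease it. */
	typedef struct elem __attribute__((aligned(4))) elem_lowered;

	_Static_assert(alignof(struct elem_raised) == 16, "raised");
	_Static_assert(alignof(elem_lowered) == 4, "lowered");
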
diff --git a/include/uapi/linux/watch_queue.h b/include/uapi/linux/watch_queue.h
new file mode 100644
index 000000000000..c3d8320b5d3a
--- /dev/null
+++ b/include/uapi/linux/watch_queue.h
@@ -0,0 +1,104 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _UAPI_LINUX_WATCH_QUEUE_H
+#define _UAPI_LINUX_WATCH_QUEUE_H
+
+#include <linux/types.h>
+#include <linux/fcntl.h>
+#include <linux/ioctl.h>
+
+#define O_NOTIFICATION_PIPE O_EXCL /* Parameter to pipe2() selecting notification pipe */
+
+#define IOC_WATCH_QUEUE_SET_SIZE _IO('W', 0x60) /* Set the size in pages */
+#define IOC_WATCH_QUEUE_SET_FILTER _IO('W', 0x61) /* Set the filter */
+
+enum watch_notification_type {
+ WATCH_TYPE_META = 0, /* Special record */
+ WATCH_TYPE_KEY_NOTIFY = 1, /* Key change event notification */
+ WATCH_TYPE__NR = 2
+};
+
+enum watch_meta_notification_subtype {
+ WATCH_META_REMOVAL_NOTIFICATION = 0, /* Watched object was removed */
+ WATCH_META_LOSS_NOTIFICATION = 1, /* Data loss occurred */
+};
+
+/*
+ * Notification record header. This is aligned to 64-bits so that subclasses
+ * can contain __u64 fields.
+ */
+struct watch_notification {
+ __u32 type:24; /* enum watch_notification_type */
+ __u32 subtype:8; /* Type-specific subtype (filterable) */
+ __u32 info;
+#define WATCH_INFO_LENGTH 0x0000007f /* Length of record */
+#define WATCH_INFO_LENGTH__SHIFT 0
+#define WATCH_INFO_ID 0x0000ff00 /* ID of watchpoint */
+#define WATCH_INFO_ID__SHIFT 8
+#define WATCH_INFO_TYPE_INFO 0xffff0000 /* Type-specific info */
+#define WATCH_INFO_TYPE_INFO__SHIFT 16
+#define WATCH_INFO_FLAG_0 0x00010000 /* Type-specific info, flag bit 0 */
+#define WATCH_INFO_FLAG_1 0x00020000 /* ... */
+#define WATCH_INFO_FLAG_2 0x00040000
+#define WATCH_INFO_FLAG_3 0x00080000
+#define WATCH_INFO_FLAG_4 0x00100000
+#define WATCH_INFO_FLAG_5 0x00200000
+#define WATCH_INFO_FLAG_6 0x00400000
+#define WATCH_INFO_FLAG_7 0x00800000
+};
+
+/*
+ * Notification filtering rules (IOC_WATCH_QUEUE_SET_FILTER).
+ */
+struct watch_notification_type_filter {
+ __u32 type; /* Type to apply filter to */
+ __u32 info_filter; /* Filter on watch_notification::info */
+ __u32 info_mask; /* Mask of relevant bits in info_filter */
+ __u32 subtype_filter[8]; /* Bitmask of subtypes to filter on */
+};
+
+struct watch_notification_filter {
+ __u32 nr_filters; /* Number of filters */
+ __u32 __reserved; /* Must be 0 */
+ struct watch_notification_type_filter filters[];
+};
+
+
+/*
+ * Extended watch removal notification. This is used optionally if the type
+ * wants to indicate an identifier for the object being watched, if one
+ * exists. The extended form can be distinguished by its length.
+ *
+ * type -> WATCH_TYPE_META
+ * subtype -> WATCH_META_REMOVAL_NOTIFICATION
+ */
+struct watch_notification_removal {
+ struct watch_notification watch;
+ __u64 id; /* Type-dependent identifier */
+};
+
+/*
+ * Type of key/keyring change notification.
+ */
+enum key_notification_subtype {
+ NOTIFY_KEY_INSTANTIATED = 0, /* Key was instantiated (aux is error code) */
+ NOTIFY_KEY_UPDATED = 1, /* Key was updated */
+ NOTIFY_KEY_LINKED = 2, /* Key (aux) was added to watched keyring */
+ NOTIFY_KEY_UNLINKED = 3, /* Key (aux) was removed from watched keyring */
+ NOTIFY_KEY_CLEARED = 4, /* Keyring was cleared */
+ NOTIFY_KEY_REVOKED = 5, /* Key was revoked */
+ NOTIFY_KEY_INVALIDATED = 6, /* Key was invalidated */
+ NOTIFY_KEY_SETATTR = 7, /* Key's attributes got changed */
+};
+
+/*
+ * Key/keyring notification record.
+ * - watch.type = WATCH_TYPE_KEY_NOTIFY
+ * - watch.subtype = enum key_notification_subtype
+ */
+struct key_notification {
+ struct watch_notification watch;
+ __u32 key_id; /* The key/keyring affected */
+ __u32 aux; /* Per-type auxiliary data */
+};
+
+#endif /* _UAPI_LINUX_WATCH_QUEUE_H */
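
A consumer opens a notification pipe, sizes its ring, and then reads packed
watch_notification records, using WATCH_INFO_LENGTH to step from one record
to the next. A hypothetical sketch (registering a watch, e.g. with the
KEYCTL_WATCH_KEY command added above, is left out; the length field is
assumed to be in bytes):

	#define _GNU_SOURCE	/* pipe2() */
	#include <stdio.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/watch_queue.h>

	static void drain_notifications(void)
	{
		unsigned char buf[4096];
		int fds[2];
		ssize_t n;

		if (pipe2(fds, O_NOTIFICATION_PIPE) < 0)
			return;
		ioctl(fds[0], IOC_WATCH_QUEUE_SET_SIZE, 256); /* pages */

		/* ... register watches against fds[0] here ... */

		while ((n = read(fds[0], buf, sizeof(buf))) > 0) {
			size_t p = 0;

			while (p < (size_t)n) {
				struct watch_notification *wn =
					(struct watch_notification *)&buf[p];
				size_t len = (wn->info & WATCH_INFO_LENGTH) >>
					     WATCH_INFO_LENGTH__SHIFT;

				if (len < sizeof(*wn))
					break;	/* malformed record; stop */
				printf("type=%u subtype=%u len=%zu\n",
				       wn->type, wn->subtype, len);
				p += len;
			}
		}
	}
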
diff --git a/include/uapi/linux/xattr.h b/include/uapi/linux/xattr.h
index c1395b5bd432..9463db2dfa9d 100644
--- a/include/uapi/linux/xattr.h
+++ b/include/uapi/linux/xattr.h
@@ -7,6 +7,7 @@
Copyright (C) 2001 by Andreas Gruenbacher <a.gruenbacher@computer.org>
Copyright (c) 2001-2002 Silicon Graphics, Inc. All Rights Reserved.
Copyright (c) 2004 Red Hat, Inc., James Morris <jmorris@redhat.com>
+ Copyright (c) 2020 Jan (janneke) Nieuwenhuizen <janneke@gnu.org>
*/
#include <linux/libc-compat.h>
@@ -31,6 +32,9 @@
#define XATTR_BTRFS_PREFIX "btrfs."
#define XATTR_BTRFS_PREFIX_LEN (sizeof(XATTR_BTRFS_PREFIX) - 1)
+#define XATTR_HURD_PREFIX "gnu."
+#define XATTR_HURD_PREFIX_LEN (sizeof(XATTR_HURD_PREFIX) - 1)
+
#define XATTR_SECURITY_PREFIX "security."
#define XATTR_SECURITY_PREFIX_LEN (sizeof(XATTR_SECURITY_PREFIX) - 1)
diff --git a/include/uapi/mtd/mtd-abi.h b/include/uapi/mtd/mtd-abi.h
index 47ffe3208c27..4b48fbf7d343 100644
--- a/include/uapi/mtd/mtd-abi.h
+++ b/include/uapi/mtd/mtd-abi.h
@@ -104,6 +104,7 @@ struct mtd_write_req {
#define MTD_BIT_WRITEABLE 0x800 /* Single bits can be flipped */
#define MTD_NO_ERASE 0x1000 /* No erase necessary */
#define MTD_POWERUP_LOCK 0x2000 /* Always locked after reset */
+#define MTD_SLC_ON_MLC_EMULATION 0x4000 /* Emulate SLC behavior on MLC NANDs */
/* Some common devices / combinations of capabilities */
#define MTD_CAP_ROM 0
diff --git a/include/xen/arm/page.h b/include/xen/arm/page.h
index f77dcbcba5a6..d7f6af50e200 100644
--- a/include/xen/arm/page.h
+++ b/include/xen/arm/page.h
@@ -3,11 +3,11 @@
#define _ASM_ARM_XEN_PAGE_H
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <linux/pfn.h>
#include <linux/types.h>
#include <linux/dma-mapping.h>
+#include <linux/pgtable.h>
#include <xen/xen.h>
#include <xen/interface/grant_table.h>
diff --git a/include/xen/events.h b/include/xen/events.h
index 12b0dcb6a120..df1e6391f63f 100644
--- a/include/xen/events.h
+++ b/include/xen/events.h
@@ -90,13 +90,6 @@ unsigned int irq_from_evtchn(evtchn_port_t evtchn);
int irq_from_virq(unsigned int cpu, unsigned int virq);
evtchn_port_t evtchn_from_irq(unsigned irq);
-#ifdef CONFIG_XEN_PVHVM
-/* Xen HVM evtchn vector callback */
-void xen_hvm_callback_vector(void);
-#ifdef CONFIG_TRACING
-#define trace_xen_hvm_callback_vector xen_hvm_callback_vector
-#endif
-#endif
int xen_set_callback_via(uint64_t via);
void xen_evtchn_do_upcall(struct pt_regs *regs);
void xen_hvm_evtchn_do_upcall(void);
diff --git a/include/xen/hvm.h b/include/xen/hvm.h
index 0b15f8cb17fc..b7fd7fc9ad41 100644
--- a/include/xen/hvm.h
+++ b/include/xen/hvm.h
@@ -58,4 +58,6 @@ static inline int hvm_get_parameter(int idx, uint64_t *value)
#define HVM_CALLBACK_VECTOR(x) (((uint64_t)HVM_CALLBACK_VIA_TYPE_VECTOR)<<\
HVM_CALLBACK_VIA_TYPE_SHIFT | (x))
+void xen_setup_callback_vector(void);
+
#endif /* XEN_HVM_H__ */
diff --git a/include/xen/interface/hvm/hvm_op.h b/include/xen/interface/hvm/hvm_op.h
index 956a04682865..25d945ef17de 100644
--- a/include/xen/interface/hvm/hvm_op.h
+++ b/include/xen/interface/hvm/hvm_op.h
@@ -21,6 +21,8 @@
#ifndef __XEN_PUBLIC_HVM_HVM_OP_H__
#define __XEN_PUBLIC_HVM_HVM_OP_H__
+#include <xen/interface/xen.h>
+
/* Get/set subcommands: the second argument of the hypercall is a
* pointer to a xen_hvm_param struct. */
#define HVMOP_set_param 0
diff --git a/include/xen/xen-ops.h b/include/xen/xen-ops.h
index 095be1d66f31..39a5580f8feb 100644
--- a/include/xen/xen-ops.h
+++ b/include/xen/xen-ops.h
@@ -215,17 +215,7 @@ bool xen_running_on_version_or_later(unsigned int major, unsigned int minor);
void xen_efi_runtime_setup(void);
-#ifdef CONFIG_PREEMPTION
-
-static inline void xen_preemptible_hcall_begin(void)
-{
-}
-
-static inline void xen_preemptible_hcall_end(void)
-{
-}
-
-#else
+#if defined(CONFIG_XEN_PV) && !defined(CONFIG_PREEMPTION)
DECLARE_PER_CPU(bool, xen_in_preemptible_hcall);
@@ -239,6 +229,11 @@ static inline void xen_preemptible_hcall_end(void)
__this_cpu_write(xen_in_preemptible_hcall, false);
}
-#endif /* CONFIG_PREEMPTION */
+#else
+
+static inline void xen_preemptible_hcall_begin(void) { }
+static inline void xen_preemptible_hcall_end(void) { }
+
+#endif /* CONFIG_XEN_PV && !CONFIG_PREEMPTION */
#endif /* INCLUDE_XEN_OPS_H */