author     Jason Gunthorpe <jgg@nvidia.com>   2022-12-09 15:52:17 -0400
committer  Jason Gunthorpe <jgg@nvidia.com>   2022-12-09 15:52:17 -0400
commit     d69e8c63fcbbf695ff7ff2c6d26efead23cfbb3a (patch)
tree       4d714ecd331233069ab718989bb017dfd934e129 /include
parent     6cfe7bd0dfd33033683639039b5608d6534c19eb (diff)
parent     76dcd734eca23168cb008912c0f69ff408905235 (diff)
download   linux-d69e8c63fcbbf695ff7ff2c6d26efead23cfbb3a.tar.bz2
Merge tag 'v6.1-rc8' into rdma.git for-next
For dependencies in following patches

Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
Diffstat (limited to 'include')
-rw-r--r--  include/asm-generic/compat.h           |  2
-rw-r--r--  include/asm-generic/hyperv-tlfs.h      |  9
-rw-r--r--  include/asm-generic/tlb.h              |  4
-rw-r--r--  include/asm-generic/vmlinux.lds.h      |  2
-rw-r--r--  include/linux/blkdev.h                 | 16
-rw-r--r--  include/linux/bpf.h                    | 60
-rw-r--r--  include/linux/can/dev.h                | 16
-rw-r--r--  include/linux/can/platform/sja1000.h   |  2
-rw-r--r--  include/linux/efi.h                    |  3
-rw-r--r--  include/linux/fault-inject.h           |  7
-rw-r--r--  include/linux/fortify-string.h         | 13
-rw-r--r--  include/linux/fs.h                     |  8
-rw-r--r--  include/linux/fscache.h                |  2
-rw-r--r--  include/linux/gfp.h                    | 18
-rw-r--r--  include/linux/io_uring.h               |  3
-rw-r--r--  include/linux/kvm_host.h               | 25
-rw-r--r--  include/linux/maple_tree.h             |  7
-rw-r--r--  include/linux/mlx5/driver.h            |  1
-rw-r--r--  include/linux/mm.h                     | 29
-rw-r--r--  include/linux/mmc/mmc.h                |  2
-rw-r--r--  include/linux/pgtable.h                | 18
-rw-r--r--  include/linux/ring_buffer.h            |  2
-rw-r--r--  include/linux/skmsg.h                  |  2
-rw-r--r--  include/linux/slab.h                   | 23
-rw-r--r--  include/linux/trace.h                  |  4
-rw-r--r--  include/linux/vfio.h                   |  1
-rw-r--r--  include/net/inet_hashtables.h          |  3
-rw-r--r--  include/net/ip.h                       |  2
-rw-r--r--  include/net/ipv6.h                     |  2
-rw-r--r--  include/net/neighbour.h                |  2
-rw-r--r--  include/net/netlink.h                  | 48
-rw-r--r--  include/net/sctp/stream_sched.h        |  2
-rw-r--r--  include/net/sock.h                     |  9
-rw-r--r--  include/soc/at91/sama7-ddr.h           |  5
-rw-r--r--  include/sound/sof/dai.h                |  2
-rw-r--r--  include/sound/sof/info.h               |  4
-rw-r--r--  include/trace/events/huge_memory.h     |  8
-rw-r--r--  include/uapi/drm/panfrost_drm.h        |  2
-rw-r--r--  include/uapi/linux/audit.h             |  2
-rw-r--r--  include/uapi/linux/capability.h        |  2
-rw-r--r--  include/uapi/linux/idxd.h              |  1
-rw-r--r--  include/uapi/linux/in.h                |  1
-rw-r--r--  include/uapi/linux/io_uring.h          |  2
-rw-r--r--  include/uapi/linux/ip.h                |  6
-rw-r--r--  include/uapi/linux/ipv6.h              |  6
45 files changed, 263 insertions, 125 deletions
diff --git a/include/asm-generic/compat.h b/include/asm-generic/compat.h
index aeb257ad3d1a..8392caea398f 100644
--- a/include/asm-generic/compat.h
+++ b/include/asm-generic/compat.h
@@ -15,7 +15,7 @@
#endif
#ifndef compat_arg_u64
-#ifdef CONFIG_CPU_BIG_ENDIAN
+#ifndef CONFIG_CPU_BIG_ENDIAN
#define compat_arg_u64(name) u32 name##_lo, u32 name##_hi
#define compat_arg_u64_dual(name) u32, name##_lo, u32, name##_hi
#else
diff --git a/include/asm-generic/hyperv-tlfs.h b/include/asm-generic/hyperv-tlfs.h
index fdce7a4cfc6f..b17c6eeb9afa 100644
--- a/include/asm-generic/hyperv-tlfs.h
+++ b/include/asm-generic/hyperv-tlfs.h
@@ -102,6 +102,15 @@ struct ms_hyperv_tsc_page {
volatile s64 tsc_offset;
} __packed;
+union hv_reference_tsc_msr {
+ u64 as_uint64;
+ struct {
+ u64 enable:1;
+ u64 reserved:11;
+ u64 pfn:52;
+ } __packed;
+};
+
/*
* The guest OS needs to register the guest ID with the hypervisor.
* The guest ID is a 64 bit entity and the structure of this ID is
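A minimal sketch of how the new union might be consumed (hypothetical helper, not part of this merge): the bitfields let callers build the reference-TSC MSR value by field instead of open-coding shifts.

static u64 example_build_ref_tsc_msr(u64 tsc_page_pfn)
{
	union hv_reference_tsc_msr msr = {
		.enable = 1,			/* expose the reference TSC page */
		.pfn    = tsc_page_pfn,		/* page frame of the TSC page */
	};

	return msr.as_uint64;			/* value written to the reference-TSC MSR */
}
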
diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h
index 492dce43236e..cab7cfebf40b 100644
--- a/include/asm-generic/tlb.h
+++ b/include/asm-generic/tlb.h
@@ -222,12 +222,16 @@ extern void tlb_remove_table(struct mmu_gather *tlb, void *table);
#define tlb_needs_table_invalidate() (true)
#endif
+void tlb_remove_table_sync_one(void);
+
#else
#ifdef tlb_needs_table_invalidate
#error tlb_needs_table_invalidate() requires MMU_GATHER_RCU_TABLE_FREE
#endif
+static inline void tlb_remove_table_sync_one(void) { }
+
#endif /* CONFIG_MMU_GATHER_RCU_TABLE_FREE */
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index d06ada2341cb..3dc5824141cd 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -347,6 +347,7 @@
#define DATA_DATA \
*(.xiptext) \
*(DATA_MAIN) \
+ *(.data..decrypted) \
*(.ref.data) \
*(.data..shared_aligned) /* percpu related */ \
MEM_KEEP(init.data*) \
@@ -995,7 +996,6 @@
#ifdef CONFIG_AMD_MEM_ENCRYPT
#define PERCPU_DECRYPTED_SECTION \
. = ALIGN(PAGE_SIZE); \
- *(.data..decrypted) \
*(.data..percpu..decrypted) \
. = ALIGN(PAGE_SIZE);
#else
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 50e358a19d98..891f8cbcd043 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -311,6 +311,13 @@ struct queue_limits {
unsigned char discard_misaligned;
unsigned char raid_partial_stripes_expensive;
enum blk_zoned_model zoned;
+
+ /*
+ * Drivers that set dma_alignment to less than 511 must be prepared to
+ * handle individual bvec's that are not a multiple of a SECTOR_SIZE
+ * due to possible offsets.
+ */
+ unsigned int dma_alignment;
};
typedef int (*report_zones_cb)(struct blk_zone *zone, unsigned int idx,
@@ -456,12 +463,6 @@ struct request_queue {
unsigned long nr_requests; /* Max # of requests */
unsigned int dma_pad_mask;
- /*
- * Drivers that set dma_alignment to less than 511 must be prepared to
- * handle individual bvec's that are not a multiple of a SECTOR_SIZE
- * due to possible offsets.
- */
- unsigned int dma_alignment;
#ifdef CONFIG_BLK_INLINE_ENCRYPTION
struct blk_crypto_profile *crypto_profile;
@@ -944,7 +945,6 @@ extern void blk_queue_io_min(struct request_queue *q, unsigned int min);
extern void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt);
extern void blk_queue_io_opt(struct request_queue *q, unsigned int opt);
extern void blk_set_queue_depth(struct request_queue *q, unsigned int depth);
-extern void blk_set_default_limits(struct queue_limits *lim);
extern void blk_set_stacking_limits(struct queue_limits *lim);
extern int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
sector_t offset);
@@ -1324,7 +1324,7 @@ static inline sector_t bdev_zone_sectors(struct block_device *bdev)
static inline int queue_dma_alignment(const struct request_queue *q)
{
- return q ? q->dma_alignment : 511;
+ return q ? q->limits.dma_alignment : 511;
}
static inline unsigned int bdev_dma_alignment(struct block_device *bdev)
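dma_alignment only moves from struct request_queue into struct queue_limits; the existing helpers keep working. A hedged illustration (hypothetical function, not from this diff):

static bool example_buf_is_dma_aligned(struct request_queue *q, const void *buf)
{
	/* queue_dma_alignment() now reads q->limits.dma_alignment */
	return ((unsigned long)buf & queue_dma_alignment(q)) == 0;
}
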
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 0566705c1d4e..c1bd1bd10506 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -27,7 +27,7 @@
#include <linux/bpfptr.h>
#include <linux/btf.h>
#include <linux/rcupdate_trace.h>
-#include <linux/init.h>
+#include <linux/static_call.h>
struct bpf_verifier_env;
struct bpf_verifier_log;
@@ -315,7 +315,7 @@ static inline void __copy_map_value(struct bpf_map *map, void *dst, void *src, b
u32 next_off = map->off_arr->field_off[i];
memcpy(dst + curr_off, src + curr_off, next_off - curr_off);
- curr_off += map->off_arr->field_sz[i];
+ curr_off = next_off + map->off_arr->field_sz[i];
}
memcpy(dst + curr_off, src + curr_off, map->value_size - curr_off);
}
@@ -344,7 +344,7 @@ static inline void zero_map_value(struct bpf_map *map, void *dst)
u32 next_off = map->off_arr->field_off[i];
memset(dst + curr_off, 0, next_off - curr_off);
- curr_off += map->off_arr->field_sz[i];
+ curr_off = next_off + map->off_arr->field_sz[i];
}
memset(dst + curr_off, 0, map->value_size - curr_off);
}
@@ -954,6 +954,10 @@ struct bpf_dispatcher {
void *rw_image;
u32 image_off;
struct bpf_ksym ksym;
+#ifdef CONFIG_HAVE_STATIC_CALL
+ struct static_call_key *sc_key;
+ void *sc_tramp;
+#endif
};
static __always_inline __nocfi unsigned int bpf_dispatcher_nop_func(
@@ -971,7 +975,33 @@ struct bpf_trampoline *bpf_trampoline_get(u64 key,
struct bpf_attach_target_info *tgt_info);
void bpf_trampoline_put(struct bpf_trampoline *tr);
int arch_prepare_bpf_dispatcher(void *image, void *buf, s64 *funcs, int num_funcs);
-int __init bpf_arch_init_dispatcher_early(void *ip);
+
+/*
+ * When the architecture supports STATIC_CALL replace the bpf_dispatcher_fn
+ * indirection with a direct call to the bpf program. If the architecture does
+ * not have STATIC_CALL, avoid a double-indirection.
+ */
+#ifdef CONFIG_HAVE_STATIC_CALL
+
+#define __BPF_DISPATCHER_SC_INIT(_name) \
+ .sc_key = &STATIC_CALL_KEY(_name), \
+ .sc_tramp = STATIC_CALL_TRAMP_ADDR(_name),
+
+#define __BPF_DISPATCHER_SC(name) \
+ DEFINE_STATIC_CALL(bpf_dispatcher_##name##_call, bpf_dispatcher_nop_func)
+
+#define __BPF_DISPATCHER_CALL(name) \
+ static_call(bpf_dispatcher_##name##_call)(ctx, insnsi, bpf_func)
+
+#define __BPF_DISPATCHER_UPDATE(_d, _new) \
+ __static_call_update((_d)->sc_key, (_d)->sc_tramp, (_new))
+
+#else
+#define __BPF_DISPATCHER_SC_INIT(name)
+#define __BPF_DISPATCHER_SC(name)
+#define __BPF_DISPATCHER_CALL(name) bpf_func(ctx, insnsi)
+#define __BPF_DISPATCHER_UPDATE(_d, _new)
+#endif
#define BPF_DISPATCHER_INIT(_name) { \
.mutex = __MUTEX_INITIALIZER(_name.mutex), \
@@ -984,34 +1014,21 @@ int __init bpf_arch_init_dispatcher_early(void *ip);
.name = #_name, \
.lnode = LIST_HEAD_INIT(_name.ksym.lnode), \
}, \
+ __BPF_DISPATCHER_SC_INIT(_name##_call) \
}
-#define BPF_DISPATCHER_INIT_CALL(_name) \
- static int __init _name##_init(void) \
- { \
- return bpf_arch_init_dispatcher_early(_name##_func); \
- } \
- early_initcall(_name##_init)
-
-#ifdef CONFIG_X86_64
-#define BPF_DISPATCHER_ATTRIBUTES __attribute__((patchable_function_entry(5)))
-#else
-#define BPF_DISPATCHER_ATTRIBUTES
-#endif
-
#define DEFINE_BPF_DISPATCHER(name) \
- notrace BPF_DISPATCHER_ATTRIBUTES \
+ __BPF_DISPATCHER_SC(name); \
noinline __nocfi unsigned int bpf_dispatcher_##name##_func( \
const void *ctx, \
const struct bpf_insn *insnsi, \
bpf_func_t bpf_func) \
{ \
- return bpf_func(ctx, insnsi); \
+ return __BPF_DISPATCHER_CALL(name); \
} \
EXPORT_SYMBOL(bpf_dispatcher_##name##_func); \
struct bpf_dispatcher bpf_dispatcher_##name = \
- BPF_DISPATCHER_INIT(bpf_dispatcher_##name); \
- BPF_DISPATCHER_INIT_CALL(bpf_dispatcher_##name);
+ BPF_DISPATCHER_INIT(bpf_dispatcher_##name);
#define DECLARE_BPF_DISPATCHER(name) \
unsigned int bpf_dispatcher_##name##_func( \
@@ -1019,6 +1036,7 @@ int __init bpf_arch_init_dispatcher_early(void *ip);
const struct bpf_insn *insnsi, \
bpf_func_t bpf_func); \
extern struct bpf_dispatcher bpf_dispatcher_##name;
+
#define BPF_DISPATCHER_FUNC(name) bpf_dispatcher_##name##_func
#define BPF_DISPATCHER_PTR(name) (&bpf_dispatcher_##name)
void bpf_dispatcher_change_prog(struct bpf_dispatcher *d, struct bpf_prog *from,
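A hedged sketch of the resulting usage (hypothetical dispatcher name "example"; the in-tree user is the xdp dispatcher): with CONFIG_HAVE_STATIC_CALL the dispatcher body becomes a static call that bpf_dispatcher_change_prog() retargets via __BPF_DISPATCHER_UPDATE(), instead of machine code patched directly.

DEFINE_BPF_DISPATCHER(example);		/* emits bpf_dispatcher_example_func() */

static unsigned int example_run(const void *ctx, const struct bpf_insn *insnsi,
				bpf_func_t bpf_func)
{
	/* body expands to static_call(bpf_dispatcher_example_call)(ctx, insnsi, bpf_func) */
	return BPF_DISPATCHER_FUNC(example)(ctx, insnsi, bpf_func);
}
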
diff --git a/include/linux/can/dev.h b/include/linux/can/dev.h
index 58f5431a5559..982ba245eb41 100644
--- a/include/linux/can/dev.h
+++ b/include/linux/can/dev.h
@@ -152,6 +152,22 @@ static inline bool can_is_canxl_dev_mtu(unsigned int mtu)
return (mtu >= CANXL_MIN_MTU && mtu <= CANXL_MAX_MTU);
}
+/* drop skb if it does not contain a valid CAN frame for sending */
+static inline bool can_dev_dropped_skb(struct net_device *dev, struct sk_buff *skb)
+{
+ struct can_priv *priv = netdev_priv(dev);
+
+ if (priv->ctrlmode & CAN_CTRLMODE_LISTENONLY) {
+ netdev_info_once(dev,
+ "interface in listen only mode, dropping skb\n");
+ kfree_skb(skb);
+ dev->stats.tx_dropped++;
+ return true;
+ }
+
+ return can_dropped_invalid_skb(dev, skb);
+}
+
void can_setup(struct net_device *dev);
struct net_device *alloc_candev_mqs(int sizeof_priv, unsigned int echo_skb_max,
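A hedged usage sketch (hypothetical driver): the new helper is meant to replace can_dropped_invalid_skb() at the top of a driver's xmit path, so frames are also dropped and counted while the interface is in listen-only mode.

static netdev_tx_t example_can_start_xmit(struct sk_buff *skb,
					  struct net_device *dev)
{
	if (can_dev_dropped_skb(dev, skb))
		return NETDEV_TX_OK;	/* skb already freed and tx_dropped bumped */

	/* ... hand the frame to the controller ... */
	return NETDEV_TX_OK;
}
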
diff --git a/include/linux/can/platform/sja1000.h b/include/linux/can/platform/sja1000.h
index 5755ae5a4712..6a869682c120 100644
--- a/include/linux/can/platform/sja1000.h
+++ b/include/linux/can/platform/sja1000.h
@@ -14,7 +14,7 @@
#define OCR_MODE_TEST 0x01
#define OCR_MODE_NORMAL 0x02
#define OCR_MODE_CLOCK 0x03
-#define OCR_MODE_MASK 0x07
+#define OCR_MODE_MASK 0x03
#define OCR_TX0_INVERT 0x04
#define OCR_TX0_PULLDOWN 0x08
#define OCR_TX0_PULLUP 0x10
diff --git a/include/linux/efi.h b/include/linux/efi.h
index 80f3c1c7827d..7603fc58c47c 100644
--- a/include/linux/efi.h
+++ b/include/linux/efi.h
@@ -389,6 +389,7 @@ void efi_native_runtime_setup(void);
#define EFI_LOAD_FILE2_PROTOCOL_GUID EFI_GUID(0x4006c0c1, 0xfcb3, 0x403e, 0x99, 0x6d, 0x4a, 0x6c, 0x87, 0x24, 0xe0, 0x6d)
#define EFI_RT_PROPERTIES_TABLE_GUID EFI_GUID(0xeb66918a, 0x7eef, 0x402a, 0x84, 0x2e, 0x93, 0x1d, 0x21, 0xc3, 0x8a, 0xe9)
#define EFI_DXE_SERVICES_TABLE_GUID EFI_GUID(0x05ad34ba, 0x6f02, 0x4214, 0x95, 0x2e, 0x4d, 0xa0, 0x39, 0x8e, 0x2b, 0xb9)
+#define EFI_SMBIOS_PROTOCOL_GUID EFI_GUID(0x03583ff6, 0xcb36, 0x4940, 0x94, 0x7e, 0xb9, 0xb3, 0x9f, 0x4a, 0xfa, 0xf7)
#define EFI_IMAGE_SECURITY_DATABASE_GUID EFI_GUID(0xd719b2cb, 0x3d3a, 0x4596, 0xa3, 0xbc, 0xda, 0xd0, 0x0e, 0x67, 0x65, 0x6f)
#define EFI_SHIM_LOCK_GUID EFI_GUID(0x605dab50, 0xe046, 0x4300, 0xab, 0xb6, 0x3d, 0xd8, 0x10, 0xdd, 0x8b, 0x23)
@@ -1222,7 +1223,7 @@ efi_status_t efi_random_get_seed(void);
arch_efi_call_virt_teardown(); \
})
-#define EFI_RANDOM_SEED_SIZE 64U
+#define EFI_RANDOM_SEED_SIZE 32U // BLAKE2S_HASH_SIZE
struct linux_efi_random_seed {
u32 size;
diff --git a/include/linux/fault-inject.h b/include/linux/fault-inject.h
index 9f6e25467844..444236dadcf0 100644
--- a/include/linux/fault-inject.h
+++ b/include/linux/fault-inject.h
@@ -20,7 +20,6 @@ struct fault_attr {
atomic_t space;
unsigned long verbose;
bool task_filter;
- bool no_warn;
unsigned long stacktrace_depth;
unsigned long require_start;
unsigned long require_end;
@@ -32,6 +31,10 @@ struct fault_attr {
struct dentry *dname;
};
+enum fault_flags {
+ FAULT_NOWARN = 1 << 0,
+};
+
#define FAULT_ATTR_INITIALIZER { \
.interval = 1, \
.times = ATOMIC_INIT(1), \
@@ -40,11 +43,11 @@ struct fault_attr {
.ratelimit_state = RATELIMIT_STATE_INIT_DISABLED, \
.verbose = 2, \
.dname = NULL, \
- .no_warn = false, \
}
#define DECLARE_FAULT_ATTR(name) struct fault_attr name = FAULT_ATTR_INITIALIZER
int setup_fault_attr(struct fault_attr *attr, char *str);
+bool should_fail_ex(struct fault_attr *attr, ssize_t size, int flags);
bool should_fail(struct fault_attr *attr, ssize_t size);
#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
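A hedged sketch of the new interface (hypothetical attr): callers that previously relied on the removed .no_warn field now pass FAULT_NOWARN per call through should_fail_ex().

static DECLARE_FAULT_ATTR(example_fail_attr);

static bool example_should_fail(ssize_t size)
{
	/* fail according to the configured attr, but suppress the warning splat */
	return should_fail_ex(&example_fail_attr, size, FAULT_NOWARN);
}
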
diff --git a/include/linux/fortify-string.h b/include/linux/fortify-string.h
index 18a31b125f9d..1067a8450826 100644
--- a/include/linux/fortify-string.h
+++ b/include/linux/fortify-string.h
@@ -454,13 +454,18 @@ __FORTIFY_INLINE bool fortify_memcpy_chk(__kernel_size_t size,
#define __fortify_memcpy_chk(p, q, size, p_size, q_size, \
p_size_field, q_size_field, op) ({ \
- size_t __fortify_size = (size_t)(size); \
- WARN_ONCE(fortify_memcpy_chk(__fortify_size, p_size, q_size, \
- p_size_field, q_size_field, #op), \
+ const size_t __fortify_size = (size_t)(size); \
+ const size_t __p_size = (p_size); \
+ const size_t __q_size = (q_size); \
+ const size_t __p_size_field = (p_size_field); \
+ const size_t __q_size_field = (q_size_field); \
+ WARN_ONCE(fortify_memcpy_chk(__fortify_size, __p_size, \
+ __q_size, __p_size_field, \
+ __q_size_field, #op), \
#op ": detected field-spanning write (size %zu) of single %s (size %zu)\n", \
__fortify_size, \
"field \"" #p "\" at " __FILE__ ":" __stringify(__LINE__), \
- p_size_field); \
+ __p_size_field); \
__underlying_##op(p, q, __fortify_size); \
})
diff --git a/include/linux/fs.h b/include/linux/fs.h
index e654435f1651..59ae95ddb679 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -2089,6 +2089,14 @@ struct dir_context {
*/
#define REMAP_FILE_ADVISORY (REMAP_FILE_CAN_SHORTEN)
+/*
+ * These flags control the behavior of vfs_copy_file_range().
+ * They are not available to the user via syscall.
+ *
+ * COPY_FILE_SPLICE: call splice direct instead of fs clone/copy ops
+ */
+#define COPY_FILE_SPLICE (1 << 0)
+
struct iov_iter;
struct io_uring_cmd;
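A hedged sketch (hypothetical in-kernel caller): COPY_FILE_SPLICE cannot be passed from userspace; a kernel user that wants the splice fallback instead of the filesystem's clone/copy ops would request it like this.

static ssize_t example_copy(struct file *in, struct file *out, size_t len)
{
	return vfs_copy_file_range(in, 0, out, 0, len, COPY_FILE_SPLICE);
}
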
diff --git a/include/linux/fscache.h b/include/linux/fscache.h
index 36e5dd84cf59..8e312c8323a8 100644
--- a/include/linux/fscache.h
+++ b/include/linux/fscache.h
@@ -75,7 +75,7 @@ struct fscache_volume {
atomic_t n_accesses; /* Number of cache accesses in progress */
unsigned int debug_id;
unsigned int key_hash; /* Hash of key string */
- char *key; /* Volume ID, eg. "afs@example.com@1234" */
+ u8 *key; /* Volume ID, eg. "afs@example.com@1234" */
struct list_head proc_link; /* Link in /proc/fs/fscache/volumes */
struct hlist_bl_node hash_link; /* Link in hash table */
struct work_struct work;
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index ef4aea3b356e..65a78773dcca 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -210,6 +210,20 @@ alloc_pages_bulk_array_node(gfp_t gfp, int nid, unsigned long nr_pages, struct p
return __alloc_pages_bulk(gfp, nid, NULL, nr_pages, NULL, page_array);
}
+static inline void warn_if_node_offline(int this_node, gfp_t gfp_mask)
+{
+ gfp_t warn_gfp = gfp_mask & (__GFP_THISNODE|__GFP_NOWARN);
+
+ if (warn_gfp != (__GFP_THISNODE|__GFP_NOWARN))
+ return;
+
+ if (node_online(this_node))
+ return;
+
+ pr_warn("%pGg allocation from offline node %d\n", &gfp_mask, this_node);
+ dump_stack();
+}
+
/*
* Allocate pages, preferring the node given as nid. The node must be valid and
* online. For more general interface, see alloc_pages_node().
@@ -218,7 +232,7 @@ static inline struct page *
__alloc_pages_node(int nid, gfp_t gfp_mask, unsigned int order)
{
VM_BUG_ON(nid < 0 || nid >= MAX_NUMNODES);
- VM_WARN_ON((gfp_mask & __GFP_THISNODE) && !node_online(nid));
+ warn_if_node_offline(nid, gfp_mask);
return __alloc_pages(gfp_mask, order, nid, NULL);
}
@@ -227,7 +241,7 @@ static inline
struct folio *__folio_alloc_node(gfp_t gfp, unsigned int order, int nid)
{
VM_BUG_ON(nid < 0 || nid >= MAX_NUMNODES);
- VM_WARN_ON((gfp & __GFP_THISNODE) && !node_online(nid));
+ warn_if_node_offline(nid, gfp);
return __folio_alloc(gfp, order, nid, NULL);
}
diff --git a/include/linux/io_uring.h b/include/linux/io_uring.h
index 43bc8a2edccf..0ded9e271523 100644
--- a/include/linux/io_uring.h
+++ b/include/linux/io_uring.h
@@ -16,6 +16,9 @@ enum io_uring_cmd_flags {
IO_URING_F_SQE128 = 4,
IO_URING_F_CQE32 = 8,
IO_URING_F_IOPOLL = 16,
+
+ /* the request is executed from poll, it should not be freed */
+ IO_URING_F_MULTISHOT = 32,
};
struct io_uring_cmd {
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 00c3448ba7f8..637a60607c7d 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -776,6 +776,7 @@ struct kvm {
struct srcu_struct srcu;
struct srcu_struct irq_srcu;
pid_t userspace_pid;
+ bool override_halt_poll_ns;
unsigned int max_halt_poll_ns;
u32 dirty_ring_size;
bool vm_bugged;
@@ -1240,8 +1241,18 @@ int kvm_vcpu_write_guest(struct kvm_vcpu *vcpu, gpa_t gpa, const void *data,
void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn);
/**
- * kvm_gfn_to_pfn_cache_init - prepare a cached kernel mapping and HPA for a
- * given guest physical address.
+ * kvm_gpc_init - initialize gfn_to_pfn_cache.
+ *
+ * @gpc: struct gfn_to_pfn_cache object.
+ *
+ * This sets up a gfn_to_pfn_cache by initializing locks. Note, the cache must
+ * be zero-allocated (or zeroed by the caller before init).
+ */
+void kvm_gpc_init(struct gfn_to_pfn_cache *gpc);
+
+/**
+ * kvm_gpc_activate - prepare a cached kernel mapping and HPA for a given guest
+ * physical address.
*
* @kvm: pointer to kvm instance.
* @gpc: struct gfn_to_pfn_cache object.
@@ -1265,9 +1276,9 @@ void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn);
* kvm_gfn_to_pfn_cache_check() to ensure that the cache is valid before
* accessing the target page.
*/
-int kvm_gfn_to_pfn_cache_init(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
- struct kvm_vcpu *vcpu, enum pfn_cache_usage usage,
- gpa_t gpa, unsigned long len);
+int kvm_gpc_activate(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
+ struct kvm_vcpu *vcpu, enum pfn_cache_usage usage,
+ gpa_t gpa, unsigned long len);
/**
* kvm_gfn_to_pfn_cache_check - check validity of a gfn_to_pfn_cache.
@@ -1324,7 +1335,7 @@ int kvm_gfn_to_pfn_cache_refresh(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
void kvm_gfn_to_pfn_cache_unmap(struct kvm *kvm, struct gfn_to_pfn_cache *gpc);
/**
- * kvm_gfn_to_pfn_cache_destroy - destroy and unlink a gfn_to_pfn_cache.
+ * kvm_gpc_deactivate - deactivate and unlink a gfn_to_pfn_cache.
*
* @kvm: pointer to kvm instance.
* @gpc: struct gfn_to_pfn_cache object.
@@ -1332,7 +1343,7 @@ void kvm_gfn_to_pfn_cache_unmap(struct kvm *kvm, struct gfn_to_pfn_cache *gpc);
* This removes a cache from the @kvm's list to be processed on MMU notifier
* invocation.
*/
-void kvm_gfn_to_pfn_cache_destroy(struct kvm *kvm, struct gfn_to_pfn_cache *gpc);
+void kvm_gpc_deactivate(struct kvm *kvm, struct gfn_to_pfn_cache *gpc);
void kvm_sigset_activate(struct kvm_vcpu *vcpu);
void kvm_sigset_deactivate(struct kvm_vcpu *vcpu);
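A hedged lifecycle sketch for the renamed gfn_to_pfn_cache API (hypothetical caller, error handling elided): init once to set up locks, then activate/deactivate as the guest address becomes known or goes away.

static struct gfn_to_pfn_cache example_gpc;

static int example_setup(struct kvm *kvm, gpa_t gpa)
{
	kvm_gpc_init(&example_gpc);			/* only initializes locks */
	return kvm_gpc_activate(kvm, &example_gpc, NULL, KVM_HOST_USES_PFN,
				gpa, PAGE_SIZE);	/* was kvm_gfn_to_pfn_cache_init() */
}

static void example_teardown(struct kvm *kvm)
{
	kvm_gpc_deactivate(kvm, &example_gpc);		/* was kvm_gfn_to_pfn_cache_destroy() */
}
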
diff --git a/include/linux/maple_tree.h b/include/linux/maple_tree.h
index 2effab72add1..e594db58a0f1 100644
--- a/include/linux/maple_tree.h
+++ b/include/linux/maple_tree.h
@@ -638,6 +638,12 @@ static inline void mt_set_in_rcu(struct maple_tree *mt)
}
}
+static inline unsigned int mt_height(const struct maple_tree *mt)
+
+{
+ return (mt->ma_flags & MT_FLAGS_HEIGHT_MASK) >> MT_FLAGS_HEIGHT_OFFSET;
+}
+
void *mt_find(struct maple_tree *mt, unsigned long *index, unsigned long max);
void *mt_find_after(struct maple_tree *mt, unsigned long *index,
unsigned long max);
@@ -664,6 +670,7 @@ extern atomic_t maple_tree_tests_passed;
void mt_dump(const struct maple_tree *mt);
void mt_validate(struct maple_tree *mt);
+void mt_cache_shrink(void);
#define MT_BUG_ON(__tree, __x) do { \
atomic_inc(&maple_tree_tests_run); \
if (__x) { \
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index af2ceb4160bc..06cbad166225 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -981,6 +981,7 @@ struct mlx5_async_work {
struct mlx5_async_ctx *ctx;
mlx5_async_cbk_t user_callback;
u16 opcode; /* cmd opcode */
+ u16 op_mod; /* cmd op_mod */
void *out; /* pointer to the cmd output buffer */
};
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 8bbcccbc5565..974ccca609d2 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1852,6 +1852,25 @@ static void __maybe_unused show_free_areas(unsigned int flags, nodemask_t *nodem
__show_free_areas(flags, nodemask, MAX_NR_ZONES - 1);
}
+/*
+ * Parameter block passed down to zap_pte_range in exceptional cases.
+ */
+struct zap_details {
+ struct folio *single_folio; /* Locked folio to be unmapped */
+ bool even_cows; /* Zap COWed private pages too? */
+ zap_flags_t zap_flags; /* Extra flags for zapping */
+};
+
+/*
+ * Whether to drop the pte markers, for example, the uffd-wp information for
+ * file-backed memory. This should only be specified when we will completely
+ * drop the page in the mm, either by truncation or unmapping of the vma. By
+ * default, the flag is not set.
+ */
+#define ZAP_FLAG_DROP_MARKER ((__force zap_flags_t) BIT(0))
+/* Set in unmap_vmas() to indicate a final unmap call. Only used by hugetlb */
+#define ZAP_FLAG_UNMAP ((__force zap_flags_t) BIT(1))
+
#ifdef CONFIG_MMU
extern bool can_do_mlock(void);
#else
@@ -1869,6 +1888,8 @@ void zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
unsigned long size);
void zap_page_range(struct vm_area_struct *vma, unsigned long address,
unsigned long size);
+void zap_page_range_single(struct vm_area_struct *vma, unsigned long address,
+ unsigned long size, struct zap_details *details);
void unmap_vmas(struct mmu_gather *tlb, struct maple_tree *mt,
struct vm_area_struct *start_vma, unsigned long start,
unsigned long end);
@@ -3467,12 +3488,4 @@ madvise_set_anon_name(struct mm_struct *mm, unsigned long start,
}
#endif
-/*
- * Whether to drop the pte markers, for example, the uffd-wp information for
- * file-backed memory. This should only be specified when we will completely
- * drop the page in the mm, either by truncation or unmapping of the vma. By
- * default, the flag is not set.
- */
-#define ZAP_FLAG_DROP_MARKER ((__force zap_flags_t) BIT(0))
-
#endif /* _LINUX_MM_H */
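A hedged sketch of the newly exported single-VMA zap (hypothetical caller): the details block selects, among other things, whether COWed private pages are zapped too.

static void example_zap_one_vma(struct vm_area_struct *vma,
				unsigned long addr, unsigned long size)
{
	struct zap_details details = { .even_cows = true };

	zap_page_range_single(vma, addr, size, &details);
}
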
diff --git a/include/linux/mmc/mmc.h b/include/linux/mmc/mmc.h
index 9c50bc40f8ff..6f7993803ee7 100644
--- a/include/linux/mmc/mmc.h
+++ b/include/linux/mmc/mmc.h
@@ -451,7 +451,7 @@ static inline bool mmc_ready_for_data(u32 status)
#define MMC_SECURE_TRIM1_ARG 0x80000001
#define MMC_SECURE_TRIM2_ARG 0x80008000
#define MMC_SECURE_ARGS 0x80000000
-#define MMC_TRIM_ARGS 0x00008001
+#define MMC_TRIM_OR_DISCARD_ARGS 0x00008003
#define mmc_driver_type_mask(n) (1 << (n))
diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h
index a108b60a6962..5f0d7d0b9471 100644
--- a/include/linux/pgtable.h
+++ b/include/linux/pgtable.h
@@ -165,6 +165,13 @@ static inline pte_t *virt_to_kpte(unsigned long vaddr)
return pmd_none(*pmd) ? NULL : pte_offset_kernel(pmd, vaddr);
}
+#ifndef pmd_young
+static inline int pmd_young(pmd_t pmd)
+{
+ return 0;
+}
+#endif
+
#ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
extern int ptep_set_access_flags(struct vm_area_struct *vma,
unsigned long address, pte_t *ptep,
@@ -260,6 +267,17 @@ static inline int pmdp_clear_flush_young(struct vm_area_struct *vma,
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif
+#ifndef arch_has_hw_nonleaf_pmd_young
+/*
+ * Return whether the accessed bit in non-leaf PMD entries is supported on the
+ * local CPU.
+ */
+static inline bool arch_has_hw_nonleaf_pmd_young(void)
+{
+ return IS_ENABLED(CONFIG_ARCH_HAS_NONLEAF_PMD_YOUNG);
+}
+#endif
+
#ifndef arch_has_hw_pte_young
/*
* Return whether the accessed bit is supported on the local CPU.
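A hedged sketch of how generic code might consume the two new fallbacks (hypothetical helper): architectures without non-leaf PMD accessed bits get the stub pmd_young() returning 0 and arch_has_hw_nonleaf_pmd_young() returning false.

static bool example_nonleaf_pmd_was_accessed(pmd_t pmd)
{
	if (!arch_has_hw_nonleaf_pmd_young())
		return false;

	return pmd_young(pmd);
}
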
diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h
index 2504df9a0453..3c7d295746f6 100644
--- a/include/linux/ring_buffer.h
+++ b/include/linux/ring_buffer.h
@@ -100,7 +100,7 @@ __ring_buffer_alloc(unsigned long size, unsigned flags, struct lock_class_key *k
int ring_buffer_wait(struct trace_buffer *buffer, int cpu, int full);
__poll_t ring_buffer_poll_wait(struct trace_buffer *buffer, int cpu,
- struct file *filp, poll_table *poll_table);
+ struct file *filp, poll_table *poll_table, int full);
void ring_buffer_wake_waiters(struct trace_buffer *buffer, int cpu);
#define RING_BUFFER_ALL_CPUS -1
diff --git a/include/linux/skmsg.h b/include/linux/skmsg.h
index 48f4b645193b..70d6cb94e580 100644
--- a/include/linux/skmsg.h
+++ b/include/linux/skmsg.h
@@ -376,7 +376,7 @@ static inline void sk_psock_report_error(struct sk_psock *psock, int err)
}
struct sk_psock *sk_psock_init(struct sock *sk, int node);
-void sk_psock_stop(struct sk_psock *psock, bool wait);
+void sk_psock_stop(struct sk_psock *psock);
#if IS_ENABLED(CONFIG_BPF_STREAM_PARSER)
int sk_psock_init_strp(struct sock *sk, struct sk_psock *psock);
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 90877fcde70b..45efc6c553b8 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -470,35 +470,12 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node) __assume_kmalloc_alignm
void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t flags, int node) __assume_slab_alignment
__malloc;
-#ifdef CONFIG_TRACING
void *kmalloc_trace(struct kmem_cache *s, gfp_t flags, size_t size)
__assume_kmalloc_alignment __alloc_size(3);
void *kmalloc_node_trace(struct kmem_cache *s, gfp_t gfpflags,
int node, size_t size) __assume_kmalloc_alignment
__alloc_size(4);
-#else /* CONFIG_TRACING */
-/* Save a function call when CONFIG_TRACING=n */
-static __always_inline __alloc_size(3)
-void *kmalloc_trace(struct kmem_cache *s, gfp_t flags, size_t size)
-{
- void *ret = kmem_cache_alloc(s, flags);
-
- ret = kasan_kmalloc(s, ret, size, flags);
- return ret;
-}
-
-static __always_inline __alloc_size(4)
-void *kmalloc_node_trace(struct kmem_cache *s, gfp_t gfpflags,
- int node, size_t size)
-{
- void *ret = kmem_cache_alloc_node(s, gfpflags, node);
-
- ret = kasan_kmalloc(s, ret, size, gfpflags);
- return ret;
-}
-#endif /* CONFIG_TRACING */
-
void *kmalloc_large(size_t size, gfp_t flags) __assume_page_alignment
__alloc_size(1);
diff --git a/include/linux/trace.h b/include/linux/trace.h
index b5e16e438448..80ffda871749 100644
--- a/include/linux/trace.h
+++ b/include/linux/trace.h
@@ -26,13 +26,13 @@ struct trace_export {
int flags;
};
+struct trace_array;
+
#ifdef CONFIG_TRACING
int register_ftrace_export(struct trace_export *export);
int unregister_ftrace_export(struct trace_export *export);
-struct trace_array;
-
void trace_printk_init_buffers(void);
__printf(3, 4)
int trace_array_printk(struct trace_array *tr, unsigned long ip,
diff --git a/include/linux/vfio.h b/include/linux/vfio.h
index e7cebeb875dd..fdd393f70b19 100644
--- a/include/linux/vfio.h
+++ b/include/linux/vfio.h
@@ -189,6 +189,7 @@ int vfio_register_emulated_iommu_dev(struct vfio_device *device);
void vfio_unregister_group_dev(struct vfio_device *device);
int vfio_assign_device_set(struct vfio_device *device, void *set_id);
+unsigned int vfio_device_set_open_count(struct vfio_device_set *dev_set);
int vfio_mig_get_next_state(struct vfio_device *device,
enum vfio_device_mig_state cur_fsm,
diff --git a/include/net/inet_hashtables.h b/include/net/inet_hashtables.h
index 3af1e927247d..69174093078f 100644
--- a/include/net/inet_hashtables.h
+++ b/include/net/inet_hashtables.h
@@ -281,7 +281,8 @@ inet_bhash2_addr_any_hashbucket(const struct sock *sk, const struct net *net, in
* sk_v6_rcv_saddr (ipv6) changes after it has been binded. The socket's
* rcv_saddr field should already have been updated when this is called.
*/
-int inet_bhash2_update_saddr(struct inet_bind_hashbucket *prev_saddr, struct sock *sk);
+int inet_bhash2_update_saddr(struct sock *sk, void *saddr, int family);
+void inet_bhash2_reset_saddr(struct sock *sk);
void inet_bind_hash(struct sock *sk, struct inet_bind_bucket *tb,
struct inet_bind2_bucket *tb2, unsigned short port);
diff --git a/include/net/ip.h b/include/net/ip.h
index 038097c2a152..144bdfbb25af 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -563,7 +563,7 @@ static inline void iph_to_flow_copy_v4addrs(struct flow_keys *flow,
BUILD_BUG_ON(offsetof(typeof(flow->addrs), v4addrs.dst) !=
offsetof(typeof(flow->addrs), v4addrs.src) +
sizeof(flow->addrs.v4addrs.src));
- memcpy(&flow->addrs.v4addrs, &iph->saddr, sizeof(flow->addrs.v4addrs));
+ memcpy(&flow->addrs.v4addrs, &iph->addrs, sizeof(flow->addrs.v4addrs));
flow->control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
}
diff --git a/include/net/ipv6.h b/include/net/ipv6.h
index 37943ba3a73c..d383c895592a 100644
--- a/include/net/ipv6.h
+++ b/include/net/ipv6.h
@@ -897,7 +897,7 @@ static inline void iph_to_flow_copy_v6addrs(struct flow_keys *flow,
BUILD_BUG_ON(offsetof(typeof(flow->addrs), v6addrs.dst) !=
offsetof(typeof(flow->addrs), v6addrs.src) +
sizeof(flow->addrs.v6addrs.src));
- memcpy(&flow->addrs.v6addrs, &iph->saddr, sizeof(flow->addrs.v6addrs));
+ memcpy(&flow->addrs.v6addrs, &iph->addrs, sizeof(flow->addrs.v6addrs));
flow->control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
}
diff --git a/include/net/neighbour.h b/include/net/neighbour.h
index 20745cf7ae1a..2f2a6023fb0e 100644
--- a/include/net/neighbour.h
+++ b/include/net/neighbour.h
@@ -83,7 +83,7 @@ struct neigh_parms {
struct rcu_head rcu_head;
int reachable_time;
- int qlen;
+ u32 qlen;
int data[NEIGH_VAR_DATA_MAX];
DECLARE_BITMAP(data_state, NEIGH_VAR_DATA_MAX);
};
diff --git a/include/net/netlink.h b/include/net/netlink.h
index 4418b1981e31..6bfa972f2fbf 100644
--- a/include/net/netlink.h
+++ b/include/net/netlink.h
@@ -181,6 +181,8 @@ enum {
NLA_S64,
NLA_BITFIELD32,
NLA_REJECT,
+ NLA_BE16,
+ NLA_BE32,
__NLA_TYPE_MAX,
};
@@ -231,6 +233,7 @@ enum nla_policy_validation {
* NLA_U32, NLA_U64,
* NLA_S8, NLA_S16,
* NLA_S32, NLA_S64,
+ * NLA_BE16, NLA_BE32,
* NLA_MSECS Leaving the length field zero will verify the
* given type fits, using it verifies minimum length
* just like "All other"
@@ -261,6 +264,8 @@ enum nla_policy_validation {
* NLA_U16,
* NLA_U32,
* NLA_U64,
+ * NLA_BE16,
+ * NLA_BE32,
* NLA_S8,
* NLA_S16,
* NLA_S32,
@@ -317,19 +322,10 @@ struct nla_policy {
u8 validation_type;
u16 len;
union {
- const u32 bitfield32_valid;
- const u32 mask;
- const char *reject_message;
- const struct nla_policy *nested_policy;
- struct netlink_range_validation *range;
- struct netlink_range_validation_signed *range_signed;
- struct {
- s16 min, max;
- u8 network_byte_order:1;
- };
- int (*validate)(const struct nlattr *attr,
- struct netlink_ext_ack *extack);
- /* This entry is special, and used for the attribute at index 0
+ /**
+ * @strict_start_type: first attribute to validate strictly
+ *
+ * This entry is special, and used for the attribute at index 0
* only, and specifies special data about the policy, namely it
* specifies the "boundary type" where strict length validation
* starts for any attribute types >= this value, also, strict
@@ -348,6 +344,19 @@ struct nla_policy {
* was added to enforce strict validation from thereon.
*/
u16 strict_start_type;
+
+ /* private: use NLA_POLICY_*() to set */
+ const u32 bitfield32_valid;
+ const u32 mask;
+ const char *reject_message;
+ const struct nla_policy *nested_policy;
+ struct netlink_range_validation *range;
+ struct netlink_range_validation_signed *range_signed;
+ struct {
+ s16 min, max;
+ };
+ int (*validate)(const struct nlattr *attr,
+ struct netlink_ext_ack *extack);
};
};
@@ -369,6 +378,8 @@ struct nla_policy {
(tp == NLA_U8 || tp == NLA_U16 || tp == NLA_U32 || tp == NLA_U64)
#define __NLA_IS_SINT_TYPE(tp) \
(tp == NLA_S8 || tp == NLA_S16 || tp == NLA_S32 || tp == NLA_S64)
+#define __NLA_IS_BEINT_TYPE(tp) \
+ (tp == NLA_BE16 || tp == NLA_BE32)
#define __NLA_ENSURE(condition) BUILD_BUG_ON_ZERO(!(condition))
#define NLA_ENSURE_UINT_TYPE(tp) \
@@ -382,6 +393,7 @@ struct nla_policy {
#define NLA_ENSURE_INT_OR_BINARY_TYPE(tp) \
(__NLA_ENSURE(__NLA_IS_UINT_TYPE(tp) || \
__NLA_IS_SINT_TYPE(tp) || \
+ __NLA_IS_BEINT_TYPE(tp) || \
tp == NLA_MSECS || \
tp == NLA_BINARY) + tp)
#define NLA_ENSURE_NO_VALIDATION_PTR(tp) \
@@ -389,6 +401,8 @@ struct nla_policy {
tp != NLA_REJECT && \
tp != NLA_NESTED && \
tp != NLA_NESTED_ARRAY) + tp)
+#define NLA_ENSURE_BEINT_TYPE(tp) \
+ (__NLA_ENSURE(__NLA_IS_BEINT_TYPE(tp)) + tp)
#define NLA_POLICY_RANGE(tp, _min, _max) { \
.type = NLA_ENSURE_INT_OR_BINARY_TYPE(tp), \
@@ -419,14 +433,6 @@ struct nla_policy {
.type = NLA_ENSURE_INT_OR_BINARY_TYPE(tp), \
.validation_type = NLA_VALIDATE_MAX, \
.max = _max, \
- .network_byte_order = 0, \
-}
-
-#define NLA_POLICY_MAX_BE(tp, _max) { \
- .type = NLA_ENSURE_UINT_TYPE(tp), \
- .validation_type = NLA_VALIDATE_MAX, \
- .max = _max, \
- .network_byte_order = 1, \
}
#define NLA_POLICY_MASK(tp, _mask) { \
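A hedged example policy (hypothetical attributes): big-endian values now get their own NLA_BE16/NLA_BE32 types, replacing the removed .network_byte_order flag and the NLA_POLICY_MAX_BE() macro, and they work with the generic range macros.

static const struct nla_policy example_policy[3] = {
	[1] = { .type = NLA_BE16 },			/* e.g. a port carried in network order */
	[2] = NLA_POLICY_MAX(NLA_BE32, 0x0fff),		/* checked against 0x0fff after byte swap */
};
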
diff --git a/include/net/sctp/stream_sched.h b/include/net/sctp/stream_sched.h
index 01a70b27e026..65058faea4db 100644
--- a/include/net/sctp/stream_sched.h
+++ b/include/net/sctp/stream_sched.h
@@ -26,6 +26,8 @@ struct sctp_sched_ops {
int (*init)(struct sctp_stream *stream);
/* Init a stream */
int (*init_sid)(struct sctp_stream *stream, __u16 sid, gfp_t gfp);
+ /* free a stream */
+ void (*free_sid)(struct sctp_stream *stream, __u16 sid);
/* Frees the entire thing */
void (*free)(struct sctp_stream *stream);
diff --git a/include/net/sock.h b/include/net/sock.h
index 22f8bab583dd..e0517ecc6531 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -323,7 +323,7 @@ struct sk_filter;
* @sk_tskey: counter to disambiguate concurrent tstamp requests
* @sk_zckey: counter to order MSG_ZEROCOPY notifications
* @sk_socket: Identd and reporting IO signals
- * @sk_user_data: RPC layer private data
+ * @sk_user_data: RPC layer private data. Write-protected by @sk_callback_lock.
* @sk_frag: cached page frag
* @sk_peek_off: current peek_offset value
* @sk_send_head: front of stuff to transmit
@@ -1889,6 +1889,13 @@ void sock_kfree_s(struct sock *sk, void *mem, int size);
void sock_kzfree_s(struct sock *sk, void *mem, int size);
void sk_send_sigurg(struct sock *sk);
+static inline void sock_replace_proto(struct sock *sk, struct proto *proto)
+{
+ if (sk->sk_socket)
+ clear_bit(SOCK_SUPPORT_ZC, &sk->sk_socket->flags);
+ WRITE_ONCE(sk->sk_prot, proto);
+}
+
struct sockcm_cookie {
u64 transmit_time;
u32 mark;
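A hedged sketch tying the two sock.h changes together (hypothetical helper): sk_user_data writes are documented as protected by sk_callback_lock, and proto swaps go through sock_replace_proto() so SOCK_SUPPORT_ZC is cleared on the owning socket first.

static void example_attach(struct sock *sk, void *priv, struct proto *ops)
{
	write_lock_bh(&sk->sk_callback_lock);
	rcu_assign_sk_user_data(sk, priv);
	write_unlock_bh(&sk->sk_callback_lock);

	sock_replace_proto(sk, ops);
}
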
diff --git a/include/soc/at91/sama7-ddr.h b/include/soc/at91/sama7-ddr.h
index 6ce3bd22f6c6..5ad7ac2e3a7c 100644
--- a/include/soc/at91/sama7-ddr.h
+++ b/include/soc/at91/sama7-ddr.h
@@ -26,7 +26,10 @@
#define DDR3PHY_PGSR (0x0C) /* DDR3PHY PHY General Status Register */
#define DDR3PHY_PGSR_IDONE (1 << 0) /* Initialization Done */
-#define DDR3PHY_ACIOCR (0x24) /* DDR3PHY AC I/O Configuration Register */
+#define DDR3PHY_ACDLLCR (0x14) /* DDR3PHY AC DLL Control Register */
+#define DDR3PHY_ACDLLCR_DLLSRST (1 << 30) /* DLL Soft Reset */
+
+#define DDR3PHY_ACIOCR (0x24) /* DDR3PHY AC I/O Configuration Register */
#define DDR3PHY_ACIOCR_CSPDD_CS0 (1 << 18) /* CS#[0] Power Down Driver */
#define DDR3PHY_ACIOCR_CKPDD_CK0 (1 << 8) /* CK[0] Power Down Driver */
#define DDR3PHY_ACIORC_ACPDD (1 << 3) /* AC Power Down Driver */
diff --git a/include/sound/sof/dai.h b/include/sound/sof/dai.h
index 83fd81c82e4c..9fbd3832bcdc 100644
--- a/include/sound/sof/dai.h
+++ b/include/sound/sof/dai.h
@@ -84,8 +84,8 @@ enum sof_ipc_dai_type {
SOF_DAI_AMD_BT, /**< AMD ACP BT*/
SOF_DAI_AMD_SP, /**< AMD ACP SP */
SOF_DAI_AMD_DMIC, /**< AMD ACP DMIC */
- SOF_DAI_AMD_HS, /**< Amd HS */
SOF_DAI_MEDIATEK_AFE, /**< Mediatek AFE */
+ SOF_DAI_AMD_HS, /**< Amd HS */
};
/* general purpose DAI configuration */
diff --git a/include/sound/sof/info.h b/include/sound/sof/info.h
index 65e86e4e9fd8..75193850ead0 100644
--- a/include/sound/sof/info.h
+++ b/include/sound/sof/info.h
@@ -36,6 +36,10 @@ enum sof_ipc_ext_data {
SOF_IPC_EXT_USER_ABI_INFO = 4,
};
+/* Build u32 number in format MMmmmppp */
+#define SOF_FW_VER(MAJOR, MINOR, PATCH) ((uint32_t)( \
+ ((MAJOR) << 24) | ((MINOR) << 12) | (PATCH)))
+
/* FW version - SOF_IPC_GLB_VERSION */
struct sof_ipc_fw_version {
struct sof_ipc_hdr hdr;
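A hedged example of the new helper (assuming the usual major/minor/micro fields of struct sof_ipc_fw_version): SOF_FW_VER(2, 3, 1) packs to 0x02003001 in the MMmmmppp layout, so versions compare numerically.

static bool example_fw_at_least_2_3_1(const struct sof_ipc_fw_version *v)
{
	return SOF_FW_VER(v->major, v->minor, v->micro) >= SOF_FW_VER(2, 3, 1);
}
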
diff --git a/include/trace/events/huge_memory.h b/include/trace/events/huge_memory.h
index 935af4947917..760455dfa860 100644
--- a/include/trace/events/huge_memory.h
+++ b/include/trace/events/huge_memory.h
@@ -171,15 +171,15 @@ TRACE_EVENT(mm_collapse_huge_page_swapin,
TRACE_EVENT(mm_khugepaged_scan_file,
- TP_PROTO(struct mm_struct *mm, struct page *page, const char *filename,
+ TP_PROTO(struct mm_struct *mm, struct page *page, struct file *file,
int present, int swap, int result),
- TP_ARGS(mm, page, filename, present, swap, result),
+ TP_ARGS(mm, page, file, present, swap, result),
TP_STRUCT__entry(
__field(struct mm_struct *, mm)
__field(unsigned long, pfn)
- __string(filename, filename)
+ __string(filename, file->f_path.dentry->d_iname)
__field(int, present)
__field(int, swap)
__field(int, result)
@@ -188,7 +188,7 @@ TRACE_EVENT(mm_khugepaged_scan_file,
TP_fast_assign(
__entry->mm = mm;
__entry->pfn = page ? page_to_pfn(page) : -1;
- __assign_str(filename, filename);
+ __assign_str(filename, file->f_path.dentry->d_iname);
__entry->present = present;
__entry->swap = swap;
__entry->result = result;
diff --git a/include/uapi/drm/panfrost_drm.h b/include/uapi/drm/panfrost_drm.h
index 6f93c915cc88..9f231d40a146 100644
--- a/include/uapi/drm/panfrost_drm.h
+++ b/include/uapi/drm/panfrost_drm.h
@@ -254,7 +254,7 @@ struct panfrost_dump_object_header {
__u64 nbos;
} reghdr;
- struct pan_bomap_hdr {
+ struct {
__u32 valid;
__u64 iova;
__u32 data[2];
diff --git a/include/uapi/linux/audit.h b/include/uapi/linux/audit.h
index 7c1dc818b1d5..d676ed2b246e 100644
--- a/include/uapi/linux/audit.h
+++ b/include/uapi/linux/audit.h
@@ -187,7 +187,7 @@
#define AUDIT_MAX_KEY_LEN 256
#define AUDIT_BITMASK_SIZE 64
#define AUDIT_WORD(nr) ((__u32)((nr)/32))
-#define AUDIT_BIT(nr) (1 << ((nr) - AUDIT_WORD(nr)*32))
+#define AUDIT_BIT(nr) (1U << ((nr) - AUDIT_WORD(nr)*32))
#define AUDIT_SYSCALL_CLASSES 16
#define AUDIT_CLASS_DIR_WRITE 0
diff --git a/include/uapi/linux/capability.h b/include/uapi/linux/capability.h
index 463d1ba2232a..3d61a0ae055d 100644
--- a/include/uapi/linux/capability.h
+++ b/include/uapi/linux/capability.h
@@ -426,7 +426,7 @@ struct vfs_ns_cap_data {
*/
#define CAP_TO_INDEX(x) ((x) >> 5) /* 1 << 5 == bits in __u32 */
-#define CAP_TO_MASK(x) (1 << ((x) & 31)) /* mask for indexed __u32 */
+#define CAP_TO_MASK(x) (1U << ((x) & 31)) /* mask for indexed __u32 */
#endif /* _UAPI_LINUX_CAPABILITY_H */
diff --git a/include/uapi/linux/idxd.h b/include/uapi/linux/idxd.h
index 095299c75828..2b9e7feba3f3 100644
--- a/include/uapi/linux/idxd.h
+++ b/include/uapi/linux/idxd.h
@@ -29,6 +29,7 @@ enum idxd_scmd_stat {
IDXD_SCMD_WQ_NO_SIZE = 0x800e0000,
IDXD_SCMD_WQ_NO_PRIV = 0x800f0000,
IDXD_SCMD_WQ_IRQ_ERR = 0x80100000,
+ IDXD_SCMD_WQ_USER_NO_IOMMU = 0x80110000,
};
#define IDXD_SCMD_SOFTERR_MASK 0x80000000
diff --git a/include/uapi/linux/in.h b/include/uapi/linux/in.h
index f243ce665f74..07a4cb149305 100644
--- a/include/uapi/linux/in.h
+++ b/include/uapi/linux/in.h
@@ -20,6 +20,7 @@
#define _UAPI_LINUX_IN_H
#include <linux/types.h>
+#include <linux/stddef.h>
#include <linux/libc-compat.h>
#include <linux/socket.h>
diff --git a/include/uapi/linux/io_uring.h b/include/uapi/linux/io_uring.h
index ab7458033ee3..2df3225b562f 100644
--- a/include/uapi/linux/io_uring.h
+++ b/include/uapi/linux/io_uring.h
@@ -222,7 +222,7 @@ enum io_uring_op {
/*
* sqe->uring_cmd_flags
- * IORING_URING_CMD_FIXED use registered buffer; pass thig flag
+ * IORING_URING_CMD_FIXED use registered buffer; pass this flag
* along with setting sqe->buf_index.
*/
#define IORING_URING_CMD_FIXED (1U << 0)
diff --git a/include/uapi/linux/ip.h b/include/uapi/linux/ip.h
index 961ec16a26b8..874a92349bf5 100644
--- a/include/uapi/linux/ip.h
+++ b/include/uapi/linux/ip.h
@@ -100,8 +100,10 @@ struct iphdr {
__u8 ttl;
__u8 protocol;
__sum16 check;
- __be32 saddr;
- __be32 daddr;
+ __struct_group(/* no tag */, addrs, /* no attrs */,
+ __be32 saddr;
+ __be32 daddr;
+ );
/*The options start here. */
};
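A hedged illustration of why the addresses are grouped: the on-wire layout is unchanged, but the named group lets the flow-dissector helpers (see the net/ip.h and net/ipv6.h hunks above) memcpy() both addresses at once without triggering FORTIFY_SOURCE's field-spanning write warning.

static void example_copy_v4_addrs(struct flow_keys *flow, const struct iphdr *iph)
{
	/* copies saddr and daddr together via the new "addrs" group */
	memcpy(&flow->addrs.v4addrs, &iph->addrs, sizeof(flow->addrs.v4addrs));
}
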
diff --git a/include/uapi/linux/ipv6.h b/include/uapi/linux/ipv6.h
index 03cdbe798fe3..81f4243bebb1 100644
--- a/include/uapi/linux/ipv6.h
+++ b/include/uapi/linux/ipv6.h
@@ -130,8 +130,10 @@ struct ipv6hdr {
__u8 nexthdr;
__u8 hop_limit;
- struct in6_addr saddr;
- struct in6_addr daddr;
+ __struct_group(/* no tag */, addrs, /* no attrs */,
+ struct in6_addr saddr;
+ struct in6_addr daddr;
+ );
};