Diffstat (limited to 'include')
67 files changed, 808 insertions, 534 deletions
diff --git a/include/asm-generic/mshyperv.h b/include/asm-generic/mshyperv.h index c08758b6b364..c05d2ce9b6cd 100644 --- a/include/asm-generic/mshyperv.h +++ b/include/asm-generic/mshyperv.h @@ -269,6 +269,7 @@ bool hv_isolation_type_snp(void); u64 hv_ghcb_hypercall(u64 control, void *input, void *output, u32 input_size); void hyperv_cleanup(void); bool hv_query_ext_cap(u64 cap_query); +void hv_setup_dma_ops(struct device *dev, bool coherent); void *hv_map_memory(void *addr, unsigned long size); void hv_unmap_memory(void *addr); #else /* CONFIG_HYPERV */ diff --git a/include/dt-bindings/clock/sun6i-rtc.h b/include/dt-bindings/clock/sun6i-rtc.h new file mode 100644 index 000000000000..c845493e4d37 --- /dev/null +++ b/include/dt-bindings/clock/sun6i-rtc.h @@ -0,0 +1,10 @@ +/* SPDX-License-Identifier: (GPL-2.0+ or MIT) */ + +#ifndef _DT_BINDINGS_CLK_SUN6I_RTC_H_ +#define _DT_BINDINGS_CLK_SUN6I_RTC_H_ + +#define CLK_OSC32K 0 +#define CLK_OSC32K_FANOUT 1 +#define CLK_IOSC 2 + +#endif /* _DT_BINDINGS_CLK_SUN6I_RTC_H_ */ diff --git a/include/linux/balloon_compaction.h b/include/linux/balloon_compaction.h index 338aa27e4773..edb7f6d41faa 100644 --- a/include/linux/balloon_compaction.h +++ b/include/linux/balloon_compaction.h @@ -80,12 +80,6 @@ static inline void balloon_devinfo_init(struct balloon_dev_info *balloon) #ifdef CONFIG_BALLOON_COMPACTION extern const struct address_space_operations balloon_aops; -extern bool balloon_page_isolate(struct page *page, - isolate_mode_t mode); -extern void balloon_page_putback(struct page *page); -extern int balloon_page_migrate(struct address_space *mapping, - struct page *newpage, - struct page *page, enum migrate_mode mode); /* * balloon_page_insert - insert a page into the balloon's page list and make @@ -155,22 +149,6 @@ static inline void balloon_page_delete(struct page *page) list_del(&page->lru); } -static inline bool balloon_page_isolate(struct page *page) -{ - return false; -} - -static inline void balloon_page_putback(struct page *page) -{ - return; -} - -static inline int balloon_page_migrate(struct page *newpage, - struct page *page, enum migrate_mode mode) -{ - return 0; -} - static inline gfp_t balloon_mapping_gfp_mask(void) { return GFP_HIGHUSER; diff --git a/include/linux/blk-cgroup.h b/include/linux/blk-cgroup.h index f2ad8ed8f777..652cd05b0924 100644 --- a/include/linux/blk-cgroup.h +++ b/include/linux/blk-cgroup.h @@ -95,7 +95,10 @@ struct blkcg_gq { spinlock_t async_bio_lock; struct bio_list async_bios; - struct work_struct async_bio_work; + union { + struct work_struct async_bio_work; + struct work_struct free_work; + }; atomic_t use_delay; atomic64_t delay_nsec; diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h index dd0763a1c674..1973ef9bd40f 100644 --- a/include/linux/blk_types.h +++ b/include/linux/blk_types.h @@ -85,8 +85,10 @@ struct block_device { */ #if defined(CONFIG_ALPHA) && !defined(__alpha_bwx__) typedef u32 __bitwise blk_status_t; +typedef u32 blk_short_t; #else typedef u8 __bitwise blk_status_t; +typedef u16 blk_short_t; #endif #define BLK_STS_OK 0 #define BLK_STS_NOTSUPP ((__force blk_status_t)1) diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h index c1fc4af47f69..3a9d2d7cc6b7 100644 --- a/include/linux/bpf_verifier.h +++ b/include/linux/bpf_verifier.h @@ -570,9 +570,11 @@ static inline u32 type_flag(u32 type) return type & ~BPF_BASE_TYPE_MASK; } +/* only use after check_attach_btf_id() */ static inline enum bpf_prog_type resolve_prog_type(struct bpf_prog *prog) { - return 
prog->aux->dst_prog ? prog->aux->dst_prog->type : prog->type; + return prog->type == BPF_PROG_TYPE_EXT ? + prog->aux->dst_prog->type : prog->type; } #endif /* _LINUX_BPF_VERIFIER_H */ diff --git a/include/linux/clk/sunxi-ng.h b/include/linux/clk/sunxi-ng.h index cf32123b39f5..57c8ec44ab4e 100644 --- a/include/linux/clk/sunxi-ng.h +++ b/include/linux/clk/sunxi-ng.h @@ -9,4 +9,6 @@ int sunxi_ccu_set_mmc_timing_mode(struct clk *clk, bool new_mode); int sunxi_ccu_get_mmc_timing_mode(struct clk *clk); +int sun6i_rtc_ccu_probe(struct device *dev, void __iomem *reg); + #endif diff --git a/include/linux/cma.h b/include/linux/cma.h index 90fd742fd1ef..a6f637342740 100644 --- a/include/linux/cma.h +++ b/include/linux/cma.h @@ -12,10 +12,6 @@ */ #ifdef CONFIG_CMA_AREAS #define MAX_CMA_AREAS (1 + CONFIG_CMA_AREAS) - -#else -#define MAX_CMA_AREAS (0) - #endif #define CMA_MAX_NAME 64 diff --git a/include/linux/fs.h b/include/linux/fs.h index 183160872133..bbde95387a23 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -275,7 +275,6 @@ enum positive_aop_returns { AOP_TRUNCATED_PAGE = 0x80001, }; -#define AOP_FLAG_CONT_EXPAND 0x0001 /* called from cont_expand */ #define AOP_FLAG_NOFS 0x0002 /* used by filesystem to direct * helper code (eg buffer layer) * to clear GFP_FS from alloc */ @@ -338,28 +337,6 @@ static inline bool is_sync_kiocb(struct kiocb *kiocb) return kiocb->ki_complete == NULL; } -/* - * "descriptor" for what we're up to with a read. - * This allows us to use the same read code yet - * have multiple different users of the data that - * we read from a file. - * - * The simplest case just copies the data to user - * mode. - */ -typedef struct { - size_t written; - size_t count; - union { - char __user *buf; - void *data; - } arg; - int error; -} read_descriptor_t; - -typedef int (*read_actor_t)(read_descriptor_t *, struct page *, - unsigned long, unsigned long); - struct address_space_operations { int (*writepage)(struct page *page, struct writeback_control *wbc); int (*readpage)(struct file *, struct page *); @@ -370,12 +347,6 @@ struct address_space_operations { /* Mark a folio dirty. Return true if this dirtied it */ bool (*dirty_folio)(struct address_space *, struct folio *); - /* - * Reads in the requested pages. Unlike ->readpage(), this is - * PURELY used for read-ahead!. 
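An editorial illustration: the lines just below drop ->readpages() in favour of ->readahead(), and the netfs hunk later in this diff reduces netfs_readahead() to a single readahead_control argument, so for a netfs-backed filesystem the conversion can be a one-liner. A hedged sketch; "myfs" and its aops table are hypothetical, not code from this series:

static void myfs_readahead(struct readahead_control *ractl)
{
	/* The netfs helper now finds its ops and cache cookie via the
	 * inode's netfs context, so no extra arguments remain. */
	netfs_readahead(ractl);
}

static const struct address_space_operations myfs_aops = {
	.readahead	= myfs_readahead,
	/* ... remaining ops elided ... */
};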
- */ - int (*readpages)(struct file *filp, struct address_space *mapping, - struct list_head *pages, unsigned nr_pages); void (*readahead)(struct readahead_control *); int (*write_begin)(struct file *, struct address_space *mapping, @@ -3027,7 +2998,7 @@ extern ssize_t generic_file_read_iter(struct kiocb *, struct iov_iter *); extern ssize_t __generic_file_write_iter(struct kiocb *, struct iov_iter *); extern ssize_t generic_file_write_iter(struct kiocb *, struct iov_iter *); extern ssize_t generic_file_direct_write(struct kiocb *, struct iov_iter *); -extern ssize_t generic_perform_write(struct file *, struct iov_iter *, loff_t); +ssize_t generic_perform_write(struct kiocb *, struct iov_iter *); ssize_t vfs_iter_read(struct file *file, struct iov_iter *iter, loff_t *ppos, rwf_t flags); diff --git a/include/linux/fscache.h b/include/linux/fscache.h index d44ff747a657..6727fb0db619 100644 --- a/include/linux/fscache.h +++ b/include/linux/fscache.h @@ -457,6 +457,20 @@ int fscache_begin_read_operation(struct netfs_cache_resources *cres, } /** + * fscache_end_operation - End the read operation for the netfs lib + * @cres: The cache resources for the read operation + * + * Clean up the resources at the end of the read request. + */ +static inline void fscache_end_operation(struct netfs_cache_resources *cres) +{ + const struct netfs_cache_ops *ops = fscache_operation_valid(cres); + + if (ops) + ops->end_operation(cres); +} + +/** * fscache_read - Start a read from the cache. * @cres: The cache resources to use * @start_pos: The beginning file offset in the cache file diff --git a/include/linux/fsverity.h b/include/linux/fsverity.h index b568b3c7d095..a7afc800bd8d 100644 --- a/include/linux/fsverity.h +++ b/include/linux/fsverity.h @@ -221,7 +221,7 @@ static inline void fsverity_enqueue_verify_work(struct work_struct *work) * * This checks whether ->i_verity_info has been set. * - * Filesystems call this from ->readpages() to check whether the pages need to + * Filesystems call this from ->readahead() to check whether the pages need to * be verified or not. Don't use IS_VERITY() for this purpose; it's subject to * a race condition where the file is being read concurrently with * FS_IOC_ENABLE_VERITY completing. (S_VERITY is set before ->i_verity_info.) diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h index ed8cf433a46a..4816b7e11047 100644 --- a/include/linux/ftrace.h +++ b/include/linux/ftrace.h @@ -9,6 +9,7 @@ #include <linux/trace_recursion.h> #include <linux/trace_clock.h> +#include <linux/jump_label.h> #include <linux/kallsyms.h> #include <linux/linkage.h> #include <linux/bitops.h> @@ -1018,7 +1019,20 @@ unsigned long ftrace_graph_ret_addr(struct task_struct *task, int *idx, extern int register_ftrace_graph(struct fgraph_ops *ops); extern void unregister_ftrace_graph(struct fgraph_ops *ops); -extern bool ftrace_graph_is_dead(void); +/** + * ftrace_graph_is_dead - returns true if ftrace_graph_stop() was called + * + * ftrace_graph_stop() is called when a severe error is detected in + * the function graph tracing. This function is called by the critical + * paths of function graph to keep those paths from doing any more harm. 
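An editorial aside on the change just below: ftrace_graph_is_dead() stops being an out-of-line call and becomes a test of a static key, so the hot paths it guards pay only a patched jump. A hedged sketch of the intended call pattern; the entry handler itself is illustrative, not from this diff:

static int example_graph_entry(struct ftrace_graph_ent *trace)
{
	/* Compiles down to a NOP until ftrace_graph_stop() flips the key. */
	if (ftrace_graph_is_dead())
		return 0;

	/* ... record the function entry ... */
	return 1;
}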
+ */ +DECLARE_STATIC_KEY_FALSE(kill_ftrace_graph); + +static inline bool ftrace_graph_is_dead(void) +{ + return static_branch_unlikely(&kill_ftrace_graph); +} + extern void ftrace_graph_stop(void); /* The current handlers in use */ diff --git a/include/linux/gfp.h b/include/linux/gfp.h index 0fa17fb85de5..761f8f1885c7 100644 --- a/include/linux/gfp.h +++ b/include/linux/gfp.h @@ -264,9 +264,7 @@ struct vm_area_struct; #define __GFP_NOLOCKDEP ((__force gfp_t)___GFP_NOLOCKDEP) /* Room for N __GFP_FOO bits */ -#define __GFP_BITS_SHIFT (24 + \ - 3 * IS_ENABLED(CONFIG_KASAN_HW_TAGS) + \ - IS_ENABLED(CONFIG_LOCKDEP)) +#define __GFP_BITS_SHIFT (27 + IS_ENABLED(CONFIG_LOCKDEP)) #define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1)) /** diff --git a/include/linux/gpio/driver.h b/include/linux/gpio/driver.h index b0728c8ad90c..98c93510640e 100644 --- a/include/linux/gpio/driver.h +++ b/include/linux/gpio/driver.h @@ -168,13 +168,16 @@ struct gpio_irq_chip { /** * @parent_handler_data: + * + * If @per_parent_data is false, @parent_handler_data is a single + * pointer used as the data associated with every parent interrupt. + * * @parent_handler_data_array: * - * Data associated, and passed to, the handler for the parent - * interrupt. Can either be a single pointer if @per_parent_data - * is false, or an array of @num_parents pointers otherwise. If - * @per_parent_data is true, @parent_handler_data_array cannot be - * NULL. + * If @per_parent_data is true, @parent_handler_data_array is + * an array of @num_parents pointers, and is used to associate + * different data for each parent. This cannot be NULL if + * @per_parent_data is true. */ union { void *parent_handler_data; diff --git a/include/linux/input.h b/include/linux/input.h index 0354b298d874..49790c1bd2c4 100644 --- a/include/linux/input.h +++ b/include/linux/input.h @@ -475,6 +475,8 @@ static inline void input_set_events_per_packet(struct input_dev *dev, int n_even void input_alloc_absinfo(struct input_dev *dev); void input_set_abs_params(struct input_dev *dev, unsigned int axis, int min, int max, int fuzz, int flat); +void input_copy_abs(struct input_dev *dst, unsigned int dst_axis, + const struct input_dev *src, unsigned int src_axis); #define INPUT_GENERATE_ABS_ACCESSORS(_suffix, _item) \ static inline int input_abs_get_##_suffix(struct input_dev *dev, \ diff --git a/include/linux/input/vivaldi-fmap.h b/include/linux/input/vivaldi-fmap.h new file mode 100644 index 000000000000..7e4b7023bf04 --- /dev/null +++ b/include/linux/input/vivaldi-fmap.h @@ -0,0 +1,27 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _VIVALDI_FMAP_H +#define _VIVALDI_FMAP_H + +#include <linux/types.h> + +#define VIVALDI_MAX_FUNCTION_ROW_KEYS 24 + +/** + * struct vivaldi_data - Function row map data for ChromeOS Vivaldi keyboards + * @function_row_physmap: An array of scancodes or their equivalent (HID usage + * codes, encoded rows/columns, etc) for the top + * row function keys, in an order from left to right + * @num_function_row_keys: The number of top row keys in a custom keyboard + * + * This structure is supposed to be used by ChromeOS keyboards using + * the Vivaldi keyboard function row design. 
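An editorial aside: vivaldi_function_row_physmap_show(), declared just below, is meant to back a sysfs attribute in the keyboard drivers that share this code. A hedged sketch of a driver-side wrapper; storing the vivaldi_data directly in drvdata is an assumption for brevity (a real driver may embed it in a larger private struct):

static ssize_t function_row_physmap_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	const struct vivaldi_data *data = dev_get_drvdata(dev);

	return vivaldi_function_row_physmap_show(data, buf);
}
static DEVICE_ATTR_RO(function_row_physmap);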
+ */ +struct vivaldi_data { + u32 function_row_physmap[VIVALDI_MAX_FUNCTION_ROW_KEYS]; + unsigned int num_function_row_keys; +}; + +ssize_t vivaldi_function_row_physmap_show(const struct vivaldi_data *data, + char *buf); + +#endif /* _VIVALDI_FMAP_H */ diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index 9536ffa0473b..3f9b22c4983a 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -148,6 +148,7 @@ static inline bool is_error_page(struct page *page) #define KVM_REQUEST_MASK GENMASK(7,0) #define KVM_REQUEST_NO_WAKEUP BIT(8) #define KVM_REQUEST_WAIT BIT(9) +#define KVM_REQUEST_NO_ACTION BIT(10) /* * Architecture-independent vcpu->requests bit members * Bits 4-7 are reserved for more arch-independent bits. */ @@ -156,9 +157,18 @@ #define KVM_REQ_VM_DEAD (1 | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP) #define KVM_REQ_UNBLOCK 2 #define KVM_REQ_UNHALT 3 -#define KVM_REQ_GPC_INVALIDATE (5 | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP) #define KVM_REQUEST_ARCH_BASE 8 +/* + * KVM_REQ_OUTSIDE_GUEST_MODE exists purely as a way to force the vCPU to + * OUTSIDE_GUEST_MODE. KVM_REQ_OUTSIDE_GUEST_MODE differs from a vCPU "kick" + * in that it ensures the vCPU has reached OUTSIDE_GUEST_MODE before continuing + * on. A kick only guarantees that the vCPU is on its way out, e.g. a previous + * kick may have set vcpu->mode to EXITING_GUEST_MODE, and so there's no + * guarantee the vCPU received an IPI and has actually exited guest mode. + */ +#define KVM_REQ_OUTSIDE_GUEST_MODE (KVM_REQUEST_NO_ACTION | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP) + #define KVM_ARCH_REQ_FLAGS(nr, flags) ({ \ BUILD_BUG_ON((unsigned)(nr) >= (sizeof_field(struct kvm_vcpu, requests) * 8) - KVM_REQUEST_ARCH_BASE); \ (unsigned)(((nr) + KVM_REQUEST_ARCH_BASE) | (flags)); \ @@ -1221,27 +1231,27 @@ void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn); * @gpc: struct gfn_to_pfn_cache object. * @vcpu: vCPU to be used for marking pages dirty and to be woken on * invalidation. - * @guest_uses_pa: indicates that the resulting host physical PFN is used while - * @vcpu is IN_GUEST_MODE so invalidations should wake it. - * @kernel_map: requests a kernel virtual mapping (kmap / memremap). + * @usage: indicates if the resulting host physical PFN is used while + * the @vcpu is IN_GUEST_MODE (in which case invalidation of + * the cache from MMU notifiers---but not for KVM memslot + * changes!---will also force @vcpu to exit the guest and + * refresh the cache); and/or if the PFN is used directly + * by KVM (and thus needs a kernel virtual mapping). * @gpa: guest physical address to map. * @len: sanity check; the range being accessed must fit a single page. - * @dirty: mark the cache dirty immediately. * * @return: 0 for success. * -EINVAL for a mapping which would cross a page boundary. * -EFAULT for an untranslatable guest physical address. * * This primes a gfn_to_pfn_cache and links it into the @kvm's list for - * invalidations to be processed. Invalidation callbacks to @vcpu using - * %KVM_REQ_GPC_INVALIDATE will occur only for MMU notifiers, not for KVM - * memslot changes. Callers are required to use kvm_gfn_to_pfn_cache_check() - * to ensure that the cache is valid before accessing the target page. + * invalidations to be processed. Callers are required to use + * kvm_gfn_to_pfn_cache_check() to ensure that the cache is valid before + * accessing the target page.
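An editorial aside: the reworked prototype follows immediately below; the old guest_uses_pa/kernel_map/dirty trio collapses into one enum pfn_cache_usage argument. A hedged usage sketch, with the wrapper function and the 8-byte length purely illustrative:

static int example_gpc_setup(struct kvm *kvm, struct kvm_vcpu *vcpu,
			     struct gfn_to_pfn_cache *gpc, gpa_t gpa)
{
	/* Both the guest and KVM itself will use the PFN, so request
	 * vCPU eviction on invalidation plus a kernel mapping. */
	return kvm_gfn_to_pfn_cache_init(kvm, gpc, vcpu,
					 KVM_GUEST_AND_HOST_USE_PFN,
					 gpa, sizeof(u64));
}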
*/ int kvm_gfn_to_pfn_cache_init(struct kvm *kvm, struct gfn_to_pfn_cache *gpc, - struct kvm_vcpu *vcpu, bool guest_uses_pa, - bool kernel_map, gpa_t gpa, unsigned long len, - bool dirty); + struct kvm_vcpu *vcpu, enum pfn_cache_usage usage, + gpa_t gpa, unsigned long len); /** * kvm_gfn_to_pfn_cache_check - check validity of a gfn_to_pfn_cache. * @@ -1250,7 +1260,6 @@ int kvm_gfn_to_pfn_cache_init(struct kvm *kvm, struct gfn_to_pfn_cache *gpc, * @gpc: struct gfn_to_pfn_cache object. * @gpa: current guest physical address to map. * @len: sanity check; the range being accessed must fit a single page. - * @dirty: mark the cache dirty immediately. * * @return: %true if the cache is still valid and the address matches. * %false if the cache is not valid. @@ -1272,7 +1281,6 @@ bool kvm_gfn_to_pfn_cache_check(struct kvm *kvm, struct gfn_to_pfn_cache *gpc, * @gpc: struct gfn_to_pfn_cache object. * @gpa: updated guest physical address to map. * @len: sanity check; the range being accessed must fit a single page. - * @dirty: mark the cache dirty immediately. * * @return: 0 for success. * -EINVAL for a mapping which would cross a page boundary. @@ -1285,7 +1293,7 @@ bool kvm_gfn_to_pfn_cache_check(struct kvm *kvm, struct gfn_to_pfn_cache *gpc, * with the lock still held to permit access. */ int kvm_gfn_to_pfn_cache_refresh(struct kvm *kvm, struct gfn_to_pfn_cache *gpc, - gpa_t gpa, unsigned long len, bool dirty); + gpa_t gpa, unsigned long len); /** * kvm_gfn_to_pfn_cache_unmap - temporarily unmap a gfn_to_pfn_cache. * @@ -1293,10 +1301,9 @@ int kvm_gfn_to_pfn_cache_refresh(struct kvm *kvm, struct gfn_to_pfn_cache *gpc, * @kvm: pointer to kvm instance. * @gpc: struct gfn_to_pfn_cache object. * - * This unmaps the referenced page and marks it dirty, if appropriate. The - * cache is left in the invalid state but at least the mapping from GPA to - * userspace HVA will remain cached and can be reused on a subsequent - * refresh. + * This unmaps the referenced page. The cache is left in the invalid state + * but at least the mapping from GPA to userspace HVA will remain cached + * and can be reused on a subsequent refresh. */ void kvm_gfn_to_pfn_cache_unmap(struct kvm *kvm, struct gfn_to_pfn_cache *gpc); @@ -1984,7 +1991,7 @@ static inline int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args) void kvm_arch_irq_routing_update(struct kvm *kvm); -static inline void kvm_make_request(int req, struct kvm_vcpu *vcpu) +static inline void __kvm_make_request(int req, struct kvm_vcpu *vcpu) { /* * Ensure the rest of the request is published to kvm_check_request's @@ -1994,6 +2001,19 @@ static inline void kvm_make_request(int req, struct kvm_vcpu *vcpu) set_bit(req & KVM_REQUEST_MASK, (void *)&vcpu->requests); } +static __always_inline void kvm_make_request(int req, struct kvm_vcpu *vcpu) +{ + /* + * Requests that don't require vCPU action should never be logged in + * vcpu->requests. The vCPU won't clear the request, so it will stay + * logged indefinitely and prevent the vCPU from entering the guest.
+ */ + BUILD_BUG_ON(!__builtin_constant_p(req) || + (req & KVM_REQUEST_NO_ACTION)); + + __kvm_make_request(req, vcpu); +} + static inline bool kvm_request_pending(struct kvm_vcpu *vcpu) { return READ_ONCE(vcpu->requests); diff --git a/include/linux/kvm_types.h b/include/linux/kvm_types.h index dceac12c1ce5..ac1ebb37a0ff 100644 --- a/include/linux/kvm_types.h +++ b/include/linux/kvm_types.h @@ -18,6 +18,7 @@ struct kvm_memslots; enum kvm_mr_change; +#include <linux/bits.h> #include <linux/types.h> #include <linux/spinlock_types.h> @@ -46,6 +47,12 @@ typedef u64 hfn_t; typedef hfn_t kvm_pfn_t; +enum pfn_cache_usage { + KVM_GUEST_USES_PFN = BIT(0), + KVM_HOST_USES_PFN = BIT(1), + KVM_GUEST_AND_HOST_USE_PFN = KVM_GUEST_USES_PFN | KVM_HOST_USES_PFN, +}; + struct gfn_to_hva_cache { u64 generation; gpa_t gpa; @@ -64,11 +71,9 @@ struct gfn_to_pfn_cache { rwlock_t lock; void *khva; kvm_pfn_t pfn; + enum pfn_cache_usage usage; bool active; bool valid; - bool dirty; - bool kernel_map; - bool guest_uses_pa; }; #ifdef KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE diff --git a/include/linux/mc146818rtc.h b/include/linux/mc146818rtc.h index 808bb4cee230..b0da04fe087b 100644 --- a/include/linux/mc146818rtc.h +++ b/include/linux/mc146818rtc.h @@ -86,6 +86,8 @@ struct cmos_rtc_board_info { /* 2 values for divider stage reset, others for "testing purposes only" */ # define RTC_DIV_RESET1 0x60 # define RTC_DIV_RESET2 0x70 + /* In the AMD BKDG, bits 5 and 6 are reserved and bit 4 selects the dv0 bank */ +# define RTC_AMD_BANK_SELECT 0x10 /* Periodic intr. / Square wave rate select. 0=none, 1=32.8kHz,... 15=2Hz */ # define RTC_RATE_SELECT 0x0F diff --git a/include/linux/net.h b/include/linux/net.h index ba736b457a06..12093f4db50c 100644 --- a/include/linux/net.h +++ b/include/linux/net.h @@ -125,6 +125,25 @@ struct socket { struct socket_wq wq; }; +/* + * "descriptor" for what we're up to with a read. + * This allows us to use the same read code yet + * have multiple different users of the data that + * we read from a file. + * + * The simplest case just copies the data to user + * mode.
+ */ +typedef struct { + size_t written; + size_t count; + union { + char __user *buf; + void *data; + } arg; + int error; +} read_descriptor_t; + struct vm_area_struct; struct page; struct sockaddr; diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 59e27a2b7bf0..28ea4f8269d4 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -59,7 +59,8 @@ struct dsa_port; struct ip_tunnel_parm; struct macsec_context; struct macsec_ops; - +struct netdev_name_node; +struct sd_flow_limit; struct sfp_bus; /* 802.11 specific */ struct wireless_dev; @@ -202,6 +203,7 @@ struct net_device_core_stats { local_t rx_dropped; local_t tx_dropped; local_t rx_nohandler; + local_t rx_otherhost_dropped; } __aligned(4 * sizeof(local_t)); #include <linux/cache.h> @@ -862,6 +864,7 @@ enum net_device_path_type { DEV_PATH_BRIDGE, DEV_PATH_PPPOE, DEV_PATH_DSA, + DEV_PATH_MTK_WDMA, }; struct net_device_path { @@ -887,6 +890,12 @@ struct net_device_path { int port; u16 proto; } dsa; + struct { + u8 wdma_idx; + u8 queue; + u16 wcid; + u8 bss; + } mtk_wdma; }; }; @@ -1013,16 +1022,6 @@ struct dev_ifalias { struct devlink; struct tlsdev_ops; -struct netdev_name_node { - struct hlist_node hlist; - struct list_head list; - struct net_device *dev; - const char *name; -}; - -int netdev_name_node_alt_create(struct net_device *dev, const char *name); -int netdev_name_node_alt_destroy(struct net_device *dev, const char *name); - struct netdev_net_notifier { struct list_head list; struct notifier_block *nb; @@ -2968,7 +2967,6 @@ struct net_device *dev_get_by_index(struct net *net, int ifindex); struct net_device *__dev_get_by_index(struct net *net, int ifindex); struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex); struct net_device *dev_get_by_napi_id(unsigned int napi_id); -int netdev_get_name(struct net *net, char *name, int ifindex); int dev_restart(struct net_device *dev); @@ -3027,19 +3025,6 @@ static inline bool dev_has_header(const struct net_device *dev) return dev->header_ops && dev->header_ops->create; } -#ifdef CONFIG_NET_FLOW_LIMIT -#define FLOW_LIMIT_HISTORY (1 << 7) /* must be ^2 and !overflow buckets */ -struct sd_flow_limit { - u64 count; - unsigned int num_buckets; - unsigned int history_head; - u16 history[FLOW_LIMIT_HISTORY]; - u8 buckets[]; -}; - -extern int netdev_flow_limit_table_len; -#endif /* CONFIG_NET_FLOW_LIMIT */ - /* * Incoming packets are placed on per-CPU queues */ @@ -3763,7 +3748,6 @@ int dev_change_flags(struct net_device *dev, unsigned int flags, struct netlink_ext_ack *extack); void __dev_notify_flags(struct net_device *, unsigned int old_flags, unsigned int gchanges); -int dev_change_name(struct net_device *, const char *); int dev_set_alias(struct net_device *, const char *, size_t); int dev_get_alias(const struct net_device *, char *, size_t); int __dev_change_net_namespace(struct net_device *dev, struct net *net, @@ -3775,13 +3759,7 @@ int dev_change_net_namespace(struct net_device *dev, struct net *net, return __dev_change_net_namespace(dev, net, pat, 0); } int __dev_set_mtu(struct net_device *, int); -int dev_validate_mtu(struct net_device *dev, int mtu, - struct netlink_ext_ack *extack); -int dev_set_mtu_ext(struct net_device *dev, int mtu, - struct netlink_ext_ack *extack); int dev_set_mtu(struct net_device *, int); -int dev_change_tx_queue_len(struct net_device *, unsigned long); -void dev_set_group(struct net_device *, int); int dev_pre_changeaddr_notify(struct net_device *dev, const char *addr, struct netlink_ext_ack *extack); int 
dev_set_mac_address(struct net_device *dev, struct sockaddr *sa, @@ -3789,24 +3767,13 @@ int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa, int dev_set_mac_address_user(struct net_device *dev, struct sockaddr *sa, struct netlink_ext_ack *extack); int dev_get_mac_address(struct sockaddr *sa, struct net *net, char *dev_name); -int dev_change_carrier(struct net_device *, bool new_carrier); -int dev_get_phys_port_id(struct net_device *dev, - struct netdev_phys_item_id *ppid); -int dev_get_phys_port_name(struct net_device *dev, - char *name, size_t len); int dev_get_port_parent_id(struct net_device *dev, struct netdev_phys_item_id *ppid, bool recurse); bool netdev_port_same_parent_id(struct net_device *a, struct net_device *b); -int dev_change_proto_down(struct net_device *dev, bool proto_down); -void dev_change_proto_down_reason(struct net_device *dev, unsigned long mask, - u32 value); struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev, bool *again); struct sk_buff *dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev, struct netdev_queue *txq, int *ret); -typedef int (*bpf_op_t)(struct net_device *dev, struct netdev_bpf *bpf); -int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack, - int fd, int expected_fd, u32 flags); int bpf_xdp_link_attach(const union bpf_attr *attr, struct bpf_prog *prog); u8 dev_xdp_prog_count(struct net_device *dev); u32 dev_xdp_prog_id(struct net_device *dev, enum bpf_xdp_mode mode); @@ -3871,6 +3838,7 @@ static inline void dev_core_stats_##FIELD##_inc(struct net_device *dev) \ DEV_CORE_STATS_INC(rx_dropped) DEV_CORE_STATS_INC(tx_dropped) DEV_CORE_STATS_INC(rx_nohandler) +DEV_CORE_STATS_INC(rx_otherhost_dropped) static __always_inline int ____dev_forward_skb(struct net_device *dev, struct sk_buff *skb, @@ -3891,12 +3859,6 @@ static __always_inline int ____dev_forward_skb(struct net_device *dev, bool dev_nit_active(struct net_device *dev); void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev); -extern int netdev_budget; -extern unsigned int netdev_budget_usecs; - -/* Called by rtnetlink.c:rtnl_unlock() */ -void netdev_run_todo(void); - static inline void __dev_put(struct net_device *dev) { if (dev) { @@ -4013,10 +3975,7 @@ static inline void dev_replace_track(struct net_device *odev, * called netif_lowerlayer_*() because they represent the state of any * kind of lower layer not just hardware media. 
*/ - -void linkwatch_init_dev(struct net_device *dev); void linkwatch_fire_event(struct net_device *dev); -void linkwatch_forget_dev(struct net_device *dev); /** * netif_carrier_ok - test if carrier present @@ -4462,9 +4421,6 @@ int dev_addr_add(struct net_device *dev, const unsigned char *addr, unsigned char addr_type); int dev_addr_del(struct net_device *dev, const unsigned char *addr, unsigned char addr_type); -void dev_addr_flush(struct net_device *dev); -int dev_addr_init(struct net_device *dev); -void dev_addr_check(struct net_device *dev); /* Functions used for unicast addresses handling */ int dev_uc_add(struct net_device *dev, const unsigned char *addr); @@ -4554,7 +4510,6 @@ static inline void __dev_mc_unsync(struct net_device *dev, /* Functions used for secondary unicast and multicast support */ void dev_set_rx_mode(struct net_device *dev); -void __dev_set_rx_mode(struct net_device *dev); int dev_set_promiscuity(struct net_device *dev, int inc); int dev_set_allmulti(struct net_device *dev, int inc); void netdev_state_change(struct net_device *dev); @@ -4572,11 +4527,6 @@ void dev_fetch_sw_netstats(struct rtnl_link_stats64 *s, void dev_get_tstats64(struct net_device *dev, struct rtnl_link_stats64 *s); extern int netdev_max_backlog; -extern int netdev_tstamp_prequeue; -extern int netdev_unregister_timeout_secs; -extern int weight_p; -extern int dev_weight_rx_bias; -extern int dev_weight_tx_bias; extern int dev_rx_weight; extern int dev_tx_weight; extern int gro_normal_batch; @@ -4764,12 +4714,6 @@ static inline void netdev_rx_csum_fault(struct net_device *dev, void net_enable_timestamp(void); void net_disable_timestamp(void); -#ifdef CONFIG_PROC_FS -int __init dev_proc_init(void); -#else -#define dev_proc_init() 0 -#endif - static inline netdev_tx_t __netdev_start_xmit(const struct net_device_ops *ops, struct sk_buff *skb, struct net_device *dev, bool more) @@ -4805,8 +4749,6 @@ extern const struct kobj_ns_type_operations net_ns_type_operations; const char *netdev_drivername(const struct net_device *dev); -void linkwatch_run_queue(void); - static inline netdev_features_t netdev_intersect_features(netdev_features_t f1, netdev_features_t f2) { diff --git a/include/linux/netfs.h b/include/linux/netfs.h index 614f22213e21..c7bf1eaf51d5 100644 --- a/include/linux/netfs.h +++ b/include/linux/netfs.h @@ -18,6 +18,8 @@ #include <linux/fs.h> #include <linux/pagemap.h> +enum netfs_sreq_ref_trace; + /* * Overload PG_private_2 to give us PG_fscache - this is used to indicate that * a page is currently backed by a local disk cache @@ -106,7 +108,7 @@ static inline int wait_on_page_fscache_killable(struct page *page) return folio_wait_private_2_killable(page_folio(page)); } -enum netfs_read_source { +enum netfs_io_source { NETFS_FILL_WITH_ZEROES, NETFS_DOWNLOAD_FROM_SERVER, NETFS_READ_FROM_CACHE, @@ -117,6 +119,17 @@ typedef void (*netfs_io_terminated_t)(void *priv, ssize_t transferred_or_error, bool was_async); /* + * Per-inode description. This must be directly after the inode struct. + */ +struct netfs_i_context { + const struct netfs_request_ops *ops; +#if IS_ENABLED(CONFIG_FSCACHE) + struct fscache_cookie *cache; +#endif + loff_t remote_i_size; /* Size of the remote file */ +}; + +/* * Resources required to do operations on a cache. */ struct netfs_cache_resources { @@ -130,69 +143,75 @@ struct netfs_cache_resources { /* * Descriptor for a single component subrequest. 
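An editorial aside: the helpers later in this file locate the netfs_i_context introduced above with pointer arithmetic (inode + 1), so the context must sit directly after the VFS inode inside the filesystem's inode structure. A hedged layout sketch with a hypothetical "myfs":

struct myfs_inode {
	struct inode vfs_inode;		/* must come first */
	struct netfs_i_context netfs;	/* and this directly after it */
	/* filesystem-private fields may follow */
	unsigned long flags;
};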
*/ -struct netfs_read_subrequest { - struct netfs_read_request *rreq; /* Supervising read request */ +struct netfs_io_subrequest { + struct netfs_io_request *rreq; /* Supervising I/O request */ struct list_head rreq_link; /* Link in rreq->subrequests */ loff_t start; /* Where to start the I/O */ size_t len; /* Size of the I/O */ size_t transferred; /* Amount of data transferred */ - refcount_t usage; + refcount_t ref; short error; /* 0 or error that occurred */ unsigned short debug_index; /* Index in list (for debugging output) */ - enum netfs_read_source source; /* Where to read from */ + enum netfs_io_source source; /* Where to read from/write to */ unsigned long flags; -#define NETFS_SREQ_WRITE_TO_CACHE 0 /* Set if should write to cache */ +#define NETFS_SREQ_COPY_TO_CACHE 0 /* Set if should copy the data to the cache */ #define NETFS_SREQ_CLEAR_TAIL 1 /* Set if the rest of the read should be cleared */ -#define NETFS_SREQ_SHORT_READ 2 /* Set if there was a short read from the cache */ +#define NETFS_SREQ_SHORT_IO 2 /* Set if the I/O was short */ #define NETFS_SREQ_SEEK_DATA_READ 3 /* Set if ->read() should SEEK_DATA first */ #define NETFS_SREQ_NO_PROGRESS 4 /* Set if we didn't manage to read any data */ }; +enum netfs_io_origin { + NETFS_READAHEAD, /* This read was triggered by readahead */ + NETFS_READPAGE, /* This read is a synchronous read */ + NETFS_READ_FOR_WRITE, /* This read is to prepare a write */ +} __mode(byte); + /* - * Descriptor for a read helper request. This is used to make multiple I/O - * requests on a variety of sources and then stitch the result together. + * Descriptor for an I/O helper request. This is used to make multiple I/O + * operations to a variety of data stores and then stitch the result together. */ -struct netfs_read_request { +struct netfs_io_request { struct work_struct work; struct inode *inode; /* The file being accessed */ struct address_space *mapping; /* The mapping being accessed */ struct netfs_cache_resources cache_resources; - struct list_head subrequests; /* Requests to fetch I/O from disk or net */ + struct list_head subrequests; /* Contributory I/O operations */ void *netfs_priv; /* Private data for the netfs */ unsigned int debug_id; - atomic_t nr_rd_ops; /* Number of read ops in progress */ - atomic_t nr_wr_ops; /* Number of write ops in progress */ + atomic_t nr_outstanding; /* Number of ops in progress */ + atomic_t nr_copy_ops; /* Number of copy-to-cache ops in progress */ size_t submitted; /* Amount submitted for I/O so far */ size_t len; /* Length of the request */ short error; /* 0 or error that occurred */ + enum netfs_io_origin origin; /* Origin of the request */ loff_t i_size; /* Size of the file */ loff_t start; /* Start position */ pgoff_t no_unlock_folio; /* Don't unlock this folio after read */ - refcount_t usage; + refcount_t ref; unsigned long flags; #define NETFS_RREQ_INCOMPLETE_IO 0 /* Some ioreqs terminated short or with error */ -#define NETFS_RREQ_WRITE_TO_CACHE 1 /* Need to write to the cache */ +#define NETFS_RREQ_COPY_TO_CACHE 1 /* Need to write to the cache */ #define NETFS_RREQ_NO_UNLOCK_FOLIO 2 /* Don't unlock no_unlock_folio on completion */ #define NETFS_RREQ_DONT_UNLOCK_FOLIOS 3 /* Don't unlock the folios on completion */ #define NETFS_RREQ_FAILED 4 /* The request failed */ #define NETFS_RREQ_IN_PROGRESS 5 /* Unlocked when the request completes */ - const struct netfs_read_request_ops *netfs_ops; + const struct netfs_request_ops *netfs_ops; }; /* * Operations the network filesystem can/must provide to the 
helpers. */ -struct netfs_read_request_ops { - bool (*is_cache_enabled)(struct inode *inode); - void (*init_rreq)(struct netfs_read_request *rreq, struct file *file); - int (*begin_cache_operation)(struct netfs_read_request *rreq); - void (*expand_readahead)(struct netfs_read_request *rreq); - bool (*clamp_length)(struct netfs_read_subrequest *subreq); - void (*issue_op)(struct netfs_read_subrequest *subreq); - bool (*is_still_valid)(struct netfs_read_request *rreq); +struct netfs_request_ops { + int (*init_request)(struct netfs_io_request *rreq, struct file *file); + int (*begin_cache_operation)(struct netfs_io_request *rreq); + void (*expand_readahead)(struct netfs_io_request *rreq); + bool (*clamp_length)(struct netfs_io_subrequest *subreq); + void (*issue_read)(struct netfs_io_subrequest *subreq); + bool (*is_still_valid)(struct netfs_io_request *rreq); int (*check_write_begin)(struct file *file, loff_t pos, unsigned len, struct folio *folio, void **_fsdata); - void (*done)(struct netfs_read_request *rreq); + void (*done)(struct netfs_io_request *rreq); void (*cleanup)(struct address_space *mapping, void *netfs_priv); }; @@ -235,7 +254,7 @@ struct netfs_cache_ops { /* Prepare a read operation, shortening it to a cached/uncached * boundary as appropriate. */ - enum netfs_read_source (*prepare_read)(struct netfs_read_subrequest *subreq, + enum netfs_io_source (*prepare_read)(struct netfs_io_subrequest *subreq, loff_t i_size); /* Prepare a write operation, working out what part of the write we can @@ -254,20 +273,89 @@ struct netfs_cache_ops { }; struct readahead_control; -extern void netfs_readahead(struct readahead_control *, - const struct netfs_read_request_ops *, - void *); -extern int netfs_readpage(struct file *, - struct folio *, - const struct netfs_read_request_ops *, - void *); +extern void netfs_readahead(struct readahead_control *); +extern int netfs_readpage(struct file *, struct page *); extern int netfs_write_begin(struct file *, struct address_space *, loff_t, unsigned int, unsigned int, struct folio **, - void **, - const struct netfs_read_request_ops *, - void *); + void **); -extern void netfs_subreq_terminated(struct netfs_read_subrequest *, ssize_t, bool); +extern void netfs_subreq_terminated(struct netfs_io_subrequest *, ssize_t, bool); +extern void netfs_get_subrequest(struct netfs_io_subrequest *subreq, + enum netfs_sreq_ref_trace what); +extern void netfs_put_subrequest(struct netfs_io_subrequest *subreq, + bool was_async, enum netfs_sreq_ref_trace what); extern void netfs_stats_show(struct seq_file *); +/** + * netfs_i_context - Get the netfs inode context from the inode + * @inode: The inode to query + * + * Get the netfs lib inode context from the network filesystem's inode. The + * context struct is expected to directly follow on from the VFS inode struct. + */ +static inline struct netfs_i_context *netfs_i_context(struct inode *inode) +{ + return (struct netfs_i_context *)(inode + 1); +} + +/** + * netfs_inode - Get the netfs inode from the inode context + * @ctx: The context to query + * + * Get the netfs inode from the netfs library's inode context. The VFS inode + * is expected to directly precede the context struct. + */ +static inline struct inode *netfs_inode(struct netfs_i_context *ctx) +{ + return ((struct inode *)ctx) - 1; +} + +/** + * netfs_i_context_init - Initialise a netfs lib context + * @inode: The inode with which the context is associated + * @ops: The netfs's operations list + * + * Initialise the netfs library context struct. 
This is expected to follow on + * directly from the VFS inode struct. + */ +static inline void netfs_i_context_init(struct inode *inode, + const struct netfs_request_ops *ops) +{ + struct netfs_i_context *ctx = netfs_i_context(inode); + + memset(ctx, 0, sizeof(*ctx)); + ctx->ops = ops; + ctx->remote_i_size = i_size_read(inode); +} + +/** + * netfs_resize_file - Note that a file got resized + * @inode: The inode being resized + * @new_i_size: The new file size + * + * Inform the netfs lib that a file got resized so that it can adjust its state. + */ +static inline void netfs_resize_file(struct inode *inode, loff_t new_i_size) +{ + struct netfs_i_context *ctx = netfs_i_context(inode); + + ctx->remote_i_size = new_i_size; +} + +/** + * netfs_i_cookie - Get the cache cookie from the inode + * @inode: The inode to query + * + * Get the caching cookie (if enabled) from the network filesystem's inode. + */ +static inline struct fscache_cookie *netfs_i_cookie(struct inode *inode) +{ +#if IS_ENABLED(CONFIG_FSCACHE) + struct netfs_i_context *ctx = netfs_i_context(inode); + return ctx->cache; +#else + return NULL; +#endif +} + #endif /* _LINUX_NETFS_H */ diff --git a/include/linux/nvme.h b/include/linux/nvme.h index 4f44f83817a9..f626a445d1a8 100644 --- a/include/linux/nvme.h +++ b/include/linux/nvme.h @@ -346,6 +346,7 @@ enum { NVME_CTRL_ONCS_TIMESTAMP = 1 << 6, NVME_CTRL_VWC_PRESENT = 1 << 0, NVME_CTRL_OACS_SEC_SUPP = 1 << 0, + NVME_CTRL_OACS_NS_MNGT_SUPP = 1 << 3, NVME_CTRL_OACS_DIRECTIVES = 1 << 5, NVME_CTRL_OACS_DBBUF_SUPP = 1 << 8, NVME_CTRL_LPA_CMD_EFFECTS_LOG = 1 << 1, diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h index a8d0b327b066..993994cd943a 100644 --- a/include/linux/pagemap.h +++ b/include/linux/pagemap.h @@ -752,8 +752,6 @@ struct page *read_cache_page(struct address_space *, pgoff_t index, filler_t *filler, void *data); extern struct page * read_cache_page_gfp(struct address_space *mapping, pgoff_t index, gfp_t gfp_mask); -extern int read_cache_pages(struct address_space *mapping, - struct list_head *pages, filler_t *filler, void *data); static inline struct page *read_mapping_page(struct address_space *mapping, pgoff_t index, struct file *file) diff --git a/include/linux/pci-dma-compat.h b/include/linux/pci-dma-compat.h deleted file mode 100644 index 249d4d7fbf18..000000000000 --- a/include/linux/pci-dma-compat.h +++ /dev/null @@ -1,129 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* include this file if the platform implements the dma_ DMA Mapping API - * and wants to provide the pci_ DMA Mapping API in terms of it */ - -#ifndef _ASM_GENERIC_PCI_DMA_COMPAT_H -#define _ASM_GENERIC_PCI_DMA_COMPAT_H - -#include <linux/dma-mapping.h> - -/* This defines the direction arg to the DMA mapping routines. 
*/ -#define PCI_DMA_BIDIRECTIONAL DMA_BIDIRECTIONAL -#define PCI_DMA_TODEVICE DMA_TO_DEVICE -#define PCI_DMA_FROMDEVICE DMA_FROM_DEVICE -#define PCI_DMA_NONE DMA_NONE - -static inline void * -pci_alloc_consistent(struct pci_dev *hwdev, size_t size, - dma_addr_t *dma_handle) -{ - return dma_alloc_coherent(&hwdev->dev, size, dma_handle, GFP_ATOMIC); -} - -static inline void * -pci_zalloc_consistent(struct pci_dev *hwdev, size_t size, - dma_addr_t *dma_handle) -{ - return dma_alloc_coherent(&hwdev->dev, size, dma_handle, GFP_ATOMIC); -} - -static inline void -pci_free_consistent(struct pci_dev *hwdev, size_t size, - void *vaddr, dma_addr_t dma_handle) -{ - dma_free_coherent(&hwdev->dev, size, vaddr, dma_handle); -} - -static inline dma_addr_t -pci_map_single(struct pci_dev *hwdev, void *ptr, size_t size, int direction) -{ - return dma_map_single(&hwdev->dev, ptr, size, (enum dma_data_direction)direction); -} - -static inline void -pci_unmap_single(struct pci_dev *hwdev, dma_addr_t dma_addr, - size_t size, int direction) -{ - dma_unmap_single(&hwdev->dev, dma_addr, size, (enum dma_data_direction)direction); -} - -static inline dma_addr_t -pci_map_page(struct pci_dev *hwdev, struct page *page, - unsigned long offset, size_t size, int direction) -{ - return dma_map_page(&hwdev->dev, page, offset, size, (enum dma_data_direction)direction); -} - -static inline void -pci_unmap_page(struct pci_dev *hwdev, dma_addr_t dma_address, - size_t size, int direction) -{ - dma_unmap_page(&hwdev->dev, dma_address, size, (enum dma_data_direction)direction); -} - -static inline int -pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sg, - int nents, int direction) -{ - return dma_map_sg(&hwdev->dev, sg, nents, (enum dma_data_direction)direction); -} - -static inline void -pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sg, - int nents, int direction) -{ - dma_unmap_sg(&hwdev->dev, sg, nents, (enum dma_data_direction)direction); -} - -static inline void -pci_dma_sync_single_for_cpu(struct pci_dev *hwdev, dma_addr_t dma_handle, - size_t size, int direction) -{ - dma_sync_single_for_cpu(&hwdev->dev, dma_handle, size, (enum dma_data_direction)direction); -} - -static inline void -pci_dma_sync_single_for_device(struct pci_dev *hwdev, dma_addr_t dma_handle, - size_t size, int direction) -{ - dma_sync_single_for_device(&hwdev->dev, dma_handle, size, (enum dma_data_direction)direction); -} - -static inline void -pci_dma_sync_sg_for_cpu(struct pci_dev *hwdev, struct scatterlist *sg, - int nelems, int direction) -{ - dma_sync_sg_for_cpu(&hwdev->dev, sg, nelems, (enum dma_data_direction)direction); -} - -static inline void -pci_dma_sync_sg_for_device(struct pci_dev *hwdev, struct scatterlist *sg, - int nelems, int direction) -{ - dma_sync_sg_for_device(&hwdev->dev, sg, nelems, (enum dma_data_direction)direction); -} - -static inline int -pci_dma_mapping_error(struct pci_dev *pdev, dma_addr_t dma_addr) -{ - return dma_mapping_error(&pdev->dev, dma_addr); -} - -#ifdef CONFIG_PCI -static inline int pci_set_dma_mask(struct pci_dev *dev, u64 mask) -{ - return dma_set_mask(&dev->dev, mask); -} - -static inline int pci_set_consistent_dma_mask(struct pci_dev *dev, u64 mask) -{ - return dma_set_coherent_mask(&dev->dev, mask); -} -#else -static inline int pci_set_dma_mask(struct pci_dev *dev, u64 mask) -{ return -EIO; } -static inline int pci_set_consistent_dma_mask(struct pci_dev *dev, u64 mask) -{ return -EIO; } -#endif - -#endif diff --git a/include/linux/pci.h b/include/linux/pci.h index b957eeb89c7a..60adf42460ab 
100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -2473,8 +2473,7 @@ static inline bool pci_is_thunderbolt_attached(struct pci_dev *pdev) void pci_uevent_ers(struct pci_dev *pdev, enum pci_ers_result err_type); #endif -/* Provide the legacy pci_dma_* API */ -#include <linux/pci-dma-compat.h> +#include <linux/dma-mapping.h> #define pci_printk(level, pdev, fmt, arg...) \ dev_printk(level, &(pdev)->dev, fmt, ##arg) diff --git a/include/linux/rtc.h b/include/linux/rtc.h index 47fd1c2d3a57..1fd9c6a21ebe 100644 --- a/include/linux/rtc.h +++ b/include/linux/rtc.h @@ -110,8 +110,6 @@ struct rtc_device { struct hrtimer pie_timer; /* sub second exp, so needs hrtimer */ int pie_enabled; struct work_struct irqwork; - /* Some hardware can't support UIE mode */ - int uie_unsupported; /* * This offset specifies the update timing of the RTC. diff --git a/include/linux/rtc/ds1685.h b/include/linux/rtc/ds1685.h index 67ee9d20cc5a..5a41c3bbcbe3 100644 --- a/include/linux/rtc/ds1685.h +++ b/include/linux/rtc/ds1685.h @@ -46,7 +46,6 @@ struct ds1685_priv { u32 regstep; int irq_num; bool bcd_mode; - bool no_irq; u8 (*read)(struct ds1685_priv *, int); void (*write)(struct ds1685_priv *, int, u8); void (*prepare_poweroff)(void); diff --git a/include/linux/sbitmap.h b/include/linux/sbitmap.h index dffeb8281c2d..8f5a86e210b9 100644 --- a/include/linux/sbitmap.h +++ b/include/linux/sbitmap.h @@ -174,7 +174,7 @@ static inline unsigned int __map_depth(const struct sbitmap *sb, int index) static inline void sbitmap_free(struct sbitmap *sb) { free_percpu(sb->alloc_hint); - kfree(sb->map); + kvfree(sb->map); sb->map = NULL; } diff --git a/include/linux/sched.h b/include/linux/sched.h index 4a6fdd2a679f..d5e3c00b74e1 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -1090,9 +1090,6 @@ struct task_struct { /* Restored if set_restore_sigmask() was used: */ sigset_t saved_sigmask; struct sigpending pending; -#ifdef CONFIG_RT_DELAYED_SIGNALS - struct kernel_siginfo forced_info; -#endif unsigned long sas_ss_sp; size_t sas_ss_size; unsigned int sas_ss_flags; diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h index 88cc16444b43..60820ab511d2 100644 --- a/include/linux/seq_file.h +++ b/include/linux/seq_file.h @@ -162,6 +162,7 @@ int seq_dentry(struct seq_file *, struct dentry *, const char *); int seq_path_root(struct seq_file *m, const struct path *path, const struct path *root, const char *esc); +void *single_start(struct seq_file *, loff_t *); int single_open(struct file *, int (*)(struct seq_file *, void *), void *); int single_open_size(struct file *, int (*)(struct seq_file *, void *), void *, size_t); int single_release(struct inode *, struct file *); diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 3a30cae8b0a5..2394441fa3dd 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -3836,8 +3836,7 @@ struct sk_buff *__skb_try_recv_datagram(struct sock *sk, struct sk_buff *__skb_recv_datagram(struct sock *sk, struct sk_buff_head *sk_queue, unsigned int flags, int *off, int *err); -struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags, int noblock, - int *err); +struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned int flags, int *err); __poll_t datagram_poll(struct file *file, struct socket *sock, struct poll_table_struct *wait); int skb_copy_datagram_iter(const struct sk_buff *from, int offset, diff --git a/include/linux/soc/mediatek/mtk_wed.h b/include/linux/soc/mediatek/mtk_wed.h new file mode 100644 index 000000000000..7e00cca06709 
--- /dev/null +++ b/include/linux/soc/mediatek/mtk_wed.h @@ -0,0 +1,131 @@ +#ifndef __MTK_WED_H +#define __MTK_WED_H + +#include <linux/kernel.h> +#include <linux/rcupdate.h> +#include <linux/regmap.h> +#include <linux/pci.h> + +#define MTK_WED_TX_QUEUES 2 + +struct mtk_wed_hw; +struct mtk_wdma_desc; + +struct mtk_wed_ring { + struct mtk_wdma_desc *desc; + dma_addr_t desc_phys; + int size; + + u32 reg_base; + void __iomem *wpdma; +}; + +struct mtk_wed_device { +#ifdef CONFIG_NET_MEDIATEK_SOC_WED + const struct mtk_wed_ops *ops; + struct device *dev; + struct mtk_wed_hw *hw; + bool init_done, running; + int wdma_idx; + int irq; + + struct mtk_wed_ring tx_ring[MTK_WED_TX_QUEUES]; + struct mtk_wed_ring txfree_ring; + struct mtk_wed_ring tx_wdma[MTK_WED_TX_QUEUES]; + + struct { + int size; + void **pages; + struct mtk_wdma_desc *desc; + dma_addr_t desc_phys; + } buf_ring; + + /* filled by driver: */ + struct { + struct pci_dev *pci_dev; + + u32 wpdma_phys; + + u16 token_start; + unsigned int nbuf; + + u32 (*init_buf)(void *ptr, dma_addr_t phys, int token_id); + int (*offload_enable)(struct mtk_wed_device *wed); + void (*offload_disable)(struct mtk_wed_device *wed); + } wlan; +#endif +}; + +struct mtk_wed_ops { + int (*attach)(struct mtk_wed_device *dev); + int (*tx_ring_setup)(struct mtk_wed_device *dev, int ring, + void __iomem *regs); + int (*txfree_ring_setup)(struct mtk_wed_device *dev, + void __iomem *regs); + void (*detach)(struct mtk_wed_device *dev); + + void (*stop)(struct mtk_wed_device *dev); + void (*start)(struct mtk_wed_device *dev, u32 irq_mask); + void (*reset_dma)(struct mtk_wed_device *dev); + + u32 (*reg_read)(struct mtk_wed_device *dev, u32 reg); + void (*reg_write)(struct mtk_wed_device *dev, u32 reg, u32 val); + + u32 (*irq_get)(struct mtk_wed_device *dev, u32 mask); + void (*irq_set_mask)(struct mtk_wed_device *dev, u32 mask); +}; + +extern const struct mtk_wed_ops __rcu *mtk_soc_wed_ops; + +static inline int +mtk_wed_device_attach(struct mtk_wed_device *dev) +{ + int ret = -ENODEV; + +#ifdef CONFIG_NET_MEDIATEK_SOC_WED + rcu_read_lock(); + dev->ops = rcu_dereference(mtk_soc_wed_ops); + if (dev->ops) + ret = dev->ops->attach(dev); + else + rcu_read_unlock(); + + if (ret) + dev->ops = NULL; +#endif + + return ret; +} + +#ifdef CONFIG_NET_MEDIATEK_SOC_WED +#define mtk_wed_device_active(_dev) !!(_dev)->ops +#define mtk_wed_device_detach(_dev) (_dev)->ops->detach(_dev) +#define mtk_wed_device_start(_dev, _mask) (_dev)->ops->start(_dev, _mask) +#define mtk_wed_device_tx_ring_setup(_dev, _ring, _regs) \ + (_dev)->ops->tx_ring_setup(_dev, _ring, _regs) +#define mtk_wed_device_txfree_ring_setup(_dev, _regs) \ + (_dev)->ops->txfree_ring_setup(_dev, _regs) +#define mtk_wed_device_reg_read(_dev, _reg) \ + (_dev)->ops->reg_read(_dev, _reg) +#define mtk_wed_device_reg_write(_dev, _reg, _val) \ + (_dev)->ops->reg_write(_dev, _reg, _val) +#define mtk_wed_device_irq_get(_dev, _mask) \ + (_dev)->ops->irq_get(_dev, _mask) +#define mtk_wed_device_irq_set_mask(_dev, _mask) \ + (_dev)->ops->irq_set_mask(_dev, _mask) +#else +static inline bool mtk_wed_device_active(struct mtk_wed_device *dev) +{ + return false; +} +#define mtk_wed_device_detach(_dev) do {} while (0) +#define mtk_wed_device_start(_dev, _mask) do {} while (0) +#define mtk_wed_device_tx_ring_setup(_dev, _ring, _regs) -ENODEV +#define mtk_wed_device_txfree_ring_setup(_dev, _ring, _regs) -ENODEV +#define mtk_wed_device_reg_read(_dev, _reg) 0 +#define mtk_wed_device_reg_write(_dev, _reg, _val) do {} while (0) +#define 
mtk_wed_device_irq_get(_dev, _mask) 0 +#define mtk_wed_device_irq_set_mask(_dev, _mask) do {} while (0) +#endif + +#endif diff --git a/include/uapi/linux/user_events.h b/include/linux/user_events.h index e570840571e1..736e05603463 100644 --- a/include/uapi/linux/user_events.h +++ b/include/linux/user_events.h @@ -32,9 +32,6 @@ /* Create dynamic location entry within a 32-bit value */ #define DYN_LOC(offset, size) ((size) << 16 | (offset)) -/* Use raw iterator for attached BPF program(s), no affect on ftrace/perf */ -#define FLAG_BPF_ITER (1 << 0) - /* * Describes an event registration and stores the results of the registration. * This structure is passed to the DIAG_IOCSREG ioctl, callers at a minimum @@ -63,54 +60,4 @@ struct user_reg { /* Requests to delete a user_event */ #define DIAG_IOCSDEL _IOW(DIAG_IOC_MAGIC, 1, char*) -/* Data type that was passed to the BPF program */ -enum { - /* Data resides in kernel space */ - USER_BPF_DATA_KERNEL, - - /* Data resides in user space */ - USER_BPF_DATA_USER, - - /* Data is a pointer to a user_bpf_iter structure */ - USER_BPF_DATA_ITER, -}; - -/* - * Describes an iovec iterator that BPF programs can use to access data for - * a given user_event write() / writev() call. - */ -struct user_bpf_iter { - - /* Offset of the data within the first iovec */ - __u32 iov_offset; - - /* Number of iovec structures */ - __u32 nr_segs; - - /* Pointer to iovec structures */ - const struct iovec *iov; -}; - -/* Context that BPF programs receive when attached to a user_event */ -struct user_bpf_context { - - /* Data type being passed (see union below) */ - __u32 data_type; - - /* Length of the data */ - __u32 data_len; - - /* Pointer to data, varies by data type */ - union { - /* Kernel data (data_type == USER_BPF_DATA_KERNEL) */ - void *kdata; - - /* User data (data_type == USER_BPF_DATA_USER) */ - void *udata; - - /* Direct iovec (data_type == USER_BPF_DATA_ITER) */ - struct user_bpf_iter *iter; - }; -}; - #endif /* _UAPI_LINUX_USER_EVENTS_H */ diff --git a/include/linux/vdpa.h b/include/linux/vdpa.h index 721089bb4c84..8943a209202e 100644 --- a/include/linux/vdpa.h +++ b/include/linux/vdpa.h @@ -83,7 +83,7 @@ struct vdpa_device { unsigned int index; bool features_valid; bool use_va; - int nvqs; + u32 nvqs; struct vdpa_mgmt_dev *mdev; }; @@ -207,7 +208,8 @@ struct vdpa_map_file { * @reset: Reset device * @vdev: vdpa device * Returns integer: success (0) or error (< 0) - * @get_config_size: Get the size of the configuration space + * @get_config_size: Get the size of the configuration space, including + fields that are conditional on feature bits.
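An editorial aside: with nvqs widened to u32 in the hunk above, the registration prototypes below take the unsigned queue count directly. A hedged sketch; the two-queue value is illustrative:

static int example_register(struct vdpa_device *vdev)
{
	u32 nvqs = 2;	/* e.g. one RX and one TX virtqueue */

	return vdpa_register_device(vdev, nvqs);
}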
* @vdev: vdpa device * Returns size_t: configuration size * @get_config: Read from device specific configuration space @@ -337,10 +338,10 @@ struct vdpa_device *__vdpa_alloc_device(struct device *parent, dev_struct, member)), name, use_va), \ dev_struct, member) -int vdpa_register_device(struct vdpa_device *vdev, int nvqs); +int vdpa_register_device(struct vdpa_device *vdev, u32 nvqs); void vdpa_unregister_device(struct vdpa_device *vdev); -int _vdpa_register_device(struct vdpa_device *vdev, int nvqs); +int _vdpa_register_device(struct vdpa_device *vdev, u32 nvqs); void _vdpa_unregister_device(struct vdpa_device *vdev); /** diff --git a/include/linux/virtio_config.h b/include/linux/virtio_config.h index dafdc7f48c01..b341dd62aa4d 100644 --- a/include/linux/virtio_config.h +++ b/include/linux/virtio_config.h @@ -23,8 +23,6 @@ struct virtio_shm_region { * any of @get/@set, @get_status/@set_status, or @get_features/ * @finalize_features are NOT safe to be called from an atomic * context. - * @enable_cbs: enable the callbacks - * vdev: the virtio_device * @get: read the value of a configuration field * vdev: the virtio_device * offset: the offset of the configuration field @@ -78,7 +76,6 @@ struct virtio_shm_region { */ typedef void vq_callback_t(struct virtqueue *); struct virtio_config_ops { - void (*enable_cbs)(struct virtio_device *vdev); void (*get)(struct virtio_device *vdev, unsigned offset, void *buf, unsigned len); void (*set)(struct virtio_device *vdev, unsigned offset, @@ -233,9 +230,6 @@ void virtio_device_ready(struct virtio_device *dev) { unsigned status = dev->config->get_status(dev); - if (dev->config->enable_cbs) - dev->config->enable_cbs(dev); - BUG_ON(status & VIRTIO_CONFIG_S_DRIVER_OK); dev->config->set_status(dev, status | VIRTIO_CONFIG_S_DRIVER_OK); } diff --git a/include/linux/xarray.h b/include/linux/xarray.h index bb52b786be1b..72feab5ea8d4 100644 --- a/include/linux/xarray.h +++ b/include/linux/xarray.h @@ -9,6 +9,7 @@ * See Documentation/core-api/xarray.rst for how to use the XArray. */ +#include <linux/bitmap.h> #include <linux/bug.h> #include <linux/compiler.h> #include <linux/gfp.h> diff --git a/include/net/act_api.h b/include/net/act_api.h index 3049cb69c025..9cf6870b526e 100644 --- a/include/net/act_api.h +++ b/include/net/act_api.h @@ -134,7 +134,8 @@ struct tc_action_ops { (*get_psample_group)(const struct tc_action *a, tc_action_priv_destructor *destructor); int (*offload_act_setup)(struct tc_action *act, void *entry_data, - u32 *index_inc, bool bind); + u32 *index_inc, bool bind, + struct netlink_ext_ack *extack); }; struct tc_action_net { diff --git a/include/net/if_inet6.h b/include/net/if_inet6.h index 4cfdef6ca4f6..c8490729b4ae 100644 --- a/include/net/if_inet6.h +++ b/include/net/if_inet6.h @@ -64,6 +64,14 @@ struct inet6_ifaddr { struct hlist_node addr_lst; struct list_head if_list; + /* + * Used to safely traverse idev->addr_list in process context + * if the idev->lock needed to protect idev->addr_list cannot be held. + * In that case, add the items to this list temporarily and iterate + * without holding idev->lock. + * See addrconf_ifdown and dev_forward_change. 
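An editorial aside: a hedged sketch of the traversal pattern the comment above describes, snapshotting the addresses under the lock and then walking the auxiliary list without it; example_process() stands in for whatever work must run without idev->lock held:

static void example_walk_addrs(struct inet6_dev *idev)
{
	struct inet6_ifaddr *ifa;
	LIST_HEAD(tmp);

	read_lock_bh(&idev->lock);
	list_for_each_entry(ifa, &idev->addr_list, if_list)
		list_add_tail(&ifa->if_list_aux, &tmp);
	read_unlock_bh(&idev->lock);

	/* idev->lock is no longer held, so blocking work is allowed here. */
	list_for_each_entry(ifa, &tmp, if_list_aux)
		example_process(ifa);	/* hypothetical */
}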
+ */ + struct list_head if_list_aux; struct list_head tmp_list; struct inet6_ifaddr *ifpub; diff --git a/include/net/mctp.h b/include/net/mctp.h index d37268fe6825..82800d521c3d 100644 --- a/include/net/mctp.h +++ b/include/net/mctp.h @@ -36,8 +36,6 @@ struct mctp_hdr { #define MCTP_HDR_TAG_SHIFT 0 #define MCTP_HDR_TAG_MASK GENMASK(2, 0) -#define MCTP_HEADER_MAXLEN 4 - #define MCTP_INITIAL_DEFAULT_NET 1 static inline bool mctp_address_unicast(mctp_eid_t eid) diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h index a3b57a93228a..8cf001aed858 100644 --- a/include/net/pkt_cls.h +++ b/include/net/pkt_cls.h @@ -547,10 +547,12 @@ tcf_match_indev(struct sk_buff *skb, int ifindex) } int tc_setup_offload_action(struct flow_action *flow_action, - const struct tcf_exts *exts); + const struct tcf_exts *exts, + struct netlink_ext_ack *extack); void tc_cleanup_offload_action(struct flow_action *flow_action); int tc_setup_action(struct flow_action *flow_action, - struct tc_action *actions[]); + struct tc_action *actions[], + struct netlink_ext_ack *extack); int tc_setup_cb_call(struct tcf_block *block, enum tc_setup_type type, void *type_data, bool err_stop, bool rtnl_held); diff --git a/include/net/strparser.h b/include/net/strparser.h index 732b7097d78e..a191486eb1e4 100644 --- a/include/net/strparser.h +++ b/include/net/strparser.h @@ -70,6 +70,10 @@ struct sk_skb_cb { * when dst_reg == src_reg. */ u64 temp_reg; + struct tls_msg { + u8 control; + u8 decrypted; + } tls; }; static inline struct strp_msg *strp_msg(struct sk_buff *skb) diff --git a/include/net/tc_act/tc_gact.h b/include/net/tc_act/tc_gact.h index eb8f01c819e6..832efd40e023 100644 --- a/include/net/tc_act/tc_gact.h +++ b/include/net/tc_act/tc_gact.h @@ -59,4 +59,19 @@ static inline u32 tcf_gact_goto_chain_index(const struct tc_action *a) return READ_ONCE(a->tcfa_action) & TC_ACT_EXT_VAL_MASK; } +static inline bool is_tcf_gact_continue(const struct tc_action *a) +{ + return __is_tcf_gact_act(a, TC_ACT_UNSPEC, false); +} + +static inline bool is_tcf_gact_reclassify(const struct tc_action *a) +{ + return __is_tcf_gact_act(a, TC_ACT_RECLASSIFY, false); +} + +static inline bool is_tcf_gact_pipe(const struct tc_action *a) +{ + return __is_tcf_gact_act(a, TC_ACT_PIPE, false); +} + #endif /* __NET_TC_GACT_H */ diff --git a/include/net/tc_act/tc_skbedit.h b/include/net/tc_act/tc_skbedit.h index 00bfee70609e..cab8229b9bed 100644 --- a/include/net/tc_act/tc_skbedit.h +++ b/include/net/tc_act/tc_skbedit.h @@ -94,4 +94,16 @@ static inline u32 tcf_skbedit_priority(const struct tc_action *a) return priority; } +/* Return true iff action is queue_mapping */ +static inline bool is_tcf_skbedit_queue_mapping(const struct tc_action *a) +{ + return is_tcf_skbedit_with_flag(a, SKBEDIT_F_QUEUE_MAPPING); +} + +/* Return true iff action is inheritdsfield */ +static inline bool is_tcf_skbedit_inheritdsfield(const struct tc_action *a) +{ + return is_tcf_skbedit_with_flag(a, SKBEDIT_F_INHERITDSFIELD); +} + #endif /* __NET_TC_SKBEDIT_H */ diff --git a/include/net/tcp.h b/include/net/tcp.h index 70ca4a5e330a..6d50a662bf89 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -1139,15 +1139,6 @@ static inline bool tcp_ca_needs_ecn(const struct sock *sk) return icsk->icsk_ca_ops->flags & TCP_CONG_NEEDS_ECN; } -static inline void tcp_set_ca_state(struct sock *sk, const u8 ca_state) -{ - struct inet_connection_sock *icsk = inet_csk(sk); - - if (icsk->icsk_ca_ops->set_state) - icsk->icsk_ca_ops->set_state(sk, ca_state); - icsk->icsk_ca_state = ca_state; -} - 
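An editorial aside: tcp_set_ca_state() moves out of line (the "From tcp_cong.c" declaration follows below). A plausible sketch of the relocated definition, assuming the removed body carries over unchanged:

/* net/ipv4/tcp_cong.c */
void tcp_set_ca_state(struct sock *sk, const u8 ca_state)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	if (icsk->icsk_ca_ops->set_state)
		icsk->icsk_ca_ops->set_state(sk, ca_state);
	icsk->icsk_ca_state = ca_state;
}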
static inline void tcp_ca_event(struct sock *sk, const enum tcp_ca_event event) { const struct inet_connection_sock *icsk = inet_csk(sk); @@ -1156,6 +1147,9 @@ static inline void tcp_ca_event(struct sock *sk, const enum tcp_ca_event event) icsk->icsk_ca_ops->cwnd_event(sk, event); } +/* From tcp_cong.c */ +void tcp_set_ca_state(struct sock *sk, const u8 ca_state); + /* From tcp_rate.c */ void tcp_rate_skb_sent(struct sock *sk, struct sk_buff *skb); void tcp_rate_skb_delivered(struct sock *sk, struct sk_buff *skb, @@ -1207,9 +1201,20 @@ static inline unsigned int tcp_packets_in_flight(const struct tcp_sock *tp) #define TCP_INFINITE_SSTHRESH 0x7fffffff +static inline u32 tcp_snd_cwnd(const struct tcp_sock *tp) +{ + return tp->snd_cwnd; +} + +static inline void tcp_snd_cwnd_set(struct tcp_sock *tp, u32 val) +{ + WARN_ON_ONCE((int)val <= 0); + tp->snd_cwnd = val; +} + static inline bool tcp_in_slow_start(const struct tcp_sock *tp) { - return tp->snd_cwnd < tp->snd_ssthresh; + return tcp_snd_cwnd(tp) < tp->snd_ssthresh; } static inline bool tcp_in_initial_slowstart(const struct tcp_sock *tp) @@ -1235,8 +1240,8 @@ static inline __u32 tcp_current_ssthresh(const struct sock *sk) return tp->snd_ssthresh; else return max(tp->snd_ssthresh, - ((tp->snd_cwnd >> 1) + - (tp->snd_cwnd >> 2))); + ((tcp_snd_cwnd(tp) >> 1) + + (tcp_snd_cwnd(tp) >> 2))); } /* Use define here intentionally to get WARN_ON location shown at the caller */ @@ -1278,7 +1283,7 @@ static inline bool tcp_is_cwnd_limited(const struct sock *sk) /* If in slow start, ensure cwnd grows to twice what was ACKed. */ if (tcp_in_slow_start(tp)) - return tp->snd_cwnd < 2 * tp->max_packets_out; + return tcp_snd_cwnd(tp) < 2 * tp->max_packets_out; return tp->is_cwnd_limited; } diff --git a/include/net/tls.h b/include/net/tls.h index b6968a5b5538..a01c264e5f15 100644 --- a/include/net/tls.h +++ b/include/net/tls.h @@ -64,6 +64,7 @@ #define TLS_AAD_SPACE_SIZE 13 #define MAX_IV_SIZE 16 +#define TLS_TAG_SIZE 16 #define TLS_MAX_REC_SEQ_SIZE 8 /* For CCM mode, the full 16-bytes of IV is made of '4' fields of given sizes. 
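With tcp_snd_cwnd()/tcp_snd_cwnd_set() in place, code is expected to go through the accessors rather than touching tp->snd_cwnd directly, so that tcp_snd_cwnd_set() can warn on zero or negative values. A purely illustrative (hypothetical) slow-start-style update using only helpers defined in this header:

	static void example_cong_avoid(struct sock *sk, u32 ack, u32 acked)
	{
		struct tcp_sock *tp = tcp_sk(sk);

		/* grow cwnd by the number of newly ACKed segments */
		if (tcp_in_slow_start(tp))
			tcp_snd_cwnd_set(tp, tcp_snd_cwnd(tp) + acked);
	}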
@@ -117,11 +118,6 @@ struct tls_rec { u8 aead_req_ctx[]; }; -struct tls_msg { - struct strp_msg rxm; - u8 control; -}; - struct tx_work { struct delayed_work work; struct sock *sk; @@ -152,9 +148,7 @@ struct tls_sw_context_rx { void (*saved_data_ready)(struct sock *sk); struct sk_buff *recv_pkt; - u8 control; u8 async_capable:1; - u8 decrypted:1; atomic_t decrypt_pending; /* protect crypto_wait with decrypt_pending*/ spinlock_t decrypt_compl_lock; @@ -411,7 +405,9 @@ void tls_free_partial_record(struct sock *sk, struct tls_context *ctx); static inline struct tls_msg *tls_msg(struct sk_buff *skb) { - return (struct tls_msg *)strp_msg(skb); + struct sk_skb_cb *scb = (struct sk_skb_cb *)skb->cb; + + return &scb->tls; } static inline bool tls_is_partially_sent_record(struct tls_context *ctx) diff --git a/include/sound/pcm.h b/include/sound/pcm.h index 314f2779cab5..6b99310b5b88 100644 --- a/include/sound/pcm.h +++ b/include/sound/pcm.h @@ -402,6 +402,7 @@ struct snd_pcm_runtime { struct fasync_struct *fasync; bool stop_operating; /* sync_stop will be called */ struct mutex buffer_mutex; /* protect for buffer changes */ + atomic_t buffer_accessing; /* >0: in r/w operation, <0: blocked */ /* -- private section -- */ void *private_data; diff --git a/include/trace/events/cachefiles.h b/include/trace/events/cachefiles.h index 2c530637e10a..311c14a20e70 100644 --- a/include/trace/events/cachefiles.h +++ b/include/trace/events/cachefiles.h @@ -426,8 +426,8 @@ TRACE_EVENT(cachefiles_vol_coherency, ); TRACE_EVENT(cachefiles_prep_read, - TP_PROTO(struct netfs_read_subrequest *sreq, - enum netfs_read_source source, + TP_PROTO(struct netfs_io_subrequest *sreq, + enum netfs_io_source source, enum cachefiles_prepare_read_trace why, ino_t cache_inode), @@ -437,7 +437,7 @@ TRACE_EVENT(cachefiles_prep_read, __field(unsigned int, rreq ) __field(unsigned short, index ) __field(unsigned short, flags ) - __field(enum netfs_read_source, source ) + __field(enum netfs_io_source, source ) __field(enum cachefiles_prepare_read_trace, why ) __field(size_t, len ) __field(loff_t, start ) diff --git a/include/trace/events/netfs.h b/include/trace/events/netfs.h index e6f4ebbb4c69..beec534cbaab 100644 --- a/include/trace/events/netfs.h +++ b/include/trace/events/netfs.h @@ -15,63 +15,25 @@ /* * Define enums for tracing information. 
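struct tls_msg no longer overlays strp_msg: its control/decrypted state moves into the tls member of sk_skb_cb (added in strparser.h above), and tls_msg() now derives it from skb->cb. A hypothetical reader of that per-skb state:

	static bool example_needs_decrypt(struct sk_buff *skb)
	{
		/* tls_msg() == &((struct sk_skb_cb *)skb->cb)->tls per the hunk above */
		struct tls_msg *tlm = tls_msg(skb);

		return !tlm->decrypted;
	}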
*/ -#ifndef __NETFS_DECLARE_TRACE_ENUMS_ONCE_ONLY -#define __NETFS_DECLARE_TRACE_ENUMS_ONCE_ONLY - -enum netfs_read_trace { - netfs_read_trace_expanded, - netfs_read_trace_readahead, - netfs_read_trace_readpage, - netfs_read_trace_write_begin, -}; - -enum netfs_rreq_trace { - netfs_rreq_trace_assess, - netfs_rreq_trace_done, - netfs_rreq_trace_free, - netfs_rreq_trace_resubmit, - netfs_rreq_trace_unlock, - netfs_rreq_trace_unmark, - netfs_rreq_trace_write, -}; - -enum netfs_sreq_trace { - netfs_sreq_trace_download_instead, - netfs_sreq_trace_free, - netfs_sreq_trace_prepare, - netfs_sreq_trace_resubmit_short, - netfs_sreq_trace_submit, - netfs_sreq_trace_terminated, - netfs_sreq_trace_write, - netfs_sreq_trace_write_skip, - netfs_sreq_trace_write_term, -}; - -enum netfs_failure { - netfs_fail_check_write_begin, - netfs_fail_copy_to_cache, - netfs_fail_read, - netfs_fail_short_readpage, - netfs_fail_short_write_begin, - netfs_fail_prepare_write, -}; - -#endif - #define netfs_read_traces \ EM(netfs_read_trace_expanded, "EXPANDED ") \ EM(netfs_read_trace_readahead, "READAHEAD") \ EM(netfs_read_trace_readpage, "READPAGE ") \ E_(netfs_read_trace_write_begin, "WRITEBEGN") +#define netfs_rreq_origins \ + EM(NETFS_READAHEAD, "RA") \ + EM(NETFS_READPAGE, "RP") \ + E_(NETFS_READ_FOR_WRITE, "RW") + #define netfs_rreq_traces \ - EM(netfs_rreq_trace_assess, "ASSESS") \ - EM(netfs_rreq_trace_done, "DONE ") \ - EM(netfs_rreq_trace_free, "FREE ") \ - EM(netfs_rreq_trace_resubmit, "RESUBM") \ - EM(netfs_rreq_trace_unlock, "UNLOCK") \ - EM(netfs_rreq_trace_unmark, "UNMARK") \ - E_(netfs_rreq_trace_write, "WRITE ") + EM(netfs_rreq_trace_assess, "ASSESS ") \ + EM(netfs_rreq_trace_copy, "COPY ") \ + EM(netfs_rreq_trace_done, "DONE ") \ + EM(netfs_rreq_trace_free, "FREE ") \ + EM(netfs_rreq_trace_resubmit, "RESUBMT") \ + EM(netfs_rreq_trace_unlock, "UNLOCK ") \ + E_(netfs_rreq_trace_unmark, "UNMARK ") #define netfs_sreq_sources \ EM(NETFS_FILL_WITH_ZEROES, "ZERO") \ @@ -94,10 +56,47 @@ enum netfs_failure { EM(netfs_fail_check_write_begin, "check-write-begin") \ EM(netfs_fail_copy_to_cache, "copy-to-cache") \ EM(netfs_fail_read, "read") \ - EM(netfs_fail_short_readpage, "short-readpage") \ - EM(netfs_fail_short_write_begin, "short-write-begin") \ + EM(netfs_fail_short_read, "short-read") \ E_(netfs_fail_prepare_write, "prep-write") +#define netfs_rreq_ref_traces \ + EM(netfs_rreq_trace_get_hold, "GET HOLD ") \ + EM(netfs_rreq_trace_get_subreq, "GET SUBREQ ") \ + EM(netfs_rreq_trace_put_complete, "PUT COMPLT ") \ + EM(netfs_rreq_trace_put_discard, "PUT DISCARD") \ + EM(netfs_rreq_trace_put_failed, "PUT FAILED ") \ + EM(netfs_rreq_trace_put_hold, "PUT HOLD ") \ + EM(netfs_rreq_trace_put_subreq, "PUT SUBREQ ") \ + EM(netfs_rreq_trace_put_zero_len, "PUT ZEROLEN") \ + E_(netfs_rreq_trace_new, "NEW ") + +#define netfs_sreq_ref_traces \ + EM(netfs_sreq_trace_get_copy_to_cache, "GET COPY2C ") \ + EM(netfs_sreq_trace_get_resubmit, "GET RESUBMIT") \ + EM(netfs_sreq_trace_get_short_read, "GET SHORTRD") \ + EM(netfs_sreq_trace_new, "NEW ") \ + EM(netfs_sreq_trace_put_clear, "PUT CLEAR ") \ + EM(netfs_sreq_trace_put_failed, "PUT FAILED ") \ + EM(netfs_sreq_trace_put_merged, "PUT MERGED ") \ + EM(netfs_sreq_trace_put_no_copy, "PUT NO COPY") \ + E_(netfs_sreq_trace_put_terminated, "PUT TERM ") + +#ifndef __NETFS_DECLARE_TRACE_ENUMS_ONCE_ONLY +#define __NETFS_DECLARE_TRACE_ENUMS_ONCE_ONLY + +#undef EM +#undef E_ +#define EM(a, b) a, +#define E_(a, b) a + +enum netfs_read_trace { netfs_read_traces } __mode(byte); +enum 
netfs_rreq_trace { netfs_rreq_traces } __mode(byte); +enum netfs_sreq_trace { netfs_sreq_traces } __mode(byte); +enum netfs_failure { netfs_failures } __mode(byte); +enum netfs_rreq_ref_trace { netfs_rreq_ref_traces } __mode(byte); +enum netfs_sreq_ref_trace { netfs_sreq_ref_traces } __mode(byte); + +#endif /* * Export enum symbols via userspace. @@ -108,10 +107,13 @@ enum netfs_failure { #define E_(a, b) TRACE_DEFINE_ENUM(a); netfs_read_traces; +netfs_rreq_origins; netfs_rreq_traces; netfs_sreq_sources; netfs_sreq_traces; netfs_failures; +netfs_rreq_ref_traces; +netfs_sreq_ref_traces; /* * Now redefine the EM() and E_() macros to map the enums to the strings that @@ -123,7 +125,7 @@ netfs_failures; #define E_(a, b) { a, b } TRACE_EVENT(netfs_read, - TP_PROTO(struct netfs_read_request *rreq, + TP_PROTO(struct netfs_io_request *rreq, loff_t start, size_t len, enum netfs_read_trace what), @@ -156,31 +158,34 @@ TRACE_EVENT(netfs_read, ); TRACE_EVENT(netfs_rreq, - TP_PROTO(struct netfs_read_request *rreq, + TP_PROTO(struct netfs_io_request *rreq, enum netfs_rreq_trace what), TP_ARGS(rreq, what), TP_STRUCT__entry( __field(unsigned int, rreq ) - __field(unsigned short, flags ) + __field(unsigned int, flags ) + __field(enum netfs_io_origin, origin ) __field(enum netfs_rreq_trace, what ) ), TP_fast_assign( __entry->rreq = rreq->debug_id; __entry->flags = rreq->flags; + __entry->origin = rreq->origin; __entry->what = what; ), - TP_printk("R=%08x %s f=%02x", + TP_printk("R=%08x %s %s f=%02x", __entry->rreq, + __print_symbolic(__entry->origin, netfs_rreq_origins), __print_symbolic(__entry->what, netfs_rreq_traces), __entry->flags) ); TRACE_EVENT(netfs_sreq, - TP_PROTO(struct netfs_read_subrequest *sreq, + TP_PROTO(struct netfs_io_subrequest *sreq, enum netfs_sreq_trace what), TP_ARGS(sreq, what), @@ -190,7 +195,7 @@ TRACE_EVENT(netfs_sreq, __field(unsigned short, index ) __field(short, error ) __field(unsigned short, flags ) - __field(enum netfs_read_source, source ) + __field(enum netfs_io_source, source ) __field(enum netfs_sreq_trace, what ) __field(size_t, len ) __field(size_t, transferred ) @@ -211,26 +216,26 @@ TRACE_EVENT(netfs_sreq, TP_printk("R=%08x[%u] %s %s f=%02x s=%llx %zx/%zx e=%d", __entry->rreq, __entry->index, - __print_symbolic(__entry->what, netfs_sreq_traces), __print_symbolic(__entry->source, netfs_sreq_sources), + __print_symbolic(__entry->what, netfs_sreq_traces), __entry->flags, __entry->start, __entry->transferred, __entry->len, __entry->error) ); TRACE_EVENT(netfs_failure, - TP_PROTO(struct netfs_read_request *rreq, - struct netfs_read_subrequest *sreq, + TP_PROTO(struct netfs_io_request *rreq, + struct netfs_io_subrequest *sreq, int error, enum netfs_failure what), TP_ARGS(rreq, sreq, error, what), TP_STRUCT__entry( __field(unsigned int, rreq ) - __field(unsigned short, index ) + __field(short, index ) __field(short, error ) __field(unsigned short, flags ) - __field(enum netfs_read_source, source ) + __field(enum netfs_io_source, source ) __field(enum netfs_failure, what ) __field(size_t, len ) __field(size_t, transferred ) @@ -239,17 +244,17 @@ TRACE_EVENT(netfs_failure, TP_fast_assign( __entry->rreq = rreq->debug_id; - __entry->index = sreq ? sreq->debug_index : 0; + __entry->index = sreq ? sreq->debug_index : -1; __entry->error = error; __entry->flags = sreq ? sreq->flags : 0; __entry->source = sreq ? sreq->source : NETFS_INVALID_READ; __entry->what = what; - __entry->len = sreq ? sreq->len : 0; + __entry->len = sreq ? sreq->len : rreq->len; __entry->transferred = sreq ? 
sreq->transferred : 0; __entry->start = sreq ? sreq->start : 0; ), - TP_printk("R=%08x[%u] %s f=%02x s=%llx %zx/%zx %s e=%d", + TP_printk("R=%08x[%d] %s f=%02x s=%llx %zx/%zx %s e=%d", __entry->rreq, __entry->index, __print_symbolic(__entry->source, netfs_sreq_sources), __entry->flags, @@ -258,6 +263,59 @@ TRACE_EVENT(netfs_failure, __entry->error) ); +TRACE_EVENT(netfs_rreq_ref, + TP_PROTO(unsigned int rreq_debug_id, int ref, + enum netfs_rreq_ref_trace what), + + TP_ARGS(rreq_debug_id, ref, what), + + TP_STRUCT__entry( + __field(unsigned int, rreq ) + __field(int, ref ) + __field(enum netfs_rreq_ref_trace, what ) + ), + + TP_fast_assign( + __entry->rreq = rreq_debug_id; + __entry->ref = ref; + __entry->what = what; + ), + + TP_printk("R=%08x %s r=%u", + __entry->rreq, + __print_symbolic(__entry->what, netfs_rreq_ref_traces), + __entry->ref) + ); + +TRACE_EVENT(netfs_sreq_ref, + TP_PROTO(unsigned int rreq_debug_id, unsigned int subreq_debug_index, + int ref, enum netfs_sreq_ref_trace what), + + TP_ARGS(rreq_debug_id, subreq_debug_index, ref, what), + + TP_STRUCT__entry( + __field(unsigned int, rreq ) + __field(unsigned int, subreq ) + __field(int, ref ) + __field(enum netfs_sreq_ref_trace, what ) + ), + + TP_fast_assign( + __entry->rreq = rreq_debug_id; + __entry->subreq = subreq_debug_index; + __entry->ref = ref; + __entry->what = what; + ), + + TP_printk("R=%08x[%x] %s r=%u", + __entry->rreq, + __entry->subreq, + __print_symbolic(__entry->what, netfs_sreq_ref_traces), + __entry->ref) + ); + +#undef EM +#undef E_ #endif /* _TRACE_NETFS_H */ /* This part must be outside protection */ diff --git a/include/trace/events/tcp.h b/include/trace/events/tcp.h index 521059d8dc0a..901b440238d5 100644 --- a/include/trace/events/tcp.h +++ b/include/trace/events/tcp.h @@ -279,7 +279,7 @@ TRACE_EVENT(tcp_probe, __entry->data_len = skb->len - __tcp_hdrlen(th); __entry->snd_nxt = tp->snd_nxt; __entry->snd_una = tp->snd_una; - __entry->snd_cwnd = tp->snd_cwnd; + __entry->snd_cwnd = tcp_snd_cwnd(tp); __entry->snd_wnd = tp->snd_wnd; __entry->rcv_wnd = tp->rcv_wnd; __entry->ssthresh = tcp_current_ssthresh(sk); @@ -371,6 +371,51 @@ DEFINE_EVENT(tcp_event_skb, tcp_bad_csum, TP_ARGS(skb) ); +TRACE_EVENT(tcp_cong_state_set, + + TP_PROTO(struct sock *sk, const u8 ca_state), + + TP_ARGS(sk, ca_state), + + TP_STRUCT__entry( + __field(const void *, skaddr) + __field(__u16, sport) + __field(__u16, dport) + __array(__u8, saddr, 4) + __array(__u8, daddr, 4) + __array(__u8, saddr_v6, 16) + __array(__u8, daddr_v6, 16) + __field(__u8, cong_state) + ), + + TP_fast_assign( + struct inet_sock *inet = inet_sk(sk); + __be32 *p32; + + __entry->skaddr = sk; + + __entry->sport = ntohs(inet->inet_sport); + __entry->dport = ntohs(inet->inet_dport); + + p32 = (__be32 *) __entry->saddr; + *p32 = inet->inet_saddr; + + p32 = (__be32 *) __entry->daddr; + *p32 = inet->inet_daddr; + + TP_STORE_ADDRS(__entry, inet->inet_saddr, inet->inet_daddr, + sk->sk_v6_rcv_saddr, sk->sk_v6_daddr); + + __entry->cong_state = ca_state; + ), + + TP_printk("sport=%hu dport=%hu saddr=%pI4 daddr=%pI4 saddrv6=%pI6c daddrv6=%pI6c cong_state=%u", + __entry->sport, __entry->dport, + __entry->saddr, __entry->daddr, + __entry->saddr_v6, __entry->daddr_v6, + __entry->cong_state) +); + #endif /* _TRACE_TCP_H */ /* This part must be outside protection */ diff --git a/include/trace/stages/stage1_defines.h b/include/trace/stages/stage1_struct_define.h index a16783419687..a16783419687 100644 --- a/include/trace/stages/stage1_defines.h +++ 
b/include/trace/stages/stage1_struct_define.h diff --git a/include/trace/stages/stage2_defines.h b/include/trace/stages/stage2_data_offsets.h index 42fd1e8813ec..42fd1e8813ec 100644 --- a/include/trace/stages/stage2_defines.h +++ b/include/trace/stages/stage2_data_offsets.h diff --git a/include/trace/stages/stage3_defines.h b/include/trace/stages/stage3_trace_output.h index e3b183e9d18e..e3b183e9d18e 100644 --- a/include/trace/stages/stage3_defines.h +++ b/include/trace/stages/stage3_trace_output.h diff --git a/include/trace/stages/stage4_defines.h b/include/trace/stages/stage4_event_fields.h index e80cdc397a43..e80cdc397a43 100644 --- a/include/trace/stages/stage4_defines.h +++ b/include/trace/stages/stage4_event_fields.h diff --git a/include/trace/stages/stage5_defines.h b/include/trace/stages/stage5_get_offsets.h index 7ee5931300e6..7ee5931300e6 100644 --- a/include/trace/stages/stage5_defines.h +++ b/include/trace/stages/stage5_get_offsets.h diff --git a/include/trace/stages/stage6_defines.h b/include/trace/stages/stage6_event_callback.h index e1724f73594b..e1724f73594b 100644 --- a/include/trace/stages/stage6_defines.h +++ b/include/trace/stages/stage6_event_callback.h diff --git a/include/trace/stages/stage7_defines.h b/include/trace/stages/stage7_class_define.h index 8a7ec24c246d..8a7ec24c246d 100644 --- a/include/trace/stages/stage7_defines.h +++ b/include/trace/stages/stage7_class_define.h diff --git a/include/trace/trace_custom_events.h b/include/trace/trace_custom_events.h index b567c7202339..6e492dba96bf 100644 --- a/include/trace/trace_custom_events.h +++ b/include/trace/trace_custom_events.h @@ -35,7 +35,7 @@ /* Stage 1 creates the structure of the recorded event layout */ -#include "stages/stage1_defines.h" +#include "stages/stage1_struct_define.h" #undef DECLARE_CUSTOM_EVENT_CLASS #define DECLARE_CUSTOM_EVENT_CLASS(name, proto, args, tstruct, assign, print) \ @@ -56,7 +56,7 @@ /* Stage 2 creates the custom class */ -#include "stages/stage2_defines.h" +#include "stages/stage2_data_offsets.h" #undef DECLARE_CUSTOM_EVENT_CLASS #define DECLARE_CUSTOM_EVENT_CLASS(call, proto, args, tstruct, assign, print) \ @@ -71,7 +71,7 @@ /* Stage 3 create the way to print the custom event */ -#include "stages/stage3_defines.h" +#include "stages/stage3_trace_output.h" #undef DECLARE_CUSTOM_EVENT_CLASS #define DECLARE_CUSTOM_EVENT_CLASS(call, proto, args, tstruct, assign, print) \ @@ -102,7 +102,7 @@ static struct trace_event_functions trace_custom_event_type_funcs_##call = { \ /* Stage 4 creates the offset layout for the fields */ -#include "stages/stage4_defines.h" +#include "stages/stage4_event_fields.h" #undef DECLARE_CUSTOM_EVENT_CLASS #define DECLARE_CUSTOM_EVENT_CLASS(call, proto, args, tstruct, func, print) \ @@ -114,7 +114,7 @@ static struct trace_event_fields trace_custom_event_fields_##call[] = { \ /* Stage 5 creates the helper function for dynamic fields */ -#include "stages/stage5_defines.h" +#include "stages/stage5_get_offsets.h" #undef DECLARE_CUSTOM_EVENT_CLASS #define DECLARE_CUSTOM_EVENT_CLASS(call, proto, args, tstruct, assign, print) \ @@ -134,7 +134,7 @@ static inline notrace int trace_custom_event_get_offsets_##call( \ /* Stage 6 creates the probe function that records the event */ -#include "stages/stage6_defines.h" +#include "stages/stage6_event_callback.h" #undef DECLARE_CUSTOM_EVENT_CLASS #define DECLARE_CUSTOM_EVENT_CLASS(call, proto, args, tstruct, assign, print) \ @@ -182,7 +182,7 @@ static inline void ftrace_test_custom_probe_##call(void) \ /* Stage 7 creates the 
actual class and event structure for the custom event */ -#include "stages/stage7_defines.h" +#include "stages/stage7_class_define.h" #undef DECLARE_CUSTOM_EVENT_CLASS #define DECLARE_CUSTOM_EVENT_CLASS(call, proto, args, tstruct, assign, print) \ diff --git a/include/trace/trace_events.h b/include/trace/trace_events.h index 8a8cd66cc6d5..c2f9cabf154d 100644 --- a/include/trace/trace_events.h +++ b/include/trace/trace_events.h @@ -45,7 +45,7 @@ PARAMS(print)); \ DEFINE_EVENT(name, name, PARAMS(proto), PARAMS(args)); -#include "stages/stage1_defines.h" +#include "stages/stage1_struct_define.h" #undef DECLARE_EVENT_CLASS #define DECLARE_EVENT_CLASS(name, proto, args, tstruct, assign, print) \ @@ -109,7 +109,7 @@ * The size of an array is also encoded, in the higher 16 bits of <item>. */ -#include "stages/stage2_defines.h" +#include "stages/stage2_data_offsets.h" #undef DECLARE_EVENT_CLASS #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \ @@ -181,7 +181,7 @@ * in binary. */ -#include "stages/stage3_defines.h" +#include "stages/stage3_trace_output.h" #undef DECLARE_EVENT_CLASS #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \ @@ -236,7 +236,7 @@ static struct trace_event_functions trace_event_type_funcs_##call = { \ #include TRACE_INCLUDE(TRACE_INCLUDE_FILE) -#include "stages/stage4_defines.h" +#include "stages/stage4_event_fields.h" #undef DECLARE_EVENT_CLASS #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, func, print) \ @@ -249,7 +249,7 @@ static struct trace_event_fields trace_event_fields_##call[] = { \ #include TRACE_INCLUDE(TRACE_INCLUDE_FILE) -#include "stages/stage5_defines.h" +#include "stages/stage5_get_offsets.h" #undef DECLARE_EVENT_CLASS #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \ @@ -372,7 +372,7 @@ static inline notrace int trace_event_get_offsets_##call( \ #define _TRACE_PERF_INIT(call) #endif /* CONFIG_PERF_EVENTS */ -#include "stages/stage6_defines.h" +#include "stages/stage6_event_callback.h" #undef DECLARE_EVENT_CLASS #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \ @@ -418,7 +418,7 @@ static inline void ftrace_test_probe_##call(void) \ #include TRACE_INCLUDE(TRACE_INCLUDE_FILE) -#include "stages/stage7_defines.h" +#include "stages/stage7_class_define.h" #undef DECLARE_EVENT_CLASS #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \ diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h index cc284c048e69..d1e600816b82 100644 --- a/include/uapi/linux/if_link.h +++ b/include/uapi/linux/if_link.h @@ -211,6 +211,9 @@ struct rtnl_link_stats { * @rx_nohandler: Number of packets received on the interface * but dropped by the networking stack because the device is * not designated to receive packets (e.g. backup link in a bond). + * + * @rx_otherhost_dropped: Number of packets dropped due to mismatch + * in destination MAC address. */ struct rtnl_link_stats64 { __u64 rx_packets; @@ -243,6 +246,8 @@ struct rtnl_link_stats64 { __u64 rx_compressed; __u64 tx_compressed; __u64 rx_nohandler; + + __u64 rx_otherhost_dropped; }; /* Subset of link stats useful for in-HW collection. 
Meaning of the fields is as diff --git a/include/uapi/linux/io_uring.h b/include/uapi/linux/io_uring.h index d2be4eb22008..784adc6f6ed2 100644 --- a/include/uapi/linux/io_uring.h +++ b/include/uapi/linux/io_uring.h @@ -201,11 +201,9 @@ struct io_uring_cqe { * * IORING_CQE_F_BUFFER If set, the upper 16 bits are the buffer ID * IORING_CQE_F_MORE If set, parent SQE will generate more CQE entries - * IORING_CQE_F_MSG If set, CQE was generated with IORING_OP_MSG_RING */ #define IORING_CQE_F_BUFFER (1U << 0) #define IORING_CQE_F_MORE (1U << 1) -#define IORING_CQE_F_MSG (1U << 2) enum { IORING_CQE_BUFFER_SHIFT = 16, diff --git a/include/uapi/linux/loop.h b/include/uapi/linux/loop.h index 24a1c45bd1ae..98e60801195e 100644 --- a/include/uapi/linux/loop.h +++ b/include/uapi/linux/loop.h @@ -45,7 +45,7 @@ struct loop_info { unsigned long lo_inode; /* ioctl r/o */ __kernel_old_dev_t lo_rdevice; /* ioctl r/o */ int lo_offset; - int lo_encrypt_type; + int lo_encrypt_type; /* obsolete, ignored */ int lo_encrypt_key_size; /* ioctl w/o */ int lo_flags; char lo_name[LO_NAME_SIZE]; @@ -61,7 +61,7 @@ struct loop_info64 { __u64 lo_offset; __u64 lo_sizelimit;/* bytes, 0 == max available */ __u32 lo_number; /* ioctl r/o */ - __u32 lo_encrypt_type; + __u32 lo_encrypt_type; /* obsolete, ignored */ __u32 lo_encrypt_key_size; /* ioctl w/o */ __u32 lo_flags; __u8 lo_file_name[LO_NAME_SIZE]; diff --git a/include/uapi/linux/rtc.h b/include/uapi/linux/rtc.h index 03e5b776e597..97aca4503a6a 100644 --- a/include/uapi/linux/rtc.h +++ b/include/uapi/linux/rtc.h @@ -133,7 +133,8 @@ struct rtc_param { #define RTC_FEATURE_UPDATE_INTERRUPT 4 #define RTC_FEATURE_CORRECTION 5 #define RTC_FEATURE_BACKUP_SWITCH_MODE 6 -#define RTC_FEATURE_CNT 7 +#define RTC_FEATURE_ALARM_WAKEUP_ONLY 7 +#define RTC_FEATURE_CNT 8 /* parameter list */ #define RTC_PARAM_FEATURES 0 diff --git a/include/uapi/linux/tipc_config.h b/include/uapi/linux/tipc_config.h index 4dfc05651c98..c00adf2fe868 100644 --- a/include/uapi/linux/tipc_config.h +++ b/include/uapi/linux/tipc_config.h @@ -43,10 +43,6 @@ #include <linux/tipc.h> #include <asm/byteorder.h> -#ifndef __KERNEL__ -#include <arpa/inet.h> /* for ntohs etc. 
*/ -#endif - /* * Configuration * @@ -269,33 +265,33 @@ static inline int TLV_OK(const void *tlv, __u16 space) */ return (space >= TLV_SPACE(0)) && - (ntohs(((struct tlv_desc *)tlv)->tlv_len) <= space); + (__be16_to_cpu(((struct tlv_desc *)tlv)->tlv_len) <= space); } static inline int TLV_CHECK(const void *tlv, __u16 space, __u16 exp_type) { return TLV_OK(tlv, space) && - (ntohs(((struct tlv_desc *)tlv)->tlv_type) == exp_type); + (__be16_to_cpu(((struct tlv_desc *)tlv)->tlv_type) == exp_type); } static inline int TLV_GET_LEN(struct tlv_desc *tlv) { - return ntohs(tlv->tlv_len); + return __be16_to_cpu(tlv->tlv_len); } static inline void TLV_SET_LEN(struct tlv_desc *tlv, __u16 len) { - tlv->tlv_len = htons(len); + tlv->tlv_len = __cpu_to_be16(len); } static inline int TLV_CHECK_TYPE(struct tlv_desc *tlv, __u16 type) { - return (ntohs(tlv->tlv_type) == type); + return (__be16_to_cpu(tlv->tlv_type) == type); } static inline void TLV_SET_TYPE(struct tlv_desc *tlv, __u16 type) { - tlv->tlv_type = htons(type); + tlv->tlv_type = __cpu_to_be16(type); } static inline int TLV_SET(void *tlv, __u16 type, void *data, __u16 len) @@ -305,8 +301,8 @@ static inline int TLV_SET(void *tlv, __u16 type, void *data, __u16 len) tlv_len = TLV_LENGTH(len); tlv_ptr = (struct tlv_desc *)tlv; - tlv_ptr->tlv_type = htons(type); - tlv_ptr->tlv_len = htons(tlv_len); + tlv_ptr->tlv_type = __cpu_to_be16(type); + tlv_ptr->tlv_len = __cpu_to_be16(tlv_len); if (len && data) { memcpy(TLV_DATA(tlv_ptr), data, len); memset((char *)TLV_DATA(tlv_ptr) + len, 0, TLV_SPACE(len) - tlv_len); @@ -348,7 +344,7 @@ static inline void *TLV_LIST_DATA(struct tlv_list_desc *list) static inline void TLV_LIST_STEP(struct tlv_list_desc *list) { - __u16 tlv_space = TLV_ALIGN(ntohs(list->tlv_ptr->tlv_len)); + __u16 tlv_space = TLV_ALIGN(__be16_to_cpu(list->tlv_ptr->tlv_len)); list->tlv_ptr = (struct tlv_desc *)((char *)list->tlv_ptr + tlv_space); list->tlv_space -= tlv_space; @@ -404,9 +400,9 @@ static inline int TCM_SET(void *msg, __u16 cmd, __u16 flags, msg_len = TCM_LENGTH(data_len); tcm_hdr = (struct tipc_cfg_msg_hdr *)msg; - tcm_hdr->tcm_len = htonl(msg_len); - tcm_hdr->tcm_type = htons(cmd); - tcm_hdr->tcm_flags = htons(flags); + tcm_hdr->tcm_len = __cpu_to_be32(msg_len); + tcm_hdr->tcm_type = __cpu_to_be16(cmd); + tcm_hdr->tcm_flags = __cpu_to_be16(flags); if (data_len && data) { memcpy(TCM_DATA(msg), data, data_len); memset((char *)TCM_DATA(msg) + data_len, 0, TCM_SPACE(data_len) - msg_len); diff --git a/include/uapi/linux/vhost.h b/include/uapi/linux/vhost.h index c998860d7bbc..5d99e7c242a2 100644 --- a/include/uapi/linux/vhost.h +++ b/include/uapi/linux/vhost.h @@ -150,4 +150,11 @@ /* Get the valid iova range */ #define VHOST_VDPA_GET_IOVA_RANGE _IOR(VHOST_VIRTIO, 0x78, \ struct vhost_vdpa_iova_range) + +/* Get the config size */ +#define VHOST_VDPA_GET_CONFIG_SIZE _IOR(VHOST_VIRTIO, 0x79, __u32) + +/* Get the count of all virtqueues */ +#define VHOST_VDPA_GET_VQS_COUNT _IOR(VHOST_VIRTIO, 0x80, __u32) + #endif diff --git a/include/uapi/linux/virtio_config.h b/include/uapi/linux/virtio_config.h index b5eda06f0d57..f0fb0ae021c0 100644 --- a/include/uapi/linux/virtio_config.h +++ b/include/uapi/linux/virtio_config.h @@ -83,6 +83,12 @@ #define VIRTIO_F_RING_PACKED 34 /* + * Inorder feature indicates that all buffers are used by the device + * in the same order in which they have been made available. 
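The two VHOST_VDPA ioctls added above are plain _IOR reads of a __u32, so userspace consumption is direct. A minimal hypothetical query, assuming an already-open vhost-vdpa fd:

	#include <stdint.h>
	#include <sys/ioctl.h>
	#include <linux/vhost.h>

	static int query_vdpa(int fd, uint32_t *cfg_size, uint32_t *nvqs)
	{
		/* read the device config size, then the total virtqueue count */
		if (ioctl(fd, VHOST_VDPA_GET_CONFIG_SIZE, cfg_size) < 0)
			return -1;
		return ioctl(fd, VHOST_VDPA_GET_VQS_COUNT, nvqs);
	}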
+ */ +#define VIRTIO_F_IN_ORDER 35 + +/* * This feature indicates that memory accesses by the driver and the * device are ordered in a way described by the platform. */ diff --git a/include/uapi/linux/virtio_crypto.h b/include/uapi/linux/virtio_crypto.h index a03932f10565..71a54a6849ca 100644 --- a/include/uapi/linux/virtio_crypto.h +++ b/include/uapi/linux/virtio_crypto.h @@ -37,6 +37,7 @@ #define VIRTIO_CRYPTO_SERVICE_HASH 1 #define VIRTIO_CRYPTO_SERVICE_MAC 2 #define VIRTIO_CRYPTO_SERVICE_AEAD 3 +#define VIRTIO_CRYPTO_SERVICE_AKCIPHER 4 #define VIRTIO_CRYPTO_OPCODE(service, op) (((service) << 8) | (op)) @@ -57,6 +58,10 @@ struct virtio_crypto_ctrl_header { VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_AEAD, 0x02) #define VIRTIO_CRYPTO_AEAD_DESTROY_SESSION \ VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_AEAD, 0x03) +#define VIRTIO_CRYPTO_AKCIPHER_CREATE_SESSION \ + VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_AKCIPHER, 0x04) +#define VIRTIO_CRYPTO_AKCIPHER_DESTROY_SESSION \ + VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_AKCIPHER, 0x05) __le32 opcode; __le32 algo; __le32 flag; @@ -180,6 +185,58 @@ struct virtio_crypto_aead_create_session_req { __u8 padding[32]; }; +struct virtio_crypto_rsa_session_para { +#define VIRTIO_CRYPTO_RSA_RAW_PADDING 0 +#define VIRTIO_CRYPTO_RSA_PKCS1_PADDING 1 + __le32 padding_algo; + +#define VIRTIO_CRYPTO_RSA_NO_HASH 0 +#define VIRTIO_CRYPTO_RSA_MD2 1 +#define VIRTIO_CRYPTO_RSA_MD3 2 +#define VIRTIO_CRYPTO_RSA_MD4 3 +#define VIRTIO_CRYPTO_RSA_MD5 4 +#define VIRTIO_CRYPTO_RSA_SHA1 5 +#define VIRTIO_CRYPTO_RSA_SHA256 6 +#define VIRTIO_CRYPTO_RSA_SHA384 7 +#define VIRTIO_CRYPTO_RSA_SHA512 8 +#define VIRTIO_CRYPTO_RSA_SHA224 9 + __le32 hash_algo; +}; + +struct virtio_crypto_ecdsa_session_para { +#define VIRTIO_CRYPTO_CURVE_UNKNOWN 0 +#define VIRTIO_CRYPTO_CURVE_NIST_P192 1 +#define VIRTIO_CRYPTO_CURVE_NIST_P224 2 +#define VIRTIO_CRYPTO_CURVE_NIST_P256 3 +#define VIRTIO_CRYPTO_CURVE_NIST_P384 4 +#define VIRTIO_CRYPTO_CURVE_NIST_P521 5 + __le32 curve_id; + __le32 padding; +}; + +struct virtio_crypto_akcipher_session_para { +#define VIRTIO_CRYPTO_NO_AKCIPHER 0 +#define VIRTIO_CRYPTO_AKCIPHER_RSA 1 +#define VIRTIO_CRYPTO_AKCIPHER_DSA 2 +#define VIRTIO_CRYPTO_AKCIPHER_ECDSA 3 + __le32 algo; + +#define VIRTIO_CRYPTO_AKCIPHER_KEY_TYPE_PUBLIC 1 +#define VIRTIO_CRYPTO_AKCIPHER_KEY_TYPE_PRIVATE 2 + __le32 keytype; + __le32 keylen; + + union { + struct virtio_crypto_rsa_session_para rsa; + struct virtio_crypto_ecdsa_session_para ecdsa; + } u; +}; + +struct virtio_crypto_akcipher_create_session_req { + struct virtio_crypto_akcipher_session_para para; + __u8 padding[36]; +}; + struct virtio_crypto_alg_chain_session_para { #define VIRTIO_CRYPTO_SYM_ALG_CHAIN_ORDER_HASH_THEN_CIPHER 1 #define VIRTIO_CRYPTO_SYM_ALG_CHAIN_ORDER_CIPHER_THEN_HASH 2 @@ -247,6 +304,8 @@ struct virtio_crypto_op_ctrl_req { mac_create_session; struct virtio_crypto_aead_create_session_req aead_create_session; + struct virtio_crypto_akcipher_create_session_req + akcipher_create_session; struct virtio_crypto_destroy_session_req destroy_session; __u8 padding[56]; @@ -266,6 +325,14 @@ struct virtio_crypto_op_header { VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_AEAD, 0x00) #define VIRTIO_CRYPTO_AEAD_DECRYPT \ VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_AEAD, 0x01) +#define VIRTIO_CRYPTO_AKCIPHER_ENCRYPT \ + VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_AKCIPHER, 0x00) +#define VIRTIO_CRYPTO_AKCIPHER_DECRYPT \ + VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_AKCIPHER, 0x01) +#define VIRTIO_CRYPTO_AKCIPHER_SIGN \ + 
VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_AKCIPHER, 0x02) +#define VIRTIO_CRYPTO_AKCIPHER_VERIFY \ + VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_AKCIPHER, 0x03) __le32 opcode; /* algo should be service-specific algorithms */ __le32 algo; @@ -390,6 +457,16 @@ struct virtio_crypto_aead_data_req { __u8 padding[32]; }; +struct virtio_crypto_akcipher_para { + __le32 src_data_len; + __le32 dst_data_len; +}; + +struct virtio_crypto_akcipher_data_req { + struct virtio_crypto_akcipher_para para; + __u8 padding[40]; +}; + /* The request of the data virtqueue's packet */ struct virtio_crypto_op_data_req { struct virtio_crypto_op_header header; @@ -399,6 +476,7 @@ struct virtio_crypto_op_data_req { struct virtio_crypto_hash_data_req hash_req; struct virtio_crypto_mac_data_req mac_req; struct virtio_crypto_aead_data_req aead_req; + struct virtio_crypto_akcipher_data_req akcipher_req; __u8 padding[48]; } u; }; @@ -408,6 +486,8 @@ struct virtio_crypto_op_data_req { #define VIRTIO_CRYPTO_BADMSG 2 #define VIRTIO_CRYPTO_NOTSUPP 3 #define VIRTIO_CRYPTO_INVSESS 4 /* Invalid session id */ +#define VIRTIO_CRYPTO_NOSPC 5 /* no free session ID */ +#define VIRTIO_CRYPTO_KEY_REJECTED 6 /* Signature verification failed */ /* The accelerator hardware is ready */ #define VIRTIO_CRYPTO_S_HW_READY (1 << 0) @@ -438,7 +518,7 @@ struct virtio_crypto_config { __le32 max_cipher_key_len; /* Maximum length of authenticated key */ __le32 max_auth_key_len; - __le32 reserve; + __le32 akcipher_algo; /* Maximum size of each crypto request's content */ __le64 max_size; };
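Tying the akcipher additions together: creating an RSA session means setting the new control opcode and filling the union member added to virtio_crypto_op_ctrl_req. A sketch using only names from the header above (kernel-style cpu_to_le32 assumed; key transfer and virtqueue plumbing omitted):

	static void fill_rsa_create_session(struct virtio_crypto_op_ctrl_req *req,
					    u32 keylen)
	{
		struct virtio_crypto_akcipher_create_session_req *sess =
			&req->u.akcipher_create_session;

		/* opcode from the SERVICE_AKCIPHER range defined above */
		req->header.opcode = cpu_to_le32(VIRTIO_CRYPTO_AKCIPHER_CREATE_SESSION);

		sess->para.algo    = cpu_to_le32(VIRTIO_CRYPTO_AKCIPHER_RSA);
		sess->para.keytype = cpu_to_le32(VIRTIO_CRYPTO_AKCIPHER_KEY_TYPE_PUBLIC);
		sess->para.keylen  = cpu_to_le32(keylen);

		/* RSA-specific parameters live in the rsa arm of the union */
		sess->para.u.rsa.padding_algo = cpu_to_le32(VIRTIO_CRYPTO_RSA_PKCS1_PADDING);
		sess->para.u.rsa.hash_algo    = cpu_to_le32(VIRTIO_CRYPTO_RSA_SHA256);
	}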