Diffstat (limited to 'include')
484 files changed, 12331 insertions, 5228 deletions
diff --git a/include/acpi/acexcep.h b/include/acpi/acexcep.h index 9f20eb4acaa6..204f5819d464 100644 --- a/include/acpi/acexcep.h +++ b/include/acpi/acexcep.h @@ -193,8 +193,9 @@ struct acpi_exception_info { #define AE_AML_ILLEGAL_ADDRESS EXCEP_AML (0x0020) #define AE_AML_INFINITE_LOOP EXCEP_AML (0x0021) #define AE_AML_UNINITIALIZED_NODE EXCEP_AML (0x0022) +#define AE_AML_TARGET_TYPE EXCEP_AML (0x0023) -#define AE_CODE_AML_MAX 0x0022 +#define AE_CODE_AML_MAX 0x0023 /* * Internal exceptions used for control @@ -358,7 +359,9 @@ static const struct acpi_exception_info acpi_gbl_exception_names_aml[] = { EXCEP_TXT("AE_AML_INFINITE_LOOP", "An apparent infinite AML While loop, method was aborted"), EXCEP_TXT("AE_AML_UNINITIALIZED_NODE", - "A namespace node is uninitialized or unresolved") + "A namespace node is uninitialized or unresolved"), + EXCEP_TXT("AE_AML_TARGET_TYPE", + "A target operand of an incorrect type was encountered") }; static const struct acpi_exception_info acpi_gbl_exception_names_ctrl[] = { diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h index 5ba8fb64f664..d11eff8a4efe 100644 --- a/include/acpi/acpi_bus.h +++ b/include/acpi/acpi_bus.h @@ -129,7 +129,7 @@ static inline struct acpi_hotplug_profile *to_acpi_hotplug_profile( struct acpi_scan_handler { const struct acpi_device_id *ids; struct list_head list_node; - bool (*match)(char *idstr, const struct acpi_device_id **matchid); + bool (*match)(const char *idstr, const struct acpi_device_id **matchid); int (*attach)(struct acpi_device *dev, const struct acpi_device_id *id); void (*detach)(struct acpi_device *dev); void (*bind)(struct device *phys_dev); @@ -227,7 +227,7 @@ typedef char acpi_device_class[20]; struct acpi_hardware_id { struct list_head list; - char *id; + const char *id; }; struct acpi_pnp_type { @@ -343,6 +343,7 @@ struct acpi_device_data { const union acpi_object *pointer; const union acpi_object *properties; const union acpi_object *of_compatible; + struct list_head subnodes; }; struct acpi_gpio_mapping; @@ -378,6 +379,17 @@ struct acpi_device { void (*remove)(struct acpi_device *); }; +/* Non-device subnode */ +struct acpi_data_node { + const char *name; + acpi_handle handle; + struct fwnode_handle fwnode; + struct acpi_device_data data; + struct list_head sibling; + struct kobject kobj; + struct completion kobj_done; +}; + static inline bool acpi_check_dma(struct acpi_device *adev, bool *coherent) { bool ret = false; @@ -413,15 +425,32 @@ static inline bool acpi_check_dma(struct acpi_device *adev, bool *coherent) static inline bool is_acpi_node(struct fwnode_handle *fwnode) { + return fwnode && (fwnode->type == FWNODE_ACPI + || fwnode->type == FWNODE_ACPI_DATA); +} + +static inline bool is_acpi_device_node(struct fwnode_handle *fwnode) +{ return fwnode && fwnode->type == FWNODE_ACPI; } -static inline struct acpi_device *to_acpi_node(struct fwnode_handle *fwnode) +static inline struct acpi_device *to_acpi_device_node(struct fwnode_handle *fwnode) { - return is_acpi_node(fwnode) ? + return is_acpi_device_node(fwnode) ? container_of(fwnode, struct acpi_device, fwnode) : NULL; } +static inline bool is_acpi_data_node(struct fwnode_handle *fwnode) +{ + return fwnode && fwnode->type == FWNODE_ACPI_DATA; +} + +static inline struct acpi_data_node *to_acpi_data_node(struct fwnode_handle *fwnode) +{ + return is_acpi_data_node(fwnode) ? 
+ container_of(fwnode, struct acpi_data_node, fwnode) : NULL; +} + static inline struct fwnode_handle *acpi_fwnode_handle(struct acpi_device *adev) { return &adev->fwnode; diff --git a/include/acpi/acpiosxf.h b/include/acpi/acpiosxf.h index a54ad1cc990c..fbc2baf2b9dc 100644 --- a/include/acpi/acpiosxf.h +++ b/include/acpi/acpiosxf.h @@ -55,7 +55,8 @@ typedef enum { OSL_GLOBAL_LOCK_HANDLER, OSL_NOTIFY_HANDLER, OSL_GPE_HANDLER, - OSL_DEBUGGER_THREAD, + OSL_DEBUGGER_MAIN_THREAD, + OSL_DEBUGGER_EXEC_THREAD, OSL_EC_POLL_HANDLER, OSL_EC_BURST_HANDLER } acpi_execute_type; diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h index c33eeabde160..3aaaa8630735 100644 --- a/include/acpi/acpixf.h +++ b/include/acpi/acpixf.h @@ -46,7 +46,7 @@ /* Current ACPICA subsystem version in YYYYMMDD format */ -#define ACPI_CA_VERSION 0x20150818 +#define ACPI_CA_VERSION 0x20150930 #include <acpi/acconfig.h> #include <acpi/actypes.h> @@ -393,15 +393,11 @@ ACPI_EXTERNAL_RETURN_STATUS(acpi_status __init acpi_terminate(void)) */ ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status acpi_enable(void)) ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status acpi_disable(void)) -#ifdef ACPI_FUTURE_USAGE ACPI_EXTERNAL_RETURN_STATUS(acpi_status acpi_subsystem_status(void)) -#endif -#ifdef ACPI_FUTURE_USAGE ACPI_EXTERNAL_RETURN_STATUS(acpi_status acpi_get_system_info(struct acpi_buffer *ret_buffer)) -#endif ACPI_EXTERNAL_RETURN_STATUS(acpi_status acpi_get_statistics(struct acpi_statistics *stats)) ACPI_EXTERNAL_RETURN_PTR(const char @@ -625,11 +621,9 @@ ACPI_EXTERNAL_RETURN_STATUS(acpi_status space_id, acpi_adr_space_handler handler)) -#ifdef ACPI_FUTURE_USAGE ACPI_EXTERNAL_RETURN_STATUS(acpi_status acpi_install_exception_handler (acpi_exception_handler handler)) -#endif ACPI_EXTERNAL_RETURN_STATUS(acpi_status acpi_install_interface_handler (acpi_interface_handler handler)) @@ -750,12 +744,10 @@ ACPI_EXTERNAL_RETURN_STATUS(acpi_status acpi_get_current_resources(acpi_handle device, struct acpi_buffer *ret_buffer)) -#ifdef ACPI_FUTURE_USAGE ACPI_EXTERNAL_RETURN_STATUS(acpi_status acpi_get_possible_resources(acpi_handle device, struct acpi_buffer *ret_buffer)) -#endif ACPI_EXTERNAL_RETURN_STATUS(acpi_status acpi_get_event_resources(acpi_handle device_handle, struct acpi_buffer @@ -844,7 +836,6 @@ ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status /* * ACPI Timer interfaces */ -#ifdef ACPI_FUTURE_USAGE ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status acpi_get_timer_resolution(u32 *resolution)) ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status acpi_get_timer(u32 *ticks)) @@ -853,7 +844,6 @@ ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status acpi_get_timer_duration(u32 start_ticks, u32 end_ticks, u32 *time_elapsed)) -#endif /* ACPI_FUTURE_USAGE */ /* * Error/Warning output @@ -939,4 +929,6 @@ ACPI_EXTERNAL_RETURN_STATUS(acpi_status void **data, void (*callback)(void *))) +void acpi_set_debugger_thread_id(acpi_thread_id thread_id); + #endif /* __ACXFACE_H__ */ diff --git a/include/acpi/actbl1.h b/include/acpi/actbl1.h index fcd570999f35..1bb979e3e3f5 100644 --- a/include/acpi/actbl1.h +++ b/include/acpi/actbl1.h @@ -1012,7 +1012,7 @@ struct acpi_nfit_memory_map { #define ACPI_NFIT_MEM_SAVE_FAILED (1) /* 00: Last SAVE to Memory Device failed */ #define ACPI_NFIT_MEM_RESTORE_FAILED (1<<1) /* 01: Last RESTORE from Memory Device failed */ #define ACPI_NFIT_MEM_FLUSH_FAILED (1<<2) /* 02: Platform flush failed */ -#define ACPI_NFIT_MEM_ARMED (1<<3) /* 03: Memory Device observed to be not armed */ +#define ACPI_NFIT_MEM_NOT_ARMED (1<<3) /* 03: Memory Device is not armed */ 
#define ACPI_NFIT_MEM_HEALTH_OBSERVED (1<<4) /* 04: Memory Device observed SMART/health events */ #define ACPI_NFIT_MEM_HEALTH_ENABLED (1<<5) /* 05: SMART/health events enabled */ diff --git a/include/acpi/button.h b/include/acpi/button.h index 97eea0e4c016..1cad8b2d460c 100644 --- a/include/acpi/button.h +++ b/include/acpi/button.h @@ -3,7 +3,7 @@ #include <linux/notifier.h> -#if defined(CONFIG_ACPI_BUTTON) || defined(CONFIG_ACPI_BUTTON_MODULE) +#if IS_ENABLED(CONFIG_ACPI_BUTTON) extern int acpi_lid_notifier_register(struct notifier_block *nb); extern int acpi_lid_notifier_unregister(struct notifier_block *nb); extern int acpi_lid_open(void); @@ -20,6 +20,6 @@ static inline int acpi_lid_open(void) { return 1; } -#endif /* defined(CONFIG_ACPI_BUTTON) || defined(CONFIG_ACPI_BUTTON_MODULE) */ +#endif /* IS_ENABLED(CONFIG_ACPI_BUTTON) */ #endif /* ACPI_BUTTON_H */ diff --git a/include/acpi/cppc_acpi.h b/include/acpi/cppc_acpi.h new file mode 100644 index 000000000000..717a29810473 --- /dev/null +++ b/include/acpi/cppc_acpi.h @@ -0,0 +1,138 @@ +/* + * CPPC (Collaborative Processor Performance Control) methods used + * by CPUfreq drivers. + * + * (C) Copyright 2014, 2015 Linaro Ltd. + * Author: Ashwin Chaugule <ashwin.chaugule@linaro.org> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; version 2 + * of the License. + */ + +#ifndef _CPPC_ACPI_H +#define _CPPC_ACPI_H + +#include <linux/acpi.h> +#include <linux/mailbox_controller.h> +#include <linux/mailbox_client.h> +#include <linux/types.h> + +#include <acpi/processor.h> + +/* Only support CPPCv2 for now. */ +#define CPPC_NUM_ENT 21 +#define CPPC_REV 2 + +#define PCC_CMD_COMPLETE 1 +#define MAX_CPC_REG_ENT 19 + +/* CPPC specific PCC commands. */ +#define CMD_READ 0 +#define CMD_WRITE 1 + +/* Each register has the folowing format. */ +struct cpc_reg { + u8 descriptor; + u16 length; + u8 space_id; + u8 bit_width; + u8 bit_offset; + u8 access_width; + u64 __iomem address; +} __packed; + +/* + * Each entry in the CPC table is either + * of type ACPI_TYPE_BUFFER or + * ACPI_TYPE_INTEGER. + */ +struct cpc_register_resource { + acpi_object_type type; + union { + struct cpc_reg reg; + u64 int_value; + } cpc_entry; +}; + +/* Container to hold the CPC details for each CPU */ +struct cpc_desc { + int num_entries; + int version; + int cpu_id; + struct cpc_register_resource cpc_regs[MAX_CPC_REG_ENT]; + struct acpi_psd_package domain_info; +}; + +/* These are indexes into the per-cpu cpc_regs[]. Order is important. */ +enum cppc_regs { + HIGHEST_PERF, + NOMINAL_PERF, + LOW_NON_LINEAR_PERF, + LOWEST_PERF, + GUARANTEED_PERF, + DESIRED_PERF, + MIN_PERF, + MAX_PERF, + PERF_REDUC_TOLERANCE, + TIME_WINDOW, + CTR_WRAP_TIME, + REFERENCE_CTR, + DELIVERED_CTR, + PERF_LIMITED, + ENABLE, + AUTO_SEL_ENABLE, + AUTO_ACT_WINDOW, + ENERGY_PERF, + REFERENCE_PERF, +}; + +/* + * Categorization of registers as described + * in the ACPI v.5.1 spec. + * XXX: Only filling up ones which are used by governors + * today. + */ +struct cppc_perf_caps { + u32 highest_perf; + u32 nominal_perf; + u32 reference_perf; + u32 lowest_perf; +}; + +struct cppc_perf_ctrls { + u32 max_perf; + u32 min_perf; + u32 desired_perf; +}; + +struct cppc_perf_fb_ctrs { + u64 reference; + u64 prev_reference; + u64 delivered; + u64 prev_delivered; +}; + +/* Per CPU container for runtime CPPC management. 
*/ +struct cpudata { + int cpu; + struct cppc_perf_caps perf_caps; + struct cppc_perf_ctrls perf_ctrls; + struct cppc_perf_fb_ctrs perf_fb_ctrs; + struct cpufreq_policy *cur_policy; + unsigned int shared_type; + cpumask_var_t shared_cpu_map; +}; + +extern int cppc_get_perf_ctrs(int cpu, struct cppc_perf_fb_ctrs *perf_fb_ctrs); +extern int cppc_set_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls); +extern int cppc_get_perf_caps(int cpu, struct cppc_perf_caps *caps); +extern int acpi_get_psd_map(struct cpudata **); + +/* Methods to interact with the PCC mailbox controller. */ +extern struct mbox_chan * + pcc_mbox_request_channel(struct mbox_client *, unsigned int); +extern int mbox_send_message(struct mbox_chan *chan, void *mssg); + +#endif /* _CPPC_ACPI_H*/ diff --git a/include/acpi/platform/acenv.h b/include/acpi/platform/acenv.h index ec00e2bb029e..056f245ad038 100644 --- a/include/acpi/platform/acenv.h +++ b/include/acpi/platform/acenv.h @@ -142,7 +142,7 @@ #ifdef ACPI_LIBRARY #define ACPI_USE_LOCAL_CACHE -#define ACPI_FUTURE_USAGE +#define ACPI_FULL_DEBUG #endif /* Common for all ACPICA applications */ @@ -304,11 +304,11 @@ * multi-threaded if ACPI_APPLICATION is not set. */ #ifndef DEBUGGER_THREADING -#ifdef ACPI_APPLICATION -#define DEBUGGER_THREADING DEBUGGER_SINGLE_THREADED +#if !defined (ACPI_APPLICATION) || defined (ACPI_EXEC_APP) +#define DEBUGGER_THREADING DEBUGGER_MULTI_THREADED #else -#define DEBUGGER_THREADING DEBUGGER_MULTI_THREADED +#define DEBUGGER_THREADING DEBUGGER_SINGLE_THREADED #endif #endif /* !DEBUGGER_THREADING */ diff --git a/include/acpi/platform/aclinux.h b/include/acpi/platform/aclinux.h index 74ba46c8157a..323e5daece54 100644 --- a/include/acpi/platform/aclinux.h +++ b/include/acpi/platform/aclinux.h @@ -63,12 +63,16 @@ #define ACPI_USE_SYSTEM_INTTYPES -/* Compile for reduced hardware mode only with this kernel config */ +/* Kernel specific ACPICA configuration */ #ifdef CONFIG_ACPI_REDUCED_HARDWARE_ONLY #define ACPI_REDUCED_HARDWARE 1 #endif +#ifdef CONFIG_ACPI_DEBUGGER +#define ACPI_DEBUGGER +#endif + #include <linux/string.h> #include <linux/kernel.h> #include <linux/ctype.h> @@ -151,7 +155,6 @@ * OSL interfaces used by utilities */ #define ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_redirect_output -#define ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_get_line #define ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_get_table_by_name #define ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_get_table_by_index #define ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_get_table_by_address diff --git a/include/acpi/platform/aclinuxex.h b/include/acpi/platform/aclinuxex.h index acedc3f026de..fd6d70fe1219 100644 --- a/include/acpi/platform/aclinuxex.h +++ b/include/acpi/platform/aclinuxex.h @@ -124,6 +124,11 @@ static inline acpi_thread_id acpi_os_get_thread_id(void) lock ? 
AE_OK : AE_NO_MEMORY; \ }) +static inline u8 acpi_os_readable(void *pointer, acpi_size length) +{ + return TRUE; +} + /* * OSL interfaces added by Linux */ diff --git a/include/acpi/processor.h b/include/acpi/processor.h index ff5f135f16b1..07fb100bcc68 100644 --- a/include/acpi/processor.h +++ b/include/acpi/processor.h @@ -311,6 +311,20 @@ phys_cpuid_t acpi_get_phys_id(acpi_handle, int type, u32 acpi_id); int acpi_map_cpuid(phys_cpuid_t phys_id, u32 acpi_id); int acpi_get_cpuid(acpi_handle, int type, u32 acpi_id); +#ifdef CONFIG_ACPI_CPPC_LIB +extern int acpi_cppc_processor_probe(struct acpi_processor *pr); +extern void acpi_cppc_processor_exit(struct acpi_processor *pr); +#else +static inline int acpi_cppc_processor_probe(struct acpi_processor *pr) +{ + return 0; +} +static inline void acpi_cppc_processor_exit(struct acpi_processor *pr) +{ + return; +} +#endif /* CONFIG_ACPI_CPPC_LIB */ + /* in processor_pdc.c */ void acpi_processor_set_pdc(acpi_handle handle); diff --git a/include/acpi/video.h b/include/acpi/video.h index e840b294c6f5..c62392d9b52a 100644 --- a/include/acpi/video.h +++ b/include/acpi/video.h @@ -24,7 +24,7 @@ enum acpi_backlight_type { acpi_backlight_native, }; -#if (defined CONFIG_ACPI_VIDEO || defined CONFIG_ACPI_VIDEO_MODULE) +#if IS_ENABLED(CONFIG_ACPI_VIDEO) extern int acpi_video_register(void); extern void acpi_video_unregister(void); extern int acpi_video_get_edid(struct acpi_device *device, int type, diff --git a/include/asm-generic/atomic-long.h b/include/asm-generic/atomic-long.h index a94cbebbc33d..eb1973bad80b 100644 --- a/include/asm-generic/atomic-long.h +++ b/include/asm-generic/atomic-long.h @@ -35,7 +35,7 @@ typedef atomic_t atomic_long_t; #endif #define ATOMIC_LONG_READ_OP(mo) \ -static inline long atomic_long_read##mo(atomic_long_t *l) \ +static inline long atomic_long_read##mo(const atomic_long_t *l) \ { \ ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l; \ \ @@ -112,19 +112,23 @@ static inline void atomic_long_dec(atomic_long_t *l) ATOMIC_LONG_PFX(_dec)(v); } -static inline void atomic_long_add(long i, atomic_long_t *l) -{ - ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l; - - ATOMIC_LONG_PFX(_add)(i, v); +#define ATOMIC_LONG_OP(op) \ +static inline void \ +atomic_long_##op(long i, atomic_long_t *l) \ +{ \ + ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l; \ + \ + ATOMIC_LONG_PFX(_##op)(i, v); \ } -static inline void atomic_long_sub(long i, atomic_long_t *l) -{ - ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l; +ATOMIC_LONG_OP(add) +ATOMIC_LONG_OP(sub) +ATOMIC_LONG_OP(and) +ATOMIC_LONG_OP(or) +ATOMIC_LONG_OP(xor) +ATOMIC_LONG_OP(andnot) - ATOMIC_LONG_PFX(_sub)(i, v); -} +#undef ATOMIC_LONG_OP static inline int atomic_long_sub_and_test(long i, atomic_long_t *l) { @@ -154,19 +158,24 @@ static inline int atomic_long_add_negative(long i, atomic_long_t *l) return ATOMIC_LONG_PFX(_add_negative)(i, v); } -static inline long atomic_long_inc_return(atomic_long_t *l) -{ - ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l; - - return (long)ATOMIC_LONG_PFX(_inc_return)(v); -} - -static inline long atomic_long_dec_return(atomic_long_t *l) -{ - ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l; - - return (long)ATOMIC_LONG_PFX(_dec_return)(v); +#define ATOMIC_LONG_INC_DEC_OP(op, mo) \ +static inline long \ +atomic_long_##op##_return##mo(atomic_long_t *l) \ +{ \ + ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l; \ + \ + return (long)ATOMIC_LONG_PFX(_##op##_return##mo)(v); \ } +ATOMIC_LONG_INC_DEC_OP(inc,) +ATOMIC_LONG_INC_DEC_OP(inc, _relaxed) 
+ATOMIC_LONG_INC_DEC_OP(inc, _acquire) +ATOMIC_LONG_INC_DEC_OP(inc, _release) +ATOMIC_LONG_INC_DEC_OP(dec,) +ATOMIC_LONG_INC_DEC_OP(dec, _relaxed) +ATOMIC_LONG_INC_DEC_OP(dec, _acquire) +ATOMIC_LONG_INC_DEC_OP(dec, _release) + +#undef ATOMIC_LONG_INC_DEC_OP static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u) { diff --git a/include/asm-generic/atomic.h b/include/asm-generic/atomic.h index d4d7e337fdcb..74f1a3704d7a 100644 --- a/include/asm-generic/atomic.h +++ b/include/asm-generic/atomic.h @@ -127,7 +127,7 @@ ATOMIC_OP(xor, ^) * Atomically reads the value of @v. */ #ifndef atomic_read -#define atomic_read(v) ACCESS_ONCE((v)->counter) +#define atomic_read(v) READ_ONCE((v)->counter) #endif /** @@ -137,7 +137,7 @@ ATOMIC_OP(xor, ^) * * Atomically sets the value of @v to @i. */ -#define atomic_set(v, i) (((v)->counter) = (i)) +#define atomic_set(v, i) WRITE_ONCE(((v)->counter), (i)) #include <linux/irqflags.h> diff --git a/include/asm-generic/memory_model.h b/include/asm-generic/memory_model.h index f20f407ce45d..4b4b056a6eb0 100644 --- a/include/asm-generic/memory_model.h +++ b/include/asm-generic/memory_model.h @@ -73,7 +73,7 @@ * Convert a physical address to a Page Frame Number and back */ #define __phys_to_pfn(paddr) ((unsigned long)((paddr) >> PAGE_SHIFT)) -#define __pfn_to_phys(pfn) ((pfn) << PAGE_SHIFT) +#define __pfn_to_phys(pfn) PFN_PHYS(pfn) #define page_to_pfn __page_to_pfn #define pfn_to_page __pfn_to_page diff --git a/include/asm-generic/mutex-dec.h b/include/asm-generic/mutex-dec.h index d4f9fb4e53df..fd694cfd678a 100644 --- a/include/asm-generic/mutex-dec.h +++ b/include/asm-generic/mutex-dec.h @@ -20,7 +20,7 @@ static inline void __mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *)) { - if (unlikely(atomic_dec_return(count) < 0)) + if (unlikely(atomic_dec_return_acquire(count) < 0)) fail_fn(count); } @@ -35,7 +35,7 @@ __mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *)) static inline int __mutex_fastpath_lock_retval(atomic_t *count) { - if (unlikely(atomic_dec_return(count) < 0)) + if (unlikely(atomic_dec_return_acquire(count) < 0)) return -1; return 0; } @@ -56,7 +56,7 @@ __mutex_fastpath_lock_retval(atomic_t *count) static inline void __mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *)) { - if (unlikely(atomic_inc_return(count) <= 0)) + if (unlikely(atomic_inc_return_release(count) <= 0)) fail_fn(count); } @@ -80,7 +80,7 @@ __mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *)) static inline int __mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *)) { - if (likely(atomic_cmpxchg(count, 1, 0) == 1)) + if (likely(atomic_cmpxchg_acquire(count, 1, 0) == 1)) return 1; return 0; } diff --git a/include/asm-generic/mutex-xchg.h b/include/asm-generic/mutex-xchg.h index f169ec064785..a6b4a7bd6ac9 100644 --- a/include/asm-generic/mutex-xchg.h +++ b/include/asm-generic/mutex-xchg.h @@ -31,7 +31,7 @@ __mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *)) * to ensure that any waiting tasks are woken up by the * unlock slow path. 
*/ - if (likely(atomic_xchg(count, -1) != 1)) + if (likely(atomic_xchg_acquire(count, -1) != 1)) fail_fn(count); } @@ -46,7 +46,7 @@ __mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *)) static inline int __mutex_fastpath_lock_retval(atomic_t *count) { - if (unlikely(atomic_xchg(count, 0) != 1)) + if (unlikely(atomic_xchg_acquire(count, 0) != 1)) if (likely(atomic_xchg(count, -1) != 1)) return -1; return 0; @@ -67,7 +67,7 @@ __mutex_fastpath_lock_retval(atomic_t *count) static inline void __mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *)) { - if (unlikely(atomic_xchg(count, 1) != 0)) + if (unlikely(atomic_xchg_release(count, 1) != 0)) fail_fn(count); } @@ -91,7 +91,7 @@ __mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *)) static inline int __mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *)) { - int prev = atomic_xchg(count, 0); + int prev = atomic_xchg_acquire(count, 0); if (unlikely(prev < 0)) { /* @@ -105,7 +105,7 @@ __mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *)) * owner's unlock path needlessly, but that's not a problem * in practice. ] */ - prev = atomic_xchg(count, prev); + prev = atomic_xchg_acquire(count, prev); if (prev < 0) prev = 0; } diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h index 29c57b2cb344..14b0ff32fb9f 100644 --- a/include/asm-generic/pgtable.h +++ b/include/asm-generic/pgtable.h @@ -30,9 +30,19 @@ extern int ptep_set_access_flags(struct vm_area_struct *vma, #endif #ifndef __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS +#ifdef CONFIG_TRANSPARENT_HUGEPAGE extern int pmdp_set_access_flags(struct vm_area_struct *vma, unsigned long address, pmd_t *pmdp, pmd_t entry, int dirty); +#else +static inline int pmdp_set_access_flags(struct vm_area_struct *vma, + unsigned long address, pmd_t *pmdp, + pmd_t entry, int dirty) +{ + BUILD_BUG(); + return 0; +} +#endif /* CONFIG_TRANSPARENT_HUGEPAGE */ #endif #ifndef __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG @@ -64,12 +74,12 @@ static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma, set_pmd_at(vma->vm_mm, address, pmdp, pmd_mkold(pmd)); return r; } -#else /* CONFIG_TRANSPARENT_HUGEPAGE */ +#else static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma, unsigned long address, pmd_t *pmdp) { - BUG(); + BUILD_BUG(); return 0; } #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ @@ -81,8 +91,21 @@ int ptep_clear_flush_young(struct vm_area_struct *vma, #endif #ifndef __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH -int pmdp_clear_flush_young(struct vm_area_struct *vma, - unsigned long address, pmd_t *pmdp); +#ifdef CONFIG_TRANSPARENT_HUGEPAGE +extern int pmdp_clear_flush_young(struct vm_area_struct *vma, + unsigned long address, pmd_t *pmdp); +#else +/* + * Despite relevant to THP only, this API is called from generic rmap code + * under PageTransHuge(), hence needs a dummy implementation for !THP + */ +static inline int pmdp_clear_flush_young(struct vm_area_struct *vma, + unsigned long address, pmd_t *pmdp) +{ + BUILD_BUG(); + return 0; +} +#endif /* CONFIG_TRANSPARENT_HUGEPAGE */ #endif #ifndef __HAVE_ARCH_PTEP_GET_AND_CLEAR @@ -175,11 +198,11 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm, pmd_t old_pmd = *pmdp; set_pmd_at(mm, address, pmdp, pmd_wrprotect(old_pmd)); } -#else /* CONFIG_TRANSPARENT_HUGEPAGE */ +#else static inline void pmdp_set_wrprotect(struct mm_struct *mm, unsigned long address, pmd_t *pmdp) { - BUG(); + BUILD_BUG(); } #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ #endif @@ -248,7 +271,7 @@ static inline 
int pmd_same(pmd_t pmd_a, pmd_t pmd_b) #else /* CONFIG_TRANSPARENT_HUGEPAGE */ static inline int pmd_same(pmd_t pmd_a, pmd_t pmd_b) { - BUG(); + BUILD_BUG(); return 0; } #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ @@ -482,6 +505,16 @@ static inline pmd_t pmd_mksoft_dirty(pmd_t pmd) return pmd; } +static inline pte_t pte_clear_soft_dirty(pte_t pte) +{ + return pte; +} + +static inline pmd_t pmd_clear_soft_dirty(pmd_t pmd) +{ + return pmd; +} + static inline pte_t pte_swp_mksoft_dirty(pte_t pte) { return pte; diff --git a/include/asm-generic/preempt.h b/include/asm-generic/preempt.h index 0bec580a4885..5d8ffa3e6f8c 100644 --- a/include/asm-generic/preempt.h +++ b/include/asm-generic/preempt.h @@ -24,7 +24,7 @@ static __always_inline void preempt_count_set(int pc) * must be macros to avoid header recursion hell */ #define init_task_preempt_count(p) do { \ - task_thread_info(p)->preempt_count = PREEMPT_DISABLED; \ + task_thread_info(p)->preempt_count = FORK_PREEMPT_COUNT; \ } while (0) #define init_idle_preempt_count(p, cpu) do { \ diff --git a/include/asm-generic/qrwlock_types.h b/include/asm-generic/qrwlock_types.h index 4d76f24df518..0abc6b6062fb 100644 --- a/include/asm-generic/qrwlock_types.h +++ b/include/asm-generic/qrwlock_types.h @@ -10,12 +10,12 @@ typedef struct qrwlock { atomic_t cnts; - arch_spinlock_t lock; + arch_spinlock_t wait_lock; } arch_rwlock_t; #define __ARCH_RW_LOCK_UNLOCKED { \ .cnts = ATOMIC_INIT(0), \ - .lock = __ARCH_SPIN_LOCK_UNLOCKED, \ + .wait_lock = __ARCH_SPIN_LOCK_UNLOCKED, \ } #endif /* __ASM_GENERIC_QRWLOCK_TYPES_H */ diff --git a/include/asm-generic/qspinlock.h b/include/asm-generic/qspinlock.h index 83bfb87f5bf1..e2aadbc7151f 100644 --- a/include/asm-generic/qspinlock.h +++ b/include/asm-generic/qspinlock.h @@ -111,8 +111,8 @@ static inline void queued_spin_unlock_wait(struct qspinlock *lock) cpu_relax(); } -#ifndef virt_queued_spin_lock -static __always_inline bool virt_queued_spin_lock(struct qspinlock *lock) +#ifndef virt_spin_lock +static __always_inline bool virt_spin_lock(struct qspinlock *lock) { return false; } diff --git a/include/asm-generic/rwsem.h b/include/asm-generic/rwsem.h index d48bf5a95cc1..d6d5dc98d7da 100644 --- a/include/asm-generic/rwsem.h +++ b/include/asm-generic/rwsem.h @@ -33,7 +33,7 @@ */ static inline void __down_read(struct rw_semaphore *sem) { - if (unlikely(atomic_long_inc_return((atomic_long_t *)&sem->count) <= 0)) + if (unlikely(atomic_long_inc_return_acquire((atomic_long_t *)&sem->count) <= 0)) rwsem_down_read_failed(sem); } @@ -42,7 +42,7 @@ static inline int __down_read_trylock(struct rw_semaphore *sem) long tmp; while ((tmp = sem->count) >= 0) { - if (tmp == cmpxchg(&sem->count, tmp, + if (tmp == cmpxchg_acquire(&sem->count, tmp, tmp + RWSEM_ACTIVE_READ_BIAS)) { return 1; } @@ -57,7 +57,7 @@ static inline void __down_write_nested(struct rw_semaphore *sem, int subclass) { long tmp; - tmp = atomic_long_add_return(RWSEM_ACTIVE_WRITE_BIAS, + tmp = atomic_long_add_return_acquire(RWSEM_ACTIVE_WRITE_BIAS, (atomic_long_t *)&sem->count); if (unlikely(tmp != RWSEM_ACTIVE_WRITE_BIAS)) rwsem_down_write_failed(sem); @@ -72,7 +72,7 @@ static inline int __down_write_trylock(struct rw_semaphore *sem) { long tmp; - tmp = cmpxchg(&sem->count, RWSEM_UNLOCKED_VALUE, + tmp = cmpxchg_acquire(&sem->count, RWSEM_UNLOCKED_VALUE, RWSEM_ACTIVE_WRITE_BIAS); return tmp == RWSEM_UNLOCKED_VALUE; } @@ -84,7 +84,7 @@ static inline void __up_read(struct rw_semaphore *sem) { long tmp; - tmp = atomic_long_dec_return((atomic_long_t *)&sem->count); + tmp = 
atomic_long_dec_return_release((atomic_long_t *)&sem->count); if (unlikely(tmp < -1 && (tmp & RWSEM_ACTIVE_MASK) == 0)) rwsem_wake(sem); } @@ -94,7 +94,7 @@ static inline void __up_read(struct rw_semaphore *sem) */ static inline void __up_write(struct rw_semaphore *sem) { - if (unlikely(atomic_long_sub_return(RWSEM_ACTIVE_WRITE_BIAS, + if (unlikely(atomic_long_sub_return_release(RWSEM_ACTIVE_WRITE_BIAS, (atomic_long_t *)&sem->count) < 0)) rwsem_wake(sem); } @@ -114,7 +114,14 @@ static inline void __downgrade_write(struct rw_semaphore *sem) { long tmp; - tmp = atomic_long_add_return(-RWSEM_WAITING_BIAS, + /* + * When downgrading from exclusive to shared ownership, + * anything inside the write-locked region cannot leak + * into the read side. In contrast, anything in the + * read-locked region is ok to be re-ordered into the + * write side. As such, rely on RELEASE semantics. + */ + tmp = atomic_long_add_return_release(-RWSEM_WAITING_BIAS, (atomic_long_t *)&sem->count); if (tmp < 0) rwsem_downgrade_wake(sem); diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h index 1781e54ea6d3..c4bd0e2c173c 100644 --- a/include/asm-generic/vmlinux.lds.h +++ b/include/asm-generic/vmlinux.lds.h @@ -181,6 +181,16 @@ #define CPUIDLE_METHOD_OF_TABLES() OF_TABLE(CONFIG_CPU_IDLE, cpuidle_method) #define EARLYCON_OF_TABLES() OF_TABLE(CONFIG_SERIAL_EARLYCON, earlycon) +#ifdef CONFIG_ACPI +#define ACPI_PROBE_TABLE(name) \ + . = ALIGN(8); \ + VMLINUX_SYMBOL(__##name##_acpi_probe_table) = .; \ + *(__##name##_acpi_probe_table) \ + VMLINUX_SYMBOL(__##name##_acpi_probe_table_end) = .; +#else +#define ACPI_PROBE_TABLE(name) +#endif + #define KERNEL_DTB() \ STRUCT_ALIGN(); \ VMLINUX_SYMBOL(__dtb_start) = .; \ @@ -514,6 +524,8 @@ CPUIDLE_METHOD_OF_TABLES() \ KERNEL_DTB() \ IRQCHIP_OF_MATCH_TABLE() \ + ACPI_PROBE_TABLE(irqchip) \ + ACPI_PROBE_TABLE(clksrc) \ EARLYCON_TABLE() \ EARLYCON_OF_TABLES() diff --git a/include/asm-generic/word-at-a-time.h b/include/asm-generic/word-at-a-time.h index 94f9ea8abcae..011dde083f23 100644 --- a/include/asm-generic/word-at-a-time.h +++ b/include/asm-generic/word-at-a-time.h @@ -1,15 +1,10 @@ #ifndef _ASM_WORD_AT_A_TIME_H #define _ASM_WORD_AT_A_TIME_H -/* - * This says "generic", but it's actually big-endian only. - * Little-endian can use more efficient versions of these - * interfaces, see for example - * arch/x86/include/asm/word-at-a-time.h - * for those. - */ - #include <linux/kernel.h> +#include <asm/byteorder.h> + +#ifdef __BIG_ENDIAN struct word_at_a_time { const unsigned long high_bits, low_bits; @@ -53,4 +48,73 @@ static inline bool has_zero(unsigned long val, unsigned long *data, const struct #define zero_bytemask(mask) (~1ul << __fls(mask)) #endif +#else + +/* + * The optimal byte mask counting is probably going to be something + * that is architecture-specific. If you have a reliably fast + * bit count instruction, that might be better than the multiply + * and shift, for example. + */ +struct word_at_a_time { + const unsigned long one_bits, high_bits; +}; + +#define WORD_AT_A_TIME_CONSTANTS { REPEAT_BYTE(0x01), REPEAT_BYTE(0x80) } + +#ifdef CONFIG_64BIT + +/* + * Jan Achrenius on G+: microoptimized version of + * the simpler "(mask & ONEBYTES) * ONEBYTES >> 56" + * that works for the bytemasks without having to + * mask them first. 
+ */ +static inline long count_masked_bytes(unsigned long mask) +{ + return mask*0x0001020304050608ul >> 56; +} + +#else /* 32-bit case */ + +/* Carl Chatfield / Jan Achrenius G+ version for 32-bit */ +static inline long count_masked_bytes(long mask) +{ + /* (000000 0000ff 00ffff ffffff) -> ( 1 1 2 3 ) */ + long a = (0x0ff0001+mask) >> 23; + /* Fix the 1 for 00 case */ + return a & mask; +} + +#endif + +/* Return nonzero if it has a zero */ +static inline unsigned long has_zero(unsigned long a, unsigned long *bits, const struct word_at_a_time *c) +{ + unsigned long mask = ((a - c->one_bits) & ~a) & c->high_bits; + *bits = mask; + return mask; +} + +static inline unsigned long prep_zero_mask(unsigned long a, unsigned long bits, const struct word_at_a_time *c) +{ + return bits; +} + +static inline unsigned long create_zero_mask(unsigned long bits) +{ + bits = (bits - 1) & ~bits; + return bits >> 7; +} + +/* The mask we created is directly usable as a bytemask */ +#define zero_bytemask(mask) (mask) + +static inline unsigned long find_zero(unsigned long mask) +{ + return count_masked_bytes(mask); +} + +#endif /* __BIG_ENDIAN */ + #endif /* _ASM_WORD_AT_A_TIME_H */ diff --git a/include/crypto/akcipher.h b/include/crypto/akcipher.h index 69d163e39101..45cd5b328040 100644 --- a/include/crypto/akcipher.h +++ b/include/crypto/akcipher.h @@ -18,21 +18,21 @@ * struct akcipher_request - public key request * * @base: Common attributes for async crypto requests - * @src: Pointer to memory containing the input parameters - * The format of the parameter(s) is expeted to be Octet String - * @dst: Pointer to memory whare the result will be stored - * @src_len: Size of the input parameter + * @src: Source data + * @dst: Destination data + * @src_len: Size of the input buffer * @dst_len: Size of the output buffer. It needs to be at leaset * as big as the expected result depending on the operation * After operation it will be updated with the acctual size of the - * result. In case of error, where the dst_len was insufficient, + * result. + * In case of error where the dst sgl size was insufficient, * it will be updated to the size required for the operation. * @__ctx: Start of private context data */ struct akcipher_request { struct crypto_async_request base; - void *src; - void *dst; + struct scatterlist *src; + struct scatterlist *dst; unsigned int src_len; unsigned int dst_len; void *__ctx[] CRYPTO_MINALIGN_ATTR; @@ -67,8 +67,13 @@ struct crypto_akcipher { * algorithm. In case of error, where the dst_len was insufficient, * the req->dst_len will be updated to the size required for the * operation - * @setkey: Function invokes the algorithm specific set key function, which - * knows how to decode and interpret the BER encoded key + * @set_pub_key: Function invokes the algorithm specific set public key + * function, which knows how to decode and interpret + * the BER encoded public key + * @set_priv_key: Function invokes the algorithm specific set private key + * function, which knows how to decode and interpret + * the BER encoded private key + * @max_size: Function returns dest buffer size reqired for a given key. * @init: Initialize the cryptographic transformation object. * This function is used to initialize the cryptographic * transformation object. 
This function is called only once at @@ -89,8 +94,11 @@ struct akcipher_alg { int (*verify)(struct akcipher_request *req); int (*encrypt)(struct akcipher_request *req); int (*decrypt)(struct akcipher_request *req); - int (*setkey)(struct crypto_akcipher *tfm, const void *key, - unsigned int keylen); + int (*set_pub_key)(struct crypto_akcipher *tfm, const void *key, + unsigned int keylen); + int (*set_priv_key)(struct crypto_akcipher *tfm, const void *key, + unsigned int keylen); + int (*max_size)(struct crypto_akcipher *tfm); int (*init)(struct crypto_akcipher *tfm); void (*exit)(struct crypto_akcipher *tfm); @@ -229,14 +237,14 @@ static inline void akcipher_request_set_callback(struct akcipher_request *req, * Sets parameters required by crypto operation * * @req: public key request - * @src: ptr to input parameter - * @dst: ptr of output parameter - * @src_len: size of the input buffer - * @dst_len: size of the output buffer. It will be updated by the - * implementation to reflect the acctual size of the result + * @src: ptr to input scatter list + * @dst: ptr to output scatter list + * @src_len: size of the src input scatter list to be processed + * @dst_len: size of the dst output scatter list */ static inline void akcipher_request_set_crypt(struct akcipher_request *req, - void *src, void *dst, + struct scatterlist *src, + struct scatterlist *dst, unsigned int src_len, unsigned int dst_len) { @@ -247,6 +255,22 @@ static inline void akcipher_request_set_crypt(struct akcipher_request *req, } /** + * crypto_akcipher_maxsize() -- Get len for output buffer + * + * Function returns the dest buffer size required for a given key + * + * @tfm: AKCIPHER tfm handle allocated with crypto_alloc_akcipher() + * + * Return: minimum len for output buffer or error code in key hasn't been set + */ +static inline int crypto_akcipher_maxsize(struct crypto_akcipher *tfm) +{ + struct akcipher_alg *alg = crypto_akcipher_alg(tfm); + + return alg->max_size(tfm); +} + +/** * crypto_akcipher_encrypt() -- Invoke public key encrypt operation * * Function invokes the specific public key encrypt operation for a given @@ -319,22 +343,44 @@ static inline int crypto_akcipher_verify(struct akcipher_request *req) } /** - * crypto_akcipher_setkey() -- Invoke public key setkey operation + * crypto_akcipher_set_pub_key() -- Invoke set public key operation + * + * Function invokes the algorithm specific set key function, which knows + * how to decode and interpret the encoded key + * + * @tfm: tfm handle + * @key: BER encoded public key + * @keylen: length of the key + * + * Return: zero on success; error code in case of error + */ +static inline int crypto_akcipher_set_pub_key(struct crypto_akcipher *tfm, + const void *key, + unsigned int keylen) +{ + struct akcipher_alg *alg = crypto_akcipher_alg(tfm); + + return alg->set_pub_key(tfm, key, keylen); +} + +/** + * crypto_akcipher_set_priv_key() -- Invoke set private key operation * * Function invokes the algorithm specific set key function, which knows * how to decode and interpret the encoded key * * @tfm: tfm handle - * @key: BER encoded private or public key + * @key: BER encoded private key * @keylen: length of the key * * Return: zero on success; error code in case of error */ -static inline int crypto_akcipher_setkey(struct crypto_akcipher *tfm, void *key, - unsigned int keylen) +static inline int crypto_akcipher_set_priv_key(struct crypto_akcipher *tfm, + const void *key, + unsigned int keylen) { struct akcipher_alg *alg = crypto_akcipher_alg(tfm); - return 
alg->setkey(tfm, key, keylen); + return alg->set_priv_key(tfm, key, keylen); } #endif diff --git a/include/crypto/hash.h b/include/crypto/hash.h index 8e920b44c0ac..3d69c93d50e8 100644 --- a/include/crypto/hash.h +++ b/include/crypto/hash.h @@ -264,6 +264,20 @@ static inline unsigned int crypto_ahash_alignmask( return crypto_tfm_alg_alignmask(crypto_ahash_tfm(tfm)); } +/** + * crypto_ahash_blocksize() - obtain block size for cipher + * @tfm: cipher handle + * + * The block size for the message digest cipher referenced with the cipher + * handle is returned. + * + * Return: block size of cipher + */ +static inline unsigned int crypto_ahash_blocksize(struct crypto_ahash *tfm) +{ + return crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm)); +} + static inline struct hash_alg_common *__crypto_hash_alg_common( struct crypto_alg *alg) { diff --git a/include/crypto/internal/rsa.h b/include/crypto/internal/rsa.h index a8c86365439f..f997e2d29b5a 100644 --- a/include/crypto/internal/rsa.h +++ b/include/crypto/internal/rsa.h @@ -20,8 +20,11 @@ struct rsa_key { MPI d; }; -int rsa_parse_key(struct rsa_key *rsa_key, const void *key, - unsigned int key_len); +int rsa_parse_pub_key(struct rsa_key *rsa_key, const void *key, + unsigned int key_len); + +int rsa_parse_priv_key(struct rsa_key *rsa_key, const void *key, + unsigned int key_len); void rsa_free_key(struct rsa_key *rsa_key); #endif diff --git a/include/crypto/public_key.h b/include/crypto/public_key.h index 067c242b1e15..cc2516df0efa 100644 --- a/include/crypto/public_key.h +++ b/include/crypto/public_key.h @@ -15,7 +15,6 @@ #define _LINUX_PUBLIC_KEY_H #include <linux/mpi.h> -#include <keys/asymmetric-type.h> #include <crypto/hash_info.h> enum pkey_algo { diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h index 2a747a91fded..3febb4b9fce9 100644 --- a/include/drm/drm_crtc_helper.h +++ b/include/drm/drm_crtc_helper.h @@ -240,5 +240,6 @@ extern void drm_kms_helper_hotplug_event(struct drm_device *dev); extern void drm_kms_helper_poll_disable(struct drm_device *dev); extern void drm_kms_helper_poll_enable(struct drm_device *dev); +extern void drm_kms_helper_poll_enable_locked(struct drm_device *dev); #endif diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h index 499e9f625aef..0212d139a480 100644 --- a/include/drm/drm_dp_helper.h +++ b/include/drm/drm_dp_helper.h @@ -568,6 +568,10 @@ #define MODE_I2C_READ 4 #define MODE_I2C_STOP 8 +/* DP 1.2 MST PORTs - Section 2.5.1 v1.2a spec */ +#define DP_MST_PHYSICAL_PORT_0 0 +#define DP_MST_LOGICAL_PORT_0 8 + #define DP_LINK_STATUS_SIZE 6 bool drm_dp_channel_eq_ok(const u8 link_status[DP_LINK_STATUS_SIZE], int lane_count); diff --git a/include/drm/drm_dp_mst_helper.h b/include/drm/drm_dp_mst_helper.h index 86d0b25ed054..5340099741ae 100644 --- a/include/drm/drm_dp_mst_helper.h +++ b/include/drm/drm_dp_mst_helper.h @@ -253,6 +253,7 @@ struct drm_dp_remote_dpcd_write { u8 *bytes; }; +#define DP_REMOTE_I2C_READ_MAX_TRANSACTIONS 4 struct drm_dp_remote_i2c_read { u8 num_transactions; u8 port_number; @@ -262,7 +263,7 @@ struct drm_dp_remote_i2c_read { u8 *bytes; u8 no_stop_bit; u8 i2c_transaction_delay; - } transactions[4]; + } transactions[DP_REMOTE_I2C_READ_MAX_TRANSACTIONS]; u8 read_i2c_device_id; u8 num_bytes_read; }; @@ -374,6 +375,7 @@ struct drm_dp_mst_topology_mgr; struct drm_dp_mst_topology_cbs { /* create a connector for a port */ struct drm_connector *(*add_connector)(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port, const char *path); + void 
(*register_connector)(struct drm_connector *connector); void (*destroy_connector)(struct drm_dp_mst_topology_mgr *mgr, struct drm_connector *connector); void (*hotplug)(struct drm_dp_mst_topology_mgr *mgr); diff --git a/include/drm/i915_component.h b/include/drm/i915_component.h index b2d56dd483d9..89dc7d6bc1cc 100644 --- a/include/drm/i915_component.h +++ b/include/drm/i915_component.h @@ -24,8 +24,18 @@ #ifndef _I915_COMPONENT_H_ #define _I915_COMPONENT_H_ +/* MAX_PORT is the number of port + * It must be sync with I915_MAX_PORTS defined i915_drv.h + * 5 should be enough as only HSW, BDW, SKL need such fix. + */ +#define MAX_PORTS 5 + struct i915_audio_component { struct device *dev; + /** + * @aud_sample_rate: the array of audio sample rate per port + */ + int aud_sample_rate[MAX_PORTS]; const struct i915_audio_component_ops { struct module *owner; @@ -33,6 +43,13 @@ struct i915_audio_component { void (*put_power)(struct device *); void (*codec_wake_override)(struct device *, bool enable); int (*get_cdclk_freq)(struct device *); + /** + * @sync_audio_rate: set n/cts based on the sample rate + * + * Called from audio driver. After audio driver sets the + * sample rate, it will call this function to set n/cts + */ + int (*sync_audio_rate)(struct device *, int port, int rate); } *ops; const struct i915_audio_component_audio_ops { diff --git a/include/dt-bindings/clock/at91.h b/include/dt-bindings/clock/at91.h index 0b4cb999a3f7..ab3ee241d10c 100644 --- a/include/dt-bindings/clock/at91.h +++ b/include/dt-bindings/clock/at91.h @@ -18,5 +18,6 @@ #define AT91_PMC_MOSCSELS 16 /* Main Oscillator Selection */ #define AT91_PMC_MOSCRCS 17 /* Main On-Chip RC */ #define AT91_PMC_CFDEV 18 /* Clock Failure Detector Event */ +#define AT91_PMC_GCKRDY 24 /* Generated Clocks */ #endif diff --git a/include/dt-bindings/clock/bcm-ns2.h b/include/dt-bindings/clock/bcm-ns2.h new file mode 100644 index 000000000000..d99c7a2e70cb --- /dev/null +++ b/include/dt-bindings/clock/bcm-ns2.h @@ -0,0 +1,72 @@ +/* + * BSD LICENSE + * + * Copyright(c) 2015 Broadcom Corporation. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Broadcom Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef _CLOCK_BCM_NS2_H +#define _CLOCK_BCM_NS2_H + +/* GENPLL SCR clock channel ID */ +#define BCM_NS2_GENPLL_SCR 0 +#define BCM_NS2_GENPLL_SCR_SCR_CLK 1 +#define BCM_NS2_GENPLL_SCR_FS_CLK 2 +#define BCM_NS2_GENPLL_SCR_AUDIO_CLK 3 +#define BCM_NS2_GENPLL_SCR_CH3_UNUSED 4 +#define BCM_NS2_GENPLL_SCR_CH4_UNUSED 5 +#define BCM_NS2_GENPLL_SCR_CH5_UNUSED 6 + +/* GENPLL SW clock channel ID */ +#define BCM_NS2_GENPLL_SW 0 +#define BCM_NS2_GENPLL_SW_RPE_CLK 1 +#define BCM_NS2_GENPLL_SW_250_CLK 2 +#define BCM_NS2_GENPLL_SW_NIC_CLK 3 +#define BCM_NS2_GENPLL_SW_CHIMP_CLK 4 +#define BCM_NS2_GENPLL_SW_PORT_CLK 5 +#define BCM_NS2_GENPLL_SW_SDIO_CLK 6 + +/* LCPLL DDR clock channel ID */ +#define BCM_NS2_LCPLL_DDR 0 +#define BCM_NS2_LCPLL_DDR_PCIE_SATA_USB_CLK 1 +#define BCM_NS2_LCPLL_DDR_DDR_CLK 2 +#define BCM_NS2_LCPLL_DDR_CH2_UNUSED 3 +#define BCM_NS2_LCPLL_DDR_CH3_UNUSED 4 +#define BCM_NS2_LCPLL_DDR_CH4_UNUSED 5 +#define BCM_NS2_LCPLL_DDR_CH5_UNUSED 6 + +/* LCPLL PORTS clock channel ID */ +#define BCM_NS2_LCPLL_PORTS 0 +#define BCM_NS2_LCPLL_PORTS_WAN_CLK 1 +#define BCM_NS2_LCPLL_PORTS_RGMII_CLK 2 +#define BCM_NS2_LCPLL_PORTS_CH2_UNUSED 3 +#define BCM_NS2_LCPLL_PORTS_CH3_UNUSED 4 +#define BCM_NS2_LCPLL_PORTS_CH4_UNUSED 5 +#define BCM_NS2_LCPLL_PORTS_CH5_UNUSED 6 + +#endif /* _CLOCK_BCM_NS2_H */ diff --git a/include/dt-bindings/clock/bcm-nsp.h b/include/dt-bindings/clock/bcm-nsp.h new file mode 100644 index 000000000000..ad5827cde782 --- /dev/null +++ b/include/dt-bindings/clock/bcm-nsp.h @@ -0,0 +1,51 @@ +/* + * BSD LICENSE + * + * Copyright(c) 2015 Broadcom Corporation. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Broadcom Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef _CLOCK_BCM_NSP_H +#define _CLOCK_BCM_NSP_H + +/* GENPLL clock channel ID */ +#define BCM_NSP_GENPLL 0 +#define BCM_NSP_GENPLL_PHY_CLK 1 +#define BCM_NSP_GENPLL_ENET_SW_CLK 2 +#define BCM_NSP_GENPLL_USB_PHY_REF_CLK 3 +#define BCM_NSP_GENPLL_IPROCFAST_CLK 4 +#define BCM_NSP_GENPLL_SATA1_CLK 5 +#define BCM_NSP_GENPLL_SATA2_CLK 6 + +/* LCPLL0 clock channel ID */ +#define BCM_NSP_LCPLL0 0 +#define BCM_NSP_LCPLL0_PCIE_PHY_REF_CLK 1 +#define BCM_NSP_LCPLL0_SDIO_CLK 2 +#define BCM_NSP_LCPLL0_DDR_PHY_CLK 3 + +#endif /* _CLOCK_BCM_NSP_H */ diff --git a/include/dt-bindings/clock/bcm2835.h b/include/dt-bindings/clock/bcm2835.h new file mode 100644 index 000000000000..d323efac7edf --- /dev/null +++ b/include/dt-bindings/clock/bcm2835.h @@ -0,0 +1,47 @@ +/* + * Copyright (C) 2015 Broadcom Corporation + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation version 2. + * + * This program is distributed "as is" WITHOUT ANY WARRANTY of any + * kind, whether express or implied; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#define BCM2835_PLLA 0 +#define BCM2835_PLLB 1 +#define BCM2835_PLLC 2 +#define BCM2835_PLLD 3 +#define BCM2835_PLLH 4 + +#define BCM2835_PLLA_CORE 5 +#define BCM2835_PLLA_PER 6 +#define BCM2835_PLLB_ARM 7 +#define BCM2835_PLLC_CORE0 8 +#define BCM2835_PLLC_CORE1 9 +#define BCM2835_PLLC_CORE2 10 +#define BCM2835_PLLC_PER 11 +#define BCM2835_PLLD_CORE 12 +#define BCM2835_PLLD_PER 13 +#define BCM2835_PLLH_RCAL 14 +#define BCM2835_PLLH_AUX 15 +#define BCM2835_PLLH_PIX 16 + +#define BCM2835_CLOCK_TIMER 17 +#define BCM2835_CLOCK_OTP 18 +#define BCM2835_CLOCK_UART 19 +#define BCM2835_CLOCK_VPU 20 +#define BCM2835_CLOCK_V3D 21 +#define BCM2835_CLOCK_ISP 22 +#define BCM2835_CLOCK_H264 23 +#define BCM2835_CLOCK_VEC 24 +#define BCM2835_CLOCK_HSM 25 +#define BCM2835_CLOCK_SDRAM 26 +#define BCM2835_CLOCK_TSENS 27 +#define BCM2835_CLOCK_EMMC 28 +#define BCM2835_CLOCK_PERI_IMAGE 29 + +#define BCM2835_CLOCK_COUNT 30 diff --git a/include/dt-bindings/clock/exynos7-clk.h b/include/dt-bindings/clock/exynos7-clk.h index e33c75a3c09d..10c558611085 100644 --- a/include/dt-bindings/clock/exynos7-clk.h +++ b/include/dt-bindings/clock/exynos7-clk.h @@ -21,7 +21,18 @@ #define ACLK_MSCL_532 8 #define DOUT_SCLK_AUD_PLL 9 #define FOUT_AUD_PLL 10 -#define TOPC_NR_CLK 11 +#define SCLK_AUD_PLL 11 +#define SCLK_MFC_PLL_B 12 +#define SCLK_MFC_PLL_A 13 +#define SCLK_BUS1_PLL_B 14 +#define SCLK_BUS1_PLL_A 15 +#define SCLK_BUS0_PLL_B 16 +#define SCLK_BUS0_PLL_A 17 +#define SCLK_CC_PLL_B 18 +#define SCLK_CC_PLL_A 19 +#define ACLK_CCORE_133 20 +#define ACLK_PERIS_66 21 +#define TOPC_NR_CLK 22 /* TOP0 */ #define DOUT_ACLK_PERIC1 1 @@ -38,7 +49,9 @@ #define CLK_SCLK_SPDIF 12 #define CLK_SCLK_PCM1 13 #define CLK_SCLK_I2S1 14 -#define TOP0_NR_CLK 15 +#define CLK_ACLK_PERIC0_66 15 +#define CLK_ACLK_PERIC1_66 16 +#define TOP0_NR_CLK 17 /* TOP1 */ #define DOUT_ACLK_FSYS1_200 1 @@ -49,7 +62,16 @@ #define CLK_SCLK_MMC2 6 #define CLK_SCLK_MMC1 7 #define CLK_SCLK_MMC0 8 -#define TOP1_NR_CLK 9 +#define CLK_ACLK_FSYS0_200 9 +#define CLK_ACLK_FSYS1_200 10 +#define CLK_SCLK_PHY_FSYS1 11 +#define CLK_SCLK_PHY_FSYS1_26M 12 +#define MOUT_SCLK_UFSUNIPRO20 13 +#define DOUT_SCLK_UFSUNIPRO20 14 +#define CLK_SCLK_UFSUNIPRO20 15 +#define DOUT_SCLK_PHY_FSYS1 16 +#define DOUT_SCLK_PHY_FSYS1_26M 17 +#define TOP1_NR_CLK 18 /* CCORE */ #define PCLK_RTC 1 @@ -124,7 +146,20 @@ /* FSYS1 */ #define ACLK_MMC1 1 #define ACLK_MMC0 2 -#define FSYS1_NR_CLK 3 +#define PHYCLK_UFS20_TX0_SYMBOL 3 +#define PHYCLK_UFS20_RX0_SYMBOL 4 +#define PHYCLK_UFS20_RX1_SYMBOL 5 +#define ACLK_UFS20_LINK 6 +#define SCLK_UFSUNIPRO20_USER 7 +#define PHYCLK_UFS20_RX1_SYMBOL_USER 8 +#define PHYCLK_UFS20_RX0_SYMBOL_USER 9 +#define PHYCLK_UFS20_TX0_SYMBOL_USER 10 +#define OSCCLK_PHY_CLKOUT_EMBEDDED_COMBO_PHY 11 +#define SCLK_COMBO_PHY_EMBEDDED_26M 12 +#define DOUT_PCLK_FSYS1 13 +#define PCLK_GPIO_FSYS1 14 +#define MOUT_FSYS1_PHYCLK_SEL1 15 +#define FSYS1_NR_CLK 16 /* MSCL */ #define USERMUX_ACLK_MSCL_532 1 diff --git a/include/dt-bindings/clock/imx6qdl-clock.h b/include/dt-bindings/clock/imx6qdl-clock.h index 8de173ff19f3..77985cc43316 100644 --- a/include/dt-bindings/clock/imx6qdl-clock.h +++ b/include/dt-bindings/clock/imx6qdl-clock.h @@ -254,6 +254,7 @@ #define IMX6QDL_CLK_CAAM_MEM 241 #define IMX6QDL_CLK_CAAM_ACLK 242 #define IMX6QDL_CLK_CAAM_IPG 243 -#define IMX6QDL_CLK_END 244 +#define IMX6QDL_CLK_SPDIF_GCLK 244 +#define IMX6QDL_CLK_END 245 #endif /* __DT_BINDINGS_CLOCK_IMX6QDL_H */ diff --git a/include/dt-bindings/clock/imx6sl-clock.h 
b/include/dt-bindings/clock/imx6sl-clock.h index 9ce4e421096f..e14573e293c5 100644 --- a/include/dt-bindings/clock/imx6sl-clock.h +++ b/include/dt-bindings/clock/imx6sl-clock.h @@ -174,6 +174,7 @@ #define IMX6SL_CLK_SSI1_IPG 161 #define IMX6SL_CLK_SSI2_IPG 162 #define IMX6SL_CLK_SSI3_IPG 163 -#define IMX6SL_CLK_END 164 +#define IMX6SL_CLK_SPDIF_GCLK 164 +#define IMX6SL_CLK_END 165 #endif /* __DT_BINDINGS_CLOCK_IMX6SL_H */ diff --git a/include/dt-bindings/clock/imx6sx-clock.h b/include/dt-bindings/clock/imx6sx-clock.h index 995709119ec5..36f0324902a5 100644 --- a/include/dt-bindings/clock/imx6sx-clock.h +++ b/include/dt-bindings/clock/imx6sx-clock.h @@ -274,6 +274,7 @@ #define IMX6SX_PLL5_BYPASS 261 #define IMX6SX_PLL6_BYPASS 262 #define IMX6SX_PLL7_BYPASS 263 -#define IMX6SX_CLK_CLK_END 264 +#define IMX6SX_CLK_SPDIF_GCLK 264 +#define IMX6SX_CLK_CLK_END 265 #endif /* __DT_BINDINGS_CLOCK_IMX6SX_H */ diff --git a/include/dt-bindings/clock/imx7d-clock.h b/include/dt-bindings/clock/imx7d-clock.h index 728df28b00d5..a4a7a9ce3457 100644 --- a/include/dt-bindings/clock/imx7d-clock.h +++ b/include/dt-bindings/clock/imx7d-clock.h @@ -446,5 +446,6 @@ #define IMX7D_MU_ROOT_CLK 433 #define IMX7D_SEMA4_HS_ROOT_CLK 434 #define IMX7D_PLL_DRAM_TEST_DIV 435 -#define IMX7D_CLK_END 436 +#define IMX7D_ADC_ROOT_CLK 436 +#define IMX7D_CLK_END 437 #endif /* __DT_BINDINGS_CLOCK_IMX7D_H */ diff --git a/include/dt-bindings/clock/mt8173-clk.h b/include/dt-bindings/clock/mt8173-clk.h index 4ad76ed882ad..7956ba1bc974 100644 --- a/include/dt-bindings/clock/mt8173-clk.h +++ b/include/dt-bindings/clock/mt8173-clk.h @@ -18,7 +18,6 @@ /* TOPCKGEN */ #define CLK_TOP_CLKPH_MCK_O 1 -#define CLK_TOP_DPI 2 #define CLK_TOP_USB_SYSPLL_125M 3 #define CLK_TOP_HDMITX_DIG_CTS 4 #define CLK_TOP_ARMCA7PLL_754M 5 @@ -154,12 +153,16 @@ #define CLK_TOP_I2S2_M_SEL 135 #define CLK_TOP_I2S3_M_SEL 136 #define CLK_TOP_I2S3_B_SEL 137 -#define CLK_TOP_NR_CLK 138 +#define CLK_TOP_DSI0_DIG 138 +#define CLK_TOP_DSI1_DIG 139 +#define CLK_TOP_LVDS_PXL 140 +#define CLK_TOP_LVDS_CTS 141 +#define CLK_TOP_NR_CLK 142 /* APMIXED_SYS */ -#define CLK_APMIXED_ARMCA15PLL 1 -#define CLK_APMIXED_ARMCA7PLL 2 +#define CLK_APMIXED_ARMCA15PLL 1 +#define CLK_APMIXED_ARMCA7PLL 2 #define CLK_APMIXED_MAINPLL 3 #define CLK_APMIXED_UNIVPLL 4 #define CLK_APMIXED_MMPLL 5 @@ -172,7 +175,8 @@ #define CLK_APMIXED_APLL2 12 #define CLK_APMIXED_LVDSPLL 13 #define CLK_APMIXED_MSDCPLL2 14 -#define CLK_APMIXED_NR_CLK 15 +#define CLK_APMIXED_REF2USB_TX 15 +#define CLK_APMIXED_NR_CLK 16 /* INFRA_SYS */ @@ -187,7 +191,8 @@ #define CLK_INFRA_CEC 9 #define CLK_INFRA_PMICSPI 10 #define CLK_INFRA_PMICWRAP 11 -#define CLK_INFRA_NR_CLK 12 +#define CLK_INFRA_CLK_13M 12 +#define CLK_INFRA_NR_CLK 13 /* PERI_SYS */ @@ -232,4 +237,91 @@ #define CLK_PERI_UART3_SEL 39 #define CLK_PERI_NR_CLK 40 +/* IMG_SYS */ + +#define CLK_IMG_LARB2_SMI 1 +#define CLK_IMG_CAM_SMI 2 +#define CLK_IMG_CAM_CAM 3 +#define CLK_IMG_SEN_TG 4 +#define CLK_IMG_SEN_CAM 5 +#define CLK_IMG_CAM_SV 6 +#define CLK_IMG_FD 7 +#define CLK_IMG_NR_CLK 8 + +/* MM_SYS */ + +#define CLK_MM_SMI_COMMON 1 +#define CLK_MM_SMI_LARB0 2 +#define CLK_MM_CAM_MDP 3 +#define CLK_MM_MDP_RDMA0 4 +#define CLK_MM_MDP_RDMA1 5 +#define CLK_MM_MDP_RSZ0 6 +#define CLK_MM_MDP_RSZ1 7 +#define CLK_MM_MDP_RSZ2 8 +#define CLK_MM_MDP_TDSHP0 9 +#define CLK_MM_MDP_TDSHP1 10 +#define CLK_MM_MDP_WDMA 11 +#define CLK_MM_MDP_WROT0 12 +#define CLK_MM_MDP_WROT1 13 +#define CLK_MM_FAKE_ENG 14 +#define CLK_MM_MUTEX_32K 15 +#define CLK_MM_DISP_OVL0 16 +#define 
CLK_MM_DISP_OVL1 17 +#define CLK_MM_DISP_RDMA0 18 +#define CLK_MM_DISP_RDMA1 19 +#define CLK_MM_DISP_RDMA2 20 +#define CLK_MM_DISP_WDMA0 21 +#define CLK_MM_DISP_WDMA1 22 +#define CLK_MM_DISP_COLOR0 23 +#define CLK_MM_DISP_COLOR1 24 +#define CLK_MM_DISP_AAL 25 +#define CLK_MM_DISP_GAMMA 26 +#define CLK_MM_DISP_UFOE 27 +#define CLK_MM_DISP_SPLIT0 28 +#define CLK_MM_DISP_SPLIT1 29 +#define CLK_MM_DISP_MERGE 30 +#define CLK_MM_DISP_OD 31 +#define CLK_MM_DISP_PWM0MM 32 +#define CLK_MM_DISP_PWM026M 33 +#define CLK_MM_DISP_PWM1MM 34 +#define CLK_MM_DISP_PWM126M 35 +#define CLK_MM_DSI0_ENGINE 36 +#define CLK_MM_DSI0_DIGITAL 37 +#define CLK_MM_DSI1_ENGINE 38 +#define CLK_MM_DSI1_DIGITAL 39 +#define CLK_MM_DPI_PIXEL 40 +#define CLK_MM_DPI_ENGINE 41 +#define CLK_MM_DPI1_PIXEL 42 +#define CLK_MM_DPI1_ENGINE 43 +#define CLK_MM_HDMI_PIXEL 44 +#define CLK_MM_HDMI_PLLCK 45 +#define CLK_MM_HDMI_AUDIO 46 +#define CLK_MM_HDMI_SPDIF 47 +#define CLK_MM_LVDS_PIXEL 48 +#define CLK_MM_LVDS_CTS 49 +#define CLK_MM_SMI_LARB4 50 +#define CLK_MM_HDMI_HDCP 51 +#define CLK_MM_HDMI_HDCP24M 52 +#define CLK_MM_NR_CLK 53 + +/* VDEC_SYS */ + +#define CLK_VDEC_CKEN 1 +#define CLK_VDEC_LARB_CKEN 2 +#define CLK_VDEC_NR_CLK 3 + +/* VENC_SYS */ + +#define CLK_VENC_CKE0 1 +#define CLK_VENC_CKE1 2 +#define CLK_VENC_CKE2 3 +#define CLK_VENC_CKE3 4 +#define CLK_VENC_NR_CLK 5 + +/* VENCLT_SYS */ + +#define CLK_VENCLT_CKE0 1 +#define CLK_VENCLT_CKE1 2 +#define CLK_VENCLT_NR_CLK 3 + #endif /* _DT_BINDINGS_CLK_MT8173_H */ diff --git a/include/dt-bindings/clock/qcom,gcc-apq8084.h b/include/dt-bindings/clock/qcom,gcc-apq8084.h index 2c0da566c46a..5aa7ebeae411 100644 --- a/include/dt-bindings/clock/qcom,gcc-apq8084.h +++ b/include/dt-bindings/clock/qcom,gcc-apq8084.h @@ -348,4 +348,10 @@ #define GCC_PCIE_1_PIPE_CLK 331 #define GCC_PCIE_1_SLV_AXI_CLK 332 +/* gdscs */ +#define USB_HS_HSIC_GDSC 0 +#define PCIE0_GDSC 1 +#define PCIE1_GDSC 2 +#define USB30_GDSC 3 + #endif diff --git a/include/dt-bindings/clock/qcom,gcc-msm8916.h b/include/dt-bindings/clock/qcom,gcc-msm8916.h index e430f644dd6c..257e2fbedd94 100644 --- a/include/dt-bindings/clock/qcom,gcc-msm8916.h +++ b/include/dt-bindings/clock/qcom,gcc-msm8916.h @@ -152,5 +152,35 @@ #define GCC_VENUS0_AHB_CLK 135 #define GCC_VENUS0_AXI_CLK 136 #define GCC_VENUS0_VCODEC0_CLK 137 +#define BIMC_DDR_CLK_SRC 138 +#define GCC_APSS_TCU_CLK 139 +#define GCC_GFX_TCU_CLK 140 +#define BIMC_GPU_CLK_SRC 141 +#define GCC_BIMC_GFX_CLK 142 +#define GCC_BIMC_GPU_CLK 143 +#define ULTAUDIO_LPAIF_PRI_I2S_CLK_SRC 144 +#define ULTAUDIO_LPAIF_SEC_I2S_CLK_SRC 145 +#define ULTAUDIO_LPAIF_AUX_I2S_CLK_SRC 146 +#define ULTAUDIO_XO_CLK_SRC 147 +#define ULTAUDIO_AHBFABRIC_CLK_SRC 148 +#define CODEC_DIGCODEC_CLK_SRC 149 +#define GCC_ULTAUDIO_PCNOC_MPORT_CLK 150 +#define GCC_ULTAUDIO_PCNOC_SWAY_CLK 151 +#define GCC_ULTAUDIO_AVSYNC_XO_CLK 152 +#define GCC_ULTAUDIO_STC_XO_CLK 153 +#define GCC_ULTAUDIO_AHBFABRIC_IXFABRIC_CLK 154 +#define GCC_ULTAUDIO_AHBFABRIC_IXFABRIC_LPM_CLK 155 +#define GCC_ULTAUDIO_LPAIF_PRI_I2S_CLK 156 +#define GCC_ULTAUDIO_LPAIF_SEC_I2S_CLK 157 +#define GCC_ULTAUDIO_LPAIF_AUX_I2S_CLK 158 +#define GCC_CODEC_DIGCODEC_CLK 159 + +/* Indexes for GDSCs */ +#define BIMC_GDSC 0 +#define VENUS_GDSC 1 +#define MDSS_GDSC 2 +#define JPEG_GDSC 3 +#define VFE_GDSC 4 +#define OXILI_GDSC 5 #endif diff --git a/include/dt-bindings/clock/qcom,gcc-msm8974.h b/include/dt-bindings/clock/qcom,gcc-msm8974.h index 51e51c860fe6..81d32f639190 100644 --- a/include/dt-bindings/clock/qcom,gcc-msm8974.h +++ 
b/include/dt-bindings/clock/qcom,gcc-msm8974.h @@ -321,4 +321,7 @@ #define GCC_SDCC1_CDCCAL_SLEEP_CLK 304 #define GCC_SDCC1_CDCCAL_FF_CLK 305 +/* gdscs */ +#define USB_HS_HSIC_GDSC 0 + #endif diff --git a/include/dt-bindings/clock/qcom,mmcc-apq8084.h b/include/dt-bindings/clock/qcom,mmcc-apq8084.h index d72b5b35f15e..03861e3f498e 100644 --- a/include/dt-bindings/clock/qcom,mmcc-apq8084.h +++ b/include/dt-bindings/clock/qcom,mmcc-apq8084.h @@ -180,4 +180,14 @@ #define VPU_SLEEP_CLK 163 #define VPU_VDP_CLK 164 +/* GDSCs */ +#define VENUS0_GDSC 0 +#define VENUS0_CORE0_GDSC 1 +#define VENUS0_CORE1_GDSC 2 +#define MDSS_GDSC 3 +#define CAMSS_JPEG_GDSC 4 +#define CAMSS_VFE_GDSC 5 +#define OXILI_GDSC 6 +#define OXILICX_GDSC 7 + #endif diff --git a/include/dt-bindings/clock/qcom,mmcc-msm8974.h b/include/dt-bindings/clock/qcom,mmcc-msm8974.h index 032ed87ef0f3..28651e54c9ae 100644 --- a/include/dt-bindings/clock/qcom,mmcc-msm8974.h +++ b/include/dt-bindings/clock/qcom,mmcc-msm8974.h @@ -158,4 +158,12 @@ #define SPDM_RM_AXI 141 #define SPDM_RM_OCMEMNOC 142 +/* gdscs */ +#define VENUS0_GDSC 0 +#define MDSS_GDSC 1 +#define CAMSS_JPEG_GDSC 2 +#define CAMSS_VFE_GDSC 3 +#define OXILI_GDSC 4 +#define OXILICX_GDSC 5 + #endif diff --git a/include/dt-bindings/clock/r8a7795-cpg-mssr.h b/include/dt-bindings/clock/r8a7795-cpg-mssr.h new file mode 100644 index 000000000000..e864aae0a256 --- /dev/null +++ b/include/dt-bindings/clock/r8a7795-cpg-mssr.h @@ -0,0 +1,63 @@ +/* + * Copyright (C) 2015 Renesas Electronics Corp. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ */ +#ifndef __DT_BINDINGS_CLOCK_R8A7795_CPG_MSSR_H__ +#define __DT_BINDINGS_CLOCK_R8A7795_CPG_MSSR_H__ + +#include <dt-bindings/clock/renesas-cpg-mssr.h> + +/* r8a7795 CPG Core Clocks */ +#define R8A7795_CLK_Z 0 +#define R8A7795_CLK_Z2 1 +#define R8A7795_CLK_ZR 2 +#define R8A7795_CLK_ZG 3 +#define R8A7795_CLK_ZTR 4 +#define R8A7795_CLK_ZTRD2 5 +#define R8A7795_CLK_ZT 6 +#define R8A7795_CLK_ZX 7 +#define R8A7795_CLK_S0D1 8 +#define R8A7795_CLK_S0D4 9 +#define R8A7795_CLK_S1D1 10 +#define R8A7795_CLK_S1D2 11 +#define R8A7795_CLK_S1D4 12 +#define R8A7795_CLK_S2D1 13 +#define R8A7795_CLK_S2D2 14 +#define R8A7795_CLK_S2D4 15 +#define R8A7795_CLK_S3D1 16 +#define R8A7795_CLK_S3D2 17 +#define R8A7795_CLK_S3D4 18 +#define R8A7795_CLK_LB 19 +#define R8A7795_CLK_CL 20 +#define R8A7795_CLK_ZB3 21 +#define R8A7795_CLK_ZB3D2 22 +#define R8A7795_CLK_CR 23 +#define R8A7795_CLK_CRD2 24 +#define R8A7795_CLK_SD0H 25 +#define R8A7795_CLK_SD0 26 +#define R8A7795_CLK_SD1H 27 +#define R8A7795_CLK_SD1 28 +#define R8A7795_CLK_SD2H 29 +#define R8A7795_CLK_SD2 30 +#define R8A7795_CLK_SD3H 31 +#define R8A7795_CLK_SD3 32 +#define R8A7795_CLK_SSP2 33 +#define R8A7795_CLK_SSP1 34 +#define R8A7795_CLK_SSPRS 35 +#define R8A7795_CLK_RPC 36 +#define R8A7795_CLK_RPCD2 37 +#define R8A7795_CLK_MSO 38 +#define R8A7795_CLK_CANFD 39 +#define R8A7795_CLK_HDMI 40 +#define R8A7795_CLK_CSI0 41 +#define R8A7795_CLK_CSIREF 42 +#define R8A7795_CLK_CP 43 +#define R8A7795_CLK_CPEX 44 +#define R8A7795_CLK_R 45 +#define R8A7795_CLK_OSC 46 + +#endif /* __DT_BINDINGS_CLOCK_R8A7795_CPG_MSSR_H__ */ diff --git a/include/dt-bindings/clock/renesas-cpg-mssr.h b/include/dt-bindings/clock/renesas-cpg-mssr.h new file mode 100644 index 000000000000..569a3cc33ffb --- /dev/null +++ b/include/dt-bindings/clock/renesas-cpg-mssr.h @@ -0,0 +1,15 @@ +/* + * Copyright (C) 2015 Renesas Electronics Corp. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ +#ifndef __DT_BINDINGS_CLOCK_RENESAS_CPG_MSSR_H__ +#define __DT_BINDINGS_CLOCK_RENESAS_CPG_MSSR_H__ + +#define CPG_CORE 0 /* Core Clock */ +#define CPG_MOD 1 /* Module Clock */ + +#endif /* __DT_BINDINGS_CLOCK_RENESAS_CPG_MSSR_H__ */ diff --git a/include/dt-bindings/clock/sun4i-a10-pll2.h b/include/dt-bindings/clock/sun4i-a10-pll2.h new file mode 100644 index 000000000000..071c8112d531 --- /dev/null +++ b/include/dt-bindings/clock/sun4i-a10-pll2.h @@ -0,0 +1,53 @@ +/* + * Copyright 2015 Maxime Ripard + * + * Maxime Ripard <maxime.ripard@free-electrons.com> + * + * This file is dual-licensed: you can use it either under the terms + * of the GPL or the X11 license, at your option. Note that this dual + * licensing only applies to this file, and not this project as a + * whole. + * + * a) This file is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This file is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * Or, alternatively, + * + * b) Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, + * copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following + * conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES + * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +#ifndef __DT_BINDINGS_CLOCK_SUN4I_A10_PLL2_H_ +#define __DT_BINDINGS_CLOCK_SUN4I_A10_PLL2_H_ + +#define SUN4I_A10_PLL2_1X 0 +#define SUN4I_A10_PLL2_2X 1 +#define SUN4I_A10_PLL2_4X 2 +#define SUN4I_A10_PLL2_8X 3 + +#endif /* __DT_BINDINGS_CLOCK_SUN4I_A10_PLL2_H_ */ diff --git a/include/dt-bindings/clock/vf610-clock.h b/include/dt-bindings/clock/vf610-clock.h index d19763439472..56c16aaea112 100644 --- a/include/dt-bindings/clock/vf610-clock.h +++ b/include/dt-bindings/clock/vf610-clock.h @@ -194,6 +194,7 @@ #define VF610_PLL7_BYPASS 181 #define VF610_CLK_SNVS 182 #define VF610_CLK_DAP 183 -#define VF610_CLK_END 184 +#define VF610_CLK_OCOTP 184 +#define VF610_CLK_END 185 #endif /* __DT_BINDINGS_CLOCK_VF610_H */ diff --git a/include/dt-bindings/gpio/gpio.h b/include/dt-bindings/gpio/gpio.h index e6b1e0a808ae..c673d2c87c60 100644 --- a/include/dt-bindings/gpio/gpio.h +++ b/include/dt-bindings/gpio/gpio.h @@ -9,7 +9,19 @@ #ifndef _DT_BINDINGS_GPIO_GPIO_H #define _DT_BINDINGS_GPIO_GPIO_H +/* Bit 0 express polarity */ #define GPIO_ACTIVE_HIGH 0 #define GPIO_ACTIVE_LOW 1 +/* Bit 1 express single-endedness */ +#define GPIO_PUSH_PULL 0 +#define GPIO_SINGLE_ENDED 2 + +/* + * Open Drain/Collector is the combination of single-ended active low, + * Open Source/Emitter is the combination of single-ended active high. + */ +#define GPIO_OPEN_DRAIN (GPIO_SINGLE_ENDED | GPIO_ACTIVE_LOW) +#define GPIO_OPEN_SOURCE (GPIO_SINGLE_ENDED | GPIO_ACTIVE_HIGH) + #endif diff --git a/include/dt-bindings/leds/leds-netxbig.h b/include/dt-bindings/leds/leds-netxbig.h new file mode 100644 index 000000000000..92658b0310b2 --- /dev/null +++ b/include/dt-bindings/leds/leds-netxbig.h @@ -0,0 +1,18 @@ +/* + * This header provides constants for netxbig LED bindings. + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. 
+ */ + +#ifndef _DT_BINDINGS_LEDS_NETXBIG_H +#define _DT_BINDINGS_LEDS_NETXBIG_H + +#define NETXBIG_LED_OFF 0 +#define NETXBIG_LED_ON 1 +#define NETXBIG_LED_SATA 2 +#define NETXBIG_LED_TIMER1 3 +#define NETXBIG_LED_TIMER2 4 + +#endif /* _DT_BINDINGS_LEDS_NETXBIG_H */ diff --git a/include/dt-bindings/mfd/arizona.h b/include/dt-bindings/mfd/arizona.h index 7b2000cead43..c40f665e2712 100644 --- a/include/dt-bindings/mfd/arizona.h +++ b/include/dt-bindings/mfd/arizona.h @@ -107,5 +107,7 @@ #define ARIZONA_ACCDET_MODE_MIC 0 #define ARIZONA_ACCDET_MODE_HPL 1 #define ARIZONA_ACCDET_MODE_HPR 2 +#define ARIZONA_ACCDET_MODE_HPM 4 +#define ARIZONA_ACCDET_MODE_ADC 7 #endif diff --git a/include/dt-bindings/mfd/atmel-flexcom.h b/include/dt-bindings/mfd/atmel-flexcom.h new file mode 100644 index 000000000000..a266fe4ee945 --- /dev/null +++ b/include/dt-bindings/mfd/atmel-flexcom.h @@ -0,0 +1,26 @@ +/* + * This header provides macros for Atmel Flexcom DT bindings. + * + * Copyright (C) 2015 Cyrille Pitchen <cyrille.pitchen@atmel.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see <http://www.gnu.org/licenses/>. + */ + +#ifndef __DT_BINDINGS_ATMEL_FLEXCOM_H__ +#define __DT_BINDINGS_ATMEL_FLEXCOM_H__ + +#define ATMEL_FLEXCOM_MODE_USART 1 +#define ATMEL_FLEXCOM_MODE_SPI 2 +#define ATMEL_FLEXCOM_MODE_TWI 3 + +#endif /* __DT_BINDINGS_ATMEL_FLEXCOM_H__ */ diff --git a/include/keys/asymmetric-subtype.h b/include/keys/asymmetric-subtype.h index 4b840e822209..4915d40d3c3c 100644 --- a/include/keys/asymmetric-subtype.h +++ b/include/keys/asymmetric-subtype.h @@ -49,7 +49,7 @@ struct asymmetric_key_subtype { static inline struct asymmetric_key_subtype *asymmetric_key_subtype(const struct key *key) { - return key->type_data.p[0]; + return key->payload.data[asym_subtype]; } #endif /* _KEYS_ASYMMETRIC_SUBTYPE_H */ diff --git a/include/keys/asymmetric-type.h b/include/keys/asymmetric-type.h index c0754abb2f56..59c1df9cf922 100644 --- a/include/keys/asymmetric-type.h +++ b/include/keys/asymmetric-type.h @@ -19,6 +19,16 @@ extern struct key_type key_type_asymmetric; /* + * The key payload is four words. The asymmetric-type key uses them as + * follows: + */ +enum asymmetric_payload_bits { + asym_crypto, + asym_subtype, + asym_key_ids, +}; + +/* * Identifiers for an asymmetric key ID. We have three ways of looking up a * key derived from an X.509 certificate: * @@ -58,6 +68,11 @@ extern struct asymmetric_key_id *asymmetric_key_generate_id(const void *val_1, size_t len_1, const void *val_2, size_t len_2); +static inline +const struct asymmetric_key_ids *asymmetric_key_ids(const struct key *key) +{ + return key->payload.data[asym_key_ids]; +} /* * The payload is at the discretion of the subtype. 
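The keys/asymmetric-subtype.h and keys/asymmetric-type.h hunks above move the subtype and key-ID pointers from the old key->type_data.p[] slots into the four-word key->payload.data[] array, indexed by the new asymmetric_payload_bits enum, and add an asymmetric_key_ids() accessor next to the existing asymmetric_key_subtype() one. As a hedged illustration only (the helper below is hypothetical and not part of the patch), a reader of an asymmetric key would now go through these accessors instead of touching the payload words directly:

/*
 * Minimal sketch, assuming only the accessors visible in the hunks above.
 * The function name and the printed output are illustrative, not from the
 * patch; subtype->name is the human-readable subtype name declared in
 * keys/asymmetric-subtype.h.
 */
#include <linux/key.h>
#include <linux/printk.h>
#include <keys/asymmetric-subtype.h>
#include <keys/asymmetric-type.h>

static void inspect_asymmetric_key(const struct key *key)
{
	/* Previously read from key->type_data.p[0]; now payload.data[asym_subtype]. */
	struct asymmetric_key_subtype *subtype = asymmetric_key_subtype(key);

	/* New accessor wrapping payload.data[asym_key_ids]. */
	const struct asymmetric_key_ids *kids = asymmetric_key_ids(key);

	if (subtype)
		pr_info("asymmetric key subtype: %s\n", subtype->name);
	pr_info("key ids %spresent\n", kids ? "" : "not ");
}
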
diff --git a/include/keys/trusted-type.h b/include/keys/trusted-type.h index 56f82e5c9975..f91ecd9d1bb1 100644 --- a/include/keys/trusted-type.h +++ b/include/keys/trusted-type.h @@ -12,10 +12,12 @@ #include <linux/key.h> #include <linux/rcupdate.h> +#include <linux/tpm.h> #define MIN_KEY_SIZE 32 #define MAX_KEY_SIZE 128 -#define MAX_BLOB_SIZE 320 +#define MAX_BLOB_SIZE 512 +#define MAX_PCRINFO_SIZE 64 struct trusted_key_payload { struct rcu_head rcu; @@ -26,6 +28,16 @@ struct trusted_key_payload { unsigned char blob[MAX_BLOB_SIZE]; }; +struct trusted_key_options { + uint16_t keytype; + uint32_t keyhandle; + unsigned char keyauth[TPM_DIGEST_SIZE]; + unsigned char blobauth[TPM_DIGEST_SIZE]; + uint32_t pcrinfo_len; + unsigned char pcrinfo[MAX_PCRINFO_SIZE]; + int pcrlock; +}; + extern struct key_type key_type_trusted; #endif /* _KEYS_TRUSTED_TYPE_H */ diff --git a/include/keys/user-type.h b/include/keys/user-type.h index cebefb069c44..c56fef40f53e 100644 --- a/include/keys/user-type.h +++ b/include/keys/user-type.h @@ -15,6 +15,8 @@ #include <linux/key.h> #include <linux/rcupdate.h> +#ifdef CONFIG_KEYS + /*****************************************************************************/ /* * the payload for a key of type "user" or "logon" @@ -46,5 +48,11 @@ extern void user_describe(const struct key *user, struct seq_file *m); extern long user_read(const struct key *key, char __user *buffer, size_t buflen); +static inline const struct user_key_payload *user_key_payload(const struct key *key) +{ + return (struct user_key_payload *)rcu_dereference_key(key); +} + +#endif /* CONFIG_KEYS */ #endif /* _KEYS_USER_TYPE_H */ diff --git a/include/kvm/arm_arch_timer.h b/include/kvm/arm_arch_timer.h index e1e4d7c38dda..1800227af9d6 100644 --- a/include/kvm/arm_arch_timer.h +++ b/include/kvm/arm_arch_timer.h @@ -51,7 +51,7 @@ struct arch_timer_cpu { bool armed; /* Timer IRQ */ - const struct kvm_irq_level *irq; + struct kvm_irq_level irq; /* VGIC mapping */ struct irq_phys_map *map; @@ -71,5 +71,7 @@ u64 kvm_arm_timer_get_reg(struct kvm_vcpu *, u64 regid); int kvm_arm_timer_set_reg(struct kvm_vcpu *, u64 regid, u64 value); bool kvm_timer_should_fire(struct kvm_vcpu *vcpu); +void kvm_timer_schedule(struct kvm_vcpu *vcpu); +void kvm_timer_unschedule(struct kvm_vcpu *vcpu); #endif diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h index d901f1a47be6..9c747cb14ad8 100644 --- a/include/kvm/arm_vgic.h +++ b/include/kvm/arm_vgic.h @@ -35,11 +35,7 @@ #define VGIC_V3_MAX_LRS 16 #define VGIC_MAX_IRQS 1024 #define VGIC_V2_MAX_CPUS 8 - -/* Sanity checks... 
*/ -#if (KVM_MAX_VCPUS > 255) -#error Too many KVM VCPUs, the VGIC only supports up to 255 VCPUs for now -#endif +#define VGIC_V3_MAX_CPUS 255 #if (VGIC_NR_IRQS_LEGACY & 31) #error "VGIC_NR_IRQS must be a multiple of 32" @@ -116,7 +112,6 @@ struct vgic_vmcr { struct vgic_ops { struct vgic_lr (*get_lr)(const struct kvm_vcpu *, int); void (*set_lr)(struct kvm_vcpu *, int, struct vgic_lr); - void (*sync_lr_elrsr)(struct kvm_vcpu *, int, struct vgic_lr); u64 (*get_elrsr)(const struct kvm_vcpu *vcpu); u64 (*get_eisr)(const struct kvm_vcpu *vcpu); void (*clear_eisr)(struct kvm_vcpu *vcpu); @@ -163,7 +158,6 @@ struct irq_phys_map { u32 virt_irq; u32 phys_irq; u32 irq; - bool active; }; struct irq_phys_map_entry { @@ -286,7 +280,7 @@ struct vgic_v2_cpu_if { }; struct vgic_v3_cpu_if { -#ifdef CONFIG_ARM_GIC_V3 +#ifdef CONFIG_KVM_ARM_VGIC_V3 u32 vgic_hcr; u32 vgic_vmcr; u32 vgic_sre; /* Restored only, change ignored */ @@ -300,22 +294,16 @@ struct vgic_v3_cpu_if { }; struct vgic_cpu { - /* per IRQ to LR mapping */ - u8 *vgic_irq_lr_map; - /* Pending/active/both interrupts on this VCPU */ - DECLARE_BITMAP( pending_percpu, VGIC_NR_PRIVATE_IRQS); - DECLARE_BITMAP( active_percpu, VGIC_NR_PRIVATE_IRQS); - DECLARE_BITMAP( pend_act_percpu, VGIC_NR_PRIVATE_IRQS); + DECLARE_BITMAP(pending_percpu, VGIC_NR_PRIVATE_IRQS); + DECLARE_BITMAP(active_percpu, VGIC_NR_PRIVATE_IRQS); + DECLARE_BITMAP(pend_act_percpu, VGIC_NR_PRIVATE_IRQS); /* Pending/active/both shared interrupts, dynamically sized */ unsigned long *pending_shared; unsigned long *active_shared; unsigned long *pend_act_shared; - /* Bitmap of used/free list registers */ - DECLARE_BITMAP( lr_used, VGIC_V2_MAX_LRS); - /* Number of list registers on this CPU */ int nr_lr; @@ -358,8 +346,6 @@ int kvm_vgic_vcpu_active_irq(struct kvm_vcpu *vcpu); struct irq_phys_map *kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu, int virt_irq, int irq); int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, struct irq_phys_map *map); -bool kvm_vgic_get_phys_irq_active(struct irq_phys_map *map); -void kvm_vgic_set_phys_irq_active(struct irq_phys_map *map, bool active); #define irqchip_in_kernel(k) (!!((k)->arch.vgic.in_kernel)) #define vgic_initialized(k) (!!((k)->arch.vgic.nr_cpus)) @@ -368,7 +354,7 @@ void kvm_vgic_set_phys_irq_active(struct irq_phys_map *map, bool active); int vgic_v2_probe(struct device_node *vgic_node, const struct vgic_ops **ops, const struct vgic_params **params); -#ifdef CONFIG_ARM_GIC_V3 +#ifdef CONFIG_KVM_ARM_VGIC_V3 int vgic_v3_probe(struct device_node *vgic_node, const struct vgic_ops **ops, const struct vgic_params **params); diff --git a/include/linux/acpi.h b/include/linux/acpi.h index 7235c4851460..d6f95bb481d4 100644 --- a/include/linux/acpi.h +++ b/include/linux/acpi.h @@ -49,7 +49,7 @@ static inline acpi_handle acpi_device_handle(struct acpi_device *adev) return adev ? adev->handle : NULL; } -#define ACPI_COMPANION(dev) to_acpi_node((dev)->fwnode) +#define ACPI_COMPANION(dev) to_acpi_device_node((dev)->fwnode) #define ACPI_COMPANION_SET(dev, adev) set_primary_fwnode(dev, (adev) ? 
\ acpi_fwnode_handle(adev) : NULL) #define ACPI_HANDLE(dev) acpi_device_handle(ACPI_COMPANION(dev)) @@ -69,7 +69,7 @@ static inline acpi_handle acpi_device_handle(struct acpi_device *adev) static inline bool has_acpi_companion(struct device *dev) { - return is_acpi_node(dev->fwnode); + return is_acpi_device_node(dev->fwnode); } static inline void acpi_preset_companion(struct device *dev, @@ -131,6 +131,12 @@ static inline void acpi_initrd_override(void *data, size_t size) (!entry) || (unsigned long)entry + sizeof(*entry) > end || \ ((struct acpi_subtable_header *)entry)->length < sizeof(*entry)) +struct acpi_subtable_proc { + int id; + acpi_tbl_entry_handler handler; + int count; +}; + char * __acpi_map_table (unsigned long phys_addr, unsigned long size); void __acpi_unmap_table(char *map, unsigned long size); int early_acpi_boot_init(void); @@ -146,9 +152,16 @@ int __init acpi_parse_entries(char *id, unsigned long table_size, struct acpi_table_header *table_header, int entry_id, unsigned int max_entries); int __init acpi_table_parse_entries(char *id, unsigned long table_size, - int entry_id, - acpi_tbl_entry_handler handler, - unsigned int max_entries); + int entry_id, + acpi_tbl_entry_handler handler, + unsigned int max_entries); +int __init acpi_table_parse_entries(char *id, unsigned long table_size, + int entry_id, + acpi_tbl_entry_handler handler, + unsigned int max_entries); +int __init acpi_table_parse_entries_array(char *id, unsigned long table_size, + struct acpi_subtable_proc *proc, int proc_num, + unsigned int max_entries); int acpi_table_parse_madt(enum acpi_madt_type id, acpi_tbl_entry_handler handler, unsigned int max_entries); @@ -193,6 +206,12 @@ int acpi_ioapic_registered(acpi_handle handle, u32 gsi_base); void acpi_irq_stats_init(void); extern u32 acpi_irq_handled; extern u32 acpi_irq_not_handled; +extern unsigned int acpi_sci_irq; +#define INVALID_ACPI_IRQ ((unsigned)-1) +static inline bool acpi_sci_irq_valid(void) +{ + return acpi_sci_irq != INVALID_ACPI_IRQ; +} extern int sbf_port; extern unsigned long acpi_realmode_flags; @@ -201,6 +220,9 @@ int acpi_register_gsi (struct device *dev, u32 gsi, int triggering, int polarity int acpi_gsi_to_irq (u32 gsi, unsigned int *irq); int acpi_isa_irq_to_gsi (unsigned isa_irq, u32 *gsi); +void acpi_set_irq_model(enum acpi_irq_model_id model, + struct fwnode_handle *fwnode); + #ifdef CONFIG_X86_IO_APIC extern int acpi_get_override_irq(u32 gsi, int *trigger, int *polarity); #else @@ -217,6 +239,7 @@ struct pci_dev; int acpi_pci_irq_enable (struct pci_dev *dev); void acpi_penalize_isa_irq(int irq, int active); +bool acpi_isa_irq_available(int irq); void acpi_penalize_sci_irq(int irq, int trigger, int polarity); void acpi_pci_irq_disable (struct pci_dev *dev); @@ -461,7 +484,22 @@ static inline bool is_acpi_node(struct fwnode_handle *fwnode) return false; } -static inline struct acpi_device *to_acpi_node(struct fwnode_handle *fwnode) +static inline bool is_acpi_device_node(struct fwnode_handle *fwnode) +{ + return false; +} + +static inline struct acpi_device *to_acpi_device_node(struct fwnode_handle *fwnode) +{ + return NULL; +} + +static inline bool is_acpi_data_node(struct fwnode_handle *fwnode) +{ + return false; +} + +static inline struct acpi_data_node *to_acpi_data_node(struct fwnode_handle *fwnode) { return NULL; } @@ -743,22 +781,76 @@ struct acpi_reference_args { #ifdef CONFIG_ACPI int acpi_dev_get_property(struct acpi_device *adev, const char *name, acpi_object_type type, const union acpi_object **obj); -int 
acpi_dev_get_property_array(struct acpi_device *adev, const char *name, - acpi_object_type type, - const union acpi_object **obj); -int acpi_dev_get_property_reference(struct acpi_device *adev, - const char *name, size_t index, - struct acpi_reference_args *args); - -int acpi_dev_prop_get(struct acpi_device *adev, const char *propname, - void **valptr); +int acpi_node_get_property_reference(struct fwnode_handle *fwnode, + const char *name, size_t index, + struct acpi_reference_args *args); + +int acpi_node_prop_get(struct fwnode_handle *fwnode, const char *propname, + void **valptr); int acpi_dev_prop_read_single(struct acpi_device *adev, const char *propname, enum dev_prop_type proptype, void *val); +int acpi_node_prop_read(struct fwnode_handle *fwnode, const char *propname, + enum dev_prop_type proptype, void *val, size_t nval); int acpi_dev_prop_read(struct acpi_device *adev, const char *propname, enum dev_prop_type proptype, void *val, size_t nval); -struct acpi_device *acpi_get_next_child(struct device *dev, - struct acpi_device *child); +struct fwnode_handle *acpi_get_next_subnode(struct device *dev, + struct fwnode_handle *subnode); + +struct acpi_probe_entry; +typedef bool (*acpi_probe_entry_validate_subtbl)(struct acpi_subtable_header *, + struct acpi_probe_entry *); + +#define ACPI_TABLE_ID_LEN 5 + +/** + * struct acpi_probe_entry - boot-time probing entry + * @id: ACPI table name + * @type: Optional subtable type to match + * (if @id contains subtables) + * @subtable_valid: Optional callback to check the validity of + * the subtable + * @probe_table: Callback to the driver being probed when table + * match is successful + * @probe_subtbl: Callback to the driver being probed when table and + * subtable match (and optional callback is successful) + * @driver_data: Sideband data provided back to the driver + */ +struct acpi_probe_entry { + __u8 id[ACPI_TABLE_ID_LEN]; + __u8 type; + acpi_probe_entry_validate_subtbl subtable_valid; + union { + acpi_tbl_table_handler probe_table; + acpi_tbl_entry_handler probe_subtbl; + }; + kernel_ulong_t driver_data; +}; + +#define ACPI_DECLARE_PROBE_ENTRY(table, name, table_id, subtable, valid, data, fn) \ + static const struct acpi_probe_entry __acpi_probe_##name \ + __used __section(__##table##_acpi_probe_table) \ + = { \ + .id = table_id, \ + .type = subtable, \ + .subtable_valid = valid, \ + .probe_table = (acpi_tbl_table_handler)fn, \ + .driver_data = data, \ + } + +#define ACPI_PROBE_TABLE(name) __##name##_acpi_probe_table +#define ACPI_PROBE_TABLE_END(name) __##name##_acpi_probe_table_end + +int __acpi_probe_device_table(struct acpi_probe_entry *start, int nr); + +#define acpi_probe_device_table(t) \ + ({ \ + extern struct acpi_probe_entry ACPI_PROBE_TABLE(t), \ + ACPI_PROBE_TABLE_END(t); \ + __acpi_probe_device_table(&ACPI_PROBE_TABLE(t), \ + (&ACPI_PROBE_TABLE_END(t) - \ + &ACPI_PROBE_TABLE(t))); \ + }) #else static inline int acpi_dev_get_property(struct acpi_device *adev, const char *name, acpi_object_type type, @@ -766,16 +858,17 @@ static inline int acpi_dev_get_property(struct acpi_device *adev, { return -ENXIO; } -static inline int acpi_dev_get_property_array(struct acpi_device *adev, - const char *name, - acpi_object_type type, - const union acpi_object **obj) + +static inline int acpi_node_get_property_reference(struct fwnode_handle *fwnode, + const char *name, const char *cells_name, + size_t index, struct acpi_reference_args *args) { return -ENXIO; } -static inline int acpi_dev_get_property_reference(struct acpi_device *adev, - 
const char *name, const char *cells_name, - size_t index, struct acpi_reference_args *args) + +static inline int acpi_node_prop_get(struct fwnode_handle *fwnode, + const char *propname, + void **valptr) { return -ENXIO; } @@ -795,6 +888,14 @@ static inline int acpi_dev_prop_read_single(struct acpi_device *adev, return -ENXIO; } +static inline int acpi_node_prop_read(struct fwnode_handle *fwnode, + const char *propname, + enum dev_prop_type proptype, + void *val, size_t nval) +{ + return -ENXIO; +} + static inline int acpi_dev_prop_read(struct acpi_device *adev, const char *propname, enum dev_prop_type proptype, @@ -803,12 +904,22 @@ static inline int acpi_dev_prop_read(struct acpi_device *adev, return -ENXIO; } -static inline struct acpi_device *acpi_get_next_child(struct device *dev, - struct acpi_device *child) +static inline struct fwnode_handle *acpi_get_next_subnode(struct device *dev, + struct fwnode_handle *subnode) { return NULL; } +#define ACPI_DECLARE_PROBE_ENTRY(table, name, table_id, subtable, validate, data, fn) \ + static const void * __acpi_table_##name[] \ + __attribute__((unused)) \ + = { (void *) table_id, \ + (void *) subtable, \ + (void *) valid, \ + (void *) fn, \ + (void *) data } + +#define acpi_probe_device_table(t) ({ int __r = 0; __r;}) #endif #endif /*_LINUX_ACPI_H*/ diff --git a/include/linux/acpi_irq.h b/include/linux/acpi_irq.h deleted file mode 100644 index f10c87265855..000000000000 --- a/include/linux/acpi_irq.h +++ /dev/null @@ -1,10 +0,0 @@ -#ifndef _LINUX_ACPI_IRQ_H -#define _LINUX_ACPI_IRQ_H - -#include <linux/irq.h> - -#ifndef acpi_irq_init -static inline void acpi_irq_init(void) { } -#endif - -#endif /* _LINUX_ACPI_IRQ_H */ diff --git a/include/linux/aer.h b/include/linux/aer.h index 4fef65e57023..744b997d6a94 100644 --- a/include/linux/aer.h +++ b/include/linux/aer.h @@ -42,6 +42,7 @@ struct aer_capability_regs { int pci_enable_pcie_error_reporting(struct pci_dev *dev); int pci_disable_pcie_error_reporting(struct pci_dev *dev); int pci_cleanup_aer_uncorrect_error_status(struct pci_dev *dev); +int pci_cleanup_aer_error_status_regs(struct pci_dev *dev); #else static inline int pci_enable_pcie_error_reporting(struct pci_dev *dev) { @@ -55,6 +56,10 @@ static inline int pci_cleanup_aer_uncorrect_error_status(struct pci_dev *dev) { return -EINVAL; } +static inline int pci_cleanup_aer_error_status_regs(struct pci_dev *dev) +{ + return -EINVAL; +} #endif void cper_print_aer(struct pci_dev *dev, int cper_severity, diff --git a/include/linux/amba/bus.h b/include/linux/amba/bus.h index 50fc66868402..9006c4e75cf7 100644 --- a/include/linux/amba/bus.h +++ b/include/linux/amba/bus.h @@ -41,8 +41,6 @@ struct amba_driver { int (*probe)(struct amba_device *, const struct amba_id *); int (*remove)(struct amba_device *); void (*shutdown)(struct amba_device *); - int (*suspend)(struct amba_device *, pm_message_t); - int (*resume)(struct amba_device *); const struct amba_id *id_table; }; diff --git a/include/linux/arcdevice.h b/include/linux/arcdevice.h deleted file mode 100644 index df0356220730..000000000000 --- a/include/linux/arcdevice.h +++ /dev/null @@ -1,342 +0,0 @@ -/* - * INET An implementation of the TCP/IP protocol suite for the LINUX - * operating system. NET is implemented using the BSD Socket - * interface as the means of communication with the user level. - * - * Definitions used by the ARCnet driver. 
- * - * Authors: Avery Pennarun and David Woodhouse - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version - * 2 of the License, or (at your option) any later version. - * - */ -#ifndef _LINUX_ARCDEVICE_H -#define _LINUX_ARCDEVICE_H - -#include <asm/timex.h> -#include <linux/if_arcnet.h> - -#ifdef __KERNEL__ -#include <linux/irqreturn.h> - -/* - * RECON_THRESHOLD is the maximum number of RECON messages to receive - * within one minute before printing a "cabling problem" warning. The - * default value should be fine. - * - * After that, a "cabling restored" message will be printed on the next IRQ - * if no RECON messages have been received for 10 seconds. - * - * Do not define RECON_THRESHOLD at all if you want to disable this feature. - */ -#define RECON_THRESHOLD 30 - - -/* - * Define this to the minimum "timeout" value. If a transmit takes longer - * than TX_TIMEOUT jiffies, Linux will abort the TX and retry. On a large - * network, or one with heavy network traffic, this timeout may need to be - * increased. The larger it is, though, the longer it will be between - * necessary transmits - don't set this too high. - */ -#define TX_TIMEOUT (HZ * 200 / 1000) - - -/* Display warnings about the driver being an ALPHA version. */ -#undef ALPHA_WARNING - - -/* - * Debugging bitflags: each option can be enabled individually. - * - * Note: only debug flags included in the ARCNET_DEBUG_MAX define will - * actually be available. GCC will (at least, GCC 2.7.0 will) notice - * lines using a BUGLVL not in ARCNET_DEBUG_MAX and automatically optimize - * them out. - */ -#define D_NORMAL 1 /* important operational info */ -#define D_EXTRA 2 /* useful, but non-vital information */ -#define D_INIT 4 /* show init/probe messages */ -#define D_INIT_REASONS 8 /* show reasons for discarding probes */ -#define D_RECON 32 /* print a message whenever token is lost */ -#define D_PROTO 64 /* debug auto-protocol support */ -/* debug levels below give LOTS of output during normal operation! */ -#define D_DURING 128 /* trace operations (including irq's) */ -#define D_TX 256 /* show tx packets */ -#define D_RX 512 /* show rx packets */ -#define D_SKB 1024 /* show skb's */ -#define D_SKB_SIZE 2048 /* show skb sizes */ -#define D_TIMING 4096 /* show time needed to copy buffers to card */ -#define D_DEBUG 8192 /* Very detailed debug line for line */ - -#ifndef ARCNET_DEBUG_MAX -#define ARCNET_DEBUG_MAX (127) /* change to ~0 if you want detailed debugging */ -#endif - -#ifndef ARCNET_DEBUG -#define ARCNET_DEBUG (D_NORMAL|D_EXTRA) -#endif -extern int arcnet_debug; - -/* macros to simplify debug checking */ -#define BUGLVL(x) if ((ARCNET_DEBUG_MAX)&arcnet_debug&(x)) -#define BUGMSG2(x,msg,args...) do { BUGLVL(x) printk(msg, ## args); } while (0) -#define BUGMSG(x,msg,args...) \ - BUGMSG2(x, "%s%6s: " msg, \ - x==D_NORMAL ? KERN_WARNING \ - : x < D_DURING ? KERN_INFO : KERN_DEBUG, \ - dev->name , ## args) - -/* see how long a function call takes to run, expressed in CPU cycles */ -#define TIME(name, bytes, call) BUGLVL(D_TIMING) { \ - unsigned long _x, _y; \ - _x = get_cycles(); \ - call; \ - _y = get_cycles(); \ - BUGMSG(D_TIMING, \ - "%s: %d bytes in %lu cycles == " \ - "%lu Kbytes/100Mcycle\n",\ - name, bytes, _y - _x, \ - 100000000 / 1024 * bytes / (_y - _x + 1));\ - } \ - else { \ - call;\ - } - - -/* - * Time needed to reset the card - in ms (milliseconds). 
This works on my - * SMC PC100. I can't find a reference that tells me just how long I - * should wait. - */ -#define RESETtime (300) - -/* - * These are the max/min lengths of packet payload, not including the - * arc_hardware header, but definitely including the soft header. - * - * Note: packet sizes 254, 255, 256 are impossible because of the way - * ARCnet registers work That's why RFC1201 defines "exception" packets. - * In non-RFC1201 protocols, we have to just tack some extra bytes on the - * end. - */ -#define MTU 253 /* normal packet max size */ -#define MinTU 257 /* extended packet min size */ -#define XMTU 508 /* extended packet max size */ - -/* status/interrupt mask bit fields */ -#define TXFREEflag 0x01 /* transmitter available */ -#define TXACKflag 0x02 /* transmitted msg. ackd */ -#define RECONflag 0x04 /* network reconfigured */ -#define TESTflag 0x08 /* test flag */ -#define EXCNAKflag 0x08 /* excesive nak flag */ -#define RESETflag 0x10 /* power-on-reset */ -#define RES1flag 0x20 /* reserved - usually set by jumper */ -#define RES2flag 0x40 /* reserved - usually set by jumper */ -#define NORXflag 0x80 /* receiver inhibited */ - -/* Flags used for IO-mapped memory operations */ -#define AUTOINCflag 0x40 /* Increase location with each access */ -#define IOMAPflag 0x02 /* (for 90xx) Use IO mapped memory, not mmap */ -#define ENABLE16flag 0x80 /* (for 90xx) Enable 16-bit mode */ - -/* in the command register, the following bits have these meanings: - * 0-2 command - * 3-4 page number (for enable rcv/xmt command) - * 7 receive broadcasts - */ -#define NOTXcmd 0x01 /* disable transmitter */ -#define NORXcmd 0x02 /* disable receiver */ -#define TXcmd 0x03 /* enable transmitter */ -#define RXcmd 0x04 /* enable receiver */ -#define CONFIGcmd 0x05 /* define configuration */ -#define CFLAGScmd 0x06 /* clear flags */ -#define TESTcmd 0x07 /* load test flags */ - -/* flags for "clear flags" command */ -#define RESETclear 0x08 /* power-on-reset */ -#define CONFIGclear 0x10 /* system reconfigured */ - -#define EXCNAKclear 0x0E /* Clear and acknowledge the excive nak bit */ - -/* flags for "load test flags" command */ -#define TESTload 0x08 /* test flag (diagnostic) */ - -/* byte deposited into first address of buffers on reset */ -#define TESTvalue 0321 /* that's octal for 0xD1 :) */ - -/* for "enable receiver" command */ -#define RXbcasts 0x80 /* receive broadcasts */ - -/* flags for "define configuration" command */ -#define NORMALconf 0x00 /* 1-249 byte packets */ -#define EXTconf 0x08 /* 250-504 byte packets */ - -/* card feature flags, set during auto-detection. - * (currently only used by com20020pci) - */ -#define ARC_IS_5MBIT 1 /* card default speed is 5MBit */ -#define ARC_CAN_10MBIT 2 /* card uses COM20022, supporting 10MBit, - but default is 2.5MBit. */ - - -/* information needed to define an encapsulation driver */ -struct ArcProto { - char suffix; /* a for RFC1201, e for ether-encap, etc. 
*/ - int mtu; /* largest possible packet */ - int is_ip; /* This is a ip plugin - not a raw thing */ - - void (*rx) (struct net_device * dev, int bufnum, - struct archdr * pkthdr, int length); - int (*build_header) (struct sk_buff * skb, struct net_device *dev, - unsigned short ethproto, uint8_t daddr); - - /* these functions return '1' if the skb can now be freed */ - int (*prepare_tx) (struct net_device * dev, struct archdr * pkt, int length, - int bufnum); - int (*continue_tx) (struct net_device * dev, int bufnum); - int (*ack_tx) (struct net_device * dev, int acked); -}; - -extern struct ArcProto *arc_proto_map[256], *arc_proto_default, - *arc_bcast_proto, *arc_raw_proto; - - -/* - * "Incoming" is information needed for each address that could be sending - * to us. Mostly for partially-received split packets. - */ -struct Incoming { - struct sk_buff *skb; /* packet data buffer */ - __be16 sequence; /* sequence number of assembly */ - uint8_t lastpacket, /* number of last packet (from 1) */ - numpackets; /* number of packets in split */ -}; - - -/* only needed for RFC1201 */ -struct Outgoing { - struct ArcProto *proto; /* protocol driver that owns this: - * if NULL, no packet is pending. - */ - struct sk_buff *skb; /* buffer from upper levels */ - struct archdr *pkt; /* a pointer into the skb */ - uint16_t length, /* bytes total */ - dataleft, /* bytes left */ - segnum, /* segment being sent */ - numsegs; /* number of segments */ -}; - - -struct arcnet_local { - uint8_t config, /* current value of CONFIG register */ - timeout, /* Extended timeout for COM20020 */ - backplane, /* Backplane flag for COM20020 */ - clockp, /* COM20020 clock divider */ - clockm, /* COM20020 clock multiplier flag */ - setup, /* Contents of setup1 register */ - setup2, /* Contents of setup2 register */ - intmask; /* current value of INTMASK register */ - uint8_t default_proto[256]; /* default encap to use for each host */ - int cur_tx, /* buffer used by current transmit, or -1 */ - next_tx, /* buffer where a packet is ready to send */ - cur_rx; /* current receive buffer */ - int lastload_dest, /* can last loaded packet be acked? */ - lasttrans_dest; /* can last TX'd packet be acked? */ - int timed_out; /* need to process TX timeout and drop packet */ - unsigned long last_timeout; /* time of last reported timeout */ - char *card_name; /* card ident string */ - int card_flags; /* special card features */ - - - /* On preemtive and SMB a lock is needed */ - spinlock_t lock; - - /* - * Buffer management: an ARCnet card has 4 x 512-byte buffers, each of - * which can be used for either sending or receiving. The new dynamic - * buffer management routines use a simple circular queue of available - * buffers, and take them as they're needed. This way, we simplify - * situations in which we (for example) want to pre-load a transmit - * buffer, or start receiving while we copy a received packet to - * memory. - * - * The rules: only the interrupt handler is allowed to _add_ buffers to - * the queue; thus, this doesn't require a lock. Both the interrupt - * handler and the transmit function will want to _remove_ buffers, so - * we need to handle the situation where they try to do it at the same - * time. - * - * If next_buf == first_free_buf, the queue is empty. Since there are - * only four possible buffers, the queue should never be full. 
- */ - atomic_t buf_lock; - int buf_queue[5]; - int next_buf, first_free_buf; - - /* network "reconfiguration" handling */ - unsigned long first_recon; /* time of "first" RECON message to count */ - unsigned long last_recon; /* time of most recent RECON */ - int num_recons; /* number of RECONs between first and last. */ - int network_down; /* do we think the network is down? */ - - int excnak_pending; /* We just got an excesive nak interrupt */ - - struct { - uint16_t sequence; /* sequence number (incs with each packet) */ - __be16 aborted_seq; - - struct Incoming incoming[256]; /* one from each address */ - } rfc1201; - - /* really only used by rfc1201, but we'll pretend it's not */ - struct Outgoing outgoing; /* packet currently being sent */ - - /* hardware-specific functions */ - struct { - struct module *owner; - void (*command) (struct net_device * dev, int cmd); - int (*status) (struct net_device * dev); - void (*intmask) (struct net_device * dev, int mask); - int (*reset) (struct net_device * dev, int really_reset); - void (*open) (struct net_device * dev); - void (*close) (struct net_device * dev); - - void (*copy_to_card) (struct net_device * dev, int bufnum, int offset, - void *buf, int count); - void (*copy_from_card) (struct net_device * dev, int bufnum, int offset, - void *buf, int count); - } hw; - - void __iomem *mem_start; /* pointer to ioremap'ed MMIO */ -}; - - -#define ARCRESET(x) (lp->hw.reset(dev, (x))) -#define ACOMMAND(x) (lp->hw.command(dev, (x))) -#define ASTATUS() (lp->hw.status(dev)) -#define AINTMASK(x) (lp->hw.intmask(dev, (x))) - - - -#if ARCNET_DEBUG_MAX & D_SKB -void arcnet_dump_skb(struct net_device *dev, struct sk_buff *skb, char *desc); -#else -#define arcnet_dump_skb(dev,skb,desc) ; -#endif - -void arcnet_unregister_proto(struct ArcProto *proto); -irqreturn_t arcnet_interrupt(int irq, void *dev_id); -struct net_device *alloc_arcdev(const char *name); - -int arcnet_open(struct net_device *dev); -int arcnet_close(struct net_device *dev); -netdev_tx_t arcnet_send_packet(struct sk_buff *skb, - struct net_device *dev); -void arcnet_timeout(struct net_device *dev); - -#endif /* __KERNEL__ */ -#endif /* _LINUX_ARCDEVICE_H */ diff --git a/include/linux/atomic.h b/include/linux/atomic.h index 00a5763e850e..301de78d65f7 100644 --- a/include/linux/atomic.h +++ b/include/linux/atomic.h @@ -81,6 +81,30 @@ #endif #endif /* atomic_add_return_relaxed */ +/* atomic_inc_return_relaxed */ +#ifndef atomic_inc_return_relaxed +#define atomic_inc_return_relaxed atomic_inc_return +#define atomic_inc_return_acquire atomic_inc_return +#define atomic_inc_return_release atomic_inc_return + +#else /* atomic_inc_return_relaxed */ + +#ifndef atomic_inc_return_acquire +#define atomic_inc_return_acquire(...) \ + __atomic_op_acquire(atomic_inc_return, __VA_ARGS__) +#endif + +#ifndef atomic_inc_return_release +#define atomic_inc_return_release(...) \ + __atomic_op_release(atomic_inc_return, __VA_ARGS__) +#endif + +#ifndef atomic_inc_return +#define atomic_inc_return(...) 
\ + __atomic_op_fence(atomic_inc_return, __VA_ARGS__) +#endif +#endif /* atomic_inc_return_relaxed */ + /* atomic_sub_return_relaxed */ #ifndef atomic_sub_return_relaxed #define atomic_sub_return_relaxed atomic_sub_return @@ -105,6 +129,30 @@ #endif #endif /* atomic_sub_return_relaxed */ +/* atomic_dec_return_relaxed */ +#ifndef atomic_dec_return_relaxed +#define atomic_dec_return_relaxed atomic_dec_return +#define atomic_dec_return_acquire atomic_dec_return +#define atomic_dec_return_release atomic_dec_return + +#else /* atomic_dec_return_relaxed */ + +#ifndef atomic_dec_return_acquire +#define atomic_dec_return_acquire(...) \ + __atomic_op_acquire(atomic_dec_return, __VA_ARGS__) +#endif + +#ifndef atomic_dec_return_release +#define atomic_dec_return_release(...) \ + __atomic_op_release(atomic_dec_return, __VA_ARGS__) +#endif + +#ifndef atomic_dec_return +#define atomic_dec_return(...) \ + __atomic_op_fence(atomic_dec_return, __VA_ARGS__) +#endif +#endif /* atomic_dec_return_relaxed */ + /* atomic_xchg_relaxed */ #ifndef atomic_xchg_relaxed #define atomic_xchg_relaxed atomic_xchg @@ -185,6 +233,31 @@ #endif #endif /* atomic64_add_return_relaxed */ +/* atomic64_inc_return_relaxed */ +#ifndef atomic64_inc_return_relaxed +#define atomic64_inc_return_relaxed atomic64_inc_return +#define atomic64_inc_return_acquire atomic64_inc_return +#define atomic64_inc_return_release atomic64_inc_return + +#else /* atomic64_inc_return_relaxed */ + +#ifndef atomic64_inc_return_acquire +#define atomic64_inc_return_acquire(...) \ + __atomic_op_acquire(atomic64_inc_return, __VA_ARGS__) +#endif + +#ifndef atomic64_inc_return_release +#define atomic64_inc_return_release(...) \ + __atomic_op_release(atomic64_inc_return, __VA_ARGS__) +#endif + +#ifndef atomic64_inc_return +#define atomic64_inc_return(...) \ + __atomic_op_fence(atomic64_inc_return, __VA_ARGS__) +#endif +#endif /* atomic64_inc_return_relaxed */ + + /* atomic64_sub_return_relaxed */ #ifndef atomic64_sub_return_relaxed #define atomic64_sub_return_relaxed atomic64_sub_return @@ -209,6 +282,30 @@ #endif #endif /* atomic64_sub_return_relaxed */ +/* atomic64_dec_return_relaxed */ +#ifndef atomic64_dec_return_relaxed +#define atomic64_dec_return_relaxed atomic64_dec_return +#define atomic64_dec_return_acquire atomic64_dec_return +#define atomic64_dec_return_release atomic64_dec_return + +#else /* atomic64_dec_return_relaxed */ + +#ifndef atomic64_dec_return_acquire +#define atomic64_dec_return_acquire(...) \ + __atomic_op_acquire(atomic64_dec_return, __VA_ARGS__) +#endif + +#ifndef atomic64_dec_return_release +#define atomic64_dec_return_release(...) \ + __atomic_op_release(atomic64_dec_return, __VA_ARGS__) +#endif + +#ifndef atomic64_dec_return +#define atomic64_dec_return(...) 
\ + __atomic_op_fence(atomic64_dec_return, __VA_ARGS__) +#endif +#endif /* atomic64_dec_return_relaxed */ + /* atomic64_xchg_relaxed */ #ifndef atomic64_xchg_relaxed #define atomic64_xchg_relaxed atomic64_xchg @@ -451,7 +548,6 @@ static inline int atomic_dec_if_positive(atomic_t *v) } #endif -#include <asm-generic/atomic-long.h> #ifdef CONFIG_GENERIC_ATOMIC64 #include <asm-generic/atomic64.h> #endif @@ -463,4 +559,6 @@ static inline void atomic64_andnot(long long i, atomic64_t *v) } #endif +#include <asm-generic/atomic-long.h> + #endif /* _LINUX_ATOMIC_H */ diff --git a/include/linux/audit.h b/include/linux/audit.h index b2abc996c25d..20eba1eb0a3c 100644 --- a/include/linux/audit.h +++ b/include/linux/audit.h @@ -143,7 +143,7 @@ extern void __audit_inode_child(const struct inode *parent, extern void __audit_seccomp(unsigned long syscall, long signr, int code); extern void __audit_ptrace(struct task_struct *t); -static inline int audit_dummy_context(void) +static inline bool audit_dummy_context(void) { void *p = current->audit_context; return !p || *(int *)p; @@ -345,9 +345,9 @@ static inline void audit_syscall_entry(int major, unsigned long a0, { } static inline void audit_syscall_exit(void *pt_regs) { } -static inline int audit_dummy_context(void) +static inline bool audit_dummy_context(void) { - return 1; + return true; } static inline struct filename *audit_reusename(const __user char *name) { @@ -457,7 +457,7 @@ extern struct audit_buffer *audit_log_start(struct audit_context *ctx, gfp_t gfp extern __printf(2, 3) void audit_log_format(struct audit_buffer *ab, const char *fmt, ...); extern void audit_log_end(struct audit_buffer *ab); -extern int audit_string_contains_control(const char *string, +extern bool audit_string_contains_control(const char *string, size_t len); extern void audit_log_n_hex(struct audit_buffer *ab, const unsigned char *buf, diff --git a/include/linux/backing-dev-defs.h b/include/linux/backing-dev-defs.h index a23209b43842..1b4d69f68c33 100644 --- a/include/linux/backing-dev-defs.h +++ b/include/linux/backing-dev-defs.h @@ -116,6 +116,8 @@ struct bdi_writeback { struct list_head work_list; struct delayed_work dwork; /* work item used for writeback */ + struct list_head bdi_node; /* anchored at bdi->wb_list */ + #ifdef CONFIG_CGROUP_WRITEBACK struct percpu_ref refcnt; /* used only for !root wb's */ struct fprop_local_percpu memcg_completions; @@ -150,6 +152,7 @@ struct backing_dev_info { atomic_long_t tot_write_bandwidth; struct bdi_writeback wb; /* the root writeback info for this bdi */ + struct list_head wb_list; /* list of all wbs */ #ifdef CONFIG_CGROUP_WRITEBACK struct radix_tree_root cgwb_tree; /* radix tree of active cgroup wbs */ struct rb_root cgwb_congested_tree; /* their congested states */ diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h index 5a5d79ee256f..c82794f20110 100644 --- a/include/linux/backing-dev.h +++ b/include/linux/backing-dev.h @@ -18,13 +18,17 @@ #include <linux/slab.h> int __must_check bdi_init(struct backing_dev_info *bdi); -void bdi_destroy(struct backing_dev_info *bdi); +void bdi_exit(struct backing_dev_info *bdi); __printf(3, 4) int bdi_register(struct backing_dev_info *bdi, struct device *parent, const char *fmt, ...); int bdi_register_dev(struct backing_dev_info *bdi, dev_t dev); +void bdi_unregister(struct backing_dev_info *bdi); + int __must_check bdi_setup_and_register(struct backing_dev_info *, char *); +void bdi_destroy(struct backing_dev_info *bdi); + void wb_start_writeback(struct bdi_writeback *wb, 
long nr_pages, bool range_cyclic, enum wb_reason reason); void wb_start_background_writeback(struct bdi_writeback *wb); @@ -252,13 +256,19 @@ int inode_congested(struct inode *inode, int cong_bits); * @inode: inode of interest * * cgroup writeback requires support from both the bdi and filesystem. - * Test whether @inode has both. + * Also, both memcg and iocg have to be on the default hierarchy. Test + * whether all conditions are met. + * + * Note that the test result may change dynamically on the same inode + * depending on how memcg and iocg are configured. */ static inline bool inode_cgwb_enabled(struct inode *inode) { struct backing_dev_info *bdi = inode_to_bdi(inode); - return bdi_cap_account_dirty(bdi) && + return cgroup_subsys_on_dfl(memory_cgrp_subsys) && + cgroup_subsys_on_dfl(io_cgrp_subsys) && + bdi_cap_account_dirty(bdi) && (bdi->capabilities & BDI_CAP_CGROUP_WRITEBACK) && (inode->i_sb->s_iflags & SB_I_CGROUPWB); } @@ -401,61 +411,6 @@ static inline void unlocked_inode_to_wb_end(struct inode *inode, bool locked) rcu_read_unlock(); } -struct wb_iter { - int start_memcg_id; - struct radix_tree_iter tree_iter; - void **slot; -}; - -static inline struct bdi_writeback *__wb_iter_next(struct wb_iter *iter, - struct backing_dev_info *bdi) -{ - struct radix_tree_iter *titer = &iter->tree_iter; - - WARN_ON_ONCE(!rcu_read_lock_held()); - - if (iter->start_memcg_id >= 0) { - iter->slot = radix_tree_iter_init(titer, iter->start_memcg_id); - iter->start_memcg_id = -1; - } else { - iter->slot = radix_tree_next_slot(iter->slot, titer, 0); - } - - if (!iter->slot) - iter->slot = radix_tree_next_chunk(&bdi->cgwb_tree, titer, 0); - if (iter->slot) - return *iter->slot; - return NULL; -} - -static inline struct bdi_writeback *__wb_iter_init(struct wb_iter *iter, - struct backing_dev_info *bdi, - int start_memcg_id) -{ - iter->start_memcg_id = start_memcg_id; - - if (start_memcg_id) - return __wb_iter_next(iter, bdi); - else - return &bdi->wb; -} - -/** - * bdi_for_each_wb - walk all wb's of a bdi in ascending memcg ID order - * @wb_cur: cursor struct bdi_writeback pointer - * @bdi: bdi to walk wb's of - * @iter: pointer to struct wb_iter to be used as iteration buffer - * @start_memcg_id: memcg ID to start iteration from - * - * Iterate @wb_cur through the wb's (bdi_writeback's) of @bdi in ascending - * memcg ID order starting from @start_memcg_id. @iter is struct wb_iter - * to be used as temp storage during iteration. rcu_read_lock() must be - * held throughout iteration. - */ -#define bdi_for_each_wb(wb_cur, bdi, iter, start_memcg_id) \ - for ((wb_cur) = __wb_iter_init(iter, bdi, start_memcg_id); \ - (wb_cur); (wb_cur) = __wb_iter_next(iter, bdi)) - #else /* CONFIG_CGROUP_WRITEBACK */ static inline bool inode_cgwb_enabled(struct inode *inode) @@ -515,14 +470,6 @@ static inline void wb_blkcg_offline(struct blkcg *blkcg) { } -struct wb_iter { - int next_id; -}; - -#define bdi_for_each_wb(wb_cur, bdi, iter, start_blkcg_id) \ - for ((iter)->next_id = (start_blkcg_id); \ - ({ (wb_cur) = !(iter)->next_id++ ? 
&(bdi)->wb : NULL; }); ) - static inline int inode_congested(struct inode *inode, int cong_bits) { return wb_congested(&inode_to_bdi(inode)->wb, cong_bits); diff --git a/include/linux/bcma/bcma.h b/include/linux/bcma/bcma.h index 2ff4a9961e1d..3feb1b2d75d8 100644 --- a/include/linux/bcma/bcma.h +++ b/include/linux/bcma/bcma.h @@ -151,6 +151,8 @@ struct bcma_host_ops { #define BCMA_CORE_PCIE2 0x83C /* PCI Express Gen2 */ #define BCMA_CORE_USB30_DEV 0x83D #define BCMA_CORE_ARM_CR4 0x83E +#define BCMA_CORE_ARM_CA7 0x847 +#define BCMA_CORE_SYS_MEM 0x849 #define BCMA_CORE_DEFAULT 0xFFF #define BCMA_MAX_NR_CORES 16 diff --git a/include/linux/blk-cgroup.h b/include/linux/blk-cgroup.h index 0a5cc7a1109b..c02e669945e9 100644 --- a/include/linux/blk-cgroup.h +++ b/include/linux/blk-cgroup.h @@ -713,9 +713,9 @@ static inline bool blkcg_bio_issue_check(struct request_queue *q, if (!throtl) { blkg = blkg ?: q->root_blkg; - blkg_rwstat_add(&blkg->stat_bytes, bio->bi_flags, + blkg_rwstat_add(&blkg->stat_bytes, bio->bi_rw, bio->bi_iter.bi_size); - blkg_rwstat_add(&blkg->stat_ios, bio->bi_flags, 1); + blkg_rwstat_add(&blkg->stat_ios, bio->bi_rw, 1); } rcu_read_unlock(); diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h index 37d1602c4f7a..83cc9d4e5455 100644 --- a/include/linux/blk-mq.h +++ b/include/linux/blk-mq.h @@ -145,7 +145,6 @@ enum { BLK_MQ_F_SHOULD_MERGE = 1 << 0, BLK_MQ_F_TAG_SHARED = 1 << 1, BLK_MQ_F_SG_MERGE = 1 << 2, - BLK_MQ_F_SYSFS_UP = 1 << 3, BLK_MQ_F_DEFER_ISSUE = 1 << 4, BLK_MQ_F_ALLOC_POLICY_START_BIT = 8, BLK_MQ_F_ALLOC_POLICY_BITS = 1, @@ -167,7 +166,6 @@ enum { struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *); struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set, struct request_queue *q); -void blk_mq_finish_init(struct request_queue *q); int blk_mq_register_disk(struct gendisk *); void blk_mq_unregister_disk(struct gendisk *); @@ -215,7 +213,7 @@ void blk_mq_add_to_requeue_list(struct request *rq, bool at_head); void blk_mq_cancel_requeue_work(struct request_queue *q); void blk_mq_kick_requeue_list(struct request_queue *q); void blk_mq_abort_requeue_list(struct request_queue *q); -void blk_mq_complete_request(struct request *rq); +void blk_mq_complete_request(struct request *rq, int error); void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx); void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx); @@ -224,8 +222,6 @@ void blk_mq_start_hw_queues(struct request_queue *q); void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async); void blk_mq_run_hw_queues(struct request_queue *q, bool async); void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs); -void blk_mq_tag_busy_iter(struct blk_mq_hw_ctx *hctx, busy_iter_fn *fn, - void *priv); void blk_mq_all_tag_busy_iter(struct blk_mq_tags *tags, busy_tag_iter_fn *fn, void *priv); void blk_mq_freeze_queue(struct request_queue *q); diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 38a5ff772a37..d045ca8487af 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -35,6 +35,7 @@ struct sg_io_hdr; struct bsg_job; struct blkcg_gq; struct blk_flush_queue; +struct pr_ops; #define BLKDEV_MIN_RQ 4 #define BLKDEV_MAX_RQ 128 /* Default maximum */ @@ -369,6 +370,10 @@ struct request_queue { */ struct kobject mq_kobj; +#ifdef CONFIG_BLK_DEV_INTEGRITY + struct blk_integrity integrity; +#endif /* CONFIG_BLK_DEV_INTEGRITY */ + #ifdef CONFIG_PM struct device *dev; int rpm_status; @@ -450,12 +455,14 @@ struct request_queue { #endif struct rcu_head 
rcu_head; wait_queue_head_t mq_freeze_wq; - struct percpu_ref mq_usage_counter; + struct percpu_ref q_usage_counter; struct list_head all_q_node; struct blk_mq_tag_set *tag_set; struct list_head tag_set_list; struct bio_set *bio_split; + + bool mq_sysfs_init_done; }; #define QUEUE_FLAG_QUEUED 1 /* uses generic tag queueing */ @@ -1368,6 +1375,26 @@ static inline bool bvec_gap_to_prev(struct request_queue *q, ((bprv->bv_offset + bprv->bv_len) & queue_virt_boundary(q)); } +static inline bool bio_will_gap(struct request_queue *q, struct bio *prev, + struct bio *next) +{ + if (!bio_has_data(prev)) + return false; + + return bvec_gap_to_prev(q, &prev->bi_io_vec[prev->bi_vcnt - 1], + next->bi_io_vec[0].bv_offset); +} + +static inline bool req_gap_back_merge(struct request *req, struct bio *bio) +{ + return bio_will_gap(req->q, req->biotail, bio); +} + +static inline bool req_gap_front_merge(struct request *req, struct bio *bio) +{ + return bio_will_gap(req->q, bio, req->bio); +} + struct work_struct; int kblockd_schedule_work(struct work_struct *work); int kblockd_schedule_delayed_work(struct delayed_work *dwork, unsigned long delay); @@ -1440,22 +1467,13 @@ struct blk_integrity_iter { typedef int (integrity_processing_fn) (struct blk_integrity_iter *); -struct blk_integrity { - integrity_processing_fn *generate_fn; - integrity_processing_fn *verify_fn; - - unsigned short flags; - unsigned short tuple_size; - unsigned short interval; - unsigned short tag_size; - - const char *name; - - struct kobject kobj; +struct blk_integrity_profile { + integrity_processing_fn *generate_fn; + integrity_processing_fn *verify_fn; + const char *name; }; -extern bool blk_integrity_is_initialized(struct gendisk *); -extern int blk_integrity_register(struct gendisk *, struct blk_integrity *); +extern void blk_integrity_register(struct gendisk *, struct blk_integrity *); extern void blk_integrity_unregister(struct gendisk *); extern int blk_integrity_compare(struct gendisk *, struct gendisk *); extern int blk_rq_map_integrity_sg(struct request_queue *, struct bio *, @@ -1466,15 +1484,20 @@ extern bool blk_integrity_merge_rq(struct request_queue *, struct request *, extern bool blk_integrity_merge_bio(struct request_queue *, struct request *, struct bio *); -static inline -struct blk_integrity *bdev_get_integrity(struct block_device *bdev) +static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk) { - return bdev->bd_disk->integrity; + struct blk_integrity *bi = &disk->queue->integrity; + + if (!bi->profile) + return NULL; + + return bi; } -static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk) +static inline +struct blk_integrity *bdev_get_integrity(struct block_device *bdev) { - return disk->integrity; + return blk_get_integrity(bdev->bd_disk); } static inline bool blk_integrity_rq(struct request *rq) @@ -1494,6 +1517,26 @@ queue_max_integrity_segments(struct request_queue *q) return q->limits.max_integrity_segments; } +static inline bool integrity_req_gap_back_merge(struct request *req, + struct bio *next) +{ + struct bio_integrity_payload *bip = bio_integrity(req->bio); + struct bio_integrity_payload *bip_next = bio_integrity(next); + + return bvec_gap_to_prev(req->q, &bip->bip_vec[bip->bip_vcnt - 1], + bip_next->bip_vec[0].bv_offset); +} + +static inline bool integrity_req_gap_front_merge(struct request *req, + struct bio *bio) +{ + struct bio_integrity_payload *bip = bio_integrity(bio); + struct bio_integrity_payload *bip_next = bio_integrity(req->bio); + + return 
bvec_gap_to_prev(req->q, &bip->bip_vec[bip->bip_vcnt - 1], + bip_next->bip_vec[0].bv_offset); +} + #else /* CONFIG_BLK_DEV_INTEGRITY */ struct bio; @@ -1528,10 +1571,9 @@ static inline int blk_integrity_compare(struct gendisk *a, struct gendisk *b) { return 0; } -static inline int blk_integrity_register(struct gendisk *d, +static inline void blk_integrity_register(struct gendisk *d, struct blk_integrity *b) { - return 0; } static inline void blk_integrity_unregister(struct gendisk *d) { @@ -1556,9 +1598,16 @@ static inline bool blk_integrity_merge_bio(struct request_queue *rq, { return true; } -static inline bool blk_integrity_is_initialized(struct gendisk *g) + +static inline bool integrity_req_gap_back_merge(struct request *req, + struct bio *next) +{ + return false; +} +static inline bool integrity_req_gap_front_merge(struct request *req, + struct bio *bio) { - return 0; + return false; } #endif /* CONFIG_BLK_DEV_INTEGRITY */ @@ -1581,6 +1630,7 @@ struct block_device_operations { /* this callback is with swap_lock and sometimes page table lock held */ void (*swap_slot_free_notify) (struct block_device *, unsigned long); struct module *owner; + const struct pr_ops *pr_ops; }; extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int, diff --git a/include/linux/bpf.h b/include/linux/bpf.h index f57d7fed9ec3..de464e6683b6 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -10,7 +10,6 @@ #include <uapi/linux/bpf.h> #include <linux/workqueue.h> #include <linux/file.h> -#include <linux/perf_event.h> struct bpf_map; @@ -37,6 +36,8 @@ struct bpf_map { u32 key_size; u32 value_size; u32 max_entries; + u32 pages; + struct user_struct *user; const struct bpf_map_ops *ops; struct work_struct work; }; @@ -101,6 +102,8 @@ enum bpf_access_type { BPF_WRITE = 2 }; +struct bpf_prog; + struct bpf_verifier_ops { /* return eBPF function prototype for verification */ const struct bpf_func_proto *(*get_func_proto)(enum bpf_func_id func_id); @@ -112,7 +115,7 @@ struct bpf_verifier_ops { u32 (*convert_ctx_access)(enum bpf_access_type type, int dst_reg, int src_reg, int ctx_off, - struct bpf_insn *insn); + struct bpf_insn *insn, struct bpf_prog *prog); }; struct bpf_prog_type_list { @@ -121,14 +124,13 @@ struct bpf_prog_type_list { enum bpf_prog_type type; }; -struct bpf_prog; - struct bpf_prog_aux { atomic_t refcnt; u32 used_map_cnt; const struct bpf_verifier_ops *ops; struct bpf_map **used_maps; struct bpf_prog *prog; + struct user_struct *user; union { struct work_struct work; struct rcu_head rcu; @@ -165,9 +167,18 @@ struct bpf_prog *bpf_prog_get(u32 ufd); void bpf_prog_put(struct bpf_prog *prog); void bpf_prog_put_rcu(struct bpf_prog *prog); -struct bpf_map *bpf_map_get(struct fd f); +struct bpf_map *bpf_map_get(u32 ufd); +struct bpf_map *__bpf_map_get(struct fd f); void bpf_map_put(struct bpf_map *map); +extern int sysctl_unprivileged_bpf_disabled; + +int bpf_map_new_fd(struct bpf_map *map); +int bpf_prog_new_fd(struct bpf_prog *prog); + +int bpf_obj_pin_user(u32 ufd, const char __user *pathname); +int bpf_obj_get_user(const char __user *pathname); + /* verify correctness of eBPF program */ int bpf_check(struct bpf_prog **fp, union bpf_attr *attr); #else @@ -190,7 +201,6 @@ extern const struct bpf_func_proto bpf_map_lookup_elem_proto; extern const struct bpf_func_proto bpf_map_update_elem_proto; extern const struct bpf_func_proto bpf_map_delete_elem_proto; -extern const struct bpf_func_proto bpf_perf_event_read_proto; extern const struct bpf_func_proto 
bpf_get_prandom_u32_proto; extern const struct bpf_func_proto bpf_get_smp_processor_id_proto; extern const struct bpf_func_proto bpf_tail_call_proto; @@ -201,4 +211,8 @@ extern const struct bpf_func_proto bpf_get_current_comm_proto; extern const struct bpf_func_proto bpf_skb_vlan_push_proto; extern const struct bpf_func_proto bpf_skb_vlan_pop_proto; +/* Shared helpers among cBPF and eBPF. */ +void bpf_user_rnd_init_once(void); +u64 bpf_user_rnd_u32(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5); + #endif /* _LINUX_BPF_H */ diff --git a/include/linux/brcmphy.h b/include/linux/brcmphy.h index 697ca7795bd9..59f4a7304419 100644 --- a/include/linux/brcmphy.h +++ b/include/linux/brcmphy.h @@ -30,6 +30,8 @@ #define PHY_ID_BCM7439_2 0xae025080 #define PHY_ID_BCM7445 0x600d8510 +#define PHY_ID_BCM_CYGNUS 0xae025200 + #define PHY_BCM_OUI_MASK 0xfffffc00 #define PHY_BCM_OUI_1 0x00206000 #define PHY_BCM_OUI_2 0x0143bc00 @@ -138,7 +140,10 @@ /* 01010: Auto Power-Down */ #define BCM54XX_SHD_APD 0x0a +#define BCM_APD_CLR_MASK 0xFE9F /* clear bits 5, 6 & 8 */ #define BCM54XX_SHD_APD_EN 0x0020 +#define BCM_NO_ANEG_APD_EN 0x0060 /* bits 5 & 6 */ +#define BCM_APD_SINGLELP_EN 0x0100 /* Bit 8 */ #define BCM5482_SHD_LEDS1 0x0d /* 01101: LED Selector 1 */ /* LED3 / ~LINKSPD[2] selector */ @@ -209,27 +214,13 @@ #define MII_BRCM_FET_SHDW_AUXSTAT2 0x1b /* Auxiliary status 2 */ #define MII_BRCM_FET_SHDW_AS2_APDE 0x0020 /* Auto power down enable */ -/* - * Indirect register access functions for the 1000BASE-T/100BASE-TX/10BASE-T - * 0x1c shadow registers. - */ -static inline int bcm54xx_shadow_read(struct phy_device *phydev, u16 shadow) -{ - phy_write(phydev, MII_BCM54XX_SHD, MII_BCM54XX_SHD_VAL(shadow)); - return MII_BCM54XX_SHD_DATA(phy_read(phydev, MII_BCM54XX_SHD)); -} - -static inline int bcm54xx_shadow_write(struct phy_device *phydev, u16 shadow, - u16 val) -{ - return phy_write(phydev, MII_BCM54XX_SHD, - MII_BCM54XX_SHD_WRITE | - MII_BCM54XX_SHD_VAL(shadow) | - MII_BCM54XX_SHD_DATA(val)); -} - #define BRCM_CL45VEN_EEE_CONTROL 0x803d #define LPI_FEATURE_EN 0x8000 #define LPI_FEATURE_EN_DIG1000X 0x4000 +/* Core register definitions*/ +#define MII_BRCM_CORE_BASE1E 0x1E +#define MII_BRCM_CORE_EXPB0 0xB0 +#define MII_BRCM_CORE_EXPB1 0xB1 + #endif /* _LINUX_BRCMPHY_H */ diff --git a/include/linux/can/dev.h b/include/linux/can/dev.h index c3a9c8fc60fa..735f9f8c4e43 100644 --- a/include/linux/can/dev.h +++ b/include/linux/can/dev.h @@ -14,9 +14,10 @@ #define _CAN_DEV_H #include <linux/can.h> -#include <linux/can/netlink.h> #include <linux/can/error.h> #include <linux/can/led.h> +#include <linux/can/netlink.h> +#include <linux/netdevice.h> /* * CAN mode @@ -77,7 +78,7 @@ struct can_priv { #define get_canfd_dlc(i) (min_t(__u8, (i), CANFD_MAX_DLC)) /* Drop a given socketbuffer if it does not contain a valid CAN frame. 
*/ -static inline int can_dropped_invalid_skb(struct net_device *dev, +static inline bool can_dropped_invalid_skb(struct net_device *dev, struct sk_buff *skb) { const struct canfd_frame *cfd = (struct canfd_frame *)skb->data; @@ -93,12 +94,12 @@ static inline int can_dropped_invalid_skb(struct net_device *dev, } else goto inval_skb; - return 0; + return false; inval_skb: kfree_skb(skb); dev->stats.tx_dropped++; - return 1; + return true; } static inline bool can_is_canfd_skb(const struct sk_buff *skb) diff --git a/include/linux/can/led.h b/include/linux/can/led.h index 146de4506d21..2746f7c2f87d 100644 --- a/include/linux/can/led.h +++ b/include/linux/can/led.h @@ -11,6 +11,7 @@ #include <linux/if.h> #include <linux/leds.h> +#include <linux/netdevice.h> enum can_led_event { CAN_LED_EVENT_OPEN, diff --git a/include/linux/ceph/ceph_features.h b/include/linux/ceph/ceph_features.h index 4763ad64e832..f89b31d45cc8 100644 --- a/include/linux/ceph/ceph_features.h +++ b/include/linux/ceph/ceph_features.h @@ -107,6 +107,7 @@ static inline u64 ceph_sanitize_features(u64 features) CEPH_FEATURE_OSDMAP_ENC | \ CEPH_FEATURE_CRUSH_TUNABLES3 | \ CEPH_FEATURE_OSD_PRIMARY_AFFINITY | \ + CEPH_FEATURE_MSGR_KEEPALIVE2 | \ CEPH_FEATURE_CRUSH_V4) #define CEPH_FEATURES_REQUIRED_DEFAULT \ diff --git a/include/linux/ceph/messenger.h b/include/linux/ceph/messenger.h index 7e1252e97a30..b2371d9b51fa 100644 --- a/include/linux/ceph/messenger.h +++ b/include/linux/ceph/messenger.h @@ -238,6 +238,8 @@ struct ceph_connection { bool out_kvec_is_msg; /* kvec refers to out_msg */ int out_more; /* there is more data after the kvecs */ __le64 out_temp_ack; /* for writing an ack */ + struct ceph_timespec out_temp_keepalive2; /* for writing keepalive2 + stamp */ /* message in temps */ struct ceph_msg_header in_hdr; @@ -248,7 +250,7 @@ struct ceph_connection { int in_base_pos; /* bytes read */ __le64 in_temp_ack; /* for reading an ack */ - struct timespec last_keepalive_ack; + struct timespec last_keepalive_ack; /* keepalive2 ack stamp */ struct delayed_work work; /* send|recv work */ unsigned long delay; /* current delay interval */ diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h index 4d8fcf2187dc..60d44b26276d 100644 --- a/include/linux/cgroup-defs.h +++ b/include/linux/cgroup-defs.h @@ -76,6 +76,7 @@ enum { CFTYPE_ONLY_ON_ROOT = (1 << 0), /* only create on root cgrp */ CFTYPE_NOT_ON_ROOT = (1 << 1), /* don't create on root cgrp */ CFTYPE_NO_PREFIX = (1 << 3), /* (DON'T USE FOR NEW FILES) no subsys prefix */ + CFTYPE_WORLD_WRITABLE = (1 << 4), /* (DON'T USE FOR NEW FILES) S_IWUGO */ /* internal flags, do not use outside cgroup core proper */ __CFTYPE_ONLY_ON_DFL = (1 << 16), /* only on default hierarchy */ @@ -83,6 +84,17 @@ enum { }; /* + * cgroup_file is the handle for a file instance created in a cgroup which + * is used, for example, to generate file changed notifications. This can + * be obtained by setting cftype->file_offset. + */ +struct cgroup_file { + /* do not access any fields from outside cgroup core */ + struct list_head node; /* anchored at css->files */ + struct kernfs_node *kn; +}; + +/* * Per-subsystem/per-cgroup state maintained by the system. This is the * fundamental structural building block that controllers deal with. 
* @@ -122,6 +134,9 @@ struct cgroup_subsys_state { */ u64 serial_nr; + /* all cgroup_files associated with this css */ + struct list_head files; + /* percpu_ref killing and RCU release */ struct rcu_head rcu_head; struct work_struct destroy_work; @@ -196,6 +211,9 @@ struct css_set { */ struct list_head e_cset_node[CGROUP_SUBSYS_COUNT]; + /* all css_task_iters currently walking this cset */ + struct list_head task_iters; + /* For RCU-protected deletion */ struct rcu_head rcu_head; }; @@ -217,16 +235,16 @@ struct cgroup { int id; /* - * If this cgroup contains any tasks, it contributes one to - * populated_cnt. All children with non-zero popuplated_cnt of - * their own contribute one. The count is zero iff there's no task - * in this cgroup or its subtree. + * Each non-empty css_set associated with this cgroup contributes + * one to populated_cnt. All children with non-zero popuplated_cnt + * of their own contribute one. The count is zero iff there's no + * task in this cgroup or its subtree. */ int populated_cnt; struct kernfs_node *kn; /* cgroup kernfs entry */ - struct kernfs_node *procs_kn; /* kn for "cgroup.procs" */ - struct kernfs_node *populated_kn; /* kn for "cgroup.subtree_populated" */ + struct cgroup_file procs_file; /* handle for "cgroup.procs" */ + struct cgroup_file events_file; /* handle for "cgroup.events" */ /* * The bitmask of subsystems enabled on the child cgroups. @@ -324,11 +342,6 @@ struct cftype { */ char name[MAX_CFTYPE_NAME]; unsigned long private; - /* - * If not 0, file mode is set to this value, otherwise it will - * be figured out automatically - */ - umode_t mode; /* * The maximum length of string, excluding trailing nul, that can @@ -340,6 +353,14 @@ struct cftype { unsigned int flags; /* + * If non-zero, should contain the offset from the start of css to + * a struct cgroup_file field. cgroup will record the handle of + * the created file into it. The recorded handle can be used as + * long as the containing css remains accessible. + */ + unsigned int file_offset; + + /* * Fields used for internal bookkeeping. Initialized automatically * during registration. 
*/ @@ -414,12 +435,10 @@ struct cgroup_subsys { int (*can_fork)(struct task_struct *task, void **priv_p); void (*cancel_fork)(struct task_struct *task, void *priv); void (*fork)(struct task_struct *task, void *priv); - void (*exit)(struct cgroup_subsys_state *css, - struct cgroup_subsys_state *old_css, - struct task_struct *task); + void (*exit)(struct task_struct *task); + void (*free)(struct task_struct *task); void (*bind)(struct cgroup_subsys_state *root_css); - int disabled; int early_init; /* diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h index eb7ca55f72ef..22e3754f89c5 100644 --- a/include/linux/cgroup.h +++ b/include/linux/cgroup.h @@ -13,10 +13,10 @@ #include <linux/nodemask.h> #include <linux/rculist.h> #include <linux/cgroupstats.h> -#include <linux/rwsem.h> #include <linux/fs.h> #include <linux/seq_file.h> #include <linux/kernfs.h> +#include <linux/jump_label.h> #include <linux/cgroup-defs.h> @@ -41,6 +41,10 @@ struct css_task_iter { struct list_head *task_pos; struct list_head *tasks_head; struct list_head *mg_tasks_head; + + struct css_set *cur_cset; + struct task_struct *cur_task; + struct list_head iters_node; /* css_set->task_iters */ }; extern struct cgroup_root cgrp_dfl_root; @@ -50,6 +54,26 @@ extern struct css_set init_css_set; #include <linux/cgroup_subsys.h> #undef SUBSYS +#define SUBSYS(_x) \ + extern struct static_key_true _x ## _cgrp_subsys_enabled_key; \ + extern struct static_key_true _x ## _cgrp_subsys_on_dfl_key; +#include <linux/cgroup_subsys.h> +#undef SUBSYS + +/** + * cgroup_subsys_enabled - fast test on whether a subsys is enabled + * @ss: subsystem in question + */ +#define cgroup_subsys_enabled(ss) \ + static_branch_likely(&ss ## _enabled_key) + +/** + * cgroup_subsys_on_dfl - fast test on whether a subsys is on default hierarchy + * @ss: subsystem in question + */ +#define cgroup_subsys_on_dfl(ss) \ + static_branch_likely(&ss ## _on_dfl_key) + bool css_has_online_children(struct cgroup_subsys_state *css); struct cgroup_subsys_state *css_from_id(int id, struct cgroup_subsys *ss); struct cgroup_subsys_state *cgroup_get_e_css(struct cgroup *cgroup, @@ -78,6 +102,7 @@ extern void cgroup_cancel_fork(struct task_struct *p, extern void cgroup_post_fork(struct task_struct *p, void *old_ss_priv[CGROUP_CANFORK_COUNT]); void cgroup_exit(struct task_struct *p); +void cgroup_free(struct task_struct *p); int cgroup_init_early(void); int cgroup_init(void); @@ -211,11 +236,33 @@ void css_task_iter_end(struct css_task_iter *it); * cgroup_taskset_for_each - iterate cgroup_taskset * @task: the loop cursor * @tset: taskset to iterate + * + * @tset may contain multiple tasks and they may belong to multiple + * processes. When there are multiple tasks in @tset, if a task of a + * process is in @tset, all tasks of the process are in @tset. Also, all + * are guaranteed to share the same source and destination csses. + * + * Iteration is not in any specific order. */ #define cgroup_taskset_for_each(task, tset) \ for ((task) = cgroup_taskset_first((tset)); (task); \ (task) = cgroup_taskset_next((tset))) +/** + * cgroup_taskset_for_each_leader - iterate group leaders in a cgroup_taskset + * @leader: the loop cursor + * @tset: takset to iterate + * + * Iterate threadgroup leaders of @tset. For single-task migrations, @tset + * may not contain any. 
+ */ +#define cgroup_taskset_for_each_leader(leader, tset) \ + for ((leader) = cgroup_taskset_first((tset)); (leader); \ + (leader) = cgroup_taskset_next((tset))) \ + if ((leader) != (leader)->group_leader) \ + ; \ + else + /* * Inline functions. */ @@ -320,11 +367,11 @@ static inline void css_put_many(struct cgroup_subsys_state *css, unsigned int n) */ #ifdef CONFIG_PROVE_RCU extern struct mutex cgroup_mutex; -extern struct rw_semaphore css_set_rwsem; +extern spinlock_t css_set_lock; #define task_css_set_check(task, __c) \ rcu_dereference_check((task)->cgroups, \ lockdep_is_held(&cgroup_mutex) || \ - lockdep_is_held(&css_set_rwsem) || \ + lockdep_is_held(&css_set_lock) || \ ((task)->flags & PF_EXITING) || (__c)) #else #define task_css_set_check(task, __c) \ @@ -412,68 +459,10 @@ static inline struct cgroup *task_cgroup(struct task_struct *task, return task_css(task, subsys_id)->cgroup; } -/** - * cgroup_on_dfl - test whether a cgroup is on the default hierarchy - * @cgrp: the cgroup of interest - * - * The default hierarchy is the v2 interface of cgroup and this function - * can be used to test whether a cgroup is on the default hierarchy for - * cases where a subsystem should behave differnetly depending on the - * interface version. - * - * The set of behaviors which change on the default hierarchy are still - * being determined and the mount option is prefixed with __DEVEL__. - * - * List of changed behaviors: - * - * - Mount options "noprefix", "xattr", "clone_children", "release_agent" - * and "name" are disallowed. - * - * - When mounting an existing superblock, mount options should match. - * - * - Remount is disallowed. - * - * - rename(2) is disallowed. - * - * - "tasks" is removed. Everything should be at process granularity. Use - * "cgroup.procs" instead. - * - * - "cgroup.procs" is not sorted. pids will be unique unless they got - * recycled inbetween reads. - * - * - "release_agent" and "notify_on_release" are removed. Replacement - * notification mechanism will be implemented. - * - * - "cgroup.clone_children" is removed. - * - * - "cgroup.subtree_populated" is available. Its value is 0 if the cgroup - * and its descendants contain no task; otherwise, 1. The file also - * generates kernfs notification which can be monitored through poll and - * [di]notify when the value of the file changes. - * - * - cpuset: tasks will be kept in empty cpusets when hotplug happens and - * take masks of ancestors with non-empty cpus/mems, instead of being - * moved to an ancestor. - * - * - cpuset: a task can be moved into an empty cpuset, and again it takes - * masks of ancestors. - * - * - memcg: use_hierarchy is on by default and the cgroup file for the flag - * is not created. - * - * - blkcg: blk-throttle becomes properly hierarchical. - * - * - debug: disallowed on the default hierarchy. - */ -static inline bool cgroup_on_dfl(const struct cgroup *cgrp) -{ - return cgrp->root == &cgrp_dfl_root; -} - /* no synchronization, the result can only be used as a hint */ -static inline bool cgroup_has_tasks(struct cgroup *cgrp) +static inline bool cgroup_is_populated(struct cgroup *cgrp) { - return !list_empty(&cgrp->cset_links); + return cgrp->populated_cnt; } /* returns ino associated with a cgroup */ @@ -527,6 +516,19 @@ static inline void pr_cont_cgroup_path(struct cgroup *cgrp) pr_cont_kernfs_path(cgrp->kn); } +/** + * cgroup_file_notify - generate a file modified event for a cgroup_file + * @cfile: target cgroup_file + * + * @cfile must have been obtained by setting cftype->file_offset. 
+ */ +static inline void cgroup_file_notify(struct cgroup_file *cfile) +{ + /* might not have been created due to one of the CFTYPE selector flags */ + if (cfile->kn) + kernfs_notify(cfile->kn); +} + #else /* !CONFIG_CGROUPS */ struct cgroup_subsys_state; @@ -546,6 +548,7 @@ static inline void cgroup_cancel_fork(struct task_struct *p, static inline void cgroup_post_fork(struct task_struct *p, void *ss_priv[CGROUP_CANFORK_COUNT]) {} static inline void cgroup_exit(struct task_struct *p) {} +static inline void cgroup_free(struct task_struct *p) {} static inline int cgroup_init_early(void) { return 0; } static inline int cgroup_init(void) { return 0; } diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h index 3ecc07d0da77..c56988ac63f7 100644 --- a/include/linux/clk-provider.h +++ b/include/linux/clk-provider.h @@ -500,13 +500,14 @@ struct clk *clk_register_fixed_factor(struct device *dev, const char *name, * * Clock with adjustable fractional divider affecting its output frequency. */ - struct clk_fractional_divider { struct clk_hw hw; void __iomem *reg; u8 mshift; + u8 mwidth; u32 mmask; u8 nshift; + u8 nwidth; u32 nmask; u8 flags; spinlock_t *lock; @@ -518,6 +519,41 @@ struct clk *clk_register_fractional_divider(struct device *dev, void __iomem *reg, u8 mshift, u8 mwidth, u8 nshift, u8 nwidth, u8 clk_divider_flags, spinlock_t *lock); +/** + * struct clk_multiplier - adjustable multiplier clock + * + * @hw: handle between common and hardware-specific interfaces + * @reg: register containing the multiplier + * @shift: shift to the multiplier bit field + * @width: width of the multiplier bit field + * @lock: register lock + * + * Clock with an adjustable multiplier affecting its output frequency. + * Implements .recalc_rate, .set_rate and .round_rate + * + * Flags: + * CLK_MULTIPLIER_ZERO_BYPASS - By default, the multiplier is the value read + * from the register, with 0 being a valid value effectively + * zeroing the output clock rate. If CLK_MULTIPLIER_ZERO_BYPASS is + * set, then a null multiplier will be considered as a bypass, + * leaving the parent rate unmodified. + * CLK_MULTIPLIER_ROUND_CLOSEST - Makes the best calculated divider to be + * rounded to the closest integer instead of the down one. 
+ */ +struct clk_multiplier { + struct clk_hw hw; + void __iomem *reg; + u8 shift; + u8 width; + u8 flags; + spinlock_t *lock; +}; + +#define CLK_MULTIPLIER_ZERO_BYPASS BIT(0) +#define CLK_MULTIPLIER_ROUND_CLOSEST BIT(1) + +extern const struct clk_ops clk_multiplier_ops; + /*** * struct clk_composite - aggregate clock of mux, divider and gate clocks * @@ -606,7 +642,7 @@ void clk_unregister(struct clk *clk); void devm_clk_unregister(struct device *dev, struct clk *clk); /* helper functions */ -const char *__clk_get_name(struct clk *clk); +const char *__clk_get_name(const struct clk *clk); const char *clk_hw_get_name(const struct clk_hw *hw); struct clk_hw *__clk_get_hw(struct clk *clk); unsigned int clk_hw_get_num_parents(const struct clk_hw *hw); @@ -618,6 +654,7 @@ unsigned long clk_hw_get_rate(const struct clk_hw *hw); unsigned long __clk_get_flags(struct clk *clk); unsigned long clk_hw_get_flags(const struct clk_hw *hw); bool clk_hw_is_prepared(const struct clk_hw *hw); +bool clk_hw_is_enabled(const struct clk_hw *hw); bool __clk_is_enabled(struct clk *clk); struct clk *__clk_lookup(const char *name); int __clk_mux_determine_rate(struct clk_hw *hw, @@ -690,6 +727,15 @@ static inline struct clk *of_clk_src_onecell_get( { return ERR_PTR(-ENOENT); } +static inline int of_clk_get_parent_count(struct device_node *np) +{ + return 0; +} +static inline int of_clk_parent_fill(struct device_node *np, + const char **parents, unsigned int size) +{ + return 0; +} static inline const char *of_clk_get_parent_name(struct device_node *np, int index) { diff --git a/include/linux/clk/at91_pmc.h b/include/linux/clk/at91_pmc.h index 7669f7618f39..1e6932222e11 100644 --- a/include/linux/clk/at91_pmc.h +++ b/include/linux/clk/at91_pmc.h @@ -164,6 +164,7 @@ extern void __iomem *at91_pmc_base; #define AT91_PMC_MOSCSELS (1 << 16) /* Main Oscillator Selection [some SAM9] */ #define AT91_PMC_MOSCRCS (1 << 17) /* Main On-Chip RC [some SAM9] */ #define AT91_PMC_CFDEV (1 << 18) /* Clock Failure Detector Event [some SAM9] */ +#define AT91_PMC_GCKRDY (1 << 24) /* Generated Clocks */ #define AT91_PMC_IMR 0x6c /* Interrupt Mask Register */ #define AT91_PMC_PLLICPR 0x80 /* PLL Charge Pump Current Register */ @@ -182,13 +183,18 @@ extern void __iomem *at91_pmc_base; #define AT91_PMC_PCSR1 0x108 /* Peripheral Clock Enable Register 1 */ #define AT91_PMC_PCR 0x10c /* Peripheral Control Register [some SAM9 and SAMA5] */ -#define AT91_PMC_PCR_PID (0x3f << 0) /* Peripheral ID */ -#define AT91_PMC_PCR_CMD (0x1 << 12) /* Command (read=0, write=1) */ -#define AT91_PMC_PCR_DIV(n) ((n) << 16) /* Divisor Value */ -#define AT91_PMC_PCR_DIV0 0x0 /* Peripheral clock is MCK */ -#define AT91_PMC_PCR_DIV2 0x1 /* Peripheral clock is MCK/2 */ -#define AT91_PMC_PCR_DIV4 0x2 /* Peripheral clock is MCK/4 */ -#define AT91_PMC_PCR_DIV8 0x3 /* Peripheral clock is MCK/8 */ -#define AT91_PMC_PCR_EN (0x1 << 28) /* Enable */ +#define AT91_PMC_PCR_PID_MASK 0x3f +#define AT91_PMC_PCR_GCKCSS_OFFSET 8 +#define AT91_PMC_PCR_GCKCSS_MASK (0x7 << AT91_PMC_PCR_GCKCSS_OFFSET) +#define AT91_PMC_PCR_GCKCSS(n) ((n) << AT91_PMC_PCR_GCKCSS_OFFSET) /* GCK Clock Source Selection */ +#define AT91_PMC_PCR_CMD (0x1 << 12) /* Command (read=0, write=1) */ +#define AT91_PMC_PCR_DIV_OFFSET 16 +#define AT91_PMC_PCR_DIV_MASK (0x3 << AT91_PMC_PCR_DIV_OFFSET) +#define AT91_PMC_PCR_DIV(n) ((n) << AT91_PMC_PCR_DIV_OFFSET) /* Divisor Value */ +#define AT91_PMC_PCR_GCKDIV_OFFSET 20 +#define AT91_PMC_PCR_GCKDIV_MASK (0xff << AT91_PMC_PCR_GCKDIV_OFFSET) +#define AT91_PMC_PCR_GCKDIV(n) 
((n) << AT91_PMC_PCR_GCKDIV_OFFSET) /* Generated Clock Divisor Value */ +#define AT91_PMC_PCR_EN (0x1 << 28) /* Enable */ +#define AT91_PMC_PCR_GCKEN (0x1 << 29) /* GCK Enable */ #endif diff --git a/include/linux/clockchips.h b/include/linux/clockchips.h index 31ce435981fe..bdcf358dfce2 100644 --- a/include/linux/clockchips.h +++ b/include/linux/clockchips.h @@ -18,15 +18,6 @@ struct clock_event_device; struct module; -/* Clock event mode commands for legacy ->set_mode(): OBSOLETE */ -enum clock_event_mode { - CLOCK_EVT_MODE_UNUSED, - CLOCK_EVT_MODE_SHUTDOWN, - CLOCK_EVT_MODE_PERIODIC, - CLOCK_EVT_MODE_ONESHOT, - CLOCK_EVT_MODE_RESUME, -}; - /* * Possible states of a clock event device. * @@ -86,16 +77,14 @@ enum clock_event_state { * @min_delta_ns: minimum delta value in ns * @mult: nanosecond to cycles multiplier * @shift: nanoseconds to cycles divisor (power of two) - * @mode: operating mode, relevant only to ->set_mode(), OBSOLETE * @state_use_accessors:current state of the device, assigned by the core code * @features: features * @retries: number of forced programming retries - * @set_mode: legacy set mode function, only for modes <= CLOCK_EVT_MODE_RESUME. - * @set_state_periodic: switch state to periodic, if !set_mode - * @set_state_oneshot: switch state to oneshot, if !set_mode - * @set_state_oneshot_stopped: switch state to oneshot_stopped, if !set_mode - * @set_state_shutdown: switch state to shutdown, if !set_mode - * @tick_resume: resume clkevt device, if !set_mode + * @set_state_periodic: switch state to periodic + * @set_state_oneshot: switch state to oneshot + * @set_state_oneshot_stopped: switch state to oneshot_stopped + * @set_state_shutdown: switch state to shutdown + * @tick_resume: resume clkevt device * @broadcast: function to broadcast events * @min_delta_ticks: minimum delta value in ticks stored for reconfiguration * @max_delta_ticks: maximum delta value in ticks stored for reconfiguration @@ -116,18 +105,10 @@ struct clock_event_device { u64 min_delta_ns; u32 mult; u32 shift; - enum clock_event_mode mode; enum clock_event_state state_use_accessors; unsigned int features; unsigned long retries; - /* - * State transition callback(s): Only one of the two groups should be - * defined: - * - set_mode(), only for modes <= CLOCK_EVT_MODE_RESUME. - * - set_state_{shutdown|periodic|oneshot|oneshot_stopped}(), tick_resume(). 
- */ - void (*set_mode)(enum clock_event_mode mode, struct clock_event_device *); int (*set_state_periodic)(struct clock_event_device *); int (*set_state_oneshot)(struct clock_event_device *); int (*set_state_oneshot_stopped)(struct clock_event_device *); diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h index 278dd279a7a8..7784b597e959 100644 --- a/include/linux/clocksource.h +++ b/include/linux/clocksource.h @@ -246,16 +246,13 @@ extern int clocksource_i8253_init(void); #define CLOCKSOURCE_OF_DECLARE(name, compat, fn) \ OF_DECLARE_1(clksrc, name, compat, fn) -#ifdef CONFIG_CLKSRC_OF -extern void clocksource_of_init(void); +#ifdef CONFIG_CLKSRC_PROBE +extern void clocksource_probe(void); #else -static inline void clocksource_of_init(void) {} +static inline void clocksource_probe(void) {} #endif -#ifdef CONFIG_ACPI -void acpi_generic_timer_init(void); -#else -static inline void acpi_generic_timer_init(void) { } -#endif +#define CLOCKSOURCE_ACPI_DECLARE(name, table_id, fn) \ + ACPI_DECLARE_PROBE_ENTRY(clksrc, name, table_id, 0, NULL, 0, fn) #endif /* _LINUX_CLOCKSOURCE_H */ diff --git a/include/linux/cma.h b/include/linux/cma.h index f7ef093ec49a..29f9e774ab76 100644 --- a/include/linux/cma.h +++ b/include/linux/cma.h @@ -26,6 +26,6 @@ extern int __init cma_declare_contiguous(phys_addr_t base, extern int cma_init_reserved_mem(phys_addr_t base, phys_addr_t size, unsigned int order_per_bit, struct cma **res_cma); -extern struct page *cma_alloc(struct cma *cma, unsigned int count, unsigned int align); +extern struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align); extern bool cma_release(struct cma *cma, const struct page *pages, unsigned int count); #endif diff --git a/include/linux/com20020.h b/include/linux/com20020.h deleted file mode 100644 index 85898995b234..000000000000 --- a/include/linux/com20020.h +++ /dev/null @@ -1,145 +0,0 @@ -/* - * Linux ARCnet driver - COM20020 chipset support - function declarations - * - * Written 1997 by David Woodhouse. - * Written 1994-1999 by Avery Pennarun. - * Derived from skeleton.c by Donald Becker. - * - * Special thanks to Contemporary Controls, Inc. (www.ccontrols.com) - * for sponsoring the further development of this driver. - * - * ********************** - * - * The original copyright of skeleton.c was as follows: - * - * skeleton.c Written 1993 by Donald Becker. - * Copyright 1993 United States Government as represented by the - * Director, National Security Agency. This software may only be used - * and distributed according to the terms of the GNU General Public License as - * modified by SRC, incorporated herein by reference. - * - * ********************** - * - * For more details, see drivers/net/arcnet.c - * - * ********************** - */ -#ifndef __COM20020_H -#define __COM20020_H - -int com20020_check(struct net_device *dev); -int com20020_found(struct net_device *dev, int shared); -extern const struct net_device_ops com20020_netdev_ops; - -/* The number of low I/O ports used by the card. */ -#define ARCNET_TOTAL_SIZE 8 - -/* various register addresses */ -#ifdef CONFIG_SA1100_CT6001 -#define BUS_ALIGN 2 /* 8 bit device on a 16 bit bus - needs padding */ -#else -#define BUS_ALIGN 1 -#endif - -#define PLX_PCI_MAX_CARDS 2 - -struct com20020_pci_channel_map { - u32 bar; - u32 offset; - u32 size; /* 0x00 - auto, e.g. 
length of entire bar */ -}; - -struct com20020_pci_card_info { - const char *name; - int devcount; - - struct com20020_pci_channel_map chan_map_tbl[PLX_PCI_MAX_CARDS]; - - unsigned int flags; -}; - -struct com20020_priv { - struct com20020_pci_card_info *ci; - struct list_head list_dev; -}; - -struct com20020_dev { - struct list_head list; - struct net_device *dev; - - struct com20020_priv *pci_priv; - int index; -}; - -#define _INTMASK (ioaddr+BUS_ALIGN*0) /* writable */ -#define _STATUS (ioaddr+BUS_ALIGN*0) /* readable */ -#define _COMMAND (ioaddr+BUS_ALIGN*1) /* standard arcnet commands */ -#define _DIAGSTAT (ioaddr+BUS_ALIGN*1) /* diagnostic status register */ -#define _ADDR_HI (ioaddr+BUS_ALIGN*2) /* control registers for IO-mapped memory */ -#define _ADDR_LO (ioaddr+BUS_ALIGN*3) -#define _MEMDATA (ioaddr+BUS_ALIGN*4) /* data port for IO-mapped memory */ -#define _SUBADR (ioaddr+BUS_ALIGN*5) /* the extended port _XREG refers to */ -#define _CONFIG (ioaddr+BUS_ALIGN*6) /* configuration register */ -#define _XREG (ioaddr+BUS_ALIGN*7) /* extra registers (indexed by _CONFIG - or _SUBADR) */ - -/* in the ADDR_HI register */ -#define RDDATAflag 0x80 /* next access is a read (not a write) */ - -/* in the DIAGSTAT register */ -#define NEWNXTIDflag 0x02 /* ID to which token is passed has changed */ - -/* in the CONFIG register */ -#define RESETcfg 0x80 /* put card in reset state */ -#define TXENcfg 0x20 /* enable TX */ - -/* in SETUP register */ -#define PROMISCset 0x10 /* enable RCV_ALL */ -#define P1MODE 0x80 /* enable P1-MODE for Backplane */ -#define SLOWARB 0x01 /* enable Slow Arbitration for >=5Mbps */ - -/* COM2002x */ -#define SUB_TENTATIVE 0 /* tentative node ID */ -#define SUB_NODE 1 /* node ID */ -#define SUB_SETUP1 2 /* various options */ -#define SUB_TEST 3 /* test/diag register */ - -/* COM20022 only */ -#define SUB_SETUP2 4 /* sundry options */ -#define SUB_BUSCTL 5 /* bus control options */ -#define SUB_DMACOUNT 6 /* DMA count options */ - -#define SET_SUBADR(x) do { \ - if ((x) < 4) \ - { \ - lp->config = (lp->config & ~0x03) | (x); \ - SETCONF; \ - } \ - else \ - { \ - outb(x, _SUBADR); \ - } \ -} while (0) - -#undef ARCRESET -#undef ASTATUS -#undef ACOMMAND -#undef AINTMASK - -#define ARCRESET { outb(lp->config | 0x80, _CONFIG); \ - udelay(5); \ - outb(lp->config , _CONFIG); \ - } -#define ARCRESET0 { outb(0x18 | 0x80, _CONFIG); \ - udelay(5); \ - outb(0x18 , _CONFIG); \ - } - -#define ASTATUS() inb(_STATUS) -#define ADIAGSTATUS() inb(_DIAGSTAT) -#define ACOMMAND(cmd) outb((cmd),_COMMAND) -#define AINTMASK(msk) outb((msk),_INTMASK) - -#define SETCONF outb(lp->config, _CONFIG) - -#endif /* __COM20020_H */ diff --git a/include/linux/compaction.h b/include/linux/compaction.h index aa8f61cf3a19..4cd4ddf64cc7 100644 --- a/include/linux/compaction.h +++ b/include/linux/compaction.h @@ -15,7 +15,8 @@ /* For more detailed tracepoint output */ #define COMPACT_NO_SUITABLE_PAGE 5 #define COMPACT_NOT_SUITABLE_ZONE 6 -/* When adding new state, please change compaction_status_string, too */ +#define COMPACT_CONTENDED 7 +/* When adding new states, please adjust include/trace/events/compaction.h */ /* Used to signal whether compaction detected need_sched() or lock contention */ /* No contention detected */ diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h index dfaa7b3e9ae9..0e3110a0b771 100644 --- a/include/linux/compiler-gcc.h +++ b/include/linux/compiler-gcc.h @@ -210,6 +210,23 @@ #define __visible __attribute__((externally_visible)) #endif + +#if GCC_VERSION >= 
40900 && !defined(__CHECKER__) +/* + * __assume_aligned(n, k): Tell the optimizer that the returned + * pointer can be assumed to be k modulo n. The second argument is + * optional (default 0), so we use a variadic macro to make the + * shorthand. + * + * Beware: Do not apply this to functions which may return + * ERR_PTRs. Also, it is probably unwise to apply it to functions + * returning extra information in the low bits (but in that case the + * compiler should see some alignment anyway, when the return value is + * massaged by 'flags = ptr & 3; ptr &= ~3;'). + */ +#define __assume_aligned(a, ...) __attribute__((__assume_aligned__(a, ## __VA_ARGS__))) +#endif + /* * GCC 'asm goto' miscompiles certain code sequences: * @@ -237,12 +254,25 @@ #define KASAN_ABI_VERSION 3 #endif +#if GCC_VERSION >= 40902 +/* + * Tell the compiler that address safety instrumentation (KASAN) + * should not be applied to that function. + * Conflicts with inlining: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=67368 + */ +#define __no_sanitize_address __attribute__((no_sanitize_address)) +#endif + #endif /* gcc version >= 40000 specific checks */ #if !defined(__noclone) #define __noclone /* not needed */ #endif +#if !defined(__no_sanitize_address) +#define __no_sanitize_address +#endif + /* * A trick to suppress uninitialized variable warning without generating any * code diff --git a/include/linux/compiler.h b/include/linux/compiler.h index c836eb2dc44d..4dac1036594f 100644 --- a/include/linux/compiler.h +++ b/include/linux/compiler.h @@ -56,7 +56,7 @@ extern void __chk_io_ptr(const volatile void __iomem *); #include <linux/compiler-gcc.h> #endif -#ifdef CC_USING_HOTPATCH +#if defined(CC_USING_HOTPATCH) && !defined(__CHECKER__) #define notrace __attribute__((hotpatch(0,0))) #else #define notrace __attribute__((no_instrument_function)) @@ -198,19 +198,45 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect); #include <uapi/linux/types.h> -static __always_inline void __read_once_size(const volatile void *p, void *res, int size) +#define __READ_ONCE_SIZE \ +({ \ + switch (size) { \ + case 1: *(__u8 *)res = *(volatile __u8 *)p; break; \ + case 2: *(__u16 *)res = *(volatile __u16 *)p; break; \ + case 4: *(__u32 *)res = *(volatile __u32 *)p; break; \ + case 8: *(__u64 *)res = *(volatile __u64 *)p; break; \ + default: \ + barrier(); \ + __builtin_memcpy((void *)res, (const void *)p, size); \ + barrier(); \ + } \ +}) + +static __always_inline +void __read_once_size(const volatile void *p, void *res, int size) { - switch (size) { - case 1: *(__u8 *)res = *(volatile __u8 *)p; break; - case 2: *(__u16 *)res = *(volatile __u16 *)p; break; - case 4: *(__u32 *)res = *(volatile __u32 *)p; break; - case 8: *(__u64 *)res = *(volatile __u64 *)p; break; - default: - barrier(); - __builtin_memcpy((void *)res, (const void *)p, size); - barrier(); - } + __READ_ONCE_SIZE; +} + +#ifdef CONFIG_KASAN +/* + * This function is not 'inline' because __no_sanitize_address confilcts + * with inlining. Attempt to inline it may cause a build failure. + * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=67368 + * '__maybe_unused' allows us to avoid defined-but-not-used warnings. 
+ */ +static __no_sanitize_address __maybe_unused +void __read_once_size_nocheck(const volatile void *p, void *res, int size) +{ + __READ_ONCE_SIZE; } +#else +static __always_inline +void __read_once_size_nocheck(const volatile void *p, void *res, int size) +{ + __READ_ONCE_SIZE; +} +#endif static __always_inline void __write_once_size(volatile void *p, void *res, int size) { @@ -248,8 +274,22 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s * required ordering. */ -#define READ_ONCE(x) \ - ({ union { typeof(x) __val; char __c[1]; } __u; __read_once_size(&(x), __u.__c, sizeof(x)); __u.__val; }) +#define __READ_ONCE(x, check) \ +({ \ + union { typeof(x) __val; char __c[1]; } __u; \ + if (check) \ + __read_once_size(&(x), __u.__c, sizeof(x)); \ + else \ + __read_once_size_nocheck(&(x), __u.__c, sizeof(x)); \ + __u.__val; \ +}) +#define READ_ONCE(x) __READ_ONCE(x, 1) + +/* + * Use READ_ONCE_NOCHECK() instead of READ_ONCE() if you need + * to hide memory access from KASAN. + */ +#define READ_ONCE_NOCHECK(x) __READ_ONCE(x, 0) #define WRITE_ONCE(x, val) \ ({ \ @@ -259,22 +299,6 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s __u.__val; \ }) -/** - * READ_ONCE_CTRL - Read a value heading a control dependency - * @x: The value to be read, heading the control dependency - * - * Control dependencies are tricky. See Documentation/memory-barriers.txt - * for important information on how to use them. Note that in many cases, - * use of smp_load_acquire() will be much simpler. Control dependencies - * should be avoided except on the hottest of hotpaths. - */ -#define READ_ONCE_CTRL(x) \ -({ \ - typeof(x) __val = READ_ONCE(x); \ - smp_read_barrier_depends(); /* Enforce control dependency. */ \ - __val; \ -}) - #endif /* __KERNEL__ */ #endif /* __ASSEMBLY__ */ @@ -393,6 +417,14 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s #define __visible #endif +/* + * Assume alignment of return value. + */ +#ifndef __assume_aligned +#define __assume_aligned(a, ...) +#endif + + /* Are two types/vars the same type (ignoring qualifiers)? */ #ifndef __same_type # define __same_type(a, b) __builtin_types_compatible_p(typeof(a), typeof(b)) diff --git a/include/linux/coresight.h b/include/linux/coresight.h index c69e1b932809..a7cabfa23b55 100644 --- a/include/linux/coresight.h +++ b/include/linux/coresight.h @@ -207,7 +207,7 @@ struct coresight_ops_link { * Operations available for sources. * @trace_id: returns the value of the component's trace ID as known to the HW. - * @enable: enables tracing from a source. + * @enable: enables tracing for a source. * @disable: disables tracing for a source. 
*/ struct coresight_ops_source { diff --git a/include/linux/cpu.h b/include/linux/cpu.h index 23c30bdcca86..d2ca8c38f9c4 100644 --- a/include/linux/cpu.h +++ b/include/linux/cpu.h @@ -228,7 +228,6 @@ extern struct bus_type cpu_subsys; extern void cpu_hotplug_begin(void); extern void cpu_hotplug_done(void); extern void get_online_cpus(void); -extern bool try_get_online_cpus(void); extern void put_online_cpus(void); extern void cpu_hotplug_disable(void); extern void cpu_hotplug_enable(void); @@ -246,7 +245,6 @@ int cpu_down(unsigned int cpu); static inline void cpu_hotplug_begin(void) {} static inline void cpu_hotplug_done(void) {} #define get_online_cpus() do { } while (0) -#define try_get_online_cpus() true #define put_online_cpus() do { } while (0) #define cpu_hotplug_disable() do { } while (0) #define cpu_hotplug_enable() do { } while (0) diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h index 430efcbea48e..ef4c5b1a860f 100644 --- a/include/linux/cpufreq.h +++ b/include/linux/cpufreq.h @@ -65,7 +65,6 @@ struct cpufreq_policy { unsigned int shared_type; /* ACPI: ANY or ALL affected CPUs should set cpufreq */ unsigned int cpu; /* cpu managing this policy, must be online */ - unsigned int kobj_cpu; /* cpu managing sysfs files, can be offline */ struct clk *clk; struct cpufreq_cpuinfo cpuinfo;/* see above */ @@ -127,9 +126,14 @@ struct cpufreq_policy { #define CPUFREQ_SHARED_TYPE_ANY (3) /* Freq can be set from any dependent CPU*/ #ifdef CONFIG_CPU_FREQ +struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu); struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu); void cpufreq_cpu_put(struct cpufreq_policy *policy); #else +static inline struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu) +{ + return NULL; +} static inline struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu) { return NULL; @@ -144,10 +148,6 @@ static inline bool policy_is_shared(struct cpufreq_policy *policy) /* /sys/devices/system/cpu/cpufreq: entry point for global variables */ extern struct kobject *cpufreq_global_kobject; -int cpufreq_get_global_kobject(void); -void cpufreq_put_global_kobject(void); -int cpufreq_sysfs_create_file(const struct attribute *attr); -void cpufreq_sysfs_remove_file(const struct attribute *attr); #ifdef CONFIG_CPU_FREQ unsigned int cpufreq_get(unsigned int cpu); diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h index 1b357997cac5..5a1311942358 100644 --- a/include/linux/cpuset.h +++ b/include/linux/cpuset.h @@ -93,7 +93,7 @@ extern int current_cpuset_is_being_rebound(void); extern void rebuild_sched_domains(void); -extern void cpuset_print_task_mems_allowed(struct task_struct *p); +extern void cpuset_print_current_mems_allowed(void); /* * read_mems_allowed_begin is required when making decisions involving @@ -219,7 +219,7 @@ static inline void rebuild_sched_domains(void) partition_sched_domains(1, NULL, NULL); } -static inline void cpuset_print_task_mems_allowed(struct task_struct *p) +static inline void cpuset_print_current_mems_allowed(void) { } diff --git a/include/linux/dccp.h b/include/linux/dccp.h index 221025423e6c..61d042bbbf60 100644 --- a/include/linux/dccp.h +++ b/include/linux/dccp.h @@ -202,16 +202,16 @@ struct dccp_service_list { #define DCCP_SERVICE_INVALID_VALUE htonl((__u32)-1) #define DCCP_SERVICE_CODE_IS_ABSENT 0 -static inline int dccp_list_has_service(const struct dccp_service_list *sl, +static inline bool dccp_list_has_service(const struct dccp_service_list *sl, const __be32 service) { if (likely(sl != NULL)) { u32 i = 
sl->dccpsl_nr; while (i--) if (sl->dccpsl_list[i] == service) - return 1; + return true; } - return 0; + return false; } struct dccp_ackvec; diff --git a/include/linux/debugfs.h b/include/linux/debugfs.h index 9beb636b97eb..19c066dce1da 100644 --- a/include/linux/debugfs.h +++ b/include/linux/debugfs.h @@ -79,6 +79,8 @@ struct dentry *debugfs_create_u32(const char *name, umode_t mode, struct dentry *parent, u32 *value); struct dentry *debugfs_create_u64(const char *name, umode_t mode, struct dentry *parent, u64 *value); +struct dentry *debugfs_create_ulong(const char *name, umode_t mode, + struct dentry *parent, unsigned long *value); struct dentry *debugfs_create_x8(const char *name, umode_t mode, struct dentry *parent, u8 *value); struct dentry *debugfs_create_x16(const char *name, umode_t mode, @@ -92,7 +94,7 @@ struct dentry *debugfs_create_size_t(const char *name, umode_t mode, struct dentry *debugfs_create_atomic_t(const char *name, umode_t mode, struct dentry *parent, atomic_t *value); struct dentry *debugfs_create_bool(const char *name, umode_t mode, - struct dentry *parent, u32 *value); + struct dentry *parent, bool *value); struct dentry *debugfs_create_blob(const char *name, umode_t mode, struct dentry *parent, @@ -243,7 +245,7 @@ static inline struct dentry *debugfs_create_atomic_t(const char *name, umode_t m static inline struct dentry *debugfs_create_bool(const char *name, umode_t mode, struct dentry *parent, - u32 *value) + bool *value) { return ERR_PTR(-ENODEV); } diff --git a/include/linux/devfreq.h b/include/linux/devfreq.h index ce447f0f1bad..68030e22af35 100644 --- a/include/linux/devfreq.h +++ b/include/linux/devfreq.h @@ -65,7 +65,10 @@ struct devfreq_dev_status { * The "flags" parameter's possible values are * explained above with "DEVFREQ_FLAG_*" macros. * @get_dev_status: The device should provide the current performance - * status to devfreq, which is used by governors. + * status to devfreq. Governors are recommended not to + * use this directly. Instead, governors are recommended + * to use devfreq_update_stats() along with + * devfreq.last_status. * @get_cur_freq: The device should provide the current frequency * at which it is operating. * @exit: An optional callback that is called when devfreq @@ -161,6 +164,7 @@ struct devfreq { struct delayed_work work; unsigned long previous_freq; + struct devfreq_dev_status last_status; void *data; /* private data for governors */ @@ -204,6 +208,19 @@ extern int devm_devfreq_register_opp_notifier(struct device *dev, extern void devm_devfreq_unregister_opp_notifier(struct device *dev, struct devfreq *devfreq); +/** + * devfreq_update_stats() - update the last_status pointer in struct devfreq + * @df: the devfreq instance whose status needs updating + * + * Governors are recommended to use this function along with last_status, + * which allows other entities to reuse the last_status without affecting + * the values fetched later by governors. 
+ */ +static inline int devfreq_update_stats(struct devfreq *df) +{ + return df->profile->get_dev_status(df->dev.parent, &df->last_status); +} + #if IS_ENABLED(CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND) /** * struct devfreq_simple_ondemand_data - void *data fed to struct devfreq @@ -289,6 +306,11 @@ static inline void devm_devfreq_unregister_opp_notifier(struct device *dev, struct devfreq *devfreq) { } + +static inline int devfreq_update_stats(struct devfreq *df) +{ + return -EINVAL; +} #endif /* CONFIG_PM_DEVFREQ */ #endif /* __LINUX_DEVFREQ_H__ */ diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h index 76d23fa8c7d3..ec1c61c87d89 100644 --- a/include/linux/device-mapper.h +++ b/include/linux/device-mapper.h @@ -79,8 +79,8 @@ typedef void (*dm_status_fn) (struct dm_target *ti, status_type_t status_type, typedef int (*dm_message_fn) (struct dm_target *ti, unsigned argc, char **argv); -typedef int (*dm_ioctl_fn) (struct dm_target *ti, unsigned int cmd, - unsigned long arg); +typedef int (*dm_prepare_ioctl_fn) (struct dm_target *ti, + struct block_device **bdev, fmode_t *mode); /* * These iteration functions are typically used to check (and combine) @@ -156,7 +156,7 @@ struct target_type { dm_resume_fn resume; dm_status_fn status; dm_message_fn message; - dm_ioctl_fn ioctl; + dm_prepare_ioctl_fn prepare_ioctl; dm_busy_fn busy; dm_iterate_devices_fn iterate_devices; dm_io_hints_fn io_hints; diff --git a/include/linux/dma-contiguous.h b/include/linux/dma-contiguous.h index 569bbd039896..fec734df1524 100644 --- a/include/linux/dma-contiguous.h +++ b/include/linux/dma-contiguous.h @@ -111,7 +111,7 @@ static inline int dma_declare_contiguous(struct device *dev, phys_addr_t size, return ret; } -struct page *dma_alloc_from_contiguous(struct device *dev, int count, +struct page *dma_alloc_from_contiguous(struct device *dev, size_t count, unsigned int order); bool dma_release_from_contiguous(struct device *dev, struct page *pages, int count); @@ -144,7 +144,7 @@ int dma_declare_contiguous(struct device *dev, phys_addr_t size, } static inline -struct page *dma_alloc_from_contiguous(struct device *dev, int count, +struct page *dma_alloc_from_contiguous(struct device *dev, size_t count, unsigned int order) { return NULL; diff --git a/include/linux/dma-iommu.h b/include/linux/dma-iommu.h new file mode 100644 index 000000000000..fc481037478a --- /dev/null +++ b/include/linux/dma-iommu.h @@ -0,0 +1,85 @@ +/* + * Copyright (C) 2014-2015 ARM Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. 
+ */ +#ifndef __DMA_IOMMU_H +#define __DMA_IOMMU_H + +#ifdef __KERNEL__ +#include <asm/errno.h> + +#ifdef CONFIG_IOMMU_DMA +#include <linux/iommu.h> + +int iommu_dma_init(void); + +/* Domain management interface for IOMMU drivers */ +int iommu_get_dma_cookie(struct iommu_domain *domain); +void iommu_put_dma_cookie(struct iommu_domain *domain); + +/* Setup call for arch DMA mapping code */ +int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base, u64 size); + +/* General helpers for DMA-API <-> IOMMU-API interaction */ +int dma_direction_to_prot(enum dma_data_direction dir, bool coherent); + +/* + * These implement the bulk of the relevant DMA mapping callbacks, but require + * the arch code to take care of attributes and cache maintenance + */ +struct page **iommu_dma_alloc(struct device *dev, size_t size, + gfp_t gfp, int prot, dma_addr_t *handle, + void (*flush_page)(struct device *, const void *, phys_addr_t)); +void iommu_dma_free(struct device *dev, struct page **pages, size_t size, + dma_addr_t *handle); + +int iommu_dma_mmap(struct page **pages, size_t size, struct vm_area_struct *vma); + +dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page, + unsigned long offset, size_t size, int prot); +int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg, + int nents, int prot); + +/* + * Arch code with no special attribute handling may use these + * directly as DMA mapping callbacks for simplicity + */ +void iommu_dma_unmap_page(struct device *dev, dma_addr_t handle, size_t size, + enum dma_data_direction dir, struct dma_attrs *attrs); +void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, + enum dma_data_direction dir, struct dma_attrs *attrs); +int iommu_dma_supported(struct device *dev, u64 mask); +int iommu_dma_mapping_error(struct device *dev, dma_addr_t dma_addr); + +#else + +struct iommu_domain; + +static inline int iommu_dma_init(void) +{ + return 0; +} + +static inline int iommu_get_dma_cookie(struct iommu_domain *domain) +{ + return -ENODEV; +} + +static inline void iommu_put_dma_cookie(struct iommu_domain *domain) +{ +} + +#endif /* CONFIG_IOMMU_DMA */ +#endif /* __KERNEL__ */ +#endif /* __DMA_IOMMU_H */ diff --git a/include/linux/dma/hsu.h b/include/linux/dma/hsu.h index 234393a6997b..79df69dc629c 100644 --- a/include/linux/dma/hsu.h +++ b/include/linux/dma/hsu.h @@ -35,14 +35,23 @@ struct hsu_dma_chip { unsigned int length; unsigned int offset; struct hsu_dma *hsu; - struct hsu_dma_platform_data *pdata; }; +#if IS_ENABLED(CONFIG_HSU_DMA) /* Export to the internal users */ irqreturn_t hsu_dma_irq(struct hsu_dma_chip *chip, unsigned short nr); /* Export to the platform drivers */ int hsu_dma_probe(struct hsu_dma_chip *chip); int hsu_dma_remove(struct hsu_dma_chip *chip); +#else +static inline irqreturn_t hsu_dma_irq(struct hsu_dma_chip *chip, + unsigned short nr) +{ + return IRQ_NONE; +} +static inline int hsu_dma_probe(struct hsu_dma_chip *chip) { return -ENODEV; } +static inline int hsu_dma_remove(struct hsu_dma_chip *chip) { return 0; } +#endif /* CONFIG_HSU_DMA */ #endif /* _DMA_HSU_H */ diff --git a/include/linux/dma_remapping.h b/include/linux/dma_remapping.h index 7ac17f57250e..187c10299722 100644 --- a/include/linux/dma_remapping.h +++ b/include/linux/dma_remapping.h @@ -20,6 +20,14 @@ #define CONTEXT_TT_MULTI_LEVEL 0 #define CONTEXT_TT_DEV_IOTLB 1 #define CONTEXT_TT_PASS_THROUGH 2 +/* Extended context entry types */ +#define CONTEXT_TT_PT_PASID 4 +#define CONTEXT_TT_PT_PASID_DEV_IOTLB 5 +#define 
CONTEXT_TT_MASK (7ULL << 2) + +#define CONTEXT_DINVE (1ULL << 8) +#define CONTEXT_PRS (1ULL << 9) +#define CONTEXT_PASIDE (1ULL << 11) struct intel_iommu; struct dmar_domain; diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h index 7ea9184eaa13..c47c68e535e8 100644 --- a/include/linux/dmaengine.h +++ b/include/linux/dmaengine.h @@ -645,6 +645,7 @@ enum dmaengine_alignment { * The function takes a buffer of size buf_len. The callback function will * be called after period_len bytes have been transferred. * @device_prep_interleaved_dma: Transfer expression in a generic way. + * @device_prep_dma_imm_data: DMA's 8 byte immediate data to the dst address * @device_config: Pushes a new configuration to a channel, return 0 or an error * code * @device_pause: Pauses any transfer happening on a channel. Returns @@ -727,6 +728,9 @@ struct dma_device { struct dma_async_tx_descriptor *(*device_prep_interleaved_dma)( struct dma_chan *chan, struct dma_interleaved_template *xt, unsigned long flags); + struct dma_async_tx_descriptor *(*device_prep_dma_imm_data)( + struct dma_chan *chan, dma_addr_t dst, u64 data, + unsigned long flags); int (*device_config)(struct dma_chan *chan, struct dma_slave_config *config); diff --git a/include/linux/edac.h b/include/linux/edac.h index da3b72e95db3..4fe67b853de0 100644 --- a/include/linux/edac.h +++ b/include/linux/edac.h @@ -769,12 +769,10 @@ struct mem_ctl_info { /* the internal state of this controller instance */ int op_state; -#ifdef CONFIG_EDAC_DEBUG struct dentry *debugfs; u8 fake_inject_layer[EDAC_MAX_LAYERS]; - u32 fake_inject_ue; + bool fake_inject_ue; u16 fake_inject_count; -#endif }; /* diff --git a/include/linux/efi.h b/include/linux/efi.h index 85ef051ac6fb..569b5a866bb1 100644 --- a/include/linux/efi.h +++ b/include/linux/efi.h @@ -99,6 +99,7 @@ typedef struct { #define EFI_MEMORY_XP ((u64)0x0000000000004000ULL) /* execute-protect */ #define EFI_MEMORY_MORE_RELIABLE \ ((u64)0x0000000000010000ULL) /* higher reliability */ +#define EFI_MEMORY_RO ((u64)0x0000000000020000ULL) /* read-only */ #define EFI_MEMORY_RUNTIME ((u64)0x8000000000000000ULL) /* range requires runtime mapping */ #define EFI_MEMORY_DESCRIPTOR_VERSION 1 @@ -595,6 +596,9 @@ void efi_native_runtime_setup(void); #define DEVICE_TREE_GUID \ EFI_GUID( 0xb1b621d5, 0xf19c, 0x41a5, 0x83, 0x0b, 0xd9, 0x15, 0x2c, 0x69, 0xaa, 0xe0 ) +#define EFI_PROPERTIES_TABLE_GUID \ + EFI_GUID( 0x880aaca3, 0x4adc, 0x4a04, 0x90, 0x79, 0xb7, 0x47, 0x34, 0x08, 0x25, 0xe5 ) + typedef struct { efi_guid_t guid; u64 table; @@ -676,7 +680,7 @@ typedef struct { } efi_system_table_t; struct efi_memory_map { - void *phys_map; + phys_addr_t phys_map; void *map; void *map_end; int nr_map; @@ -808,6 +812,15 @@ typedef struct _efi_file_io_interface { #define EFI_FILE_MODE_WRITE 0x0000000000000002 #define EFI_FILE_MODE_CREATE 0x8000000000000000 +typedef struct { + u32 version; + u32 length; + u64 memory_protection_attribute; +} efi_properties_table_t; + +#define EFI_PROPERTIES_TABLE_VERSION 0x00010000 +#define EFI_PROPERTIES_RUNTIME_MEMORY_PROTECTION_NON_EXECUTABLE_PE_DATA 0x1 + #define EFI_INVALID_TABLE_ADDR (~0UL) /* @@ -830,6 +843,7 @@ extern struct efi { unsigned long runtime; /* runtime table */ unsigned long config_table; /* config tables */ unsigned long esrt; /* ESRT table */ + unsigned long properties_table; /* properties table */ efi_get_time_t *get_time; efi_set_time_t *set_time; efi_get_wakeup_time_t *get_wakeup_time; @@ -901,13 +915,19 @@ extern void efi_initialize_iomem_resources(struct resource 
*code_resource, struct resource *data_resource, struct resource *bss_resource); extern void efi_get_time(struct timespec *now); extern void efi_reserve_boot_services(void); -extern int efi_get_fdt_params(struct efi_fdt_params *params, int verbose); +extern int efi_get_fdt_params(struct efi_fdt_params *params); extern struct efi_memory_map memmap; extern struct kobject *efi_kobj; extern int efi_reboot_quirk_mode; extern bool efi_poweroff_required(void); +#ifdef CONFIG_EFI_FAKE_MEMMAP +extern void __init efi_fake_memmap(void); +#else +static inline void efi_fake_memmap(void) { } +#endif + /* Iterate through an efi_memory_map */ #define for_each_efi_memory_desc(m, md) \ for ((md) = (m)->map; \ @@ -959,6 +979,7 @@ extern int __init efi_setup_pcdp_console(char *); #define EFI_PARAVIRT 6 /* Access is via a paravirt interface */ #define EFI_ARCH_1 7 /* First arch-specific bit */ #define EFI_DBG 8 /* Print additional debug info at runtime */ +#define EFI_NX_PE_DATA 9 /* Can runtime data regions be mapped non-executable? */ #ifdef CONFIG_EFI /* diff --git a/include/linux/extcon.h b/include/linux/extcon.h index c0f8c4fc5d45..7abf674c388c 100644 --- a/include/linux/extcon.h +++ b/include/linux/extcon.h @@ -31,32 +31,42 @@ /* * Define the unique id of supported external connectors */ -#define EXTCON_NONE 0 - -#define EXTCON_USB 1 /* USB connector */ -#define EXTCON_USB_HOST 2 - -#define EXTCON_TA 3 /* Charger connector */ -#define EXTCON_FAST_CHARGER 4 -#define EXTCON_SLOW_CHARGER 5 -#define EXTCON_CHARGE_DOWNSTREAM 6 - -#define EXTCON_LINE_IN 7 /* Audio/Video connector */ -#define EXTCON_LINE_OUT 8 -#define EXTCON_MICROPHONE 9 -#define EXTCON_HEADPHONE 10 -#define EXTCON_HDMI 11 -#define EXTCON_MHL 12 -#define EXTCON_DVI 13 -#define EXTCON_VGA 14 -#define EXTCON_SPDIF_IN 15 -#define EXTCON_SPDIF_OUT 16 -#define EXTCON_VIDEO_IN 17 -#define EXTCON_VIDEO_OUT 18 - -#define EXTCON_DOCK 19 /* Misc connector */ -#define EXTCON_JIG 20 -#define EXTCON_MECHANICAL 21 +#define EXTCON_NONE 0 + +/* USB external connector */ +#define EXTCON_USB 1 +#define EXTCON_USB_HOST 2 + +/* Charging external connector */ +#define EXTCON_CHG_USB_SDP 5 /* Standard Downstream Port */ +#define EXTCON_CHG_USB_DCP 6 /* Dedicated Charging Port */ +#define EXTCON_CHG_USB_CDP 7 /* Charging Downstream Port */ +#define EXTCON_CHG_USB_ACA 8 /* Accessory Charger Adapter */ +#define EXTCON_CHG_USB_FAST 9 +#define EXTCON_CHG_USB_SLOW 10 + +/* Jack external connector */ +#define EXTCON_JACK_MICROPHONE 20 +#define EXTCON_JACK_HEADPHONE 21 +#define EXTCON_JACK_LINE_IN 22 +#define EXTCON_JACK_LINE_OUT 23 +#define EXTCON_JACK_VIDEO_IN 24 +#define EXTCON_JACK_VIDEO_OUT 25 +#define EXTCON_JACK_SPDIF_IN 26 /* Sony Philips Digital InterFace */ +#define EXTCON_JACK_SPDIF_OUT 27 + +/* Display external connector */ +#define EXTCON_DISP_HDMI 40 /* High-Definition Multimedia Interface */ +#define EXTCON_DISP_MHL 41 /* Mobile High-Definition Link */ +#define EXTCON_DISP_DVI 42 /* Digital Visual Interface */ +#define EXTCON_DISP_VGA 43 /* Video Graphics Array */ + +/* Miscellaneous external connector */ +#define EXTCON_DOCK 60 +#define EXTCON_JIG 61 +#define EXTCON_MECHANICAL 62 + +#define EXTCON_NUM 63 struct extcon_cable; diff --git a/include/linux/extcon/extcon-gpio.h b/include/linux/extcon/extcon-gpio.h index 0b17ad43fbfc..7cacafb78b09 100644 --- a/include/linux/extcon/extcon-gpio.h +++ b/include/linux/extcon/extcon-gpio.h @@ -1,5 +1,5 @@ /* - * External connector (extcon) class generic GPIO driver + * Single-state GPIO extcon driver based on extcon 
class * * Copyright (C) 2012 Samsung Electronics * Author: MyungJoo Ham <myungjoo.ham@samsung.com> @@ -16,43 +16,31 @@ * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. - * -*/ + */ #ifndef __EXTCON_GPIO_H__ #define __EXTCON_GPIO_H__ __FILE__ #include <linux/extcon.h> /** - * struct gpio_extcon_platform_data - A simple GPIO-controlled extcon device. - * @name: The name of this GPIO extcon device. + * struct gpio_extcon_pdata - A simple GPIO-controlled extcon device. + * @extcon_id: The unique id of specific external connector. * @gpio: Corresponding GPIO. * @gpio_active_low: Boolean describing whether gpio active state is 1 or 0 * If true, low state of gpio means active. * If false, high state of gpio means active. * @debounce: Debounce time for GPIO IRQ in ms. * @irq_flags: IRQ Flags (e.g., IRQF_TRIGGER_LOW). - * @state_on: print_state is overriden with state_on if attached. - * If NULL, default method of extcon class is used. - * @state_off: print_state is overriden with state_off if detached. - * If NUll, default method of extcon class is used. * @check_on_resume: Boolean describing whether to check the state of gpio * while resuming from sleep. - * - * Note that in order for state_on or state_off to be valid, both state_on - * and state_off should be not NULL. If at least one of them is NULL, - * the print_state is not overriden. */ -struct gpio_extcon_platform_data { - const char *name; +struct gpio_extcon_pdata { + unsigned int extcon_id; unsigned gpio; bool gpio_active_low; unsigned long debounce; unsigned long irq_flags; - /* if NULL, "0" or "1" will be printed */ - const char *state_on; - const char *state_off; bool check_on_resume; }; diff --git a/include/linux/fault-inject.h b/include/linux/fault-inject.h index 798fad9e420d..3159a7dba034 100644 --- a/include/linux/fault-inject.h +++ b/include/linux/fault-inject.h @@ -18,7 +18,7 @@ struct fault_attr { atomic_t times; atomic_t space; unsigned long verbose; - u32 task_filter; + bool task_filter; unsigned long stacktrace_depth; unsigned long require_start; unsigned long require_end; diff --git a/include/linux/fb.h b/include/linux/fb.h index bc9afa74ee11..41a3b11f7796 100644 --- a/include/linux/fb.h +++ b/include/linux/fb.h @@ -483,7 +483,10 @@ struct fb_info { #ifdef CONFIG_FB_TILEBLITTING struct fb_tile_ops *tileops; /* Tile Blitting */ #endif - char __iomem *screen_base; /* Virtual address */ + union { + char __iomem *screen_base; /* Virtual address */ + char *screen_buffer; + }; unsigned long screen_size; /* Amount of ioremapped VRAM or 0 */ void *pseudo_palette; /* Fake palette of 16 colors */ #define FBINFO_STATE_RUNNING 0 diff --git a/include/linux/fdtable.h b/include/linux/fdtable.h index 674e3e226465..5295535b60c6 100644 --- a/include/linux/fdtable.h +++ b/include/linux/fdtable.h @@ -26,6 +26,7 @@ struct fdtable { struct file __rcu **fd; /* current fd array */ unsigned long *close_on_exec; unsigned long *open_fds; + unsigned long *full_fds_bits; struct rcu_head rcu; }; @@ -59,6 +60,7 @@ struct files_struct { int next_fd; unsigned long close_on_exec_init[1]; unsigned long open_fds_init[1]; + unsigned long full_fds_bits_init[1]; struct file __rcu * fd_array[NR_OPEN_DEFAULT]; }; diff --git a/include/linux/filter.h b/include/linux/filter.h index fa2cab985e57..4165e9ac9e36 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@ -13,6 +13,7 @@ #include <linux/printk.h> #include 
<linux/workqueue.h> #include <linux/sched.h> +#include <net/sch_generic.h> #include <asm/cacheflush.h> @@ -302,10 +303,6 @@ struct bpf_prog_aux; bpf_size; \ }) -/* Macro to invoke filter function. */ -#define SK_RUN_FILTER(filter, ctx) \ - (*filter->prog->bpf_func)(ctx, filter->prog->insnsi) - #ifdef CONFIG_COMPAT /* A struct sock_filter is architecture independent. */ struct compat_sock_fprog { @@ -326,8 +323,12 @@ struct bpf_binary_header { struct bpf_prog { u16 pages; /* Number of allocated pages */ - bool jited; /* Is our filter JIT'ed? */ - bool gpl_compatible; /* Is our filter GPL compatible? */ + kmemcheck_bitfield_begin(meta); + u16 jited:1, /* Is our filter JIT'ed? */ + gpl_compatible:1, /* Is filter GPL compatible? */ + cb_access:1, /* Is control block accessed? */ + dst_needed:1; /* Do we need dst entry? */ + kmemcheck_bitfield_end(meta); u32 len; /* Number of filter blocks */ enum bpf_prog_type type; /* Type of BPF program */ struct bpf_prog_aux *aux; /* Auxiliary fields */ @@ -349,6 +350,39 @@ struct sk_filter { #define BPF_PROG_RUN(filter, ctx) (*filter->bpf_func)(ctx, filter->insnsi) +static inline u32 bpf_prog_run_save_cb(const struct bpf_prog *prog, + struct sk_buff *skb) +{ + u8 *cb_data = qdisc_skb_cb(skb)->data; + u8 saved_cb[QDISC_CB_PRIV_LEN]; + u32 res; + + BUILD_BUG_ON(FIELD_SIZEOF(struct __sk_buff, cb) != + QDISC_CB_PRIV_LEN); + + if (unlikely(prog->cb_access)) { + memcpy(saved_cb, cb_data, sizeof(saved_cb)); + memset(cb_data, 0, sizeof(saved_cb)); + } + + res = BPF_PROG_RUN(prog, skb); + + if (unlikely(prog->cb_access)) + memcpy(cb_data, saved_cb, sizeof(saved_cb)); + + return res; +} + +static inline u32 bpf_prog_run_clear_cb(const struct bpf_prog *prog, + struct sk_buff *skb) +{ + u8 *cb_data = qdisc_skb_cb(skb)->data; + + if (unlikely(prog->cb_access)) + memset(cb_data, 0, QDISC_CB_PRIV_LEN); + return BPF_PROG_RUN(prog, skb); +} + static inline unsigned int bpf_prog_size(unsigned int proglen) { return max(sizeof(struct bpf_prog), @@ -408,7 +442,7 @@ typedef int (*bpf_aux_classic_check_t)(struct sock_filter *filter, int bpf_prog_create(struct bpf_prog **pfp, struct sock_fprog_kern *fprog); int bpf_prog_create_from_user(struct bpf_prog **pfp, struct sock_fprog *fprog, - bpf_aux_classic_check_t trans); + bpf_aux_classic_check_t trans, bool save_orig); void bpf_prog_destroy(struct bpf_prog *fp); int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk); diff --git a/include/linux/fpga/fpga-mgr.h b/include/linux/fpga/fpga-mgr.h new file mode 100644 index 000000000000..0940bf45e2f2 --- /dev/null +++ b/include/linux/fpga/fpga-mgr.h @@ -0,0 +1,127 @@ +/* + * FPGA Framework + * + * Copyright (C) 2013-2015 Altera Corporation + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see <http://www.gnu.org/licenses/>. 
+ */ +#include <linux/mutex.h> +#include <linux/platform_device.h> + +#ifndef _LINUX_FPGA_MGR_H +#define _LINUX_FPGA_MGR_H + +struct fpga_manager; + +/** + * enum fpga_mgr_states - fpga framework states + * @FPGA_MGR_STATE_UNKNOWN: can't determine state + * @FPGA_MGR_STATE_POWER_OFF: FPGA power is off + * @FPGA_MGR_STATE_POWER_UP: FPGA reports power is up + * @FPGA_MGR_STATE_RESET: FPGA in reset state + * @FPGA_MGR_STATE_FIRMWARE_REQ: firmware request in progress + * @FPGA_MGR_STATE_FIRMWARE_REQ_ERR: firmware request failed + * @FPGA_MGR_STATE_WRITE_INIT: preparing FPGA for programming + * @FPGA_MGR_STATE_WRITE_INIT_ERR: Error during WRITE_INIT stage + * @FPGA_MGR_STATE_WRITE: writing image to FPGA + * @FPGA_MGR_STATE_WRITE_ERR: Error while writing FPGA + * @FPGA_MGR_STATE_WRITE_COMPLETE: Doing post programming steps + * @FPGA_MGR_STATE_WRITE_COMPLETE_ERR: Error during WRITE_COMPLETE + * @FPGA_MGR_STATE_OPERATING: FPGA is programmed and operating + */ +enum fpga_mgr_states { + /* default FPGA states */ + FPGA_MGR_STATE_UNKNOWN, + FPGA_MGR_STATE_POWER_OFF, + FPGA_MGR_STATE_POWER_UP, + FPGA_MGR_STATE_RESET, + + /* getting an image for loading */ + FPGA_MGR_STATE_FIRMWARE_REQ, + FPGA_MGR_STATE_FIRMWARE_REQ_ERR, + + /* write sequence: init, write, complete */ + FPGA_MGR_STATE_WRITE_INIT, + FPGA_MGR_STATE_WRITE_INIT_ERR, + FPGA_MGR_STATE_WRITE, + FPGA_MGR_STATE_WRITE_ERR, + FPGA_MGR_STATE_WRITE_COMPLETE, + FPGA_MGR_STATE_WRITE_COMPLETE_ERR, + + /* fpga is programmed and operating */ + FPGA_MGR_STATE_OPERATING, +}; + +/* + * FPGA Manager flags + * FPGA_MGR_PARTIAL_RECONFIG: do partial reconfiguration if supported + */ +#define FPGA_MGR_PARTIAL_RECONFIG BIT(0) + +/** + * struct fpga_manager_ops - ops for low level fpga manager drivers + * @state: returns an enum value of the FPGA's state + * @write_init: prepare the FPGA to receive configuration data + * @write: write count bytes of configuration data to the FPGA + * @write_complete: set FPGA to operating state after writing is done + * @fpga_remove: optional: Set FPGA into a specific state during driver remove + * + * fpga_manager_ops are the low level functions implemented by a specific + * fpga manager driver. The optional ones are tested for NULL before being + * called, so leaving them out is fine.
+ */ +struct fpga_manager_ops { + enum fpga_mgr_states (*state)(struct fpga_manager *mgr); + int (*write_init)(struct fpga_manager *mgr, u32 flags, + const char *buf, size_t count); + int (*write)(struct fpga_manager *mgr, const char *buf, size_t count); + int (*write_complete)(struct fpga_manager *mgr, u32 flags); + void (*fpga_remove)(struct fpga_manager *mgr); +}; + +/** + * struct fpga_manager - fpga manager structure + * @name: name of low level fpga manager + * @dev: fpga manager device + * @ref_mutex: only allows one reference to fpga manager + * @state: state of fpga manager + * @mops: pointer to struct of fpga manager ops + * @priv: low level driver private data + */ +struct fpga_manager { + const char *name; + struct device dev; + struct mutex ref_mutex; + enum fpga_mgr_states state; + const struct fpga_manager_ops *mops; + void *priv; +}; + +#define to_fpga_manager(d) container_of(d, struct fpga_manager, dev) + +int fpga_mgr_buf_load(struct fpga_manager *mgr, u32 flags, + const char *buf, size_t count); + +int fpga_mgr_firmware_load(struct fpga_manager *mgr, u32 flags, + const char *image_name); + +struct fpga_manager *of_fpga_mgr_get(struct device_node *node); + +void fpga_mgr_put(struct fpga_manager *mgr); + +int fpga_mgr_register(struct device *dev, const char *name, + const struct fpga_manager_ops *mops, void *priv); + +void fpga_mgr_unregister(struct device *dev); + +#endif /*_LINUX_FPGA_MGR_H */ diff --git a/include/linux/fs.h b/include/linux/fs.h index 72d8a844c692..9a1cb8c605e0 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -1053,12 +1053,11 @@ extern void locks_remove_file(struct file *); extern void locks_release_private(struct file_lock *); extern void posix_test_lock(struct file *, struct file_lock *); extern int posix_lock_file(struct file *, struct file_lock *, struct file_lock *); -extern int posix_lock_inode_wait(struct inode *, struct file_lock *); extern int posix_unblock_lock(struct file_lock *); extern int vfs_test_lock(struct file *, struct file_lock *); extern int vfs_lock_file(struct file *, unsigned int, struct file_lock *, struct file_lock *); extern int vfs_cancel_lock(struct file *filp, struct file_lock *fl); -extern int flock_lock_inode_wait(struct inode *inode, struct file_lock *fl); +extern int locks_lock_inode_wait(struct inode *inode, struct file_lock *fl); extern int __break_lease(struct inode *inode, unsigned int flags, unsigned int type); extern void lease_get_mtime(struct inode *, struct timespec *time); extern int generic_setlease(struct file *, long, struct file_lock **, void **priv); @@ -1144,12 +1143,6 @@ static inline int posix_lock_file(struct file *filp, struct file_lock *fl, return -ENOLCK; } -static inline int posix_lock_inode_wait(struct inode *inode, - struct file_lock *fl) -{ - return -ENOLCK; -} - static inline int posix_unblock_lock(struct file_lock *waiter) { return -ENOENT; @@ -1171,8 +1164,7 @@ static inline int vfs_cancel_lock(struct file *filp, struct file_lock *fl) return 0; } -static inline int flock_lock_inode_wait(struct inode *inode, - struct file_lock *request) +static inline int locks_lock_inode_wait(struct inode *inode, struct file_lock *fl) { return -ENOLCK; } @@ -1215,14 +1207,9 @@ static inline struct inode *file_inode(const struct file *f) return f->f_inode; } -static inline int posix_lock_file_wait(struct file *filp, struct file_lock *fl) -{ - return posix_lock_inode_wait(file_inode(filp), fl); -} - -static inline int flock_lock_file_wait(struct file *filp, struct file_lock *fl) +static inline int
locks_lock_file_wait(struct file *filp, struct file_lock *fl) { - return flock_lock_inode_wait(file_inode(filp), fl); + return locks_lock_inode_wait(file_inode(filp), fl); } struct fasync_struct { @@ -2422,6 +2409,7 @@ extern int write_inode_now(struct inode *, int); extern int filemap_fdatawrite(struct address_space *); extern int filemap_flush(struct address_space *); extern int filemap_fdatawait(struct address_space *); +extern void filemap_fdatawait_keep_errors(struct address_space *); extern int filemap_fdatawait_range(struct address_space *, loff_t lstart, loff_t lend); extern int filemap_write_and_wait(struct address_space *mapping); diff --git a/include/linux/fsl/guts.h b/include/linux/fsl/guts.h new file mode 100644 index 000000000000..84d971ff3fba --- /dev/null +++ b/include/linux/fsl/guts.h @@ -0,0 +1,192 @@ +/** + * Freescale 85xx and 86xx Global Utilities register set + * + * Authors: Jeff Brown + * Timur Tabi <timur@freescale.com> + * + * Copyright 2004,2007,2012 Freescale Semiconductor, Inc + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + */ + +#ifndef __FSL_GUTS_H__ +#define __FSL_GUTS_H__ + +#include <linux/types.h> + +/** + * Global Utility Registers. + * + * Not all registers defined in this structure are available on all chips, so + * you are expected to know whether a given register actually exists on your + * chip before you access it. + * + * Also, some registers are similar on different chips but have slightly + * different names. In these cases, one name is chosen to avoid extraneous + * #ifdefs. + */ +struct ccsr_guts { + __be32 porpllsr; /* 0x.0000 - POR PLL Ratio Status Register */ + __be32 porbmsr; /* 0x.0004 - POR Boot Mode Status Register */ + __be32 porimpscr; /* 0x.0008 - POR I/O Impedance Status and Control Register */ + __be32 pordevsr; /* 0x.000c - POR I/O Device Status Register */ + __be32 pordbgmsr; /* 0x.0010 - POR Debug Mode Status Register */ + __be32 pordevsr2; /* 0x.0014 - POR device status register 2 */ + u8 res018[0x20 - 0x18]; + __be32 porcir; /* 0x.0020 - POR Configuration Information Register */ + u8 res024[0x30 - 0x24]; + __be32 gpiocr; /* 0x.0030 - GPIO Control Register */ + u8 res034[0x40 - 0x34]; + __be32 gpoutdr; /* 0x.0040 - General-Purpose Output Data Register */ + u8 res044[0x50 - 0x44]; + __be32 gpindr; /* 0x.0050 - General-Purpose Input Data Register */ + u8 res054[0x60 - 0x54]; + __be32 pmuxcr; /* 0x.0060 - Alternate Function Signal Multiplex Control */ + __be32 pmuxcr2; /* 0x.0064 - Alternate function signal multiplex control 2 */ + __be32 dmuxcr; /* 0x.0068 - DMA Mux Control Register */ + u8 res06c[0x70 - 0x6c]; + __be32 devdisr; /* 0x.0070 - Device Disable Control */ +#define CCSR_GUTS_DEVDISR_TB1 0x00001000 +#define CCSR_GUTS_DEVDISR_TB0 0x00004000 + __be32 devdisr2; /* 0x.0074 - Device Disable Control 2 */ + u8 res078[0x7c - 0x78]; + __be32 pmjcr; /* 0x.007c - 4 Power Management Jog Control Register */ + __be32 powmgtcsr; /* 0x.0080 - Power Management Status and Control Register */ + __be32 pmrccr; /* 0x.0084 - Power Management Reset Counter Configuration Register */ + __be32 pmpdccr; /* 0x.0088 - Power Management Power Down Counter Configuration Register */ + __be32 pmcdr; /* 0x.008c - 4Power management clock disable register */ + __be32 mcpsumr; /* 0x.0090 - Machine Check Summary Register */ + __be32 rstrscr; /*
0x.0094 - Reset Request Status and Control Register */ + __be32 ectrstcr; /* 0x.0098 - Exception reset control register */ + __be32 autorstsr; /* 0x.009c - Automatic reset status register */ + __be32 pvr; /* 0x.00a0 - Processor Version Register */ + __be32 svr; /* 0x.00a4 - System Version Register */ + u8 res0a8[0xb0 - 0xa8]; + __be32 rstcr; /* 0x.00b0 - Reset Control Register */ + u8 res0b4[0xc0 - 0xb4]; + __be32 iovselsr; /* 0x.00c0 - I/O voltage select status register + Called 'elbcvselcr' on 86xx SOCs */ + u8 res0c4[0x100 - 0xc4]; + __be32 rcwsr[16]; /* 0x.0100 - Reset Control Word Status registers + There are 16 registers */ + u8 res140[0x224 - 0x140]; + __be32 iodelay1; /* 0x.0224 - IO delay control register 1 */ + __be32 iodelay2; /* 0x.0228 - IO delay control register 2 */ + u8 res22c[0x604 - 0x22c]; + __be32 pamubypenr; /* 0x.604 - PAMU bypass enable register */ + u8 res608[0x800 - 0x608]; + __be32 clkdvdr; /* 0x.0800 - Clock Divide Register */ + u8 res804[0x900 - 0x804]; + __be32 ircr; /* 0x.0900 - Infrared Control Register */ + u8 res904[0x908 - 0x904]; + __be32 dmacr; /* 0x.0908 - DMA Control Register */ + u8 res90c[0x914 - 0x90c]; + __be32 elbccr; /* 0x.0914 - eLBC Control Register */ + u8 res918[0xb20 - 0x918]; + __be32 ddr1clkdr; /* 0x.0b20 - DDR1 Clock Disable Register */ + __be32 ddr2clkdr; /* 0x.0b24 - DDR2 Clock Disable Register */ + __be32 ddrclkdr; /* 0x.0b28 - DDR Clock Disable Register */ + u8 resb2c[0xe00 - 0xb2c]; + __be32 clkocr; /* 0x.0e00 - Clock Out Select Register */ + u8 rese04[0xe10 - 0xe04]; + __be32 ddrdllcr; /* 0x.0e10 - DDR DLL Control Register */ + u8 rese14[0xe20 - 0xe14]; + __be32 lbcdllcr; /* 0x.0e20 - LBC DLL Control Register */ + __be32 cpfor; /* 0x.0e24 - L2 charge pump fuse override register */ + u8 rese28[0xf04 - 0xe28]; + __be32 srds1cr0; /* 0x.0f04 - SerDes1 Control Register 0 */ + __be32 srds1cr1; /* 0x.0f08 - SerDes1 Control Register 0 */ + u8 resf0c[0xf2c - 0xf0c]; + __be32 itcr; /* 0x.0f2c - Internal transaction control register */ + u8 resf30[0xf40 - 0xf30]; + __be32 srds2cr0; /* 0x.0f40 - SerDes2 Control Register 0 */ + __be32 srds2cr1; /* 0x.0f44 - SerDes2 Control Register 0 */ +} __attribute__ ((packed)); + + +/* Alternate function signal multiplex control */ +#define MPC85xx_PMUXCR_QE(x) (0x8000 >> (x)) + +#ifdef CONFIG_PPC_86xx + +#define CCSR_GUTS_DMACR_DEV_SSI 0 /* DMA controller/channel set to SSI */ +#define CCSR_GUTS_DMACR_DEV_IR 1 /* DMA controller/channel set to IR */ + +/* + * Set the DMACR register in the GUTS + * + * The DMACR register determines the source of initiated transfers for each + * channel on each DMA controller. Rather than have a bunch of repetitive + * macros for the bit patterns, we just have a function that calculates + * them. 
+ * + * guts: Pointer to GUTS structure + * co: The DMA controller (0 or 1) + * ch: The channel on the DMA controller (0, 1, 2, or 3) + * device: The device to set as the source (CCSR_GUTS_DMACR_DEV_xx) + */ +static inline void guts_set_dmacr(struct ccsr_guts __iomem *guts, + unsigned int co, unsigned int ch, unsigned int device) +{ + unsigned int shift = 16 + (8 * (1 - co) + 2 * (3 - ch)); + + clrsetbits_be32(&guts->dmacr, 3 << shift, device << shift); +} + +#define CCSR_GUTS_PMUXCR_LDPSEL 0x00010000 +#define CCSR_GUTS_PMUXCR_SSI1_MASK 0x0000C000 /* Bitmask for SSI1 */ +#define CCSR_GUTS_PMUXCR_SSI1_LA 0x00000000 /* Latched address */ +#define CCSR_GUTS_PMUXCR_SSI1_HI 0x00004000 /* High impedance */ +#define CCSR_GUTS_PMUXCR_SSI1_SSI 0x00008000 /* Used for SSI1 */ +#define CCSR_GUTS_PMUXCR_SSI2_MASK 0x00003000 /* Bitmask for SSI2 */ +#define CCSR_GUTS_PMUXCR_SSI2_LA 0x00000000 /* Latched address */ +#define CCSR_GUTS_PMUXCR_SSI2_HI 0x00001000 /* High impedance */ +#define CCSR_GUTS_PMUXCR_SSI2_SSI 0x00002000 /* Used for SSI2 */ +#define CCSR_GUTS_PMUXCR_LA_22_25_LA 0x00000000 /* Latched Address */ +#define CCSR_GUTS_PMUXCR_LA_22_25_HI 0x00000400 /* High impedance */ +#define CCSR_GUTS_PMUXCR_DBGDRV 0x00000200 /* Signals not driven */ +#define CCSR_GUTS_PMUXCR_DMA2_0 0x00000008 +#define CCSR_GUTS_PMUXCR_DMA2_3 0x00000004 +#define CCSR_GUTS_PMUXCR_DMA1_0 0x00000002 +#define CCSR_GUTS_PMUXCR_DMA1_3 0x00000001 + +/* + * Set the DMA external control bits in the GUTS + * + * The DMA external control bits in the PMUXCR are only meaningful for + * channels 0 and 3. Any other channels are ignored. + * + * guts: Pointer to GUTS structure + * co: The DMA controller (0 or 1) + * ch: The channel on the DMA controller (0, 1, 2, or 3) + * value: the new value for the bit (0 or 1) + */ +static inline void guts_set_pmuxcr_dma(struct ccsr_guts __iomem *guts, + unsigned int co, unsigned int ch, unsigned int value) +{ + if ((ch == 0) || (ch == 3)) { + unsigned int shift = 2 * (co + 1) - (ch & 1) - 1; + + clrsetbits_be32(&guts->pmuxcr, 1 << shift, value << shift); + } +} + +#define CCSR_GUTS_CLKDVDR_PXCKEN 0x80000000 +#define CCSR_GUTS_CLKDVDR_SSICKEN 0x20000000 +#define CCSR_GUTS_CLKDVDR_PXCKINV 0x10000000 +#define CCSR_GUTS_CLKDVDR_PXCKDLY_SHIFT 25 +#define CCSR_GUTS_CLKDVDR_PXCKDLY_MASK 0x06000000 +#define CCSR_GUTS_CLKDVDR_PXCKDLY(x) \ + (((x) & 3) << CCSR_GUTS_CLKDVDR_PXCKDLY_SHIFT) +#define CCSR_GUTS_CLKDVDR_PXCLK_SHIFT 16 +#define CCSR_GUTS_CLKDVDR_PXCLK_MASK 0x001F0000 +#define CCSR_GUTS_CLKDVDR_PXCLK(x) (((x) & 31) << CCSR_GUTS_CLKDVDR_PXCLK_SHIFT) +#define CCSR_GUTS_CLKDVDR_SSICLK_MASK 0x000000FF +#define CCSR_GUTS_CLKDVDR_SSICLK(x) ((x) & CCSR_GUTS_CLKDVDR_SSICLK_MASK) + +#endif + +#endif diff --git a/include/linux/fwnode.h b/include/linux/fwnode.h index 0408545bce42..851671742790 100644 --- a/include/linux/fwnode.h +++ b/include/linux/fwnode.h @@ -16,7 +16,9 @@ enum fwnode_type { FWNODE_INVALID = 0, FWNODE_OF, FWNODE_ACPI, + FWNODE_ACPI_DATA, FWNODE_PDATA, + FWNODE_IRQCHIP, }; struct fwnode_handle { diff --git a/include/linux/genetlink.h b/include/linux/genetlink.h index 09460d6d6682..a4c61cbce777 100644 --- a/include/linux/genetlink.h +++ b/include/linux/genetlink.h @@ -8,7 +8,7 @@ extern void genl_lock(void); extern void genl_unlock(void); #ifdef CONFIG_LOCKDEP -extern int lockdep_genl_is_held(void); +extern bool lockdep_genl_is_held(void); #endif /* for synchronisation between af_netlink and genetlink */ diff --git a/include/linux/genhd.h b/include/linux/genhd.h index 2adbfa6d02bc..847cc1d91634 
100644 --- a/include/linux/genhd.h +++ b/include/linux/genhd.h @@ -163,6 +163,18 @@ struct disk_part_tbl { struct disk_events; +#if defined(CONFIG_BLK_DEV_INTEGRITY) + +struct blk_integrity { + struct blk_integrity_profile *profile; + unsigned char flags; + unsigned char tuple_size; + unsigned char interval_exp; + unsigned char tag_size; +}; + +#endif /* CONFIG_BLK_DEV_INTEGRITY */ + struct gendisk { /* major, first_minor and minors are input parameters only, * don't use directly. Use disk_devt() and disk_max_parts(). @@ -198,8 +210,8 @@ struct gendisk { atomic_t sync_io; /* RAID */ struct disk_events *ev; #ifdef CONFIG_BLK_DEV_INTEGRITY - struct blk_integrity *integrity; -#endif + struct kobject integrity_kobj; +#endif /* CONFIG_BLK_DEV_INTEGRITY */ int node_id; }; @@ -727,6 +739,16 @@ static inline void part_nr_sects_write(struct hd_struct *part, sector_t size) #endif } +#if defined(CONFIG_BLK_DEV_INTEGRITY) +extern void blk_integrity_add(struct gendisk *); +extern void blk_integrity_del(struct gendisk *); +extern void blk_integrity_revalidate(struct gendisk *); +#else /* CONFIG_BLK_DEV_INTEGRITY */ +static inline void blk_integrity_add(struct gendisk *disk) { } +static inline void blk_integrity_del(struct gendisk *disk) { } +static inline void blk_integrity_revalidate(struct gendisk *disk) { } +#endif /* CONFIG_BLK_DEV_INTEGRITY */ + #else /* CONFIG_BLOCK */ static inline void printk_all_partitions(void) { } diff --git a/include/linux/gpio/consumer.h b/include/linux/gpio/consumer.h index 14cac67c2012..fb0fde686cb1 100644 --- a/include/linux/gpio/consumer.h +++ b/include/linux/gpio/consumer.h @@ -400,6 +400,7 @@ static inline struct gpio_desc *gpio_to_desc(unsigned gpio) { return ERR_PTR(-EINVAL); } + static inline int desc_to_gpio(const struct gpio_desc *desc) { /* GPIO can never have been requested */ diff --git a/include/linux/gpio/driver.h b/include/linux/gpio/driver.h index 1aed31c5ffba..d1baebf350d8 100644 --- a/include/linux/gpio/driver.h +++ b/include/linux/gpio/driver.h @@ -206,6 +206,9 @@ int _gpiochip_irqchip_add(struct gpio_chip *gpiochip, #endif /* CONFIG_GPIOLIB_IRQCHIP */ +int gpiochip_generic_request(struct gpio_chip *chip, unsigned offset); +void gpiochip_generic_free(struct gpio_chip *chip, unsigned offset); + #ifdef CONFIG_PINCTRL /** diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h index 5e35379f58a5..685c262e0be8 100644 --- a/include/linux/hugetlb.h +++ b/include/linux/hugetlb.h @@ -483,6 +483,17 @@ static inline spinlock_t *huge_pte_lockptr(struct hstate *h, #define hugepages_supported() (HPAGE_SHIFT != 0) #endif +void hugetlb_report_usage(struct seq_file *m, struct mm_struct *mm); + +static inline void hugetlb_count_add(long l, struct mm_struct *mm) +{ + atomic_long_add(l, &mm->hugetlb_usage); +} + +static inline void hugetlb_count_sub(long l, struct mm_struct *mm) +{ + atomic_long_sub(l, &mm->hugetlb_usage); +} #else /* CONFIG_HUGETLB_PAGE */ struct hstate {}; #define alloc_huge_page(v, a, r) NULL @@ -519,6 +530,14 @@ static inline spinlock_t *huge_pte_lockptr(struct hstate *h, { return &mm->page_table_lock; } + +static inline void hugetlb_report_usage(struct seq_file *f, struct mm_struct *m) +{ +} + +static inline void hugetlb_count_sub(long l, struct mm_struct *mm) +{ +} #endif /* CONFIG_HUGETLB_PAGE */ static inline spinlock_t *huge_pte_lock(struct hstate *h, diff --git a/include/linux/hugetlb_cgroup.h b/include/linux/hugetlb_cgroup.h index bcc853eccc85..7edd30515298 100644 --- a/include/linux/hugetlb_cgroup.h +++ b/include/linux/hugetlb_cgroup.h 
@@ -48,9 +48,7 @@ int set_hugetlb_cgroup(struct page *page, struct hugetlb_cgroup *h_cg) static inline bool hugetlb_cgroup_disabled(void) { - if (hugetlb_cgrp_subsys.disabled) - return true; - return false; + return !cgroup_subsys_enabled(hugetlb_cgrp_subsys); } extern int hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages, diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h index 54733d5b503e..8fdc17b84739 100644 --- a/include/linux/hyperv.h +++ b/include/linux/hyperv.h @@ -26,6 +26,7 @@ #define _HYPERV_H #include <uapi/linux/hyperv.h> +#include <uapi/asm/hyperv.h> #include <linux/types.h> #include <linux/scatterlist.h> diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h index cfa906f28b7a..452c0b0d2f32 100644 --- a/include/linux/ieee80211.h +++ b/include/linux/ieee80211.h @@ -121,7 +121,7 @@ #define IEEE80211_MAX_SN IEEE80211_SN_MASK #define IEEE80211_SN_MODULO (IEEE80211_MAX_SN + 1) -static inline int ieee80211_sn_less(u16 sn1, u16 sn2) +static inline bool ieee80211_sn_less(u16 sn1, u16 sn2) { return ((sn1 - sn2) & IEEE80211_SN_MASK) > (IEEE80211_SN_MODULO >> 1); } @@ -250,7 +250,7 @@ struct ieee80211_qos_hdr { * ieee80211_has_tods - check if IEEE80211_FCTL_TODS is set * @fc: frame control bytes in little-endian byteorder */ -static inline int ieee80211_has_tods(__le16 fc) +static inline bool ieee80211_has_tods(__le16 fc) { return (fc & cpu_to_le16(IEEE80211_FCTL_TODS)) != 0; } @@ -259,7 +259,7 @@ static inline int ieee80211_has_tods(__le16 fc) * ieee80211_has_fromds - check if IEEE80211_FCTL_FROMDS is set * @fc: frame control bytes in little-endian byteorder */ -static inline int ieee80211_has_fromds(__le16 fc) +static inline bool ieee80211_has_fromds(__le16 fc) { return (fc & cpu_to_le16(IEEE80211_FCTL_FROMDS)) != 0; } @@ -268,7 +268,7 @@ static inline int ieee80211_has_fromds(__le16 fc) * ieee80211_has_a4 - check if IEEE80211_FCTL_TODS and IEEE80211_FCTL_FROMDS are set * @fc: frame control bytes in little-endian byteorder */ -static inline int ieee80211_has_a4(__le16 fc) +static inline bool ieee80211_has_a4(__le16 fc) { __le16 tmp = cpu_to_le16(IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS); return (fc & tmp) == tmp; @@ -278,7 +278,7 @@ static inline int ieee80211_has_a4(__le16 fc) * ieee80211_has_morefrags - check if IEEE80211_FCTL_MOREFRAGS is set * @fc: frame control bytes in little-endian byteorder */ -static inline int ieee80211_has_morefrags(__le16 fc) +static inline bool ieee80211_has_morefrags(__le16 fc) { return (fc & cpu_to_le16(IEEE80211_FCTL_MOREFRAGS)) != 0; } @@ -287,7 +287,7 @@ static inline int ieee80211_has_morefrags(__le16 fc) * ieee80211_has_retry - check if IEEE80211_FCTL_RETRY is set * @fc: frame control bytes in little-endian byteorder */ -static inline int ieee80211_has_retry(__le16 fc) +static inline bool ieee80211_has_retry(__le16 fc) { return (fc & cpu_to_le16(IEEE80211_FCTL_RETRY)) != 0; } @@ -296,7 +296,7 @@ static inline int ieee80211_has_retry(__le16 fc) * ieee80211_has_pm - check if IEEE80211_FCTL_PM is set * @fc: frame control bytes in little-endian byteorder */ -static inline int ieee80211_has_pm(__le16 fc) +static inline bool ieee80211_has_pm(__le16 fc) { return (fc & cpu_to_le16(IEEE80211_FCTL_PM)) != 0; } @@ -305,7 +305,7 @@ static inline int ieee80211_has_pm(__le16 fc) * ieee80211_has_moredata - check if IEEE80211_FCTL_MOREDATA is set * @fc: frame control bytes in little-endian byteorder */ -static inline int ieee80211_has_moredata(__le16 fc) +static inline bool ieee80211_has_moredata(__le16 fc) { return (fc & 
cpu_to_le16(IEEE80211_FCTL_MOREDATA)) != 0; } @@ -314,7 +314,7 @@ static inline int ieee80211_has_moredata(__le16 fc) * ieee80211_has_protected - check if IEEE80211_FCTL_PROTECTED is set * @fc: frame control bytes in little-endian byteorder */ -static inline int ieee80211_has_protected(__le16 fc) +static inline bool ieee80211_has_protected(__le16 fc) { return (fc & cpu_to_le16(IEEE80211_FCTL_PROTECTED)) != 0; } @@ -323,7 +323,7 @@ static inline int ieee80211_has_protected(__le16 fc) * ieee80211_has_order - check if IEEE80211_FCTL_ORDER is set * @fc: frame control bytes in little-endian byteorder */ -static inline int ieee80211_has_order(__le16 fc) +static inline bool ieee80211_has_order(__le16 fc) { return (fc & cpu_to_le16(IEEE80211_FCTL_ORDER)) != 0; } @@ -332,7 +332,7 @@ static inline int ieee80211_has_order(__le16 fc) * ieee80211_is_mgmt - check if type is IEEE80211_FTYPE_MGMT * @fc: frame control bytes in little-endian byteorder */ -static inline int ieee80211_is_mgmt(__le16 fc) +static inline bool ieee80211_is_mgmt(__le16 fc) { return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE)) == cpu_to_le16(IEEE80211_FTYPE_MGMT); @@ -342,7 +342,7 @@ static inline int ieee80211_is_mgmt(__le16 fc) * ieee80211_is_ctl - check if type is IEEE80211_FTYPE_CTL * @fc: frame control bytes in little-endian byteorder */ -static inline int ieee80211_is_ctl(__le16 fc) +static inline bool ieee80211_is_ctl(__le16 fc) { return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE)) == cpu_to_le16(IEEE80211_FTYPE_CTL); @@ -352,7 +352,7 @@ static inline int ieee80211_is_ctl(__le16 fc) * ieee80211_is_data - check if type is IEEE80211_FTYPE_DATA * @fc: frame control bytes in little-endian byteorder */ -static inline int ieee80211_is_data(__le16 fc) +static inline bool ieee80211_is_data(__le16 fc) { return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE)) == cpu_to_le16(IEEE80211_FTYPE_DATA); @@ -362,7 +362,7 @@ static inline int ieee80211_is_data(__le16 fc) * ieee80211_is_data_qos - check if type is IEEE80211_FTYPE_DATA and IEEE80211_STYPE_QOS_DATA is set * @fc: frame control bytes in little-endian byteorder */ -static inline int ieee80211_is_data_qos(__le16 fc) +static inline bool ieee80211_is_data_qos(__le16 fc) { /* * mask with QOS_DATA rather than IEEE80211_FCTL_STYPE as we just need @@ -376,7 +376,7 @@ static inline int ieee80211_is_data_qos(__le16 fc) * ieee80211_is_data_present - check if type is IEEE80211_FTYPE_DATA and has data * @fc: frame control bytes in little-endian byteorder */ -static inline int ieee80211_is_data_present(__le16 fc) +static inline bool ieee80211_is_data_present(__le16 fc) { /* * mask with 0x40 and test that that bit is clear to only return true @@ -390,7 +390,7 @@ static inline int ieee80211_is_data_present(__le16 fc) * ieee80211_is_assoc_req - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_ASSOC_REQ * @fc: frame control bytes in little-endian byteorder */ -static inline int ieee80211_is_assoc_req(__le16 fc) +static inline bool ieee80211_is_assoc_req(__le16 fc) { return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) == cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_ASSOC_REQ); @@ -400,7 +400,7 @@ static inline int ieee80211_is_assoc_req(__le16 fc) * ieee80211_is_assoc_resp - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_ASSOC_RESP * @fc: frame control bytes in little-endian byteorder */ -static inline int ieee80211_is_assoc_resp(__le16 fc) +static inline bool ieee80211_is_assoc_resp(__le16 fc) { return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) == 
cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_ASSOC_RESP); @@ -410,7 +410,7 @@ static inline int ieee80211_is_assoc_resp(__le16 fc) * ieee80211_is_reassoc_req - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_REASSOC_REQ * @fc: frame control bytes in little-endian byteorder */ -static inline int ieee80211_is_reassoc_req(__le16 fc) +static inline bool ieee80211_is_reassoc_req(__le16 fc) { return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) == cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_REASSOC_REQ); @@ -420,7 +420,7 @@ static inline int ieee80211_is_reassoc_req(__le16 fc) * ieee80211_is_reassoc_resp - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_REASSOC_RESP * @fc: frame control bytes in little-endian byteorder */ -static inline int ieee80211_is_reassoc_resp(__le16 fc) +static inline bool ieee80211_is_reassoc_resp(__le16 fc) { return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) == cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_REASSOC_RESP); @@ -430,7 +430,7 @@ static inline int ieee80211_is_reassoc_resp(__le16 fc) * ieee80211_is_probe_req - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_PROBE_REQ * @fc: frame control bytes in little-endian byteorder */ -static inline int ieee80211_is_probe_req(__le16 fc) +static inline bool ieee80211_is_probe_req(__le16 fc) { return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) == cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_PROBE_REQ); @@ -440,7 +440,7 @@ static inline int ieee80211_is_probe_req(__le16 fc) * ieee80211_is_probe_resp - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_PROBE_RESP * @fc: frame control bytes in little-endian byteorder */ -static inline int ieee80211_is_probe_resp(__le16 fc) +static inline bool ieee80211_is_probe_resp(__le16 fc) { return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) == cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_PROBE_RESP); @@ -450,7 +450,7 @@ static inline int ieee80211_is_probe_resp(__le16 fc) * ieee80211_is_beacon - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_BEACON * @fc: frame control bytes in little-endian byteorder */ -static inline int ieee80211_is_beacon(__le16 fc) +static inline bool ieee80211_is_beacon(__le16 fc) { return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) == cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_BEACON); @@ -460,7 +460,7 @@ static inline int ieee80211_is_beacon(__le16 fc) * ieee80211_is_atim - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_ATIM * @fc: frame control bytes in little-endian byteorder */ -static inline int ieee80211_is_atim(__le16 fc) +static inline bool ieee80211_is_atim(__le16 fc) { return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) == cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_ATIM); @@ -470,7 +470,7 @@ static inline int ieee80211_is_atim(__le16 fc) * ieee80211_is_disassoc - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_DISASSOC * @fc: frame control bytes in little-endian byteorder */ -static inline int ieee80211_is_disassoc(__le16 fc) +static inline bool ieee80211_is_disassoc(__le16 fc) { return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) == cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_DISASSOC); @@ -480,7 +480,7 @@ static inline int ieee80211_is_disassoc(__le16 fc) * ieee80211_is_auth - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_AUTH * @fc: frame control bytes in little-endian byteorder */ -static inline int ieee80211_is_auth(__le16 fc) +static inline bool 
ieee80211_is_auth(__le16 fc) { return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) == cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_AUTH); @@ -490,7 +490,7 @@ static inline int ieee80211_is_auth(__le16 fc) * ieee80211_is_deauth - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_DEAUTH * @fc: frame control bytes in little-endian byteorder */ -static inline int ieee80211_is_deauth(__le16 fc) +static inline bool ieee80211_is_deauth(__le16 fc) { return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) == cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_DEAUTH); @@ -500,7 +500,7 @@ static inline int ieee80211_is_deauth(__le16 fc) * ieee80211_is_action - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_ACTION * @fc: frame control bytes in little-endian byteorder */ -static inline int ieee80211_is_action(__le16 fc) +static inline bool ieee80211_is_action(__le16 fc) { return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) == cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_ACTION); @@ -510,7 +510,7 @@ static inline int ieee80211_is_action(__le16 fc) * ieee80211_is_back_req - check if IEEE80211_FTYPE_CTL && IEEE80211_STYPE_BACK_REQ * @fc: frame control bytes in little-endian byteorder */ -static inline int ieee80211_is_back_req(__le16 fc) +static inline bool ieee80211_is_back_req(__le16 fc) { return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) == cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_BACK_REQ); @@ -520,7 +520,7 @@ static inline int ieee80211_is_back_req(__le16 fc) * ieee80211_is_back - check if IEEE80211_FTYPE_CTL && IEEE80211_STYPE_BACK * @fc: frame control bytes in little-endian byteorder */ -static inline int ieee80211_is_back(__le16 fc) +static inline bool ieee80211_is_back(__le16 fc) { return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) == cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_BACK); @@ -530,7 +530,7 @@ static inline int ieee80211_is_back(__le16 fc) * ieee80211_is_pspoll - check if IEEE80211_FTYPE_CTL && IEEE80211_STYPE_PSPOLL * @fc: frame control bytes in little-endian byteorder */ -static inline int ieee80211_is_pspoll(__le16 fc) +static inline bool ieee80211_is_pspoll(__le16 fc) { return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) == cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_PSPOLL); @@ -540,7 +540,7 @@ static inline int ieee80211_is_pspoll(__le16 fc) * ieee80211_is_rts - check if IEEE80211_FTYPE_CTL && IEEE80211_STYPE_RTS * @fc: frame control bytes in little-endian byteorder */ -static inline int ieee80211_is_rts(__le16 fc) +static inline bool ieee80211_is_rts(__le16 fc) { return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) == cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_RTS); @@ -550,7 +550,7 @@ static inline int ieee80211_is_rts(__le16 fc) * ieee80211_is_cts - check if IEEE80211_FTYPE_CTL && IEEE80211_STYPE_CTS * @fc: frame control bytes in little-endian byteorder */ -static inline int ieee80211_is_cts(__le16 fc) +static inline bool ieee80211_is_cts(__le16 fc) { return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) == cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_CTS); @@ -560,7 +560,7 @@ static inline int ieee80211_is_cts(__le16 fc) * ieee80211_is_ack - check if IEEE80211_FTYPE_CTL && IEEE80211_STYPE_ACK * @fc: frame control bytes in little-endian byteorder */ -static inline int ieee80211_is_ack(__le16 fc) +static inline bool ieee80211_is_ack(__le16 fc) { return (fc & 
cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) == cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_ACK); @@ -570,7 +570,7 @@ static inline int ieee80211_is_ack(__le16 fc) * ieee80211_is_cfend - check if IEEE80211_FTYPE_CTL && IEEE80211_STYPE_CFEND * @fc: frame control bytes in little-endian byteorder */ -static inline int ieee80211_is_cfend(__le16 fc) +static inline bool ieee80211_is_cfend(__le16 fc) { return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) == cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_CFEND); @@ -580,7 +580,7 @@ static inline int ieee80211_is_cfend(__le16 fc) * ieee80211_is_cfendack - check if IEEE80211_FTYPE_CTL && IEEE80211_STYPE_CFENDACK * @fc: frame control bytes in little-endian byteorder */ -static inline int ieee80211_is_cfendack(__le16 fc) +static inline bool ieee80211_is_cfendack(__le16 fc) { return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) == cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_CFENDACK); @@ -590,7 +590,7 @@ static inline int ieee80211_is_cfendack(__le16 fc) * ieee80211_is_nullfunc - check if frame is a regular (non-QoS) nullfunc frame * @fc: frame control bytes in little-endian byteorder */ -static inline int ieee80211_is_nullfunc(__le16 fc) +static inline bool ieee80211_is_nullfunc(__le16 fc) { return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) == cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_NULLFUNC); @@ -600,7 +600,7 @@ static inline int ieee80211_is_nullfunc(__le16 fc) * ieee80211_is_qos_nullfunc - check if frame is a QoS nullfunc frame * @fc: frame control bytes in little-endian byteorder */ -static inline int ieee80211_is_qos_nullfunc(__le16 fc) +static inline bool ieee80211_is_qos_nullfunc(__le16 fc) { return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) == cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_QOS_NULLFUNC); @@ -624,7 +624,7 @@ static inline bool ieee80211_is_bufferable_mmpdu(__le16 fc) * ieee80211_is_first_frag - check if IEEE80211_SCTL_FRAG is not set * @seq_ctrl: frame sequence control bytes in little-endian byteorder */ -static inline int ieee80211_is_first_frag(__le16 seq_ctrl) +static inline bool ieee80211_is_first_frag(__le16 seq_ctrl) { return (seq_ctrl & cpu_to_le16(IEEE80211_SCTL_FRAG)) == 0; } @@ -1379,6 +1379,7 @@ struct ieee80211_ht_operation { /* block-ack parameters */ +#define IEEE80211_ADDBA_PARAM_AMSDU_MASK 0x0001 #define IEEE80211_ADDBA_PARAM_POLICY_MASK 0x0002 #define IEEE80211_ADDBA_PARAM_TID_MASK 0x003C #define IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK 0xFFC0 @@ -1745,8 +1746,7 @@ enum ieee80211_eid { WLAN_EID_TIM = 5, WLAN_EID_IBSS_PARAMS = 6, WLAN_EID_COUNTRY = 7, - WLAN_EID_HP_PARAMS = 8, - WLAN_EID_HP_TABLE = 9, + /* 8, 9 reserved */ WLAN_EID_REQUEST = 10, WLAN_EID_QBSS_LOAD = 11, WLAN_EID_EDCA_PARAM_SET = 12, @@ -1932,6 +1932,8 @@ enum ieee80211_category { WLAN_CATEGORY_HT = 7, WLAN_CATEGORY_SA_QUERY = 8, WLAN_CATEGORY_PROTECTED_DUAL_OF_ACTION = 9, + WLAN_CATEGORY_WNM = 10, + WLAN_CATEGORY_WNM_UNPROTECTED = 11, WLAN_CATEGORY_TDLS = 12, WLAN_CATEGORY_MESH_ACTION = 13, WLAN_CATEGORY_MULTIHOP_ACTION = 14, @@ -2396,7 +2398,10 @@ static inline bool _ieee80211_is_robust_mgmt_frame(struct ieee80211_hdr *hdr) category = ((u8 *) hdr) + 24; return *category != WLAN_CATEGORY_PUBLIC && *category != WLAN_CATEGORY_HT && + *category != WLAN_CATEGORY_WNM_UNPROTECTED && *category != WLAN_CATEGORY_SELF_PROTECTED && + *category != WLAN_CATEGORY_UNPROT_DMG && + *category != WLAN_CATEGORY_VHT && *category != 
WLAN_CATEGORY_VENDOR_SPECIFIC; } diff --git a/include/linux/ieee802154.h b/include/linux/ieee802154.h index 1dc1f4ed4001..d3e415674dac 100644 --- a/include/linux/ieee802154.h +++ b/include/linux/ieee802154.h @@ -25,12 +25,22 @@ #include <linux/types.h> #include <linux/random.h> -#include <asm/byteorder.h> #define IEEE802154_MTU 127 #define IEEE802154_ACK_PSDU_LEN 5 #define IEEE802154_MIN_PSDU_LEN 9 #define IEEE802154_FCS_LEN 2 +#define IEEE802154_MAX_AUTH_TAG_LEN 16 + +/* General MAC frame format: + * 2 bytes: Frame Control + * 1 byte: Sequence Number + * 20 bytes: Addressing fields + * 14 bytes: Auxiliary Security Header + */ +#define IEEE802154_MAX_HEADER_LEN (2 + 1 + 20 + 14) +#define IEEE802154_MIN_HEADER_LEN (IEEE802154_ACK_PSDU_LEN - \ + IEEE802154_FCS_LEN) #define IEEE802154_PAN_ID_BROADCAST 0xffff #define IEEE802154_ADDR_SHORT_BROADCAST 0xffff @@ -205,6 +215,41 @@ enum { IEEE802154_SCAN_IN_PROGRESS = 0xfc, }; +/* frame control handling */ +#define IEEE802154_FCTL_FTYPE 0x0003 +#define IEEE802154_FCTL_ACKREQ 0x0020 +#define IEEE802154_FCTL_INTRA_PAN 0x0040 + +#define IEEE802154_FTYPE_DATA 0x0001 + +/* + * ieee802154_is_data - check if type is IEEE802154_FTYPE_DATA + * @fc: frame control bytes in little-endian byteorder + */ +static inline int ieee802154_is_data(__le16 fc) +{ + return (fc & cpu_to_le16(IEEE802154_FCTL_FTYPE)) == + cpu_to_le16(IEEE802154_FTYPE_DATA); +} + +/** + * ieee802154_is_ackreq - check if acknowledgment request bit is set + * @fc: frame control bytes in little-endian byteorder + */ +static inline bool ieee802154_is_ackreq(__le16 fc) +{ + return fc & cpu_to_le16(IEEE802154_FCTL_ACKREQ); +} + +/** + * ieee802154_is_intra_pan - check if intra pan id communication + * @fc: frame control bytes in little-endian byteorder + */ +static inline bool ieee802154_is_intra_pan(__le16 fc) +{ + return fc & cpu_to_le16(IEEE802154_FCTL_INTRA_PAN); +} + /** * ieee802154_is_valid_psdu_len - check if psdu len is valid * available lengths: diff --git a/include/linux/if_bridge.h b/include/linux/if_bridge.h index dad8b00beed2..a338a688ee4a 100644 --- a/include/linux/if_bridge.h +++ b/include/linux/if_bridge.h @@ -46,6 +46,12 @@ struct br_ip_list { #define BR_LEARNING_SYNC BIT(9) #define BR_PROXYARP_WIFI BIT(10) +/* values as per ieee8021QBridgeFdbAgingTime */ +#define BR_MIN_AGEING_TIME (10 * HZ) +#define BR_MAX_AGEING_TIME (1000000 * HZ) + +#define BR_DEFAULT_AGEING_TIME (300 * HZ) + extern void brioctl_set(int (*ioctl_hook)(struct net *, unsigned int, void __user *)); typedef int br_should_route_hook_t(struct sk_buff *skb); diff --git a/include/linux/if_link.h b/include/linux/if_link.h index ae5d0d22955d..f923d15b432c 100644 --- a/include/linux/if_link.h +++ b/include/linux/if_link.h @@ -24,5 +24,6 @@ struct ifla_vf_info { __u32 min_tx_rate; __u32 max_tx_rate; __u32 rss_query_en; + __u32 trusted; }; #endif /* _LINUX_IF_LINK_H */ diff --git a/include/linux/igmp.h b/include/linux/igmp.h index 908429216d9f..9c9de11549a7 100644 --- a/include/linux/igmp.h +++ b/include/linux/igmp.h @@ -110,7 +110,7 @@ struct ip_mc_list { #define IGMPV3_QQIC(value) IGMPV3_EXP(0x80, 4, 3, value) #define IGMPV3_MRC(value) IGMPV3_EXP(0x80, 4, 3, value) -extern int ip_check_mc_rcu(struct in_device *dev, __be32 mc_addr, __be32 src_addr, u16 proto); +extern int ip_check_mc_rcu(struct in_device *dev, __be32 mc_addr, __be32 src_addr, u8 proto); extern int igmp_rcv(struct sk_buff *); extern int ip_mc_join_group(struct sock *sk, struct ip_mreqn *imr); extern int ip_mc_leave_group(struct sock *sk, struct ip_mreqn 
*imr); diff --git a/include/linux/iio/common/st_sensors.h b/include/linux/iio/common/st_sensors.h index 3c17cd7fdf06..2fe939c73cd2 100644 --- a/include/linux/iio/common/st_sensors.h +++ b/include/linux/iio/common/st_sensors.h @@ -271,6 +271,10 @@ void st_sensors_power_enable(struct iio_dev *indio_dev); void st_sensors_power_disable(struct iio_dev *indio_dev); +int st_sensors_debugfs_reg_access(struct iio_dev *indio_dev, + unsigned reg, unsigned writeval, + unsigned *readval); + int st_sensors_set_odr(struct iio_dev *indio_dev, unsigned int odr); int st_sensors_set_dataready_irq(struct iio_dev *indio_dev, bool enable); diff --git a/include/linux/iio/iio.h b/include/linux/iio/iio.h index 7bb7f673cb3f..19c94c9acc81 100644 --- a/include/linux/iio/iio.h +++ b/include/linux/iio/iio.h @@ -294,6 +294,7 @@ static inline s64 iio_get_time_ns(void) #define INDIO_BUFFER_TRIGGERED 0x02 #define INDIO_BUFFER_SOFTWARE 0x04 #define INDIO_BUFFER_HARDWARE 0x08 +#define INDIO_EVENT_TRIGGERED 0x10 #define INDIO_ALL_BUFFER_MODES \ (INDIO_BUFFER_TRIGGERED | INDIO_BUFFER_HARDWARE | INDIO_BUFFER_SOFTWARE) @@ -457,6 +458,7 @@ struct iio_buffer_setup_ops { * @scan_index_timestamp:[INTERN] cache of the index to the timestamp * @trig: [INTERN] current device trigger (buffer modes) * @pollfunc: [DRIVER] function run on trigger being received + * @pollfunc_event: [DRIVER] function run on events trigger being received * @channels: [DRIVER] channel specification structure table * @num_channels: [DRIVER] number of channels specified in @channels. * @channel_attr_list: [INTERN] keep track of automatically created channel @@ -495,6 +497,7 @@ struct iio_dev { unsigned scan_index_timestamp; struct iio_trigger *trig; struct iio_poll_func *pollfunc; + struct iio_poll_func *pollfunc_event; struct iio_chan_spec const *channels; int num_channels; diff --git a/include/linux/iio/triggered_event.h b/include/linux/iio/triggered_event.h new file mode 100644 index 000000000000..8fe8537085bb --- /dev/null +++ b/include/linux/iio/triggered_event.h @@ -0,0 +1,11 @@ +#ifndef _LINUX_IIO_TRIGGERED_EVENT_H_ +#define _LINUX_IIO_TRIGGERED_EVENT_H_ + +#include <linux/interrupt.h> + +int iio_triggered_event_setup(struct iio_dev *indio_dev, + irqreturn_t (*h)(int irq, void *p), + irqreturn_t (*thread)(int irq, void *p)); +void iio_triggered_event_cleanup(struct iio_dev *indio_dev); + +#endif diff --git a/include/linux/inetdevice.h b/include/linux/inetdevice.h index a4328cea376a..ee971f335a8b 100644 --- a/include/linux/inetdevice.h +++ b/include/linux/inetdevice.h @@ -171,7 +171,7 @@ __be32 inet_confirm_addr(struct net *net, struct in_device *in_dev, __be32 dst, __be32 local, int scope); struct in_ifaddr *inet_ifa_byprefix(struct in_device *in_dev, __be32 prefix, __be32 mask); -static __inline__ int inet_ifa_match(__be32 addr, struct in_ifaddr *ifa) +static __inline__ bool inet_ifa_match(__be32 addr, struct in_ifaddr *ifa) { return !((addr^ifa->ifa_address)&ifa->ifa_mask); } @@ -180,15 +180,15 @@ static __inline__ int inet_ifa_match(__be32 addr, struct in_ifaddr *ifa) * Check if a mask is acceptable. 
*/ -static __inline__ int bad_mask(__be32 mask, __be32 addr) +static __inline__ bool bad_mask(__be32 mask, __be32 addr) { __u32 hmask; if (addr & (mask = ~mask)) - return 1; + return true; hmask = ntohl(mask); if (hmask & (hmask+1)) - return 1; - return 0; + return true; + return false; } #define for_primary_ifa(in_dev) { struct in_ifaddr *ifa; \ diff --git a/include/linux/init_task.h b/include/linux/init_task.h index d0b380ee7d67..1c1ff7e4faa4 100644 --- a/include/linux/init_task.h +++ b/include/linux/init_task.h @@ -52,7 +52,8 @@ extern struct fs_struct init_fs; .rlim = INIT_RLIMITS, \ .cputimer = { \ .cputime_atomic = INIT_CPUTIME_ATOMIC, \ - .running = 0, \ + .running = false, \ + .checking_timer = false, \ }, \ INIT_PREV_CPUTIME(sig) \ .cred_guard_mutex = \ diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h index 6240063bdcac..821273ca4873 100644 --- a/include/linux/intel-iommu.h +++ b/include/linux/intel-iommu.h @@ -1,5 +1,9 @@ /* - * Copyright (c) 2006, Intel Corporation. + * Copyright © 2006-2015, Intel Corporation. + * + * Authors: Ashok Raj <ashok.raj@intel.com> + * Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com> + * David Woodhouse <David.Woodhouse@intel.com> * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -13,10 +17,6 @@ * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., 59 Temple * Place - Suite 330, Boston, MA 02111-1307 USA. - * - * Copyright (C) 2006-2008 Intel Corporation - * Author: Ashok Raj <ashok.raj@intel.com> - * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com> */ #ifndef _INTEL_IOMMU_H_ @@ -25,7 +25,10 @@ #include <linux/types.h> #include <linux/iova.h> #include <linux/io.h> +#include <linux/idr.h> #include <linux/dma_remapping.h> +#include <linux/mmu_notifier.h> +#include <linux/list.h> #include <asm/cacheflush.h> #include <asm/iommu.h> @@ -57,16 +60,21 @@ #define DMAR_IQA_REG 0x90 /* Invalidation queue addr register */ #define DMAR_ICS_REG 0x9c /* Invalidation complete status register */ #define DMAR_IRTA_REG 0xb8 /* Interrupt remapping table addr register */ +#define DMAR_PQH_REG 0xc0 /* Page request queue head register */ +#define DMAR_PQT_REG 0xc8 /* Page request queue tail register */ +#define DMAR_PQA_REG 0xd0 /* Page request queue address register */ +#define DMAR_PRS_REG 0xdc /* Page request status register */ +#define DMAR_PECTL_REG 0xe0 /* Page request event control register */ +#define DMAR_PEDATA_REG 0xe4 /* Page request event interrupt data register */ +#define DMAR_PEADDR_REG 0xe8 /* Page request event interrupt addr register */ +#define DMAR_PEUADDR_REG 0xec /* Page request event Upper address register */ #define OFFSET_STRIDE (9) -/* -#define dmar_readl(dmar, reg) readl(dmar + reg) -#define dmar_readq(dmar, reg) ({ \ - u32 lo, hi; \ - lo = readl(dmar + reg); \ - hi = readl(dmar + reg + 4); \ - (((u64) hi) << 32) + lo; }) -*/ + +#ifdef CONFIG_64BIT +#define dmar_readq(a) readq(a) +#define dmar_writeq(a,v) writeq(v,a) +#else static inline u64 dmar_readq(void __iomem *addr) { u32 lo, hi; @@ -80,6 +88,7 @@ static inline void dmar_writeq(void __iomem *addr, u64 val) writel((u32)val, addr); writel((u32)(val >> 32), addr + 4); } +#endif #define DMAR_VER_MAJOR(v) (((v) & 0xf0) >> 4) #define DMAR_VER_MINOR(v) ((v) & 0x0f) @@ -123,7 +132,7 @@ static inline void dmar_writeq(void __iomem *addr, u64 val) #define ecap_srs(e) ((e >> 
31) & 0x1) #define ecap_ers(e) ((e >> 30) & 0x1) #define ecap_prs(e) ((e >> 29) & 0x1) -/* PASID support used to be on bit 28 */ +#define ecap_broken_pasid(e) ((e >> 28) & 0x1) #define ecap_dis(e) ((e >> 27) & 0x1) #define ecap_nest(e) ((e >> 26) & 0x1) #define ecap_mts(e) ((e >> 25) & 0x1) @@ -253,6 +262,11 @@ enum { #define QI_DIOTLB_TYPE 0x3 #define QI_IEC_TYPE 0x4 #define QI_IWD_TYPE 0x5 +#define QI_EIOTLB_TYPE 0x6 +#define QI_PC_TYPE 0x7 +#define QI_DEIOTLB_TYPE 0x8 +#define QI_PGRP_RESP_TYPE 0x9 +#define QI_PSTRM_RESP_TYPE 0xa #define QI_IEC_SELECTIVE (((u64)1) << 4) #define QI_IEC_IIDEX(idx) (((u64)(idx & 0xffff) << 32)) @@ -280,6 +294,53 @@ enum { #define QI_DEV_IOTLB_SIZE 1 #define QI_DEV_IOTLB_MAX_INVS 32 +#define QI_PC_PASID(pasid) (((u64)pasid) << 32) +#define QI_PC_DID(did) (((u64)did) << 16) +#define QI_PC_GRAN(gran) (((u64)gran) << 4) + +#define QI_PC_ALL_PASIDS (QI_PC_TYPE | QI_PC_GRAN(0)) +#define QI_PC_PASID_SEL (QI_PC_TYPE | QI_PC_GRAN(1)) + +#define QI_EIOTLB_ADDR(addr) ((u64)(addr) & VTD_PAGE_MASK) +#define QI_EIOTLB_GL(gl) (((u64)gl) << 7) +#define QI_EIOTLB_IH(ih) (((u64)ih) << 6) +#define QI_EIOTLB_AM(am) (((u64)am)) +#define QI_EIOTLB_PASID(pasid) (((u64)pasid) << 32) +#define QI_EIOTLB_DID(did) (((u64)did) << 16) +#define QI_EIOTLB_GRAN(gran) (((u64)gran) << 4) + +#define QI_DEV_EIOTLB_ADDR(a) ((u64)(a) & VTD_PAGE_MASK) +#define QI_DEV_EIOTLB_SIZE (((u64)1) << 11) +#define QI_DEV_EIOTLB_GLOB(g) ((u64)g) +#define QI_DEV_EIOTLB_PASID(p) (((u64)p) << 32) +#define QI_DEV_EIOTLB_SID(sid) ((u64)((sid) & 0xffff) << 32) +#define QI_DEV_EIOTLB_QDEP(qd) (((qd) & 0x1f) << 16) +#define QI_DEV_EIOTLB_MAX_INVS 32 + +#define QI_PGRP_IDX(idx) (((u64)(idx)) << 55) +#define QI_PGRP_PRIV(priv) (((u64)(priv)) << 32) +#define QI_PGRP_RESP_CODE(res) ((u64)(res)) +#define QI_PGRP_PASID(pasid) (((u64)(pasid)) << 32) +#define QI_PGRP_DID(did) (((u64)(did)) << 16) +#define QI_PGRP_PASID_P(p) (((u64)(p)) << 4) + +#define QI_PSTRM_ADDR(addr) (((u64)(addr)) & VTD_PAGE_MASK) +#define QI_PSTRM_DEVFN(devfn) (((u64)(devfn)) << 4) +#define QI_PSTRM_RESP_CODE(res) ((u64)(res)) +#define QI_PSTRM_IDX(idx) (((u64)(idx)) << 55) +#define QI_PSTRM_PRIV(priv) (((u64)(priv)) << 32) +#define QI_PSTRM_BUS(bus) (((u64)(bus)) << 24) +#define QI_PSTRM_PASID(pasid) (((u64)(pasid)) << 4) + +#define QI_RESP_SUCCESS 0x0 +#define QI_RESP_INVALID 0x1 +#define QI_RESP_FAILURE 0xf + +#define QI_GRAN_ALL_ALL 0 +#define QI_GRAN_NONG_ALL 1 +#define QI_GRAN_NONG_PASID 2 +#define QI_GRAN_PSI_PASID 3 + struct qi_desc { u64 low, high; }; @@ -327,6 +388,10 @@ enum { #define VTD_FLAG_TRANS_PRE_ENABLED (1 << 0) #define VTD_FLAG_IRQ_REMAP_PRE_ENABLED (1 << 1) +struct pasid_entry; +struct pasid_state_entry; +struct page_req_dsc; + struct intel_iommu { void __iomem *reg; /* Pointer to hardware regs, virtual addr */ u64 reg_phys; /* physical address of hw register set */ @@ -338,7 +403,7 @@ struct intel_iommu { int seq_id; /* sequence id of the iommu */ int agaw; /* agaw of this iommu */ int msagaw; /* max sagaw of this iommu */ - unsigned int irq; + unsigned int irq, pr_irq; u16 segment; /* PCI segment# */ unsigned char name[13]; /* Device Name */ @@ -350,6 +415,18 @@ struct intel_iommu { struct iommu_flush flush; #endif +#ifdef CONFIG_INTEL_IOMMU_SVM + /* These are large and need to be contiguous, so we allocate just + * one for now. We'll maybe want to rethink that if we truly give + * devices away to userspace processes (e.g. for DPDK) and don't + * want to trust that userspace will use *only* the PASID it was + * told to. 
But while it's all driver-arbitrated, we're fine. */ + struct pasid_entry *pasid_table; + struct pasid_state_entry *pasid_state_table; + struct page_req_dsc *prq; + unsigned char prq_name[16]; /* Name for PRQ interrupt */ + struct idr pasid_idr; +#endif struct q_inval *qi; /* Queued invalidation info */ u32 *iommu_state; /* Store iommu states between suspend and resume.*/ @@ -389,6 +466,38 @@ extern int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu); extern int dmar_ir_support(void); +#ifdef CONFIG_INTEL_IOMMU_SVM +extern int intel_svm_alloc_pasid_tables(struct intel_iommu *iommu); +extern int intel_svm_free_pasid_tables(struct intel_iommu *iommu); +extern int intel_svm_enable_prq(struct intel_iommu *iommu); +extern int intel_svm_finish_prq(struct intel_iommu *iommu); + +struct svm_dev_ops; + +struct intel_svm_dev { + struct list_head list; + struct rcu_head rcu; + struct device *dev; + struct svm_dev_ops *ops; + int users; + u16 did; + u16 dev_iotlb:1; + u16 sid, qdep; +}; + +struct intel_svm { + struct mmu_notifier notifier; + struct mm_struct *mm; + struct intel_iommu *iommu; + int flags; + int pasid; + struct list_head devs; +}; + +extern int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct intel_svm_dev *sdev); +extern struct intel_iommu *intel_svm_device_to_iommu(struct device *dev); +#endif + extern const struct attribute_group *intel_iommu_groups[]; #endif diff --git a/include/linux/intel-svm.h b/include/linux/intel-svm.h new file mode 100644 index 000000000000..3c25794042f9 --- /dev/null +++ b/include/linux/intel-svm.h @@ -0,0 +1,121 @@ +/* + * Copyright © 2015 Intel Corporation. + * + * Authors: David Woodhouse <David.Woodhouse@intel.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + */ + +#ifndef __INTEL_SVM_H__ +#define __INTEL_SVM_H__ + +struct device; + +struct svm_dev_ops { + void (*fault_cb)(struct device *dev, int pasid, u64 address, + u32 private, int rwxp, int response); +}; + +/* Values for rxwp in fault_cb callback */ +#define SVM_REQ_READ (1<<3) +#define SVM_REQ_WRITE (1<<2) +#define SVM_REQ_EXEC (1<<1) +#define SVM_REQ_PRIV (1<<0) + + +/* + * The SVM_FLAG_PRIVATE_PASID flag requests a PASID which is *not* the "main" + * PASID for the current process. Even if a PASID already exists, a new one + * will be allocated. And the PASID allocated with SVM_FLAG_PRIVATE_PASID + * will not be given to subsequent callers. This facility allows a driver to + * disambiguate between multiple device contexts which access the same MM, + * if there is no other way to do so. It should be used sparingly, if at all. + */ +#define SVM_FLAG_PRIVATE_PASID (1<<0) + +/* + * The SVM_FLAG_SUPERVISOR_MODE flag requests a PASID which can be used only + * for access to kernel addresses. No IOTLB flushes are automatically done + * for kernel mappings; it is valid only for access to the kernel's static + * 1:1 mapping of physical memory — not to vmalloc or even module mappings. + * A future API addition may permit the use of such ranges, by means of an + * explicit IOTLB flush call (akin to the DMA API's unmap method). 
+ * + * It is unlikely that we will ever hook into flush_tlb_kernel_range() to + * do such IOTLB flushes automatically. + */ +#define SVM_FLAG_SUPERVISOR_MODE (1<<1) + +#ifdef CONFIG_INTEL_IOMMU_SVM + +/** + * intel_svm_bind_mm() - Bind the current process to a PASID + * @dev: Device to be granted acccess + * @pasid: Address for allocated PASID + * @flags: Flags. Later for requesting supervisor mode, etc. + * @ops: Callbacks to device driver + * + * This function attempts to enable PASID support for the given device. + * If the @pasid argument is non-%NULL, a PASID is allocated for access + * to the MM of the current process. + * + * By using a %NULL value for the @pasid argument, this function can + * be used to simply validate that PASID support is available for the + * given device — i.e. that it is behind an IOMMU which has the + * requisite support, and is enabled. + * + * Page faults are handled transparently by the IOMMU code, and there + * should be no need for the device driver to be involved. If a page + * fault cannot be handled (i.e. is an invalid address rather than + * just needs paging in), then the page request will be completed by + * the core IOMMU code with appropriate status, and the device itself + * can then report the resulting fault to its driver via whatever + * mechanism is appropriate. + * + * Multiple calls from the same process may result in the same PASID + * being re-used. A reference count is kept. + */ +extern int intel_svm_bind_mm(struct device *dev, int *pasid, int flags, + struct svm_dev_ops *ops); + +/** + * intel_svm_unbind_mm() - Unbind a specified PASID + * @dev: Device for which PASID was allocated + * @pasid: PASID value to be unbound + * + * This function allows a PASID to be retired when the device no + * longer requires access to the address space of a given process. + * + * If the use count for the PASID in question reaches zero, the + * PASID is revoked and may no longer be used by hardware. + * + * Device drivers are required to ensure that no access (including + * page requests) is currently outstanding for the PASID in question, + * before calling this function. 
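As a rough illustration of the calling pattern described in the kernel-doc above (not part of this patch; struct my_dev and the my_dev_* helpers are hypothetical placeholders), a driver might bind and later unbind a PASID along these lines:

	static int my_dev_enable_svm(struct my_dev *d)
	{
		int pasid, ret;

		/* Probe for PASID support behind a capable, enabled IOMMU */
		if (!intel_svm_available(d->dev))
			return -ENODEV;

		/* Bind the current process's MM; faults are handled by the IOMMU core */
		ret = intel_svm_bind_mm(d->dev, &pasid, 0, NULL);
		if (ret)
			return ret;

		d->pasid = pasid;
		my_dev_program_pasid(d, pasid);	/* hypothetical: tell the HW which PASID to use */
		return 0;
	}

	static void my_dev_disable_svm(struct my_dev *d)
	{
		my_dev_quiesce(d);	/* hypothetical: no requests outstanding for this PASID */
		intel_svm_unbind_mm(d->dev, d->pasid);
	}

The intel_svm_available() check used here is the convenience macro defined at the end of this header, which simply calls intel_svm_bind_mm() with a NULL pasid pointer.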
+ */ +extern int intel_svm_unbind_mm(struct device *dev, int pasid); + +#else /* CONFIG_INTEL_IOMMU_SVM */ + +static inline int intel_svm_bind_mm(struct device *dev, int *pasid, + int flags, struct svm_dev_ops *ops) +{ + return -ENOSYS; +} + +static inline int intel_svm_unbind_mm(struct device *dev, int pasid) +{ + BUG(); +} +#endif /* CONFIG_INTEL_IOMMU_SVM */ + +#define intel_svm_available(dev) (!intel_svm_bind_mm((dev), NULL, 0, NULL)) + +#endif /* __INTEL_SVM_H__ */ diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h index be7e75c945e9..ad16809c8596 100644 --- a/include/linux/interrupt.h +++ b/include/linux/interrupt.h @@ -102,6 +102,7 @@ typedef irqreturn_t (*irq_handler_t)(int, void *); * @flags: flags (see IRQF_* above) * @thread_fn: interrupt handler function for threaded interrupts * @thread: thread pointer for threaded interrupts + * @secondary: pointer to secondary irqaction (force threading) * @thread_flags: flags related to @thread * @thread_mask: bitmask for keeping track of @thread activity * @dir: pointer to the proc/irq/NN/name entry @@ -113,6 +114,7 @@ struct irqaction { struct irqaction *next; irq_handler_t thread_fn; struct task_struct *thread; + struct irqaction *secondary; unsigned int irq; unsigned int flags; unsigned long thread_flags; diff --git a/include/linux/iommu-common.h b/include/linux/iommu-common.h index bbced83b32ee..376a27c9cc6a 100644 --- a/include/linux/iommu-common.h +++ b/include/linux/iommu-common.h @@ -7,6 +7,7 @@ #define IOMMU_POOL_HASHBITS 4 #define IOMMU_NR_POOLS (1 << IOMMU_POOL_HASHBITS) +#define IOMMU_ERROR_CODE (~(unsigned long) 0) struct iommu_pool { unsigned long start; diff --git a/include/linux/iommu.h b/include/linux/iommu.h index f9c1b6d0f2e4..f28dff313b07 100644 --- a/include/linux/iommu.h +++ b/include/linux/iommu.h @@ -81,6 +81,7 @@ struct iommu_domain { iommu_fault_handler_t handler; void *handler_token; struct iommu_domain_geometry geometry; + void *iova_cookie; }; enum iommu_cap { @@ -167,7 +168,7 @@ struct iommu_ops { phys_addr_t (*iova_to_phys)(struct iommu_domain *domain, dma_addr_t iova); int (*add_device)(struct device *dev); void (*remove_device)(struct device *dev); - int (*device_group)(struct device *dev, unsigned int *groupid); + struct iommu_group *(*device_group)(struct device *dev); int (*domain_get_attr)(struct iommu_domain *domain, enum iommu_attr attr, void *data); int (*domain_set_attr)(struct iommu_domain *domain, @@ -316,6 +317,11 @@ static inline size_t iommu_map_sg(struct iommu_domain *domain, return domain->ops->map_sg(domain, iova, sg, nents, prot); } +/* PCI device grouping function */ +extern struct iommu_group *pci_device_group(struct device *dev); +/* Generic device grouping function */ +extern struct iommu_group *generic_device_group(struct device *dev); + #else /* CONFIG_IOMMU_API */ struct iommu_ops {}; diff --git a/include/linux/ioport.h b/include/linux/ioport.h index 388e3ae94f7a..24bea087e7af 100644 --- a/include/linux/ioport.h +++ b/include/linux/ioport.h @@ -94,6 +94,7 @@ struct resource { /* PnP I/O specific bits (IORESOURCE_BITS) */ #define IORESOURCE_IO_16BIT_ADDR (1<<0) #define IORESOURCE_IO_FIXED (1<<1) +#define IORESOURCE_IO_SPARSE (1<<2) /* PCI ROM control bits (IORESOURCE_BITS) */ #define IORESOURCE_ROM_ENABLE (1<<0) /* ROM is enabled, same as PCI_ROM_ADDRESS_ENABLE */ diff --git a/include/linux/iova.h b/include/linux/iova.h index 3920a19d8194..92f7177db2ce 100644 --- a/include/linux/iova.h +++ b/include/linux/iova.h @@ -68,8 +68,8 @@ static inline unsigned long 
iova_pfn(struct iova_domain *iovad, dma_addr_t iova) return iova >> iova_shift(iovad); } -int iommu_iova_cache_init(void); -void iommu_iova_cache_destroy(void); +int iova_cache_get(void); +void iova_cache_put(void); struct iova *alloc_iova_mem(void); void free_iova_mem(struct iova *iova); diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h index f1f32af6d9b9..0ef2a97ccdb5 100644 --- a/include/linux/ipv6.h +++ b/include/linux/ipv6.h @@ -264,9 +264,9 @@ struct tcp6_timewait_sock { }; #if IS_ENABLED(CONFIG_IPV6) -static inline struct ipv6_pinfo * inet6_sk(const struct sock *__sk) +static inline struct ipv6_pinfo *inet6_sk(const struct sock *__sk) { - return inet_sk(__sk)->pinet6; + return sk_fullsock(__sk) ? inet_sk(__sk)->pinet6 : NULL; } static inline struct raw6_sock *raw6_sk(const struct sock *sk) diff --git a/include/linux/irq.h b/include/linux/irq.h index 6f8b34066442..3c1c96786248 100644 --- a/include/linux/irq.h +++ b/include/linux/irq.h @@ -67,11 +67,12 @@ enum irqchip_irq_state; * request/setup_irq() * IRQ_NO_BALANCING - Interrupt cannot be balanced (affinity set) * IRQ_MOVE_PCNTXT - Interrupt can be migrated from process context - * IRQ_NESTED_TRHEAD - Interrupt nests into another thread + * IRQ_NESTED_THREAD - Interrupt nests into another thread * IRQ_PER_CPU_DEVID - Dev_id is a per-cpu variable * IRQ_IS_POLLED - Always polled by another interrupt. Exclude * it from the spurious interrupt detection * mechanism and from core side polling. + * IRQ_DISABLE_UNLAZY - Disable lazy irq disable */ enum { IRQ_TYPE_NONE = 0x00000000, @@ -97,21 +98,22 @@ enum { IRQ_NOTHREAD = (1 << 16), IRQ_PER_CPU_DEVID = (1 << 17), IRQ_IS_POLLED = (1 << 18), + IRQ_DISABLE_UNLAZY = (1 << 19), }; #define IRQF_MODIFY_MASK \ (IRQ_TYPE_SENSE_MASK | IRQ_NOPROBE | IRQ_NOREQUEST | \ IRQ_NOAUTOEN | IRQ_MOVE_PCNTXT | IRQ_LEVEL | IRQ_NO_BALANCING | \ IRQ_PER_CPU | IRQ_NESTED_THREAD | IRQ_NOTHREAD | IRQ_PER_CPU_DEVID | \ - IRQ_IS_POLLED) + IRQ_IS_POLLED | IRQ_DISABLE_UNLAZY) #define IRQ_NO_BALANCING_MASK (IRQ_PER_CPU | IRQ_NO_BALANCING) /* * Return value for chip->irq_set_affinity() * - * IRQ_SET_MASK_OK - OK, core updates irq_data.affinity - * IRQ_SET_MASK_NOCPY - OK, chip did update irq_data.affinity + * IRQ_SET_MASK_OK - OK, core updates irq_common_data.affinity + * IRQ_SET_MASK_NOCPY - OK, chip did update irq_common_data.affinity * IRQ_SET_MASK_OK_DONE - Same as IRQ_SET_MASK_OK for core. Special code to * support stacked irqchips, which indicates skipping * all descendent irqchips. @@ -129,9 +131,19 @@ struct irq_domain; * struct irq_common_data - per irq data shared by all irqchips * @state_use_accessors: status information for irq chip functions. * Use accessor functions to deal with it + * @node: node index useful for balancing + * @handler_data: per-IRQ data for the irq_chip methods + * @affinity: IRQ affinity on SMP + * @msi_desc: MSI descriptor */ struct irq_common_data { unsigned int state_use_accessors; +#ifdef CONFIG_NUMA + unsigned int node; +#endif + void *handler_data; + struct msi_desc *msi_desc; + cpumask_var_t affinity; }; /** @@ -139,38 +151,26 @@ struct irq_common_data { * @mask: precomputed bitmask for accessing the chip registers * @irq: interrupt number * @hwirq: hardware interrupt number, local to the interrupt domain - * @node: node index useful for balancing * @common: point to data shared by all irqchips * @chip: low level interrupt hardware access * @domain: Interrupt translation domain; responsible for mapping * between hwirq number and linux irq number. 
* @parent_data: pointer to parent struct irq_data to support hierarchy * irq_domain - * @handler_data: per-IRQ data for the irq_chip methods * @chip_data: platform-specific per-chip private data for the chip * methods, to allow shared chip implementations - * @msi_desc: MSI descriptor - * @affinity: IRQ affinity on SMP - * - * The fields here need to overlay the ones in irq_desc until we - * cleaned up the direct references and switched everything over to - * irq_data. */ struct irq_data { u32 mask; unsigned int irq; unsigned long hwirq; - unsigned int node; struct irq_common_data *common; struct irq_chip *chip; struct irq_domain *domain; #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY struct irq_data *parent_data; #endif - void *handler_data; void *chip_data; - struct msi_desc *msi_desc; - cpumask_var_t affinity; }; /* @@ -190,6 +190,7 @@ struct irq_data { * IRQD_IRQ_MASKED - Masked state of the interrupt * IRQD_IRQ_INPROGRESS - In progress state of the interrupt * IRQD_WAKEUP_ARMED - Wakeup mode armed + * IRQD_FORWARDED_TO_VCPU - The interrupt is forwarded to a VCPU */ enum { IRQD_TRIGGER_MASK = 0xf, @@ -204,6 +205,7 @@ enum { IRQD_IRQ_MASKED = (1 << 17), IRQD_IRQ_INPROGRESS = (1 << 18), IRQD_WAKEUP_ARMED = (1 << 19), + IRQD_FORWARDED_TO_VCPU = (1 << 20), }; #define __irqd_to_state(d) ((d)->common->state_use_accessors) @@ -282,20 +284,19 @@ static inline bool irqd_is_wakeup_armed(struct irq_data *d) return __irqd_to_state(d) & IRQD_WAKEUP_ARMED; } +static inline bool irqd_is_forwarded_to_vcpu(struct irq_data *d) +{ + return __irqd_to_state(d) & IRQD_FORWARDED_TO_VCPU; +} -/* - * Functions for chained handlers which can be enabled/disabled by the - * standard disable_irq/enable_irq calls. Must be called with - * irq_desc->lock held. - */ -static inline void irqd_set_chained_irq_inprogress(struct irq_data *d) +static inline void irqd_set_forwarded_to_vcpu(struct irq_data *d) { - __irqd_to_state(d) |= IRQD_IRQ_INPROGRESS; + __irqd_to_state(d) |= IRQD_FORWARDED_TO_VCPU; } -static inline void irqd_clr_chained_irq_inprogress(struct irq_data *d) +static inline void irqd_clr_forwarded_to_vcpu(struct irq_data *d) { - __irqd_to_state(d) &= ~IRQD_IRQ_INPROGRESS; + __irqd_to_state(d) &= ~IRQD_FORWARDED_TO_VCPU; } static inline irq_hw_number_t irqd_to_hwirq(struct irq_data *d) @@ -438,6 +439,8 @@ extern int irq_set_affinity_locked(struct irq_data *data, const struct cpumask *cpumask, bool force); extern int irq_set_vcpu_affinity(unsigned int irq, void *vcpu_info); +extern void irq_migrate_all_off_this_cpu(void); + #if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_PENDING_IRQ) void irq_move_irq(struct irq_data *data); void irq_move_masked_irq(struct irq_data *data); @@ -461,14 +464,14 @@ static inline int irq_set_parent(int irq, int parent_irq) * Built-in IRQ handlers for various IRQ types, * callable via desc->handle_irq() */ -extern void handle_level_irq(unsigned int irq, struct irq_desc *desc); -extern void handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc); -extern void handle_edge_irq(unsigned int irq, struct irq_desc *desc); -extern void handle_edge_eoi_irq(unsigned int irq, struct irq_desc *desc); -extern void handle_simple_irq(unsigned int irq, struct irq_desc *desc); -extern void handle_percpu_irq(unsigned int irq, struct irq_desc *desc); -extern void handle_percpu_devid_irq(unsigned int irq, struct irq_desc *desc); -extern void handle_bad_irq(unsigned int irq, struct irq_desc *desc); +extern void handle_level_irq(struct irq_desc *desc); +extern void handle_fasteoi_irq(struct irq_desc *desc); 
+extern void handle_edge_irq(struct irq_desc *desc); +extern void handle_edge_eoi_irq(struct irq_desc *desc); +extern void handle_simple_irq(struct irq_desc *desc); +extern void handle_percpu_irq(struct irq_desc *desc); +extern void handle_percpu_devid_irq(struct irq_desc *desc); +extern void handle_bad_irq(struct irq_desc *desc); extern void handle_nested_irq(unsigned int irq); extern int irq_chip_compose_msi_msg(struct irq_data *data, struct msi_msg *msg); @@ -627,23 +630,23 @@ static inline void *irq_data_get_irq_chip_data(struct irq_data *d) static inline void *irq_get_handler_data(unsigned int irq) { struct irq_data *d = irq_get_irq_data(irq); - return d ? d->handler_data : NULL; + return d ? d->common->handler_data : NULL; } static inline void *irq_data_get_irq_handler_data(struct irq_data *d) { - return d->handler_data; + return d->common->handler_data; } static inline struct msi_desc *irq_get_msi_desc(unsigned int irq) { struct irq_data *d = irq_get_irq_data(irq); - return d ? d->msi_desc : NULL; + return d ? d->common->msi_desc : NULL; } static inline struct msi_desc *irq_data_get_msi_desc(struct irq_data *d) { - return d->msi_desc; + return d->common->msi_desc; } static inline u32 irq_get_trigger_type(unsigned int irq) @@ -652,21 +655,30 @@ static inline u32 irq_get_trigger_type(unsigned int irq) return d ? irqd_get_trigger_type(d) : 0; } -static inline int irq_data_get_node(struct irq_data *d) +static inline int irq_common_data_get_node(struct irq_common_data *d) { +#ifdef CONFIG_NUMA return d->node; +#else + return 0; +#endif +} + +static inline int irq_data_get_node(struct irq_data *d) +{ + return irq_common_data_get_node(d->common); } static inline struct cpumask *irq_get_affinity_mask(int irq) { struct irq_data *d = irq_get_irq_data(irq); - return d ? d->affinity : NULL; + return d ? d->common->affinity : NULL; } static inline struct cpumask *irq_data_get_affinity_mask(struct irq_data *d) { - return d->affinity; + return d->common->affinity; } unsigned int arch_dynirq_lower_bound(unsigned int from); diff --git a/include/linux/irqbypass.h b/include/linux/irqbypass.h new file mode 100644 index 000000000000..1551b5b2f4c2 --- /dev/null +++ b/include/linux/irqbypass.h @@ -0,0 +1,90 @@ +/* + * IRQ offload/bypass manager + * + * Copyright (C) 2015 Red Hat, Inc. + * Copyright (c) 2015 Linaro Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#ifndef IRQBYPASS_H +#define IRQBYPASS_H + +#include <linux/list.h> + +struct irq_bypass_consumer; + +/* + * Theory of operation + * + * The IRQ bypass manager is a simple set of lists and callbacks that allows + * IRQ producers (ex. physical interrupt sources) to be matched to IRQ + * consumers (ex. virtualization hardware that allows IRQ bypass or offload) + * via a shared token (ex. eventfd_ctx). Producers and consumers register + * independently. When a token match is found, the optional @stop callback + * will be called for each participant. The pair will then be connected via + * the @add_* callbacks, and finally the optional @start callback will allow + * any final coordination. When either participant is unregistered, the + * process is repeated using the @del_* callbacks in place of the @add_* + * callbacks. Match tokens must be unique per producer/consumer, 1:N pairings + * are not supported. 
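To make the registration flow above concrete, a minimal sketch follows (illustrative only, not part of this patch). In practice the producer (e.g. a device-assignment driver) and the consumer (e.g. a hypervisor's irqfd code) register independently and the manager pairs them when their tokens match; they are shown together here only to highlight the shared token. The my_* names and callbacks are hypothetical.

	/* Hypothetical consumer callbacks; the real connect/disconnect work goes here */
	static int my_add_producer(struct irq_bypass_consumer *cons,
				   struct irq_bypass_producer *prod);
	static void my_del_producer(struct irq_bypass_consumer *cons,
				    struct irq_bypass_producer *prod);

	static struct irq_bypass_producer my_producer;
	static struct irq_bypass_consumer my_consumer;

	static int my_register_pair(void *shared_token, int hwirq)
	{
		int ret;

		/* Producer side: describes the physical interrupt source */
		my_producer.token = shared_token;
		my_producer.irq = hwirq;
		ret = irq_bypass_register_producer(&my_producer);
		if (ret)
			return ret;

		/* Consumer side: same token, so the manager connects the two */
		my_consumer.token = shared_token;
		my_consumer.add_producer = my_add_producer;
		my_consumer.del_producer = my_del_producer;
		return irq_bypass_register_consumer(&my_consumer);
	}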
+ */ + +/** + * struct irq_bypass_producer - IRQ bypass producer definition + * @node: IRQ bypass manager private list management + * @token: opaque token to match between producer and consumer + * @irq: Linux IRQ number for the producer device + * @add_consumer: Connect the IRQ producer to an IRQ consumer (optional) + * @del_consumer: Disconnect the IRQ producer from an IRQ consumer (optional) + * @stop: Perform any quiesce operations necessary prior to add/del (optional) + * @start: Perform any startup operations necessary after add/del (optional) + * + * The IRQ bypass producer structure represents an interrupt source for + * participation in possible host bypass, for instance an interrupt vector + * for a physical device assigned to a VM. + */ +struct irq_bypass_producer { + struct list_head node; + void *token; + int irq; + int (*add_consumer)(struct irq_bypass_producer *, + struct irq_bypass_consumer *); + void (*del_consumer)(struct irq_bypass_producer *, + struct irq_bypass_consumer *); + void (*stop)(struct irq_bypass_producer *); + void (*start)(struct irq_bypass_producer *); +}; + +/** + * struct irq_bypass_consumer - IRQ bypass consumer definition + * @node: IRQ bypass manager private list management + * @token: opaque token to match between producer and consumer + * @add_producer: Connect the IRQ consumer to an IRQ producer + * @del_producer: Disconnect the IRQ consumer from an IRQ producer + * @stop: Perform any quiesce operations necessary prior to add/del (optional) + * @start: Perform any startup operations necessary after add/del (optional) + * + * The IRQ bypass consumer structure represents an interrupt sink for + * participation in possible host bypass, for instance a hypervisor may + * support offloads to allow bypassing the host entirely or offload + * portions of the interrupt handling to the VM. + */ +struct irq_bypass_consumer { + struct list_head node; + void *token; + int (*add_producer)(struct irq_bypass_consumer *, + struct irq_bypass_producer *); + void (*del_producer)(struct irq_bypass_consumer *, + struct irq_bypass_producer *); + void (*stop)(struct irq_bypass_consumer *); + void (*start)(struct irq_bypass_consumer *); +}; + +int irq_bypass_register_producer(struct irq_bypass_producer *); +void irq_bypass_unregister_producer(struct irq_bypass_producer *); +int irq_bypass_register_consumer(struct irq_bypass_consumer *); +void irq_bypass_unregister_consumer(struct irq_bypass_consumer *); + +#endif /* IRQBYPASS_H */ diff --git a/include/linux/irqchip.h b/include/linux/irqchip.h index 638887376e58..89c34b200671 100644 --- a/include/linux/irqchip.h +++ b/include/linux/irqchip.h @@ -11,6 +11,7 @@ #ifndef _LINUX_IRQCHIP_H #define _LINUX_IRQCHIP_H +#include <linux/acpi.h> #include <linux/of.h> /* @@ -25,6 +26,22 @@ */ #define IRQCHIP_DECLARE(name, compat, fn) OF_DECLARE_2(irqchip, name, compat, fn) +/* + * This macro must be used by the different irqchip drivers to declare + * the association between their version and their initialization function. + * + * @name: name that must be unique accross all IRQCHIP_ACPI_DECLARE of the + * same file. + * @subtable: Subtable to be identified in MADT + * @validate: Function to be called on that subtable to check its validity. + * Can be NULL. + * @data: data to be checked by the validate function. 
+ * @fn: initialization function + */ +#define IRQCHIP_ACPI_DECLARE(name, subtable, validate, data, fn) \ + ACPI_DECLARE_PROBE_ENTRY(irqchip, name, ACPI_SIG_MADT, \ + subtable, validate, data, fn) + #ifdef CONFIG_IRQCHIP void irqchip_init(void); #else diff --git a/include/linux/irqchip/arm-gic-acpi.h b/include/linux/irqchip/arm-gic-acpi.h deleted file mode 100644 index de3419ed3937..000000000000 --- a/include/linux/irqchip/arm-gic-acpi.h +++ /dev/null @@ -1,31 +0,0 @@ -/* - * Copyright (C) 2014, Linaro Ltd. - * Author: Tomasz Nowicki <tomasz.nowicki@linaro.org> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ - -#ifndef ARM_GIC_ACPI_H_ -#define ARM_GIC_ACPI_H_ - -#ifdef CONFIG_ACPI - -/* - * Hard code here, we can not get memory size from MADT (but FDT does), - * Actually no need to do that, because this size can be inferred - * from GIC spec. - */ -#define ACPI_GICV2_DIST_MEM_SIZE (SZ_4K) -#define ACPI_GIC_CPU_IF_MEM_SIZE (SZ_8K) - -struct acpi_table_header; - -int gic_v2_acpi_init(struct acpi_table_header *table); -void acpi_gic_init(void); -#else -static inline void acpi_gic_init(void) { } -#endif - -#endif /* ARM_GIC_ACPI_H_ */ diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h index 9eeeb9589acf..c9ae0c6ec050 100644 --- a/include/linux/irqchip/arm-gic-v3.h +++ b/include/linux/irqchip/arm-gic-v3.h @@ -18,8 +18,6 @@ #ifndef __LINUX_IRQCHIP_ARM_GIC_V3_H #define __LINUX_IRQCHIP_ARM_GIC_V3_H -#include <asm/sysreg.h> - /* * Distributor registers. We assume we're running non-secure, with ARE * being set. Secure-only and non-ARE registers are not described. @@ -231,6 +229,7 @@ #define GITS_BASER_PAGE_SIZE_16K (1UL << GITS_BASER_PAGE_SIZE_SHIFT) #define GITS_BASER_PAGE_SIZE_64K (2UL << GITS_BASER_PAGE_SIZE_SHIFT) #define GITS_BASER_PAGE_SIZE_MASK (3UL << GITS_BASER_PAGE_SIZE_SHIFT) +#define GITS_BASER_PAGES_MAX 256 #define GITS_BASER_TYPE_NONE 0 #define GITS_BASER_TYPE_DEVICE 1 @@ -266,16 +265,16 @@ /* * Hypervisor interface registers (SRE only) */ -#define ICH_LR_VIRTUAL_ID_MASK ((1UL << 32) - 1) - -#define ICH_LR_EOI (1UL << 41) -#define ICH_LR_GROUP (1UL << 60) -#define ICH_LR_HW (1UL << 61) -#define ICH_LR_STATE (3UL << 62) -#define ICH_LR_PENDING_BIT (1UL << 62) -#define ICH_LR_ACTIVE_BIT (1UL << 63) +#define ICH_LR_VIRTUAL_ID_MASK ((1ULL << 32) - 1) + +#define ICH_LR_EOI (1ULL << 41) +#define ICH_LR_GROUP (1ULL << 60) +#define ICH_LR_HW (1ULL << 61) +#define ICH_LR_STATE (3ULL << 62) +#define ICH_LR_PENDING_BIT (1ULL << 62) +#define ICH_LR_ACTIVE_BIT (1ULL << 63) #define ICH_LR_PHYS_ID_SHIFT 32 -#define ICH_LR_PHYS_ID_MASK (0x3ffUL << ICH_LR_PHYS_ID_SHIFT) +#define ICH_LR_PHYS_ID_MASK (0x3ffULL << ICH_LR_PHYS_ID_SHIFT) #define ICH_MISR_EOI (1 << 0) #define ICH_MISR_U (1 << 1) @@ -292,19 +291,8 @@ #define ICH_VMCR_PMR_SHIFT 24 #define ICH_VMCR_PMR_MASK (0xffUL << ICH_VMCR_PMR_SHIFT) -#define ICC_EOIR1_EL1 sys_reg(3, 0, 12, 12, 1) -#define ICC_DIR_EL1 sys_reg(3, 0, 12, 11, 1) -#define ICC_IAR1_EL1 sys_reg(3, 0, 12, 12, 0) -#define ICC_SGI1R_EL1 sys_reg(3, 0, 12, 11, 5) -#define ICC_PMR_EL1 sys_reg(3, 0, 4, 6, 0) -#define ICC_CTLR_EL1 sys_reg(3, 0, 12, 12, 4) -#define ICC_SRE_EL1 sys_reg(3, 0, 12, 12, 5) -#define ICC_GRPEN1_EL1 sys_reg(3, 0, 12, 12, 7) - #define ICC_IAR1_EL1_SPURIOUS 0x3ff -#define ICC_SRE_EL2 sys_reg(3, 4, 12, 9, 5) - #define ICC_SRE_EL2_SRE (1 << 0) #define ICC_SRE_EL2_ENABLE (1 << 3) @@ 
-320,54 +308,10 @@ #define ICC_SGI1R_AFFINITY_3_SHIFT 48 #define ICC_SGI1R_AFFINITY_3_MASK (0xffULL << ICC_SGI1R_AFFINITY_1_SHIFT) -/* - * System register definitions - */ -#define ICH_VSEIR_EL2 sys_reg(3, 4, 12, 9, 4) -#define ICH_HCR_EL2 sys_reg(3, 4, 12, 11, 0) -#define ICH_VTR_EL2 sys_reg(3, 4, 12, 11, 1) -#define ICH_MISR_EL2 sys_reg(3, 4, 12, 11, 2) -#define ICH_EISR_EL2 sys_reg(3, 4, 12, 11, 3) -#define ICH_ELSR_EL2 sys_reg(3, 4, 12, 11, 5) -#define ICH_VMCR_EL2 sys_reg(3, 4, 12, 11, 7) - -#define __LR0_EL2(x) sys_reg(3, 4, 12, 12, x) -#define __LR8_EL2(x) sys_reg(3, 4, 12, 13, x) - -#define ICH_LR0_EL2 __LR0_EL2(0) -#define ICH_LR1_EL2 __LR0_EL2(1) -#define ICH_LR2_EL2 __LR0_EL2(2) -#define ICH_LR3_EL2 __LR0_EL2(3) -#define ICH_LR4_EL2 __LR0_EL2(4) -#define ICH_LR5_EL2 __LR0_EL2(5) -#define ICH_LR6_EL2 __LR0_EL2(6) -#define ICH_LR7_EL2 __LR0_EL2(7) -#define ICH_LR8_EL2 __LR8_EL2(0) -#define ICH_LR9_EL2 __LR8_EL2(1) -#define ICH_LR10_EL2 __LR8_EL2(2) -#define ICH_LR11_EL2 __LR8_EL2(3) -#define ICH_LR12_EL2 __LR8_EL2(4) -#define ICH_LR13_EL2 __LR8_EL2(5) -#define ICH_LR14_EL2 __LR8_EL2(6) -#define ICH_LR15_EL2 __LR8_EL2(7) - -#define __AP0Rx_EL2(x) sys_reg(3, 4, 12, 8, x) -#define ICH_AP0R0_EL2 __AP0Rx_EL2(0) -#define ICH_AP0R1_EL2 __AP0Rx_EL2(1) -#define ICH_AP0R2_EL2 __AP0Rx_EL2(2) -#define ICH_AP0R3_EL2 __AP0Rx_EL2(3) - -#define __AP1Rx_EL2(x) sys_reg(3, 4, 12, 9, x) -#define ICH_AP1R0_EL2 __AP1Rx_EL2(0) -#define ICH_AP1R1_EL2 __AP1Rx_EL2(1) -#define ICH_AP1R2_EL2 __AP1Rx_EL2(2) -#define ICH_AP1R3_EL2 __AP1Rx_EL2(3) +#include <asm/arch_gicv3.h> #ifndef __ASSEMBLY__ -#include <linux/stringify.h> -#include <asm/msi.h> - /* * We need a value to serve as a irq-type for LPIs. Choose one that will * hopefully pique the interest of the reviewer. @@ -385,23 +329,26 @@ struct rdists { u64 flags; }; -static inline void gic_write_eoir(u64 irq) -{ - asm volatile("msr_s " __stringify(ICC_EOIR1_EL1) ", %0" : : "r" (irq)); - isb(); -} - -static inline void gic_write_dir(u64 irq) -{ - asm volatile("msr_s " __stringify(ICC_DIR_EL1) ", %0" : : "r" (irq)); - isb(); -} - struct irq_domain; int its_cpu_init(void); int its_init(struct device_node *node, struct rdists *rdists, struct irq_domain *domain); +static inline bool gic_enable_sre(void) +{ + u32 val; + + val = gic_read_sre(); + if (val & ICC_SRE_EL1_SRE) + return true; + + val |= ICC_SRE_EL1_SRE; + gic_write_sre(val); + val = gic_read_sre(); + + return !!(val & ICC_SRE_EL1_SRE); +} + #endif #endif diff --git a/include/linux/irqchip/arm-gic.h b/include/linux/irqchip/arm-gic.h index b8901dfd9e95..bae69e5d693c 100644 --- a/include/linux/irqchip/arm-gic.h +++ b/include/linux/irqchip/arm-gic.h @@ -100,16 +100,11 @@ struct device_node; -void gic_init_bases(unsigned int, int, void __iomem *, void __iomem *, - u32 offset, struct device_node *); void gic_cascade_irq(unsigned int gic_nr, unsigned int irq); int gic_cpu_if_down(unsigned int gic_nr); -static inline void gic_init(unsigned int nr, int start, - void __iomem *dist , void __iomem *cpu) -{ - gic_init_bases(nr, start, dist, cpu, 0, NULL); -} +void gic_init(unsigned int nr, int start, + void __iomem *dist , void __iomem *cpu); int gicv2m_of_init(struct device_node *node, struct irq_domain *parent); diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h index 5acfa26602e1..a587a33363c7 100644 --- a/include/linux/irqdesc.h +++ b/include/linux/irqdesc.h @@ -98,11 +98,7 @@ extern struct irq_desc irq_desc[NR_IRQS]; static inline struct irq_desc *irq_data_to_desc(struct irq_data *data) { -#ifdef 
CONFIG_IRQ_DOMAIN_HIERARCHY - return irq_to_desc(data->irq); -#else - return container_of(data, struct irq_desc, irq_data); -#endif + return container_of(data->common, struct irq_desc, irq_common_data); } static inline unsigned int irq_desc_get_irq(struct irq_desc *desc) @@ -127,23 +123,21 @@ static inline void *irq_desc_get_chip_data(struct irq_desc *desc) static inline void *irq_desc_get_handler_data(struct irq_desc *desc) { - return desc->irq_data.handler_data; + return desc->irq_common_data.handler_data; } static inline struct msi_desc *irq_desc_get_msi_desc(struct irq_desc *desc) { - return desc->irq_data.msi_desc; + return desc->irq_common_data.msi_desc; } /* * Architectures call this to let the generic IRQ layer - * handle an interrupt. If the descriptor is attached to an - * irqchip-style controller then we call the ->handle_irq() handler, - * and it calls __do_IRQ() if it's attached to an irqtype-style controller. + * handle an interrupt. */ -static inline void generic_handle_irq_desc(unsigned int irq, struct irq_desc *desc) +static inline void generic_handle_irq_desc(struct irq_desc *desc) { - desc->handle_irq(irq, desc); + desc->handle_irq(desc); } int generic_handle_irq(unsigned int irq); @@ -176,29 +170,6 @@ static inline int irq_has_action(unsigned int irq) return irq_desc_has_action(irq_to_desc(irq)); } -/* caller has locked the irq_desc and both params are valid */ -static inline void __irq_set_handler_locked(unsigned int irq, - irq_flow_handler_t handler) -{ - struct irq_desc *desc; - - desc = irq_to_desc(irq); - desc->handle_irq = handler; -} - -/* caller has locked the irq_desc and both params are valid */ -static inline void -__irq_set_chip_handler_name_locked(unsigned int irq, struct irq_chip *chip, - irq_flow_handler_t handler, const char *name) -{ - struct irq_desc *desc; - - desc = irq_to_desc(irq); - irq_desc_get_irq_data(desc)->chip = chip; - desc->handle_irq = handler; - desc->name = name; -} - /** * irq_set_handler_locked - Set irq handler from a locked region * @data: Pointer to the irq_data structure which identifies the irq diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h index d3ca79236fb0..d5e5c5bef28c 100644 --- a/include/linux/irqdomain.h +++ b/include/linux/irqdomain.h @@ -5,9 +5,10 @@ * helpful for interrupt controllers to implement mapping between hardware * irq numbers and the Linux irq number space. * - * irq_domains also have a hook for translating device tree interrupt - * representation into a hardware irq number that can be mapped back to a - * Linux irq number without any extra platform support code. + * irq_domains also have hooks for translating device tree or other + * firmware interrupt representations into a hardware irq number that + * can be mapped back to a Linux irq number without any extra platform + * support code. * * Interrupt controller "domain" data structure. This could be defined as a * irq domain controller. That is, it handles the mapping between hardware @@ -17,16 +18,12 @@ * model). It's the domain callbacks that are responsible for setting the * irq_chip on a given irq_desc after it's been mapped. * - * The host code and data structures are agnostic to whether or not - * we use an open firmware device-tree. We do have references to struct - * device_node in two places: in irq_find_host() to find the host matching - * a given interrupt controller node, and of course as an argument to its - * counterpart domain->ops->match() callback. 
However, those are treated as - * generic pointers by the core and the fact that it's actually a device-node - * pointer is purely a convention between callers and implementation. This - * code could thus be used on other architectures by replacing those two - * by some sort of arch-specific void * "token" used to identify interrupt - * controllers. + * The host code and data structures use a fwnode_handle pointer to + * identify the domain. In some cases, and in order to preserve source + * code compatibility, this fwnode pointer is "upgraded" to a DT + * device_node. For those firmware infrastructures that do not provide + * a unique identifier for an interrupt controller, the irq_domain + * code offers a fwnode allocator. */ #ifndef _LINUX_IRQDOMAIN_H @@ -34,6 +31,7 @@ #include <linux/types.h> #include <linux/irqhandler.h> +#include <linux/of.h> #include <linux/radix-tree.h> struct device_node; @@ -45,6 +43,24 @@ struct irq_data; /* Number of irqs reserved for a legacy isa controller */ #define NUM_ISA_INTERRUPTS 16 +#define IRQ_DOMAIN_IRQ_SPEC_PARAMS 16 + +/** + * struct irq_fwspec - generic IRQ specifier structure + * + * @fwnode: Pointer to a firmware-specific descriptor + * @param_count: Number of device-specific parameters + * @param: Device-specific parameters + * + * This structure, directly modeled after of_phandle_args, is used to + * pass a device-specific description of an interrupt. + */ +struct irq_fwspec { + struct fwnode_handle *fwnode; + int param_count; + u32 param[IRQ_DOMAIN_IRQ_SPEC_PARAMS]; +}; + /* * Should several domains have the same device node, but serve * different purposes (for example one domain is for PCI/MSI, and the @@ -91,6 +107,8 @@ struct irq_domain_ops { unsigned int nr_irqs); void (*activate)(struct irq_domain *d, struct irq_data *irq_data); void (*deactivate)(struct irq_domain *d, struct irq_data *irq_data); + int (*translate)(struct irq_domain *d, struct irq_fwspec *fwspec, + unsigned long *out_hwirq, unsigned int *out_type); #endif }; @@ -130,7 +148,7 @@ struct irq_domain { unsigned int flags; /* Optional data */ - struct device_node *of_node; + struct fwnode_handle *fwnode; enum irq_domain_bus_token bus_token; struct irq_domain_chip_generic *gc; #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY @@ -161,8 +179,15 @@ enum { IRQ_DOMAIN_FLAG_NONCORE = (1 << 16), }; +static inline struct device_node *irq_domain_get_of_node(struct irq_domain *d) +{ + return to_of_node(d->fwnode); +} + #ifdef CONFIG_IRQ_DOMAIN -struct irq_domain *__irq_domain_add(struct device_node *of_node, int size, +struct fwnode_handle *irq_domain_alloc_fwnode(void *data); +void irq_domain_free_fwnode(struct fwnode_handle *fwnode); +struct irq_domain *__irq_domain_add(struct fwnode_handle *fwnode, int size, irq_hw_number_t hwirq_max, int direct_max, const struct irq_domain_ops *ops, void *host_data); @@ -177,10 +202,21 @@ struct irq_domain *irq_domain_add_legacy(struct device_node *of_node, irq_hw_number_t first_hwirq, const struct irq_domain_ops *ops, void *host_data); -extern struct irq_domain *irq_find_matching_host(struct device_node *node, - enum irq_domain_bus_token bus_token); +extern struct irq_domain *irq_find_matching_fwnode(struct fwnode_handle *fwnode, + enum irq_domain_bus_token bus_token); extern void irq_set_default_host(struct irq_domain *host); +static inline struct fwnode_handle *of_node_to_fwnode(struct device_node *node) +{ + return node ? 
&node->fwnode : NULL; +} + +static inline struct irq_domain *irq_find_matching_host(struct device_node *node, + enum irq_domain_bus_token bus_token) +{ + return irq_find_matching_fwnode(of_node_to_fwnode(node), bus_token); +} + static inline struct irq_domain *irq_find_host(struct device_node *node) { return irq_find_matching_host(node, DOMAIN_BUS_ANY); @@ -198,14 +234,14 @@ static inline struct irq_domain *irq_domain_add_linear(struct device_node *of_no const struct irq_domain_ops *ops, void *host_data) { - return __irq_domain_add(of_node, size, size, 0, ops, host_data); + return __irq_domain_add(of_node_to_fwnode(of_node), size, size, 0, ops, host_data); } static inline struct irq_domain *irq_domain_add_nomap(struct device_node *of_node, unsigned int max_irq, const struct irq_domain_ops *ops, void *host_data) { - return __irq_domain_add(of_node, 0, max_irq, max_irq, ops, host_data); + return __irq_domain_add(of_node_to_fwnode(of_node), 0, max_irq, max_irq, ops, host_data); } static inline struct irq_domain *irq_domain_add_legacy_isa( struct device_node *of_node, @@ -219,7 +255,22 @@ static inline struct irq_domain *irq_domain_add_tree(struct device_node *of_node const struct irq_domain_ops *ops, void *host_data) { - return __irq_domain_add(of_node, 0, ~0, 0, ops, host_data); + return __irq_domain_add(of_node_to_fwnode(of_node), 0, ~0, 0, ops, host_data); +} + +static inline struct irq_domain *irq_domain_create_linear(struct fwnode_handle *fwnode, + unsigned int size, + const struct irq_domain_ops *ops, + void *host_data) +{ + return __irq_domain_add(fwnode, size, size, 0, ops, host_data); +} + +static inline struct irq_domain *irq_domain_create_tree(struct fwnode_handle *fwnode, + const struct irq_domain_ops *ops, + void *host_data) +{ + return __irq_domain_add(fwnode, 0, ~0, 0, ops, host_data); } extern void irq_domain_remove(struct irq_domain *host); @@ -234,6 +285,7 @@ extern void irq_domain_disassociate(struct irq_domain *domain, extern unsigned int irq_create_mapping(struct irq_domain *host, irq_hw_number_t hwirq); +extern unsigned int irq_create_fwspec_mapping(struct irq_fwspec *fwspec); extern void irq_dispose_mapping(unsigned int virq); /** @@ -285,10 +337,23 @@ extern void irq_domain_set_info(struct irq_domain *domain, unsigned int virq, void *chip_data, irq_flow_handler_t handler, void *handler_data, const char *handler_name); #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY -extern struct irq_domain *irq_domain_add_hierarchy(struct irq_domain *parent, +extern struct irq_domain *irq_domain_create_hierarchy(struct irq_domain *parent, unsigned int flags, unsigned int size, - struct device_node *node, + struct fwnode_handle *fwnode, const struct irq_domain_ops *ops, void *host_data); + +static inline struct irq_domain *irq_domain_add_hierarchy(struct irq_domain *parent, + unsigned int flags, + unsigned int size, + struct device_node *node, + const struct irq_domain_ops *ops, + void *host_data) +{ + return irq_domain_create_hierarchy(parent, flags, size, + of_node_to_fwnode(node), + ops, host_data); +} + extern int __irq_domain_alloc_irqs(struct irq_domain *domain, int irq_base, unsigned int nr_irqs, int node, void *arg, bool realloc); diff --git a/include/linux/irqhandler.h b/include/linux/irqhandler.h index 62d543004197..661bed0ed1f3 100644 --- a/include/linux/irqhandler.h +++ b/include/linux/irqhandler.h @@ -8,7 +8,7 @@ struct irq_desc; struct irq_data; -typedef void (*irq_flow_handler_t)(unsigned int irq, struct irq_desc *desc); +typedef void (*irq_flow_handler_t)(struct irq_desc *desc); 
typedef void (*irq_preflow_handler_t)(struct irq_data *data); #endif diff --git a/include/linux/irqreturn.h b/include/linux/irqreturn.h index e374e369fb2f..eb1bdcf95f2e 100644 --- a/include/linux/irqreturn.h +++ b/include/linux/irqreturn.h @@ -3,7 +3,7 @@ /** * enum irqreturn - * @IRQ_NONE interrupt was not from this device + * @IRQ_NONE interrupt was not from this device or was not handled * @IRQ_HANDLED interrupt was handled by this device * @IRQ_WAKE_THREAD handler requests to wake the handler thread */ diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h index 7f653e8f6690..8dde55974f18 100644 --- a/include/linux/jump_label.h +++ b/include/linux/jump_label.h @@ -21,8 +21,8 @@ * * DEFINE_STATIC_KEY_TRUE(key); * DEFINE_STATIC_KEY_FALSE(key); - * static_key_likely() - * statick_key_unlikely() + * static_branch_likely() + * static_branch_unlikely() * * Jump labels provide an interface to generate dynamic branches using * self-modifying code. Assuming toolchain and architecture support, if we @@ -45,12 +45,10 @@ * statement, setting the key to true requires us to patch in a jump * to the out-of-line of true branch. * - * In addtion to static_branch_{enable,disable}, we can also reference count + * In addition to static_branch_{enable,disable}, we can also reference count * the key or branch direction via static_branch_{inc,dec}. Thus, * static_branch_inc() can be thought of as a 'make more true' and - * static_branch_dec() as a 'make more false'. The inc()/dec() - * interface is meant to be used exclusively from the inc()/dec() for a given - * key. + * static_branch_dec() as a 'make more false'. * * Since this relies on modifying code, the branch modifying functions * must be considered absolute slow paths (machine wide synchronization etc.). @@ -216,11 +214,6 @@ static inline int jump_label_apply_nops(struct module *mod) #define STATIC_KEY_INIT STATIC_KEY_INIT_FALSE #define jump_label_enabled static_key_enabled -static inline bool static_key_enabled(struct static_key *key) -{ - return static_key_count(key) > 0; -} - static inline void static_key_enable(struct static_key *key) { int count = static_key_count(key); @@ -267,6 +260,17 @@ struct static_key_false { #define DEFINE_STATIC_KEY_FALSE(name) \ struct static_key_false name = STATIC_KEY_FALSE_INIT +extern bool ____wrong_branch_error(void); + +#define static_key_enabled(x) \ +({ \ + if (!__builtin_types_compatible_p(typeof(*x), struct static_key) && \ + !__builtin_types_compatible_p(typeof(*x), struct static_key_true) &&\ + !__builtin_types_compatible_p(typeof(*x), struct static_key_false)) \ + ____wrong_branch_error(); \ + static_key_count((struct static_key *)x) > 0; \ +}) + #ifdef HAVE_JUMP_LABEL /* @@ -325,8 +329,6 @@ struct static_key_false { * See jump_label_type() / jump_label_init_type(). 
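As a quick illustration of the static branch interface referred to in the jump_label.h comments above, using the corrected static_branch_likely()/static_branch_unlikely() names (illustrative only; the my_* key and functions are hypothetical):

	DEFINE_STATIC_KEY_FALSE(my_feature_key);

	static void my_hot_path(void)
	{
		/* Compiles to a patched jump/nop rather than a load-and-test */
		if (static_branch_unlikely(&my_feature_key))
			my_feature_slow_path();	/* hypothetical extra work */
	}

	static void my_feature_enable(void)
	{
		/* Slow path: patches code machine-wide, as the comment above notes */
		static_branch_enable(&my_feature_key);
	}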
*/ -extern bool ____wrong_branch_error(void); - #define static_branch_likely(x) \ ({ \ bool branch; \ diff --git a/include/linux/key-type.h b/include/linux/key-type.h index ff9f1d394235..7463355a198b 100644 --- a/include/linux/key-type.h +++ b/include/linux/key-type.h @@ -40,8 +40,7 @@ struct key_construction { */ struct key_preparsed_payload { char *description; /* Proposed key description (or NULL) */ - void *type_data[2]; /* Private key-type data */ - void *payload[2]; /* Proposed payload */ + union key_payload payload; /* Proposed payload */ const void *data; /* Raw data */ size_t datalen; /* Raw datalen */ size_t quotalen; /* Quota length for proposed payload */ diff --git a/include/linux/key.h b/include/linux/key.h index e1d4715f3222..66f705243985 100644 --- a/include/linux/key.h +++ b/include/linux/key.h @@ -89,6 +89,11 @@ struct keyring_index_key { size_t desc_len; }; +union key_payload { + void __rcu *rcu_data0; + void *data[4]; +}; + /*****************************************************************************/ /* * key reference with possession attribute handling @@ -186,28 +191,18 @@ struct key { }; }; - /* type specific data - * - this is used by the keyring type to index the name - */ - union { - struct list_head link; - unsigned long x[2]; - void *p[2]; - int reject_error; - } type_data; - /* key data * - this is used to hold the data actually used in cryptography or * whatever */ union { - union { - unsigned long value; - void __rcu *rcudata; - void *data; - void *data2[2]; - } payload; - struct assoc_array keys; + union key_payload payload; + struct { + /* Keyring bits */ + struct list_head name_link; + struct assoc_array keys; + }; + int reject_error; }; }; @@ -336,12 +331,12 @@ static inline bool key_is_instantiated(const struct key *key) } #define rcu_dereference_key(KEY) \ - (rcu_dereference_protected((KEY)->payload.rcudata, \ + (rcu_dereference_protected((KEY)->payload.rcu_data0, \ rwsem_is_locked(&((struct key *)(KEY))->sem))) #define rcu_assign_keypointer(KEY, PAYLOAD) \ do { \ - rcu_assign_pointer((KEY)->payload.rcudata, (PAYLOAD)); \ + rcu_assign_pointer((KEY)->payload.rcu_data0, (PAYLOAD)); \ } while (0) #ifdef CONFIG_SYSCTL diff --git a/include/linux/kobject.h b/include/linux/kobject.h index 637f67002c5a..e6284591599e 100644 --- a/include/linux/kobject.h +++ b/include/linux/kobject.h @@ -66,7 +66,7 @@ struct kobject { struct kobject *parent; struct kset *kset; struct kobj_type *ktype; - struct kernfs_node *sd; + struct kernfs_node *sd; /* sysfs directory entry */ struct kref kref; #ifdef CONFIG_DEBUG_KOBJECT_RELEASE struct delayed_work release; diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index 1bef9e21e725..242a6d2b53ff 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -24,6 +24,7 @@ #include <linux/err.h> #include <linux/irqflags.h> #include <linux/context_tracking.h> +#include <linux/irqbypass.h> #include <asm/signal.h> #include <linux/kvm.h> @@ -140,6 +141,8 @@ static inline bool is_error_page(struct page *page) #define KVM_REQ_APIC_PAGE_RELOAD 25 #define KVM_REQ_SMI 26 #define KVM_REQ_HV_CRASH 27 +#define KVM_REQ_IOAPIC_EOI_EXIT 28 +#define KVM_REQ_HV_RESET 29 #define KVM_USERSPACE_IRQ_SOURCE_ID 0 #define KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID 1 @@ -231,6 +234,9 @@ struct kvm_vcpu { unsigned long requests; unsigned long guest_debug; + int pre_pcpu; + struct list_head blocked_vcpu_list; + struct mutex mutex; struct kvm_run *run; @@ -329,6 +335,18 @@ struct kvm_kernel_irq_routing_entry { struct hlist_node link; }; +#ifdef 
CONFIG_HAVE_KVM_IRQ_ROUTING +struct kvm_irq_routing_table { + int chip[KVM_NR_IRQCHIPS][KVM_IRQCHIP_NUM_PINS]; + u32 nr_rt_entries; + /* + * Array indexed by gsi. Each entry contains list of irq chips + * the gsi is connected to. + */ + struct hlist_head map[0]; +}; +#endif + #ifndef KVM_PRIVATE_MEM_SLOTS #define KVM_PRIVATE_MEM_SLOTS 0 #endif @@ -455,10 +473,14 @@ void vcpu_put(struct kvm_vcpu *vcpu); #ifdef __KVM_HAVE_IOAPIC void kvm_vcpu_request_scan_ioapic(struct kvm *kvm); +void kvm_arch_irq_routing_update(struct kvm *kvm); #else static inline void kvm_vcpu_request_scan_ioapic(struct kvm *kvm) { } +static inline void kvm_arch_irq_routing_update(struct kvm *kvm) +{ +} #endif #ifdef CONFIG_HAVE_KVM_IRQFD @@ -625,6 +647,8 @@ int kvm_vcpu_write_guest(struct kvm_vcpu *vcpu, gpa_t gpa, const void *data, void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn); void kvm_vcpu_block(struct kvm_vcpu *vcpu); +void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu); +void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu); void kvm_vcpu_kick(struct kvm_vcpu *vcpu); int kvm_vcpu_yield_to(struct kvm_vcpu *target); void kvm_vcpu_on_spin(struct kvm_vcpu *vcpu); @@ -803,10 +827,13 @@ int kvm_irq_map_chip_pin(struct kvm *kvm, unsigned irqchip, unsigned pin); int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level, bool line_status); -int kvm_set_irq_inatomic(struct kvm *kvm, int irq_source_id, u32 irq, int level); int kvm_set_msi(struct kvm_kernel_irq_routing_entry *irq_entry, struct kvm *kvm, int irq_source_id, int level, bool line_status); +int kvm_arch_set_irq_inatomic(struct kvm_kernel_irq_routing_entry *e, + struct kvm *kvm, int irq_source_id, + int level, bool line_status); bool kvm_irq_has_notifier(struct kvm *kvm, unsigned irqchip, unsigned pin); +void kvm_notify_acked_gsi(struct kvm *kvm, int gsi); void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin); void kvm_register_irq_ack_notifier(struct kvm *kvm, struct kvm_irq_ack_notifier *kian); @@ -1002,6 +1029,7 @@ static inline int mmu_notifier_retry(struct kvm *kvm, unsigned long mmu_seq) #endif int kvm_setup_default_irq_routing(struct kvm *kvm); +int kvm_setup_empty_irq_routing(struct kvm *kvm); int kvm_set_irq_routing(struct kvm *kvm, const struct kvm_irq_routing_entry *entries, unsigned nr, @@ -1144,5 +1172,15 @@ static inline void kvm_vcpu_set_dy_eligible(struct kvm_vcpu *vcpu, bool val) { } #endif /* CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT */ -#endif +#ifdef CONFIG_HAVE_KVM_IRQ_BYPASS +int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *, + struct irq_bypass_producer *); +void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *, + struct irq_bypass_producer *); +void kvm_arch_irq_bypass_stop(struct irq_bypass_consumer *); +void kvm_arch_irq_bypass_start(struct irq_bypass_consumer *); +int kvm_arch_update_irqfd_routing(struct kvm *kvm, unsigned int host_irq, + uint32_t guest_irq, bool set); +#endif /* CONFIG_HAVE_KVM_IRQ_BYPASS */ +#endif diff --git a/include/linux/kvm_irqfd.h b/include/linux/kvm_irqfd.h new file mode 100644 index 000000000000..0c1de05098c8 --- /dev/null +++ b/include/linux/kvm_irqfd.h @@ -0,0 +1,71 @@ +/* + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * irqfd: Allows an fd to be used to inject an interrupt to the guest + * Credit goes to Avi Kivity for the original idea. + */ + +#ifndef __LINUX_KVM_IRQFD_H +#define __LINUX_KVM_IRQFD_H + +#include <linux/kvm_host.h> +#include <linux/poll.h> + +/* + * Resampling irqfds are a special variety of irqfds used to emulate + * level triggered interrupts. The interrupt is asserted on eventfd + * trigger. On acknowledgment through the irq ack notifier, the + * interrupt is de-asserted and userspace is notified through the + * resamplefd. All resamplers on the same gsi are de-asserted + * together, so we don't need to track the state of each individual + * user. We can also therefore share the same irq source ID. + */ +struct kvm_kernel_irqfd_resampler { + struct kvm *kvm; + /* + * List of resampling struct _irqfd objects sharing this gsi. + * RCU list modified under kvm->irqfds.resampler_lock + */ + struct list_head list; + struct kvm_irq_ack_notifier notifier; + /* + * Entry in list of kvm->irqfd.resampler_list. Use for sharing + * resamplers among irqfds on the same gsi. + * Accessed and modified under kvm->irqfds.resampler_lock + */ + struct list_head link; +}; + +struct kvm_kernel_irqfd { + /* Used for MSI fast-path */ + struct kvm *kvm; + wait_queue_t wait; + /* Update side is protected by irqfds.lock */ + struct kvm_kernel_irq_routing_entry irq_entry; + seqcount_t irq_entry_sc; + /* Used for level IRQ fast-path */ + int gsi; + struct work_struct inject; + /* The resampler used by this irqfd (resampler-only) */ + struct kvm_kernel_irqfd_resampler *resampler; + /* Eventfd notified on resample (resampler-only) */ + struct eventfd_ctx *resamplefd; + /* Entry in list of irqfds for a resampler (resampler-only) */ + struct list_head resampler_link; + /* Used for setup/shutdown */ + struct eventfd_ctx *eventfd; + struct list_head list; + poll_table pt; + struct work_struct shutdown; + struct irq_bypass_consumer consumer; + struct irq_bypass_producer *producer; +}; + +#endif /* __LINUX_KVM_IRQFD_H */ diff --git a/include/linux/leds.h b/include/linux/leds.h index b122eeafb5dc..fa359c79c825 100644 --- a/include/linux/leds.h +++ b/include/linux/leds.h @@ -283,6 +283,13 @@ static inline void led_trigger_register_simple(const char *name, static inline void led_trigger_unregister_simple(struct led_trigger *trigger) {} static inline void led_trigger_event(struct led_trigger *trigger, enum led_brightness event) {} +static inline void led_trigger_blink(struct led_trigger *trigger, + unsigned long *delay_on, + unsigned long *delay_off) {} +static inline void led_trigger_blink_oneshot(struct led_trigger *trigger, + unsigned long *delay_on, + unsigned long *delay_off, + int invert) {} static inline void led_trigger_set_default(struct led_classdev *led_cdev) {} static inline void led_trigger_set(struct led_classdev *led_cdev, struct led_trigger *trigger) {} diff --git a/include/linux/libata.h b/include/linux/libata.h index c9cfbcdb8d14..83577f8fd15b 100644 --- a/include/linux/libata.h +++ b/include/linux/libata.h @@ -254,6 +254,7 @@ enum { ATA_PFLAG_PIO32 = (1 << 20), /* 32bit PIO */ ATA_PFLAG_PIO32CHANGE = (1 << 21), /* 32bit PIO can be turned on/off */ + ATA_PFLAG_EXTERNAL = (1 << 22), /* eSATA/external port */ /* struct ata_queued_cmd flags */ 
ATA_QCFLAG_ACTIVE = (1 << 0), /* cmd not yet ack'd to scsi lyer */ diff --git a/include/linux/lightnvm.h b/include/linux/lightnvm.h new file mode 100644 index 000000000000..5ebd70d12f35 --- /dev/null +++ b/include/linux/lightnvm.h @@ -0,0 +1,522 @@ +#ifndef NVM_H +#define NVM_H + +enum { + NVM_IO_OK = 0, + NVM_IO_REQUEUE = 1, + NVM_IO_DONE = 2, + NVM_IO_ERR = 3, + + NVM_IOTYPE_NONE = 0, + NVM_IOTYPE_GC = 1, +}; + +#ifdef CONFIG_NVM + +#include <linux/blkdev.h> +#include <linux/types.h> +#include <linux/file.h> +#include <linux/dmapool.h> + +enum { + /* HW Responsibilities */ + NVM_RSP_L2P = 1 << 0, + NVM_RSP_ECC = 1 << 1, + + /* Physical Adressing Mode */ + NVM_ADDRMODE_LINEAR = 0, + NVM_ADDRMODE_CHANNEL = 1, + + /* Plane programming mode for LUN */ + NVM_PLANE_SINGLE = 0, + NVM_PLANE_DOUBLE = 1, + NVM_PLANE_QUAD = 2, + + /* Status codes */ + NVM_RSP_SUCCESS = 0x0, + NVM_RSP_NOT_CHANGEABLE = 0x1, + NVM_RSP_ERR_FAILWRITE = 0x40ff, + NVM_RSP_ERR_EMPTYPAGE = 0x42ff, + + /* Device opcodes */ + NVM_OP_HBREAD = 0x02, + NVM_OP_HBWRITE = 0x81, + NVM_OP_PWRITE = 0x91, + NVM_OP_PREAD = 0x92, + NVM_OP_ERASE = 0x90, + + /* PPA Command Flags */ + NVM_IO_SNGL_ACCESS = 0x0, + NVM_IO_DUAL_ACCESS = 0x1, + NVM_IO_QUAD_ACCESS = 0x2, + + NVM_IO_SUSPEND = 0x80, + NVM_IO_SLC_MODE = 0x100, + NVM_IO_SCRAMBLE_DISABLE = 0x200, +}; + +struct nvm_id_group { + u8 mtype; + u8 fmtype; + u16 res16; + u8 num_ch; + u8 num_lun; + u8 num_pln; + u16 num_blk; + u16 num_pg; + u16 fpg_sz; + u16 csecs; + u16 sos; + u32 trdt; + u32 trdm; + u32 tprt; + u32 tprm; + u32 tbet; + u32 tbem; + u32 mpos; + u16 cpar; + u8 res[913]; +} __packed; + +struct nvm_addr_format { + u8 ch_offset; + u8 ch_len; + u8 lun_offset; + u8 lun_len; + u8 pln_offset; + u8 pln_len; + u8 blk_offset; + u8 blk_len; + u8 pg_offset; + u8 pg_len; + u8 sect_offset; + u8 sect_len; + u8 res[4]; +}; + +struct nvm_id { + u8 ver_id; + u8 vmnt; + u8 cgrps; + u8 res[5]; + u32 cap; + u32 dom; + struct nvm_addr_format ppaf; + u8 ppat; + u8 resv[224]; + struct nvm_id_group groups[4]; +} __packed; + +struct nvm_target { + struct list_head list; + struct nvm_tgt_type *type; + struct gendisk *disk; +}; + +struct nvm_tgt_instance { + struct nvm_tgt_type *tt; +}; + +#define ADDR_EMPTY (~0ULL) + +#define NVM_VERSION_MAJOR 1 +#define NVM_VERSION_MINOR 0 +#define NVM_VERSION_PATCH 0 + +#define NVM_SEC_BITS (8) +#define NVM_PL_BITS (6) +#define NVM_PG_BITS (16) +#define NVM_BLK_BITS (16) +#define NVM_LUN_BITS (10) +#define NVM_CH_BITS (8) + +struct ppa_addr { + union { + /* Channel-based PPA format in nand 4x2x2x2x8x10 */ + struct { + u64 ch : 4; + u64 sec : 2; /* 4 sectors per page */ + u64 pl : 2; /* 4 planes per LUN */ + u64 lun : 2; /* 4 LUNs per channel */ + u64 pg : 8; /* 256 pages per block */ + u64 blk : 10;/* 1024 blocks per plane */ + u64 resved : 36; + } chnl; + + /* Generic structure for all addresses */ + struct { + u64 sec : NVM_SEC_BITS; + u64 pl : NVM_PL_BITS; + u64 pg : NVM_PG_BITS; + u64 blk : NVM_BLK_BITS; + u64 lun : NVM_LUN_BITS; + u64 ch : NVM_CH_BITS; + } g; + + u64 ppa; + }; +} __packed; + +struct nvm_rq { + struct nvm_tgt_instance *ins; + struct nvm_dev *dev; + + struct bio *bio; + + union { + struct ppa_addr ppa_addr; + dma_addr_t dma_ppa_list; + }; + + struct ppa_addr *ppa_list; + + void *metadata; + dma_addr_t dma_metadata; + + uint8_t opcode; + uint16_t nr_pages; + uint16_t flags; +}; + +static inline struct nvm_rq *nvm_rq_from_pdu(void *pdu) +{ + return pdu - sizeof(struct nvm_rq); +} + +static inline void *nvm_rq_to_pdu(struct nvm_rq *rqdata) +{ + return 
rqdata + 1; +} + +struct nvm_block; + +typedef int (nvm_l2p_update_fn)(u64, u32, __le64 *, void *); +typedef int (nvm_bb_update_fn)(u32, void *, unsigned int, void *); +typedef int (nvm_id_fn)(struct request_queue *, struct nvm_id *); +typedef int (nvm_get_l2p_tbl_fn)(struct request_queue *, u64, u32, + nvm_l2p_update_fn *, void *); +typedef int (nvm_op_bb_tbl_fn)(struct request_queue *, int, unsigned int, + nvm_bb_update_fn *, void *); +typedef int (nvm_op_set_bb_fn)(struct request_queue *, struct nvm_rq *, int); +typedef int (nvm_submit_io_fn)(struct request_queue *, struct nvm_rq *); +typedef int (nvm_erase_blk_fn)(struct request_queue *, struct nvm_rq *); +typedef void *(nvm_create_dma_pool_fn)(struct request_queue *, char *); +typedef void (nvm_destroy_dma_pool_fn)(void *); +typedef void *(nvm_dev_dma_alloc_fn)(struct request_queue *, void *, gfp_t, + dma_addr_t *); +typedef void (nvm_dev_dma_free_fn)(void *, void*, dma_addr_t); + +struct nvm_dev_ops { + nvm_id_fn *identity; + nvm_get_l2p_tbl_fn *get_l2p_tbl; + nvm_op_bb_tbl_fn *get_bb_tbl; + nvm_op_set_bb_fn *set_bb; + + nvm_submit_io_fn *submit_io; + nvm_erase_blk_fn *erase_block; + + nvm_create_dma_pool_fn *create_dma_pool; + nvm_destroy_dma_pool_fn *destroy_dma_pool; + nvm_dev_dma_alloc_fn *dev_dma_alloc; + nvm_dev_dma_free_fn *dev_dma_free; + + uint8_t max_phys_sect; +}; + +struct nvm_lun { + int id; + + int lun_id; + int chnl_id; + + unsigned int nr_free_blocks; /* Number of unused blocks */ + struct nvm_block *blocks; + + spinlock_t lock; +}; + +struct nvm_block { + struct list_head list; + struct nvm_lun *lun; + unsigned long id; + + void *priv; + int type; +}; + +struct nvm_dev { + struct nvm_dev_ops *ops; + + struct list_head devices; + struct list_head online_targets; + + /* Media manager */ + struct nvmm_type *mt; + void *mp; + + /* Device information */ + int nr_chnls; + int nr_planes; + int luns_per_chnl; + int sec_per_pg; /* only sectors for a single page */ + int pgs_per_blk; + int blks_per_lun; + int sec_size; + int oob_size; + int addr_mode; + struct nvm_addr_format addr_format; + + /* Calculated/Cached values. These do not reflect the actual usable + * blocks at run-time. 
+ */ + int max_rq_size; + int plane_mode; /* drive device in single, double or quad mode */ + + int sec_per_pl; /* all sectors across planes */ + int sec_per_blk; + int sec_per_lun; + + unsigned long total_pages; + unsigned long total_blocks; + int nr_luns; + unsigned max_pages_per_blk; + + void *ppalist_pool; + + struct nvm_id identity; + + /* Backend device */ + struct request_queue *q; + char name[DISK_NAME_LEN]; +}; + +/* fallback conversion */ +static struct ppa_addr __generic_to_linear_addr(struct nvm_dev *dev, + struct ppa_addr r) +{ + struct ppa_addr l; + + l.ppa = r.g.sec + + r.g.pg * dev->sec_per_pg + + r.g.blk * (dev->pgs_per_blk * + dev->sec_per_pg) + + r.g.lun * (dev->blks_per_lun * + dev->pgs_per_blk * + dev->sec_per_pg) + + r.g.ch * (dev->blks_per_lun * + dev->pgs_per_blk * + dev->luns_per_chnl * + dev->sec_per_pg); + + return l; +} + +/* fallback conversion */ +static struct ppa_addr __linear_to_generic_addr(struct nvm_dev *dev, + struct ppa_addr r) +{ + struct ppa_addr l; + int secs, pgs, blks, luns; + sector_t ppa = r.ppa; + + l.ppa = 0; + + div_u64_rem(ppa, dev->sec_per_pg, &secs); + l.g.sec = secs; + + sector_div(ppa, dev->sec_per_pg); + div_u64_rem(ppa, dev->sec_per_blk, &pgs); + l.g.pg = pgs; + + sector_div(ppa, dev->pgs_per_blk); + div_u64_rem(ppa, dev->blks_per_lun, &blks); + l.g.blk = blks; + + sector_div(ppa, dev->blks_per_lun); + div_u64_rem(ppa, dev->luns_per_chnl, &luns); + l.g.lun = luns; + + sector_div(ppa, dev->luns_per_chnl); + l.g.ch = ppa; + + return l; +} + +static struct ppa_addr __generic_to_chnl_addr(struct ppa_addr r) +{ + struct ppa_addr l; + + l.ppa = 0; + + l.chnl.sec = r.g.sec; + l.chnl.pl = r.g.pl; + l.chnl.pg = r.g.pg; + l.chnl.blk = r.g.blk; + l.chnl.lun = r.g.lun; + l.chnl.ch = r.g.ch; + + return l; +} + +static struct ppa_addr __chnl_to_generic_addr(struct ppa_addr r) +{ + struct ppa_addr l; + + l.ppa = 0; + + l.g.sec = r.chnl.sec; + l.g.pl = r.chnl.pl; + l.g.pg = r.chnl.pg; + l.g.blk = r.chnl.blk; + l.g.lun = r.chnl.lun; + l.g.ch = r.chnl.ch; + + return l; +} + +static inline struct ppa_addr addr_to_generic_mode(struct nvm_dev *dev, + struct ppa_addr gppa) +{ + switch (dev->addr_mode) { + case NVM_ADDRMODE_LINEAR: + return __linear_to_generic_addr(dev, gppa); + case NVM_ADDRMODE_CHANNEL: + return __chnl_to_generic_addr(gppa); + default: + BUG(); + } + return gppa; +} + +static inline struct ppa_addr generic_to_addr_mode(struct nvm_dev *dev, + struct ppa_addr gppa) +{ + switch (dev->addr_mode) { + case NVM_ADDRMODE_LINEAR: + return __generic_to_linear_addr(dev, gppa); + case NVM_ADDRMODE_CHANNEL: + return __generic_to_chnl_addr(gppa); + default: + BUG(); + } + return gppa; +} + +static inline int ppa_empty(struct ppa_addr ppa_addr) +{ + return (ppa_addr.ppa == ADDR_EMPTY); +} + +static inline void ppa_set_empty(struct ppa_addr *ppa_addr) +{ + ppa_addr->ppa = ADDR_EMPTY; +} + +static inline struct ppa_addr block_to_ppa(struct nvm_dev *dev, + struct nvm_block *blk) +{ + struct ppa_addr ppa; + struct nvm_lun *lun = blk->lun; + + ppa.ppa = 0; + ppa.g.blk = blk->id % dev->blks_per_lun; + ppa.g.lun = lun->lun_id; + ppa.g.ch = lun->chnl_id; + + return ppa; +} + +typedef void (nvm_tgt_make_rq_fn)(struct request_queue *, struct bio *); +typedef sector_t (nvm_tgt_capacity_fn)(void *); +typedef int (nvm_tgt_end_io_fn)(struct nvm_rq *, int); +typedef void *(nvm_tgt_init_fn)(struct nvm_dev *, struct gendisk *, int, int); +typedef void (nvm_tgt_exit_fn)(void *); + +struct nvm_tgt_type { + const char *name; + unsigned int version[3]; + + /* target entry 
points */ + nvm_tgt_make_rq_fn *make_rq; + nvm_tgt_capacity_fn *capacity; + nvm_tgt_end_io_fn *end_io; + + /* module-specific init/teardown */ + nvm_tgt_init_fn *init; + nvm_tgt_exit_fn *exit; + + /* For internal use */ + struct list_head list; +}; + +extern int nvm_register_target(struct nvm_tgt_type *); +extern void nvm_unregister_target(struct nvm_tgt_type *); + +extern void *nvm_dev_dma_alloc(struct nvm_dev *, gfp_t, dma_addr_t *); +extern void nvm_dev_dma_free(struct nvm_dev *, void *, dma_addr_t); + +typedef int (nvmm_register_fn)(struct nvm_dev *); +typedef void (nvmm_unregister_fn)(struct nvm_dev *); +typedef struct nvm_block *(nvmm_get_blk_fn)(struct nvm_dev *, + struct nvm_lun *, unsigned long); +typedef void (nvmm_put_blk_fn)(struct nvm_dev *, struct nvm_block *); +typedef int (nvmm_open_blk_fn)(struct nvm_dev *, struct nvm_block *); +typedef int (nvmm_close_blk_fn)(struct nvm_dev *, struct nvm_block *); +typedef void (nvmm_flush_blk_fn)(struct nvm_dev *, struct nvm_block *); +typedef int (nvmm_submit_io_fn)(struct nvm_dev *, struct nvm_rq *); +typedef int (nvmm_end_io_fn)(struct nvm_rq *, int); +typedef int (nvmm_erase_blk_fn)(struct nvm_dev *, struct nvm_block *, + unsigned long); +typedef struct nvm_lun *(nvmm_get_lun_fn)(struct nvm_dev *, int); +typedef void (nvmm_free_blocks_print_fn)(struct nvm_dev *); + +struct nvmm_type { + const char *name; + unsigned int version[3]; + + nvmm_register_fn *register_mgr; + nvmm_unregister_fn *unregister_mgr; + + /* Block administration callbacks */ + nvmm_get_blk_fn *get_blk; + nvmm_put_blk_fn *put_blk; + nvmm_open_blk_fn *open_blk; + nvmm_close_blk_fn *close_blk; + nvmm_flush_blk_fn *flush_blk; + + nvmm_submit_io_fn *submit_io; + nvmm_end_io_fn *end_io; + nvmm_erase_blk_fn *erase_blk; + + /* Configuration management */ + nvmm_get_lun_fn *get_lun; + + /* Statistics */ + nvmm_free_blocks_print_fn *free_blocks_print; + struct list_head list; +}; + +extern int nvm_register_mgr(struct nvmm_type *); +extern void nvm_unregister_mgr(struct nvmm_type *); + +extern struct nvm_block *nvm_get_blk(struct nvm_dev *, struct nvm_lun *, + unsigned long); +extern void nvm_put_blk(struct nvm_dev *, struct nvm_block *); + +extern int nvm_register(struct request_queue *, char *, + struct nvm_dev_ops *); +extern void nvm_unregister(char *); + +extern int nvm_submit_io(struct nvm_dev *, struct nvm_rq *); +extern int nvm_erase_blk(struct nvm_dev *, struct nvm_block *); +#else /* CONFIG_NVM */ +struct nvm_dev_ops; + +static inline int nvm_register(struct request_queue *q, char *disk_name, + struct nvm_dev_ops *ops) +{ + return -EINVAL; +} +static inline void nvm_unregister(char *disk_name) {} +#endif /* CONFIG_NVM */ +#endif /* LIGHTNVM.H */ diff --git a/include/linux/list.h b/include/linux/list.h index 3e3e64a61002..993395a2e55c 100644 --- a/include/linux/list.h +++ b/include/linux/list.h @@ -87,7 +87,7 @@ static inline void list_add_tail(struct list_head *new, struct list_head *head) static inline void __list_del(struct list_head * prev, struct list_head * next) { next->prev = prev; - prev->next = next; + WRITE_ONCE(prev->next, next); } /** @@ -615,7 +615,8 @@ static inline void __hlist_del(struct hlist_node *n) { struct hlist_node *next = n->next; struct hlist_node **pprev = n->pprev; - *pprev = next; + + WRITE_ONCE(*pprev, next); if (next) next->pprev = pprev; } diff --git a/include/linux/list_bl.h b/include/linux/list_bl.h index 2eb88556c5c5..8132214e8efd 100644 --- a/include/linux/list_bl.h +++ b/include/linux/list_bl.h @@ -93,9 +93,10 @@ static inline 
void __hlist_bl_del(struct hlist_bl_node *n) LIST_BL_BUG_ON((unsigned long)n & LIST_BL_LOCKMASK); /* pprev may be `first`, so be careful not to lose the lock bit */ - *pprev = (struct hlist_bl_node *) + WRITE_ONCE(*pprev, + (struct hlist_bl_node *) ((unsigned long)next | - ((unsigned long)*pprev & LIST_BL_LOCKMASK)); + ((unsigned long)*pprev & LIST_BL_LOCKMASK))); if (next) next->pprev = pprev; } diff --git a/include/linux/list_nulls.h b/include/linux/list_nulls.h index f266661d2666..444d2b1313bd 100644 --- a/include/linux/list_nulls.h +++ b/include/linux/list_nulls.h @@ -76,7 +76,8 @@ static inline void __hlist_nulls_del(struct hlist_nulls_node *n) { struct hlist_nulls_node *next = n->next; struct hlist_nulls_node **pprev = n->pprev; - *pprev = next; + + WRITE_ONCE(*pprev, next); if (!is_a_nulls(next)) next->pprev = pprev; } diff --git a/include/linux/mei_cl_bus.h b/include/linux/mei_cl_bus.h index 0962b2ca628a..e746919530f5 100644 --- a/include/linux/mei_cl_bus.h +++ b/include/linux/mei_cl_bus.h @@ -8,8 +8,8 @@ struct mei_cl_device; struct mei_device; -typedef void (*mei_cl_event_cb_t)(struct mei_cl_device *device, - u32 events, void *context); +typedef void (*mei_cldev_event_cb_t)(struct mei_cl_device *cldev, + u32 events, void *context); /** * struct mei_cl_device - MEI device handle @@ -45,7 +45,7 @@ struct mei_cl_device { char name[MEI_CL_NAME_SIZE]; struct work_struct event_work; - mei_cl_event_cb_t event_cb; + mei_cldev_event_cb_t event_cb; void *event_context; unsigned long events_mask; unsigned long events; @@ -62,33 +62,37 @@ struct mei_cl_driver { const struct mei_cl_device_id *id_table; - int (*probe)(struct mei_cl_device *dev, + int (*probe)(struct mei_cl_device *cldev, const struct mei_cl_device_id *id); - int (*remove)(struct mei_cl_device *dev); + int (*remove)(struct mei_cl_device *cldev); }; -int __mei_cl_driver_register(struct mei_cl_driver *driver, +int __mei_cldev_driver_register(struct mei_cl_driver *cldrv, struct module *owner); -#define mei_cl_driver_register(driver) \ - __mei_cl_driver_register(driver, THIS_MODULE) +#define mei_cldev_driver_register(cldrv) \ + __mei_cldev_driver_register(cldrv, THIS_MODULE) -void mei_cl_driver_unregister(struct mei_cl_driver *driver); +void mei_cldev_driver_unregister(struct mei_cl_driver *cldrv); -ssize_t mei_cl_send(struct mei_cl_device *device, u8 *buf, size_t length); -ssize_t mei_cl_recv(struct mei_cl_device *device, u8 *buf, size_t length); +ssize_t mei_cldev_send(struct mei_cl_device *cldev, u8 *buf, size_t length); +ssize_t mei_cldev_recv(struct mei_cl_device *cldev, u8 *buf, size_t length); -int mei_cl_register_event_cb(struct mei_cl_device *device, - unsigned long event_mask, - mei_cl_event_cb_t read_cb, void *context); +int mei_cldev_register_event_cb(struct mei_cl_device *cldev, + unsigned long event_mask, + mei_cldev_event_cb_t read_cb, void *context); #define MEI_CL_EVENT_RX 0 #define MEI_CL_EVENT_TX 1 #define MEI_CL_EVENT_NOTIF 2 -void *mei_cl_get_drvdata(const struct mei_cl_device *device); -void mei_cl_set_drvdata(struct mei_cl_device *device, void *data); +const uuid_le *mei_cldev_uuid(const struct mei_cl_device *cldev); +u8 mei_cldev_ver(const struct mei_cl_device *cldev); -int mei_cl_enable_device(struct mei_cl_device *device); -int mei_cl_disable_device(struct mei_cl_device *device); +void *mei_cldev_get_drvdata(const struct mei_cl_device *cldev); +void mei_cldev_set_drvdata(struct mei_cl_device *cldev, void *data); + +int mei_cldev_enable(struct mei_cl_device *cldev); +int mei_cldev_disable(struct 
mei_cl_device *cldev); +bool mei_cldev_enabled(struct mei_cl_device *cldev); #endif /* _LINUX_MEI_CL_BUS_H */ diff --git a/include/linux/memblock.h b/include/linux/memblock.h index c518eb589260..24daf8fc4d7c 100644 --- a/include/linux/memblock.h +++ b/include/linux/memblock.h @@ -89,10 +89,6 @@ int memblock_add_range(struct memblock_type *type, phys_addr_t base, phys_addr_t size, int nid, unsigned long flags); -int memblock_remove_range(struct memblock_type *type, - phys_addr_t base, - phys_addr_t size); - void __next_mem_range(u64 *idx, int nid, ulong flags, struct memblock_type *type_a, struct memblock_type *type_b, phys_addr_t *out_start, diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index ad800e62cb7a..cd0e2413c358 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -213,6 +213,9 @@ struct mem_cgroup { /* OOM-Killer disable */ int oom_kill_disable; + /* handle for "memory.events" */ + struct cgroup_file events_file; + /* protect arrays of thresholds */ struct mutex thresholds_lock; @@ -242,7 +245,6 @@ struct mem_cgroup { * percpu counter. */ struct mem_cgroup_stat_cpu __percpu *stat; - spinlock_t pcp_counter_lock; #if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_INET) struct cg_proto tcp_mem; @@ -286,6 +288,7 @@ static inline void mem_cgroup_events(struct mem_cgroup *memcg, unsigned int nr) { this_cpu_add(memcg->stat->events[idx], nr); + cgroup_file_notify(&memcg->events_file); } bool mem_cgroup_low(struct mem_cgroup *root, struct mem_cgroup *memcg); @@ -298,8 +301,7 @@ void mem_cgroup_cancel_charge(struct page *page, struct mem_cgroup *memcg); void mem_cgroup_uncharge(struct page *page); void mem_cgroup_uncharge_list(struct list_head *page_list); -void mem_cgroup_migrate(struct page *oldpage, struct page *newpage, - bool lrucare); +void mem_cgroup_replace_page(struct page *oldpage, struct page *newpage); struct lruvec *mem_cgroup_zone_lruvec(struct zone *, struct mem_cgroup *); struct lruvec *mem_cgroup_page_lruvec(struct page *, struct zone *); @@ -347,9 +349,7 @@ ino_t page_cgroup_ino(struct page *page); static inline bool mem_cgroup_disabled(void) { - if (memory_cgrp_subsys.disabled) - return true; - return false; + return !cgroup_subsys_enabled(memory_cgrp_subsys); } /* @@ -383,7 +383,7 @@ unsigned long mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list lru) return mz->lru_size[lru]; } -static inline int mem_cgroup_inactive_anon_is_low(struct lruvec *lruvec) +static inline bool mem_cgroup_inactive_anon_is_low(struct lruvec *lruvec) { unsigned long inactive_ratio; unsigned long inactive; @@ -402,24 +402,26 @@ static inline int mem_cgroup_inactive_anon_is_low(struct lruvec *lruvec) return inactive * inactive_ratio < active; } +void mem_cgroup_handle_over_high(void); + void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p); static inline void mem_cgroup_oom_enable(void) { - WARN_ON(current->memcg_oom.may_oom); - current->memcg_oom.may_oom = 1; + WARN_ON(current->memcg_may_oom); + current->memcg_may_oom = 1; } static inline void mem_cgroup_oom_disable(void) { - WARN_ON(!current->memcg_oom.may_oom); - current->memcg_oom.may_oom = 0; + WARN_ON(!current->memcg_may_oom); + current->memcg_may_oom = 0; } static inline bool task_in_memcg_oom(struct task_struct *p) { - return p->memcg_oom.memcg; + return p->memcg_in_oom; } bool mem_cgroup_oom_synchronize(bool wait); @@ -536,9 +538,7 @@ static inline void mem_cgroup_uncharge_list(struct list_head *page_list) { } -static inline void mem_cgroup_migrate(struct page 
*oldpage, - struct page *newpage, - bool lrucare) +static inline void mem_cgroup_replace_page(struct page *old, struct page *new) { } @@ -584,10 +584,10 @@ static inline bool mem_cgroup_disabled(void) return true; } -static inline int +static inline bool mem_cgroup_inactive_anon_is_low(struct lruvec *lruvec) { - return 1; + return true; } static inline bool mem_cgroup_lruvec_online(struct lruvec *lruvec) @@ -621,6 +621,10 @@ static inline void mem_cgroup_end_page_stat(struct mem_cgroup *memcg) { } +static inline void mem_cgroup_handle_over_high(void) +{ +} + static inline void mem_cgroup_oom_enable(void) { } @@ -677,8 +681,9 @@ enum { struct list_head *mem_cgroup_cgwb_list(struct mem_cgroup *memcg); struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb); -void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pavail, - unsigned long *pdirty, unsigned long *pwriteback); +void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages, + unsigned long *pheadroom, unsigned long *pdirty, + unsigned long *pwriteback); #else /* CONFIG_CGROUP_WRITEBACK */ @@ -688,7 +693,8 @@ static inline struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb) } static inline void mem_cgroup_wb_stats(struct bdi_writeback *wb, - unsigned long *pavail, + unsigned long *pfilepages, + unsigned long *pheadroom, unsigned long *pdirty, unsigned long *pwriteback) { @@ -745,11 +751,10 @@ static inline bool memcg_kmem_is_active(struct mem_cgroup *memcg) * conditions, but because they are pretty simple, they are expected to be * fast. */ -bool __memcg_kmem_newpage_charge(gfp_t gfp, struct mem_cgroup **memcg, - int order); -void __memcg_kmem_commit_charge(struct page *page, - struct mem_cgroup *memcg, int order); -void __memcg_kmem_uncharge_pages(struct page *page, int order); +int __memcg_kmem_charge_memcg(struct page *page, gfp_t gfp, int order, + struct mem_cgroup *memcg); +int __memcg_kmem_charge(struct page *page, gfp_t gfp, int order); +void __memcg_kmem_uncharge(struct page *page, int order); /* * helper for acessing a memcg's index. It will be used as an index in the @@ -764,77 +769,42 @@ static inline int memcg_cache_id(struct mem_cgroup *memcg) struct kmem_cache *__memcg_kmem_get_cache(struct kmem_cache *cachep); void __memcg_kmem_put_cache(struct kmem_cache *cachep); -struct mem_cgroup *__mem_cgroup_from_kmem(void *ptr); - -int memcg_charge_kmem(struct mem_cgroup *memcg, gfp_t gfp, - unsigned long nr_pages); -void memcg_uncharge_kmem(struct mem_cgroup *memcg, unsigned long nr_pages); - -/** - * memcg_kmem_newpage_charge: verify if a new kmem allocation is allowed. - * @gfp: the gfp allocation flags. - * @memcg: a pointer to the memcg this was charged against. - * @order: allocation order. - * - * returns true if the memcg where the current task belongs can hold this - * allocation. - * - * We return true automatically if this allocation is not to be accounted to - * any memcg. - */ -static inline bool -memcg_kmem_newpage_charge(gfp_t gfp, struct mem_cgroup **memcg, int order) +static inline bool __memcg_kmem_bypass(gfp_t gfp) { if (!memcg_kmem_enabled()) return true; - if (gfp & __GFP_NOACCOUNT) return true; - /* - * __GFP_NOFAIL allocations will move on even if charging is not - * possible. Therefore we don't even try, and have this allocation - * unaccounted. We could in theory charge it forcibly, but we hope - * those allocations are rare, and won't be worth the trouble. 
- */ - if (gfp & __GFP_NOFAIL) - return true; if (in_interrupt() || (!current->mm) || (current->flags & PF_KTHREAD)) return true; - - /* If the test is dying, just let it go. */ - if (unlikely(fatal_signal_pending(current))) - return true; - - return __memcg_kmem_newpage_charge(gfp, memcg, order); + return false; } /** - * memcg_kmem_uncharge_pages: uncharge pages from memcg - * @page: pointer to struct page being freed - * @order: allocation order. + * memcg_kmem_charge: charge a kmem page + * @page: page to charge + * @gfp: reclaim mode + * @order: allocation order + * + * Returns 0 on success, an error code on failure. */ -static inline void -memcg_kmem_uncharge_pages(struct page *page, int order) +static __always_inline int memcg_kmem_charge(struct page *page, + gfp_t gfp, int order) { - if (memcg_kmem_enabled()) - __memcg_kmem_uncharge_pages(page, order); + if (__memcg_kmem_bypass(gfp)) + return 0; + return __memcg_kmem_charge(page, gfp, order); } /** - * memcg_kmem_commit_charge: embeds correct memcg in a page - * @page: pointer to struct page recently allocated - * @memcg: the memcg structure we charged against - * @order: allocation order. - * - * Needs to be called after memcg_kmem_newpage_charge, regardless of success or - * failure of the allocation. if @page is NULL, this function will revert the - * charges. Otherwise, it will commit @page to @memcg. + * memcg_kmem_uncharge: uncharge a kmem page + * @page: page to uncharge + * @order: allocation order */ -static inline void -memcg_kmem_commit_charge(struct page *page, struct mem_cgroup *memcg, int order) +static __always_inline void memcg_kmem_uncharge(struct page *page, int order) { - if (memcg_kmem_enabled() && memcg) - __memcg_kmem_commit_charge(page, memcg, order); + if (memcg_kmem_enabled()) + __memcg_kmem_uncharge(page, order); } /** @@ -847,17 +817,8 @@ memcg_kmem_commit_charge(struct page *page, struct mem_cgroup *memcg, int order) static __always_inline struct kmem_cache * memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp) { - if (!memcg_kmem_enabled()) - return cachep; - if (gfp & __GFP_NOACCOUNT) - return cachep; - if (gfp & __GFP_NOFAIL) + if (__memcg_kmem_bypass(gfp)) return cachep; - if (in_interrupt() || (!current->mm) || (current->flags & PF_KTHREAD)) - return cachep; - if (unlikely(fatal_signal_pending(current))) - return cachep; - return __memcg_kmem_get_cache(cachep); } @@ -866,13 +827,6 @@ static __always_inline void memcg_kmem_put_cache(struct kmem_cache *cachep) if (memcg_kmem_enabled()) __memcg_kmem_put_cache(cachep); } - -static __always_inline struct mem_cgroup *mem_cgroup_from_kmem(void *ptr) -{ - if (!memcg_kmem_enabled()) - return NULL; - return __mem_cgroup_from_kmem(ptr); -} #else #define for_each_memcg_cache_index(_idx) \ for (; NULL; ) @@ -887,18 +841,12 @@ static inline bool memcg_kmem_is_active(struct mem_cgroup *memcg) return false; } -static inline bool -memcg_kmem_newpage_charge(gfp_t gfp, struct mem_cgroup **memcg, int order) +static inline int memcg_kmem_charge(struct page *page, gfp_t gfp, int order) { - return true; + return 0; } -static inline void memcg_kmem_uncharge_pages(struct page *page, int order) -{ -} - -static inline void -memcg_kmem_commit_charge(struct page *page, struct mem_cgroup *memcg, int order) +static inline void memcg_kmem_uncharge(struct page *page, int order) { } @@ -924,11 +872,5 @@ memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp) static inline void memcg_kmem_put_cache(struct kmem_cache *cachep) { } - -static inline struct mem_cgroup 
*mem_cgroup_from_kmem(void *ptr) -{ - return NULL; -} #endif /* CONFIG_MEMCG_KMEM */ #endif /* _LINUX_MEMCONTROL_H */ - diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h index 8f60e899b33c..2ea574ff9714 100644 --- a/include/linux/memory_hotplug.h +++ b/include/linux/memory_hotplug.h @@ -11,6 +11,7 @@ struct zone; struct pglist_data; struct mem_section; struct memory_block; +struct resource; #ifdef CONFIG_MEMORY_HOTPLUG @@ -266,6 +267,7 @@ static inline void remove_memory(int nid, u64 start, u64 size) {} extern int walk_memory_range(unsigned long start_pfn, unsigned long end_pfn, void *arg, int (*func)(struct memory_block *, void *)); extern int add_memory(int nid, u64 start, u64 size); +extern int add_memory_resource(int nid, struct resource *resource); extern int zone_for_memory(int nid, u64 start, u64 size, int zone_default, bool for_device); extern int arch_add_memory(int nid, u64 start, u64 size, bool for_device); diff --git a/include/linux/mfd/88pm80x.h b/include/linux/mfd/88pm80x.h index 8fcad63fab55..d409ceb2231e 100644 --- a/include/linux/mfd/88pm80x.h +++ b/include/linux/mfd/88pm80x.h @@ -21,6 +21,7 @@ enum { CHIP_INVALID = 0, CHIP_PM800, CHIP_PM805, + CHIP_PM860, CHIP_MAX, }; diff --git a/include/linux/mfd/arizona/pdata.h b/include/linux/mfd/arizona/pdata.h index 1dc385850ba2..57b45caaea80 100644 --- a/include/linux/mfd/arizona/pdata.h +++ b/include/linux/mfd/arizona/pdata.h @@ -124,6 +124,9 @@ struct arizona_pdata { /** Channel to use for headphone detection */ unsigned int hpdet_channel; + /** Use software comparison to determine mic presence */ + bool micd_software_compare; + /** Extra debounce timeout used during initial mic detection (ms) */ unsigned int micd_detect_debounce; @@ -181,6 +184,9 @@ struct arizona_pdata { /** GPIO for primary IRQ (used for edge triggered emulation) */ int irq_gpio; + + /** General purpose switch control */ + unsigned int gpsw; }; #endif diff --git a/include/linux/mfd/arizona/registers.h b/include/linux/mfd/arizona/registers.h index fdd70b3c7418..cd7e78eae006 100644 --- a/include/linux/mfd/arizona/registers.h +++ b/include/linux/mfd/arizona/registers.h @@ -242,6 +242,7 @@ #define ARIZONA_HP1_SHORT_CIRCUIT_CTRL 0x4A0 #define ARIZONA_HP2_SHORT_CIRCUIT_CTRL 0x4A1 #define ARIZONA_HP3_SHORT_CIRCUIT_CTRL 0x4A2 +#define ARIZONA_HP_TEST_CTRL_1 0x4A4 #define ARIZONA_SPK_CTRL_2 0x4B5 #define ARIZONA_SPK_CTRL_3 0x4B6 #define ARIZONA_DAC_COMP_1 0x4DC @@ -1064,6 +1065,16 @@ #define ARIZONA_CLOCK_CONTROL 0xF00 #define ARIZONA_ANC_SRC 0xF01 #define ARIZONA_DSP_STATUS 0xF02 +#define ARIZONA_ANC_COEFF_START 0xF08 +#define ARIZONA_ANC_COEFF_END 0xF12 +#define ARIZONA_FCL_FILTER_CONTROL 0xF15 +#define ARIZONA_FCL_ADC_REFORMATTER_CONTROL 0xF17 +#define ARIZONA_FCL_COEFF_START 0xF18 +#define ARIZONA_FCL_COEFF_END 0xF69 +#define ARIZONA_FCR_FILTER_CONTROL 0xF70 +#define ARIZONA_FCR_ADC_REFORMATTER_CONTROL 0xF72 +#define ARIZONA_FCR_COEFF_START 0xF73 +#define ARIZONA_FCR_COEFF_END 0xFC4 #define ARIZONA_DSP1_CONTROL_1 0x1100 #define ARIZONA_DSP1_CLOCKING_1 0x1101 #define ARIZONA_DSP1_STATUS_1 0x1104 @@ -2359,9 +2370,9 @@ #define ARIZONA_ACCDET_SRC_MASK 0x2000 /* ACCDET_SRC */ #define ARIZONA_ACCDET_SRC_SHIFT 13 /* ACCDET_SRC */ #define ARIZONA_ACCDET_SRC_WIDTH 1 /* ACCDET_SRC */ -#define ARIZONA_ACCDET_MODE_MASK 0x0003 /* ACCDET_MODE - [1:0] */ -#define ARIZONA_ACCDET_MODE_SHIFT 0 /* ACCDET_MODE - [1:0] */ -#define ARIZONA_ACCDET_MODE_WIDTH 2 /* ACCDET_MODE - [1:0] */ +#define ARIZONA_ACCDET_MODE_MASK 0x0007 /* ACCDET_MODE - [2:0] */ +#define 
ARIZONA_ACCDET_MODE_SHIFT 0 /* ACCDET_MODE - [2:0] */ +#define ARIZONA_ACCDET_MODE_WIDTH 3 /* ACCDET_MODE - [2:0] */ /* * R667 (0x29B) - Headphone Detect 1 @@ -3702,6 +3713,13 @@ #define ARIZONA_HP3_SC_ENA_WIDTH 1 /* HP3_SC_ENA */ /* + * R1188 (0x4A4) HP Test Ctrl 1 + */ +#define ARIZONA_HP1_TST_CAP_SEL_MASK 0x0003 /* HP1_TST_CAP_SEL - [1:0] */ +#define ARIZONA_HP1_TST_CAP_SEL_SHIFT 0 /* HP1_TST_CAP_SEL - [1:0] */ +#define ARIZONA_HP1_TST_CAP_SEL_WIDTH 2 /* HP1_TST_CAP_SEL - [1:0] */ + +/* * R1244 (0x4DC) - DAC comp 1 */ #define ARIZONA_OUT_COMP_COEFF_MASK 0xFFFF /* OUT_COMP_COEFF - [15:0] */ @@ -8043,6 +8061,66 @@ #define ARIZONA_ISRC3_NOTCH_ENA_WIDTH 1 /* ISRC3_NOTCH_ENA */ /* + * R3840 (0xF00) - Clock Control + */ +#define ARIZONA_EXT_NG_SEL_CLR 0x0080 /* EXT_NG_SEL_CLR */ +#define ARIZONA_EXT_NG_SEL_CLR_MASK 0x0080 /* EXT_NG_SEL_CLR */ +#define ARIZONA_EXT_NG_SEL_CLR_SHIFT 7 /* EXT_NG_SEL_CLR */ +#define ARIZONA_EXT_NG_SEL_CLR_WIDTH 1 /* EXT_NG_SEL_CLR */ +#define ARIZONA_EXT_NG_SEL_SET 0x0040 /* EXT_NG_SEL_SET */ +#define ARIZONA_EXT_NG_SEL_SET_MASK 0x0040 /* EXT_NG_SEL_SET */ +#define ARIZONA_EXT_NG_SEL_SET_SHIFT 6 /* EXT_NG_SEL_SET */ +#define ARIZONA_EXT_NG_SEL_SET_WIDTH 1 /* EXT_NG_SEL_SET */ +#define ARIZONA_CLK_R_ENA_CLR 0x0020 /* CLK_R_ENA_CLR */ +#define ARIZONA_CLK_R_ENA_CLR_MASK 0x0020 /* CLK_R_ENA_CLR */ +#define ARIZONA_CLK_R_ENA_CLR_SHIFT 5 /* CLK_R_ENA_CLR */ +#define ARIZONA_CLK_R_ENA_CLR_WIDTH 1 /* CLK_R_ENA_CLR */ +#define ARIZONA_CLK_R_ENA_SET 0x0010 /* CLK_R_ENA_SET */ +#define ARIZONA_CLK_R_ENA_SET_MASK 0x0010 /* CLK_R_ENA_SET */ +#define ARIZONA_CLK_R_ENA_SET_SHIFT 4 /* CLK_R_ENA_SET */ +#define ARIZONA_CLK_R_ENA_SET_WIDTH 1 /* CLK_R_ENA_SET */ +#define ARIZONA_CLK_NG_ENA_CLR 0x0008 /* CLK_NG_ENA_CLR */ +#define ARIZONA_CLK_NG_ENA_CLR_MASK 0x0008 /* CLK_NG_ENA_CLR */ +#define ARIZONA_CLK_NG_ENA_CLR_SHIFT 3 /* CLK_NG_ENA_CLR */ +#define ARIZONA_CLK_NG_ENA_CLR_WIDTH 1 /* CLK_NG_ENA_CLR */ +#define ARIZONA_CLK_NG_ENA_SET 0x0004 /* CLK_NG_ENA_SET */ +#define ARIZONA_CLK_NG_ENA_SET_MASK 0x0004 /* CLK_NG_ENA_SET */ +#define ARIZONA_CLK_NG_ENA_SET_SHIFT 2 /* CLK_NG_ENA_SET */ +#define ARIZONA_CLK_NG_ENA_SET_WIDTH 1 /* CLK_NG_ENA_SET */ +#define ARIZONA_CLK_L_ENA_CLR 0x0002 /* CLK_L_ENA_CLR */ +#define ARIZONA_CLK_L_ENA_CLR_MASK 0x0002 /* CLK_L_ENA_CLR */ +#define ARIZONA_CLK_L_ENA_CLR_SHIFT 1 /* CLK_L_ENA_CLR */ +#define ARIZONA_CLK_L_ENA_CLR_WIDTH 1 /* CLK_L_ENA_CLR */ +#define ARIZONA_CLK_L_ENA_SET 0x0001 /* CLK_L_ENA_SET */ +#define ARIZONA_CLK_L_ENA_SET_MASK 0x0001 /* CLK_L_ENA_SET */ +#define ARIZONA_CLK_L_ENA_SET_SHIFT 0 /* CLK_L_ENA_SET */ +#define ARIZONA_CLK_L_ENA_SET_WIDTH 1 /* CLK_L_ENA_SET */ + +/* + * R3841 (0xF01) - ANC SRC + */ +#define ARIZONA_IN_RXANCR_SEL_MASK 0x0070 /* IN_RXANCR_SEL - [4:6] */ +#define ARIZONA_IN_RXANCR_SEL_SHIFT 4 /* IN_RXANCR_SEL - [4:6] */ +#define ARIZONA_IN_RXANCR_SEL_WIDTH 3 /* IN_RXANCR_SEL - [4:6] */ +#define ARIZONA_IN_RXANCL_SEL_MASK 0x0007 /* IN_RXANCL_SEL - [0:2] */ +#define ARIZONA_IN_RXANCL_SEL_SHIFT 0 /* IN_RXANCL_SEL - [0:2] */ +#define ARIZONA_IN_RXANCL_SEL_WIDTH 3 /* IN_RXANCL_SEL - [0:2] */ + +/* + * R3863 (0xF17) - FCL ADC Reformatter Control + */ +#define ARIZONA_FCL_MIC_MODE_SEL 0x000C /* FCL_MIC_MODE_SEL - [2:3] */ +#define ARIZONA_FCL_MIC_MODE_SEL_SHIFT 2 /* FCL_MIC_MODE_SEL - [2:3] */ +#define ARIZONA_FCL_MIC_MODE_SEL_WIDTH 2 /* FCL_MIC_MODE_SEL - [2:3] */ + +/* + * R3954 (0xF72) - FCR ADC Reformatter Control + */ +#define ARIZONA_FCR_MIC_MODE_SEL 0x000C /* FCR_MIC_MODE_SEL - [2:3] */ +#define 
ARIZONA_FCR_MIC_MODE_SEL_SHIFT 2 /* FCR_MIC_MODE_SEL - [2:3] */ +#define ARIZONA_FCR_MIC_MODE_SEL_WIDTH 2 /* FCR_MIC_MODE_SEL - [2:3] */ + +/* * R4352 (0x1100) - DSP1 Control 1 */ #define ARIZONA_DSP1_RATE_MASK 0x7800 /* DSP1_RATE - [14:11] */ diff --git a/include/linux/mfd/axp20x.h b/include/linux/mfd/axp20x.h index cc8ad1e1a307..b24c771cebd5 100644 --- a/include/linux/mfd/axp20x.h +++ b/include/linux/mfd/axp20x.h @@ -11,6 +11,8 @@ #ifndef __LINUX_MFD_AXP20X_H #define __LINUX_MFD_AXP20X_H +#include <linux/regmap.h> + enum { AXP152_ID = 0, AXP202_ID, @@ -438,4 +440,26 @@ struct axp288_extcon_pdata { struct gpio_desc *gpio_mux_cntl; }; +/* generic helper function for reading 9-16 bit wide regs */ +static inline int axp20x_read_variable_width(struct regmap *regmap, + unsigned int reg, unsigned int width) +{ + unsigned int reg_val, result; + int err; + + err = regmap_read(regmap, reg, ®_val); + if (err) + return err; + + result = reg_val << (width - 8); + + err = regmap_read(regmap, reg + 1, ®_val); + if (err) + return err; + + result |= reg_val; + + return result; +} + #endif /* __LINUX_MFD_AXP20X_H */ diff --git a/include/linux/mfd/core.h b/include/linux/mfd/core.h index a76bc100bf97..27dac3ff18b9 100644 --- a/include/linux/mfd/core.h +++ b/include/linux/mfd/core.h @@ -18,6 +18,12 @@ struct irq_domain; +/* Matches ACPI PNP id, either _HID or _CID, or ACPI _ADR */ +struct mfd_cell_acpi_match { + const char *pnpid; + const unsigned long long adr; +}; + /* * This struct describes the MFD part ("cell"). * After registration the copy of this structure will become the platform data @@ -44,8 +50,8 @@ struct mfd_cell { */ const char *of_compatible; - /* Matches ACPI PNP id, either _HID or _CID */ - const char *acpi_pnpid; + /* Matches ACPI */ + const struct mfd_cell_acpi_match *acpi_match; /* * These resources can be specified relative to the parent device. 
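As an illustrative aside (not part of the patch itself): the new axp20x_read_variable_width() helper added to axp20x.h above stitches a 9-16 bit value together from two consecutive 8-bit registers, returning either the combined value or a negative regmap error. A minimal usage sketch, assuming a hypothetical 12-bit ADC register AXP20X_EXAMPLE_ADC_H whose low nibble lives in the following register:

#include <linux/mfd/axp20x.h>
#include <linux/regmap.h>

/* AXP20X_EXAMPLE_ADC_H is a placeholder, not a register from the patch */
#define AXP20X_EXAMPLE_ADC_H	0x56

static int axp20x_example_read_adc(struct regmap *regmap)
{
	/* high 8 bits in ADC_H, low 4 bits in ADC_H + 1 => width = 12 */
	int ret = axp20x_read_variable_width(regmap, AXP20X_EXAMPLE_ADC_H, 12);

	if (ret < 0)
		return ret;	/* regmap_read() failure is passed through */

	return ret;		/* raw 0..4095 reading */
}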
diff --git a/include/linux/mfd/da9052/reg.h b/include/linux/mfd/da9052/reg.h index c4dd3a8add21..5010f978725c 100644 --- a/include/linux/mfd/da9052/reg.h +++ b/include/linux/mfd/da9052/reg.h @@ -65,6 +65,9 @@ #define DA9052_GPIO_2_3_REG 22 #define DA9052_GPIO_4_5_REG 23 #define DA9052_GPIO_6_7_REG 24 +#define DA9052_GPIO_8_9_REG 25 +#define DA9052_GPIO_10_11_REG 26 +#define DA9052_GPIO_12_13_REG 27 #define DA9052_GPIO_14_15_REG 28 /* POWER SEQUENCER CONTROL REGISTERS */ diff --git a/include/linux/mfd/da9150/core.h b/include/linux/mfd/da9150/core.h index 76e668933a77..1bf50caeb9fa 100644 --- a/include/linux/mfd/da9150/core.h +++ b/include/linux/mfd/da9150/core.h @@ -15,6 +15,7 @@ #define __DA9150_CORE_H #include <linux/device.h> +#include <linux/i2c.h> #include <linux/interrupt.h> #include <linux/regmap.h> @@ -46,23 +47,39 @@ #define DA9150_IRQ_GPADC 19 #define DA9150_IRQ_WKUP 20 +/* I2C sub-device address */ +#define DA9150_QIF_I2C_ADDR_LSB 0x5 + +struct da9150_fg_pdata { + u32 update_interval; /* msecs */ + u8 warn_soc_lvl; /* % value */ + u8 crit_soc_lvl; /* % value */ +}; + struct da9150_pdata { int irq_base; + struct da9150_fg_pdata *fg_pdata; }; struct da9150 { struct device *dev; struct regmap *regmap; + struct i2c_client *core_qif; + struct regmap_irq_chip_data *regmap_irq_data; int irq; int irq_base; }; -/* Device I/O */ +/* Device I/O - Query Interface for FG and standard register access */ +void da9150_read_qif(struct da9150 *da9150, u8 addr, int count, u8 *buf); +void da9150_write_qif(struct da9150 *da9150, u8 addr, int count, const u8 *buf); + u8 da9150_reg_read(struct da9150 *da9150, u16 reg); void da9150_reg_write(struct da9150 *da9150, u16 reg, u8 val); void da9150_set_bits(struct da9150 *da9150, u16 reg, u8 mask, u8 val); void da9150_bulk_read(struct da9150 *da9150, u16 reg, int count, u8 *buf); void da9150_bulk_write(struct da9150 *da9150, u16 reg, int count, const u8 *buf); + #endif /* __DA9150_CORE_H */ diff --git a/include/linux/mfd/intel_bxtwc.h b/include/linux/mfd/intel_bxtwc.h new file mode 100644 index 000000000000..1a0ee9d6efe9 --- /dev/null +++ b/include/linux/mfd/intel_bxtwc.h @@ -0,0 +1,69 @@ +/* + * intel_bxtwc.h - Header file for Intel Broxton Whiskey Cove PMIC + * + * Copyright (C) 2015 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+ */ + +#include <linux/mfd/intel_soc_pmic.h> + +#ifndef __INTEL_BXTWC_H__ +#define __INTEL_BXTWC_H__ + +/* BXT WC devices */ +#define BXTWC_DEVICE1_ADDR 0x4E +#define BXTWC_DEVICE2_ADDR 0x4F +#define BXTWC_DEVICE3_ADDR 0x5E + +/* device1 Registers */ +#define BXTWC_CHIPID 0x4E00 +#define BXTWC_CHIPVER 0x4E01 + +#define BXTWC_SCHGRIRQ0_ADDR 0x5E1A +#define BXTWC_CHGRCTRL0_ADDR 0x5E16 +#define BXTWC_CHGRCTRL1_ADDR 0x5E17 +#define BXTWC_CHGRCTRL2_ADDR 0x5E18 +#define BXTWC_CHGRSTATUS_ADDR 0x5E19 +#define BXTWC_THRMBATZONE_ADDR 0x4F22 + +#define BXTWC_USBPATH_ADDR 0x5E19 +#define BXTWC_USBPHYCTRL_ADDR 0x5E07 +#define BXTWC_USBIDCTRL_ADDR 0x5E05 +#define BXTWC_USBIDEN_MASK 0x01 +#define BXTWC_USBIDSTAT_ADDR 0x00FF +#define BXTWC_USBSRCDETSTATUS_ADDR 0x5E29 + +#define BXTWC_DBGUSBBC1_ADDR 0x5FE0 +#define BXTWC_DBGUSBBC2_ADDR 0x5FE1 +#define BXTWC_DBGUSBBCSTAT_ADDR 0x5FE2 + +#define BXTWC_WAKESRC_ADDR 0x4E22 +#define BXTWC_WAKESRC2_ADDR 0x4EE5 +#define BXTWC_CHRTTADDR_ADDR 0x5E22 +#define BXTWC_CHRTTDATA_ADDR 0x5E23 + +#define BXTWC_STHRMIRQ0_ADDR 0x4F19 +#define WC_MTHRMIRQ1_ADDR 0x4E12 +#define WC_STHRMIRQ1_ADDR 0x4F1A +#define WC_STHRMIRQ2_ADDR 0x4F1B + +#define BXTWC_THRMZN0H_ADDR 0x4F44 +#define BXTWC_THRMZN0L_ADDR 0x4F45 +#define BXTWC_THRMZN1H_ADDR 0x4F46 +#define BXTWC_THRMZN1L_ADDR 0x4F47 +#define BXTWC_THRMZN2H_ADDR 0x4F48 +#define BXTWC_THRMZN2L_ADDR 0x4F49 +#define BXTWC_THRMZN3H_ADDR 0x4F4A +#define BXTWC_THRMZN3L_ADDR 0x4F4B +#define BXTWC_THRMZN4H_ADDR 0x4F4C +#define BXTWC_THRMZN4L_ADDR 0x4F4D + +#endif diff --git a/include/linux/mfd/intel_soc_pmic.h b/include/linux/mfd/intel_soc_pmic.h index abcbfcf32d10..cf619dbeace2 100644 --- a/include/linux/mfd/intel_soc_pmic.h +++ b/include/linux/mfd/intel_soc_pmic.h @@ -25,6 +25,8 @@ struct intel_soc_pmic { int irq; struct regmap *regmap; struct regmap_irq_chip_data *irq_chip_data; + struct regmap_irq_chip_data *irq_chip_data_level2; + struct device *dev; }; #endif /* __INTEL_SOC_PMIC_H__ */ diff --git a/include/linux/mfd/rtsx_pci.h b/include/linux/mfd/rtsx_pci.h index ff843e7ca23d..7eb7cbac0a9a 100644 --- a/include/linux/mfd/rtsx_pci.h +++ b/include/linux/mfd/rtsx_pci.h @@ -589,6 +589,7 @@ #define FORCE_ASPM_NO_ASPM 0x00 #define PM_CLK_FORCE_CTL 0xFE58 #define FUNC_FORCE_CTL 0xFE59 +#define FUNC_FORCE_UPME_XMT_DBG 0x02 #define PERST_GLITCH_WIDTH 0xFE5C #define CHANGE_LINK_STATE 0xFE5B #define RESET_LOAD_REG 0xFE5E @@ -712,6 +713,7 @@ #define PHY_RCR1 0x02 #define PHY_RCR1_ADP_TIME_4 0x0400 #define PHY_RCR1_VCO_COARSE 0x001F +#define PHY_RCR1_INIT_27S 0x0A1F #define PHY_SSCCR2 0x02 #define PHY_SSCCR2_PLL_NCODE 0x0A00 #define PHY_SSCCR2_TIME0 0x001C @@ -724,6 +726,7 @@ #define PHY_RCR2_FREQSEL_12 0x0040 #define PHY_RCR2_CDR_SC_12P 0x0010 #define PHY_RCR2_CALIB_LATE 0x0002 +#define PHY_RCR2_INIT_27S 0xC152 #define PHY_SSCCR3 0x03 #define PHY_SSCCR3_STEP_IN 0x2740 #define PHY_SSCCR3_CHECK_DELAY 0x0008 @@ -800,12 +803,14 @@ #define PHY_ANA1A_RXT_BIST 0x0500 #define PHY_ANA1A_TXR_BIST 0x0040 #define PHY_ANA1A_REV 0x0006 +#define PHY_FLD0_INIT_27S 0x2546 #define PHY_FLD1 0x1B #define PHY_FLD2 0x1C #define PHY_FLD3 0x1D #define PHY_FLD3_TIMER_4 0x0800 #define PHY_FLD3_TIMER_6 0x0020 #define PHY_FLD3_RXDELINK 0x0004 +#define PHY_FLD3_INIT_27S 0x0004 #define PHY_ANA1D 0x1D #define PHY_ANA1D_DEBUG_ADDR 0x0004 #define _PHY_FLD0 0x1D @@ -824,6 +829,7 @@ #define PHY_FLD4_BER_COUNT 0x00E0 #define PHY_FLD4_BER_TIMER 0x000A #define PHY_FLD4_BER_CHK_EN 0x0001 +#define PHY_FLD4_INIT_27S 0x5C7F #define PHY_DIG1E 0x1E #define PHY_DIG1E_REV 0x4000 #define 
PHY_DIG1E_D0_X_D1 0x1000 diff --git a/include/linux/mfd/samsung/core.h b/include/linux/mfd/samsung/core.h index 75115384f3fc..a06098639399 100644 --- a/include/linux/mfd/samsung/core.h +++ b/include/linux/mfd/samsung/core.h @@ -132,6 +132,10 @@ struct sec_platform_data { int buck2_init; int buck3_init; int buck4_init; + /* Whether or not manually set PWRHOLD to low during shutdown. */ + bool manual_poweroff; + /* Disable the WRSTBI (buck voltage warm reset) when probing? */ + bool disable_wrstbi; }; /** diff --git a/include/linux/mfd/samsung/s2mps11.h b/include/linux/mfd/samsung/s2mps11.h index 7981a9d77d3f..b288965e8101 100644 --- a/include/linux/mfd/samsung/s2mps11.h +++ b/include/linux/mfd/samsung/s2mps11.h @@ -179,6 +179,7 @@ enum s2mps11_regulators { #define S2MPS11_BUCK_N_VOLTAGES (S2MPS11_BUCK_VSEL_MASK + 1) #define S2MPS11_RAMP_DELAY 25000 /* uV/us */ +#define S2MPS11_CTRL1_PWRHOLD_MASK BIT(4) #define S2MPS11_BUCK2_RAMP_SHIFT 6 #define S2MPS11_BUCK34_RAMP_SHIFT 4 diff --git a/include/linux/mfd/samsung/s2mps13.h b/include/linux/mfd/samsung/s2mps13.h index b1fd675fa36f..239e977ba45d 100644 --- a/include/linux/mfd/samsung/s2mps13.h +++ b/include/linux/mfd/samsung/s2mps13.h @@ -184,5 +184,6 @@ enum s2mps13_regulators { * Let's assume that default value will be set. */ #define S2MPS13_BUCK_RAMP_DELAY 12500 +#define S2MPS13_REG_WRSTBI_MASK BIT(5) #endif /* __LINUX_MFD_S2MPS13_H */ diff --git a/include/linux/mfd/tps6105x.h b/include/linux/mfd/tps6105x.h index 386743dd931c..8bc51180800a 100644 --- a/include/linux/mfd/tps6105x.h +++ b/include/linux/mfd/tps6105x.h @@ -10,6 +10,7 @@ #define MFD_TPS6105X_H #include <linux/i2c.h> +#include <linux/regmap.h> #include <linux/regulator/machine.h> /* @@ -82,20 +83,15 @@ struct tps6105x_platform_data { /** * struct tps6105x - state holder for the TPS6105x drivers - * @mutex: mutex to serialize I2C accesses * @i2c_client: corresponding I2C client * @regulator: regulator device if used in voltage mode + * @regmap: used for i2c communcation on accessing registers */ struct tps6105x { struct tps6105x_platform_data *pdata; - struct mutex lock; struct i2c_client *client; struct regulator_dev *regulator; + struct regmap *regmap; }; -extern int tps6105x_set(struct tps6105x *tps6105x, u8 reg, u8 value); -extern int tps6105x_get(struct tps6105x *tps6105x, u8 reg, u8 *buf); -extern int tps6105x_mask_and_set(struct tps6105x *tps6105x, u8 reg, - u8 bitmask, u8 bitvalues); - #endif diff --git a/include/linux/mic_bus.h b/include/linux/mic_bus.h index d5b5f76d57ef..27d7c95fd0da 100644 --- a/include/linux/mic_bus.h +++ b/include/linux/mic_bus.h @@ -91,7 +91,8 @@ struct mbus_hw_ops { struct mbus_device * mbus_register_device(struct device *pdev, int id, struct dma_map_ops *dma_ops, - struct mbus_hw_ops *hw_ops, void __iomem *mmio_va); + struct mbus_hw_ops *hw_ops, int index, + void __iomem *mmio_va); void mbus_unregister_device(struct mbus_device *mbdev); int mbus_register_driver(struct mbus_driver *drv); diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h index baad4cb8e9b0..5a8677bafe04 100644 --- a/include/linux/mlx4/device.h +++ b/include/linux/mlx4/device.h @@ -833,6 +833,7 @@ struct mlx4_dev { struct mlx4_quotas quotas; struct radix_tree_root qp_table_tree; u8 rev_id; + u8 port_random_macs; char board_id[MLX4_BOARD_ID_LEN]; int numa_node; int oper_log_mgm_entry_size; diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h index 8eb3b19af2a4..0b473cbfa7ef 100644 --- a/include/linux/mlx5/device.h +++ b/include/linux/mlx5/device.h 
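Another illustrative aside, this time for the tps6105x.h hunk above: with tps6105x_set(), tps6105x_get() and tps6105x_mask_and_set() removed, sub-drivers are expected to go through the new regmap member of struct tps6105x directly. A hedged sketch of the equivalent read-modify-write, using placeholder register and field names rather than anything defined in the patch:

#include <linux/mfd/tps6105x.h>
#include <linux/regmap.h>

/* placeholder register/field names, purely for illustration */
#define TPS6105X_EXAMPLE_REG0		0x00
#define TPS6105X_EXAMPLE_MODE_MASK	0x03

static int tps6105x_example_set_mode(struct tps6105x *tps, u8 mode)
{
	/* what tps6105x_mask_and_set() used to do, now via regmap */
	return regmap_update_bits(tps->regmap, TPS6105X_EXAMPLE_REG0,
				  TPS6105X_EXAMPLE_MODE_MASK, mode);
}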
@@ -402,17 +402,6 @@ struct mlx5_cmd_teardown_hca_mbox_out { u8 rsvd[8]; }; -struct mlx5_cmd_query_special_contexts_mbox_in { - struct mlx5_inbox_hdr hdr; - u8 rsvd[8]; -}; - -struct mlx5_cmd_query_special_contexts_mbox_out { - struct mlx5_outbox_hdr hdr; - __be32 dump_fill_mkey; - __be32 resd_lkey; -}; - struct mlx5_cmd_layout { u8 type; u8 rsvd0[3]; @@ -440,7 +429,7 @@ struct health_buffer { __be32 rsvd2; u8 irisc_index; u8 synd; - __be16 ext_sync; + __be16 ext_synd; }; struct mlx5_init_seg { @@ -450,7 +439,8 @@ struct mlx5_init_seg { __be32 cmdq_addr_h; __be32 cmdq_addr_l_sz; __be32 cmd_dbell; - __be32 rsvd1[121]; + __be32 rsvd1[120]; + __be32 initializing; struct health_buffer health; __be32 rsvd2[884]; __be32 health_counter; diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index 27b53f9a24ad..5c857f2a20d7 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -391,9 +391,11 @@ struct mlx5_core_health { struct health_buffer __iomem *health; __be32 __iomem *health_counter; struct timer_list timer; - struct list_head list; u32 prev; int miss_counter; + bool sick; + struct workqueue_struct *wq; + struct work_struct work; }; struct mlx5_cq_table { @@ -485,8 +487,26 @@ struct mlx5_priv { spinlock_t ctx_lock; }; +enum mlx5_device_state { + MLX5_DEVICE_STATE_UP, + MLX5_DEVICE_STATE_INTERNAL_ERROR, +}; + +enum mlx5_interface_state { + MLX5_INTERFACE_STATE_DOWN, + MLX5_INTERFACE_STATE_UP, +}; + +enum mlx5_pci_status { + MLX5_PCI_STATUS_DISABLED, + MLX5_PCI_STATUS_ENABLED, +}; + struct mlx5_core_dev { struct pci_dev *pdev; + /* sync pci state */ + struct mutex pci_status_mutex; + enum mlx5_pci_status pci_status; u8 rev_id; char board_id[MLX5_BOARD_ID_LEN]; struct mlx5_cmd cmd; @@ -495,6 +515,10 @@ struct mlx5_core_dev { u32 hca_caps_max[MLX5_CAP_NUM][MLX5_UN_SZ_DW(hca_cap_union)]; phys_addr_t iseg_base; struct mlx5_init_seg __iomem *iseg; + enum mlx5_device_state state; + /* sync interface state */ + struct mutex intf_state_mutex; + enum mlx5_interface_state interface_state; void (*event) (struct mlx5_core_dev *dev, enum mlx5_dev_event event, unsigned long param); @@ -676,8 +700,8 @@ int mlx5_alloc_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari); int mlx5_free_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari); int mlx5_alloc_map_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar); void mlx5_unmap_free_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar); -void mlx5_health_cleanup(void); -void __init mlx5_health_init(void); +void mlx5_health_cleanup(struct mlx5_core_dev *dev); +int mlx5_health_init(struct mlx5_core_dev *dev); void mlx5_start_health_poll(struct mlx5_core_dev *dev); void mlx5_stop_health_poll(struct mlx5_core_dev *dev); int mlx5_buf_alloc_node(struct mlx5_core_dev *dev, int size, @@ -731,7 +755,7 @@ void mlx5_eq_pagefault(struct mlx5_core_dev *dev, struct mlx5_eqe *eqe); #endif void mlx5_srq_event(struct mlx5_core_dev *dev, u32 srqn, int event_type); struct mlx5_core_srq *mlx5_core_get_srq(struct mlx5_core_dev *dev, u32 srqn); -void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, unsigned long vector); +void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec); void mlx5_cq_event(struct mlx5_core_dev *dev, u32 cqn, int event_type); int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx, int nent, u64 mask, const char *name, struct mlx5_uar *uar); @@ -802,6 +826,11 @@ void mlx5_core_put_rsc(struct mlx5_core_rsc_common *common); int mlx5_query_odp_caps(struct mlx5_core_dev *dev, 
struct mlx5_odp_caps *odp_caps); +static inline int fw_initializing(struct mlx5_core_dev *dev) +{ + return ioread32be(&dev->iseg->initializing) >> 31; +} + static inline u32 mlx5_mkey_to_idx(u32 mkey) { return mkey >> 8; @@ -845,7 +874,6 @@ void *mlx5_get_protocol_dev(struct mlx5_core_dev *mdev, int protocol); int mlx5_register_interface(struct mlx5_interface *intf); void mlx5_unregister_interface(struct mlx5_interface *intf); int mlx5_core_query_vendor_id(struct mlx5_core_dev *mdev, u32 *vendor_id); -int mlx5_core_query_special_context(struct mlx5_core_dev *dev, u32 *rsvd_lkey); struct mlx5_profile { u64 mask; @@ -866,4 +894,8 @@ static inline int mlx5_get_gid_table_len(u16 param) return 8 * (1 << param); } +enum { + MLX5_TRIGGERED_CMD_COMP = (u64)1 << 32, +}; + #endif /* MLX5_DRIVER_H */ diff --git a/include/linux/mm.h b/include/linux/mm.h index 91c08f6f0dc9..906c46a05707 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -139,6 +139,7 @@ extern unsigned int kobjsize(const void *objp); #define VM_DONTCOPY 0x00020000 /* Do not copy this vma on fork */ #define VM_DONTEXPAND 0x00040000 /* Cannot expand with mremap() */ +#define VM_LOCKONFAULT 0x00080000 /* Lock the pages covered when they are faulted in */ #define VM_ACCOUNT 0x00100000 /* Is a VM accounted object */ #define VM_NORESERVE 0x00200000 /* should the VM suppress accounting */ #define VM_HUGETLB 0x00400000 /* Huge TLB Page VM */ @@ -202,6 +203,9 @@ extern unsigned int kobjsize(const void *objp); /* This mask defines which mm->def_flags a process can inherit its parent */ #define VM_INIT_DEF_MASK VM_NOHUGEPAGE +/* This mask is used to clear all the VMA flags used by mlock */ +#define VM_LOCKED_CLEAR_MASK (~(VM_LOCKED | VM_LOCKONFAULT)) + /* * mapping from the currently active vm_flags protection bits (the * low four bits) to a page protection mask.. 
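One more illustrative aside for the mm.h hunk above: VM_LOCKONFAULT marks a VMA whose pages should be locked only as they are faulted in, and VM_LOCKED_CLEAR_MASK exists so callers can drop both mlock-related flags in one step before computing the new state (the apparent consumer is the mlock2()/MLOCK_ONFAULT work, though that is inferred rather than shown in this hunk). A rough sketch of the flag arithmetic, not the kernel's actual implementation:

#include <linux/mm.h>

/* illustrative only: spell out how the new mask and flag combine */
static unsigned long example_mlock_newflags(unsigned long vm_flags,
					    bool lock, bool onfault)
{
	/* clear VM_LOCKED and VM_LOCKONFAULT together */
	unsigned long newflags = vm_flags & VM_LOCKED_CLEAR_MASK;

	if (lock) {
		newflags |= VM_LOCKED;
		if (onfault)
			newflags |= VM_LOCKONFAULT;
	}
	return newflags;
}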
@@ -905,6 +909,27 @@ static inline void set_page_links(struct page *page, enum zone_type zone, #endif } +#ifdef CONFIG_MEMCG +static inline struct mem_cgroup *page_memcg(struct page *page) +{ + return page->mem_cgroup; +} + +static inline void set_page_memcg(struct page *page, struct mem_cgroup *memcg) +{ + page->mem_cgroup = memcg; +} +#else +static inline struct mem_cgroup *page_memcg(struct page *page) +{ + return NULL; +} + +static inline void set_page_memcg(struct page *page, struct mem_cgroup *memcg) +{ +} +#endif + /* * Some inline functions in vmstat.h depend on page_zone() */ @@ -1585,8 +1610,10 @@ static inline void pgtable_init(void) static inline bool pgtable_page_ctor(struct page *page) { + if (!ptlock_init(page)) + return false; inc_zone_page_state(page, NR_PAGETABLE); - return ptlock_init(page); + return true; } static inline void pgtable_page_dtor(struct page *page) @@ -2015,8 +2042,6 @@ void page_cache_async_readahead(struct address_space *mapping, pgoff_t offset, unsigned long size); -unsigned long max_sane_readahead(unsigned long nr); - /* Generic expand stack which grows the stack according to GROWS{UP,DOWN} */ extern int expand_stack(struct vm_area_struct *vma, unsigned long address); @@ -2116,6 +2141,7 @@ static inline struct page *follow_page(struct vm_area_struct *vma, #define FOLL_NUMA 0x200 /* force NUMA hinting page fault */ #define FOLL_MIGRATION 0x400 /* wait for page to replace migration entry */ #define FOLL_TRIED 0x800 /* a retry, previous pass started an IO */ +#define FOLL_MLOCK 0x1000 /* lock present pages */ typedef int (*pte_fn_t)(pte_t *pte, pgtable_t token, unsigned long addr, void *data); diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index 3d6baa7d4534..0a85da25a822 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -486,6 +486,9 @@ struct mm_struct { /* address of the bounds directory */ void __user *bd_addr; #endif +#ifdef CONFIG_HUGETLB_PAGE + atomic_long_t hugetlb_usage; +#endif }; static inline void mm_init_cpumask(struct mm_struct *mm) diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h index fdd0779ccdfa..eb0151bac50c 100644 --- a/include/linux/mmc/card.h +++ b/include/linux/mmc/card.h @@ -269,7 +269,6 @@ struct mmc_card { /* for byte mode */ #define MMC_QUIRK_NONSTD_SDIO (1<<2) /* non-standard SDIO card attached */ /* (missing CIA registers) */ -#define MMC_QUIRK_BROKEN_CLK_GATING (1<<3) /* clock gating the sdio bus will make card fail */ #define MMC_QUIRK_NONSTD_FUNC_IF (1<<4) /* SDIO card has nonstd function interfaces */ #define MMC_QUIRK_DISABLE_CD (1<<5) /* disconnect CD/DAT[3] resistor */ #define MMC_QUIRK_INAND_CMD38 (1<<6) /* iNAND devices have broken CMD38 */ diff --git a/include/linux/mmc/core.h b/include/linux/mmc/core.h index 258daf914c6d..37967b6da03c 100644 --- a/include/linux/mmc/core.h +++ b/include/linux/mmc/core.h @@ -152,10 +152,8 @@ extern int mmc_app_cmd(struct mmc_host *, struct mmc_card *); extern int mmc_wait_for_app_cmd(struct mmc_host *, struct mmc_card *, struct mmc_command *, int); extern void mmc_start_bkops(struct mmc_card *card, bool from_exception); -extern int __mmc_switch(struct mmc_card *, u8, u8, u8, unsigned int, bool, - bool, bool); extern int mmc_switch(struct mmc_card *, u8, u8, u8, unsigned int); -extern int mmc_send_tuning(struct mmc_host *host); +extern int mmc_send_tuning(struct mmc_host *host, u32 opcode, int *cmd_error); extern int mmc_get_ext_csd(struct mmc_card *card, u8 **new_ext_csd); #define MMC_ERASE_ARG 0x00000000 diff --git 
a/include/linux/mmc/dw_mmc.h b/include/linux/mmc/dw_mmc.h index 134c57422740..f67b2ec18e6d 100644 --- a/include/linux/mmc/dw_mmc.h +++ b/include/linux/mmc/dw_mmc.h @@ -16,6 +16,7 @@ #include <linux/scatterlist.h> #include <linux/mmc/core.h> +#include <linux/dmaengine.h> #define MAX_MCI_SLOTS 2 @@ -40,6 +41,17 @@ enum { struct mmc_data; +enum { + TRANS_MODE_PIO = 0, + TRANS_MODE_IDMAC, + TRANS_MODE_EDMAC +}; + +struct dw_mci_dma_slave { + struct dma_chan *ch; + enum dma_transfer_direction direction; +}; + /** * struct dw_mci - MMC controller state shared between all slots * @lock: Spinlock protecting the queue and associated data. @@ -154,7 +166,14 @@ struct dw_mci { dma_addr_t sg_dma; void *sg_cpu; const struct dw_mci_dma_ops *dma_ops; + /* For idmac */ unsigned int ring_size; + + /* For edmac */ + struct dw_mci_dma_slave *dms; + /* Registers's physical base address */ + void *phy_regs; + u32 cmd_status; u32 data_status; u32 stop_cmdr; @@ -208,8 +227,8 @@ struct dw_mci { struct dw_mci_dma_ops { /* DMA Ops */ int (*init)(struct dw_mci *host); - void (*start)(struct dw_mci *host, unsigned int sg_len); - void (*complete)(struct dw_mci *host); + int (*start)(struct dw_mci *host, unsigned int sg_len); + void (*complete)(void *host); void (*stop)(struct dw_mci *host); void (*cleanup)(struct dw_mci *host); void (*exit)(struct dw_mci *host); diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h index 83b81fd865f3..8673ffe3d86e 100644 --- a/include/linux/mmc/host.h +++ b/include/linux/mmc/host.h @@ -292,18 +292,6 @@ struct mmc_host { mmc_pm_flag_t pm_caps; /* supported pm features */ -#ifdef CONFIG_MMC_CLKGATE - int clk_requests; /* internal reference counter */ - unsigned int clk_delay; /* number of MCI clk hold cycles */ - bool clk_gated; /* clock gated */ - struct delayed_work clk_gate_work; /* delayed clock gate */ - unsigned int clk_old; /* old clock value cache */ - spinlock_t clk_lock; /* lock for clk fields */ - struct mutex clk_gate_mutex; /* mutex for clock gating */ - struct device_attribute clkgate_delay_attr; - unsigned long clkgate_delay; -#endif - /* host specific block data */ unsigned int max_seg_size; /* see blk_queue_max_segment_size */ unsigned short max_segs; /* see blk_queue_max_segments */ @@ -423,6 +411,7 @@ int mmc_regulator_get_ocrmask(struct regulator *supply); int mmc_regulator_set_ocr(struct mmc_host *mmc, struct regulator *supply, unsigned short vdd_bit); +int mmc_regulator_set_vqmmc(struct mmc_host *mmc, struct mmc_ios *ios); #else static inline int mmc_regulator_get_ocrmask(struct regulator *supply) { @@ -435,6 +424,12 @@ static inline int mmc_regulator_set_ocr(struct mmc_host *mmc, { return 0; } + +static inline int mmc_regulator_set_vqmmc(struct mmc_host *mmc, + struct mmc_ios *ios) +{ + return -EINVAL; +} #endif int mmc_regulator_get_supply(struct mmc_host *mmc); @@ -479,26 +474,6 @@ static inline int mmc_host_packed_wr(struct mmc_host *host) return host->caps2 & MMC_CAP2_PACKED_WR; } -#ifdef CONFIG_MMC_CLKGATE -void mmc_host_clk_hold(struct mmc_host *host); -void mmc_host_clk_release(struct mmc_host *host); -unsigned int mmc_host_clk_rate(struct mmc_host *host); - -#else -static inline void mmc_host_clk_hold(struct mmc_host *host) -{ -} - -static inline void mmc_host_clk_release(struct mmc_host *host) -{ -} - -static inline unsigned int mmc_host_clk_rate(struct mmc_host *host) -{ - return host->ios.clock; -} -#endif - static inline int mmc_card_hs(struct mmc_card *card) { return card->host->ios.timing == MMC_TIMING_SD_HS || diff --git 
a/include/linux/mmzone.h b/include/linux/mmzone.h index d94347737292..2d7e660cdefe 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -823,8 +823,7 @@ enum memmap_context { MEMMAP_HOTPLUG, }; extern int init_currently_empty_zone(struct zone *zone, unsigned long start_pfn, - unsigned long size, - enum memmap_context context); + unsigned long size); extern void lruvec_init(struct lruvec *lruvec); diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h index 688997a24aad..64f36e09a790 100644 --- a/include/linux/mod_devicetable.h +++ b/include/linux/mod_devicetable.h @@ -219,6 +219,14 @@ struct serio_device_id { __u8 proto; }; +struct hda_device_id { + __u32 vendor_id; + __u32 rev_id; + __u8 api_version; + const char *name; + unsigned long driver_data; +}; + /* * Struct used for matching a device */ @@ -601,15 +609,13 @@ struct ipack_device_id { #define MEI_CL_MODULE_PREFIX "mei:" #define MEI_CL_NAME_SIZE 32 -#define MEI_CL_UUID_FMT "%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x" -#define MEI_CL_UUID_ARGS(_u) \ - _u[0], _u[1], _u[2], _u[3], _u[4], _u[5], _u[6], _u[7], \ - _u[8], _u[9], _u[10], _u[11], _u[12], _u[13], _u[14], _u[15] +#define MEI_CL_VERSION_ANY 0xff /** * struct mei_cl_device_id - MEI client device identifier * @name: helper name * @uuid: client uuid + * @version: client protocol version * @driver_info: information used by the driver. * * identifies mei client device by uuid and name @@ -617,6 +623,7 @@ struct ipack_device_id { struct mei_cl_device_id { char name[MEI_CL_NAME_SIZE]; uuid_le uuid; + __u8 version; kernel_ulong_t driver_info; }; diff --git a/include/linux/mpi.h b/include/linux/mpi.h index 641b7d6fd096..3a5abe95affd 100644 --- a/include/linux/mpi.h +++ b/include/linux/mpi.h @@ -31,12 +31,7 @@ #define G10_MPI_H #include <linux/types.h> - -/* DSI defines */ - -#define SHA1_DIGEST_LENGTH 20 - -/*end of DSI defines */ +#include <linux/scatterlist.h> #define BYTES_PER_MPI_LIMB (BITS_PER_LONG / 8) #define BITS_PER_MPI_LIMB BITS_PER_LONG @@ -78,6 +73,7 @@ void mpi_swap(MPI a, MPI b); MPI do_encode_md(const void *sha_buffer, unsigned nbits); MPI mpi_read_raw_data(const void *xbuffer, size_t nbytes); MPI mpi_read_from_buffer(const void *buffer, unsigned *ret_nread); +MPI mpi_read_raw_from_sgl(struct scatterlist *sgl, unsigned int len); int mpi_fromstr(MPI val, const char *str); u32 mpi_get_keyid(MPI a, u32 *keyid); void *mpi_get_buffer(MPI a, unsigned *nbytes, int *sign); @@ -85,6 +81,8 @@ int mpi_read_buffer(MPI a, uint8_t *buf, unsigned buf_len, unsigned *nbytes, int *sign); void *mpi_get_secure_buffer(MPI a, unsigned *nbytes, int *sign); int mpi_set_buffer(MPI a, const void *buffer, unsigned nbytes, int sign); +int mpi_write_to_sgl(MPI a, struct scatterlist *sg, unsigned *nbytes, + int *sign); #define log_mpidump g10_log_mpidump diff --git a/include/linux/msi.h b/include/linux/msi.h index ad939d0ba816..f71a25e5fd25 100644 --- a/include/linux/msi.h +++ b/include/linux/msi.h @@ -163,6 +163,8 @@ struct msi_controller { int (*setup_irq)(struct msi_controller *chip, struct pci_dev *dev, struct msi_desc *desc); + int (*setup_irqs)(struct msi_controller *chip, struct pci_dev *dev, + int nvec, int type); void (*teardown_irq)(struct msi_controller *chip, unsigned int irq); }; @@ -174,6 +176,7 @@ struct msi_controller { struct irq_domain; struct irq_chip; struct device_node; +struct fwnode_handle; struct msi_domain_info; /** @@ -262,7 +265,7 @@ enum { int msi_domain_set_affinity(struct irq_data *data, const struct cpumask 
*mask, bool force); -struct irq_domain *msi_create_irq_domain(struct device_node *of_node, +struct irq_domain *msi_create_irq_domain(struct fwnode_handle *fwnode, struct msi_domain_info *info, struct irq_domain *parent); int msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev, @@ -270,7 +273,7 @@ int msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev, void msi_domain_free_irqs(struct irq_domain *domain, struct device *dev); struct msi_domain_info *msi_get_domain_info(struct irq_domain *domain); -struct irq_domain *platform_msi_create_irq_domain(struct device_node *np, +struct irq_domain *platform_msi_create_irq_domain(struct fwnode_handle *fwnode, struct msi_domain_info *info, struct irq_domain *parent); int platform_msi_domain_alloc_irqs(struct device *dev, unsigned int nvec, @@ -280,19 +283,26 @@ void platform_msi_domain_free_irqs(struct device *dev); #ifdef CONFIG_PCI_MSI_IRQ_DOMAIN void pci_msi_domain_write_msg(struct irq_data *irq_data, struct msi_msg *msg); -struct irq_domain *pci_msi_create_irq_domain(struct device_node *node, +struct irq_domain *pci_msi_create_irq_domain(struct fwnode_handle *fwnode, struct msi_domain_info *info, struct irq_domain *parent); int pci_msi_domain_alloc_irqs(struct irq_domain *domain, struct pci_dev *dev, int nvec, int type); void pci_msi_domain_free_irqs(struct irq_domain *domain, struct pci_dev *dev); -struct irq_domain *pci_msi_create_default_irq_domain(struct device_node *node, +struct irq_domain *pci_msi_create_default_irq_domain(struct fwnode_handle *fwnode, struct msi_domain_info *info, struct irq_domain *parent); irq_hw_number_t pci_msi_domain_calc_hwirq(struct pci_dev *dev, struct msi_desc *desc); int pci_msi_domain_check_cap(struct irq_domain *domain, struct msi_domain_info *info, struct device *dev); +u32 pci_msi_domain_get_msi_rid(struct irq_domain *domain, struct pci_dev *pdev); +struct irq_domain *pci_msi_get_device_domain(struct pci_dev *pdev); +#else +static inline struct irq_domain *pci_msi_get_device_domain(struct pci_dev *pdev) +{ + return NULL; +} #endif /* CONFIG_PCI_MSI_IRQ_DOMAIN */ #endif /* LINUX_MSI_H */ diff --git a/include/linux/msm_mdp.h b/include/linux/msm_mdp.h deleted file mode 100644 index fe722c1fb61d..000000000000 --- a/include/linux/msm_mdp.h +++ /dev/null @@ -1,79 +0,0 @@ -/* include/linux/msm_mdp.h - * - * Copyright (C) 2007 Google Incorporated - * - * This software is licensed under the terms of the GNU General Public - * License version 2, as published by the Free Software Foundation, and - * may be copied, distributed, and modified under those terms. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- */ -#ifndef _MSM_MDP_H_ -#define _MSM_MDP_H_ - -#include <linux/types.h> - -#define MSMFB_IOCTL_MAGIC 'm' -#define MSMFB_GRP_DISP _IOW(MSMFB_IOCTL_MAGIC, 1, unsigned int) -#define MSMFB_BLIT _IOW(MSMFB_IOCTL_MAGIC, 2, unsigned int) - -enum { - MDP_RGB_565, /* RGB 565 planar */ - MDP_XRGB_8888, /* RGB 888 padded */ - MDP_Y_CBCR_H2V2, /* Y and CbCr, pseudo planar w/ Cb is in MSB */ - MDP_ARGB_8888, /* ARGB 888 */ - MDP_RGB_888, /* RGB 888 planar */ - MDP_Y_CRCB_H2V2, /* Y and CrCb, pseudo planar w/ Cr is in MSB */ - MDP_YCRYCB_H2V1, /* YCrYCb interleave */ - MDP_Y_CRCB_H2V1, /* Y and CrCb, pseduo planar w/ Cr is in MSB */ - MDP_Y_CBCR_H2V1, /* Y and CrCb, pseduo planar w/ Cr is in MSB */ - MDP_RGBA_8888, /* ARGB 888 */ - MDP_BGRA_8888, /* ABGR 888 */ - MDP_RGBX_8888, /* RGBX 888 */ - MDP_IMGTYPE_LIMIT /* Non valid image type after this enum */ -}; - -enum { - PMEM_IMG, - FB_IMG, -}; - -/* flag values */ -#define MDP_ROT_NOP 0 -#define MDP_FLIP_LR 0x1 -#define MDP_FLIP_UD 0x2 -#define MDP_ROT_90 0x4 -#define MDP_ROT_180 (MDP_FLIP_UD|MDP_FLIP_LR) -#define MDP_ROT_270 (MDP_ROT_90|MDP_FLIP_UD|MDP_FLIP_LR) -#define MDP_DITHER 0x8 -#define MDP_BLUR 0x10 - -#define MDP_TRANSP_NOP 0xffffffff -#define MDP_ALPHA_NOP 0xff - -struct mdp_rect { - u32 x, y, w, h; -}; - -struct mdp_img { - u32 width, height, format, offset; - int memory_id; /* the file descriptor */ -}; - -struct mdp_blit_req { - struct mdp_img src; - struct mdp_img dst; - struct mdp_rect src_rect; - struct mdp_rect dst_rect; - u32 alpha, transp_mask, flags; -}; - -struct mdp_blit_req_list { - u32 count; - struct mdp_blit_req req[]; -}; - -#endif /* _MSM_MDP_H_ */ diff --git a/include/linux/n_r3964.h b/include/linux/n_r3964.h index 5d0b2a1dee69..90a803aa42e8 100644 --- a/include/linux/n_r3964.h +++ b/include/linux/n_r3964.h @@ -152,9 +152,6 @@ struct r3964_info { unsigned char *rx_buf; /* ring buffer */ unsigned char *tx_buf; - wait_queue_head_t read_wait; - //struct wait_queue *read_wait; - struct r3964_block_header *rx_first; struct r3964_block_header *rx_last; struct r3964_block_header *tx_first; @@ -164,8 +161,9 @@ struct r3964_info { unsigned char last_rx; unsigned char bcc; unsigned int blocks_in_rx_queue; - - + + struct mutex read_lock; /* serialize r3964_read */ + struct r3964_client_info *firstClient; unsigned int state; unsigned int flags; diff --git a/include/linux/net.h b/include/linux/net.h index 049d4b03c4c4..70ac5e28e6b7 100644 --- a/include/linux/net.h +++ b/include/linux/net.h @@ -24,7 +24,8 @@ #include <linux/fcntl.h> /* For O_CLOEXEC and O_NONBLOCK */ #include <linux/kmemcheck.h> #include <linux/rcupdate.h> -#include <linux/jump_label.h> +#include <linux/once.h> + #include <uapi/linux/net.h> struct poll_table_struct; @@ -250,22 +251,8 @@ do { \ } while (0) #endif -bool __net_get_random_once(void *buf, int nbytes, bool *done, - struct static_key *done_key); - -#define net_get_random_once(buf, nbytes) \ - ({ \ - bool ___ret = false; \ - static bool ___done = false; \ - static struct static_key ___once_key = \ - STATIC_KEY_INIT_TRUE; \ - if (static_key_true(&___once_key)) \ - ___ret = __net_get_random_once(buf, \ - nbytes, \ - &___done, \ - &___once_key); \ - ___ret; \ - }) +#define net_get_random_once(buf, nbytes) \ + get_random_once((buf), (nbytes)) int kernel_sendmsg(struct socket *sock, struct msghdr *msg, struct kvec *vec, size_t num, size_t len); diff --git a/include/linux/netdev_features.h b/include/linux/netdev_features.h index 9672781c593d..f0d87347df19 100644 --- a/include/linux/netdev_features.h +++ 
b/include/linux/netdev_features.h @@ -125,6 +125,9 @@ enum { #define NETIF_F_HW_L2FW_DOFFLOAD __NETIF_F(HW_L2FW_DOFFLOAD) #define NETIF_F_BUSY_POLL __NETIF_F(BUSY_POLL) +#define for_each_netdev_feature(mask_addr, bit) \ + for_each_set_bit(bit, (unsigned long *)mask_addr, NETDEV_FEATURE_COUNT) + /* Features valid for ethtool to change */ /* = all defined minus driver/device-class-related */ #define NETIF_F_NEVER_CHANGE (NETIF_F_VLAN_CHALLENGED | \ @@ -167,6 +170,12 @@ enum { */ #define NETIF_F_ALL_FOR_ALL (NETIF_F_NOCACHE_COPY | NETIF_F_FSO) +/* + * If upper/master device has these features disabled, they must be disabled + * on all lower/slave devices as well. + */ +#define NETIF_F_UPPER_DISABLES NETIF_F_LRO + /* changeable features with no special hardware requirements */ #define NETIF_F_SOFT_FEATURES (NETIF_F_GSO | NETIF_F_GRO) diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 88a00694eda5..a9e3bf42d287 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -507,6 +507,7 @@ static inline void napi_enable(struct napi_struct *n) BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state)); smp_mb__before_atomic(); clear_bit(NAPI_STATE_SCHED, &n->state); + clear_bit(NAPI_STATE_NPSVC, &n->state); } #ifdef CONFIG_SMP @@ -717,8 +718,8 @@ struct xps_map { u16 queues[0]; }; #define XPS_MAP_SIZE(_num) (sizeof(struct xps_map) + ((_num) * sizeof(u16))) -#define XPS_MIN_MAP_ALLOC ((L1_CACHE_BYTES - sizeof(struct xps_map)) \ - / sizeof(u16)) +#define XPS_MIN_MAP_ALLOC ((L1_CACHE_ALIGN(offsetof(struct xps_map, queues[1])) \ + - sizeof(struct xps_map)) / sizeof(u16)) /* * This structure holds all XPS maps for device. Maps are indexed by CPU. @@ -880,6 +881,7 @@ typedef u16 (*select_queue_fallback_t)(struct net_device *dev, * int (*ndo_set_vf_rate)(struct net_device *dev, int vf, int min_tx_rate, * int max_tx_rate); * int (*ndo_set_vf_spoofchk)(struct net_device *dev, int vf, bool setting); + * int (*ndo_set_vf_trust)(struct net_device *dev, int vf, bool setting); * int (*ndo_get_vf_config)(struct net_device *dev, * int vf, struct ifla_vf_info *ivf); * int (*ndo_set_vf_link_state)(struct net_device *dev, int vf, int link_state); @@ -1053,6 +1055,10 @@ typedef u16 (*select_queue_fallback_t)(struct net_device *dev, * This function is used to pass protocol port error state information * to the switch driver. The switch driver can react to the proto_down * by doing a phys down on the associated switch port. + * int (*ndo_fill_metadata_dst)(struct net_device *dev, struct sk_buff *skb); + * This function is used to get egress tunnel information for given skb. + * This is useful for retrieving outer tunnel header parameters while + * sampling packet. 
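[Editor's sketch] The netdev_features.h hunk above introduces for_each_netdev_feature() for walking the set bits of a feature mask. A small sketch of its use; foo_dump_features() is a made-up helper for illustration only.

#include <linux/netdev_features.h>
#include <linux/printk.h>

/* Iterate every bit set in a netdev_features_t using the new helper. */
static void foo_dump_features(netdev_features_t features)
{
	int bit;

	for_each_netdev_feature(&features, bit)
		pr_info("feature bit %d is set\n", bit);
}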
* */ struct net_device_ops { @@ -1108,6 +1114,8 @@ struct net_device_ops { int max_tx_rate); int (*ndo_set_vf_spoofchk)(struct net_device *dev, int vf, bool setting); + int (*ndo_set_vf_trust)(struct net_device *dev, + int vf, bool setting); int (*ndo_get_vf_config)(struct net_device *dev, int vf, struct ifla_vf_info *ivf); @@ -1226,6 +1234,8 @@ struct net_device_ops { int (*ndo_get_iflink)(const struct net_device *dev); int (*ndo_change_proto_down)(struct net_device *dev, bool proto_down); + int (*ndo_fill_metadata_dst)(struct net_device *dev, + struct sk_buff *skb); }; /** @@ -1257,9 +1267,10 @@ struct net_device_ops { * @IFF_LIVE_ADDR_CHANGE: device supports hardware address * change when it's running * @IFF_MACVLAN: Macvlan device - * @IFF_VRF_MASTER: device is a VRF master + * @IFF_L3MDEV_MASTER: device is an L3 master device * @IFF_NO_QUEUE: device can run without qdisc attached * @IFF_OPENVSWITCH: device is a Open vSwitch master + * @IFF_L3MDEV_SLAVE: device is enslaved to an L3 master device */ enum netdev_priv_flags { IFF_802_1Q_VLAN = 1<<0, @@ -1282,9 +1293,10 @@ enum netdev_priv_flags { IFF_XMIT_DST_RELEASE_PERM = 1<<17, IFF_IPVLAN_MASTER = 1<<18, IFF_IPVLAN_SLAVE = 1<<19, - IFF_VRF_MASTER = 1<<20, + IFF_L3MDEV_MASTER = 1<<20, IFF_NO_QUEUE = 1<<21, IFF_OPENVSWITCH = 1<<22, + IFF_L3MDEV_SLAVE = 1<<23, }; #define IFF_802_1Q_VLAN IFF_802_1Q_VLAN @@ -1307,7 +1319,7 @@ enum netdev_priv_flags { #define IFF_XMIT_DST_RELEASE_PERM IFF_XMIT_DST_RELEASE_PERM #define IFF_IPVLAN_MASTER IFF_IPVLAN_MASTER #define IFF_IPVLAN_SLAVE IFF_IPVLAN_SLAVE -#define IFF_VRF_MASTER IFF_VRF_MASTER +#define IFF_L3MDEV_MASTER IFF_L3MDEV_MASTER #define IFF_NO_QUEUE IFF_NO_QUEUE #define IFF_OPENVSWITCH IFF_OPENVSWITCH @@ -1426,7 +1438,6 @@ enum netdev_priv_flags { * @dn_ptr: DECnet specific data * @ip6_ptr: IPv6 specific data * @ax25_ptr: AX.25 specific data - * @vrf_ptr: VRF specific data * @ieee80211_ptr: IEEE 802.11 specific data, assign before registering * * @last_rx: Time of last Rx @@ -1586,6 +1597,9 @@ struct net_device { #ifdef CONFIG_NET_SWITCHDEV const struct switchdev_ops *switchdev_ops; #endif +#ifdef CONFIG_NET_L3_MASTER_DEV + const struct l3mdev_ops *l3mdev_ops; +#endif const struct header_ops *header_ops; @@ -1645,7 +1659,6 @@ struct net_device { struct dn_dev __rcu *dn_ptr; struct inet6_dev __rcu *ip6_ptr; void *ax25_ptr; - struct net_vrf_dev __rcu *vrf_ptr; struct wireless_dev *ieee80211_ptr; struct wpan_dev *ieee802154_ptr; #if IS_ENABLED(CONFIG_MPLS_ROUTING) @@ -2102,6 +2115,7 @@ struct pcpu_sw_netstats { #define NETDEV_PRECHANGEMTU 0x0017 /* notify before mtu change happened */ #define NETDEV_CHANGEINFODATA 0x0018 #define NETDEV_BONDING_INFO 0x0019 +#define NETDEV_PRECHANGEUPPER 0x001A int register_netdevice_notifier(struct notifier_block *nb); int unregister_netdevice_notifier(struct notifier_block *nb); @@ -2202,6 +2216,7 @@ void dev_add_offload(struct packet_offload *po); void dev_remove_offload(struct packet_offload *po); int dev_get_iflink(const struct net_device *dev); +int dev_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb); struct net_device *__dev_get_by_flags(struct net *net, unsigned short flags, unsigned short mask); struct net_device *dev_get_by_name(struct net *net, const char *name); @@ -2212,12 +2227,8 @@ int dev_open(struct net_device *dev); int dev_close(struct net_device *dev); int dev_close_many(struct list_head *head, bool unlink); void dev_disable_lro(struct net_device *dev); -int dev_loopback_xmit(struct sock *sk, struct sk_buff *newskb); -int 
dev_queue_xmit_sk(struct sock *sk, struct sk_buff *skb); -static inline int dev_queue_xmit(struct sk_buff *skb) -{ - return dev_queue_xmit_sk(skb->sk, skb); -} +int dev_loopback_xmit(struct net *net, struct sock *sk, struct sk_buff *newskb); +int dev_queue_xmit(struct sk_buff *skb); int dev_queue_xmit_accel(struct sk_buff *skb, void *accel_priv); int register_netdevice(struct net_device *dev); void unregister_netdevice_queue(struct net_device *dev, struct list_head *head); @@ -2989,11 +3000,7 @@ static inline void dev_consume_skb_any(struct sk_buff *skb) int netif_rx(struct sk_buff *skb); int netif_rx_ni(struct sk_buff *skb); -int netif_receive_skb_sk(struct sock *sk, struct sk_buff *skb); -static inline int netif_receive_skb(struct sk_buff *skb) -{ - return netif_receive_skb_sk(skb->sk, skb); -} +int netif_receive_skb(struct sk_buff *skb); gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb); void napi_gro_flush(struct napi_struct *napi, bool flush_old); struct sk_buff *napi_get_frags(struct napi_struct *napi); @@ -3831,9 +3838,14 @@ static inline bool netif_supports_nofcs(struct net_device *dev) return dev->priv_flags & IFF_SUPP_NOFCS; } -static inline bool netif_is_vrf(const struct net_device *dev) +static inline bool netif_is_l3_master(const struct net_device *dev) { - return dev->priv_flags & IFF_VRF_MASTER; + return dev->priv_flags & IFF_L3MDEV_MASTER; +} + +static inline bool netif_is_l3_slave(const struct net_device *dev) +{ + return dev->priv_flags & IFF_L3MDEV_SLAVE; } static inline bool netif_is_bridge_master(const struct net_device *dev) @@ -3846,27 +3858,6 @@ static inline bool netif_is_ovs_master(const struct net_device *dev) return dev->priv_flags & IFF_OPENVSWITCH; } -static inline bool netif_index_is_vrf(struct net *net, int ifindex) -{ - bool rc = false; - -#if IS_ENABLED(CONFIG_NET_VRF) - struct net_device *dev; - - if (ifindex == 0) - return false; - - rcu_read_lock(); - - dev = dev_get_by_index_rcu(net, ifindex); - if (dev) - rc = netif_is_vrf(dev); - - rcu_read_unlock(); -#endif - return rc; -} - /* This device needs to keep skb dst for qdisc enqueue or ndo_start_xmit() */ static inline void netif_keep_dst(struct net_device *dev) { diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h index 36a652531791..0ad556726181 100644 --- a/include/linux/netfilter.h +++ b/include/linux/netfilter.h @@ -54,8 +54,9 @@ struct nf_hook_state { struct net_device *in; struct net_device *out; struct sock *sk; + struct net *net; struct list_head *hook_list; - int (*okfn)(struct sock *, struct sk_buff *); + int (*okfn)(struct net *, struct sock *, struct sk_buff *); }; static inline void nf_hook_state_init(struct nf_hook_state *p, @@ -65,7 +66,8 @@ static inline void nf_hook_state_init(struct nf_hook_state *p, struct net_device *indev, struct net_device *outdev, struct sock *sk, - int (*okfn)(struct sock *, struct sk_buff *)) + struct net *net, + int (*okfn)(struct net *, struct sock *, struct sk_buff *)) { p->hook = hook; p->thresh = thresh; @@ -73,11 +75,12 @@ static inline void nf_hook_state_init(struct nf_hook_state *p, p->in = indev; p->out = outdev; p->sk = sk; + p->net = net; p->hook_list = hook_list; p->okfn = okfn; } -typedef unsigned int nf_hookfn(const struct nf_hook_ops *ops, +typedef unsigned int nf_hookfn(void *priv, struct sk_buff *skb, const struct nf_hook_state *state); @@ -87,7 +90,6 @@ struct nf_hook_ops { /* User fills in from here down. 
*/ nf_hookfn *hook; struct net_device *dev; - struct module *owner; void *priv; u_int8_t pf; unsigned int hooknum; @@ -167,32 +169,32 @@ int nf_hook_slow(struct sk_buff *skb, struct nf_hook_state *state); * value indicates the packet has been consumed by the hook. */ static inline int nf_hook_thresh(u_int8_t pf, unsigned int hook, + struct net *net, struct sock *sk, struct sk_buff *skb, struct net_device *indev, struct net_device *outdev, - int (*okfn)(struct sock *, struct sk_buff *), + int (*okfn)(struct net *, struct sock *, struct sk_buff *), int thresh) { - struct net *net = dev_net(indev ? indev : outdev); struct list_head *hook_list = &net->nf.hooks[pf][hook]; if (nf_hook_list_active(hook_list, pf, hook)) { struct nf_hook_state state; nf_hook_state_init(&state, hook_list, hook, thresh, - pf, indev, outdev, sk, okfn); + pf, indev, outdev, sk, net, okfn); return nf_hook_slow(skb, &state); } return 1; } -static inline int nf_hook(u_int8_t pf, unsigned int hook, struct sock *sk, - struct sk_buff *skb, struct net_device *indev, - struct net_device *outdev, - int (*okfn)(struct sock *, struct sk_buff *)) +static inline int nf_hook(u_int8_t pf, unsigned int hook, struct net *net, + struct sock *sk, struct sk_buff *skb, + struct net_device *indev, struct net_device *outdev, + int (*okfn)(struct net *, struct sock *, struct sk_buff *)) { - return nf_hook_thresh(pf, hook, sk, skb, indev, outdev, okfn, INT_MIN); + return nf_hook_thresh(pf, hook, net, sk, skb, indev, outdev, okfn, INT_MIN); } /* Activate hook; either okfn or kfree_skb called, unless a hook @@ -213,36 +215,38 @@ static inline int nf_hook(u_int8_t pf, unsigned int hook, struct sock *sk, */ static inline int -NF_HOOK_THRESH(uint8_t pf, unsigned int hook, struct sock *sk, +NF_HOOK_THRESH(uint8_t pf, unsigned int hook, struct net *net, struct sock *sk, struct sk_buff *skb, struct net_device *in, struct net_device *out, - int (*okfn)(struct sock *, struct sk_buff *), int thresh) + int (*okfn)(struct net *, struct sock *, struct sk_buff *), + int thresh) { - int ret = nf_hook_thresh(pf, hook, sk, skb, in, out, okfn, thresh); + int ret = nf_hook_thresh(pf, hook, net, sk, skb, in, out, okfn, thresh); if (ret == 1) - ret = okfn(sk, skb); + ret = okfn(net, sk, skb); return ret; } static inline int -NF_HOOK_COND(uint8_t pf, unsigned int hook, struct sock *sk, +NF_HOOK_COND(uint8_t pf, unsigned int hook, struct net *net, struct sock *sk, struct sk_buff *skb, struct net_device *in, struct net_device *out, - int (*okfn)(struct sock *, struct sk_buff *), bool cond) + int (*okfn)(struct net *, struct sock *, struct sk_buff *), + bool cond) { int ret; if (!cond || - ((ret = nf_hook_thresh(pf, hook, sk, skb, in, out, okfn, INT_MIN)) == 1)) - ret = okfn(sk, skb); + ((ret = nf_hook_thresh(pf, hook, net, sk, skb, in, out, okfn, INT_MIN)) == 1)) + ret = okfn(net, sk, skb); return ret; } static inline int -NF_HOOK(uint8_t pf, unsigned int hook, struct sock *sk, struct sk_buff *skb, +NF_HOOK(uint8_t pf, unsigned int hook, struct net *net, struct sock *sk, struct sk_buff *skb, struct net_device *in, struct net_device *out, - int (*okfn)(struct sock *, struct sk_buff *)) + int (*okfn)(struct net *, struct sock *, struct sk_buff *)) { - return NF_HOOK_THRESH(pf, hook, sk, skb, in, out, okfn, INT_MIN); + return NF_HOOK_THRESH(pf, hook, net, sk, skb, in, out, okfn, INT_MIN); } /* Call setsockopt() */ @@ -278,7 +282,7 @@ struct nf_afinfo { struct flowi *fl, bool strict); void (*saveroute)(const struct sk_buff *skb, struct nf_queue_entry *entry); - int 
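[Editor's sketch] The netfilter.h changes above rework nf_hookfn to take a void *priv, drop the ->owner field from nf_hook_ops, and carry struct net in the hook state. A sketch of a hook written against the new prototype; foo_hook and foo_ops are illustrative names, and registration via nf_register_hook() is assumed unchanged by this series.

#include <linux/netfilter.h>
#include <linux/netfilter_ipv4.h>
#include <linux/skbuff.h>

/* Hook matching the reworked nf_hookfn prototype: the first argument is
 * now the ops' priv pointer, and the namespace is state->net instead of
 * a dev_net(state->in) lookup. */
static unsigned int foo_hook(void *priv, struct sk_buff *skb,
			     const struct nf_hook_state *state)
{
	struct net *net = state->net;

	(void)net;		/* namespace-aware filtering would go here */
	return NF_ACCEPT;
}

static struct nf_hook_ops foo_ops = {
	.hook		= foo_hook,
	/* .owner no longer exists; module pinning is handled elsewhere */
	.pf		= NFPROTO_IPV4,
	.hooknum	= NF_INET_PRE_ROUTING,
	.priority	= NF_IP_PRI_FIRST,
};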
(*reroute)(struct sk_buff *skb, + int (*reroute)(struct net *net, struct sk_buff *skb, const struct nf_queue_entry *entry); int route_key_size; }; @@ -342,21 +346,27 @@ nf_nat_decode_session(struct sk_buff *skb, struct flowi *fl, u_int8_t family) } #else /* !CONFIG_NETFILTER */ -#define NF_HOOK(pf, hook, sk, skb, indev, outdev, okfn) (okfn)(sk, skb) -#define NF_HOOK_COND(pf, hook, sk, skb, indev, outdev, okfn, cond) (okfn)(sk, skb) -static inline int nf_hook_thresh(u_int8_t pf, unsigned int hook, - struct sock *sk, - struct sk_buff *skb, - struct net_device *indev, - struct net_device *outdev, - int (*okfn)(struct sock *sk, struct sk_buff *), int thresh) +static inline int +NF_HOOK_COND(uint8_t pf, unsigned int hook, struct net *net, struct sock *sk, + struct sk_buff *skb, struct net_device *in, struct net_device *out, + int (*okfn)(struct net *, struct sock *, struct sk_buff *), + bool cond) +{ + return okfn(net, sk, skb); +} + +static inline int +NF_HOOK(uint8_t pf, unsigned int hook, struct net *net, struct sock *sk, + struct sk_buff *skb, struct net_device *in, struct net_device *out, + int (*okfn)(struct net *, struct sock *, struct sk_buff *)) { - return okfn(sk, skb); + return okfn(net, sk, skb); } -static inline int nf_hook(u_int8_t pf, unsigned int hook, struct sock *sk, - struct sk_buff *skb, struct net_device *indev, - struct net_device *outdev, - int (*okfn)(struct sock *, struct sk_buff *)) + +static inline int nf_hook(u_int8_t pf, unsigned int hook, struct net *net, + struct sock *sk, struct sk_buff *skb, + struct net_device *indev, struct net_device *outdev, + int (*okfn)(struct net *, struct sock *, struct sk_buff *)) { return 1; } @@ -373,24 +383,28 @@ nf_nat_decode_session(struct sk_buff *skb, struct flowi *fl, u_int8_t family) extern void (*ip_ct_attach)(struct sk_buff *, const struct sk_buff *) __rcu; void nf_ct_attach(struct sk_buff *, const struct sk_buff *); extern void (*nf_ct_destroy)(struct nf_conntrack *) __rcu; +#else +static inline void nf_ct_attach(struct sk_buff *new, struct sk_buff *skb) {} +#endif struct nf_conn; enum ip_conntrack_info; struct nlattr; -struct nfq_ct_hook { +struct nfnl_ct_hook { + struct nf_conn *(*get_ct)(const struct sk_buff *skb, + enum ip_conntrack_info *ctinfo); size_t (*build_size)(const struct nf_conn *ct); - int (*build)(struct sk_buff *skb, struct nf_conn *ct); + int (*build)(struct sk_buff *skb, struct nf_conn *ct, + enum ip_conntrack_info ctinfo, + u_int16_t ct_attr, u_int16_t ct_info_attr); int (*parse)(const struct nlattr *attr, struct nf_conn *ct); int (*attach_expect)(const struct nlattr *attr, struct nf_conn *ct, u32 portid, u32 report); void (*seq_adjust)(struct sk_buff *skb, struct nf_conn *ct, enum ip_conntrack_info ctinfo, s32 off); }; -extern struct nfq_ct_hook __rcu *nfq_ct_hook; -#else -static inline void nf_ct_attach(struct sk_buff *new, struct sk_buff *skb) {} -#endif +extern struct nfnl_ct_hook __rcu *nfnl_ct_hook; /** * nf_skb_duplicated - TEE target has sent a packet diff --git a/include/linux/netfilter/nfnetlink.h b/include/linux/netfilter/nfnetlink.h index e955d4730625..249d1bb01e03 100644 --- a/include/linux/netfilter/nfnetlink.h +++ b/include/linux/netfilter/nfnetlink.h @@ -45,11 +45,11 @@ int nfnetlink_unicast(struct sk_buff *skb, struct net *net, u32 portid, void nfnl_lock(__u8 subsys_id); void nfnl_unlock(__u8 subsys_id); #ifdef CONFIG_PROVE_LOCKING -int lockdep_nfnl_is_held(__u8 subsys_id); +bool lockdep_nfnl_is_held(__u8 subsys_id); #else -static inline int lockdep_nfnl_is_held(__u8 subsys_id) +static 
inline bool lockdep_nfnl_is_held(__u8 subsys_id) { - return 1; + return true; } #endif /* CONFIG_PROVE_LOCKING */ diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h index b006b719183f..c5577410c25d 100644 --- a/include/linux/netfilter/x_tables.h +++ b/include/linux/netfilter/x_tables.h @@ -13,6 +13,7 @@ * @target: the target extension * @matchinfo: per-match data * @targetinfo: per-target data + * @net network namespace through which the action was invoked * @in: input netdevice * @out: output netdevice * @fragoff: packet is a fragment, this is the data offset @@ -24,7 +25,6 @@ * Fields written to by extensions: * * @hotdrop: drop packet if we had inspection problems - * Network namespace obtainable using dev_net(in/out) */ struct xt_action_param { union { @@ -34,6 +34,7 @@ struct xt_action_param { union { const void *matchinfo, *targinfo; }; + struct net *net; const struct net_device *in, *out; int fragoff; unsigned int thoff; diff --git a/include/linux/netfilter_arp/arp_tables.h b/include/linux/netfilter_arp/arp_tables.h index c22a7fb8d0df..6f074db2f23d 100644 --- a/include/linux/netfilter_arp/arp_tables.h +++ b/include/linux/netfilter_arp/arp_tables.h @@ -53,7 +53,6 @@ extern struct xt_table *arpt_register_table(struct net *net, const struct arpt_replace *repl); extern void arpt_unregister_table(struct xt_table *table); extern unsigned int arpt_do_table(struct sk_buff *skb, - unsigned int hook, const struct nf_hook_state *state, struct xt_table *table); diff --git a/include/linux/netfilter_bridge.h b/include/linux/netfilter_bridge.h index 2437b8a5d7a9..2ed40c402b5e 100644 --- a/include/linux/netfilter_bridge.h +++ b/include/linux/netfilter_bridge.h @@ -17,7 +17,7 @@ enum nf_br_hook_priorities { #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) -int br_handle_frame_finish(struct sock *sk, struct sk_buff *skb); +int br_handle_frame_finish(struct net *net, struct sock *sk, struct sk_buff *skb); static inline void br_drop_fake_rtable(struct sk_buff *skb) { diff --git a/include/linux/netfilter_bridge/ebtables.h b/include/linux/netfilter_bridge/ebtables.h index 8ca6d6464ea3..2ea517c7c6b9 100644 --- a/include/linux/netfilter_bridge/ebtables.h +++ b/include/linux/netfilter_bridge/ebtables.h @@ -111,9 +111,9 @@ struct ebt_table { extern struct ebt_table *ebt_register_table(struct net *net, const struct ebt_table *table); extern void ebt_unregister_table(struct net *net, struct ebt_table *table); -extern unsigned int ebt_do_table(unsigned int hook, struct sk_buff *skb, - const struct net_device *in, const struct net_device *out, - struct ebt_table *table); +extern unsigned int ebt_do_table(struct sk_buff *skb, + const struct nf_hook_state *state, + struct ebt_table *table); /* Used in the kernel match() functions */ #define FWINV(bool,invflg) ((bool) ^ !!(info->invflags & invflg)) diff --git a/include/linux/netfilter_ingress.h b/include/linux/netfilter_ingress.h index cb0727fe2b3d..187feabe557c 100644 --- a/include/linux/netfilter_ingress.h +++ b/include/linux/netfilter_ingress.h @@ -17,7 +17,7 @@ static inline int nf_hook_ingress(struct sk_buff *skb) nf_hook_state_init(&state, &skb->dev->nf_hooks_ingress, NF_NETDEV_INGRESS, INT_MIN, NFPROTO_NETDEV, NULL, - skb->dev, NULL, NULL); + skb->dev, NULL, dev_net(skb->dev), NULL); return nf_hook_slow(skb, &state); } diff --git a/include/linux/netfilter_ipv4.h b/include/linux/netfilter_ipv4.h index 6e4591bb54d4..98c03b2462b5 100644 --- a/include/linux/netfilter_ipv4.h +++ b/include/linux/netfilter_ipv4.h @@ -6,7 +6,7 @@ #include 
<uapi/linux/netfilter_ipv4.h> -int ip_route_me_harder(struct sk_buff *skb, unsigned addr_type); +int ip_route_me_harder(struct net *net, struct sk_buff *skb, unsigned addr_type); __sum16 nf_ip_checksum(struct sk_buff *skb, unsigned int hook, unsigned int dataoff, u_int8_t protocol); #endif /*__LINUX_IP_NETFILTER_H*/ diff --git a/include/linux/netfilter_ipv4/ip_tables.h b/include/linux/netfilter_ipv4/ip_tables.h index 4073510da485..aa598f942c01 100644 --- a/include/linux/netfilter_ipv4/ip_tables.h +++ b/include/linux/netfilter_ipv4/ip_tables.h @@ -64,7 +64,6 @@ struct ipt_error { extern void *ipt_alloc_initial_table(const struct xt_table *); extern unsigned int ipt_do_table(struct sk_buff *skb, - unsigned int hook, const struct nf_hook_state *state, struct xt_table *table); diff --git a/include/linux/netfilter_ipv6.h b/include/linux/netfilter_ipv6.h index 771574677e83..47c6b04c28c0 100644 --- a/include/linux/netfilter_ipv6.h +++ b/include/linux/netfilter_ipv6.h @@ -17,12 +17,12 @@ struct nf_ipv6_ops { int (*chk_addr)(struct net *net, const struct in6_addr *addr, const struct net_device *dev, int strict); void (*route_input)(struct sk_buff *skb); - int (*fragment)(struct sock *sk, struct sk_buff *skb, - int (*output)(struct sock *, struct sk_buff *)); + int (*fragment)(struct net *net, struct sock *sk, struct sk_buff *skb, + int (*output)(struct net *, struct sock *, struct sk_buff *)); }; #ifdef CONFIG_NETFILTER -int ip6_route_me_harder(struct sk_buff *skb); +int ip6_route_me_harder(struct net *net, struct sk_buff *skb); __sum16 nf_ip6_checksum(struct sk_buff *skb, unsigned int hook, unsigned int dataoff, u_int8_t protocol); diff --git a/include/linux/netfilter_ipv6/ip6_tables.h b/include/linux/netfilter_ipv6/ip6_tables.h index b40d2b635778..0f76e5c674f9 100644 --- a/include/linux/netfilter_ipv6/ip6_tables.h +++ b/include/linux/netfilter_ipv6/ip6_tables.h @@ -30,7 +30,6 @@ extern struct xt_table *ip6t_register_table(struct net *net, const struct ip6t_replace *repl); extern void ip6t_unregister_table(struct net *net, struct xt_table *table); extern unsigned int ip6t_do_table(struct sk_buff *skb, - unsigned int hook, const struct nf_hook_state *state, struct xt_table *table); diff --git a/include/linux/nmi.h b/include/linux/nmi.h index 78488e099ce7..7ec5b86735f3 100644 --- a/include/linux/nmi.h +++ b/include/linux/nmi.h @@ -73,6 +73,7 @@ extern int watchdog_user_enabled; extern int watchdog_thresh; extern unsigned long *watchdog_cpumask_bits; extern int sysctl_softlockup_all_cpu_backtrace; +extern int sysctl_hardlockup_all_cpu_backtrace; struct ctl_table; extern int proc_watchdog(struct ctl_table *, int , void __user *, size_t *, loff_t *); diff --git a/include/linux/nvme.h b/include/linux/nvme.h index b5812c395351..3af5f454c04a 100644 --- a/include/linux/nvme.h +++ b/include/linux/nvme.h @@ -15,10 +15,7 @@ #ifndef _LINUX_NVME_H #define _LINUX_NVME_H -#include <uapi/linux/nvme.h> -#include <linux/pci.h> -#include <linux/kref.h> -#include <linux/blk-mq.h> +#include <linux/types.h> struct nvme_bar { __u64 cap; /* Controller Capabilities */ @@ -76,115 +73,528 @@ enum { NVME_CSTS_SHST_MASK = 3 << 2, }; -extern unsigned char nvme_io_timeout; -#define NVME_IO_TIMEOUT (nvme_io_timeout * HZ) +struct nvme_id_power_state { + __le16 max_power; /* centiwatts */ + __u8 rsvd2; + __u8 flags; + __le32 entry_lat; /* microseconds */ + __le32 exit_lat; /* microseconds */ + __u8 read_tput; + __u8 read_lat; + __u8 write_tput; + __u8 write_lat; + __le16 idle_power; + __u8 idle_scale; + __u8 rsvd19; + __le16 
active_power; + __u8 active_work_scale; + __u8 rsvd23[9]; +}; -/* - * Represents an NVM Express device. Each nvme_dev is a PCI function. - */ -struct nvme_dev { - struct list_head node; - struct nvme_queue **queues; - struct request_queue *admin_q; - struct blk_mq_tag_set tagset; - struct blk_mq_tag_set admin_tagset; - u32 __iomem *dbs; - struct device *dev; - struct dma_pool *prp_page_pool; - struct dma_pool *prp_small_pool; - int instance; - unsigned queue_count; - unsigned online_queues; - unsigned max_qid; - int q_depth; - u32 db_stride; - u32 ctrl_config; - struct msix_entry *entry; - struct nvme_bar __iomem *bar; - struct list_head namespaces; - struct kref kref; - struct device *device; - work_func_t reset_workfn; - struct work_struct reset_work; - struct work_struct probe_work; - struct work_struct scan_work; - char name[12]; - char serial[20]; - char model[40]; - char firmware_rev[8]; - bool subsystem; - u32 max_hw_sectors; - u32 stripe_size; - u32 page_size; - void __iomem *cmb; - dma_addr_t cmb_dma_addr; - u64 cmb_size; - u32 cmbsz; - u16 oncs; - u16 abort_limit; - u8 event_limit; - u8 vwc; +enum { + NVME_PS_FLAGS_MAX_POWER_SCALE = 1 << 0, + NVME_PS_FLAGS_NON_OP_STATE = 1 << 1, }; -/* - * An NVM Express namespace is equivalent to a SCSI LUN - */ -struct nvme_ns { - struct list_head list; +struct nvme_id_ctrl { + __le16 vid; + __le16 ssvid; + char sn[20]; + char mn[40]; + char fr[8]; + __u8 rab; + __u8 ieee[3]; + __u8 mic; + __u8 mdts; + __le16 cntlid; + __le32 ver; + __u8 rsvd84[172]; + __le16 oacs; + __u8 acl; + __u8 aerl; + __u8 frmw; + __u8 lpa; + __u8 elpe; + __u8 npss; + __u8 avscc; + __u8 apsta; + __le16 wctemp; + __le16 cctemp; + __u8 rsvd270[242]; + __u8 sqes; + __u8 cqes; + __u8 rsvd514[2]; + __le32 nn; + __le16 oncs; + __le16 fuses; + __u8 fna; + __u8 vwc; + __le16 awun; + __le16 awupf; + __u8 nvscc; + __u8 rsvd531; + __le16 acwu; + __u8 rsvd534[2]; + __le32 sgls; + __u8 rsvd540[1508]; + struct nvme_id_power_state psd[32]; + __u8 vs[1024]; +}; - struct nvme_dev *dev; - struct request_queue *queue; - struct gendisk *disk; +enum { + NVME_CTRL_ONCS_COMPARE = 1 << 0, + NVME_CTRL_ONCS_WRITE_UNCORRECTABLE = 1 << 1, + NVME_CTRL_ONCS_DSM = 1 << 2, + NVME_CTRL_VWC_PRESENT = 1 << 0, +}; - unsigned ns_id; - int lba_shift; - u16 ms; - bool ext; - u8 pi_type; - u64 mode_select_num_blocks; - u32 mode_select_block_len; +struct nvme_lbaf { + __le16 ms; + __u8 ds; + __u8 rp; }; -/* - * The nvme_iod describes the data in an I/O, including the list of PRP - * entries. You can't see it in this data structure because C doesn't let - * me express that. Use nvme_alloc_iod to ensure there's enough space - * allocated to store the PRP list. - */ -struct nvme_iod { - unsigned long private; /* For the use of the submitter of the I/O */ - int npages; /* In the PRP list. 
0 means small pool in use */ - int offset; /* Of PRP list */ - int nents; /* Used in scatterlist */ - int length; /* Of data, in bytes */ - dma_addr_t first_dma; - struct scatterlist meta_sg[1]; /* metadata requires single contiguous buffer */ - struct scatterlist sg[0]; -}; - -static inline u64 nvme_block_nr(struct nvme_ns *ns, sector_t sector) -{ - return (sector >> (ns->lba_shift - 9)); -} - -int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd, - void *buf, unsigned bufflen); -int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd, - void *buffer, void __user *ubuffer, unsigned bufflen, - u32 *result, unsigned timeout); -int nvme_identify_ctrl(struct nvme_dev *dev, struct nvme_id_ctrl **id); -int nvme_identify_ns(struct nvme_dev *dev, unsigned nsid, - struct nvme_id_ns **id); -int nvme_get_log_page(struct nvme_dev *dev, struct nvme_smart_log **log); -int nvme_get_features(struct nvme_dev *dev, unsigned fid, unsigned nsid, - dma_addr_t dma_addr, u32 *result); -int nvme_set_features(struct nvme_dev *dev, unsigned fid, unsigned dword11, - dma_addr_t dma_addr, u32 *result); - -struct sg_io_hdr; - -int nvme_sg_io(struct nvme_ns *ns, struct sg_io_hdr __user *u_hdr); -int nvme_sg_io32(struct nvme_ns *ns, unsigned long arg); -int nvme_sg_get_version_num(int __user *ip); +struct nvme_id_ns { + __le64 nsze; + __le64 ncap; + __le64 nuse; + __u8 nsfeat; + __u8 nlbaf; + __u8 flbas; + __u8 mc; + __u8 dpc; + __u8 dps; + __u8 nmic; + __u8 rescap; + __u8 fpi; + __u8 rsvd33; + __le16 nawun; + __le16 nawupf; + __le16 nacwu; + __le16 nabsn; + __le16 nabo; + __le16 nabspf; + __u16 rsvd46; + __le64 nvmcap[2]; + __u8 rsvd64[40]; + __u8 nguid[16]; + __u8 eui64[8]; + struct nvme_lbaf lbaf[16]; + __u8 rsvd192[192]; + __u8 vs[3712]; +}; + +enum { + NVME_NS_FEAT_THIN = 1 << 0, + NVME_NS_FLBAS_LBA_MASK = 0xf, + NVME_NS_FLBAS_META_EXT = 0x10, + NVME_LBAF_RP_BEST = 0, + NVME_LBAF_RP_BETTER = 1, + NVME_LBAF_RP_GOOD = 2, + NVME_LBAF_RP_DEGRADED = 3, + NVME_NS_DPC_PI_LAST = 1 << 4, + NVME_NS_DPC_PI_FIRST = 1 << 3, + NVME_NS_DPC_PI_TYPE3 = 1 << 2, + NVME_NS_DPC_PI_TYPE2 = 1 << 1, + NVME_NS_DPC_PI_TYPE1 = 1 << 0, + NVME_NS_DPS_PI_FIRST = 1 << 3, + NVME_NS_DPS_PI_MASK = 0x7, + NVME_NS_DPS_PI_TYPE1 = 1, + NVME_NS_DPS_PI_TYPE2 = 2, + NVME_NS_DPS_PI_TYPE3 = 3, +}; + +struct nvme_smart_log { + __u8 critical_warning; + __u8 temperature[2]; + __u8 avail_spare; + __u8 spare_thresh; + __u8 percent_used; + __u8 rsvd6[26]; + __u8 data_units_read[16]; + __u8 data_units_written[16]; + __u8 host_reads[16]; + __u8 host_writes[16]; + __u8 ctrl_busy_time[16]; + __u8 power_cycles[16]; + __u8 power_on_hours[16]; + __u8 unsafe_shutdowns[16]; + __u8 media_errors[16]; + __u8 num_err_log_entries[16]; + __le32 warning_temp_time; + __le32 critical_comp_time; + __le16 temp_sensor[8]; + __u8 rsvd216[296]; +}; + +enum { + NVME_SMART_CRIT_SPARE = 1 << 0, + NVME_SMART_CRIT_TEMPERATURE = 1 << 1, + NVME_SMART_CRIT_RELIABILITY = 1 << 2, + NVME_SMART_CRIT_MEDIA = 1 << 3, + NVME_SMART_CRIT_VOLATILE_MEMORY = 1 << 4, +}; + +enum { + NVME_AER_NOTICE_NS_CHANGED = 0x0002, +}; + +struct nvme_lba_range_type { + __u8 type; + __u8 attributes; + __u8 rsvd2[14]; + __u64 slba; + __u64 nlb; + __u8 guid[16]; + __u8 rsvd48[16]; +}; + +enum { + NVME_LBART_TYPE_FS = 0x01, + NVME_LBART_TYPE_RAID = 0x02, + NVME_LBART_TYPE_CACHE = 0x03, + NVME_LBART_TYPE_SWAP = 0x04, + + NVME_LBART_ATTRIB_TEMP = 1 << 0, + NVME_LBART_ATTRIB_HIDE = 1 << 1, +}; + +struct nvme_reservation_status { + __le32 gen; + __u8 rtype; + __u8 regctl[2]; + __u8 
resv5[2]; + __u8 ptpls; + __u8 resv10[13]; + struct { + __le16 cntlid; + __u8 rcsts; + __u8 resv3[5]; + __le64 hostid; + __le64 rkey; + } regctl_ds[]; +}; + +/* I/O commands */ + +enum nvme_opcode { + nvme_cmd_flush = 0x00, + nvme_cmd_write = 0x01, + nvme_cmd_read = 0x02, + nvme_cmd_write_uncor = 0x04, + nvme_cmd_compare = 0x05, + nvme_cmd_write_zeroes = 0x08, + nvme_cmd_dsm = 0x09, + nvme_cmd_resv_register = 0x0d, + nvme_cmd_resv_report = 0x0e, + nvme_cmd_resv_acquire = 0x11, + nvme_cmd_resv_release = 0x15, +}; + +struct nvme_common_command { + __u8 opcode; + __u8 flags; + __u16 command_id; + __le32 nsid; + __le32 cdw2[2]; + __le64 metadata; + __le64 prp1; + __le64 prp2; + __le32 cdw10[6]; +}; + +struct nvme_rw_command { + __u8 opcode; + __u8 flags; + __u16 command_id; + __le32 nsid; + __u64 rsvd2; + __le64 metadata; + __le64 prp1; + __le64 prp2; + __le64 slba; + __le16 length; + __le16 control; + __le32 dsmgmt; + __le32 reftag; + __le16 apptag; + __le16 appmask; +}; + +enum { + NVME_RW_LR = 1 << 15, + NVME_RW_FUA = 1 << 14, + NVME_RW_DSM_FREQ_UNSPEC = 0, + NVME_RW_DSM_FREQ_TYPICAL = 1, + NVME_RW_DSM_FREQ_RARE = 2, + NVME_RW_DSM_FREQ_READS = 3, + NVME_RW_DSM_FREQ_WRITES = 4, + NVME_RW_DSM_FREQ_RW = 5, + NVME_RW_DSM_FREQ_ONCE = 6, + NVME_RW_DSM_FREQ_PREFETCH = 7, + NVME_RW_DSM_FREQ_TEMP = 8, + NVME_RW_DSM_LATENCY_NONE = 0 << 4, + NVME_RW_DSM_LATENCY_IDLE = 1 << 4, + NVME_RW_DSM_LATENCY_NORM = 2 << 4, + NVME_RW_DSM_LATENCY_LOW = 3 << 4, + NVME_RW_DSM_SEQ_REQ = 1 << 6, + NVME_RW_DSM_COMPRESSED = 1 << 7, + NVME_RW_PRINFO_PRCHK_REF = 1 << 10, + NVME_RW_PRINFO_PRCHK_APP = 1 << 11, + NVME_RW_PRINFO_PRCHK_GUARD = 1 << 12, + NVME_RW_PRINFO_PRACT = 1 << 13, +}; + +struct nvme_dsm_cmd { + __u8 opcode; + __u8 flags; + __u16 command_id; + __le32 nsid; + __u64 rsvd2[2]; + __le64 prp1; + __le64 prp2; + __le32 nr; + __le32 attributes; + __u32 rsvd12[4]; +}; + +enum { + NVME_DSMGMT_IDR = 1 << 0, + NVME_DSMGMT_IDW = 1 << 1, + NVME_DSMGMT_AD = 1 << 2, +}; + +struct nvme_dsm_range { + __le32 cattr; + __le32 nlb; + __le64 slba; +}; + +/* Admin commands */ + +enum nvme_admin_opcode { + nvme_admin_delete_sq = 0x00, + nvme_admin_create_sq = 0x01, + nvme_admin_get_log_page = 0x02, + nvme_admin_delete_cq = 0x04, + nvme_admin_create_cq = 0x05, + nvme_admin_identify = 0x06, + nvme_admin_abort_cmd = 0x08, + nvme_admin_set_features = 0x09, + nvme_admin_get_features = 0x0a, + nvme_admin_async_event = 0x0c, + nvme_admin_activate_fw = 0x10, + nvme_admin_download_fw = 0x11, + nvme_admin_format_nvm = 0x80, + nvme_admin_security_send = 0x81, + nvme_admin_security_recv = 0x82, +}; + +enum { + NVME_QUEUE_PHYS_CONTIG = (1 << 0), + NVME_CQ_IRQ_ENABLED = (1 << 1), + NVME_SQ_PRIO_URGENT = (0 << 1), + NVME_SQ_PRIO_HIGH = (1 << 1), + NVME_SQ_PRIO_MEDIUM = (2 << 1), + NVME_SQ_PRIO_LOW = (3 << 1), + NVME_FEAT_ARBITRATION = 0x01, + NVME_FEAT_POWER_MGMT = 0x02, + NVME_FEAT_LBA_RANGE = 0x03, + NVME_FEAT_TEMP_THRESH = 0x04, + NVME_FEAT_ERR_RECOVERY = 0x05, + NVME_FEAT_VOLATILE_WC = 0x06, + NVME_FEAT_NUM_QUEUES = 0x07, + NVME_FEAT_IRQ_COALESCE = 0x08, + NVME_FEAT_IRQ_CONFIG = 0x09, + NVME_FEAT_WRITE_ATOMIC = 0x0a, + NVME_FEAT_ASYNC_EVENT = 0x0b, + NVME_FEAT_AUTO_PST = 0x0c, + NVME_FEAT_SW_PROGRESS = 0x80, + NVME_FEAT_HOST_ID = 0x81, + NVME_FEAT_RESV_MASK = 0x82, + NVME_FEAT_RESV_PERSIST = 0x83, + NVME_LOG_ERROR = 0x01, + NVME_LOG_SMART = 0x02, + NVME_LOG_FW_SLOT = 0x03, + NVME_LOG_RESERVATION = 0x80, + NVME_FWACT_REPL = (0 << 3), + NVME_FWACT_REPL_ACTV = (1 << 3), + NVME_FWACT_ACTV = (2 << 3), +}; + +struct nvme_identify { + __u8 opcode; 
+ __u8 flags; + __u16 command_id; + __le32 nsid; + __u64 rsvd2[2]; + __le64 prp1; + __le64 prp2; + __le32 cns; + __u32 rsvd11[5]; +}; + +struct nvme_features { + __u8 opcode; + __u8 flags; + __u16 command_id; + __le32 nsid; + __u64 rsvd2[2]; + __le64 prp1; + __le64 prp2; + __le32 fid; + __le32 dword11; + __u32 rsvd12[4]; +}; + +struct nvme_create_cq { + __u8 opcode; + __u8 flags; + __u16 command_id; + __u32 rsvd1[5]; + __le64 prp1; + __u64 rsvd8; + __le16 cqid; + __le16 qsize; + __le16 cq_flags; + __le16 irq_vector; + __u32 rsvd12[4]; +}; + +struct nvme_create_sq { + __u8 opcode; + __u8 flags; + __u16 command_id; + __u32 rsvd1[5]; + __le64 prp1; + __u64 rsvd8; + __le16 sqid; + __le16 qsize; + __le16 sq_flags; + __le16 cqid; + __u32 rsvd12[4]; +}; + +struct nvme_delete_queue { + __u8 opcode; + __u8 flags; + __u16 command_id; + __u32 rsvd1[9]; + __le16 qid; + __u16 rsvd10; + __u32 rsvd11[5]; +}; + +struct nvme_abort_cmd { + __u8 opcode; + __u8 flags; + __u16 command_id; + __u32 rsvd1[9]; + __le16 sqid; + __u16 cid; + __u32 rsvd11[5]; +}; + +struct nvme_download_firmware { + __u8 opcode; + __u8 flags; + __u16 command_id; + __u32 rsvd1[5]; + __le64 prp1; + __le64 prp2; + __le32 numd; + __le32 offset; + __u32 rsvd12[4]; +}; + +struct nvme_format_cmd { + __u8 opcode; + __u8 flags; + __u16 command_id; + __le32 nsid; + __u64 rsvd2[4]; + __le32 cdw10; + __u32 rsvd11[5]; +}; + +struct nvme_command { + union { + struct nvme_common_command common; + struct nvme_rw_command rw; + struct nvme_identify identify; + struct nvme_features features; + struct nvme_create_cq create_cq; + struct nvme_create_sq create_sq; + struct nvme_delete_queue delete_queue; + struct nvme_download_firmware dlfw; + struct nvme_format_cmd format; + struct nvme_dsm_cmd dsm; + struct nvme_abort_cmd abort; + }; +}; + +enum { + NVME_SC_SUCCESS = 0x0, + NVME_SC_INVALID_OPCODE = 0x1, + NVME_SC_INVALID_FIELD = 0x2, + NVME_SC_CMDID_CONFLICT = 0x3, + NVME_SC_DATA_XFER_ERROR = 0x4, + NVME_SC_POWER_LOSS = 0x5, + NVME_SC_INTERNAL = 0x6, + NVME_SC_ABORT_REQ = 0x7, + NVME_SC_ABORT_QUEUE = 0x8, + NVME_SC_FUSED_FAIL = 0x9, + NVME_SC_FUSED_MISSING = 0xa, + NVME_SC_INVALID_NS = 0xb, + NVME_SC_CMD_SEQ_ERROR = 0xc, + NVME_SC_SGL_INVALID_LAST = 0xd, + NVME_SC_SGL_INVALID_COUNT = 0xe, + NVME_SC_SGL_INVALID_DATA = 0xf, + NVME_SC_SGL_INVALID_METADATA = 0x10, + NVME_SC_SGL_INVALID_TYPE = 0x11, + NVME_SC_LBA_RANGE = 0x80, + NVME_SC_CAP_EXCEEDED = 0x81, + NVME_SC_NS_NOT_READY = 0x82, + NVME_SC_RESERVATION_CONFLICT = 0x83, + NVME_SC_CQ_INVALID = 0x100, + NVME_SC_QID_INVALID = 0x101, + NVME_SC_QUEUE_SIZE = 0x102, + NVME_SC_ABORT_LIMIT = 0x103, + NVME_SC_ABORT_MISSING = 0x104, + NVME_SC_ASYNC_LIMIT = 0x105, + NVME_SC_FIRMWARE_SLOT = 0x106, + NVME_SC_FIRMWARE_IMAGE = 0x107, + NVME_SC_INVALID_VECTOR = 0x108, + NVME_SC_INVALID_LOG_PAGE = 0x109, + NVME_SC_INVALID_FORMAT = 0x10a, + NVME_SC_FIRMWARE_NEEDS_RESET = 0x10b, + NVME_SC_INVALID_QUEUE = 0x10c, + NVME_SC_FEATURE_NOT_SAVEABLE = 0x10d, + NVME_SC_FEATURE_NOT_CHANGEABLE = 0x10e, + NVME_SC_FEATURE_NOT_PER_NS = 0x10f, + NVME_SC_FW_NEEDS_RESET_SUBSYS = 0x110, + NVME_SC_BAD_ATTRIBUTES = 0x180, + NVME_SC_INVALID_PI = 0x181, + NVME_SC_READ_ONLY = 0x182, + NVME_SC_WRITE_FAULT = 0x280, + NVME_SC_READ_ERROR = 0x281, + NVME_SC_GUARD_CHECK = 0x282, + NVME_SC_APPTAG_CHECK = 0x283, + NVME_SC_REFTAG_CHECK = 0x284, + NVME_SC_COMPARE_FAILED = 0x285, + NVME_SC_ACCESS_DENIED = 0x286, + NVME_SC_DNR = 0x4000, +}; + +struct nvme_completion { + __le32 result; /* Used by admin commands to return data */ + __u32 rsvd; + __le16 
sq_head; /* how much of this queue may be reclaimed */ + __le16 sq_id; /* submission queue that generated this entry */ + __u16 command_id; /* of the command which completed */ + __le16 status; /* did the command fail, and if so, why? */ +}; + +#define NVME_VS(major, minor) (((major) << 16) | ((minor) << 8)) #endif /* _LINUX_NVME_H */ diff --git a/include/linux/of_gpio.h b/include/linux/of_gpio.h index f3191828f037..87d6d1632dd4 100644 --- a/include/linux/of_gpio.h +++ b/include/linux/of_gpio.h @@ -29,6 +29,7 @@ struct device_node; */ enum of_gpio_flags { OF_GPIO_ACTIVE_LOW = 0x1, + OF_GPIO_SINGLE_ENDED = 0x2, }; #ifdef CONFIG_OF_GPIO diff --git a/include/linux/of_irq.h b/include/linux/of_irq.h index 4bcbd586a672..65d969246a4d 100644 --- a/include/linux/of_irq.h +++ b/include/linux/of_irq.h @@ -46,6 +46,11 @@ extern int of_irq_get(struct device_node *dev, int index); extern int of_irq_get_byname(struct device_node *dev, const char *name); extern int of_irq_to_resource_table(struct device_node *dev, struct resource *res, int nr_irqs); +extern struct irq_domain *of_msi_get_domain(struct device *dev, + struct device_node *np, + enum irq_domain_bus_token token); +extern struct irq_domain *of_msi_map_get_device_domain(struct device *dev, + u32 rid); #else static inline int of_irq_count(struct device_node *dev) { @@ -64,6 +69,17 @@ static inline int of_irq_to_resource_table(struct device_node *dev, { return 0; } +static inline struct irq_domain *of_msi_get_domain(struct device *dev, + struct device_node *np, + enum irq_domain_bus_token token) +{ + return NULL; +} +static inline struct irq_domain *of_msi_map_get_device_domain(struct device *dev, + u32 rid) +{ + return NULL; +} #endif #if defined(CONFIG_OF) @@ -75,6 +91,7 @@ static inline int of_irq_to_resource_table(struct device_node *dev, extern unsigned int irq_of_parse_and_map(struct device_node *node, int index); extern struct device_node *of_irq_find_parent(struct device_node *child); extern void of_msi_configure(struct device *dev, struct device_node *np); +u32 of_msi_map_rid(struct device *dev, struct device_node *msi_np, u32 rid_in); #else /* !CONFIG_OF */ static inline unsigned int irq_of_parse_and_map(struct device_node *dev, @@ -87,6 +104,12 @@ static inline void *of_irq_find_parent(struct device_node *child) { return NULL; } + +static inline u32 of_msi_map_rid(struct device *dev, + struct device_node *msi_np, u32 rid_in) +{ + return rid_in; +} #endif /* !CONFIG_OF */ #endif /* __OF_IRQ_H */ diff --git a/include/linux/of_pci.h b/include/linux/of_pci.h index 29fd3fe1c035..38c0533a3359 100644 --- a/include/linux/of_pci.h +++ b/include/linux/of_pci.h @@ -17,6 +17,7 @@ int of_irq_parse_and_map_pci(const struct pci_dev *dev, u8 slot, u8 pin); int of_pci_parse_bus_range(struct device_node *node, struct resource *res); int of_get_pci_domain_nr(struct device_node *node); void of_pci_dma_configure(struct pci_dev *pci_dev); +void of_pci_check_probe_only(void); #else static inline int of_irq_parse_pci(const struct pci_dev *pdev, struct of_phandle_args *out_irq) { @@ -53,6 +54,8 @@ of_get_pci_domain_nr(struct device_node *node) } static inline void of_pci_dma_configure(struct pci_dev *pci_dev) { } + +static inline void of_pci_check_probe_only(void) { } #endif #if defined(CONFIG_OF_ADDRESS) diff --git a/include/linux/omap-dma.h b/include/linux/omap-dma.h index e5a70132a240..88fa8af2b937 100644 --- a/include/linux/omap-dma.h +++ b/include/linux/omap-dma.h @@ -17,7 +17,7 @@ #include <linux/platform_device.h> -#define INT_DMA_LCD 25 +#define 
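[Editor's sketch] With the command structures now exposed in <linux/nvme.h> above, an Identify Controller submission can be built directly from them. The sketch below assumes dma_addr already points at a DMA-mapped 4KB buffer and that CNS value 1 selects the controller data structure (per the NVMe spec, not this header); the actual queue submission is driver-specific and omitted.

#include <linux/nvme.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/types.h>

/* Build (but do not submit) an Identify Controller admin command. */
static void foo_build_identify_ctrl(struct nvme_command *c, dma_addr_t dma_addr)
{
	memset(c, 0, sizeof(*c));
	c->identify.opcode = nvme_admin_identify;
	c->identify.nsid   = 0;				/* controller-scoped */
	c->identify.prp1   = cpu_to_le64(dma_addr);
	c->identify.cns    = cpu_to_le32(1);		/* 1 = identify controller */
}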
INT_DMA_LCD (NR_IRQS_LEGACY + 25) #define OMAP1_DMA_TOUT_IRQ (1 << 0) #define OMAP_DMA_DROP_IRQ (1 << 1) diff --git a/include/linux/once.h b/include/linux/once.h new file mode 100644 index 000000000000..285f12cb40e6 --- /dev/null +++ b/include/linux/once.h @@ -0,0 +1,57 @@ +#ifndef _LINUX_ONCE_H +#define _LINUX_ONCE_H + +#include <linux/types.h> +#include <linux/jump_label.h> + +bool __do_once_start(bool *done, unsigned long *flags); +void __do_once_done(bool *done, struct static_key *once_key, + unsigned long *flags); + +/* Call a function exactly once. The idea of DO_ONCE() is to perform + * a function call such as initialization of random seeds, etc, only + * once, where DO_ONCE() can live in the fast-path. After @func has + * been called with the passed arguments, the static key will patch + * out the condition into a nop. DO_ONCE() guarantees type safety of + * arguments! + * + * Not that the following is not equivalent ... + * + * DO_ONCE(func, arg); + * DO_ONCE(func, arg); + * + * ... to this version: + * + * void foo(void) + * { + * DO_ONCE(func, arg); + * } + * + * foo(); + * foo(); + * + * In case the one-time invocation could be triggered from multiple + * places, then a common helper function must be defined, so that only + * a single static key will be placed there! + */ +#define DO_ONCE(func, ...) \ + ({ \ + bool ___ret = false; \ + static bool ___done = false; \ + static struct static_key ___once_key = STATIC_KEY_INIT_TRUE; \ + if (static_key_true(&___once_key)) { \ + unsigned long ___flags; \ + ___ret = __do_once_start(&___done, &___flags); \ + if (unlikely(___ret)) { \ + func(__VA_ARGS__); \ + __do_once_done(&___done, &___once_key, \ + &___flags); \ + } \ + } \ + ___ret; \ + }) + +#define get_random_once(buf, nbytes) \ + DO_ONCE(get_random_bytes, (buf), (nbytes)) + +#endif /* _LINUX_ONCE_H */ diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h index 416509e26d6d..a525e5067484 100644 --- a/include/linux/page-flags.h +++ b/include/linux/page-flags.h @@ -256,7 +256,7 @@ PAGEFLAG(Readahead, reclaim) TESTCLEARFLAG(Readahead, reclaim) * Must use a macro here due to header dependency issues. page_zone() is not * available at this point. 
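[Editor's sketch] net_get_random_once() is now a thin wrapper around the get_random_once()/DO_ONCE() machinery added in <linux/once.h> above. A minimal sketch of the intended fast-path pattern; foo_secret and foo_get_secret() are made-up names.

#include <linux/once.h>
#include <linux/random.h>
#include <linux/cache.h>
#include <linux/types.h>

static u32 foo_secret __read_mostly;

/* Safe to call on every packet: after the first invocation the static key
 * patches the DO_ONCE() condition out of the fast path. */
static u32 foo_get_secret(void)
{
	get_random_once(&foo_secret, sizeof(foo_secret));
	return foo_secret;
}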
*/ -#define PageHighMem(__p) is_highmem(page_zone(__p)) +#define PageHighMem(__p) is_highmem_idx(page_zonenum(__p)) #else PAGEFLAG_FALSE(HighMem) #endif diff --git a/include/linux/page_counter.h b/include/linux/page_counter.h index 17fa4f8de3a6..7e62920a3a94 100644 --- a/include/linux/page_counter.h +++ b/include/linux/page_counter.h @@ -36,9 +36,9 @@ static inline unsigned long page_counter_read(struct page_counter *counter) void page_counter_cancel(struct page_counter *counter, unsigned long nr_pages); void page_counter_charge(struct page_counter *counter, unsigned long nr_pages); -int page_counter_try_charge(struct page_counter *counter, - unsigned long nr_pages, - struct page_counter **fail); +bool page_counter_try_charge(struct page_counter *counter, + unsigned long nr_pages, + struct page_counter **fail); void page_counter_uncharge(struct page_counter *counter, unsigned long nr_pages); int page_counter_limit(struct page_counter *counter, unsigned long limit); int page_counter_memparse(const char *buf, const char *max, diff --git a/include/linux/pci-acpi.h b/include/linux/pci-acpi.h index a965efa52152..89ab0572dbc6 100644 --- a/include/linux/pci-acpi.h +++ b/include/linux/pci-acpi.h @@ -52,6 +52,30 @@ static inline acpi_handle acpi_pci_get_bridge_handle(struct pci_bus *pbus) return ACPI_HANDLE(dev); } +struct acpi_pci_root; +struct acpi_pci_root_ops; + +struct acpi_pci_root_info { + struct acpi_pci_root *root; + struct acpi_device *bridge; + struct acpi_pci_root_ops *ops; + struct list_head resources; + char name[16]; +}; + +struct acpi_pci_root_ops { + struct pci_ops *pci_ops; + int (*init_info)(struct acpi_pci_root_info *info); + void (*release_info)(struct acpi_pci_root_info *info); + int (*prepare_resources)(struct acpi_pci_root_info *info); +}; + +extern int acpi_pci_probe_root_resources(struct acpi_pci_root_info *info); +extern struct pci_bus *acpi_pci_root_create(struct acpi_pci_root *root, + struct acpi_pci_root_ops *ops, + struct acpi_pci_root_info *info, + void *sd); + void acpi_pci_add_bus(struct pci_bus *bus); void acpi_pci_remove_bus(struct pci_bus *bus); diff --git a/include/linux/pci.h b/include/linux/pci.h index e90eb22de628..e828e7b4afec 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -820,6 +820,7 @@ void pci_bus_add_device(struct pci_dev *dev); void pci_read_bridge_bases(struct pci_bus *child); struct resource *pci_find_parent_resource(const struct pci_dev *dev, struct resource *res); +struct pci_dev *pci_find_pcie_root_port(struct pci_dev *dev); u8 pci_swizzle_interrupt_pin(const struct pci_dev *dev, u8 pin); int pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge); u8 pci_common_swizzle(struct pci_dev *dev, u8 *pinp); @@ -1192,6 +1193,17 @@ void pci_unregister_driver(struct pci_driver *dev); module_driver(__pci_driver, pci_register_driver, \ pci_unregister_driver) +/** + * builtin_pci_driver() - Helper macro for registering a PCI driver + * @__pci_driver: pci_driver struct + * + * Helper macro for PCI drivers which do not do anything special in their + * init code. This eliminates a lot of boilerplate. Each driver may only + * use this macro once, and calling it replaces device_initcall(...) 
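[Editor's sketch] The pci.h hunk here adds builtin_pci_driver() for PCI drivers that are always built in. A sketch of the intended use; foo_pci_driver, foo_ids, foo_probe and the device IDs are placeholders, not part of this diff.

#include <linux/pci.h>

static const struct pci_device_id foo_ids[] = {
	{ PCI_DEVICE(0x1234, 0x5678) },		/* illustrative IDs */
	{ }
};

static int foo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	return pcim_enable_device(pdev);
}

static struct pci_driver foo_pci_driver = {
	.name		= "foo",
	.id_table	= foo_ids,
	.probe		= foo_probe,
};
/* Expands to a built-in initcall registering the driver; no exit path. */
builtin_pci_driver(foo_pci_driver);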
+ */ +#define builtin_pci_driver(__pci_driver) \ + builtin_driver(__pci_driver, pci_register_driver) + struct pci_driver *pci_dev_driver(const struct pci_dev *dev); int pci_add_dynid(struct pci_driver *drv, unsigned int vendor, unsigned int device, diff --git a/include/linux/percpu-rwsem.h b/include/linux/percpu-rwsem.h index 834c4e52cb2d..c2fa3ecb0dce 100644 --- a/include/linux/percpu-rwsem.h +++ b/include/linux/percpu-rwsem.h @@ -5,11 +5,12 @@ #include <linux/rwsem.h> #include <linux/percpu.h> #include <linux/wait.h> +#include <linux/rcu_sync.h> #include <linux/lockdep.h> struct percpu_rw_semaphore { + struct rcu_sync rss; unsigned int __percpu *fast_read_ctr; - atomic_t write_ctr; struct rw_semaphore rw_sem; atomic_t slow_read_ctr; wait_queue_head_t write_waitq; diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index 092a0e8a479a..d841d33bcdc9 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -140,33 +140,67 @@ struct hw_perf_event { }; #endif }; + /* + * If the event is a per task event, this will point to the task in + * question. See the comment in perf_event_alloc(). + */ struct task_struct *target; + +/* + * hw_perf_event::state flags; used to track the PERF_EF_* state. + */ +#define PERF_HES_STOPPED 0x01 /* the counter is stopped */ +#define PERF_HES_UPTODATE 0x02 /* event->count up-to-date */ +#define PERF_HES_ARCH 0x04 + int state; + + /* + * The last observed hardware counter value, updated with a + * local64_cmpxchg() such that pmu::read() can be called nested. + */ local64_t prev_count; + + /* + * The period to start the next sample with. + */ u64 sample_period; + + /* + * The period we started this sample with. + */ u64 last_period; + + /* + * However much is left of the current period; note that this is + * a full 64bit value and allows for generation of periods longer + * than hardware might allow. + */ local64_t period_left; + + /* + * State for throttling the event, see __perf_event_overflow() and + * perf_adjust_freq_unthr_context(). + */ u64 interrupts_seq; u64 interrupts; + /* + * State for freq target events, see __perf_event_overflow() and + * perf_adjust_freq_unthr_context(). + */ u64 freq_time_stamp; u64 freq_count_stamp; #endif }; -/* - * hw_perf_event::state flags - */ -#define PERF_HES_STOPPED 0x01 /* the counter is stopped */ -#define PERF_HES_UPTODATE 0x02 /* event->count up-to-date */ -#define PERF_HES_ARCH 0x04 - struct perf_event; /* * Common implementation detail of pmu::{start,commit,cancel}_txn */ -#define PERF_EVENT_TXN 0x1 +#define PERF_PMU_TXN_ADD 0x1 /* txn to add/schedule event on PMU */ +#define PERF_PMU_TXN_READ 0x2 /* txn to read event group from PMU */ /** * pmu::capabilities flags @@ -210,7 +244,19 @@ struct pmu { /* * Try and initialize the event for this PMU. - * Should return -ENOENT when the @event doesn't match this PMU. + * + * Returns: + * -ENOENT -- @event is not for this PMU + * + * -ENODEV -- @event is for this PMU but PMU not present + * -EBUSY -- @event is for this PMU but PMU temporarily unavailable + * -EINVAL -- @event is for this PMU but @event is not valid + * -EOPNOTSUPP -- @event is for this PMU, @event is valid, but not supported + * -EACCESS -- @event is for this PMU, @event is valid, but no privilidges + * + * 0 -- @event is for this PMU and valid + * + * Other error return values are allowed. 
*/ int (*event_init) (struct perf_event *event); @@ -221,27 +267,61 @@ struct pmu { void (*event_mapped) (struct perf_event *event); /*optional*/ void (*event_unmapped) (struct perf_event *event); /*optional*/ + /* + * Flags for ->add()/->del()/ ->start()/->stop(). There are + * matching hw_perf_event::state flags. + */ #define PERF_EF_START 0x01 /* start the counter when adding */ #define PERF_EF_RELOAD 0x02 /* reload the counter when starting */ #define PERF_EF_UPDATE 0x04 /* update the counter when stopping */ /* - * Adds/Removes a counter to/from the PMU, can be done inside - * a transaction, see the ->*_txn() methods. + * Adds/Removes a counter to/from the PMU, can be done inside a + * transaction, see the ->*_txn() methods. + * + * The add/del callbacks will reserve all hardware resources required + * to service the event, this includes any counter constraint + * scheduling etc. + * + * Called with IRQs disabled and the PMU disabled on the CPU the event + * is on. + * + * ->add() called without PERF_EF_START should result in the same state + * as ->add() followed by ->stop(). + * + * ->del() must always PERF_EF_UPDATE stop an event. If it calls + * ->stop() that must deal with already being stopped without + * PERF_EF_UPDATE. */ int (*add) (struct perf_event *event, int flags); void (*del) (struct perf_event *event, int flags); /* - * Starts/Stops a counter present on the PMU. The PMI handler - * should stop the counter when perf_event_overflow() returns - * !0. ->start() will be used to continue. + * Starts/Stops a counter present on the PMU. + * + * The PMI handler should stop the counter when perf_event_overflow() + * returns !0. ->start() will be used to continue. + * + * Also used to change the sample period. + * + * Called with IRQs disabled and the PMU disabled on the CPU the event + * is on -- will be called from NMI context with the PMU generates + * NMIs. + * + * ->stop() with PERF_EF_UPDATE will read the counter and update + * period/count values like ->read() would. + * + * ->start() with PERF_EF_RELOAD will reprogram the the counter + * value, must be preceded by a ->stop() with PERF_EF_UPDATE. */ void (*start) (struct perf_event *event, int flags); void (*stop) (struct perf_event *event, int flags); /* * Updates the counter value of the event. + * + * For sampling capable PMUs this will also update the software period + * hw_perf_event::period_left field. */ void (*read) (struct perf_event *event); @@ -252,20 +332,26 @@ struct pmu { * * Start the transaction, after this ->add() doesn't need to * do schedulability tests. + * + * Optional. */ - void (*start_txn) (struct pmu *pmu); /* optional */ + void (*start_txn) (struct pmu *pmu, unsigned int txn_flags); /* * If ->start_txn() disabled the ->add() schedulability test * then ->commit_txn() is required to perform one. On success * the transaction is closed. On error the transaction is kept * open until ->cancel_txn() is called. + * + * Optional. */ - int (*commit_txn) (struct pmu *pmu); /* optional */ + int (*commit_txn) (struct pmu *pmu); /* * Will cancel the transaction, assumes ->del() is called * for each successful ->add() during the transaction. + * + * Optional. 
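[Editor's sketch] start_txn() above now receives a txn_flags argument (PERF_PMU_TXN_ADD or PERF_PMU_TXN_READ). The sketch below shows how a PMU driver might adapt, modelled on the common conversion pattern but with made-up foo_* names; PMUs that do not batch reads simply ignore everything except PERF_PMU_TXN_ADD.

#include <linux/perf_event.h>
#include <linux/percpu.h>

static DEFINE_PER_CPU(unsigned int, foo_txn_flags);

static void foo_pmu_start_txn(struct pmu *pmu, unsigned int txn_flags)
{
	/* Remember the transaction type for commit_txn()/cancel_txn(). */
	__this_cpu_write(foo_txn_flags, txn_flags);

	/* Only ADD transactions need the scheduling fast path. */
	if (txn_flags & ~PERF_PMU_TXN_ADD)
		return;

	perf_pmu_disable(pmu);
}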
*/ - void (*cancel_txn) (struct pmu *pmu); /* optional */ + void (*cancel_txn) (struct pmu *pmu); /* * Will return the value for perf_event_mmap_page::index for this event, diff --git a/include/linux/phy.h b/include/linux/phy.h index 962387a192f1..05fde31b6dc6 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h @@ -19,6 +19,7 @@ #include <linux/spinlock.h> #include <linux/ethtool.h> #include <linux/mii.h> +#include <linux/module.h> #include <linux/timer.h> #include <linux/workqueue.h> #include <linux/mod_devicetable.h> @@ -153,6 +154,7 @@ struct sk_buff; * PHYs should register using this structure */ struct mii_bus { + struct module *owner; const char *name; char id[MII_BUS_ID_SIZE]; void *priv; @@ -198,7 +200,8 @@ static inline struct mii_bus *mdiobus_alloc(void) return mdiobus_alloc_size(0); } -int mdiobus_register(struct mii_bus *bus); +int __mdiobus_register(struct mii_bus *bus, struct module *owner); +#define mdiobus_register(bus) __mdiobus_register(bus, THIS_MODULE) void mdiobus_unregister(struct mii_bus *bus); void mdiobus_free(struct mii_bus *bus); struct mii_bus *devm_mdiobus_alloc_size(struct device *dev, int sizeof_priv); @@ -210,7 +213,9 @@ static inline struct mii_bus *devm_mdiobus_alloc(struct device *dev) void devm_mdiobus_free(struct device *dev, struct mii_bus *bus); struct phy_device *mdiobus_scan(struct mii_bus *bus, int addr); int mdiobus_read(struct mii_bus *bus, int addr, u32 regnum); +int mdiobus_read_nested(struct mii_bus *bus, int addr, u32 regnum); int mdiobus_write(struct mii_bus *bus, int addr, u32 regnum, u16 val); +int mdiobus_write_nested(struct mii_bus *bus, int addr, u32 regnum, u16 val); #define PHY_INTERRUPT_DISABLED 0x0 @@ -742,6 +747,7 @@ struct phy_device *phy_device_create(struct mii_bus *bus, int addr, int phy_id, struct phy_c45_device_ids *c45_ids); struct phy_device *get_phy_device(struct mii_bus *bus, int addr, bool is_c45); int phy_device_register(struct phy_device *phy); +void phy_device_remove(struct phy_device *phydev); int phy_init_hw(struct phy_device *phydev); int phy_suspend(struct phy_device *phydev); int phy_resume(struct phy_device *phydev); @@ -794,6 +800,7 @@ int phy_mii_ioctl(struct phy_device *phydev, struct ifreq *ifr, int cmd); int phy_start_interrupts(struct phy_device *phydev); void phy_print_status(struct phy_device *phydev); void phy_device_free(struct phy_device *phydev); +int phy_set_max_speed(struct phy_device *phydev, u32 max_speed); int phy_register_fixup(const char *bus_id, u32 phy_uid, u32 phy_uid_mask, int (*run)(struct phy_device *)); diff --git a/include/linux/pinctrl/devinfo.h b/include/linux/pinctrl/devinfo.h index 281cb91ddcf5..05082e407c4a 100644 --- a/include/linux/pinctrl/devinfo.h +++ b/include/linux/pinctrl/devinfo.h @@ -24,10 +24,14 @@ * struct dev_pin_info - pin state container for devices * @p: pinctrl handle for the containing device * @default_state: the default state for the handle, if found + * @init_state: the state at probe time, if found + * @sleep_state: the state at suspend time, if found + * @idle_state: the state at idle (runtime suspend) time, if found */ struct dev_pin_info { struct pinctrl *p; struct pinctrl_state *default_state; + struct pinctrl_state *init_state; #ifdef CONFIG_PM struct pinctrl_state *sleep_state; struct pinctrl_state *idle_state; @@ -35,6 +39,7 @@ struct dev_pin_info { }; extern int pinctrl_bind_pins(struct device *dev); +extern int pinctrl_init_done(struct device *dev); #else @@ -45,5 +50,10 @@ static inline int pinctrl_bind_pins(struct device *dev) return 0; } 
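As a usage sketch for the mdiobus ownership change in the phy.h hunk above: mdiobus_register() now expands to __mdiobus_register(bus, THIS_MODULE), so the MDIO core can take a reference on the registering module while PHYs on the bus are in use. The bus name, accessors and probe function below are illustrative only, not taken from any in-tree driver.

	#include <linux/kernel.h>
	#include <linux/phy.h>
	#include <linux/platform_device.h>

	/* Hypothetical accessors; a real bus driver would talk to its hardware. */
	static int example_mdio_read(struct mii_bus *bus, int addr, int regnum)
	{
		return 0xffff;		/* an empty MDIO bus reads back all ones */
	}

	static int example_mdio_write(struct mii_bus *bus, int addr, int regnum,
				      u16 val)
	{
		return 0;
	}

	static int example_mdio_probe(struct platform_device *pdev)
	{
		struct mii_bus *bus = devm_mdiobus_alloc(&pdev->dev);

		if (!bus)
			return -ENOMEM;

		bus->name = "example-mdio";
		snprintf(bus->id, MII_BUS_ID_SIZE, "%s", dev_name(&pdev->dev));
		bus->parent = &pdev->dev;
		bus->read = example_mdio_read;
		bus->write = example_mdio_write;

		/* Expands to __mdiobus_register(bus, THIS_MODULE). */
		return mdiobus_register(bus);
	}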
+static inline int pinctrl_init_done(struct device *dev) +{ + return 0; +} + #endif /* CONFIG_PINCTRL */ #endif /* PINCTRL_DEVINFO_H */ diff --git a/include/linux/pinctrl/pinconf-generic.h b/include/linux/pinctrl/pinconf-generic.h index fe65962b264f..d921afd5f109 100644 --- a/include/linux/pinctrl/pinconf-generic.h +++ b/include/linux/pinctrl/pinconf-generic.h @@ -20,6 +20,11 @@ /** * enum pin_config_param - possible pin configuration parameters + * @PIN_CONFIG_BIAS_BUS_HOLD: the pin will be set to weakly latch so that it + * weakly drives the last value on a tristate bus, also known as a "bus + * holder", "bus keeper" or "repeater". This allows another device on the + * bus to change the value by driving the bus high or low and switching to + * tristate. The argument is ignored. * @PIN_CONFIG_BIAS_DISABLE: disable any pin bias on the pin, a * transition from say pull-up to pull-down implies that you disable * pull-up in the process, this setting disables all biasing. @@ -29,14 +34,6 @@ * if for example some other pin is going to drive the signal connected * to it for a while. Pins used for input are usually always high * impedance. - * @PIN_CONFIG_BIAS_BUS_HOLD: the pin will be set to weakly latch so that it - * weakly drives the last value on a tristate bus, also known as a "bus - * holder", "bus keeper" or "repeater". This allows another device on the - * bus to change the value by driving the bus high or low and switching to - * tristate. The argument is ignored. - * @PIN_CONFIG_BIAS_PULL_UP: the pin will be pulled up (usually with high - * impedance to VDD). If the argument is != 0 pull-up is enabled, - * if it is 0, pull-up is total, i.e. the pin is connected to VDD. * @PIN_CONFIG_BIAS_PULL_DOWN: the pin will be pulled down (usually with high * impedance to GROUND). If the argument is != 0 pull-down is enabled, * if it is 0, pull-down is total, i.e. the pin is connected to GROUND. @@ -48,10 +45,9 @@ * If the argument is != 0 pull up/down is enabled, if it is 0, the * configuration is ignored. The proper way to disable it is to use * @PIN_CONFIG_BIAS_DISABLE. - * @PIN_CONFIG_DRIVE_PUSH_PULL: the pin will be driven actively high and - * low, this is the most typical case and is typically achieved with two - * active transistors on the output. Setting this config will enable - * push-pull mode, the argument is ignored. + * @PIN_CONFIG_BIAS_PULL_UP: the pin will be pulled up (usually with high + * impedance to VDD). If the argument is != 0 pull-up is enabled, + * if it is 0, pull-up is total, i.e. the pin is connected to VDD. * @PIN_CONFIG_DRIVE_OPEN_DRAIN: the pin will be driven with open drain (open * collector) which means it is usually wired with other output ports * which are then pulled up with an external resistor. Setting this @@ -59,28 +55,26 @@ * @PIN_CONFIG_DRIVE_OPEN_SOURCE: the pin will be driven with open source * (open emitter). Setting this config will enable open source mode, the * argument is ignored. + * @PIN_CONFIG_DRIVE_PUSH_PULL: the pin will be driven actively high and + * low, this is the most typical case and is typically achieved with two + * active transistors on the output. Setting this config will enable + * push-pull mode, the argument is ignored. * @PIN_CONFIG_DRIVE_STRENGTH: the pin will sink or source at most the current * passed as argument. The argument is in mA. + * @PIN_CONFIG_INPUT_DEBOUNCE: this will configure the pin to debounce mode, + * which means it will wait for signals to settle when reading inputs. 
The + * argument gives the debounce time in usecs. Setting the + * argument to zero turns debouncing off. * @PIN_CONFIG_INPUT_ENABLE: enable the pin's input. Note that this does not * affect the pin's ability to drive output. 1 enables input, 0 disables * input. - * @PIN_CONFIG_INPUT_SCHMITT_ENABLE: control schmitt-trigger mode on the pin. - * If the argument != 0, schmitt-trigger mode is enabled. If it's 0, - * schmitt-trigger mode is disabled. * @PIN_CONFIG_INPUT_SCHMITT: this will configure an input pin to run in * schmitt-trigger mode. If the schmitt-trigger has adjustable hysteresis, * the threshold value is given on a custom format as argument when * setting pins to this mode. - * @PIN_CONFIG_INPUT_DEBOUNCE: this will configure the pin to debounce mode, - * which means it will wait for signals to settle when reading inputs. The - * argument gives the debounce time in usecs. Setting the - * argument to zero turns debouncing off. - * @PIN_CONFIG_POWER_SOURCE: if the pin can select between different power - * supplies, the argument to this parameter (on a custom format) tells - * the driver which alternative power source to use. - * @PIN_CONFIG_SLEW_RATE: if the pin can select slew rate, the argument to - * this parameter (on a custom format) tells the driver which alternative - * slew rate to use. + * @PIN_CONFIG_INPUT_SCHMITT_ENABLE: control schmitt-trigger mode on the pin. + * If the argument != 0, schmitt-trigger mode is enabled. If it's 0, + * schmitt-trigger mode is disabled. * @PIN_CONFIG_LOW_POWER_MODE: this will configure the pin for low power * operation, if several modes of operation are supported these can be * passed in the argument on a custom form, else just use argument 1 @@ -89,29 +83,35 @@ * 1 to indicate high level, argument 0 to indicate low level. (Please * see Documentation/pinctrl.txt, section "GPIO mode pitfalls" for a * discussion around this parameter.) + * @PIN_CONFIG_POWER_SOURCE: if the pin can select between different power + * supplies, the argument to this parameter (on a custom format) tells + * the driver which alternative power source to use. + * @PIN_CONFIG_SLEW_RATE: if the pin can select slew rate, the argument to + * this parameter (on a custom format) tells the driver which alternative + * slew rate to use. * @PIN_CONFIG_END: this is the last enumerator for pin configurations, if * you need to pass in custom configurations to the pin controller, use * PIN_CONFIG_END+1 as the base offset. 
*/ enum pin_config_param { + PIN_CONFIG_BIAS_BUS_HOLD, PIN_CONFIG_BIAS_DISABLE, PIN_CONFIG_BIAS_HIGH_IMPEDANCE, - PIN_CONFIG_BIAS_BUS_HOLD, - PIN_CONFIG_BIAS_PULL_UP, PIN_CONFIG_BIAS_PULL_DOWN, PIN_CONFIG_BIAS_PULL_PIN_DEFAULT, - PIN_CONFIG_DRIVE_PUSH_PULL, + PIN_CONFIG_BIAS_PULL_UP, PIN_CONFIG_DRIVE_OPEN_DRAIN, PIN_CONFIG_DRIVE_OPEN_SOURCE, + PIN_CONFIG_DRIVE_PUSH_PULL, PIN_CONFIG_DRIVE_STRENGTH, + PIN_CONFIG_INPUT_DEBOUNCE, PIN_CONFIG_INPUT_ENABLE, - PIN_CONFIG_INPUT_SCHMITT_ENABLE, PIN_CONFIG_INPUT_SCHMITT, - PIN_CONFIG_INPUT_DEBOUNCE, - PIN_CONFIG_POWER_SOURCE, - PIN_CONFIG_SLEW_RATE, + PIN_CONFIG_INPUT_SCHMITT_ENABLE, PIN_CONFIG_LOW_POWER_MODE, PIN_CONFIG_OUTPUT, + PIN_CONFIG_POWER_SOURCE, + PIN_CONFIG_SLEW_RATE, PIN_CONFIG_END = 0x7FFF, }; diff --git a/include/linux/pinctrl/pinctrl-state.h b/include/linux/pinctrl/pinctrl-state.h index b5919f8e6d1a..23073519339f 100644 --- a/include/linux/pinctrl/pinctrl-state.h +++ b/include/linux/pinctrl/pinctrl-state.h @@ -9,6 +9,13 @@ * hogs to configure muxing and pins at boot, and also as a state * to go into when returning from sleep and idle in * .pm_runtime_resume() or ordinary .resume() for example. + * @PINCTRL_STATE_INIT: normally the pinctrl will be set to "default" + * before the driver's probe() function is called. There are some + * drivers where that is not appropriate becausing doing so would + * glitch the pins. In those cases you can add an "init" pinctrl + * which is the state of the pins before drive probe. After probe + * if the pins are still in "init" state they'll be moved to + * "default". * @PINCTRL_STATE_IDLE: the state the pinctrl handle shall be put into * when the pins are idle. This is a state where the system is relaxed * but not fully sleeping - some power may be on but clocks gated for @@ -20,5 +27,6 @@ * ordinary .suspend() function. 
*/ #define PINCTRL_STATE_DEFAULT "default" +#define PINCTRL_STATE_INIT "init" #define PINCTRL_STATE_IDLE "idle" #define PINCTRL_STATE_SLEEP "sleep" diff --git a/include/linux/platform_data/atmel.h b/include/linux/platform_data/atmel.h index 527a85c61924..91b16adab0cd 100644 --- a/include/linux/platform_data/atmel.h +++ b/include/linux/platform_data/atmel.h @@ -19,21 +19,6 @@ #include <linux/serial.h> #include <linux/platform_data/macb.h> -/* - * at91: 6 USARTs and one DBGU port (SAM9260) - * avr32: 4 - */ -#define ATMEL_MAX_UART 7 - - /* USB Device */ -struct at91_udc_data { - int vbus_pin; /* high == host powering us */ - u8 vbus_active_low; /* vbus polarity */ - u8 vbus_polled; /* Use polling, not interrupt */ - int pullup_pin; /* active == D+ pulled up */ - u8 pullup_active_low; /* true == pullup_pin is active low */ -}; - /* Compact Flash */ struct at91_cf_data { int irq_pin; /* I/O IRQ */ @@ -74,11 +59,6 @@ struct atmel_uart_data { struct serial_rs485 rs485; /* rs485 settings */ }; -/* CAN */ -struct at91_can_data { - void (*transceiver_switch)(int on); -}; - /* FIXME: this needs a better location, but gets stuff building again */ extern int at91_suspend_entering_slow_clock(void); diff --git a/include/linux/platform_data/dma-hsu.h b/include/linux/platform_data/dma-hsu.h index 8a1f6a4920b2..3453fa655502 100644 --- a/include/linux/platform_data/dma-hsu.h +++ b/include/linux/platform_data/dma-hsu.h @@ -18,8 +18,4 @@ struct hsu_dma_slave { int chan_id; }; -struct hsu_dma_platform_data { - unsigned short nr_channels; -}; - #endif /* _PLATFORM_DATA_DMA_HSU_H */ diff --git a/include/linux/platform_data/leds-kirkwood-netxbig.h b/include/linux/platform_data/leds-kirkwood-netxbig.h index d2be19a51acd..3c85a735c380 100644 --- a/include/linux/platform_data/leds-kirkwood-netxbig.h +++ b/include/linux/platform_data/leds-kirkwood-netxbig.h @@ -40,6 +40,7 @@ struct netxbig_led { int mode_addr; int *mode_val; int bright_addr; + int bright_max; }; struct netxbig_led_platform_data { diff --git a/include/linux/mdio-gpio.h b/include/linux/platform_data/mdio-gpio.h index 11f00cdabe3d..11f00cdabe3d 100644 --- a/include/linux/mdio-gpio.h +++ b/include/linux/platform_data/mdio-gpio.h diff --git a/include/linux/platform_data/nfcmrvl.h b/include/linux/platform_data/nfcmrvl.h index ac91707dabcb..a6f9d633f5be 100644 --- a/include/linux/platform_data/nfcmrvl.h +++ b/include/linux/platform_data/nfcmrvl.h @@ -35,6 +35,14 @@ struct nfcmrvl_platform_data { unsigned int flow_control; /* Tell if firmware supports break control for power management */ unsigned int break_control; + + + /* + * I2C specific + */ + + unsigned int irq; + unsigned int irq_polarity; }; #endif /* _NFCMRVL_PTF_H_ */ diff --git a/include/linux/platform_data/s3c-hsotg.h b/include/linux/platform_data/s3c-hsotg.h index 3f1cbf95ec3b..3982586ba6df 100644 --- a/include/linux/platform_data/s3c-hsotg.h +++ b/include/linux/platform_data/s3c-hsotg.h @@ -17,19 +17,19 @@ struct platform_device; -enum s3c_hsotg_dmamode { +enum dwc2_hsotg_dmamode { S3C_HSOTG_DMA_NONE, /* do not use DMA at-all */ S3C_HSOTG_DMA_ONLY, /* always use DMA */ S3C_HSOTG_DMA_DRV, /* DMA is chosen by driver */ }; /** - * struct s3c_hsotg_plat - platform data for high-speed otg/udc + * struct dwc2_hsotg_plat - platform data for high-speed otg/udc * @dma: Whether to use DMA or not. 
* @is_osc: The clock source is an oscillator, not a crystal */ -struct s3c_hsotg_plat { - enum s3c_hsotg_dmamode dma; +struct dwc2_hsotg_plat { + enum dwc2_hsotg_dmamode dma; unsigned int is_osc:1; int phy_type; @@ -37,6 +37,6 @@ struct s3c_hsotg_plat { int (*phy_exit)(struct platform_device *pdev, int type); }; -extern void s3c_hsotg_set_platdata(struct s3c_hsotg_plat *pd); +extern void dwc2_hsotg_set_platdata(struct dwc2_hsotg_plat *pd); #endif /* __LINUX_USB_S3C_HSOTG_H */ diff --git a/include/linux/platform_data/st-nci.h b/include/linux/platform_data/st-nci.h index d9d400a297bd..f6494b347c06 100644 --- a/include/linux/platform_data/st-nci.h +++ b/include/linux/platform_data/st-nci.h @@ -24,6 +24,8 @@ struct st_nci_nfc_platform_data { unsigned int gpio_reset; unsigned int irq_polarity; + bool is_ese_present; + bool is_uicc_present; }; #endif /* _ST_NCI_H_ */ diff --git a/include/linux/platform_device.h b/include/linux/platform_device.h index bba08f44cc97..dc777be5f2e1 100644 --- a/include/linux/platform_device.h +++ b/include/linux/platform_device.h @@ -270,6 +270,14 @@ extern struct platform_device *__platform_create_bundle( struct resource *res, unsigned int n_res, const void *data, size_t size, struct module *module); +int __platform_register_drivers(struct platform_driver * const *drivers, + unsigned int count, struct module *owner); +void platform_unregister_drivers(struct platform_driver * const *drivers, + unsigned int count); + +#define platform_register_drivers(drivers, count) \ + __platform_register_drivers(drivers, count, THIS_MODULE) + /* early platform driver interface */ struct early_platform_driver { const char *class_str; diff --git a/include/linux/pm.h b/include/linux/pm.h index 35d599e7250d..528be6787796 100644 --- a/include/linux/pm.h +++ b/include/linux/pm.h @@ -732,6 +732,7 @@ extern int pm_generic_poweroff_noirq(struct device *dev); extern int pm_generic_poweroff_late(struct device *dev); extern int pm_generic_poweroff(struct device *dev); extern void pm_generic_complete(struct device *dev); +extern void pm_complete_with_resume_check(struct device *dev); #else /* !CONFIG_PM_SLEEP */ diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h index b1cf7e797892..ba4ced38efae 100644 --- a/include/linux/pm_domain.h +++ b/include/linux/pm_domain.h @@ -15,7 +15,6 @@ #include <linux/err.h> #include <linux/of.h> #include <linux/notifier.h> -#include <linux/cpuidle.h> /* Defines used for the flags field in the struct generic_pm_domain */ #define GENPD_FLAG_PM_CLK (1U << 0) /* PM domain uses PM clk */ @@ -38,11 +37,6 @@ struct gpd_dev_ops { bool (*active_wakeup)(struct device *dev); }; -struct gpd_cpuidle_data { - unsigned int saved_exit_latency; - struct cpuidle_state *idle_state; -}; - struct generic_pm_domain { struct dev_pm_domain domain; /* PM domain operations */ struct list_head gpd_list_node; /* Node in the global PM domains list */ @@ -53,7 +47,6 @@ struct generic_pm_domain { struct dev_power_governor *gov; struct work_struct power_off_work; const char *name; - unsigned int in_progress; /* Number of devices being suspended now */ atomic_t sd_count; /* Number of subdomains with power "on" */ enum gpd_status status; /* Current state of the domain */ unsigned int device_count; /* Number of devices */ @@ -68,7 +61,6 @@ struct generic_pm_domain { s64 max_off_time_ns; /* Maximum allowed "suspended" time. 
*/ bool max_off_time_changed; bool cached_power_down_ok; - struct gpd_cpuidle_data *cpuidle_data; int (*attach_dev)(struct generic_pm_domain *domain, struct device *dev); void (*detach_dev)(struct generic_pm_domain *domain, @@ -89,10 +81,8 @@ struct gpd_link { }; struct gpd_timing_data { - s64 stop_latency_ns; - s64 start_latency_ns; - s64 save_state_latency_ns; - s64 restore_state_latency_ns; + s64 suspend_latency_ns; + s64 resume_latency_ns; s64 effective_constraint_ns; bool constraint_changed; bool cached_stop_ok; @@ -125,29 +115,15 @@ extern int __pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev, struct gpd_timing_data *td); -extern int __pm_genpd_name_add_device(const char *domain_name, - struct device *dev, - struct gpd_timing_data *td); - extern int pm_genpd_remove_device(struct generic_pm_domain *genpd, struct device *dev); extern int pm_genpd_add_subdomain(struct generic_pm_domain *genpd, struct generic_pm_domain *new_subdomain); -extern int pm_genpd_add_subdomain_names(const char *master_name, - const char *subdomain_name); extern int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd, struct generic_pm_domain *target); -extern int pm_genpd_attach_cpuidle(struct generic_pm_domain *genpd, int state); -extern int pm_genpd_name_attach_cpuidle(const char *name, int state); -extern int pm_genpd_detach_cpuidle(struct generic_pm_domain *genpd); -extern int pm_genpd_name_detach_cpuidle(const char *name); extern void pm_genpd_init(struct generic_pm_domain *genpd, struct dev_power_governor *gov, bool is_off); -extern int pm_genpd_poweron(struct generic_pm_domain *genpd); -extern int pm_genpd_name_poweron(const char *domain_name); -extern void pm_genpd_poweroff_unused(void); - extern struct dev_power_governor simple_qos_governor; extern struct dev_power_governor pm_domain_always_on_gov; #else @@ -166,12 +142,6 @@ static inline int __pm_genpd_add_device(struct generic_pm_domain *genpd, { return -ENOSYS; } -static inline int __pm_genpd_name_add_device(const char *domain_name, - struct device *dev, - struct gpd_timing_data *td) -{ - return -ENOSYS; -} static inline int pm_genpd_remove_device(struct generic_pm_domain *genpd, struct device *dev) { @@ -182,45 +152,15 @@ static inline int pm_genpd_add_subdomain(struct generic_pm_domain *genpd, { return -ENOSYS; } -static inline int pm_genpd_add_subdomain_names(const char *master_name, - const char *subdomain_name) -{ - return -ENOSYS; -} static inline int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd, struct generic_pm_domain *target) { return -ENOSYS; } -static inline int pm_genpd_attach_cpuidle(struct generic_pm_domain *genpd, int st) -{ - return -ENOSYS; -} -static inline int pm_genpd_name_attach_cpuidle(const char *name, int state) -{ - return -ENOSYS; -} -static inline int pm_genpd_detach_cpuidle(struct generic_pm_domain *genpd) -{ - return -ENOSYS; -} -static inline int pm_genpd_name_detach_cpuidle(const char *name) -{ - return -ENOSYS; -} static inline void pm_genpd_init(struct generic_pm_domain *genpd, struct dev_power_governor *gov, bool is_off) { } -static inline int pm_genpd_poweron(struct generic_pm_domain *genpd) -{ - return -ENOSYS; -} -static inline int pm_genpd_name_poweron(const char *domain_name) -{ - return -ENOSYS; -} -static inline void pm_genpd_poweroff_unused(void) {} #endif static inline int pm_genpd_add_device(struct generic_pm_domain *genpd, @@ -229,12 +169,6 @@ static inline int pm_genpd_add_device(struct generic_pm_domain *genpd, return __pm_genpd_add_device(genpd, dev, NULL); } 
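Since the name-based genpd helpers (pm_genpd_name_*(), the cpuidle coupling and pm_genpd_poweron() variants) are removed above, callers are left with the structure-based API. A minimal, hypothetical setup using only the surviving calls; the domain name, callbacks and topology are invented for illustration:

	#include <linux/device.h>
	#include <linux/pm_domain.h>

	static int example_pd_power_off(struct generic_pm_domain *genpd)
	{
		/* Gate clocks / cut power for the made-up "example-core" island. */
		return 0;
	}

	static int example_pd_power_on(struct generic_pm_domain *genpd)
	{
		return 0;
	}

	static struct generic_pm_domain example_pd = {
		.name		= "example-core",
		.power_off	= example_pd_power_off,
		.power_on	= example_pd_power_on,
	};

	static int example_pd_setup(struct generic_pm_domain *subdomain,
				    struct device *consumer)
	{
		int ret;

		pm_genpd_init(&example_pd, NULL, false);	/* starts powered on */

		/* Name-based variants are gone; operate on the structs directly. */
		ret = pm_genpd_add_subdomain(&example_pd, subdomain);
		if (ret)
			return ret;

		return pm_genpd_add_device(&example_pd, consumer);
	}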
-static inline int pm_genpd_name_add_device(const char *domain_name, - struct device *dev) -{ - return __pm_genpd_name_add_device(domain_name, dev, NULL); -} - #ifdef CONFIG_PM_GENERIC_DOMAINS_SLEEP extern void pm_genpd_syscore_poweroff(struct device *dev); extern void pm_genpd_syscore_poweron(struct device *dev); diff --git a/include/linux/pm_opp.h b/include/linux/pm_opp.h index e817722ee3f0..9a2e50337af9 100644 --- a/include/linux/pm_opp.h +++ b/include/linux/pm_opp.h @@ -132,37 +132,37 @@ static inline struct srcu_notifier_head *dev_pm_opp_get_notifier( #endif /* CONFIG_PM_OPP */ #if defined(CONFIG_PM_OPP) && defined(CONFIG_OF) -int of_init_opp_table(struct device *dev); -void of_free_opp_table(struct device *dev); -int of_cpumask_init_opp_table(cpumask_var_t cpumask); -void of_cpumask_free_opp_table(cpumask_var_t cpumask); -int of_get_cpus_sharing_opps(struct device *cpu_dev, cpumask_var_t cpumask); -int set_cpus_sharing_opps(struct device *cpu_dev, cpumask_var_t cpumask); +int dev_pm_opp_of_add_table(struct device *dev); +void dev_pm_opp_of_remove_table(struct device *dev); +int dev_pm_opp_of_cpumask_add_table(cpumask_var_t cpumask); +void dev_pm_opp_of_cpumask_remove_table(cpumask_var_t cpumask); +int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev, cpumask_var_t cpumask); +int dev_pm_opp_set_sharing_cpus(struct device *cpu_dev, cpumask_var_t cpumask); #else -static inline int of_init_opp_table(struct device *dev) +static inline int dev_pm_opp_of_add_table(struct device *dev) { return -EINVAL; } -static inline void of_free_opp_table(struct device *dev) +static inline void dev_pm_opp_of_remove_table(struct device *dev) { } -static inline int of_cpumask_init_opp_table(cpumask_var_t cpumask) +static inline int dev_pm_opp_of_cpumask_add_table(cpumask_var_t cpumask) { return -ENOSYS; } -static inline void of_cpumask_free_opp_table(cpumask_var_t cpumask) +static inline void dev_pm_opp_of_cpumask_remove_table(cpumask_var_t cpumask) { } -static inline int of_get_cpus_sharing_opps(struct device *cpu_dev, cpumask_var_t cpumask) +static inline int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev, cpumask_var_t cpumask) { return -ENOSYS; } -static inline int set_cpus_sharing_opps(struct device *cpu_dev, cpumask_var_t cpumask) +static inline int dev_pm_opp_set_sharing_cpus(struct device *cpu_dev, cpumask_var_t cpumask) { return -ENOSYS; } diff --git a/include/linux/power/bq27x00_battery.h b/include/linux/power/bq27x00_battery.h deleted file mode 100644 index a857f719bf40..000000000000 --- a/include/linux/power/bq27x00_battery.h +++ /dev/null @@ -1,19 +0,0 @@ -#ifndef __LINUX_BQ27X00_BATTERY_H__ -#define __LINUX_BQ27X00_BATTERY_H__ - -/** - * struct bq27000_plaform_data - Platform data for bq27000 devices - * @name: Name of the battery. If NULL the driver will fallback to "bq27000". - * @read: HDQ read callback. - * This function should provide access to the HDQ bus the battery is - * connected to. - * The first parameter is a pointer to the battery device, the second the - * register to be read. The return value should either be the content of - * the passed register or an error value. 
- */ -struct bq27000_platform_data { - const char *name; - int (*read)(struct device *dev, unsigned int); -}; - -#endif diff --git a/include/linux/power/bq27xxx_battery.h b/include/linux/power/bq27xxx_battery.h new file mode 100644 index 000000000000..45f6a7b5b3cb --- /dev/null +++ b/include/linux/power/bq27xxx_battery.h @@ -0,0 +1,31 @@ +#ifndef __LINUX_BQ27X00_BATTERY_H__ +#define __LINUX_BQ27X00_BATTERY_H__ + +/** + * struct bq27xxx_plaform_data - Platform data for bq27xxx devices + * @name: Name of the battery. + * @chip: Chip class number of this device. + * @read: HDQ read callback. + * This function should provide access to the HDQ bus the battery is + * connected to. + * The first parameter is a pointer to the battery device, the second the + * register to be read. The return value should either be the content of + * the passed register or an error value. + */ +enum bq27xxx_chip { + BQ27000 = 1, /* bq27000, bq27200 */ + BQ27010, /* bq27010, bq27210 */ + BQ27500, /* bq27500, bq27510, bq27520 */ + BQ27530, /* bq27530, bq27531 */ + BQ27541, /* bq27541, bq27542, bq27546, bq27742 */ + BQ27545, /* bq27545 */ + BQ27421, /* bq27421, bq27425, bq27441, bq27621 */ +}; + +struct bq27xxx_platform_data { + const char *name; + enum bq27xxx_chip chip; + int (*read)(struct device *dev, unsigned int); +}; + +#endif diff --git a/include/linux/power/charger-manager.h b/include/linux/power/charger-manager.h index eadf28cb2fc9..c4fa907c8f14 100644 --- a/include/linux/power/charger-manager.h +++ b/include/linux/power/charger-manager.h @@ -65,7 +65,7 @@ struct charger_cable { const char *extcon_name; const char *name; - /* The charger-manager use Exton framework*/ + /* The charger-manager use Extcon framework */ struct extcon_specific_cable_nb extcon_dev; struct work_struct wq; struct notifier_block nb; @@ -94,7 +94,7 @@ struct charger_cable { * the charger will be maintained with disabled state. * @cables: * the array of charger cables to enable/disable charger - * and set current limit according to constratint data of + * and set current limit according to constraint data of * struct charger_cable if only charger cable included * in the array of charger cables is attached/detached. * @num_cables: the number of charger cables. @@ -148,7 +148,7 @@ struct charger_regulator { * @polling_interval_ms: interval in millisecond at which * charger manager will monitor battery health * @battery_present: - * Specify where information for existance of battery can be obtained + * Specify where information for existence of battery can be obtained * @psy_charger_stat: the names of power-supply for chargers * @num_charger_regulator: the number of entries in charger_regulators * @charger_regulators: array of charger regulators @@ -156,7 +156,7 @@ struct charger_regulator { * @thermal_zone : the name of thermal zone for battery * @temp_min : Minimum battery temperature for charging. * @temp_max : Maximum battery temperature for charging. - * @temp_diff : Temperature diffential to restart charging. + * @temp_diff : Temperature difference to restart charging. 
* @measure_battery_temp: * true: measure battery temperature * false: measure ambient temperature diff --git a/include/linux/pps_kernel.h b/include/linux/pps_kernel.h index 1d2cd21242e8..54bf1484d41f 100644 --- a/include/linux/pps_kernel.h +++ b/include/linux/pps_kernel.h @@ -48,9 +48,9 @@ struct pps_source_info { struct pps_event_time { #ifdef CONFIG_NTP_PPS - struct timespec ts_raw; + struct timespec64 ts_raw; #endif /* CONFIG_NTP_PPS */ - struct timespec ts_real; + struct timespec64 ts_real; }; /* The main struct */ @@ -105,7 +105,7 @@ extern void pps_event(struct pps_device *pps, struct pps_device *pps_lookup_dev(void const *cookie); static inline void timespec_to_pps_ktime(struct pps_ktime *kt, - struct timespec ts) + struct timespec64 ts) { kt->sec = ts.tv_sec; kt->nsec = ts.tv_nsec; @@ -115,24 +115,24 @@ static inline void timespec_to_pps_ktime(struct pps_ktime *kt, static inline void pps_get_ts(struct pps_event_time *ts) { - getnstime_raw_and_real(&ts->ts_raw, &ts->ts_real); + ktime_get_raw_and_real_ts64(&ts->ts_raw, &ts->ts_real); } #else /* CONFIG_NTP_PPS */ static inline void pps_get_ts(struct pps_event_time *ts) { - getnstimeofday(&ts->ts_real); + ktime_get_real_ts64(&ts->ts_real); } #endif /* CONFIG_NTP_PPS */ /* Subtract known time delay from PPS event time(s) */ -static inline void pps_sub_ts(struct pps_event_time *ts, struct timespec delta) +static inline void pps_sub_ts(struct pps_event_time *ts, struct timespec64 delta) { - ts->ts_real = timespec_sub(ts->ts_real, delta); + ts->ts_real = timespec64_sub(ts->ts_real, delta); #ifdef CONFIG_NTP_PPS - ts->ts_raw = timespec_sub(ts->ts_raw, delta); + ts->ts_raw = timespec64_sub(ts->ts_raw, delta); #endif } diff --git a/include/linux/pr.h b/include/linux/pr.h new file mode 100644 index 000000000000..65c01c10b335 --- /dev/null +++ b/include/linux/pr.h @@ -0,0 +1,18 @@ +#ifndef LINUX_PR_H +#define LINUX_PR_H + +#include <uapi/linux/pr.h> + +struct pr_ops { + int (*pr_register)(struct block_device *bdev, u64 old_key, u64 new_key, + u32 flags); + int (*pr_reserve)(struct block_device *bdev, u64 key, + enum pr_type type, u32 flags); + int (*pr_release)(struct block_device *bdev, u64 key, + enum pr_type type); + int (*pr_preempt)(struct block_device *bdev, u64 old_key, u64 new_key, + enum pr_type type, bool abort); + int (*pr_clear)(struct block_device *bdev, u64 key); +}; + +#endif /* LINUX_PR_H */ diff --git a/include/linux/preempt.h b/include/linux/preempt.h index bea8dd8ff5e0..75e4e30677f1 100644 --- a/include/linux/preempt.h +++ b/include/linux/preempt.h @@ -26,7 +26,6 @@ * SOFTIRQ_MASK: 0x0000ff00 * HARDIRQ_MASK: 0x000f0000 * NMI_MASK: 0x00100000 - * PREEMPT_ACTIVE: 0x00200000 * PREEMPT_NEED_RESCHED: 0x80000000 */ #define PREEMPT_BITS 8 @@ -53,10 +52,6 @@ #define SOFTIRQ_DISABLE_OFFSET (2 * SOFTIRQ_OFFSET) -#define PREEMPT_ACTIVE_BITS 1 -#define PREEMPT_ACTIVE_SHIFT (NMI_SHIFT + NMI_BITS) -#define PREEMPT_ACTIVE (__IRQ_MASK(PREEMPT_ACTIVE_BITS) << PREEMPT_ACTIVE_SHIFT) - /* We use the MSB mostly because its available */ #define PREEMPT_NEED_RESCHED 0x80000000 @@ -126,8 +121,7 @@ * Check whether we were atomic before we did preempt_disable(): * (used by the scheduler) */ -#define in_atomic_preempt_off() \ - ((preempt_count() & ~PREEMPT_ACTIVE) != PREEMPT_DISABLE_OFFSET) +#define in_atomic_preempt_off() (preempt_count() != PREEMPT_DISABLE_OFFSET) #if defined(CONFIG_DEBUG_PREEMPT) || defined(CONFIG_PREEMPT_TRACER) extern void preempt_count_add(int val); @@ -146,18 +140,6 @@ extern void preempt_count_sub(int val); #define 
preempt_count_inc() preempt_count_add(1) #define preempt_count_dec() preempt_count_sub(1) -#define preempt_active_enter() \ -do { \ - preempt_count_add(PREEMPT_ACTIVE + PREEMPT_DISABLE_OFFSET); \ - barrier(); \ -} while (0) - -#define preempt_active_exit() \ -do { \ - barrier(); \ - preempt_count_sub(PREEMPT_ACTIVE + PREEMPT_DISABLE_OFFSET); \ -} while (0) - #ifdef CONFIG_PREEMPT_COUNT #define preempt_disable() \ diff --git a/include/linux/property.h b/include/linux/property.h index a59c6ee566c2..463de52fe891 100644 --- a/include/linux/property.h +++ b/include/linux/property.h @@ -40,6 +40,8 @@ int device_property_read_string_array(struct device *dev, const char *propname, const char **val, size_t nval); int device_property_read_string(struct device *dev, const char *propname, const char **val); +int device_property_match_string(struct device *dev, + const char *propname, const char *string); bool fwnode_property_present(struct fwnode_handle *fwnode, const char *propname); int fwnode_property_read_u8_array(struct fwnode_handle *fwnode, @@ -59,6 +61,8 @@ int fwnode_property_read_string_array(struct fwnode_handle *fwnode, size_t nval); int fwnode_property_read_string(struct fwnode_handle *fwnode, const char *propname, const char **val); +int fwnode_property_match_string(struct fwnode_handle *fwnode, + const char *propname, const char *string); struct fwnode_handle *device_get_next_child_node(struct device *dev, struct fwnode_handle *child); diff --git a/include/linux/pstore.h b/include/linux/pstore.h index 8e7a25b068b0..831479f8df8f 100644 --- a/include/linux/pstore.h +++ b/include/linux/pstore.h @@ -75,20 +75,8 @@ struct pstore_info { #define PSTORE_FLAGS_FRAGILE 1 -#ifdef CONFIG_PSTORE extern int pstore_register(struct pstore_info *); +extern void pstore_unregister(struct pstore_info *); extern bool pstore_cannot_block_path(enum kmsg_dump_reason reason); -#else -static inline int -pstore_register(struct pstore_info *psi) -{ - return -ENODEV; -} -static inline bool -pstore_cannot_block_path(enum kmsg_dump_reason reason) -{ - return false; -} -#endif #endif /*_LINUX_PSTORE_H*/ diff --git a/include/linux/ptp_classify.h b/include/linux/ptp_classify.h index 159c987b1853..a079656b614c 100644 --- a/include/linux/ptp_classify.h +++ b/include/linux/ptp_classify.h @@ -32,9 +32,9 @@ #define PTP_CLASS_VMASK 0x0f /* max protocol version is 15 */ #define PTP_CLASS_IPV4 0x10 /* event in an IPV4 UDP packet */ #define PTP_CLASS_IPV6 0x20 /* event in an IPV6 UDP packet */ -#define PTP_CLASS_L2 0x30 /* event in a L2 packet */ -#define PTP_CLASS_PMASK 0x30 /* mask for the packet type field */ -#define PTP_CLASS_VLAN 0x40 /* event in a VLAN tagged packet */ +#define PTP_CLASS_L2 0x40 /* event in a L2 packet */ +#define PTP_CLASS_PMASK 0x70 /* mask for the packet type field */ +#define PTP_CLASS_VLAN 0x80 /* event in a VLAN tagged packet */ #define PTP_CLASS_V1_IPV4 (PTP_CLASS_V1 | PTP_CLASS_IPV4) #define PTP_CLASS_V1_IPV6 (PTP_CLASS_V1 | PTP_CLASS_IPV6) /* probably DNE */ @@ -42,6 +42,7 @@ #define PTP_CLASS_V2_IPV6 (PTP_CLASS_V2 | PTP_CLASS_IPV6) #define PTP_CLASS_V2_L2 (PTP_CLASS_V2 | PTP_CLASS_L2) #define PTP_CLASS_V2_VLAN (PTP_CLASS_V2 | PTP_CLASS_VLAN) +#define PTP_CLASS_L4 (PTP_CLASS_IPV4 | PTP_CLASS_IPV6) #define PTP_EV_PORT 319 #define PTP_GEN_BIT 0x08 /* indicates general message, if set in message type */ diff --git a/include/linux/pxa2xx_ssp.h b/include/linux/pxa2xx_ssp.h index 92273776bce6..c2f2574ff61c 100644 --- a/include/linux/pxa2xx_ssp.h +++ b/include/linux/pxa2xx_ssp.h @@ -198,6 +198,7 @@ enum 
pxa_ssp_type { LPSS_LPT_SSP, /* Keep LPSS types sorted with lpss_platforms[] */ LPSS_BYT_SSP, LPSS_SPT_SSP, + LPSS_BXT_SSP, }; struct ssp_device { diff --git a/include/linux/qed/common_hsi.h b/include/linux/qed/common_hsi.h new file mode 100644 index 000000000000..6a4347639c03 --- /dev/null +++ b/include/linux/qed/common_hsi.h @@ -0,0 +1,607 @@ +/* QLogic qed NIC Driver + * Copyright (c) 2015 QLogic Corporation + * + * This software is available under the terms of the GNU General Public License + * (GPL) Version 2, available from the file COPYING in the main directory of + * this source tree. + */ + +#ifndef __COMMON_HSI__ +#define __COMMON_HSI__ + +#define FW_MAJOR_VERSION 8 +#define FW_MINOR_VERSION 4 +#define FW_REVISION_VERSION 2 +#define FW_ENGINEERING_VERSION 0 + +/***********************/ +/* COMMON HW CONSTANTS */ +/***********************/ + +/* PCI functions */ +#define MAX_NUM_PORTS_K2 (4) +#define MAX_NUM_PORTS_BB (2) +#define MAX_NUM_PORTS (MAX_NUM_PORTS_K2) + +#define MAX_NUM_PFS_K2 (16) +#define MAX_NUM_PFS_BB (8) +#define MAX_NUM_PFS (MAX_NUM_PFS_K2) +#define MAX_NUM_OF_PFS_IN_CHIP (16) /* On both engines */ + +#define MAX_NUM_VFS_K2 (192) +#define MAX_NUM_VFS_BB (120) +#define MAX_NUM_VFS (MAX_NUM_VFS_K2) + +#define MAX_NUM_FUNCTIONS_BB (MAX_NUM_PFS_BB + MAX_NUM_VFS_BB) +#define MAX_NUM_FUNCTIONS (MAX_NUM_PFS + MAX_NUM_VFS) + +#define MAX_FUNCTION_NUMBER_BB (MAX_NUM_PFS + MAX_NUM_VFS_BB) +#define MAX_FUNCTION_NUMBER (MAX_NUM_PFS + MAX_NUM_VFS) + +#define MAX_NUM_VPORTS_K2 (208) +#define MAX_NUM_VPORTS_BB (160) +#define MAX_NUM_VPORTS (MAX_NUM_VPORTS_K2) + +#define MAX_NUM_L2_QUEUES_K2 (320) +#define MAX_NUM_L2_QUEUES_BB (256) +#define MAX_NUM_L2_QUEUES (MAX_NUM_L2_QUEUES_K2) + +/* Traffic classes in network-facing blocks (PBF, BTB, NIG, BRB, PRS and QM) */ +#define NUM_PHYS_TCS_4PORT_K2 (4) +#define NUM_OF_PHYS_TCS (8) + +#define NUM_TCS_4PORT_K2 (NUM_PHYS_TCS_4PORT_K2 + 1) +#define NUM_OF_TCS (NUM_OF_PHYS_TCS + 1) + +#define LB_TC (NUM_OF_PHYS_TCS) + +/* Num of possible traffic priority values */ +#define NUM_OF_PRIO (8) + +#define MAX_NUM_VOQS_K2 (NUM_TCS_4PORT_K2 * MAX_NUM_PORTS_K2) +#define MAX_NUM_VOQS_BB (NUM_OF_TCS * MAX_NUM_PORTS_BB) +#define MAX_NUM_VOQS (MAX_NUM_VOQS_K2) +#define MAX_PHYS_VOQS (NUM_OF_PHYS_TCS * MAX_NUM_PORTS_BB) + +/* CIDs */ +#define NUM_OF_CONNECTION_TYPES (8) +#define NUM_OF_LCIDS (320) +#define NUM_OF_LTIDS (320) + +/*****************/ +/* CDU CONSTANTS */ +/*****************/ + +#define CDU_SEG_TYPE_OFFSET_REG_TYPE_SHIFT (17) +#define CDU_SEG_TYPE_OFFSET_REG_OFFSET_MASK (0x1ffff) + +/*****************/ +/* DQ CONSTANTS */ +/*****************/ + +/* DEMS */ +#define DQ_DEMS_LEGACY 0 + +/* XCM agg val selection */ +#define DQ_XCM_AGG_VAL_SEL_WORD2 0 +#define DQ_XCM_AGG_VAL_SEL_WORD3 1 +#define DQ_XCM_AGG_VAL_SEL_WORD4 2 +#define DQ_XCM_AGG_VAL_SEL_WORD5 3 +#define DQ_XCM_AGG_VAL_SEL_REG3 4 +#define DQ_XCM_AGG_VAL_SEL_REG4 5 +#define DQ_XCM_AGG_VAL_SEL_REG5 6 +#define DQ_XCM_AGG_VAL_SEL_REG6 7 + +/* XCM agg val selection */ +#define DQ_XCM_ETH_EDPM_NUM_BDS_CMD \ + DQ_XCM_AGG_VAL_SEL_WORD2 +#define DQ_XCM_ETH_TX_BD_CONS_CMD \ + DQ_XCM_AGG_VAL_SEL_WORD3 +#define DQ_XCM_CORE_TX_BD_CONS_CMD \ + DQ_XCM_AGG_VAL_SEL_WORD3 +#define DQ_XCM_ETH_TX_BD_PROD_CMD \ + DQ_XCM_AGG_VAL_SEL_WORD4 +#define DQ_XCM_CORE_TX_BD_PROD_CMD \ + DQ_XCM_AGG_VAL_SEL_WORD4 +#define DQ_XCM_CORE_SPQ_PROD_CMD \ + DQ_XCM_AGG_VAL_SEL_WORD4 +#define DQ_XCM_ETH_GO_TO_BD_CONS_CMD DQ_XCM_AGG_VAL_SEL_WORD5 + +/* XCM agg counter flag selection */ +#define DQ_XCM_AGG_FLG_SHIFT_BIT14 0 
+#define DQ_XCM_AGG_FLG_SHIFT_BIT15 1 +#define DQ_XCM_AGG_FLG_SHIFT_CF12 2 +#define DQ_XCM_AGG_FLG_SHIFT_CF13 3 +#define DQ_XCM_AGG_FLG_SHIFT_CF18 4 +#define DQ_XCM_AGG_FLG_SHIFT_CF19 5 +#define DQ_XCM_AGG_FLG_SHIFT_CF22 6 +#define DQ_XCM_AGG_FLG_SHIFT_CF23 7 + +/* XCM agg counter flag selection */ +#define DQ_XCM_ETH_DQ_CF_CMD (1 << \ + DQ_XCM_AGG_FLG_SHIFT_CF18) +#define DQ_XCM_CORE_DQ_CF_CMD (1 << \ + DQ_XCM_AGG_FLG_SHIFT_CF18) +#define DQ_XCM_ETH_TERMINATE_CMD (1 << \ + DQ_XCM_AGG_FLG_SHIFT_CF19) +#define DQ_XCM_CORE_TERMINATE_CMD (1 << \ + DQ_XCM_AGG_FLG_SHIFT_CF19) +#define DQ_XCM_ETH_SLOW_PATH_CMD (1 << \ + DQ_XCM_AGG_FLG_SHIFT_CF22) +#define DQ_XCM_CORE_SLOW_PATH_CMD (1 << \ + DQ_XCM_AGG_FLG_SHIFT_CF22) +#define DQ_XCM_ETH_TPH_EN_CMD (1 << \ + DQ_XCM_AGG_FLG_SHIFT_CF23) + +/*****************/ +/* QM CONSTANTS */ +/*****************/ + +/* number of TX queues in the QM */ +#define MAX_QM_TX_QUEUES_K2 512 +#define MAX_QM_TX_QUEUES_BB 448 +#define MAX_QM_TX_QUEUES MAX_QM_TX_QUEUES_K2 + +/* number of Other queues in the QM */ +#define MAX_QM_OTHER_QUEUES_BB 64 +#define MAX_QM_OTHER_QUEUES_K2 128 +#define MAX_QM_OTHER_QUEUES MAX_QM_OTHER_QUEUES_K2 + +/* number of queues in a PF queue group */ +#define QM_PF_QUEUE_GROUP_SIZE 8 + +/* base number of Tx PQs in the CM PQ representation. + * should be used when storing PQ IDs in CM PQ registers and context + */ +#define CM_TX_PQ_BASE 0x200 + +/* QM registers data */ +#define QM_LINE_CRD_REG_WIDTH 16 +#define QM_LINE_CRD_REG_SIGN_BIT (1 << (QM_LINE_CRD_REG_WIDTH - 1)) +#define QM_BYTE_CRD_REG_WIDTH 24 +#define QM_BYTE_CRD_REG_SIGN_BIT (1 << (QM_BYTE_CRD_REG_WIDTH - 1)) +#define QM_WFQ_CRD_REG_WIDTH 32 +#define QM_WFQ_CRD_REG_SIGN_BIT (1 << (QM_WFQ_CRD_REG_WIDTH - 1)) +#define QM_RL_CRD_REG_WIDTH 32 +#define QM_RL_CRD_REG_SIGN_BIT (1 << (QM_RL_CRD_REG_WIDTH - 1)) + +/*****************/ +/* CAU CONSTANTS */ +/*****************/ + +#define CAU_FSM_ETH_RX 0 +#define CAU_FSM_ETH_TX 1 + +/* Number of Protocol Indices per Status Block */ +#define PIS_PER_SB 12 + +#define CAU_HC_STOPPED_STATE 3 +#define CAU_HC_DISABLE_STATE 4 +#define CAU_HC_ENABLE_STATE 0 + +/*****************/ +/* IGU CONSTANTS */ +/*****************/ + +#define MAX_SB_PER_PATH_K2 (368) +#define MAX_SB_PER_PATH_BB (288) +#define MAX_TOT_SB_PER_PATH \ + MAX_SB_PER_PATH_K2 + +#define MAX_SB_PER_PF_MIMD 129 +#define MAX_SB_PER_PF_SIMD 64 +#define MAX_SB_PER_VF 64 + +/* Memory addresses on the BAR for the IGU Sub Block */ +#define IGU_MEM_BASE 0x0000 + +#define IGU_MEM_MSIX_BASE 0x0000 +#define IGU_MEM_MSIX_UPPER 0x0101 +#define IGU_MEM_MSIX_RESERVED_UPPER 0x01ff + +#define IGU_MEM_PBA_MSIX_BASE 0x0200 +#define IGU_MEM_PBA_MSIX_UPPER 0x0202 +#define IGU_MEM_PBA_MSIX_RESERVED_UPPER 0x03ff + +#define IGU_CMD_INT_ACK_BASE 0x0400 +#define IGU_CMD_INT_ACK_UPPER (IGU_CMD_INT_ACK_BASE + \ + MAX_TOT_SB_PER_PATH - \ + 1) +#define IGU_CMD_INT_ACK_RESERVED_UPPER 0x05ff + +#define IGU_CMD_ATTN_BIT_UPD_UPPER 0x05f0 +#define IGU_CMD_ATTN_BIT_SET_UPPER 0x05f1 +#define IGU_CMD_ATTN_BIT_CLR_UPPER 0x05f2 + +#define IGU_REG_SISR_MDPC_WMASK_UPPER 0x05f3 +#define IGU_REG_SISR_MDPC_WMASK_LSB_UPPER 0x05f4 +#define IGU_REG_SISR_MDPC_WMASK_MSB_UPPER 0x05f5 +#define IGU_REG_SISR_MDPC_WOMASK_UPPER 0x05f6 + +#define IGU_CMD_PROD_UPD_BASE 0x0600 +#define IGU_CMD_PROD_UPD_UPPER (IGU_CMD_PROD_UPD_BASE +\ + MAX_TOT_SB_PER_PATH - \ + 1) +#define IGU_CMD_PROD_UPD_RESERVED_UPPER 0x07ff + +/*****************/ +/* PXP CONSTANTS */ +/*****************/ + +/* PTT and GTT */ +#define PXP_NUM_PF_WINDOWS 12 +#define 
PXP_PER_PF_ENTRY_SIZE 8 +#define PXP_NUM_GLOBAL_WINDOWS 243 +#define PXP_GLOBAL_ENTRY_SIZE 4 +#define PXP_ADMIN_WINDOW_ALLOWED_LENGTH 4 +#define PXP_PF_WINDOW_ADMIN_START 0 +#define PXP_PF_WINDOW_ADMIN_LENGTH 0x1000 +#define PXP_PF_WINDOW_ADMIN_END (PXP_PF_WINDOW_ADMIN_START + \ + PXP_PF_WINDOW_ADMIN_LENGTH - 1) +#define PXP_PF_WINDOW_ADMIN_PER_PF_START 0 +#define PXP_PF_WINDOW_ADMIN_PER_PF_LENGTH (PXP_NUM_PF_WINDOWS * \ + PXP_PER_PF_ENTRY_SIZE) +#define PXP_PF_WINDOW_ADMIN_PER_PF_END (PXP_PF_WINDOW_ADMIN_PER_PF_START + \ + PXP_PF_WINDOW_ADMIN_PER_PF_LENGTH - 1) +#define PXP_PF_WINDOW_ADMIN_GLOBAL_START 0x200 +#define PXP_PF_WINDOW_ADMIN_GLOBAL_LENGTH (PXP_NUM_GLOBAL_WINDOWS * \ + PXP_GLOBAL_ENTRY_SIZE) +#define PXP_PF_WINDOW_ADMIN_GLOBAL_END \ + (PXP_PF_WINDOW_ADMIN_GLOBAL_START + \ + PXP_PF_WINDOW_ADMIN_GLOBAL_LENGTH - 1) +#define PXP_PF_GLOBAL_PRETEND_ADDR 0x1f0 +#define PXP_PF_ME_OPAQUE_MASK_ADDR 0xf4 +#define PXP_PF_ME_OPAQUE_ADDR 0x1f8 +#define PXP_PF_ME_CONCRETE_ADDR 0x1fc + +#define PXP_EXTERNAL_BAR_PF_WINDOW_START 0x1000 +#define PXP_EXTERNAL_BAR_PF_WINDOW_NUM PXP_NUM_PF_WINDOWS +#define PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE 0x1000 +#define PXP_EXTERNAL_BAR_PF_WINDOW_LENGTH \ + (PXP_EXTERNAL_BAR_PF_WINDOW_NUM * \ + PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE) +#define PXP_EXTERNAL_BAR_PF_WINDOW_END \ + (PXP_EXTERNAL_BAR_PF_WINDOW_START + \ + PXP_EXTERNAL_BAR_PF_WINDOW_LENGTH - 1) + +#define PXP_EXTERNAL_BAR_GLOBAL_WINDOW_START \ + (PXP_EXTERNAL_BAR_PF_WINDOW_END + 1) +#define PXP_EXTERNAL_BAR_GLOBAL_WINDOW_NUM PXP_NUM_GLOBAL_WINDOWS +#define PXP_EXTERNAL_BAR_GLOBAL_WINDOW_SINGLE_SIZE 0x1000 +#define PXP_EXTERNAL_BAR_GLOBAL_WINDOW_LENGTH \ + (PXP_EXTERNAL_BAR_GLOBAL_WINDOW_NUM * \ + PXP_EXTERNAL_BAR_GLOBAL_WINDOW_SINGLE_SIZE) +#define PXP_EXTERNAL_BAR_GLOBAL_WINDOW_END \ + (PXP_EXTERNAL_BAR_GLOBAL_WINDOW_START + \ + PXP_EXTERNAL_BAR_GLOBAL_WINDOW_LENGTH - 1) + +#define PXP_ILT_PAGE_SIZE_NUM_BITS_MIN 12 +#define PXP_ILT_BLOCK_FACTOR_MULTIPLIER 1024 + +/* ILT Records */ +#define PXP_NUM_ILT_RECORDS_BB 7600 +#define PXP_NUM_ILT_RECORDS_K2 11000 +#define MAX_NUM_ILT_RECORDS MAX(PXP_NUM_ILT_RECORDS_BB, PXP_NUM_ILT_RECORDS_K2) + +/******************/ +/* PBF CONSTANTS */ +/******************/ + +/* Number of PBF command queue lines. Each line is 32B. */ +#define PBF_MAX_CMD_LINES 3328 + +/* Number of BTB blocks. Each block is 256B. 
*/ +#define BTB_MAX_BLOCKS 1440 + +/*****************/ +/* PRS CONSTANTS */ +/*****************/ + +/* Async data KCQ CQE */ +struct async_data { + __le32 cid; + __le16 itid; + u8 error_code; + u8 fw_debug_param; +}; + +struct regpair { + __le32 lo; + __le32 hi; +}; + +/* Event Data Union */ +union event_ring_data { + u8 bytes[8]; + struct async_data async_info; +}; + +/* Event Ring Entry */ +struct event_ring_entry { + u8 protocol_id; + u8 opcode; + __le16 reserved0; + __le16 echo; + u8 fw_return_code; + u8 flags; +#define EVENT_RING_ENTRY_ASYNC_MASK 0x1 +#define EVENT_RING_ENTRY_ASYNC_SHIFT 0 +#define EVENT_RING_ENTRY_RESERVED1_MASK 0x7F +#define EVENT_RING_ENTRY_RESERVED1_SHIFT 1 + union event_ring_data data; +}; + +/* Multi function mode */ +enum mf_mode { + SF, + MF_OVLAN, + MF_NPAR, + MAX_MF_MODE +}; + +/* Per-protocol connection types */ +enum protocol_type { + PROTOCOLID_RESERVED1, + PROTOCOLID_RESERVED2, + PROTOCOLID_RESERVED3, + PROTOCOLID_CORE, + PROTOCOLID_ETH, + PROTOCOLID_RESERVED4, + PROTOCOLID_RESERVED5, + PROTOCOLID_PREROCE, + PROTOCOLID_COMMON, + PROTOCOLID_RESERVED6, + MAX_PROTOCOL_TYPE +}; + +/* status block structure */ +struct cau_pi_entry { + u32 prod; +#define CAU_PI_ENTRY_PROD_VAL_MASK 0xFFFF +#define CAU_PI_ENTRY_PROD_VAL_SHIFT 0 +#define CAU_PI_ENTRY_PI_TIMESET_MASK 0x7F +#define CAU_PI_ENTRY_PI_TIMESET_SHIFT 16 +#define CAU_PI_ENTRY_FSM_SEL_MASK 0x1 +#define CAU_PI_ENTRY_FSM_SEL_SHIFT 23 +#define CAU_PI_ENTRY_RESERVED_MASK 0xFF +#define CAU_PI_ENTRY_RESERVED_SHIFT 24 +}; + +/* status block structure */ +struct cau_sb_entry { + u32 data; +#define CAU_SB_ENTRY_SB_PROD_MASK 0xFFFFFF +#define CAU_SB_ENTRY_SB_PROD_SHIFT 0 +#define CAU_SB_ENTRY_STATE0_MASK 0xF +#define CAU_SB_ENTRY_STATE0_SHIFT 24 +#define CAU_SB_ENTRY_STATE1_MASK 0xF +#define CAU_SB_ENTRY_STATE1_SHIFT 28 + u32 params; +#define CAU_SB_ENTRY_SB_TIMESET0_MASK 0x7F +#define CAU_SB_ENTRY_SB_TIMESET0_SHIFT 0 +#define CAU_SB_ENTRY_SB_TIMESET1_MASK 0x7F +#define CAU_SB_ENTRY_SB_TIMESET1_SHIFT 7 +#define CAU_SB_ENTRY_TIMER_RES0_MASK 0x3 +#define CAU_SB_ENTRY_TIMER_RES0_SHIFT 14 +#define CAU_SB_ENTRY_TIMER_RES1_MASK 0x3 +#define CAU_SB_ENTRY_TIMER_RES1_SHIFT 16 +#define CAU_SB_ENTRY_VF_NUMBER_MASK 0xFF +#define CAU_SB_ENTRY_VF_NUMBER_SHIFT 18 +#define CAU_SB_ENTRY_VF_VALID_MASK 0x1 +#define CAU_SB_ENTRY_VF_VALID_SHIFT 26 +#define CAU_SB_ENTRY_PF_NUMBER_MASK 0xF +#define CAU_SB_ENTRY_PF_NUMBER_SHIFT 27 +#define CAU_SB_ENTRY_TPH_MASK 0x1 +#define CAU_SB_ENTRY_TPH_SHIFT 31 +}; + +/* core doorbell data */ +struct core_db_data { + u8 params; +#define CORE_DB_DATA_DEST_MASK 0x3 +#define CORE_DB_DATA_DEST_SHIFT 0 +#define CORE_DB_DATA_AGG_CMD_MASK 0x3 +#define CORE_DB_DATA_AGG_CMD_SHIFT 2 +#define CORE_DB_DATA_BYPASS_EN_MASK 0x1 +#define CORE_DB_DATA_BYPASS_EN_SHIFT 4 +#define CORE_DB_DATA_RESERVED_MASK 0x1 +#define CORE_DB_DATA_RESERVED_SHIFT 5 +#define CORE_DB_DATA_AGG_VAL_SEL_MASK 0x3 +#define CORE_DB_DATA_AGG_VAL_SEL_SHIFT 6 + u8 agg_flags; + __le16 spq_prod; +}; + +/* Enum of doorbell aggregative command selection */ +enum db_agg_cmd_sel { + DB_AGG_CMD_NOP, + DB_AGG_CMD_SET, + DB_AGG_CMD_ADD, + DB_AGG_CMD_MAX, + MAX_DB_AGG_CMD_SEL +}; + +/* Enum of doorbell destination */ +enum db_dest { + DB_DEST_XCM, + DB_DEST_UCM, + DB_DEST_TCM, + DB_NUM_DESTINATIONS, + MAX_DB_DEST +}; + +/* Structure for doorbell address, in legacy mode */ +struct db_legacy_addr { + __le32 addr; +#define DB_LEGACY_ADDR_RESERVED0_MASK 0x3 +#define DB_LEGACY_ADDR_RESERVED0_SHIFT 0 +#define DB_LEGACY_ADDR_DEMS_MASK 0x7 +#define 
DB_LEGACY_ADDR_DEMS_SHIFT 2 +#define DB_LEGACY_ADDR_ICID_MASK 0x7FFFFFF +#define DB_LEGACY_ADDR_ICID_SHIFT 5 +}; + +/* Igu interrupt command */ +enum igu_int_cmd { + IGU_INT_ENABLE = 0, + IGU_INT_DISABLE = 1, + IGU_INT_NOP = 2, + IGU_INT_NOP2 = 3, + MAX_IGU_INT_CMD +}; + +/* IGU producer or consumer update command */ +struct igu_prod_cons_update { + u32 sb_id_and_flags; +#define IGU_PROD_CONS_UPDATE_SB_INDEX_MASK 0xFFFFFF +#define IGU_PROD_CONS_UPDATE_SB_INDEX_SHIFT 0 +#define IGU_PROD_CONS_UPDATE_UPDATE_FLAG_MASK 0x1 +#define IGU_PROD_CONS_UPDATE_UPDATE_FLAG_SHIFT 24 +#define IGU_PROD_CONS_UPDATE_ENABLE_INT_MASK 0x3 +#define IGU_PROD_CONS_UPDATE_ENABLE_INT_SHIFT 25 +#define IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_MASK 0x1 +#define IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_SHIFT 27 +#define IGU_PROD_CONS_UPDATE_TIMER_MASK_MASK 0x1 +#define IGU_PROD_CONS_UPDATE_TIMER_MASK_SHIFT 28 +#define IGU_PROD_CONS_UPDATE_RESERVED0_MASK 0x3 +#define IGU_PROD_CONS_UPDATE_RESERVED0_SHIFT 29 +#define IGU_PROD_CONS_UPDATE_COMMAND_TYPE_MASK 0x1 +#define IGU_PROD_CONS_UPDATE_COMMAND_TYPE_SHIFT 31 + u32 reserved1; +}; + +/* Igu segments access for default status block only */ +enum igu_seg_access { + IGU_SEG_ACCESS_REG = 0, + IGU_SEG_ACCESS_ATTN = 1, + MAX_IGU_SEG_ACCESS +}; + +struct parsing_and_err_flags { + __le16 flags; +#define PARSING_AND_ERR_FLAGS_L3TYPE_MASK 0x3 +#define PARSING_AND_ERR_FLAGS_L3TYPE_SHIFT 0 +#define PARSING_AND_ERR_FLAGS_L4PROTOCOL_MASK 0x3 +#define PARSING_AND_ERR_FLAGS_L4PROTOCOL_SHIFT 2 +#define PARSING_AND_ERR_FLAGS_IPV4FRAG_MASK 0x1 +#define PARSING_AND_ERR_FLAGS_IPV4FRAG_SHIFT 4 +#define PARSING_AND_ERR_FLAGS_TAG8021QEXIST_MASK 0x1 +#define PARSING_AND_ERR_FLAGS_TAG8021QEXIST_SHIFT 5 +#define PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK 0x1 +#define PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT 6 +#define PARSING_AND_ERR_FLAGS_TIMESYNCPKT_MASK 0x1 +#define PARSING_AND_ERR_FLAGS_TIMESYNCPKT_SHIFT 7 +#define PARSING_AND_ERR_FLAGS_TIMESTAMPRECORDED_MASK 0x1 +#define PARSING_AND_ERR_FLAGS_TIMESTAMPRECORDED_SHIFT 8 +#define PARSING_AND_ERR_FLAGS_IPHDRERROR_MASK 0x1 +#define PARSING_AND_ERR_FLAGS_IPHDRERROR_SHIFT 9 +#define PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK 0x1 +#define PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT 10 +#define PARSING_AND_ERR_FLAGS_TUNNELEXIST_MASK 0x1 +#define PARSING_AND_ERR_FLAGS_TUNNELEXIST_SHIFT 11 +#define PARSING_AND_ERR_FLAGS_TUNNEL8021QTAGEXIST_MASK 0x1 +#define PARSING_AND_ERR_FLAGS_TUNNEL8021QTAGEXIST_SHIFT 12 +#define PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_MASK 0x1 +#define PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_SHIFT 13 +#define PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_MASK 0x1 +#define PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_SHIFT 14 +#define PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_MASK 0x1 +#define PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_SHIFT 15 +}; + +/* Concrete Function ID. 
*/ +struct pxp_concrete_fid { + __le16 fid; +#define PXP_CONCRETE_FID_PFID_MASK 0xF +#define PXP_CONCRETE_FID_PFID_SHIFT 0 +#define PXP_CONCRETE_FID_PORT_MASK 0x3 +#define PXP_CONCRETE_FID_PORT_SHIFT 4 +#define PXP_CONCRETE_FID_PATH_MASK 0x1 +#define PXP_CONCRETE_FID_PATH_SHIFT 6 +#define PXP_CONCRETE_FID_VFVALID_MASK 0x1 +#define PXP_CONCRETE_FID_VFVALID_SHIFT 7 +#define PXP_CONCRETE_FID_VFID_MASK 0xFF +#define PXP_CONCRETE_FID_VFID_SHIFT 8 +}; + +struct pxp_pretend_concrete_fid { + __le16 fid; +#define PXP_PRETEND_CONCRETE_FID_PFID_MASK 0xF +#define PXP_PRETEND_CONCRETE_FID_PFID_SHIFT 0 +#define PXP_PRETEND_CONCRETE_FID_RESERVED_MASK 0x7 +#define PXP_PRETEND_CONCRETE_FID_RESERVED_SHIFT 4 +#define PXP_PRETEND_CONCRETE_FID_VFVALID_MASK 0x1 +#define PXP_PRETEND_CONCRETE_FID_VFVALID_SHIFT 7 +#define PXP_PRETEND_CONCRETE_FID_VFID_MASK 0xFF +#define PXP_PRETEND_CONCRETE_FID_VFID_SHIFT 8 +}; + +union pxp_pretend_fid { + struct pxp_pretend_concrete_fid concrete_fid; + __le16 opaque_fid; +}; + +/* Pxp Pretend Command Register. */ +struct pxp_pretend_cmd { + union pxp_pretend_fid fid; + __le16 control; +#define PXP_PRETEND_CMD_PATH_MASK 0x1 +#define PXP_PRETEND_CMD_PATH_SHIFT 0 +#define PXP_PRETEND_CMD_USE_PORT_MASK 0x1 +#define PXP_PRETEND_CMD_USE_PORT_SHIFT 1 +#define PXP_PRETEND_CMD_PORT_MASK 0x3 +#define PXP_PRETEND_CMD_PORT_SHIFT 2 +#define PXP_PRETEND_CMD_RESERVED0_MASK 0xF +#define PXP_PRETEND_CMD_RESERVED0_SHIFT 4 +#define PXP_PRETEND_CMD_RESERVED1_MASK 0xF +#define PXP_PRETEND_CMD_RESERVED1_SHIFT 8 +#define PXP_PRETEND_CMD_PRETEND_PATH_MASK 0x1 +#define PXP_PRETEND_CMD_PRETEND_PATH_SHIFT 12 +#define PXP_PRETEND_CMD_PRETEND_PORT_MASK 0x1 +#define PXP_PRETEND_CMD_PRETEND_PORT_SHIFT 13 +#define PXP_PRETEND_CMD_PRETEND_FUNCTION_MASK 0x1 +#define PXP_PRETEND_CMD_PRETEND_FUNCTION_SHIFT 14 +#define PXP_PRETEND_CMD_IS_CONCRETE_MASK 0x1 +#define PXP_PRETEND_CMD_IS_CONCRETE_SHIFT 15 +}; + +/* PTT Record in PXP Admin Window. */ +struct pxp_ptt_entry { + __le32 offset; +#define PXP_PTT_ENTRY_OFFSET_MASK 0x7FFFFF +#define PXP_PTT_ENTRY_OFFSET_SHIFT 0 +#define PXP_PTT_ENTRY_RESERVED0_MASK 0x1FF +#define PXP_PTT_ENTRY_RESERVED0_SHIFT 23 + struct pxp_pretend_cmd pretend; +}; + +/* RSS hash type */ +enum rss_hash_type { + RSS_HASH_TYPE_DEFAULT = 0, + RSS_HASH_TYPE_IPV4 = 1, + RSS_HASH_TYPE_TCP_IPV4 = 2, + RSS_HASH_TYPE_IPV6 = 3, + RSS_HASH_TYPE_TCP_IPV6 = 4, + RSS_HASH_TYPE_UDP_IPV4 = 5, + RSS_HASH_TYPE_UDP_IPV6 = 6, + MAX_RSS_HASH_TYPE +}; + +/* status block structure */ +struct status_block { + __le16 pi_array[PIS_PER_SB]; + __le32 sb_num; +#define STATUS_BLOCK_SB_NUM_MASK 0x1FF +#define STATUS_BLOCK_SB_NUM_SHIFT 0 +#define STATUS_BLOCK_ZERO_PAD_MASK 0x7F +#define STATUS_BLOCK_ZERO_PAD_SHIFT 9 +#define STATUS_BLOCK_ZERO_PAD2_MASK 0xFFFF +#define STATUS_BLOCK_ZERO_PAD2_SHIFT 16 + __le32 prod_index; +#define STATUS_BLOCK_PROD_INDEX_MASK 0xFFFFFF +#define STATUS_BLOCK_PROD_INDEX_SHIFT 0 +#define STATUS_BLOCK_ZERO_PAD3_MASK 0xFF +#define STATUS_BLOCK_ZERO_PAD3_SHIFT 24 +}; + +#endif /* __COMMON_HSI__ */ diff --git a/include/linux/qed/eth_common.h b/include/linux/qed/eth_common.h new file mode 100644 index 000000000000..320b3373ac1d --- /dev/null +++ b/include/linux/qed/eth_common.h @@ -0,0 +1,279 @@ +/* QLogic qed NIC Driver + * Copyright (c) 2015 QLogic Corporation + * + * This software is available under the terms of the GNU General Public License + * (GPL) Version 2, available from the file COPYING in the main directory of + * this source tree. 
+ */ + +#ifndef __ETH_COMMON__ +#define __ETH_COMMON__ + +/********************/ +/* ETH FW CONSTANTS */ +/********************/ +#define ETH_CACHE_LINE_SIZE 64 + +#define ETH_MAX_RAMROD_PER_CON 8 +#define ETH_TX_BD_PAGE_SIZE_BYTES 4096 +#define ETH_RX_BD_PAGE_SIZE_BYTES 4096 +#define ETH_RX_SGE_PAGE_SIZE_BYTES 4096 +#define ETH_RX_CQE_PAGE_SIZE_BYTES 4096 +#define ETH_RX_NUM_NEXT_PAGE_BDS 2 +#define ETH_RX_NUM_NEXT_PAGE_SGES 2 + +#define ETH_TX_MIN_BDS_PER_NON_LSO_PKT 1 +#define ETH_TX_MAX_BDS_PER_NON_LSO_PACKET 18 +#define ETH_TX_MAX_LSO_HDR_NBD 4 +#define ETH_TX_MIN_BDS_PER_LSO_PKT 3 +#define ETH_TX_MIN_BDS_PER_TUNN_IPV6_WITH_EXT_PKT 3 +#define ETH_TX_MIN_BDS_PER_IPV6_WITH_EXT_PKT 2 +#define ETH_TX_MIN_BDS_PER_PKT_W_LOOPBACK_MODE 2 +#define ETH_TX_MAX_NON_LSO_PKT_LEN (9700 - (4 + 12 + 8)) +#define ETH_TX_MAX_LSO_HDR_BYTES 510 + +#define ETH_NUM_STATISTIC_COUNTERS MAX_NUM_VPORTS + +#define ETH_REG_CQE_PBL_SIZE 3 + +/* num of MAC/VLAN filters */ +#define ETH_NUM_MAC_FILTERS 512 +#define ETH_NUM_VLAN_FILTERS 512 + +/* approx. multicast constants */ +#define ETH_MULTICAST_BIN_FROM_MAC_SEED 0 +#define ETH_MULTICAST_MAC_BINS 256 +#define ETH_MULTICAST_MAC_BINS_IN_REGS (ETH_MULTICAST_MAC_BINS / 32) + +/* ethernet vport update constants */ +#define ETH_FILTER_RULES_COUNT 10 +#define ETH_RSS_IND_TABLE_ENTRIES_NUM 128 +#define ETH_RSS_KEY_SIZE_REGS 10 +#define ETH_RSS_ENGINE_NUM_K2 207 +#define ETH_RSS_ENGINE_NUM_BB 127 + +/* TPA constants */ +#define ETH_TPA_MAX_AGGS_NUM 64 +#define ETH_TPA_CQE_START_SGL_SIZE 3 +#define ETH_TPA_CQE_CONT_SGL_SIZE 6 +#define ETH_TPA_CQE_END_SGL_SIZE 4 + +/* Queue Zone sizes */ +#define TSTORM_QZONE_SIZE 0 +#define MSTORM_QZONE_SIZE sizeof(struct mstorm_eth_queue_zone) +#define USTORM_QZONE_SIZE sizeof(struct ustorm_eth_queue_zone) +#define XSTORM_QZONE_SIZE 0 +#define YSTORM_QZONE_SIZE sizeof(struct ystorm_eth_queue_zone) +#define PSTORM_QZONE_SIZE 0 + +/* Interrupt coalescing TimeSet */ +struct coalescing_timeset { + u8 timeset; + u8 valid; +}; + +struct eth_tx_1st_bd_flags { + u8 bitfields; +#define ETH_TX_1ST_BD_FLAGS_FORCE_VLAN_MODE_MASK 0x1 +#define ETH_TX_1ST_BD_FLAGS_FORCE_VLAN_MODE_SHIFT 0 +#define ETH_TX_1ST_BD_FLAGS_IP_CSUM_MASK 0x1 +#define ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT 1 +#define ETH_TX_1ST_BD_FLAGS_L4_CSUM_MASK 0x1 +#define ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT 2 +#define ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_MASK 0x1 +#define ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_SHIFT 3 +#define ETH_TX_1ST_BD_FLAGS_LSO_MASK 0x1 +#define ETH_TX_1ST_BD_FLAGS_LSO_SHIFT 4 +#define ETH_TX_1ST_BD_FLAGS_START_BD_MASK 0x1 +#define ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT 5 +#define ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_MASK 0x1 +#define ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_SHIFT 6 +#define ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_MASK 0x1 +#define ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_SHIFT 7 +}; + +/* The parsing information data fo rthe first tx bd of a given packet. */ +struct eth_tx_data_1st_bd { + __le16 vlan; + u8 nbds; + struct eth_tx_1st_bd_flags bd_flags; + __le16 fw_use_only; +}; + +/* The parsing information data for the second tx bd of a given packet. 
*/ +struct eth_tx_data_2nd_bd { + __le16 tunn_ip_size; + __le16 bitfields; +#define ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_MASK 0x1FFF +#define ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_SHIFT 0 +#define ETH_TX_DATA_2ND_BD_RESERVED0_MASK 0x7 +#define ETH_TX_DATA_2ND_BD_RESERVED0_SHIFT 13 + __le16 bitfields2; +#define ETH_TX_DATA_2ND_BD_TUNN_INNER_L2_HDR_SIZE_W_MASK 0xF +#define ETH_TX_DATA_2ND_BD_TUNN_INNER_L2_HDR_SIZE_W_SHIFT 0 +#define ETH_TX_DATA_2ND_BD_TUNN_INNER_ETH_TYPE_MASK 0x3 +#define ETH_TX_DATA_2ND_BD_TUNN_INNER_ETH_TYPE_SHIFT 4 +#define ETH_TX_DATA_2ND_BD_DEST_PORT_MODE_MASK 0x3 +#define ETH_TX_DATA_2ND_BD_DEST_PORT_MODE_SHIFT 6 +#define ETH_TX_DATA_2ND_BD_TUNN_TYPE_MASK 0x3 +#define ETH_TX_DATA_2ND_BD_TUNN_TYPE_SHIFT 8 +#define ETH_TX_DATA_2ND_BD_TUNN_INNER_IPV6_MASK 0x1 +#define ETH_TX_DATA_2ND_BD_TUNN_INNER_IPV6_SHIFT 10 +#define ETH_TX_DATA_2ND_BD_IPV6_EXT_MASK 0x1 +#define ETH_TX_DATA_2ND_BD_IPV6_EXT_SHIFT 11 +#define ETH_TX_DATA_2ND_BD_TUNN_IPV6_EXT_MASK 0x1 +#define ETH_TX_DATA_2ND_BD_TUNN_IPV6_EXT_SHIFT 12 +#define ETH_TX_DATA_2ND_BD_L4_UDP_MASK 0x1 +#define ETH_TX_DATA_2ND_BD_L4_UDP_SHIFT 13 +#define ETH_TX_DATA_2ND_BD_L4_PSEUDO_CSUM_MODE_MASK 0x1 +#define ETH_TX_DATA_2ND_BD_L4_PSEUDO_CSUM_MODE_SHIFT 14 +#define ETH_TX_DATA_2ND_BD_RESERVED1_MASK 0x1 +#define ETH_TX_DATA_2ND_BD_RESERVED1_SHIFT 15 +}; + +/* Regular ETH Rx FP CQE. */ +struct eth_fast_path_rx_reg_cqe { + u8 type; + u8 bitfields; +#define ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_MASK 0x7 +#define ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_SHIFT 0 +#define ETH_FAST_PATH_RX_REG_CQE_TC_MASK 0xF +#define ETH_FAST_PATH_RX_REG_CQE_TC_SHIFT 3 +#define ETH_FAST_PATH_RX_REG_CQE_RESERVED0_MASK 0x1 +#define ETH_FAST_PATH_RX_REG_CQE_RESERVED0_SHIFT 7 + __le16 pkt_len; + struct parsing_and_err_flags pars_flags; + __le16 vlan_tag; + __le32 rss_hash; + __le16 len_on_bd; + u8 placement_offset; + u8 reserved; + __le16 pbl[ETH_REG_CQE_PBL_SIZE]; + u8 reserved1[10]; +}; + +/* The L4 pseudo checksum mode for Ethernet */ +enum eth_l4_pseudo_checksum_mode { + ETH_L4_PSEUDO_CSUM_CORRECT_LENGTH, + ETH_L4_PSEUDO_CSUM_ZERO_LENGTH, + MAX_ETH_L4_PSEUDO_CHECKSUM_MODE +}; + +struct eth_rx_bd { + struct regpair addr; +}; + +/* regular ETH Rx SP CQE */ +struct eth_slow_path_rx_cqe { + u8 type; + u8 ramrod_cmd_id; + u8 error_flag; + u8 reserved[27]; + __le16 echo; +}; + +/* union for all ETH Rx CQE types */ +union eth_rx_cqe { + struct eth_fast_path_rx_reg_cqe fast_path_regular; + struct eth_slow_path_rx_cqe slow_path; +}; + +/* ETH Rx CQE type */ +enum eth_rx_cqe_type { + ETH_RX_CQE_TYPE_UNUSED, + ETH_RX_CQE_TYPE_REGULAR, + ETH_RX_CQE_TYPE_SLOW_PATH, + MAX_ETH_RX_CQE_TYPE +}; + +/* ETH Rx producers data */ +struct eth_rx_prod_data { + __le16 bd_prod; + __le16 sge_prod; + __le16 cqe_prod; + __le16 reserved; +}; + +/* The first tx bd of a given packet */ +struct eth_tx_1st_bd { + struct regpair addr; + __le16 nbytes; + struct eth_tx_data_1st_bd data; +}; + +/* The second tx bd of a given packet */ +struct eth_tx_2nd_bd { + struct regpair addr; + __le16 nbytes; + struct eth_tx_data_2nd_bd data; +}; + +/* The parsing information data for the third tx bd of a given packet. 
*/ +struct eth_tx_data_3rd_bd { + __le16 lso_mss; + u8 bitfields; +#define ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_MASK 0xF +#define ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_SHIFT 0 +#define ETH_TX_DATA_3RD_BD_HDR_NBD_MASK 0xF +#define ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT 4 + u8 resereved0[3]; +}; + +/* The third tx bd of a given packet */ +struct eth_tx_3rd_bd { + struct regpair addr; + __le16 nbytes; + struct eth_tx_data_3rd_bd data; +}; + +/* The common non-special TX BD ring element */ +struct eth_tx_bd { + struct regpair addr; + __le16 nbytes; + __le16 reserved0; + __le32 reserved1; +}; + +union eth_tx_bd_types { + struct eth_tx_1st_bd first_bd; + struct eth_tx_2nd_bd second_bd; + struct eth_tx_3rd_bd third_bd; + struct eth_tx_bd reg_bd; +}; + +/* Mstorm Queue Zone */ +struct mstorm_eth_queue_zone { + struct eth_rx_prod_data rx_producers; + __le32 reserved[2]; +}; + +/* Ustorm Queue Zone */ +struct ustorm_eth_queue_zone { + struct coalescing_timeset int_coalescing_timeset; + __le16 reserved[3]; +}; + +/* Ystorm Queue Zone */ +struct ystorm_eth_queue_zone { + struct coalescing_timeset int_coalescing_timeset; + __le16 reserved[3]; +}; + +/* ETH doorbell data */ +struct eth_db_data { + u8 params; +#define ETH_DB_DATA_DEST_MASK 0x3 +#define ETH_DB_DATA_DEST_SHIFT 0 +#define ETH_DB_DATA_AGG_CMD_MASK 0x3 +#define ETH_DB_DATA_AGG_CMD_SHIFT 2 +#define ETH_DB_DATA_BYPASS_EN_MASK 0x1 +#define ETH_DB_DATA_BYPASS_EN_SHIFT 4 +#define ETH_DB_DATA_RESERVED_MASK 0x1 +#define ETH_DB_DATA_RESERVED_SHIFT 5 +#define ETH_DB_DATA_AGG_VAL_SEL_MASK 0x3 +#define ETH_DB_DATA_AGG_VAL_SEL_SHIFT 6 + u8 agg_flags; + __le16 bd_prod; +}; + +#endif /* __ETH_COMMON__ */ diff --git a/include/linux/qed/qed_chain.h b/include/linux/qed/qed_chain.h new file mode 100644 index 000000000000..b920c3605c46 --- /dev/null +++ b/include/linux/qed/qed_chain.h @@ -0,0 +1,539 @@ +/* QLogic qed NIC Driver + * Copyright (c) 2015 QLogic Corporation + * + * This software is available under the terms of the GNU General Public License + * (GPL) Version 2, available from the file COPYING in the main directory of + * this source tree. 
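For orientation, the _MASK/_SHIFT pairs that fill the firmware structures above are meant to be consumed with the GET_FIELD()/SET_FIELD() helpers that qed_if.h introduces further down in this series. A minimal sketch, with a hypothetical function name:

/* Illustrative only: read the RSS hash type out of a fast-path Rx CQE. */
static enum rss_hash_type example_cqe_rss_hash_type(const struct eth_fast_path_rx_reg_cqe *cqe)
{
	/* GET_FIELD(value, NAME) expands to ((value >> NAME##_SHIFT) & NAME##_MASK) */
	return GET_FIELD(cqe->bitfields, ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE);
}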
+ */ + +#ifndef _QED_CHAIN_H +#define _QED_CHAIN_H + +#include <linux/types.h> +#include <asm/byteorder.h> +#include <linux/kernel.h> +#include <linux/list.h> +#include <linux/slab.h> +#include <linux/qed/common_hsi.h> + +/* dma_addr_t manip */ +#define DMA_LO_LE(x) cpu_to_le32(lower_32_bits(x)) +#define DMA_HI_LE(x) cpu_to_le32(upper_32_bits(x)) + +#define HILO_GEN(hi, lo, type) ((((type)(hi)) << 32) + (lo)) +#define HILO_DMA(hi, lo) HILO_GEN(hi, lo, dma_addr_t) +#define HILO_64(hi, lo) HILO_GEN((le32_to_cpu(hi)), (le32_to_cpu(lo)), u64) +#define HILO_DMA_REGPAIR(regpair) (HILO_DMA(regpair.hi, regpair.lo)) +#define HILO_64_REGPAIR(regpair) (HILO_64(regpair.hi, regpair.lo)) + +enum qed_chain_mode { + /* Each Page contains a next pointer at its end */ + QED_CHAIN_MODE_NEXT_PTR, + + /* Chain is a single page (next ptr) is unrequired */ + QED_CHAIN_MODE_SINGLE, + + /* Page pointers are located in a side list */ + QED_CHAIN_MODE_PBL, +}; + +enum qed_chain_use_mode { + QED_CHAIN_USE_TO_PRODUCE, /* Chain starts empty */ + QED_CHAIN_USE_TO_CONSUME, /* Chain starts full */ + QED_CHAIN_USE_TO_CONSUME_PRODUCE, /* Chain starts empty */ +}; + +struct qed_chain_next { + struct regpair next_phys; + void *next_virt; +}; + +struct qed_chain_pbl { + dma_addr_t p_phys_table; + void *p_virt_table; + u16 prod_page_idx; + u16 cons_page_idx; +}; + +struct qed_chain { + void *p_virt_addr; + dma_addr_t p_phys_addr; + void *p_prod_elem; + void *p_cons_elem; + u16 page_cnt; + enum qed_chain_mode mode; + enum qed_chain_use_mode intended_use; /* used to produce/consume */ + u16 capacity; /*< number of _usable_ elements */ + u16 size; /* number of elements */ + u16 prod_idx; + u16 cons_idx; + u16 elem_per_page; + u16 elem_per_page_mask; + u16 elem_unusable; + u16 usable_per_page; + u16 elem_size; + u16 next_page_mask; + struct qed_chain_pbl pbl; +}; + +#define QED_CHAIN_PBL_ENTRY_SIZE (8) +#define QED_CHAIN_PAGE_SIZE (0x1000) +#define ELEMS_PER_PAGE(elem_size) (QED_CHAIN_PAGE_SIZE / (elem_size)) + +#define UNUSABLE_ELEMS_PER_PAGE(elem_size, mode) \ + ((mode == QED_CHAIN_MODE_NEXT_PTR) ? 
\
+ (1 + ((sizeof(struct qed_chain_next) - 1) / \
+ (elem_size))) : 0)
+
+#define USABLE_ELEMS_PER_PAGE(elem_size, mode) \
+ ((u32)(ELEMS_PER_PAGE(elem_size) - \
+ UNUSABLE_ELEMS_PER_PAGE(elem_size, mode)))
+
+#define QED_CHAIN_PAGE_CNT(elem_cnt, elem_size, mode) \
+ DIV_ROUND_UP(elem_cnt, USABLE_ELEMS_PER_PAGE(elem_size, mode))
+
+/* Accessors */
+static inline u16 qed_chain_get_prod_idx(struct qed_chain *p_chain)
+{
+ return p_chain->prod_idx;
+}
+
+static inline u16 qed_chain_get_cons_idx(struct qed_chain *p_chain)
+{
+ return p_chain->cons_idx;
+}
+
+static inline u16 qed_chain_get_elem_left(struct qed_chain *p_chain)
+{
+ u16 used;
+
+ /* We don't need to truncate upon assignment, as we assign u32->u16 */
+ used = ((u32)0x10000u + (u32)(p_chain->prod_idx)) -
+ (u32)p_chain->cons_idx;
+ if (p_chain->mode == QED_CHAIN_MODE_NEXT_PTR)
+ used -= (used / p_chain->elem_per_page);
+
+ return p_chain->capacity - used;
+}
+
+static inline u8 qed_chain_is_full(struct qed_chain *p_chain)
+{
+ return qed_chain_get_elem_left(p_chain) == p_chain->capacity;
+}
+
+static inline u8 qed_chain_is_empty(struct qed_chain *p_chain)
+{
+ return qed_chain_get_elem_left(p_chain) == 0;
+}
+
+static inline u16 qed_chain_get_elem_per_page(
+ struct qed_chain *p_chain)
+{
+ return p_chain->elem_per_page;
+}
+
+static inline u16 qed_chain_get_usable_per_page(
+ struct qed_chain *p_chain)
+{
+ return p_chain->usable_per_page;
+}
+
+static inline u16 qed_chain_get_unusable_per_page(
+ struct qed_chain *p_chain)
+{
+ return p_chain->elem_unusable;
+}
+
+static inline u16 qed_chain_get_size(struct qed_chain *p_chain)
+{
+ return p_chain->size;
+}
+
+static inline dma_addr_t
+qed_chain_get_pbl_phys(struct qed_chain *p_chain)
+{
+ return p_chain->pbl.p_phys_table;
+}
+
+/**
+ * @brief qed_chain_advance_page -
+ *
+ * Advance the next element across pages for a linked chain
+ *
+ * @param p_chain
+ * @param p_next_elem
+ * @param idx_to_inc
+ * @param page_to_inc
+ */
+static inline void
+qed_chain_advance_page(struct qed_chain *p_chain,
+ void **p_next_elem,
+ u16 *idx_to_inc,
+ u16 *page_to_inc)
+
+{
+ switch (p_chain->mode) {
+ case QED_CHAIN_MODE_NEXT_PTR:
+ {
+ struct qed_chain_next *p_next = *p_next_elem;
+ *p_next_elem = p_next->next_virt;
+ *idx_to_inc += p_chain->elem_unusable;
+ break;
+ }
+ case QED_CHAIN_MODE_SINGLE:
+ *p_next_elem = p_chain->p_virt_addr;
+ break;
+
+ case QED_CHAIN_MODE_PBL:
+ /* It is assumed pages are sequential; the next element needs
+ * to change only when wrapping from the last page back to the first.
+ */
+ if (++(*page_to_inc) == p_chain->page_cnt) {
+ *page_to_inc = 0;
+ *p_next_elem = p_chain->p_virt_addr;
+ }
+ }
+}
+
+#define is_unusable_idx(p, idx) \
+ (((p)->idx & (p)->elem_per_page_mask) == (p)->usable_per_page)
+
+#define is_unusable_next_idx(p, idx) \
+ ((((p)->idx + 1) & (p)->elem_per_page_mask) == (p)->usable_per_page)
+
+#define test_ans_skip(p, idx) \
+ do { \
+ if (is_unusable_idx(p, idx)) { \
+ (p)->idx += (p)->elem_unusable; \
+ } \
+ } while (0)
+
+/**
+ * @brief qed_chain_return_multi_produced -
+ *
+ * A chain in which the driver "Produces" elements should use this API
+ * to indicate previous produced elements are now consumed.
+ *
+ * @param p_chain
+ * @param num
+ */
+static inline void
+qed_chain_return_multi_produced(struct qed_chain *p_chain,
+ u16 num)
+{
+ p_chain->cons_idx += num;
+ test_ans_skip(p_chain, cons_idx);
+}
+
+/**
+ * @brief qed_chain_return_produced -
+ *
+ * A chain in which the driver "Produces" elements should use this API
+ * to indicate previous produced elements are now consumed.
+ *
+ * @param p_chain
+ */
+static inline void qed_chain_return_produced(struct qed_chain *p_chain)
+{
+ p_chain->cons_idx++;
+ test_ans_skip(p_chain, cons_idx);
+}
+
+/**
+ * @brief qed_chain_produce -
+ *
+ * A chain in which the driver "Produces" elements should use this to get
+ * a pointer to the next element which can be "Produced". It is the driver's
+ * responsibility to validate that the chain has room for a new element.
+ *
+ * @param p_chain
+ *
+ * @return void*, a pointer to the next element
+ */
+static inline void *qed_chain_produce(struct qed_chain *p_chain)
+{
+ void *ret = NULL;
+
+ if ((p_chain->prod_idx & p_chain->elem_per_page_mask) ==
+ p_chain->next_page_mask) {
+ qed_chain_advance_page(p_chain, &p_chain->p_prod_elem,
+ &p_chain->prod_idx,
+ &p_chain->pbl.prod_page_idx);
+ }
+
+ ret = p_chain->p_prod_elem;
+ p_chain->prod_idx++;
+ p_chain->p_prod_elem = (void *)(((u8 *)p_chain->p_prod_elem) +
+ p_chain->elem_size);
+
+ return ret;
+}
+
+/**
+ * @brief qed_chain_get_capacity -
+ *
+ * Get the maximum number of BDs in chain
+ *
+ * @param p_chain
+ *
+ * @return u16, the maximum number of elements (BDs) the chain can hold
+ */
+static inline u16 qed_chain_get_capacity(struct qed_chain *p_chain)
+{
+ return p_chain->capacity;
+}
+
+/**
+ * @brief qed_chain_recycle_consumed -
+ *
+ * Returns an element which was previously consumed;
+ * Increments the producer so it can be written to FW.
+ *
+ * @param p_chain
+ */
+static inline void
+qed_chain_recycle_consumed(struct qed_chain *p_chain)
+{
+ test_ans_skip(p_chain, prod_idx);
+ p_chain->prod_idx++;
+}
+
+/**
+ * @brief qed_chain_consume -
+ *
+ * A Chain in which the driver utilizes data written by a different source
+ * (i.e., FW) should use this to access passed buffers.
+ *
+ * @param p_chain
+ *
+ * @return void*, a pointer to the next buffer written
+ */
+static inline void *qed_chain_consume(struct qed_chain *p_chain)
+{
+ void *ret = NULL;
+
+ if ((p_chain->cons_idx & p_chain->elem_per_page_mask) ==
+ p_chain->next_page_mask) {
+ qed_chain_advance_page(p_chain, &p_chain->p_cons_elem,
+ &p_chain->cons_idx,
+ &p_chain->pbl.cons_page_idx);
+ }
+
+ ret = p_chain->p_cons_elem;
+ p_chain->cons_idx++;
+ p_chain->p_cons_elem = (void *)(((u8 *)p_chain->p_cons_elem) +
+ p_chain->elem_size);
+
+ return ret;
+}
+
+/**
+ * @brief qed_chain_reset - Resets the chain to its start state
+ *
+ * @param p_chain pointer to a previously allocated chain
+ */
+static inline void qed_chain_reset(struct qed_chain *p_chain)
+{
+ int i;
+
+ p_chain->prod_idx = 0;
+ p_chain->cons_idx = 0;
+ p_chain->p_cons_elem = p_chain->p_virt_addr;
+ p_chain->p_prod_elem = p_chain->p_virt_addr;
+
+ if (p_chain->mode == QED_CHAIN_MODE_PBL) {
+ p_chain->pbl.prod_page_idx = p_chain->page_cnt - 1;
+ p_chain->pbl.cons_page_idx = p_chain->page_cnt - 1;
+ }
+
+ switch (p_chain->intended_use) {
+ case QED_CHAIN_USE_TO_CONSUME_PRODUCE:
+ case QED_CHAIN_USE_TO_PRODUCE:
+ /* Do nothing */
+ break;
+
+ case QED_CHAIN_USE_TO_CONSUME:
+ /* produce empty elements */
+ for (i = 0; i < p_chain->capacity; i++)
+ qed_chain_recycle_consumed(p_chain);
+ break;
+ }
+}
+
+/**
+ * @brief qed_chain_init - Initializes a basic chain struct
+ *
+ * @param p_chain
+ * @param p_virt_addr
+ * @param p_phys_addr physical address of allocated buffer's beginning
+ * @param page_cnt number of pages in the allocated buffer
+ * @param elem_size size of each element in the chain
+ * @param intended_use
+ * @param mode
+ */
+static inline void qed_chain_init(struct qed_chain *p_chain,
+ void *p_virt_addr,
+ dma_addr_t p_phys_addr,
+ u16 page_cnt,
+ u8 elem_size,
+ enum qed_chain_use_mode intended_use,
+ enum qed_chain_mode mode)
+{
+ /* chain fixed parameters */
+ p_chain->p_virt_addr = p_virt_addr;
+ p_chain->p_phys_addr = p_phys_addr;
+ p_chain->elem_size = elem_size;
+ p_chain->page_cnt = page_cnt;
+ p_chain->mode = mode;
+
+ p_chain->intended_use = intended_use;
+ p_chain->elem_per_page = ELEMS_PER_PAGE(elem_size);
+ p_chain->usable_per_page =
+ USABLE_ELEMS_PER_PAGE(elem_size, mode);
+ p_chain->capacity = p_chain->usable_per_page * page_cnt;
+ p_chain->size = p_chain->elem_per_page * page_cnt;
+ p_chain->elem_per_page_mask = p_chain->elem_per_page - 1;
+
+ p_chain->elem_unusable = UNUSABLE_ELEMS_PER_PAGE(elem_size, mode);
+
+ p_chain->next_page_mask = (p_chain->usable_per_page &
+ p_chain->elem_per_page_mask);
+
+ if (mode == QED_CHAIN_MODE_NEXT_PTR) {
+ struct qed_chain_next *p_next;
+ u16 i;
+
+ for (i = 0; i < page_cnt - 1; i++) {
+ /* Increment mem_phy to the next page. */
+ p_phys_addr += QED_CHAIN_PAGE_SIZE;
+
+ /* Initialize the physical address of the next page. */
+ p_next = (struct qed_chain_next *)((u8 *)p_virt_addr +
+ elem_size *
+ p_chain->
+ usable_per_page);
+
+ p_next->next_phys.lo = DMA_LO_LE(p_phys_addr);
+ p_next->next_phys.hi = DMA_HI_LE(p_phys_addr);
+
+ /* Initialize the virtual address of the next page. */
+ p_next->next_virt = (void *)((u8 *)p_virt_addr +
+ QED_CHAIN_PAGE_SIZE);
+
+ /* Move to the next page.
*/
+ p_virt_addr = p_next->next_virt;
+ }
+
+ /* Last page's next should point to beginning of the chain */
+ p_next = (struct qed_chain_next *)((u8 *)p_virt_addr +
+ elem_size *
+ p_chain->usable_per_page);
+
+ p_next->next_phys.lo = DMA_LO_LE(p_chain->p_phys_addr);
+ p_next->next_phys.hi = DMA_HI_LE(p_chain->p_phys_addr);
+ p_next->next_virt = p_chain->p_virt_addr;
+ }
+ qed_chain_reset(p_chain);
+}
+
+/**
+ * @brief qed_chain_pbl_init - Initializes a basic pbl chain
+ * struct
+ * @param p_chain
+ * @param p_virt_addr virtual address of allocated buffer's beginning
+ * @param p_phys_addr physical address of allocated buffer's beginning
+ * @param page_cnt number of pages in the allocated buffer
+ * @param elem_size size of each element in the chain
+ * @param use_mode
+ * @param p_phys_pbl pointer to a pre-allocated side table
+ * which will hold physical page addresses.
+ * @param p_virt_pbl pointer to a pre-allocated side table
+ * which will hold virtual page addresses.
+ */
+static inline void
+qed_chain_pbl_init(struct qed_chain *p_chain,
+ void *p_virt_addr,
+ dma_addr_t p_phys_addr,
+ u16 page_cnt,
+ u8 elem_size,
+ enum qed_chain_use_mode use_mode,
+ dma_addr_t p_phys_pbl,
+ dma_addr_t *p_virt_pbl)
+{
+ dma_addr_t *p_pbl_dma = p_virt_pbl;
+ int i;
+
+ qed_chain_init(p_chain, p_virt_addr, p_phys_addr, page_cnt,
+ elem_size, use_mode, QED_CHAIN_MODE_PBL);
+
+ p_chain->pbl.p_phys_table = p_phys_pbl;
+ p_chain->pbl.p_virt_table = p_virt_pbl;
+
+ /* Fill the PBL with physical addresses */
+ for (i = 0; i < page_cnt; i++) {
+ *p_pbl_dma = p_phys_addr;
+ p_phys_addr += QED_CHAIN_PAGE_SIZE;
+ p_pbl_dma++;
+ }
+}
+
+/**
+ * @brief qed_chain_set_prod - sets the prod to the given
+ * value
+ *
+ * @param prod_idx
+ * @param p_prod_elem
+ */
+static inline void qed_chain_set_prod(struct qed_chain *p_chain,
+ u16 prod_idx,
+ void *p_prod_elem)
+{
+ p_chain->prod_idx = prod_idx;
+ p_chain->p_prod_elem = p_prod_elem;
+}
+
+/**
+ * @brief qed_chain_sge_get_elem -
+ *
+ * get a pointer to an element represented by absolute idx
+ *
+ * @param p_chain
+ * @param idx
+ * @assumption p_chain->size is a power of 2
+ *
+ * @return void*, a pointer to the element at idx
+ */
+static inline void *qed_chain_sge_get_elem(struct qed_chain *p_chain,
+ u16 idx)
+{
+ void *ret = NULL;
+
+ if (idx >= p_chain->size)
+ return NULL;
+
+ ret = (u8 *)p_chain->p_virt_addr + p_chain->elem_size * idx;
+
+ return ret;
+}
+
+/**
+ * @brief qed_chain_sge_inc_cons_prod
+ *
+ * For sge chains, the producer isn't increased serially; the ring
+ * is expected to be full at all times. Once elements are
+ * consumed, they are immediately produced.
+ *
+ * @param p_chain
+ * @param cnt
+ *
+ * @return inline void
+ */
+static inline void
+qed_chain_sge_inc_cons_prod(struct qed_chain *p_chain,
+ u16 cnt)
+{
+ p_chain->prod_idx += cnt;
+ p_chain->cons_idx += cnt;
+}
+
+#endif
diff --git a/include/linux/qed/qed_eth_if.h b/include/linux/qed/qed_eth_if.h
new file mode 100644
index 000000000000..81ab178e31c1
--- /dev/null
+++ b/include/linux/qed/qed_eth_if.h
@@ -0,0 +1,165 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
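Before moving on to the Ethernet interface header, a minimal usage sketch for the chain accessors above; the function name, element type and error handling are illustrative assumptions, not part of the patch. For scale: on a 64-bit build sizeof(struct qed_chain_next) is 16, so with 8-byte elements in QED_CHAIN_MODE_NEXT_PTR mode ELEMS_PER_PAGE(8) is 512, two elements per page are unusable, and QED_CHAIN_PAGE_CNT(1024, 8, mode) works out to DIV_ROUND_UP(1024, 510) = 3 pages.

/* Illustrative sketch only: post one Tx buffer descriptor on a chain that
 * was initialized with qed_chain_init() for QED_CHAIN_USE_TO_PRODUCE.
 */
static int example_post_tx_bd(struct qed_chain *txq, dma_addr_t buf, u16 len)
{
	struct eth_tx_1st_bd *bd;

	if (qed_chain_get_elem_left(txq) < 1)
		return -EBUSY;		/* no room; caller must retry later */

	bd = qed_chain_produce(txq);	/* pointer to the next producible element */
	memset(bd, 0, sizeof(*bd));
	bd->addr.hi = DMA_HI_LE(buf);
	bd->addr.lo = DMA_LO_LE(buf);
	bd->nbytes = cpu_to_le16(len);

	/* The consumer side uses qed_chain_consume() symmetrically and hands
	 * elements back with qed_chain_return_produced()/_recycle_consumed().
	 */
	return 0;
}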
+ */ + +#ifndef _QED_ETH_IF_H +#define _QED_ETH_IF_H + +#include <linux/list.h> +#include <linux/if_link.h> +#include <linux/qed/eth_common.h> +#include <linux/qed/qed_if.h> + +struct qed_dev_eth_info { + struct qed_dev_info common; + + u8 num_queues; + u8 num_tc; + + u8 port_mac[ETH_ALEN]; + u8 num_vlan_filters; +}; + +struct qed_update_vport_rss_params { + u16 rss_ind_table[128]; + u32 rss_key[10]; +}; + +struct qed_update_vport_params { + u8 vport_id; + u8 update_vport_active_flg; + u8 vport_active_flg; + u8 update_rss_flg; + struct qed_update_vport_rss_params rss_params; +}; + +struct qed_stop_rxq_params { + u8 rss_id; + u8 rx_queue_id; + u8 vport_id; + bool eq_completion_only; +}; + +struct qed_stop_txq_params { + u8 rss_id; + u8 tx_queue_id; +}; + +enum qed_filter_rx_mode_type { + QED_FILTER_RX_MODE_TYPE_REGULAR, + QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC, + QED_FILTER_RX_MODE_TYPE_PROMISC, +}; + +enum qed_filter_xcast_params_type { + QED_FILTER_XCAST_TYPE_ADD, + QED_FILTER_XCAST_TYPE_DEL, + QED_FILTER_XCAST_TYPE_REPLACE, +}; + +struct qed_filter_ucast_params { + enum qed_filter_xcast_params_type type; + u8 vlan_valid; + u16 vlan; + u8 mac_valid; + unsigned char mac[ETH_ALEN]; +}; + +struct qed_filter_mcast_params { + enum qed_filter_xcast_params_type type; + u8 num; + unsigned char mac[64][ETH_ALEN]; +}; + +union qed_filter_type_params { + enum qed_filter_rx_mode_type accept_flags; + struct qed_filter_ucast_params ucast; + struct qed_filter_mcast_params mcast; +}; + +enum qed_filter_type { + QED_FILTER_TYPE_UCAST, + QED_FILTER_TYPE_MCAST, + QED_FILTER_TYPE_RX_MODE, + QED_MAX_FILTER_TYPES, +}; + +struct qed_filter_params { + enum qed_filter_type type; + union qed_filter_type_params filter; +}; + +struct qed_queue_start_common_params { + u8 rss_id; + u8 queue_id; + u8 vport_id; + u16 sb; + u16 sb_idx; +}; + +struct qed_eth_cb_ops { + struct qed_common_cb_ops common; +}; + +struct qed_eth_ops { + const struct qed_common_ops *common; + + int (*fill_dev_info)(struct qed_dev *cdev, + struct qed_dev_eth_info *info); + + void (*register_ops)(struct qed_dev *cdev, + struct qed_eth_cb_ops *ops, + void *cookie); + + int (*vport_start)(struct qed_dev *cdev, + u8 vport_id, u16 mtu, + u8 drop_ttl0_flg, + u8 inner_vlan_removal_en_flg); + + int (*vport_stop)(struct qed_dev *cdev, + u8 vport_id); + + int (*vport_update)(struct qed_dev *cdev, + struct qed_update_vport_params *params); + + int (*q_rx_start)(struct qed_dev *cdev, + struct qed_queue_start_common_params *params, + u16 bd_max_bytes, + dma_addr_t bd_chain_phys_addr, + dma_addr_t cqe_pbl_addr, + u16 cqe_pbl_size, + void __iomem **pp_prod); + + int (*q_rx_stop)(struct qed_dev *cdev, + struct qed_stop_rxq_params *params); + + int (*q_tx_start)(struct qed_dev *cdev, + struct qed_queue_start_common_params *params, + dma_addr_t pbl_addr, + u16 pbl_size, + void __iomem **pp_doorbell); + + int (*q_tx_stop)(struct qed_dev *cdev, + struct qed_stop_txq_params *params); + + int (*filter_config)(struct qed_dev *cdev, + struct qed_filter_params *params); + + int (*fastpath_stop)(struct qed_dev *cdev); + + int (*eth_cqe_completion)(struct qed_dev *cdev, + u8 rss_id, + struct eth_slow_path_rx_cqe *cqe); + + void (*get_vport_stats)(struct qed_dev *cdev, + struct qed_eth_stats *stats); +}; + +const struct qed_eth_ops *qed_get_eth_ops(u32 version); +void qed_put_eth_ops(void); + +#endif diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h new file mode 100644 index 000000000000..dc9a1353f971 --- /dev/null +++ b/include/linux/qed/qed_if.h @@ -0,0 
+1,498 @@ +/* QLogic qed NIC Driver + * + * Copyright (c) 2015 QLogic Corporation + * + * This software is available under the terms of the GNU General Public License + * (GPL) Version 2, available from the file COPYING in the main directory of + * this source tree. + */ + +#ifndef _QED_IF_H +#define _QED_IF_H + +#include <linux/types.h> +#include <linux/interrupt.h> +#include <linux/netdevice.h> +#include <linux/pci.h> +#include <linux/skbuff.h> +#include <linux/types.h> +#include <asm/byteorder.h> +#include <linux/io.h> +#include <linux/compiler.h> +#include <linux/kernel.h> +#include <linux/list.h> +#include <linux/slab.h> +#include <linux/qed/common_hsi.h> +#include <linux/qed/qed_chain.h> + +#define DIRECT_REG_WR(reg_addr, val) writel((u32)val, \ + (void __iomem *)(reg_addr)) + +#define DIRECT_REG_RD(reg_addr) readl((void __iomem *)(reg_addr)) + +#define QED_COALESCE_MAX 0xFF + +/* forward */ +struct qed_dev; + +struct qed_eth_pf_params { + /* The following parameters are used during HW-init + * and these parameters need to be passed as arguments + * to update_pf_params routine invoked before slowpath start + */ + u16 num_cons; +}; + +struct qed_pf_params { + struct qed_eth_pf_params eth_pf_params; +}; + +enum qed_int_mode { + QED_INT_MODE_INTA, + QED_INT_MODE_MSIX, + QED_INT_MODE_MSI, + QED_INT_MODE_POLL, +}; + +struct qed_sb_info { + struct status_block *sb_virt; + dma_addr_t sb_phys; + u32 sb_ack; /* Last given ack */ + u16 igu_sb_id; + void __iomem *igu_addr; + u8 flags; +#define QED_SB_INFO_INIT 0x1 +#define QED_SB_INFO_SETUP 0x2 + + struct qed_dev *cdev; +}; + +struct qed_dev_info { + unsigned long pci_mem_start; + unsigned long pci_mem_end; + unsigned int pci_irq; + u8 num_hwfns; + + u8 hw_mac[ETH_ALEN]; + bool is_mf; + + /* FW version */ + u16 fw_major; + u16 fw_minor; + u16 fw_rev; + u16 fw_eng; + + /* MFW version */ + u32 mfw_rev; + + u32 flash_size; + u8 mf_mode; +}; + +enum qed_sb_type { + QED_SB_TYPE_L2_QUEUE, +}; + +enum qed_protocol { + QED_PROTOCOL_ETH, +}; + +struct qed_link_params { + bool link_up; + +#define QED_LINK_OVERRIDE_SPEED_AUTONEG BIT(0) +#define QED_LINK_OVERRIDE_SPEED_ADV_SPEEDS BIT(1) +#define QED_LINK_OVERRIDE_SPEED_FORCED_SPEED BIT(2) +#define QED_LINK_OVERRIDE_PAUSE_CONFIG BIT(3) + u32 override_flags; + bool autoneg; + u32 adv_speeds; + u32 forced_speed; +#define QED_LINK_PAUSE_AUTONEG_ENABLE BIT(0) +#define QED_LINK_PAUSE_RX_ENABLE BIT(1) +#define QED_LINK_PAUSE_TX_ENABLE BIT(2) + u32 pause_config; +}; + +struct qed_link_output { + bool link_up; + + u32 supported_caps; /* In SUPPORTED defs */ + u32 advertised_caps; /* In ADVERTISED defs */ + u32 lp_caps; /* In ADVERTISED defs */ + u32 speed; /* In Mb/s */ + u8 duplex; /* In DUPLEX defs */ + u8 port; /* In PORT defs */ + bool autoneg; + u32 pause_config; +}; + +#define QED_DRV_VER_STR_SIZE 12 +struct qed_slowpath_params { + u32 int_mode; + u8 drv_major; + u8 drv_minor; + u8 drv_rev; + u8 drv_eng; + u8 name[QED_DRV_VER_STR_SIZE]; +}; + +#define ILT_PAGE_SIZE_TCFC 0x8000 /* 32KB */ + +struct qed_int_info { + struct msix_entry *msix; + u8 msix_cnt; + + /* This should be updated by the protocol driver */ + u8 used_cnt; +}; + +struct qed_common_cb_ops { + void (*link_update)(void *dev, + struct qed_link_output *link); +}; + +struct qed_common_ops { + struct qed_dev* (*probe)(struct pci_dev *dev, + enum qed_protocol protocol, + u32 dp_module, u8 dp_level); + + void (*remove)(struct qed_dev *cdev); + + int (*set_power_state)(struct qed_dev *cdev, + pci_power_t state); + + void (*set_id)(struct qed_dev *cdev, 
+ char name[], + char ver_str[]); + + /* Client drivers need to make this call before slowpath_start. + * PF params required for the call before slowpath_start is + * documented within the qed_pf_params structure definition. + */ + void (*update_pf_params)(struct qed_dev *cdev, + struct qed_pf_params *params); + int (*slowpath_start)(struct qed_dev *cdev, + struct qed_slowpath_params *params); + + int (*slowpath_stop)(struct qed_dev *cdev); + + /* Requests to use `cnt' interrupts for fastpath. + * upon success, returns number of interrupts allocated for fastpath. + */ + int (*set_fp_int)(struct qed_dev *cdev, + u16 cnt); + + /* Fills `info' with pointers required for utilizing interrupts */ + int (*get_fp_int)(struct qed_dev *cdev, + struct qed_int_info *info); + + u32 (*sb_init)(struct qed_dev *cdev, + struct qed_sb_info *sb_info, + void *sb_virt_addr, + dma_addr_t sb_phy_addr, + u16 sb_id, + enum qed_sb_type type); + + u32 (*sb_release)(struct qed_dev *cdev, + struct qed_sb_info *sb_info, + u16 sb_id); + + void (*simd_handler_config)(struct qed_dev *cdev, + void *token, + int index, + void (*handler)(void *)); + + void (*simd_handler_clean)(struct qed_dev *cdev, + int index); +/** + * @brief set_link - set links according to params + * + * @param cdev + * @param params - values used to override the default link configuration + * + * @return 0 on success, error otherwise. + */ + int (*set_link)(struct qed_dev *cdev, + struct qed_link_params *params); + +/** + * @brief get_link - returns the current link state. + * + * @param cdev + * @param if_link - structure to be filled with current link configuration. + */ + void (*get_link)(struct qed_dev *cdev, + struct qed_link_output *if_link); + +/** + * @brief - drains chip in case Tx completions fail to arrive due to pause. + * + * @param cdev + */ + int (*drain)(struct qed_dev *cdev); + +/** + * @brief update_msglvl - update module debug level + * + * @param cdev + * @param dp_module + * @param dp_level + */ + void (*update_msglvl)(struct qed_dev *cdev, + u32 dp_module, + u8 dp_level); + + int (*chain_alloc)(struct qed_dev *cdev, + enum qed_chain_use_mode intended_use, + enum qed_chain_mode mode, + u16 num_elems, + size_t elem_size, + struct qed_chain *p_chain); + + void (*chain_free)(struct qed_dev *cdev, + struct qed_chain *p_chain); +}; + +/** + * @brief qed_get_protocol_version + * + * @param protocol + * + * @return version supported by qed for given protocol driver + */ +u32 qed_get_protocol_version(enum qed_protocol protocol); + +#define MASK_FIELD(_name, _value) \ + ((_value) &= (_name ## _MASK)) + +#define FIELD_VALUE(_name, _value) \ + ((_value & _name ## _MASK) << _name ## _SHIFT) + +#define SET_FIELD(value, name, flag) \ + do { \ + (value) &= ~(name ## _MASK << name ## _SHIFT); \ + (value) |= (((u64)flag) << (name ## _SHIFT)); \ + } while (0) + +#define GET_FIELD(value, name) \ + (((value) >> (name ## _SHIFT)) & name ## _MASK) + +/* Debug print definitions */ +#define DP_ERR(cdev, fmt, ...) \ + pr_err("[%s:%d(%s)]" fmt, \ + __func__, __LINE__, \ + DP_NAME(cdev) ? DP_NAME(cdev) : "", \ + ## __VA_ARGS__) \ + +#define DP_NOTICE(cdev, fmt, ...) \ + do { \ + if (unlikely((cdev)->dp_level <= QED_LEVEL_NOTICE)) { \ + pr_notice("[%s:%d(%s)]" fmt, \ + __func__, __LINE__, \ + DP_NAME(cdev) ? DP_NAME(cdev) : "", \ + ## __VA_ARGS__); \ + \ + } \ + } while (0) + +#define DP_INFO(cdev, fmt, ...) \ + do { \ + if (unlikely((cdev)->dp_level <= QED_LEVEL_INFO)) { \ + pr_notice("[%s:%d(%s)]" fmt, \ + __func__, __LINE__, \ + DP_NAME(cdev) ? 
DP_NAME(cdev) : "", \ + ## __VA_ARGS__); \ + } \ + } while (0) + +#define DP_VERBOSE(cdev, module, fmt, ...) \ + do { \ + if (unlikely(((cdev)->dp_level <= QED_LEVEL_VERBOSE) && \ + ((cdev)->dp_module & module))) { \ + pr_notice("[%s:%d(%s)]" fmt, \ + __func__, __LINE__, \ + DP_NAME(cdev) ? DP_NAME(cdev) : "", \ + ## __VA_ARGS__); \ + } \ + } while (0) + +enum DP_LEVEL { + QED_LEVEL_VERBOSE = 0x0, + QED_LEVEL_INFO = 0x1, + QED_LEVEL_NOTICE = 0x2, + QED_LEVEL_ERR = 0x3, +}; + +#define QED_LOG_LEVEL_SHIFT (30) +#define QED_LOG_VERBOSE_MASK (0x3fffffff) +#define QED_LOG_INFO_MASK (0x40000000) +#define QED_LOG_NOTICE_MASK (0x80000000) + +enum DP_MODULE { + QED_MSG_SPQ = 0x10000, + QED_MSG_STATS = 0x20000, + QED_MSG_DCB = 0x40000, + QED_MSG_IOV = 0x80000, + QED_MSG_SP = 0x100000, + QED_MSG_STORAGE = 0x200000, + QED_MSG_CXT = 0x800000, + QED_MSG_ILT = 0x2000000, + QED_MSG_ROCE = 0x4000000, + QED_MSG_DEBUG = 0x8000000, + /* to be added...up to 0x8000000 */ +}; + +struct qed_eth_stats { + u64 no_buff_discards; + u64 packet_too_big_discard; + u64 ttl0_discard; + u64 rx_ucast_bytes; + u64 rx_mcast_bytes; + u64 rx_bcast_bytes; + u64 rx_ucast_pkts; + u64 rx_mcast_pkts; + u64 rx_bcast_pkts; + u64 mftag_filter_discards; + u64 mac_filter_discards; + u64 tx_ucast_bytes; + u64 tx_mcast_bytes; + u64 tx_bcast_bytes; + u64 tx_ucast_pkts; + u64 tx_mcast_pkts; + u64 tx_bcast_pkts; + u64 tx_err_drop_pkts; + u64 tpa_coalesced_pkts; + u64 tpa_coalesced_events; + u64 tpa_aborts_num; + u64 tpa_not_coalesced_pkts; + u64 tpa_coalesced_bytes; + + /* port */ + u64 rx_64_byte_packets; + u64 rx_127_byte_packets; + u64 rx_255_byte_packets; + u64 rx_511_byte_packets; + u64 rx_1023_byte_packets; + u64 rx_1518_byte_packets; + u64 rx_1522_byte_packets; + u64 rx_2047_byte_packets; + u64 rx_4095_byte_packets; + u64 rx_9216_byte_packets; + u64 rx_16383_byte_packets; + u64 rx_crc_errors; + u64 rx_mac_crtl_frames; + u64 rx_pause_frames; + u64 rx_pfc_frames; + u64 rx_align_errors; + u64 rx_carrier_errors; + u64 rx_oversize_packets; + u64 rx_jabbers; + u64 rx_undersize_packets; + u64 rx_fragments; + u64 tx_64_byte_packets; + u64 tx_65_to_127_byte_packets; + u64 tx_128_to_255_byte_packets; + u64 tx_256_to_511_byte_packets; + u64 tx_512_to_1023_byte_packets; + u64 tx_1024_to_1518_byte_packets; + u64 tx_1519_to_2047_byte_packets; + u64 tx_2048_to_4095_byte_packets; + u64 tx_4096_to_9216_byte_packets; + u64 tx_9217_to_16383_byte_packets; + u64 tx_pause_frames; + u64 tx_pfc_frames; + u64 tx_lpi_entry_count; + u64 tx_total_collisions; + u64 brb_truncates; + u64 brb_discards; + u64 rx_mac_bytes; + u64 rx_mac_uc_packets; + u64 rx_mac_mc_packets; + u64 rx_mac_bc_packets; + u64 rx_mac_frames_ok; + u64 tx_mac_bytes; + u64 tx_mac_uc_packets; + u64 tx_mac_mc_packets; + u64 tx_mac_bc_packets; + u64 tx_mac_ctrl_frames; +}; + +#define QED_SB_IDX 0x0002 + +#define RX_PI 0 +#define TX_PI(tc) (RX_PI + 1 + tc) + +static inline u16 qed_sb_update_sb_idx(struct qed_sb_info *sb_info) +{ + u32 prod = 0; + u16 rc = 0; + + prod = le32_to_cpu(sb_info->sb_virt->prod_index) & + STATUS_BLOCK_PROD_INDEX_MASK; + if (sb_info->sb_ack != prod) { + sb_info->sb_ack = prod; + rc |= QED_SB_IDX; + } + + /* Let SB update */ + mmiowb(); + return rc; +} + +/** + * + * @brief This function creates an update command for interrupts that is + * written to the IGU. + * + * @param sb_info - This is the structure allocated and + * initialized per status block. 
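For orientation, a rough sketch of how a fastpath interrupt handler might pair qed_sb_update_sb_idx() above with qed_sb_ack() below; the handler name is hypothetical and IGU_INT_ENABLE is assumed to come from the common_hsi.h definitions added earlier in this series.

/* Illustrative sketch only. */
static irqreturn_t example_fp_irq_handler(int irq, void *cookie)
{
	struct qed_sb_info *sb_info = cookie;

	if (qed_sb_update_sb_idx(sb_info) & QED_SB_IDX) {
		/* the status block index moved: poll Rx/Tx completions here */
	}

	/* Re-enable the status block and push the latest consumer value */
	qed_sb_ack(sb_info, IGU_INT_ENABLE, 1);

	return IRQ_HANDLED;
}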
Assumption is + * that it was initialized using qed_sb_init + * @param int_cmd - Enable/Disable/Nop + * @param upd_flg - whether igu consumer should be + * updated. + * + * @return inline void + */ +static inline void qed_sb_ack(struct qed_sb_info *sb_info, + enum igu_int_cmd int_cmd, + u8 upd_flg) +{ + struct igu_prod_cons_update igu_ack = { 0 }; + + igu_ack.sb_id_and_flags = + ((sb_info->sb_ack << IGU_PROD_CONS_UPDATE_SB_INDEX_SHIFT) | + (upd_flg << IGU_PROD_CONS_UPDATE_UPDATE_FLAG_SHIFT) | + (int_cmd << IGU_PROD_CONS_UPDATE_ENABLE_INT_SHIFT) | + (IGU_SEG_ACCESS_REG << + IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_SHIFT)); + + DIRECT_REG_WR(sb_info->igu_addr, igu_ack.sb_id_and_flags); + + /* Both segments (interrupts & acks) are written to same place address; + * Need to guarantee all commands will be received (in-order) by HW. + */ + mmiowb(); + barrier(); +} + +static inline void __internal_ram_wr(void *p_hwfn, + void __iomem *addr, + int size, + u32 *data) + +{ + unsigned int i; + + for (i = 0; i < size / sizeof(*data); i++) + DIRECT_REG_WR(&((u32 __iomem *)addr)[i], data[i]); +} + +static inline void internal_ram_wr(void __iomem *addr, + int size, + u32 *data) +{ + __internal_ram_wr(NULL, addr, size, data); +} + +#endif diff --git a/include/linux/random.h b/include/linux/random.h index e651874df2c9..a75840c1aa71 100644 --- a/include/linux/random.h +++ b/include/linux/random.h @@ -7,6 +7,8 @@ #define _LINUX_RANDOM_H #include <linux/list.h> +#include <linux/once.h> + #include <uapi/linux/random.h> struct random_ready_callback { @@ -45,6 +47,10 @@ struct rnd_state { u32 prandom_u32_state(struct rnd_state *state); void prandom_bytes_state(struct rnd_state *state, void *buf, size_t nbytes); +void prandom_seed_full_state(struct rnd_state __percpu *pcpu_state); + +#define prandom_init_once(pcpu_state) \ + DO_ONCE(prandom_seed_full_state, (pcpu_state)) /** * prandom_u32_max - returns a pseudo-random number in interval [0, ep_ro) diff --git a/include/linux/rcu_sync.h b/include/linux/rcu_sync.h new file mode 100644 index 000000000000..a63a33e6196e --- /dev/null +++ b/include/linux/rcu_sync.h @@ -0,0 +1,86 @@ +/* + * RCU-based infrastructure for lightweight reader-writer locking + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, you can access it online at + * http://www.gnu.org/licenses/gpl-2.0.html. + * + * Copyright (c) 2015, Red Hat, Inc. + * + * Author: Oleg Nesterov <oleg@redhat.com> + */ + +#ifndef _LINUX_RCU_SYNC_H_ +#define _LINUX_RCU_SYNC_H_ + +#include <linux/wait.h> +#include <linux/rcupdate.h> + +enum rcu_sync_type { RCU_SYNC, RCU_SCHED_SYNC, RCU_BH_SYNC }; + +/* Structure to mediate between updaters and fastpath-using readers. */ +struct rcu_sync { + int gp_state; + int gp_count; + wait_queue_head_t gp_wait; + + int cb_state; + struct rcu_head cb_head; + + enum rcu_sync_type gp_type; +}; + +extern void rcu_sync_lockdep_assert(struct rcu_sync *); + +/** + * rcu_sync_is_idle() - Are readers permitted to use their fastpaths? 
+ * @rsp: Pointer to rcu_sync structure to use for synchronization + * + * Returns true if readers are permitted to use their fastpaths. + * Must be invoked within an RCU read-side critical section whose + * flavor matches that of the rcu_sync struture. + */ +static inline bool rcu_sync_is_idle(struct rcu_sync *rsp) +{ +#ifdef CONFIG_PROVE_RCU + rcu_sync_lockdep_assert(rsp); +#endif + return !rsp->gp_state; /* GP_IDLE */ +} + +extern void rcu_sync_init(struct rcu_sync *, enum rcu_sync_type); +extern void rcu_sync_enter(struct rcu_sync *); +extern void rcu_sync_exit(struct rcu_sync *); +extern void rcu_sync_dtor(struct rcu_sync *); + +#define __RCU_SYNC_INITIALIZER(name, type) { \ + .gp_state = 0, \ + .gp_count = 0, \ + .gp_wait = __WAIT_QUEUE_HEAD_INITIALIZER(name.gp_wait), \ + .cb_state = 0, \ + .gp_type = type, \ + } + +#define __DEFINE_RCU_SYNC(name, type) \ + struct rcu_sync_struct name = __RCU_SYNC_INITIALIZER(name, type) + +#define DEFINE_RCU_SYNC(name) \ + __DEFINE_RCU_SYNC(name, RCU_SYNC) + +#define DEFINE_RCU_SCHED_SYNC(name) \ + __DEFINE_RCU_SYNC(name, RCU_SCHED_SYNC) + +#define DEFINE_RCU_BH_SYNC(name) \ + __DEFINE_RCU_SYNC(name, RCU_BH_SYNC) + +#endif /* _LINUX_RCU_SYNC_H_ */ diff --git a/include/linux/rculist.h b/include/linux/rculist.h index 17c6b1f84a77..5ed540986019 100644 --- a/include/linux/rculist.h +++ b/include/linux/rculist.h @@ -247,10 +247,7 @@ static inline void list_splice_init_rcu(struct list_head *list, * primitives such as list_add_rcu() as long as it's guarded by rcu_read_lock(). */ #define list_entry_rcu(ptr, type, member) \ -({ \ - typeof(*ptr) __rcu *__ptr = (typeof(*ptr) __rcu __force *)ptr; \ - container_of((typeof(ptr))rcu_dereference_raw(__ptr), type, member); \ -}) + container_of(lockless_dereference(ptr), type, member) /** * Where are list_empty_rcu() and list_first_entry_rcu()? diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h index ff476515f716..a0189ba67fde 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h @@ -160,7 +160,7 @@ void do_trace_rcu_torture_read(const char *rcutorturename, * more than one CPU). */ void call_rcu(struct rcu_head *head, - void (*func)(struct rcu_head *head)); + rcu_callback_t func); #else /* #ifdef CONFIG_PREEMPT_RCU */ @@ -191,7 +191,7 @@ void call_rcu(struct rcu_head *head, * memory ordering guarantees. */ void call_rcu_bh(struct rcu_head *head, - void (*func)(struct rcu_head *head)); + rcu_callback_t func); /** * call_rcu_sched() - Queue an RCU for invocation after sched grace period. @@ -213,7 +213,7 @@ void call_rcu_bh(struct rcu_head *head, * memory ordering guarantees. */ void call_rcu_sched(struct rcu_head *head, - void (*func)(struct rcu_head *rcu)); + rcu_callback_t func); void synchronize_sched(void); @@ -230,12 +230,11 @@ void __wait_rcu_gp(bool checktiny, int n, call_rcu_func_t *crcu_array, struct rcu_synchronize *rs_array); #define _wait_rcu_gp(checktiny, ...) \ -do { \ - call_rcu_func_t __crcu_array[] = { __VA_ARGS__ }; \ - const int __n = ARRAY_SIZE(__crcu_array); \ - struct rcu_synchronize __rs_array[__n]; \ - \ - __wait_rcu_gp(checktiny, __n, __crcu_array, __rs_array); \ +do { \ + call_rcu_func_t __crcu_array[] = { __VA_ARGS__ }; \ + struct rcu_synchronize __rs_array[ARRAY_SIZE(__crcu_array)]; \ + __wait_rcu_gp(checktiny, ARRAY_SIZE(__crcu_array), \ + __crcu_array, __rs_array); \ } while (0) #define wait_rcu_gp(...) _wait_rcu_gp(false, __VA_ARGS__) @@ -275,7 +274,7 @@ do { \ * See the description of call_rcu() for more detailed information on * memory ordering guarantees. 
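The signature changes in this hunk replace the open-coded callback type with rcu_callback_t, i.e. void (*)(struct rcu_head *). A minimal sketch of a callback with that shape (structure and names are hypothetical):

struct example_node {
	struct rcu_head rcu;
	int payload;
};

static void example_node_free(struct rcu_head *head)	/* matches rcu_callback_t */
{
	kfree(container_of(head, struct example_node, rcu));
}

static void example_node_release(struct example_node *node)
{
	call_rcu(&node->rcu, example_node_free);	/* freed after a grace period */
}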
*/ -void call_rcu_tasks(struct rcu_head *head, void (*func)(struct rcu_head *head)); +void call_rcu_tasks(struct rcu_head *head, rcu_callback_t func); void synchronize_rcu_tasks(void); void rcu_barrier_tasks(void); @@ -298,12 +297,14 @@ void synchronize_rcu(void); static inline void __rcu_read_lock(void) { - preempt_disable(); + if (IS_ENABLED(CONFIG_PREEMPT_COUNT)) + preempt_disable(); } static inline void __rcu_read_unlock(void) { - preempt_enable(); + if (IS_ENABLED(CONFIG_PREEMPT_COUNT)) + preempt_enable(); } static inline void synchronize_rcu(void) @@ -536,29 +537,9 @@ static inline int rcu_read_lock_sched_held(void) #endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */ -/* Deprecate rcu_lockdep_assert(): Use RCU_LOCKDEP_WARN() instead. */ -static inline void __attribute((deprecated)) deprecate_rcu_lockdep_assert(void) -{ -} - #ifdef CONFIG_PROVE_RCU /** - * rcu_lockdep_assert - emit lockdep splat if specified condition not met - * @c: condition to check - * @s: informative message - */ -#define rcu_lockdep_assert(c, s) \ - do { \ - static bool __section(.data.unlikely) __warned; \ - deprecate_rcu_lockdep_assert(); \ - if (debug_lockdep_rcu_enabled() && !__warned && !(c)) { \ - __warned = true; \ - lockdep_rcu_suspicious(__FILE__, __LINE__, s); \ - } \ - } while (0) - -/** * RCU_LOCKDEP_WARN - emit lockdep splat if specified condition is met * @c: condition to check * @s: informative message @@ -595,7 +576,6 @@ static inline void rcu_preempt_sleep_check(void) #else /* #ifdef CONFIG_PROVE_RCU */ -#define rcu_lockdep_assert(c, s) deprecate_rcu_lockdep_assert() #define RCU_LOCKDEP_WARN(c, s) do { } while (0) #define rcu_sleep_check() do { } while (0) @@ -812,6 +792,28 @@ static inline void rcu_preempt_sleep_check(void) #define rcu_dereference_sched(p) rcu_dereference_sched_check(p, 0) /** + * rcu_pointer_handoff() - Hand off a pointer from RCU to other mechanism + * @p: The pointer to hand off + * + * This is simply an identity function, but it documents where a pointer + * is handed off from RCU to some other synchronization mechanism, for + * example, reference counting or locking. In C11, it would map to + * kill_dependency(). It could be used as follows: + * + * rcu_read_lock(); + * p = rcu_dereference(gp); + * long_lived = is_long_lived(p); + * if (long_lived) { + * if (!atomic_inc_not_zero(p->refcnt)) + * long_lived = false; + * else + * p = rcu_pointer_handoff(p); + * } + * rcu_read_unlock(); + */ +#define rcu_pointer_handoff(p) (p) + +/** * rcu_read_lock() - mark the beginning of an RCU read-side critical section * * When synchronize_rcu() is invoked on one CPU while other CPUs @@ -1066,7 +1068,7 @@ static inline notrace void rcu_read_unlock_sched_notrace(void) #define __kfree_rcu(head, offset) \ do { \ BUILD_BUG_ON(!__is_kfree_rcu_offset(offset)); \ - kfree_call_rcu(head, (void (*)(struct rcu_head *))(unsigned long)(offset)); \ + kfree_call_rcu(head, (rcu_callback_t)(unsigned long)(offset)); \ } while (0) /** diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h index ff968b7af3a4..4c1aaf9cce7b 100644 --- a/include/linux/rcutiny.h +++ b/include/linux/rcutiny.h @@ -83,7 +83,7 @@ static inline void synchronize_sched_expedited(void) } static inline void kfree_call_rcu(struct rcu_head *head, - void (*func)(struct rcu_head *rcu)) + rcu_callback_t func) { call_rcu(head, func); } @@ -216,6 +216,7 @@ static inline bool rcu_is_watching(void) static inline void rcu_all_qs(void) { + barrier(); /* Avoid RCU read-side critical sections leaking across. 
*/ } #endif /* __LINUX_RCUTINY_H */ diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h index 5abec82f325e..60d15a080d7c 100644 --- a/include/linux/rcutree.h +++ b/include/linux/rcutree.h @@ -48,7 +48,7 @@ void synchronize_rcu_bh(void); void synchronize_sched_expedited(void); void synchronize_rcu_expedited(void); -void kfree_call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu)); +void kfree_call_rcu(struct rcu_head *head, rcu_callback_t func); /** * synchronize_rcu_bh_expedited - Brute-force RCU-bh grace period diff --git a/include/linux/regmap.h b/include/linux/regmap.h index 8fc0bfd8edc4..d68bb402120e 100644 --- a/include/linux/regmap.h +++ b/include/linux/regmap.h @@ -296,6 +296,8 @@ typedef int (*regmap_hw_reg_read)(void *context, unsigned int reg, unsigned int *val); typedef int (*regmap_hw_reg_write)(void *context, unsigned int reg, unsigned int val); +typedef int (*regmap_hw_reg_update_bits)(void *context, unsigned int reg, + unsigned int mask, unsigned int val); typedef struct regmap_async *(*regmap_hw_async_alloc)(void); typedef void (*regmap_hw_free_context)(void *context); @@ -335,6 +337,7 @@ struct regmap_bus { regmap_hw_gather_write gather_write; regmap_hw_async_write async_write; regmap_hw_reg_write reg_write; + regmap_hw_reg_update_bits reg_update_bits; regmap_hw_read read; regmap_hw_reg_read reg_read; regmap_hw_free_context free_context; @@ -791,6 +794,9 @@ struct regmap_irq { unsigned int mask; }; +#define REGMAP_IRQ_REG(_irq, _off, _mask) \ + [_irq] = { .reg_offset = (_off), .mask = (_mask) } + /** * Description of a generic regmap irq_chip. This is not intended to * handle every possible interrupt controller, but it should handle a @@ -800,6 +806,8 @@ struct regmap_irq { * * @status_base: Base status register address. * @mask_base: Base mask register address. + * @unmask_base: Base unmask register address. for chips who have + * separate mask and unmask registers * @ack_base: Base ack address. If zero then the chip is clear on read. * Using zero value is possible with @use_ack bit. * @wake_base: Base address for wake enables. If zero unsupported. @@ -807,6 +815,7 @@ struct regmap_irq { * @init_ack_masked: Ack all masked interrupts once during initalization. * @mask_invert: Inverted mask register: cleared bits are masked out. * @use_ack: Use @ack register even if it is zero. + * @ack_invert: Inverted ack register: cleared bits for ack. * @wake_invert: Inverted wake register: cleared bits are wake enabled. * @runtime_pm: Hold a runtime PM lock on the device when accessing it. * @@ -820,12 +829,14 @@ struct regmap_irq_chip { unsigned int status_base; unsigned int mask_base; + unsigned int unmask_base; unsigned int ack_base; unsigned int wake_base; unsigned int irq_reg_stride; bool init_ack_masked:1; bool mask_invert:1; bool use_ack:1; + bool ack_invert:1; bool wake_invert:1; bool runtime_pm:1; diff --git a/include/linux/regulator/driver.h b/include/linux/regulator/driver.h index 45932228cbf5..9c2903e58adb 100644 --- a/include/linux/regulator/driver.h +++ b/include/linux/regulator/driver.h @@ -245,6 +245,7 @@ enum regulator_type { * @linear_min_sel: Minimal selector for starting linear mapping * @fixed_uV: Fixed voltage of rails. * @ramp_delay: Time to settle down after voltage change (unit: uV/us) + * @min_dropout_uV: The minimum dropout voltage this regulator can handle * @linear_ranges: A constant table of possible voltage ranges. * @n_linear_ranges: Number of entries in the @linear_ranges table. 
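Referring back to the REGMAP_IRQ_REG() helper added to regmap.h above, a minimal sketch of an irq descriptor table built with it; the register offsets and bits are made up for illustration:

static const struct regmap_irq example_irqs[] = {
	REGMAP_IRQ_REG(0, 0, BIT(0)),	/* irq 0: status/mask register 0, bit 0 */
	REGMAP_IRQ_REG(1, 0, BIT(1)),	/* irq 1: status/mask register 0, bit 1 */
};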
* @volt_table: Voltage mapping table (if table based mapping) @@ -292,6 +293,7 @@ struct regulator_desc { unsigned int linear_min_sel; int fixed_uV; unsigned int ramp_delay; + int min_dropout_uV; const struct regulator_linear_range *linear_ranges; int n_linear_ranges; diff --git a/include/linux/rtnetlink.h b/include/linux/rtnetlink.h index 39adaa9529eb..4be5048b1fbe 100644 --- a/include/linux/rtnetlink.h +++ b/include/linux/rtnetlink.h @@ -33,11 +33,11 @@ extern wait_queue_head_t netdev_unregistering_wq; extern struct mutex net_mutex; #ifdef CONFIG_PROVE_LOCKING -extern int lockdep_rtnl_is_held(void); +extern bool lockdep_rtnl_is_held(void); #else -static inline int lockdep_rtnl_is_held(void) +static inline bool lockdep_rtnl_is_held(void) { - return 1; + return true; } #endif /* #ifdef CONFIG_PROVE_LOCKING */ diff --git a/include/linux/sched.h b/include/linux/sched.h index a4ab9daa387c..eeb5066a44fb 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -384,6 +384,7 @@ extern int proc_dowatchdog_thresh(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos); extern unsigned int softlockup_panic; +extern unsigned int hardlockup_panic; void lockup_detector_init(void); #else static inline void touch_softlockup_watchdog(void) @@ -599,33 +600,42 @@ struct task_cputime_atomic { .sum_exec_runtime = ATOMIC64_INIT(0), \ } -#ifdef CONFIG_PREEMPT_COUNT -#define PREEMPT_DISABLED (1 + PREEMPT_ENABLED) -#else -#define PREEMPT_DISABLED PREEMPT_ENABLED -#endif +#define PREEMPT_DISABLED (PREEMPT_DISABLE_OFFSET + PREEMPT_ENABLED) /* - * Disable preemption until the scheduler is running. - * Reset by start_kernel()->sched_init()->init_idle(). + * Disable preemption until the scheduler is running -- use an unconditional + * value so that it also works on !PREEMPT_COUNT kernels. + * + * Reset by start_kernel()->sched_init()->init_idle()->init_idle_preempt_count(). + */ +#define INIT_PREEMPT_COUNT PREEMPT_OFFSET + +/* + * Initial preempt_count value; reflects the preempt_count schedule invariant + * which states that during context switches: + * + * preempt_count() == 2*PREEMPT_DISABLE_OFFSET * - * We include PREEMPT_ACTIVE to avoid cond_resched() from working - * before the scheduler is active -- see should_resched(). + * Note: PREEMPT_DISABLE_OFFSET is 0 for !PREEMPT_COUNT kernels. + * Note: See finish_task_switch(). */ -#define INIT_PREEMPT_COUNT (PREEMPT_DISABLED + PREEMPT_ACTIVE) +#define FORK_PREEMPT_COUNT (2*PREEMPT_DISABLE_OFFSET + PREEMPT_ENABLED) /** * struct thread_group_cputimer - thread group interval timer counts * @cputime_atomic: atomic thread group interval timers. - * @running: non-zero when there are timers running and - * @cputime receives updates. + * @running: true when there are timers running and + * @cputime_atomic receives updates. + * @checking_timer: true when a thread in the group is in the + * process of checking for thread group timers. * * This structure contains the version of task_cputime, above, that is * used for thread group CPU timer calculations. 
*/ struct thread_group_cputimer { struct task_cputime_atomic cputime_atomic; - int running; + bool running; + bool checking_timer; }; #include <linux/rwsem.h> @@ -828,7 +838,7 @@ struct user_struct { struct hlist_node uidhash_node; kuid_t uid; -#ifdef CONFIG_PERF_EVENTS +#if defined(CONFIG_PERF_EVENTS) || defined(CONFIG_BPF_SYSCALL) atomic_long_t locked_vm; #endif }; @@ -1127,8 +1137,6 @@ struct sched_domain_topology_level { #endif }; -extern struct sched_domain_topology_level *sched_domain_topology; - extern void set_sched_topology(struct sched_domain_topology_level *tl); extern void wake_up_if_idle(int cpu); @@ -1177,10 +1185,10 @@ struct load_weight { /* * The load_avg/util_avg accumulates an infinite geometric series. - * 1) load_avg factors the amount of time that a sched_entity is - * runnable on a rq into its weight. For cfs_rq, it is the aggregated - * such weights of all runnable and blocked sched_entities. - * 2) util_avg factors frequency scaling into the amount of time + * 1) load_avg factors frequency scaling into the amount of time that a + * sched_entity is runnable on a rq into its weight. For cfs_rq, it is the + * aggregated such weights of all runnable and blocked sched_entities. + * 2) util_avg factors frequency and cpu scaling into the amount of time * that a sched_entity is running on a CPU, in the range [0..SCHED_LOAD_SCALE]. * For cfs_rq, it is the aggregated such times of all runnable and * blocked sched_entities. @@ -1330,10 +1338,12 @@ struct sched_dl_entity { union rcu_special { struct { - bool blocked; - bool need_qs; - } b; - short s; + u8 blocked; + u8 need_qs; + u8 exp_need_qs; + u8 pad; /* Otherwise the compiler can store garbage here. */ + } b; /* Bits. */ + u32 s; /* Set of bits. */ }; struct rcu_node; @@ -1451,7 +1461,9 @@ struct task_struct { unsigned sched_reset_on_fork:1; unsigned sched_contributes_to_load:1; unsigned sched_migrated:1; - +#ifdef CONFIG_MEMCG + unsigned memcg_may_oom:1; +#endif #ifdef CONFIG_MEMCG_KMEM unsigned memcg_kmem_skip_account:1; #endif @@ -1782,12 +1794,12 @@ struct task_struct { unsigned long trace_recursion; #endif /* CONFIG_TRACING */ #ifdef CONFIG_MEMCG - struct memcg_oom_info { - struct mem_cgroup *memcg; - gfp_t gfp_mask; - int order; - unsigned int may_oom:1; - } memcg_oom; + struct mem_cgroup *memcg_in_oom; + gfp_t memcg_oom_gfp_mask; + int memcg_oom_order; + + /* number of pages to reclaim on returning to userland */ + unsigned int memcg_nr_pages_over_high; #endif #ifdef CONFIG_UPROBES struct uprobe_task *utask; diff --git a/include/linux/sched/deadline.h b/include/linux/sched/deadline.h index 9d303b8847df..9089a2ae913d 100644 --- a/include/linux/sched/deadline.h +++ b/include/linux/sched/deadline.h @@ -21,4 +21,9 @@ static inline int dl_task(struct task_struct *p) return dl_prio(p->prio); } +static inline bool dl_time_before(u64 a, u64 b) +{ + return (s64)(a - b) < 0; +} + #endif /* _SCHED_DEADLINE_H */ diff --git a/include/linux/scif.h b/include/linux/scif.h index 44f4f3898bbe..49a35d6edc94 100644 --- a/include/linux/scif.h +++ b/include/linux/scif.h @@ -55,6 +55,7 @@ #include <linux/types.h> #include <linux/poll.h> +#include <linux/device.h> #include <linux/scif_ioctl.h> #define SCIF_ACCEPT_SYNC 1 @@ -92,6 +93,70 @@ enum { #define SCIF_PORT_RSVD 1088 typedef struct scif_endpt *scif_epd_t; +typedef struct scif_pinned_pages *scif_pinned_pages_t; + +/** + * struct scif_range - SCIF registered range used in kernel mode + * @cookie: cookie used internally by SCIF + * @nr_pages: number of pages of PAGE_SIZE + * 
@prot_flags: R/W protection + * @phys_addr: Array of bus addresses + * @va: Array of kernel virtual addresses backed by the pages in the phys_addr + * array. The va is populated only when called on the host for a remote + * SCIF connection on MIC. This is required to support the use case of DMA + * between MIC and another device which is not a SCIF node e.g., an IB or + * ethernet NIC. + */ +struct scif_range { + void *cookie; + int nr_pages; + int prot_flags; + dma_addr_t *phys_addr; + void __iomem **va; +}; + +/** + * struct scif_pollepd - SCIF endpoint to be monitored via scif_poll + * @epd: SCIF endpoint + * @events: requested events + * @revents: returned events + */ +struct scif_pollepd { + scif_epd_t epd; + short events; + short revents; +}; + +/** + * scif_peer_dev - representation of a peer SCIF device + * + * Peer devices show up as PCIe devices for the mgmt node but not the cards. + * The mgmt node discovers all the cards on the PCIe bus and informs the other + * cards about their peers. Upon notification of a peer a node adds a peer + * device to the peer bus to maintain symmetry in the way devices are + * discovered across all nodes in the SCIF network. + * + * @dev: underlying device + * @dnode - The destination node which this device will communicate with. + */ +struct scif_peer_dev { + struct device dev; + u8 dnode; +}; + +/** + * scif_client - representation of a SCIF client + * @name: client name + * @probe - client method called when a peer device is registered + * @remove - client method called when a peer device is unregistered + * @si - subsys_interface used internally for implementing SCIF clients + */ +struct scif_client { + const char *name; + void (*probe)(struct scif_peer_dev *spdev); + void (*remove)(struct scif_peer_dev *spdev); + struct subsys_interface si; +}; #define SCIF_OPEN_FAILED ((scif_epd_t)-1) #define SCIF_REGISTER_FAILED ((off_t)-1) @@ -345,7 +410,6 @@ int scif_close(scif_epd_t epd); * Errors: * EBADF, ENOTTY - epd is not a valid endpoint descriptor * ECONNRESET - Connection reset by peer - * EFAULT - An invalid address was specified for a parameter * EINVAL - flags is invalid, or len is negative * ENODEV - The remote node is lost or existed, but is not currently in the * network since it may have crashed @@ -398,7 +462,6 @@ int scif_send(scif_epd_t epd, void *msg, int len, int flags); * EAGAIN - The destination node is returning from a low power state * EBADF, ENOTTY - epd is not a valid endpoint descriptor * ECONNRESET - Connection reset by peer - * EFAULT - An invalid address was specified for a parameter * EINVAL - flags is invalid, or len is negative * ENODEV - The remote node is lost or existed, but is not currently in the * network since it may have crashed @@ -461,9 +524,6 @@ int scif_recv(scif_epd_t epd, void *msg, int len, int flags); * SCIF_PROT_READ - allow read operations from the window * SCIF_PROT_WRITE - allow write operations to the window * - * The map_flags argument can be set to SCIF_MAP_FIXED which interprets a - * fixed offset. 
- * * Return: * Upon successful completion, scif_register() returns the offset at which the * mapping was placed (po); otherwise in user mode SCIF_REGISTER_FAILED (that @@ -476,7 +536,6 @@ int scif_recv(scif_epd_t epd, void *msg, int len, int flags); * EAGAIN - The mapping could not be performed due to lack of resources * EBADF, ENOTTY - epd is not a valid endpoint descriptor * ECONNRESET - Connection reset by peer - * EFAULT - Addresses in the range [addr, addr + len - 1] are invalid * EINVAL - map_flags is invalid, or prot_flags is invalid, or SCIF_MAP_FIXED is * set in flags, and offset is not a multiple of the page size, or addr is not a * multiple of the page size, or len is not a multiple of the page size, or is @@ -759,7 +818,6 @@ int scif_writeto(scif_epd_t epd, off_t loffset, size_t len, off_t * EACCESS - Attempt to write to a read-only range * EBADF, ENOTTY - epd is not a valid endpoint descriptor * ECONNRESET - Connection reset by peer - * EFAULT - Addresses in the range [addr, addr + len - 1] are invalid * EINVAL - rma_flags is invalid * ENODEV - The remote node is lost or existed, but is not currently in the * network since it may have crashed @@ -840,7 +898,6 @@ int scif_vreadfrom(scif_epd_t epd, void *addr, size_t len, off_t roffset, * EACCESS - Attempt to write to a read-only range * EBADF, ENOTTY - epd is not a valid endpoint descriptor * ECONNRESET - Connection reset by peer - * EFAULT - Addresses in the range [addr, addr + len - 1] are invalid * EINVAL - rma_flags is invalid * ENODEV - The remote node is lost or existed, but is not currently in the * network since it may have crashed @@ -984,10 +1041,299 @@ int scif_fence_signal(scif_epd_t epd, off_t loff, u64 lval, off_t roff, * online nodes in the SCIF network including 'self'; otherwise in user mode * -1 is returned and errno is set to indicate the error; in kernel mode no * errors are returned. + */ +int scif_get_node_ids(u16 *nodes, int len, u16 *self); + +/** + * scif_pin_pages() - Pin a set of pages + * @addr: Virtual address of range to pin + * @len: Length of range to pin + * @prot_flags: Page protection flags + * @map_flags: Page classification flags + * @pinned_pages: Handle to pinned pages + * + * scif_pin_pages() pins (locks in physical memory) the physical pages which + * back the range of virtual address pages starting at addr and continuing for + * len bytes. addr and len are constrained to be multiples of the page size. A + * successful scif_pin_pages() call returns a handle to pinned_pages which may + * be used in subsequent calls to scif_register_pinned_pages(). + * + * The pages will remain pinned as long as there is a reference against the + * scif_pinned_pages_t value returned by scif_pin_pages() and until + * scif_unpin_pages() is called, passing the scif_pinned_pages_t value. A + * reference is added to a scif_pinned_pages_t value each time a window is + * created by calling scif_register_pinned_pages() and passing the + * scif_pinned_pages_t value. A reference is removed from a + * scif_pinned_pages_t value each time such a window is deleted. + * + * Subsequent operations which change the memory pages to which virtual + * addresses are mapped (such as mmap(), munmap()) have no effect on the + * scif_pinned_pages_t value or windows created against it. + * + * If the process will fork(), it is recommended that the registered + * virtual address range be marked with MADV_DONTFORK. Doing so will prevent + * problems due to copy-on-write semantics. 
+ * + * The prot_flags argument is formed by OR'ing together one or more of the + * following values. + * SCIF_PROT_READ - allow read operations against the pages + * SCIF_PROT_WRITE - allow write operations against the pages + * The map_flags argument can be set as SCIF_MAP_KERNEL to interpret addr as a + * kernel space address. By default, addr is interpreted as a user space + * address. + * + * Return: + * Upon successful completion, scif_pin_pages() returns 0; otherwise the + * negative of one of the following errors is returned. * * Errors: - * EFAULT - Bad address + * EINVAL - prot_flags is invalid, map_flags is invalid, or offset is negative + * ENOMEM - Not enough space */ -int scif_get_node_ids(u16 *nodes, int len, u16 *self); +int scif_pin_pages(void *addr, size_t len, int prot_flags, int map_flags, + scif_pinned_pages_t *pinned_pages); + +/** + * scif_unpin_pages() - Unpin a set of pages + * @pinned_pages: Handle to pinned pages to be unpinned + * + * scif_unpin_pages() prevents scif_register_pinned_pages() from registering new + * windows against pinned_pages. The physical pages represented by pinned_pages + * will remain pinned until all windows previously registered against + * pinned_pages are deleted (the window is scif_unregister()'d and all + * references to the window are removed (see scif_unregister()). + * + * pinned_pages must have been obtain from a previous call to scif_pin_pages(). + * After calling scif_unpin_pages(), it is an error to pass pinned_pages to + * scif_register_pinned_pages(). + * + * Return: + * Upon successful completion, scif_unpin_pages() returns 0; otherwise the + * negative of one of the following errors is returned. + * + * Errors: + * EINVAL - pinned_pages is not valid + */ +int scif_unpin_pages(scif_pinned_pages_t pinned_pages); + +/** + * scif_register_pinned_pages() - Mark a memory region for remote access. + * @epd: endpoint descriptor + * @pinned_pages: Handle to pinned pages + * @offset: Registered address space offset + * @map_flags: Flags which control where pages are mapped + * + * The scif_register_pinned_pages() function opens a window, a range of whole + * pages of the registered address space of the endpoint epd, starting at + * offset po. The value of po, further described below, is a function of the + * parameters offset and pinned_pages, and the value of map_flags. Each page of + * the window represents a corresponding physical memory page of the range + * represented by pinned_pages; the length of the window is the same as the + * length of range represented by pinned_pages. A successful + * scif_register_pinned_pages() call returns po as the return value. + * + * When SCIF_MAP_FIXED is set in the map_flags argument, po will be offset + * exactly, and offset is constrained to be a multiple of the page size. The + * mapping established by scif_register_pinned_pages() will not replace any + * existing registration; an error is returned if any page of the new window + * would intersect an existing window. + * + * When SCIF_MAP_FIXED is not set, the implementation uses offset in an + * implementation-defined manner to arrive at po. The po so chosen will be an + * area of the registered address space that the implementation deems suitable + * for a mapping of the required size. An offset value of 0 is interpreted as + * granting the implementation complete freedom in selecting po, subject to + * constraints described below. 
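For readers new to this kernel-mode API, here is a minimal, illustrative sketch of the pin-then-register flow documented above; it is not part of the patch. It assumes an already connected endpoint and a page-aligned, page-multiple kernel buffer, and the helper name expose_buffer() is made up; error handling is abbreviated.

#include <linux/scif.h>

/* Hypothetical helper: pin a page-aligned kernel buffer and register it
 * with the peer of a connected endpoint, letting SCIF choose the offset.
 */
static off_t expose_buffer(scif_epd_t epd, void *buf, size_t len)
{
        scif_pinned_pages_t pages;
        off_t po;
        int err;

        /* SCIF_MAP_KERNEL: buf is a kernel virtual address. */
        err = scif_pin_pages(buf, len, SCIF_PROT_READ | SCIF_PROT_WRITE,
                             SCIF_MAP_KERNEL, &pages);
        if (err)
                return err;

        /* offset 0 without SCIF_MAP_FIXED: the implementation picks po. */
        po = scif_register_pinned_pages(epd, pages, 0, 0);
        if (po < 0)
                scif_unpin_pages(pages);        /* drop the only reference */

        return po;
}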
A non-zero value of offset is taken to be a + * suggestion of an offset near which the mapping should be placed. When the + * implementation selects a value for po, it does not replace any extant + * window. In all cases, po will be a multiple of the page size. + * + * The physical pages which are so represented by a window are available for + * access in calls to scif_get_pages(), scif_readfrom(), scif_writeto(), + * scif_vreadfrom(), and scif_vwriteto(). While a window is registered, the + * physical pages represented by the window will not be reused by the memory + * subsystem for any other purpose. Note that the same physical page may be + * represented by multiple windows. + * + * Windows created by scif_register_pinned_pages() are unregistered by + * scif_unregister(). + * + * The map_flags argument can be set to SCIF_MAP_FIXED which interprets a + * fixed offset. + * + * Return: + * Upon successful completion, scif_register_pinned_pages() returns the offset + * at which the mapping was placed (po); otherwise the negative of one of the + * following errors is returned. + * + * Errors: + * EADDRINUSE - SCIF_MAP_FIXED is set in map_flags and pages in the new window + * would intersect an existing window + * EAGAIN - The mapping could not be performed due to lack of resources + * ECONNRESET - Connection reset by peer + * EINVAL - map_flags is invalid, or SCIF_MAP_FIXED is set in map_flags, and + * offset is not a multiple of the page size, or offset is negative + * ENODEV - The remote node is lost or existed, but is not currently in the + * network since it may have crashed + * ENOMEM - Not enough space + * ENOTCONN - The endpoint is not connected + */ +off_t scif_register_pinned_pages(scif_epd_t epd, + scif_pinned_pages_t pinned_pages, + off_t offset, int map_flags); + +/** + * scif_get_pages() - Add references to remote registered pages + * @epd: endpoint descriptor + * @offset: remote registered offset + * @len: length of range of pages + * @pages: returned scif_range structure + * + * scif_get_pages() returns the addresses of the physical pages represented by + * those pages of the registered address space of the peer of epd, starting at + * offset and continuing for len bytes. offset and len are constrained to be + * multiples of the page size. + * + * All of the pages in the specified range [offset, offset + len - 1] must be + * within a single window of the registered address space of the peer of epd. + * + * The addresses are returned as a virtually contiguous array pointed to by the + * phys_addr component of the scif_range structure whose address is returned in + * pages. The nr_pages component of scif_range is the length of the array. The + * prot_flags component of scif_range holds the protection flag value passed + * when the pages were registered. + * + * Each physical page whose address is returned by scif_get_pages() remains + * available and will not be released for reuse until the scif_range structure + * is returned in a call to scif_put_pages(). The scif_range structure returned + * by scif_get_pages() must be unmodified. + * + * It is an error to call scif_close() on an endpoint on which a scif_range + * structure of that endpoint has not been returned to scif_put_pages(). + * + * Return: + * Upon successful completion, scif_get_pages() returns 0; otherwise the + * negative of one of the following errors is returned. + * Errors: + * ECONNRESET - Connection reset by peer. 
+ * EINVAL - offset is not a multiple of the page size, or offset is negative, or + * len is not a multiple of the page size + * ENODEV - The remote node is lost or existed, but is not currently in the + * network since it may have crashed + * ENOTCONN - The endpoint is not connected + * ENXIO - Offsets in the range [offset, offset + len - 1] are invalid + * for the registered address space of the peer epd + */ +int scif_get_pages(scif_epd_t epd, off_t offset, size_t len, + struct scif_range **pages); + +/** + * scif_put_pages() - Remove references from remote registered pages + * @pages: pages to be returned + * + * scif_put_pages() releases a scif_range structure previously obtained by + * calling scif_get_pages(). The physical pages represented by pages may + * be reused when the window which represented those pages is unregistered. + * Therefore, those pages must not be accessed after calling scif_put_pages(). + * + * Return: + * Upon successful completion, scif_put_pages() returns 0; otherwise the + * negative of one of the following errors is returned. + * Errors: + * EINVAL - pages does not point to a valid scif_range structure, or + * the scif_range structure pointed to by pages was already returned + * ENODEV - The remote node is lost or existed, but is not currently in the + * network since it may have crashed + * ENOTCONN - The endpoint is not connected + */ +int scif_put_pages(struct scif_range *pages); + +/** + * scif_poll() - Wait for some event on an endpoint + * @epds: Array of endpoint descriptors + * @nepds: Length of epds + * @timeout: Upper limit on time for which scif_poll() will block + * + * scif_poll() waits for one of a set of endpoints to become ready to perform + * an I/O operation. + * + * The epds argument specifies the endpoint descriptors to be examined and the + * events of interest for each endpoint descriptor. epds is a pointer to an + * array with one member for each open endpoint descriptor of interest. + * + * The number of items in the epds array is specified in nepds. The epd field + * of scif_pollepd is an endpoint descriptor of an open endpoint. The field + * events is a bitmask specifying the events which the application is + * interested in. The field revents is an output parameter, filled by the + * kernel with the events that actually occurred. The bits returned in revents + * can include any of those specified in events, or one of the values POLLERR, + * POLLHUP, or POLLNVAL. (These three bits are meaningless in the events + * field, and will be set in the revents field whenever the corresponding + * condition is true.) + * + * If none of the events requested (and no error) has occurred for any of the + * endpoint descriptors, then scif_poll() blocks until one of the events occurs. + * + * The timeout argument specifies an upper limit on the time for which + * scif_poll() will block, in milliseconds. Specifying a negative value in + * timeout means an infinite timeout. + * + * The following bits may be set in events and returned in revents. + * POLLIN - Data may be received without blocking. For a connected + * endpoint, this means that scif_recv() may be called without blocking. For a + * listening endpoint, this means that scif_accept() may be called without + * blocking. + * POLLOUT - Data may be sent without blocking. For a connected endpoint, this + * means that scif_send() may be called without blocking. POLLOUT may also be + * used to block waiting for a non-blocking connect to complete. 
This bit value + * has no meaning for a listening endpoint and is ignored if specified. + * + * The following bits are only returned in revents, and are ignored if set in + * events. + * POLLERR - An error occurred on the endpoint + * POLLHUP - The connection to the peer endpoint was disconnected + * POLLNVAL - The specified endpoint descriptor is invalid. + * + * Return: + * Upon successful completion, scif_poll() returns a non-negative value. A + * positive value indicates the total number of endpoint descriptors that have + * been selected (that is, endpoint descriptors for which the revents member is + * non-zero). A value of 0 indicates that the call timed out and no endpoint + * descriptors have been selected. Otherwise in user mode -1 is returned and + * errno is set to indicate the error; in kernel mode the negative of one of + * the following errors is returned. + * + * Errors: + * EINTR - A signal occurred before any requested event + * EINVAL - The nepds argument is greater than {OPEN_MAX} + * ENOMEM - There was no space to allocate file descriptor tables + */ +int scif_poll(struct scif_pollepd *epds, unsigned int nepds, long timeout); + +/** + * scif_client_register() - Register a SCIF client + * @client: client to be registered + * + * scif_client_register() registers a SCIF client. The probe() method + * of the client is called when SCIF peer devices come online and the + * remove() method is called when the peer devices disappear. + * + * Return: + * Upon successful completion, scif_client_register() returns a non-negative + * value. Otherwise the return value is the same as subsys_interface_register() + * in the kernel. + */ +int scif_client_register(struct scif_client *client); + +/** + * scif_client_unregister() - Unregister a SCIF client + * @client: client to be unregistered + * + * scif_client_unregister() unregisters a SCIF client. 
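As an illustration of the scif_poll() semantics described above (again, not part of the patch), a kernel-mode caller might wait for incoming data roughly as follows; the helper name wait_for_data() and the one-second timeout are arbitrary.

#include <linux/errno.h>
#include <linux/poll.h>
#include <linux/scif.h>

/* Hypothetical helper: block for up to one second until epd has data. */
static int wait_for_data(scif_epd_t epd)
{
        struct scif_pollepd pe = {
                .epd    = epd,
                .events = POLLIN,
        };
        int ret;

        ret = scif_poll(&pe, 1, 1000);  /* timeout in milliseconds */
        if (ret < 0)
                return ret;             /* kernel mode: negative error */
        if (ret == 0)
                return -ETIMEDOUT;      /* timed out, nothing selected */

        /* POLLERR/POLLHUP/POLLNVAL may be reported even if not requested. */
        return (pe.revents & POLLIN) ? 0 : -EIO;
}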
+ * + * Return: + * None + */ +void scif_client_unregister(struct scif_client *client); #endif /* __SCIF_H__ */ diff --git a/include/linux/seccomp.h b/include/linux/seccomp.h index f4265039a94c..2296e6b2f690 100644 --- a/include/linux/seccomp.h +++ b/include/linux/seccomp.h @@ -95,4 +95,15 @@ static inline void get_seccomp_filter(struct task_struct *tsk) return; } #endif /* CONFIG_SECCOMP_FILTER */ + +#if defined(CONFIG_SECCOMP_FILTER) && defined(CONFIG_CHECKPOINT_RESTORE) +extern long seccomp_get_filter(struct task_struct *task, + unsigned long filter_off, void __user *data); +#else +static inline long seccomp_get_filter(struct task_struct *task, + unsigned long n, void __user *data) +{ + return -EINVAL; +} +#endif /* CONFIG_SECCOMP_FILTER && CONFIG_CHECKPOINT_RESTORE */ #endif /* _LINUX_SECCOMP_H */ diff --git a/include/linux/security.h b/include/linux/security.h index 79d85ddf8093..2f4c1f7aa7db 100644 --- a/include/linux/security.h +++ b/include/linux/security.h @@ -946,7 +946,7 @@ static inline int security_task_prctl(int option, unsigned long arg2, unsigned long arg4, unsigned long arg5) { - return cap_task_prctl(option, arg2, arg3, arg3, arg5); + return cap_task_prctl(option, arg2, arg3, arg4, arg5); } static inline void security_task_to_inode(struct task_struct *p, struct inode *inode) diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 2738d355cdf9..24f4dfd94c51 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -179,6 +179,9 @@ struct nf_bridge_info { u8 bridged_dnat:1; __u16 frag_max_size; struct net_device *physindev; + + /* always valid & non-NULL from FORWARD on, for physdev match */ + struct net_device *physoutdev; union { /* prerouting: detect dnat in orig/reply direction */ __be32 ipv4_daddr; @@ -189,9 +192,6 @@ struct nf_bridge_info { * skb is out in neigh layer. 
*/ char neigh_header[8]; - - /* always valid & non-NULL from FORWARD on, for physdev match */ - struct net_device *physoutdev; }; }; #endif @@ -463,6 +463,15 @@ static inline u32 skb_mstamp_us_delta(const struct skb_mstamp *t1, return delta_us; } +static inline bool skb_mstamp_after(const struct skb_mstamp *t1, + const struct skb_mstamp *t0) +{ + s32 diff = t1->stamp_jiffies - t0->stamp_jiffies; + + if (!diff) + diff = t1->stamp_us - t0->stamp_us; + return diff > 0; +} /** * struct sk_buff - socket buffer @@ -2707,6 +2716,9 @@ static inline void skb_postpull_rcsum(struct sk_buff *skb, { if (skb->ip_summed == CHECKSUM_COMPLETE) skb->csum = csum_sub(skb->csum, csum_partial(start, len, 0)); + else if (skb->ip_summed == CHECKSUM_PARTIAL && + skb_checksum_start_offset(skb) < 0) + skb->ip_summed = CHECKSUM_NONE; } unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len); diff --git a/include/linux/slab.h b/include/linux/slab.h index 7e37d448ed91..7c82e3b307a3 100644 --- a/include/linux/slab.h +++ b/include/linux/slab.h @@ -111,7 +111,7 @@ struct mem_cgroup; * struct kmem_cache related prototypes */ void __init kmem_cache_init(void); -int slab_is_available(void); +bool slab_is_available(void); struct kmem_cache *kmem_cache_create(const char *, size_t, size_t, unsigned long, diff --git a/include/linux/smpboot.h b/include/linux/smpboot.h index e6109a6cd8f6..12910cf19869 100644 --- a/include/linux/smpboot.h +++ b/include/linux/smpboot.h @@ -24,9 +24,6 @@ struct smpboot_thread_data; * parked (cpu offline) * @unpark: Optional unpark function, called when the thread is * unparked (cpu online) - * @pre_unpark: Optional unpark function, called before the thread is - * unparked (cpu online). This is not guaranteed to be - * called on the target cpu of the thread. Careful! * @cpumask: Internal state. To update which threads are unparked, * call smpboot_update_cpumask_percpu_thread(). * @selfparking: Thread is not parked by the park function. @@ -42,7 +39,6 @@ struct smp_hotplug_thread { void (*cleanup)(unsigned int cpu, bool online); void (*park)(unsigned int cpu); void (*unpark)(unsigned int cpu); - void (*pre_unpark)(unsigned int cpu); cpumask_var_t cpumask; bool selfparking; const char *thread_comm; diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h index 269e8afd3e2a..cce80e6dc7d1 100644 --- a/include/linux/spi/spi.h +++ b/include/linux/spi/spi.h @@ -34,7 +34,7 @@ extern struct bus_type spi_bus_type; /** * struct spi_statistics - statistics for spi transfers - * @clock: lock protecting this structure + * @lock: lock protecting this structure * * @messages: number of spi-messages handled * @transfers: number of spi_transfers handled @@ -51,6 +51,8 @@ extern struct bus_type spi_bus_type; * @bytes_tx: number of bytes sent to device * @bytes_rx: number of bytes received from device * + * @transfer_bytes_histo: + * transfer bytes histogramm */ struct spi_statistics { spinlock_t lock; /* lock for the whole structure */ @@ -68,6 +70,8 @@ struct spi_statistics { unsigned long long bytes_rx; unsigned long long bytes_tx; +#define SPI_STATISTICS_HISTO_SIZE 17 + unsigned long transfer_bytes_histo[SPI_STATISTICS_HISTO_SIZE]; }; void spi_statistics_add_transfer_stats(struct spi_statistics *stats, @@ -250,7 +254,7 @@ static inline struct spi_driver *to_spi_driver(struct device_driver *drv) return drv ? 
container_of(drv, struct spi_driver, driver) : NULL; } -extern int spi_register_driver(struct spi_driver *sdrv); +extern int __spi_register_driver(struct module *owner, struct spi_driver *sdrv); /** * spi_unregister_driver - reverse effect of spi_register_driver @@ -263,6 +267,10 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv) driver_unregister(&sdrv->driver); } +/* use a define to avoid include chaining to get THIS_MODULE */ +#define spi_register_driver(driver) \ + __spi_register_driver(THIS_MODULE, driver) + /** * module_spi_driver() - Helper macro for registering a SPI driver * @__spi_driver: spi_driver struct @@ -843,8 +851,10 @@ extern int spi_bus_unlock(struct spi_master *master); * @len: data buffer size * Context: can sleep * - * This writes the buffer and returns zero or a negative error code. + * This function writes the buffer @buf. * Callable only from contexts that can sleep. + * + * Return: zero on success, else a negative error code. */ static inline int spi_write(struct spi_device *spi, const void *buf, size_t len) @@ -867,8 +877,10 @@ spi_write(struct spi_device *spi, const void *buf, size_t len) * @len: data buffer size * Context: can sleep * - * This reads the buffer and returns zero or a negative error code. + * This function reads the buffer @buf. * Callable only from contexts that can sleep. + * + * Return: zero on success, else a negative error code. */ static inline int spi_read(struct spi_device *spi, void *buf, size_t len) @@ -895,7 +907,7 @@ spi_read(struct spi_device *spi, void *buf, size_t len) * * For more specific semantics see spi_sync(). * - * It returns zero on success, else a negative error code. + * Return: zero on success, else a negative error code. */ static inline int spi_sync_transfer(struct spi_device *spi, struct spi_transfer *xfers, @@ -919,9 +931,10 @@ extern int spi_write_then_read(struct spi_device *spi, * @cmd: command to be written before data is read back * Context: can sleep * - * This returns the (unsigned) eight bit number returned by the - * device, or else a negative error code. Callable only from - * contexts that can sleep. + * Callable only from contexts that can sleep. + * + * Return: the (unsigned) eight bit number returned by the + * device, or else a negative error code. */ static inline ssize_t spi_w8r8(struct spi_device *spi, u8 cmd) { @@ -940,12 +953,13 @@ static inline ssize_t spi_w8r8(struct spi_device *spi, u8 cmd) * @cmd: command to be written before data is read back * Context: can sleep * - * This returns the (unsigned) sixteen bit number returned by the - * device, or else a negative error code. Callable only from - * contexts that can sleep. - * * The number is returned in wire-order, which is at least sometimes * big-endian. + * + * Callable only from contexts that can sleep. + * + * Return: the (unsigned) sixteen bit number returned by the + * device, or else a negative error code. */ static inline ssize_t spi_w8r16(struct spi_device *spi, u8 cmd) { @@ -964,13 +978,13 @@ static inline ssize_t spi_w8r16(struct spi_device *spi, u8 cmd) * @cmd: command to be written before data is read back * Context: can sleep * - * This returns the (unsigned) sixteen bit number returned by the device in cpu - * endianness, or else a negative error code. Callable only from contexts that - * can sleep. - * * This function is similar to spi_w8r16, with the exception that it will * convert the read 16 bit data word from big-endian to native endianness. * + * Callable only from contexts that can sleep.
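To show how the spi_register_driver() change above is consumed (purely as a sketch, not part of the patch): a typical driver keeps using module_spi_driver(), which now ends up passing THIS_MODULE to __spi_register_driver(). The device name "example-spi" and the probe logic below are invented for illustration.

#include <linux/module.h>
#include <linux/spi/spi.h>

static int example_probe(struct spi_device *spi)
{
        ssize_t val;

        /* spi_w8r8(): write one byte, read one byte back; may sleep. */
        val = spi_w8r8(spi, 0x0f);      /* 0x0f is a made-up register */
        if (val < 0)
                return val;

        dev_info(&spi->dev, "register 0x0f reads 0x%02x\n", (unsigned int)val);
        return 0;
}

static int example_remove(struct spi_device *spi)
{
        return 0;
}

static struct spi_driver example_driver = {
        .driver = {
                .name = "example-spi",
        },
        .probe  = example_probe,
        .remove = example_remove,
};

/* Registers/unregisters the driver on module load/unload; with this patch
 * the underlying spi_register_driver() macro supplies THIS_MODULE.
 */
module_spi_driver(example_driver);

MODULE_LICENSE("GPL");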
+ * + * Return: the (unsigned) sixteen bit number returned by the device in cpu + * endianness, or else a negative error code. */ static inline ssize_t spi_w8r16be(struct spi_device *spi, u8 cmd) diff --git a/include/linux/spi/spi_bitbang.h b/include/linux/spi/spi_bitbang.h index 85578d4be034..154788ed218c 100644 --- a/include/linux/spi/spi_bitbang.h +++ b/include/linux/spi/spi_bitbang.h @@ -4,7 +4,7 @@ #include <linux/workqueue.h> struct spi_bitbang { - spinlock_t lock; + struct mutex lock; u8 busy; u8 use_dma; u8 flags; /* extra spi->mode support */ diff --git a/include/linux/spmi.h b/include/linux/spmi.h index f84212cd3b7d..1396a255d2a2 100644 --- a/include/linux/spmi.h +++ b/include/linux/spmi.h @@ -153,7 +153,9 @@ static inline struct spmi_driver *to_spmi_driver(struct device_driver *d) return container_of(d, struct spmi_driver, driver); } -int spmi_driver_register(struct spmi_driver *sdrv); +#define spmi_driver_register(sdrv) \ + __spmi_driver_register(sdrv, THIS_MODULE) +int __spmi_driver_register(struct spmi_driver *sdrv, struct module *owner); /** * spmi_driver_unregister() - unregister an SPMI client driver diff --git a/include/linux/srcu.h b/include/linux/srcu.h index bdeb4567b71e..f5f80c5643ac 100644 --- a/include/linux/srcu.h +++ b/include/linux/srcu.h @@ -215,8 +215,11 @@ static inline int srcu_read_lock_held(struct srcu_struct *sp) */ static inline int srcu_read_lock(struct srcu_struct *sp) __acquires(sp) { - int retval = __srcu_read_lock(sp); + int retval; + preempt_disable(); + retval = __srcu_read_lock(sp); + preempt_enable(); rcu_lock_acquire(&(sp)->dep_map); return retval; } diff --git a/include/linux/stm.h b/include/linux/stm.h new file mode 100644 index 000000000000..9d0083d364e6 --- /dev/null +++ b/include/linux/stm.h @@ -0,0 +1,126 @@ +/* + * System Trace Module (STM) infrastructure apis + * Copyright (C) 2014 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+ */ + +#ifndef _STM_H_ +#define _STM_H_ + +#include <linux/device.h> + +/** + * enum stp_packet_type - STP packets that an STM driver sends + */ +enum stp_packet_type { + STP_PACKET_DATA = 0, + STP_PACKET_FLAG, + STP_PACKET_USER, + STP_PACKET_MERR, + STP_PACKET_GERR, + STP_PACKET_TRIG, + STP_PACKET_XSYNC, +}; + +/** + * enum stp_packet_flags - STP packet modifiers + */ +enum stp_packet_flags { + STP_PACKET_MARKED = 0x1, + STP_PACKET_TIMESTAMPED = 0x2, +}; + +struct stp_policy; + +struct stm_device; + +/** + * struct stm_data - STM device description and callbacks + * @name: device name + * @stm: internal structure, only used by stm class code + * @sw_start: first STP master available to software + * @sw_end: last STP master available to software + * @sw_nchannels: number of STP channels per master + * @sw_mmiosz: size of one channel's IO space, for mmap, optional + * @packet: callback that sends an STP packet + * @mmio_addr: mmap callback, optional + * @link: called when a new stm_source gets linked to us, optional + * @unlink: likewise for unlinking, again optional + * @set_options: set device-specific options on a channel + * + * Fill out this structure before calling stm_register_device() to create + * an STM device and stm_unregister_device() to destroy it. It will also be + * passed back to @packet(), @mmio_addr(), @link(), @unlink() and @set_options() + * callbacks. + * + * Normally, an STM device will have a range of masters available to software + * and the rest statically assigned to various hardware trace sources. + * The former is defined by the range [@sw_start..@sw_end] of the device + * description. That is, the lowest master that can be allocated to software + * writers is @sw_start and data from this writer will appear as the @sw_start + * master in the STP stream. + */ +struct stm_data { + const char *name; + struct stm_device *stm; + unsigned int sw_start; + unsigned int sw_end; + unsigned int sw_nchannels; + unsigned int sw_mmiosz; + ssize_t (*packet)(struct stm_data *, unsigned int, + unsigned int, unsigned int, + unsigned int, unsigned int, + const unsigned char *); + phys_addr_t (*mmio_addr)(struct stm_data *, unsigned int, + unsigned int, unsigned int); + int (*link)(struct stm_data *, unsigned int, + unsigned int); + void (*unlink)(struct stm_data *, unsigned int, + unsigned int); + long (*set_options)(struct stm_data *, unsigned int, + unsigned int, unsigned int, + unsigned long); +}; + +int stm_register_device(struct device *parent, struct stm_data *stm_data, + struct module *owner); +void stm_unregister_device(struct stm_data *stm_data); + +struct stm_source_device; + +/** + * struct stm_source_data - STM source device description and callbacks + * @name: device name, will be used for policy lookup + * @src: internal structure, only used by stm class code + * @nr_chans: number of channels to allocate + * @link: called when this source gets linked to an STM device + * @unlink: called when this source is about to get unlinked from its STM + * + * Fill in this structure before calling stm_source_register_device() to + * register a source device. Also pass it to unregister and write calls.
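As a non-authoritative usage sketch for the stm_source side (the structure and function declarations continue just below; none of this is part of the patch), a trace producer might register itself as follows. The names example_stm_source and example_trace(), the single channel, and the NULL parent device are all assumptions made for brevity.

#include <linux/module.h>
#include <linux/stm.h>

static struct stm_source_data example_stm_source = {
        .name     = "example",  /* used for policy lookup */
        .nr_chans = 1,          /* one channel is enough here */
};

/* Hypothetical trace hook: emit a text payload on channel 0. */
static void example_trace(const char *msg, size_t len)
{
        stm_source_write(&example_stm_source, 0, msg, len);
}

static int __init example_init(void)
{
        return stm_source_register_device(NULL, &example_stm_source);
}
module_init(example_init);

static void __exit example_exit(void)
{
        stm_source_unregister_device(&example_stm_source);
}
module_exit(example_exit);

MODULE_LICENSE("GPL");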
+ */ +struct stm_source_data { + const char *name; + struct stm_source_device *src; + unsigned int percpu; + unsigned int nr_chans; + int (*link)(struct stm_source_data *data); + void (*unlink)(struct stm_source_data *data); +}; + +int stm_source_register_device(struct device *parent, + struct stm_source_data *data); +void stm_source_unregister_device(struct stm_source_data *data); + +int stm_source_write(struct stm_source_data *data, unsigned int chan, + const char *buf, size_t count); + +#endif /* _STM_H_ */ diff --git a/include/linux/stop_machine.h b/include/linux/stop_machine.h index 414d924318ce..0adedca24c5b 100644 --- a/include/linux/stop_machine.h +++ b/include/linux/stop_machine.h @@ -33,6 +33,8 @@ void stop_one_cpu_nowait(unsigned int cpu, cpu_stop_fn_t fn, void *arg, struct cpu_stop_work *work_buf); int stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg); int try_stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg); +void stop_machine_park(int cpu); +void stop_machine_unpark(int cpu); #else /* CONFIG_SMP */ diff --git a/include/linux/string.h b/include/linux/string.h index a8d90db9c4b0..9ef7795e65e4 100644 --- a/include/linux/string.h +++ b/include/linux/string.h @@ -25,6 +25,9 @@ extern char * strncpy(char *,const char *, __kernel_size_t); #ifndef __HAVE_ARCH_STRLCPY size_t strlcpy(char *, const char *, size_t); #endif +#ifndef __HAVE_ARCH_STRSCPY +ssize_t __must_check strscpy(char *, const char *, size_t); +#endif #ifndef __HAVE_ARCH_STRCAT extern char * strcat(char *, const char *); #endif diff --git a/include/linux/sunrpc/xprtsock.h b/include/linux/sunrpc/xprtsock.h index 7591788e9fbf..357e44c1a46b 100644 --- a/include/linux/sunrpc/xprtsock.h +++ b/include/linux/sunrpc/xprtsock.h @@ -42,6 +42,7 @@ struct sock_xprt { /* * Connection of transports */ + unsigned long sock_state; struct delayed_work connect_worker; struct sockaddr_storage srcaddr; unsigned short srcport; @@ -76,6 +77,8 @@ struct sock_xprt { */ #define TCP_RPC_REPLY (1UL << 6) +#define XPRT_SOCK_CONNECTING 1U + #endif /* __KERNEL__ */ #endif /* _LINUX_SUNRPC_XPRTSOCK_H */ diff --git a/include/linux/suspend.h b/include/linux/suspend.h index 5efe743ce1e8..8b6ec7ef0854 100644 --- a/include/linux/suspend.h +++ b/include/linux/suspend.h @@ -202,6 +202,36 @@ struct platform_freeze_ops { extern void suspend_set_ops(const struct platform_suspend_ops *ops); extern int suspend_valid_only_mem(suspend_state_t state); +extern unsigned int pm_suspend_global_flags; + +#define PM_SUSPEND_FLAG_FW_SUSPEND (1 << 0) +#define PM_SUSPEND_FLAG_FW_RESUME (1 << 1) + +static inline void pm_suspend_clear_flags(void) +{ + pm_suspend_global_flags = 0; +} + +static inline void pm_set_suspend_via_firmware(void) +{ + pm_suspend_global_flags |= PM_SUSPEND_FLAG_FW_SUSPEND; +} + +static inline void pm_set_resume_via_firmware(void) +{ + pm_suspend_global_flags |= PM_SUSPEND_FLAG_FW_RESUME; +} + +static inline bool pm_suspend_via_firmware(void) +{ + return !!(pm_suspend_global_flags & PM_SUSPEND_FLAG_FW_SUSPEND); +} + +static inline bool pm_resume_via_firmware(void) +{ + return !!(pm_suspend_global_flags & PM_SUSPEND_FLAG_FW_RESUME); +} + /* Suspend-to-idle state machnine. */ enum freeze_state { FREEZE_STATE_NONE, /* Not suspended/suspending. 
*/ @@ -241,6 +271,12 @@ extern int pm_suspend(suspend_state_t state); #else /* !CONFIG_SUSPEND */ #define suspend_valid_only_mem NULL +static inline void pm_suspend_clear_flags(void) {} +static inline void pm_set_suspend_via_firmware(void) {} +static inline void pm_set_resume_via_firmware(void) {} +static inline bool pm_suspend_via_firmware(void) { return false; } +static inline bool pm_resume_via_firmware(void) { return false; } + static inline void suspend_set_ops(const struct platform_suspend_ops *ops) {} static inline int pm_suspend(suspend_state_t state) { return -ENOSYS; } static inline bool idle_should_freeze(void) { return false; } @@ -387,10 +423,12 @@ extern int unregister_pm_notifier(struct notifier_block *nb); /* drivers/base/power/wakeup.c */ extern bool events_check_enabled; +extern unsigned int pm_wakeup_irq; extern bool pm_wakeup_pending(void); extern void pm_system_wakeup(void); extern void pm_wakeup_clear(void); +extern void pm_system_irq_wakeup(unsigned int irq_number); extern bool pm_get_wakeup_count(unsigned int *count, bool block); extern bool pm_save_wakeup_count(unsigned int count); extern void pm_wakep_autosleep_enabled(bool set); @@ -440,6 +478,7 @@ static inline int unregister_pm_notifier(struct notifier_block *nb) static inline bool pm_wakeup_pending(void) { return false; } static inline void pm_system_wakeup(void) {} static inline void pm_wakeup_clear(void) {} +static inline void pm_system_irq_wakeup(unsigned int irq_number) {} static inline void lock_system_sleep(void) {} static inline void unlock_system_sleep(void) {} diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h index a460e2ef2843..a156b82dd14c 100644 --- a/include/linux/syscalls.h +++ b/include/linux/syscalls.h @@ -887,4 +887,6 @@ asmlinkage long sys_execveat(int dfd, const char __user *filename, asmlinkage long sys_membarrier(int cmd, int flags); +asmlinkage long sys_mlock2(unsigned long start, size_t len, int flags); + #endif diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h index 9f65758311a4..ea090eaf468c 100644 --- a/include/linux/sysfs.h +++ b/include/linux/sysfs.h @@ -268,6 +268,9 @@ int sysfs_add_link_to_group(struct kobject *kobj, const char *group_name, struct kobject *target, const char *link_name); void sysfs_remove_link_from_group(struct kobject *kobj, const char *group_name, const char *link_name); +int __compat_only_sysfs_link_entry_to_kobj(struct kobject *kobj, + struct kobject *target_kobj, + const char *target_name); void sysfs_notify(struct kobject *kobj, const char *dir, const char *attr); @@ -451,6 +454,14 @@ static inline void sysfs_remove_link_from_group(struct kobject *kobj, { } +static inline int __compat_only_sysfs_link_entry_to_kobj( + struct kobject *kobj, + struct kobject *target_kobj, + const char *target_name) +{ + return 0; +} + static inline void sysfs_notify(struct kobject *kobj, const char *dir, const char *attr) { diff --git a/include/linux/t10-pi.h b/include/linux/t10-pi.h index 6a8b9942632d..dd8de82cf5b5 100644 --- a/include/linux/t10-pi.h +++ b/include/linux/t10-pi.h @@ -14,9 +14,9 @@ struct t10_pi_tuple { }; -extern struct blk_integrity t10_pi_type1_crc; -extern struct blk_integrity t10_pi_type1_ip; -extern struct blk_integrity t10_pi_type3_crc; -extern struct blk_integrity t10_pi_type3_ip; +extern struct blk_integrity_profile t10_pi_type1_crc; +extern struct blk_integrity_profile t10_pi_type1_ip; +extern struct blk_integrity_profile t10_pi_type3_crc; +extern struct blk_integrity_profile t10_pi_type3_ip; #endif diff --git 
a/include/linux/tcp.h b/include/linux/tcp.h index 48c3696e8645..c906f4534581 100644 --- a/include/linux/tcp.h +++ b/include/linux/tcp.h @@ -112,10 +112,11 @@ struct tcp_request_sock_ops; struct tcp_request_sock { struct inet_request_sock req; const struct tcp_request_sock_ops *af_specific; + struct skb_mstamp snt_synack; /* first SYNACK sent time */ bool tfo_listener; + u32 txhash; u32 rcv_isn; u32 snt_isn; - u32 snt_synack; /* synack sent time */ u32 last_oow_ack_time; /* last SYNACK */ u32 rcv_nxt; /* the ack # by SYNACK. For * FastOpen it's the seq# @@ -193,6 +194,12 @@ struct tcp_sock { u32 window_clamp; /* Maximal window to advertise */ u32 rcv_ssthresh; /* Current window clamp */ + /* Information of the most recently (s)acked skb */ + struct tcp_rack { + struct skb_mstamp mstamp; /* (Re)sent time of the skb */ + u8 advanced; /* mstamp advanced since last lost marking */ + u8 reord; /* reordering detected */ + } rack; u16 advmss; /* Advertised MSS */ u8 unused; u8 nonagle : 4,/* Disable Nagle algorithm? */ @@ -216,6 +223,9 @@ struct tcp_sock { u32 mdev_max_us; /* maximal mdev for the last rtt period */ u32 rttvar_us; /* smoothed mdev_max */ u32 rtt_seq; /* sequence number to update rttvar */ + struct rtt_meas { + u32 rtt, ts; /* RTT in usec and sampling time in jiffies. */ + } rtt_min[3]; u32 packets_out; /* Packets which are "in flight" */ u32 retrans_out; /* Retransmitted packets out */ @@ -279,8 +289,6 @@ struct tcp_sock { int lost_cnt_hint; u32 retransmit_high; /* L-bits may be on up to this seqno */ - u32 lost_retrans_low; /* Sent seq after any rxmit (lowest) */ - u32 prior_ssthresh; /* ssthresh saved at recovery start */ u32 high_seq; /* snd_nxt at onset of congestion */ @@ -355,8 +363,8 @@ static inline struct tcp_sock *tcp_sk(const struct sock *sk) struct tcp_timewait_sock { struct inet_timewait_sock tw_sk; - u32 tw_rcv_nxt; - u32 tw_snd_nxt; +#define tw_rcv_nxt tw_sk.__tw_common.skc_tw_rcv_nxt +#define tw_snd_nxt tw_sk.__tw_common.skc_tw_snd_nxt u32 tw_rcv_wnd; u32 tw_ts_offset; u32 tw_ts_recent; @@ -381,25 +389,12 @@ static inline bool tcp_passive_fastopen(const struct sock *sk) tcp_sk(sk)->fastopen_rsk != NULL); } -extern void tcp_sock_destruct(struct sock *sk); - -static inline int fastopen_init_queue(struct sock *sk, int backlog) +static inline void fastopen_queue_tune(struct sock *sk, int backlog) { - struct request_sock_queue *queue = - &inet_csk(sk)->icsk_accept_queue; - - if (queue->fastopenq == NULL) { - queue->fastopenq = kzalloc( - sizeof(struct fastopen_queue), - sk->sk_allocation); - if (queue->fastopenq == NULL) - return -ENOMEM; - - sk->sk_destruct = tcp_sock_destruct; - spin_lock_init(&queue->fastopenq->lock); - } - queue->fastopenq->max_qlen = backlog; - return 0; + struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue; + int somaxconn = READ_ONCE(sock_net(sk)->core.sysctl_somaxconn); + + queue->fastopenq.max_qlen = min_t(unsigned int, backlog, somaxconn); } static inline void tcp_saved_syn_free(struct tcp_sock *tp) diff --git a/include/linux/thermal.h b/include/linux/thermal.h index 17292fee8686..157d366e761b 100644 --- a/include/linux/thermal.h +++ b/include/linux/thermal.h @@ -360,7 +360,7 @@ static inline struct thermal_zone_device * thermal_zone_of_sensor_register(struct device *dev, int id, void *data, const struct thermal_zone_of_device_ops *ops) { - return NULL; + return ERR_PTR(-ENODEV); } static inline @@ -380,6 +380,8 @@ static inline bool cdev_is_power_actor(struct thermal_cooling_device *cdev) int power_actor_get_max_power(struct 
thermal_cooling_device *, struct thermal_zone_device *tz, u32 *max_power); +int power_actor_get_min_power(struct thermal_cooling_device *, + struct thermal_zone_device *tz, u32 *min_power); int power_actor_set_power(struct thermal_cooling_device *, struct thermal_instance *, u32); struct thermal_zone_device *thermal_zone_device_register(const char *, int, int, @@ -415,6 +417,10 @@ static inline bool cdev_is_power_actor(struct thermal_cooling_device *cdev) static inline int power_actor_get_max_power(struct thermal_cooling_device *cdev, struct thermal_zone_device *tz, u32 *max_power) { return 0; } +static inline int power_actor_get_min_power(struct thermal_cooling_device *cdev, + struct thermal_zone_device *tz, + u32 *min_power) +{ return -ENODEV; } static inline int power_actor_set_power(struct thermal_cooling_device *cdev, struct thermal_instance *tz, u32 power) { return 0; } diff --git a/include/linux/ti_wilink_st.h b/include/linux/ti_wilink_st.h index d4217eff489f..0a0d56834c8e 100644 --- a/include/linux/ti_wilink_st.h +++ b/include/linux/ti_wilink_st.h @@ -158,6 +158,7 @@ struct st_data_s { unsigned long ll_state; void *kim_data; struct tty_struct *tty; + struct work_struct work_write_wakeup; }; /* diff --git a/include/linux/tick.h b/include/linux/tick.h index 48d901f83f92..e312219ff823 100644 --- a/include/linux/tick.h +++ b/include/linux/tick.h @@ -147,11 +147,20 @@ static inline void tick_nohz_full_add_cpus_to(struct cpumask *mask) cpumask_or(mask, mask, tick_nohz_full_mask); } +static inline int housekeeping_any_cpu(void) +{ + return cpumask_any_and(housekeeping_mask, cpu_online_mask); +} + extern void tick_nohz_full_kick(void); extern void tick_nohz_full_kick_cpu(int cpu); extern void tick_nohz_full_kick_all(void); extern void __tick_nohz_task_switch(void); #else +static inline int housekeeping_any_cpu(void) +{ + return smp_processor_id(); +} static inline bool tick_nohz_full_enabled(void) { return false; } static inline bool tick_nohz_full_cpu(int cpu) { return false; } static inline void tick_nohz_full_add_cpus_to(struct cpumask *mask) { } diff --git a/include/linux/timekeeping.h b/include/linux/timekeeping.h index ba0ae09cbb21..ec89d846324c 100644 --- a/include/linux/timekeeping.h +++ b/include/linux/timekeeping.h @@ -263,8 +263,8 @@ extern void timekeeping_inject_sleeptime64(struct timespec64 *delta); /* * PPS accessor */ -extern void getnstime_raw_and_real(struct timespec *ts_raw, - struct timespec *ts_real); +extern void ktime_get_raw_and_real_ts64(struct timespec64 *ts_raw, + struct timespec64 *ts_real); /* * Persistent clock related interfaces diff --git a/include/linux/timex.h b/include/linux/timex.h index 9d3f1a5b6178..39c25dbebfe8 100644 --- a/include/linux/timex.h +++ b/include/linux/timex.h @@ -152,7 +152,7 @@ extern unsigned long tick_nsec; /* SHIFTED_HZ period (nsec) */ #define NTP_INTERVAL_LENGTH (NSEC_PER_SEC/NTP_INTERVAL_FREQ) extern int do_adjtimex(struct timex *); -extern void hardpps(const struct timespec *, const struct timespec *); +extern void hardpps(const struct timespec64 *, const struct timespec64 *); int read_current_timer(unsigned long *timer_val); void ntp_notify_cmos_timer(void); diff --git a/include/linux/tpm.h b/include/linux/tpm.h index 8350c538b486..706e63eea080 100644 --- a/include/linux/tpm.h +++ b/include/linux/tpm.h @@ -30,6 +30,8 @@ #define TPM_ANY_NUM 0xFFFF struct tpm_chip; +struct trusted_key_payload; +struct trusted_key_options; struct tpm_class_ops { const u8 req_complete_mask; @@ -46,11 +48,22 @@ struct tpm_class_ops { #if 
defined(CONFIG_TCG_TPM) || defined(CONFIG_TCG_TPM_MODULE) +extern int tpm_is_tpm2(u32 chip_num); extern int tpm_pcr_read(u32 chip_num, int pcr_idx, u8 *res_buf); extern int tpm_pcr_extend(u32 chip_num, int pcr_idx, const u8 *hash); extern int tpm_send(u32 chip_num, void *cmd, size_t buflen); extern int tpm_get_random(u32 chip_num, u8 *data, size_t max); +extern int tpm_seal_trusted(u32 chip_num, + struct trusted_key_payload *payload, + struct trusted_key_options *options); +extern int tpm_unseal_trusted(u32 chip_num, + struct trusted_key_payload *payload, + struct trusted_key_options *options); #else +static inline int tpm_is_tpm2(u32 chip_num) +{ + return -ENODEV; +} static inline int tpm_pcr_read(u32 chip_num, int pcr_idx, u8 *res_buf) { return -ENODEV; } @@ -63,5 +76,18 @@ static inline int tpm_send(u32 chip_num, void *cmd, size_t buflen) { static inline int tpm_get_random(u32 chip_num, u8 *data, size_t max) { return -ENODEV; } + +static inline int tpm_seal_trusted(u32 chip_num, + struct trusted_key_payload *payload, + struct trusted_key_options *options) +{ + return -ENODEV; +} +static inline int tpm_unseal_trusted(u32 chip_num, + struct trusted_key_payload *payload, + struct trusted_key_options *options) +{ + return -ENODEV; +} #endif #endif diff --git a/include/linux/tracehook.h b/include/linux/tracehook.h index 84d497297c5f..26c152122a42 100644 --- a/include/linux/tracehook.h +++ b/include/linux/tracehook.h @@ -50,6 +50,7 @@ #include <linux/ptrace.h> #include <linux/security.h> #include <linux/task_work.h> +#include <linux/memcontrol.h> struct linux_binprm; /* @@ -188,6 +189,8 @@ static inline void tracehook_notify_resume(struct pt_regs *regs) smp_mb__after_atomic(); if (unlikely(current->task_works)) task_work_run(); + + mem_cgroup_handle_over_high(); } #endif /* <linux/tracehook.h> */ diff --git a/include/linux/tty.h b/include/linux/tty.h index d072ded41678..5b04b0a5375b 100644 --- a/include/linux/tty.h +++ b/include/linux/tty.h @@ -227,7 +227,6 @@ struct tty_port { int blocked_open; /* Waiting to open */ int count; /* Usage count */ wait_queue_head_t open_wait; /* Open waiters */ - wait_queue_head_t close_wait; /* Close waiters */ wait_queue_head_t delta_msr_wait; /* Modem status change */ unsigned long flags; /* TTY flags ASY_*/ unsigned char console:1, /* port is a console */ @@ -424,6 +423,7 @@ extern int tty_paranoia_check(struct tty_struct *tty, struct inode *inode, const char *routine); extern const char *tty_name(const struct tty_struct *tty); extern void tty_wait_until_sent(struct tty_struct *tty, long timeout); +extern int __tty_check_change(struct tty_struct *tty, int sig); extern int tty_check_change(struct tty_struct *tty); extern void __stop_tty(struct tty_struct *tty); extern void stop_tty(struct tty_struct *tty); @@ -467,6 +467,8 @@ extern void tty_buffer_free_all(struct tty_port *port); extern void tty_buffer_flush(struct tty_struct *tty, struct tty_ldisc *ld); extern void tty_buffer_init(struct tty_port *port); extern void tty_buffer_set_lock_subclass(struct tty_port *port); +extern bool tty_buffer_restart_work(struct tty_port *port); +extern bool tty_buffer_cancel_work(struct tty_port *port); extern speed_t tty_termios_baud_rate(struct ktermios *termios); extern speed_t tty_termios_input_baud_rate(struct ktermios *termios); extern void tty_termios_encode_baud_rate(struct ktermios *termios, @@ -656,50 +658,6 @@ extern void __lockfunc tty_unlock(struct tty_struct *tty); extern void __lockfunc tty_lock_slave(struct tty_struct *tty); extern void __lockfunc 
tty_unlock_slave(struct tty_struct *tty); extern void tty_set_lock_subclass(struct tty_struct *tty); -/* - * this shall be called only from where BTM is held (like close) - * - * We need this to ensure nobody waits for us to finish while we are waiting. - * Without this we were encountering system stalls. - * - * This should be indeed removed with BTM removal later. - * - * Locking: BTM required. Nobody is allowed to hold port->mutex. - */ -static inline void tty_wait_until_sent_from_close(struct tty_struct *tty, - long timeout) -{ - tty_unlock(tty); /* tty->ops->close holds the BTM, drop it while waiting */ - tty_wait_until_sent(tty, timeout); - tty_lock(tty); -} - -/* - * wait_event_interruptible_tty -- wait for a condition with the tty lock held - * - * The condition we are waiting for might take a long time to - * become true, or might depend on another thread taking the - * BTM. In either case, we need to drop the BTM to guarantee - * forward progress. This is a leftover from the conversion - * from the BKL and should eventually get removed as the BTM - * falls out of use. - * - * Do not use in new code. - */ -#define wait_event_interruptible_tty(tty, wq, condition) \ -({ \ - int __ret = 0; \ - if (!(condition)) \ - __ret = __wait_event_interruptible_tty(tty, wq, \ - condition); \ - __ret; \ -}) - -#define __wait_event_interruptible_tty(tty, wq, condition) \ - ___wait_event(wq, condition, TASK_INTERRUPTIBLE, 0, 0, \ - tty_unlock(tty); \ - schedule(); \ - tty_lock(tty)) #ifdef CONFIG_PROC_FS extern void proc_tty_register_driver(struct tty_driver *); diff --git a/include/linux/types.h b/include/linux/types.h index c314989d9158..70d8500bddf1 100644 --- a/include/linux/types.h +++ b/include/linux/types.h @@ -205,11 +205,25 @@ struct ustat { * struct callback_head - callback structure for use with RCU and task_work * @next: next update requests in a list * @func: actual update function to call after the grace period. + * + * The struct is aligned to size of pointer. On most architectures it happens + * naturally due ABI requirements, but some architectures (like CRIS) have + * weird ABI and we need to ask it explicitly. + * + * The alignment is required to guarantee that bits 0 and 1 of @next will be + * clear under normal conditions -- as long as we use call_rcu(), + * call_rcu_bh(), call_rcu_sched(), or call_srcu() to queue callback. + * + * This guarantee is important for few reasons: + * - future call_rcu_lazy() will make use of lower bits in the pointer; + * - the structure shares storage spacer in struct page with @compound_head, + * which encode PageTail() in bit 0. The guarantee is needed to avoid + * false-positive PageTail(). */ struct callback_head { struct callback_head *next; void (*func)(struct callback_head *head); -}; +} __attribute__((aligned(sizeof(void *)))); #define rcu_head callback_head typedef void (*rcu_callback_t)(struct rcu_head *head); diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h index d6f2c2c5b043..558129af828a 100644 --- a/include/linux/uaccess.h +++ b/include/linux/uaccess.h @@ -75,36 +75,6 @@ static inline unsigned long __copy_from_user_nocache(void *to, #endif /* ARCH_HAS_NOCACHE_UACCESS */ -/** - * probe_kernel_address(): safely attempt to read from a location - * @addr: address to read from - its type is type typeof(retval)* - * @retval: read into this variable - * - * Safely read from address @addr into variable @revtal. If a kernel fault - * happens, handle that and return -EFAULT. 
- * We ensure that the __get_user() is executed in atomic context so that - * do_page_fault() doesn't attempt to take mmap_sem. This makes - * probe_kernel_address() suitable for use within regions where the caller - * already holds mmap_sem, or other locks which nest inside mmap_sem. - * This must be a macro because __get_user() needs to know the types of the - * args. - * - * We don't include enough header files to be able to do the set_fs(). We - * require that the probe_kernel_address() caller will do that. - */ -#define probe_kernel_address(addr, retval) \ - ({ \ - long ret; \ - mm_segment_t old_fs = get_fs(); \ - \ - set_fs(KERNEL_DS); \ - pagefault_disable(); \ - ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval)); \ - pagefault_enable(); \ - set_fs(old_fs); \ - ret; \ - }) - /* * probe_kernel_read(): safely attempt to read from a location * @dst: pointer to the buffer that shall take the data @@ -131,4 +101,14 @@ extern long notrace __probe_kernel_write(void *dst, const void *src, size_t size extern long strncpy_from_unsafe(char *dst, const void *unsafe_addr, long count); +/** + * probe_kernel_address(): safely attempt to read from a location + * @addr: address to read from + * @retval: read into this variable + * + * Returns 0 on success, or -EFAULT. + */ +#define probe_kernel_address(addr, retval) \ + probe_kernel_read(&retval, addr, sizeof(retval)) + #endif /* __LINUX_UACCESS_H__ */ diff --git a/include/linux/usb.h b/include/linux/usb.h index 447fe29b55b4..b9a28074210f 100644 --- a/include/linux/usb.h +++ b/include/linux/usb.h @@ -122,6 +122,8 @@ enum usb_interface_condition { * has been deferred. * @needs_binding: flag set when the driver should be re-probed or unbound * following a reset or suspend operation it doesn't support. + * @authorized: This allows to (de)authorize individual interfaces instead + * a whole device in contrast to the device authorization. * @dev: driver model's view of this device * @usb_dev: if an interface is bound to the USB major, this will point * to the sysfs representation for that device. @@ -178,6 +180,7 @@ struct usb_interface { unsigned needs_altsetting0:1; /* switch to altsetting 0 is pending */ unsigned needs_binding:1; /* needs delayed unbind/rebind */ unsigned resetting_device:1; /* true: bandwidth alloc after reset */ + unsigned authorized:1; /* used for interface authorization */ struct device dev; /* interface specific device info */ struct device *usb_dev; @@ -325,6 +328,7 @@ struct usb_host_bos { /* wireless cap descriptor is handled by wusb */ struct usb_ext_cap_descriptor *ext_cap; struct usb_ss_cap_descriptor *ss_cap; + struct usb_ssp_cap_descriptor *ssp_cap; struct usb_ss_container_id_descriptor *ss_id; }; diff --git a/include/linux/usb/cdc.h b/include/linux/usb/cdc.h new file mode 100644 index 000000000000..b5706f94ee9e --- /dev/null +++ b/include/linux/usb/cdc.h @@ -0,0 +1,51 @@ +/* + * USB CDC common helpers + * + * Copyright (c) 2015 Oliver Neukum <oneukum@suse.com> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. 
+ */ +#ifndef __LINUX_USB_CDC_H +#define __LINUX_USB_CDC_H + +#include <uapi/linux/usb/cdc.h> + +/* + * inofficial magic numbers + */ + +#define CDC_PHONET_MAGIC_NUMBER 0xAB + +/* + * parsing CDC headers + */ + +struct usb_cdc_parsed_header { + struct usb_cdc_union_desc *usb_cdc_union_desc; + struct usb_cdc_header_desc *usb_cdc_header_desc; + + struct usb_cdc_call_mgmt_descriptor *usb_cdc_call_mgmt_descriptor; + struct usb_cdc_acm_descriptor *usb_cdc_acm_descriptor; + struct usb_cdc_country_functional_desc *usb_cdc_country_functional_desc; + struct usb_cdc_network_terminal_desc *usb_cdc_network_terminal_desc; + struct usb_cdc_ether_desc *usb_cdc_ether_desc; + struct usb_cdc_dmm_desc *usb_cdc_dmm_desc; + struct usb_cdc_mdlm_desc *usb_cdc_mdlm_desc; + struct usb_cdc_mdlm_detail_desc *usb_cdc_mdlm_detail_desc; + struct usb_cdc_obex_desc *usb_cdc_obex_desc; + struct usb_cdc_ncm_desc *usb_cdc_ncm_desc; + struct usb_cdc_mbim_desc *usb_cdc_mbim_desc; + struct usb_cdc_mbim_extended_desc *usb_cdc_mbim_extended_desc; + + bool phonet_magic_present; +}; + +struct usb_interface; +int cdc_parse_cdc_header(struct usb_cdc_parsed_header *hdr, + struct usb_interface *intf, + u8 *buffer, + int buflen); + +#endif /* __LINUX_USB_CDC_H */ diff --git a/include/linux/usb/ch9.h b/include/linux/usb/ch9.h index 27603bcbb9b9..6cc96bb12ddc 100644 --- a/include/linux/usb/ch9.h +++ b/include/linux/usb/ch9.h @@ -32,9 +32,9 @@ #ifndef __LINUX_USB_CH9_H #define __LINUX_USB_CH9_H +#include <linux/device.h> #include <uapi/linux/usb/ch9.h> - /** * usb_speed_string() - Returns human readable-name of the speed. * @speed: The speed to return human-readable name for. If it's not @@ -43,6 +43,15 @@ */ extern const char *usb_speed_string(enum usb_device_speed speed); +/** + * usb_get_maximum_speed - Get maximum requested speed for a given USB + * controller. + * @dev: Pointer to the given USB controller device + * + * The function gets the maximum speed string from property "maximum-speed", + * and returns the corresponding enum usb_device_speed. + */ +extern enum usb_device_speed usb_get_maximum_speed(struct device *dev); /** * usb_state_string - Returns human readable name for the state. 
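Since the CDC parsing helper above is new, here is a hypothetical probe-time snippet showing the intended usage; it is not part of the patch. It assumes a CDC control interface whose class-specific functional descriptors trail the interface descriptor (the common layout), ignores the helper's count return value for brevity, and the function name is made up.

#include <linux/errno.h>
#include <linux/usb.h>
#include <linux/usb/cdc.h>

/* Hypothetical helper: find the data interface number advertised by the
 * union functional descriptor of a CDC control interface.
 */
static int example_cdc_data_intf(struct usb_interface *intf)
{
        struct usb_host_interface *alt = intf->cur_altsetting;
        struct usb_cdc_parsed_header hdr;

        /* Walk the class-specific descriptors that follow the interface. */
        cdc_parse_cdc_header(&hdr, intf, alt->extra, alt->extralen);

        /* A CDC control interface is expected to carry header and union
         * functional descriptors; bail out if either is missing.
         */
        if (!hdr.usb_cdc_header_desc || !hdr.usb_cdc_union_desc)
                return -ENODEV;

        return hdr.usb_cdc_union_desc->bSlaveInterface0;
}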
diff --git a/include/linux/usb/chipidea.h b/include/linux/usb/chipidea.h index a41833cd184c..5dd75fa47dd8 100644 --- a/include/linux/usb/chipidea.h +++ b/include/linux/usb/chipidea.h @@ -5,9 +5,28 @@ #ifndef __LINUX_USB_CHIPIDEA_H #define __LINUX_USB_CHIPIDEA_H +#include <linux/extcon.h> #include <linux/usb/otg.h> struct ci_hdrc; + +/** + * struct ci_hdrc_cable - structure for external connector cable state tracking + * @state: current state of the line + * @changed: set to true when extcon event happen + * @edev: device which generate events + * @ci: driver state of the chipidea device + * @nb: hold event notification callback + * @conn: used for notification registration + */ +struct ci_hdrc_cable { + bool state; + bool changed; + struct extcon_dev *edev; + struct ci_hdrc *ci; + struct notifier_block nb; +}; + struct ci_hdrc_platform_data { const char *name; /* offset of the capability registers */ @@ -48,6 +67,11 @@ struct ci_hdrc_platform_data { u32 ahb_burst_config; u32 tx_burst_size; u32 rx_burst_size; + + /* VBUS and ID signal state tracking, using extcon framework */ + struct ci_hdrc_cable vbus_extcon; + struct ci_hdrc_cable id_extcon; + u32 phy_clkgate_delay_us; }; /* Default offset of capability registers */ diff --git a/include/linux/usb/gadget.h b/include/linux/usb/gadget.h index c14a69b36d27..3d583a10b926 100644 --- a/include/linux/usb/gadget.h +++ b/include/linux/usb/gadget.h @@ -215,6 +215,7 @@ struct usb_ep { struct list_head ep_list; struct usb_ep_caps caps; bool claimed; + bool enabled; unsigned maxpacket:16; unsigned maxpacket_limit:16; unsigned max_streams:16; @@ -264,7 +265,18 @@ static inline void usb_ep_set_maxpacket_limit(struct usb_ep *ep, */ static inline int usb_ep_enable(struct usb_ep *ep) { - return ep->ops->enable(ep, ep->desc); + int ret; + + if (ep->enabled) + return 0; + + ret = ep->ops->enable(ep, ep->desc); + if (ret) + return ret; + + ep->enabled = true; + + return 0; } /** @@ -281,7 +293,18 @@ static inline int usb_ep_enable(struct usb_ep *ep) */ static inline int usb_ep_disable(struct usb_ep *ep) { - return ep->ops->disable(ep); + int ret; + + if (!ep->enabled) + return 0; + + ret = ep->ops->disable(ep); + if (ret) + return ret; + + ep->enabled = false; + + return 0; } /** @@ -1233,6 +1256,8 @@ extern struct usb_ep *usb_ep_autoconfig_ss(struct usb_gadget *, struct usb_endpoint_descriptor *, struct usb_ss_ep_comp_descriptor *); +extern void usb_ep_autoconfig_release(struct usb_ep *); + extern void usb_ep_autoconfig_reset(struct usb_gadget *); #endif /* __LINUX_USB_GADGET_H */ diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h index d2784c10bfe2..f89c24bd53a4 100644 --- a/include/linux/usb/hcd.h +++ b/include/linux/usb/hcd.h @@ -58,12 +58,6 @@ * * Since "struct usb_bus" is so thin, you can't share much code in it. * This framework is a layer over that, and should be more sharable. - * - * @authorized_default: Specifies if new devices are authorized to - * connect by default or they require explicit - * user space authorization; this bit is settable - * through /sys/class/usb_host/X/authorized_default. - * For the rest is RO, so we don't lock to r/w it. */ /*-------------------------------------------------------------------------*/ @@ -120,6 +114,8 @@ struct usb_hcd { #define HCD_FLAG_WAKEUP_PENDING 4 /* root hub is resuming? */ #define HCD_FLAG_RH_RUNNING 5 /* root hub is running? */ #define HCD_FLAG_DEAD 6 /* controller has died? */ +#define HCD_FLAG_INTF_AUTHORIZED 7 /* authorize interfaces? 
*/ +#define HCD_FLAG_DEV_AUTHORIZED 8 /* authorize devices? */ /* The flags can be tested using these macros; they are likely to * be slightly faster than test_bit(). @@ -131,6 +127,22 @@ struct usb_hcd { #define HCD_RH_RUNNING(hcd) ((hcd)->flags & (1U << HCD_FLAG_RH_RUNNING)) #define HCD_DEAD(hcd) ((hcd)->flags & (1U << HCD_FLAG_DEAD)) + /* + * Specifies if interfaces are authorized by default + * or they require explicit user space authorization; this bit is + * settable through /sys/class/usb_host/X/interface_authorized_default + */ +#define HCD_INTF_AUTHORIZED(hcd) \ + ((hcd)->flags & (1U << HCD_FLAG_INTF_AUTHORIZED)) + + /* + * Specifies if devices are authorized by default + * or they require explicit user space authorization; this bit is + * settable through /sys/class/usb_host/X/authorized_default + */ +#define HCD_DEV_AUTHORIZED(hcd) \ + ((hcd)->flags & (1U << HCD_FLAG_DEV_AUTHORIZED)) + /* Flags that get set only during HCD registration or removal. */ unsigned rh_registered:1;/* is root hub registered? */ unsigned rh_pollable:1; /* may we poll the root hub? */ @@ -141,7 +153,6 @@ struct usb_hcd { * support the new root-hub polling mechanism. */ unsigned uses_new_polling:1; unsigned wireless:1; /* Wireless USB HCD */ - unsigned authorized_default:1; unsigned has_tt:1; /* Integrated TT in root hub */ unsigned amd_resume_bug:1; /* AMD remote wakeup quirk */ unsigned can_do_streams:1; /* HC supports streams */ @@ -239,6 +250,7 @@ struct hc_driver { #define HCD_USB2 0x0020 /* USB 2.0 */ #define HCD_USB25 0x0030 /* Wireless USB 1.0 (USB 2.5)*/ #define HCD_USB3 0x0040 /* USB 3.0 */ +#define HCD_USB31 0x0050 /* USB 3.1 */ #define HCD_MASK 0x0070 #define HCD_BH 0x0100 /* URB complete in BH context */ diff --git a/include/linux/usb/musb.h b/include/linux/usb/musb.h index a4ee1b582183..fa6dc132bd1b 100644 --- a/include/linux/usb/musb.h +++ b/include/linux/usb/musb.h @@ -95,7 +95,7 @@ struct musb_hdrc_config { /* musb CLKIN in Blackfin in MHZ */ unsigned char clkin; #endif - + u32 maximum_speed; }; struct musb_hdrc_platform_data { diff --git a/include/linux/usb/of.h b/include/linux/usb/of.h index 8c5a818ec244..c3fe9e48ce27 100644 --- a/include/linux/usb/of.h +++ b/include/linux/usb/of.h @@ -12,22 +12,10 @@ #include <linux/usb/phy.h> #if IS_ENABLED(CONFIG_OF) -enum usb_dr_mode of_usb_get_dr_mode(struct device_node *np); -enum usb_device_speed of_usb_get_maximum_speed(struct device_node *np); bool of_usb_host_tpl_support(struct device_node *np); int of_usb_update_otg_caps(struct device_node *np, struct usb_otg_caps *otg_caps); #else -static inline enum usb_dr_mode of_usb_get_dr_mode(struct device_node *np) -{ - return USB_DR_MODE_UNKNOWN; -} - -static inline enum usb_device_speed -of_usb_get_maximum_speed(struct device_node *np) -{ - return USB_SPEED_UNKNOWN; -} static inline bool of_usb_host_tpl_support(struct device_node *np) { return false; diff --git a/include/linux/usb/otg.h b/include/linux/usb/otg.h index bd1dcf816100..67929df86df5 100644 --- a/include/linux/usb/otg.h +++ b/include/linux/usb/otg.h @@ -119,4 +119,13 @@ enum usb_dr_mode { USB_DR_MODE_OTG, }; +/** + * usb_get_dr_mode - Get dual role mode for given device + * @dev: Pointer to the given device + * + * The function gets phy interface string from property 'dr_mode', + * and returns the correspondig enum usb_dr_mode + */ +extern enum usb_dr_mode usb_get_dr_mode(struct device *dev); + #endif /* __LINUX_USB_OTG_H */ diff --git a/include/linux/usb/phy.h b/include/linux/usb/phy.h index e39f251cf861..31a8068c42a5 100644 --- 
a/include/linux/usb/phy.h +++ b/include/linux/usb/phy.h @@ -63,7 +63,7 @@ enum usb_otg_state { struct usb_phy; struct usb_otg; -/* for transceivers connected thru an ULPI interface, the user must +/* for phys connected thru an ULPI interface, the user must * provide access ops */ struct usb_phy_io_ops { @@ -92,10 +92,10 @@ struct usb_phy { u16 port_status; u16 port_change; - /* to support controllers that have multiple transceivers */ + /* to support controllers that have multiple phys */ struct list_head head; - /* initialize/shutdown the OTG controller */ + /* initialize/shutdown the phy */ int (*init)(struct usb_phy *x); void (*shutdown)(struct usb_phy *x); @@ -106,7 +106,7 @@ struct usb_phy { int (*set_power)(struct usb_phy *x, unsigned mA); - /* Set transceiver into suspend mode */ + /* Set phy into suspend mode */ int (*set_suspend)(struct usb_phy *x, int suspend); diff --git a/include/linux/usb/renesas_usbhs.h b/include/linux/usb/renesas_usbhs.h index 3dd5a781da99..bfb74723f151 100644 --- a/include/linux/usb/renesas_usbhs.h +++ b/include/linux/usb/renesas_usbhs.h @@ -157,7 +157,7 @@ struct renesas_usbhs_driver_param { */ int pio_dma_border; /* default is 64byte */ - u32 type; + uintptr_t type; u32 enable_gpio; /* diff --git a/include/linux/vm_event_item.h b/include/linux/vm_event_item.h index 9246d32dc973..e623d392db0c 100644 --- a/include/linux/vm_event_item.h +++ b/include/linux/vm_event_item.h @@ -14,12 +14,12 @@ #endif #ifdef CONFIG_HIGHMEM -#define HIGHMEM_ZONE(xx) , xx##_HIGH +#define HIGHMEM_ZONE(xx) xx##_HIGH, #else #define HIGHMEM_ZONE(xx) #endif -#define FOR_ALL_ZONES(xx) DMA_ZONE(xx) DMA32_ZONE(xx) xx##_NORMAL HIGHMEM_ZONE(xx) , xx##_MOVABLE +#define FOR_ALL_ZONES(xx) DMA_ZONE(xx) DMA32_ZONE(xx) xx##_NORMAL, HIGHMEM_ZONE(xx) xx##_MOVABLE enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT, FOR_ALL_ZONES(PGALLOC), diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h index 0ec598381f97..3bff87a25a42 100644 --- a/include/linux/vmalloc.h +++ b/include/linux/vmalloc.h @@ -182,22 +182,10 @@ pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms) # endif #endif -struct vmalloc_info { - unsigned long used; - unsigned long largest_chunk; -}; - #ifdef CONFIG_MMU #define VMALLOC_TOTAL (VMALLOC_END - VMALLOC_START) -extern void get_vmalloc_info(struct vmalloc_info *vmi); #else - #define VMALLOC_TOTAL 0UL -#define get_vmalloc_info(vmi) \ -do { \ - (vmi)->used = 0; \ - (vmi)->largest_chunk = 0; \ -} while (0) #endif #endif /* _LINUX_VMALLOC_H */ diff --git a/include/linux/vme.h b/include/linux/vme.h index c0131358f351..71e4a6dec5ac 100644 --- a/include/linux/vme.h +++ b/include/linux/vme.h @@ -81,6 +81,9 @@ struct vme_resource { extern struct bus_type vme_bus_type; +/* Number of VME interrupt vectors */ +#define VME_NUM_STATUSID 256 + /* VME_MAX_BRIDGES comes from the type of vme_bus_numbers */ #define VME_MAX_BRIDGES (sizeof(unsigned int)*8) #define VME_MAX_SLOTS 32 diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h index 82e7db7f7100..5dbc8b0ee567 100644 --- a/include/linux/vmstat.h +++ b/include/linux/vmstat.h @@ -161,30 +161,8 @@ static inline unsigned long zone_page_state_snapshot(struct zone *zone, } #ifdef CONFIG_NUMA -/* - * Determine the per node value of a stat item. This function - * is called frequently in a NUMA machine, so try to be as - * frugal as possible. 
- */ -static inline unsigned long node_page_state(int node, - enum zone_stat_item item) -{ - struct zone *zones = NODE_DATA(node)->node_zones; - - return -#ifdef CONFIG_ZONE_DMA - zone_page_state(&zones[ZONE_DMA], item) + -#endif -#ifdef CONFIG_ZONE_DMA32 - zone_page_state(&zones[ZONE_DMA32], item) + -#endif -#ifdef CONFIG_HIGHMEM - zone_page_state(&zones[ZONE_HIGHMEM], item) + -#endif - zone_page_state(&zones[ZONE_NORMAL], item) + - zone_page_state(&zones[ZONE_MOVABLE], item); -} +extern unsigned long node_page_state(int node, enum zone_stat_item item); extern void zone_statistics(struct zone *, struct zone *, gfp_t gfp); #else @@ -269,7 +247,6 @@ static inline void __dec_zone_page_state(struct page *page, #define set_pgdat_percpu_threshold(pgdat, callback) { } -static inline void refresh_cpu_vm_stats(int cpu) { } static inline void refresh_zone_stat_thresholds(void) { } static inline void cpu_vm_stats_fold(int cpu) { } diff --git a/include/linux/wait.h b/include/linux/wait.h index d3d077228d4c..1e1bf9f963a9 100644 --- a/include/linux/wait.h +++ b/include/linux/wait.h @@ -147,8 +147,7 @@ __remove_wait_queue(wait_queue_head_t *head, wait_queue_t *old) typedef int wait_bit_action_f(struct wait_bit_key *); void __wake_up(wait_queue_head_t *q, unsigned int mode, int nr, void *key); -void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, int nr, - void *key); +void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key); void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode, int nr, void *key); void __wake_up_locked(wait_queue_head_t *q, unsigned int mode, int nr); void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr); @@ -180,7 +179,7 @@ wait_queue_head_t *bit_waitqueue(void *, int); #define wake_up_poll(x, m) \ __wake_up(x, TASK_NORMAL, 1, (void *) (m)) #define wake_up_locked_poll(x, m) \ - __wake_up_locked_key((x), TASK_NORMAL, 1, (void *) (m)) + __wake_up_locked_key((x), TASK_NORMAL, (void *) (m)) #define wake_up_interruptible_poll(x, m) \ __wake_up(x, TASK_INTERRUPTIBLE, 1, (void *) (m)) #define wake_up_interruptible_sync_poll(x, m) \ diff --git a/include/media/atmel-isi.h b/include/media/atmel-isi.h deleted file mode 100644 index 6008b0985b7b..000000000000 --- a/include/media/atmel-isi.h +++ /dev/null @@ -1,131 +0,0 @@ -/* - * Register definitions for the Atmel Image Sensor Interface. - * - * Copyright (C) 2011 Atmel Corporation - * Josh Wu, <josh.wu@atmel.com> - * - * Based on previous work by Lars Haring, <lars.haring@atmel.com> - * and Sedji Gaouaou - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
- */ -#ifndef __ATMEL_ISI_H__ -#define __ATMEL_ISI_H__ - -#include <linux/types.h> - -/* ISI_V2 register offsets */ -#define ISI_CFG1 0x0000 -#define ISI_CFG2 0x0004 -#define ISI_PSIZE 0x0008 -#define ISI_PDECF 0x000c -#define ISI_Y2R_SET0 0x0010 -#define ISI_Y2R_SET1 0x0014 -#define ISI_R2Y_SET0 0x0018 -#define ISI_R2Y_SET1 0x001C -#define ISI_R2Y_SET2 0x0020 -#define ISI_CTRL 0x0024 -#define ISI_STATUS 0x0028 -#define ISI_INTEN 0x002C -#define ISI_INTDIS 0x0030 -#define ISI_INTMASK 0x0034 -#define ISI_DMA_CHER 0x0038 -#define ISI_DMA_CHDR 0x003C -#define ISI_DMA_CHSR 0x0040 -#define ISI_DMA_P_ADDR 0x0044 -#define ISI_DMA_P_CTRL 0x0048 -#define ISI_DMA_P_DSCR 0x004C -#define ISI_DMA_C_ADDR 0x0050 -#define ISI_DMA_C_CTRL 0x0054 -#define ISI_DMA_C_DSCR 0x0058 - -/* Bitfields in CFG1 */ -#define ISI_CFG1_HSYNC_POL_ACTIVE_LOW (1 << 2) -#define ISI_CFG1_VSYNC_POL_ACTIVE_LOW (1 << 3) -#define ISI_CFG1_PIXCLK_POL_ACTIVE_FALLING (1 << 4) -#define ISI_CFG1_EMB_SYNC (1 << 6) -#define ISI_CFG1_CRC_SYNC (1 << 7) -/* Constants for FRATE(ISI_V2) */ -#define ISI_CFG1_FRATE_CAPTURE_ALL (0 << 8) -#define ISI_CFG1_FRATE_DIV_2 (1 << 8) -#define ISI_CFG1_FRATE_DIV_3 (2 << 8) -#define ISI_CFG1_FRATE_DIV_4 (3 << 8) -#define ISI_CFG1_FRATE_DIV_5 (4 << 8) -#define ISI_CFG1_FRATE_DIV_6 (5 << 8) -#define ISI_CFG1_FRATE_DIV_7 (6 << 8) -#define ISI_CFG1_FRATE_DIV_8 (7 << 8) -#define ISI_CFG1_FRATE_DIV_MASK (7 << 8) -#define ISI_CFG1_DISCR (1 << 11) -#define ISI_CFG1_FULL_MODE (1 << 12) -/* Definition for THMASK(ISI_V2) */ -#define ISI_CFG1_THMASK_BEATS_4 (0 << 13) -#define ISI_CFG1_THMASK_BEATS_8 (1 << 13) -#define ISI_CFG1_THMASK_BEATS_16 (2 << 13) - -/* Bitfields in CFG2 */ -#define ISI_CFG2_GRAYSCALE (1 << 13) -/* Constants for YCC_SWAP(ISI_V2) */ -#define ISI_CFG2_YCC_SWAP_DEFAULT (0 << 28) -#define ISI_CFG2_YCC_SWAP_MODE_1 (1 << 28) -#define ISI_CFG2_YCC_SWAP_MODE_2 (2 << 28) -#define ISI_CFG2_YCC_SWAP_MODE_3 (3 << 28) -#define ISI_CFG2_YCC_SWAP_MODE_MASK (3 << 28) -#define ISI_CFG2_IM_VSIZE_OFFSET 0 -#define ISI_CFG2_IM_HSIZE_OFFSET 16 -#define ISI_CFG2_IM_VSIZE_MASK (0x7FF << ISI_CFG2_IM_VSIZE_OFFSET) -#define ISI_CFG2_IM_HSIZE_MASK (0x7FF << ISI_CFG2_IM_HSIZE_OFFSET) - -/* Bitfields in CTRL */ -/* Also using in SR(ISI_V2) */ -#define ISI_CTRL_EN (1 << 0) -#define ISI_CTRL_CDC (1 << 8) -/* Also using in SR/IER/IDR/IMR(ISI_V2) */ -#define ISI_CTRL_DIS (1 << 1) -#define ISI_CTRL_SRST (1 << 2) - -/* Bitfields in SR */ -#define ISI_SR_SIP (1 << 19) -/* Also using in SR/IER/IDR/IMR */ -#define ISI_SR_VSYNC (1 << 10) -#define ISI_SR_PXFR_DONE (1 << 16) -#define ISI_SR_CXFR_DONE (1 << 17) -#define ISI_SR_P_OVR (1 << 24) -#define ISI_SR_C_OVR (1 << 25) -#define ISI_SR_CRC_ERR (1 << 26) -#define ISI_SR_FR_OVR (1 << 27) - -/* Bitfields in DMA_C_CTRL & in DMA_P_CTRL */ -#define ISI_DMA_CTRL_FETCH (1 << 0) -#define ISI_DMA_CTRL_WB (1 << 1) -#define ISI_DMA_CTRL_IEN (1 << 2) -#define ISI_DMA_CTRL_DONE (1 << 3) - -/* Bitfields in DMA_CHSR/CHER/CHDR */ -#define ISI_DMA_CHSR_P_CH (1 << 0) -#define ISI_DMA_CHSR_C_CH (1 << 1) - -/* Definition for isi_platform_data */ -#define ISI_DATAWIDTH_8 0x01 -#define ISI_DATAWIDTH_10 0x02 - -struct v4l2_async_subdev; - -struct isi_platform_data { - u8 has_emb_sync; - u8 emb_crc_sync; - u8 hsync_act_low; - u8 vsync_act_low; - u8 pclk_act_falling; - u8 full_mode; - u32 data_width_flags; - /* Using for ISI_CFG1 */ - u32 frate; - /* Using for ISI_MCK */ - u32 mck_hz; - struct v4l2_async_subdev **asd; /* Flat array, arranged in groups */ - int *asd_sizes; /* 0-terminated array of asd group 
sizes */ -}; - -#endif /* __ATMEL_ISI_H__ */ diff --git a/include/media/davinci/vpbe_display.h b/include/media/davinci/vpbe_display.h index fa0247ad815f..e14a9370b67e 100644 --- a/include/media/davinci/vpbe_display.h +++ b/include/media/davinci/vpbe_display.h @@ -17,6 +17,7 @@ #include <linux/videodev2.h> #include <media/v4l2-common.h> #include <media/v4l2-fh.h> +#include <media/videobuf2-v4l2.h> #include <media/videobuf2-dma-contig.h> #include <media/davinci/vpbe_types.h> #include <media/davinci/vpbe_osd.h> @@ -64,7 +65,7 @@ struct display_layer_info { }; struct vpbe_disp_buffer { - struct vb2_buffer vb; + struct vb2_v4l2_buffer vb; struct list_head list; }; diff --git a/include/media/lirc_dev.h b/include/media/lirc_dev.h index 05e7ad5d2c8b..0ab59a571fee 100644 --- a/include/media/lirc_dev.h +++ b/include/media/lirc_dev.h @@ -118,6 +118,71 @@ static inline unsigned int lirc_buffer_write(struct lirc_buffer *buf, return ret; } +/** + * struct lirc_driver - Defines the parameters on a LIRC driver + * + * @name: this string will be used for logs + * + * @minor: indicates minor device (/dev/lirc) number for + * registered driver if caller fills it with negative + * value, then the first free minor number will be used + * (if available). + * + * @code_length: length of the remote control key code expressed in bits. + * + * @buffer_size: Number of FIFO buffers with @chunk_size size. If zero, + * creates a buffer with BUFLEN size (16 bytes). + * + * @sample_rate: if zero, the device will wait for an event with a new + * code to be parsed. Otherwise, specifies the sample + * rate for polling. Value should be between 0 + * and HZ. If equal to HZ, it would mean one polling per + * second. + * + * @features: lirc compatible hardware features, like LIRC_MODE_RAW, + * LIRC_CAN_*, as defined at include/media/lirc.h. + * + * @chunk_size: Size of each FIFO buffer. + * + * @data: it may point to any driver data and this pointer will + * be passed to all callback functions. + * + * @min_timeout: Minimum timeout for record. Valid only if + * LIRC_CAN_SET_REC_TIMEOUT is defined. + * + * @max_timeout: Maximum timeout for record. Valid only if + * LIRC_CAN_SET_REC_TIMEOUT is defined. + * + * @add_to_buf: add_to_buf will be called after specified period of the + * time or triggered by the external event, this behavior + * depends on value of the sample_rate this function will + * be called in user context. This routine should return + * 0 if data was added to the buffer and -ENODATA if none + * was available. This should add some number of bits + * evenly divisible by code_length to the buffer. + * + * @rbuf: if not NULL, it will be used as a read buffer, you will + * have to write to the buffer by other means, like irq's + * (see also lirc_serial.c). + * + * @set_use_inc: set_use_inc will be called after device is opened + * + * @set_use_dec: set_use_dec will be called after device is closed + * + * @rdev: Pointed to struct rc_dev associated with the LIRC + * device. + * + * @fops: file_operations for drivers which don't fit the current + * driver model. + * Some ioctl's can be directly handled by lirc_dev if the + * driver's ioctl function is NULL or if it returns + * -ENOIOCTLCMD (see also lirc_serial.c). + * + * @dev: pointer to the struct device associated with the LIRC + * device. 
+ * + * @owner: the module owning this struct + */ struct lirc_driver { char name[40]; int minor; @@ -131,65 +196,16 @@ struct lirc_driver { void *data; int min_timeout; int max_timeout; - int (*add_to_buf) (void *data, struct lirc_buffer *buf); + int (*add_to_buf)(void *data, struct lirc_buffer *buf); struct lirc_buffer *rbuf; - int (*set_use_inc) (void *data); - void (*set_use_dec) (void *data); + int (*set_use_inc)(void *data); + void (*set_use_dec)(void *data); struct rc_dev *rdev; const struct file_operations *fops; struct device *dev; struct module *owner; }; -/* name: - * this string will be used for logs - * - * minor: - * indicates minor device (/dev/lirc) number for registered driver - * if caller fills it with negative value, then the first free minor - * number will be used (if available) - * - * code_length: - * length of the remote control key code expressed in bits - * - * sample_rate: - * - * data: - * it may point to any driver data and this pointer will be passed to - * all callback functions - * - * add_to_buf: - * add_to_buf will be called after specified period of the time or - * triggered by the external event, this behavior depends on value of - * the sample_rate this function will be called in user context. This - * routine should return 0 if data was added to the buffer and - * -ENODATA if none was available. This should add some number of bits - * evenly divisible by code_length to the buffer - * - * rbuf: - * if not NULL, it will be used as a read buffer, you will have to - * write to the buffer by other means, like irq's (see also - * lirc_serial.c). - * - * set_use_inc: - * set_use_inc will be called after device is opened - * - * set_use_dec: - * set_use_dec will be called after device is closed - * - * fops: - * file_operations for drivers which don't fit the current driver model. - * - * Some ioctl's can be directly handled by lirc_dev if the driver's - * ioctl function is NULL or if it returns -ENOIOCTLCMD (see also - * lirc_serial.c). - * - * owner: - * the module owning this struct - * - */ - - /* following functions can be called ONLY from user context * * returns negative value on error or minor number diff --git a/include/media/media-entity.h b/include/media/media-entity.h index 0c003d817493..197f93799753 100644 --- a/include/media/media-entity.h +++ b/include/media/media-entity.h @@ -116,6 +116,13 @@ static inline u32 media_entity_subtype(struct media_entity *entity) #define MEDIA_ENTITY_ENUM_MAX_DEPTH 16 #define MEDIA_ENTITY_ENUM_MAX_ID 64 +/* + * The number of pads can't be bigger than the number of entities, + * as the worse-case scenario is to have one entity linked up to + * MEDIA_ENTITY_ENUM_MAX_ID - 1 entities. 
+ */ +#define MEDIA_ENTITY_MAX_PADS (MEDIA_ENTITY_ENUM_MAX_ID - 1) + struct media_entity_graph { struct { struct media_entity *entity; diff --git a/include/media/soc_camera.h b/include/media/soc_camera.h index 2f6261f3e570..97aa13314bfd 100644 --- a/include/media/soc_camera.h +++ b/include/media/soc_camera.h @@ -18,7 +18,7 @@ #include <linux/pm.h> #include <linux/videodev2.h> #include <media/videobuf-core.h> -#include <media/videobuf2-core.h> +#include <media/videobuf2-v4l2.h> #include <media/v4l2-async.h> #include <media/v4l2-ctrls.h> #include <media/v4l2-device.h> diff --git a/include/media/tuner-types.h b/include/media/tuner-types.h index ab03c5344209..094e112cc325 100644 --- a/include/media/tuner-types.h +++ b/include/media/tuner-types.h @@ -5,6 +5,15 @@ #ifndef __TUNER_TYPES_H__ #define __TUNER_TYPES_H__ +/** + * enum param_type - type of the tuner pameters + * + * @TUNER_PARAM_TYPE_RADIO: Tuner params are for FM and/or AM radio + * @TUNER_PARAM_TYPE_PAL: Tuner params are for PAL color TV standard + * @TUNER_PARAM_TYPE_SECAM: Tuner params are for SECAM color TV standard + * @TUNER_PARAM_TYPE_NTSC: Tuner params are for NTSC color TV standard + * @TUNER_PARAM_TYPE_DIGITAL: Tuner params are for digital TV + */ enum param_type { TUNER_PARAM_TYPE_RADIO, TUNER_PARAM_TYPE_PAL, @@ -13,97 +22,142 @@ enum param_type { TUNER_PARAM_TYPE_DIGITAL, }; +/** + * struct tuner_range - define the frequencies supported by the tuner + * + * @limit: Max frequency supported by that range, in 62.5 kHz + * (TV) or 62.5 Hz (Radio), as defined by + * V4L2_TUNER_CAP_LOW. + * @config: Value of the band switch byte (BB) to setup this mode. + * @cb: Value of the CB byte to setup this mode. + * + * Please notice that digital tuners like xc3028/xc4000/xc5000 don't use + * those ranges, as they're defined inside the driver. This is used by + * analog tuners that are compatible with the "Philips way" to setup the + * tuners. On those devices, the tuner set is done via 4 bytes: + * divider byte1 (DB1), divider byte 2 (DB2), Control byte (CB) and + * band switch byte (BB). + * Some tuners also have an additional optional Auxiliary byte (AB). + */ struct tuner_range { unsigned short limit; unsigned char config; unsigned char cb; }; +/** + * struct tuner_params - Parameters to be used to setup the tuner. Those + * are used by drivers/media/tuners/tuner-types.c in + * order to specify the tuner properties. Most of + * the parameters are for tuners based on tda9887 IF-PLL + * multi-standard analog TV/Radio demodulator, with is + * very common on legacy analog tuners. + * + * @type: Type of the tuner parameters, as defined at + * enum param_type. If the tuner supports multiple + * standards, an array should be used, with one + * row per different standard. + * @cb_first_if_lower_freq: Many Philips-based tuners have a comment in + * their datasheet like + * "For channel selection involving band + * switching, and to ensure smooth tuning to the + * desired channel without causing unnecessary + * charge pump action, it is recommended to + * consider the difference between wanted channel + * frequency and the current channel frequency. + * Unnecessary charge pump action will result + * in very low tuning voltage which may drive the + * oscillator to extreme conditions". + * Set cb_first_if_lower_freq to 1, if this check + * is required for this tuner. I tested this for + * PAL by first setting the TV frequency to + * 203 MHz and then switching to 96.6 MHz FM + * radio. 
The result was static unless the + * control byte was sent first. + * @has_tda9887: Set to 1 if this tuner uses a tda9887 + * @port1_fm_high_sensitivity: Many Philips tuners use tda9887 PORT1 to select + * the FM radio sensitivity. If this setting is 1, + * then set PORT1 to 1 to get proper FM reception. + * @port2_fm_high_sensitivity: Some Philips tuners use tda9887 PORT2 to select + * the FM radio sensitivity. If this setting is 1, + * then set PORT2 to 1 to get proper FM reception. + * @fm_gain_normal: Some Philips tuners use tda9887 cGainNormal to + * select the FM radio sensitivity. If this + * setting is 1, e register will use cGainNormal + * instead of cGainLow. + * @intercarrier_mode: Most tuners with a tda9887 use QSS mode. + * Some (cheaper) tuners use Intercarrier mode. + * If this setting is 1, then the tuner needs to + * be set to intercarrier mode. + * @port1_active: This setting sets the default value for PORT1. + * 0 means inactive, 1 means active. Note: the + * actual bit value written to the tda9887 is + * inverted. So a 0 here means a 1 in the B6 bit. + * @port2_active: This setting sets the default value for PORT2. + * 0 means inactive, 1 means active. Note: the + * actual bit value written to the tda9887 is + * inverted. So a 0 here means a 1 in the B7 bit. + * @port1_invert_for_secam_lc: Sometimes PORT1 is inverted when the SECAM-L' + * standard is selected. Set this bit to 1 if this + * is needed. + * @port2_invert_for_secam_lc: Sometimes PORT2 is inverted when the SECAM-L' + * standard is selected. Set this bit to 1 if this + * is needed. + * @port1_set_for_fm_mono: Some cards require PORT1 to be 1 for mono Radio + * FM and 0 for stereo. + * @default_pll_gating_18: Select 18% (or according to datasheet 0%) + * L standard PLL gating, vs the driver default + * of 36%. + * @radio_if: IF to use in radio mode. Tuners with a + * separate radio IF filter seem to use 10.7, + * while those without use 33.3 for PAL/SECAM + * tuners and 41.3 for NTSC tuners. + * 0 = 10.7, 1 = 33.3, 2 = 41.3 + * @default_top_low: Default tda9887 TOP value in dB for the low + * band. Default is 0. Range: -16:+15 + * @default_top_mid: Default tda9887 TOP value in dB for the mid + * band. Default is 0. Range: -16:+15 + * @default_top_high: Default tda9887 TOP value in dB for the high + * band. Default is 0. Range: -16:+15 + * @default_top_secam_low: Default tda9887 TOP value in dB for SECAM-L/L' + * for the low band. Default is 0. Several tuners + * require a different TOP value for the + * SECAM-L/L' standards. Range: -16:+15 + * @default_top_secam_mid: Default tda9887 TOP value in dB for SECAM-L/L' + * for the mid band. Default is 0. Several tuners + * require a different TOP value for the + * SECAM-L/L' standards. Range: -16:+15 + * @default_top_secam_high: Default tda9887 TOP value in dB for SECAM-L/L' + * for the high band. Default is 0. Several tuners + * require a different TOP value for the + * SECAM-L/L' standards. Range: -16:+15 + * @iffreq: Intermediate frequency (IF) used by the tuner + * on digital mode. + * @count: Size of the ranges array. + * @ranges: Array with the frequency ranges supported by + * the tuner. 
+ */ struct tuner_params { enum param_type type; - /* Many Philips based tuners have a comment like this in their - * datasheet: - * - * For channel selection involving band switching, and to ensure - * smooth tuning to the desired channel without causing - * unnecessary charge pump action, it is recommended to consider - * the difference between wanted channel frequency and the - * current channel frequency. Unnecessary charge pump action - * will result in very low tuning voltage which may drive the - * oscillator to extreme conditions. - * - * Set cb_first_if_lower_freq to 1, if this check is - * required for this tuner. - * - * I tested this for PAL by first setting the TV frequency to - * 203 MHz and then switching to 96.6 MHz FM radio. The result was - * static unless the control byte was sent first. - */ unsigned int cb_first_if_lower_freq:1; - /* Set to 1 if this tuner uses a tda9887 */ unsigned int has_tda9887:1; - /* Many Philips tuners use tda9887 PORT1 to select the FM radio - sensitivity. If this setting is 1, then set PORT1 to 1 to - get proper FM reception. */ unsigned int port1_fm_high_sensitivity:1; - /* Some Philips tuners use tda9887 PORT2 to select the FM radio - sensitivity. If this setting is 1, then set PORT2 to 1 to - get proper FM reception. */ unsigned int port2_fm_high_sensitivity:1; - /* Some Philips tuners use tda9887 cGainNormal to select the FM radio - sensitivity. If this setting is 1, e register will use cGainNormal - instead of cGainLow. */ unsigned int fm_gain_normal:1; - /* Most tuners with a tda9887 use QSS mode. Some (cheaper) tuners - use Intercarrier mode. If this setting is 1, then the tuner - needs to be set to intercarrier mode. */ unsigned int intercarrier_mode:1; - /* This setting sets the default value for PORT1. - 0 means inactive, 1 means active. Note: the actual bit - value written to the tda9887 is inverted. So a 0 here - means a 1 in the B6 bit. */ unsigned int port1_active:1; - /* This setting sets the default value for PORT2. - 0 means inactive, 1 means active. Note: the actual bit - value written to the tda9887 is inverted. So a 0 here - means a 1 in the B7 bit. */ unsigned int port2_active:1; - /* Sometimes PORT1 is inverted when the SECAM-L' standard is selected. - Set this bit to 1 if this is needed. */ unsigned int port1_invert_for_secam_lc:1; - /* Sometimes PORT2 is inverted when the SECAM-L' standard is selected. - Set this bit to 1 if this is needed. */ unsigned int port2_invert_for_secam_lc:1; - /* Some cards require PORT1 to be 1 for mono Radio FM and 0 for stereo. */ unsigned int port1_set_for_fm_mono:1; - /* Select 18% (or according to datasheet 0%) L standard PLL gating, - vs the driver default of 36%. */ unsigned int default_pll_gating_18:1; - /* IF to use in radio mode. Tuners with a separate radio IF filter - seem to use 10.7, while those without use 33.3 for PAL/SECAM tuners - and 41.3 for NTSC tuners. 0 = 10.7, 1 = 33.3, 2 = 41.3 */ unsigned int radio_if:2; - /* Default tda9887 TOP value in dB for the low band. Default is 0. - Range: -16:+15 */ signed int default_top_low:5; - /* Default tda9887 TOP value in dB for the mid band. Default is 0. - Range: -16:+15 */ signed int default_top_mid:5; - /* Default tda9887 TOP value in dB for the high band. Default is 0. - Range: -16:+15 */ signed int default_top_high:5; - /* Default tda9887 TOP value in dB for SECAM-L/L' for the low band. - Default is 0. Several tuners require a different TOP value for - the SECAM-L/L' standards. 
Range: -16:+15 */ signed int default_top_secam_low:5; - /* Default tda9887 TOP value in dB for SECAM-L/L' for the mid band. - Default is 0. Several tuners require a different TOP value for - the SECAM-L/L' standards. Range: -16:+15 */ signed int default_top_secam_mid:5; - /* Default tda9887 TOP value in dB for SECAM-L/L' for the high band. - Default is 0. Several tuners require a different TOP value for - the SECAM-L/L' standards. Range: -16:+15 */ signed int default_top_secam_high:5; u16 iffreq; diff --git a/include/media/tuner.h b/include/media/tuner.h index b46ebb48fe74..486b6a54363b 100644 --- a/include/media/tuner.h +++ b/include/media/tuner.h @@ -1,23 +1,19 @@ /* - tuner.h - definition for different tuners - - Copyright (C) 1997 Markus Schroeder (schroedm@uni-duesseldorf.de) - minor modifications by Ralph Metzler (rjkm@thp.uni-koeln.de) - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. -*/ + * tuner.h - definition for different tuners + * + * Copyright (C) 1997 Markus Schroeder (schroedm@uni-duesseldorf.de) + * minor modifications by Ralph Metzler (rjkm@thp.uni-koeln.de) + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ #ifndef _TUNER_H #define _TUNER_H @@ -83,8 +79,11 @@ #define TUNER_PHILIPS_FM1236_MK3 43 #define TUNER_PHILIPS_4IN1 44 /* ATI TV Wonder Pro - Conexant */ -/* Microtune merged with Temic 12/31/1999 partially financed by Alps - these may be similar to Temic */ -#define TUNER_MICROTUNE_4049FM5 45 + /* + * Microtune merged with Temic 12/31/1999 partially financed by Alps. 
+ * these may be similar to Temic + */ +#define TUNER_MICROTUNE_4049FM5 45 #define TUNER_PANASONIC_VP27 46 #define TUNER_LG_NTSC_TAPE 47 @@ -115,11 +114,11 @@ #define TUNER_PHILIPS_TUV1236D 68 /* ATI HDTV Wonder */ #define TUNER_TNF_5335MF 69 /* Sabrent Bt848 */ -#define TUNER_SAMSUNG_TCPN_2121P30A 70 /* Hauppauge PVR-500MCE NTSC */ +#define TUNER_SAMSUNG_TCPN_2121P30A 70 /* Hauppauge PVR-500MCE NTSC */ #define TUNER_XC2028 71 #define TUNER_THOMSON_FE6600 72 /* DViCO FusionHDTV DVB-T Hybrid */ -#define TUNER_SAMSUNG_TCPG_6121P30A 73 /* Hauppauge PVR-500 PAL */ +#define TUNER_SAMSUNG_TCPG_6121P30A 73 /* Hauppauge PVR-500 PAL */ #define TUNER_TDA9887 74 /* This tuner should be used only internally */ #define TUNER_TEA5761 75 /* Only FM Radio Tuner */ #define TUNER_XC5000 76 /* Xceive Silicon Tuner */ @@ -143,57 +142,92 @@ #define TUNER_SONY_BTF_PB463Z 91 /* NTSC */ /* tv card specific */ -#define TDA9887_PRESENT (1<<0) -#define TDA9887_PORT1_INACTIVE (1<<1) -#define TDA9887_PORT2_INACTIVE (1<<2) -#define TDA9887_QSS (1<<3) -#define TDA9887_INTERCARRIER (1<<4) -#define TDA9887_PORT1_ACTIVE (1<<5) -#define TDA9887_PORT2_ACTIVE (1<<6) -#define TDA9887_INTERCARRIER_NTSC (1<<7) +#define TDA9887_PRESENT (1<<0) +#define TDA9887_PORT1_INACTIVE (1<<1) +#define TDA9887_PORT2_INACTIVE (1<<2) +#define TDA9887_QSS (1<<3) +#define TDA9887_INTERCARRIER (1<<4) +#define TDA9887_PORT1_ACTIVE (1<<5) +#define TDA9887_PORT2_ACTIVE (1<<6) +#define TDA9887_INTERCARRIER_NTSC (1<<7) /* Tuner takeover point adjustment, in dB, -16 <= top <= 15 */ -#define TDA9887_TOP_MASK (0x3f << 8) -#define TDA9887_TOP_SET (1 << 13) -#define TDA9887_TOP(top) (TDA9887_TOP_SET | (((16 + (top)) & 0x1f) << 8)) +#define TDA9887_TOP_MASK (0x3f << 8) +#define TDA9887_TOP_SET (1 << 13) +#define TDA9887_TOP(top) (TDA9887_TOP_SET | \ + (((16 + (top)) & 0x1f) << 8)) /* config options */ -#define TDA9887_DEEMPHASIS_MASK (3<<16) -#define TDA9887_DEEMPHASIS_NONE (1<<16) -#define TDA9887_DEEMPHASIS_50 (2<<16) -#define TDA9887_DEEMPHASIS_75 (3<<16) -#define TDA9887_AUTOMUTE (1<<18) +#define TDA9887_DEEMPHASIS_MASK (3<<16) +#define TDA9887_DEEMPHASIS_NONE (1<<16) +#define TDA9887_DEEMPHASIS_50 (2<<16) +#define TDA9887_DEEMPHASIS_75 (3<<16) +#define TDA9887_AUTOMUTE (1<<18) #define TDA9887_GATING_18 (1<<19) #define TDA9887_GAIN_NORMAL (1<<20) #define TDA9887_RIF_41_3 (1<<21) /* radio IF1 41.3 vs 33.3 */ +/** + * enum tuner_mode - Mode of the tuner + * + * @T_RADIO: Tuner core will work in radio mode + * @T_ANALOG_TV: Tuner core will work in analog TV mode + * + * Older boards only had a single tuner device, but some devices have a + * separate tuner for radio. In any case, the tuner-core needs to know if + * the tuner chip(s) will be used in radio mode or analog TV mode, as, on + * radio mode, frequencies are specified on a different range than on TV + * mode. This enum is used by the tuner core in order to work with the + * proper tuner range and eventually use a different tuner chip while in + * radio mode. + */ enum tuner_mode { T_RADIO = 1 << V4L2_TUNER_RADIO, T_ANALOG_TV = 1 << V4L2_TUNER_ANALOG_TV, - /* Don't need to map V4L2_TUNER_DIGITAL_TV, as tuner-core won't use it */ + /* Don't map V4L2_TUNER_DIGITAL_TV, as tuner-core won't use it */ }; -/* Older boards only had a single tuner device. Nowadays multiple tuner - devices may be present on a single board. Using TUNER_SET_TYPE_ADDR - to pass the tuner_setup structure it is possible to setup each tuner - device in turn. 
- - Since multiple devices may be present it is no longer sufficient to - send a command to a single i2c device. Instead you should broadcast - the command to all i2c devices. - - By setting the mode_mask correctly you can select which commands are - accepted by a specific tuner device. For example, set mode_mask to - T_RADIO if the device is a radio-only tuner. That specific tuner will - only accept commands when the tuner is in radio mode and ignore them - when the tuner is set to TV mode. +/** + * struct tuner_setup - setup the tuner chipsets + * + * @addr: I2C address used to control the tuner device/chipset + * @type: Type of the tuner, as defined at the TUNER_* macros. + * Each different tuner model should have an unique + * identifier. + * @mode_mask: Mask with the allowed tuner modes: V4L2_TUNER_RADIO, + * V4L2_TUNER_ANALOG_TV and/or V4L2_TUNER_DIGITAL_TV, + * describing if the tuner should be used to support + * Radio, analog TV and/or digital TV. + * @config: Used to send tuner-specific configuration for complex + * tuners that require extra parameters to be set. + * Only a very few tuners require it and its usage on + * newer tuners should be avoided. + * @tuner_callback: Some tuners require to call back the bridge driver, + * in order to do some tasks like rising a GPIO at the + * bridge chipset, in order to do things like resetting + * the device. + * + * Older boards only had a single tuner device. Nowadays multiple tuner + * devices may be present on a single board. Using TUNER_SET_TYPE_ADDR + * to pass the tuner_setup structure it is possible to setup each tuner + * device in turn. + * + * Since multiple devices may be present it is no longer sufficient to + * send a command to a single i2c device. Instead you should broadcast + * the command to all i2c devices. + * + * By setting the mode_mask correctly you can select which commands are + * accepted by a specific tuner device. For example, set mode_mask to + * T_RADIO if the device is a radio-only tuner. That specific tuner will + * only accept commands when the tuner is in radio mode and ignore them + * when the tuner is set to TV mode. */ struct tuner_setup { - unsigned short addr; /* I2C address */ - unsigned int type; /* Tuner type */ - unsigned int mode_mask; /* Allowed tuner modes */ - void *config; /* configuraion for more complex tuners */ - int (*tuner_callback) (void *dev, int component, int cmd, int arg); + unsigned short addr; + unsigned int type; + unsigned int mode_mask; + void *config; + int (*tuner_callback)(void *dev, int component, int cmd, int arg); }; #endif /* __KERNEL__ */ diff --git a/include/media/tveeprom.h b/include/media/tveeprom.h index f7119ee3977b..8be898739e0c 100644 --- a/include/media/tveeprom.h +++ b/include/media/tveeprom.h @@ -1,28 +1,63 @@ + /* + * tveeprom - Contains structures and functions to work with Hauppauge + * eeproms. */ +#include <linux/if_ether.h> + +/** + * enum tveeprom_audio_processor - Specifies the type of audio processor + * used on a Hauppauge device. 
+ * + * @TVEEPROM_AUDPROC_NONE: No audio processor present + * @TVEEPROM_AUDPROC_INTERNAL: The audio processor is internal to the + * video processor + * @TVEEPROM_AUDPROC_MSP: The audio processor is a MSPXXXX device + * @TVEEPROM_AUDPROC_OTHER: The audio processor is another device + */ enum tveeprom_audio_processor { - /* No audio processor present */ TVEEPROM_AUDPROC_NONE, - /* The audio processor is internal to the video processor */ TVEEPROM_AUDPROC_INTERNAL, - /* The audio processor is a MSPXXXX device */ TVEEPROM_AUDPROC_MSP, - /* The audio processor is another device */ TVEEPROM_AUDPROC_OTHER, }; -#include <linux/if_ether.h> - +/** + * struct tveeprom - Contains the fields parsed from Hauppauge eeproms + * + * @has_radio: 1 if the device has radio; 0 otherwise. + * @has_ir: If has_ir == 0, then it is unknown what the IR + * capabilities are. Otherwise: + * bit 0) 1 (= IR capabilities are known); + * bit 1) IR receiver present; + * bit 2) IR transmitter (blaster) present. + * @has_MAC_address: 0: no MAC, 1: MAC present, 2: unknown. + * @tuner_type: type of the tuner (TUNER_*, as defined at + * include/media/tuner.h). + * @tuner_formats: Supported analog TV standards (V4L2_STD_*). + * @tuner_hauppauge_model: Hauppauge's code for the device model number. + * @tuner2_type: type of the second tuner (TUNER_*, as defined + * at include/media/tuner.h). + * @tuner2_formats: Tuner 2 supported analog TV standards + * (V4L2_STD_*). + * @tuner2_hauppauge_model: tuner 2 Hauppauge's code for the device model + * number. + * @audio_processor: analog audio decoder, as defined by enum + * tveeprom_audio_processor. + * @decoder_processor: Hauppauge's code for the decoder chipset. + * Unused by the drivers, as they probe the + * decoder based on the PCI or USB ID. + * @model: Hauppauge's model number + * @revision: Card revision number + * @serial_number: Card's serial number + * @rev_str: Card revision converted to number + * @MAC_address: MAC address for the network interface + */ struct tveeprom { u32 has_radio; - /* If has_ir == 0, then it is unknown what the IR capabilities are, - otherwise: - bit 0: 1 (= IR capabilities are known) - bit 1: IR receiver present - bit 2: IR transmitter (blaster) present */ u32 has_ir; - u32 has_MAC_address; /* 0: no MAC, 1: MAC present, 2: unknown */ + u32 has_MAC_address; u32 tuner_type; u32 tuner_formats; @@ -32,9 +67,6 @@ struct tveeprom { u32 tuner2_formats; u32 tuner2_hauppauge_model; - u32 digitizer; - u32 digitizer_formats; - u32 audio_processor; u32 decoder_processor; @@ -45,7 +77,28 @@ struct tveeprom { u8 MAC_address[ETH_ALEN]; }; +/** + * tveeprom_hauppauge_analog - Fill struct tveeprom using the contents + * of the eeprom previously filled at + * @eeprom_data field. + * + * @c: I2C client struct + * @tvee: Struct to where the eeprom parsed data will be filled; + * @eeprom_data: Array with the contents of the eeprom_data. It should + * contain 256 bytes filled with the contents of the + * eeprom read from the Hauppauge device. + */ void tveeprom_hauppauge_analog(struct i2c_client *c, struct tveeprom *tvee, unsigned char *eeprom_data); +/** + * tveeprom_read - Reads the contents of the eeprom found at the Hauppauge + * devices. + * + * @c: I2C client struct + * @eedata: Array where the eeprom content will be stored. + * @len: Size of @eedata array. If the eeprom content will be latter + * be parsed by tveeprom_hauppauge_analog(), len should be, at + * least, 256. 
+ */ int tveeprom_read(struct i2c_client *c, unsigned char *eedata, int len); diff --git a/include/media/v4l2-dv-timings.h b/include/media/v4l2-dv-timings.h index b6130b50a0f1..a209526b6014 100644 --- a/include/media/v4l2-dv-timings.h +++ b/include/media/v4l2-dv-timings.h @@ -23,7 +23,7 @@ #include <linux/videodev2.h> -/** +/* * v4l2_dv_timings_presets: list of all dv_timings presets. */ extern const struct v4l2_dv_timings v4l2_dv_timings_presets[]; @@ -127,16 +127,16 @@ void v4l2_print_dv_timings(const char *dev_prefix, const char *prefix, /** * v4l2_detect_cvt - detect if the given timings follow the CVT standard * - * @frame_height - the total height of the frame (including blanking) in lines. - * @hfreq - the horizontal frequency in Hz. - * @vsync - the height of the vertical sync in lines. - * @active_width - active width of image (does not include blanking). This + * @frame_height: the total height of the frame (including blanking) in lines. + * @hfreq: the horizontal frequency in Hz. + * @vsync: the height of the vertical sync in lines. + * @active_width: active width of image (does not include blanking). This * information is needed only in case of version 2 of reduced blanking. * In other cases, this parameter does not have any effect on timings. - * @polarities - the horizontal and vertical polarities (same as struct + * @polarities: the horizontal and vertical polarities (same as struct * v4l2_bt_timings polarities). - * @interlaced - if this flag is true, it indicates interlaced format - * @fmt - the resulting timings. + * @interlaced: if this flag is true, it indicates interlaced format + * @fmt: the resulting timings. * * This function will attempt to detect if the given values correspond to a * valid CVT format. If so, then it will return true, and fmt will be filled @@ -149,18 +149,18 @@ bool v4l2_detect_cvt(unsigned frame_height, unsigned hfreq, unsigned vsync, /** * v4l2_detect_gtf - detect if the given timings follow the GTF standard * - * @frame_height - the total height of the frame (including blanking) in lines. - * @hfreq - the horizontal frequency in Hz. - * @vsync - the height of the vertical sync in lines. - * @polarities - the horizontal and vertical polarities (same as struct + * @frame_height: the total height of the frame (including blanking) in lines. + * @hfreq: the horizontal frequency in Hz. + * @vsync: the height of the vertical sync in lines. + * @polarities: the horizontal and vertical polarities (same as struct * v4l2_bt_timings polarities). - * @interlaced - if this flag is true, it indicates interlaced format - * @aspect - preferred aspect ratio. GTF has no method of determining the + * @interlaced: if this flag is true, it indicates interlaced format + * @aspect: preferred aspect ratio. GTF has no method of determining the * aspect ratio in order to derive the image width from the * image height, so it has to be passed explicitly. Usually * the native screen aspect ratio is used for this. If it * is not filled in correctly, then 16:9 will be assumed. - * @fmt - the resulting timings. + * @fmt: the resulting timings. * * This function will attempt to detect if the given values correspond to a * valid GTF format. If so, then it will return true, and fmt will be filled @@ -174,8 +174,8 @@ bool v4l2_detect_gtf(unsigned frame_height, unsigned hfreq, unsigned vsync, * v4l2_calc_aspect_ratio - calculate the aspect ratio based on bytes * 0x15 and 0x16 from the EDID. * - * @hor_landscape - byte 0x15 from the EDID. 
- * @vert_portrait - byte 0x16 from the EDID. + * @hor_landscape: byte 0x15 from the EDID. + * @vert_portrait: byte 0x16 from the EDID. * * Determines the aspect ratio from the EDID. * See VESA Enhanced EDID standard, release A, rev 2, section 3.6.2: diff --git a/include/media/v4l2-ioctl.h b/include/media/v4l2-ioctl.h index 8fbbd76d78e8..017ffb2220c7 100644 --- a/include/media/v4l2-ioctl.h +++ b/include/media/v4l2-ioctl.h @@ -36,6 +36,8 @@ struct v4l2_ioctl_ops { struct v4l2_fmtdesc *f); int (*vidioc_enum_fmt_sdr_cap) (struct file *file, void *fh, struct v4l2_fmtdesc *f); + int (*vidioc_enum_fmt_sdr_out) (struct file *file, void *fh, + struct v4l2_fmtdesc *f); /* VIDIOC_G_FMT handlers */ int (*vidioc_g_fmt_vid_cap) (struct file *file, void *fh, @@ -60,6 +62,8 @@ struct v4l2_ioctl_ops { struct v4l2_format *f); int (*vidioc_g_fmt_sdr_cap) (struct file *file, void *fh, struct v4l2_format *f); + int (*vidioc_g_fmt_sdr_out) (struct file *file, void *fh, + struct v4l2_format *f); /* VIDIOC_S_FMT handlers */ int (*vidioc_s_fmt_vid_cap) (struct file *file, void *fh, @@ -84,6 +88,8 @@ struct v4l2_ioctl_ops { struct v4l2_format *f); int (*vidioc_s_fmt_sdr_cap) (struct file *file, void *fh, struct v4l2_format *f); + int (*vidioc_s_fmt_sdr_out) (struct file *file, void *fh, + struct v4l2_format *f); /* VIDIOC_TRY_FMT handlers */ int (*vidioc_try_fmt_vid_cap) (struct file *file, void *fh, @@ -108,6 +114,8 @@ struct v4l2_ioctl_ops { struct v4l2_format *f); int (*vidioc_try_fmt_sdr_cap) (struct file *file, void *fh, struct v4l2_format *f); + int (*vidioc_try_fmt_sdr_out) (struct file *file, void *fh, + struct v4l2_format *f); /* Buffer handlers */ int (*vidioc_reqbufs) (struct file *file, void *fh, struct v4l2_requestbuffers *b); diff --git a/include/media/v4l2-mem2mem.h b/include/media/v4l2-mem2mem.h index 8849aaba6aa5..5a9597dd1ee0 100644 --- a/include/media/v4l2-mem2mem.h +++ b/include/media/v4l2-mem2mem.h @@ -17,7 +17,7 @@ #ifndef _MEDIA_V4L2_MEM2MEM_H #define _MEDIA_V4L2_MEM2MEM_H -#include <media/videobuf2-core.h> +#include <media/videobuf2-v4l2.h> /** * struct v4l2_m2m_ops - mem-to-mem device driver callbacks @@ -90,7 +90,7 @@ struct v4l2_m2m_ctx { }; struct v4l2_m2m_buffer { - struct vb2_buffer vb; + struct vb2_v4l2_buffer vb; struct list_head list; }; @@ -105,9 +105,9 @@ void v4l2_m2m_job_finish(struct v4l2_m2m_dev *m2m_dev, struct v4l2_m2m_ctx *m2m_ctx); static inline void -v4l2_m2m_buf_done(struct vb2_buffer *buf, enum vb2_buffer_state state) +v4l2_m2m_buf_done(struct vb2_v4l2_buffer *buf, enum vb2_buffer_state state) { - vb2_buffer_done(buf, state); + vb2_buffer_done(&buf->vb2_buf, state); } int v4l2_m2m_reqbufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, @@ -160,7 +160,8 @@ static inline void v4l2_m2m_set_dst_buffered(struct v4l2_m2m_ctx *m2m_ctx, void v4l2_m2m_ctx_release(struct v4l2_m2m_ctx *m2m_ctx); -void v4l2_m2m_buf_queue(struct v4l2_m2m_ctx *m2m_ctx, struct vb2_buffer *vb); +void v4l2_m2m_buf_queue(struct v4l2_m2m_ctx *m2m_ctx, + struct vb2_v4l2_buffer *vbuf); /** * v4l2_m2m_num_src_bufs_ready() - return the number of source buffers ready for diff --git a/include/media/videobuf2-core.h b/include/media/videobuf2-core.h index 589b56c68400..647ebfe5174f 100644 --- a/include/media/videobuf2-core.h +++ b/include/media/videobuf2-core.h @@ -1,5 +1,5 @@ /* - * videobuf2-core.h - V4L2 driver helper framework + * videobuf2-core.h - Video Buffer 2 Core Framework * * Copyright (C) 2010 Samsung Electronics * @@ -15,9 +15,18 @@ #include <linux/mm_types.h> #include <linux/mutex.h> #include 
<linux/poll.h> -#include <linux/videodev2.h> #include <linux/dma-buf.h> +#define VB2_MAX_FRAME (32) +#define VB2_MAX_PLANES (8) + +enum vb2_memory { + VB2_MEMORY_UNKNOWN = 0, + VB2_MEMORY_MMAP = 1, + VB2_MEMORY_USERPTR = 2, + VB2_MEMORY_DMABUF = 4, +}; + struct vb2_alloc_ctx; struct vb2_fileio_data; struct vb2_threadio_data; @@ -36,6 +45,8 @@ struct vb2_threadio_data; * no other users of this buffer are present); the buf_priv * argument is the allocator private per-buffer structure * previously returned from the alloc callback. + * @get_dmabuf: acquire userspace memory for a hardware operation; used for + * DMABUF memory types. * @get_userptr: acquire userspace memory for a hardware operation; used for * USERPTR memory types; vaddr is the address passed to the * videobuf layer when queuing a video buffer of USERPTR type; @@ -111,10 +122,40 @@ struct vb2_mem_ops { int (*mmap)(void *buf_priv, struct vm_area_struct *vma); }; +/** + * struct vb2_plane - plane information + * @mem_priv: private data with this plane + * @dbuf: dma_buf - shared buffer object + * @dbuf_mapped: flag to show whether dbuf is mapped or not + * @bytesused: number of bytes occupied by data in the plane (payload) + * @length: size of this plane (NOT the payload) in bytes + * @offset: when memory in the associated struct vb2_buffer is + * VB2_MEMORY_MMAP, equals the offset from the start of + * the device memory for this plane (or is a "cookie" that + * should be passed to mmap() called on the video node) + * @userptr: when memory is VB2_MEMORY_USERPTR, a userspace pointer + * pointing to this plane + * @fd: when memory is VB2_MEMORY_DMABUF, a userspace file + * descriptor associated with this plane + * @m: Union with memtype-specific data (@offset, @userptr or + * @fd). + * @data_offset: offset in the plane to the start of data; usually 0, + * unless there is a header in front of the data + * Should contain enough information to be able to cover all the fields + * of struct v4l2_plane at videodev2.h + */ struct vb2_plane { void *mem_priv; struct dma_buf *dbuf; unsigned int dbuf_mapped; + unsigned int bytesused; + unsigned int length; + union { + unsigned int offset; + unsigned long userptr; + int fd; + } m; + unsigned int data_offset; }; /** @@ -163,43 +204,34 @@ struct vb2_queue; /** * struct vb2_buffer - represents a video buffer - * @v4l2_buf: struct v4l2_buffer associated with this buffer; can - * be read by the driver and relevant entries can be - * changed by the driver in case of CAPTURE types - * (such as timestamp) - * @v4l2_planes: struct v4l2_planes associated with this buffer; can - * be read by the driver and relevant entries can be - * changed by the driver in case of CAPTURE types - * (such as bytesused); NOTE that even for single-planar - * types, the v4l2_planes[0] struct should be used - * instead of v4l2_buf for filling bytesused - drivers - * should use the vb2_set_plane_payload() function for that * @vb2_queue: the queue to which this driver belongs + * @index: id number of the buffer + * @type: buffer type + * @memory: the method, in which the actual data is passed * @num_planes: number of planes in the buffer * on an internal driver queue - * @state: current buffer state; do not change - * @queued_entry: entry on the queued buffers list, which holds all - * buffers queued from userspace - * @done_entry: entry on the list that stores all buffers ready to - * be dequeued to userspace * @planes: private per-plane information; do not change */ struct vb2_buffer { - struct v4l2_buffer v4l2_buf; - struct 
v4l2_plane v4l2_planes[VIDEO_MAX_PLANES]; - struct vb2_queue *vb2_queue; - + unsigned int index; + unsigned int type; + unsigned int memory; unsigned int num_planes; - -/* Private: internal use only */ + struct vb2_plane planes[VB2_MAX_PLANES]; + + /* private: internal use only + * + * state: current buffer state; do not change + * queued_entry: entry on the queued buffers list, which holds + * all buffers queued from userspace + * done_entry: entry on the list that stores all buffers ready + * to be dequeued to userspace + */ enum vb2_buffer_state state; struct list_head queued_entry; struct list_head done_entry; - - struct vb2_plane planes[VIDEO_MAX_PLANES]; - #ifdef CONFIG_VIDEO_ADV_DEBUG /* * Counters for how often these buffer-related ops are @@ -312,7 +344,7 @@ struct vb2_buffer { * pre-queued buffers before calling STREAMON. */ struct vb2_ops { - int (*queue_setup)(struct vb2_queue *q, const struct v4l2_format *fmt, + int (*queue_setup)(struct vb2_queue *q, const void *parg, unsigned int *num_buffers, unsigned int *num_planes, unsigned int sizes[], void *alloc_ctxs[]); @@ -330,12 +362,19 @@ struct vb2_ops { void (*buf_queue)(struct vb2_buffer *vb); }; -struct v4l2_fh; +struct vb2_buf_ops { + int (*fill_user_buffer)(struct vb2_buffer *vb, void *pb); + int (*fill_vb2_buffer)(struct vb2_buffer *vb, const void *pb, + struct vb2_plane *planes); + int (*set_timestamp)(struct vb2_buffer *vb, const void *pb); +}; /** * struct vb2_queue - a videobuf queue * - * @type: queue type (see V4L2_BUF_TYPE_* in linux/videodev2.h + * @type: private buffer type whose content is defined by the vb2-core + * caller. For example, for V4L2, it should match + * the V4L2_BUF_TYPE_* in include/uapi/linux/videodev2.h * @io_modes: supported io methods (see vb2_io_modes enum) * @fileio_read_once: report EOF after reading the first buffer * @fileio_write_immediately: queue buffer after each write() call @@ -351,10 +390,13 @@ struct v4l2_fh; * drivers to easily associate an owner filehandle with the queue. * @ops: driver-specific callbacks * @mem_ops: memory allocator specific callbacks + * @buf_ops: callbacks to deliver buffer information + * between user-space and kernel-space * @drv_priv: driver private data * @buf_struct_size: size of the driver-specific buffer structure; * "0" indicates the driver doesn't want to use a custom buffer - * structure type, so sizeof(struct vb2_buffer) will is used + * structure type. for example, sizeof(struct vb2_v4l2_buffer) + * will be used for v4l2. * @timestamp_flags: Timestamp flags; V4L2_BUF_FLAG_TIMESTAMP_* and * V4L2_BUF_FLAG_TSTAMP_SRC_* * @gfp_flags: additional gfp flags used when allocating the buffers. @@ -385,6 +427,8 @@ struct v4l2_fh; * @waiting_for_buffers: used in poll() to check if vb2 is still waiting for * buffers. Only set for capture queues if qbuf has not yet been * called since poll() needs to return POLLERR in that situation. + * @is_multiplanar: set if buffer type is multiplanar + * @is_output: set if buffer type is output * @last_buffer_dequeued: used in poll() and DQBUF to immediately return if the * last decoded buffer was already dequeued. Set for capture queues * when a buffer with the V4L2_BUF_FLAG_LAST is dequeued. 
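With struct vb2_buffer and struct vb2_queue decoupled from V4L2 types, queue_setup now receives the format request as an opaque const void *parg. A sketch of what a single-planar V4L2 driver callback could look like under the new prototype; the fallback size macro and the cast back to struct v4l2_format are assumptions for illustration, not mandated by this header:

    #include <linux/videodev2.h>
    #include <media/videobuf2-core.h>

    #define EXAMPLE_FALLBACK_SIZE	(640 * 480 * 2)	/* hypothetical default */

    static int example_queue_setup(struct vb2_queue *q, const void *parg,
    			       unsigned int *num_buffers,
    			       unsigned int *num_planes,
    			       unsigned int sizes[], void *alloc_ctxs[])
    {
    	/* For a V4L2 queue, parg is the requested format (may be NULL,
    	 * e.g. for a plain REQBUFS call). */
    	const struct v4l2_format *fmt = parg;

    	*num_planes = 1;
    	sizes[0] = fmt ? fmt->fmt.pix.sizeimage : EXAMPLE_FALLBACK_SIZE;
    	return 0;
    }
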
@@ -392,17 +436,19 @@ struct v4l2_fh; * @threadio: thread io internal data, used only if thread is active */ struct vb2_queue { - enum v4l2_buf_type type; + unsigned int type; unsigned int io_modes; unsigned fileio_read_once:1; unsigned fileio_write_immediately:1; unsigned allow_zero_bytesused:1; struct mutex *lock; - struct v4l2_fh *owner; + void *owner; const struct vb2_ops *ops; const struct vb2_mem_ops *mem_ops; + const struct vb2_buf_ops *buf_ops; + void *drv_priv; unsigned int buf_struct_size; u32 timestamp_flags; @@ -411,8 +457,8 @@ struct vb2_queue { /* private: internal use only */ struct mutex mmap_lock; - enum v4l2_memory memory; - struct vb2_buffer *bufs[VIDEO_MAX_FRAME]; + unsigned int memory; + struct vb2_buffer *bufs[VB2_MAX_FRAME]; unsigned int num_buffers; struct list_head queued_list; @@ -423,13 +469,15 @@ struct vb2_queue { spinlock_t done_lock; wait_queue_head_t done_wq; - void *alloc_ctx[VIDEO_MAX_PLANES]; - unsigned int plane_sizes[VIDEO_MAX_PLANES]; + void *alloc_ctx[VB2_MAX_PLANES]; + unsigned int plane_sizes[VB2_MAX_PLANES]; unsigned int streaming:1; unsigned int start_streaming_called:1; unsigned int error:1; unsigned int waiting_for_buffers:1; + unsigned int is_multiplanar:1; + unsigned int is_output:1; unsigned int last_buffer_dequeued:1; struct vb2_fileio_data *fileio; @@ -455,23 +503,25 @@ void vb2_buffer_done(struct vb2_buffer *vb, enum vb2_buffer_state state); void vb2_discard_done(struct vb2_queue *q); int vb2_wait_for_all_buffers(struct vb2_queue *q); -int vb2_querybuf(struct vb2_queue *q, struct v4l2_buffer *b); -int vb2_reqbufs(struct vb2_queue *q, struct v4l2_requestbuffers *req); +int vb2_core_querybuf(struct vb2_queue *q, unsigned int index, void *pb); +int vb2_core_reqbufs(struct vb2_queue *q, enum vb2_memory memory, + unsigned int *count); +int vb2_core_create_bufs(struct vb2_queue *q, enum vb2_memory memory, + unsigned int *count, const void *parg); +int vb2_core_prepare_buf(struct vb2_queue *q, unsigned int index, void *pb); +int vb2_core_qbuf(struct vb2_queue *q, unsigned int index, void *pb); +int vb2_core_dqbuf(struct vb2_queue *q, void *pb, bool nonblocking); -int vb2_create_bufs(struct vb2_queue *q, struct v4l2_create_buffers *create); -int vb2_prepare_buf(struct vb2_queue *q, struct v4l2_buffer *b); +int vb2_core_streamon(struct vb2_queue *q, unsigned int type); +int vb2_core_streamoff(struct vb2_queue *q, unsigned int type); -int __must_check vb2_queue_init(struct vb2_queue *q); - -void vb2_queue_release(struct vb2_queue *q); -void vb2_queue_error(struct vb2_queue *q); +int vb2_core_expbuf(struct vb2_queue *q, int *fd, unsigned int type, + unsigned int index, unsigned int plane, unsigned int flags); -int vb2_qbuf(struct vb2_queue *q, struct v4l2_buffer *b); -int vb2_expbuf(struct vb2_queue *q, struct v4l2_exportbuffer *eb); -int vb2_dqbuf(struct vb2_queue *q, struct v4l2_buffer *b, bool nonblocking); +int vb2_core_queue_init(struct vb2_queue *q); +void vb2_core_queue_release(struct vb2_queue *q); -int vb2_streamon(struct vb2_queue *q, enum v4l2_buf_type type); -int vb2_streamoff(struct vb2_queue *q, enum v4l2_buf_type type); +void vb2_queue_error(struct vb2_queue *q); int vb2_mmap(struct vb2_queue *q, struct vm_area_struct *vma); #ifndef CONFIG_MMU @@ -481,41 +531,6 @@ unsigned long vb2_get_unmapped_area(struct vb2_queue *q, unsigned long pgoff, unsigned long flags); #endif -unsigned int vb2_poll(struct vb2_queue *q, struct file *file, poll_table *wait); -size_t vb2_read(struct vb2_queue *q, char __user *data, size_t count, - loff_t *ppos, 
int nonblock); -size_t vb2_write(struct vb2_queue *q, const char __user *data, size_t count, - loff_t *ppos, int nonblock); - -/* - * vb2_thread_fnc - callback function for use with vb2_thread - * - * This is called whenever a buffer is dequeued in the thread. - */ -typedef int (*vb2_thread_fnc)(struct vb2_buffer *vb, void *priv); - -/** - * vb2_thread_start() - start a thread for the given queue. - * @q: videobuf queue - * @fnc: callback function - * @priv: priv pointer passed to the callback function - * @thread_name:the name of the thread. This will be prefixed with "vb2-". - * - * This starts a thread that will queue and dequeue until an error occurs - * or @vb2_thread_stop is called. - * - * This function should not be used for anything else but the videobuf2-dvb - * support. If you think you have another good use-case for this, then please - * contact the linux-media mailinglist first. - */ -int vb2_thread_start(struct vb2_queue *q, vb2_thread_fnc fnc, void *priv, - const char *thread_name); - -/** - * vb2_thread_stop() - stop the thread for the given queue. - * @q: videobuf queue - */ -int vb2_thread_stop(struct vb2_queue *q); /** * vb2_is_streaming() - return streaming status of the queue @@ -573,7 +588,7 @@ static inline void vb2_set_plane_payload(struct vb2_buffer *vb, unsigned int plane_no, unsigned long size) { if (plane_no < vb->num_planes) - vb->v4l2_planes[plane_no].bytesused = size; + vb->planes[plane_no].bytesused = size; } /** @@ -585,7 +600,7 @@ static inline unsigned long vb2_get_plane_payload(struct vb2_buffer *vb, unsigned int plane_no) { if (plane_no < vb->num_planes) - return vb->v4l2_planes[plane_no].bytesused; + return vb->planes[plane_no].bytesused; return 0; } @@ -598,7 +613,7 @@ static inline unsigned long vb2_plane_size(struct vb2_buffer *vb, unsigned int plane_no) { if (plane_no < vb->num_planes) - return vb->v4l2_planes[plane_no].length; + return vb->planes[plane_no].length; return 0; } @@ -620,48 +635,4 @@ static inline void vb2_clear_last_buffer_dequeued(struct vb2_queue *q) q->last_buffer_dequeued = false; } -/* - * The following functions are not part of the vb2 core API, but are simple - * helper functions that you can use in your struct v4l2_file_operations, - * struct v4l2_ioctl_ops and struct vb2_ops. They will serialize if vb2_queue->lock - * or video_device->lock is set, and they will set and test vb2_queue->owner - * to check if the calling filehandle is permitted to do the queuing operation. 
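/*
 * Illustrative sketch, not from this patch ("my_cam" is a made-up driver
 * name): the payload helpers above now index the generic vb->planes[] array
 * instead of the removed vb->v4l2_planes[], so a capture driver completing a
 * frame might look roughly like this.
 */
static void my_cam_frame_done(struct vb2_buffer *vb, unsigned long bytes_used)
{
	/* clamp to the real plane size before reporting the payload */
	if (bytes_used > vb2_plane_size(vb, 0))
		bytes_used = vb2_plane_size(vb, 0);

	vb2_set_plane_payload(vb, 0, bytes_used);
	vb2_buffer_done(vb, VB2_BUF_STATE_DONE);
}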
- */ - -/* struct v4l2_ioctl_ops helpers */ - -int vb2_ioctl_reqbufs(struct file *file, void *priv, - struct v4l2_requestbuffers *p); -int vb2_ioctl_create_bufs(struct file *file, void *priv, - struct v4l2_create_buffers *p); -int vb2_ioctl_prepare_buf(struct file *file, void *priv, - struct v4l2_buffer *p); -int vb2_ioctl_querybuf(struct file *file, void *priv, struct v4l2_buffer *p); -int vb2_ioctl_qbuf(struct file *file, void *priv, struct v4l2_buffer *p); -int vb2_ioctl_dqbuf(struct file *file, void *priv, struct v4l2_buffer *p); -int vb2_ioctl_streamon(struct file *file, void *priv, enum v4l2_buf_type i); -int vb2_ioctl_streamoff(struct file *file, void *priv, enum v4l2_buf_type i); -int vb2_ioctl_expbuf(struct file *file, void *priv, - struct v4l2_exportbuffer *p); - -/* struct v4l2_file_operations helpers */ - -int vb2_fop_mmap(struct file *file, struct vm_area_struct *vma); -int vb2_fop_release(struct file *file); -int _vb2_fop_release(struct file *file, struct mutex *lock); -ssize_t vb2_fop_write(struct file *file, const char __user *buf, - size_t count, loff_t *ppos); -ssize_t vb2_fop_read(struct file *file, char __user *buf, - size_t count, loff_t *ppos); -unsigned int vb2_fop_poll(struct file *file, poll_table *wait); -#ifndef CONFIG_MMU -unsigned long vb2_fop_get_unmapped_area(struct file *file, unsigned long addr, - unsigned long len, unsigned long pgoff, unsigned long flags); -#endif - -/* struct vb2_ops helpers, only use if vq->lock is non-NULL. */ - -void vb2_ops_wait_prepare(struct vb2_queue *vq); -void vb2_ops_wait_finish(struct vb2_queue *vq); - #endif /* _MEDIA_VIDEOBUF2_CORE_H */ diff --git a/include/media/videobuf2-dma-contig.h b/include/media/videobuf2-dma-contig.h index 8197f87d6c61..c33dfa69d7ab 100644 --- a/include/media/videobuf2-dma-contig.h +++ b/include/media/videobuf2-dma-contig.h @@ -13,7 +13,7 @@ #ifndef _MEDIA_VIDEOBUF2_DMA_CONTIG_H #define _MEDIA_VIDEOBUF2_DMA_CONTIG_H -#include <media/videobuf2-core.h> +#include <media/videobuf2-v4l2.h> #include <linux/dma-mapping.h> static inline dma_addr_t diff --git a/include/media/videobuf2-dma-sg.h b/include/media/videobuf2-dma-sg.h index 14ce3068b642..8d1083f83c3d 100644 --- a/include/media/videobuf2-dma-sg.h +++ b/include/media/videobuf2-dma-sg.h @@ -13,7 +13,7 @@ #ifndef _MEDIA_VIDEOBUF2_DMA_SG_H #define _MEDIA_VIDEOBUF2_DMA_SG_H -#include <media/videobuf2-core.h> +#include <media/videobuf2-v4l2.h> static inline struct sg_table *vb2_dma_sg_plane_desc( struct vb2_buffer *vb, unsigned int plane_no) diff --git a/include/media/videobuf2-dvb.h b/include/media/videobuf2-dvb.h index 8f61456f1394..5b64c9eac2c9 100644 --- a/include/media/videobuf2-dvb.h +++ b/include/media/videobuf2-dvb.h @@ -6,7 +6,13 @@ #include <dvb_demux.h> #include <dvb_net.h> #include <dvb_frontend.h> -#include <media/videobuf2-core.h> + +#include <media/videobuf2-v4l2.h> +/* + * TODO: This header file should be replaced with videobuf2-core.h + * Currently, vb2_thread is not a stuff of videobuf2-core, + * since vb2_thread has many dependencies on videobuf2-v4l2. 
+ */ struct vb2_dvb { /* filling that the job of the driver */ diff --git a/include/media/videobuf2-memops.h b/include/media/videobuf2-memops.h index 6513c7ec3116..36565c7acb54 100644 --- a/include/media/videobuf2-memops.h +++ b/include/media/videobuf2-memops.h @@ -14,7 +14,7 @@ #ifndef _MEDIA_VIDEOBUF2_MEMOPS_H #define _MEDIA_VIDEOBUF2_MEMOPS_H -#include <media/videobuf2-core.h> +#include <media/videobuf2-v4l2.h> #include <linux/mm.h> /** diff --git a/include/media/videobuf2-v4l2.h b/include/media/videobuf2-v4l2.h new file mode 100644 index 000000000000..5abab1e7c7e8 --- /dev/null +++ b/include/media/videobuf2-v4l2.h @@ -0,0 +1,149 @@ +/* + * videobuf2-v4l2.h - V4L2 driver helper framework + * + * Copyright (C) 2010 Samsung Electronics + * + * Author: Pawel Osciak <pawel@osciak.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. + */ +#ifndef _MEDIA_VIDEOBUF2_V4L2_H +#define _MEDIA_VIDEOBUF2_V4L2_H + +#include <linux/videodev2.h> +#include <media/videobuf2-core.h> + +#if VB2_MAX_FRAME != VIDEO_MAX_FRAME +#error VB2_MAX_FRAME != VIDEO_MAX_FRAME +#endif + +#if VB2_MAX_PLANES != VIDEO_MAX_PLANES +#error VB2_MAX_PLANES != VIDEO_MAX_PLANES +#endif + +/** + * struct vb2_v4l2_buffer - video buffer information for v4l2 + * @vb2_buf: video buffer 2 + * @flags: buffer informational flags + * @field: enum v4l2_field; field order of the image in the buffer + * @timestamp: frame timestamp + * @timecode: frame timecode + * @sequence: sequence count of this frame + * Should contain enough information to be able to cover all the fields + * of struct v4l2_buffer at videodev2.h + */ +struct vb2_v4l2_buffer { + struct vb2_buffer vb2_buf; + + __u32 flags; + __u32 field; + struct timeval timestamp; + struct v4l2_timecode timecode; + __u32 sequence; +}; + +/* + * to_vb2_v4l2_buffer() - cast struct vb2_buffer * to struct vb2_v4l2_buffer * + */ +#define to_vb2_v4l2_buffer(vb) \ + container_of(vb, struct vb2_v4l2_buffer, vb2_buf) + +int vb2_querybuf(struct vb2_queue *q, struct v4l2_buffer *b); +int vb2_reqbufs(struct vb2_queue *q, struct v4l2_requestbuffers *req); + +int vb2_create_bufs(struct vb2_queue *q, struct v4l2_create_buffers *create); +int vb2_prepare_buf(struct vb2_queue *q, struct v4l2_buffer *b); + +int vb2_qbuf(struct vb2_queue *q, struct v4l2_buffer *b); +int vb2_expbuf(struct vb2_queue *q, struct v4l2_exportbuffer *eb); +int vb2_dqbuf(struct vb2_queue *q, struct v4l2_buffer *b, bool nonblocking); + +int vb2_streamon(struct vb2_queue *q, enum v4l2_buf_type type); +int vb2_streamoff(struct vb2_queue *q, enum v4l2_buf_type type); + +int __must_check vb2_queue_init(struct vb2_queue *q); +void vb2_queue_release(struct vb2_queue *q); + +unsigned int vb2_poll(struct vb2_queue *q, struct file *file, poll_table *wait); +size_t vb2_read(struct vb2_queue *q, char __user *data, size_t count, + loff_t *ppos, int nonblock); +size_t vb2_write(struct vb2_queue *q, const char __user *data, size_t count, + loff_t *ppos, int nonblock); + +/* + * vb2_thread_fnc - callback function for use with vb2_thread + * + * This is called whenever a buffer is dequeued in the thread. + */ +typedef int (*vb2_thread_fnc)(struct vb2_buffer *vb, void *priv); + +/** + * vb2_thread_start() - start a thread for the given queue. + * @q: videobuf queue + * @fnc: callback function + * @priv: priv pointer passed to the callback function + * @thread_name:the name of the thread. 
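/*
 * Illustrative sketch, not from this patch (all "my_drv" names are
 * hypothetical): a converted driver embeds struct vb2_v4l2_buffer and uses
 * to_vb2_v4l2_buffer() to reach the V4L2-specific state from the core
 * struct vb2_buffer handed to its vb2_ops callbacks.
 */
struct my_drv_dev {
	struct list_head buf_list;	/* protected by the driver's own lock */
};

struct my_drv_buffer {
	struct vb2_v4l2_buffer vb;	/* must be the first member */
	struct list_head list;
};

static void my_drv_buf_queue(struct vb2_buffer *vb)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct my_drv_buffer *buf = container_of(vbuf, struct my_drv_buffer, vb);
	struct my_drv_dev *dev = vb2_get_drv_priv(vb->vb2_queue);

	vbuf->field = V4L2_FIELD_NONE;		/* per-frame V4L2 state lives in vbuf */
	list_add_tail(&buf->list, &dev->buf_list);
}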
This will be prefixed with "vb2-". + * + * This starts a thread that will queue and dequeue until an error occurs + * or @vb2_thread_stop is called. + * + * This function should not be used for anything else but the videobuf2-dvb + * support. If you think you have another good use-case for this, then please + * contact the linux-media mailinglist first. + */ +int vb2_thread_start(struct vb2_queue *q, vb2_thread_fnc fnc, void *priv, + const char *thread_name); + +/** + * vb2_thread_stop() - stop the thread for the given queue. + * @q: videobuf queue + */ +int vb2_thread_stop(struct vb2_queue *q); + +/* + * The following functions are not part of the vb2 core API, but are simple + * helper functions that you can use in your struct v4l2_file_operations, + * struct v4l2_ioctl_ops and struct vb2_ops. They will serialize if vb2_queue->lock + * or video_device->lock is set, and they will set and test vb2_queue->owner + * to check if the calling filehandle is permitted to do the queuing operation. + */ + +/* struct v4l2_ioctl_ops helpers */ + +int vb2_ioctl_reqbufs(struct file *file, void *priv, + struct v4l2_requestbuffers *p); +int vb2_ioctl_create_bufs(struct file *file, void *priv, + struct v4l2_create_buffers *p); +int vb2_ioctl_prepare_buf(struct file *file, void *priv, + struct v4l2_buffer *p); +int vb2_ioctl_querybuf(struct file *file, void *priv, struct v4l2_buffer *p); +int vb2_ioctl_qbuf(struct file *file, void *priv, struct v4l2_buffer *p); +int vb2_ioctl_dqbuf(struct file *file, void *priv, struct v4l2_buffer *p); +int vb2_ioctl_streamon(struct file *file, void *priv, enum v4l2_buf_type i); +int vb2_ioctl_streamoff(struct file *file, void *priv, enum v4l2_buf_type i); +int vb2_ioctl_expbuf(struct file *file, void *priv, + struct v4l2_exportbuffer *p); + +/* struct v4l2_file_operations helpers */ + +int vb2_fop_mmap(struct file *file, struct vm_area_struct *vma); +int vb2_fop_release(struct file *file); +int _vb2_fop_release(struct file *file, struct mutex *lock); +ssize_t vb2_fop_write(struct file *file, const char __user *buf, + size_t count, loff_t *ppos); +ssize_t vb2_fop_read(struct file *file, char __user *buf, + size_t count, loff_t *ppos); +unsigned int vb2_fop_poll(struct file *file, poll_table *wait); +#ifndef CONFIG_MMU +unsigned long vb2_fop_get_unmapped_area(struct file *file, unsigned long addr, + unsigned long len, unsigned long pgoff, unsigned long flags); +#endif + +/* struct vb2_ops helpers, only use if vq->lock is non-NULL. 
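/*
 * Illustrative sketch, not from this patch: a driver that sets
 * vb2_queue->lock (and video_device->lock) can wire the helpers declared in
 * this header straight into its ops tables.  The "my_drv" tables and the
 * omitted format/queue callbacks are hypothetical.
 */
static const struct v4l2_ioctl_ops my_drv_ioctl_ops = {
	/* ... format/control ioctls omitted ... */
	.vidioc_reqbufs		= vb2_ioctl_reqbufs,
	.vidioc_create_bufs	= vb2_ioctl_create_bufs,
	.vidioc_querybuf	= vb2_ioctl_querybuf,
	.vidioc_qbuf		= vb2_ioctl_qbuf,
	.vidioc_dqbuf		= vb2_ioctl_dqbuf,
	.vidioc_expbuf		= vb2_ioctl_expbuf,
	.vidioc_streamon	= vb2_ioctl_streamon,
	.vidioc_streamoff	= vb2_ioctl_streamoff,
};

static const struct v4l2_file_operations my_drv_fops = {
	.owner		= THIS_MODULE,
	.open		= v4l2_fh_open,
	.release	= vb2_fop_release,
	.read		= vb2_fop_read,
	.poll		= vb2_fop_poll,
	.mmap		= vb2_fop_mmap,
	.unlocked_ioctl	= video_ioctl2,
};

static const struct vb2_ops my_drv_vb2_ops = {
	/* ... queue_setup/buf_queue/start_streaming omitted ... */
	.wait_prepare	= vb2_ops_wait_prepare,
	.wait_finish	= vb2_ops_wait_finish,
};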
*/ + +void vb2_ops_wait_prepare(struct vb2_queue *vq); +void vb2_ops_wait_finish(struct vb2_queue *vq); + +#endif /* _MEDIA_VIDEOBUF2_V4L2_H */ diff --git a/include/media/videobuf2-vmalloc.h b/include/media/videobuf2-vmalloc.h index 93a76b43038d..a63fe662140a 100644 --- a/include/media/videobuf2-vmalloc.h +++ b/include/media/videobuf2-vmalloc.h @@ -13,7 +13,7 @@ #ifndef _MEDIA_VIDEOBUF2_VMALLOC_H #define _MEDIA_VIDEOBUF2_VMALLOC_H -#include <media/videobuf2-core.h> +#include <media/videobuf2-v4l2.h> extern const struct vb2_mem_ops vb2_vmalloc_memops; diff --git a/include/net/6lowpan.h b/include/net/6lowpan.h index a2f59ec98d24..cf3bc564ac03 100644 --- a/include/net/6lowpan.h +++ b/include/net/6lowpan.h @@ -56,146 +56,37 @@ #include <net/ipv6.h> #include <net/net_namespace.h> -#define UIP_802154_SHORTADDR_LEN 2 /* compressed ipv6 address length */ -#define UIP_IPH_LEN 40 /* ipv6 fixed header size */ -#define UIP_PROTO_UDP 17 /* ipv6 next header value for UDP */ -#define UIP_FRAGH_LEN 8 /* ipv6 fragment header size */ +#define EUI64_ADDR_LEN 8 -/* - * ipv6 address based on mac - * second bit-flip (Universe/Local) is done according RFC2464 +#define LOWPAN_NHC_MAX_ID_LEN 1 +/* Maximum next header compression length which we currently support inclusive + * possible inline data. */ -#define is_addr_mac_addr_based(a, m) \ - ((((a)->s6_addr[8]) == (((m)[0]) ^ 0x02)) && \ - (((a)->s6_addr[9]) == (m)[1]) && \ - (((a)->s6_addr[10]) == (m)[2]) && \ - (((a)->s6_addr[11]) == (m)[3]) && \ - (((a)->s6_addr[12]) == (m)[4]) && \ - (((a)->s6_addr[13]) == (m)[5]) && \ - (((a)->s6_addr[14]) == (m)[6]) && \ - (((a)->s6_addr[15]) == (m)[7])) - -/* - * check whether we can compress the IID to 16 bits, - * it's possible for unicast adresses with first 49 bits are zero only. - */ -#define lowpan_is_iid_16_bit_compressable(a) \ - ((((a)->s6_addr16[4]) == 0) && \ - (((a)->s6_addr[10]) == 0) && \ - (((a)->s6_addr[11]) == 0xff) && \ - (((a)->s6_addr[12]) == 0xfe) && \ - (((a)->s6_addr[13]) == 0)) - -/* check whether the 112-bit gid of the multicast address is mappable to: */ - -/* 48 bits, FFXX::00XX:XXXX:XXXX */ -#define lowpan_is_mcast_addr_compressable48(a) \ - ((((a)->s6_addr16[1]) == 0) && \ - (((a)->s6_addr16[2]) == 0) && \ - (((a)->s6_addr16[3]) == 0) && \ - (((a)->s6_addr16[4]) == 0) && \ - (((a)->s6_addr[10]) == 0)) - -/* 32 bits, FFXX::00XX:XXXX */ -#define lowpan_is_mcast_addr_compressable32(a) \ - ((((a)->s6_addr16[1]) == 0) && \ - (((a)->s6_addr16[2]) == 0) && \ - (((a)->s6_addr16[3]) == 0) && \ - (((a)->s6_addr16[4]) == 0) && \ - (((a)->s6_addr16[5]) == 0) && \ - (((a)->s6_addr[12]) == 0)) - -/* 8 bits, FF02::00XX */ -#define lowpan_is_mcast_addr_compressable8(a) \ - ((((a)->s6_addr[1]) == 2) && \ - (((a)->s6_addr16[1]) == 0) && \ - (((a)->s6_addr16[2]) == 0) && \ - (((a)->s6_addr16[3]) == 0) && \ - (((a)->s6_addr16[4]) == 0) && \ - (((a)->s6_addr16[5]) == 0) && \ - (((a)->s6_addr16[6]) == 0) && \ - (((a)->s6_addr[14]) == 0)) - -#define lowpan_is_addr_broadcast(a) \ - ((((a)[0]) == 0xFF) && \ - (((a)[1]) == 0xFF) && \ - (((a)[2]) == 0xFF) && \ - (((a)[3]) == 0xFF) && \ - (((a)[4]) == 0xFF) && \ - (((a)[5]) == 0xFF) && \ - (((a)[6]) == 0xFF) && \ - (((a)[7]) == 0xFF)) - -#define LOWPAN_DISPATCH_IPV6 0x41 /* 01000001 = 65 */ -#define LOWPAN_DISPATCH_HC1 0x42 /* 01000010 = 66 */ -#define LOWPAN_DISPATCH_IPHC 0x60 /* 011xxxxx = ... 
*/ -#define LOWPAN_DISPATCH_FRAG1 0xc0 /* 11000xxx */ -#define LOWPAN_DISPATCH_FRAGN 0xe0 /* 11100xxx */ - -#define LOWPAN_DISPATCH_MASK 0xf8 /* 11111000 */ - -#define LOWPAN_FRAG_TIMEOUT (HZ * 60) /* time-out 60 sec */ - -#define LOWPAN_FRAG1_HEAD_SIZE 0x4 -#define LOWPAN_FRAGN_HEAD_SIZE 0x5 - -/* - * Values of fields within the IPHC encoding first byte - * (C stands for compressed and I for inline) +#define LOWPAN_NHC_MAX_HDR_LEN (sizeof(struct udphdr)) +/* Max IPHC Header len without IPv6 hdr specific inline data. + * Useful for getting the "extra" bytes we need at worst case compression. + * + * LOWPAN_IPHC + CID + LOWPAN_NHC_MAX_ID_LEN */ -#define LOWPAN_IPHC_TF 0x18 - -#define LOWPAN_IPHC_FL_C 0x10 -#define LOWPAN_IPHC_TC_C 0x08 -#define LOWPAN_IPHC_NH_C 0x04 -#define LOWPAN_IPHC_TTL_1 0x01 -#define LOWPAN_IPHC_TTL_64 0x02 -#define LOWPAN_IPHC_TTL_255 0x03 -#define LOWPAN_IPHC_TTL_I 0x00 - +#define LOWPAN_IPHC_MAX_HEADER_LEN (2 + 1 + LOWPAN_NHC_MAX_ID_LEN) +/* Maximum worst case IPHC header buffer size */ +#define LOWPAN_IPHC_MAX_HC_BUF_LEN (sizeof(struct ipv6hdr) + \ + LOWPAN_IPHC_MAX_HEADER_LEN + \ + LOWPAN_NHC_MAX_HDR_LEN) -/* Values of fields within the IPHC encoding second byte */ -#define LOWPAN_IPHC_CID 0x80 +#define LOWPAN_DISPATCH_IPV6 0x41 /* 01000001 = 65 */ +#define LOWPAN_DISPATCH_IPHC 0x60 /* 011xxxxx = ... */ +#define LOWPAN_DISPATCH_IPHC_MASK 0xe0 -#define LOWPAN_IPHC_ADDR_00 0x00 -#define LOWPAN_IPHC_ADDR_01 0x01 -#define LOWPAN_IPHC_ADDR_02 0x02 -#define LOWPAN_IPHC_ADDR_03 0x03 - -#define LOWPAN_IPHC_SAC 0x40 -#define LOWPAN_IPHC_SAM 0x30 - -#define LOWPAN_IPHC_SAM_BIT 4 - -#define LOWPAN_IPHC_M 0x08 -#define LOWPAN_IPHC_DAC 0x04 -#define LOWPAN_IPHC_DAM_00 0x00 -#define LOWPAN_IPHC_DAM_01 0x01 -#define LOWPAN_IPHC_DAM_10 0x02 -#define LOWPAN_IPHC_DAM_11 0x03 - -#define LOWPAN_IPHC_DAM_BIT 0 -/* - * LOWPAN_UDP encoding (works together with IPHC) - */ -#define LOWPAN_NHC_UDP_MASK 0xF8 -#define LOWPAN_NHC_UDP_ID 0xF0 -#define LOWPAN_NHC_UDP_CHECKSUMC 0x04 -#define LOWPAN_NHC_UDP_CHECKSUMI 0x00 - -#define LOWPAN_NHC_UDP_4BIT_PORT 0xF0B0 -#define LOWPAN_NHC_UDP_4BIT_MASK 0xFFF0 -#define LOWPAN_NHC_UDP_8BIT_PORT 0xF000 -#define LOWPAN_NHC_UDP_8BIT_MASK 0xFF00 +static inline bool lowpan_is_ipv6(u8 dispatch) +{ + return dispatch == LOWPAN_DISPATCH_IPV6; +} -/* values for port compression, _with checksum_ ie bit 5 set to 0 */ -#define LOWPAN_NHC_UDP_CS_P_00 0xF0 /* all inline */ -#define LOWPAN_NHC_UDP_CS_P_01 0xF1 /* source 16bit inline, - dest = 0xF0 + 8 bit inline */ -#define LOWPAN_NHC_UDP_CS_P_10 0xF2 /* source = 0xF0 + 8bit inline, - dest = 16 bit inline */ -#define LOWPAN_NHC_UDP_CS_P_11 0xF3 /* source & dest = 0xF0B + 4bit inline */ -#define LOWPAN_NHC_UDP_CS_C 0x04 /* checksum elided */ +static inline bool lowpan_is_iphc(u8 dispatch) +{ + return (dispatch & LOWPAN_DISPATCH_IPHC_MASK) == LOWPAN_DISPATCH_IPHC; +} #define LOWPAN_PRIV_SIZE(llpriv_size) \ (sizeof(struct lowpan_priv) + llpriv_size) @@ -218,10 +109,23 @@ struct lowpan_priv *lowpan_priv(const struct net_device *dev) return netdev_priv(dev); } +struct lowpan_802154_cb { + u16 d_tag; + unsigned int d_size; + u8 d_offset; +}; + +static inline +struct lowpan_802154_cb *lowpan_802154_cb(const struct sk_buff *skb) +{ + BUILD_BUG_ON(sizeof(struct lowpan_802154_cb) > sizeof(skb->cb)); + return (struct lowpan_802154_cb *)skb->cb; +} + #ifdef DEBUG /* print data in line */ static inline void raw_dump_inline(const char *caller, char *msg, - unsigned char *buf, int len) + const unsigned char *buf, int len) { if (msg) 
pr_debug("%s():%s: ", caller, msg); @@ -236,7 +140,7 @@ static inline void raw_dump_inline(const char *caller, char *msg, * ... */ static inline void raw_dump_table(const char *caller, char *msg, - unsigned char *buf, int len) + const unsigned char *buf, int len) { if (msg) pr_debug("%s():%s:\n", caller, msg); @@ -245,24 +149,25 @@ static inline void raw_dump_table(const char *caller, char *msg, } #else static inline void raw_dump_table(const char *caller, char *msg, - unsigned char *buf, int len) { } + const unsigned char *buf, int len) { } static inline void raw_dump_inline(const char *caller, char *msg, - unsigned char *buf, int len) { } + const unsigned char *buf, int len) { } #endif -static inline int lowpan_fetch_skb_u8(struct sk_buff *skb, u8 *val) -{ - if (unlikely(!pskb_may_pull(skb, 1))) - return -EINVAL; - - *val = skb->data[0]; - skb_pull(skb, 1); - - return 0; -} - -static inline bool lowpan_fetch_skb(struct sk_buff *skb, - void *data, const unsigned int len) +/** + * lowpan_fetch_skb - getting inline data from 6LoWPAN header + * + * This function will pull data from sk buffer and put it into data to + * remove the 6LoWPAN inline data. This function returns true if the + * sk buffer is too small to pull the amount of data which is specified + * by len. + * + * @skb: the buffer where the inline data should be pulled from. + * @data: destination buffer for the inline data. + * @len: amount of data which should be pulled in bytes. + */ +static inline bool lowpan_fetch_skb(struct sk_buff *skb, void *data, + unsigned int len) { if (unlikely(!pskb_may_pull(skb, len))) return true; @@ -280,129 +185,44 @@ static inline void lowpan_push_hc_data(u8 **hc_ptr, const void *data, *hc_ptr += len; } -static inline u8 lowpan_addr_mode_size(const u8 addr_mode) -{ - static const u8 addr_sizes[] = { - [LOWPAN_IPHC_ADDR_00] = 16, - [LOWPAN_IPHC_ADDR_01] = 8, - [LOWPAN_IPHC_ADDR_02] = 2, - [LOWPAN_IPHC_ADDR_03] = 0, - }; - return addr_sizes[addr_mode]; -} - -static inline u8 lowpan_next_hdr_size(const u8 h_enc, u16 *uncomp_header) -{ - u8 ret = 1; - - if ((h_enc & LOWPAN_NHC_UDP_MASK) == LOWPAN_NHC_UDP_ID) { - *uncomp_header += sizeof(struct udphdr); - - switch (h_enc & LOWPAN_NHC_UDP_CS_P_11) { - case LOWPAN_NHC_UDP_CS_P_00: - ret += 4; - break; - case LOWPAN_NHC_UDP_CS_P_01: - case LOWPAN_NHC_UDP_CS_P_10: - ret += 3; - break; - case LOWPAN_NHC_UDP_CS_P_11: - ret++; - break; - default: - break; - } - - if (!(h_enc & LOWPAN_NHC_UDP_CS_C)) - ret += 2; - } - - return ret; -} +void lowpan_netdev_setup(struct net_device *dev, enum lowpan_lltypes lltype); /** - * lowpan_uncompress_size - returns skb->len size with uncompressed header - * @skb: sk_buff with 6lowpan header inside - * @datagram_offset: optional to get the datagram_offset value + * lowpan_header_decompress - replace 6LoWPAN header with IPv6 header * - * Returns the skb->len with uncompressed header + * This function replaces the IPHC 6LoWPAN header which should be pointed at + * skb->data and skb_network_header, with the IPv6 header. + * It would be nice that the caller have the necessary headroom of IPv6 header + * and greatest Transport layer header, this would reduce the overhead for + * reallocate headroom. + * + * @skb: the buffer which should be manipulate. + * @dev: the lowpan net device pointer. + * @daddr: destination lladdr of mac header which is used for compression + * methods. + * @saddr: source lladdr of mac header which is used for compression + * methods. 
*/ -static inline u16 -lowpan_uncompress_size(const struct sk_buff *skb, u16 *dgram_offset) -{ - u16 ret = 2, uncomp_header = sizeof(struct ipv6hdr); - u8 iphc0, iphc1, h_enc; - - iphc0 = skb_network_header(skb)[0]; - iphc1 = skb_network_header(skb)[1]; - - switch ((iphc0 & LOWPAN_IPHC_TF) >> 3) { - case 0: - ret += 4; - break; - case 1: - ret += 3; - break; - case 2: - ret++; - break; - default: - break; - } - - if (!(iphc0 & LOWPAN_IPHC_NH_C)) - ret++; - - if (!(iphc0 & 0x03)) - ret++; - - ret += lowpan_addr_mode_size((iphc1 & LOWPAN_IPHC_SAM) >> - LOWPAN_IPHC_SAM_BIT); - - if (iphc1 & LOWPAN_IPHC_M) { - switch ((iphc1 & LOWPAN_IPHC_DAM_11) >> - LOWPAN_IPHC_DAM_BIT) { - case LOWPAN_IPHC_DAM_00: - ret += 16; - break; - case LOWPAN_IPHC_DAM_01: - ret += 6; - break; - case LOWPAN_IPHC_DAM_10: - ret += 4; - break; - case LOWPAN_IPHC_DAM_11: - ret++; - break; - default: - break; - } - } else { - ret += lowpan_addr_mode_size((iphc1 & LOWPAN_IPHC_DAM_11) >> - LOWPAN_IPHC_DAM_BIT); - } - - if (iphc0 & LOWPAN_IPHC_NH_C) { - h_enc = skb_network_header(skb)[ret]; - ret += lowpan_next_hdr_size(h_enc, &uncomp_header); - } +int lowpan_header_decompress(struct sk_buff *skb, const struct net_device *dev, + const void *daddr, const void *saddr); - if (dgram_offset) - *dgram_offset = uncomp_header; - - return skb->len + uncomp_header - ret; -} - -void lowpan_netdev_setup(struct net_device *dev, enum lowpan_lltypes lltype); - -int -lowpan_header_decompress(struct sk_buff *skb, struct net_device *dev, - const u8 *saddr, const u8 saddr_type, - const u8 saddr_len, const u8 *daddr, - const u8 daddr_type, const u8 daddr_len, - u8 iphc0, u8 iphc1); -int lowpan_header_compress(struct sk_buff *skb, struct net_device *dev, - unsigned short type, const void *_daddr, - const void *_saddr, unsigned int len); +/** + * lowpan_header_compress - replace IPv6 header with 6LoWPAN header + * + * This function replaces the IPv6 header which should be pointed at + * skb->data and skb_network_header, with the IPHC 6LoWPAN header. + * The caller need to be sure that the sk buffer is not shared and at have + * at least a headroom which is smaller or equal LOWPAN_IPHC_MAX_HEADER_LEN, + * which is the IPHC "more bytes than IPv6 header" at worst case. + * + * @skb: the buffer which should be manipulate. + * @dev: the lowpan net device pointer. + * @daddr: destination lladdr of mac header which is used for compression + * methods. + * @saddr: source lladdr of mac header which is used for compression + * methods. 
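/*
 * Illustrative sketch, not from this patch: a receive path combining the new
 * dispatch helpers with lowpan_header_decompress() (declared above).  The
 * "my_*" names are hypothetical, and daddr/saddr are the link-layer
 * addresses taken from the MAC header.
 */
static int my_deliver_ipv6(struct sk_buff *skb, struct net_device *dev);

static int my_lowpan_rx(struct sk_buff *skb, struct net_device *dev,
			const void *daddr, const void *saddr)
{
	int ret;

	if (lowpan_is_ipv6(*skb_network_header(skb))) {
		skb_pull(skb, 1);	/* uncompressed IPv6 follows the dispatch byte */
		return my_deliver_ipv6(skb, dev);
	}

	if (lowpan_is_iphc(*skb_network_header(skb))) {
		ret = lowpan_header_decompress(skb, dev, daddr, saddr);
		if (ret < 0)
			return ret;
		return my_deliver_ipv6(skb, dev);
	}

	return -EINVAL;			/* unknown or unsupported dispatch */
}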
+ */ +int lowpan_header_compress(struct sk_buff *skb, const struct net_device *dev, + const void *daddr, const void *saddr); #endif /* __6LOWPAN_H__ */ diff --git a/include/net/addrconf.h b/include/net/addrconf.h index b5474b1fcd83..78003dfb8539 100644 --- a/include/net/addrconf.h +++ b/include/net/addrconf.h @@ -192,8 +192,7 @@ struct ipv6_stub { int (*ipv6_dst_lookup)(struct net *net, struct sock *sk, struct dst_entry **dst, struct flowi6 *fl6); void (*udpv6_encap_enable)(void); - void (*ndisc_send_na)(struct net_device *dev, struct neighbour *neigh, - const struct in6_addr *daddr, + void (*ndisc_send_na)(struct net_device *dev, const struct in6_addr *daddr, const struct in6_addr *solicited_addr, bool router, bool solicited, bool override, bool inc_opt); struct neigh_table *nd_tbl; diff --git a/include/net/af_ieee802154.h b/include/net/af_ieee802154.h index 7d38e2ffd256..a5563d27a3eb 100644 --- a/include/net/af_ieee802154.h +++ b/include/net/af_ieee802154.h @@ -1,5 +1,5 @@ /* - * IEEE 802.15.4 inteface for userspace + * IEEE 802.15.4 interface for userspace * * Copyright 2007, 2008 Siemens AG * diff --git a/include/net/af_unix.h b/include/net/af_unix.h index 4a167b30a12f..b36d837c701e 100644 --- a/include/net/af_unix.h +++ b/include/net/af_unix.h @@ -63,7 +63,11 @@ struct unix_sock { #define UNIX_GC_MAYBE_CYCLE 1 struct socket_wq peer_wq; }; -#define unix_sk(__sk) ((struct unix_sock *)__sk) + +static inline struct unix_sock *unix_sk(const struct sock *sk) +{ + return (struct unix_sock *)sk; +} #define peer_wait peer_wq.wait diff --git a/include/net/af_vsock.h b/include/net/af_vsock.h index db639a4c5ab8..e9eb2d6791b3 100644 --- a/include/net/af_vsock.h +++ b/include/net/af_vsock.h @@ -22,6 +22,9 @@ #include "vsock_addr.h" +/* vsock-specific sock->sk_state constants */ +#define VSOCK_SS_LISTEN 255 + #define LAST_RESERVED_PORT 1023 #define vsock_sk(__sk) ((struct vsock_sock *)__sk) diff --git a/include/net/bluetooth/bluetooth.h b/include/net/bluetooth/bluetooth.h index 38d8a34d3589..42844d7b154a 100644 --- a/include/net/bluetooth/bluetooth.h +++ b/include/net/bluetooth/bluetooth.h @@ -122,12 +122,28 @@ struct bt_voice { __printf(1, 2) void bt_info(const char *fmt, ...); __printf(1, 2) +void bt_warn(const char *fmt, ...); +__printf(1, 2) void bt_err(const char *fmt, ...); +__printf(1, 2) +void bt_err_ratelimited(const char *fmt, ...); #define BT_INFO(fmt, ...) bt_info(fmt "\n", ##__VA_ARGS__) +#define BT_WARN(fmt, ...) bt_warn(fmt "\n", ##__VA_ARGS__) #define BT_ERR(fmt, ...) bt_err(fmt "\n", ##__VA_ARGS__) #define BT_DBG(fmt, ...) pr_debug(fmt "\n", ##__VA_ARGS__) +#define BT_ERR_RATELIMITED(fmt, ...) bt_err_ratelimited(fmt "\n", ##__VA_ARGS__) + +#define bt_dev_info(hdev, fmt, ...) \ + BT_INFO("%s: " fmt, (hdev)->name, ##__VA_ARGS__) +#define bt_dev_warn(hdev, fmt, ...) \ + BT_WARN("%s: " fmt, (hdev)->name, ##__VA_ARGS__) +#define bt_dev_err(hdev, fmt, ...) \ + BT_ERR("%s: " fmt, (hdev)->name, ##__VA_ARGS__) +#define bt_dev_dbg(hdev, fmt, ...) 
\ + BT_DBG("%s: " fmt, (hdev)->name, ##__VA_ARGS__) + /* Connection and socket states */ enum { BT_CONNECTED = 1, /* Equal to TCP_ESTABLISHED to make net code happy */ @@ -280,22 +296,22 @@ typedef void (*hci_req_complete_t)(struct hci_dev *hdev, u8 status, u16 opcode); typedef void (*hci_req_complete_skb_t)(struct hci_dev *hdev, u8 status, u16 opcode, struct sk_buff *skb); -struct req_ctrl { - bool start; - u8 event; - hci_req_complete_t complete; - hci_req_complete_skb_t complete_skb; +struct hci_ctrl { + __u16 opcode; + bool req_start; + u8 req_event; + hci_req_complete_t req_complete; + hci_req_complete_skb_t req_complete_skb; }; struct bt_skb_cb { __u8 pkt_type; __u8 force_active; - __u16 opcode; __u16 expect; __u8 incoming:1; union { struct l2cap_ctrl l2cap; - struct req_ctrl req; + struct hci_ctrl hci; }; }; #define bt_cb(skb) ((struct bt_skb_cb *)((skb)->cb)) diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h index 7ca6690355ea..0205b80cc90b 100644 --- a/include/net/bluetooth/hci.h +++ b/include/net/bluetooth/hci.h @@ -44,6 +44,9 @@ #define HCI_DEV_DOWN 4 #define HCI_DEV_SUSPEND 5 #define HCI_DEV_RESUME 6 +#define HCI_DEV_OPEN 7 +#define HCI_DEV_CLOSE 8 +#define HCI_DEV_SETUP 9 /* HCI notify events */ #define HCI_NOTIFY_CONN_ADD 1 @@ -168,6 +171,15 @@ enum { * during the hdev->setup vendor callback. */ HCI_QUIRK_SIMULTANEOUS_DISCOVERY, + + /* When this quirk is set, the enabling of diagnostic mode is + * not persistent over HCI Reset. Every time the controller + * is brought up it needs to be reprogrammed. + * + * This quirk can be set before hci_register_dev is called or + * during the hdev->setup vendor callback. + */ + HCI_QUIRK_NON_PERSISTENT_DIAG, }; /* HCI device flags */ @@ -238,6 +250,7 @@ enum { HCI_LE_SCAN_INTERRUPTED, HCI_DUT_MODE, + HCI_VENDOR_DIAG, HCI_FORCE_BREDR_SMP, HCI_FORCE_STATIC_ADDR, @@ -260,6 +273,7 @@ enum { #define HCI_ACLDATA_PKT 0x02 #define HCI_SCODATA_PKT 0x03 #define HCI_EVENT_PKT 0x04 +#define HCI_DIAG_PKT 0xf0 #define HCI_VENDOR_PKT 0xff /* HCI packet types */ diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h index 9e1a59e01fa2..1878d0a96333 100644 --- a/include/net/bluetooth/hci_core.h +++ b/include/net/bluetooth/hci_core.h @@ -398,6 +398,8 @@ struct hci_dev { int (*send)(struct hci_dev *hdev, struct sk_buff *skb); void (*notify)(struct hci_dev *hdev, unsigned int evt); void (*hw_error)(struct hci_dev *hdev, u8 code); + int (*post_init)(struct hci_dev *hdev); + int (*set_diag)(struct hci_dev *hdev, bool enable); int (*set_bdaddr)(struct hci_dev *hdev, const bdaddr_t *bdaddr); }; @@ -469,6 +471,7 @@ struct hci_conn { struct delayed_work auto_accept_work; struct delayed_work idle_work; struct delayed_work le_conn_timeout; + struct work_struct le_scan_cleanup; struct device dev; struct dentry *debugfs; @@ -791,6 +794,30 @@ static inline struct hci_conn *hci_conn_hash_lookup_ba(struct hci_dev *hdev, return NULL; } +static inline struct hci_conn *hci_conn_hash_lookup_le(struct hci_dev *hdev, + bdaddr_t *ba, + __u8 ba_type) +{ + struct hci_conn_hash *h = &hdev->conn_hash; + struct hci_conn *c; + + rcu_read_lock(); + + list_for_each_entry_rcu(c, &h->list, list) { + if (c->type != LE_LINK) + continue; + + if (ba_type == c->dst_type && !bacmp(&c->dst, ba)) { + rcu_read_unlock(); + return c; + } + } + + rcu_read_unlock(); + + return NULL; +} + static inline struct hci_conn *hci_conn_hash_lookup_state(struct hci_dev *hdev, __u8 type, __u16 state) { @@ -987,6 +1014,7 @@ int hci_resume_dev(struct hci_dev *hdev); 
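/*
 * Illustrative sketch, not from this patch: the new bt_dev_* wrappers
 * (defined in bluetooth.h above) prefix the message with hdev->name, so
 * driver code no longer passes the controller name by hand.  The function
 * name is hypothetical.
 */
static void my_report_cmd_failure(struct hci_dev *hdev, u16 opcode, u8 status)
{
	bt_dev_err(hdev, "command 0x%4.4x failed with status 0x%2.2x",
		   opcode, status);
	bt_dev_dbg(hdev, "scheduling retry");
}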
int hci_reset_dev(struct hci_dev *hdev); int hci_dev_open(__u16 dev); int hci_dev_close(__u16 dev); +int hci_dev_do_close(struct hci_dev *hdev); int hci_dev_reset(__u16 dev); int hci_dev_reset_stat(__u16 dev); int hci_dev_cmd(unsigned int cmd, void __user *arg); @@ -1014,9 +1042,6 @@ void hci_conn_params_clear_disabled(struct hci_dev *hdev); struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list, bdaddr_t *addr, u8 addr_type); -struct hci_conn_params *hci_explicit_connect_lookup(struct hci_dev *hdev, - bdaddr_t *addr, - u8 addr_type); void hci_uuids_clear(struct hci_dev *hdev); @@ -1065,6 +1090,7 @@ int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance); void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb); int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb); +int hci_recv_diag(struct hci_dev *hdev, struct sk_buff *skb); void hci_init_sysfs(struct hci_dev *hdev); void hci_conn_init_sysfs(struct hci_conn *conn); @@ -1348,6 +1374,9 @@ void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb); void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode); +struct sk_buff *hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen, + const void *param, u32 timeout); + /* ----- HCI Sockets ----- */ void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb); void hci_send_to_channel(unsigned short channel, struct sk_buff *skb, @@ -1452,7 +1481,7 @@ void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, void mgmt_discovering(struct hci_dev *hdev, u8 discovering); bool mgmt_powering_down(struct hci_dev *hdev); void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent); -void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk); +void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk, bool persistent); void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk, bool persistent); void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr, diff --git a/include/net/bluetooth/hci_mon.h b/include/net/bluetooth/hci_mon.h index 77d1e5764185..2b67567cf28d 100644 --- a/include/net/bluetooth/hci_mon.h +++ b/include/net/bluetooth/hci_mon.h @@ -39,6 +39,10 @@ struct hci_mon_hdr { #define HCI_MON_ACL_RX_PKT 5 #define HCI_MON_SCO_TX_PKT 6 #define HCI_MON_SCO_RX_PKT 7 +#define HCI_MON_OPEN_INDEX 8 +#define HCI_MON_CLOSE_INDEX 9 +#define HCI_MON_INDEX_INFO 10 +#define HCI_MON_VENDOR_DIAG 11 struct hci_mon_new_index { __u8 type; @@ -48,4 +52,10 @@ struct hci_mon_new_index { } __packed; #define HCI_MON_NEW_INDEX_SIZE 16 +struct hci_mon_index_info { + bdaddr_t bdaddr; + __le16 manufacturer; +} __packed; +#define HCI_MON_INDEX_INFO_SIZE 8 + #endif /* __HCI_MON_H */ diff --git a/include/net/bond_3ad.h b/include/net/bond_3ad.h index c2a40a172fcd..f1fbc3b11962 100644 --- a/include/net/bond_3ad.h +++ b/include/net/bond_3ad.h @@ -297,8 +297,7 @@ void bond_3ad_bind_slave(struct slave *slave); void bond_3ad_unbind_slave(struct slave *slave); void bond_3ad_state_machine_handler(struct work_struct *); void bond_3ad_initiate_agg_selection(struct bonding *bond, int timeout); -void bond_3ad_adapter_speed_changed(struct slave *slave); -void bond_3ad_adapter_duplex_changed(struct slave *slave); +void bond_3ad_adapter_speed_duplex_changed(struct slave *slave); void bond_3ad_handle_link_change(struct slave *slave, char link); int bond_3ad_get_active_agg_info(struct bonding *bond, struct ad_info *ad_info); int __bond_3ad_get_active_agg_info(struct bonding *bond, diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h 
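/*
 * Illustrative sketch, not from this patch: hci_cmd_sync() (declared in
 * hci_core.h above) sends one command and waits for its completion, e.g.
 * from a vendor setup callback.  The 0xfc01 opcode and "my_setup" name are
 * made up; the ERR_PTR return convention assumed here matches the existing
 * __hci_cmd_sync() helper.
 */
static int my_setup(struct hci_dev *hdev)
{
	struct sk_buff *skb;

	skb = hci_cmd_sync(hdev, 0xfc01, 0, NULL, HCI_INIT_TIMEOUT);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	/* parse the returned event parameters in skb->data here */
	kfree_skb(skb);
	return 0;
}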
index f0889a247643..2c7bdb81d30c 100644 --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h @@ -5,6 +5,7 @@ * * Copyright 2006-2010 Johannes Berg <johannes@sipsolutions.net> * Copyright 2013-2014 Intel Mobile Communications GmbH + * Copyright 2015 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as @@ -858,6 +859,8 @@ struct station_del_parameters { /** * enum cfg80211_station_type - the type of station being modified * @CFG80211_STA_AP_CLIENT: client of an AP interface + * @CFG80211_STA_AP_CLIENT_UNASSOC: client of an AP interface that is still + * unassociated (update properties for this type of client is permitted) * @CFG80211_STA_AP_MLME_CLIENT: client of an AP interface that has * the AP MLME in the device * @CFG80211_STA_AP_STA: AP station on managed interface @@ -873,6 +876,7 @@ struct station_del_parameters { */ enum cfg80211_station_type { CFG80211_STA_AP_CLIENT, + CFG80211_STA_AP_CLIENT_UNASSOC, CFG80211_STA_AP_MLME_CLIENT, CFG80211_STA_AP_STA, CFG80211_STA_IBSS, @@ -1498,13 +1502,26 @@ struct cfg80211_match_set { }; /** + * struct cfg80211_sched_scan_plan - scan plan for scheduled scan + * + * @interval: interval between scheduled scan iterations. In seconds. + * @iterations: number of scan iterations in this scan plan. Zero means + * infinite loop. + * The last scan plan will always have this parameter set to zero, + * all other scan plans will have a finite number of iterations. + */ +struct cfg80211_sched_scan_plan { + u32 interval; + u32 iterations; +}; + +/** * struct cfg80211_sched_scan_request - scheduled scan request description * * @ssids: SSIDs to scan for (passed in the probe_reqs in active scans) * @n_ssids: number of SSIDs * @n_channels: total number of channels to scan * @scan_width: channel width for scanning - * @interval: interval between each scheduled scan cycle * @ie: optional information element(s) to add into Probe Request or %NULL * @ie_len: length of ie in octets * @flags: bit field of flags controlling operation @@ -1523,6 +1540,9 @@ struct cfg80211_match_set { * @mac_addr_mask: MAC address mask used with randomisation, bits that * are 0 in the mask should be randomised, bits that are 1 should * be taken from the @mac_addr + * @scan_plans: scan plans to be executed in this scheduled scan. Lowest + * index must be executed first. + * @n_scan_plans: number of scan plans, at least 1. 
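/*
 * Illustrative sketch, not from this patch: per the semantics documented
 * above, only the last plan uses iterations == 0, meaning "repeat forever".
 * The intervals and counts below are made up.
 */
static const struct cfg80211_sched_scan_plan my_scan_plans[] = {
	{ .interval = 30,  .iterations = 4 },	/* four scans, 30 s apart */
	{ .interval = 300, .iterations = 0 },	/* then every 5 min, indefinitely */
};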
* @rcu_head: RCU callback used to free the struct * @owner_nlportid: netlink portid of owner (if this should is a request * owned by a particular socket) @@ -1536,7 +1556,6 @@ struct cfg80211_sched_scan_request { int n_ssids; u32 n_channels; enum nl80211_bss_scan_width scan_width; - u32 interval; const u8 *ie; size_t ie_len; u32 flags; @@ -1544,6 +1563,8 @@ struct cfg80211_sched_scan_request { int n_match_sets; s32 min_rssi_thold; u32 delay; + struct cfg80211_sched_scan_plan *scan_plans; + int n_scan_plans; u8 mac_addr[ETH_ALEN] __aligned(2); u8 mac_addr_mask[ETH_ALEN] __aligned(2); @@ -1573,6 +1594,26 @@ enum cfg80211_signal_type { }; /** + * struct cfg80211_inform_bss - BSS inform data + * @chan: channel the frame was received on + * @scan_width: scan width that was used + * @signal: signal strength value, according to the wiphy's + * signal type + * @boottime_ns: timestamp (CLOCK_BOOTTIME) when the information was + * received; should match the time when the frame was actually + * received by the device (not just by the host, in case it was + * buffered on the device) and be accurate to about 10ms. + * If the frame isn't buffered, just passing the return value of + * ktime_get_boot_ns() is likely appropriate. + */ +struct cfg80211_inform_bss { + struct ieee80211_channel *chan; + enum nl80211_bss_scan_width scan_width; + s32 signal; + u64 boottime_ns; +}; + +/** * struct cfg80211_bss_ie_data - BSS entry IE data * @tsf: TSF contained in the frame that carried these IEs * @rcu_head: internal use, for freeing @@ -2358,6 +2399,10 @@ struct cfg80211_qos_map { * @set_power_mgmt: Configure WLAN power management. A timeout value of -1 * allows the driver to adjust the dynamic ps timeout value. * @set_cqm_rssi_config: Configure connection quality monitor RSSI threshold. + * After configuration, the driver should (soon) send an event indicating + * the current level is above/below the configured threshold; this may + * need some care when the configuration is changed (without first being + * disabled.) * @set_cqm_txe_config: Configure connection quality monitor TX error * thresholds. * @sched_scan_start: Tell the driver to start a scheduled scan. @@ -2971,12 +3016,21 @@ enum wiphy_vendor_command_flags { * @doit: callback for the operation, note that wdev is %NULL if the * flags didn't ask for a wdev and non-%NULL otherwise; the data * pointer may be %NULL if userspace provided no data at all + * @dumpit: dump callback, for transferring bigger/multiple items. The + * @storage points to cb->args[5], ie. is preserved over the multiple + * dumpit calls. + * It's recommended to not have the same sub command with both @doit and + * @dumpit, so that userspace can assume certain ones are get and others + * are used with dump requests. */ struct wiphy_vendor_command { struct nl80211_vendor_cmd_info info; u32 flags; int (*doit)(struct wiphy *wiphy, struct wireless_dev *wdev, const void *data, int data_len); + int (*dumpit)(struct wiphy *wiphy, struct wireless_dev *wdev, + struct sk_buff *skb, const void *data, int data_len, + unsigned long *storage); }; /** @@ -3044,6 +3098,12 @@ struct wiphy_vendor_command { * include fixed IEs like supported rates * @max_sched_scan_ie_len: same as max_scan_ie_len, but for scheduled * scans + * @max_sched_scan_plans: maximum number of scan plans (scan interval and number + * of iterations) for scheduled scan supported by the device. + * @max_sched_scan_plan_interval: maximum interval (in seconds) for a + * single scan plan supported by the device. 
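/*
 * Illustrative sketch, not from this patch: a driver advertising the
 * scheduled-scan plan limits documented above.  All numbers are made up and
 * would normally come from firmware capabilities.
 */
static void my_wiphy_init(struct wiphy *wiphy)
{
	wiphy->max_sched_scan_plans = 2;
	wiphy->max_sched_scan_plan_interval = 65535;	/* seconds */
	wiphy->max_sched_scan_plan_iterations = 254;
}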
+ * @max_sched_scan_plan_iterations: maximum number of iterations for a single + * scan plan supported by the device. * @coverage_class: current coverage class * @fw_version: firmware version for ethtool reporting * @hw_version: hardware version for ethtool reporting @@ -3151,6 +3211,9 @@ struct wiphy { u8 max_match_sets; u16 max_scan_ie_len; u16 max_sched_scan_ie_len; + u32 max_sched_scan_plans; + u32 max_sched_scan_plan_interval; + u32 max_sched_scan_plan_iterations; int n_cipher_suites; const u32 *cipher_suites; @@ -3946,14 +4009,11 @@ void cfg80211_sched_scan_stopped(struct wiphy *wiphy); void cfg80211_sched_scan_stopped_rtnl(struct wiphy *wiphy); /** - * cfg80211_inform_bss_width_frame - inform cfg80211 of a received BSS frame - * + * cfg80211_inform_bss_frame_data - inform cfg80211 of a received BSS frame * @wiphy: the wiphy reporting the BSS - * @rx_channel: The channel the frame was received on - * @scan_width: width of the control channel + * @data: the BSS metadata * @mgmt: the management frame (probe response or beacon) * @len: length of the management frame - * @signal: the signal strength, type depends on the wiphy's signal_type * @gfp: context flags * * This informs cfg80211 that BSS information was found and @@ -3963,11 +4023,26 @@ void cfg80211_sched_scan_stopped_rtnl(struct wiphy *wiphy); * Or %NULL on error. */ struct cfg80211_bss * __must_check +cfg80211_inform_bss_frame_data(struct wiphy *wiphy, + struct cfg80211_inform_bss *data, + struct ieee80211_mgmt *mgmt, size_t len, + gfp_t gfp); + +static inline struct cfg80211_bss * __must_check cfg80211_inform_bss_width_frame(struct wiphy *wiphy, struct ieee80211_channel *rx_channel, enum nl80211_bss_scan_width scan_width, struct ieee80211_mgmt *mgmt, size_t len, - s32 signal, gfp_t gfp); + s32 signal, gfp_t gfp) +{ + struct cfg80211_inform_bss data = { + .chan = rx_channel, + .scan_width = scan_width, + .signal = signal, + }; + + return cfg80211_inform_bss_frame_data(wiphy, &data, mgmt, len, gfp); +} static inline struct cfg80211_bss * __must_check cfg80211_inform_bss_frame(struct wiphy *wiphy, @@ -3975,9 +4050,13 @@ cfg80211_inform_bss_frame(struct wiphy *wiphy, struct ieee80211_mgmt *mgmt, size_t len, s32 signal, gfp_t gfp) { - return cfg80211_inform_bss_width_frame(wiphy, rx_channel, - NL80211_BSS_CHAN_WIDTH_20, - mgmt, len, signal, gfp); + struct cfg80211_inform_bss data = { + .chan = rx_channel, + .scan_width = NL80211_BSS_CHAN_WIDTH_20, + .signal = signal, + }; + + return cfg80211_inform_bss_frame_data(wiphy, &data, mgmt, len, gfp); } /** @@ -3994,11 +4073,10 @@ enum cfg80211_bss_frame_type { }; /** - * cfg80211_inform_bss_width - inform cfg80211 of a new BSS + * cfg80211_inform_bss_data - inform cfg80211 of a new BSS * * @wiphy: the wiphy reporting the BSS - * @rx_channel: The channel the frame was received on - * @scan_width: width of the control channel + * @data: the BSS metadata * @ftype: frame type (if known) * @bssid: the BSSID of the BSS * @tsf: the TSF sent by the peer in the beacon/probe response (or 0) @@ -4006,7 +4084,6 @@ enum cfg80211_bss_frame_type { * @beacon_interval: the beacon interval announced by the peer * @ie: additional IEs sent by the peer * @ielen: length of the additional IEs - * @signal: the signal strength, type depends on the wiphy's signal_type * @gfp: context flags * * This informs cfg80211 that BSS information was found and @@ -4016,13 +4093,32 @@ enum cfg80211_bss_frame_type { * Or %NULL on error. 
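/*
 * Illustrative sketch, not from this patch: a driver reporting a received
 * beacon through the new metadata-based entry point.  "my_report_beacon" is
 * hypothetical; boottime_ns follows the guidance in the struct documentation
 * above.
 */
static void my_report_beacon(struct wiphy *wiphy, struct ieee80211_channel *chan,
			     struct ieee80211_mgmt *mgmt, size_t len, s32 signal)
{
	struct cfg80211_inform_bss data = {
		.chan = chan,
		.scan_width = NL80211_BSS_CHAN_WIDTH_20,
		.signal = signal,
		.boottime_ns = ktime_get_boot_ns(),
	};
	struct cfg80211_bss *bss;

	bss = cfg80211_inform_bss_frame_data(wiphy, &data, mgmt, len, GFP_KERNEL);
	if (bss)
		cfg80211_put_bss(wiphy, bss);	/* drop the reference returned to us */
}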
*/ struct cfg80211_bss * __must_check +cfg80211_inform_bss_data(struct wiphy *wiphy, + struct cfg80211_inform_bss *data, + enum cfg80211_bss_frame_type ftype, + const u8 *bssid, u64 tsf, u16 capability, + u16 beacon_interval, const u8 *ie, size_t ielen, + gfp_t gfp); + +static inline struct cfg80211_bss * __must_check cfg80211_inform_bss_width(struct wiphy *wiphy, struct ieee80211_channel *rx_channel, enum nl80211_bss_scan_width scan_width, enum cfg80211_bss_frame_type ftype, const u8 *bssid, u64 tsf, u16 capability, u16 beacon_interval, const u8 *ie, size_t ielen, - s32 signal, gfp_t gfp); + s32 signal, gfp_t gfp) +{ + struct cfg80211_inform_bss data = { + .chan = rx_channel, + .scan_width = scan_width, + .signal = signal, + }; + + return cfg80211_inform_bss_data(wiphy, &data, ftype, bssid, tsf, + capability, beacon_interval, ie, ielen, + gfp); +} static inline struct cfg80211_bss * __must_check cfg80211_inform_bss(struct wiphy *wiphy, @@ -4032,11 +4128,15 @@ cfg80211_inform_bss(struct wiphy *wiphy, u16 beacon_interval, const u8 *ie, size_t ielen, s32 signal, gfp_t gfp) { - return cfg80211_inform_bss_width(wiphy, rx_channel, - NL80211_BSS_CHAN_WIDTH_20, ftype, - bssid, tsf, capability, - beacon_interval, ie, ielen, signal, - gfp); + struct cfg80211_inform_bss data = { + .chan = rx_channel, + .scan_width = NL80211_BSS_CHAN_WIDTH_20, + .signal = signal, + }; + + return cfg80211_inform_bss_data(wiphy, &data, ftype, bssid, tsf, + capability, beacon_interval, ie, ielen, + gfp); } struct cfg80211_bss *cfg80211_get_bss(struct wiphy *wiphy, diff --git a/include/net/cfg802154.h b/include/net/cfg802154.h index 76b1ffaea863..171cd76558fb 100644 --- a/include/net/cfg802154.h +++ b/include/net/cfg802154.h @@ -27,6 +27,16 @@ struct wpan_phy; struct wpan_phy_cca; +#ifdef CONFIG_IEEE802154_NL802154_EXPERIMENTAL +struct ieee802154_llsec_device_key; +struct ieee802154_llsec_seclevel; +struct ieee802154_llsec_params; +struct ieee802154_llsec_device; +struct ieee802154_llsec_table; +struct ieee802154_llsec_key_id; +struct ieee802154_llsec_key; +#endif /* CONFIG_IEEE802154_NL802154_EXPERIMENTAL */ + struct cfg802154_ops { struct net_device * (*add_virtual_intf_deprecated)(struct wpan_phy *wpan_phy, const char *name, @@ -65,6 +75,51 @@ struct cfg802154_ops { struct wpan_dev *wpan_dev, bool mode); int (*set_ackreq_default)(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev, bool ackreq); +#ifdef CONFIG_IEEE802154_NL802154_EXPERIMENTAL + void (*get_llsec_table)(struct wpan_phy *wpan_phy, + struct wpan_dev *wpan_dev, + struct ieee802154_llsec_table **table); + void (*lock_llsec_table)(struct wpan_phy *wpan_phy, + struct wpan_dev *wpan_dev); + void (*unlock_llsec_table)(struct wpan_phy *wpan_phy, + struct wpan_dev *wpan_dev); + /* TODO remove locking/get table callbacks, this is part of the + * nl802154 interface and should be accessible from ieee802154 layer. 
+ */ + int (*get_llsec_params)(struct wpan_phy *wpan_phy, + struct wpan_dev *wpan_dev, + struct ieee802154_llsec_params *params); + int (*set_llsec_params)(struct wpan_phy *wpan_phy, + struct wpan_dev *wpan_dev, + const struct ieee802154_llsec_params *params, + int changed); + int (*add_llsec_key)(struct wpan_phy *wpan_phy, + struct wpan_dev *wpan_dev, + const struct ieee802154_llsec_key_id *id, + const struct ieee802154_llsec_key *key); + int (*del_llsec_key)(struct wpan_phy *wpan_phy, + struct wpan_dev *wpan_dev, + const struct ieee802154_llsec_key_id *id); + int (*add_seclevel)(struct wpan_phy *wpan_phy, + struct wpan_dev *wpan_dev, + const struct ieee802154_llsec_seclevel *sl); + int (*del_seclevel)(struct wpan_phy *wpan_phy, + struct wpan_dev *wpan_dev, + const struct ieee802154_llsec_seclevel *sl); + int (*add_device)(struct wpan_phy *wpan_phy, + struct wpan_dev *wpan_dev, + const struct ieee802154_llsec_device *dev); + int (*del_device)(struct wpan_phy *wpan_phy, + struct wpan_dev *wpan_dev, __le64 extended_addr); + int (*add_devkey)(struct wpan_phy *wpan_phy, + struct wpan_dev *wpan_dev, + __le64 extended_addr, + const struct ieee802154_llsec_device_key *key); + int (*del_devkey)(struct wpan_phy *wpan_phy, + struct wpan_dev *wpan_dev, + __le64 extended_addr, + const struct ieee802154_llsec_device_key *key); +#endif /* CONFIG_IEEE802154_NL802154_EXPERIMENTAL */ }; static inline bool @@ -167,6 +222,102 @@ struct wpan_phy { char priv[0] __aligned(NETDEV_ALIGN); }; +struct ieee802154_addr { + u8 mode; + __le16 pan_id; + union { + __le16 short_addr; + __le64 extended_addr; + }; +}; + +struct ieee802154_llsec_key_id { + u8 mode; + u8 id; + union { + struct ieee802154_addr device_addr; + __le32 short_source; + __le64 extended_source; + }; +}; + +#define IEEE802154_LLSEC_KEY_SIZE 16 + +struct ieee802154_llsec_key { + u8 frame_types; + u32 cmd_frame_ids; + /* TODO replace with NL802154_KEY_SIZE */ + u8 key[IEEE802154_LLSEC_KEY_SIZE]; +}; + +struct ieee802154_llsec_key_entry { + struct list_head list; + + struct ieee802154_llsec_key_id id; + struct ieee802154_llsec_key *key; +}; + +struct ieee802154_llsec_params { + bool enabled; + + __be32 frame_counter; + u8 out_level; + struct ieee802154_llsec_key_id out_key; + + __le64 default_key_source; + + __le16 pan_id; + __le64 hwaddr; + __le64 coord_hwaddr; + __le16 coord_shortaddr; +}; + +struct ieee802154_llsec_table { + struct list_head keys; + struct list_head devices; + struct list_head security_levels; +}; + +struct ieee802154_llsec_seclevel { + struct list_head list; + + u8 frame_type; + u8 cmd_frame_id; + bool device_override; + u32 sec_levels; +}; + +struct ieee802154_llsec_device { + struct list_head list; + + __le16 pan_id; + __le16 short_addr; + __le64 hwaddr; + u32 frame_counter; + bool seclevel_exempt; + + u8 key_mode; + struct list_head keys; +}; + +struct ieee802154_llsec_device_key { + struct list_head list; + + struct ieee802154_llsec_key_id key_id; + u32 frame_counter; +}; + +struct wpan_dev_header_ops { + /* TODO create callback currently assumes ieee802154_mac_cb inside + * skb->cb. This should be changed to give these information as + * parameter. 
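/*
 * Illustrative sketch, not from this patch: how a MAC implementation might
 * wire the new experimental LLSEC callbacks into its cfg802154_ops.  All
 * "my_*" handlers are hypothetical and not shown; the mandatory callbacks
 * are elided.
 */
static const struct cfg802154_ops my_cfg802154_ops = {
	/* ... existing mandatory callbacks go here ... */
#ifdef CONFIG_IEEE802154_NL802154_EXPERIMENTAL
	.get_llsec_params	= my_get_llsec_params,
	.set_llsec_params	= my_set_llsec_params,
	.add_llsec_key		= my_add_llsec_key,
	.del_llsec_key		= my_del_llsec_key,
	.add_seclevel		= my_add_seclevel,
	.del_seclevel		= my_del_seclevel,
#endif
};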
+ */ + int (*create)(struct sk_buff *skb, struct net_device *dev, + const struct ieee802154_addr *daddr, + const struct ieee802154_addr *saddr, + unsigned int len); +}; + struct wpan_dev { struct wpan_phy *wpan_phy; int iftype; @@ -175,6 +326,8 @@ struct wpan_dev { struct list_head list; struct net_device *netdev; + const struct wpan_dev_header_ops *header_ops; + /* lowpan interface, set when the wpan_dev belongs to one lowpan_dev */ struct net_device *lowpan_dev; @@ -205,6 +358,17 @@ struct wpan_dev { #define to_phy(_dev) container_of(_dev, struct wpan_phy, dev) +static inline int +wpan_dev_hard_header(struct sk_buff *skb, struct net_device *dev, + const struct ieee802154_addr *daddr, + const struct ieee802154_addr *saddr, + unsigned int len) +{ + struct wpan_dev *wpan_dev = dev->ieee802154_ptr; + + return wpan_dev->header_ops->create(skb, dev, daddr, saddr, len); +} + struct wpan_phy * wpan_phy_new(const struct cfg802154_ops *ops, size_t priv_size); static inline void wpan_phy_set_dev(struct wpan_phy *phy, struct device *dev) diff --git a/include/net/dn_neigh.h b/include/net/dn_neigh.h index d0424269313f..5e902fc3f4eb 100644 --- a/include/net/dn_neigh.h +++ b/include/net/dn_neigh.h @@ -18,11 +18,11 @@ struct dn_neigh { void dn_neigh_init(void); void dn_neigh_cleanup(void); -int dn_neigh_router_hello(struct sock *sk, struct sk_buff *skb); -int dn_neigh_endnode_hello(struct sock *sk, struct sk_buff *skb); +int dn_neigh_router_hello(struct net *net, struct sock *sk, struct sk_buff *skb); +int dn_neigh_endnode_hello(struct net *net, struct sock *sk, struct sk_buff *skb); void dn_neigh_pointopoint_hello(struct sk_buff *skb); int dn_neigh_elist(struct net_device *dev, unsigned char *ptr, int n); -int dn_to_neigh_output(struct sock *sk, struct sk_buff *skb); +int dn_to_neigh_output(struct net *net, struct sock *sk, struct sk_buff *skb); extern struct neigh_table dn_neigh_table; diff --git a/include/net/dsa.h b/include/net/dsa.h index b34d812bc5d0..82a4c6011173 100644 --- a/include/net/dsa.h +++ b/include/net/dsa.h @@ -197,6 +197,11 @@ static inline u8 dsa_upstream_port(struct dsa_switch *ds) return ds->pd->rtable[dst->cpu_switch]; } +struct switchdev_trans; +struct switchdev_obj; +struct switchdev_obj_port_fdb; +struct switchdev_obj_port_vlan; + struct dsa_switch_driver { struct list_head list; @@ -305,24 +310,32 @@ struct dsa_switch_driver { /* * VLAN support */ + int (*port_vlan_prepare)(struct dsa_switch *ds, int port, + const struct switchdev_obj_port_vlan *vlan, + struct switchdev_trans *trans); + int (*port_vlan_add)(struct dsa_switch *ds, int port, + const struct switchdev_obj_port_vlan *vlan, + struct switchdev_trans *trans); + int (*port_vlan_del)(struct dsa_switch *ds, int port, + const struct switchdev_obj_port_vlan *vlan); int (*port_pvid_get)(struct dsa_switch *ds, int port, u16 *pvid); - int (*port_pvid_set)(struct dsa_switch *ds, int port, u16 pvid); - int (*port_vlan_add)(struct dsa_switch *ds, int port, u16 vid, - bool untagged); - int (*port_vlan_del)(struct dsa_switch *ds, int port, u16 vid); int (*vlan_getnext)(struct dsa_switch *ds, u16 *vid, unsigned long *ports, unsigned long *untagged); /* * Forwarding database */ + int (*port_fdb_prepare)(struct dsa_switch *ds, int port, + const struct switchdev_obj_port_fdb *fdb, + struct switchdev_trans *trans); int (*port_fdb_add)(struct dsa_switch *ds, int port, - const unsigned char *addr, u16 vid); + const struct switchdev_obj_port_fdb *fdb, + struct switchdev_trans *trans); int (*port_fdb_del)(struct dsa_switch *ds, int port, - 
const unsigned char *addr, u16 vid); - int (*port_fdb_getnext)(struct dsa_switch *ds, int port, - unsigned char *addr, u16 *vid, - bool *is_static); + const struct switchdev_obj_port_fdb *fdb); + int (*port_fdb_dump)(struct dsa_switch *ds, int port, + struct switchdev_obj_port_fdb *fdb, + int (*cb)(struct switchdev_obj *obj)); }; void register_switch_driver(struct dsa_switch_driver *type); diff --git a/include/net/dst.h b/include/net/dst.h index 9261d928303d..1279f9b09791 100644 --- a/include/net/dst.h +++ b/include/net/dst.h @@ -45,7 +45,7 @@ struct dst_entry { void *__pad1; #endif int (*input)(struct sk_buff *); - int (*output)(struct sock *sk, struct sk_buff *skb); + int (*output)(struct net *net, struct sock *sk, struct sk_buff *skb); unsigned short flags; #define DST_HOST 0x0001 @@ -365,10 +365,10 @@ static inline void skb_tunnel_rx(struct sk_buff *skb, struct net_device *dev, __skb_tunnel_rx(skb, dev, net); } -int dst_discard_sk(struct sock *sk, struct sk_buff *skb); +int dst_discard_out(struct net *net, struct sock *sk, struct sk_buff *skb); static inline int dst_discard(struct sk_buff *skb) { - return dst_discard_sk(skb->sk, skb); + return dst_discard_out(&init_net, skb->sk, skb); } void *dst_alloc(struct dst_ops *ops, struct net_device *dev, int initial_ref, int initial_obsolete, unsigned short flags); @@ -454,13 +454,9 @@ static inline void dst_set_expires(struct dst_entry *dst, int timeout) } /* Output packet to network from transport. */ -static inline int dst_output_sk(struct sock *sk, struct sk_buff *skb) +static inline int dst_output(struct net *net, struct sock *sk, struct sk_buff *skb) { - return skb_dst(skb)->output(sk, skb); -} -static inline int dst_output(struct sk_buff *skb) -{ - return dst_output_sk(skb->sk, skb); + return skb_dst(skb)->output(net, sk, skb); } /* Input packet from network to transport. 
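/*
 * Illustrative sketch, not from this patch: dst_output() and the
 * dst->output() callback (shown above) now take an explicit struct net, so a
 * netfilter okfn-style caller simply forwards all three arguments.  The
 * function name is hypothetical.
 */
static int my_finish_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	return dst_output(net, sk, skb);
}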
*/ @@ -489,7 +485,8 @@ struct flowi; #ifndef CONFIG_XFRM static inline struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig, - const struct flowi *fl, struct sock *sk, + const struct flowi *fl, + const struct sock *sk, int flags) { return dst_orig; @@ -498,7 +495,7 @@ static inline struct dst_entry *xfrm_lookup(struct net *net, static inline struct dst_entry *xfrm_lookup_route(struct net *net, struct dst_entry *dst_orig, const struct flowi *fl, - struct sock *sk, + const struct sock *sk, int flags) { return dst_orig; @@ -511,11 +508,11 @@ static inline struct xfrm_state *dst_xfrm(const struct dst_entry *dst) #else struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig, - const struct flowi *fl, struct sock *sk, + const struct flowi *fl, const struct sock *sk, int flags); struct dst_entry *xfrm_lookup_route(struct net *net, struct dst_entry *dst_orig, - const struct flowi *fl, struct sock *sk, + const struct flowi *fl, const struct sock *sk, int flags); /* skb attached with this dst needs transformation if dst->xfrm is valid */ diff --git a/include/net/dst_metadata.h b/include/net/dst_metadata.h index af9d5382f6cb..ce009710120c 100644 --- a/include/net/dst_metadata.h +++ b/include/net/dst_metadata.h @@ -60,6 +60,38 @@ static inline struct metadata_dst *tun_rx_dst(int md_size) return tun_dst; } +static inline struct metadata_dst *tun_dst_unclone(struct sk_buff *skb) +{ + struct metadata_dst *md_dst = skb_metadata_dst(skb); + int md_size = md_dst->u.tun_info.options_len; + struct metadata_dst *new_md; + + if (!md_dst) + return ERR_PTR(-EINVAL); + + new_md = metadata_dst_alloc(md_size, GFP_ATOMIC); + if (!new_md) + return ERR_PTR(-ENOMEM); + + memcpy(&new_md->u.tun_info, &md_dst->u.tun_info, + sizeof(struct ip_tunnel_info) + md_size); + skb_dst_drop(skb); + dst_hold(&new_md->dst); + skb_dst_set(skb, &new_md->dst); + return new_md; +} + +static inline struct ip_tunnel_info *skb_tunnel_info_unclone(struct sk_buff *skb) +{ + struct metadata_dst *dst; + + dst = tun_dst_unclone(skb); + if (IS_ERR(dst)) + return NULL; + + return &dst->u.tun_info; +} + static inline struct metadata_dst *ip_tun_rx_dst(struct sk_buff *skb, __be16 flags, __be64 tunnel_id, diff --git a/include/net/dst_ops.h b/include/net/dst_ops.h index d64253914a6a..a0d443ca16fc 100644 --- a/include/net/dst_ops.h +++ b/include/net/dst_ops.h @@ -9,6 +9,7 @@ struct kmem_cachep; struct net_device; struct sk_buff; struct sock; +struct net; struct dst_ops { unsigned short family; @@ -28,7 +29,7 @@ struct dst_ops { struct sk_buff *skb, u32 mtu); void (*redirect)(struct dst_entry *dst, struct sock *sk, struct sk_buff *skb); - int (*local_out)(struct sk_buff *skb); + int (*local_out)(struct net *net, struct sock *sk, struct sk_buff *skb); struct neighbour * (*neigh_lookup)(const struct dst_entry *dst, struct sk_buff *skb, const void *daddr); diff --git a/include/net/ethoc.h b/include/net/ethoc.h index 2a2d6bb34eb8..bb7f467da7fc 100644 --- a/include/net/ethoc.h +++ b/include/net/ethoc.h @@ -17,6 +17,7 @@ struct ethoc_platform_data { u8 hwaddr[IFHWADDRLEN]; s8 phy_id; u32 eth_clkfreq; + bool big_endian; }; #endif /* !LINUX_NET_ETHOC_H */ diff --git a/include/net/flow.h b/include/net/flow.h index acd6a096250e..83969eebebf3 100644 --- a/include/net/flow.h +++ b/include/net/flow.h @@ -34,7 +34,8 @@ struct flowi_common { __u8 flowic_flags; #define FLOWI_FLAG_ANYSRC 0x01 #define FLOWI_FLAG_KNOWN_NH 0x02 -#define FLOWI_FLAG_VRFSRC 0x04 +#define FLOWI_FLAG_L3MDEV_SRC 0x04 +#define FLOWI_FLAG_SKIP_NH_OIF 0x08 __u32 
flowic_secid; struct flowi_tunnel flowic_tun_key; }; diff --git a/include/net/genetlink.h b/include/net/genetlink.h index a9af1cc8c1bc..1b6b6dcb018d 100644 --- a/include/net/genetlink.h +++ b/include/net/genetlink.h @@ -183,9 +183,8 @@ _genl_register_family_with_ops_grps(struct genl_family *family, (grps), ARRAY_SIZE(grps)) int genl_unregister_family(struct genl_family *family); -void genl_notify(struct genl_family *family, - struct sk_buff *skb, struct net *net, u32 portid, - u32 group, struct nlmsghdr *nlh, gfp_t flags); +void genl_notify(struct genl_family *family, struct sk_buff *skb, + struct genl_info *info, u32 group, gfp_t flags); struct sk_buff *genlmsg_new_unicast(size_t payload, struct genl_info *info, gfp_t flags); diff --git a/include/net/ieee802154_netdev.h b/include/net/ieee802154_netdev.h index 2c10a9f0c6d9..a62a051a3a2f 100644 --- a/include/net/ieee802154_netdev.h +++ b/include/net/ieee802154_netdev.h @@ -50,15 +50,6 @@ struct ieee802154_sechdr { }; }; -struct ieee802154_addr { - u8 mode; - __le16 pan_id; - union { - __le16 short_addr; - __le64 extended_addr; - }; -}; - struct ieee802154_hdr_fc { #if defined(__LITTLE_ENDIAN_BITFIELD) u16 type:3, @@ -99,7 +90,7 @@ struct ieee802154_hdr { * hdr->fc will be ignored. this includes the INTRA_PAN bit and the frame * version, if SECEN is set. */ -int ieee802154_hdr_push(struct sk_buff *skb, const struct ieee802154_hdr *hdr); +int ieee802154_hdr_push(struct sk_buff *skb, struct ieee802154_hdr *hdr); /* pulls the entire 802.15.4 header off of the skb, including the security * header, and performs pan id decompression @@ -243,38 +234,6 @@ static inline struct ieee802154_mac_cb *mac_cb_init(struct sk_buff *skb) return mac_cb(skb); } -#define IEEE802154_LLSEC_KEY_SIZE 16 - -struct ieee802154_llsec_key_id { - u8 mode; - u8 id; - union { - struct ieee802154_addr device_addr; - __le32 short_source; - __le64 extended_source; - }; -}; - -struct ieee802154_llsec_key { - u8 frame_types; - u32 cmd_frame_ids; - u8 key[IEEE802154_LLSEC_KEY_SIZE]; -}; - -struct ieee802154_llsec_key_entry { - struct list_head list; - - struct ieee802154_llsec_key_id id; - struct ieee802154_llsec_key *key; -}; - -struct ieee802154_llsec_device_key { - struct list_head list; - - struct ieee802154_llsec_key_id key_id; - u32 frame_counter; -}; - enum { IEEE802154_LLSEC_DEVKEY_IGNORE, IEEE802154_LLSEC_DEVKEY_RESTRICT, @@ -283,49 +242,6 @@ enum { __IEEE802154_LLSEC_DEVKEY_MAX, }; -struct ieee802154_llsec_device { - struct list_head list; - - __le16 pan_id; - __le16 short_addr; - __le64 hwaddr; - u32 frame_counter; - bool seclevel_exempt; - - u8 key_mode; - struct list_head keys; -}; - -struct ieee802154_llsec_seclevel { - struct list_head list; - - u8 frame_type; - u8 cmd_frame_id; - bool device_override; - u32 sec_levels; -}; - -struct ieee802154_llsec_params { - bool enabled; - - __be32 frame_counter; - u8 out_level; - struct ieee802154_llsec_key_id out_key; - - __le64 default_key_source; - - __le16 pan_id; - __le64 hwaddr; - __le64 coord_hwaddr; - __le16 coord_shortaddr; -}; - -struct ieee802154_llsec_table { - struct list_head keys; - struct list_head devices; - struct list_head security_levels; -}; - #define IEEE802154_MAC_SCAN_ED 0 #define IEEE802154_MAC_SCAN_ACTIVE 1 #define IEEE802154_MAC_SCAN_PASSIVE 2 diff --git a/include/net/inet6_connection_sock.h b/include/net/inet6_connection_sock.h index 6d539e4e5ba7..064cfbe639d0 100644 --- a/include/net/inet6_connection_sock.h +++ b/include/net/inet6_connection_sock.h @@ -25,17 +25,8 @@ struct sockaddr; int 
inet6_csk_bind_conflict(const struct sock *sk, const struct inet_bind_bucket *tb, bool relax); -struct dst_entry *inet6_csk_route_req(struct sock *sk, struct flowi6 *fl6, - const struct request_sock *req); - -struct request_sock *inet6_csk_search_req(struct sock *sk, - const __be16 rport, - const struct in6_addr *raddr, - const struct in6_addr *laddr, - const int iif); - -void inet6_csk_reqsk_queue_hash_add(struct sock *sk, struct request_sock *req, - const unsigned long timeout); +struct dst_entry *inet6_csk_route_req(const struct sock *sk, struct flowi6 *fl6, + const struct request_sock *req, u8 proto); void inet6_csk_addr2sockaddr(struct sock *sk, struct sockaddr *uaddr); diff --git a/include/net/inet_common.h b/include/net/inet_common.h index 279f83591971..109e3ee9108c 100644 --- a/include/net/inet_common.h +++ b/include/net/inet_common.h @@ -41,7 +41,8 @@ int inet_recv_error(struct sock *sk, struct msghdr *msg, int len, static inline void inet_ctl_sock_destroy(struct sock *sk) { - sock_release(sk->sk_socket); + if (sk) + sock_release(sk->sk_socket); } #endif diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h index 0320bbb7d7b5..481fe1c9044c 100644 --- a/include/net/inet_connection_sock.h +++ b/include/net/inet_connection_sock.h @@ -41,9 +41,11 @@ struct inet_connection_sock_af_ops { int (*rebuild_header)(struct sock *sk); void (*sk_rx_dst_set)(struct sock *sk, const struct sk_buff *skb); int (*conn_request)(struct sock *sk, struct sk_buff *skb); - struct sock *(*syn_recv_sock)(struct sock *sk, struct sk_buff *skb, + struct sock *(*syn_recv_sock)(const struct sock *sk, struct sk_buff *skb, struct request_sock *req, - struct dst_entry *dst); + struct dst_entry *dst, + struct request_sock *req_unhash, + bool *own_req); u16 net_header_len; u16 net_frag_header_len; u16 sockaddr_len; @@ -258,31 +260,25 @@ inet_csk_rto_backoff(const struct inet_connection_sock *icsk, struct sock *inet_csk_accept(struct sock *sk, int flags, int *err); -struct request_sock *inet_csk_search_req(struct sock *sk, - const __be16 rport, - const __be32 raddr, - const __be32 laddr); int inet_csk_bind_conflict(const struct sock *sk, const struct inet_bind_bucket *tb, bool relax); int inet_csk_get_port(struct sock *sk, unsigned short snum); -struct dst_entry *inet_csk_route_req(struct sock *sk, struct flowi4 *fl4, +struct dst_entry *inet_csk_route_req(const struct sock *sk, struct flowi4 *fl4, const struct request_sock *req); -struct dst_entry *inet_csk_route_child_sock(struct sock *sk, struct sock *newsk, +struct dst_entry *inet_csk_route_child_sock(const struct sock *sk, + struct sock *newsk, const struct request_sock *req); -static inline void inet_csk_reqsk_queue_add(struct sock *sk, - struct request_sock *req, - struct sock *child) -{ - reqsk_queue_add(&inet_csk(sk)->icsk_accept_queue, req, sk, child); -} - +void inet_csk_reqsk_queue_add(struct sock *sk, struct request_sock *req, + struct sock *child); void inet_csk_reqsk_queue_hash_add(struct sock *sk, struct request_sock *req, unsigned long timeout); +struct sock *inet_csk_complete_hashdance(struct sock *sk, struct sock *child, + struct request_sock *req, + bool own_req); -static inline void inet_csk_reqsk_queue_added(struct sock *sk, - const unsigned long timeout) +static inline void inet_csk_reqsk_queue_added(struct sock *sk) { reqsk_queue_added(&inet_csk(sk)->icsk_accept_queue); } @@ -299,10 +295,11 @@ static inline int inet_csk_reqsk_queue_young(const struct sock *sk) static inline int inet_csk_reqsk_queue_is_full(const 
struct sock *sk) { - return reqsk_queue_is_full(&inet_csk(sk)->icsk_accept_queue); + return inet_csk_reqsk_queue_len(sk) >= sk->sk_max_ack_backlog; } void inet_csk_reqsk_queue_drop(struct sock *sk, struct request_sock *req); +void inet_csk_reqsk_queue_drop_and_put(struct sock *sk, struct request_sock *req); void inet_csk_destroy_sock(struct sock *sk); void inet_csk_prepare_forced_close(struct sock *sk); @@ -316,7 +313,7 @@ static inline unsigned int inet_csk_listen_poll(const struct sock *sk) (POLLIN | POLLRDNORM) : 0; } -int inet_csk_listen_start(struct sock *sk, const int nr_table_entries); +int inet_csk_listen_start(struct sock *sk, int backlog); void inet_csk_listen_stop(struct sock *sk); void inet_csk_addr2sockaddr(struct sock *sk, struct sockaddr *uaddr); diff --git a/include/net/inet_frag.h b/include/net/inet_frag.h index 53eead2da743..ac42bbb37b2d 100644 --- a/include/net/inet_frag.h +++ b/include/net/inet_frag.h @@ -108,7 +108,15 @@ struct inet_frags { int inet_frags_init(struct inet_frags *); void inet_frags_fini(struct inet_frags *); -void inet_frags_init_net(struct netns_frags *nf); +static inline int inet_frags_init_net(struct netns_frags *nf) +{ + return percpu_counter_init(&nf->mem, 0, GFP_KERNEL); +} +static inline void inet_frags_uninit_net(struct netns_frags *nf) +{ + percpu_counter_destroy(&nf->mem); +} + void inet_frags_exit_net(struct netns_frags *nf, struct inet_frags *f); void inet_frag_kill(struct inet_frag_queue *q, struct inet_frags *f); @@ -154,11 +162,6 @@ static inline void add_frag_mem_limit(struct netns_frags *nf, int i) __percpu_counter_add(&nf->mem, i, frag_percpu_counter_batch); } -static inline void init_frag_mem_limit(struct netns_frags *nf) -{ - percpu_counter_init(&nf->mem, 0, GFP_KERNEL); -} - static inline unsigned int sum_frag_mem_limit(struct netns_frags *nf) { unsigned int res; diff --git a/include/net/inet_hashtables.h b/include/net/inet_hashtables.h index b07d126694a7..de2e3ade6102 100644 --- a/include/net/inet_hashtables.h +++ b/include/net/inet_hashtables.h @@ -199,13 +199,14 @@ static inline int inet_sk_listen_hashfn(const struct sock *sk) } /* Caller must disable local BH processing. 
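/*
 * A minimal sketch, not part of the patch above: inet_frags_init_net()
 * now returns the percpu_counter_init() result instead of void, so a
 * per-netns setup path is expected to propagate the failure and to use
 * inet_frags_uninit_net() on later errors.  The function name and the
 * threshold values below are hypothetical.
 */
static int example_frags_ns_init(struct netns_frags *nf)
{
        int res;

        nf->high_thresh = 4 * 1024 * 1024;      /* assumed example limits */
        nf->low_thresh  = 3 * 1024 * 1024;

        res = inet_frags_init_net(nf);          /* allocates nf->mem counter */
        if (res)
                return res;

        /* ... further registration; on failure: inet_frags_uninit_net(nf) */
        return 0;
}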
*/ -int __inet_inherit_port(struct sock *sk, struct sock *child); +int __inet_inherit_port(const struct sock *sk, struct sock *child); void inet_put_port(struct sock *sk); void inet_hashinfo_init(struct inet_hashinfo *h); -void __inet_hash_nolisten(struct sock *sk, struct sock *osk); +bool inet_ehash_insert(struct sock *sk, struct sock *osk); +bool inet_ehash_nolisten(struct sock *sk, struct sock *osk); void __inet_hash(struct sock *sk, struct sock *osk); void inet_hash(struct sock *sk); void inet_unhash(struct sock *sk); diff --git a/include/net/inet_sock.h b/include/net/inet_sock.h index 47eb67b08abd..f5bf7310e334 100644 --- a/include/net/inet_sock.h +++ b/include/net/inet_sock.h @@ -245,7 +245,8 @@ static inline unsigned int __inet_ehashfn(const __be32 laddr, } struct request_sock *inet_reqsk_alloc(const struct request_sock_ops *ops, - struct sock *sk_listener); + struct sock *sk_listener, + bool attach_listener); static inline __u8 inet_sk_flowi_flags(const struct sock *sk) { diff --git a/include/net/inet_timewait_sock.h b/include/net/inet_timewait_sock.h index 879d6e5a973b..c9b3eb70f340 100644 --- a/include/net/inet_timewait_sock.h +++ b/include/net/inet_timewait_sock.h @@ -70,6 +70,7 @@ struct inet_timewait_sock { #define tw_dport __tw_common.skc_dport #define tw_num __tw_common.skc_num #define tw_cookie __tw_common.skc_cookie +#define tw_dr __tw_common.skc_tw_dr int tw_timeout; volatile unsigned char tw_substate; @@ -88,7 +89,6 @@ struct inet_timewait_sock { kmemcheck_bitfield_end(flags); struct timer_list tw_timer; struct inet_bind_bucket *tw_tb; - struct inet_timewait_death_row *tw_dr; }; #define tw_tclass tw_tos @@ -110,7 +110,19 @@ struct inet_timewait_sock *inet_twsk_alloc(const struct sock *sk, void __inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk, struct inet_hashinfo *hashinfo); -void inet_twsk_schedule(struct inet_timewait_sock *tw, const int timeo); +void __inet_twsk_schedule(struct inet_timewait_sock *tw, int timeo, + bool rearm); + +static inline void inet_twsk_schedule(struct inet_timewait_sock *tw, int timeo) +{ + __inet_twsk_schedule(tw, timeo, false); +} + +static inline void inet_twsk_reschedule(struct inet_timewait_sock *tw, int timeo) +{ + __inet_twsk_schedule(tw, timeo, true); +} + void inet_twsk_deschedule_put(struct inet_timewait_sock *tw); void inet_twsk_purge(struct inet_hashinfo *hashinfo, diff --git a/include/net/ip.h b/include/net/ip.h index 9b9ca2839399..1a98f1ca1638 100644 --- a/include/net/ip.h +++ b/include/net/ip.h @@ -100,24 +100,20 @@ int igmp_mc_init(void); * Functions provided by ip.c */ -int ip_build_and_send_pkt(struct sk_buff *skb, struct sock *sk, +int ip_build_and_send_pkt(struct sk_buff *skb, const struct sock *sk, __be32 saddr, __be32 daddr, struct ip_options_rcu *opt); int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev); int ip_local_deliver(struct sk_buff *skb); int ip_mr_input(struct sk_buff *skb); -int ip_output(struct sock *sk, struct sk_buff *skb); -int ip_mc_output(struct sock *sk, struct sk_buff *skb); -int ip_do_fragment(struct sock *sk, struct sk_buff *skb, - int (*output)(struct sock *, struct sk_buff *)); +int ip_output(struct net *net, struct sock *sk, struct sk_buff *skb); +int ip_mc_output(struct net *net, struct sock *sk, struct sk_buff *skb); +int ip_do_fragment(struct net *net, struct sock *sk, struct sk_buff *skb, + int (*output)(struct net *, struct sock *, struct sk_buff *)); void ip_send_check(struct iphdr *ip); -int __ip_local_out(struct 
sk_buff *skb); -int ip_local_out_sk(struct sock *sk, struct sk_buff *skb); -static inline int ip_local_out(struct sk_buff *skb) -{ - return ip_local_out_sk(skb->sk, skb); -} +int __ip_local_out(struct net *net, struct sock *sk, struct sk_buff *skb); +int ip_local_out(struct net *net, struct sock *sk, struct sk_buff *skb); int ip_queue_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl); void ip_init(void); @@ -282,10 +278,12 @@ int ip_decrease_ttl(struct iphdr *iph) } static inline -int ip_dont_fragment(struct sock *sk, struct dst_entry *dst) +int ip_dont_fragment(const struct sock *sk, const struct dst_entry *dst) { - return inet_sk(sk)->pmtudisc == IP_PMTUDISC_DO || - (inet_sk(sk)->pmtudisc == IP_PMTUDISC_WANT && + u8 pmtudisc = READ_ONCE(inet_sk(sk)->pmtudisc); + + return pmtudisc == IP_PMTUDISC_DO || + (pmtudisc == IP_PMTUDISC_WANT && !(dst_metric_locked(dst, RTAX_MTU))); } @@ -321,12 +319,15 @@ static inline unsigned int ip_dst_mtu_maybe_forward(const struct dst_entry *dst, static inline unsigned int ip_skb_dst_mtu(const struct sk_buff *skb) { - if (!skb->sk || ip_sk_use_pmtu(skb->sk)) { + struct sock *sk = skb->sk; + + if (!sk || !sk_fullsock(sk) || ip_sk_use_pmtu(sk)) { bool forwarding = IPCB(skb)->flags & IPSKB_FORWARDED; + return ip_dst_mtu_maybe_forward(skb_dst(skb), forwarding); - } else { - return min(skb_dst(skb)->dev->mtu, IP_MAX_MTU); } + + return min(skb_dst(skb)->dev->mtu, IP_MAX_MTU); } u32 ip_idents_reserve(u32 hash, int segs); @@ -505,11 +506,11 @@ static inline bool ip_defrag_user_in_between(u32 user, return user >= lower_bond && user <= upper_bond; } -int ip_defrag(struct sk_buff *skb, u32 user); +int ip_defrag(struct net *net, struct sk_buff *skb, u32 user); #ifdef CONFIG_INET -struct sk_buff *ip_check_defrag(struct sk_buff *skb, u32 user); +struct sk_buff *ip_check_defrag(struct net *net, struct sk_buff *skb, u32 user); #else -static inline struct sk_buff *ip_check_defrag(struct sk_buff *skb, u32 user) +static inline struct sk_buff *ip_check_defrag(struct net *net, struct sk_buff *skb, u32 user) { return skb; } diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h index 063d30474cf6..aaf9700fc9e5 100644 --- a/include/net/ip6_fib.h +++ b/include/net/ip6_fib.h @@ -275,7 +275,8 @@ int fib6_add(struct fib6_node *root, struct rt6_info *rt, struct nl_info *info, struct mx6_config *mxc); int fib6_del(struct rt6_info *rt, struct nl_info *info); -void inet6_rt_notify(int event, struct rt6_info *rt, struct nl_info *info); +void inet6_rt_notify(int event, struct rt6_info *rt, struct nl_info *info, + unsigned int flags); void fib6_run_gc(unsigned long expires, struct net *net, bool force); diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h index 297629aadb19..2bfb2ad2fab1 100644 --- a/include/net/ip6_route.h +++ b/include/net/ip6_route.h @@ -173,8 +173,8 @@ static inline bool ipv6_anycast_destination(const struct dst_entry *dst, ipv6_addr_equal(&rt->rt6i_dst.addr, daddr)); } -int ip6_fragment(struct sock *sk, struct sk_buff *skb, - int (*output)(struct sock *, struct sk_buff *)); +int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb, + int (*output)(struct net *, struct sock *, struct sk_buff *)); static inline int ip6_skb_dst_mtu(struct sk_buff *skb) { diff --git a/include/net/ip6_tunnel.h b/include/net/ip6_tunnel.h index b8529aa1dae7..aaee6fa02cf1 100644 --- a/include/net/ip6_tunnel.h +++ b/include/net/ip6_tunnel.h @@ -32,6 +32,12 @@ struct __ip6_tnl_parm { __be32 o_key; }; +struct ip6_tnl_dst { + seqlock_t lock; + struct dst_entry 
__rcu *dst; + u32 cookie; +}; + /* IPv6 tunnel */ struct ip6_tnl { struct ip6_tnl __rcu *next; /* next tunnel in list */ @@ -39,8 +45,7 @@ struct ip6_tnl { struct net *net; /* netns for packet i/o */ struct __ip6_tnl_parm parms; /* tunnel configuration parameters */ struct flowi fl; /* flowi template for xmit */ - struct dst_entry *dst_cache; /* cached dst */ - u32 dst_cookie; + struct ip6_tnl_dst __percpu *dst_cache; /* cached dst */ int err_count; unsigned long err_time; @@ -60,9 +65,11 @@ struct ipv6_tlv_tnl_enc_lim { __u8 encap_limit; /* tunnel encapsulation limit */ } __packed; -struct dst_entry *ip6_tnl_dst_check(struct ip6_tnl *t); +struct dst_entry *ip6_tnl_dst_get(struct ip6_tnl *t); +int ip6_tnl_dst_init(struct ip6_tnl *t); +void ip6_tnl_dst_destroy(struct ip6_tnl *t); void ip6_tnl_dst_reset(struct ip6_tnl *t); -void ip6_tnl_dst_store(struct ip6_tnl *t, struct dst_entry *dst); +void ip6_tnl_dst_set(struct ip6_tnl *t, struct dst_entry *dst); int ip6_tnl_rcv_ctl(struct ip6_tnl *t, const struct in6_addr *laddr, const struct in6_addr *raddr); int ip6_tnl_xmit_ctl(struct ip6_tnl *t, const struct in6_addr *laddr, @@ -79,8 +86,8 @@ static inline void ip6tunnel_xmit(struct sock *sk, struct sk_buff *skb, struct net_device_stats *stats = &dev->stats; int pkt_len, err; - pkt_len = skb->len; - err = ip6_local_out_sk(sk, skb); + pkt_len = skb->len - skb_inner_network_offset(skb); + err = ip6_local_out(dev_net(skb_dst(skb)->dev), sk, skb); if (net_xmit_eval(err) == 0) { struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats); diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h index a37d0432bebd..9f4df68105ab 100644 --- a/include/net/ip_fib.h +++ b/include/net/ip_fib.h @@ -79,7 +79,7 @@ struct fib_nh { unsigned char nh_scope; #ifdef CONFIG_IP_ROUTE_MULTIPATH int nh_weight; - int nh_power; + atomic_t nh_upper_bound; #endif #ifdef CONFIG_IP_ROUTE_CLASSID __u32 nh_tclassid; @@ -118,7 +118,7 @@ struct fib_info { #define fib_advmss fib_metrics[RTAX_ADVMSS-1] int fib_nhs; #ifdef CONFIG_IP_ROUTE_MULTIPATH - int fib_power; + int fib_weight; #endif struct rcu_head rcu; struct fib_nh fib_nh[0]; @@ -236,8 +236,11 @@ static inline int fib_lookup(struct net *net, const struct flowi4 *flp, rcu_read_lock(); tb = fib_get_table(net, RT_TABLE_MAIN); - if (tb && !fib_table_lookup(tb, flp, res, flags | FIB_LOOKUP_NOREF)) - err = 0; + if (tb) + err = fib_table_lookup(tb, flp, res, flags | FIB_LOOKUP_NOREF); + + if (err == -EAGAIN) + err = -ENETUNREACH; rcu_read_unlock(); @@ -258,7 +261,7 @@ static inline int fib_lookup(struct net *net, struct flowi4 *flp, struct fib_result *res, unsigned int flags) { struct fib_table *tb; - int err; + int err = -ENETUNREACH; flags |= FIB_LOOKUP_NOREF; if (net->ipv4.fib_has_custom_rules) @@ -268,15 +271,20 @@ static inline int fib_lookup(struct net *net, struct flowi4 *flp, res->tclassid = 0; - for (err = 0; !err; err = -ENETUNREACH) { - tb = rcu_dereference_rtnl(net->ipv4.fib_main); - if (tb && !fib_table_lookup(tb, flp, res, flags)) - break; + tb = rcu_dereference_rtnl(net->ipv4.fib_main); + if (tb) + err = fib_table_lookup(tb, flp, res, flags); + + if (!err) + goto out; - tb = rcu_dereference_rtnl(net->ipv4.fib_default); - if (tb && !fib_table_lookup(tb, flp, res, flags)) - break; - } + tb = rcu_dereference_rtnl(net->ipv4.fib_default); + if (tb) + err = fib_table_lookup(tb, flp, res, flags); + +out: + if (err == -EAGAIN) + err = -ENETUNREACH; rcu_read_unlock(); @@ -309,10 +317,20 @@ void fib_flush_external(struct net *net); /* Exported by fib_semantics.c */ int 
ip_fib_check_default(__be32 gw, struct net_device *dev); -int fib_sync_down_dev(struct net_device *dev, unsigned long event); +int fib_sync_down_dev(struct net_device *dev, unsigned long event, bool force); int fib_sync_down_addr(struct net *net, __be32 local); int fib_sync_up(struct net_device *dev, unsigned int nh_flags); -void fib_select_multipath(struct fib_result *res); + +extern u32 fib_multipath_secret __read_mostly; + +static inline int fib_multipath_hash(__be32 saddr, __be32 daddr) +{ + return jhash_2words(saddr, daddr, fib_multipath_secret) >> 1; +} + +void fib_select_multipath(struct fib_result *res, int hash); +void fib_select_path(struct net *net, struct fib_result *res, + struct flowi4 *fl4, int mp_hash); /* Exported by fib_trie.c */ void fib_trie_init(void); diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h index 9a6a3ba888e8..f6dafec9102c 100644 --- a/include/net/ip_tunnels.h +++ b/include/net/ip_tunnels.h @@ -276,6 +276,8 @@ int iptunnel_pull_header(struct sk_buff *skb, int hdr_len, __be16 inner_proto); int iptunnel_xmit(struct sock *sk, struct rtable *rt, struct sk_buff *skb, __be32 src, __be32 dst, u8 proto, u8 tos, u8 ttl, __be16 df, bool xnet); +struct metadata_dst *iptunnel_metadata_reply(struct metadata_dst *md, + gfp_t flags); struct sk_buff *iptunnel_handle_offloads(struct sk_buff *skb, bool gre_csum, int gso_type_mask); diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h index 9b9ca87a4210..0816c872b689 100644 --- a/include/net/ip_vs.h +++ b/include/net/ip_vs.h @@ -29,65 +29,15 @@ #endif #include <net/net_namespace.h> /* Netw namespace */ +#define IP_VS_HDR_INVERSE 1 +#define IP_VS_HDR_ICMP 2 + /* Generic access of ipvs struct */ static inline struct netns_ipvs *net_ipvs(struct net* net) { return net->ipvs; } -/* Get net ptr from skb in traffic cases - * use skb_sknet when call is from userland (ioctl or netlink) - */ -static inline struct net *skb_net(const struct sk_buff *skb) -{ -#ifdef CONFIG_NET_NS -#ifdef CONFIG_IP_VS_DEBUG - /* - * This is used for debug only. - * Start with the most likely hit - * End with BUG - */ - if (likely(skb->dev && dev_net(skb->dev))) - return dev_net(skb->dev); - if (skb_dst(skb) && skb_dst(skb)->dev) - return dev_net(skb_dst(skb)->dev); - WARN(skb->sk, "Maybe skb_sknet should be used in %s() at line:%d\n", - __func__, __LINE__); - if (likely(skb->sk && sock_net(skb->sk))) - return sock_net(skb->sk); - pr_err("There is no net ptr to find in the skb in %s() line:%d\n", - __func__, __LINE__); - BUG(); -#else - return dev_net(skb->dev ? : skb_dst(skb)->dev); -#endif -#else - return &init_net; -#endif -} - -static inline struct net *skb_sknet(const struct sk_buff *skb) -{ -#ifdef CONFIG_NET_NS -#ifdef CONFIG_IP_VS_DEBUG - /* Start with the most likely hit */ - if (likely(skb->sk && sock_net(skb->sk))) - return sock_net(skb->sk); - WARN(skb->dev, "Maybe skb_net should be used instead in %s() line:%d\n", - __func__, __LINE__); - if (likely(skb->dev && dev_net(skb->dev))) - return dev_net(skb->dev); - pr_err("There is no net ptr to find in the skb in %s() line:%d\n", - __func__, __LINE__); - BUG(); -#else - return sock_net(skb->sk); -#endif -#else - return &init_net; -#endif -} - /* This one needed for single_open_net since net is stored directly in * private not as a struct i.e. seq_file_net can't be used. 
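/*
 * A minimal sketch, not part of the patch above, of the flow-hash based
 * multipath selection: the caller derives an L3 hash once and hands it
 * to fib_select_multipath(), which compares it against each nexthop's
 * nh_upper_bound.  Requires CONFIG_IP_ROUTE_MULTIPATH; the wrapper name
 * is hypothetical.
 */
static void example_select_nexthop(struct flowi4 *fl4, struct fib_result *res)
{
        int h = fib_multipath_hash(fl4->saddr, fl4->daddr);

        fib_select_multipath(res, h);
}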
*/ @@ -104,6 +54,8 @@ static inline struct net *seq_file_single_net(struct seq_file *seq) extern int ip_vs_conn_tab_size; struct ip_vs_iphdr { + int hdr_flags; /* ipvs flags */ + __u32 off; /* Where IP or IPv4 header starts */ __u32 len; /* IPv4 simply where L4 starts * IPv6 where L4 Transport Header starts */ __u16 fragoffs; /* IPv6 fragment offset, 0 if first frag (or not frag)*/ @@ -120,48 +72,89 @@ static inline void *frag_safe_skb_hp(const struct sk_buff *skb, int offset, return skb_header_pointer(skb, offset, len, buffer); } -static inline void -ip_vs_fill_ip4hdr(const void *nh, struct ip_vs_iphdr *iphdr) -{ - const struct iphdr *iph = nh; - - iphdr->len = iph->ihl * 4; - iphdr->fragoffs = 0; - iphdr->protocol = iph->protocol; - iphdr->saddr.ip = iph->saddr; - iphdr->daddr.ip = iph->daddr; -} - /* This function handles filling *ip_vs_iphdr, both for IPv4 and IPv6. * IPv6 requires some extra work, as finding proper header position, * depend on the IPv6 extension headers. */ -static inline void -ip_vs_fill_iph_skb(int af, const struct sk_buff *skb, struct ip_vs_iphdr *iphdr) +static inline int +ip_vs_fill_iph_skb_off(int af, const struct sk_buff *skb, int offset, + int hdr_flags, struct ip_vs_iphdr *iphdr) { + iphdr->hdr_flags = hdr_flags; + iphdr->off = offset; + #ifdef CONFIG_IP_VS_IPV6 if (af == AF_INET6) { - const struct ipv6hdr *iph = - (struct ipv6hdr *)skb_network_header(skb); + struct ipv6hdr _iph; + const struct ipv6hdr *iph = skb_header_pointer( + skb, offset, sizeof(_iph), &_iph); + if (!iph) + return 0; + iphdr->saddr.in6 = iph->saddr; iphdr->daddr.in6 = iph->daddr; /* ipv6_find_hdr() updates len, flags */ - iphdr->len = 0; + iphdr->len = offset; iphdr->flags = 0; iphdr->protocol = ipv6_find_hdr(skb, &iphdr->len, -1, &iphdr->fragoffs, &iphdr->flags); + if (iphdr->protocol < 0) + return 0; } else #endif { - const struct iphdr *iph = - (struct iphdr *)skb_network_header(skb); - iphdr->len = iph->ihl * 4; + struct iphdr _iph; + const struct iphdr *iph = skb_header_pointer( + skb, offset, sizeof(_iph), &_iph); + if (!iph) + return 0; + + iphdr->len = offset + iph->ihl * 4; iphdr->fragoffs = 0; iphdr->protocol = iph->protocol; iphdr->saddr.ip = iph->saddr; iphdr->daddr.ip = iph->daddr; } + + return 1; +} + +static inline int +ip_vs_fill_iph_skb_icmp(int af, const struct sk_buff *skb, int offset, + bool inverse, struct ip_vs_iphdr *iphdr) +{ + int hdr_flags = IP_VS_HDR_ICMP; + + if (inverse) + hdr_flags |= IP_VS_HDR_INVERSE; + + return ip_vs_fill_iph_skb_off(af, skb, offset, hdr_flags, iphdr); +} + +static inline int +ip_vs_fill_iph_skb(int af, const struct sk_buff *skb, bool inverse, + struct ip_vs_iphdr *iphdr) +{ + int hdr_flags = 0; + + if (inverse) + hdr_flags |= IP_VS_HDR_INVERSE; + + return ip_vs_fill_iph_skb_off(af, skb, skb_network_offset(skb), + hdr_flags, iphdr); +} + +static inline bool +ip_vs_iph_inverse(const struct ip_vs_iphdr *iph) +{ + return !!(iph->hdr_flags & IP_VS_HDR_INVERSE); +} + +static inline bool +ip_vs_iph_icmp(const struct ip_vs_iphdr *iph) +{ + return !!(iph->hdr_flags & IP_VS_HDR_ICMP); } static inline void ip_vs_addr_copy(int af, union nf_inet_addr *dst, @@ -437,26 +430,27 @@ struct ip_vs_protocol { void (*exit)(struct ip_vs_protocol *pp); - int (*init_netns)(struct net *net, struct ip_vs_proto_data *pd); + int (*init_netns)(struct netns_ipvs *ipvs, struct ip_vs_proto_data *pd); - void (*exit_netns)(struct net *net, struct ip_vs_proto_data *pd); + void (*exit_netns)(struct netns_ipvs *ipvs, struct ip_vs_proto_data *pd); - int (*conn_schedule)(int 
af, struct sk_buff *skb, + int (*conn_schedule)(struct netns_ipvs *ipvs, + int af, struct sk_buff *skb, struct ip_vs_proto_data *pd, int *verdict, struct ip_vs_conn **cpp, struct ip_vs_iphdr *iph); struct ip_vs_conn * - (*conn_in_get)(int af, + (*conn_in_get)(struct netns_ipvs *ipvs, + int af, const struct sk_buff *skb, - const struct ip_vs_iphdr *iph, - int inverse); + const struct ip_vs_iphdr *iph); struct ip_vs_conn * - (*conn_out_get)(int af, + (*conn_out_get)(struct netns_ipvs *ipvs, + int af, const struct sk_buff *skb, - const struct ip_vs_iphdr *iph, - int inverse); + const struct ip_vs_iphdr *iph); int (*snat_handler)(struct sk_buff *skb, struct ip_vs_protocol *pp, struct ip_vs_conn *cp, struct ip_vs_iphdr *iph); @@ -473,9 +467,9 @@ struct ip_vs_protocol { const struct sk_buff *skb, struct ip_vs_proto_data *pd); - int (*register_app)(struct net *net, struct ip_vs_app *inc); + int (*register_app)(struct netns_ipvs *ipvs, struct ip_vs_app *inc); - void (*unregister_app)(struct net *net, struct ip_vs_app *inc); + void (*unregister_app)(struct netns_ipvs *ipvs, struct ip_vs_app *inc); int (*app_conn_bind)(struct ip_vs_conn *cp); @@ -497,11 +491,11 @@ struct ip_vs_proto_data { }; struct ip_vs_protocol *ip_vs_proto_get(unsigned short proto); -struct ip_vs_proto_data *ip_vs_proto_data_get(struct net *net, +struct ip_vs_proto_data *ip_vs_proto_data_get(struct netns_ipvs *ipvs, unsigned short proto); struct ip_vs_conn_param { - struct net *net; + struct netns_ipvs *ipvs; const union nf_inet_addr *caddr; const union nf_inet_addr *vaddr; __be16 cport; @@ -528,9 +522,7 @@ struct ip_vs_conn { volatile __u32 flags; /* status flags */ __u16 protocol; /* Which protocol (TCP/UDP) */ __u16 daf; /* Address family of the dest */ -#ifdef CONFIG_NET_NS - struct net *net; /* Name space */ -#endif + struct netns_ipvs *ipvs; /* counter and timer */ atomic_t refcnt; /* reference count */ @@ -577,33 +569,6 @@ struct ip_vs_conn { struct rcu_head rcu_head; }; -/* To save some memory in conn table when name space is disabled. */ -static inline struct net *ip_vs_conn_net(const struct ip_vs_conn *cp) -{ -#ifdef CONFIG_NET_NS - return cp->net; -#else - return &init_net; -#endif -} - -static inline void ip_vs_conn_net_set(struct ip_vs_conn *cp, struct net *net) -{ -#ifdef CONFIG_NET_NS - cp->net = net; -#endif -} - -static inline int ip_vs_conn_net_eq(const struct ip_vs_conn *cp, - struct net *net) -{ -#ifdef CONFIG_NET_NS - return cp->net == net; -#else - return 1; -#endif -} - /* Extended internal versions of struct ip_vs_service_user and ip_vs_dest_user * for IPv6 support. 
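/*
 * A minimal usage sketch, not part of the patch above, for the reworked
 * header-filling helpers: the boolean "inverse" replaces the old int
 * flag, the result must now be checked, and ICMP-embedded headers carry
 * the IP_VS_HDR_ICMP/IP_VS_HDR_INVERSE bits queried via ip_vs_iph_icmp()
 * and ip_vs_iph_inverse().  The function name is hypothetical.
 */
static int example_parse(int af, const struct sk_buff *skb)
{
        struct ip_vs_iphdr iph;

        if (!ip_vs_fill_iph_skb(af, skb, false, &iph))
                return 0;               /* truncated or unknown header */

        if (ip_vs_iph_icmp(&iph))
                return 0;               /* handled elsewhere in this sketch */

        return iph.protocol == IPPROTO_TCP || iph.protocol == IPPROTO_UDP;
}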
* @@ -663,7 +628,7 @@ struct ip_vs_service { unsigned int flags; /* service status flags */ unsigned int timeout; /* persistent timeout in ticks */ __be32 netmask; /* grouping granularity, mask/plen */ - struct net *net; + struct netns_ipvs *ipvs; struct list_head destinations; /* real server d-linked list */ __u32 num_dests; /* number of servers */ @@ -953,6 +918,8 @@ struct netns_ipvs { int sysctl_pmtu_disc; int sysctl_backup_only; int sysctl_conn_reuse_mode; + int sysctl_schedule_icmp; + int sysctl_ignore_tunneled; /* ip_vs_lblc */ int sysctl_lblc_expiration; @@ -1071,6 +1038,21 @@ static inline int sysctl_conn_reuse_mode(struct netns_ipvs *ipvs) return ipvs->sysctl_conn_reuse_mode; } +static inline int sysctl_schedule_icmp(struct netns_ipvs *ipvs) +{ + return ipvs->sysctl_schedule_icmp; +} + +static inline int sysctl_ignore_tunneled(struct netns_ipvs *ipvs) +{ + return ipvs->sysctl_ignore_tunneled; +} + +static inline int sysctl_cache_bypass(struct netns_ipvs *ipvs) +{ + return ipvs->sysctl_cache_bypass; +} + #else static inline int sysctl_sync_threshold(struct netns_ipvs *ipvs) @@ -1143,6 +1125,21 @@ static inline int sysctl_conn_reuse_mode(struct netns_ipvs *ipvs) return 1; } +static inline int sysctl_schedule_icmp(struct netns_ipvs *ipvs) +{ + return 0; +} + +static inline int sysctl_ignore_tunneled(struct netns_ipvs *ipvs) +{ + return 0; +} + +static inline int sysctl_cache_bypass(struct netns_ipvs *ipvs) +{ + return 0; +} + #endif /* IPVS core functions @@ -1164,14 +1161,14 @@ enum { IP_VS_DIR_LAST, }; -static inline void ip_vs_conn_fill_param(struct net *net, int af, int protocol, +static inline void ip_vs_conn_fill_param(struct netns_ipvs *ipvs, int af, int protocol, const union nf_inet_addr *caddr, __be16 cport, const union nf_inet_addr *vaddr, __be16 vport, struct ip_vs_conn_param *p) { - p->net = net; + p->ipvs = ipvs; p->af = af; p->protocol = protocol; p->caddr = caddr; @@ -1185,15 +1182,15 @@ static inline void ip_vs_conn_fill_param(struct net *net, int af, int protocol, struct ip_vs_conn *ip_vs_conn_in_get(const struct ip_vs_conn_param *p); struct ip_vs_conn *ip_vs_ct_in_get(const struct ip_vs_conn_param *p); -struct ip_vs_conn * ip_vs_conn_in_get_proto(int af, const struct sk_buff *skb, - const struct ip_vs_iphdr *iph, - int inverse); +struct ip_vs_conn * ip_vs_conn_in_get_proto(struct netns_ipvs *ipvs, int af, + const struct sk_buff *skb, + const struct ip_vs_iphdr *iph); struct ip_vs_conn *ip_vs_conn_out_get(const struct ip_vs_conn_param *p); -struct ip_vs_conn * ip_vs_conn_out_get_proto(int af, const struct sk_buff *skb, - const struct ip_vs_iphdr *iph, - int inverse); +struct ip_vs_conn * ip_vs_conn_out_get_proto(struct netns_ipvs *ipvs, int af, + const struct sk_buff *skb, + const struct ip_vs_iphdr *iph); /* Get reference to gain full access to conn. 
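/*
 * A minimal sketch, not part of the patch above, of a connection lookup
 * with the netns_ipvs-based parameter block: callers now pass
 * net_ipvs(net) instead of the struct net pointer.  The helper name is
 * hypothetical.
 */
static struct ip_vs_conn *example_conn_lookup(struct net *net, int af,
                                              const struct ip_vs_iphdr *iph,
                                              __be16 cport, __be16 vport)
{
        struct ip_vs_conn_param p;

        ip_vs_conn_fill_param(net_ipvs(net), af, iph->protocol,
                              &iph->saddr, cport, &iph->daddr, vport, &p);
        return ip_vs_conn_in_get(&p);
}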
* By default, RCU read-side critical sections have access only to @@ -1221,9 +1218,9 @@ void ip_vs_conn_expire_now(struct ip_vs_conn *cp); const char *ip_vs_state_name(__u16 proto, int state); -void ip_vs_tcp_conn_listen(struct net *net, struct ip_vs_conn *cp); +void ip_vs_tcp_conn_listen(struct ip_vs_conn *cp); int ip_vs_check_template(struct ip_vs_conn *ct); -void ip_vs_random_dropentry(struct net *net); +void ip_vs_random_dropentry(struct netns_ipvs *ipvs); int ip_vs_conn_init(void); void ip_vs_conn_cleanup(void); @@ -1288,29 +1285,29 @@ ip_vs_control_add(struct ip_vs_conn *cp, struct ip_vs_conn *ctl_cp) } /* IPVS netns init & cleanup functions */ -int ip_vs_estimator_net_init(struct net *net); -int ip_vs_control_net_init(struct net *net); -int ip_vs_protocol_net_init(struct net *net); -int ip_vs_app_net_init(struct net *net); -int ip_vs_conn_net_init(struct net *net); -int ip_vs_sync_net_init(struct net *net); -void ip_vs_conn_net_cleanup(struct net *net); -void ip_vs_app_net_cleanup(struct net *net); -void ip_vs_protocol_net_cleanup(struct net *net); -void ip_vs_control_net_cleanup(struct net *net); -void ip_vs_estimator_net_cleanup(struct net *net); -void ip_vs_sync_net_cleanup(struct net *net); -void ip_vs_service_net_cleanup(struct net *net); +int ip_vs_estimator_net_init(struct netns_ipvs *ipvs); +int ip_vs_control_net_init(struct netns_ipvs *ipvs); +int ip_vs_protocol_net_init(struct netns_ipvs *ipvs); +int ip_vs_app_net_init(struct netns_ipvs *ipvs); +int ip_vs_conn_net_init(struct netns_ipvs *ipvs); +int ip_vs_sync_net_init(struct netns_ipvs *ipvs); +void ip_vs_conn_net_cleanup(struct netns_ipvs *ipvs); +void ip_vs_app_net_cleanup(struct netns_ipvs *ipvs); +void ip_vs_protocol_net_cleanup(struct netns_ipvs *ipvs); +void ip_vs_control_net_cleanup(struct netns_ipvs *ipvs); +void ip_vs_estimator_net_cleanup(struct netns_ipvs *ipvs); +void ip_vs_sync_net_cleanup(struct netns_ipvs *ipvs); +void ip_vs_service_net_cleanup(struct netns_ipvs *ipvs); /* IPVS application functions * (from ip_vs_app.c) */ #define IP_VS_APP_MAX_PORTS 8 -struct ip_vs_app *register_ip_vs_app(struct net *net, struct ip_vs_app *app); -void unregister_ip_vs_app(struct net *net, struct ip_vs_app *app); +struct ip_vs_app *register_ip_vs_app(struct netns_ipvs *ipvs, struct ip_vs_app *app); +void unregister_ip_vs_app(struct netns_ipvs *ipvs, struct ip_vs_app *app); int ip_vs_bind_app(struct ip_vs_conn *cp, struct ip_vs_protocol *pp); void ip_vs_unbind_app(struct ip_vs_conn *cp); -int register_ip_vs_app_inc(struct net *net, struct ip_vs_app *app, __u16 proto, +int register_ip_vs_app_inc(struct netns_ipvs *ipvs, struct ip_vs_app *app, __u16 proto, __u16 port); int ip_vs_app_inc_get(struct ip_vs_app *inc); void ip_vs_app_inc_put(struct ip_vs_app *inc); @@ -1375,10 +1372,10 @@ extern struct ip_vs_stats ip_vs_stats; extern int sysctl_ip_vs_sync_ver; struct ip_vs_service * -ip_vs_service_find(struct net *net, int af, __u32 fwmark, __u16 protocol, +ip_vs_service_find(struct netns_ipvs *ipvs, int af, __u32 fwmark, __u16 protocol, const union nf_inet_addr *vaddr, __be16 vport); -bool ip_vs_has_real_service(struct net *net, int af, __u16 protocol, +bool ip_vs_has_real_service(struct netns_ipvs *ipvs, int af, __u16 protocol, const union nf_inet_addr *daddr, __be16 dport); int ip_vs_use_count_inc(void); @@ -1388,7 +1385,7 @@ void ip_vs_unregister_nl_ioctl(void); int ip_vs_control_init(void); void ip_vs_control_cleanup(void); struct ip_vs_dest * -ip_vs_find_dest(struct net *net, int svc_af, int dest_af, +ip_vs_find_dest(struct 
netns_ipvs *ipvs, int svc_af, int dest_af, const union nf_inet_addr *daddr, __be16 dport, const union nf_inet_addr *vaddr, __be16 vport, __u16 protocol, __u32 fwmark, __u32 flags); @@ -1414,14 +1411,14 @@ static inline void ip_vs_dest_put_and_free(struct ip_vs_dest *dest) /* IPVS sync daemon data and function prototypes * (from ip_vs_sync.c) */ -int start_sync_thread(struct net *net, struct ipvs_sync_daemon_cfg *cfg, +int start_sync_thread(struct netns_ipvs *ipvs, struct ipvs_sync_daemon_cfg *cfg, int state); -int stop_sync_thread(struct net *net, int state); -void ip_vs_sync_conn(struct net *net, struct ip_vs_conn *cp, int pkts); +int stop_sync_thread(struct netns_ipvs *ipvs, int state); +void ip_vs_sync_conn(struct netns_ipvs *ipvs, struct ip_vs_conn *cp, int pkts); /* IPVS rate estimator prototypes (from ip_vs_est.c) */ -void ip_vs_start_estimator(struct net *net, struct ip_vs_stats *stats); -void ip_vs_stop_estimator(struct net *net, struct ip_vs_stats *stats); +void ip_vs_start_estimator(struct netns_ipvs *ipvs, struct ip_vs_stats *stats); +void ip_vs_stop_estimator(struct netns_ipvs *ipvs, struct ip_vs_stats *stats); void ip_vs_zero_estimator(struct ip_vs_stats *stats); void ip_vs_read_estimator(struct ip_vs_kstats *dst, struct ip_vs_stats *stats); diff --git a/include/net/ipv6.h b/include/net/ipv6.h index 711cca428cc8..e1a10b0ac0b0 100644 --- a/include/net/ipv6.h +++ b/include/net/ipv6.h @@ -807,12 +807,12 @@ static inline u8 ip6_tclass(__be32 flowinfo) int ipv6_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev); -int ip6_rcv_finish(struct sock *sk, struct sk_buff *skb); +int ip6_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb); /* * upper-layer output functions */ -int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6, +int ip6_xmit(const struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6, struct ipv6_txoptions *opt, int tclass); int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr); @@ -849,7 +849,7 @@ static inline struct sk_buff *ip6_finish_skb(struct sock *sk) int ip6_dst_lookup(struct net *net, struct sock *sk, struct dst_entry **dst, struct flowi6 *fl6); -struct dst_entry *ip6_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6, +struct dst_entry *ip6_dst_lookup_flow(const struct sock *sk, struct flowi6 *fl6, const struct in6_addr *final_dst); struct dst_entry *ip6_sk_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6, const struct in6_addr *final_dst); @@ -860,14 +860,13 @@ struct dst_entry *ip6_blackhole_route(struct net *net, * skb processing functions */ -int ip6_output(struct sock *sk, struct sk_buff *skb); +int ip6_output(struct net *net, struct sock *sk, struct sk_buff *skb); int ip6_forward(struct sk_buff *skb); int ip6_input(struct sk_buff *skb); int ip6_mc_input(struct sk_buff *skb); -int __ip6_local_out(struct sk_buff *skb); -int ip6_local_out_sk(struct sock *sk, struct sk_buff *skb); -int ip6_local_out(struct sk_buff *skb); +int __ip6_local_out(struct net *net, struct sock *sk, struct sk_buff *skb); +int ip6_local_out(struct net *net, struct sock *sk, struct sk_buff *skb); /* * Extension header (options) processing diff --git a/include/net/iucv/iucv.h b/include/net/iucv/iucv.h index 0894ced31957..b867b0cf79e8 100644 --- a/include/net/iucv/iucv.h +++ b/include/net/iucv/iucv.h @@ -141,14 +141,14 @@ struct iucv_handler { * called is the order of the registration of the iucv handlers * to the base code. 
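/*
 * A minimal sketch, not part of the patch above, of the new output-path
 * shape: dst_ops->local_out, the lwtunnel outputs and the
 * ip_local_out()/ip6_local_out() helpers all take the struct net
 * explicitly now, so callers no longer recover it from skb->sk or
 * skb_dst(skb).  The wrapper below is hypothetical.
 */
static int example_xmit_one(struct net *net, struct sock *sk,
                            struct sk_buff *skb)
{
        if (skb->protocol == htons(ETH_P_IPV6))
                return ip6_local_out(net, sk, skb);

        return ip_local_out(net, sk, skb);
}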
*/ - int (*path_pending)(struct iucv_path *, u8 ipvmid[8], u8 ipuser[16]); + int (*path_pending)(struct iucv_path *, u8 *ipvmid, u8 *ipuser); /* * The path_complete function is called after an iucv interrupt * type 0x02 has been received for a path that has been established * for this handler with iucv_path_connect and got accepted by the * peer with iucv_path_accept. */ - void (*path_complete)(struct iucv_path *, u8 ipuser[16]); + void (*path_complete)(struct iucv_path *, u8 *ipuser); /* * The path_severed function is called after an iucv interrupt * type 0x03 has been received. The communication peer shutdown @@ -156,20 +156,20 @@ struct iucv_handler { * remaining messages can be received until a iucv_path_sever * shuts down the other end of the path as well. */ - void (*path_severed)(struct iucv_path *, u8 ipuser[16]); + void (*path_severed)(struct iucv_path *, u8 *ipuser); /* * The path_quiesced function is called after an icuv interrupt * type 0x04 has been received. The communication peer has quiesced * the path. Delivery of messages is stopped until iucv_path_resume * has been called. */ - void (*path_quiesced)(struct iucv_path *, u8 ipuser[16]); + void (*path_quiesced)(struct iucv_path *, u8 *ipuser); /* * The path_resumed function is called after an icuv interrupt * type 0x05 has been received. The communication peer has resumed * the path. */ - void (*path_resumed)(struct iucv_path *, u8 ipuser[16]); + void (*path_resumed)(struct iucv_path *, u8 *ipuser); /* * The message_pending function is called after an icuv interrupt * type 0x06 or type 0x07 has been received. A new message is @@ -256,7 +256,7 @@ static inline void iucv_path_free(struct iucv_path *path) * Returns the result of the CP IUCV call. */ int iucv_path_accept(struct iucv_path *path, struct iucv_handler *handler, - u8 userdata[16], void *private); + u8 *userdata, void *private); /** * iucv_path_connect @@ -274,7 +274,7 @@ int iucv_path_accept(struct iucv_path *path, struct iucv_handler *handler, * Returns the result of the CP IUCV call. */ int iucv_path_connect(struct iucv_path *path, struct iucv_handler *handler, - u8 userid[8], u8 system[8], u8 userdata[16], + u8 *userid, u8 *system, u8 *userdata, void *private); /** @@ -287,7 +287,7 @@ int iucv_path_connect(struct iucv_path *path, struct iucv_handler *handler, * * Returns the result from the CP IUCV call. */ -int iucv_path_quiesce(struct iucv_path *path, u8 userdata[16]); +int iucv_path_quiesce(struct iucv_path *path, u8 *userdata); /** * iucv_path_resume: @@ -299,7 +299,7 @@ int iucv_path_quiesce(struct iucv_path *path, u8 userdata[16]); * * Returns the result from the CP IUCV call. */ -int iucv_path_resume(struct iucv_path *path, u8 userdata[16]); +int iucv_path_resume(struct iucv_path *path, u8 *userdata); /** * iucv_path_sever @@ -310,7 +310,7 @@ int iucv_path_resume(struct iucv_path *path, u8 userdata[16]); * * Returns the result from the CP IUCV call. 
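/*
 * A minimal sketch, not part of the patch above: the iucv handler
 * callbacks now take plain u8 pointers instead of fixed-size array
 * parameters, so a handler is declared as below.  The handler and its
 * bodies are hypothetical placeholders.
 */
static int example_path_pending(struct iucv_path *path, u8 *ipvmid, u8 *ipuser)
{
        return -EINVAL;         /* this handler does not accept incoming paths */
}

static void example_path_severed(struct iucv_path *path, u8 *ipuser)
{
        iucv_path_sever(path, NULL);
}

static struct iucv_handler example_handler = {
        .path_pending = example_path_pending,
        .path_severed = example_path_severed,
};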
*/ -int iucv_path_sever(struct iucv_path *path, u8 userdata[16]); +int iucv_path_sever(struct iucv_path *path, u8 *userdata); /** * iucv_message_purge diff --git a/include/net/l3mdev.h b/include/net/l3mdev.h new file mode 100644 index 000000000000..774d85b2d5d9 --- /dev/null +++ b/include/net/l3mdev.h @@ -0,0 +1,222 @@ +/* + * include/net/l3mdev.h - L3 master device API + * Copyright (c) 2015 Cumulus Networks + * Copyright (c) 2015 David Ahern <dsa@cumulusnetworks.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ +#ifndef _NET_L3MDEV_H_ +#define _NET_L3MDEV_H_ + +/** + * struct l3mdev_ops - l3mdev operations + * + * @l3mdev_fib_table: Get FIB table id to use for lookups + * + * @l3mdev_get_rtable: Get cached IPv4 rtable (dst_entry) for device + * + * @l3mdev_get_saddr: Get source address for a flow + * + * @l3mdev_get_rt6_dst: Get cached IPv6 rt6_info (dst_entry) for device + */ + +struct l3mdev_ops { + u32 (*l3mdev_fib_table)(const struct net_device *dev); + + /* IPv4 ops */ + struct rtable * (*l3mdev_get_rtable)(const struct net_device *dev, + const struct flowi4 *fl4); + void (*l3mdev_get_saddr)(struct net_device *dev, + struct flowi4 *fl4); + + /* IPv6 ops */ + struct dst_entry * (*l3mdev_get_rt6_dst)(const struct net_device *dev, + const struct flowi6 *fl6); +}; + +#ifdef CONFIG_NET_L3_MASTER_DEV + +int l3mdev_master_ifindex_rcu(struct net_device *dev); +static inline int l3mdev_master_ifindex(struct net_device *dev) +{ + int ifindex; + + rcu_read_lock(); + ifindex = l3mdev_master_ifindex_rcu(dev); + rcu_read_unlock(); + + return ifindex; +} + +/* get index of an interface to use for FIB lookups. For devices + * enslaved to an L3 master device FIB lookups are based on the + * master index + */ +static inline int l3mdev_fib_oif_rcu(struct net_device *dev) +{ + return l3mdev_master_ifindex_rcu(dev) ? 
: dev->ifindex; +} + +static inline int l3mdev_fib_oif(struct net_device *dev) +{ + int oif; + + rcu_read_lock(); + oif = l3mdev_fib_oif_rcu(dev); + rcu_read_unlock(); + + return oif; +} + +u32 l3mdev_fib_table_rcu(const struct net_device *dev); +u32 l3mdev_fib_table_by_index(struct net *net, int ifindex); +static inline u32 l3mdev_fib_table(const struct net_device *dev) +{ + u32 tb_id; + + rcu_read_lock(); + tb_id = l3mdev_fib_table_rcu(dev); + rcu_read_unlock(); + + return tb_id; +} + +static inline struct rtable *l3mdev_get_rtable(const struct net_device *dev, + const struct flowi4 *fl4) +{ + if (netif_is_l3_master(dev) && dev->l3mdev_ops->l3mdev_get_rtable) + return dev->l3mdev_ops->l3mdev_get_rtable(dev, fl4); + + return NULL; +} + +static inline bool netif_index_is_l3_master(struct net *net, int ifindex) +{ + struct net_device *dev; + bool rc = false; + + if (ifindex == 0) + return false; + + rcu_read_lock(); + + dev = dev_get_by_index_rcu(net, ifindex); + if (dev) + rc = netif_is_l3_master(dev); + + rcu_read_unlock(); + + return rc; +} + +static inline void l3mdev_get_saddr(struct net *net, int ifindex, + struct flowi4 *fl4) +{ + struct net_device *dev; + + if (ifindex) { + + rcu_read_lock(); + + dev = dev_get_by_index_rcu(net, ifindex); + if (dev && netif_is_l3_master(dev) && + dev->l3mdev_ops->l3mdev_get_saddr) { + dev->l3mdev_ops->l3mdev_get_saddr(dev, fl4); + } + + rcu_read_unlock(); + } +} + +static inline struct dst_entry *l3mdev_get_rt6_dst(const struct net_device *dev, + const struct flowi6 *fl6) +{ + if (netif_is_l3_master(dev) && dev->l3mdev_ops->l3mdev_get_rt6_dst) + return dev->l3mdev_ops->l3mdev_get_rt6_dst(dev, fl6); + + return NULL; +} + +static inline +struct dst_entry *l3mdev_rt6_dst_by_oif(struct net *net, + const struct flowi6 *fl6) +{ + struct dst_entry *dst = NULL; + struct net_device *dev; + + dev = dev_get_by_index(net, fl6->flowi6_oif); + if (dev) { + dst = l3mdev_get_rt6_dst(dev, fl6); + dev_put(dev); + } + + return dst; +} + +#else + +static inline int l3mdev_master_ifindex_rcu(struct net_device *dev) +{ + return 0; +} +static inline int l3mdev_master_ifindex(struct net_device *dev) +{ + return 0; +} + +static inline int l3mdev_fib_oif_rcu(struct net_device *dev) +{ + return dev ? dev->ifindex : 0; +} +static inline int l3mdev_fib_oif(struct net_device *dev) +{ + return dev ? 
dev->ifindex : 0; +} + +static inline u32 l3mdev_fib_table_rcu(const struct net_device *dev) +{ + return 0; +} +static inline u32 l3mdev_fib_table(const struct net_device *dev) +{ + return 0; +} +static inline u32 l3mdev_fib_table_by_index(struct net *net, int ifindex) +{ + return 0; +} + +static inline struct rtable *l3mdev_get_rtable(const struct net_device *dev, + const struct flowi4 *fl4) +{ + return NULL; +} + +static inline bool netif_index_is_l3_master(struct net *net, int ifindex) +{ + return false; +} + +static inline void l3mdev_get_saddr(struct net *net, int ifindex, + struct flowi4 *fl4) +{ +} + +static inline +struct dst_entry *l3mdev_get_rt6_dst(const struct net_device *dev, + const struct flowi6 *fl6) +{ + return NULL; +} +static inline +struct dst_entry *l3mdev_rt6_dst_by_oif(struct net *net, + const struct flowi6 *fl6) +{ + return NULL; +} +#endif + +#endif /* _NET_L3MDEV_H_ */ diff --git a/include/net/lwtunnel.h b/include/net/lwtunnel.h index fce0e35e74d0..66350ce3e955 100644 --- a/include/net/lwtunnel.h +++ b/include/net/lwtunnel.h @@ -18,7 +18,7 @@ struct lwtunnel_state { __u16 type; __u16 flags; atomic_t refcnt; - int (*orig_output)(struct sock *sk, struct sk_buff *skb); + int (*orig_output)(struct net *net, struct sock *sk, struct sk_buff *skb); int (*orig_input)(struct sk_buff *); int len; __u8 data[0]; @@ -28,7 +28,7 @@ struct lwtunnel_encap_ops { int (*build_state)(struct net_device *dev, struct nlattr *encap, unsigned int family, const void *cfg, struct lwtunnel_state **ts); - int (*output)(struct sock *sk, struct sk_buff *skb); + int (*output)(struct net *net, struct sock *sk, struct sk_buff *skb); int (*input)(struct sk_buff *skb); int (*fill_encap)(struct sk_buff *skb, struct lwtunnel_state *lwtstate); @@ -88,7 +88,7 @@ int lwtunnel_fill_encap(struct sk_buff *skb, int lwtunnel_get_encap_size(struct lwtunnel_state *lwtstate); struct lwtunnel_state *lwtunnel_state_alloc(int hdr_len); int lwtunnel_cmp_encap(struct lwtunnel_state *a, struct lwtunnel_state *b); -int lwtunnel_output(struct sock *sk, struct sk_buff *skb); +int lwtunnel_output(struct net *net, struct sock *sk, struct sk_buff *skb); int lwtunnel_input(struct sk_buff *skb); #else @@ -160,7 +160,7 @@ static inline int lwtunnel_cmp_encap(struct lwtunnel_state *a, return 0; } -static inline int lwtunnel_output(struct sock *sk, struct sk_buff *skb) +static inline int lwtunnel_output(struct net *net, struct sock *sk, struct sk_buff *skb) { return -EOPNOTSUPP; } diff --git a/include/net/mac80211.h b/include/net/mac80211.h index bfc569498bfa..82045fca388b 100644 --- a/include/net/mac80211.h +++ b/include/net/mac80211.h @@ -5,6 +5,7 @@ * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz> * Copyright 2007-2010 Johannes Berg <johannes@sipsolutions.net> * Copyright 2013-2014 Intel Mobile Communications GmbH + * Copyright (C) 2015 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as @@ -481,7 +482,9 @@ struct ieee80211_event { * Note that with TDLS this can be the case (channel is HT, protection must * be used from this field) even when the BSS association isn't using HT. * @cqm_rssi_thold: Connection quality monitor RSSI threshold, a zero value - * implies disabled + * implies disabled. As with the cfg80211 callback, a change here should + * cause an event to be sent indicating where the current value is in + * relation to the newly configured threshold. 
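/*
 * A minimal usage sketch, not part of the new header above: when the
 * output interface of a flow is an L3 master device (e.g. a VRF), the
 * lookup is steered by the master's FIB table and source address.  The
 * function name is hypothetical.
 */
static void example_l3mdev_prepare(struct net *net, struct flowi4 *fl4)
{
        if (netif_index_is_l3_master(net, fl4->flowi4_oif))
                l3mdev_get_saddr(net, fl4->flowi4_oif, fl4);

        /* table selection for a device would use l3mdev_fib_table(dev),
         * which returns 0 when the device has no L3 master
         */
}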
* @cqm_rssi_hyst: Connection quality monitor RSSI hysteresis * @arp_addr_list: List of IPv4 addresses for hardware ARP filtering. The * may filter ARP queries targeted for other addresses than listed here. @@ -1240,11 +1243,6 @@ enum ieee80211_smps_mode { * @flags: configuration flags defined above * * @listen_interval: listen interval in units of beacon interval - * @max_sleep_period: the maximum number of beacon intervals to sleep for - * before checking the beacon for a TIM bit (managed mode only); this - * value will be only achievable between DTIM frames, the hardware - * needs to check for the multicast traffic bit in DTIM beacons. - * This variable is valid only when the CONF_PS flag is set. * @ps_dtim_period: The DTIM period of the AP we're connected to, for use * in power saving. Power saving will not be enabled until a beacon * has been received and the DTIM period is known. @@ -1274,7 +1272,6 @@ enum ieee80211_smps_mode { struct ieee80211_conf { u32 flags; int power_level, dynamic_ps_timeout; - int max_sleep_period; u16 listen_interval; u8 ps_dtim_period; @@ -1360,6 +1357,8 @@ enum ieee80211_vif_flags { * @debugfs_dir: debugfs dentry, can be used by drivers to create own per * interface debug files. Note that it will be NULL for the virtual * monitor interface (if that is requested.) + * @probe_req_reg: probe requests should be reported to mac80211 for this + * interface. * @drv_priv: data area for driver use, will always be aligned to * sizeof(void *). * @txq: the multicast data TX queue (if driver uses the TXQ abstraction) @@ -1384,6 +1383,8 @@ struct ieee80211_vif { struct dentry *debugfs_dir; #endif + unsigned int probe_req_reg; + /* must be last */ u8 drv_priv[0] __aligned(sizeof(void *)); }; @@ -1494,10 +1495,8 @@ enum ieee80211_key_flags { * - Temporal Authenticator Rx MIC Key (64 bits) * @icv_len: The ICV length for this key type * @iv_len: The IV length for this key type - * @drv_priv: pointer for driver use */ struct ieee80211_key_conf { - void *drv_priv; atomic64_t tx_pn; u32 cipher; u8 icv_len; @@ -1680,6 +1679,7 @@ struct ieee80211_sta_rates { * @tdls: indicates whether the STA is a TDLS peer * @tdls_initiator: indicates the STA is an initiator of the TDLS link. Only * valid if the STA is a TDLS peer in the first place. + * @mfp: indicates whether the STA uses management frame protection or not. * @txq: per-TID data TX queues (if driver uses the TXQ abstraction) */ struct ieee80211_sta { @@ -1697,6 +1697,7 @@ struct ieee80211_sta { struct ieee80211_sta_rates __rcu *rates; bool tdls; bool tdls_initiator; + bool mfp; struct ieee80211_txq *txq[IEEE80211_NUM_TIDS]; @@ -1894,6 +1895,12 @@ struct ieee80211_txq { * @IEEE80211_HW_TDLS_WIDER_BW: The device/driver supports wider bandwidth * than then BSS bandwidth for a TDLS link on the base channel. * + * @IEEE80211_HW_SUPPORTS_AMSDU_IN_AMPDU: The driver supports receiving A-MSDUs + * within A-MPDU. + * + * @IEEE80211_HW_BEACON_TX_STATUS: The device/driver provides TX status + * for sent beacons. + * * @NUM_IEEE80211_HW_FLAGS: number of hardware flags, used for sizing arrays */ enum ieee80211_hw_flags { @@ -1927,6 +1934,8 @@ enum ieee80211_hw_flags { IEEE80211_HW_SUPPORTS_CLONED_SKBS, IEEE80211_HW_SINGLE_SCAN_ON_ALL_BANDS, IEEE80211_HW_TDLS_WIDER_BW, + IEEE80211_HW_SUPPORTS_AMSDU_IN_AMPDU, + IEEE80211_HW_BEACON_TX_STATUS, /* keep last, obviously */ NUM_IEEE80211_HW_FLAGS @@ -2827,6 +2836,13 @@ enum ieee80211_reconfig_type { * See the section "Frame filtering" for more information. 
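/*
 * A minimal sketch, not part of the patch above: a driver advertises the
 * two new capability bits at registration time with the existing
 * ieee80211_hw_set() helper.  The function name is hypothetical.
 */
static void example_setup_hw_flags(struct ieee80211_hw *hw)
{
        ieee80211_hw_set(hw, SUPPORTS_AMSDU_IN_AMPDU);  /* A-MSDU within A-MPDU RX */
        ieee80211_hw_set(hw, BEACON_TX_STATUS);         /* TX status for beacons */
}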
* This callback must be implemented and can sleep. * + * @config_iface_filter: Configure the interface's RX filter. + * This callback is optional and is used to configure which frames + * should be passed to mac80211. The filter_flags is the combination + * of FIF_* flags. The changed_flags is a bit mask that indicates + * which flags are changed. + * This callback can sleep. + * * @set_tim: Set TIM bit. mac80211 calls this function when a TIM bit * must be set or cleared for a given STA. Must be atomic. * @@ -3016,6 +3032,9 @@ enum ieee80211_reconfig_type { * buffer size of 8. Correct ways to retransmit #1 would be: * - TX: 1 or 18 or 81 * Even "189" would be wrong since 1 could be lost again. + * The @amsdu parameter is valid when the action is set to + * %IEEE80211_AMPDU_TX_OPERATIONAL and indicates the peer's ability + * to receive A-MSDU within A-MPDU. * * Returns a negative error code on failure. * The callback can sleep. @@ -3153,18 +3172,24 @@ enum ieee80211_reconfig_type { * The callback is optional and can sleep. * * @add_chanctx: Notifies device driver about new channel context creation. + * This callback may sleep. * @remove_chanctx: Notifies device driver about channel context destruction. + * This callback may sleep. * @change_chanctx: Notifies device driver about channel context changes that * may happen when combining different virtual interfaces on the same * channel context with different settings + * This callback may sleep. * @assign_vif_chanctx: Notifies device driver about channel context being bound * to vif. Possible use is for hw queue remapping. + * This callback may sleep. * @unassign_vif_chanctx: Notifies device driver about channel context being * unbound from vif. + * This callback may sleep. * @switch_vif_chanctx: switch a number of vifs from one chanctx to * another, as specified in the list of * @ieee80211_vif_chanctx_switch passed to the driver, according * to the mode defined in &ieee80211_chanctx_switch_mode. + * This callback may sleep. * * @start_ap: Start operation on the AP interface, this is called after all the * information in bss_conf is set and beacon can be retrieved. 
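/*
 * A minimal sketch, not part of the patch above, of the optional
 * per-interface RX filter callback described in the kernel-doc above;
 * the driver body is a placeholder.
 */
static void example_config_iface_filter(struct ieee80211_hw *hw,
                                        struct ieee80211_vif *vif,
                                        unsigned int filter_flags,
                                        unsigned int changed_flags)
{
        /* only react to bits that actually changed */
        if (!(changed_flags & FIF_PROBE_REQ))
                return;

        /* a real driver would program its RX filter here according to
         * (filter_flags & FIF_PROBE_REQ)
         */
}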
A channel @@ -3266,6 +3291,10 @@ struct ieee80211_ops { unsigned int changed_flags, unsigned int *total_flags, u64 multicast); + void (*config_iface_filter)(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + unsigned int filter_flags, + unsigned int changed_flags); int (*set_tim)(struct ieee80211_hw *hw, struct ieee80211_sta *sta, bool set); int (*set_key)(struct ieee80211_hw *hw, enum set_key_cmd cmd, @@ -3349,7 +3378,7 @@ struct ieee80211_ops { struct ieee80211_vif *vif, enum ieee80211_ampdu_mlme_action action, struct ieee80211_sta *sta, u16 tid, u16 *ssn, - u8 buf_size); + u8 buf_size, bool amsdu); int (*get_survey)(struct ieee80211_hw *hw, int idx, struct survey_info *survey); void (*rfkill_poll)(struct ieee80211_hw *hw); diff --git a/include/net/mac802154.h b/include/net/mac802154.h index b7f99615224b..da574bbdc333 100644 --- a/include/net/mac802154.h +++ b/include/net/mac802154.h @@ -23,14 +23,6 @@ #include <net/cfg802154.h> -/* General MAC frame format: - * 2 bytes: Frame Control - * 1 byte: Sequence Number - * 20 bytes: Addressing fields - * 14 bytes: Auxiliary Security Header - */ -#define MAC802154_FRAME_HARD_HEADER_LEN (2 + 1 + 20 + 14) - /** * enum ieee802154_hw_addr_filt_flags - hardware address filtering flags * @@ -250,6 +242,21 @@ struct ieee802154_ops { }; /** + * ieee802154_get_fc_from_skb - get the frame control field from an skb + * @skb: skb where the frame control field will be get from + */ +static inline __le16 ieee802154_get_fc_from_skb(const struct sk_buff *skb) +{ + /* return some invalid fc on failure */ + if (unlikely(skb->len < 2)) { + WARN_ON(1); + return cpu_to_le16(0); + } + + return (__force __le16)__get_unaligned_memmove16(skb_mac_header(skb)); +} + +/** * ieee802154_be64_to_le64 - copies and convert be64 to le64 * @le64_dst: le64 destination pointer * @be64_src: be64 source pointer @@ -270,6 +277,16 @@ static inline void ieee802154_le64_to_be64(void *be64_dst, const void *le64_src) } /** + * ieee802154_le16_to_be16 - copies and convert le16 to be16 + * @be16_dst: be16 destination pointer + * @le16_src: le16 source pointer + */ +static inline void ieee802154_le16_to_be16(void *be16_dst, const void *le16_src) +{ + __put_unaligned_memmove16(swab16p(le16_src), be16_dst); +} + +/** * ieee802154_alloc_hw - Allocate a new hardware device * * This must be called once for each hardware device. 
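/*
 * A minimal sketch, not part of the patch above, of the updated
 * ampdu_action prototype: the added "amsdu" argument is only meaningful
 * for IEEE80211_AMPDU_TX_OPERATIONAL and reports whether the peer can
 * receive A-MSDUs inside this A-MPDU session.  The driver body is a
 * placeholder.
 */
static int example_ampdu_action(struct ieee80211_hw *hw,
                                struct ieee80211_vif *vif,
                                enum ieee80211_ampdu_mlme_action action,
                                struct ieee80211_sta *sta, u16 tid, u16 *ssn,
                                u8 buf_size, bool amsdu)
{
        switch (action) {
        case IEEE80211_AMPDU_TX_OPERATIONAL:
                /* a real driver would record buf_size and amsdu here */
                return 0;
        default:
                return -EOPNOTSUPP;
        }
}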
The returned pointer diff --git a/include/net/mpls_iptunnel.h b/include/net/mpls_iptunnel.h index 4757997f76ed..179253f9dcfd 100644 --- a/include/net/mpls_iptunnel.h +++ b/include/net/mpls_iptunnel.h @@ -18,7 +18,7 @@ struct mpls_iptunnel_encap { u32 label[MAX_NEW_LABELS]; - u32 labels; + u8 labels; }; static inline struct mpls_iptunnel_encap *mpls_lwtunnel_encap(struct lwtunnel_state *lwtstate) diff --git a/include/net/ndisc.h b/include/net/ndisc.h index aba5695fadb0..bf3937431030 100644 --- a/include/net/ndisc.h +++ b/include/net/ndisc.h @@ -180,15 +180,13 @@ void ndisc_cleanup(void); int ndisc_rcv(struct sk_buff *skb); -void ndisc_send_ns(struct net_device *dev, struct neighbour *neigh, - const struct in6_addr *solicit, +void ndisc_send_ns(struct net_device *dev, const struct in6_addr *solicit, const struct in6_addr *daddr, const struct in6_addr *saddr, struct sk_buff *oskb); void ndisc_send_rs(struct net_device *dev, const struct in6_addr *saddr, const struct in6_addr *daddr); -void ndisc_send_na(struct net_device *dev, struct neighbour *neigh, - const struct in6_addr *daddr, +void ndisc_send_na(struct net_device *dev, const struct in6_addr *daddr, const struct in6_addr *solicited_addr, bool router, bool solicited, bool override, bool inc_opt); diff --git a/include/net/netfilter/br_netfilter.h b/include/net/netfilter/br_netfilter.h index d4c6b5f30acd..e8d1448425a7 100644 --- a/include/net/netfilter/br_netfilter.h +++ b/include/net/netfilter/br_netfilter.h @@ -31,7 +31,7 @@ static inline void nf_bridge_push_encap_header(struct sk_buff *skb) skb->network_header -= len; } -int br_nf_pre_routing_finish_bridge(struct sock *sk, struct sk_buff *skb); +int br_nf_pre_routing_finish_bridge(struct net *net, struct sock *sk, struct sk_buff *skb); static inline struct rtable *bridge_parent_rtable(const struct net_device *dev) { @@ -45,12 +45,12 @@ struct net_device *setup_pre_routing(struct sk_buff *skb); void br_netfilter_enable(void); #if IS_ENABLED(CONFIG_IPV6) -int br_validate_ipv6(struct sk_buff *skb); -unsigned int br_nf_pre_routing_ipv6(const struct nf_hook_ops *ops, +int br_validate_ipv6(struct net *net, struct sk_buff *skb); +unsigned int br_nf_pre_routing_ipv6(void *priv, struct sk_buff *skb, const struct nf_hook_state *state); #else -static inline int br_validate_ipv6(struct sk_buff *skb) +static inline int br_validate_ipv6(struct net *net, struct sk_buff *skb) { return -1; } diff --git a/include/net/netfilter/ipv4/nf_dup_ipv4.h b/include/net/netfilter/ipv4/nf_dup_ipv4.h index 42008f10dfc4..0a14733e8b82 100644 --- a/include/net/netfilter/ipv4/nf_dup_ipv4.h +++ b/include/net/netfilter/ipv4/nf_dup_ipv4.h @@ -1,7 +1,7 @@ #ifndef _NF_DUP_IPV4_H_ #define _NF_DUP_IPV4_H_ -void nf_dup_ipv4(struct sk_buff *skb, unsigned int hooknum, +void nf_dup_ipv4(struct net *net, struct sk_buff *skb, unsigned int hooknum, const struct in_addr *gw, int oif); #endif /* _NF_DUP_IPV4_H_ */ diff --git a/include/net/netfilter/ipv4/nf_reject.h b/include/net/netfilter/ipv4/nf_reject.h index 77862c3645f0..df7ecd806aba 100644 --- a/include/net/netfilter/ipv4/nf_reject.h +++ b/include/net/netfilter/ipv4/nf_reject.h @@ -6,7 +6,7 @@ #include <net/icmp.h> void nf_send_unreach(struct sk_buff *skb_in, int code, int hook); -void nf_send_reset(struct sk_buff *oldskb, int hook); +void nf_send_reset(struct net *net, struct sk_buff *oldskb, int hook); const struct tcphdr *nf_reject_ip_tcphdr_get(struct sk_buff *oldskb, struct tcphdr *_oth, int hook); diff --git a/include/net/netfilter/ipv6/nf_defrag_ipv6.h 
b/include/net/netfilter/ipv6/nf_defrag_ipv6.h index 27666d8a0bd0..fb7da5bb76cc 100644 --- a/include/net/netfilter/ipv6/nf_defrag_ipv6.h +++ b/include/net/netfilter/ipv6/nf_defrag_ipv6.h @@ -5,7 +5,7 @@ void nf_defrag_ipv6_enable(void); int nf_ct_frag6_init(void); void nf_ct_frag6_cleanup(void); -struct sk_buff *nf_ct_frag6_gather(struct sk_buff *skb, u32 user); +struct sk_buff *nf_ct_frag6_gather(struct net *net, struct sk_buff *skb, u32 user); void nf_ct_frag6_consume_orig(struct sk_buff *skb); struct inet_frags_ctl; diff --git a/include/net/netfilter/ipv6/nf_dup_ipv6.h b/include/net/netfilter/ipv6/nf_dup_ipv6.h index ed6bd66fa5a0..fa6237b382a3 100644 --- a/include/net/netfilter/ipv6/nf_dup_ipv6.h +++ b/include/net/netfilter/ipv6/nf_dup_ipv6.h @@ -1,7 +1,7 @@ #ifndef _NF_DUP_IPV6_H_ #define _NF_DUP_IPV6_H_ -void nf_dup_ipv6(struct sk_buff *skb, unsigned int hooknum, +void nf_dup_ipv6(struct net *net, struct sk_buff *skb, unsigned int hooknum, const struct in6_addr *gw, int oif); #endif /* _NF_DUP_IPV6_H_ */ diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h index e8ad46834df8..fde4068eec0b 100644 --- a/include/net/netfilter/nf_conntrack.h +++ b/include/net/netfilter/nf_conntrack.h @@ -183,15 +183,12 @@ void *nf_ct_alloc_hashtable(unsigned int *sizep, int nulls); void nf_ct_free_hashtable(void *hash, unsigned int size); -struct nf_conntrack_tuple_hash * -__nf_conntrack_find(struct net *net, u16 zone, - const struct nf_conntrack_tuple *tuple); - int nf_conntrack_hash_check_insert(struct nf_conn *ct); bool nf_ct_delete(struct nf_conn *ct, u32 pid, int report); bool nf_ct_get_tuplepr(const struct sk_buff *skb, unsigned int nhoff, - u_int16_t l3num, struct nf_conntrack_tuple *tuple); + u_int16_t l3num, struct net *net, + struct nf_conntrack_tuple *tuple); bool nf_ct_invert_tuplepr(struct nf_conntrack_tuple *inverse, const struct nf_conntrack_tuple *orig); diff --git a/include/net/netfilter/nf_conntrack_core.h b/include/net/netfilter/nf_conntrack_core.h index c03f9c42b3cd..788ef58a66b9 100644 --- a/include/net/netfilter/nf_conntrack_core.h +++ b/include/net/netfilter/nf_conntrack_core.h @@ -41,6 +41,7 @@ void nf_conntrack_cleanup_end(void); bool nf_ct_get_tuple(const struct sk_buff *skb, unsigned int nhoff, unsigned int dataoff, u_int16_t l3num, u_int8_t protonum, + struct net *net, struct nf_conntrack_tuple *tuple, const struct nf_conntrack_l3proto *l3proto, const struct nf_conntrack_l4proto *l4proto); diff --git a/include/net/netfilter/nf_conntrack_l4proto.h b/include/net/netfilter/nf_conntrack_l4proto.h index 1f7061313d54..956d8a6ac069 100644 --- a/include/net/netfilter/nf_conntrack_l4proto.h +++ b/include/net/netfilter/nf_conntrack_l4proto.h @@ -26,7 +26,7 @@ struct nf_conntrack_l4proto { /* Try to fill in the third arg: dataoff is offset past network protocol hdr. Return true if possible. */ bool (*pkt_to_tuple)(const struct sk_buff *skb, unsigned int dataoff, - struct nf_conntrack_tuple *tuple); + struct net *net, struct nf_conntrack_tuple *tuple); /* Invert the per-proto part of the tuple: ie. turn xmit into reply. * Some packets can't be inverted: return 0 in that case. 
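To illustrate what the netns-aware signature changes above mean for an L4 conntrack module, here is a minimal sketch of a ->pkt_to_tuple() callback written against the new prototype; the "example" protocol header and its field are hypothetical, and real trackers parse their own headers:

#include <linux/skbuff.h>
#include <net/netfilter/nf_conntrack_l4proto.h>

/* Hypothetical protocol header used only for this sketch. */
struct example_hdr {
	__be16 spec_id;
};

static bool example_pkt_to_tuple(const struct sk_buff *skb,
				 unsigned int dataoff,
				 struct net *net,	/* new parameter */
				 struct nf_conntrack_tuple *tuple)
{
	struct example_hdr _hdr;
	const struct example_hdr *hp;

	hp = skb_header_pointer(skb, dataoff, sizeof(_hdr), &_hdr);
	if (!hp)
		return false;

	/* net is now available for per-namespace decisions if needed */
	tuple->src.u.all = hp->spec_id;
	tuple->dst.u.all = 0;
	return true;
}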
diff --git a/include/net/netfilter/nf_conntrack_timeout.h b/include/net/netfilter/nf_conntrack_timeout.h index 62308713dd7f..f72be38860a7 100644 --- a/include/net/netfilter/nf_conntrack_timeout.h +++ b/include/net/netfilter/nf_conntrack_timeout.h @@ -20,10 +20,20 @@ struct ctnl_timeout { }; struct nf_conn_timeout { - struct ctnl_timeout *timeout; + struct ctnl_timeout __rcu *timeout; }; -#define NF_CT_TIMEOUT_EXT_DATA(__t) (unsigned int *) &((__t)->timeout->data) +static inline unsigned int * +nf_ct_timeout_data(struct nf_conn_timeout *t) +{ + struct ctnl_timeout *timeout; + + timeout = rcu_dereference(t->timeout); + if (timeout == NULL) + return NULL; + + return (unsigned int *)timeout->data; +} static inline struct nf_conn_timeout *nf_ct_timeout_find(const struct nf_conn *ct) @@ -47,7 +57,7 @@ struct nf_conn_timeout *nf_ct_timeout_ext_add(struct nf_conn *ct, if (timeout_ext == NULL) return NULL; - timeout_ext->timeout = timeout; + rcu_assign_pointer(timeout_ext->timeout, timeout); return timeout_ext; #else @@ -64,10 +74,13 @@ nf_ct_timeout_lookup(struct net *net, struct nf_conn *ct, unsigned int *timeouts; timeout_ext = nf_ct_timeout_find(ct); - if (timeout_ext) - timeouts = NF_CT_TIMEOUT_EXT_DATA(timeout_ext); - else + if (timeout_ext) { + timeouts = nf_ct_timeout_data(timeout_ext); + if (unlikely(!timeouts)) + timeouts = l4proto->get_timeouts(net); + } else { timeouts = l4proto->get_timeouts(net); + } return timeouts; #else diff --git a/include/net/netfilter/nf_nat_core.h b/include/net/netfilter/nf_nat_core.h index fbfd1ba4254e..186c54138f35 100644 --- a/include/net/netfilter/nf_nat_core.h +++ b/include/net/netfilter/nf_nat_core.h @@ -10,7 +10,7 @@ unsigned int nf_nat_packet(struct nf_conn *ct, enum ip_conntrack_info ctinfo, unsigned int hooknum, struct sk_buff *skb); -int nf_xfrm_me_harder(struct sk_buff *skb, unsigned int family); +int nf_xfrm_me_harder(struct net *net, struct sk_buff *skb, unsigned int family); static inline int nf_nat_initialized(struct nf_conn *ct, enum nf_nat_manip_type manip) diff --git a/include/net/netfilter/nf_nat_l3proto.h b/include/net/netfilter/nf_nat_l3proto.h index a3127325f624..aef3e5fc9fd9 100644 --- a/include/net/netfilter/nf_nat_l3proto.h +++ b/include/net/netfilter/nf_nat_l3proto.h @@ -43,31 +43,31 @@ int nf_nat_icmp_reply_translation(struct sk_buff *skb, struct nf_conn *ct, enum ip_conntrack_info ctinfo, unsigned int hooknum); -unsigned int nf_nat_ipv4_in(const struct nf_hook_ops *ops, struct sk_buff *skb, +unsigned int nf_nat_ipv4_in(void *priv, struct sk_buff *skb, const struct nf_hook_state *state, - unsigned int (*do_chain)(const struct nf_hook_ops *ops, + unsigned int (*do_chain)(void *priv, struct sk_buff *skb, const struct nf_hook_state *state, struct nf_conn *ct)); -unsigned int nf_nat_ipv4_out(const struct nf_hook_ops *ops, struct sk_buff *skb, +unsigned int nf_nat_ipv4_out(void *priv, struct sk_buff *skb, const struct nf_hook_state *state, - unsigned int (*do_chain)(const struct nf_hook_ops *ops, + unsigned int (*do_chain)(void *priv, struct sk_buff *skb, const struct nf_hook_state *state, struct nf_conn *ct)); -unsigned int nf_nat_ipv4_local_fn(const struct nf_hook_ops *ops, +unsigned int nf_nat_ipv4_local_fn(void *priv, struct sk_buff *skb, const struct nf_hook_state *state, - unsigned int (*do_chain)(const struct nf_hook_ops *ops, + unsigned int (*do_chain)(void *priv, struct sk_buff *skb, const struct nf_hook_state *state, struct nf_conn *ct)); -unsigned int nf_nat_ipv4_fn(const struct nf_hook_ops *ops, struct sk_buff *skb, +unsigned 
int nf_nat_ipv4_fn(void *priv, struct sk_buff *skb, const struct nf_hook_state *state, - unsigned int (*do_chain)(const struct nf_hook_ops *ops, + unsigned int (*do_chain)(void *priv, struct sk_buff *skb, const struct nf_hook_state *state, struct nf_conn *ct)); @@ -76,31 +76,31 @@ int nf_nat_icmpv6_reply_translation(struct sk_buff *skb, struct nf_conn *ct, enum ip_conntrack_info ctinfo, unsigned int hooknum, unsigned int hdrlen); -unsigned int nf_nat_ipv6_in(const struct nf_hook_ops *ops, struct sk_buff *skb, +unsigned int nf_nat_ipv6_in(void *priv, struct sk_buff *skb, const struct nf_hook_state *state, - unsigned int (*do_chain)(const struct nf_hook_ops *ops, + unsigned int (*do_chain)(void *priv, struct sk_buff *skb, const struct nf_hook_state *state, struct nf_conn *ct)); -unsigned int nf_nat_ipv6_out(const struct nf_hook_ops *ops, struct sk_buff *skb, +unsigned int nf_nat_ipv6_out(void *priv, struct sk_buff *skb, const struct nf_hook_state *state, - unsigned int (*do_chain)(const struct nf_hook_ops *ops, + unsigned int (*do_chain)(void *priv, struct sk_buff *skb, const struct nf_hook_state *state, struct nf_conn *ct)); -unsigned int nf_nat_ipv6_local_fn(const struct nf_hook_ops *ops, +unsigned int nf_nat_ipv6_local_fn(void *priv, struct sk_buff *skb, const struct nf_hook_state *state, - unsigned int (*do_chain)(const struct nf_hook_ops *ops, + unsigned int (*do_chain)(void *priv, struct sk_buff *skb, const struct nf_hook_state *state, struct nf_conn *ct)); -unsigned int nf_nat_ipv6_fn(const struct nf_hook_ops *ops, struct sk_buff *skb, +unsigned int nf_nat_ipv6_fn(void *priv, struct sk_buff *skb, const struct nf_hook_state *state, - unsigned int (*do_chain)(const struct nf_hook_ops *ops, + unsigned int (*do_chain)(void *priv, struct sk_buff *skb, const struct nf_hook_state *state, struct nf_conn *ct)); diff --git a/include/net/netfilter/nf_queue.h b/include/net/netfilter/nf_queue.h index e8635854a55b..9c5638ad872e 100644 --- a/include/net/netfilter/nf_queue.h +++ b/include/net/netfilter/nf_queue.h @@ -32,7 +32,7 @@ void nf_register_queue_handler(const struct nf_queue_handler *qh); void nf_unregister_queue_handler(void); void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict); -bool nf_queue_entry_get_refs(struct nf_queue_entry *entry); +void nf_queue_entry_get_refs(struct nf_queue_entry *entry); void nf_queue_entry_release_refs(struct nf_queue_entry *entry); static inline void init_hashrandom(u32 *jhash_initval) diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h index aa8bee72c9d3..c9149cc0a02d 100644 --- a/include/net/netfilter/nf_tables.h +++ b/include/net/netfilter/nf_tables.h @@ -14,9 +14,11 @@ struct nft_pktinfo { struct sk_buff *skb; + struct net *net; const struct net_device *in; const struct net_device *out; - const struct nf_hook_ops *ops; + u8 pf; + u8 hook; u8 nhoff; u8 thoff; u8 tprot; @@ -25,16 +27,15 @@ struct nft_pktinfo { }; static inline void nft_set_pktinfo(struct nft_pktinfo *pkt, - const struct nf_hook_ops *ops, struct sk_buff *skb, const struct nf_hook_state *state) { pkt->skb = skb; + pkt->net = pkt->xt.net = state->net; pkt->in = pkt->xt.in = state->in; pkt->out = pkt->xt.out = state->out; - pkt->ops = ops; - pkt->xt.hooknum = ops->hooknum; - pkt->xt.family = ops->pf; + pkt->hook = pkt->xt.hooknum = state->hook; + pkt->pf = pkt->xt.family = state->pf; } /** @@ -815,8 +816,7 @@ int nft_register_basechain(struct nft_base_chain *basechain, void nft_unregister_basechain(struct nft_base_chain *basechain, unsigned int 
hook_nops); -unsigned int nft_do_chain(struct nft_pktinfo *pkt, - const struct nf_hook_ops *ops); +unsigned int nft_do_chain(struct nft_pktinfo *pkt, void *priv); /** * struct nft_table - nf_tables table diff --git a/include/net/netfilter/nf_tables_ipv4.h b/include/net/netfilter/nf_tables_ipv4.h index 2df7f96902ee..ca6ef6bf775e 100644 --- a/include/net/netfilter/nf_tables_ipv4.h +++ b/include/net/netfilter/nf_tables_ipv4.h @@ -6,13 +6,12 @@ static inline void nft_set_pktinfo_ipv4(struct nft_pktinfo *pkt, - const struct nf_hook_ops *ops, struct sk_buff *skb, const struct nf_hook_state *state) { struct iphdr *ip; - nft_set_pktinfo(pkt, ops, skb, state); + nft_set_pktinfo(pkt, skb, state); ip = ip_hdr(pkt->skb); pkt->tprot = ip->protocol; diff --git a/include/net/netfilter/nf_tables_ipv6.h b/include/net/netfilter/nf_tables_ipv6.h index 97db2e3a5e65..8ad39a6a5fe1 100644 --- a/include/net/netfilter/nf_tables_ipv6.h +++ b/include/net/netfilter/nf_tables_ipv6.h @@ -6,14 +6,13 @@ static inline int nft_set_pktinfo_ipv6(struct nft_pktinfo *pkt, - const struct nf_hook_ops *ops, struct sk_buff *skb, const struct nf_hook_state *state) { int protohdr, thoff = 0; unsigned short frag_off; - nft_set_pktinfo(pkt, ops, skb, state); + nft_set_pktinfo(pkt, skb, state); protohdr = ipv6_find_hdr(pkt->skb, &thoff, -1, &frag_off, NULL); /* If malformed, drop it */ diff --git a/include/net/netfilter/nfnetlink_queue.h b/include/net/netfilter/nfnetlink_queue.h deleted file mode 100644 index aff88ba91391..000000000000 --- a/include/net/netfilter/nfnetlink_queue.h +++ /dev/null @@ -1,51 +0,0 @@ -#ifndef _NET_NFNL_QUEUE_H_ -#define _NET_NFNL_QUEUE_H_ - -#include <linux/netfilter/nf_conntrack_common.h> - -struct nf_conn; - -#ifdef CONFIG_NETFILTER_NETLINK_QUEUE_CT -struct nf_conn *nfqnl_ct_get(struct sk_buff *entskb, size_t *size, - enum ip_conntrack_info *ctinfo); -struct nf_conn *nfqnl_ct_parse(const struct sk_buff *skb, - const struct nlattr *attr, - enum ip_conntrack_info *ctinfo); -int nfqnl_ct_put(struct sk_buff *skb, struct nf_conn *ct, - enum ip_conntrack_info ctinfo); -void nfqnl_ct_seq_adjust(struct sk_buff *skb, struct nf_conn *ct, - enum ip_conntrack_info ctinfo, int diff); -int nfqnl_attach_expect(struct nf_conn *ct, const struct nlattr *attr, - u32 portid, u32 report); -#else -inline struct nf_conn * -nfqnl_ct_get(struct sk_buff *entskb, size_t *size, enum ip_conntrack_info *ctinfo) -{ - return NULL; -} - -inline struct nf_conn *nfqnl_ct_parse(const struct sk_buff *skb, - const struct nlattr *attr, - enum ip_conntrack_info *ctinfo) -{ - return NULL; -} - -inline int -nfqnl_ct_put(struct sk_buff *skb, struct nf_conn *ct, enum ip_conntrack_info ctinfo) -{ - return 0; -} - -inline void nfqnl_ct_seq_adjust(struct sk_buff *skb, struct nf_conn *ct, - enum ip_conntrack_info ctinfo, int diff) -{ -} - -inline int nfqnl_attach_expect(struct nf_conn *ct, const struct nlattr *attr, - u32 portid, u32 report) -{ - return 0; -} -#endif /* NF_CONNTRACK */ -#endif diff --git a/include/net/netlink.h b/include/net/netlink.h index 2a5dbcc90d1c..0e3172751755 100644 --- a/include/net/netlink.h +++ b/include/net/netlink.h @@ -1004,6 +1004,15 @@ static inline __be32 nla_get_be32(const struct nlattr *nla) } /** + * nla_get_le32 - return payload of __le32 attribute + * @nla: __le32 netlink attribute + */ +static inline __le32 nla_get_le32(const struct nlattr *nla) +{ + return *(__le32 *) nla_data(nla); +} + +/** * nla_get_u16 - return payload of u16 attribute * @nla: u16 netlink attribute */ @@ -1066,6 +1075,15 @@ static inline 
__be64 nla_get_be64(const struct nlattr *nla) } /** + * nla_get_le64 - return payload of __le64 attribute + * @nla: __le64 netlink attribute + */ +static inline __le64 nla_get_le64(const struct nlattr *nla) +{ + return *(__le64 *) nla_data(nla); +} + +/** * nla_get_s32 - return payload of s32 attribute * @nla: s32 netlink attribute */ diff --git a/include/net/nfc/nci.h b/include/net/nfc/nci.h index 75d2e1880059..707e3ab816c2 100644 --- a/include/net/nfc/nci.h +++ b/include/net/nfc/nci.h @@ -35,6 +35,7 @@ #define NCI_MAX_NUM_RF_CONFIGS 10 #define NCI_MAX_NUM_CONN 10 #define NCI_MAX_PARAM_LEN 251 +#define NCI_MAX_PAYLOAD_SIZE 255 #define NCI_MAX_PACKET_SIZE 258 /* NCI Status Codes */ @@ -315,6 +316,8 @@ struct nci_nfcee_mode_set_cmd { __u8 nfcee_mode; } __packed; +#define NCI_OP_CORE_GET_CONFIG_CMD nci_opcode_pack(NCI_GID_CORE, 0x03) + /* ----------------------- */ /* ---- NCI Responses ---- */ /* ----------------------- */ @@ -375,6 +378,9 @@ struct nci_nfcee_discover_rsp { } __packed; #define NCI_OP_NFCEE_MODE_SET_RSP nci_opcode_pack(NCI_GID_NFCEE_MGMT, 0x01) + +#define NCI_OP_CORE_GET_CONFIG_RSP nci_opcode_pack(NCI_GID_CORE, 0x03) + /* --------------------------- */ /* ---- NCI Notifications ---- */ /* --------------------------- */ @@ -528,4 +534,6 @@ struct nci_nfcee_discover_ntf { struct nci_nfcee_information_tlv information_tlv; } __packed; +#define NCI_OP_CORE_RESET_NTF nci_opcode_pack(NCI_GID_CORE, 0x00) + #endif /* __NCI_H */ diff --git a/include/net/nfc/nci_core.h b/include/net/nfc/nci_core.h index d0d0f1e53bb9..57ce24fb0047 100644 --- a/include/net/nfc/nci_core.h +++ b/include/net/nfc/nci_core.h @@ -67,7 +67,7 @@ enum nci_state { struct nci_dev; -struct nci_prop_ops { +struct nci_driver_ops { __u16 opcode; int (*rsp)(struct nci_dev *dev, struct sk_buff *skb); int (*ntf)(struct nci_dev *dev, struct sk_buff *skb); @@ -94,8 +94,11 @@ struct nci_ops { void (*hci_cmd_received)(struct nci_dev *ndev, u8 pipe, u8 cmd, struct sk_buff *skb); - struct nci_prop_ops *prop_ops; + struct nci_driver_ops *prop_ops; size_t n_prop_ops; + + struct nci_driver_ops *core_ops; + size_t n_core_ops; }; #define NCI_MAX_SUPPORTED_RF_INTERFACES 4 @@ -125,6 +128,8 @@ struct nci_conn_info { /* Gates */ #define NCI_HCI_ADMIN_GATE 0x00 +#define NCI_HCI_LOOPBACK_GATE 0x04 +#define NCI_HCI_IDENTITY_MGMT_GATE 0x05 #define NCI_HCI_LINK_MGMT_GATE 0x06 /* Pipes */ @@ -278,10 +283,12 @@ int nci_request(struct nci_dev *ndev, unsigned long opt), unsigned long opt, __u32 timeout); int nci_prop_cmd(struct nci_dev *ndev, __u8 oid, size_t len, __u8 *payload); +int nci_core_cmd(struct nci_dev *ndev, __u16 opcode, size_t len, __u8 *payload); int nci_core_reset(struct nci_dev *ndev); int nci_core_init(struct nci_dev *ndev); int nci_recv_frame(struct nci_dev *ndev, struct sk_buff *skb); +int nci_send_frame(struct nci_dev *ndev, struct sk_buff *skb); int nci_set_config(struct nci_dev *ndev, __u8 id, size_t len, __u8 *val); int nci_nfcee_discover(struct nci_dev *ndev, u8 action); @@ -305,6 +312,7 @@ int nci_hci_set_param(struct nci_dev *ndev, u8 gate, u8 idx, const u8 *param, size_t param_len); int nci_hci_get_param(struct nci_dev *ndev, u8 gate, u8 idx, struct sk_buff **skb); +int nci_hci_clear_all_pipes(struct nci_dev *ndev); int nci_hci_dev_session_init(struct nci_dev *ndev); static inline struct sk_buff *nci_skb_alloc(struct nci_dev *ndev, @@ -348,9 +356,14 @@ int nci_prop_rsp_packet(struct nci_dev *ndev, __u16 opcode, struct sk_buff *skb); int nci_prop_ntf_packet(struct nci_dev *ndev, __u16 opcode, struct sk_buff *skb); +int 
nci_core_rsp_packet(struct nci_dev *ndev, __u16 opcode, + struct sk_buff *skb); +int nci_core_ntf_packet(struct nci_dev *ndev, __u16 opcode, + struct sk_buff *skb); void nci_rx_data_packet(struct nci_dev *ndev, struct sk_buff *skb); int nci_send_cmd(struct nci_dev *ndev, __u16 opcode, __u8 plen, void *payload); int nci_send_data(struct nci_dev *ndev, __u8 conn_id, struct sk_buff *skb); +int nci_conn_max_data_pkt_payload_size(struct nci_dev *ndev, __u8 conn_id); void nci_data_exchange_complete(struct nci_dev *ndev, struct sk_buff *skb, __u8 conn_id, int err); void nci_hci_data_received_cb(void *context, struct sk_buff *skb, int err); @@ -365,6 +378,7 @@ void nci_clear_target_list(struct nci_dev *ndev); void nci_req_complete(struct nci_dev *ndev, int result); struct nci_conn_info *nci_get_conn_info_by_conn_id(struct nci_dev *ndev, int conn_id); +int nci_get_conn_info_by_id(struct nci_dev *ndev, u8 id); /* ----- NCI status code ----- */ int nci_to_errno(__u8 code); @@ -380,6 +394,12 @@ struct nci_spi { unsigned int xfer_udelay; /* microseconds delay between transactions */ + + unsigned int xfer_speed_hz; /* + * SPI clock frequency + * 0 => default clock + */ + u8 acknowledge_mode; struct completion req_completion; diff --git a/include/net/nfc/nfc.h b/include/net/nfc/nfc.h index 30afc9a6718c..dcfcfc9c00bf 100644 --- a/include/net/nfc/nfc.h +++ b/include/net/nfc/nfc.h @@ -68,7 +68,7 @@ struct nfc_ops { int (*activate_target)(struct nfc_dev *dev, struct nfc_target *target, u32 protocol); void (*deactivate_target)(struct nfc_dev *dev, - struct nfc_target *target); + struct nfc_target *target, u8 mode); int (*im_transceive)(struct nfc_dev *dev, struct nfc_target *target, struct sk_buff *skb, data_exchange_cb_t cb, void *cb_context); diff --git a/include/net/nl802154.h b/include/net/nl802154.h index cf2713d8b975..32cb3e591e07 100644 --- a/include/net/nl802154.h +++ b/include/net/nl802154.h @@ -56,6 +56,22 @@ enum nl802154_commands { /* add new commands above here */ +#ifdef CONFIG_IEEE802154_NL802154_EXPERIMENTAL + NL802154_CMD_SET_SEC_PARAMS, + NL802154_CMD_GET_SEC_KEY, /* can dump */ + NL802154_CMD_NEW_SEC_KEY, + NL802154_CMD_DEL_SEC_KEY, + NL802154_CMD_GET_SEC_DEV, /* can dump */ + NL802154_CMD_NEW_SEC_DEV, + NL802154_CMD_DEL_SEC_DEV, + NL802154_CMD_GET_SEC_DEVKEY, /* can dump */ + NL802154_CMD_NEW_SEC_DEVKEY, + NL802154_CMD_DEL_SEC_DEVKEY, + NL802154_CMD_GET_SEC_LEVEL, /* can dump */ + NL802154_CMD_NEW_SEC_LEVEL, + NL802154_CMD_DEL_SEC_LEVEL, +#endif /* CONFIG_IEEE802154_NL802154_EXPERIMENTAL */ + /* used to define NL802154_CMD_MAX below */ __NL802154_CMD_AFTER_LAST, NL802154_CMD_MAX = __NL802154_CMD_AFTER_LAST - 1 @@ -110,6 +126,18 @@ enum nl802154_attrs { /* add attributes here, update the policy in nl802154.c */ +#ifdef CONFIG_IEEE802154_NL802154_EXPERIMENTAL + NL802154_ATTR_SEC_ENABLED, + NL802154_ATTR_SEC_OUT_LEVEL, + NL802154_ATTR_SEC_OUT_KEY_ID, + NL802154_ATTR_SEC_FRAME_COUNTER, + + NL802154_ATTR_SEC_LEVEL, + NL802154_ATTR_SEC_DEVICE, + NL802154_ATTR_SEC_DEVKEY, + NL802154_ATTR_SEC_KEY, +#endif /* CONFIG_IEEE802154_NL802154_EXPERIMENTAL */ + __NL802154_ATTR_AFTER_LAST, NL802154_ATTR_MAX = __NL802154_ATTR_AFTER_LAST - 1 }; @@ -247,4 +275,167 @@ enum nl802154_supported_bool_states { NL802154_SUPPORTED_BOOL_MAX = __NL802154_SUPPORTED_BOOL_AFTER_LAST - 1 }; +#ifdef CONFIG_IEEE802154_NL802154_EXPERIMENTAL + +enum nl802154_dev_addr_modes { + NL802154_DEV_ADDR_NONE, + __NL802154_DEV_ADDR_INVALID, + NL802154_DEV_ADDR_SHORT, + NL802154_DEV_ADDR_EXTENDED, + + /* keep last */ + 
__NL802154_DEV_ADDR_AFTER_LAST, + NL802154_DEV_ADDR_MAX = __NL802154_DEV_ADDR_AFTER_LAST - 1 +}; + +enum nl802154_dev_addr_attrs { + NL802154_DEV_ADDR_ATTR_UNSPEC, + + NL802154_DEV_ADDR_ATTR_PAN_ID, + NL802154_DEV_ADDR_ATTR_MODE, + NL802154_DEV_ADDR_ATTR_SHORT, + NL802154_DEV_ADDR_ATTR_EXTENDED, + + /* keep last */ + __NL802154_DEV_ADDR_ATTR_AFTER_LAST, + NL802154_DEV_ADDR_ATTR_MAX = __NL802154_DEV_ADDR_ATTR_AFTER_LAST - 1 +}; + +enum nl802154_key_id_modes { + NL802154_KEY_ID_MODE_IMPLICIT, + NL802154_KEY_ID_MODE_INDEX, + NL802154_KEY_ID_MODE_INDEX_SHORT, + NL802154_KEY_ID_MODE_INDEX_EXTENDED, + + /* keep last */ + __NL802154_KEY_ID_MODE_AFTER_LAST, + NL802154_KEY_ID_MODE_MAX = __NL802154_KEY_ID_MODE_AFTER_LAST - 1 +}; + +enum nl802154_key_id_attrs { + NL802154_KEY_ID_ATTR_UNSPEC, + + NL802154_KEY_ID_ATTR_MODE, + NL802154_KEY_ID_ATTR_INDEX, + NL802154_KEY_ID_ATTR_IMPLICIT, + NL802154_KEY_ID_ATTR_SOURCE_SHORT, + NL802154_KEY_ID_ATTR_SOURCE_EXTENDED, + + /* keep last */ + __NL802154_KEY_ID_ATTR_AFTER_LAST, + NL802154_KEY_ID_ATTR_MAX = __NL802154_KEY_ID_ATTR_AFTER_LAST - 1 +}; + +enum nl802154_seclevels { + NL802154_SECLEVEL_NONE, + NL802154_SECLEVEL_MIC32, + NL802154_SECLEVEL_MIC64, + NL802154_SECLEVEL_MIC128, + NL802154_SECLEVEL_ENC, + NL802154_SECLEVEL_ENC_MIC32, + NL802154_SECLEVEL_ENC_MIC64, + NL802154_SECLEVEL_ENC_MIC128, + + /* keep last */ + __NL802154_SECLEVEL_AFTER_LAST, + NL802154_SECLEVEL_MAX = __NL802154_SECLEVEL_AFTER_LAST - 1 +}; + +enum nl802154_frames { + NL802154_FRAME_BEACON, + NL802154_FRAME_DATA, + NL802154_FRAME_ACK, + NL802154_FRAME_CMD, + + /* keep last */ + __NL802154_FRAME_AFTER_LAST, + NL802154_FRAME_MAX = __NL802154_FRAME_AFTER_LAST - 1 +}; + +enum nl802154_cmd_frames { + __NL802154_CMD_FRAME_INVALID, + NL802154_CMD_FRAME_ASSOC_REQUEST, + NL802154_CMD_FRAME_ASSOC_RESPONSE, + NL802154_CMD_FRAME_DISASSOC_NOTIFY, + NL802154_CMD_FRAME_DATA_REQUEST, + NL802154_CMD_FRAME_PAN_ID_CONFLICT_NOTIFY, + NL802154_CMD_FRAME_ORPHAN_NOTIFY, + NL802154_CMD_FRAME_BEACON_REQUEST, + NL802154_CMD_FRAME_COORD_REALIGNMENT, + NL802154_CMD_FRAME_GTS_REQUEST, + + /* keep last */ + __NL802154_CMD_FRAME_AFTER_LAST, + NL802154_CMD_FRAME_MAX = __NL802154_CMD_FRAME_AFTER_LAST - 1 +}; + +enum nl802154_seclevel_attrs { + NL802154_SECLEVEL_ATTR_UNSPEC, + + NL802154_SECLEVEL_ATTR_LEVELS, + NL802154_SECLEVEL_ATTR_FRAME, + NL802154_SECLEVEL_ATTR_CMD_FRAME, + NL802154_SECLEVEL_ATTR_DEV_OVERRIDE, + + /* keep last */ + __NL802154_SECLEVEL_ATTR_AFTER_LAST, + NL802154_SECLEVEL_ATTR_MAX = __NL802154_SECLEVEL_ATTR_AFTER_LAST - 1 +}; + +/* TODO what is this? 
couldn't find in mib */ +enum { + NL802154_DEVKEY_IGNORE, + NL802154_DEVKEY_RESTRICT, + NL802154_DEVKEY_RECORD, + + /* keep last */ + __NL802154_DEVKEY_AFTER_LAST, + NL802154_DEVKEY_MAX = __NL802154_DEVKEY_AFTER_LAST - 1 +}; + +enum nl802154_dev { + NL802154_DEV_ATTR_UNSPEC, + + NL802154_DEV_ATTR_FRAME_COUNTER, + NL802154_DEV_ATTR_PAN_ID, + NL802154_DEV_ATTR_SHORT_ADDR, + NL802154_DEV_ATTR_EXTENDED_ADDR, + NL802154_DEV_ATTR_SECLEVEL_EXEMPT, + NL802154_DEV_ATTR_KEY_MODE, + + /* keep last */ + __NL802154_DEV_ATTR_AFTER_LAST, + NL802154_DEV_ATTR_MAX = __NL802154_DEV_ATTR_AFTER_LAST - 1 +}; + +enum nl802154_devkey { + NL802154_DEVKEY_ATTR_UNSPEC, + + NL802154_DEVKEY_ATTR_FRAME_COUNTER, + NL802154_DEVKEY_ATTR_EXTENDED_ADDR, + NL802154_DEVKEY_ATTR_ID, + + /* keep last */ + __NL802154_DEVKEY_ATTR_AFTER_LAST, + NL802154_DEVKEY_ATTR_MAX = __NL802154_DEVKEY_ATTR_AFTER_LAST - 1 +}; + +enum nl802154_key { + NL802154_KEY_ATTR_UNSPEC, + + NL802154_KEY_ATTR_ID, + NL802154_KEY_ATTR_USAGE_FRAMES, + NL802154_KEY_ATTR_USAGE_CMDS, + NL802154_KEY_ATTR_BYTES, + + /* keep last */ + __NL802154_KEY_ATTR_AFTER_LAST, + NL802154_KEY_ATTR_MAX = __NL802154_KEY_ATTR_AFTER_LAST - 1 +}; + +#define NL802154_KEY_SIZE 16 +#define NL802154_CMD_FRAME_NR_IDS 256 + +#endif /* CONFIG_IEEE802154_NL802154_EXPERIMENTAL */ + #endif /* __NL802154_H */ diff --git a/include/net/request_sock.h b/include/net/request_sock.h index 87935cad2f7b..a0dde04eb178 100644 --- a/include/net/request_sock.h +++ b/include/net/request_sock.h @@ -32,17 +32,17 @@ struct request_sock_ops { int obj_size; struct kmem_cache *slab; char *slab_name; - int (*rtx_syn_ack)(struct sock *sk, + int (*rtx_syn_ack)(const struct sock *sk, struct request_sock *req); - void (*send_ack)(struct sock *sk, struct sk_buff *skb, + void (*send_ack)(const struct sock *sk, struct sk_buff *skb, struct request_sock *req); - void (*send_reset)(struct sock *sk, + void (*send_reset)(const struct sock *sk, struct sk_buff *skb); void (*destructor)(struct request_sock *req); void (*syn_ack_timeout)(const struct request_sock *req); }; -int inet_rtx_syn_ack(struct sock *parent, struct request_sock *req); +int inet_rtx_syn_ack(const struct sock *parent, struct request_sock *req); /* struct request_sock - mini sock to represent a connection request */ @@ -50,16 +50,15 @@ struct request_sock { struct sock_common __req_common; #define rsk_refcnt __req_common.skc_refcnt #define rsk_hash __req_common.skc_hash +#define rsk_listener __req_common.skc_listener +#define rsk_window_clamp __req_common.skc_window_clamp +#define rsk_rcv_wnd __req_common.skc_rcv_wnd struct request_sock *dl_next; - struct sock *rsk_listener; u16 mss; u8 num_retrans; /* number of retransmits */ u8 cookie_ts:1; /* syncookie: encode tcpopts in timestamp */ u8 num_timeout:7; /* number of timeouts */ - /* The following two fields can be easily recomputed I think -AK */ - u32 window_clamp; /* window clamp at creation time */ - u32 rcv_wnd; /* rcv_wnd offered first time */ u32 ts_recent; struct timer_list rsk_timer; const struct request_sock_ops *rsk_ops; @@ -69,15 +68,35 @@ struct request_sock { u32 peer_secid; }; +static inline struct request_sock *inet_reqsk(struct sock *sk) +{ + return (struct request_sock *)sk; +} + +static inline struct sock *req_to_sk(struct request_sock *req) +{ + return (struct sock *)req; +} + static inline struct request_sock * -reqsk_alloc(const struct request_sock_ops *ops, struct sock *sk_listener) +reqsk_alloc(const struct request_sock_ops *ops, struct sock *sk_listener, + bool attach_listener) { - 
struct request_sock *req = kmem_cache_alloc(ops->slab, GFP_ATOMIC); + struct request_sock *req; + + req = kmem_cache_alloc(ops->slab, GFP_ATOMIC | __GFP_NOWARN); if (req) { req->rsk_ops = ops; - sock_hold(sk_listener); - req->rsk_listener = sk_listener; + if (attach_listener) { + sock_hold(sk_listener); + req->rsk_listener = sk_listener; + } else { + req->rsk_listener = NULL; + } + req_to_sk(req)->sk_prot = sk_listener->sk_prot; + sk_node_init(&req_to_sk(req)->sk_node); + sk_tx_queue_clear(req_to_sk(req)); req->saved_syn = NULL; /* Following is temporary. It is coupled with debugging * helpers in reqsk_put() & reqsk_free() @@ -87,16 +106,6 @@ reqsk_alloc(const struct request_sock_ops *ops, struct sock *sk_listener) return req; } -static inline struct request_sock *inet_reqsk(struct sock *sk) -{ - return (struct request_sock *)sk; -} - -static inline struct sock *req_to_sk(struct request_sock *req) -{ - return (struct sock *)req; -} - static inline void reqsk_free(struct request_sock *req) { /* temporary debugging */ @@ -117,26 +126,6 @@ static inline void reqsk_put(struct request_sock *req) extern int sysctl_max_syn_backlog; -/** struct listen_sock - listen state - * - * @max_qlen_log - log_2 of maximal queued SYNs/REQUESTs - */ -struct listen_sock { - int qlen_inc; /* protected by listener lock */ - int young_inc;/* protected by listener lock */ - - /* following fields can be updated by timer */ - atomic_t qlen_dec; /* qlen = qlen_inc - qlen_dec */ - atomic_t young_dec; - - u8 max_qlen_log ____cacheline_aligned_in_smp; - u8 synflood_warned; - /* 2 bytes hole, try to use */ - u32 hash_rnd; - u32 nr_table_entries; - struct request_sock *syn_table[0]; -}; - /* * For a TCP Fast Open listener - * lock - protects the access to all the reqsk, which is co-owned by @@ -170,127 +159,72 @@ struct fastopen_queue { * @rskq_accept_head - FIFO head of established children * @rskq_accept_tail - FIFO tail of established children * @rskq_defer_accept - User waits for some data after accept() - * @syn_wait_lock - serializer - * - * %syn_wait_lock is necessary only to avoid proc interface having to grab the main - * lock sock while browsing the listening hash (otherwise it's deadlock prone). * */ struct request_sock_queue { + spinlock_t rskq_lock; + u8 rskq_defer_accept; + + u32 synflood_warned; + atomic_t qlen; + atomic_t young; + struct request_sock *rskq_accept_head; struct request_sock *rskq_accept_tail; - u8 rskq_defer_accept; - struct listen_sock *listen_opt; - struct fastopen_queue *fastopenq; /* This is non-NULL iff TFO has been - * enabled on this listener. Check - * max_qlen != 0 in fastopen_queue - * to determine if TFO is enabled - * right at this moment. + struct fastopen_queue fastopenq; /* Check max_qlen != 0 to determine + * if TFO is enabled. 
*/ - - /* temporary alignment, our goal is to get rid of this lock */ - spinlock_t syn_wait_lock ____cacheline_aligned_in_smp; }; -int reqsk_queue_alloc(struct request_sock_queue *queue, - unsigned int nr_table_entries); +void reqsk_queue_alloc(struct request_sock_queue *queue); -void __reqsk_queue_destroy(struct request_sock_queue *queue); -void reqsk_queue_destroy(struct request_sock_queue *queue); void reqsk_fastopen_remove(struct sock *sk, struct request_sock *req, bool reset); -static inline struct request_sock * - reqsk_queue_yank_acceptq(struct request_sock_queue *queue) -{ - struct request_sock *req = queue->rskq_accept_head; - - queue->rskq_accept_head = NULL; - return req; -} - -static inline int reqsk_queue_empty(struct request_sock_queue *queue) +static inline bool reqsk_queue_empty(const struct request_sock_queue *queue) { return queue->rskq_accept_head == NULL; } -static inline void reqsk_queue_add(struct request_sock_queue *queue, - struct request_sock *req, - struct sock *parent, - struct sock *child) +static inline struct request_sock *reqsk_queue_remove(struct request_sock_queue *queue, + struct sock *parent) { - req->sk = child; - sk_acceptq_added(parent); - - if (queue->rskq_accept_head == NULL) - queue->rskq_accept_head = req; - else - queue->rskq_accept_tail->dl_next = req; - - queue->rskq_accept_tail = req; - req->dl_next = NULL; -} - -static inline struct request_sock *reqsk_queue_remove(struct request_sock_queue *queue) -{ - struct request_sock *req = queue->rskq_accept_head; - - WARN_ON(req == NULL); - - queue->rskq_accept_head = req->dl_next; - if (queue->rskq_accept_head == NULL) - queue->rskq_accept_tail = NULL; + struct request_sock *req; + spin_lock_bh(&queue->rskq_lock); + req = queue->rskq_accept_head; + if (req) { + sk_acceptq_removed(parent); + queue->rskq_accept_head = req->dl_next; + if (queue->rskq_accept_head == NULL) + queue->rskq_accept_tail = NULL; + } + spin_unlock_bh(&queue->rskq_lock); return req; } static inline void reqsk_queue_removed(struct request_sock_queue *queue, const struct request_sock *req) { - struct listen_sock *lopt = queue->listen_opt; - if (req->num_timeout == 0) - atomic_inc(&lopt->young_dec); - atomic_inc(&lopt->qlen_dec); + atomic_dec(&queue->young); + atomic_dec(&queue->qlen); } static inline void reqsk_queue_added(struct request_sock_queue *queue) { - struct listen_sock *lopt = queue->listen_opt; - - lopt->young_inc++; - lopt->qlen_inc++; -} - -static inline int listen_sock_qlen(const struct listen_sock *lopt) -{ - return lopt->qlen_inc - atomic_read(&lopt->qlen_dec); -} - -static inline int listen_sock_young(const struct listen_sock *lopt) -{ - return lopt->young_inc - atomic_read(&lopt->young_dec); + atomic_inc(&queue->young); + atomic_inc(&queue->qlen); } static inline int reqsk_queue_len(const struct request_sock_queue *queue) { - const struct listen_sock *lopt = queue->listen_opt; - - return lopt ? 
listen_sock_qlen(lopt) : 0; + return atomic_read(&queue->qlen); } static inline int reqsk_queue_len_young(const struct request_sock_queue *queue) { - return listen_sock_young(queue->listen_opt); + return atomic_read(&queue->young); } -static inline int reqsk_queue_is_full(const struct request_sock_queue *queue) -{ - return reqsk_queue_len(queue) >> queue->listen_opt->max_qlen_log; -} - -void reqsk_queue_hash_req(struct request_sock_queue *queue, - u32 hash, struct request_sock *req, - unsigned long timeout); - #endif /* _REQUEST_SOCK_H */ diff --git a/include/net/route.h b/include/net/route.h index cc61cb95f059..ee81307863d5 100644 --- a/include/net/route.h +++ b/include/net/route.h @@ -28,6 +28,8 @@ #include <net/inetpeer.h> #include <net/flow.h> #include <net/inet_sock.h> +#include <net/ip_fib.h> +#include <net/l3mdev.h> #include <linux/in_route.h> #include <linux/rtnetlink.h> #include <linux/rcupdate.h> @@ -64,6 +66,8 @@ struct rtable { /* Miscellaneous cached information */ u32 rt_pmtu; + u32 rt_table_id; + struct list_head rt_uncached; struct uncached_list *rt_uncached_list; }; @@ -110,9 +114,17 @@ struct in_device; int ip_rt_init(void); void rt_cache_flush(struct net *net); void rt_flush_dev(struct net_device *dev); -struct rtable *__ip_route_output_key(struct net *, struct flowi4 *flp); +struct rtable *__ip_route_output_key_hash(struct net *, struct flowi4 *flp, + int mp_hash); + +static inline struct rtable *__ip_route_output_key(struct net *net, + struct flowi4 *flp) +{ + return __ip_route_output_key_hash(net, flp, -1); +} + struct rtable *ip_route_output_flow(struct net *, struct flowi4 *flp, - struct sock *sk); + const struct sock *sk); struct dst_entry *ipv4_blackhole_route(struct net *net, struct dst_entry *dst_orig); @@ -254,9 +266,6 @@ static inline void ip_route_connect_init(struct flowi4 *fl4, __be32 dst, __be32 if (inet_sk(sk)->transparent) flow_flags |= FLOWI_FLAG_ANYSRC; - if (netif_index_is_vrf(sock_net(sk), oif)) - flow_flags |= FLOWI_FLAG_VRFSRC; - flowi4_init_output(fl4, oif, sk->sk_mark, tos, RT_SCOPE_UNIVERSE, protocol, flow_flags, dst, src, dport, sport); } @@ -273,6 +282,10 @@ static inline struct rtable *ip_route_connect(struct flowi4 *fl4, ip_route_connect_init(fl4, dst, src, tos, oif, protocol, sport, dport, sk); + if (!src && oif) { + l3mdev_get_saddr(net, oif, fl4); + src = fl4->saddr; + } if (!dst || !src) { rt = __ip_route_output_key(net, fl4); if (IS_ERR(rt)) diff --git a/include/net/rtnetlink.h b/include/net/rtnetlink.h index 18fdb98185ab..2f87c1ba13de 100644 --- a/include/net/rtnetlink.h +++ b/include/net/rtnetlink.h @@ -122,8 +122,10 @@ struct rtnl_af_ops { int family; int (*fill_link_af)(struct sk_buff *skb, - const struct net_device *dev); - size_t (*get_link_af_size)(const struct net_device *dev); + const struct net_device *dev, + u32 ext_filter_mask); + size_t (*get_link_af_size)(const struct net_device *dev, + u32 ext_filter_mask); int (*validate_link_af)(const struct net_device *dev, const struct nlattr *attr); diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h index 444faa89a55f..4c79ce8c1f92 100644 --- a/include/net/sch_generic.h +++ b/include/net/sch_generic.h @@ -251,7 +251,7 @@ struct tcf_proto { struct qdisc_skb_cb { unsigned int pkt_len; u16 slave_dev_queue_mapping; - u16 _pad; + u16 tc_classid; #define QDISC_CB_PRIV_LEN 20 unsigned char data[QDISC_CB_PRIV_LEN]; }; @@ -402,6 +402,7 @@ void __qdisc_calculate_pkt_len(struct sk_buff *skb, const struct qdisc_size_table *stab); bool tcf_destroy(struct tcf_proto *tp, bool 
force); void tcf_destroy_chain(struct tcf_proto __rcu **fl); +int skb_do_redirect(struct sk_buff *); /* Reset all TX qdiscs greater then index of a device. */ static inline void qdisc_reset_all_tx_gt(struct net_device *dev, unsigned int i) diff --git a/include/net/sock.h b/include/net/sock.h index 7aa78440559a..f570e75e3da9 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -150,6 +150,10 @@ typedef __u64 __bitwise __addrpair; * @skc_node: main hash linkage for various protocol lookup tables * @skc_nulls_node: main hash linkage for TCP/UDP/UDP-Lite protocol * @skc_tx_queue_mapping: tx queue number for this connection + * @skc_flags: place holder for sk_flags + * %SO_LINGER (l_onoff), %SO_BROADCAST, %SO_KEEPALIVE, + * %SO_OOBINLINE settings, %SO_TIMESTAMPING settings + * @skc_incoming_cpu: record/match cpu processing incoming packets * @skc_refcnt: reference count * * This is the minimal network layer representation of sockets, the header @@ -200,6 +204,16 @@ struct sock_common { atomic64_t skc_cookie; + /* following fields are padding to force + * offset(struct sock, sk_refcnt) == 128 on 64bit arches + * assuming IPV6 is enabled. We use this padding differently + * for different kind of 'sockets' + */ + union { + unsigned long skc_flags; + struct sock *skc_listener; /* request_sock */ + struct inet_timewait_death_row *skc_tw_dr; /* inet_timewait_sock */ + }; /* * fields between dontcopy_begin/dontcopy_end * are not copied in sock_copy() @@ -212,9 +226,20 @@ struct sock_common { struct hlist_nulls_node skc_nulls_node; }; int skc_tx_queue_mapping; + union { + int skc_incoming_cpu; + u32 skc_rcv_wnd; + u32 skc_tw_rcv_nxt; /* struct tcp_timewait_sock */ + }; + atomic_t skc_refcnt; /* private: */ int skc_dontcopy_end[0]; + union { + u32 skc_rxhash; + u32 skc_window_clamp; + u32 skc_tw_snd_nxt; /* struct tcp_timewait_sock */ + }; /* public: */ }; @@ -243,8 +268,6 @@ struct cg_proto; * @sk_pacing_rate: Pacing rate (if supported by transport/packet scheduler) * @sk_max_pacing_rate: Maximum pacing rate (%SO_MAX_PACING_RATE) * @sk_sndbuf: size of send buffer in bytes - * @sk_flags: %SO_LINGER (l_onoff), %SO_BROADCAST, %SO_KEEPALIVE, - * %SO_OOBINLINE settings, %SO_TIMESTAMPING settings * @sk_no_check_tx: %SO_NO_CHECK setting, set checksum in TX packets * @sk_no_check_rx: allow zero checksum in RX packets * @sk_route_caps: route capabilities (e.g. %NETIF_F_TSO) @@ -273,8 +296,6 @@ struct cg_proto; * @sk_rcvlowat: %SO_RCVLOWAT setting * @sk_rcvtimeo: %SO_RCVTIMEO setting * @sk_sndtimeo: %SO_SNDTIMEO setting - * @sk_rxhash: flow hash received from netif layer - * @sk_incoming_cpu: record cpu processing incoming packets * @sk_txhash: computed flow hash for use on transmit * @sk_filter: socket filtering instructions * @sk_timer: sock cleanup timer @@ -331,6 +352,9 @@ struct sock { #define sk_v6_daddr __sk_common.skc_v6_daddr #define sk_v6_rcv_saddr __sk_common.skc_v6_rcv_saddr #define sk_cookie __sk_common.skc_cookie +#define sk_incoming_cpu __sk_common.skc_incoming_cpu +#define sk_flags __sk_common.skc_flags +#define sk_rxhash __sk_common.skc_rxhash socket_lock_t sk_lock; struct sk_buff_head sk_receive_queue; @@ -350,14 +374,6 @@ struct sock { } sk_backlog; #define sk_rmem_alloc sk_backlog.rmem_alloc int sk_forward_alloc; -#ifdef CONFIG_RPS - __u32 sk_rxhash; -#endif - u16 sk_incoming_cpu; - /* 16bit hole - * Warned : sk_incoming_cpu can be set from softirq, - * Do not use this hole without fully understanding possible issues. 
- */ __u32 sk_txhash; #ifdef CONFIG_NET_RX_BUSY_POLL @@ -373,7 +389,6 @@ struct sock { #ifdef CONFIG_XFRM struct xfrm_policy *sk_policy[2]; #endif - unsigned long sk_flags; struct dst_entry *sk_rx_dst; struct dst_entry __rcu *sk_dst_cache; spinlock_t sk_dst_lock; @@ -759,7 +774,7 @@ static inline int sk_memalloc_socks(void) #endif -static inline gfp_t sk_gfp_atomic(struct sock *sk, gfp_t gfp_mask) +static inline gfp_t sk_gfp_atomic(const struct sock *sk, gfp_t gfp_mask) { return GFP_ATOMIC | (sk->sk_allocation & __GFP_MEMALLOC); } @@ -828,6 +843,14 @@ static inline __must_check int sk_add_backlog(struct sock *sk, struct sk_buff *s if (sk_rcvqueues_full(sk, limit)) return -ENOBUFS; + /* + * If the skb was allocated from pfmemalloc reserves, only + * allow SOCK_MEMALLOC sockets to use it as this socket is + * helping free memory + */ + if (skb_pfmemalloc(skb) && !sock_flag(sk, SOCK_MEMALLOC)) + return -ENOMEM; + __sk_add_backlog(sk, skb); sk->sk_backlog.len += skb->truesize; return 0; @@ -1514,6 +1537,13 @@ void sock_kfree_s(struct sock *sk, void *mem, int size); void sock_kzfree_s(struct sock *sk, void *mem, int size); void sk_send_sigurg(struct sock *sk); +struct sockcm_cookie { + u32 mark; +}; + +int sock_cmsg_send(struct sock *sk, struct msghdr *msg, + struct sockcm_cookie *sockc); + /* * Functions to fill in entries in struct proto_ops when a protocol * does not implement a particular function. @@ -1654,12 +1684,16 @@ static inline void sock_graft(struct sock *sk, struct socket *parent) kuid_t sock_i_uid(struct sock *sk); unsigned long sock_i_ino(struct sock *sk); -static inline void sk_set_txhash(struct sock *sk) +static inline u32 net_tx_rndhash(void) { - sk->sk_txhash = prandom_u32(); + u32 v = prandom_u32(); - if (unlikely(!sk->sk_txhash)) - sk->sk_txhash = 1; + return v ?: 1; +} + +static inline void sk_set_txhash(struct sock *sk) +{ + sk->sk_txhash = net_tx_rndhash(); } static inline void sk_rethink_txhash(struct sock *sk) @@ -1917,6 +1951,8 @@ static inline void skb_set_hash_from_sk(struct sk_buff *skb, struct sock *sk) } } +void skb_set_owner_w(struct sk_buff *skb, struct sock *sk); + /* * Queue a received datagram if it will fit. Stream and sequenced * protocols can't normally use this as they need to fit buffers in @@ -1925,21 +1961,6 @@ static inline void skb_set_hash_from_sk(struct sk_buff *skb, struct sock *sk) * Inlined as it's very short and called for pretty much every * packet ever received. 
*/ - -static inline void skb_set_owner_w(struct sk_buff *skb, struct sock *sk) -{ - skb_orphan(skb); - skb->sk = sk; - skb->destructor = sock_wfree; - skb_set_hash_from_sk(skb, sk); - /* - * We used to take a refcount on sk, but following operation - * is enough to guarantee sk_free() wont free this sock until - * all in-flight packets are completed - */ - atomic_add(skb->truesize, &sk->sk_wmem_alloc); -} - static inline void skb_set_owner_r(struct sk_buff *skb, struct sock *sk) { skb_orphan(skb); @@ -2197,6 +2218,14 @@ static inline bool sk_fullsock(const struct sock *sk) return (1 << sk->sk_state) & ~(TCPF_TIME_WAIT | TCPF_NEW_SYN_RECV); } +/* This helper checks if a socket is a LISTEN or NEW_SYN_RECV + * SYNACK messages can be attached to either ones (depending on SYNCOOKIE) + */ +static inline bool sk_listener(const struct sock *sk) +{ + return (1 << sk->sk_state) & (TCPF_LISTEN | TCPF_NEW_SYN_RECV); +} + void sock_enable_timestamp(struct sock *sk, int flag); int sock_get_timestamp(struct sock *, struct timeval __user *); int sock_get_timestampns(struct sock *, struct timespec __user *); diff --git a/include/net/switchdev.h b/include/net/switchdev.h index 319baab3b48e..bc865e244efe 100644 --- a/include/net/switchdev.h +++ b/include/net/switchdev.h @@ -1,6 +1,6 @@ /* * include/net/switchdev.h - Switch device API - * Copyright (c) 2014 Jiri Pirko <jiri@resnulli.us> + * Copyright (c) 2014-2015 Jiri Pirko <jiri@resnulli.us> * Copyright (c) 2014-2015 Scott Feldman <sfeldma@gmail.com> * * This program is free software; you can redistribute it and/or modify @@ -13,70 +13,109 @@ #include <linux/netdevice.h> #include <linux/notifier.h> +#include <linux/list.h> +#include <net/ip_fib.h> #define SWITCHDEV_F_NO_RECURSE BIT(0) +#define SWITCHDEV_F_SKIP_EOPNOTSUPP BIT(1) +#define SWITCHDEV_F_DEFER BIT(2) -enum switchdev_trans { - SWITCHDEV_TRANS_NONE, - SWITCHDEV_TRANS_PREPARE, - SWITCHDEV_TRANS_ABORT, - SWITCHDEV_TRANS_COMMIT, +struct switchdev_trans_item { + struct list_head list; + void *data; + void (*destructor)(const void *data); }; +struct switchdev_trans { + struct list_head item_list; + bool ph_prepare; +}; + +static inline bool switchdev_trans_ph_prepare(struct switchdev_trans *trans) +{ + return trans && trans->ph_prepare; +} + +static inline bool switchdev_trans_ph_commit(struct switchdev_trans *trans) +{ + return trans && !trans->ph_prepare; +} + enum switchdev_attr_id { - SWITCHDEV_ATTR_UNDEFINED, - SWITCHDEV_ATTR_PORT_PARENT_ID, - SWITCHDEV_ATTR_PORT_STP_STATE, - SWITCHDEV_ATTR_PORT_BRIDGE_FLAGS, + SWITCHDEV_ATTR_ID_UNDEFINED, + SWITCHDEV_ATTR_ID_PORT_PARENT_ID, + SWITCHDEV_ATTR_ID_PORT_STP_STATE, + SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS, + SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME, }; struct switchdev_attr { enum switchdev_attr_id id; - enum switchdev_trans trans; u32 flags; union { struct netdev_phys_item_id ppid; /* PORT_PARENT_ID */ u8 stp_state; /* PORT_STP_STATE */ unsigned long brport_flags; /* PORT_BRIDGE_FLAGS */ + u32 ageing_time; /* BRIDGE_AGEING_TIME */ } u; }; -struct fib_info; - enum switchdev_obj_id { - SWITCHDEV_OBJ_UNDEFINED, - SWITCHDEV_OBJ_PORT_VLAN, - SWITCHDEV_OBJ_IPV4_FIB, - SWITCHDEV_OBJ_PORT_FDB, + SWITCHDEV_OBJ_ID_UNDEFINED, + SWITCHDEV_OBJ_ID_PORT_VLAN, + SWITCHDEV_OBJ_ID_IPV4_FIB, + SWITCHDEV_OBJ_ID_PORT_FDB, }; struct switchdev_obj { enum switchdev_obj_id id; - enum switchdev_trans trans; - int (*cb)(struct net_device *dev, struct switchdev_obj *obj); - union { - struct switchdev_obj_vlan { /* PORT_VLAN */ - u16 flags; - u16 vid_begin; - u16 vid_end; - } vlan; - 
struct switchdev_obj_ipv4_fib { /* IPV4_FIB */ - u32 dst; - int dst_len; - struct fib_info *fi; - u8 tos; - u8 type; - u32 nlflags; - u32 tb_id; - } ipv4_fib; - struct switchdev_obj_fdb { /* PORT_FDB */ - const unsigned char *addr; - u16 vid; - u16 ndm_state; - } fdb; - } u; + u32 flags; }; +/* SWITCHDEV_OBJ_ID_PORT_VLAN */ +struct switchdev_obj_port_vlan { + struct switchdev_obj obj; + u16 flags; + u16 vid_begin; + u16 vid_end; +}; + +#define SWITCHDEV_OBJ_PORT_VLAN(obj) \ + container_of(obj, struct switchdev_obj_port_vlan, obj) + +/* SWITCHDEV_OBJ_ID_IPV4_FIB */ +struct switchdev_obj_ipv4_fib { + struct switchdev_obj obj; + u32 dst; + int dst_len; + struct fib_info fi; + u8 tos; + u8 type; + u32 nlflags; + u32 tb_id; +}; + +#define SWITCHDEV_OBJ_IPV4_FIB(obj) \ + container_of(obj, struct switchdev_obj_ipv4_fib, obj) + +/* SWITCHDEV_OBJ_ID_PORT_FDB */ +struct switchdev_obj_port_fdb { + struct switchdev_obj obj; + unsigned char addr[ETH_ALEN]; + u16 vid; + u16 ndm_state; +}; + +#define SWITCHDEV_OBJ_PORT_FDB(obj) \ + container_of(obj, struct switchdev_obj_port_fdb, obj) + +void switchdev_trans_item_enqueue(struct switchdev_trans *trans, + void *data, void (*destructor)(void const *), + struct switchdev_trans_item *tritem); +void *switchdev_trans_item_dequeue(struct switchdev_trans *trans); + +typedef int switchdev_obj_dump_cb_t(struct switchdev_obj *obj); + /** * struct switchdev_ops - switchdev operations * @@ -84,23 +123,26 @@ struct switchdev_obj { * * @switchdev_port_attr_set: Set a port attribute (see switchdev_attr). * - * @switchdev_port_obj_add: Add an object to port (see switchdev_obj). + * @switchdev_port_obj_add: Add an object to port (see switchdev_obj_*). * - * @switchdev_port_obj_del: Delete an object from port (see switchdev_obj). + * @switchdev_port_obj_del: Delete an object from port (see switchdev_obj_*). * - * @switchdev_port_obj_dump: Dump port objects (see switchdev_obj). + * @switchdev_port_obj_dump: Dump port objects (see switchdev_obj_*). 
*/ struct switchdev_ops { int (*switchdev_port_attr_get)(struct net_device *dev, struct switchdev_attr *attr); int (*switchdev_port_attr_set)(struct net_device *dev, - struct switchdev_attr *attr); + const struct switchdev_attr *attr, + struct switchdev_trans *trans); int (*switchdev_port_obj_add)(struct net_device *dev, - struct switchdev_obj *obj); + const struct switchdev_obj *obj, + struct switchdev_trans *trans); int (*switchdev_port_obj_del)(struct net_device *dev, - struct switchdev_obj *obj); + const struct switchdev_obj *obj); int (*switchdev_port_obj_dump)(struct net_device *dev, - struct switchdev_obj *obj); + struct switchdev_obj *obj, + switchdev_obj_dump_cb_t *cb); }; enum switchdev_notifier_type { @@ -126,13 +168,17 @@ switchdev_notifier_info_to_dev(const struct switchdev_notifier_info *info) #ifdef CONFIG_NET_SWITCHDEV +void switchdev_deferred_process(void); int switchdev_port_attr_get(struct net_device *dev, struct switchdev_attr *attr); int switchdev_port_attr_set(struct net_device *dev, - struct switchdev_attr *attr); -int switchdev_port_obj_add(struct net_device *dev, struct switchdev_obj *obj); -int switchdev_port_obj_del(struct net_device *dev, struct switchdev_obj *obj); -int switchdev_port_obj_dump(struct net_device *dev, struct switchdev_obj *obj); + const struct switchdev_attr *attr); +int switchdev_port_obj_add(struct net_device *dev, + const struct switchdev_obj *obj); +int switchdev_port_obj_del(struct net_device *dev, + const struct switchdev_obj *obj); +int switchdev_port_obj_dump(struct net_device *dev, struct switchdev_obj *obj, + switchdev_obj_dump_cb_t *cb); int register_switchdev_notifier(struct notifier_block *nb); int unregister_switchdev_notifier(struct notifier_block *nb); int call_switchdev_notifiers(unsigned long val, struct net_device *dev, @@ -164,6 +210,10 @@ void switchdev_port_fwd_mark_set(struct net_device *dev, #else +static inline void switchdev_deferred_process(void) +{ +} + static inline int switchdev_port_attr_get(struct net_device *dev, struct switchdev_attr *attr) { @@ -171,25 +221,26 @@ static inline int switchdev_port_attr_get(struct net_device *dev, } static inline int switchdev_port_attr_set(struct net_device *dev, - struct switchdev_attr *attr) + const struct switchdev_attr *attr) { return -EOPNOTSUPP; } static inline int switchdev_port_obj_add(struct net_device *dev, - struct switchdev_obj *obj) + const struct switchdev_obj *obj) { return -EOPNOTSUPP; } static inline int switchdev_port_obj_del(struct net_device *dev, - struct switchdev_obj *obj) + const struct switchdev_obj *obj) { return -EOPNOTSUPP; } static inline int switchdev_port_obj_dump(struct net_device *dev, - struct switchdev_obj *obj) + const struct switchdev_obj *obj, + switchdev_obj_dump_cb_t *cb) { return -EOPNOTSUPP; } diff --git a/include/net/tc_act/tc_connmark.h b/include/net/tc_act/tc_connmark.h index 5c1104c2e24f..02caa406611b 100644 --- a/include/net/tc_act/tc_connmark.h +++ b/include/net/tc_act/tc_connmark.h @@ -5,6 +5,7 @@ struct tcf_connmark_info { struct tcf_common common; + struct net *net; u16 zone; }; diff --git a/include/net/tcp.h b/include/net/tcp.h index 0cab28cd43a9..f80e74c5ad18 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -279,6 +279,7 @@ extern int sysctl_tcp_limit_output_bytes; extern int sysctl_tcp_challenge_ack_limit; extern unsigned int sysctl_tcp_notsent_lowat; extern int sysctl_tcp_min_tso_segs; +extern int sysctl_tcp_min_rtt_wlen; extern int sysctl_tcp_autocorking; extern int sysctl_tcp_invalid_ratelimit; extern int 
sysctl_tcp_pacing_ss_ratio; @@ -365,8 +366,7 @@ void tcp_wfree(struct sk_buff *skb); void tcp_write_timer_handler(struct sock *sk); void tcp_delack_timer_handler(struct sock *sk); int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg); -int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb, - const struct tcphdr *th, unsigned int len); +int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb); void tcp_rcv_established(struct sock *sk, struct sk_buff *skb, const struct tcphdr *th, unsigned int len); void tcp_rcv_space_adjust(struct sock *sk); @@ -451,19 +451,22 @@ void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb); void tcp_v4_mtu_reduced(struct sock *sk); void tcp_req_err(struct sock *sk, u32 seq); int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb); -struct sock *tcp_create_openreq_child(struct sock *sk, +struct sock *tcp_create_openreq_child(const struct sock *sk, struct request_sock *req, struct sk_buff *skb); void tcp_ca_openreq_child(struct sock *sk, const struct dst_entry *dst); -struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb, +struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb, struct request_sock *req, - struct dst_entry *dst); + struct dst_entry *dst, + struct request_sock *req_unhash, + bool *own_req); int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb); int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len); int tcp_connect(struct sock *sk); -struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst, +struct sk_buff *tcp_make_synack(const struct sock *sk, struct dst_entry *dst, struct request_sock *req, - struct tcp_fastopen_cookie *foc); + struct tcp_fastopen_cookie *foc, + bool attach_req); int tcp_disconnect(struct sock *sk, int flags); void tcp_finish_connect(struct sock *sk, struct sk_buff *skb); @@ -492,8 +495,9 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb); /* syncookies: remember time of last synqueue overflow * But do not dirty this field too often (once per second is enough) + * It is racy as we do not hold a lock, but race is very minor. 
*/ -static inline void tcp_synq_overflow(struct sock *sk) +static inline void tcp_synq_overflow(const struct sock *sk) { unsigned long last_overflow = tcp_sk(sk)->rx_opt.ts_recent_stamp; unsigned long now = jiffies; @@ -520,8 +524,7 @@ static inline u32 tcp_cookie_time(void) u32 __cookie_v4_init_sequence(const struct iphdr *iph, const struct tcphdr *th, u16 *mssp); -__u32 cookie_v4_init_sequence(struct sock *sk, const struct sk_buff *skb, - __u16 *mss); +__u32 cookie_v4_init_sequence(const struct sk_buff *skb, __u16 *mss); __u32 cookie_init_timestamp(struct request_sock *req); bool cookie_timestamp_decode(struct tcp_options_received *opt); bool cookie_ecn_ok(const struct tcp_options_received *opt, @@ -534,8 +537,7 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb); u32 __cookie_v6_init_sequence(const struct ipv6hdr *iph, const struct tcphdr *th, u16 *mssp); -__u32 cookie_v6_init_sequence(struct sock *sk, const struct sk_buff *skb, - __u16 *mss); +__u32 cookie_v6_init_sequence(const struct sk_buff *skb, __u16 *mss); #endif /* tcp_output.c */ @@ -565,7 +567,9 @@ bool tcp_schedule_loss_probe(struct sock *sk); /* tcp_input.c */ void tcp_resume_early_retransmit(struct sock *sk); void tcp_rearm_rto(struct sock *sk); +void tcp_synack_rtt_meas(struct sock *sk, struct request_sock *req); void tcp_reset(struct sock *sk); +void tcp_skb_mark_lost_uncond_verify(struct tcp_sock *tp, struct sk_buff *skb); /* tcp_timer.c */ void tcp_init_xmit_timers(struct sock *); @@ -671,6 +675,12 @@ static inline bool tcp_ca_dst_locked(const struct dst_entry *dst) return dst_metric_locked(dst, RTAX_CC_ALGO); } +/* Minimum RTT in usec. ~0 means not available. */ +static inline u32 tcp_min_rtt(const struct tcp_sock *tp) +{ + return tp->rtt_min[0].rtt; +} + /* Compute the actual receive window we are currently advertising. * Rcv_nxt can be after the window if our peer push more data * than the offered window. 
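/*
 * Editorial aside, not part of the patch: a minimal sketch of how a caller
 * might use the tcp_min_rtt() helper added in the hunk above. Per the
 * helper's comment it reports the windowed minimum RTT in microseconds and
 * returns ~0 when no sample is available, so callers are expected to test
 * for that sentinel first. tcp_min_rtt() is taken from the hunk above;
 * the wrapper function name and the pr_debug() reporting are illustrative
 * assumptions only.
 */
#include <net/tcp.h>

static void example_report_min_rtt(struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	u32 min_rtt_us = tcp_min_rtt(tp);	/* windowed min RTT, in usec */

	if (min_rtt_us == ~0U)
		return;				/* no RTT sample seen yet */

	pr_debug("tcp: current windowed min RTT is %u us\n", min_rtt_us);
}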
@@ -1206,7 +1216,8 @@ static inline int tcp_full_space(const struct sock *sk) } extern void tcp_openreq_init_rwin(struct request_sock *req, - struct sock *sk, struct dst_entry *dst); + const struct sock *sk_listener, + const struct dst_entry *dst); void tcp_enter_memory_pressure(struct sock *sk); @@ -1370,16 +1381,16 @@ int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr, int family, const u8 *newkey, u8 newkeylen, gfp_t gfp); int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr, int family); -struct tcp_md5sig_key *tcp_v4_md5_lookup(struct sock *sk, +struct tcp_md5sig_key *tcp_v4_md5_lookup(const struct sock *sk, const struct sock *addr_sk); #ifdef CONFIG_TCP_MD5SIG -struct tcp_md5sig_key *tcp_md5_do_lookup(struct sock *sk, +struct tcp_md5sig_key *tcp_md5_do_lookup(const struct sock *sk, const union tcp_md5_addr *addr, int family); #define tcp_twsk_md5_key(twsk) ((twsk)->tw_md5_key) #else -static inline struct tcp_md5sig_key *tcp_md5_do_lookup(struct sock *sk, +static inline struct tcp_md5sig_key *tcp_md5_do_lookup(const struct sock *sk, const union tcp_md5_addr *addr, int family) { @@ -1420,10 +1431,10 @@ void tcp_free_fastopen_req(struct tcp_sock *tp); extern struct tcp_fastopen_context __rcu *tcp_fastopen_ctx; int tcp_fastopen_reset_cipher(void *key, unsigned int len); -bool tcp_try_fastopen(struct sock *sk, struct sk_buff *skb, - struct request_sock *req, - struct tcp_fastopen_cookie *foc, - struct dst_entry *dst); +struct sock *tcp_try_fastopen(struct sock *sk, struct sk_buff *skb, + struct request_sock *req, + struct tcp_fastopen_cookie *foc, + struct dst_entry *dst); void tcp_fastopen_init_key_once(bool publish); #define TCP_FASTOPEN_KEY_LENGTH 16 @@ -1618,7 +1629,6 @@ static inline bool tcp_stream_is_thin(struct tcp_sock *tp) /* /proc */ enum tcp_seq_states { TCP_SEQ_STATE_LISTENING, - TCP_SEQ_STATE_OPENREQ, TCP_SEQ_STATE_ESTABLISHED, }; @@ -1637,7 +1647,6 @@ struct tcp_iter_state { enum tcp_seq_states state; struct sock *syn_wait_sk; int bucket, offset, sbucket, num; - kuid_t uid; loff_t last_pos; }; @@ -1674,7 +1683,7 @@ int tcp4_proc_init(void); void tcp4_proc_exit(void); #endif -int tcp_rtx_synack(struct sock *sk, struct request_sock *req); +int tcp_rtx_synack(const struct sock *sk, struct request_sock *req); int tcp_conn_request(struct request_sock_ops *rsk_ops, const struct tcp_request_sock_ops *af_ops, struct sock *sk, struct sk_buff *skb); @@ -1682,7 +1691,7 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops, /* TCP af-specific functions */ struct tcp_sock_af_ops { #ifdef CONFIG_TCP_MD5SIG - struct tcp_md5sig_key *(*md5_lookup) (struct sock *sk, + struct tcp_md5sig_key *(*md5_lookup) (const struct sock *sk, const struct sock *addr_sk); int (*calc_md5_hash)(char *location, const struct tcp_md5sig_key *md5, @@ -1697,40 +1706,42 @@ struct tcp_sock_af_ops { struct tcp_request_sock_ops { u16 mss_clamp; #ifdef CONFIG_TCP_MD5SIG - struct tcp_md5sig_key *(*req_md5_lookup)(struct sock *sk, + struct tcp_md5sig_key *(*req_md5_lookup)(const struct sock *sk, const struct sock *addr_sk); int (*calc_md5_hash) (char *location, const struct tcp_md5sig_key *md5, const struct sock *sk, const struct sk_buff *skb); #endif - void (*init_req)(struct request_sock *req, struct sock *sk, + void (*init_req)(struct request_sock *req, + const struct sock *sk_listener, struct sk_buff *skb); #ifdef CONFIG_SYN_COOKIES - __u32 (*cookie_init_seq)(struct sock *sk, const struct sk_buff *skb, + __u32 (*cookie_init_seq)(const struct sk_buff *skb, __u16 *mss); #endif - struct 
dst_entry *(*route_req)(struct sock *sk, struct flowi *fl, + struct dst_entry *(*route_req)(const struct sock *sk, struct flowi *fl, const struct request_sock *req, bool *strict); __u32 (*init_seq)(const struct sk_buff *skb); - int (*send_synack)(struct sock *sk, struct dst_entry *dst, + int (*send_synack)(const struct sock *sk, struct dst_entry *dst, struct flowi *fl, struct request_sock *req, - u16 queue_mapping, struct tcp_fastopen_cookie *foc); - void (*queue_hash_add)(struct sock *sk, struct request_sock *req, - const unsigned long timeout); + struct tcp_fastopen_cookie *foc, + bool attach_req); }; #ifdef CONFIG_SYN_COOKIES static inline __u32 cookie_init_sequence(const struct tcp_request_sock_ops *ops, - struct sock *sk, struct sk_buff *skb, + const struct sock *sk, struct sk_buff *skb, __u16 *mss) { - return ops->cookie_init_seq(sk, skb, mss); + tcp_synq_overflow(sk); + NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SYNCOOKIESSENT); + return ops->cookie_init_seq(skb, mss); } #else static inline __u32 cookie_init_sequence(const struct tcp_request_sock_ops *ops, - struct sock *sk, struct sk_buff *skb, + const struct sock *sk, struct sk_buff *skb, __u16 *mss) { return 0; @@ -1742,6 +1753,19 @@ int tcpv4_offload_init(void); void tcp_v4_init(void); void tcp_init(void); +/* tcp_recovery.c */ + +/* Flags to enable various loss recovery features. See below */ +extern int sysctl_tcp_recovery; + +/* Use TCP RACK to detect (some) tail and retransmit losses */ +#define TCP_RACK_LOST_RETRANS 0x1 + +extern int tcp_rack_mark_lost(struct sock *sk); + +extern void tcp_rack_advance(struct tcp_sock *tp, + const struct skb_mstamp *xmit_time, u8 sacked); + /* * Save and compile IPv4 options, return a pointer to it */ diff --git a/include/net/tso.h b/include/net/tso.h index 47e5444f7d15..b7be852bfe9d 100644 --- a/include/net/tso.h +++ b/include/net/tso.h @@ -8,6 +8,7 @@ struct tso_t { void *data; size_t size; u16 ip_id; + bool ipv6; u32 tcp_seq; }; diff --git a/include/net/vrf.h b/include/net/vrf.h deleted file mode 100644 index 593e6094ddd4..000000000000 --- a/include/net/vrf.h +++ /dev/null @@ -1,178 +0,0 @@ -/* - * include/net/net_vrf.h - adds vrf dev structure definitions - * Copyright (c) 2015 Cumulus Networks - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. 
- */ - -#ifndef __LINUX_NET_VRF_H -#define __LINUX_NET_VRF_H - -struct net_vrf_dev { - struct rcu_head rcu; - int ifindex; /* ifindex of master dev */ - u32 tb_id; /* table id for VRF */ -}; - -struct slave { - struct list_head list; - struct net_device *dev; -}; - -struct slave_queue { - struct list_head all_slaves; -}; - -struct net_vrf { - struct slave_queue queue; - struct rtable *rth; - u32 tb_id; -}; - - -#if IS_ENABLED(CONFIG_NET_VRF) -/* called with rcu_read_lock() */ -static inline int vrf_master_ifindex_rcu(const struct net_device *dev) -{ - struct net_vrf_dev *vrf_ptr; - int ifindex = 0; - - if (!dev) - return 0; - - if (netif_is_vrf(dev)) { - ifindex = dev->ifindex; - } else { - vrf_ptr = rcu_dereference(dev->vrf_ptr); - if (vrf_ptr) - ifindex = vrf_ptr->ifindex; - } - - return ifindex; -} - -static inline int vrf_master_ifindex(const struct net_device *dev) -{ - int ifindex; - - rcu_read_lock(); - ifindex = vrf_master_ifindex_rcu(dev); - rcu_read_unlock(); - - return ifindex; -} - -/* called with rcu_read_lock */ -static inline u32 vrf_dev_table_rcu(const struct net_device *dev) -{ - u32 tb_id = 0; - - if (dev) { - struct net_vrf_dev *vrf_ptr; - - vrf_ptr = rcu_dereference(dev->vrf_ptr); - if (vrf_ptr) - tb_id = vrf_ptr->tb_id; - } - return tb_id; -} - -static inline u32 vrf_dev_table(const struct net_device *dev) -{ - u32 tb_id; - - rcu_read_lock(); - tb_id = vrf_dev_table_rcu(dev); - rcu_read_unlock(); - - return tb_id; -} - -static inline u32 vrf_dev_table_ifindex(struct net *net, int ifindex) -{ - struct net_device *dev; - u32 tb_id = 0; - - if (!ifindex) - return 0; - - rcu_read_lock(); - - dev = dev_get_by_index_rcu(net, ifindex); - if (dev) - tb_id = vrf_dev_table_rcu(dev); - - rcu_read_unlock(); - - return tb_id; -} - -/* called with rtnl */ -static inline u32 vrf_dev_table_rtnl(const struct net_device *dev) -{ - u32 tb_id = 0; - - if (dev) { - struct net_vrf_dev *vrf_ptr; - - vrf_ptr = rtnl_dereference(dev->vrf_ptr); - if (vrf_ptr) - tb_id = vrf_ptr->tb_id; - } - return tb_id; -} - -/* caller has already checked netif_is_vrf(dev) */ -static inline struct rtable *vrf_dev_get_rth(const struct net_device *dev) -{ - struct rtable *rth = ERR_PTR(-ENETUNREACH); - struct net_vrf *vrf = netdev_priv(dev); - - if (vrf) { - rth = vrf->rth; - atomic_inc(&rth->dst.__refcnt); - } - return rth; -} - -#else -static inline int vrf_master_ifindex_rcu(const struct net_device *dev) -{ - return 0; -} - -static inline int vrf_master_ifindex(const struct net_device *dev) -{ - return 0; -} - -static inline u32 vrf_dev_table_rcu(const struct net_device *dev) -{ - return 0; -} - -static inline u32 vrf_dev_table(const struct net_device *dev) -{ - return 0; -} - -static inline u32 vrf_dev_table_ifindex(struct net *net, int ifindex) -{ - return 0; -} - -static inline u32 vrf_dev_table_rtnl(const struct net_device *dev) -{ - return 0; -} - -static inline struct rtable *vrf_dev_get_rth(const struct net_device *dev) -{ - return ERR_PTR(-ENETUNREACH); -} -#endif - -#endif /* __LINUX_NET_VRF_H */ diff --git a/include/net/vxlan.h b/include/net/vxlan.h index 480a319b4c92..c1c899c3a51b 100644 --- a/include/net/vxlan.h +++ b/include/net/vxlan.h @@ -152,7 +152,10 @@ struct vxlan_config { struct vxlan_dev { struct hlist_node hlist; /* vni hash table */ struct list_head next; /* vxlan's per namespace list */ - struct vxlan_sock *vn_sock; /* listening socket */ + struct vxlan_sock *vn4_sock; /* listening socket for IPv4 */ +#if IS_ENABLED(CONFIG_IPV6) + struct vxlan_sock *vn6_sock; /* listening socket for 
IPv6 */ +#endif struct net_device *dev; struct net *net; /* netns for packet i/o */ struct vxlan_rdst default_dst; /* default destination */ @@ -195,9 +198,14 @@ struct vxlan_dev { struct net_device *vxlan_dev_create(struct net *net, const char *name, u8 name_assign_type, struct vxlan_config *conf); -static inline __be16 vxlan_dev_dst_port(struct vxlan_dev *vxlan) +static inline __be16 vxlan_dev_dst_port(struct vxlan_dev *vxlan, + unsigned short family) { - return inet_sk(vxlan->vn_sock->sock->sk)->inet_sport; +#if IS_ENABLED(CONFIG_IPV6) + if (family == AF_INET6) + return inet_sk(vxlan->vn6_sock->sock->sk)->inet_sport; +#endif + return inet_sk(vxlan->vn4_sock->sock->sk)->inet_sport; } static inline netdev_features_t vxlan_features_check(struct sk_buff *skb, diff --git a/include/net/xfrm.h b/include/net/xfrm.h index 312e3fee9ccf..4a9c21f9b4ea 100644 --- a/include/net/xfrm.h +++ b/include/net/xfrm.h @@ -296,8 +296,6 @@ struct xfrm_policy_afinfo { struct flowi *fl, int reverse); int (*get_tos)(const struct flowi *fl); - void (*init_dst)(struct net *net, - struct xfrm_dst *dst); int (*init_path)(struct xfrm_dst *path, struct dst_entry *dst, int nfheader_len); @@ -335,7 +333,7 @@ struct xfrm_state_afinfo { const xfrm_address_t *saddr); int (*tmpl_sort)(struct xfrm_tmpl **dst, struct xfrm_tmpl **src, int n); int (*state_sort)(struct xfrm_state **dst, struct xfrm_state **src, int n); - int (*output)(struct sock *sk, struct sk_buff *skb); + int (*output)(struct net *net, struct sock *sk, struct sk_buff *skb); int (*output_finish)(struct sock *sk, struct sk_buff *skb); int (*extract_input)(struct xfrm_state *x, struct sk_buff *skb); @@ -1529,7 +1527,7 @@ static inline int xfrm4_rcv_spi(struct sk_buff *skb, int nexthdr, __be32 spi) int xfrm4_extract_output(struct xfrm_state *x, struct sk_buff *skb); int xfrm4_prepare_output(struct xfrm_state *x, struct sk_buff *skb); -int xfrm4_output(struct sock *sk, struct sk_buff *skb); +int xfrm4_output(struct net *net, struct sock *sk, struct sk_buff *skb); int xfrm4_output_finish(struct sock *sk, struct sk_buff *skb); int xfrm4_rcv_cb(struct sk_buff *skb, u8 protocol, int err); int xfrm4_protocol_register(struct xfrm4_protocol *handler, unsigned char protocol); @@ -1554,7 +1552,7 @@ __be32 xfrm6_tunnel_alloc_spi(struct net *net, xfrm_address_t *saddr); __be32 xfrm6_tunnel_spi_lookup(struct net *net, const xfrm_address_t *saddr); int xfrm6_extract_output(struct xfrm_state *x, struct sk_buff *skb); int xfrm6_prepare_output(struct xfrm_state *x, struct sk_buff *skb); -int xfrm6_output(struct sock *sk, struct sk_buff *skb); +int xfrm6_output(struct net *net, struct sock *sk, struct sk_buff *skb); int xfrm6_output_finish(struct sock *sk, struct sk_buff *skb); int xfrm6_find_1stfragopt(struct xfrm_state *x, struct sk_buff *skb, u8 **prevhdr); diff --git a/include/rdma/opa_port_info.h b/include/rdma/opa_port_info.h index 391dae1931c0..a0fa975cd1c1 100644 --- a/include/rdma/opa_port_info.h +++ b/include/rdma/opa_port_info.h @@ -294,8 +294,8 @@ struct opa_port_states { struct opa_port_state_info { struct opa_port_states port_states; - u16 link_width_downgrade_tx_active; - u16 link_width_downgrade_rx_active; + __be16 link_width_downgrade_tx_active; + __be16 link_width_downgrade_rx_active; }; struct opa_port_info { diff --git a/include/sound/da7213.h b/include/sound/da7213.h index 673f5c39cbf2..e7eac8979995 100644 --- a/include/sound/da7213.h +++ b/include/sound/da7213.h @@ -44,9 +44,6 @@ struct da7213_platform_data { enum da7213_dmic_data_sel dmic_data_sel; enum 
da7213_dmic_samplephase dmic_samplephase; enum da7213_dmic_clk_rate dmic_clk_rate; - - /* MCLK squaring config */ - bool mclk_squaring; }; #endif /* _DA7213_PDATA_H */ diff --git a/include/sound/da7219-aad.h b/include/sound/da7219-aad.h new file mode 100644 index 000000000000..17802fb86ec4 --- /dev/null +++ b/include/sound/da7219-aad.h @@ -0,0 +1,99 @@ +/* + * da7219-aad.h - DA7322 ASoC Codec AAD Driver Platform Data + * + * Copyright (c) 2015 Dialog Semiconductor Ltd. + * + * Author: Adam Thomson <Adam.Thomson.Opensource@diasemi.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + */ + +#ifndef __DA7219_AAD_PDATA_H +#define __DA7219_AAD_PDATA_H + +enum da7219_aad_micbias_pulse_lvl { + DA7219_AAD_MICBIAS_PULSE_LVL_OFF = 0, + DA7219_AAD_MICBIAS_PULSE_LVL_2_8V = 6, + DA7219_AAD_MICBIAS_PULSE_LVL_2_9V, +}; + +enum da7219_aad_btn_cfg { + DA7219_AAD_BTN_CFG_2MS = 1, + DA7219_AAD_BTN_CFG_5MS, + DA7219_AAD_BTN_CFG_10MS, + DA7219_AAD_BTN_CFG_50MS, + DA7219_AAD_BTN_CFG_100MS, + DA7219_AAD_BTN_CFG_200MS, + DA7219_AAD_BTN_CFG_500MS, +}; + +enum da7219_aad_mic_det_thr { + DA7219_AAD_MIC_DET_THR_200_OHMS = 0, + DA7219_AAD_MIC_DET_THR_500_OHMS, + DA7219_AAD_MIC_DET_THR_750_OHMS, + DA7219_AAD_MIC_DET_THR_1000_OHMS, +}; + +enum da7219_aad_jack_ins_deb { + DA7219_AAD_JACK_INS_DEB_5MS = 0, + DA7219_AAD_JACK_INS_DEB_10MS, + DA7219_AAD_JACK_INS_DEB_20MS, + DA7219_AAD_JACK_INS_DEB_50MS, + DA7219_AAD_JACK_INS_DEB_100MS, + DA7219_AAD_JACK_INS_DEB_200MS, + DA7219_AAD_JACK_INS_DEB_500MS, + DA7219_AAD_JACK_INS_DEB_1S, +}; + +enum da7219_aad_jack_det_rate { + DA7219_AAD_JACK_DET_RATE_32_64MS = 0, + DA7219_AAD_JACK_DET_RATE_64_128MS, + DA7219_AAD_JACK_DET_RATE_128_256MS, + DA7219_AAD_JACK_DET_RATE_256_512MS, +}; + +enum da7219_aad_jack_rem_deb { + DA7219_AAD_JACK_REM_DEB_1MS = 0, + DA7219_AAD_JACK_REM_DEB_5MS, + DA7219_AAD_JACK_REM_DEB_10MS, + DA7219_AAD_JACK_REM_DEB_20MS, +}; + +enum da7219_aad_btn_avg { + DA7219_AAD_BTN_AVG_1 = 0, + DA7219_AAD_BTN_AVG_2, + DA7219_AAD_BTN_AVG_4, + DA7219_AAD_BTN_AVG_8, +}; + +enum da7219_aad_adc_1bit_rpt { + DA7219_AAD_ADC_1BIT_RPT_1 = 0, + DA7219_AAD_ADC_1BIT_RPT_2, + DA7219_AAD_ADC_1BIT_RPT_4, + DA7219_AAD_ADC_1BIT_RPT_8, +}; + +struct da7219_aad_pdata { + int irq; + + enum da7219_aad_micbias_pulse_lvl micbias_pulse_lvl; + u32 micbias_pulse_time; + enum da7219_aad_btn_cfg btn_cfg; + enum da7219_aad_mic_det_thr mic_det_thr; + enum da7219_aad_jack_ins_deb jack_ins_deb; + enum da7219_aad_jack_det_rate jack_det_rate; + enum da7219_aad_jack_rem_deb jack_rem_deb; + + u8 a_d_btn_thr; + u8 d_b_btn_thr; + u8 b_c_btn_thr; + u8 c_mic_btn_thr; + + enum da7219_aad_btn_avg btn_avg; + enum da7219_aad_adc_1bit_rpt adc_1bit_rpt; +}; + +#endif /* __DA7219_AAD_PDATA_H */ diff --git a/include/sound/da7219.h b/include/sound/da7219.h new file mode 100644 index 000000000000..3f39e135312d --- /dev/null +++ b/include/sound/da7219.h @@ -0,0 +1,55 @@ +/* + * da7219.h - DA7219 ASoC Codec Driver Platform Data + * + * Copyright (c) 2015 Dialog Semiconductor + * + * Author: Adam Thomson <Adam.Thomson.Opensource@diasemi.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
+ */ + +#ifndef __DA7219_PDATA_H +#define __DA7219_PDATA_H + +/* LDO */ +enum da7219_ldo_lvl_sel { + DA7219_LDO_LVL_SEL_1_05V = 0, + DA7219_LDO_LVL_SEL_1_10V, + DA7219_LDO_LVL_SEL_1_20V, + DA7219_LDO_LVL_SEL_1_40V, +}; + +/* Mic Bias */ +enum da7219_micbias_voltage { + DA7219_MICBIAS_1_8V = 1, + DA7219_MICBIAS_2_0V, + DA7219_MICBIAS_2_2V, + DA7219_MICBIAS_2_4V, + DA7219_MICBIAS_2_6V, +}; + +/* Mic input type */ +enum da7219_mic_amp_in_sel { + DA7219_MIC_AMP_IN_SEL_DIFF = 0, + DA7219_MIC_AMP_IN_SEL_SE_P, + DA7219_MIC_AMP_IN_SEL_SE_N, +}; + +struct da7219_aad_pdata; + +struct da7219_pdata { + /* Internal LDO */ + enum da7219_ldo_lvl_sel ldo_lvl_sel; + + /* Mic */ + enum da7219_micbias_voltage micbias_lvl; + enum da7219_mic_amp_in_sel mic_amp_in_sel; + + /* AAD */ + struct da7219_aad_pdata *aad_pdata; +}; + +#endif /* __DA7219_PDATA_H */ diff --git a/include/sound/designware_i2s.h b/include/sound/designware_i2s.h index 3a8fca9409a7..8966ba7c9629 100644 --- a/include/sound/designware_i2s.h +++ b/include/sound/designware_i2s.h @@ -38,6 +38,8 @@ struct i2s_clk_config_data { struct i2s_platform_data { #define DWC_I2S_PLAY (1 << 0) #define DWC_I2S_RECORD (1 << 1) + #define DW_I2S_SLAVE (1 << 2) + #define DW_I2S_MASTER (1 << 3) unsigned int cap; int channel; u32 snd_fmts; diff --git a/include/sound/hda_regmap.h b/include/sound/hda_regmap.h index df705908480a..2767c55a641e 100644 --- a/include/sound/hda_regmap.h +++ b/include/sound/hda_regmap.h @@ -67,7 +67,7 @@ int snd_hdac_regmap_update_raw(struct hdac_device *codec, unsigned int reg, * @reg: verb to write * @val: value to write * - * For writing an amp value, use snd_hda_regmap_amp_update(). + * For writing an amp value, use snd_hdac_regmap_update_amp(). */ static inline int snd_hdac_regmap_write(struct hdac_device *codec, hda_nid_t nid, @@ -85,7 +85,7 @@ snd_hdac_regmap_write(struct hdac_device *codec, hda_nid_t nid, * @mask: bit mask to update * @val: value to update * - * For updating an amp value, use snd_hda_regmap_amp_update(). + * For updating an amp value, use snd_hdac_regmap_update_amp(). 
*/ static inline int snd_hdac_regmap_update(struct hdac_device *codec, hda_nid_t nid, diff --git a/include/sound/hdaudio.h b/include/sound/hdaudio.h index 49bc836fcd84..e2b712c90d3f 100644 --- a/include/sound/hdaudio.h +++ b/include/sound/hdaudio.h @@ -21,6 +21,7 @@ struct hdac_stream; struct hdac_device; struct hdac_driver; struct hdac_widget_tree; +struct hda_device_id; /* * exported bus type @@ -28,16 +29,6 @@ struct hdac_widget_tree; extern struct bus_type snd_hda_bus_type; /* - * HDA device table - */ -struct hda_device_id { - __u32 vendor_id; - __u32 rev_id; - const char *name; - unsigned long driver_data; -}; - -/* * generic arrays */ struct snd_array { @@ -117,6 +108,8 @@ int snd_hdac_device_init(struct hdac_device *dev, struct hdac_bus *bus, void snd_hdac_device_exit(struct hdac_device *dev); int snd_hdac_device_register(struct hdac_device *codec); void snd_hdac_device_unregister(struct hdac_device *codec); +int snd_hdac_device_set_chip_name(struct hdac_device *codec, const char *name); +int snd_hdac_codec_modalias(struct hdac_device *hdac, char *buf, size_t size); int snd_hdac_refresh_widgets(struct hdac_device *codec); int snd_hdac_refresh_widget_sysfs(struct hdac_device *codec); @@ -147,6 +140,12 @@ int snd_hdac_query_supported_pcm(struct hdac_device *codec, hda_nid_t nid, bool snd_hdac_is_supported_format(struct hdac_device *codec, hda_nid_t nid, unsigned int format); +int snd_hdac_codec_read(struct hdac_device *hdac, hda_nid_t nid, + int flags, unsigned int verb, unsigned int parm); +int snd_hdac_codec_write(struct hdac_device *hdac, hda_nid_t nid, + int flags, unsigned int verb, unsigned int parm); +bool snd_hdac_check_power_state(struct hdac_device *hdac, + hda_nid_t nid, unsigned int target_state); /** * snd_hdac_read_parm - read a codec parameter * @codec: the codec object diff --git a/include/sound/hdaudio_ext.h b/include/sound/hdaudio_ext.h index 94210dcdb6ea..a4cadd9c297a 100644 --- a/include/sound/hdaudio_ext.h +++ b/include/sound/hdaudio_ext.h @@ -40,6 +40,13 @@ void snd_hdac_ext_bus_device_remove(struct hdac_ext_bus *ebus); #define hbus_to_ebus(_bus) \ container_of(_bus, struct hdac_ext_bus, bus) +#define HDA_CODEC_REV_EXT_ENTRY(_vid, _rev, _name, drv_data) \ + { .vendor_id = (_vid), .rev_id = (_rev), .name = (_name), \ + .api_version = HDA_DEV_ASOC, \ + .driver_data = (unsigned long)(drv_data) } +#define HDA_CODEC_EXT_ENTRY(_vid, _revid, _name, _drv_data) \ + HDA_CODEC_REV_EXT_ENTRY(_vid, _revid, _name, _drv_data) + int snd_hdac_ext_bus_parse_capabilities(struct hdac_ext_bus *sbus); void snd_hdac_ext_bus_ppcap_enable(struct hdac_ext_bus *chip, bool enable); void snd_hdac_ext_bus_ppcap_int_enable(struct hdac_ext_bus *chip, bool enable); diff --git a/include/sound/pcm.h b/include/sound/pcm.h index 691e7ee0a510..b0be09279943 100644 --- a/include/sound/pcm.h +++ b/include/sound/pcm.h @@ -265,12 +265,12 @@ struct snd_ratden { struct snd_pcm_hw_constraint_ratnums { int nrats; - struct snd_ratnum *rats; + const struct snd_ratnum *rats; }; struct snd_pcm_hw_constraint_ratdens { int nrats; - struct snd_ratden *rats; + const struct snd_ratden *rats; }; struct snd_pcm_hw_constraint_list { @@ -285,8 +285,6 @@ struct snd_pcm_hw_constraint_ranges { unsigned int mask; }; -struct snd_pcm_hwptr_log; - /* * userspace-provided audio timestamp config to kernel, * structure is for internal use only and filled with dedicated unpack routine @@ -404,10 +402,6 @@ struct snd_pcm_runtime { struct snd_pcm_hardware hw; struct snd_pcm_hw_constraints hw_constraints; - /* -- interrupt callbacks 
-- */ - void (*transfer_ack_begin)(struct snd_pcm_substream *substream); - void (*transfer_ack_end)(struct snd_pcm_substream *substream); - /* -- timer -- */ unsigned int timer_resolution; /* timer resolution */ int tstamp_type; /* timestamp type */ @@ -428,10 +422,6 @@ struct snd_pcm_runtime { /* -- OSS things -- */ struct snd_pcm_oss_runtime oss; #endif - -#ifdef CONFIG_SND_PCM_XRUN_DEBUG - struct snd_pcm_hwptr_log *hwptr_log; -#endif }; struct snd_pcm_group { /* keep linked substreams */ @@ -980,7 +970,7 @@ int snd_interval_list(struct snd_interval *i, unsigned int count, int snd_interval_ranges(struct snd_interval *i, unsigned int count, const struct snd_interval *list, unsigned int mask); int snd_interval_ratnum(struct snd_interval *i, - unsigned int rats_count, struct snd_ratnum *rats, + unsigned int rats_count, const struct snd_ratnum *rats, unsigned int *nump, unsigned int *denp); void _snd_pcm_hw_params_any(struct snd_pcm_hw_params *params); @@ -1010,11 +1000,11 @@ int snd_pcm_hw_constraint_ranges(struct snd_pcm_runtime *runtime, int snd_pcm_hw_constraint_ratnums(struct snd_pcm_runtime *runtime, unsigned int cond, snd_pcm_hw_param_t var, - struct snd_pcm_hw_constraint_ratnums *r); + const struct snd_pcm_hw_constraint_ratnums *r); int snd_pcm_hw_constraint_ratdens(struct snd_pcm_runtime *runtime, unsigned int cond, snd_pcm_hw_param_t var, - struct snd_pcm_hw_constraint_ratdens *r); + const struct snd_pcm_hw_constraint_ratdens *r); int snd_pcm_hw_constraint_msbits(struct snd_pcm_runtime *runtime, unsigned int cond, unsigned int width, @@ -1034,6 +1024,22 @@ int snd_pcm_hw_rule_add(struct snd_pcm_runtime *runtime, snd_pcm_hw_rule_func_t func, void *private, int dep, ...); +/** + * snd_pcm_hw_constraint_single() - Constrain parameter to a single value + * @runtime: PCM runtime instance + * @var: The hw_params variable to constrain + * @val: The value to constrain to + * + * Return: Positive if the value is changed, zero if it's not changed, or a + * negative error code. 
+ */ +static inline int snd_pcm_hw_constraint_single( + struct snd_pcm_runtime *runtime, snd_pcm_hw_param_t var, + unsigned int val) +{ + return snd_pcm_hw_constraint_minmax(runtime, var, val, val); +} + int snd_pcm_format_signed(snd_pcm_format_t format); int snd_pcm_format_unsigned(snd_pcm_format_t format); int snd_pcm_format_linear(snd_pcm_format_t format); @@ -1117,10 +1123,16 @@ static inline void snd_pcm_set_runtime_buffer(struct snd_pcm_substream *substrea * Timer interface */ +#ifdef CONFIG_SND_PCM_TIMER void snd_pcm_timer_resolution_change(struct snd_pcm_substream *substream); void snd_pcm_timer_init(struct snd_pcm_substream *substream); void snd_pcm_timer_done(struct snd_pcm_substream *substream); - +#else +static inline void +snd_pcm_timer_resolution_change(struct snd_pcm_substream *substream) {} +static inline void snd_pcm_timer_init(struct snd_pcm_substream *substream) {} +static inline void snd_pcm_timer_done(struct snd_pcm_substream *substream) {} +#endif /** * snd_pcm_gettime - Fill the timespec depending on the timestamp mode * @runtime: PCM runtime instance diff --git a/include/sound/pxa2xx-lib.h b/include/sound/pxa2xx-lib.h index 56e818e4a1cb..6ef629bde164 100644 --- a/include/sound/pxa2xx-lib.h +++ b/include/sound/pxa2xx-lib.h @@ -12,7 +12,6 @@ extern int __pxa2xx_pcm_hw_free(struct snd_pcm_substream *substream); extern int pxa2xx_pcm_trigger(struct snd_pcm_substream *substream, int cmd); extern snd_pcm_uframes_t pxa2xx_pcm_pointer(struct snd_pcm_substream *substream); extern int __pxa2xx_pcm_prepare(struct snd_pcm_substream *substream); -extern void pxa2xx_pcm_dma_irq(int dma_ch, void *dev_id); extern int __pxa2xx_pcm_open(struct snd_pcm_substream *substream); extern int __pxa2xx_pcm_close(struct snd_pcm_substream *substream); extern int pxa2xx_pcm_mmap(struct snd_pcm_substream *substream, diff --git a/include/sound/rcar_snd.h b/include/sound/rcar_snd.h deleted file mode 100644 index bb7b2ebfee7b..000000000000 --- a/include/sound/rcar_snd.h +++ /dev/null @@ -1,118 +0,0 @@ -/* - * Renesas R-Car SRU/SCU/SSIU/SSI support - * - * Copyright (C) 2013 Renesas Solutions Corp. - * Kuninori Morimoto <kuninori.morimoto.gx@renesas.com> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
- */ - -#ifndef RCAR_SND_H -#define RCAR_SND_H - -#include <linux/sh_clk.h> - -#define RSND_GEN1_SRU 0 -#define RSND_GEN1_ADG 1 -#define RSND_GEN1_SSI 2 - -#define RSND_GEN2_SCU 0 -#define RSND_GEN2_ADG 1 -#define RSND_GEN2_SSIU 2 -#define RSND_GEN2_SSI 3 - -#define RSND_BASE_MAX 4 - -/* - * flags - * - * 0xAB000000 - * - * A : clock sharing settings - * B : SSI direction - */ -#define RSND_SSI_CLK_PIN_SHARE (1 << 31) -#define RSND_SSI_NO_BUSIF (1 << 30) /* SSI+DMA without BUSIF */ - -#define RSND_SSI(_dma_id, _irq, _flags) \ -{ .dma_id = _dma_id, .irq = _irq, .flags = _flags } -#define RSND_SSI_UNUSED \ -{ .dma_id = -1, .irq = -1, .flags = 0 } - -struct rsnd_ssi_platform_info { - int dma_id; - int irq; - u32 flags; -}; - -#define RSND_SRC(rate, _dma_id) \ -{ .convert_rate = rate, .dma_id = _dma_id, } -#define RSND_SRC_UNUSED \ -{ .convert_rate = 0, .dma_id = -1, } - -struct rsnd_src_platform_info { - u32 convert_rate; /* sampling rate convert */ - int dma_id; /* for Gen2 SCU */ - int irq; -}; - -/* - * flags - */ -struct rsnd_ctu_platform_info { - u32 flags; -}; - -struct rsnd_mix_platform_info { - u32 flags; -}; - -struct rsnd_dvc_platform_info { - u32 flags; -}; - -struct rsnd_dai_path_info { - struct rsnd_ssi_platform_info *ssi; - struct rsnd_src_platform_info *src; - struct rsnd_ctu_platform_info *ctu; - struct rsnd_mix_platform_info *mix; - struct rsnd_dvc_platform_info *dvc; -}; - -struct rsnd_dai_platform_info { - struct rsnd_dai_path_info playback; - struct rsnd_dai_path_info capture; -}; - -/* - * flags - * - * 0x0000000A - * - * A : generation - */ -#define RSND_GEN_MASK (0xF << 0) -#define RSND_GEN1 (1 << 0) /* fixme */ -#define RSND_GEN2 (2 << 0) /* fixme */ - -struct rcar_snd_info { - u32 flags; - struct rsnd_ssi_platform_info *ssi_info; - int ssi_info_nr; - struct rsnd_src_platform_info *src_info; - int src_info_nr; - struct rsnd_ctu_platform_info *ctu_info; - int ctu_info_nr; - struct rsnd_mix_platform_info *mix_info; - int mix_info_nr; - struct rsnd_dvc_platform_info *dvc_info; - int dvc_info_nr; - struct rsnd_dai_platform_info *dai_info; - int dai_info_nr; - int (*start)(int id); - int (*stop)(int id); -}; - -#endif diff --git a/include/sound/rt5640.h b/include/sound/rt5640.h index 59d26dd81e45..e3c84b92ff70 100644 --- a/include/sound/rt5640.h +++ b/include/sound/rt5640.h @@ -12,9 +12,10 @@ #define __LINUX_SND_RT5640_H struct rt5640_platform_data { - /* IN1 & IN2 can optionally be differential */ + /* IN1 & IN2 & IN3 can optionally be differential */ bool in1_diff; bool in2_diff; + bool in3_diff; bool dmic_en; bool dmic1_data_pin; /* 0 = IN1P; 1 = GPIO3 */ diff --git a/include/sound/rt5645.h b/include/sound/rt5645.h index 22734bc3ffd4..a5cf6152e778 100644 --- a/include/sound/rt5645.h +++ b/include/sound/rt5645.h @@ -21,6 +21,8 @@ struct rt5645_platform_data { /* 0 = IN2P; 1 = GPIO6; 2 = GPIO10; 3 = GPIO12 */ unsigned int jd_mode; + /* Invert JD when jack insert */ + bool jd_invert; }; #endif diff --git a/include/sound/simple_card.h b/include/sound/simple_card.h index b9b4f289fe6b..0399352f3a62 100644 --- a/include/sound/simple_card.h +++ b/include/sound/simple_card.h @@ -19,6 +19,8 @@ struct asoc_simple_dai { unsigned int sysclk; int slots; int slot_width; + unsigned int tx_slot_mask; + unsigned int rx_slot_mask; struct clk *clk; }; diff --git a/include/sound/soc-dai.h b/include/sound/soc-dai.h index 2df96b1384c7..212eaaf172ed 100644 --- a/include/sound/soc-dai.h +++ b/include/sound/soc-dai.h @@ -48,10 +48,25 @@ struct snd_compr_stream; #define SND_SOC_DAIFMT_GATED (0 << 
4) /* clock is gated */ /* - * DAI hardware signal inversions. + * DAI hardware signal polarity. * * Specifies whether the DAI can also support inverted clocks for the specified * format. + * + * BCLK: + * - "normal" polarity means signal is available at rising edge of BCLK + * - "inverted" polarity means signal is available at falling edge of BCLK + * + * FSYNC "normal" polarity depends on the frame format: + * - I2S: frame consists of left then right channel data. Left channel starts + * with falling FSYNC edge, right channel starts with rising FSYNC edge. + * - Left/Right Justified: frame consists of left then right channel data. + * Left channel starts with rising FSYNC edge, right channel starts with + * falling FSYNC edge. + * - DSP A/B: Frame starts with rising FSYNC edge. + * - AC97: Frame starts with rising FSYNC edge. + * + * "Negative" FSYNC polarity is the one opposite of "normal" polarity. */ #define SND_SOC_DAIFMT_NB_NF (0 << 8) /* normal bit clock + frame */ #define SND_SOC_DAIFMT_NB_IF (2 << 8) /* normal BCLK + inv FRM */ @@ -214,7 +229,7 @@ struct snd_soc_dai_driver { int (*suspend)(struct snd_soc_dai *dai); int (*resume)(struct snd_soc_dai *dai); /* compress dai */ - bool compress_dai; + int (*compress_new)(struct snd_soc_pcm_runtime *rtd, int num); /* DAI is also used for the control bus */ bool bus_control; diff --git a/include/sound/soc-dapm.h b/include/sound/soc-dapm.h index 5abba037d245..7855cfe46b69 100644 --- a/include/sound/soc-dapm.h +++ b/include/sound/soc-dapm.h @@ -451,6 +451,9 @@ int snd_soc_dapm_dai_get_connected_widgets(struct snd_soc_dai *dai, int stream, struct snd_soc_dapm_context *snd_soc_dapm_kcontrol_dapm( struct snd_kcontrol *kcontrol); +struct snd_soc_dapm_widget *snd_soc_dapm_kcontrol_widget( + struct snd_kcontrol *kcontrol); + int snd_soc_dapm_force_bias_level(struct snd_soc_dapm_context *dapm, enum snd_soc_bias_level level); diff --git a/include/sound/soc.h b/include/sound/soc.h index 884e728b09d9..a8b4b9c8b1d2 100644 --- a/include/sound/soc.h +++ b/include/sound/soc.h @@ -86,7 +86,7 @@ .access = SNDRV_CTL_ELEM_ACCESS_TLV_READ | \ SNDRV_CTL_ELEM_ACCESS_READWRITE, \ .tlv.p = (tlv_array),\ - .info = snd_soc_info_volsw, \ + .info = snd_soc_info_volsw_sx, \ .get = snd_soc_get_volsw_sx,\ .put = snd_soc_put_volsw_sx, \ .private_value = (unsigned long)&(struct soc_mixer_control) \ @@ -156,7 +156,7 @@ .access = SNDRV_CTL_ELEM_ACCESS_TLV_READ | \ SNDRV_CTL_ELEM_ACCESS_READWRITE, \ .tlv.p = (tlv_array), \ - .info = snd_soc_info_volsw, \ + .info = snd_soc_info_volsw_sx, \ .get = snd_soc_get_volsw_sx, \ .put = snd_soc_put_volsw_sx, \ .private_value = (unsigned long)&(struct soc_mixer_control) \ @@ -217,6 +217,13 @@ .get = xhandler_get, .put = xhandler_put, \ .private_value = \ SOC_DOUBLE_VALUE(reg, shift_left, shift_right, max, invert, 0) } +#define SOC_DOUBLE_R_EXT(xname, reg_left, reg_right, xshift, xmax, xinvert,\ + xhandler_get, xhandler_put) \ +{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = (xname), \ + .info = snd_soc_info_volsw, \ + .get = xhandler_get, .put = xhandler_put, \ + .private_value = SOC_DOUBLE_R_VALUE(reg_left, reg_right, xshift, \ + xmax, xinvert) } #define SOC_SINGLE_EXT_TLV(xname, xreg, xshift, xmax, xinvert,\ xhandler_get, xhandler_put, tlv_array) \ { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \ @@ -226,6 +233,18 @@ .info = snd_soc_info_volsw, \ .get = xhandler_get, .put = xhandler_put, \ .private_value = SOC_SINGLE_VALUE(xreg, xshift, xmax, xinvert, 0) } +#define SOC_SINGLE_RANGE_EXT_TLV(xname, xreg, xshift, xmin, xmax, 
xinvert, \ + xhandler_get, xhandler_put, tlv_array) \ +{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = (xname),\ + .access = SNDRV_CTL_ELEM_ACCESS_TLV_READ |\ + SNDRV_CTL_ELEM_ACCESS_READWRITE,\ + .tlv.p = (tlv_array), \ + .info = snd_soc_info_volsw_range, \ + .get = xhandler_get, .put = xhandler_put, \ + .private_value = (unsigned long)&(struct soc_mixer_control) \ + {.reg = xreg, .rreg = xreg, .shift = xshift, \ + .rshift = xshift, .min = xmin, .max = xmax, \ + .platform_max = xmax, .invert = xinvert} } #define SOC_DOUBLE_EXT_TLV(xname, xreg, shift_left, shift_right, xmax, xinvert,\ xhandler_get, xhandler_put, tlv_array) \ { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = (xname), \ @@ -440,7 +459,9 @@ int snd_soc_platform_read(struct snd_soc_platform *platform, int snd_soc_platform_write(struct snd_soc_platform *platform, unsigned int reg, unsigned int val); int soc_new_pcm(struct snd_soc_pcm_runtime *rtd, int num); -int soc_new_compress(struct snd_soc_pcm_runtime *rtd, int num); +#ifdef CONFIG_SND_SOC_COMPRESS +int snd_soc_new_compress(struct snd_soc_pcm_runtime *rtd, int num); +#endif struct snd_pcm_substream *snd_soc_get_dai_substream(struct snd_soc_card *card, const char *dai_link, int stream); @@ -574,6 +595,8 @@ int snd_soc_put_enum_double(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol); int snd_soc_info_volsw(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo); +int snd_soc_info_volsw_sx(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_info *uinfo); #define snd_soc_info_bool_ext snd_ctl_boolean_mono_info int snd_soc_get_volsw(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol); @@ -591,7 +614,7 @@ int snd_soc_put_volsw_range(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol); int snd_soc_get_volsw_range(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol); -int snd_soc_limit_volume(struct snd_soc_codec *codec, +int snd_soc_limit_volume(struct snd_soc_card *card, const char *name, int max); int snd_soc_bytes_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo); @@ -1601,6 +1624,8 @@ int snd_soc_of_parse_card_name(struct snd_soc_card *card, int snd_soc_of_parse_audio_simple_widgets(struct snd_soc_card *card, const char *propname); int snd_soc_of_parse_tdm_slot(struct device_node *np, + unsigned int *tx_mask, + unsigned int *rx_mask, unsigned int *slots, unsigned int *slot_width); void snd_soc_of_parse_audio_prefix(struct snd_soc_card *card, diff --git a/include/sound/wm8904.h b/include/sound/wm8904.h index 898be3a8db9a..6d8f8fba3341 100644 --- a/include/sound/wm8904.h +++ b/include/sound/wm8904.h @@ -119,7 +119,7 @@ #define WM8904_MIC_REGS 2 #define WM8904_GPIO_REGS 4 #define WM8904_DRC_REGS 4 -#define WM8904_EQ_REGS 25 +#define WM8904_EQ_REGS 24 /** * DRC configurations are specified with a label and a set of register diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h index ac9bf1c0e42d..5f48754dc36a 100644 --- a/include/target/target_core_base.h +++ b/include/target/target_core_base.h @@ -730,6 +730,7 @@ struct se_device { #define DF_EMULATED_VPD_UNIT_SERIAL 0x00000004 #define DF_USING_UDEV_PATH 0x00000008 #define DF_USING_ALIAS 0x00000010 +#define DF_READ_ONLY 0x00000020 /* Physical device queue depth */ u32 queue_depth; /* Used for SPC-2 reservations enforce of ISIDs */ diff --git a/include/trace/events/compaction.h b/include/trace/events/compaction.h index 9a6a3fe0fb51..c92d1e1cbad9 100644 --- a/include/trace/events/compaction.h +++ 
b/include/trace/events/compaction.h @@ -9,6 +9,62 @@ #include <linux/tracepoint.h> #include <trace/events/gfpflags.h> +#define COMPACTION_STATUS \ + EM( COMPACT_DEFERRED, "deferred") \ + EM( COMPACT_SKIPPED, "skipped") \ + EM( COMPACT_CONTINUE, "continue") \ + EM( COMPACT_PARTIAL, "partial") \ + EM( COMPACT_COMPLETE, "complete") \ + EM( COMPACT_NO_SUITABLE_PAGE, "no_suitable_page") \ + EM( COMPACT_NOT_SUITABLE_ZONE, "not_suitable_zone") \ + EMe(COMPACT_CONTENDED, "contended") + +#ifdef CONFIG_ZONE_DMA +#define IFDEF_ZONE_DMA(X) X +#else +#define IFDEF_ZONE_DMA(X) +#endif + +#ifdef CONFIG_ZONE_DMA32 +#define IFDEF_ZONE_DMA32(X) X +#else +#define IFDEF_ZONE_DMA32(X) +#endif + +#ifdef CONFIG_HIGHMEM +#define IFDEF_ZONE_HIGHMEM(X) X +#else +#define IFDEF_ZONE_HIGHMEM(X) +#endif + +#define ZONE_TYPE \ + IFDEF_ZONE_DMA( EM (ZONE_DMA, "DMA")) \ + IFDEF_ZONE_DMA32( EM (ZONE_DMA32, "DMA32")) \ + EM (ZONE_NORMAL, "Normal") \ + IFDEF_ZONE_HIGHMEM( EM (ZONE_HIGHMEM,"HighMem")) \ + EMe(ZONE_MOVABLE,"Movable") + +/* + * First define the enums in the above macros to be exported to userspace + * via TRACE_DEFINE_ENUM(). + */ +#undef EM +#undef EMe +#define EM(a, b) TRACE_DEFINE_ENUM(a); +#define EMe(a, b) TRACE_DEFINE_ENUM(a); + +COMPACTION_STATUS +ZONE_TYPE + +/* + * Now redefine the EM() and EMe() macros to map the enums to the strings + * that will be printed in the output. + */ +#undef EM +#undef EMe +#define EM(a, b) {a, b}, +#define EMe(a, b) {a, b} + DECLARE_EVENT_CLASS(mm_compaction_isolate_template, TP_PROTO( @@ -161,7 +217,7 @@ TRACE_EVENT(mm_compaction_end, __entry->free_pfn, __entry->zone_end, __entry->sync ? "sync" : "async", - compaction_status_string[__entry->status]) + __print_symbolic(__entry->status, COMPACTION_STATUS)) ); TRACE_EVENT(mm_compaction_try_to_compact_pages, @@ -201,23 +257,23 @@ DECLARE_EVENT_CLASS(mm_compaction_suitable_template, TP_STRUCT__entry( __field(int, nid) - __field(char *, name) + __field(enum zone_type, idx) __field(int, order) __field(int, ret) ), TP_fast_assign( __entry->nid = zone_to_nid(zone); - __entry->name = (char *)zone->name; + __entry->idx = zone_idx(zone); __entry->order = order; __entry->ret = ret; ), TP_printk("node=%d zone=%-8s order=%d ret=%s", __entry->nid, - __entry->name, + __print_symbolic(__entry->idx, ZONE_TYPE), __entry->order, - compaction_status_string[__entry->ret]) + __print_symbolic(__entry->ret, COMPACTION_STATUS)) ); DEFINE_EVENT(mm_compaction_suitable_template, mm_compaction_finished, @@ -247,7 +303,7 @@ DECLARE_EVENT_CLASS(mm_compaction_defer_template, TP_STRUCT__entry( __field(int, nid) - __field(char *, name) + __field(enum zone_type, idx) __field(int, order) __field(unsigned int, considered) __field(unsigned int, defer_shift) @@ -256,7 +312,7 @@ DECLARE_EVENT_CLASS(mm_compaction_defer_template, TP_fast_assign( __entry->nid = zone_to_nid(zone); - __entry->name = (char *)zone->name; + __entry->idx = zone_idx(zone); __entry->order = order; __entry->considered = zone->compact_considered; __entry->defer_shift = zone->compact_defer_shift; @@ -265,7 +321,7 @@ DECLARE_EVENT_CLASS(mm_compaction_defer_template, TP_printk("node=%d zone=%-8s order=%d order_failed=%d consider=%u limit=%lu", __entry->nid, - __entry->name, + __print_symbolic(__entry->idx, ZONE_TYPE), __entry->order, __entry->order_failed, __entry->considered, diff --git a/include/trace/events/f2fs.h b/include/trace/events/f2fs.h index a01946514b5a..00b4a6308249 100644 --- a/include/trace/events/f2fs.h +++ b/include/trace/events/f2fs.h @@ -514,6 +514,34 @@ 
TRACE_EVENT(f2fs_map_blocks, __entry->ret) ); +TRACE_EVENT(f2fs_background_gc, + + TP_PROTO(struct super_block *sb, long wait_ms, + unsigned int prefree, unsigned int free), + + TP_ARGS(sb, wait_ms, prefree, free), + + TP_STRUCT__entry( + __field(dev_t, dev) + __field(long, wait_ms) + __field(unsigned int, prefree) + __field(unsigned int, free) + ), + + TP_fast_assign( + __entry->dev = sb->s_dev; + __entry->wait_ms = wait_ms; + __entry->prefree = prefree; + __entry->free = free; + ), + + TP_printk("dev = (%d,%d), wait_ms = %ld, prefree = %u, free = %u", + show_dev(__entry), + __entry->wait_ms, + __entry->prefree, + __entry->free) +); + TRACE_EVENT(f2fs_get_victim, TP_PROTO(struct super_block *sb, int type, int gc_type, @@ -1000,6 +1028,32 @@ TRACE_EVENT(f2fs_writepages, __entry->for_sync) ); +TRACE_EVENT(f2fs_readpages, + + TP_PROTO(struct inode *inode, struct page *page, unsigned int nrpage), + + TP_ARGS(inode, page, nrpage), + + TP_STRUCT__entry( + __field(dev_t, dev) + __field(ino_t, ino) + __field(pgoff_t, start) + __field(unsigned int, nrpage) + ), + + TP_fast_assign( + __entry->dev = inode->i_sb->s_dev; + __entry->ino = inode->i_ino; + __entry->start = page->index; + __entry->nrpage = nrpage; + ), + + TP_printk("dev = (%d,%d), ino = %lu, start = %lu nrpage = %u", + show_dev_ino(__entry), + (unsigned long)__entry->start, + __entry->nrpage) +); + TRACE_EVENT(f2fs_write_checkpoint, TP_PROTO(struct super_block *sb, int reason, char *msg), @@ -1132,17 +1186,19 @@ TRACE_EVENT_CONDITION(f2fs_lookup_extent_tree_end, __entry->len) ); -TRACE_EVENT(f2fs_update_extent_tree, +TRACE_EVENT(f2fs_update_extent_tree_range, - TP_PROTO(struct inode *inode, unsigned int pgofs, block_t blkaddr), + TP_PROTO(struct inode *inode, unsigned int pgofs, block_t blkaddr, + unsigned int len), - TP_ARGS(inode, pgofs, blkaddr), + TP_ARGS(inode, pgofs, blkaddr, len), TP_STRUCT__entry( __field(dev_t, dev) __field(ino_t, ino) __field(unsigned int, pgofs) __field(u32, blk) + __field(unsigned int, len) ), TP_fast_assign( @@ -1150,12 +1206,15 @@ TRACE_EVENT(f2fs_update_extent_tree, __entry->ino = inode->i_ino; __entry->pgofs = pgofs; __entry->blk = blkaddr; + __entry->len = len; ), - TP_printk("dev = (%d,%d), ino = %lu, pgofs = %u, blkaddr = %u", + TP_printk("dev = (%d,%d), ino = %lu, pgofs = %u, " + "blkaddr = %u, len = %u", show_dev_ino(__entry), __entry->pgofs, - __entry->blk) + __entry->blk, + __entry->len) ); TRACE_EVENT(f2fs_shrink_extent_tree, diff --git a/include/trace/events/filelock.h b/include/trace/events/filelock.h index a0d008070962..c72f2dc01d0b 100644 --- a/include/trace/events/filelock.h +++ b/include/trace/events/filelock.h @@ -81,15 +81,47 @@ DEFINE_EVENT(filelock_lease, break_lease_block, TP_PROTO(struct inode *inode, st DEFINE_EVENT(filelock_lease, break_lease_unblock, TP_PROTO(struct inode *inode, struct file_lock *fl), TP_ARGS(inode, fl)); -DEFINE_EVENT(filelock_lease, generic_add_lease, TP_PROTO(struct inode *inode, struct file_lock *fl), - TP_ARGS(inode, fl)); - DEFINE_EVENT(filelock_lease, generic_delete_lease, TP_PROTO(struct inode *inode, struct file_lock *fl), TP_ARGS(inode, fl)); DEFINE_EVENT(filelock_lease, time_out_leases, TP_PROTO(struct inode *inode, struct file_lock *fl), TP_ARGS(inode, fl)); +TRACE_EVENT(generic_add_lease, + TP_PROTO(struct inode *inode, struct file_lock *fl), + + TP_ARGS(inode, fl), + + TP_STRUCT__entry( + __field(unsigned long, i_ino) + __field(int, wcount) + __field(int, dcount) + __field(int, icount) + __field(dev_t, s_dev) + __field(fl_owner_t, fl_owner) + 
__field(unsigned int, fl_flags) + __field(unsigned char, fl_type) + ), + + TP_fast_assign( + __entry->s_dev = inode->i_sb->s_dev; + __entry->i_ino = inode->i_ino; + __entry->wcount = atomic_read(&inode->i_writecount); + __entry->dcount = d_count(fl->fl_file->f_path.dentry); + __entry->icount = atomic_read(&inode->i_count); + __entry->fl_owner = fl ? fl->fl_owner : NULL; + __entry->fl_flags = fl ? fl->fl_flags : 0; + __entry->fl_type = fl ? fl->fl_type : 0; + ), + + TP_printk("dev=0x%x:0x%x ino=0x%lx wcount=%d dcount=%d icount=%d fl_owner=0x%p fl_flags=%s fl_type=%s", + MAJOR(__entry->s_dev), MINOR(__entry->s_dev), + __entry->i_ino, __entry->wcount, __entry->dcount, + __entry->icount, __entry->fl_owner, + show_fl_flags(__entry->fl_flags), + show_fl_type(__entry->fl_type)) +); + #endif /* _TRACE_FILELOCK_H */ /* This part must be outside protection */ diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h index 539d6bc3216a..9b90c57517a9 100644 --- a/include/trace/events/sched.h +++ b/include/trace/events/sched.h @@ -104,22 +104,17 @@ DEFINE_EVENT(sched_wakeup_template, sched_wakeup_new, TP_ARGS(p)); #ifdef CREATE_TRACE_POINTS -static inline long __trace_sched_switch_state(struct task_struct *p) +static inline long __trace_sched_switch_state(bool preempt, struct task_struct *p) { - long state = p->state; - -#ifdef CONFIG_PREEMPT #ifdef CONFIG_SCHED_DEBUG BUG_ON(p != current); #endif /* CONFIG_SCHED_DEBUG */ + /* - * For all intents and purposes a preempted task is a running task. + * Preemption ignores task state, therefore preempted tasks are always + * RUNNING (we will not have dequeued if state != RUNNING). */ - if (preempt_count() & PREEMPT_ACTIVE) - state = TASK_RUNNING | TASK_STATE_MAX; -#endif /* CONFIG_PREEMPT */ - - return state; + return preempt ? 
TASK_RUNNING | TASK_STATE_MAX : p->state; } #endif /* CREATE_TRACE_POINTS */ @@ -128,10 +123,11 @@ static inline long __trace_sched_switch_state(struct task_struct *p) */ TRACE_EVENT(sched_switch, - TP_PROTO(struct task_struct *prev, + TP_PROTO(bool preempt, + struct task_struct *prev, struct task_struct *next), - TP_ARGS(prev, next), + TP_ARGS(preempt, prev, next), TP_STRUCT__entry( __array( char, prev_comm, TASK_COMM_LEN ) @@ -147,7 +143,7 @@ TRACE_EVENT(sched_switch, memcpy(__entry->next_comm, next->comm, TASK_COMM_LEN); __entry->prev_pid = prev->pid; __entry->prev_prio = prev->prio; - __entry->prev_state = __trace_sched_switch_state(prev); + __entry->prev_state = __trace_sched_switch_state(preempt, prev); memcpy(__entry->prev_comm, prev->comm, TASK_COMM_LEN); __entry->next_pid = next->pid; __entry->next_prio = next->prio; diff --git a/include/trace/events/v4l2.h b/include/trace/events/v4l2.h index dbf017bfddd9..22afa26e34b2 100644 --- a/include/trace/events/v4l2.h +++ b/include/trace/events/v4l2.h @@ -5,6 +5,7 @@ #define _TRACE_V4L2_H #include <linux/tracepoint.h> +#include <media/videobuf2-v4l2.h> /* Enums require being exported to userspace, for user tool parsing */ #undef EM @@ -27,6 +28,7 @@ EM( V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE, "VIDEO_CAPTURE_MPLANE" ) \ EM( V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE, "VIDEO_OUTPUT_MPLANE" ) \ EM( V4L2_BUF_TYPE_SDR_CAPTURE, "SDR_CAPTURE" ) \ + EM( V4L2_BUF_TYPE_SDR_OUTPUT, "SDR_OUTPUT" ) \ EMe(V4L2_BUF_TYPE_PRIVATE, "PRIVATE" ) SHOW_TYPE @@ -174,17 +176,12 @@ DEFINE_EVENT(v4l2_event_class, v4l2_qbuf, TP_ARGS(minor, buf) ); -DECLARE_EVENT_CLASS(vb2_event_class, +DECLARE_EVENT_CLASS(vb2_v4l2_event_class, TP_PROTO(struct vb2_queue *q, struct vb2_buffer *vb), TP_ARGS(q, vb), TP_STRUCT__entry( __field(int, minor) - __field(u32, queued_count) - __field(int, owned_by_drv_count) - __field(u32, index) - __field(u32, type) - __field(u32, bytesused) __field(u32, flags) __field(u32, field) __field(s64, timestamp) @@ -202,38 +199,30 @@ DECLARE_EVENT_CLASS(vb2_event_class, ), TP_fast_assign( - __entry->minor = q->owner ? q->owner->vdev->minor : -1; - __entry->queued_count = q->queued_count; - __entry->owned_by_drv_count = - atomic_read(&q->owned_by_drv_count); - __entry->index = vb->v4l2_buf.index; - __entry->type = vb->v4l2_buf.type; - __entry->bytesused = vb->v4l2_planes[0].bytesused; - __entry->flags = vb->v4l2_buf.flags; - __entry->field = vb->v4l2_buf.field; - __entry->timestamp = timeval_to_ns(&vb->v4l2_buf.timestamp); - __entry->timecode_type = vb->v4l2_buf.timecode.type; - __entry->timecode_flags = vb->v4l2_buf.timecode.flags; - __entry->timecode_frames = vb->v4l2_buf.timecode.frames; - __entry->timecode_seconds = vb->v4l2_buf.timecode.seconds; - __entry->timecode_minutes = vb->v4l2_buf.timecode.minutes; - __entry->timecode_hours = vb->v4l2_buf.timecode.hours; - __entry->timecode_userbits0 = vb->v4l2_buf.timecode.userbits[0]; - __entry->timecode_userbits1 = vb->v4l2_buf.timecode.userbits[1]; - __entry->timecode_userbits2 = vb->v4l2_buf.timecode.userbits[2]; - __entry->timecode_userbits3 = vb->v4l2_buf.timecode.userbits[3]; - __entry->sequence = vb->v4l2_buf.sequence; + struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb); + struct v4l2_fh *owner = q->owner; + + __entry->minor = owner ? 
owner->vdev->minor : -1; + __entry->flags = vbuf->flags; + __entry->field = vbuf->field; + __entry->timestamp = timeval_to_ns(&vbuf->timestamp); + __entry->timecode_type = vbuf->timecode.type; + __entry->timecode_flags = vbuf->timecode.flags; + __entry->timecode_frames = vbuf->timecode.frames; + __entry->timecode_seconds = vbuf->timecode.seconds; + __entry->timecode_minutes = vbuf->timecode.minutes; + __entry->timecode_hours = vbuf->timecode.hours; + __entry->timecode_userbits0 = vbuf->timecode.userbits[0]; + __entry->timecode_userbits1 = vbuf->timecode.userbits[1]; + __entry->timecode_userbits2 = vbuf->timecode.userbits[2]; + __entry->timecode_userbits3 = vbuf->timecode.userbits[3]; + __entry->sequence = vbuf->sequence; ), - TP_printk("minor = %d, queued = %u, owned_by_drv = %d, index = %u, " - "type = %s, bytesused = %u, flags = %s, field = %s, " + TP_printk("minor=%d flags = %s, field = %s, " "timestamp = %llu, timecode = { type = %s, flags = %s, " "frames = %u, seconds = %u, minutes = %u, hours = %u, " "userbits = { %u %u %u %u } }, sequence = %u", __entry->minor, - __entry->queued_count, - __entry->owned_by_drv_count, - __entry->index, show_type(__entry->type), - __entry->bytesused, show_flags(__entry->flags), show_field(__entry->field), __entry->timestamp, @@ -251,22 +240,22 @@ DECLARE_EVENT_CLASS(vb2_event_class, ) ) -DEFINE_EVENT(vb2_event_class, vb2_buf_done, +DEFINE_EVENT(vb2_v4l2_event_class, vb2_v4l2_buf_done, TP_PROTO(struct vb2_queue *q, struct vb2_buffer *vb), TP_ARGS(q, vb) ); -DEFINE_EVENT(vb2_event_class, vb2_buf_queue, +DEFINE_EVENT(vb2_v4l2_event_class, vb2_v4l2_buf_queue, TP_PROTO(struct vb2_queue *q, struct vb2_buffer *vb), TP_ARGS(q, vb) ); -DEFINE_EVENT(vb2_event_class, vb2_dqbuf, +DEFINE_EVENT(vb2_v4l2_event_class, vb2_v4l2_dqbuf, TP_PROTO(struct vb2_queue *q, struct vb2_buffer *vb), TP_ARGS(q, vb) ); -DEFINE_EVENT(vb2_event_class, vb2_qbuf, +DEFINE_EVENT(vb2_v4l2_event_class, vb2_v4l2_qbuf, TP_PROTO(struct vb2_queue *q, struct vb2_buffer *vb), TP_ARGS(q, vb) ); diff --git a/include/trace/events/vb2.h b/include/trace/events/vb2.h new file mode 100644 index 000000000000..bfeceeba3744 --- /dev/null +++ b/include/trace/events/vb2.h @@ -0,0 +1,65 @@ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM vb2 + +#if !defined(_TRACE_VB2_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_VB2_H + +#include <linux/tracepoint.h> +#include <media/videobuf2-core.h> + +DECLARE_EVENT_CLASS(vb2_event_class, + TP_PROTO(struct vb2_queue *q, struct vb2_buffer *vb), + TP_ARGS(q, vb), + + TP_STRUCT__entry( + __field(void *, owner) + __field(u32, queued_count) + __field(int, owned_by_drv_count) + __field(u32, index) + __field(u32, type) + __field(u32, bytesused) + ), + + TP_fast_assign( + __entry->owner = q->owner; + __entry->queued_count = q->queued_count; + __entry->owned_by_drv_count = + atomic_read(&q->owned_by_drv_count); + __entry->index = vb->index; + __entry->type = vb->type; + __entry->bytesused = vb->planes[0].bytesused; + ), + + TP_printk("owner = %p, queued = %u, owned_by_drv = %d, index = %u, " + "type = %u, bytesused = %u", __entry->owner, + __entry->queued_count, + __entry->owned_by_drv_count, + __entry->index, __entry->type, + __entry->bytesused + ) +) + +DEFINE_EVENT(vb2_event_class, vb2_buf_done, + TP_PROTO(struct vb2_queue *q, struct vb2_buffer *vb), + TP_ARGS(q, vb) +); + +DEFINE_EVENT(vb2_event_class, vb2_buf_queue, + TP_PROTO(struct vb2_queue *q, struct vb2_buffer *vb), + TP_ARGS(q, vb) +); + +DEFINE_EVENT(vb2_event_class, vb2_dqbuf, + TP_PROTO(struct vb2_queue *q, struct 
vb2_buffer *vb), + TP_ARGS(q, vb) +); + +DEFINE_EVENT(vb2_event_class, vb2_qbuf, + TP_PROTO(struct vb2_queue *q, struct vb2_buffer *vb), + TP_ARGS(q, vb) +); + +#endif /* if !defined(_TRACE_VB2_H) || defined(TRACE_HEADER_MULTI_READ) */ + +/* This part must be outside protection */ +#include <trace/define_trace.h> diff --git a/include/uapi/asm-generic/mman-common.h b/include/uapi/asm-generic/mman-common.h index ddc3b36f1046..a74dd84bbb6d 100644 --- a/include/uapi/asm-generic/mman-common.h +++ b/include/uapi/asm-generic/mman-common.h @@ -25,6 +25,11 @@ # define MAP_UNINITIALIZED 0x0 /* Don't support this flag */ #endif +/* + * Flags for mlock + */ +#define MLOCK_ONFAULT 0x01 /* Lock pages in range after they are faulted in, do not prefault */ + #define MS_ASYNC 1 /* sync memory asynchronously */ #define MS_INVALIDATE 2 /* invalidate the caches */ #define MS_SYNC 4 /* synchronous memory sync */ diff --git a/include/uapi/asm-generic/mman.h b/include/uapi/asm-generic/mman.h index e9fe6fd2a074..7162cd4cca73 100644 --- a/include/uapi/asm-generic/mman.h +++ b/include/uapi/asm-generic/mman.h @@ -17,5 +17,6 @@ #define MCL_CURRENT 1 /* lock all current mappings */ #define MCL_FUTURE 2 /* lock all future mappings */ +#define MCL_ONFAULT 4 /* lock all pages that are faulted in */ #endif /* __ASM_GENERIC_MMAN_H */ diff --git a/include/uapi/asm-generic/signal.h b/include/uapi/asm-generic/signal.h index 9df61f1edb0f..3094618d382f 100644 --- a/include/uapi/asm-generic/signal.h +++ b/include/uapi/asm-generic/signal.h @@ -80,8 +80,10 @@ * SA_RESTORER 0x04000000 */ +#if !defined MINSIGSTKSZ || !defined SIGSTKSZ #define MINSIGSTKSZ 2048 #define SIGSTKSZ 8192 +#endif #ifndef __ASSEMBLY__ typedef struct { diff --git a/include/uapi/asm-generic/unistd.h b/include/uapi/asm-generic/unistd.h index 8da542a2874d..1324b0292ec2 100644 --- a/include/uapi/asm-generic/unistd.h +++ b/include/uapi/asm-generic/unistd.h @@ -709,17 +709,21 @@ __SYSCALL(__NR_memfd_create, sys_memfd_create) __SYSCALL(__NR_bpf, sys_bpf) #define __NR_execveat 281 __SC_COMP(__NR_execveat, sys_execveat, compat_sys_execveat) -#define __NR_membarrier 282 +#define __NR_userfaultfd 282 +__SYSCALL(__NR_userfaultfd, sys_userfaultfd) +#define __NR_membarrier 283 __SYSCALL(__NR_membarrier, sys_membarrier) +#define __NR_mlock2 284 +__SYSCALL(__NR_mlock2, sys_mlock2) #undef __NR_syscalls -#define __NR_syscalls 283 +#define __NR_syscalls 285 /* * All syscalls below here should go away really, * these are provided for both review and as a porting * help for the C library version. -* + * * Last chance: are any of these important enough to * enable by default? 
*/ diff --git a/include/uapi/linux/Kbuild b/include/uapi/linux/Kbuild index f7b2db44eb4b..70d89230b641 100644 --- a/include/uapi/linux/Kbuild +++ b/include/uapi/linux/Kbuild @@ -263,6 +263,7 @@ header-y += minix_fs.h header-y += mman.h header-y += mmtimer.h header-y += mpls.h +header-y += mpls_iptunnel.h header-y += mqueue.h header-y += mroute6.h header-y += mroute.h diff --git a/include/uapi/linux/atm_zatm.h b/include/uapi/linux/atm_zatm.h index 10f0fa29454f..9c9c6ad55f14 100644 --- a/include/uapi/linux/atm_zatm.h +++ b/include/uapi/linux/atm_zatm.h @@ -35,12 +35,6 @@ struct zatm_pool_req { struct zatm_pool_info info; /* actual information */ }; -struct zatm_t_hist { - struct timeval real; /* real (wall-clock) time */ - struct timeval expected; /* expected real time */ -}; - - #define ZATM_OAM_POOL 0 /* free buffer pool for OAM cells */ #define ZATM_AAL0_POOL 1 /* free buffer pool for AAL0 cells */ #define ZATM_AAL5_POOL_BASE 2 /* first AAL5 free buffer pool */ diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index 92a48e2d5461..9ea2d22fa2cb 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -63,50 +63,16 @@ struct bpf_insn { __s32 imm; /* signed immediate constant */ }; -/* BPF syscall commands */ +/* BPF syscall commands, see bpf(2) man-page for details. */ enum bpf_cmd { - /* create a map with given type and attributes - * fd = bpf(BPF_MAP_CREATE, union bpf_attr *, u32 size) - * returns fd or negative error - * map is deleted when fd is closed - */ BPF_MAP_CREATE, - - /* lookup key in a given map - * err = bpf(BPF_MAP_LOOKUP_ELEM, union bpf_attr *attr, u32 size) - * Using attr->map_fd, attr->key, attr->value - * returns zero and stores found elem into value - * or negative error - */ BPF_MAP_LOOKUP_ELEM, - - /* create or update key/value pair in a given map - * err = bpf(BPF_MAP_UPDATE_ELEM, union bpf_attr *attr, u32 size) - * Using attr->map_fd, attr->key, attr->value, attr->flags - * returns zero or negative error - */ BPF_MAP_UPDATE_ELEM, - - /* find and delete elem by key in a given map - * err = bpf(BPF_MAP_DELETE_ELEM, union bpf_attr *attr, u32 size) - * Using attr->map_fd, attr->key - * returns zero or negative error - */ BPF_MAP_DELETE_ELEM, - - /* lookup key in a given map and return next key - * err = bpf(BPF_MAP_GET_NEXT_KEY, union bpf_attr *attr, u32 size) - * Using attr->map_fd, attr->key, attr->next_key - * returns zero and stores next key or negative error - */ BPF_MAP_GET_NEXT_KEY, - - /* verify and load eBPF program - * prog_fd = bpf(BPF_PROG_LOAD, union bpf_attr *attr, u32 size) - * Using attr->prog_type, attr->insns, attr->license - * returns fd or negative error - */ BPF_PROG_LOAD, + BPF_OBJ_PIN, + BPF_OBJ_GET, }; enum bpf_map_type { @@ -160,6 +126,11 @@ union bpf_attr { __aligned_u64 log_buf; /* user supplied buffer */ __u32 kern_version; /* checked when prog_type=kprobe */ }; + + struct { /* anonymous struct used by BPF_OBJ_* commands */ + __aligned_u64 pathname; + __u32 bpf_fd; + }; } __attribute__((aligned(8))); /* integer value in 'imm' field of BPF_CALL instruction selects which helper @@ -272,6 +243,32 @@ enum bpf_func_id { BPF_FUNC_skb_get_tunnel_key, BPF_FUNC_skb_set_tunnel_key, BPF_FUNC_perf_event_read, /* u64 bpf_perf_event_read(&map, index) */ + /** + * bpf_redirect(ifindex, flags) - redirect to another netdev + * @ifindex: ifindex of the net device + * @flags: bit 0 - if set, redirect to ingress instead of egress + * other bits - reserved + * Return: TC_ACT_REDIRECT + */ + BPF_FUNC_redirect, + + /** + * 
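[Editorial example, not part of this diff] The two new bpf(2) commands use the anonymous pathname/bpf_fd struct added to union bpf_attr to pin an eBPF map or program into a mounted BPF filesystem (see BPF_FS_MAGIC later in this patch) and to reopen it later by path. A hedged sketch of the wrappers, assuming a uapi <linux/bpf.h> that already carries these additions and an architecture that defines __NR_bpf:

#include <linux/bpf.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static int bpf_obj_pin(int fd, const char *path)
{
        union bpf_attr attr;

        memset(&attr, 0, sizeof(attr));
        attr.pathname = (__u64)(unsigned long)path;
        attr.bpf_fd = fd;
        return syscall(__NR_bpf, BPF_OBJ_PIN, &attr, sizeof(attr));
}

static int bpf_obj_get(const char *path)
{
        union bpf_attr attr;

        memset(&attr, 0, sizeof(attr));
        attr.pathname = (__u64)(unsigned long)path;
        return syscall(__NR_bpf, BPF_OBJ_GET, &attr, sizeof(attr));
}

The path must live on a bpffs mount (conventionally /sys/fs/bpf); the fd returned by bpf_obj_get() behaves like the original map or program fd.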
bpf_get_route_realm(skb) - retrieve a dst's tclassid + * @skb: pointer to skb + * Return: realm if != 0 + */ + BPF_FUNC_get_route_realm, + + /** + * bpf_perf_event_output(ctx, map, index, data, size) - output perf raw sample + * @ctx: struct pt_regs* + * @map: pointer to perf_event_array map + * @index: index of event in the map + * @data: data on stack to be output as raw data + * @size: size of data + * Return: 0 on success + */ + BPF_FUNC_perf_event_output, __BPF_FUNC_MAX_ID, }; @@ -293,6 +290,7 @@ struct __sk_buff { __u32 tc_index; __u32 cb[5]; __u32 hash; + __u32 tc_classid; }; struct bpf_tunnel_key { diff --git a/include/uapi/linux/can/bcm.h b/include/uapi/linux/can/bcm.h index 89ddb9dc9bdf..7a291dc1ff15 100644 --- a/include/uapi/linux/can/bcm.h +++ b/include/uapi/linux/can/bcm.h @@ -47,6 +47,11 @@ #include <linux/types.h> #include <linux/can.h> +struct bcm_timeval { + long tv_sec; + long tv_usec; +}; + /** * struct bcm_msg_head - head of messages to/from the broadcast manager * @opcode: opcode, see enum below. @@ -62,7 +67,7 @@ struct bcm_msg_head { __u32 opcode; __u32 flags; __u32 count; - struct timeval ival1, ival2; + struct bcm_timeval ival1, ival2; canid_t can_id; __u32 nframes; struct can_frame frames[0]; diff --git a/include/uapi/linux/dm-ioctl.h b/include/uapi/linux/dm-ioctl.h index d34611e35a30..30afd0a23c4b 100644 --- a/include/uapi/linux/dm-ioctl.h +++ b/include/uapi/linux/dm-ioctl.h @@ -267,9 +267,9 @@ enum { #define DM_DEV_SET_GEOMETRY _IOWR(DM_IOCTL, DM_DEV_SET_GEOMETRY_CMD, struct dm_ioctl) #define DM_VERSION_MAJOR 4 -#define DM_VERSION_MINOR 33 +#define DM_VERSION_MINOR 34 #define DM_VERSION_PATCHLEVEL 0 -#define DM_VERSION_EXTRA "-ioctl (2015-8-18)" +#define DM_VERSION_EXTRA "-ioctl (2015-10-28)" /* Status bits */ #define DM_READONLY_FLAG (1 << 0) /* In/Out */ diff --git a/include/uapi/linux/if_arcnet.h b/include/uapi/linux/if_arcnet.h index 46e34bd0e783..cfb642f8e7bd 100644 --- a/include/uapi/linux/if_arcnet.h +++ b/include/uapi/linux/if_arcnet.h @@ -19,7 +19,6 @@ #include <linux/types.h> #include <linux/if_ether.h> - /* * These are the defined ARCnet Protocol ID's. */ @@ -57,42 +56,40 @@ * The RFC1201-specific components of an arcnet packet header. */ struct arc_rfc1201 { - __u8 proto; /* protocol ID field - varies */ - __u8 split_flag; /* for use with split packets */ - __be16 sequence; /* sequence number */ - __u8 payload[0]; /* space remaining in packet (504 bytes)*/ + __u8 proto; /* protocol ID field - varies */ + __u8 split_flag; /* for use with split packets */ + __be16 sequence; /* sequence number */ + __u8 payload[0]; /* space remaining in packet (504 bytes)*/ }; #define RFC1201_HDR_SIZE 4 - /* * The RFC1051-specific components. */ struct arc_rfc1051 { - __u8 proto; /* ARC_P_RFC1051_ARP/RFC1051_IP */ - __u8 payload[0]; /* 507 bytes */ + __u8 proto; /* ARC_P_RFC1051_ARP/RFC1051_IP */ + __u8 payload[0]; /* 507 bytes */ }; #define RFC1051_HDR_SIZE 1 - /* * The ethernet-encap-specific components. We have a real ethernet header * and some data. */ struct arc_eth_encap { - __u8 proto; /* Always ARC_P_ETHER */ - struct ethhdr eth; /* standard ethernet header (yuck!) */ - __u8 payload[0]; /* 493 bytes */ + __u8 proto; /* Always ARC_P_ETHER */ + struct ethhdr eth; /* standard ethernet header (yuck!) 
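[Editorial example, not part of this diff] Switching bcm_msg_head from struct timeval to the new struct bcm_timeval keeps the broadcast-manager message layout independent of the userspace timeval definition, while the member names stay the same. A minimal sketch that sets up a cyclic transmission on an already created and connect()ed CAN_BCM socket (socket(PF_CAN, SOCK_DGRAM, CAN_BCM)):

#include <linux/can.h>
#include <linux/can/bcm.h>
#include <string.h>
#include <unistd.h>

struct bcm_tx {
        struct bcm_msg_head head;
        struct can_frame frame;
};

static int start_cyclic_tx(int bcm_sock, canid_t id)
{
        struct bcm_tx msg;

        memset(&msg, 0, sizeof(msg));
        msg.head.opcode = TX_SETUP;
        msg.head.flags = SETTIMER | STARTTIMER;
        msg.head.count = 0;                 /* no initial burst, use ival2 */
        msg.head.ival2.tv_sec = 1;          /* struct bcm_timeval now, same members */
        msg.head.ival2.tv_usec = 0;
        msg.head.can_id = id;
        msg.head.nframes = 1;
        msg.frame.can_id = id;
        msg.frame.can_dlc = 2;
        msg.frame.data[0] = 0xde;
        msg.frame.data[1] = 0xad;

        return write(bcm_sock, &msg, sizeof(msg)) == sizeof(msg) ? 0 : -1;
}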
*/ + __u8 payload[0]; /* 493 bytes */ }; #define ETH_ENCAP_HDR_SIZE 14 - struct arc_cap { __u8 proto; - __u8 cookie[sizeof(int)]; /* Actually NOT sent over the network */ + __u8 cookie[sizeof(int)]; + /* Actually NOT sent over the network */ union { __u8 ack; - __u8 raw[0]; /* 507 bytes */ + __u8 raw[0]; /* 507 bytes */ } mes; }; @@ -105,9 +102,9 @@ struct arc_cap { * driver. */ struct arc_hardware { - __u8 source, /* source ARCnet - filled in automagically */ - dest, /* destination ARCnet - 0 for broadcast */ - offset[2]; /* offset bytes (some weird semantics) */ + __u8 source; /* source ARCnet - filled in automagically */ + __u8 dest; /* destination ARCnet - 0 for broadcast */ + __u8 offset[2]; /* offset bytes (some weird semantics) */ }; #define ARC_HDR_SIZE 4 @@ -116,17 +113,17 @@ struct arc_hardware { * when you do a raw packet capture). */ struct archdr { - /* hardware requirements */ - struct arc_hardware hard; - - /* arcnet encapsulation-specific bits */ - union { - struct arc_rfc1201 rfc1201; - struct arc_rfc1051 rfc1051; - struct arc_eth_encap eth_encap; - struct arc_cap cap; - __u8 raw[0]; /* 508 bytes */ - } soft; + /* hardware requirements */ + struct arc_hardware hard; + + /* arcnet encapsulation-specific bits */ + union { + struct arc_rfc1201 rfc1201; + struct arc_rfc1051 rfc1051; + struct arc_eth_encap eth_encap; + struct arc_cap cap; + __u8 raw[0]; /* 508 bytes */ + } soft; }; #endif /* _LINUX_IF_ARCNET_H */ diff --git a/include/uapi/linux/if_bridge.h b/include/uapi/linux/if_bridge.h index 3635b7797508..18db14477bdd 100644 --- a/include/uapi/linux/if_bridge.h +++ b/include/uapi/linux/if_bridge.h @@ -127,6 +127,7 @@ enum { #define BRIDGE_VLAN_INFO_UNTAGGED (1<<2) /* VLAN egresses untagged */ #define BRIDGE_VLAN_INFO_RANGE_BEGIN (1<<3) /* VLAN is start of vlan range */ #define BRIDGE_VLAN_INFO_RANGE_END (1<<4) /* VLAN is end of vlan range */ +#define BRIDGE_VLAN_INFO_BRENTRY (1<<5) /* Global bridge VLAN entry */ struct bridge_vlan_info { __u16 flags; diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h index 3a5f263cfc2f..5ad57375a99f 100644 --- a/include/uapi/linux/if_link.h +++ b/include/uapi/linux/if_link.h @@ -232,11 +232,47 @@ enum { IFLA_BR_PRIORITY, IFLA_BR_VLAN_FILTERING, IFLA_BR_VLAN_PROTOCOL, + IFLA_BR_GROUP_FWD_MASK, + IFLA_BR_ROOT_ID, + IFLA_BR_BRIDGE_ID, + IFLA_BR_ROOT_PORT, + IFLA_BR_ROOT_PATH_COST, + IFLA_BR_TOPOLOGY_CHANGE, + IFLA_BR_TOPOLOGY_CHANGE_DETECTED, + IFLA_BR_HELLO_TIMER, + IFLA_BR_TCN_TIMER, + IFLA_BR_TOPOLOGY_CHANGE_TIMER, + IFLA_BR_GC_TIMER, + IFLA_BR_GROUP_ADDR, + IFLA_BR_FDB_FLUSH, + IFLA_BR_MCAST_ROUTER, + IFLA_BR_MCAST_SNOOPING, + IFLA_BR_MCAST_QUERY_USE_IFADDR, + IFLA_BR_MCAST_QUERIER, + IFLA_BR_MCAST_HASH_ELASTICITY, + IFLA_BR_MCAST_HASH_MAX, + IFLA_BR_MCAST_LAST_MEMBER_CNT, + IFLA_BR_MCAST_STARTUP_QUERY_CNT, + IFLA_BR_MCAST_LAST_MEMBER_INTVL, + IFLA_BR_MCAST_MEMBERSHIP_INTVL, + IFLA_BR_MCAST_QUERIER_INTVL, + IFLA_BR_MCAST_QUERY_INTVL, + IFLA_BR_MCAST_QUERY_RESPONSE_INTVL, + IFLA_BR_MCAST_STARTUP_QUERY_INTVL, + IFLA_BR_NF_CALL_IPTABLES, + IFLA_BR_NF_CALL_IP6TABLES, + IFLA_BR_NF_CALL_ARPTABLES, + IFLA_BR_VLAN_DEFAULT_PVID, __IFLA_BR_MAX, }; #define IFLA_BR_MAX (__IFLA_BR_MAX - 1) +struct ifla_bridge_id { + __u8 prio[2]; + __u8 addr[6]; /* ETH_ALEN */ +}; + enum { BRIDGE_MODE_UNSPEC, BRIDGE_MODE_HAIRPIN, @@ -256,6 +292,19 @@ enum { IFLA_BRPORT_PROXYARP, /* proxy ARP */ IFLA_BRPORT_LEARNING_SYNC, /* mac learning sync from device */ IFLA_BRPORT_PROXYARP_WIFI, /* proxy ARP for Wi-Fi */ + IFLA_BRPORT_ROOT_ID, /* designated 
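[Editorial example, not part of this diff] The new ifla_bridge_id structure carried by attributes such as IFLA_BR_ROOT_ID and IFLA_BRPORT_BRIDGE_ID is just the 2-byte priority plus the 6-byte MAC of the STP bridge identifier. A small sketch of the conventional "priority.address" rendering, assuming the patched header is installed:

#include <stdio.h>
#include <linux/if_link.h>      /* assumes the ifla_bridge_id addition above */

static void print_bridge_id(const struct ifla_bridge_id *id)
{
        printf("%02x%02x.%02x%02x%02x%02x%02x%02x\n",
               id->prio[0], id->prio[1],
               id->addr[0], id->addr[1], id->addr[2],
               id->addr[3], id->addr[4], id->addr[5]);
}

A bridge with the default priority would print as 8000.<mac>, matching what bridge-utils and iproute2 display.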
root */ + IFLA_BRPORT_BRIDGE_ID, /* designated bridge */ + IFLA_BRPORT_DESIGNATED_PORT, + IFLA_BRPORT_DESIGNATED_COST, + IFLA_BRPORT_ID, + IFLA_BRPORT_NO, + IFLA_BRPORT_TOPOLOGY_CHANGE_ACK, + IFLA_BRPORT_CONFIG_PENDING, + IFLA_BRPORT_MESSAGE_AGE_TIMER, + IFLA_BRPORT_FORWARD_DELAY_TIMER, + IFLA_BRPORT_HOLD_TIMER, + IFLA_BRPORT_FLUSH, + IFLA_BRPORT_MULTICAST_ROUTER, __IFLA_BRPORT_MAX }; #define IFLA_BRPORT_MAX (__IFLA_BRPORT_MAX - 1) @@ -412,6 +461,7 @@ enum { IFLA_GENEVE_TOS, IFLA_GENEVE_PORT, /* destination port */ IFLA_GENEVE_COLLECT_METADATA, + IFLA_GENEVE_REMOTE6, __IFLA_GENEVE_MAX }; #define IFLA_GENEVE_MAX (__IFLA_GENEVE_MAX - 1) @@ -501,6 +551,7 @@ enum { * on/off switch */ IFLA_VF_STATS, /* network device statistics */ + IFLA_VF_TRUST, /* Trust VF */ __IFLA_VF_MAX, }; @@ -562,6 +613,11 @@ enum { #define IFLA_VF_STATS_MAX (__IFLA_VF_STATS_MAX - 1) +struct ifla_vf_trust { + __u32 vf; + __u32 setting; +}; + /* VF ports management section * * Nested layout of set/get msg is: diff --git a/include/uapi/linux/iio/types.h b/include/uapi/linux/iio/types.h index 2f8b11722204..7c63bd67c36e 100644 --- a/include/uapi/linux/iio/types.h +++ b/include/uapi/linux/iio/types.h @@ -35,6 +35,8 @@ enum iio_chan_type { IIO_ENERGY, IIO_DISTANCE, IIO_VELOCITY, + IIO_CONCENTRATION, + IIO_RESISTANCE, }; enum iio_modifier { @@ -72,6 +74,8 @@ enum iio_modifier { IIO_MOD_ROOT_SUM_SQUARED_X_Y_Z, IIO_MOD_I, IIO_MOD_Q, + IIO_MOD_CO2, + IIO_MOD_VOC, }; enum iio_event_type { diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index a9256f0331ae..03f3618612aa 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h @@ -183,6 +183,7 @@ struct kvm_s390_skeys { #define KVM_EXIT_EPR 23 #define KVM_EXIT_SYSTEM_EVENT 24 #define KVM_EXIT_S390_STSI 25 +#define KVM_EXIT_IOAPIC_EOI 26 /* For KVM_EXIT_INTERNAL_ERROR */ /* Emulate instruction failed. */ @@ -333,6 +334,10 @@ struct kvm_run { __u8 sel1; __u16 sel2; } s390_stsi; + /* KVM_EXIT_IOAPIC_EOI */ + struct { + __u8 vector; + } eoi; /* Fix the size of the union. */ char padding[256]; }; @@ -824,6 +829,8 @@ struct kvm_ppc_smmu_info { #define KVM_CAP_MULTI_ADDRESS_SPACE 118 #define KVM_CAP_GUEST_DEBUG_HW_BPS 119 #define KVM_CAP_GUEST_DEBUG_HW_WPS 120 +#define KVM_CAP_SPLIT_IRQCHIP 121 +#define KVM_CAP_IOEVENTFD_ANY_LENGTH 122 #ifdef KVM_CAP_IRQ_ROUTING diff --git a/include/uapi/linux/lightnvm.h b/include/uapi/linux/lightnvm.h new file mode 100644 index 000000000000..928f98997d8a --- /dev/null +++ b/include/uapi/linux/lightnvm.h @@ -0,0 +1,130 @@ +/* + * Copyright (C) 2015 CNEX Labs. All rights reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version + * 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; see the file COPYING. If not, write to + * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, + * USA. 
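[Editorial example, not part of this diff] KVM_EXIT_IOAPIC_EOI and the new eoi exit field belong with KVM_CAP_SPLIT_IRQCHIP: when the IOAPIC is modeled in userspace, the kernel exits on guest EOIs so the userspace model can propagate them. A hedged run-loop fragment, assuming the vcpu fd and its mmap()ed kvm_run area were set up elsewhere and the capability was enabled:

#include <linux/kvm.h>
#include <stdio.h>
#include <sys/ioctl.h>

static int vcpu_loop(int vcpu_fd, struct kvm_run *run)
{
        for (;;) {
                if (ioctl(vcpu_fd, KVM_RUN, 0) < 0)
                        return -1;

                switch (run->exit_reason) {
                case KVM_EXIT_IOAPIC_EOI:
                        /* userspace owns the IOAPIC model: handle the EOI
                         * for this vector before re-entering the guest */
                        printf("EOI for vector %u\n", run->eoi.vector);
                        break;
                case KVM_EXIT_HLT:
                        return 0;
                default:
                        return -1;      /* unhandled exit reason */
                }
        }
}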
+ */ + +#ifndef _UAPI_LINUX_LIGHTNVM_H +#define _UAPI_LINUX_LIGHTNVM_H + +#ifdef __KERNEL__ +#include <linux/kernel.h> +#include <linux/ioctl.h> +#else /* __KERNEL__ */ +#include <stdio.h> +#include <sys/ioctl.h> +#define DISK_NAME_LEN 32 +#endif /* __KERNEL__ */ + +#include <linux/types.h> +#include <linux/ioctl.h> + +#define NVM_TTYPE_NAME_MAX 48 +#define NVM_TTYPE_MAX 63 + +#define NVM_CTRL_FILE "/dev/lightnvm/control" + +struct nvm_ioctl_info_tgt { + __u32 version[3]; + __u32 reserved; + char tgtname[NVM_TTYPE_NAME_MAX]; +}; + +struct nvm_ioctl_info { + __u32 version[3]; /* in/out - major, minor, patch */ + __u16 tgtsize; /* number of targets */ + __u16 reserved16; /* pad to 4K page */ + __u32 reserved[12]; + struct nvm_ioctl_info_tgt tgts[NVM_TTYPE_MAX]; +}; + +enum { + NVM_DEVICE_ACTIVE = 1 << 0, +}; + +struct nvm_ioctl_device_info { + char devname[DISK_NAME_LEN]; + char bmname[NVM_TTYPE_NAME_MAX]; + __u32 bmversion[3]; + __u32 flags; + __u32 reserved[8]; +}; + +struct nvm_ioctl_get_devices { + __u32 nr_devices; + __u32 reserved[31]; + struct nvm_ioctl_device_info info[31]; +}; + +struct nvm_ioctl_create_simple { + __u32 lun_begin; + __u32 lun_end; +}; + +enum { + NVM_CONFIG_TYPE_SIMPLE = 0, +}; + +struct nvm_ioctl_create_conf { + __u32 type; + union { + struct nvm_ioctl_create_simple s; + }; +}; + +struct nvm_ioctl_create { + char dev[DISK_NAME_LEN]; /* open-channel SSD device */ + char tgttype[NVM_TTYPE_NAME_MAX]; /* target type name */ + char tgtname[DISK_NAME_LEN]; /* dev to expose target as */ + + __u32 flags; + + struct nvm_ioctl_create_conf conf; +}; + +struct nvm_ioctl_remove { + char tgtname[DISK_NAME_LEN]; + + __u32 flags; +}; + + +/* The ioctl type, 'L', 0x20 - 0x2F documented in ioctl-number.txt */ +enum { + /* top level cmds */ + NVM_INFO_CMD = 0x20, + NVM_GET_DEVICES_CMD, + + /* device level cmds */ + NVM_DEV_CREATE_CMD, + NVM_DEV_REMOVE_CMD, +}; + +#define NVM_IOCTL 'L' /* 0x4c */ + +#define NVM_INFO _IOWR(NVM_IOCTL, NVM_INFO_CMD, \ + struct nvm_ioctl_info) +#define NVM_GET_DEVICES _IOR(NVM_IOCTL, NVM_GET_DEVICES_CMD, \ + struct nvm_ioctl_get_devices) +#define NVM_DEV_CREATE _IOW(NVM_IOCTL, NVM_DEV_CREATE_CMD, \ + struct nvm_ioctl_create) +#define NVM_DEV_REMOVE _IOW(NVM_IOCTL, NVM_DEV_REMOVE_CMD, \ + struct nvm_ioctl_remove) + +#define NVM_VERSION_MAJOR 1 +#define NVM_VERSION_MINOR 0 +#define NVM_VERSION_PATCHLEVEL 0 + +#endif diff --git a/include/uapi/linux/loop.h b/include/uapi/linux/loop.h index e0cecd2eabdc..c8125ec1f4f2 100644 --- a/include/uapi/linux/loop.h +++ b/include/uapi/linux/loop.h @@ -21,6 +21,7 @@ enum { LO_FLAGS_READ_ONLY = 1, LO_FLAGS_AUTOCLEAR = 4, LO_FLAGS_PARTSCAN = 8, + LO_FLAGS_DIRECT_IO = 16, }; #include <asm/posix_types.h> /* for __kernel_old_dev_t */ @@ -86,6 +87,7 @@ struct loop_info64 { #define LOOP_GET_STATUS64 0x4C05 #define LOOP_CHANGE_FD 0x4C06 #define LOOP_SET_CAPACITY 0x4C07 +#define LOOP_SET_DIRECT_IO 0x4C08 /* /dev/loop-control interface */ #define LOOP_CTL_ADD 0x4C80 diff --git a/include/uapi/linux/lwtunnel.h b/include/uapi/linux/lwtunnel.h index 34141a5dfe74..f8b01887a495 100644 --- a/include/uapi/linux/lwtunnel.h +++ b/include/uapi/linux/lwtunnel.h @@ -21,8 +21,6 @@ enum lwtunnel_ip_t { LWTUNNEL_IP_SRC, LWTUNNEL_IP_TTL, LWTUNNEL_IP_TOS, - LWTUNNEL_IP_SPORT, - LWTUNNEL_IP_DPORT, LWTUNNEL_IP_FLAGS, __LWTUNNEL_IP_MAX, }; @@ -36,8 +34,6 @@ enum lwtunnel_ip6_t { LWTUNNEL_IP6_SRC, LWTUNNEL_IP6_HOPLIMIT, LWTUNNEL_IP6_TC, - LWTUNNEL_IP6_SPORT, - LWTUNNEL_IP6_DPORT, LWTUNNEL_IP6_FLAGS, __LWTUNNEL_IP6_MAX, }; diff --git 
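[Editorial example, not part of this diff] The new LightNVM uapi exposes open-channel SSD management through ioctls on the control node defined above. A minimal sketch that queries the subsystem with NVM_INFO:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/lightnvm.h>     /* assumes the new header above is installed */

int main(void)
{
        struct nvm_ioctl_info info;
        int fd = open(NVM_CTRL_FILE, O_RDWR);   /* /dev/lightnvm/control */

        if (fd < 0)
                return 1;

        memset(&info, 0, sizeof(info));
        if (ioctl(fd, NVM_INFO, &info) == 0)
                printf("lightnvm %u.%u.%u, %u target type(s)\n",
                       info.version[0], info.version[1], info.version[2],
                       info.tgtsize);

        close(fd);
        return 0;
}

Creating a target would instead fill struct nvm_ioctl_create (device name, target type, and a LUN range via the simple config) and issue NVM_DEV_CREATE.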
a/include/uapi/linux/magic.h b/include/uapi/linux/magic.h index 7b1425a6b370..accb036bbc9c 100644 --- a/include/uapi/linux/magic.h +++ b/include/uapi/linux/magic.h @@ -75,5 +75,6 @@ #define ANON_INODE_FS_MAGIC 0x09041934 #define BTRFS_TEST_MAGIC 0x73727279 #define NSFS_MAGIC 0x6e736673 +#define BPF_FS_MAGIC 0xcafe4a11 #endif /* __LINUX_MAGIC_H__ */ diff --git a/include/uapi/linux/mic_common.h b/include/uapi/linux/mic_common.h index 302a2ced373c..e9686372029d 100644 --- a/include/uapi/linux/mic_common.h +++ b/include/uapi/linux/mic_common.h @@ -75,12 +75,7 @@ struct mic_device_ctrl { * struct mic_bootparam: Virtio device independent information in device page * * @magic: A magic value used by the card to ensure it can see the host - * @c2h_shutdown_db: Card to Host shutdown doorbell set by host - * @h2c_shutdown_db: Host to Card shutdown doorbell set by card * @h2c_config_db: Host to Card Virtio config doorbell set by card - * @shutdown_status: Card shutdown status set by card - * @shutdown_card: Set to 1 by the host when a card shutdown is initiated - * @tot_nodes: Total number of nodes in the SCIF network * @node_id: Unique id of the node * @h2c_scif_db - Host to card SCIF doorbell set by card * @c2h_scif_db - Card to host SCIF doorbell set by host @@ -89,12 +84,7 @@ struct mic_device_ctrl { */ struct mic_bootparam { __le32 magic; - __s8 c2h_shutdown_db; - __s8 h2c_shutdown_db; __s8 h2c_config_db; - __u8 shutdown_status; - __u8 shutdown_card; - __u8 tot_nodes; __u8 node_id; __u8 h2c_scif_db; __u8 c2h_scif_db; @@ -219,12 +209,12 @@ static inline unsigned mic_total_desc_size(struct mic_device_desc *desc) * enum mic_states - MIC states. */ enum mic_states { - MIC_OFFLINE = 0, + MIC_READY = 0, + MIC_BOOTING, MIC_ONLINE, MIC_SHUTTING_DOWN, + MIC_RESETTING, MIC_RESET_FAILED, - MIC_SUSPENDING, - MIC_SUSPENDED, MIC_LAST }; diff --git a/include/uapi/linux/mmc/ioctl.h b/include/uapi/linux/mmc/ioctl.h index 1f5e68923929..7e385b83b9d8 100644 --- a/include/uapi/linux/mmc/ioctl.h +++ b/include/uapi/linux/mmc/ioctl.h @@ -45,8 +45,24 @@ struct mmc_ioc_cmd { }; #define mmc_ioc_cmd_set_data(ic, ptr) ic.data_ptr = (__u64)(unsigned long) ptr -#define MMC_IOC_CMD _IOWR(MMC_BLOCK_MAJOR, 0, struct mmc_ioc_cmd) +/** + * struct mmc_ioc_multi_cmd - multi command information + * @num_of_cmds: Number of commands to send. Must be equal to or less than + * MMC_IOC_MAX_CMDS. + * @cmds: Array of commands with length equal to 'num_of_cmds' + */ +struct mmc_ioc_multi_cmd { + __u64 num_of_cmds; + struct mmc_ioc_cmd cmds[0]; +}; +#define MMC_IOC_CMD _IOWR(MMC_BLOCK_MAJOR, 0, struct mmc_ioc_cmd) +/* + * MMC_IOC_MULTI_CMD: Used to send an array of MMC commands described by + * the structure mmc_ioc_multi_cmd. The MMC driver will issue all + * commands in array in sequence to card. + */ +#define MMC_IOC_MULTI_CMD _IOWR(MMC_BLOCK_MAJOR, 1, struct mmc_ioc_multi_cmd) /* * Since this ioctl is only meant to enhance (and not replace) normal access * to the mmc bus device, an upper data transfer limit of MMC_IOC_MAX_BYTES @@ -54,4 +70,5 @@ struct mmc_ioc_cmd { * block device operations. 
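[Editorial example, not part of this diff] MMC_IOC_MULTI_CMD lets userspace hand the driver a whole sequence of commands (bounded by MMC_IOC_MAX_CMDS, defined just below) that is issued to the card back to back, which matters for operations such as RPMB access that must not be interleaved with other traffic. A hedged helper sketch; fd is an open mmc block device such as /dev/mmcblk0:

#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/mmc/ioctl.h>    /* assumes the mmc_ioc_multi_cmd addition above */

static int mmc_send_multi(int fd, const struct mmc_ioc_cmd *cmds, unsigned int n)
{
        struct mmc_ioc_multi_cmd *multi;
        int ret;

        /* the flexible cmds[] array follows the fixed header */
        multi = calloc(1, sizeof(*multi) + n * sizeof(struct mmc_ioc_cmd));
        if (!multi)
                return -1;

        multi->num_of_cmds = n;
        memcpy(multi->cmds, cmds, n * sizeof(struct mmc_ioc_cmd));

        ret = ioctl(fd, MMC_IOC_MULTI_CMD, multi);
        free(multi);
        return ret;
}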
*/ #define MMC_IOC_MAX_BYTES (512L * 256) +#define MMC_IOC_MAX_CMDS 255 #endif /* LINUX_MMC_IOCTL_H */ diff --git a/include/uapi/linux/netfilter/nfnetlink_log.h b/include/uapi/linux/netfilter/nfnetlink_log.h index 90c2c9575bac..fb21f0c717a1 100644 --- a/include/uapi/linux/netfilter/nfnetlink_log.h +++ b/include/uapi/linux/netfilter/nfnetlink_log.h @@ -51,6 +51,8 @@ enum nfulnl_attr_type { NFULA_HWTYPE, /* hardware type */ NFULA_HWHEADER, /* hardware header */ NFULA_HWLEN, /* hardware header length */ + NFULA_CT, /* nf_conntrack_netlink.h */ + NFULA_CT_INFO, /* enum ip_conntrack_info */ __NFULA_MAX }; @@ -93,5 +95,6 @@ enum nfulnl_attr_config { #define NFULNL_CFG_F_SEQ 0x0001 #define NFULNL_CFG_F_SEQ_GLOBAL 0x0002 +#define NFULNL_CFG_F_CONNTRACK 0x0004 #endif /* _NFNETLINK_LOG_H */ diff --git a/include/uapi/linux/netlink.h b/include/uapi/linux/netlink.h index 6f3fe16cd22a..f095155d8749 100644 --- a/include/uapi/linux/netlink.h +++ b/include/uapi/linux/netlink.h @@ -54,6 +54,7 @@ struct nlmsghdr { #define NLM_F_ACK 4 /* Reply with ack, with zero or error code */ #define NLM_F_ECHO 8 /* Echo this request */ #define NLM_F_DUMP_INTR 16 /* Dump was inconsistent due to sequence change */ +#define NLM_F_DUMP_FILTERED 32 /* Dump was filtered as requested */ /* Modifiers to GET request */ #define NLM_F_ROOT 0x100 /* specify tree root */ diff --git a/include/uapi/linux/nfc.h b/include/uapi/linux/nfc.h index dd3f75389076..399f39ff8048 100644 --- a/include/uapi/linux/nfc.h +++ b/include/uapi/linux/nfc.h @@ -86,6 +86,7 @@ * for this event is the application ID (AID). * @NFC_CMD_GET_SE: Dump all discovered secure elements from an NFC controller. * @NFC_CMD_SE_IO: Send/Receive APDUs to/from the selected secure element. + * @NFC_CMD_ACTIVATE_TARGET: Request NFC controller to reactivate target. * @NFC_CMD_VENDOR: Vendor specific command, to be implemented directly * from the driver in order to support hardware specific operations. */ @@ -156,6 +157,7 @@ enum nfc_commands { * @NFC_ATTR_APDU: Secure element APDU * @NFC_ATTR_TARGET_ISO15693_DSFID: ISO 15693 Data Storage Format Identifier * @NFC_ATTR_TARGET_ISO15693_UID: ISO 15693 Unique Identifier + * @NFC_ATTR_SE_PARAMS: Parameters data from an evt_transaction * @NFC_ATTR_VENDOR_ID: NFC manufacturer unique ID, typically an OUI * @NFC_ATTR_VENDOR_SUBCMD: Vendor specific sub command * @NFC_ATTR_VENDOR_DATA: Vendor specific data, to be optionally passed diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h index c0ab6b0a3919..1f0b4cf5dd03 100644 --- a/include/uapi/linux/nl80211.h +++ b/include/uapi/linux/nl80211.h @@ -10,6 +10,7 @@ * Copyright 2008, 2009 Luis R. Rodriguez <lrodriguez@atheros.com> * Copyright 2008 Jouni Malinen <jouni.malinen@atheros.com> * Copyright 2008 Colin McCabe <colin@cozybit.com> + * Copyright 2015 Intel Deutschland GmbH * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -328,7 +329,15 @@ * partial scan results may be available * * @NL80211_CMD_START_SCHED_SCAN: start a scheduled scan at certain - * intervals, as specified by %NL80211_ATTR_SCHED_SCAN_INTERVAL. + * intervals and certain number of cycles, as specified by + * %NL80211_ATTR_SCHED_SCAN_PLANS. If %NL80211_ATTR_SCHED_SCAN_PLANS is + * not specified and only %NL80211_ATTR_SCHED_SCAN_INTERVAL is specified, + * scheduled scan will run in an infinite loop with the specified interval. + * These attributes are mutually exculsive, + * i.e. 
NL80211_ATTR_SCHED_SCAN_INTERVAL must not be passed if + * NL80211_ATTR_SCHED_SCAN_PLANS is defined. + * If for some reason scheduled scan is aborted by the driver, all scan + * plans are canceled (including scan plans that did not start yet). * Like with normal scans, if SSIDs (%NL80211_ATTR_SCAN_SSIDS) * are passed, they are used in the probe requests. For * broadcast, a broadcast SSID must be passed (ie. an empty @@ -1761,6 +1770,19 @@ enum nl80211_commands { * @NL80211_ATTR_REG_INDOOR: flag attribute, if set indicates that the device * is operating in an indoor environment. * + * @NL80211_ATTR_MAX_NUM_SCHED_SCAN_PLANS: maximum number of scan plans for + * scheduled scan supported by the device (u32), a wiphy attribute. + * @NL80211_ATTR_MAX_SCAN_PLAN_INTERVAL: maximum interval (in seconds) for + * a scan plan (u32), a wiphy attribute. + * @NL80211_ATTR_MAX_SCAN_PLAN_ITERATIONS: maximum number of iterations in + * a scan plan (u32), a wiphy attribute. + * @NL80211_ATTR_SCHED_SCAN_PLANS: a list of scan plans for scheduled scan. + * Each scan plan defines the number of scan iterations and the interval + * between scans. The last scan plan will always run infinitely, + * thus it must not specify the number of iterations, only the interval + * between scans. The scan plans are executed sequentially. + * Each scan plan is a nested attribute of &enum nl80211_sched_scan_plan. + * * @NUM_NL80211_ATTR: total number of nl80211_attrs available * @NL80211_ATTR_MAX: highest attribute number currently defined * @__NL80211_ATTR_AFTER_LAST: internal use @@ -2130,6 +2152,11 @@ enum nl80211_attrs { NL80211_ATTR_REG_INDOOR, + NL80211_ATTR_MAX_NUM_SCHED_SCAN_PLANS, + NL80211_ATTR_MAX_SCAN_PLAN_INTERVAL, + NL80211_ATTR_MAX_SCAN_PLAN_ITERATIONS, + NL80211_ATTR_SCHED_SCAN_PLANS, + /* add attributes here, update the policy in nl80211.c */ __NL80211_ATTR_AFTER_LAST, @@ -3364,6 +3391,9 @@ enum nl80211_bss_scan_width { * (not present if no beacon frame has been received yet) * @NL80211_BSS_PRESP_DATA: the data in @NL80211_BSS_INFORMATION_ELEMENTS and * @NL80211_BSS_TSF is known to be from a probe response (flag attribute) + * @NL80211_BSS_LAST_SEEN_BOOTTIME: CLOCK_BOOTTIME timestamp when this entry + * was last updated by a received frame. The value is expected to be + * accurate to about 10ms. (u64, nanoseconds) * @__NL80211_BSS_AFTER_LAST: internal * @NL80211_BSS_MAX: highest BSS attribute */ @@ -3383,6 +3413,7 @@ enum nl80211_bss { NL80211_BSS_CHAN_WIDTH, NL80211_BSS_BEACON_TSF, NL80211_BSS_PRESP_DATA, + NL80211_BSS_LAST_SEEN_BOOTTIME, /* keep last */ __NL80211_BSS_AFTER_LAST, @@ -4589,4 +4620,28 @@ enum nl80211_tdls_peer_capability { NL80211_TDLS_PEER_WMM = 1<<2, }; +/** + * enum nl80211_sched_scan_plan - scanning plan for scheduled scan + * @__NL80211_SCHED_SCAN_PLAN_INVALID: attribute number 0 is reserved + * @NL80211_SCHED_SCAN_PLAN_INTERVAL: interval between scan iterations. In + * seconds (u32). + * @NL80211_SCHED_SCAN_PLAN_ITERATIONS: number of scan iterations in this + * scan plan (u32). The last scan plan must not specify this attribute + * because it will run infinitely. A value of zero is invalid as it will + * make the scan plan meaningless. 
+ * @NL80211_SCHED_SCAN_PLAN_MAX: highest scheduled scan plan attribute number + * currently defined + * @__NL80211_SCHED_SCAN_PLAN_AFTER_LAST: internal use + */ +enum nl80211_sched_scan_plan { + __NL80211_SCHED_SCAN_PLAN_INVALID, + NL80211_SCHED_SCAN_PLAN_INTERVAL, + NL80211_SCHED_SCAN_PLAN_ITERATIONS, + + /* keep last */ + __NL80211_SCHED_SCAN_PLAN_AFTER_LAST, + NL80211_SCHED_SCAN_PLAN_MAX = + __NL80211_SCHED_SCAN_PLAN_AFTER_LAST - 1 +}; + #endif /* __LINUX_NL80211_H */ diff --git a/include/uapi/linux/nvme.h b/include/uapi/linux/nvme.h deleted file mode 100644 index 8864194a4151..000000000000 --- a/include/uapi/linux/nvme.h +++ /dev/null @@ -1,589 +0,0 @@ -/* - * Definitions for the NVM Express interface - * Copyright (c) 2011-2014, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef _UAPI_LINUX_NVME_H -#define _UAPI_LINUX_NVME_H - -#include <linux/types.h> - -struct nvme_id_power_state { - __le16 max_power; /* centiwatts */ - __u8 rsvd2; - __u8 flags; - __le32 entry_lat; /* microseconds */ - __le32 exit_lat; /* microseconds */ - __u8 read_tput; - __u8 read_lat; - __u8 write_tput; - __u8 write_lat; - __le16 idle_power; - __u8 idle_scale; - __u8 rsvd19; - __le16 active_power; - __u8 active_work_scale; - __u8 rsvd23[9]; -}; - -enum { - NVME_PS_FLAGS_MAX_POWER_SCALE = 1 << 0, - NVME_PS_FLAGS_NON_OP_STATE = 1 << 1, -}; - -struct nvme_id_ctrl { - __le16 vid; - __le16 ssvid; - char sn[20]; - char mn[40]; - char fr[8]; - __u8 rab; - __u8 ieee[3]; - __u8 mic; - __u8 mdts; - __u16 cntlid; - __u32 ver; - __u8 rsvd84[172]; - __le16 oacs; - __u8 acl; - __u8 aerl; - __u8 frmw; - __u8 lpa; - __u8 elpe; - __u8 npss; - __u8 avscc; - __u8 apsta; - __le16 wctemp; - __le16 cctemp; - __u8 rsvd270[242]; - __u8 sqes; - __u8 cqes; - __u8 rsvd514[2]; - __le32 nn; - __le16 oncs; - __le16 fuses; - __u8 fna; - __u8 vwc; - __le16 awun; - __le16 awupf; - __u8 nvscc; - __u8 rsvd531; - __le16 acwu; - __u8 rsvd534[2]; - __le32 sgls; - __u8 rsvd540[1508]; - struct nvme_id_power_state psd[32]; - __u8 vs[1024]; -}; - -enum { - NVME_CTRL_ONCS_COMPARE = 1 << 0, - NVME_CTRL_ONCS_WRITE_UNCORRECTABLE = 1 << 1, - NVME_CTRL_ONCS_DSM = 1 << 2, - NVME_CTRL_VWC_PRESENT = 1 << 0, -}; - -struct nvme_lbaf { - __le16 ms; - __u8 ds; - __u8 rp; -}; - -struct nvme_id_ns { - __le64 nsze; - __le64 ncap; - __le64 nuse; - __u8 nsfeat; - __u8 nlbaf; - __u8 flbas; - __u8 mc; - __u8 dpc; - __u8 dps; - __u8 nmic; - __u8 rescap; - __u8 fpi; - __u8 rsvd33; - __le16 nawun; - __le16 nawupf; - __le16 nacwu; - __le16 nabsn; - __le16 nabo; - __le16 nabspf; - __u16 rsvd46; - __le64 nvmcap[2]; - __u8 rsvd64[40]; - __u8 nguid[16]; - __u8 eui64[8]; - struct nvme_lbaf lbaf[16]; - __u8 rsvd192[192]; - __u8 vs[3712]; -}; - -enum { - NVME_NS_FEAT_THIN = 1 << 0, - NVME_NS_FLBAS_LBA_MASK = 0xf, - NVME_NS_FLBAS_META_EXT = 0x10, - NVME_LBAF_RP_BEST = 0, - NVME_LBAF_RP_BETTER = 1, - NVME_LBAF_RP_GOOD = 2, - NVME_LBAF_RP_DEGRADED = 3, - NVME_NS_DPC_PI_LAST = 1 << 4, - NVME_NS_DPC_PI_FIRST = 1 << 3, - NVME_NS_DPC_PI_TYPE3 = 1 << 2, - NVME_NS_DPC_PI_TYPE2 = 1 << 1, - NVME_NS_DPC_PI_TYPE1 = 1 << 0, - NVME_NS_DPS_PI_FIRST = 1 << 3, - 
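[Editorial example, not part of this diff] The scan-plan attributes above replace the single NL80211_ATTR_SCHED_SCAN_INTERVAL with a nested list, each entry giving an interval and, except for the last open-ended plan, an iteration count. A sketch of building that nesting with libnl-3, following the attribute layout documented above (error handling trimmed):

#include <netlink/attr.h>
#include <netlink/msg.h>
#include <linux/nl80211.h>

/* append two plans: 5 iterations every 30 s, then every 300 s until stopped */
static int put_sched_scan_plans(struct nl_msg *msg)
{
        struct nlattr *plans, *plan;

        plans = nla_nest_start(msg, NL80211_ATTR_SCHED_SCAN_PLANS);
        if (!plans)
                return -1;

        plan = nla_nest_start(msg, 1);
        nla_put_u32(msg, NL80211_SCHED_SCAN_PLAN_INTERVAL, 30);
        nla_put_u32(msg, NL80211_SCHED_SCAN_PLAN_ITERATIONS, 5);
        nla_nest_end(msg, plan);

        /* last plan: interval only, so it runs until the scan is stopped */
        plan = nla_nest_start(msg, 2);
        nla_put_u32(msg, NL80211_SCHED_SCAN_PLAN_INTERVAL, 300);
        nla_nest_end(msg, plan);

        nla_nest_end(msg, plans);
        return 0;
}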
NVME_NS_DPS_PI_MASK = 0x7, - NVME_NS_DPS_PI_TYPE1 = 1, - NVME_NS_DPS_PI_TYPE2 = 2, - NVME_NS_DPS_PI_TYPE3 = 3, -}; - -struct nvme_smart_log { - __u8 critical_warning; - __u8 temperature[2]; - __u8 avail_spare; - __u8 spare_thresh; - __u8 percent_used; - __u8 rsvd6[26]; - __u8 data_units_read[16]; - __u8 data_units_written[16]; - __u8 host_reads[16]; - __u8 host_writes[16]; - __u8 ctrl_busy_time[16]; - __u8 power_cycles[16]; - __u8 power_on_hours[16]; - __u8 unsafe_shutdowns[16]; - __u8 media_errors[16]; - __u8 num_err_log_entries[16]; - __le32 warning_temp_time; - __le32 critical_comp_time; - __le16 temp_sensor[8]; - __u8 rsvd216[296]; -}; - -enum { - NVME_SMART_CRIT_SPARE = 1 << 0, - NVME_SMART_CRIT_TEMPERATURE = 1 << 1, - NVME_SMART_CRIT_RELIABILITY = 1 << 2, - NVME_SMART_CRIT_MEDIA = 1 << 3, - NVME_SMART_CRIT_VOLATILE_MEMORY = 1 << 4, -}; - -enum { - NVME_AER_NOTICE_NS_CHANGED = 0x0002, -}; - -struct nvme_lba_range_type { - __u8 type; - __u8 attributes; - __u8 rsvd2[14]; - __u64 slba; - __u64 nlb; - __u8 guid[16]; - __u8 rsvd48[16]; -}; - -enum { - NVME_LBART_TYPE_FS = 0x01, - NVME_LBART_TYPE_RAID = 0x02, - NVME_LBART_TYPE_CACHE = 0x03, - NVME_LBART_TYPE_SWAP = 0x04, - - NVME_LBART_ATTRIB_TEMP = 1 << 0, - NVME_LBART_ATTRIB_HIDE = 1 << 1, -}; - -struct nvme_reservation_status { - __le32 gen; - __u8 rtype; - __u8 regctl[2]; - __u8 resv5[2]; - __u8 ptpls; - __u8 resv10[13]; - struct { - __le16 cntlid; - __u8 rcsts; - __u8 resv3[5]; - __le64 hostid; - __le64 rkey; - } regctl_ds[]; -}; - -/* I/O commands */ - -enum nvme_opcode { - nvme_cmd_flush = 0x00, - nvme_cmd_write = 0x01, - nvme_cmd_read = 0x02, - nvme_cmd_write_uncor = 0x04, - nvme_cmd_compare = 0x05, - nvme_cmd_write_zeroes = 0x08, - nvme_cmd_dsm = 0x09, - nvme_cmd_resv_register = 0x0d, - nvme_cmd_resv_report = 0x0e, - nvme_cmd_resv_acquire = 0x11, - nvme_cmd_resv_release = 0x15, -}; - -struct nvme_common_command { - __u8 opcode; - __u8 flags; - __u16 command_id; - __le32 nsid; - __le32 cdw2[2]; - __le64 metadata; - __le64 prp1; - __le64 prp2; - __le32 cdw10[6]; -}; - -struct nvme_rw_command { - __u8 opcode; - __u8 flags; - __u16 command_id; - __le32 nsid; - __u64 rsvd2; - __le64 metadata; - __le64 prp1; - __le64 prp2; - __le64 slba; - __le16 length; - __le16 control; - __le32 dsmgmt; - __le32 reftag; - __le16 apptag; - __le16 appmask; -}; - -enum { - NVME_RW_LR = 1 << 15, - NVME_RW_FUA = 1 << 14, - NVME_RW_DSM_FREQ_UNSPEC = 0, - NVME_RW_DSM_FREQ_TYPICAL = 1, - NVME_RW_DSM_FREQ_RARE = 2, - NVME_RW_DSM_FREQ_READS = 3, - NVME_RW_DSM_FREQ_WRITES = 4, - NVME_RW_DSM_FREQ_RW = 5, - NVME_RW_DSM_FREQ_ONCE = 6, - NVME_RW_DSM_FREQ_PREFETCH = 7, - NVME_RW_DSM_FREQ_TEMP = 8, - NVME_RW_DSM_LATENCY_NONE = 0 << 4, - NVME_RW_DSM_LATENCY_IDLE = 1 << 4, - NVME_RW_DSM_LATENCY_NORM = 2 << 4, - NVME_RW_DSM_LATENCY_LOW = 3 << 4, - NVME_RW_DSM_SEQ_REQ = 1 << 6, - NVME_RW_DSM_COMPRESSED = 1 << 7, - NVME_RW_PRINFO_PRCHK_REF = 1 << 10, - NVME_RW_PRINFO_PRCHK_APP = 1 << 11, - NVME_RW_PRINFO_PRCHK_GUARD = 1 << 12, - NVME_RW_PRINFO_PRACT = 1 << 13, -}; - -struct nvme_dsm_cmd { - __u8 opcode; - __u8 flags; - __u16 command_id; - __le32 nsid; - __u64 rsvd2[2]; - __le64 prp1; - __le64 prp2; - __le32 nr; - __le32 attributes; - __u32 rsvd12[4]; -}; - -enum { - NVME_DSMGMT_IDR = 1 << 0, - NVME_DSMGMT_IDW = 1 << 1, - NVME_DSMGMT_AD = 1 << 2, -}; - -struct nvme_dsm_range { - __le32 cattr; - __le32 nlb; - __le64 slba; -}; - -/* Admin commands */ - -enum nvme_admin_opcode { - nvme_admin_delete_sq = 0x00, - nvme_admin_create_sq = 0x01, - nvme_admin_get_log_page = 0x02, - 
nvme_admin_delete_cq = 0x04, - nvme_admin_create_cq = 0x05, - nvme_admin_identify = 0x06, - nvme_admin_abort_cmd = 0x08, - nvme_admin_set_features = 0x09, - nvme_admin_get_features = 0x0a, - nvme_admin_async_event = 0x0c, - nvme_admin_activate_fw = 0x10, - nvme_admin_download_fw = 0x11, - nvme_admin_format_nvm = 0x80, - nvme_admin_security_send = 0x81, - nvme_admin_security_recv = 0x82, -}; - -enum { - NVME_QUEUE_PHYS_CONTIG = (1 << 0), - NVME_CQ_IRQ_ENABLED = (1 << 1), - NVME_SQ_PRIO_URGENT = (0 << 1), - NVME_SQ_PRIO_HIGH = (1 << 1), - NVME_SQ_PRIO_MEDIUM = (2 << 1), - NVME_SQ_PRIO_LOW = (3 << 1), - NVME_FEAT_ARBITRATION = 0x01, - NVME_FEAT_POWER_MGMT = 0x02, - NVME_FEAT_LBA_RANGE = 0x03, - NVME_FEAT_TEMP_THRESH = 0x04, - NVME_FEAT_ERR_RECOVERY = 0x05, - NVME_FEAT_VOLATILE_WC = 0x06, - NVME_FEAT_NUM_QUEUES = 0x07, - NVME_FEAT_IRQ_COALESCE = 0x08, - NVME_FEAT_IRQ_CONFIG = 0x09, - NVME_FEAT_WRITE_ATOMIC = 0x0a, - NVME_FEAT_ASYNC_EVENT = 0x0b, - NVME_FEAT_AUTO_PST = 0x0c, - NVME_FEAT_SW_PROGRESS = 0x80, - NVME_FEAT_HOST_ID = 0x81, - NVME_FEAT_RESV_MASK = 0x82, - NVME_FEAT_RESV_PERSIST = 0x83, - NVME_LOG_ERROR = 0x01, - NVME_LOG_SMART = 0x02, - NVME_LOG_FW_SLOT = 0x03, - NVME_LOG_RESERVATION = 0x80, - NVME_FWACT_REPL = (0 << 3), - NVME_FWACT_REPL_ACTV = (1 << 3), - NVME_FWACT_ACTV = (2 << 3), -}; - -struct nvme_identify { - __u8 opcode; - __u8 flags; - __u16 command_id; - __le32 nsid; - __u64 rsvd2[2]; - __le64 prp1; - __le64 prp2; - __le32 cns; - __u32 rsvd11[5]; -}; - -struct nvme_features { - __u8 opcode; - __u8 flags; - __u16 command_id; - __le32 nsid; - __u64 rsvd2[2]; - __le64 prp1; - __le64 prp2; - __le32 fid; - __le32 dword11; - __u32 rsvd12[4]; -}; - -struct nvme_create_cq { - __u8 opcode; - __u8 flags; - __u16 command_id; - __u32 rsvd1[5]; - __le64 prp1; - __u64 rsvd8; - __le16 cqid; - __le16 qsize; - __le16 cq_flags; - __le16 irq_vector; - __u32 rsvd12[4]; -}; - -struct nvme_create_sq { - __u8 opcode; - __u8 flags; - __u16 command_id; - __u32 rsvd1[5]; - __le64 prp1; - __u64 rsvd8; - __le16 sqid; - __le16 qsize; - __le16 sq_flags; - __le16 cqid; - __u32 rsvd12[4]; -}; - -struct nvme_delete_queue { - __u8 opcode; - __u8 flags; - __u16 command_id; - __u32 rsvd1[9]; - __le16 qid; - __u16 rsvd10; - __u32 rsvd11[5]; -}; - -struct nvme_abort_cmd { - __u8 opcode; - __u8 flags; - __u16 command_id; - __u32 rsvd1[9]; - __le16 sqid; - __u16 cid; - __u32 rsvd11[5]; -}; - -struct nvme_download_firmware { - __u8 opcode; - __u8 flags; - __u16 command_id; - __u32 rsvd1[5]; - __le64 prp1; - __le64 prp2; - __le32 numd; - __le32 offset; - __u32 rsvd12[4]; -}; - -struct nvme_format_cmd { - __u8 opcode; - __u8 flags; - __u16 command_id; - __le32 nsid; - __u64 rsvd2[4]; - __le32 cdw10; - __u32 rsvd11[5]; -}; - -struct nvme_command { - union { - struct nvme_common_command common; - struct nvme_rw_command rw; - struct nvme_identify identify; - struct nvme_features features; - struct nvme_create_cq create_cq; - struct nvme_create_sq create_sq; - struct nvme_delete_queue delete_queue; - struct nvme_download_firmware dlfw; - struct nvme_format_cmd format; - struct nvme_dsm_cmd dsm; - struct nvme_abort_cmd abort; - }; -}; - -enum { - NVME_SC_SUCCESS = 0x0, - NVME_SC_INVALID_OPCODE = 0x1, - NVME_SC_INVALID_FIELD = 0x2, - NVME_SC_CMDID_CONFLICT = 0x3, - NVME_SC_DATA_XFER_ERROR = 0x4, - NVME_SC_POWER_LOSS = 0x5, - NVME_SC_INTERNAL = 0x6, - NVME_SC_ABORT_REQ = 0x7, - NVME_SC_ABORT_QUEUE = 0x8, - NVME_SC_FUSED_FAIL = 0x9, - NVME_SC_FUSED_MISSING = 0xa, - NVME_SC_INVALID_NS = 0xb, - NVME_SC_CMD_SEQ_ERROR = 0xc, - 
NVME_SC_SGL_INVALID_LAST = 0xd, - NVME_SC_SGL_INVALID_COUNT = 0xe, - NVME_SC_SGL_INVALID_DATA = 0xf, - NVME_SC_SGL_INVALID_METADATA = 0x10, - NVME_SC_SGL_INVALID_TYPE = 0x11, - NVME_SC_LBA_RANGE = 0x80, - NVME_SC_CAP_EXCEEDED = 0x81, - NVME_SC_NS_NOT_READY = 0x82, - NVME_SC_RESERVATION_CONFLICT = 0x83, - NVME_SC_CQ_INVALID = 0x100, - NVME_SC_QID_INVALID = 0x101, - NVME_SC_QUEUE_SIZE = 0x102, - NVME_SC_ABORT_LIMIT = 0x103, - NVME_SC_ABORT_MISSING = 0x104, - NVME_SC_ASYNC_LIMIT = 0x105, - NVME_SC_FIRMWARE_SLOT = 0x106, - NVME_SC_FIRMWARE_IMAGE = 0x107, - NVME_SC_INVALID_VECTOR = 0x108, - NVME_SC_INVALID_LOG_PAGE = 0x109, - NVME_SC_INVALID_FORMAT = 0x10a, - NVME_SC_FIRMWARE_NEEDS_RESET = 0x10b, - NVME_SC_INVALID_QUEUE = 0x10c, - NVME_SC_FEATURE_NOT_SAVEABLE = 0x10d, - NVME_SC_FEATURE_NOT_CHANGEABLE = 0x10e, - NVME_SC_FEATURE_NOT_PER_NS = 0x10f, - NVME_SC_FW_NEEDS_RESET_SUBSYS = 0x110, - NVME_SC_BAD_ATTRIBUTES = 0x180, - NVME_SC_INVALID_PI = 0x181, - NVME_SC_READ_ONLY = 0x182, - NVME_SC_WRITE_FAULT = 0x280, - NVME_SC_READ_ERROR = 0x281, - NVME_SC_GUARD_CHECK = 0x282, - NVME_SC_APPTAG_CHECK = 0x283, - NVME_SC_REFTAG_CHECK = 0x284, - NVME_SC_COMPARE_FAILED = 0x285, - NVME_SC_ACCESS_DENIED = 0x286, - NVME_SC_DNR = 0x4000, -}; - -struct nvme_completion { - __le32 result; /* Used by admin commands to return data */ - __u32 rsvd; - __le16 sq_head; /* how much of this queue may be reclaimed */ - __le16 sq_id; /* submission queue that generated this entry */ - __u16 command_id; /* of the command which completed */ - __le16 status; /* did the command fail, and if so, why? */ -}; - -struct nvme_user_io { - __u8 opcode; - __u8 flags; - __u16 control; - __u16 nblocks; - __u16 rsvd; - __u64 metadata; - __u64 addr; - __u64 slba; - __u32 dsmgmt; - __u32 reftag; - __u16 apptag; - __u16 appmask; -}; - -struct nvme_passthru_cmd { - __u8 opcode; - __u8 flags; - __u16 rsvd1; - __u32 nsid; - __u32 cdw2; - __u32 cdw3; - __u64 metadata; - __u64 addr; - __u32 metadata_len; - __u32 data_len; - __u32 cdw10; - __u32 cdw11; - __u32 cdw12; - __u32 cdw13; - __u32 cdw14; - __u32 cdw15; - __u32 timeout_ms; - __u32 result; -}; - -#define NVME_VS(major, minor) (((major) << 16) | ((minor) << 8)) - -#define nvme_admin_cmd nvme_passthru_cmd - -#define NVME_IOCTL_ID _IO('N', 0x40) -#define NVME_IOCTL_ADMIN_CMD _IOWR('N', 0x41, struct nvme_admin_cmd) -#define NVME_IOCTL_SUBMIT_IO _IOW('N', 0x42, struct nvme_user_io) -#define NVME_IOCTL_IO_CMD _IOWR('N', 0x43, struct nvme_passthru_cmd) -#define NVME_IOCTL_RESET _IO('N', 0x44) -#define NVME_IOCTL_SUBSYS_RESET _IO('N', 0x45) - -#endif /* _UAPI_LINUX_NVME_H */ diff --git a/include/uapi/linux/nvme_ioctl.h b/include/uapi/linux/nvme_ioctl.h new file mode 100644 index 000000000000..c4b2a3f90829 --- /dev/null +++ b/include/uapi/linux/nvme_ioctl.h @@ -0,0 +1,65 @@ +/* + * Definitions for the NVM Express ioctl interface + * Copyright (c) 2011-2014, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+ */ + +#ifndef _UAPI_LINUX_NVME_IOCTL_H +#define _UAPI_LINUX_NVME_IOCTL_H + +#include <linux/types.h> + +struct nvme_user_io { + __u8 opcode; + __u8 flags; + __u16 control; + __u16 nblocks; + __u16 rsvd; + __u64 metadata; + __u64 addr; + __u64 slba; + __u32 dsmgmt; + __u32 reftag; + __u16 apptag; + __u16 appmask; +}; + +struct nvme_passthru_cmd { + __u8 opcode; + __u8 flags; + __u16 rsvd1; + __u32 nsid; + __u32 cdw2; + __u32 cdw3; + __u64 metadata; + __u64 addr; + __u32 metadata_len; + __u32 data_len; + __u32 cdw10; + __u32 cdw11; + __u32 cdw12; + __u32 cdw13; + __u32 cdw14; + __u32 cdw15; + __u32 timeout_ms; + __u32 result; +}; + +#define nvme_admin_cmd nvme_passthru_cmd + +#define NVME_IOCTL_ID _IO('N', 0x40) +#define NVME_IOCTL_ADMIN_CMD _IOWR('N', 0x41, struct nvme_admin_cmd) +#define NVME_IOCTL_SUBMIT_IO _IOW('N', 0x42, struct nvme_user_io) +#define NVME_IOCTL_IO_CMD _IOWR('N', 0x43, struct nvme_passthru_cmd) +#define NVME_IOCTL_RESET _IO('N', 0x44) +#define NVME_IOCTL_SUBSYS_RESET _IO('N', 0x45) + +#endif /* _UAPI_LINUX_NVME_IOCTL_H */ diff --git a/include/uapi/linux/openvswitch.h b/include/uapi/linux/openvswitch.h index 32e07d8cbaf4..28ccedd000f5 100644 --- a/include/uapi/linux/openvswitch.h +++ b/include/uapi/linux/openvswitch.h @@ -323,10 +323,10 @@ enum ovs_key_attr { OVS_KEY_ATTR_MPLS, /* array of struct ovs_key_mpls. * The implementation may restrict * the accepted length of the array. */ - OVS_KEY_ATTR_CT_STATE, /* u8 bitmask of OVS_CS_F_* */ + OVS_KEY_ATTR_CT_STATE, /* u32 bitmask of OVS_CS_F_* */ OVS_KEY_ATTR_CT_ZONE, /* u16 connection tracking zone. */ OVS_KEY_ATTR_CT_MARK, /* u32 connection tracking mark */ - OVS_KEY_ATTR_CT_LABEL, /* 16-octet connection tracking label */ + OVS_KEY_ATTR_CT_LABELS, /* 16-octet connection tracking label */ #ifdef __KERNEL__ OVS_KEY_ATTR_TUNNEL_INFO, /* struct ip_tunnel_info */ @@ -349,6 +349,8 @@ enum ovs_tunnel_key_attr { OVS_TUNNEL_KEY_ATTR_TP_SRC, /* be16 src Transport Port. */ OVS_TUNNEL_KEY_ATTR_TP_DST, /* be16 dst Transport Port. */ OVS_TUNNEL_KEY_ATTR_VXLAN_OPTS, /* Nested OVS_VXLAN_EXT_* */ + OVS_TUNNEL_KEY_ATTR_IPV6_SRC, /* struct in6_addr src IPv6 address. */ + OVS_TUNNEL_KEY_ATTR_IPV6_DST, /* struct in6_addr dst IPv6 address. */ __OVS_TUNNEL_KEY_ATTR_MAX }; @@ -439,9 +441,9 @@ struct ovs_key_nd { __u8 nd_tll[ETH_ALEN]; }; -#define OVS_CT_LABEL_LEN 16 -struct ovs_key_ct_label { - __u8 ct_label[OVS_CT_LABEL_LEN]; +#define OVS_CT_LABELS_LEN 16 +struct ovs_key_ct_labels { + __u8 ct_labels[OVS_CT_LABELS_LEN]; }; /* OVS_KEY_ATTR_CT_STATE flags */ @@ -449,9 +451,9 @@ struct ovs_key_ct_label { #define OVS_CS_F_ESTABLISHED 0x02 /* Part of an existing connection. */ #define OVS_CS_F_RELATED 0x04 /* Related to an established * connection. */ -#define OVS_CS_F_INVALID 0x20 /* Could not track connection. */ -#define OVS_CS_F_REPLY_DIR 0x40 /* Flow is in the reply direction. */ -#define OVS_CS_F_TRACKED 0x80 /* Conntrack has occurred. */ +#define OVS_CS_F_REPLY_DIR 0x08 /* Flow is in the reply direction. */ +#define OVS_CS_F_INVALID 0x10 /* Could not track connection. */ +#define OVS_CS_F_TRACKED 0x20 /* Conntrack has occurred. */ /** * enum ovs_flow_attr - attributes for %OVS_FLOW_* commands. @@ -618,22 +620,25 @@ struct ovs_action_hash { /** * enum ovs_ct_attr - Attributes for %OVS_ACTION_ATTR_CT action. - * @OVS_CT_ATTR_FLAGS: u32 connection tracking flags. + * @OVS_CT_ATTR_COMMIT: If present, commits the connection to the conntrack + * table. 
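[Editorial example, not part of this diff] The ioctl definitions move here unchanged from the deleted <linux/nvme.h>, so existing userspace keeps working by switching its include. A minimal sketch sending an admin Identify Controller command through NVME_IOCTL_ADMIN_CMD; the opcode 0x06 and CNS value 1 come from the NVMe specification, not from this header:

#include <fcntl.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/nvme_ioctl.h>

int main(void)
{
        struct nvme_passthru_cmd cmd;
        unsigned char *id = calloc(1, 4096);
        int fd = open("/dev/nvme0", O_RDONLY);  /* admin commands use the char device */

        if (fd < 0 || !id)
                return 1;

        memset(&cmd, 0, sizeof(cmd));
        cmd.opcode = 0x06;                      /* Identify */
        cmd.addr = (__u64)(unsigned long)id;
        cmd.data_len = 4096;
        cmd.cdw10 = 1;                          /* CNS = 1: controller data structure */

        return ioctl(fd, NVME_IOCTL_ADMIN_CMD, &cmd) ? 1 : 0;
}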
This allows future packets for the same connection to be identified + * as 'established' or 'related'. The flow key for the current packet will + * retain the pre-commit connection state. * @OVS_CT_ATTR_ZONE: u16 connection tracking zone. * @OVS_CT_ATTR_MARK: u32 value followed by u32 mask. For each bit set in the * mask, the corresponding bit in the value is copied to the connection * tracking mark field in the connection. - * @OVS_CT_ATTR_LABEL: %OVS_CT_LABEL_LEN value followed by %OVS_CT_LABEL_LEN + * @OVS_CT_ATTR_LABEL: %OVS_CT_LABELS_LEN value followed by %OVS_CT_LABELS_LEN * mask. For each bit set in the mask, the corresponding bit in the value is * copied to the connection tracking label field in the connection. * @OVS_CT_ATTR_HELPER: variable length string defining conntrack ALG. */ enum ovs_ct_attr { OVS_CT_ATTR_UNSPEC, - OVS_CT_ATTR_FLAGS, /* u8 bitmask of OVS_CT_F_*. */ + OVS_CT_ATTR_COMMIT, /* No argument, commits connection. */ OVS_CT_ATTR_ZONE, /* u16 zone id. */ OVS_CT_ATTR_MARK, /* mark to associate with this connection. */ - OVS_CT_ATTR_LABEL, /* label to associate with this connection. */ + OVS_CT_ATTR_LABELS, /* labels to associate with this connection. */ OVS_CT_ATTR_HELPER, /* netlink helper to assist detection of related connections. */ __OVS_CT_ATTR_MAX @@ -641,14 +646,6 @@ enum ovs_ct_attr { #define OVS_CT_ATTR_MAX (__OVS_CT_ATTR_MAX - 1) -/* - * OVS_CT_ATTR_FLAGS flags - bitmask of %OVS_CT_F_* - * @OVS_CT_F_COMMIT: Commits the flow to the conntrack table. This allows - * future packets for the same connection to be identified as 'established' - * or 'related'. - */ -#define OVS_CT_F_COMMIT 0x01 - /** * enum ovs_action_attr - Action types. * @@ -705,7 +702,7 @@ enum ovs_action_attr { * data immediately followed by a mask. * The data must be zero for the unmasked * bits. */ - OVS_ACTION_ATTR_CT, /* One nested OVS_CT_ATTR_* . */ + OVS_ACTION_ATTR_CT, /* Nested OVS_CT_ATTR_* . */ __OVS_ACTION_ATTR_MAX, /* Nothing past this will be accepted * from userspace. */ diff --git a/include/uapi/linux/pci_regs.h b/include/uapi/linux/pci_regs.h index 413417f3707b..1becea86c73c 100644 --- a/include/uapi/linux/pci_regs.h +++ b/include/uapi/linux/pci_regs.h @@ -216,7 +216,8 @@ #define PCI_CAP_ID_MSIX 0x11 /* MSI-X */ #define PCI_CAP_ID_SATA 0x12 /* SATA Data/Index Conf. 
*/ #define PCI_CAP_ID_AF 0x13 /* PCI Advanced Features */ -#define PCI_CAP_ID_MAX PCI_CAP_ID_AF +#define PCI_CAP_ID_EA 0x14 /* PCI Enhanced Allocation */ +#define PCI_CAP_ID_MAX PCI_CAP_ID_EA #define PCI_CAP_LIST_NEXT 1 /* Next capability in the list */ #define PCI_CAP_FLAGS 2 /* Capability defined flags (16 bits) */ #define PCI_CAP_SIZEOF 4 @@ -353,6 +354,46 @@ #define PCI_AF_STATUS_TP 0x01 #define PCI_CAP_AF_SIZEOF 6 /* size of AF registers */ +/* PCI Enhanced Allocation registers */ + +#define PCI_EA_NUM_ENT 2 /* Number of Capability Entries */ +#define PCI_EA_NUM_ENT_MASK 0x3f /* Num Entries Mask */ +#define PCI_EA_FIRST_ENT 4 /* First EA Entry in List */ +#define PCI_EA_FIRST_ENT_BRIDGE 8 /* First EA Entry for Bridges */ +#define PCI_EA_ES 0x00000007 /* Entry Size */ +#define PCI_EA_BEI 0x000000f0 /* BAR Equivalent Indicator */ +/* 0-5 map to BARs 0-5 respectively */ +#define PCI_EA_BEI_BAR0 0 +#define PCI_EA_BEI_BAR5 5 +#define PCI_EA_BEI_BRIDGE 6 /* Resource behind bridge */ +#define PCI_EA_BEI_ENI 7 /* Equivalent Not Indicated */ +#define PCI_EA_BEI_ROM 8 /* Expansion ROM */ +/* 9-14 map to VF BARs 0-5 respectively */ +#define PCI_EA_BEI_VF_BAR0 9 +#define PCI_EA_BEI_VF_BAR5 14 +#define PCI_EA_BEI_RESERVED 15 /* Reserved - Treat like ENI */ +#define PCI_EA_PP 0x0000ff00 /* Primary Properties */ +#define PCI_EA_SP 0x00ff0000 /* Secondary Properties */ +#define PCI_EA_P_MEM 0x00 /* Non-Prefetch Memory */ +#define PCI_EA_P_MEM_PREFETCH 0x01 /* Prefetchable Memory */ +#define PCI_EA_P_IO 0x02 /* I/O Space */ +#define PCI_EA_P_VF_MEM_PREFETCH 0x03 /* VF Prefetchable Memory */ +#define PCI_EA_P_VF_MEM 0x04 /* VF Non-Prefetch Memory */ +#define PCI_EA_P_BRIDGE_MEM 0x05 /* Bridge Non-Prefetch Memory */ +#define PCI_EA_P_BRIDGE_MEM_PREFETCH 0x06 /* Bridge Prefetchable Memory */ +#define PCI_EA_P_BRIDGE_IO 0x07 /* Bridge I/O Space */ +/* 0x08-0xfc reserved */ +#define PCI_EA_P_MEM_RESERVED 0xfd /* Reserved Memory */ +#define PCI_EA_P_IO_RESERVED 0xfe /* Reserved I/O Space */ +#define PCI_EA_P_UNAVAILABLE 0xff /* Entry Unavailable */ +#define PCI_EA_WRITABLE 0x40000000 /* Writable: 1 = RW, 0 = HwInit */ +#define PCI_EA_ENABLE 0x80000000 /* Enable for this entry */ +#define PCI_EA_BASE 4 /* Base Address Offset */ +#define PCI_EA_MAX_OFFSET 8 /* MaxOffset (resource length) */ +/* bit 0 is reserved */ +#define PCI_EA_IS_64 0x00000002 /* 64-bit field flag */ +#define PCI_EA_FIELD_MASK 0xfffffffc /* For Base & Max Offset */ + /* PCI-X registers (Type 0 (non-bridge) devices) */ #define PCI_X_CMD 2 /* Modes & Features */ diff --git a/include/uapi/linux/perf_event.h b/include/uapi/linux/perf_event.h index 2881145cda86..d801bb0d9f6d 100644 --- a/include/uapi/linux/perf_event.h +++ b/include/uapi/linux/perf_event.h @@ -110,6 +110,7 @@ enum perf_sw_ids { PERF_COUNT_SW_ALIGNMENT_FAULTS = 7, PERF_COUNT_SW_EMULATION_FAULTS = 8, PERF_COUNT_SW_DUMMY = 9, + PERF_COUNT_SW_BPF_OUTPUT = 10, PERF_COUNT_SW_MAX, /* non-ABI */ }; @@ -168,6 +169,7 @@ enum perf_branch_sample_type_shift { PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT = 11, /* call/ret stack */ PERF_SAMPLE_BRANCH_IND_JUMP_SHIFT = 12, /* indirect jumps */ + PERF_SAMPLE_BRANCH_CALL_SHIFT = 13, /* direct call */ PERF_SAMPLE_BRANCH_MAX_SHIFT /* non-ABI */ }; @@ -188,6 +190,7 @@ enum perf_branch_sample_type { PERF_SAMPLE_BRANCH_CALL_STACK = 1U << PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT, PERF_SAMPLE_BRANCH_IND_JUMP = 1U << PERF_SAMPLE_BRANCH_IND_JUMP_SHIFT, + PERF_SAMPLE_BRANCH_CALL = 1U << PERF_SAMPLE_BRANCH_CALL_SHIFT, PERF_SAMPLE_BRANCH_MAX = 1U << 
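[Editorial example, not part of this diff] Each Enhanced Allocation entry starts with a dword whose low bits encode the entry size and the BAR Equivalent Indicator, followed by base and max-offset dwords masked with PCI_EA_FIELD_MASK. A small decoding sketch, assuming the patched <linux/pci_regs.h> is in the include path:

#include <stdio.h>
#include <linux/pci_regs.h>

static void decode_ea_entry_header(unsigned int dw0)
{
        unsigned int extra_dwords = dw0 & PCI_EA_ES;
        unsigned int bei = (dw0 & PCI_EA_BEI) >> 4;     /* 0-5 map to BARs 0-5 */

        printf("EA entry: BEI %u, %u following dword(s), %s, %s\n",
               bei, extra_dwords,
               (dw0 & PCI_EA_ENABLE) ? "enabled" : "disabled",
               (dw0 & PCI_EA_WRITABLE) ? "writable" : "hwinit");
}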
PERF_SAMPLE_BRANCH_MAX_SHIFT, }; @@ -476,7 +479,7 @@ struct perf_event_mmap_page { * u64 delta; * * quot = (cyc >> time_shift); - * rem = cyc & ((1 << time_shift) - 1); + * rem = cyc & (((u64)1 << time_shift) - 1); * delta = time_offset + quot * time_mult + * ((rem * time_mult) >> time_shift); * @@ -507,7 +510,7 @@ struct perf_event_mmap_page { * And vice versa: * * quot = cyc >> time_shift; - * rem = cyc & ((1 << time_shift) - 1); + * rem = cyc & (((u64)1 << time_shift) - 1); * timestamp = time_zero + quot * time_mult + * ((rem * time_mult) >> time_shift); */ diff --git a/include/uapi/linux/pkt_cls.h b/include/uapi/linux/pkt_cls.h index 4f0d1bc3647d..439873775d49 100644 --- a/include/uapi/linux/pkt_cls.h +++ b/include/uapi/linux/pkt_cls.h @@ -87,6 +87,7 @@ enum { #define TC_ACT_STOLEN 4 #define TC_ACT_QUEUED 5 #define TC_ACT_REPEAT 6 +#define TC_ACT_REDIRECT 7 #define TC_ACT_JUMP 0x10000000 /* Action type identifiers*/ @@ -373,6 +374,8 @@ enum { /* BPF classifier */ +#define TCA_BPF_FLAG_ACT_DIRECT (1 << 0) + enum { TCA_BPF_UNSPEC, TCA_BPF_ACT, @@ -382,6 +385,7 @@ enum { TCA_BPF_OPS, TCA_BPF_FD, TCA_BPF_NAME, + TCA_BPF_FLAGS, __TCA_BPF_MAX, }; diff --git a/include/uapi/linux/pr.h b/include/uapi/linux/pr.h new file mode 100644 index 000000000000..57d7c0f916b6 --- /dev/null +++ b/include/uapi/linux/pr.h @@ -0,0 +1,48 @@ +#ifndef _UAPI_PR_H +#define _UAPI_PR_H + +enum pr_type { + PR_WRITE_EXCLUSIVE = 1, + PR_EXCLUSIVE_ACCESS = 2, + PR_WRITE_EXCLUSIVE_REG_ONLY = 3, + PR_EXCLUSIVE_ACCESS_REG_ONLY = 4, + PR_WRITE_EXCLUSIVE_ALL_REGS = 5, + PR_EXCLUSIVE_ACCESS_ALL_REGS = 6, +}; + +struct pr_reservation { + __u64 key; + __u32 type; + __u32 flags; +}; + +struct pr_registration { + __u64 old_key; + __u64 new_key; + __u32 flags; + __u32 __pad; +}; + +struct pr_preempt { + __u64 old_key; + __u64 new_key; + __u32 type; + __u32 flags; +}; + +struct pr_clear { + __u64 key; + __u32 flags; + __u32 __pad; +}; + +#define PR_FL_IGNORE_KEY (1 << 0) /* ignore existing key */ + +#define IOC_PR_REGISTER _IOW('p', 200, struct pr_registration) +#define IOC_PR_RESERVE _IOW('p', 201, struct pr_reservation) +#define IOC_PR_RELEASE _IOW('p', 202, struct pr_reservation) +#define IOC_PR_PREEMPT _IOW('p', 203, struct pr_preempt) +#define IOC_PR_PREEMPT_ABORT _IOW('p', 204, struct pr_preempt) +#define IOC_PR_CLEAR _IOW('p', 205, struct pr_clear) + +#endif /* _UAPI_PR_H */ diff --git a/include/uapi/linux/ptrace.h b/include/uapi/linux/ptrace.h index a7a697986614..fb8106509000 100644 --- a/include/uapi/linux/ptrace.h +++ b/include/uapi/linux/ptrace.h @@ -64,6 +64,8 @@ struct ptrace_peeksiginfo_args { #define PTRACE_GETSIGMASK 0x420a #define PTRACE_SETSIGMASK 0x420b +#define PTRACE_SECCOMP_GET_FILTER 0x420c + /* Read signals from a shared (process wide) queue */ #define PTRACE_PEEKSIGINFO_SHARED (1 << 0) diff --git a/include/uapi/linux/raid/md_p.h b/include/uapi/linux/raid/md_p.h index 2ae6131e69a5..c3e654c6d518 100644 --- a/include/uapi/linux/raid/md_p.h +++ b/include/uapi/linux/raid/md_p.h @@ -89,6 +89,12 @@ * read requests will only be sent here in * dire need */ +#define MD_DISK_JOURNAL 18 /* disk is used as the write journal in RAID-5/6 */ + +#define MD_DISK_ROLE_SPARE 0xffff +#define MD_DISK_ROLE_FAULTY 0xfffe +#define MD_DISK_ROLE_JOURNAL 0xfffd +#define MD_DISK_ROLE_MAX 0xff00 /* max value of regular disk role */ typedef struct mdp_device_descriptor_s { __u32 number; /* 0 Device number in the entire set */ @@ -252,7 +258,10 @@ struct mdp_superblock_1 { __le64 data_offset; /* sector start of data, often 0 */ __le64 
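[Editorial example, not part of this diff] The corrected comments above spell out the cycle-to-timestamp conversion exported through perf_event_mmap_page; the (u64) cast matters because the mask has to be computed in 64 bits once time_shift is large. The same arithmetic as a plain helper, mirroring the documented formula:

#include <stdint.h>

/* convert a raw cycle count to a perf timestamp using the time_shift,
 * time_mult and time_zero fields from perf_event_mmap_page */
static uint64_t cyc_to_perf_time(uint64_t cyc, uint16_t time_shift,
                                 uint32_t time_mult, uint64_t time_zero)
{
        uint64_t quot = cyc >> time_shift;
        uint64_t rem  = cyc & (((uint64_t)1 << time_shift) - 1);

        return time_zero + quot * time_mult + ((rem * time_mult) >> time_shift);
}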
data_size; /* sectors in this device that can be used for data */ __le64 super_offset; /* sector start of this superblock */ - __le64 recovery_offset;/* sectors before this offset (from data_offset) have been recovered */ + union { + __le64 recovery_offset;/* sectors before this offset (from data_offset) have been recovered */ + __le64 journal_tail;/* journal tail of journal device (from data_offset) */ + }; __le32 dev_number; /* permanent identifier of this device - not role in raid */ __le32 cnt_corrected_read; /* number of read errors that were corrected by re-writing */ __u8 device_uuid[16]; /* user-space setable, ignored by kernel */ @@ -302,6 +311,8 @@ struct mdp_superblock_1 { #define MD_FEATURE_RECOVERY_BITMAP 128 /* recovery that is happening * is guided by bitmap. */ +#define MD_FEATURE_CLUSTERED 256 /* clustered MD */ +#define MD_FEATURE_JOURNAL 512 /* support write cache */ #define MD_FEATURE_ALL (MD_FEATURE_BITMAP_OFFSET \ |MD_FEATURE_RECOVERY_OFFSET \ |MD_FEATURE_RESHAPE_ACTIVE \ @@ -310,6 +321,66 @@ struct mdp_superblock_1 { |MD_FEATURE_RESHAPE_BACKWARDS \ |MD_FEATURE_NEW_OFFSET \ |MD_FEATURE_RECOVERY_BITMAP \ + |MD_FEATURE_CLUSTERED \ + |MD_FEATURE_JOURNAL \ ) +struct r5l_payload_header { + __le16 type; + __le16 flags; +} __attribute__ ((__packed__)); + +enum r5l_payload_type { + R5LOG_PAYLOAD_DATA = 0, + R5LOG_PAYLOAD_PARITY = 1, + R5LOG_PAYLOAD_FLUSH = 2, +}; + +struct r5l_payload_data_parity { + struct r5l_payload_header header; + __le32 size; /* sector. data/parity size. each 4k + * has a checksum */ + __le64 location; /* sector. For data, it's raid sector. For + * parity, it's stripe sector */ + __le32 checksum[]; +} __attribute__ ((__packed__)); + +enum r5l_payload_data_parity_flag { + R5LOG_PAYLOAD_FLAG_DISCARD = 1, /* payload is discard */ + /* + * RESHAPED/RESHAPING is only set when there is reshape activity. 
Note, + * both data/parity of a stripe should have the same flag set + * + * RESHAPED: reshape is running, and this stripe finished reshape + * RESHAPING: reshape is running, and this stripe isn't reshaped + */ + R5LOG_PAYLOAD_FLAG_RESHAPED = 2, + R5LOG_PAYLOAD_FLAG_RESHAPING = 3, +}; + +struct r5l_payload_flush { + struct r5l_payload_header header; + __le32 size; /* flush_stripes size, bytes */ + __le64 flush_stripes[]; +} __attribute__ ((__packed__)); + +enum r5l_payload_flush_flag { + R5LOG_PAYLOAD_FLAG_FLUSH_STRIPE = 1, /* data represents whole stripe */ +}; + +struct r5l_meta_block { + __le32 magic; + __le32 checksum; + __u8 version; + __u8 __zero_pading_1; + __le16 __zero_pading_2; + __le32 meta_size; /* whole size of the block */ + + __le64 seq; + __le64 position; /* sector, start from rdev->data_offset, current position */ + struct r5l_payload_header payloads[]; +} __attribute__ ((__packed__)); + +#define R5LOG_VERSION 0x1 +#define R5LOG_MAGIC 0x6433c509 #endif diff --git a/include/uapi/linux/rtnetlink.h b/include/uapi/linux/rtnetlink.h index 702024769c74..123a5af4e8bb 100644 --- a/include/uapi/linux/rtnetlink.h +++ b/include/uapi/linux/rtnetlink.h @@ -160,7 +160,7 @@ struct rtattr { /* Macros to handle rtattributes */ -#define RTA_ALIGNTO 4 +#define RTA_ALIGNTO 4U #define RTA_ALIGN(len) ( ((len)+RTA_ALIGNTO-1) & ~(RTA_ALIGNTO-1) ) #define RTA_OK(rta,len) ((len) >= (int)sizeof(struct rtattr) && \ (rta)->rta_len >= sizeof(struct rtattr) && \ @@ -270,6 +270,7 @@ enum rt_scope_t { #define RTM_F_CLONED 0x200 /* This route is cloned */ #define RTM_F_EQUALIZE 0x400 /* Multipath equalizer: NI */ #define RTM_F_PREFIX 0x800 /* Prefix addresses */ +#define RTM_F_LOOKUP_TABLE 0x1000 /* set rtm_table to FIB lookup result */ /* Reserved table identifiers */ @@ -666,6 +667,7 @@ struct tcamsg { #define RTEXT_FILTER_VF (1 << 0) #define RTEXT_FILTER_BRVLAN (1 << 1) #define RTEXT_FILTER_BRVLAN_COMPRESSED (1 << 2) +#define RTEXT_FILTER_SKIP_STATS (1 << 3) /* End of information exported to user level */ diff --git a/include/uapi/linux/scif_ioctl.h b/include/uapi/linux/scif_ioctl.h index 4a94d917cf99..d9048918be52 100644 --- a/include/uapi/linux/scif_ioctl.h +++ b/include/uapi/linux/scif_ioctl.h @@ -107,6 +107,82 @@ struct scifioctl_msg { }; /** + * struct scifioctl_reg - used for SCIF_REG IOCTL + * @addr: starting virtual address + * @len: length of range + * @offset: offset of window + * @prot: read/write protection + * @flags: flags + * @out_offset: offset returned + */ +struct scifioctl_reg { + __u64 addr; + __u64 len; + __s64 offset; + __s32 prot; + __s32 flags; + __s64 out_offset; +}; + +/** + * struct scifioctl_unreg - used for SCIF_UNREG IOCTL + * @offset: start of range to unregister + * @len: length of range to unregister + */ +struct scifioctl_unreg { + __s64 offset; + __u64 len; +}; + +/** + * struct scifioctl_copy - used for SCIF DMA copy IOCTLs + * + * @loffset: offset in local registered address space to/from + * which to copy + * @len: length of range to copy + * @roffset: offset in remote registered address space to/from + * which to copy + * @addr: user virtual address to/from which to copy + * @flags: flags + * + * This structure is used for SCIF_READFROM, SCIF_WRITETO, SCIF_VREADFROM + * and SCIF_VREADFROM IOCTL's. 
+ */ +struct scifioctl_copy { + __s64 loffset; + __u64 len; + __s64 roffset; + __u64 addr; + __s32 flags; +}; + +/** + * struct scifioctl_fence_mark - used for SCIF_FENCE_MARK IOCTL + * @flags: flags + * @mark: fence handle which is a pointer to a __s32 + */ +struct scifioctl_fence_mark { + __s32 flags; + __u64 mark; +}; + +/** + * struct scifioctl_fence_signal - used for SCIF_FENCE_SIGNAL IOCTL + * @loff: local offset + * @lval: value to write to loffset + * @roff: remote offset + * @rval: value to write to roffset + * @flags: flags + */ +struct scifioctl_fence_signal { + __s64 loff; + __u64 lval; + __s64 roff; + __u64 rval; + __s32 flags; +}; + +/** * struct scifioctl_node_ids - used for SCIF_GET_NODEIDS IOCTL * @nodes: pointer to an array of node_ids * @self: ID of the current node @@ -125,6 +201,15 @@ struct scifioctl_node_ids { #define SCIF_ACCEPTREG _IOWR('s', 5, __u64) #define SCIF_SEND _IOWR('s', 6, struct scifioctl_msg) #define SCIF_RECV _IOWR('s', 7, struct scifioctl_msg) +#define SCIF_REG _IOWR('s', 8, struct scifioctl_reg) +#define SCIF_UNREG _IOWR('s', 9, struct scifioctl_unreg) +#define SCIF_READFROM _IOWR('s', 10, struct scifioctl_copy) +#define SCIF_WRITETO _IOWR('s', 11, struct scifioctl_copy) +#define SCIF_VREADFROM _IOWR('s', 12, struct scifioctl_copy) +#define SCIF_VWRITETO _IOWR('s', 13, struct scifioctl_copy) #define SCIF_GET_NODEIDS _IOWR('s', 14, struct scifioctl_node_ids) +#define SCIF_FENCE_MARK _IOWR('s', 15, struct scifioctl_fence_mark) +#define SCIF_FENCE_WAIT _IOWR('s', 16, __s32) +#define SCIF_FENCE_SIGNAL _IOWR('s', 17, struct scifioctl_fence_signal) #endif /* SCIF_IOCTL_H */ diff --git a/include/uapi/linux/screen_info.h b/include/uapi/linux/screen_info.h index 7530e7447620..8b8d39dfb67f 100644 --- a/include/uapi/linux/screen_info.h +++ b/include/uapi/linux/screen_info.h @@ -43,7 +43,8 @@ struct screen_info { __u16 pages; /* 0x32 */ __u16 vesa_attributes; /* 0x34 */ __u32 capabilities; /* 0x36 */ - __u8 _reserved[6]; /* 0x3a */ + __u32 ext_lfb_base; /* 0x3a */ + __u8 _reserved[2]; /* 0x3e */ } __attribute__((packed)); #define VIDEO_TYPE_MDA 0x10 /* Monochrome Text Display */ @@ -69,6 +70,6 @@ struct screen_info { #define VIDEO_FLAGS_NOCURSOR (1 << 0) /* The video mode has no cursor set */ #define VIDEO_CAPABILITY_SKIP_QUIRKS (1 << 0) - +#define VIDEO_CAPABILITY_64BIT_BASE (1 << 1) /* Frame buffer base is 64-bit */ #endif /* _UAPI_SCREEN_INFO_H */ diff --git a/include/uapi/linux/stm.h b/include/uapi/linux/stm.h new file mode 100644 index 000000000000..626a8d3f63b5 --- /dev/null +++ b/include/uapi/linux/stm.h @@ -0,0 +1,50 @@ +/* + * System Trace Module (STM) userspace interfaces + * Copyright (c) 2014, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * STM class implements generic infrastructure for System Trace Module devices + * as defined in MIPI STPv2 specification. 
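The new SCIF registration, DMA-copy and fence ioctls above lend themselves to a short usage sketch; everything below (the already-connected endpoint fd, the numeric prot value, the zero flags) is an assumption for illustration, and error handling is minimal.

#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/scif_ioctl.h>

/*
 * Register a local page as a SCIF window, DMA-read 4 KiB into it from a
 * remote offset, then mark and wait on a fence so the transfer is known
 * to be complete.  @epd is assumed to be a connected SCIF endpoint fd.
 */
static int scif_read_page(int epd, __s64 remote_off, void **out)
{
        struct scifioctl_reg reg = { .len = 4096, .prot = 0x3 /* read|write (assumed) */ };
        struct scifioctl_copy copy = { .len = 4096 };
        struct scifioctl_fence_mark mark = { .flags = 0 };
        __s32 fence;

        *out = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
                    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (*out == MAP_FAILED)
                return -1;

        reg.addr = (__u64)(unsigned long)*out;
        if (ioctl(epd, SCIF_REG, &reg) < 0)
                return -1;

        copy.loffset = reg.out_offset;  /* window offset returned by the driver */
        copy.roffset = remote_off;
        if (ioctl(epd, SCIF_READFROM, &copy) < 0)
                return -1;

        mark.mark = (__u64)(unsigned long)&fence;
        if (ioctl(epd, SCIF_FENCE_MARK, &mark) < 0)
                return -1;
        return ioctl(epd, SCIF_FENCE_WAIT, &fence);
}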
+ */ + +#ifndef _UAPI_LINUX_STM_H +#define _UAPI_LINUX_STM_H + +#include <linux/types.h> + +/** + * struct stp_policy_id - identification for the STP policy + * @size: size of the structure including real id[] length + * @master: assigned master + * @channel: first assigned channel + * @width: number of requested channels + * @id: identification string + * + * User must calculate the total size of the structure and put it into + * @size field, fill out the @id and desired @width. In return, kernel + * fills out @master, @channel and @width. + */ +struct stp_policy_id { + __u32 size; + __u16 master; + __u16 channel; + __u16 width; + /* padding */ + __u16 __reserved_0; + __u32 __reserved_1; + char id[0]; +}; + +#define STP_POLICY_ID_SET _IOWR('%', 0, struct stp_policy_id) +#define STP_POLICY_ID_GET _IOR('%', 1, struct stp_policy_id) +#define STP_SET_OPTIONS _IOW('%', 2, __u64) + +#endif /* _UAPI_LINUX_STM_H */ diff --git a/include/uapi/linux/usb/cdc.h b/include/uapi/linux/usb/cdc.h index b6a9cdd6e096..e2bc417b243b 100644 --- a/include/uapi/linux/usb/cdc.h +++ b/include/uapi/linux/usb/cdc.h @@ -6,8 +6,8 @@ * firmware based USB peripherals. */ -#ifndef __LINUX_USB_CDC_H -#define __LINUX_USB_CDC_H +#ifndef __UAPI_LINUX_USB_CDC_H +#define __UAPI_LINUX_USB_CDC_H #include <linux/types.h> @@ -444,4 +444,4 @@ struct usb_cdc_ncm_ndp_input_size { #define USB_CDC_NCM_CRC_NOT_APPENDED 0x00 #define USB_CDC_NCM_CRC_APPENDED 0x01 -#endif /* __LINUX_USB_CDC_H */ +#endif /* __UAPI_LINUX_USB_CDC_H */ diff --git a/include/uapi/linux/usb/ch9.h b/include/uapi/linux/usb/ch9.h index f7adc6e01f9e..4338eb7b09b3 100644 --- a/include/uapi/linux/usb/ch9.h +++ b/include/uapi/linux/usb/ch9.h @@ -866,6 +866,35 @@ struct usb_ss_container_id_descriptor { } __attribute__((packed)); #define USB_DT_USB_SS_CONTN_ID_SIZE 20 + +/* + * SuperSpeed Plus USB Capability descriptor: Defines the set of + * SuperSpeed Plus USB specific device level capabilities + */ +#define USB_SSP_CAP_TYPE 0xa +struct usb_ssp_cap_descriptor { + __u8 bLength; + __u8 bDescriptorType; + __u8 bDevCapabilityType; + __u8 bReserved; + __le32 bmAttributes; +#define USB_SSP_SUBLINK_SPEED_ATTRIBS (0x1f << 0) /* sublink speed entries */ +#define USB_SSP_SUBLINK_SPEED_IDS (0xf << 5) /* speed ID entries */ + __u16 wFunctionalitySupport; +#define USB_SSP_MIN_SUBLINK_SPEED_ATTRIBUTE_ID (0xf) +#define USB_SSP_MIN_RX_LANE_COUNT (0xf << 8) +#define USB_SSP_MIN_TX_LANE_COUNT (0xf << 12) + __le16 wReserved; + __le32 bmSublinkSpeedAttr[1]; /* list of sublink speed attrib entries */ +#define USB_SSP_SUBLINK_SPEED_SSID (0xf) /* sublink speed ID */ +#define USB_SSP_SUBLINK_SPEED_LSE (0x3 << 4) /* Lanespeed exponent */ +#define USB_SSP_SUBLINK_SPEED_ST (0x3 << 6) /* Sublink type */ +#define USB_SSP_SUBLINK_SPEED_RSVD (0x3f << 8) /* Reserved */ +#define USB_SSP_SUBLINK_SPEED_LP (0x3 << 14) /* Link protocol */ +#define USB_SSP_SUBLINK_SPEED_LSM (0xff << 16) /* Lanespeed mantissa */ +} __attribute__((packed)); + + /*-------------------------------------------------------------------------*/ /* USB_DT_WIRELESS_ENDPOINT_COMP: companion descriptor associated with diff --git a/include/uapi/linux/userfaultfd.h b/include/uapi/linux/userfaultfd.h index df0e09bb7dd5..9057d7af3ae1 100644 --- a/include/uapi/linux/userfaultfd.h +++ b/include/uapi/linux/userfaultfd.h @@ -11,8 +11,6 @@ #include <linux/types.h> -#include <linux/compiler.h> - #define UFFD_API ((__u64)0xAA) /* * After implementing the respective features it will become: diff --git a/include/uapi/linux/v4l2-controls.h 
b/include/uapi/linux/v4l2-controls.h index d448c536b49d..1bdce501ad6b 100644 --- a/include/uapi/linux/v4l2-controls.h +++ b/include/uapi/linux/v4l2-controls.h @@ -936,6 +936,7 @@ enum v4l2_deemphasis { #define V4L2_CID_RF_TUNER_BANDWIDTH_AUTO (V4L2_CID_RF_TUNER_CLASS_BASE + 11) #define V4L2_CID_RF_TUNER_BANDWIDTH (V4L2_CID_RF_TUNER_CLASS_BASE + 12) +#define V4L2_CID_RF_TUNER_RF_GAIN (V4L2_CID_RF_TUNER_CLASS_BASE + 32) #define V4L2_CID_RF_TUNER_LNA_GAIN_AUTO (V4L2_CID_RF_TUNER_CLASS_BASE + 41) #define V4L2_CID_RF_TUNER_LNA_GAIN (V4L2_CID_RF_TUNER_CLASS_BASE + 42) #define V4L2_CID_RF_TUNER_MIXER_GAIN_AUTO (V4L2_CID_RF_TUNER_CLASS_BASE + 51) diff --git a/include/uapi/linux/videodev2.h b/include/uapi/linux/videodev2.h index 3228fbebcd63..a0e87d16b726 100644 --- a/include/uapi/linux/videodev2.h +++ b/include/uapi/linux/videodev2.h @@ -145,6 +145,7 @@ enum v4l2_buf_type { V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE = 9, V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE = 10, V4L2_BUF_TYPE_SDR_CAPTURE = 11, + V4L2_BUF_TYPE_SDR_OUTPUT = 12, /* Deprecated, do not use */ V4L2_BUF_TYPE_PRIVATE = 0x80, }; @@ -159,16 +160,20 @@ enum v4l2_buf_type { || (type) == V4L2_BUF_TYPE_VIDEO_OVERLAY \ || (type) == V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY \ || (type) == V4L2_BUF_TYPE_VBI_OUTPUT \ - || (type) == V4L2_BUF_TYPE_SLICED_VBI_OUTPUT) + || (type) == V4L2_BUF_TYPE_SLICED_VBI_OUTPUT \ + || (type) == V4L2_BUF_TYPE_SDR_OUTPUT) enum v4l2_tuner_type { V4L2_TUNER_RADIO = 1, V4L2_TUNER_ANALOG_TV = 2, V4L2_TUNER_DIGITAL_TV = 3, - V4L2_TUNER_ADC = 4, + V4L2_TUNER_SDR = 4, V4L2_TUNER_RF = 5, }; +/* Deprecated, do not use */ +#define V4L2_TUNER_ADC V4L2_TUNER_SDR + enum v4l2_memory { V4L2_MEMORY_MMAP = 1, V4L2_MEMORY_USERPTR = 2, @@ -229,6 +234,9 @@ enum v4l2_colorspace { /* Raw colorspace: for RAW unprocessed images */ V4L2_COLORSPACE_RAW = 11, + + /* DCI-P3 colorspace, used by cinema projectors */ + V4L2_COLORSPACE_DCI_P3 = 12, }; /* @@ -256,6 +264,8 @@ enum v4l2_xfer_func { * V4L2_COLORSPACE_SMPTE240M: V4L2_XFER_FUNC_SMPTE240M * * V4L2_COLORSPACE_RAW: V4L2_XFER_FUNC_NONE + * + * V4L2_COLORSPACE_DCI_P3: V4L2_XFER_FUNC_DCI_P3 */ V4L2_XFER_FUNC_DEFAULT = 0, V4L2_XFER_FUNC_709 = 1, @@ -263,6 +273,8 @@ enum v4l2_xfer_func { V4L2_XFER_FUNC_ADOBERGB = 3, V4L2_XFER_FUNC_SMPTE240M = 4, V4L2_XFER_FUNC_NONE = 5, + V4L2_XFER_FUNC_DCI_P3 = 6, + V4L2_XFER_FUNC_SMPTE2084 = 7, }; /* @@ -272,9 +284,10 @@ enum v4l2_xfer_func { #define V4L2_MAP_XFER_FUNC_DEFAULT(colsp) \ ((colsp) == V4L2_COLORSPACE_ADOBERGB ? V4L2_XFER_FUNC_ADOBERGB : \ ((colsp) == V4L2_COLORSPACE_SMPTE240M ? V4L2_XFER_FUNC_SMPTE240M : \ - ((colsp) == V4L2_COLORSPACE_RAW ? V4L2_XFER_FUNC_NONE : \ - ((colsp) == V4L2_COLORSPACE_SRGB || (colsp) == V4L2_COLORSPACE_JPEG ? \ - V4L2_XFER_FUNC_SRGB : V4L2_XFER_FUNC_709)))) + ((colsp) == V4L2_COLORSPACE_DCI_P3 ? V4L2_XFER_FUNC_DCI_P3 : \ + ((colsp) == V4L2_COLORSPACE_RAW ? V4L2_XFER_FUNC_NONE : \ + ((colsp) == V4L2_COLORSPACE_SRGB || (colsp) == V4L2_COLORSPACE_JPEG ? \ + V4L2_XFER_FUNC_SRGB : V4L2_XFER_FUNC_709))))) enum v4l2_ycbcr_encoding { /* @@ -285,7 +298,7 @@ enum v4l2_ycbcr_encoding { * V4L2_COLORSPACE_470_SYSTEM_BG, V4L2_COLORSPACE_ADOBERGB and * V4L2_COLORSPACE_JPEG: V4L2_YCBCR_ENC_601 * - * V4L2_COLORSPACE_REC709: V4L2_YCBCR_ENC_709 + * V4L2_COLORSPACE_REC709 and V4L2_COLORSPACE_DCI_P3: V4L2_YCBCR_ENC_709 * * V4L2_COLORSPACE_SRGB: V4L2_YCBCR_ENC_SYCC * @@ -325,7 +338,8 @@ enum v4l2_ycbcr_encoding { * This depends on the colorspace. */ #define V4L2_MAP_YCBCR_ENC_DEFAULT(colsp) \ - ((colsp) == V4L2_COLORSPACE_REC709 ? 
V4L2_YCBCR_ENC_709 : \ + (((colsp) == V4L2_COLORSPACE_REC709 || \ + (colsp) == V4L2_COLORSPACE_DCI_P3) ? V4L2_YCBCR_ENC_709 : \ ((colsp) == V4L2_COLORSPACE_BT2020 ? V4L2_YCBCR_ENC_BT2020 : \ ((colsp) == V4L2_COLORSPACE_SMPTE240M ? V4L2_YCBCR_ENC_SMPTE240M : \ V4L2_YCBCR_ENC_601))) @@ -423,6 +437,7 @@ struct v4l2_capability { #define V4L2_CAP_SDR_CAPTURE 0x00100000 /* Is a SDR capture device */ #define V4L2_CAP_EXT_PIX_FORMAT 0x00200000 /* Supports the extended pixel format */ +#define V4L2_CAP_SDR_OUTPUT 0x00400000 /* Is a SDR output device */ #define V4L2_CAP_READWRITE 0x01000000 /* read/write systemcalls */ #define V4L2_CAP_ASYNCIO 0x02000000 /* async I/O */ @@ -1578,7 +1593,8 @@ struct v4l2_modulator { __u32 rangelow; __u32 rangehigh; __u32 txsubchans; - __u32 reserved[4]; + __u32 type; /* enum v4l2_tuner_type */ + __u32 reserved[3]; }; /* Flags for the 'capability' field */ @@ -2271,7 +2287,7 @@ struct v4l2_create_buffers { #define VIDIOC_QUERY_EXT_CTRL _IOWR('V', 103, struct v4l2_query_ext_ctrl) /* Reminder: when adding new ioctls please add support for them to - drivers/media/video/v4l2-compat-ioctl32.c as well! */ + drivers/media/v4l2-core/v4l2-compat-ioctl32.c as well! */ #define BASE_VIDIOC_PRIVATE 192 /* 192-255 are private */ diff --git a/include/uapi/rdma/hfi/hfi1_user.h b/include/uapi/rdma/hfi/hfi1_user.h index 78c442fbf263..599562fe5d57 100644 --- a/include/uapi/rdma/hfi/hfi1_user.h +++ b/include/uapi/rdma/hfi/hfi1_user.h @@ -88,7 +88,7 @@ #define HFI1_CAP_SDMA_AHG (1UL << 2) /* Enable SDMA AHG support */ #define HFI1_CAP_EXTENDED_PSN (1UL << 3) /* Enable Extended PSN support */ #define HFI1_CAP_HDRSUPP (1UL << 4) /* Enable Header Suppression */ -/* 1UL << 5 reserved */ +/* 1UL << 5 unused */ #define HFI1_CAP_USE_SDMA_HEAD (1UL << 6) /* DMA Hdr Q tail vs. 
use CSR */ #define HFI1_CAP_MULTI_PKT_EGR (1UL << 7) /* Enable multi-packet Egr buffs*/ #define HFI1_CAP_NODROP_RHQ_FULL (1UL << 8) /* Don't drop on Hdr Q full */ @@ -99,7 +99,7 @@ #define HFI1_CAP_NO_INTEGRITY (1UL << 13) /* Enable ctxt integrity checks */ #define HFI1_CAP_PKEY_CHECK (1UL << 14) /* Enable ctxt PKey checking */ #define HFI1_CAP_STATIC_RATE_CTRL (1UL << 15) /* Allow PBC.StaticRateControl */ -#define HFI1_CAP_QSFP_ENABLED (1UL << 16) /* Enable QSFP check during LNI */ +/* 1UL << 16 unused */ #define HFI1_CAP_SDMA_HEAD_CHECK (1UL << 17) /* SDMA head checking */ #define HFI1_CAP_EARLY_CREDIT_RETURN (1UL << 18) /* early credit return */ diff --git a/include/uapi/sound/asoc.h b/include/uapi/sound/asoc.h index 247c50bd60f0..26539a7e4880 100644 --- a/include/uapi/sound/asoc.h +++ b/include/uapi/sound/asoc.h @@ -83,7 +83,7 @@ #define SND_SOC_TPLG_NUM_TEXTS 16 /* ABI version */ -#define SND_SOC_TPLG_ABI_VERSION 0x3 +#define SND_SOC_TPLG_ABI_VERSION 0x4 /* Max size of TLV data */ #define SND_SOC_TPLG_TLV_SIZE 32 @@ -103,7 +103,8 @@ #define SND_SOC_TPLG_TYPE_PCM 7 #define SND_SOC_TPLG_TYPE_MANIFEST 8 #define SND_SOC_TPLG_TYPE_CODEC_LINK 9 -#define SND_SOC_TPLG_TYPE_PDATA 10 +#define SND_SOC_TPLG_TYPE_BACKEND_LINK 10 +#define SND_SOC_TPLG_TYPE_PDATA 11 #define SND_SOC_TPLG_TYPE_MAX SND_SOC_TPLG_TYPE_PDATA /* vendor block IDs - please add new vendor types to end */ @@ -198,7 +199,7 @@ struct snd_soc_tplg_ctl_hdr { struct snd_soc_tplg_stream_caps { __le32 size; /* in bytes of this structure */ char name[SNDRV_CTL_ELEM_ID_NAME_MAXLEN]; - __le64 formats[SND_SOC_TPLG_MAX_FORMATS]; /* supported formats SNDRV_PCM_FMTBIT_* */ + __le64 formats; /* supported formats SNDRV_PCM_FMTBIT_* */ __le32 rates; /* supported rates SNDRV_PCM_RATE_* */ __le32 rate_min; /* min rate */ __le32 rate_max; /* max rate */ @@ -217,23 +218,12 @@ struct snd_soc_tplg_stream_caps { */ struct snd_soc_tplg_stream { __le32 size; /* in bytes of this structure */ + char name[SNDRV_CTL_ELEM_ID_NAME_MAXLEN]; /* Name of the stream */ __le64 format; /* SNDRV_PCM_FMTBIT_* */ __le32 rate; /* SNDRV_PCM_RATE_* */ __le32 period_bytes; /* size of period in bytes */ __le32 buffer_bytes; /* size of buffer in bytes */ __le32 channels; /* channels */ - __le32 tdm_slot; /* optional BE bitmask of supported TDM slots */ - __le32 dai_fmt; /* SND_SOC_DAIFMT_ */ -} __attribute__((packed)); - -/* - * Duplex stream configuration supported by SW/FW. 
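Since snd_soc_tplg_stream_caps now carries a single 64-bit format bitmask instead of an array, a tool emitting topology data would fill it roughly as sketched below (a standalone illustration; the chosen values are arbitrary and little-endian conversion is glossed over).

#include <string.h>
#include <sound/asound.h>
#include <sound/asoc.h>

/* Describe a 48 kHz stream offering S16_LE and S32_LE (illustrative). */
static void fill_caps(struct snd_soc_tplg_stream_caps *caps)
{
        memset(caps, 0, sizeof(*caps));
        caps->size = sizeof(*caps);
        strncpy(caps->name, "Example Playback", sizeof(caps->name) - 1);
        /* One bitmask of SNDRV_PCM_FMTBIT_*-style bits, i.e. 1 << format. */
        caps->formats = (1ULL << SNDRV_PCM_FORMAT_S16_LE) |
                        (1ULL << SNDRV_PCM_FORMAT_S32_LE);
        caps->rate_min = 48000;
        caps->rate_max = 48000;
        /* A real generator would store these as __le32/__le64 explicitly. */
}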
- */ -struct snd_soc_tplg_stream_config { - __le32 size; /* in bytes of this structure */ - char name[SNDRV_CTL_ELEM_ID_NAME_MAXLEN]; - struct snd_soc_tplg_stream playback; - struct snd_soc_tplg_stream capture; } __attribute__((packed)); /* @@ -366,11 +356,11 @@ struct snd_soc_tplg_dapm_widget { __le32 shift; /* bits to shift */ __le32 mask; /* non-shifted mask */ __le32 subseq; /* sort within widget type */ - __u32 invert; /* invert the power bit */ - __u32 ignore_suspend; /* kept enabled over suspend */ - __u16 event_flags; - __u16 event_type; - __u16 num_kcontrols; + __le32 invert; /* invert the power bit */ + __le32 ignore_suspend; /* kept enabled over suspend */ + __le16 event_flags; + __le16 event_type; + __le32 num_kcontrols; struct snd_soc_tplg_private priv; /* * kcontrols that relate to this widget @@ -378,30 +368,46 @@ struct snd_soc_tplg_dapm_widget { */ } __attribute__((packed)); -struct snd_soc_tplg_pcm_cfg_caps { - struct snd_soc_tplg_stream_caps caps; - struct snd_soc_tplg_stream_config configs[SND_SOC_TPLG_STREAM_CONFIG_MAX]; - __le32 num_configs; /* number of configs */ -} __attribute__((packed)); /* - * Describes SW/FW specific features of PCM or DAI link. + * Describes SW/FW specific features of PCM (FE DAI & DAI link). * - * File block representation for PCM/DAI-Link :- + * File block representation for PCM :- * +-----------------------------------+-----+ * | struct snd_soc_tplg_hdr | 1 | * +-----------------------------------+-----+ - * | struct snd_soc_tplg_dapm_pcm_dai | N | + * | struct snd_soc_tplg_pcm | N | * +-----------------------------------+-----+ */ -struct snd_soc_tplg_pcm_dai { +struct snd_soc_tplg_pcm { __le32 size; /* in bytes of this structure */ - char name[SNDRV_CTL_ELEM_ID_NAME_MAXLEN]; - __le32 id; /* unique ID - used to match */ - __le32 playback; /* supports playback mode */ - __le32 capture; /* supports capture mode */ - __le32 compress; /* 1 = compressed; 0 = PCM */ - struct snd_soc_tplg_pcm_cfg_caps capconf[2]; /* capabilities and configs */ + char pcm_name[SNDRV_CTL_ELEM_ID_NAME_MAXLEN]; + char dai_name[SNDRV_CTL_ELEM_ID_NAME_MAXLEN]; + __le32 pcm_id; /* unique ID - used to match */ + __le32 dai_id; /* unique ID - used to match */ + __le32 playback; /* supports playback mode */ + __le32 capture; /* supports capture mode */ + __le32 compress; /* 1 = compressed; 0 = PCM */ + struct snd_soc_tplg_stream stream[SND_SOC_TPLG_STREAM_CONFIG_MAX]; /* for DAI link */ + __le32 num_streams; /* number of streams */ + struct snd_soc_tplg_stream_caps caps[2]; /* playback and capture for DAI */ } __attribute__((packed)); + +/* + * Describes the BE or CC link runtime supported configs or params + * + * File block representation for BE/CC link config :- + * +-----------------------------------+-----+ + * | struct snd_soc_tplg_hdr | 1 | + * +-----------------------------------+-----+ + * | struct snd_soc_tplg_link_config | N | + * +-----------------------------------+-----+ + */ +struct snd_soc_tplg_link_config { + __le32 size; /* in bytes of this structure */ + __le32 id; /* unique ID - used to match */ + struct snd_soc_tplg_stream stream[SND_SOC_TPLG_STREAM_CONFIG_MAX]; /* supported configs playback and captrure */ + __le32 num_streams; /* number of streams */ +} __attribute__((packed)); #endif diff --git a/include/uapi/sound/asound.h b/include/uapi/sound/asound.h index a45be6bdcf5b..a82108e5d1c0 100644 --- a/include/uapi/sound/asound.h +++ b/include/uapi/sound/asound.h @@ -100,9 +100,11 @@ enum { SNDRV_HWDEP_IFACE_FW_FIREWORKS, /* Echo Audio Fireworks based 
device */ SNDRV_HWDEP_IFACE_FW_BEBOB, /* BridgeCo BeBoB based device */ SNDRV_HWDEP_IFACE_FW_OXFW, /* Oxford OXFW970/971 based device */ + SNDRV_HWDEP_IFACE_FW_DIGI00X, /* Digidesign Digi 002/003 family */ + SNDRV_HWDEP_IFACE_FW_TASCAM, /* TASCAM FireWire series */ /* Don't forget to change the following: */ - SNDRV_HWDEP_IFACE_LAST = SNDRV_HWDEP_IFACE_FW_OXFW + SNDRV_HWDEP_IFACE_LAST = SNDRV_HWDEP_IFACE_FW_TASCAM }; struct snd_hwdep_info { diff --git a/include/uapi/sound/emu10k1.h b/include/uapi/sound/emu10k1.h index ec1535bb6aed..5175e166987d 100644 --- a/include/uapi/sound/emu10k1.h +++ b/include/uapi/sound/emu10k1.h @@ -34,6 +34,14 @@ #define EMU10K1_FX8010_PCM_COUNT 8 +/* + * Following definition is copied from linux/types.h to support compiling + * this header file in userspace since they are not generally available for + * uapi headers. + */ +#define __EMU10K1_DECLARE_BITMAP(name,bits) \ + unsigned long name[(bits) / (sizeof(unsigned long) * 8)] + /* instruction set */ #define iMAC0 0x00 /* R = A + (X * Y >> 31) ; saturation */ #define iMAC1 0x01 /* R = A + (-X * Y >> 31) ; saturation */ @@ -300,7 +308,7 @@ struct snd_emu10k1_fx8010_control_old_gpr { struct snd_emu10k1_fx8010_code { char name[128]; - DECLARE_BITMAP(gpr_valid, 0x200); /* bitmask of valid initializers */ + __EMU10K1_DECLARE_BITMAP(gpr_valid, 0x200); /* bitmask of valid initializers */ __u32 __user *gpr_map; /* initializers */ unsigned int gpr_add_control_count; /* count of GPR controls to add/replace */ @@ -313,11 +321,11 @@ struct snd_emu10k1_fx8010_code { unsigned int gpr_list_control_total; /* total count of GPR controls */ struct snd_emu10k1_fx8010_control_gpr __user *gpr_list_controls; /* listed GPR controls */ - DECLARE_BITMAP(tram_valid, 0x100); /* bitmask of valid initializers */ + __EMU10K1_DECLARE_BITMAP(tram_valid, 0x100); /* bitmask of valid initializers */ __u32 __user *tram_data_map; /* data initializers */ __u32 __user *tram_addr_map; /* map initializers */ - DECLARE_BITMAP(code_valid, 1024); /* bitmask of valid instructions */ + __EMU10K1_DECLARE_BITMAP(code_valid, 1024); /* bitmask of valid instructions */ __u32 __user *code; /* one instruction - 64 bits */ }; diff --git a/include/uapi/sound/firewire.h b/include/uapi/sound/firewire.h index 49122df3b56b..db79a12fcc78 100644 --- a/include/uapi/sound/firewire.h +++ b/include/uapi/sound/firewire.h @@ -9,6 +9,7 @@ #define SNDRV_FIREWIRE_EVENT_LOCK_STATUS 0x000010cc #define SNDRV_FIREWIRE_EVENT_DICE_NOTIFICATION 0xd1ce004e #define SNDRV_FIREWIRE_EVENT_EFW_RESPONSE 0x4e617475 +#define SNDRV_FIREWIRE_EVENT_DIGI00X_MESSAGE 0x746e736c struct snd_firewire_event_common { unsigned int type; /* SNDRV_FIREWIRE_EVENT_xxx */ @@ -40,11 +41,17 @@ struct snd_firewire_event_efw_response { __be32 response[0]; /* some responses */ }; +struct snd_firewire_event_digi00x_message { + unsigned int type; + __u32 message; /* Digi00x-specific message */ +}; + union snd_firewire_event { struct snd_firewire_event_common common; struct snd_firewire_event_lock_status lock_status; struct snd_firewire_event_dice_notification dice_notification; struct snd_firewire_event_efw_response efw_response; + struct snd_firewire_event_digi00x_message digi00x_message; }; @@ -56,6 +63,8 @@ union snd_firewire_event { #define SNDRV_FIREWIRE_TYPE_FIREWORKS 2 #define SNDRV_FIREWIRE_TYPE_BEBOB 3 #define SNDRV_FIREWIRE_TYPE_OXFW 4 +#define SNDRV_FIREWIRE_TYPE_DIGI00X 5 +#define SNDRV_FIREWIRE_TYPE_TASCAM 6 /* RME, MOTU, ... 
*/ struct snd_firewire_get_info { diff --git a/include/uapi/sound/hdspm.h b/include/uapi/sound/hdspm.h index 5737332d38f2..c4db6f5b306e 100644 --- a/include/uapi/sound/hdspm.h +++ b/include/uapi/sound/hdspm.h @@ -20,11 +20,7 @@ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ -#ifdef __KERNEL__ #include <linux/types.h> -#else -#include <stdint.h> -#endif /* Maximum channels is 64 even on 56Mode you have 64playbacks to matrix */ #define HDSPM_MAX_CHANNELS 64 @@ -46,15 +42,15 @@ enum hdspm_speed { /* -------------------- IOCTL Peak/RMS Meters -------------------- */ struct hdspm_peak_rms { - uint32_t input_peaks[64]; - uint32_t playback_peaks[64]; - uint32_t output_peaks[64]; + __u32 input_peaks[64]; + __u32 playback_peaks[64]; + __u32 output_peaks[64]; - uint64_t input_rms[64]; - uint64_t playback_rms[64]; - uint64_t output_rms[64]; + __u64 input_rms[64]; + __u64 playback_rms[64]; + __u64 output_rms[64]; - uint8_t speed; /* enum {ss, ds, qs} */ + __u8 speed; /* enum {ss, ds, qs} */ int status2; }; @@ -155,21 +151,21 @@ enum hdspm_syncsource { }; struct hdspm_status { - uint8_t card_type; /* enum hdspm_io_type */ + __u8 card_type; /* enum hdspm_io_type */ enum hdspm_syncsource autosync_source; - uint64_t card_clock; - uint32_t master_period; + __u64 card_clock; + __u32 master_period; union { struct { - uint8_t sync_wc; /* enum hdspm_sync */ - uint8_t sync_madi; /* enum hdspm_sync */ - uint8_t sync_tco; /* enum hdspm_sync */ - uint8_t sync_in; /* enum hdspm_sync */ - uint8_t madi_input; /* enum hdspm_madi_input */ - uint8_t channel_format; /* enum hdspm_madi_channel_format */ - uint8_t frame_format; /* enum hdspm_madi_frame_format */ + __u8 sync_wc; /* enum hdspm_sync */ + __u8 sync_madi; /* enum hdspm_sync */ + __u8 sync_tco; /* enum hdspm_sync */ + __u8 sync_in; /* enum hdspm_sync */ + __u8 madi_input; /* enum hdspm_madi_input */ + __u8 channel_format; /* enum hdspm_madi_channel_format */ + __u8 frame_format; /* enum hdspm_madi_frame_format */ } madi; } card_specific; }; @@ -184,7 +180,7 @@ struct hdspm_status { #define HDSPM_ADDON_TCO 1 struct hdspm_version { - uint8_t card_type; /* enum hdspm_io_type */ + __u8 card_type; /* enum hdspm_io_type */ char cardname[20]; unsigned int serial; unsigned short firmware_rev; diff --git a/include/uapi/xen/gntalloc.h b/include/uapi/xen/gntalloc.h index 76bd58065f4f..48d2790ef928 100644 --- a/include/uapi/xen/gntalloc.h +++ b/include/uapi/xen/gntalloc.h @@ -11,6 +11,8 @@ #ifndef __LINUX_PUBLIC_GNTALLOC_H__ #define __LINUX_PUBLIC_GNTALLOC_H__ +#include <linux/types.h> + /* * Allocates a new page and creates a new grant reference. */ @@ -19,17 +21,17 @@ _IOC(_IOC_NONE, 'G', 5, sizeof(struct ioctl_gntalloc_alloc_gref)) struct ioctl_gntalloc_alloc_gref { /* IN parameters */ /* The ID of the domain to be given access to the grants. */ - uint16_t domid; + __u16 domid; /* Flags for this mapping */ - uint16_t flags; + __u16 flags; /* Number of pages to map */ - uint32_t count; + __u32 count; /* OUT parameters */ /* The offset to be used on a subsequent call to mmap(). 
*/ - uint64_t index; + __u64 index; /* The grant references of the newly created grant, one per page */ /* Variable size, depending on count */ - uint32_t gref_ids[1]; + __u32 gref_ids[1]; }; #define GNTALLOC_FLAG_WRITABLE 1 @@ -43,9 +45,9 @@ _IOC(_IOC_NONE, 'G', 6, sizeof(struct ioctl_gntalloc_dealloc_gref)) struct ioctl_gntalloc_dealloc_gref { /* IN parameters */ /* The offset returned in the map operation */ - uint64_t index; + __u64 index; /* Number of references to unmap */ - uint32_t count; + __u32 count; }; /* @@ -67,11 +69,11 @@ struct ioctl_gntalloc_unmap_notify { * be cleared. Otherwise, it can be any byte in the page whose * notification we are adjusting. */ - uint64_t index; + __u64 index; /* Action(s) to take on unmap */ - uint32_t action; + __u32 action; /* Event channel to notify */ - uint32_t event_channel_port; + __u32 event_channel_port; }; /* Clear (set to zero) the byte specified by index */ diff --git a/include/uapi/xen/gntdev.h b/include/uapi/xen/gntdev.h index 5304bd3c84c5..aa7610a9b867 100644 --- a/include/uapi/xen/gntdev.h +++ b/include/uapi/xen/gntdev.h @@ -33,11 +33,13 @@ #ifndef __LINUX_PUBLIC_GNTDEV_H__ #define __LINUX_PUBLIC_GNTDEV_H__ +#include <linux/types.h> + struct ioctl_gntdev_grant_ref { /* The domain ID of the grant to be mapped. */ - uint32_t domid; + __u32 domid; /* The grant reference of the grant to be mapped. */ - uint32_t ref; + __u32 ref; }; /* @@ -50,11 +52,11 @@ _IOC(_IOC_NONE, 'G', 0, sizeof(struct ioctl_gntdev_map_grant_ref)) struct ioctl_gntdev_map_grant_ref { /* IN parameters */ /* The number of grants to be mapped. */ - uint32_t count; - uint32_t pad; + __u32 count; + __u32 pad; /* OUT parameters */ /* The offset to be used on a subsequent call to mmap(). */ - uint64_t index; + __u64 index; /* Variable IN parameter. */ /* Array of grant references, of size @count. */ struct ioctl_gntdev_grant_ref refs[1]; @@ -70,10 +72,10 @@ _IOC(_IOC_NONE, 'G', 1, sizeof(struct ioctl_gntdev_unmap_grant_ref)) struct ioctl_gntdev_unmap_grant_ref { /* IN parameters */ /* The offset was returned by the corresponding map operation. */ - uint64_t index; + __u64 index; /* The number of pages to be unmapped. */ - uint32_t count; - uint32_t pad; + __u32 count; + __u32 pad; }; /* @@ -93,13 +95,13 @@ _IOC(_IOC_NONE, 'G', 2, sizeof(struct ioctl_gntdev_get_offset_for_vaddr)) struct ioctl_gntdev_get_offset_for_vaddr { /* IN parameters */ /* The virtual address of the first mapped page in a range. */ - uint64_t vaddr; + __u64 vaddr; /* OUT parameters */ /* The offset that was used in the initial mmap() operation. */ - uint64_t offset; + __u64 offset; /* The number of pages mapped in the VM area that begins at @vaddr. */ - uint32_t count; - uint32_t pad; + __u32 count; + __u32 pad; }; /* @@ -113,7 +115,7 @@ _IOC(_IOC_NONE, 'G', 3, sizeof(struct ioctl_gntdev_set_max_grants)) struct ioctl_gntdev_set_max_grants { /* IN parameter */ /* The maximum number of grants that may be mapped at once. */ - uint32_t count; + __u32 count; }; /* @@ -135,11 +137,11 @@ struct ioctl_gntdev_unmap_notify { * be cleared. Otherwise, it can be any byte in the page whose * notification we are adjusting. 
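These gntalloc/gntdev ioctls are the userspace side of grant allocation, so a brief sketch of the alloc-and-mmap flow may help; the /dev/xen/gntalloc path and the single-page size are assumptions for illustration, and error paths are abbreviated.

#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <xen/gntalloc.h>

/* Allocate one writable granted page for @domid and map it locally. */
static int share_one_page(__u16 domid, __u32 *gref, void **map)
{
        struct ioctl_gntalloc_alloc_gref req = {
                .domid = domid,
                .flags = GNTALLOC_FLAG_WRITABLE,
                .count = 1,
        };
        int fd = open("/dev/xen/gntalloc", O_RDWR);

        if (fd < 0 || ioctl(fd, IOCTL_GNTALLOC_ALLOC_GREF, &req) < 0)
                return -1;

        /* req.index is the offset to hand to mmap(), as documented above. */
        *map = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED,
                    fd, req.index);
        *gref = req.gref_ids[0];
        return *map == MAP_FAILED ? -1 : fd;
}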
*/ - uint64_t index; + __u64 index; /* Action(s) to take on unmap */ - uint32_t action; + __u32 action; /* Event channel to notify */ - uint32_t event_channel_port; + __u32 event_channel_port; }; /* Clear (set to zero) the byte specified by index */ diff --git a/include/xen/balloon.h b/include/xen/balloon.h index a4c1c6a93691..d1767dfb0d95 100644 --- a/include/xen/balloon.h +++ b/include/xen/balloon.h @@ -8,30 +8,24 @@ struct balloon_stats { /* We aim for 'current allocation' == 'target allocation'. */ unsigned long current_pages; unsigned long target_pages; + unsigned long target_unpopulated; /* Number of pages in high- and low-memory balloons. */ unsigned long balloon_low; unsigned long balloon_high; + unsigned long total_pages; unsigned long schedule_delay; unsigned long max_schedule_delay; unsigned long retry_count; unsigned long max_retry_count; -#ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG - unsigned long hotplug_pages; - unsigned long balloon_hotplug; -#endif }; extern struct balloon_stats balloon_stats; void balloon_set_new_target(unsigned long target); -int alloc_xenballooned_pages(int nr_pages, struct page **pages, - bool highmem); +int alloc_xenballooned_pages(int nr_pages, struct page **pages); void free_xenballooned_pages(int nr_pages, struct page **pages); -struct page *get_balloon_scratch_page(void); -void put_balloon_scratch_page(void); - struct device; #ifdef CONFIG_XEN_SELFBALLOONING extern int register_xen_selfballooning(struct device *dev); diff --git a/include/xen/grant_table.h b/include/xen/grant_table.h index 4478f4b4aae2..34b1379f9777 100644 --- a/include/xen/grant_table.h +++ b/include/xen/grant_table.h @@ -45,8 +45,10 @@ #include <asm/xen/hypervisor.h> #include <xen/features.h> +#include <xen/page.h> #include <linux/mm_types.h> #include <linux/page-flags.h> +#include <linux/kernel.h> #define GNTTAB_RESERVED_XENSTORE 1 @@ -129,6 +131,15 @@ void gnttab_cancel_free_callback(struct gnttab_free_callback *callback); void gnttab_grant_foreign_access_ref(grant_ref_t ref, domid_t domid, unsigned long frame, int readonly); +/* Give access to the first 4K of the page */ +static inline void gnttab_page_grant_foreign_access_ref_one( + grant_ref_t ref, domid_t domid, + struct page *page, int readonly) +{ + gnttab_grant_foreign_access_ref(ref, domid, xen_page_to_gfn(page), + readonly); +} + void gnttab_grant_foreign_transfer_ref(grant_ref_t, domid_t domid, unsigned long pfn); @@ -224,4 +235,50 @@ static inline struct xen_page_foreign *xen_page_foreign(struct page *page) #endif } +/* Split Linux page in chunk of the size of the grant and call fn + * + * Parameters of fn: + * gfn: guest frame number + * offset: offset in the grant + * len: length of the data in the grant. 
+ * data: internal information + */ +typedef void (*xen_grant_fn_t)(unsigned long gfn, unsigned int offset, + unsigned int len, void *data); + +void gnttab_foreach_grant_in_range(struct page *page, + unsigned int offset, + unsigned int len, + xen_grant_fn_t fn, + void *data); + +/* Helper to get to call fn only on the first "grant chunk" */ +static inline void gnttab_for_one_grant(struct page *page, unsigned int offset, + unsigned len, xen_grant_fn_t fn, + void *data) +{ + /* The first request is limited to the size of one grant */ + len = min_t(unsigned int, XEN_PAGE_SIZE - (offset & ~XEN_PAGE_MASK), + len); + + gnttab_foreach_grant_in_range(page, offset, len, fn, data); +} + +/* Get @nr_grefs grants from an array of page and call fn for each grant */ +void gnttab_foreach_grant(struct page **pages, + unsigned int nr_grefs, + xen_grant_fn_t fn, + void *data); + +/* Get the number of grant in a specified region + * + * start: Offset from the beginning of the first page + * len: total length of data (can cross multiple page) + */ +static inline unsigned int gnttab_count_grant(unsigned int start, + unsigned int len) +{ + return XEN_PFN_UP(xen_offset_in_page(start) + len); +} + #endif /* __ASM_GNTTAB_H__ */ diff --git a/include/xen/interface/sched.h b/include/xen/interface/sched.h index 9ce083960a25..f18490985fc8 100644 --- a/include/xen/interface/sched.h +++ b/include/xen/interface/sched.h @@ -107,5 +107,13 @@ struct sched_watchdog { #define SHUTDOWN_suspend 2 /* Clean up, save suspend info, kill. */ #define SHUTDOWN_crash 3 /* Tell controller we've crashed. */ #define SHUTDOWN_watchdog 4 /* Restart because watchdog time expired. */ +/* + * Domain asked to perform 'soft reset' for it. The expected behavior is to + * reset internal Xen state for the domain returning it to the point where it + * was created but leaving the domain's memory contents and vCPU contexts + * intact. This will allow the domain to start over and set up all Xen specific + * interfaces again. + */ +#define SHUTDOWN_soft_reset 5 #endif /* __XEN_PUBLIC_SCHED_H__ */ diff --git a/include/xen/page.h b/include/xen/page.h index 1daae485e336..96294ac93755 100644 --- a/include/xen/page.h +++ b/include/xen/page.h @@ -1,11 +1,36 @@ #ifndef _XEN_PAGE_H #define _XEN_PAGE_H +#include <asm/page.h> + +/* The hypercall interface supports only 4KB page */ +#define XEN_PAGE_SHIFT 12 +#define XEN_PAGE_SIZE (_AC(1, UL) << XEN_PAGE_SHIFT) +#define XEN_PAGE_MASK (~(XEN_PAGE_SIZE-1)) +#define xen_offset_in_page(p) ((unsigned long)(p) & ~XEN_PAGE_MASK) + +/* + * We assume that PAGE_SIZE is a multiple of XEN_PAGE_SIZE + * XXX: Add a BUILD_BUG_ON? 
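The grant chunking helpers above (gnttab_foreach_grant_in_range() and friends) are easiest to see in a small kernel-side sketch; the callback and wrapper names below are hypothetical, and the walk simply revisits the Linux page in XEN_PAGE_SIZE pieces.

#include <linux/printk.h>
#include <xen/grant_table.h>
#include <xen/page.h>

struct chunk_ctx {
        unsigned int nr;        /* grant-sized chunks visited */
};

/* Invoked once per 4 KiB chunk of the Linux page. */
static void note_chunk(unsigned long gfn, unsigned int offset,
                       unsigned int len, void *data)
{
        struct chunk_ctx *ctx = data;

        ctx->nr++;
        pr_debug("chunk %u: gfn %#lx offset %u len %u\n",
                 ctx->nr, gfn, offset, len);
}

static unsigned int count_chunks(struct page *page,
                                 unsigned int offset, unsigned int len)
{
        struct chunk_ctx ctx = { .nr = 0 };

        /* Visits the same number of chunks gnttab_count_grant() predicts. */
        gnttab_foreach_grant_in_range(page, offset, len, note_chunk, &ctx);
        return ctx.nr;
}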
+ */ + +#define xen_pfn_to_page(xen_pfn) \ + ((pfn_to_page(((unsigned long)(xen_pfn) << XEN_PAGE_SHIFT) >> PAGE_SHIFT))) +#define page_to_xen_pfn(page) \ + (((page_to_pfn(page)) << PAGE_SHIFT) >> XEN_PAGE_SHIFT) + +#define XEN_PFN_PER_PAGE (PAGE_SIZE / XEN_PAGE_SIZE) + +#define XEN_PFN_DOWN(x) ((x) >> XEN_PAGE_SHIFT) +#define XEN_PFN_UP(x) (((x) + XEN_PAGE_SIZE-1) >> XEN_PAGE_SHIFT) +#define XEN_PFN_PHYS(x) ((phys_addr_t)(x) << XEN_PAGE_SHIFT) + #include <asm/xen/page.h> +/* Return the GFN associated to the first 4KB of the page */ static inline unsigned long xen_page_to_gfn(struct page *page) { - return pfn_to_gfn(page_to_pfn(page)); + return pfn_to_gfn(page_to_xen_pfn(page)); } struct xen_memory_region { diff --git a/include/xen/xenbus.h b/include/xen/xenbus.h index 289c0b5f08fe..32b944b7cebd 100644 --- a/include/xen/xenbus.h +++ b/include/xen/xenbus.h @@ -46,8 +46,8 @@ #include <xen/interface/io/xenbus.h> #include <xen/interface/io/xs_wire.h> -#define XENBUS_MAX_RING_PAGE_ORDER 4 -#define XENBUS_MAX_RING_PAGES (1U << XENBUS_MAX_RING_PAGE_ORDER) +#define XENBUS_MAX_RING_GRANT_ORDER 4 +#define XENBUS_MAX_RING_GRANTS (1U << XENBUS_MAX_RING_GRANT_ORDER) #define INVALID_GRANT_HANDLE (~0U) /* Register callback to watch this node. */ |
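Finally, the 4 KiB-granularity math introduced for Xen above can be checked standalone; the macros below are local copies of the ones added in xen/page.h, and the sample numbers are arbitrary.

#include <stdio.h>

/* Local copies of the new Xen page constants, for a standalone check. */
#define XEN_PAGE_SHIFT  12
#define XEN_PAGE_SIZE   (1UL << XEN_PAGE_SHIFT)
#define XEN_PAGE_MASK   (~(XEN_PAGE_SIZE - 1))
#define xen_offset_in_page(p)   ((unsigned long)(p) & ~XEN_PAGE_MASK)
#define XEN_PFN_UP(x)   (((x) + XEN_PAGE_SIZE - 1) >> XEN_PAGE_SHIFT)

/* Same expression as gnttab_count_grant() in the grant_table.h hunk. */
static unsigned long count_grants(unsigned long start, unsigned long len)
{
        return XEN_PFN_UP(xen_offset_in_page(start) + len);
}

int main(void)
{
        /* 0x3000 bytes starting 0x1800 into a page touch 4 Xen grants. */
        printf("%lu\n", count_grants(0x1800, 0x3000));           /* -> 4 */
        /* On a 64 KiB-page arm64 kernel, XEN_PFN_PER_PAGE would be 16. */
        printf("%lu\n", 65536 / XEN_PAGE_SIZE);                  /* -> 16 */
        return 0;
}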