Diffstat (limited to 'include')
490 files changed, 10713 insertions, 4165 deletions
diff --git a/include/acpi/acconfig.h b/include/acpi/acconfig.h index e6964e97acdd..53c088247d36 100644 --- a/include/acpi/acconfig.h +++ b/include/acpi/acconfig.h @@ -173,11 +173,20 @@ #define ACPI_RSDP_CHECKSUM_LENGTH 20 #define ACPI_RSDP_XCHECKSUM_LENGTH 36 -/* SMBus, GSBus and IPMI bidirectional buffer size */ +/* + * SMBus, GSBus and IPMI buffer sizes. All have a 2-byte header, + * containing both Status and Length. + */ +#define ACPI_SERIAL_HEADER_SIZE 2 /* Common for below. Status and Length fields */ + +#define ACPI_SMBUS_DATA_SIZE 32 +#define ACPI_SMBUS_BUFFER_SIZE ACPI_SERIAL_HEADER_SIZE + ACPI_SMBUS_DATA_SIZE + +#define ACPI_IPMI_DATA_SIZE 64 +#define ACPI_IPMI_BUFFER_SIZE ACPI_SERIAL_HEADER_SIZE + ACPI_IPMI_DATA_SIZE -#define ACPI_SMBUS_BUFFER_SIZE 34 -#define ACPI_GSBUS_BUFFER_SIZE 34 -#define ACPI_IPMI_BUFFER_SIZE 66 +#define ACPI_MAX_GSBUS_DATA_SIZE 255 +#define ACPI_MAX_GSBUS_BUFFER_SIZE ACPI_SERIAL_HEADER_SIZE + ACPI_MAX_GSBUS_DATA_SIZE /* _sx_d and _sx_w control methods */ diff --git a/include/acpi/acexcep.h b/include/acpi/acexcep.h index 856c56ef0143..09f46050961f 100644 --- a/include/acpi/acexcep.h +++ b/include/acpi/acexcep.h @@ -171,8 +171,10 @@ struct acpi_exception_info { #define AE_AML_LOOP_TIMEOUT EXCEP_AML (0x0021) #define AE_AML_UNINITIALIZED_NODE EXCEP_AML (0x0022) #define AE_AML_TARGET_TYPE EXCEP_AML (0x0023) +#define AE_AML_PROTOCOL EXCEP_AML (0x0024) +#define AE_AML_BUFFER_LENGTH EXCEP_AML (0x0025) -#define AE_CODE_AML_MAX 0x0023 +#define AE_CODE_AML_MAX 0x0025 /* * Internal exceptions used for control @@ -347,7 +349,10 @@ static const struct acpi_exception_info acpi_gbl_exception_names_aml[] = { EXCEP_TXT("AE_AML_UNINITIALIZED_NODE", "A namespace node is uninitialized or unresolved"), EXCEP_TXT("AE_AML_TARGET_TYPE", - "A target operand of an incorrect type was encountered") + "A target operand of an incorrect type was encountered"), + EXCEP_TXT("AE_AML_PROTOCOL", "Violation of a fixed ACPI protocol"), + EXCEP_TXT("AE_AML_BUFFER_LENGTH", + "The length of the buffer is invalid/incorrect") }; static const struct acpi_exception_info acpi_gbl_exception_names_ctrl[] = { diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h index ba4dd54f2c82..0300374101cd 100644 --- a/include/acpi/acpi_bus.h +++ b/include/acpi/acpi_bus.h @@ -346,10 +346,16 @@ struct acpi_device_physical_node { bool put_online:1; }; +struct acpi_device_properties { + const guid_t *guid; + const union acpi_object *properties; + struct list_head list; +}; + /* ACPI Device Specific Data (_DSD) */ struct acpi_device_data { const union acpi_object *pointer; - const union acpi_object *properties; + struct list_head properties; const union acpi_object *of_compatible; struct list_head subnodes; }; @@ -595,7 +601,6 @@ enum dev_dma_attr acpi_get_dma_attr(struct acpi_device *adev); int acpi_dma_get_range(struct device *dev, u64 *dma_addr, u64 *offset, u64 *size); int acpi_dma_configure(struct device *dev, enum dev_dma_attr attr); -void acpi_dma_deconfigure(struct device *dev); struct acpi_device *acpi_find_child_device(struct acpi_device *parent, u64 address, bool check_children); diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h index 9566f99cc3c0..0c19b68bf060 100644 --- a/include/acpi/acpixf.h +++ b/include/acpi/acpixf.h @@ -12,7 +12,7 @@ /* Current ACPICA subsystem version in YYYYMMDD format */ -#define ACPI_CA_VERSION 0x20180810 +#define ACPI_CA_VERSION 0x20181003 #include <acpi/acconfig.h> #include <acpi/actypes.h> @@ -157,13 +157,6 @@ ACPI_INIT_GLOBAL(u8, acpi_gbl_copy_dsdt_locally, 
FALSE); ACPI_INIT_GLOBAL(u8, acpi_gbl_do_not_use_xsdt, FALSE); /* - * Optionally support group module level code. - * NOTE, this is essentially obsolete and will be removed soon - * (01/2018). - */ -ACPI_INIT_GLOBAL(u8, acpi_gbl_group_module_level_code, FALSE); - -/* * Optionally support module level code by parsing an entire table as * a method as it is loaded. Default is TRUE. * NOTE, this is essentially obsolete and will be removed soon diff --git a/include/acpi/cppc_acpi.h b/include/acpi/cppc_acpi.h index 8e0b8250a139..cf59e6210d27 100644 --- a/include/acpi/cppc_acpi.h +++ b/include/acpi/cppc_acpi.h @@ -104,6 +104,7 @@ enum cppc_regs { * today. */ struct cppc_perf_caps { + u32 guaranteed_perf; u32 highest_perf; u32 nominal_perf; u32 lowest_perf; diff --git a/include/asm-generic/bug.h b/include/asm-generic/bug.h index 20561a60db9c..cdafa5edea49 100644 --- a/include/asm-generic/bug.h +++ b/include/asm-generic/bug.h @@ -17,10 +17,8 @@ #ifndef __ASSEMBLY__ #include <linux/kernel.h> -#ifdef CONFIG_BUG - -#ifdef CONFIG_GENERIC_BUG struct bug_entry { +#ifdef CONFIG_GENERIC_BUG #ifndef CONFIG_GENERIC_BUG_RELATIVE_POINTERS unsigned long bug_addr; #else @@ -35,8 +33,10 @@ struct bug_entry { unsigned short line; #endif unsigned short flags; -}; #endif /* CONFIG_GENERIC_BUG */ +}; + +#ifdef CONFIG_BUG /* * Don't use BUG() or BUG_ON() unless there's really no way out; one diff --git a/include/asm-generic/compat.h b/include/asm-generic/compat.h index 28819451b6d1..a86f65bffab8 100644 --- a/include/asm-generic/compat.h +++ b/include/asm-generic/compat.h @@ -1,3 +1,25 @@ /* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __ASM_GENERIC_COMPAT_H +#define __ASM_GENERIC_COMPAT_H -/* This is an empty stub for 32-bit-only architectures */ +/* These types are common across all compat ABIs */ +typedef u32 compat_size_t; +typedef s32 compat_ssize_t; +typedef s32 compat_clock_t; +typedef s32 compat_pid_t; +typedef u32 compat_ino_t; +typedef s32 compat_off_t; +typedef s64 compat_loff_t; +typedef s32 compat_daddr_t; +typedef s32 compat_timer_t; +typedef s32 compat_key_t; +typedef s16 compat_short_t; +typedef s32 compat_int_t; +typedef s32 compat_long_t; +typedef u16 compat_ushort_t; +typedef u32 compat_uint_t; +typedef u32 compat_ulong_t; +typedef u32 compat_uptr_t; +typedef u32 compat_aio_context_t; + +#endif diff --git a/include/asm-generic/dma-mapping.h b/include/asm-generic/dma-mapping.h index ad2868263867..880a292d792f 100644 --- a/include/asm-generic/dma-mapping.h +++ b/include/asm-generic/dma-mapping.h @@ -4,16 +4,7 @@ static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus) { - /* - * Use the non-coherent ops if available. If an architecture wants a - * more fine-grained selection of operations it will have to implement - * get_arch_dma_ops itself or use the per-device dma_ops. 
- */ -#ifdef CONFIG_DMA_NONCOHERENT_OPS - return &dma_noncoherent_ops; -#else return &dma_direct_ops; -#endif } #endif /* _ASM_GENERIC_DMA_MAPPING_H */ diff --git a/include/asm-generic/hugetlb.h b/include/asm-generic/hugetlb.h index 9d0cde8ab716..71d7b77eea50 100644 --- a/include/asm-generic/hugetlb.h +++ b/include/asm-generic/hugetlb.h @@ -32,7 +32,7 @@ static inline pte_t huge_pte_modify(pte_t pte, pgprot_t newprot) return pte_modify(pte, newprot); } -#ifndef huge_pte_clear +#ifndef __HAVE_ARCH_HUGE_PTE_CLEAR static inline void huge_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep, unsigned long sz) { @@ -40,4 +40,90 @@ static inline void huge_pte_clear(struct mm_struct *mm, unsigned long addr, } #endif +#ifndef __HAVE_ARCH_HUGETLB_FREE_PGD_RANGE +static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb, + unsigned long addr, unsigned long end, + unsigned long floor, unsigned long ceiling) +{ + free_pgd_range(tlb, addr, end, floor, ceiling); +} +#endif + +#ifndef __HAVE_ARCH_HUGE_SET_HUGE_PTE_AT +static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, + pte_t *ptep, pte_t pte) +{ + set_pte_at(mm, addr, ptep, pte); +} +#endif + +#ifndef __HAVE_ARCH_HUGE_PTEP_GET_AND_CLEAR +static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm, + unsigned long addr, pte_t *ptep) +{ + return ptep_get_and_clear(mm, addr, ptep); +} +#endif + +#ifndef __HAVE_ARCH_HUGE_PTEP_CLEAR_FLUSH +static inline void huge_ptep_clear_flush(struct vm_area_struct *vma, + unsigned long addr, pte_t *ptep) +{ + ptep_clear_flush(vma, addr, ptep); +} +#endif + +#ifndef __HAVE_ARCH_HUGE_PTE_NONE +static inline int huge_pte_none(pte_t pte) +{ + return pte_none(pte); +} +#endif + +#ifndef __HAVE_ARCH_HUGE_PTE_WRPROTECT +static inline pte_t huge_pte_wrprotect(pte_t pte) +{ + return pte_wrprotect(pte); +} +#endif + +#ifndef __HAVE_ARCH_PREPARE_HUGEPAGE_RANGE +static inline int prepare_hugepage_range(struct file *file, + unsigned long addr, unsigned long len) +{ + struct hstate *h = hstate_file(file); + + if (len & ~huge_page_mask(h)) + return -EINVAL; + if (addr & ~huge_page_mask(h)) + return -EINVAL; + + return 0; +} +#endif + +#ifndef __HAVE_ARCH_HUGE_PTEP_SET_WRPROTECT +static inline void huge_ptep_set_wrprotect(struct mm_struct *mm, + unsigned long addr, pte_t *ptep) +{ + ptep_set_wrprotect(mm, addr, ptep); +} +#endif + +#ifndef __HAVE_ARCH_HUGE_PTEP_SET_ACCESS_FLAGS +static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma, + unsigned long addr, pte_t *ptep, + pte_t pte, int dirty) +{ + return ptep_set_access_flags(vma, addr, ptep, pte, dirty); +} +#endif + +#ifndef __HAVE_ARCH_HUGE_PTEP_GET +static inline pte_t huge_ptep_get(pte_t *ptep) +{ + return *ptep; +} +#endif + #endif /* _ASM_GENERIC_HUGETLB_H */ diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h index 88ebc6102c7c..5657a20e0c59 100644 --- a/include/asm-generic/pgtable.h +++ b/include/asm-generic/pgtable.h @@ -757,7 +757,7 @@ static inline pmd_t pmd_swp_clear_soft_dirty(pmd_t pmd) /* * Interfaces that can be used by architecture code to keep track of * memory type of pfn mappings specified by the remap_pfn_range, - * vm_insert_pfn. + * vmf_insert_pfn. */ /* @@ -773,7 +773,7 @@ static inline int track_pfn_remap(struct vm_area_struct *vma, pgprot_t *prot, /* * track_pfn_insert is called when a _new_ single pfn is established - * by vm_insert_pfn(). + * by vmf_insert_pfn(). 
*/ static inline void track_pfn_insert(struct vm_area_struct *vma, pgprot_t *prot, pfn_t pfn) diff --git a/include/asm-generic/qrwlock.h b/include/asm-generic/qrwlock.h index 0f7062bd55e5..36254d2da8e0 100644 --- a/include/asm-generic/qrwlock.h +++ b/include/asm-generic/qrwlock.h @@ -71,8 +71,8 @@ static inline int queued_write_trylock(struct qrwlock *lock) if (unlikely(cnts)) return 0; - return likely(atomic_cmpxchg_acquire(&lock->cnts, - cnts, cnts | _QW_LOCKED) == cnts); + return likely(atomic_try_cmpxchg_acquire(&lock->cnts, &cnts, + _QW_LOCKED)); } /** * queued_read_lock - acquire read lock of a queue rwlock @@ -96,8 +96,9 @@ static inline void queued_read_lock(struct qrwlock *lock) */ static inline void queued_write_lock(struct qrwlock *lock) { + u32 cnts = 0; /* Optimize for the unfair lock case where the fair flag is 0. */ - if (atomic_cmpxchg_acquire(&lock->cnts, 0, _QW_LOCKED) == 0) + if (likely(atomic_try_cmpxchg_acquire(&lock->cnts, &cnts, _QW_LOCKED))) return; queued_write_lock_slowpath(lock); diff --git a/include/asm-generic/qspinlock.h b/include/asm-generic/qspinlock.h index 9cc457597ddf..7541fa707f5b 100644 --- a/include/asm-generic/qspinlock.h +++ b/include/asm-generic/qspinlock.h @@ -66,10 +66,12 @@ static __always_inline int queued_spin_is_contended(struct qspinlock *lock) */ static __always_inline int queued_spin_trylock(struct qspinlock *lock) { - if (!atomic_read(&lock->val) && - (atomic_cmpxchg_acquire(&lock->val, 0, _Q_LOCKED_VAL) == 0)) - return 1; - return 0; + u32 val = atomic_read(&lock->val); + + if (unlikely(val)) + return 0; + + return likely(atomic_try_cmpxchg_acquire(&lock->val, &val, _Q_LOCKED_VAL)); } extern void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val); @@ -80,11 +82,11 @@ extern void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val); */ static __always_inline void queued_spin_lock(struct qspinlock *lock) { - u32 val; + u32 val = 0; - val = atomic_cmpxchg_acquire(&lock->val, 0, _Q_LOCKED_VAL); - if (likely(val == 0)) + if (likely(atomic_try_cmpxchg_acquire(&lock->val, &val, _Q_LOCKED_VAL))) return; + queued_spin_lock_slowpath(lock, val); } diff --git a/include/asm-generic/sections.h b/include/asm-generic/sections.h index 849cd8eb5ca0..d79abca81a52 100644 --- a/include/asm-generic/sections.h +++ b/include/asm-generic/sections.h @@ -141,4 +141,18 @@ static inline bool init_section_intersects(void *virt, size_t size) return memory_intersects(__init_begin, __init_end, virt, size); } +/** + * is_kernel_rodata - checks if the pointer address is located in the + * .rodata section + * + * @addr: address to check + * + * Returns: true if the address is located in .rodata, false otherwise. + */ +static inline bool is_kernel_rodata(unsigned long addr) +{ + return addr >= (unsigned long)__start_rodata && + addr < (unsigned long)__end_rodata; +} + #endif /* _ASM_GENERIC_SECTIONS_H_ */ diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h index b3353e21f3b3..6be86c1c5c58 100644 --- a/include/asm-generic/tlb.h +++ b/include/asm-generic/tlb.h @@ -20,6 +20,8 @@ #include <asm/pgalloc.h> #include <asm/tlbflush.h> +#ifdef CONFIG_MMU + #ifdef CONFIG_HAVE_RCU_TABLE_FREE /* * Semi RCU freeing of the page directories. 
@@ -97,12 +99,30 @@ struct mmu_gather { #endif unsigned long start; unsigned long end; - /* we are in the middle of an operation to clear - * a full mm and can make some optimizations */ - unsigned int fullmm : 1, - /* we have performed an operation which - * requires a complete flush of the tlb */ - need_flush_all : 1; + /* + * we are in the middle of an operation to clear + * a full mm and can make some optimizations + */ + unsigned int fullmm : 1; + + /* + * we have performed an operation which + * requires a complete flush of the tlb + */ + unsigned int need_flush_all : 1; + + /* + * we have removed page directories + */ + unsigned int freed_tables : 1; + + /* + * at which levels have we cleared entries? + */ + unsigned int cleared_ptes : 1; + unsigned int cleared_pmds : 1; + unsigned int cleared_puds : 1; + unsigned int cleared_p4ds : 1; struct mmu_gather_batch *active; struct mmu_gather_batch local; @@ -118,6 +138,7 @@ void arch_tlb_gather_mmu(struct mmu_gather *tlb, void tlb_flush_mmu(struct mmu_gather *tlb); void arch_tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end, bool force); +void tlb_flush_mmu_free(struct mmu_gather *tlb); extern bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page, int page_size); @@ -137,6 +158,11 @@ static inline void __tlb_reset_range(struct mmu_gather *tlb) tlb->start = TASK_SIZE; tlb->end = 0; } + tlb->freed_tables = 0; + tlb->cleared_ptes = 0; + tlb->cleared_pmds = 0; + tlb->cleared_puds = 0; + tlb->cleared_p4ds = 0; } static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb) @@ -186,6 +212,25 @@ static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb, } #endif +static inline unsigned long tlb_get_unmap_shift(struct mmu_gather *tlb) +{ + if (tlb->cleared_ptes) + return PAGE_SHIFT; + if (tlb->cleared_pmds) + return PMD_SHIFT; + if (tlb->cleared_puds) + return PUD_SHIFT; + if (tlb->cleared_p4ds) + return P4D_SHIFT; + + return PAGE_SHIFT; +} + +static inline unsigned long tlb_get_unmap_size(struct mmu_gather *tlb) +{ + return 1UL << tlb_get_unmap_shift(tlb); +} + /* * In the case of tlb vma handling, we can optimise these away in the * case where we're doing a full MM flush. 
When we're doing a munmap, @@ -219,13 +264,19 @@ static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb, #define tlb_remove_tlb_entry(tlb, ptep, address) \ do { \ __tlb_adjust_range(tlb, address, PAGE_SIZE); \ + tlb->cleared_ptes = 1; \ __tlb_remove_tlb_entry(tlb, ptep, address); \ } while (0) -#define tlb_remove_huge_tlb_entry(h, tlb, ptep, address) \ - do { \ - __tlb_adjust_range(tlb, address, huge_page_size(h)); \ - __tlb_remove_tlb_entry(tlb, ptep, address); \ +#define tlb_remove_huge_tlb_entry(h, tlb, ptep, address) \ + do { \ + unsigned long _sz = huge_page_size(h); \ + __tlb_adjust_range(tlb, address, _sz); \ + if (_sz == PMD_SIZE) \ + tlb->cleared_pmds = 1; \ + else if (_sz == PUD_SIZE) \ + tlb->cleared_puds = 1; \ + __tlb_remove_tlb_entry(tlb, ptep, address); \ } while (0) /** @@ -239,6 +290,7 @@ static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb, #define tlb_remove_pmd_tlb_entry(tlb, pmdp, address) \ do { \ __tlb_adjust_range(tlb, address, HPAGE_PMD_SIZE); \ + tlb->cleared_pmds = 1; \ __tlb_remove_pmd_tlb_entry(tlb, pmdp, address); \ } while (0) @@ -253,6 +305,7 @@ static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb, #define tlb_remove_pud_tlb_entry(tlb, pudp, address) \ do { \ __tlb_adjust_range(tlb, address, HPAGE_PUD_SIZE); \ + tlb->cleared_puds = 1; \ __tlb_remove_pud_tlb_entry(tlb, pudp, address); \ } while (0) @@ -278,6 +331,8 @@ static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb, #define pte_free_tlb(tlb, ptep, address) \ do { \ __tlb_adjust_range(tlb, address, PAGE_SIZE); \ + tlb->freed_tables = 1; \ + tlb->cleared_pmds = 1; \ __pte_free_tlb(tlb, ptep, address); \ } while (0) #endif @@ -285,7 +340,9 @@ static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb, #ifndef pmd_free_tlb #define pmd_free_tlb(tlb, pmdp, address) \ do { \ - __tlb_adjust_range(tlb, address, PAGE_SIZE); \ + __tlb_adjust_range(tlb, address, PAGE_SIZE); \ + tlb->freed_tables = 1; \ + tlb->cleared_puds = 1; \ __pmd_free_tlb(tlb, pmdp, address); \ } while (0) #endif @@ -295,6 +352,8 @@ static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb, #define pud_free_tlb(tlb, pudp, address) \ do { \ __tlb_adjust_range(tlb, address, PAGE_SIZE); \ + tlb->freed_tables = 1; \ + tlb->cleared_p4ds = 1; \ __pud_free_tlb(tlb, pudp, address); \ } while (0) #endif @@ -304,12 +363,15 @@ static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb, #ifndef p4d_free_tlb #define p4d_free_tlb(tlb, pudp, address) \ do { \ - __tlb_adjust_range(tlb, address, PAGE_SIZE); \ + __tlb_adjust_range(tlb, address, PAGE_SIZE); \ + tlb->freed_tables = 1; \ __p4d_free_tlb(tlb, pudp, address); \ } while (0) #endif #endif +#endif /* CONFIG_MMU */ + #define tlb_migrate_finish(mm) do {} while (0) #endif /* _ASM_GENERIC__TLB_H */ diff --git a/include/asm-generic/unistd.h b/include/asm-generic/unistd.h deleted file mode 100644 index cdf904265caf..000000000000 --- a/include/asm-generic/unistd.h +++ /dev/null @@ -1,13 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#include <uapi/asm-generic/unistd.h> -#include <linux/export.h> - -/* - * These are required system calls, we should - * invert the logic eventually and let them - * be selected by default. 
- */ -#if __BITS_PER_LONG == 32 -#define __ARCH_WANT_STAT64 -#define __ARCH_WANT_SYS_LLSEEK -#endif diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h index 7b75ff6e2fce..3d7a6a9c2370 100644 --- a/include/asm-generic/vmlinux.lds.h +++ b/include/asm-generic/vmlinux.lds.h @@ -68,7 +68,7 @@ */ #ifdef CONFIG_LD_DEAD_CODE_DATA_ELIMINATION #define TEXT_MAIN .text .text.[0-9a-zA-Z_]* -#define DATA_MAIN .data .data.[0-9a-zA-Z_]* +#define DATA_MAIN .data .data.[0-9a-zA-Z_]* .data..LPBX* #define SDATA_MAIN .sdata .sdata.[0-9a-zA-Z_]* #define RODATA_MAIN .rodata .rodata.[0-9a-zA-Z_]* #define BSS_MAIN .bss .bss.[0-9a-zA-Z_]* @@ -203,6 +203,15 @@ #define EARLYCON_TABLE() #endif +#ifdef CONFIG_SECURITY +#define LSM_TABLE() . = ALIGN(8); \ + __start_lsm_info = .; \ + KEEP(*(.lsm_info.init)) \ + __end_lsm_info = .; +#else +#define LSM_TABLE() +#endif + #define ___OF_TABLE(cfg, name) _OF_TABLE_##cfg(name) #define __OF_TABLE(cfg, name) ___OF_TABLE(cfg, name) #define OF_TABLE(cfg, name) __OF_TABLE(IS_ENABLED(cfg), name) @@ -253,10 +262,6 @@ STRUCT_ALIGN(); \ *(__tracepoints) \ /* implement dynamic printk debug */ \ - . = ALIGN(8); \ - __start___jump_table = .; \ - KEEP(*(__jump_table)) \ - __stop___jump_table = .; \ . = ALIGN(8); \ __start___verbose = .; \ KEEP(*(__verbose)) \ @@ -300,6 +305,12 @@ . = __start_init_task + THREAD_SIZE; \ __end_init_task = .; +#define JUMP_TABLE_DATA \ + . = ALIGN(8); \ + __start___jump_table = .; \ + KEEP(*(__jump_table)) \ + __stop___jump_table = .; + /* * Allow architectures to handle ro_after_init data on their * own by defining an empty RO_AFTER_INIT_DATA. @@ -308,6 +319,7 @@ #define RO_AFTER_INIT_DATA \ __start_ro_after_init = .; \ *(.data..ro_after_init) \ + JUMP_TABLE_DATA \ __end_ro_after_init = .; #endif @@ -473,13 +485,6 @@ #define RODATA RO_DATA_SECTION(4096) #define RO_DATA(align) RO_DATA_SECTION(align) -#define SECURITY_INIT \ - .security_initcall.init : AT(ADDR(.security_initcall.init) - LOAD_OFFSET) { \ - __security_initcall_start = .; \ - KEEP(*(.security_initcall.init)) \ - __security_initcall_end = .; \ - } - /* * .text section. Map to function alignment to avoid address changes * during second ld run in second ld pass when generating System.map @@ -604,7 +609,8 @@ IRQCHIP_OF_MATCH_TABLE() \ ACPI_PROBE_TABLE(irqchip) \ ACPI_PROBE_TABLE(timer) \ - EARLYCON_TABLE() + EARLYCON_TABLE() \ + LSM_TABLE() #define INIT_TEXT \ *(.init.text .init.text.*) \ @@ -613,8 +619,8 @@ #define EXIT_DATA \ *(.exit.data .exit.data.*) \ - *(.fini_array) \ - *(.dtors) \ + *(.fini_array .fini_array.*) \ + *(.dtors .dtors.*) \ MEM_DISCARD(exit.data*) \ MEM_DISCARD(exit.rodata*) @@ -793,11 +799,6 @@ KEEP(*(.con_initcall.init)) \ __con_initcall_end = .; -#define SECURITY_INITCALL \ - __security_initcall_start = .; \ - KEEP(*(.security_initcall.init)) \ - __security_initcall_end = .; - #ifdef CONFIG_BLK_DEV_INITRD #define INIT_RAM_FS \ . 
= ALIGN(4); \ @@ -964,7 +965,6 @@ INIT_SETUP(initsetup_align) \ INIT_CALLS \ CON_INITCALL \ - SECURITY_INITCALL \ INIT_RAM_FS \ } diff --git a/include/crypto/acompress.h b/include/crypto/acompress.h index e328b52425a8..22e6f412c595 100644 --- a/include/crypto/acompress.h +++ b/include/crypto/acompress.h @@ -234,6 +234,34 @@ static inline void acomp_request_set_params(struct acomp_req *req, req->flags |= CRYPTO_ACOMP_ALLOC_OUTPUT; } +static inline void crypto_stat_compress(struct acomp_req *req, int ret) +{ +#ifdef CONFIG_CRYPTO_STATS + struct crypto_acomp *tfm = crypto_acomp_reqtfm(req); + + if (ret && ret != -EINPROGRESS && ret != -EBUSY) { + atomic_inc(&tfm->base.__crt_alg->compress_err_cnt); + } else { + atomic_inc(&tfm->base.__crt_alg->compress_cnt); + atomic64_add(req->slen, &tfm->base.__crt_alg->compress_tlen); + } +#endif +} + +static inline void crypto_stat_decompress(struct acomp_req *req, int ret) +{ +#ifdef CONFIG_CRYPTO_STATS + struct crypto_acomp *tfm = crypto_acomp_reqtfm(req); + + if (ret && ret != -EINPROGRESS && ret != -EBUSY) { + atomic_inc(&tfm->base.__crt_alg->compress_err_cnt); + } else { + atomic_inc(&tfm->base.__crt_alg->decompress_cnt); + atomic64_add(req->slen, &tfm->base.__crt_alg->decompress_tlen); + } +#endif +} + /** * crypto_acomp_compress() -- Invoke asynchronous compress operation * @@ -246,8 +274,11 @@ static inline void acomp_request_set_params(struct acomp_req *req, static inline int crypto_acomp_compress(struct acomp_req *req) { struct crypto_acomp *tfm = crypto_acomp_reqtfm(req); + int ret; - return tfm->compress(req); + ret = tfm->compress(req); + crypto_stat_compress(req, ret); + return ret; } /** @@ -262,8 +293,11 @@ static inline int crypto_acomp_compress(struct acomp_req *req) static inline int crypto_acomp_decompress(struct acomp_req *req) { struct crypto_acomp *tfm = crypto_acomp_reqtfm(req); + int ret; - return tfm->decompress(req); + ret = tfm->decompress(req); + crypto_stat_decompress(req, ret); + return ret; } #endif diff --git a/include/crypto/aead.h b/include/crypto/aead.h index 1e26f790b03f..0d765d7bfb82 100644 --- a/include/crypto/aead.h +++ b/include/crypto/aead.h @@ -306,6 +306,34 @@ static inline struct crypto_aead *crypto_aead_reqtfm(struct aead_request *req) return __crypto_aead_cast(req->base.tfm); } +static inline void crypto_stat_aead_encrypt(struct aead_request *req, int ret) +{ +#ifdef CONFIG_CRYPTO_STATS + struct crypto_aead *tfm = crypto_aead_reqtfm(req); + + if (ret && ret != -EINPROGRESS && ret != -EBUSY) { + atomic_inc(&tfm->base.__crt_alg->aead_err_cnt); + } else { + atomic_inc(&tfm->base.__crt_alg->encrypt_cnt); + atomic64_add(req->cryptlen, &tfm->base.__crt_alg->encrypt_tlen); + } +#endif +} + +static inline void crypto_stat_aead_decrypt(struct aead_request *req, int ret) +{ +#ifdef CONFIG_CRYPTO_STATS + struct crypto_aead *tfm = crypto_aead_reqtfm(req); + + if (ret && ret != -EINPROGRESS && ret != -EBUSY) { + atomic_inc(&tfm->base.__crt_alg->aead_err_cnt); + } else { + atomic_inc(&tfm->base.__crt_alg->decrypt_cnt); + atomic64_add(req->cryptlen, &tfm->base.__crt_alg->decrypt_tlen); + } +#endif +} + /** * crypto_aead_encrypt() - encrypt plaintext * @req: reference to the aead_request handle that holds all information @@ -328,11 +356,14 @@ static inline struct crypto_aead *crypto_aead_reqtfm(struct aead_request *req) static inline int crypto_aead_encrypt(struct aead_request *req) { struct crypto_aead *aead = crypto_aead_reqtfm(req); + int ret; if (crypto_aead_get_flags(aead) & CRYPTO_TFM_NEED_KEY) - return -ENOKEY; - - 
return crypto_aead_alg(aead)->encrypt(req); + ret = -ENOKEY; + else + ret = crypto_aead_alg(aead)->encrypt(req); + crypto_stat_aead_encrypt(req, ret); + return ret; } /** @@ -360,14 +391,16 @@ static inline int crypto_aead_encrypt(struct aead_request *req) static inline int crypto_aead_decrypt(struct aead_request *req) { struct crypto_aead *aead = crypto_aead_reqtfm(req); + int ret; if (crypto_aead_get_flags(aead) & CRYPTO_TFM_NEED_KEY) - return -ENOKEY; - - if (req->cryptlen < crypto_aead_authsize(aead)) - return -EINVAL; - - return crypto_aead_alg(aead)->decrypt(req); + ret = -ENOKEY; + else if (req->cryptlen < crypto_aead_authsize(aead)) + ret = -EINVAL; + else + ret = crypto_aead_alg(aead)->decrypt(req); + crypto_stat_aead_decrypt(req, ret); + return ret; } /** diff --git a/include/crypto/akcipher.h b/include/crypto/akcipher.h index b5e11de4d497..afac71119396 100644 --- a/include/crypto/akcipher.h +++ b/include/crypto/akcipher.h @@ -271,6 +271,62 @@ static inline unsigned int crypto_akcipher_maxsize(struct crypto_akcipher *tfm) return alg->max_size(tfm); } +static inline void crypto_stat_akcipher_encrypt(struct akcipher_request *req, + int ret) +{ +#ifdef CONFIG_CRYPTO_STATS + struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); + + if (ret && ret != -EINPROGRESS && ret != -EBUSY) { + atomic_inc(&tfm->base.__crt_alg->akcipher_err_cnt); + } else { + atomic_inc(&tfm->base.__crt_alg->encrypt_cnt); + atomic64_add(req->src_len, &tfm->base.__crt_alg->encrypt_tlen); + } +#endif +} + +static inline void crypto_stat_akcipher_decrypt(struct akcipher_request *req, + int ret) +{ +#ifdef CONFIG_CRYPTO_STATS + struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); + + if (ret && ret != -EINPROGRESS && ret != -EBUSY) { + atomic_inc(&tfm->base.__crt_alg->akcipher_err_cnt); + } else { + atomic_inc(&tfm->base.__crt_alg->decrypt_cnt); + atomic64_add(req->src_len, &tfm->base.__crt_alg->decrypt_tlen); + } +#endif +} + +static inline void crypto_stat_akcipher_sign(struct akcipher_request *req, + int ret) +{ +#ifdef CONFIG_CRYPTO_STATS + struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); + + if (ret && ret != -EINPROGRESS && ret != -EBUSY) + atomic_inc(&tfm->base.__crt_alg->akcipher_err_cnt); + else + atomic_inc(&tfm->base.__crt_alg->sign_cnt); +#endif +} + +static inline void crypto_stat_akcipher_verify(struct akcipher_request *req, + int ret) +{ +#ifdef CONFIG_CRYPTO_STATS + struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); + + if (ret && ret != -EINPROGRESS && ret != -EBUSY) + atomic_inc(&tfm->base.__crt_alg->akcipher_err_cnt); + else + atomic_inc(&tfm->base.__crt_alg->verify_cnt); +#endif +} + /** * crypto_akcipher_encrypt() - Invoke public key encrypt operation * @@ -285,8 +341,11 @@ static inline int crypto_akcipher_encrypt(struct akcipher_request *req) { struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); struct akcipher_alg *alg = crypto_akcipher_alg(tfm); + int ret; - return alg->encrypt(req); + ret = alg->encrypt(req); + crypto_stat_akcipher_encrypt(req, ret); + return ret; } /** @@ -303,8 +362,11 @@ static inline int crypto_akcipher_decrypt(struct akcipher_request *req) { struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); struct akcipher_alg *alg = crypto_akcipher_alg(tfm); + int ret; - return alg->decrypt(req); + ret = alg->decrypt(req); + crypto_stat_akcipher_decrypt(req, ret); + return ret; } /** @@ -321,8 +383,11 @@ static inline int crypto_akcipher_sign(struct akcipher_request *req) { struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); struct 
akcipher_alg *alg = crypto_akcipher_alg(tfm); + int ret; - return alg->sign(req); + ret = alg->sign(req); + crypto_stat_akcipher_sign(req, ret); + return ret; } /** @@ -339,8 +404,11 @@ static inline int crypto_akcipher_verify(struct akcipher_request *req) { struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); struct akcipher_alg *alg = crypto_akcipher_alg(tfm); + int ret; - return alg->verify(req); + ret = alg->verify(req); + crypto_stat_akcipher_verify(req, ret); + return ret; } /** diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h index bd5e8ccf1687..4a5ad10e75f0 100644 --- a/include/crypto/algapi.h +++ b/include/crypto/algapi.h @@ -20,8 +20,10 @@ /* * Maximum values for blocksize and alignmask, used to allocate * static buffers that are big enough for any combination of - * ciphers and architectures. + * algs and architectures. Ciphers have a lower maximum size. */ +#define MAX_ALGAPI_BLOCKSIZE 160 +#define MAX_ALGAPI_ALIGNMASK 63 #define MAX_CIPHER_BLOCKSIZE 16 #define MAX_CIPHER_ALIGNMASK 15 @@ -425,4 +427,14 @@ static inline void crypto_yield(u32 flags) #endif } +int crypto_register_notifier(struct notifier_block *nb); +int crypto_unregister_notifier(struct notifier_block *nb); + +/* Crypto notification events. */ +enum { + CRYPTO_MSG_ALG_REQUEST, + CRYPTO_MSG_ALG_REGISTER, + CRYPTO_MSG_ALG_LOADED, +}; + #endif /* _CRYPTO_ALGAPI_H */ diff --git a/include/crypto/cbc.h b/include/crypto/cbc.h index f5b8bfc22e6d..3bf28beefa33 100644 --- a/include/crypto/cbc.h +++ b/include/crypto/cbc.h @@ -113,7 +113,7 @@ static inline int crypto_cbc_decrypt_inplace( unsigned int bsize = crypto_skcipher_blocksize(tfm); unsigned int nbytes = walk->nbytes; u8 *src = walk->src.virt.addr; - u8 last_iv[bsize]; + u8 last_iv[MAX_CIPHER_BLOCKSIZE]; /* Start of the last block. 
*/ src += nbytes - (nbytes & (bsize - 1)) - bsize; diff --git a/include/crypto/chacha20.h b/include/crypto/chacha20.h index b83d66073db0..f76302d99e2b 100644 --- a/include/crypto/chacha20.h +++ b/include/crypto/chacha20.h @@ -13,13 +13,12 @@ #define CHACHA20_IV_SIZE 16 #define CHACHA20_KEY_SIZE 32 #define CHACHA20_BLOCK_SIZE 64 -#define CHACHA20_BLOCK_WORDS (CHACHA20_BLOCK_SIZE / sizeof(u32)) struct chacha20_ctx { u32 key[8]; }; -void chacha20_block(u32 *state, u32 *stream); +void chacha20_block(u32 *state, u8 *stream); void crypto_chacha20_init(u32 *state, struct chacha20_ctx *ctx, u8 *iv); int crypto_chacha20_setkey(struct crypto_skcipher *tfm, const u8 *key, unsigned int keysize); diff --git a/include/crypto/hash.h b/include/crypto/hash.h index 76e432cab75d..bc7796600338 100644 --- a/include/crypto/hash.h +++ b/include/crypto/hash.h @@ -151,9 +151,13 @@ struct shash_desc { void *__ctx[] CRYPTO_MINALIGN_ATTR; }; +#define HASH_MAX_DIGESTSIZE 64 +#define HASH_MAX_DESCSIZE 360 +#define HASH_MAX_STATESIZE 512 + #define SHASH_DESC_ON_STACK(shash, ctx) \ char __##shash##_desc[sizeof(struct shash_desc) + \ - crypto_shash_descsize(ctx)] CRYPTO_MINALIGN_ATTR; \ + HASH_MAX_DESCSIZE] CRYPTO_MINALIGN_ATTR; \ struct shash_desc *shash = (struct shash_desc *)__##shash##_desc /** @@ -408,6 +412,32 @@ static inline void *ahash_request_ctx(struct ahash_request *req) int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key, unsigned int keylen); +static inline void crypto_stat_ahash_update(struct ahash_request *req, int ret) +{ +#ifdef CONFIG_CRYPTO_STATS + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + + if (ret && ret != -EINPROGRESS && ret != -EBUSY) + atomic_inc(&tfm->base.__crt_alg->hash_err_cnt); + else + atomic64_add(req->nbytes, &tfm->base.__crt_alg->hash_tlen); +#endif +} + +static inline void crypto_stat_ahash_final(struct ahash_request *req, int ret) +{ +#ifdef CONFIG_CRYPTO_STATS + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + + if (ret && ret != -EINPROGRESS && ret != -EBUSY) { + atomic_inc(&tfm->base.__crt_alg->hash_err_cnt); + } else { + atomic_inc(&tfm->base.__crt_alg->hash_cnt); + atomic64_add(req->nbytes, &tfm->base.__crt_alg->hash_tlen); + } +#endif +} + /** * crypto_ahash_finup() - update and finalize message digest * @req: reference to the ahash_request handle that holds all information @@ -522,7 +552,11 @@ static inline int crypto_ahash_init(struct ahash_request *req) */ static inline int crypto_ahash_update(struct ahash_request *req) { - return crypto_ahash_reqtfm(req)->update(req); + int ret; + + ret = crypto_ahash_reqtfm(req)->update(req); + crypto_stat_ahash_update(req, ret); + return ret; } /** diff --git a/include/crypto/internal/cryptouser.h b/include/crypto/internal/cryptouser.h new file mode 100644 index 000000000000..8db299c25566 --- /dev/null +++ b/include/crypto/internal/cryptouser.h @@ -0,0 +1,8 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#include <net/netlink.h> + +struct crypto_alg *crypto_alg_match(struct crypto_user_alg *p, int exact); + +int crypto_dump_reportstat(struct sk_buff *skb, struct netlink_callback *cb); +int crypto_reportstat(struct sk_buff *in_skb, struct nlmsghdr *in_nlh, struct nlattr **attrs); +int crypto_dump_reportstat_done(struct netlink_callback *cb); diff --git a/include/crypto/internal/geniv.h b/include/crypto/internal/geniv.h index 2bcfb931bc5b..71be24cd59bd 100644 --- a/include/crypto/internal/geniv.h +++ b/include/crypto/internal/geniv.h @@ -20,7 +20,7 @@ struct aead_geniv_ctx { spinlock_t lock; struct crypto_aead 
*child; - struct crypto_skcipher *sknull; + struct crypto_sync_skcipher *sknull; u8 salt[] __attribute__ ((aligned(__alignof__(u32)))); }; diff --git a/include/crypto/kpp.h b/include/crypto/kpp.h index 1bde0a6514fa..f517ba6d3a27 100644 --- a/include/crypto/kpp.h +++ b/include/crypto/kpp.h @@ -268,6 +268,42 @@ struct kpp_secret { unsigned short len; }; +static inline void crypto_stat_kpp_set_secret(struct crypto_kpp *tfm, int ret) +{ +#ifdef CONFIG_CRYPTO_STATS + if (ret) + atomic_inc(&tfm->base.__crt_alg->kpp_err_cnt); + else + atomic_inc(&tfm->base.__crt_alg->setsecret_cnt); +#endif +} + +static inline void crypto_stat_kpp_generate_public_key(struct kpp_request *req, + int ret) +{ +#ifdef CONFIG_CRYPTO_STATS + struct crypto_kpp *tfm = crypto_kpp_reqtfm(req); + + if (ret) + atomic_inc(&tfm->base.__crt_alg->kpp_err_cnt); + else + atomic_inc(&tfm->base.__crt_alg->generate_public_key_cnt); +#endif +} + +static inline void crypto_stat_kpp_compute_shared_secret(struct kpp_request *req, + int ret) +{ +#ifdef CONFIG_CRYPTO_STATS + struct crypto_kpp *tfm = crypto_kpp_reqtfm(req); + + if (ret) + atomic_inc(&tfm->base.__crt_alg->kpp_err_cnt); + else + atomic_inc(&tfm->base.__crt_alg->compute_shared_secret_cnt); +#endif +} + /** * crypto_kpp_set_secret() - Invoke kpp operation * @@ -287,8 +323,11 @@ static inline int crypto_kpp_set_secret(struct crypto_kpp *tfm, const void *buffer, unsigned int len) { struct kpp_alg *alg = crypto_kpp_alg(tfm); + int ret; - return alg->set_secret(tfm, buffer, len); + ret = alg->set_secret(tfm, buffer, len); + crypto_stat_kpp_set_secret(tfm, ret); + return ret; } /** @@ -308,8 +347,11 @@ static inline int crypto_kpp_generate_public_key(struct kpp_request *req) { struct crypto_kpp *tfm = crypto_kpp_reqtfm(req); struct kpp_alg *alg = crypto_kpp_alg(tfm); + int ret; - return alg->generate_public_key(req); + ret = alg->generate_public_key(req); + crypto_stat_kpp_generate_public_key(req, ret); + return ret; } /** @@ -326,8 +368,11 @@ static inline int crypto_kpp_compute_shared_secret(struct kpp_request *req) { struct crypto_kpp *tfm = crypto_kpp_reqtfm(req); struct kpp_alg *alg = crypto_kpp_alg(tfm); + int ret; - return alg->compute_shared_secret(req); + ret = alg->compute_shared_secret(req); + crypto_stat_kpp_compute_shared_secret(req, ret); + return ret; } /** diff --git a/include/crypto/mcryptd.h b/include/crypto/mcryptd.h deleted file mode 100644 index b67404fc4b34..000000000000 --- a/include/crypto/mcryptd.h +++ /dev/null @@ -1,114 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * Software async multibuffer crypto daemon headers - * - * Author: - * Tim Chen <tim.c.chen@linux.intel.com> - * - * Copyright (c) 2014, Intel Corporation. 
- */ - -#ifndef _CRYPTO_MCRYPT_H -#define _CRYPTO_MCRYPT_H - -#include <linux/crypto.h> -#include <linux/kernel.h> -#include <crypto/hash.h> - -struct mcryptd_ahash { - struct crypto_ahash base; -}; - -static inline struct mcryptd_ahash *__mcryptd_ahash_cast( - struct crypto_ahash *tfm) -{ - return (struct mcryptd_ahash *)tfm; -} - -struct mcryptd_cpu_queue { - struct crypto_queue queue; - spinlock_t q_lock; - struct work_struct work; -}; - -struct mcryptd_queue { - struct mcryptd_cpu_queue __percpu *cpu_queue; -}; - -struct mcryptd_instance_ctx { - struct crypto_spawn spawn; - struct mcryptd_queue *queue; -}; - -struct mcryptd_hash_ctx { - struct crypto_ahash *child; - struct mcryptd_alg_state *alg_state; -}; - -struct mcryptd_tag { - /* seq number of request */ - unsigned seq_num; - /* arrival time of request */ - unsigned long arrival; - unsigned long expire; - int cpu; -}; - -struct mcryptd_hash_request_ctx { - struct list_head waiter; - crypto_completion_t complete; - struct mcryptd_tag tag; - struct crypto_hash_walk walk; - u8 *out; - int flag; - struct ahash_request areq; -}; - -struct mcryptd_ahash *mcryptd_alloc_ahash(const char *alg_name, - u32 type, u32 mask); -struct crypto_ahash *mcryptd_ahash_child(struct mcryptd_ahash *tfm); -struct ahash_request *mcryptd_ahash_desc(struct ahash_request *req); -void mcryptd_free_ahash(struct mcryptd_ahash *tfm); -void mcryptd_flusher(struct work_struct *work); - -enum mcryptd_req_type { - MCRYPTD_NONE, - MCRYPTD_UPDATE, - MCRYPTD_FINUP, - MCRYPTD_DIGEST, - MCRYPTD_FINAL -}; - -struct mcryptd_alg_cstate { - unsigned long next_flush; - unsigned next_seq_num; - bool flusher_engaged; - struct delayed_work flush; - int cpu; - struct mcryptd_alg_state *alg_state; - void *mgr; - spinlock_t work_lock; - struct list_head work_list; - struct list_head flush_list; -}; - -struct mcryptd_alg_state { - struct mcryptd_alg_cstate __percpu *alg_cstate; - unsigned long (*flusher)(struct mcryptd_alg_cstate *cstate); -}; - -/* return delay in jiffies from current time */ -static inline unsigned long get_delay(unsigned long t) -{ - long delay; - - delay = (long) t - (long) jiffies; - if (delay <= 0) - return 0; - else - return (unsigned long) delay; -} - -void mcryptd_arm_flusher(struct mcryptd_alg_cstate *cstate, unsigned long delay); - -#endif diff --git a/include/crypto/morus1280_glue.h b/include/crypto/morus1280_glue.h index b26dd70efd9a..ba782e10065e 100644 --- a/include/crypto/morus1280_glue.h +++ b/include/crypto/morus1280_glue.h @@ -82,7 +82,7 @@ void cryptd_morus1280_glue_exit_tfm(struct crypto_aead *aead); { \ } \ \ - struct aead_alg crypto_morus1280_##id##_algs[] = {\ + static struct aead_alg crypto_morus1280_##id##_algs[] = {\ { \ .setkey = crypto_morus1280_glue_setkey, \ .setauthsize = crypto_morus1280_glue_setauthsize, \ diff --git a/include/crypto/morus640_glue.h b/include/crypto/morus640_glue.h index 90c8db07e740..27fa790a2362 100644 --- a/include/crypto/morus640_glue.h +++ b/include/crypto/morus640_glue.h @@ -82,7 +82,7 @@ void cryptd_morus640_glue_exit_tfm(struct crypto_aead *aead); { \ } \ \ - struct aead_alg crypto_morus640_##id##_algs[] = {\ + static struct aead_alg crypto_morus640_##id##_algs[] = {\ { \ .setkey = crypto_morus640_glue_setkey, \ .setauthsize = crypto_morus640_glue_setauthsize, \ diff --git a/include/crypto/null.h b/include/crypto/null.h index 15aeef6e30ef..0ef577cc00e3 100644 --- a/include/crypto/null.h +++ b/include/crypto/null.h @@ -9,7 +9,7 @@ #define NULL_DIGEST_SIZE 0 #define NULL_IV_SIZE 0 -struct crypto_skcipher 
*crypto_get_default_null_skcipher(void); +struct crypto_sync_skcipher *crypto_get_default_null_skcipher(void); void crypto_put_default_null_skcipher(void); #endif diff --git a/include/crypto/rng.h b/include/crypto/rng.h index b95ede354a66..6d258f5b68f1 100644 --- a/include/crypto/rng.h +++ b/include/crypto/rng.h @@ -122,6 +122,29 @@ static inline void crypto_free_rng(struct crypto_rng *tfm) crypto_destroy_tfm(tfm, crypto_rng_tfm(tfm)); } +static inline void crypto_stat_rng_seed(struct crypto_rng *tfm, int ret) +{ +#ifdef CONFIG_CRYPTO_STATS + if (ret && ret != -EINPROGRESS && ret != -EBUSY) + atomic_inc(&tfm->base.__crt_alg->rng_err_cnt); + else + atomic_inc(&tfm->base.__crt_alg->seed_cnt); +#endif +} + +static inline void crypto_stat_rng_generate(struct crypto_rng *tfm, + unsigned int dlen, int ret) +{ +#ifdef CONFIG_CRYPTO_STATS + if (ret && ret != -EINPROGRESS && ret != -EBUSY) { + atomic_inc(&tfm->base.__crt_alg->rng_err_cnt); + } else { + atomic_inc(&tfm->base.__crt_alg->generate_cnt); + atomic64_add(dlen, &tfm->base.__crt_alg->generate_tlen); + } +#endif +} + /** * crypto_rng_generate() - get random number * @tfm: cipher handle @@ -140,7 +163,11 @@ static inline int crypto_rng_generate(struct crypto_rng *tfm, const u8 *src, unsigned int slen, u8 *dst, unsigned int dlen) { - return crypto_rng_alg(tfm)->generate(tfm, src, slen, dst, dlen); + int ret; + + ret = crypto_rng_alg(tfm)->generate(tfm, src, slen, dst, dlen); + crypto_stat_rng_generate(tfm, dlen, ret); + return ret; } /** diff --git a/include/crypto/skcipher.h b/include/crypto/skcipher.h index 2f327f090c3e..925f547cdcfa 100644 --- a/include/crypto/skcipher.h +++ b/include/crypto/skcipher.h @@ -65,6 +65,10 @@ struct crypto_skcipher { struct crypto_tfm base; }; +struct crypto_sync_skcipher { + struct crypto_skcipher base; +}; + /** * struct skcipher_alg - symmetric key cipher definition * @min_keysize: Minimum key size supported by the transformation. This is the @@ -139,9 +143,17 @@ struct skcipher_alg { struct crypto_alg base; }; -#define SKCIPHER_REQUEST_ON_STACK(name, tfm) \ +#define MAX_SYNC_SKCIPHER_REQSIZE 384 +/* + * This performs a type-check against the "tfm" argument to make sure + * all users have the correct skcipher tfm for doing on-stack requests. + */ +#define SYNC_SKCIPHER_REQUEST_ON_STACK(name, tfm) \ char __##name##_desc[sizeof(struct skcipher_request) + \ - crypto_skcipher_reqsize(tfm)] CRYPTO_MINALIGN_ATTR; \ + MAX_SYNC_SKCIPHER_REQSIZE + \ + (!(sizeof((struct crypto_sync_skcipher *)1 == \ + (typeof(tfm))1))) \ + ] CRYPTO_MINALIGN_ATTR; \ struct skcipher_request *name = (void *)__##name##_desc /** @@ -197,6 +209,9 @@ static inline struct crypto_skcipher *__crypto_skcipher_cast( struct crypto_skcipher *crypto_alloc_skcipher(const char *alg_name, u32 type, u32 mask); +struct crypto_sync_skcipher *crypto_alloc_sync_skcipher(const char *alg_name, + u32 type, u32 mask); + static inline struct crypto_tfm *crypto_skcipher_tfm( struct crypto_skcipher *tfm) { @@ -212,6 +227,11 @@ static inline void crypto_free_skcipher(struct crypto_skcipher *tfm) crypto_destroy_tfm(tfm, crypto_skcipher_tfm(tfm)); } +static inline void crypto_free_sync_skcipher(struct crypto_sync_skcipher *tfm) +{ + crypto_free_skcipher(&tfm->base); +} + /** * crypto_has_skcipher() - Search for the availability of an skcipher. 
* @alg_name: is the cra_name / name or cra_driver_name / driver name of the @@ -280,6 +300,12 @@ static inline unsigned int crypto_skcipher_ivsize(struct crypto_skcipher *tfm) return tfm->ivsize; } +static inline unsigned int crypto_sync_skcipher_ivsize( + struct crypto_sync_skcipher *tfm) +{ + return crypto_skcipher_ivsize(&tfm->base); +} + static inline unsigned int crypto_skcipher_alg_chunksize( struct skcipher_alg *alg) { @@ -356,6 +382,12 @@ static inline unsigned int crypto_skcipher_blocksize( return crypto_tfm_alg_blocksize(crypto_skcipher_tfm(tfm)); } +static inline unsigned int crypto_sync_skcipher_blocksize( + struct crypto_sync_skcipher *tfm) +{ + return crypto_skcipher_blocksize(&tfm->base); +} + static inline unsigned int crypto_skcipher_alignmask( struct crypto_skcipher *tfm) { @@ -379,6 +411,24 @@ static inline void crypto_skcipher_clear_flags(struct crypto_skcipher *tfm, crypto_tfm_clear_flags(crypto_skcipher_tfm(tfm), flags); } +static inline u32 crypto_sync_skcipher_get_flags( + struct crypto_sync_skcipher *tfm) +{ + return crypto_skcipher_get_flags(&tfm->base); +} + +static inline void crypto_sync_skcipher_set_flags( + struct crypto_sync_skcipher *tfm, u32 flags) +{ + crypto_skcipher_set_flags(&tfm->base, flags); +} + +static inline void crypto_sync_skcipher_clear_flags( + struct crypto_sync_skcipher *tfm, u32 flags) +{ + crypto_skcipher_clear_flags(&tfm->base, flags); +} + /** * crypto_skcipher_setkey() - set key for cipher * @tfm: cipher handle @@ -401,6 +451,12 @@ static inline int crypto_skcipher_setkey(struct crypto_skcipher *tfm, return tfm->setkey(tfm, key, keylen); } +static inline int crypto_sync_skcipher_setkey(struct crypto_sync_skcipher *tfm, + const u8 *key, unsigned int keylen) +{ + return crypto_skcipher_setkey(&tfm->base, key, keylen); +} + static inline unsigned int crypto_skcipher_default_keysize( struct crypto_skcipher *tfm) { @@ -422,6 +478,40 @@ static inline struct crypto_skcipher *crypto_skcipher_reqtfm( return __crypto_skcipher_cast(req->base.tfm); } +static inline struct crypto_sync_skcipher *crypto_sync_skcipher_reqtfm( + struct skcipher_request *req) +{ + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + + return container_of(tfm, struct crypto_sync_skcipher, base); +} + +static inline void crypto_stat_skcipher_encrypt(struct skcipher_request *req, + int ret, struct crypto_alg *alg) +{ +#ifdef CONFIG_CRYPTO_STATS + if (ret && ret != -EINPROGRESS && ret != -EBUSY) { + atomic_inc(&alg->cipher_err_cnt); + } else { + atomic_inc(&alg->encrypt_cnt); + atomic64_add(req->cryptlen, &alg->encrypt_tlen); + } +#endif +} + +static inline void crypto_stat_skcipher_decrypt(struct skcipher_request *req, + int ret, struct crypto_alg *alg) +{ +#ifdef CONFIG_CRYPTO_STATS + if (ret && ret != -EINPROGRESS && ret != -EBUSY) { + atomic_inc(&alg->cipher_err_cnt); + } else { + atomic_inc(&alg->decrypt_cnt); + atomic64_add(req->cryptlen, &alg->decrypt_tlen); + } +#endif +} + /** * crypto_skcipher_encrypt() - encrypt plaintext * @req: reference to the skcipher_request handle that holds all information @@ -436,11 +526,14 @@ static inline struct crypto_skcipher *crypto_skcipher_reqtfm( static inline int crypto_skcipher_encrypt(struct skcipher_request *req) { struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + int ret; if (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY) - return -ENOKEY; - - return tfm->encrypt(req); + ret = -ENOKEY; + else + ret = tfm->encrypt(req); + crypto_stat_skcipher_encrypt(req, ret, tfm->base.__crt_alg); + return ret; } /** 
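[Note] The skcipher.h hunks above introduce a crypto_sync_skcipher wrapper type plus SYNC_SKCIPHER_REQUEST_ON_STACK, so only synchronous tfms can back on-stack requests. The following is a minimal, illustrative sketch of how a caller might use the new helpers; the function name and its parameters are hypothetical, and only the crypto_*/skcipher_* calls come from this header (skcipher_request_set_sync_tfm() is added a little further down in the same file).

#include <crypto/skcipher.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

/*
 * Illustrative sketch only: example_encrypt() and its arguments are
 * hypothetical; the sync-skcipher helpers are the ones added above.
 */
static int example_encrypt(const u8 *key, unsigned int keylen, u8 *iv,
			   struct scatterlist *sg, unsigned int len)
{
	struct crypto_sync_skcipher *tfm;
	int err;

	/* A synchronous-only tfm is required for on-stack requests. */
	tfm = crypto_alloc_sync_skcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_sync_skcipher_setkey(tfm, key, keylen);
	if (!err) {
		/* Request lives on the stack, sized by MAX_SYNC_SKCIPHER_REQSIZE. */
		SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm);

		skcipher_request_set_sync_tfm(req, tfm);
		skcipher_request_set_callback(req, 0, NULL, NULL);
		skcipher_request_set_crypt(req, sg, sg, len, iv);
		err = crypto_skcipher_encrypt(req);
		skcipher_request_zero(req);
	}

	crypto_free_sync_skcipher(tfm);
	return err;
}

Per the comment added in the diff, the extra sizeof() term inside SYNC_SKCIPHER_REQUEST_ON_STACK is intended purely as a compile-time type check on the tfm argument, steering on-stack users toward the new sync-only type.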
@@ -457,11 +550,14 @@ static inline int crypto_skcipher_encrypt(struct skcipher_request *req) static inline int crypto_skcipher_decrypt(struct skcipher_request *req) { struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + int ret; if (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY) - return -ENOKEY; - - return tfm->decrypt(req); + ret = -ENOKEY; + else + ret = tfm->decrypt(req); + crypto_stat_skcipher_decrypt(req, ret, tfm->base.__crt_alg); + return ret; } /** @@ -500,6 +596,12 @@ static inline void skcipher_request_set_tfm(struct skcipher_request *req, req->base.tfm = crypto_skcipher_tfm(tfm); } +static inline void skcipher_request_set_sync_tfm(struct skcipher_request *req, + struct crypto_sync_skcipher *tfm) +{ + skcipher_request_set_tfm(req, &tfm->base); +} + static inline struct skcipher_request *skcipher_request_cast( struct crypto_async_request *req) { diff --git a/include/crypto/speck.h b/include/crypto/speck.h deleted file mode 100644 index 73cfc952d405..000000000000 --- a/include/crypto/speck.h +++ /dev/null @@ -1,62 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * Common values for the Speck algorithm - */ - -#ifndef _CRYPTO_SPECK_H -#define _CRYPTO_SPECK_H - -#include <linux/types.h> - -/* Speck128 */ - -#define SPECK128_BLOCK_SIZE 16 - -#define SPECK128_128_KEY_SIZE 16 -#define SPECK128_128_NROUNDS 32 - -#define SPECK128_192_KEY_SIZE 24 -#define SPECK128_192_NROUNDS 33 - -#define SPECK128_256_KEY_SIZE 32 -#define SPECK128_256_NROUNDS 34 - -struct speck128_tfm_ctx { - u64 round_keys[SPECK128_256_NROUNDS]; - int nrounds; -}; - -void crypto_speck128_encrypt(const struct speck128_tfm_ctx *ctx, - u8 *out, const u8 *in); - -void crypto_speck128_decrypt(const struct speck128_tfm_ctx *ctx, - u8 *out, const u8 *in); - -int crypto_speck128_setkey(struct speck128_tfm_ctx *ctx, const u8 *key, - unsigned int keysize); - -/* Speck64 */ - -#define SPECK64_BLOCK_SIZE 8 - -#define SPECK64_96_KEY_SIZE 12 -#define SPECK64_96_NROUNDS 26 - -#define SPECK64_128_KEY_SIZE 16 -#define SPECK64_128_NROUNDS 27 - -struct speck64_tfm_ctx { - u32 round_keys[SPECK64_128_NROUNDS]; - int nrounds; -}; - -void crypto_speck64_encrypt(const struct speck64_tfm_ctx *ctx, - u8 *out, const u8 *in); - -void crypto_speck64_decrypt(const struct speck64_tfm_ctx *ctx, - u8 *out, const u8 *in); - -int crypto_speck64_setkey(struct speck64_tfm_ctx *ctx, const u8 *key, - unsigned int keysize); - -#endif /* _CRYPTO_SPECK_H */ diff --git a/include/drm/drm_atomic.h b/include/drm/drm_atomic.h index da9d95a19580..1e713154f00e 100644 --- a/include/drm/drm_atomic.h +++ b/include/drm/drm_atomic.h @@ -153,6 +153,17 @@ struct __drm_planes_state { struct __drm_crtcs_state { struct drm_crtc *ptr; struct drm_crtc_state *state, *old_state, *new_state; + + /** + * @commit: + * + * A reference to the CRTC commit object that is kept for use by + * drm_atomic_helper_wait_for_flip_done() after + * drm_atomic_helper_commit_hw_done() is called. This ensures that a + * concurrent commit won't free a commit object that is still in use. 
+ */ + struct drm_crtc_commit *commit; + s32 __user *out_fence_ptr; u64 last_vblank_count; }; diff --git a/include/drm/drm_client.h b/include/drm/drm_client.h index 989f8e52864d..971bb7853776 100644 --- a/include/drm/drm_client.h +++ b/include/drm/drm_client.h @@ -87,9 +87,10 @@ struct drm_client_dev { struct drm_file *file; }; -int drm_client_new(struct drm_device *dev, struct drm_client_dev *client, - const char *name, const struct drm_client_funcs *funcs); +int drm_client_init(struct drm_device *dev, struct drm_client_dev *client, + const char *name, const struct drm_client_funcs *funcs); void drm_client_release(struct drm_client_dev *client); +void drm_client_add(struct drm_client_dev *client); void drm_client_dev_unregister(struct drm_device *dev); void drm_client_dev_hotplug(struct drm_device *dev); diff --git a/include/drm/drm_edid.h b/include/drm/drm_edid.h index b25d12ef120a..e3c404833115 100644 --- a/include/drm/drm_edid.h +++ b/include/drm/drm_edid.h @@ -214,9 +214,9 @@ struct detailed_timing { #define DRM_EDID_HDMI_DC_Y444 (1 << 3) /* YCBCR 420 deep color modes */ -#define DRM_EDID_YCBCR420_DC_48 (1 << 6) -#define DRM_EDID_YCBCR420_DC_36 (1 << 5) -#define DRM_EDID_YCBCR420_DC_30 (1 << 4) +#define DRM_EDID_YCBCR420_DC_48 (1 << 2) +#define DRM_EDID_YCBCR420_DC_36 (1 << 1) +#define DRM_EDID_YCBCR420_DC_30 (1 << 0) #define DRM_EDID_YCBCR420_DC_MASK (DRM_EDID_YCBCR420_DC_48 | \ DRM_EDID_YCBCR420_DC_36 | \ DRM_EDID_YCBCR420_DC_30) diff --git a/include/dt-bindings/clock/exynos3250.h b/include/dt-bindings/clock/exynos3250.h index c796ff02ceeb..fe8214017b46 100644 --- a/include/dt-bindings/clock/exynos3250.h +++ b/include/dt-bindings/clock/exynos3250.h @@ -1,11 +1,8 @@ +/* SPDX-License-Identifier: GPL-2.0 */ /* * Copyright (c) 2014 Samsung Electronics Co., Ltd. * Author: Tomasz Figa <t.figa@samsung.com> * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * * Device Tree binding constants for Samsung Exynos3250 clock controllers. */ diff --git a/include/dt-bindings/clock/exynos4.h b/include/dt-bindings/clock/exynos4.h index e9f9d400c322..5b1d68512360 100644 --- a/include/dt-bindings/clock/exynos4.h +++ b/include/dt-bindings/clock/exynos4.h @@ -1,13 +1,10 @@ +/* SPDX-License-Identifier: GPL-2.0 */ /* * Copyright (c) 2013 Samsung Electronics Co., Ltd. * Author: Andrzej Hajda <a.hajda@samsung.com> * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * * Device Tree binding constants for Exynos4 clock controller. -*/ + */ #ifndef _DT_BINDINGS_CLOCK_EXYNOS_4_H #define _DT_BINDINGS_CLOCK_EXYNOS_4_H diff --git a/include/dt-bindings/clock/exynos5250.h b/include/dt-bindings/clock/exynos5250.h index 15508adcdfde..bc8a3c53a54b 100644 --- a/include/dt-bindings/clock/exynos5250.h +++ b/include/dt-bindings/clock/exynos5250.h @@ -1,13 +1,10 @@ +/* SPDX-License-Identifier: GPL-2.0 */ /* * Copyright (c) 2013 Samsung Electronics Co., Ltd. * Author: Andrzej Hajda <a.hajda@samsung.com> * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * * Device Tree binding constants for Exynos5250 clock controller. 
-*/ + */ #ifndef _DT_BINDINGS_CLOCK_EXYNOS_5250_H #define _DT_BINDINGS_CLOCK_EXYNOS_5250_H diff --git a/include/dt-bindings/clock/exynos5260-clk.h b/include/dt-bindings/clock/exynos5260-clk.h index a4bac9a1764f..98a58cbd81b2 100644 --- a/include/dt-bindings/clock/exynos5260-clk.h +++ b/include/dt-bindings/clock/exynos5260-clk.h @@ -1,13 +1,10 @@ +/* SPDX-License-Identifier: GPL-2.0 */ /* * Copyright (c) 2014 Samsung Electronics Co., Ltd. * Author: Rahul Sharma <rahul.sharma@samsung.com> * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * * Provides Constants for Exynos5260 clocks. -*/ + */ #ifndef _DT_BINDINGS_CLK_EXYNOS5260_H #define _DT_BINDINGS_CLK_EXYNOS5260_H diff --git a/include/dt-bindings/clock/exynos5410.h b/include/dt-bindings/clock/exynos5410.h index 6cb4e90f81fc..f179eabbcdb7 100644 --- a/include/dt-bindings/clock/exynos5410.h +++ b/include/dt-bindings/clock/exynos5410.h @@ -1,13 +1,10 @@ +/* SPDX-License-Identifier: GPL-2.0 */ /* * Copyright (c) 2014 Samsung Electronics Co., Ltd. * Copyright (c) 2016 Krzysztof Kozlowski * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * * Device Tree binding constants for Exynos5421 clock controller. -*/ + */ #ifndef _DT_BINDINGS_CLOCK_EXYNOS_5410_H #define _DT_BINDINGS_CLOCK_EXYNOS_5410_H diff --git a/include/dt-bindings/clock/exynos5420.h b/include/dt-bindings/clock/exynos5420.h index 2740ae0424a9..355f469943f1 100644 --- a/include/dt-bindings/clock/exynos5420.h +++ b/include/dt-bindings/clock/exynos5420.h @@ -1,13 +1,10 @@ +/* SPDX-License-Identifier: GPL-2.0 */ /* * Copyright (c) 2013 Samsung Electronics Co., Ltd. * Author: Andrzej Hajda <a.hajda@samsung.com> * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * * Device Tree binding constants for Exynos5420 clock controller. -*/ + */ #ifndef _DT_BINDINGS_CLOCK_EXYNOS_5420_H #define _DT_BINDINGS_CLOCK_EXYNOS_5420_H diff --git a/include/dt-bindings/clock/exynos5433.h b/include/dt-bindings/clock/exynos5433.h index be39d23e6a32..98bd85ce1e45 100644 --- a/include/dt-bindings/clock/exynos5433.h +++ b/include/dt-bindings/clock/exynos5433.h @@ -1,10 +1,7 @@ +/* SPDX-License-Identifier: GPL-2.0 */ /* * Copyright (c) 2014 Samsung Electronics Co., Ltd. * Author: Chanwoo Choi <cw00.choi@samsung.com> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. */ #ifndef _DT_BINDINGS_CLOCK_EXYNOS5433_H diff --git a/include/dt-bindings/clock/exynos7-clk.h b/include/dt-bindings/clock/exynos7-clk.h index 10c558611085..fce33c7050c8 100644 --- a/include/dt-bindings/clock/exynos7-clk.h +++ b/include/dt-bindings/clock/exynos7-clk.h @@ -1,11 +1,8 @@ +/* SPDX-License-Identifier: GPL-2.0 */ /* * Copyright (c) 2014 Samsung Electronics Co., Ltd. * Author: Naveen Krishna Ch <naveenkrishna.ch@gmail.com> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
-*/ + */ #ifndef _DT_BINDINGS_CLOCK_EXYNOS7_H #define _DT_BINDINGS_CLOCK_EXYNOS7_H diff --git a/include/dt-bindings/clock/s3c2410.h b/include/dt-bindings/clock/s3c2410.h index 352a7673fc69..0fb65c3f2f59 100644 --- a/include/dt-bindings/clock/s3c2410.h +++ b/include/dt-bindings/clock/s3c2410.h @@ -1,10 +1,7 @@ +/* SPDX-License-Identifier: GPL-2.0 */ /* * Copyright (c) 2013 Heiko Stuebner <heiko@sntech.de> * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * * Device Tree binding constants clock controllers of Samsung S3C2410 and later. */ diff --git a/include/dt-bindings/clock/s3c2412.h b/include/dt-bindings/clock/s3c2412.h index aac1dcfda81c..b4656156cc0f 100644 --- a/include/dt-bindings/clock/s3c2412.h +++ b/include/dt-bindings/clock/s3c2412.h @@ -1,10 +1,7 @@ +/* SPDX-License-Identifier: GPL-2.0 */ /* * Copyright (c) 2013 Heiko Stuebner <heiko@sntech.de> * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * * Device Tree binding constants clock controllers of Samsung S3C2412. */ diff --git a/include/dt-bindings/clock/s3c2443.h b/include/dt-bindings/clock/s3c2443.h index f3ba68a25ecb..a9d2f105d536 100644 --- a/include/dt-bindings/clock/s3c2443.h +++ b/include/dt-bindings/clock/s3c2443.h @@ -1,10 +1,7 @@ +/* SPDX-License-Identifier: GPL-2.0 */ /* * Copyright (c) 2013 Heiko Stuebner <heiko@sntech.de> * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * * Device Tree binding constants clock controllers of Samsung S3C2443 and later. */ diff --git a/include/dt-bindings/gpio/meson-g12a-gpio.h b/include/dt-bindings/gpio/meson-g12a-gpio.h new file mode 100644 index 000000000000..f7bd69350d18 --- /dev/null +++ b/include/dt-bindings/gpio/meson-g12a-gpio.h @@ -0,0 +1,114 @@ +/* SPDX-License-Identifier: (GPL-2.0+ or MIT) */ +/* + * Copyright (c) 2018 Amlogic, Inc. All rights reserved. 
+ * Author: Xingyu Chen <xingyu.chen@amlogic.com> + */ + +#ifndef _DT_BINDINGS_MESON_G12A_GPIO_H +#define _DT_BINDINGS_MESON_G12A_GPIO_H + +/* First GPIO chip */ +#define GPIOAO_0 0 +#define GPIOAO_1 1 +#define GPIOAO_2 2 +#define GPIOAO_3 3 +#define GPIOAO_4 4 +#define GPIOAO_5 5 +#define GPIOAO_6 6 +#define GPIOAO_7 7 +#define GPIOAO_8 8 +#define GPIOAO_9 9 +#define GPIOAO_10 10 +#define GPIOAO_11 11 +#define GPIOE_0 12 +#define GPIOE_1 13 +#define GPIOE_2 14 + +/* Second GPIO chip */ +#define GPIOZ_0 0 +#define GPIOZ_1 1 +#define GPIOZ_2 2 +#define GPIOZ_3 3 +#define GPIOZ_4 4 +#define GPIOZ_5 5 +#define GPIOZ_6 6 +#define GPIOZ_7 7 +#define GPIOZ_8 8 +#define GPIOZ_9 9 +#define GPIOZ_10 10 +#define GPIOZ_11 11 +#define GPIOZ_12 12 +#define GPIOZ_13 13 +#define GPIOZ_14 14 +#define GPIOZ_15 15 +#define GPIOH_0 16 +#define GPIOH_1 17 +#define GPIOH_2 18 +#define GPIOH_3 19 +#define GPIOH_4 20 +#define GPIOH_5 21 +#define GPIOH_6 22 +#define GPIOH_7 23 +#define GPIOH_8 24 +#define BOOT_0 25 +#define BOOT_1 26 +#define BOOT_2 27 +#define BOOT_3 28 +#define BOOT_4 29 +#define BOOT_5 30 +#define BOOT_6 31 +#define BOOT_7 32 +#define BOOT_8 33 +#define BOOT_9 34 +#define BOOT_10 35 +#define BOOT_11 36 +#define BOOT_12 37 +#define BOOT_13 38 +#define BOOT_14 39 +#define BOOT_15 40 +#define GPIOC_0 41 +#define GPIOC_1 42 +#define GPIOC_2 43 +#define GPIOC_3 44 +#define GPIOC_4 45 +#define GPIOC_5 46 +#define GPIOC_6 47 +#define GPIOC_7 48 +#define GPIOA_0 49 +#define GPIOA_1 50 +#define GPIOA_2 51 +#define GPIOA_3 52 +#define GPIOA_4 53 +#define GPIOA_5 54 +#define GPIOA_6 55 +#define GPIOA_7 56 +#define GPIOA_8 57 +#define GPIOA_9 58 +#define GPIOA_10 59 +#define GPIOA_11 60 +#define GPIOA_12 61 +#define GPIOA_13 62 +#define GPIOA_14 63 +#define GPIOA_15 64 +#define GPIOX_0 65 +#define GPIOX_1 66 +#define GPIOX_2 67 +#define GPIOX_3 68 +#define GPIOX_4 69 +#define GPIOX_5 70 +#define GPIOX_6 71 +#define GPIOX_7 72 +#define GPIOX_8 73 +#define GPIOX_9 74 +#define GPIOX_10 75 +#define GPIOX_11 76 +#define GPIOX_12 77 +#define GPIOX_13 78 +#define GPIOX_14 79 +#define GPIOX_15 80 +#define GPIOX_16 81 +#define GPIOX_17 82 +#define GPIOX_18 83 +#define GPIOX_19 84 + +#endif /* _DT_BINDINGS_MESON_G12A_GPIO_H */ diff --git a/include/dt-bindings/interrupt-controller/arm-gic.h b/include/dt-bindings/interrupt-controller/arm-gic.h index 0c85f65c81c7..35b6f69b7db6 100644 --- a/include/dt-bindings/interrupt-controller/arm-gic.h +++ b/include/dt-bindings/interrupt-controller/arm-gic.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 */ +/* SPDX-License-Identifier: GPL-2.0 OR MIT */ /* * This header provides constants for the ARM GIC. */ diff --git a/include/dt-bindings/interrupt-controller/irq.h b/include/dt-bindings/interrupt-controller/irq.h index a8b310555f14..9e3d183e1381 100644 --- a/include/dt-bindings/interrupt-controller/irq.h +++ b/include/dt-bindings/interrupt-controller/irq.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 */ +/* SPDX-License-Identifier: GPL-2.0 OR MIT */ /* * This header provides constants for most IRQ bindings. * diff --git a/include/dt-bindings/mfd/at91-usart.h b/include/dt-bindings/mfd/at91-usart.h new file mode 100644 index 000000000000..2de5bc312e1e --- /dev/null +++ b/include/dt-bindings/mfd/at91-usart.h @@ -0,0 +1,17 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * This header provides macros for AT91 USART DT bindings. 
+ * + * Copyright (C) 2018 Microchip Technology + * + * Author: Radu Pirea <radu.pirea@microchip.com> + * + */ + +#ifndef __DT_BINDINGS_AT91_USART_H__ +#define __DT_BINDINGS_AT91_USART_H__ + +#define AT91_USART_MODE_SERIAL 0 +#define AT91_USART_MODE_SPI 1 + +#endif /* __DT_BINDINGS_AT91_USART_H__ */ diff --git a/include/dt-bindings/net/mscc-phy-vsc8531.h b/include/dt-bindings/net/mscc-phy-vsc8531.h index 697161f80eb5..9eb2ec2b2ea9 100644 --- a/include/dt-bindings/net/mscc-phy-vsc8531.h +++ b/include/dt-bindings/net/mscc-phy-vsc8531.h @@ -18,9 +18,11 @@ #define VSC8531_LINK_100_1000_ACTIVITY 4 #define VSC8531_LINK_10_1000_ACTIVITY 5 #define VSC8531_LINK_10_100_ACTIVITY 6 +#define VSC8584_LINK_100FX_1000X_ACTIVITY 7 #define VSC8531_DUPLEX_COLLISION 8 #define VSC8531_COLLISION 9 #define VSC8531_ACTIVITY 10 +#define VSC8584_100FX_1000X_ACTIVITY 11 #define VSC8531_AUTONEG_FAULT 12 #define VSC8531_SERIAL_MODE 13 #define VSC8531_FORCE_LED_OFF 14 diff --git a/include/dt-bindings/phy/phy-ocelot-serdes.h b/include/dt-bindings/phy/phy-ocelot-serdes.h new file mode 100644 index 000000000000..fe70adaca68f --- /dev/null +++ b/include/dt-bindings/phy/phy-ocelot-serdes.h @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */ +/* Copyright (c) 2018 Microsemi Corporation */ +#ifndef __PHY_OCELOT_SERDES_H__ +#define __PHY_OCELOT_SERDES_H__ + +#define SERDES1G(x) (x) +#define SERDES1G_MAX SERDES1G(5) +#define SERDES6G(x) (SERDES1G_MAX + 1 + (x)) +#define SERDES6G_MAX SERDES6G(2) +#define SERDES_MAX (SERDES6G_MAX + 1) + +#endif diff --git a/include/dt-bindings/pinctrl/rzn1-pinctrl.h b/include/dt-bindings/pinctrl/rzn1-pinctrl.h new file mode 100644 index 000000000000..21d6cc4d59f5 --- /dev/null +++ b/include/dt-bindings/pinctrl/rzn1-pinctrl.h @@ -0,0 +1,141 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Defines macros and constants for Renesas RZ/N1 pin controller pin + * muxing functions. + */ +#ifndef __DT_BINDINGS_RZN1_PINCTRL_H +#define __DT_BINDINGS_RZN1_PINCTRL_H + +#define RZN1_PINMUX(_gpio, _func) \ + (((_func) << 8) | (_gpio)) + +/* + * Given the different levels of muxing on the SoC, it was decided to + * 'linearize' them into one numerical space. So mux level 1, 2 and the MDIO + * muxes are all represented by one single value. + * + * You can derive the hardware value pretty easily too, as + * 0...9 are Level 1 + * 10...71 are Level 2. The Level 2 mux will be set to this + * value - RZN1_FUNC_L2_OFFSET, and the Level 1 mux will be + * set accordingly. + * 72...103 are for the 2 MDIO muxes. 
+ */ +#define RZN1_FUNC_HIGHZ 0 +#define RZN1_FUNC_0L 1 +#define RZN1_FUNC_CLK_ETH_MII_RGMII_RMII 2 +#define RZN1_FUNC_CLK_ETH_NAND 3 +#define RZN1_FUNC_QSPI 4 +#define RZN1_FUNC_SDIO 5 +#define RZN1_FUNC_LCD 6 +#define RZN1_FUNC_LCD_E 7 +#define RZN1_FUNC_MSEBIM 8 +#define RZN1_FUNC_MSEBIS 9 +#define RZN1_FUNC_L2_OFFSET 10 /* I'm Special */ + +#define RZN1_FUNC_HIGHZ1 (RZN1_FUNC_L2_OFFSET + 0) +#define RZN1_FUNC_ETHERCAT (RZN1_FUNC_L2_OFFSET + 1) +#define RZN1_FUNC_SERCOS3 (RZN1_FUNC_L2_OFFSET + 2) +#define RZN1_FUNC_SDIO_E (RZN1_FUNC_L2_OFFSET + 3) +#define RZN1_FUNC_ETH_MDIO (RZN1_FUNC_L2_OFFSET + 4) +#define RZN1_FUNC_ETH_MDIO_E1 (RZN1_FUNC_L2_OFFSET + 5) +#define RZN1_FUNC_USB (RZN1_FUNC_L2_OFFSET + 6) +#define RZN1_FUNC_MSEBIM_E (RZN1_FUNC_L2_OFFSET + 7) +#define RZN1_FUNC_MSEBIS_E (RZN1_FUNC_L2_OFFSET + 8) +#define RZN1_FUNC_RSV (RZN1_FUNC_L2_OFFSET + 9) +#define RZN1_FUNC_RSV_E (RZN1_FUNC_L2_OFFSET + 10) +#define RZN1_FUNC_RSV_E1 (RZN1_FUNC_L2_OFFSET + 11) +#define RZN1_FUNC_UART0_I (RZN1_FUNC_L2_OFFSET + 12) +#define RZN1_FUNC_UART0_I_E (RZN1_FUNC_L2_OFFSET + 13) +#define RZN1_FUNC_UART1_I (RZN1_FUNC_L2_OFFSET + 14) +#define RZN1_FUNC_UART1_I_E (RZN1_FUNC_L2_OFFSET + 15) +#define RZN1_FUNC_UART2_I (RZN1_FUNC_L2_OFFSET + 16) +#define RZN1_FUNC_UART2_I_E (RZN1_FUNC_L2_OFFSET + 17) +#define RZN1_FUNC_UART0 (RZN1_FUNC_L2_OFFSET + 18) +#define RZN1_FUNC_UART0_E (RZN1_FUNC_L2_OFFSET + 19) +#define RZN1_FUNC_UART1 (RZN1_FUNC_L2_OFFSET + 20) +#define RZN1_FUNC_UART1_E (RZN1_FUNC_L2_OFFSET + 21) +#define RZN1_FUNC_UART2 (RZN1_FUNC_L2_OFFSET + 22) +#define RZN1_FUNC_UART2_E (RZN1_FUNC_L2_OFFSET + 23) +#define RZN1_FUNC_UART3 (RZN1_FUNC_L2_OFFSET + 24) +#define RZN1_FUNC_UART3_E (RZN1_FUNC_L2_OFFSET + 25) +#define RZN1_FUNC_UART4 (RZN1_FUNC_L2_OFFSET + 26) +#define RZN1_FUNC_UART4_E (RZN1_FUNC_L2_OFFSET + 27) +#define RZN1_FUNC_UART5 (RZN1_FUNC_L2_OFFSET + 28) +#define RZN1_FUNC_UART5_E (RZN1_FUNC_L2_OFFSET + 29) +#define RZN1_FUNC_UART6 (RZN1_FUNC_L2_OFFSET + 30) +#define RZN1_FUNC_UART6_E (RZN1_FUNC_L2_OFFSET + 31) +#define RZN1_FUNC_UART7 (RZN1_FUNC_L2_OFFSET + 32) +#define RZN1_FUNC_UART7_E (RZN1_FUNC_L2_OFFSET + 33) +#define RZN1_FUNC_SPI0_M (RZN1_FUNC_L2_OFFSET + 34) +#define RZN1_FUNC_SPI0_M_E (RZN1_FUNC_L2_OFFSET + 35) +#define RZN1_FUNC_SPI1_M (RZN1_FUNC_L2_OFFSET + 36) +#define RZN1_FUNC_SPI1_M_E (RZN1_FUNC_L2_OFFSET + 37) +#define RZN1_FUNC_SPI2_M (RZN1_FUNC_L2_OFFSET + 38) +#define RZN1_FUNC_SPI2_M_E (RZN1_FUNC_L2_OFFSET + 39) +#define RZN1_FUNC_SPI3_M (RZN1_FUNC_L2_OFFSET + 40) +#define RZN1_FUNC_SPI3_M_E (RZN1_FUNC_L2_OFFSET + 41) +#define RZN1_FUNC_SPI4_S (RZN1_FUNC_L2_OFFSET + 42) +#define RZN1_FUNC_SPI4_S_E (RZN1_FUNC_L2_OFFSET + 43) +#define RZN1_FUNC_SPI5_S (RZN1_FUNC_L2_OFFSET + 44) +#define RZN1_FUNC_SPI5_S_E (RZN1_FUNC_L2_OFFSET + 45) +#define RZN1_FUNC_SGPIO0_M (RZN1_FUNC_L2_OFFSET + 46) +#define RZN1_FUNC_SGPIO1_M (RZN1_FUNC_L2_OFFSET + 47) +#define RZN1_FUNC_GPIO (RZN1_FUNC_L2_OFFSET + 48) +#define RZN1_FUNC_CAN (RZN1_FUNC_L2_OFFSET + 49) +#define RZN1_FUNC_I2C (RZN1_FUNC_L2_OFFSET + 50) +#define RZN1_FUNC_SAFE (RZN1_FUNC_L2_OFFSET + 51) +#define RZN1_FUNC_PTO_PWM (RZN1_FUNC_L2_OFFSET + 52) +#define RZN1_FUNC_PTO_PWM1 (RZN1_FUNC_L2_OFFSET + 53) +#define RZN1_FUNC_PTO_PWM2 (RZN1_FUNC_L2_OFFSET + 54) +#define RZN1_FUNC_PTO_PWM3 (RZN1_FUNC_L2_OFFSET + 55) +#define RZN1_FUNC_PTO_PWM4 (RZN1_FUNC_L2_OFFSET + 56) +#define RZN1_FUNC_DELTA_SIGMA (RZN1_FUNC_L2_OFFSET + 57) +#define RZN1_FUNC_SGPIO2_M (RZN1_FUNC_L2_OFFSET + 58) +#define RZN1_FUNC_SGPIO3_M (RZN1_FUNC_L2_OFFSET + 
59) +#define RZN1_FUNC_SGPIO4_S (RZN1_FUNC_L2_OFFSET + 60) +#define RZN1_FUNC_MAC_MTIP_SWITCH (RZN1_FUNC_L2_OFFSET + 61) + +#define RZN1_FUNC_MDIO_OFFSET (RZN1_FUNC_L2_OFFSET + 62) + +/* These are MDIO0 peripherals for the RZN1_FUNC_ETH_MDIO function */ +#define RZN1_FUNC_MDIO0_HIGHZ (RZN1_FUNC_MDIO_OFFSET + 0) +#define RZN1_FUNC_MDIO0_GMAC0 (RZN1_FUNC_MDIO_OFFSET + 1) +#define RZN1_FUNC_MDIO0_GMAC1 (RZN1_FUNC_MDIO_OFFSET + 2) +#define RZN1_FUNC_MDIO0_ECAT (RZN1_FUNC_MDIO_OFFSET + 3) +#define RZN1_FUNC_MDIO0_S3_MDIO0 (RZN1_FUNC_MDIO_OFFSET + 4) +#define RZN1_FUNC_MDIO0_S3_MDIO1 (RZN1_FUNC_MDIO_OFFSET + 5) +#define RZN1_FUNC_MDIO0_HWRTOS (RZN1_FUNC_MDIO_OFFSET + 6) +#define RZN1_FUNC_MDIO0_SWITCH (RZN1_FUNC_MDIO_OFFSET + 7) +/* These are MDIO0 peripherals for the RZN1_FUNC_ETH_MDIO_E1 function */ +#define RZN1_FUNC_MDIO0_E1_HIGHZ (RZN1_FUNC_MDIO_OFFSET + 8) +#define RZN1_FUNC_MDIO0_E1_GMAC0 (RZN1_FUNC_MDIO_OFFSET + 9) +#define RZN1_FUNC_MDIO0_E1_GMAC1 (RZN1_FUNC_MDIO_OFFSET + 10) +#define RZN1_FUNC_MDIO0_E1_ECAT (RZN1_FUNC_MDIO_OFFSET + 11) +#define RZN1_FUNC_MDIO0_E1_S3_MDIO0 (RZN1_FUNC_MDIO_OFFSET + 12) +#define RZN1_FUNC_MDIO0_E1_S3_MDIO1 (RZN1_FUNC_MDIO_OFFSET + 13) +#define RZN1_FUNC_MDIO0_E1_HWRTOS (RZN1_FUNC_MDIO_OFFSET + 14) +#define RZN1_FUNC_MDIO0_E1_SWITCH (RZN1_FUNC_MDIO_OFFSET + 15) + +/* These are MDIO1 peripherals for the RZN1_FUNC_ETH_MDIO function */ +#define RZN1_FUNC_MDIO1_HIGHZ (RZN1_FUNC_MDIO_OFFSET + 16) +#define RZN1_FUNC_MDIO1_GMAC0 (RZN1_FUNC_MDIO_OFFSET + 17) +#define RZN1_FUNC_MDIO1_GMAC1 (RZN1_FUNC_MDIO_OFFSET + 18) +#define RZN1_FUNC_MDIO1_ECAT (RZN1_FUNC_MDIO_OFFSET + 19) +#define RZN1_FUNC_MDIO1_S3_MDIO0 (RZN1_FUNC_MDIO_OFFSET + 20) +#define RZN1_FUNC_MDIO1_S3_MDIO1 (RZN1_FUNC_MDIO_OFFSET + 21) +#define RZN1_FUNC_MDIO1_HWRTOS (RZN1_FUNC_MDIO_OFFSET + 22) +#define RZN1_FUNC_MDIO1_SWITCH (RZN1_FUNC_MDIO_OFFSET + 23) +/* These are MDIO1 peripherals for the RZN1_FUNC_ETH_MDIO_E1 function */ +#define RZN1_FUNC_MDIO1_E1_HIGHZ (RZN1_FUNC_MDIO_OFFSET + 24) +#define RZN1_FUNC_MDIO1_E1_GMAC0 (RZN1_FUNC_MDIO_OFFSET + 25) +#define RZN1_FUNC_MDIO1_E1_GMAC1 (RZN1_FUNC_MDIO_OFFSET + 26) +#define RZN1_FUNC_MDIO1_E1_ECAT (RZN1_FUNC_MDIO_OFFSET + 27) +#define RZN1_FUNC_MDIO1_E1_S3_MDIO0 (RZN1_FUNC_MDIO_OFFSET + 28) +#define RZN1_FUNC_MDIO1_E1_S3_MDIO1 (RZN1_FUNC_MDIO_OFFSET + 29) +#define RZN1_FUNC_MDIO1_E1_HWRTOS (RZN1_FUNC_MDIO_OFFSET + 30) +#define RZN1_FUNC_MDIO1_E1_SWITCH (RZN1_FUNC_MDIO_OFFSET + 31) + +#define RZN1_FUNC_MAX (RZN1_FUNC_MDIO_OFFSET + 32) + +#endif /* __DT_BINDINGS_RZN1_PINCTRL_H */ diff --git a/include/dt-bindings/reset/imx7-reset.h b/include/dt-bindings/reset/imx7-reset.h index 63948170c7b2..31b3f87dde9a 100644 --- a/include/dt-bindings/reset/imx7-reset.h +++ b/include/dt-bindings/reset/imx7-reset.h @@ -56,7 +56,9 @@ #define IMX7_RESET_DDRC_PRST 23 #define IMX7_RESET_DDRC_CORE_RST 24 -#define IMX7_RESET_NUM 25 +#define IMX7_RESET_PCIE_CTRL_APPS_TURNOFF 25 + +#define IMX7_RESET_NUM 26 #endif diff --git a/include/dt-bindings/thermal/thermal_exynos.h b/include/dt-bindings/thermal/thermal_exynos.h index 0646500bca69..642e4e7f4084 100644 --- a/include/dt-bindings/thermal/thermal_exynos.h +++ b/include/dt-bindings/thermal/thermal_exynos.h @@ -1,19 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ /* * thermal_exynos.h - Samsung EXYNOS TMU device tree definitions * * Copyright (C) 2014 Samsung Electronics * Lukasz Majewski <l.majewski@samsung.com> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General 
Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * */ #ifndef _EXYNOS_THERMAL_TMU_DT_H diff --git a/include/dt-bindings/usb/pd.h b/include/dt-bindings/usb/pd.h index 7b7a92fefa0a..985f2bbd4d24 100644 --- a/include/dt-bindings/usb/pd.h +++ b/include/dt-bindings/usb/pd.h @@ -59,4 +59,30 @@ (PDO_TYPE(PDO_TYPE_VAR) | PDO_VAR_MIN_VOLT(min_mv) | \ PDO_VAR_MAX_VOLT(max_mv) | PDO_VAR_MAX_CURR(max_ma)) +#define APDO_TYPE_PPS 0 + +#define PDO_APDO_TYPE_SHIFT 28 /* Only valid value currently is 0x0 - PPS */ +#define PDO_APDO_TYPE_MASK 0x3 + +#define PDO_APDO_TYPE(t) ((t) << PDO_APDO_TYPE_SHIFT) + +#define PDO_PPS_APDO_MAX_VOLT_SHIFT 17 /* 100mV units */ +#define PDO_PPS_APDO_MIN_VOLT_SHIFT 8 /* 100mV units */ +#define PDO_PPS_APDO_MAX_CURR_SHIFT 0 /* 50mA units */ + +#define PDO_PPS_APDO_VOLT_MASK 0xff +#define PDO_PPS_APDO_CURR_MASK 0x7f + +#define PDO_PPS_APDO_MIN_VOLT(mv) \ + ((((mv) / 100) & PDO_PPS_APDO_VOLT_MASK) << PDO_PPS_APDO_MIN_VOLT_SHIFT) +#define PDO_PPS_APDO_MAX_VOLT(mv) \ + ((((mv) / 100) & PDO_PPS_APDO_VOLT_MASK) << PDO_PPS_APDO_MAX_VOLT_SHIFT) +#define PDO_PPS_APDO_MAX_CURR(ma) \ + ((((ma) / 50) & PDO_PPS_APDO_CURR_MASK) << PDO_PPS_APDO_MAX_CURR_SHIFT) + +#define PDO_PPS_APDO(min_mv, max_mv, max_ma) \ + (PDO_TYPE(PDO_TYPE_APDO) | PDO_APDO_TYPE(APDO_TYPE_PPS) | \ + PDO_PPS_APDO_MIN_VOLT(min_mv) | PDO_PPS_APDO_MAX_VOLT(max_mv) | \ + PDO_PPS_APDO_MAX_CURR(max_ma)) + #endif /* __DT_POWER_DELIVERY_H */ diff --git a/include/linux/acpi.h b/include/linux/acpi.h index de8d3d3fa651..ed80f147bd50 100644 --- a/include/linux/acpi.h +++ b/include/linux/acpi.h @@ -831,8 +831,6 @@ static inline int acpi_dma_configure(struct device *dev, return 0; } -static inline void acpi_dma_deconfigure(struct device *dev) { } - #define ACPI_PTR(_ptr) (NULL) static inline void acpi_device_set_enumerated(struct acpi_device *adev) @@ -1074,6 +1072,15 @@ static inline int acpi_node_get_property_reference( NR_FWNODE_REFERENCE_ARGS, args); } +static inline bool acpi_dev_has_props(const struct acpi_device *adev) +{ + return !list_empty(&adev->data.properties); +} + +struct acpi_device_properties * +acpi_data_add_props(struct acpi_device_data *data, const guid_t *guid, + const union acpi_object *properties); + int acpi_node_prop_get(const struct fwnode_handle *fwnode, const char *propname, void **valptr); int acpi_dev_prop_read_single(struct acpi_device *adev, diff --git a/include/linux/adxl.h b/include/linux/adxl.h new file mode 100644 index 000000000000..2a629acb4c3f --- /dev/null +++ b/include/linux/adxl.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Address translation interface via ACPI DSM. + * Copyright (C) 2018 Intel Corporation + */ + +#ifndef _LINUX_ADXL_H +#define _LINUX_ADXL_H + +const char * const *adxl_get_component_names(void); +int adxl_decode(u64 addr, u64 component_values[]); + +#endif /* _LINUX_ADXL_H */ diff --git a/include/linux/amba/mmci.h b/include/linux/amba/mmci.h index da8357ba11bc..c92ebc39fc1f 100644 --- a/include/linux/amba/mmci.h +++ b/include/linux/amba/mmci.h @@ -18,20 +18,13 @@ * mask into a value to be binary (or set some other custom bits * in MMCIPWR) or:ed and written into the MMCIPWR register of the * block. 
May also control external power based on the power_mode. - * @status: if no GPIO read function was given to the block in - * gpio_wp (below) this function will be called to determine - * whether a card is present in the MMC slot or not - * @gpio_wp: read this GPIO pin to see if the card is write protected - * @gpio_cd: read this GPIO pin to detect card insertion - * @cd_invert: true if the gpio_cd pin value is active low + * @status: if no GPIO line was given to the block in this function will + * be called to determine whether a card is present in the MMC slot or not */ struct mmci_platform_data { unsigned int ocr_mask; int (*ios_handler)(struct device *, struct mmc_ios *); unsigned int (*status)(struct device *); - int gpio_wp; - int gpio_cd; - bool cd_invert; }; #endif diff --git a/include/linux/amifd.h b/include/linux/amifd.h deleted file mode 100644 index 202a77dbe46d..000000000000 --- a/include/linux/amifd.h +++ /dev/null @@ -1,63 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _AMIFD_H -#define _AMIFD_H - -/* Definitions for the Amiga floppy driver */ - -#include <linux/fd.h> - -#define FD_MAX_UNITS 4 /* Max. Number of drives */ -#define FLOPPY_MAX_SECTORS 22 /* Max. Number of sectors per track */ - -#ifndef ASSEMBLER - -struct fd_data_type { - char *name; /* description of data type */ - int sects; /* sectors per track */ -#ifdef __STDC__ - int (*read_fkt)(int); - void (*write_fkt)(int); -#else - int (*read_fkt)(); /* read whole track */ - void (*write_fkt)(); /* write whole track */ -#endif -}; - -/* -** Floppy type descriptions -*/ - -struct fd_drive_type { - unsigned long code; /* code returned from drive */ - char *name; /* description of drive */ - unsigned int tracks; /* number of tracks */ - unsigned int heads; /* number of heads */ - unsigned int read_size; /* raw read size for one track */ - unsigned int write_size; /* raw write size for one track */ - unsigned int sect_mult; /* sectors and gap multiplier (HD = 2) */ - unsigned int precomp1; /* start track for precomp 1 */ - unsigned int precomp2; /* start track for precomp 2 */ - unsigned int step_delay; /* time (in ms) for delay after step */ - unsigned int settle_time; /* time to settle after dir change */ - unsigned int side_time; /* time needed to change sides */ -}; - -struct amiga_floppy_struct { - struct fd_drive_type *type; /* type of floppy for this unit */ - struct fd_data_type *dtype; /* type of floppy for this unit */ - int track; /* current track (-1 == unknown) */ - unsigned char *trackbuf; /* current track (kmaloc()'d */ - - int blocks; /* total # blocks on disk */ - - int changed; /* true when not known */ - int disk; /* disk in drive (-1 == unknown) */ - int motor; /* true when motor is at speed */ - int busy; /* true when drive is active */ - int dirty; /* true when trackbuf is not on disk */ - int status; /* current error code for unit */ - struct gendisk *gendisk; -}; -#endif - -#endif diff --git a/include/linux/amifdreg.h b/include/linux/amifdreg.h deleted file mode 100644 index 9b514d05ec70..000000000000 --- a/include/linux/amifdreg.h +++ /dev/null @@ -1,82 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _LINUX_AMIFDREG_H -#define _LINUX_AMIFDREG_H - -/* -** CIAAPRA bits (read only) -*/ - -#define DSKRDY (0x1<<5) /* disk ready when low */ -#define DSKTRACK0 (0x1<<4) /* head at track zero when low */ -#define DSKPROT (0x1<<3) /* disk protected when low */ -#define DSKCHANGE (0x1<<2) /* low when disk removed */ - -/* -** CIAAPRB bits (read/write) -*/ - -#define DSKMOTOR (0x1<<7) /* 
motor on when low */ -#define DSKSEL3 (0x1<<6) /* select drive 3 when low */ -#define DSKSEL2 (0x1<<5) /* select drive 2 when low */ -#define DSKSEL1 (0x1<<4) /* select drive 1 when low */ -#define DSKSEL0 (0x1<<3) /* select drive 0 when low */ -#define DSKSIDE (0x1<<2) /* side selection: 0 = upper, 1 = lower */ -#define DSKDIREC (0x1<<1) /* step direction: 0=in, 1=out (to trk 0) */ -#define DSKSTEP (0x1) /* pulse low to step head 1 track */ - -/* -** DSKBYTR bits (read only) -*/ - -#define DSKBYT (1<<15) /* register contains valid byte when set */ -#define DMAON (1<<14) /* disk DMA enabled */ -#define DISKWRITE (1<<13) /* disk write bit in DSKLEN enabled */ -#define WORDEQUAL (1<<12) /* DSKSYNC register match when true */ -/* bits 7-0 are data */ - -/* -** ADKCON/ADKCONR bits -*/ - -#ifndef SETCLR -#define ADK_SETCLR (1<<15) /* control bit */ -#endif -#define ADK_PRECOMP1 (1<<14) /* precompensation selection */ -#define ADK_PRECOMP0 (1<<13) /* 00=none, 01=140ns, 10=280ns, 11=500ns */ -#define ADK_MFMPREC (1<<12) /* 0=GCR precomp., 1=MFM precomp. */ -#define ADK_WORDSYNC (1<<10) /* enable DSKSYNC auto DMA */ -#define ADK_MSBSYNC (1<<9) /* when 1, enable sync on MSbit (for GCR) */ -#define ADK_FAST (1<<8) /* bit cell: 0=2us (GCR), 1=1us (MFM) */ - -/* -** DSKLEN bits -*/ - -#define DSKLEN_DMAEN (1<<15) -#define DSKLEN_WRITE (1<<14) - -/* -** INTENA/INTREQ bits -*/ - -#define DSKINDEX (0x1<<4) /* DSKINDEX bit */ - -/* -** Misc -*/ - -#define MFM_SYNC 0x4489 /* standard MFM sync value */ - -/* Values for FD_COMMAND */ -#define FD_RECALIBRATE 0x07 /* move to track 0 */ -#define FD_SEEK 0x0F /* seek track */ -#define FD_READ 0xE6 /* read with MT, MFM, SKip deleted */ -#define FD_WRITE 0xC5 /* write with MT, MFM */ -#define FD_SENSEI 0x08 /* Sense Interrupt Status */ -#define FD_SPECIFY 0x03 /* specify HUT etc */ -#define FD_FORMAT 0x4D /* format one track */ -#define FD_VERSION 0x10 /* get version code */ -#define FD_CONFIGURE 0x13 /* configure FIFO operation */ -#define FD_PERPENDICULAR 0x12 /* perpendicular r/w mode */ - -#endif /* _LINUX_AMIFDREG_H */ diff --git a/include/linux/arch_topology.h b/include/linux/arch_topology.h index 2b709416de05..d9bdc1a7f4e7 100644 --- a/include/linux/arch_topology.h +++ b/include/linux/arch_topology.h @@ -9,6 +9,7 @@ #include <linux/percpu.h> void topology_normalize_cpu_scale(void); +int topology_update_cpu_topology(void); struct device_node; bool topology_parse_cpu_capacity(struct device_node *cpu_node, int cpu); diff --git a/include/linux/avf/virtchnl.h b/include/linux/avf/virtchnl.h index 212b3822d180..2c9756bd9c4c 100644 --- a/include/linux/avf/virtchnl.h +++ b/include/linux/avf/virtchnl.h @@ -252,6 +252,8 @@ VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_vsi_resource); #define VIRTCHNL_VF_OFFLOAD_RX_ENCAP_CSUM 0X00400000 #define VIRTCHNL_VF_OFFLOAD_ADQ 0X00800000 +/* Define below the capability flags that are not offloads */ +#define VIRTCHNL_VF_CAP_ADV_LINK_SPEED 0x00000080 #define VF_BASE_MODE_OFFLOADS (VIRTCHNL_VF_OFFLOAD_L2 | \ VIRTCHNL_VF_OFFLOAD_VLAN | \ VIRTCHNL_VF_OFFLOAD_RSS_PF) @@ -573,7 +575,7 @@ struct virtchnl_filter { enum virtchnl_flow_type flow_type; enum virtchnl_action action; u32 action_meta; - __u8 field_flags; + u8 field_flags; }; VIRTCHNL_CHECK_STRUCT_LEN(272, virtchnl_filter); @@ -596,10 +598,23 @@ enum virtchnl_event_codes { struct virtchnl_pf_event { enum virtchnl_event_codes event; union { + /* If the PF driver does not support the new speed reporting + * capabilities then use link_event else use link_event_adv to + * get the speed and 
link information. The ability to understand + * new speeds is indicated by setting the capability flag + * VIRTCHNL_VF_CAP_ADV_LINK_SPEED in vf_cap_flags parameter + * in virtchnl_vf_resource struct and can be used to determine + * which link event struct to use below. + */ struct { enum virtchnl_link_speed link_speed; bool link_status; } link_event; + struct { + /* link_speed provided in Mbps */ + u32 link_speed; + u8 link_status; + } link_event_adv; } event_data; int severity; diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h index c05f24fac4f6..e9f5fe69df31 100644 --- a/include/linux/binfmts.h +++ b/include/linux/binfmts.h @@ -78,7 +78,7 @@ struct linux_binprm { /* Function parameter for binfmt->coredump */ struct coredump_params { - const siginfo_t *siginfo; + const kernel_siginfo_t *siginfo; struct pt_regs *regs; struct file *file; unsigned long limit; diff --git a/include/linux/bio.h b/include/linux/bio.h index 51371740d2a8..b47c7f716731 100644 --- a/include/linux/bio.h +++ b/include/linux/bio.h @@ -21,12 +21,8 @@ #include <linux/highmem.h> #include <linux/mempool.h> #include <linux/ioprio.h> -#include <linux/bug.h> #ifdef CONFIG_BLOCK - -#include <asm/io.h> - /* struct bio, bio_vec and BIO_* flags are defined in blk_types.h */ #include <linux/blk_types.h> @@ -133,32 +129,6 @@ static inline bool bio_full(struct bio *bio) } /* - * will die - */ -#define bvec_to_phys(bv) (page_to_phys((bv)->bv_page) + (unsigned long) (bv)->bv_offset) - -/* - * merge helpers etc - */ - -/* Default implementation of BIOVEC_PHYS_MERGEABLE */ -#define __BIOVEC_PHYS_MERGEABLE(vec1, vec2) \ - ((bvec_to_phys((vec1)) + (vec1)->bv_len) == bvec_to_phys((vec2))) - -/* - * allow arch override, for eg virtualized architectures (put in asm/io.h) - */ -#ifndef BIOVEC_PHYS_MERGEABLE -#define BIOVEC_PHYS_MERGEABLE(vec1, vec2) \ - __BIOVEC_PHYS_MERGEABLE(vec1, vec2) -#endif - -#define __BIO_SEG_BOUNDARY(addr1, addr2, mask) \ - (((addr1) | (mask)) == (((addr2) - 1) | (mask))) -#define BIOVEC_SEG_BOUNDARY(q, b1, b2) \ - __BIO_SEG_BOUNDARY(bvec_to_phys((b1)), bvec_to_phys((b2)) + (b2)->bv_len, queue_segment_boundary((q))) - -/* * drivers should _never_ use the all version - the bio may have been split * before it got to the driver and the driver won't own all of it */ @@ -170,27 +140,11 @@ static inline void bio_advance_iter(struct bio *bio, struct bvec_iter *iter, { iter->bi_sector += bytes >> 9; - if (bio_no_advance_iter(bio)) { + if (bio_no_advance_iter(bio)) iter->bi_size -= bytes; - iter->bi_done += bytes; - } else { + else bvec_iter_advance(bio->bi_io_vec, iter, bytes); /* TODO: It is reasonable to complete bio with error here. 
*/ - } -} - -static inline bool bio_rewind_iter(struct bio *bio, struct bvec_iter *iter, - unsigned int bytes) -{ - iter->bi_sector -= bytes >> 9; - - if (bio_no_advance_iter(bio)) { - iter->bi_size += bytes; - iter->bi_done -= bytes; - return true; - } - - return bvec_iter_rewind(bio->bi_io_vec, iter, bytes); } #define __bio_for_each_segment(bvl, bio, iter, start) \ @@ -353,6 +307,8 @@ struct bio_integrity_payload { unsigned short bip_max_vcnt; /* integrity bio_vec slots */ unsigned short bip_flags; /* control flags */ + struct bvec_iter bio_iter; /* for rewinding parent bio */ + struct work_struct bip_work; /* I/O completion */ struct bio_vec *bip_vec; @@ -547,23 +503,31 @@ do { \ disk_devt((bio)->bi_disk) #if defined(CONFIG_MEMCG) && defined(CONFIG_BLK_CGROUP) -int bio_associate_blkcg_from_page(struct bio *bio, struct page *page); +int bio_associate_blkg_from_page(struct bio *bio, struct page *page); #else -static inline int bio_associate_blkcg_from_page(struct bio *bio, - struct page *page) { return 0; } +static inline int bio_associate_blkg_from_page(struct bio *bio, + struct page *page) { return 0; } #endif #ifdef CONFIG_BLK_CGROUP -int bio_associate_blkcg(struct bio *bio, struct cgroup_subsys_state *blkcg_css); int bio_associate_blkg(struct bio *bio, struct blkcg_gq *blkg); +int bio_associate_blkg_from_css(struct bio *bio, + struct cgroup_subsys_state *css); +int bio_associate_create_blkg(struct request_queue *q, struct bio *bio); +int bio_reassociate_blkg(struct request_queue *q, struct bio *bio); void bio_disassociate_task(struct bio *bio); -void bio_clone_blkcg_association(struct bio *dst, struct bio *src); +void bio_clone_blkg_association(struct bio *dst, struct bio *src); #else /* CONFIG_BLK_CGROUP */ -static inline int bio_associate_blkcg(struct bio *bio, - struct cgroup_subsys_state *blkcg_css) { return 0; } +static inline int bio_associate_blkg_from_css(struct bio *bio, + struct cgroup_subsys_state *css) +{ return 0; } +static inline int bio_associate_create_blkg(struct request_queue *q, + struct bio *bio) { return 0; } +static inline int bio_reassociate_blkg(struct request_queue *q, struct bio *bio) +{ return 0; } static inline void bio_disassociate_task(struct bio *bio) { } -static inline void bio_clone_blkcg_association(struct bio *dst, - struct bio *src) { } +static inline void bio_clone_blkg_association(struct bio *dst, + struct bio *src) { } #endif /* CONFIG_BLK_CGROUP */ #ifdef CONFIG_HIGHMEM diff --git a/include/linux/blk-cgroup.h b/include/linux/blk-cgroup.h index 6d766a19f2bb..1e76ceebeb5d 100644 --- a/include/linux/blk-cgroup.h +++ b/include/linux/blk-cgroup.h @@ -126,7 +126,7 @@ struct blkcg_gq { struct request_list rl; /* reference count */ - atomic_t refcnt; + struct percpu_ref refcnt; /* is this blkg online? protected by both blkcg and q locks */ bool online; @@ -184,6 +184,8 @@ extern struct cgroup_subsys_state * const blkcg_root_css; struct blkcg_gq *blkg_lookup_slowpath(struct blkcg *blkcg, struct request_queue *q, bool update_hint); +struct blkcg_gq *__blkg_lookup_create(struct blkcg *blkcg, + struct request_queue *q); struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg, struct request_queue *q); int blkcg_init_queue(struct request_queue *q); @@ -230,22 +232,59 @@ int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol, char *input, struct blkg_conf_ctx *ctx); void blkg_conf_finish(struct blkg_conf_ctx *ctx); +/** + * blkcg_css - find the current css + * + * Find the css associated with either the kthread or the current task. 
+ * This may return a dying css, so it is up to the caller to use tryget logic + * to confirm it is alive and well. + */ +static inline struct cgroup_subsys_state *blkcg_css(void) +{ + struct cgroup_subsys_state *css; + + css = kthread_blkcg(); + if (css) + return css; + return task_css(current, io_cgrp_id); +} static inline struct blkcg *css_to_blkcg(struct cgroup_subsys_state *css) { return css ? container_of(css, struct blkcg, css) : NULL; } -static inline struct blkcg *bio_blkcg(struct bio *bio) +/** + * __bio_blkcg - internal version of bio_blkcg for bfq and cfq + * + * DO NOT USE. + * There is a flaw using this version of the function. In particular, this was + * used in a broken paradigm where association was called on the given css. It + * is possible though that the returned css from task_css() is in the process + * of dying due to migration of the current task. So it is improper to assume + * *_get() is going to succeed. Both BFQ and CFQ rely on this logic and will + * take additional work to handle more gracefully. + */ +static inline struct blkcg *__bio_blkcg(struct bio *bio) { - struct cgroup_subsys_state *css; + if (bio && bio->bi_blkg) + return bio->bi_blkg->blkcg; + return css_to_blkcg(blkcg_css()); +} - if (bio && bio->bi_css) - return css_to_blkcg(bio->bi_css); - css = kthread_blkcg(); - if (css) - return css_to_blkcg(css); - return css_to_blkcg(task_css(current, io_cgrp_id)); +/** + * bio_blkcg - grab the blkcg associated with a bio + * @bio: target bio + * + * This returns the blkcg associated with a bio, NULL if not associated. + * Callers are expected to either handle NULL or know association has been + * done prior to calling this. + */ +static inline struct blkcg *bio_blkcg(struct bio *bio) +{ + if (bio && bio->bi_blkg) + return bio->bi_blkg->blkcg; + return NULL; } static inline bool blk_cgroup_congested(void) @@ -451,26 +490,35 @@ static inline int blkg_path(struct blkcg_gq *blkg, char *buf, int buflen) */ static inline void blkg_get(struct blkcg_gq *blkg) { - WARN_ON_ONCE(atomic_read(&blkg->refcnt) <= 0); - atomic_inc(&blkg->refcnt); + percpu_ref_get(&blkg->refcnt); } /** - * blkg_try_get - try and get a blkg reference + * blkg_tryget - try and get a blkg reference + * @blkg: blkg to get * * This is for use when doing an RCU lookup of the blkg. We may be in the midst * of freeing this blkg, so we can only use it if the refcnt is not zero. */ -static inline struct blkcg_gq *blkg_try_get(struct blkcg_gq *blkg) +static inline bool blkg_tryget(struct blkcg_gq *blkg) { - if (atomic_inc_not_zero(&blkg->refcnt)) - return blkg; - return NULL; + return percpu_ref_tryget(&blkg->refcnt); } +/** + * blkg_tryget_closest - try and get a blkg ref on the closest blkg + * @blkg: blkg to get + * + * This walks up the blkg tree to find the closest non-dying blkg and returns + * the blkg that it did association with as it may not be the passed in blkg. 
+ */ +static inline struct blkcg_gq *blkg_tryget_closest(struct blkcg_gq *blkg) +{ + while (!percpu_ref_tryget(&blkg->refcnt)) + blkg = blkg->parent; -void __blkg_release_rcu(struct rcu_head *rcu); + return blkg; +} /** * blkg_put - put a blkg reference @@ -478,9 +526,7 @@ void __blkg_release_rcu(struct rcu_head *rcu); */ static inline void blkg_put(struct blkcg_gq *blkg) { - WARN_ON_ONCE(atomic_read(&blkg->refcnt) <= 0); - if (atomic_dec_and_test(&blkg->refcnt)) - call_rcu(&blkg->rcu_head, __blkg_release_rcu); + percpu_ref_put(&blkg->refcnt); } /** @@ -533,25 +579,36 @@ static inline struct request_list *blk_get_rl(struct request_queue *q, rcu_read_lock(); - blkcg = bio_blkcg(bio); + if (bio && bio->bi_blkg) { + blkcg = bio->bi_blkg->blkcg; + if (blkcg == &blkcg_root) + goto rl_use_root; + + blkg_get(bio->bi_blkg); + rcu_read_unlock(); + return &bio->bi_blkg->rl; + } - /* bypass blkg lookup and use @q->root_rl directly for root */ + blkcg = css_to_blkcg(blkcg_css()); if (blkcg == &blkcg_root) - goto root_rl; + goto rl_use_root; - /* - * Try to use blkg->rl. blkg lookup may fail under memory pressure - * or if either the blkcg or queue is going away. Fall back to - * root_rl in such cases. - */ blkg = blkg_lookup(blkcg, q); if (unlikely(!blkg)) - goto root_rl; + blkg = __blkg_lookup_create(blkcg, q); + + if (blkg->blkcg == &blkcg_root || !blkg_tryget(blkg)) + goto rl_use_root; - blkg_get(blkg); rcu_read_unlock(); return &blkg->rl; -root_rl: + + /* + * Each blkg has its own request_list, however, the root blkcg + * uses the request_queue's root_rl. This is to avoid most + * overhead for the root blkcg. + */ +rl_use_root: rcu_read_unlock(); return &q->root_rl; } @@ -797,32 +854,26 @@ static inline bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg struct bio *bio) { return false; } #endif + +static inline void blkcg_bio_issue_init(struct bio *bio) +{ + bio_issue_init(&bio->bi_issue, bio_sectors(bio)); +} + static inline bool blkcg_bio_issue_check(struct request_queue *q, struct bio *bio) { - struct blkcg *blkcg; struct blkcg_gq *blkg; bool throtl = false; rcu_read_lock(); - blkcg = bio_blkcg(bio); - - /* associate blkcg if bio hasn't attached one */ - bio_associate_blkcg(bio, &blkcg->css); - blkg = blkg_lookup(blkcg, q); - if (unlikely(!blkg)) { - spin_lock_irq(q->queue_lock); - blkg = blkg_lookup_create(blkcg, q); - if (IS_ERR(blkg)) - blkg = NULL; - spin_unlock_irq(q->queue_lock); - } + bio_associate_create_blkg(q, bio); + blkg = bio->bi_blkg; throtl = blk_throtl_bio(q, blkg, bio); if (!throtl) { - blkg = blkg ?: q->root_blkg; /* * If the bio is flagged with BIO_QUEUE_ENTERED it means this * is a split bio and we would have already accounted for the @@ -834,6 +885,8 @@ static inline bool blkcg_bio_issue_check(struct request_queue *q, blkg_rwstat_add(&blkg->stat_ios, bio->bi_opf, 1); } + blkcg_bio_issue_init(bio); + rcu_read_unlock(); return !throtl; } @@ -930,6 +983,7 @@ static inline int blkcg_activate_policy(struct request_queue *q, static inline void blkcg_deactivate_policy(struct request_queue *q, const struct blkcg_policy *pol) { } +static inline struct blkcg *__bio_blkcg(struct bio *bio) { return NULL; } static inline struct blkcg *bio_blkcg(struct bio *bio) { return NULL; } static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg, @@ -945,6 +999,7 @@ static inline void blk_put_rl(struct request_list *rl) { } static inline void blk_rq_set_rl(struct request *rq, struct request_list *rl) { } static inline struct request_list *blk_rq_rl(struct request *rq) 
{ return &rq->q->root_rl; } +static inline void blkcg_bio_issue_init(struct bio *bio) { } static inline bool blkcg_bio_issue_check(struct request_queue *q, struct bio *bio) { return true; } diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h index 1da59c16f637..2286dc12c6bc 100644 --- a/include/linux/blk-mq.h +++ b/include/linux/blk-mq.h @@ -203,6 +203,10 @@ enum { struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *); struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set, struct request_queue *q); +struct request_queue *blk_mq_init_sq_queue(struct blk_mq_tag_set *set, + const struct blk_mq_ops *ops, + unsigned int queue_depth, + unsigned int set_flags); int blk_mq_register_dev(struct device *, struct request_queue *); void blk_mq_unregister_dev(struct device *, struct request_queue *); diff --git a/include/linux/blk-pm.h b/include/linux/blk-pm.h new file mode 100644 index 000000000000..b80c65aba249 --- /dev/null +++ b/include/linux/blk-pm.h @@ -0,0 +1,24 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef _BLK_PM_H_ +#define _BLK_PM_H_ + +struct device; +struct request_queue; + +/* + * block layer runtime pm functions + */ +#ifdef CONFIG_PM +extern void blk_pm_runtime_init(struct request_queue *q, struct device *dev); +extern int blk_pre_runtime_suspend(struct request_queue *q); +extern void blk_post_runtime_suspend(struct request_queue *q, int err); +extern void blk_pre_runtime_resume(struct request_queue *q); +extern void blk_post_runtime_resume(struct request_queue *q, int err); +extern void blk_set_runtime_active(struct request_queue *q); +#else +static inline void blk_pm_runtime_init(struct request_queue *q, + struct device *dev) {} +#endif + +#endif /* _BLK_PM_H_ */ diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h index f6dfb30737d8..093a818c5b68 100644 --- a/include/linux/blk_types.h +++ b/include/linux/blk_types.h @@ -178,7 +178,6 @@ struct bio { * release. Read comment on top of bio_associate_current(). */ struct io_context *bi_ioc; - struct cgroup_subsys_state *bi_css; struct blkcg_gq *bi_blkg; struct bio_issue bi_issue; #endif @@ -284,8 +283,6 @@ enum req_opf { REQ_OP_FLUSH = 2, /* discard sectors */ REQ_OP_DISCARD = 3, - /* get zone information */ - REQ_OP_ZONE_REPORT = 4, /* securely erase sectors */ REQ_OP_SECURE_ERASE = 5, /* reset a zone write pointer */ diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 6980014357d4..4293dc1cd160 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -108,7 +108,7 @@ typedef __u32 __bitwise req_flags_t; #define RQF_QUIET ((__force req_flags_t)(1 << 11)) /* elevator private data attached */ #define RQF_ELVPRIV ((__force req_flags_t)(1 << 12)) -/* account I/O stat */ +/* account into disk and partition IO statistics */ #define RQF_IO_STAT ((__force req_flags_t)(1 << 13)) /* request came from our alloc pool */ #define RQF_ALLOCED ((__force req_flags_t)(1 << 14)) @@ -116,7 +116,7 @@ typedef __u32 __bitwise req_flags_t; #define RQF_PM ((__force req_flags_t)(1 << 15)) /* on IO scheduler merge hash */ #define RQF_HASHED ((__force req_flags_t)(1 << 16)) -/* IO stats tracking on */ +/* track IO completion time */ #define RQF_STATS ((__force req_flags_t)(1 << 17)) /* Look at ->special_vec for the actual data payload instead of the bio chain. 
*/ @@ -396,16 +396,13 @@ struct queue_limits { #ifdef CONFIG_BLK_DEV_ZONED -struct blk_zone_report_hdr { - unsigned int nr_zones; - u8 padding[60]; -}; - +extern unsigned int blkdev_nr_zones(struct block_device *bdev); extern int blkdev_report_zones(struct block_device *bdev, sector_t sector, struct blk_zone *zones, unsigned int *nr_zones, gfp_t gfp_mask); extern int blkdev_reset_zones(struct block_device *bdev, sector_t sectors, sector_t nr_sectors, gfp_t gfp_mask); +extern int blk_revalidate_disk_zones(struct gendisk *disk); extern int blkdev_report_zones_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, unsigned long arg); @@ -414,6 +411,16 @@ extern int blkdev_reset_zones_ioctl(struct block_device *bdev, fmode_t mode, #else /* CONFIG_BLK_DEV_ZONED */ +static inline unsigned int blkdev_nr_zones(struct block_device *bdev) +{ + return 0; +} + +static inline int blk_revalidate_disk_zones(struct gendisk *disk) +{ + return 0; +} + static inline int blkdev_report_zones_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, unsigned long arg) @@ -504,6 +511,12 @@ struct request_queue { * various queue flags, see QUEUE_* below */ unsigned long queue_flags; + /* + * Number of contexts that have called blk_set_pm_only(). If this + * counter is above zero then only RQF_PM and RQF_PREEMPT requests are + * processed. + */ + atomic_t pm_only; /* * ida allocated id for this queue. Used to index queues from @@ -679,7 +692,7 @@ struct request_queue { #define QUEUE_FLAG_FAIL_IO 7 /* fake timeout */ #define QUEUE_FLAG_NONROT 9 /* non-rotational device (SSD) */ #define QUEUE_FLAG_VIRT QUEUE_FLAG_NONROT /* paravirt device */ -#define QUEUE_FLAG_IO_STAT 10 /* do IO stats */ +#define QUEUE_FLAG_IO_STAT 10 /* do disk/partitions IO accounting */ #define QUEUE_FLAG_DISCARD 11 /* supports DISCARD */ #define QUEUE_FLAG_NOXMERGES 12 /* No extended merges */ #define QUEUE_FLAG_ADD_RANDOM 13 /* Contributes to random pool */ @@ -693,12 +706,12 @@ struct request_queue { #define QUEUE_FLAG_FUA 21 /* device supports FUA writes */ #define QUEUE_FLAG_FLUSH_NQ 22 /* flush not queueuable */ #define QUEUE_FLAG_DAX 23 /* device supports DAX */ -#define QUEUE_FLAG_STATS 24 /* track rq completion times */ +#define QUEUE_FLAG_STATS 24 /* track IO start and completion times */ #define QUEUE_FLAG_POLL_STATS 25 /* collecting stats for hybrid polling */ #define QUEUE_FLAG_REGISTERED 26 /* queue has been registered to a disk */ #define QUEUE_FLAG_SCSI_PASSTHROUGH 27 /* queue supports SCSI commands */ #define QUEUE_FLAG_QUIESCED 28 /* queue has been quiesced */ -#define QUEUE_FLAG_PREEMPT_ONLY 29 /* only process REQ_PREEMPT requests */ +#define QUEUE_FLAG_PCI_P2PDMA 29 /* device supports PCI p2p requests */ #define QUEUE_FLAG_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \ (1 << QUEUE_FLAG_SAME_COMP) | \ @@ -731,17 +744,18 @@ bool blk_queue_flag_test_and_clear(unsigned int flag, struct request_queue *q); #define blk_queue_dax(q) test_bit(QUEUE_FLAG_DAX, &(q)->queue_flags) #define blk_queue_scsi_passthrough(q) \ test_bit(QUEUE_FLAG_SCSI_PASSTHROUGH, &(q)->queue_flags) +#define blk_queue_pci_p2pdma(q) \ + test_bit(QUEUE_FLAG_PCI_P2PDMA, &(q)->queue_flags) #define blk_noretry_request(rq) \ ((rq)->cmd_flags & (REQ_FAILFAST_DEV|REQ_FAILFAST_TRANSPORT| \ REQ_FAILFAST_DRIVER)) #define blk_queue_quiesced(q) test_bit(QUEUE_FLAG_QUIESCED, &(q)->queue_flags) -#define blk_queue_preempt_only(q) \ - test_bit(QUEUE_FLAG_PREEMPT_ONLY, &(q)->queue_flags) +#define blk_queue_pm_only(q) atomic_read(&(q)->pm_only) #define blk_queue_fua(q) 
test_bit(QUEUE_FLAG_FUA, &(q)->queue_flags) -extern int blk_set_preempt_only(struct request_queue *q); -extern void blk_clear_preempt_only(struct request_queue *q); +extern void blk_set_pm_only(struct request_queue *q); +extern void blk_clear_pm_only(struct request_queue *q); static inline int queue_in_flight(struct request_queue *q) { @@ -799,6 +813,11 @@ static inline unsigned int blk_queue_zone_sectors(struct request_queue *q) } #ifdef CONFIG_BLK_DEV_ZONED +static inline unsigned int blk_queue_nr_zones(struct request_queue *q) +{ + return blk_queue_is_zoned(q) ? q->nr_zones : 0; +} + static inline unsigned int blk_queue_zone_no(struct request_queue *q, sector_t sector) { @@ -814,6 +833,11 @@ static inline bool blk_queue_zone_is_seq(struct request_queue *q, return false; return test_bit(blk_queue_zone_no(q, sector), q->seq_zones_bitmap); } +#else /* CONFIG_BLK_DEV_ZONED */ +static inline unsigned int blk_queue_nr_zones(struct request_queue *q) +{ + return 0; +} #endif /* CONFIG_BLK_DEV_ZONED */ static inline bool rq_is_sync(struct request *rq) @@ -1281,29 +1305,6 @@ extern void blk_put_queue(struct request_queue *); extern void blk_set_queue_dying(struct request_queue *); /* - * block layer runtime pm functions - */ -#ifdef CONFIG_PM -extern void blk_pm_runtime_init(struct request_queue *q, struct device *dev); -extern int blk_pre_runtime_suspend(struct request_queue *q); -extern void blk_post_runtime_suspend(struct request_queue *q, int err); -extern void blk_pre_runtime_resume(struct request_queue *q); -extern void blk_post_runtime_resume(struct request_queue *q, int err); -extern void blk_set_runtime_active(struct request_queue *q); -#else -static inline void blk_pm_runtime_init(struct request_queue *q, - struct device *dev) {} -static inline int blk_pre_runtime_suspend(struct request_queue *q) -{ - return -ENOSYS; -} -static inline void blk_post_runtime_suspend(struct request_queue *q, int err) {} -static inline void blk_pre_runtime_resume(struct request_queue *q) {} -static inline void blk_post_runtime_resume(struct request_queue *q, int err) {} -static inline void blk_set_runtime_active(struct request_queue *q) {} -#endif - -/* * blk_plug permits building a queue of related requests by holding the I/O * fragments for a short period. This allows merging of sequential requests * into single larger request. As the requests are moved from a per-task list to @@ -1676,94 +1677,6 @@ static inline void put_dev_sector(Sector p) put_page(p.v); } -static inline bool __bvec_gap_to_prev(struct request_queue *q, - struct bio_vec *bprv, unsigned int offset) -{ - return offset || - ((bprv->bv_offset + bprv->bv_len) & queue_virt_boundary(q)); -} - -/* - * Check if adding a bio_vec after bprv with offset would create a gap in - * the SG list. Most drivers don't care about this, but some do. - */ -static inline bool bvec_gap_to_prev(struct request_queue *q, - struct bio_vec *bprv, unsigned int offset) -{ - if (!queue_virt_boundary(q)) - return false; - return __bvec_gap_to_prev(q, bprv, offset); -} - -/* - * Check if the two bvecs from two bios can be merged to one segment. - * If yes, no need to check gap between the two bios since the 1st bio - * and the 1st bvec in the 2nd bio can be handled in one segment. 
- */ -static inline bool bios_segs_mergeable(struct request_queue *q, - struct bio *prev, struct bio_vec *prev_last_bv, - struct bio_vec *next_first_bv) -{ - if (!BIOVEC_PHYS_MERGEABLE(prev_last_bv, next_first_bv)) - return false; - if (!BIOVEC_SEG_BOUNDARY(q, prev_last_bv, next_first_bv)) - return false; - if (prev->bi_seg_back_size + next_first_bv->bv_len > - queue_max_segment_size(q)) - return false; - return true; -} - -static inline bool bio_will_gap(struct request_queue *q, - struct request *prev_rq, - struct bio *prev, - struct bio *next) -{ - if (bio_has_data(prev) && queue_virt_boundary(q)) { - struct bio_vec pb, nb; - - /* - * don't merge if the 1st bio starts with non-zero - * offset, otherwise it is quite difficult to respect - * sg gap limit. We work hard to merge a huge number of small - * single bios in case of mkfs. - */ - if (prev_rq) - bio_get_first_bvec(prev_rq->bio, &pb); - else - bio_get_first_bvec(prev, &pb); - if (pb.bv_offset) - return true; - - /* - * We don't need to worry about the situation that the - * merged segment ends in unaligned virt boundary: - * - * - if 'pb' ends aligned, the merged segment ends aligned - * - if 'pb' ends unaligned, the next bio must include - * one single bvec of 'nb', otherwise the 'nb' can't - * merge with 'pb' - */ - bio_get_last_bvec(prev, &pb); - bio_get_first_bvec(next, &nb); - - if (!bios_segs_mergeable(q, prev, &pb, &nb)) - return __bvec_gap_to_prev(q, &pb, nb.bv_offset); - } - - return false; -} - -static inline bool req_gap_back_merge(struct request *req, struct bio *bio) -{ - return bio_will_gap(req->q, req, req->biotail, bio); -} - -static inline bool req_gap_front_merge(struct request *req, struct bio *bio) -{ - return bio_will_gap(req->q, NULL, bio, req->bio); -} - int kblockd_schedule_work(struct work_struct *work); int kblockd_schedule_work_on(int cpu, struct work_struct *work); int kblockd_mod_delayed_work_on(int cpu, struct delayed_work *dwork, unsigned long delay); @@ -1843,26 +1756,6 @@ queue_max_integrity_segments(struct request_queue *q) return q->limits.max_integrity_segments; } -static inline bool integrity_req_gap_back_merge(struct request *req, - struct bio *next) -{ - struct bio_integrity_payload *bip = bio_integrity(req->bio); - struct bio_integrity_payload *bip_next = bio_integrity(next); - - return bvec_gap_to_prev(req->q, &bip->bip_vec[bip->bip_vcnt - 1], - bip_next->bip_vec[0].bv_offset); -} - -static inline bool integrity_req_gap_front_merge(struct request *req, - struct bio *bio) -{ - struct bio_integrity_payload *bip = bio_integrity(bio); - struct bio_integrity_payload *bip_next = bio_integrity(req->bio); - - return bvec_gap_to_prev(req->q, &bip->bip_vec[bip->bip_vcnt - 1], - bip_next->bip_vec[0].bv_offset); -} - /** * bio_integrity_intervals - Return number of integrity intervals for a bio * @bi: blk_integrity profile for device @@ -1947,17 +1840,6 @@ static inline bool blk_integrity_merge_bio(struct request_queue *rq, return true; } -static inline bool integrity_req_gap_back_merge(struct request *req, - struct bio *next) -{ - return false; -} -static inline bool integrity_req_gap_front_merge(struct request *req, - struct bio *bio) -{ - return false; -} - static inline unsigned int bio_integrity_intervals(struct blk_integrity *bi, unsigned int sectors) { @@ -1987,6 +1869,9 @@ struct block_device_operations { int (*getgeo)(struct block_device *, struct hd_geometry *); /* this callback is with swap_lock and sometimes page table lock held */ void (*swap_slot_free_notify) (struct block_device *, 
unsigned long); + int (*report_zones)(struct gendisk *, sector_t sector, + struct blk_zone *zones, unsigned int *nr_zones, + gfp_t gfp_mask); struct module *owner; const struct pr_ops *pr_ops; }; diff --git a/include/linux/bpf-cgroup.h b/include/linux/bpf-cgroup.h index f91b0f8ff3a9..588dd5f0bd85 100644 --- a/include/linux/bpf-cgroup.h +++ b/include/linux/bpf-cgroup.h @@ -2,6 +2,7 @@ #ifndef _BPF_CGROUP_H #define _BPF_CGROUP_H +#include <linux/bpf.h> #include <linux/errno.h> #include <linux/jump_label.h> #include <linux/percpu.h> @@ -22,7 +23,11 @@ struct bpf_cgroup_storage; extern struct static_key_false cgroup_bpf_enabled_key; #define cgroup_bpf_enabled static_branch_unlikely(&cgroup_bpf_enabled_key) -DECLARE_PER_CPU(void*, bpf_cgroup_storage); +DECLARE_PER_CPU(struct bpf_cgroup_storage*, + bpf_cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE]); + +#define for_each_cgroup_storage_type(stype) \ + for (stype = 0; stype < MAX_BPF_CGROUP_STORAGE_TYPE; stype++) struct bpf_cgroup_storage_map; @@ -32,7 +37,10 @@ struct bpf_storage_buffer { }; struct bpf_cgroup_storage { - struct bpf_storage_buffer *buf; + union { + struct bpf_storage_buffer *buf; + void __percpu *percpu_buf; + }; struct bpf_cgroup_storage_map *map; struct bpf_cgroup_storage_key key; struct list_head list; @@ -43,7 +51,7 @@ struct bpf_cgroup_storage { struct bpf_prog_list { struct list_head node; struct bpf_prog *prog; - struct bpf_cgroup_storage *storage; + struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE]; }; struct bpf_prog_array; @@ -101,18 +109,26 @@ int __cgroup_bpf_run_filter_sock_ops(struct sock *sk, int __cgroup_bpf_check_dev_permission(short dev_type, u32 major, u32 minor, short access, enum bpf_attach_type type); -static inline void bpf_cgroup_storage_set(struct bpf_cgroup_storage *storage) +static inline enum bpf_cgroup_storage_type cgroup_storage_type( + struct bpf_map *map) { - struct bpf_storage_buffer *buf; + if (map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE) + return BPF_CGROUP_STORAGE_PERCPU; + + return BPF_CGROUP_STORAGE_SHARED; +} - if (!storage) - return; +static inline void bpf_cgroup_storage_set(struct bpf_cgroup_storage + *storage[MAX_BPF_CGROUP_STORAGE_TYPE]) +{ + enum bpf_cgroup_storage_type stype; - buf = READ_ONCE(storage->buf); - this_cpu_write(bpf_cgroup_storage, &buf->data[0]); + for_each_cgroup_storage_type(stype) + this_cpu_write(bpf_cgroup_storage[stype], storage[stype]); } -struct bpf_cgroup_storage *bpf_cgroup_storage_alloc(struct bpf_prog *prog); +struct bpf_cgroup_storage *bpf_cgroup_storage_alloc(struct bpf_prog *prog, + enum bpf_cgroup_storage_type stype); void bpf_cgroup_storage_free(struct bpf_cgroup_storage *storage); void bpf_cgroup_storage_link(struct bpf_cgroup_storage *storage, struct cgroup *cgroup, @@ -121,6 +137,10 @@ void bpf_cgroup_storage_unlink(struct bpf_cgroup_storage *storage); int bpf_cgroup_storage_assign(struct bpf_prog *prog, struct bpf_map *map); void bpf_cgroup_storage_release(struct bpf_prog *prog, struct bpf_map *map); +int bpf_percpu_cgroup_storage_copy(struct bpf_map *map, void *key, void *value); +int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key, + void *value, u64 flags); + /* Wrappers for __cgroup_bpf_run_filter_skb() guarded by cgroup_bpf_enabled. 
*/ #define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb) \ ({ \ @@ -265,15 +285,24 @@ static inline int cgroup_bpf_prog_query(const union bpf_attr *attr, return -EINVAL; } -static inline void bpf_cgroup_storage_set(struct bpf_cgroup_storage *storage) {} +static inline void bpf_cgroup_storage_set( + struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE]) {} static inline int bpf_cgroup_storage_assign(struct bpf_prog *prog, struct bpf_map *map) { return 0; } static inline void bpf_cgroup_storage_release(struct bpf_prog *prog, struct bpf_map *map) {} static inline struct bpf_cgroup_storage *bpf_cgroup_storage_alloc( - struct bpf_prog *prog) { return 0; } + struct bpf_prog *prog, enum bpf_cgroup_storage_type stype) { return 0; } static inline void bpf_cgroup_storage_free( struct bpf_cgroup_storage *storage) {} +static inline int bpf_percpu_cgroup_storage_copy(struct bpf_map *map, void *key, + void *value) { + return 0; +} +static inline int bpf_percpu_cgroup_storage_update(struct bpf_map *map, + void *key, void *value, u64 flags) { + return 0; +} #define cgroup_bpf_enabled (0) #define BPF_CGROUP_PRE_CONNECT_ENABLED(sk) (0) @@ -293,6 +322,8 @@ static inline void bpf_cgroup_storage_free( #define BPF_CGROUP_RUN_PROG_SOCK_OPS(sock_ops) ({ 0; }) #define BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(type,major,minor,access) ({ 0; }) +#define for_each_cgroup_storage_type(stype) for (; false; ) + #endif /* CONFIG_CGROUP_BPF */ #endif /* _BPF_CGROUP_H */ diff --git a/include/linux/bpf.h b/include/linux/bpf.h index 523481a3471b..33014ae73103 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -39,6 +39,9 @@ struct bpf_map_ops { void *(*map_lookup_elem)(struct bpf_map *map, void *key); int (*map_update_elem)(struct bpf_map *map, void *key, void *value, u64 flags); int (*map_delete_elem)(struct bpf_map *map, void *key); + int (*map_push_elem)(struct bpf_map *map, void *value, u64 flags); + int (*map_pop_elem)(struct bpf_map *map, void *value); + int (*map_peek_elem)(struct bpf_map *map, void *value); /* funcs called by prog_array and perf_event_array map */ void *(*map_fd_get_ptr)(struct bpf_map *map, struct file *map_file, @@ -138,6 +141,7 @@ enum bpf_arg_type { ARG_CONST_MAP_PTR, /* const argument used as pointer to bpf_map */ ARG_PTR_TO_MAP_KEY, /* pointer to stack used as map key */ ARG_PTR_TO_MAP_VALUE, /* pointer to stack used as map value */ + ARG_PTR_TO_UNINIT_MAP_VALUE, /* pointer to valid memory used to store a map value */ /* the following constraints used to prototype bpf_memcmp() and other * functions that access data on eBPF program stack @@ -154,6 +158,7 @@ enum bpf_arg_type { ARG_PTR_TO_CTX, /* pointer to context */ ARG_ANYTHING, /* any (initialized) argument is ok */ + ARG_PTR_TO_SOCKET, /* pointer to bpf_sock */ }; /* type of values returned from helper functions */ @@ -162,6 +167,7 @@ enum bpf_return_type { RET_VOID, /* function doesn't return anything */ RET_PTR_TO_MAP_VALUE, /* returns a pointer to map elem value */ RET_PTR_TO_MAP_VALUE_OR_NULL, /* returns a pointer to map elem value or NULL */ + RET_PTR_TO_SOCKET_OR_NULL, /* returns a pointer to a socket or NULL */ }; /* eBPF function prototype used by verifier to allow BPF_CALLs from eBPF programs @@ -212,6 +218,9 @@ enum bpf_reg_type { PTR_TO_PACKET_META, /* skb->data - meta_len */ PTR_TO_PACKET, /* reg points to skb->data */ PTR_TO_PACKET_END, /* skb->data + headlen */ + PTR_TO_FLOW_KEYS, /* reg points to bpf_flow_keys */ + PTR_TO_SOCKET, /* reg points to struct bpf_sock */ + PTR_TO_SOCKET_OR_NULL, /* reg points to struct 
bpf_sock or NULL */ }; /* The information passed from prog-specific *_is_valid_access @@ -258,6 +267,7 @@ struct bpf_verifier_ops { struct bpf_prog_offload_ops { int (*insn_hook)(struct bpf_verifier_env *env, int insn_idx, int prev_insn_idx); + int (*finalize)(struct bpf_verifier_env *env); }; struct bpf_prog_offload { @@ -271,6 +281,14 @@ struct bpf_prog_offload { u32 jited_len; }; +enum bpf_cgroup_storage_type { + BPF_CGROUP_STORAGE_SHARED, + BPF_CGROUP_STORAGE_PERCPU, + __BPF_CGROUP_STORAGE_MAX +}; + +#define MAX_BPF_CGROUP_STORAGE_TYPE __BPF_CGROUP_STORAGE_MAX + struct bpf_prog_aux { atomic_t refcnt; u32 used_map_cnt; @@ -288,7 +306,7 @@ struct bpf_prog_aux { struct bpf_prog *prog; struct user_struct *user; u64 load_time; /* ns since boottime */ - struct bpf_map *cgroup_storage; + struct bpf_map *cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE]; char name[BPF_OBJ_NAME_LEN]; #ifdef CONFIG_SECURITY void *security; @@ -334,6 +352,11 @@ const struct bpf_func_proto *bpf_get_trace_printk_proto(void); typedef unsigned long (*bpf_ctx_copy_t)(void *dst, const void *src, unsigned long off, unsigned long len); +typedef u32 (*bpf_convert_ctx_access_t)(enum bpf_access_type type, + const struct bpf_insn *src, + struct bpf_insn *dst, + struct bpf_prog *prog, + u32 *target_size); u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size, void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy); @@ -357,7 +380,7 @@ int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr, */ struct bpf_prog_array_item { struct bpf_prog *prog; - struct bpf_cgroup_storage *cgroup_storage; + struct bpf_cgroup_storage *cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE]; }; struct bpf_prog_array { @@ -718,33 +741,18 @@ static inline void bpf_map_offload_map_free(struct bpf_map *map) } #endif /* CONFIG_NET && CONFIG_BPF_SYSCALL */ -#if defined(CONFIG_STREAM_PARSER) && defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_INET) -struct sock *__sock_map_lookup_elem(struct bpf_map *map, u32 key); -struct sock *__sock_hash_lookup_elem(struct bpf_map *map, void *key); -int sock_map_prog(struct bpf_map *map, struct bpf_prog *prog, u32 type); -int sockmap_get_from_fd(const union bpf_attr *attr, int type, - struct bpf_prog *prog); +#if defined(CONFIG_BPF_STREAM_PARSER) +int sock_map_prog_update(struct bpf_map *map, struct bpf_prog *prog, u32 which); +int sock_map_get_from_fd(const union bpf_attr *attr, struct bpf_prog *prog); #else -static inline struct sock *__sock_map_lookup_elem(struct bpf_map *map, u32 key) -{ - return NULL; -} - -static inline struct sock *__sock_hash_lookup_elem(struct bpf_map *map, - void *key) -{ - return NULL; -} - -static inline int sock_map_prog(struct bpf_map *map, - struct bpf_prog *prog, - u32 type) +static inline int sock_map_prog_update(struct bpf_map *map, + struct bpf_prog *prog, u32 which) { return -EOPNOTSUPP; } -static inline int sockmap_get_from_fd(const union bpf_attr *attr, int type, - struct bpf_prog *prog) +static inline int sock_map_get_from_fd(const union bpf_attr *attr, + struct bpf_prog *prog) { return -EINVAL; } @@ -806,6 +814,9 @@ static inline int bpf_fd_reuseport_array_update_elem(struct bpf_map *map, extern const struct bpf_func_proto bpf_map_lookup_elem_proto; extern const struct bpf_func_proto bpf_map_update_elem_proto; extern const struct bpf_func_proto bpf_map_delete_elem_proto; +extern const struct bpf_func_proto bpf_map_push_elem_proto; +extern const struct bpf_func_proto bpf_map_pop_elem_proto; +extern const struct bpf_func_proto bpf_map_peek_elem_proto; extern 
const struct bpf_func_proto bpf_get_prandom_u32_proto; extern const struct bpf_func_proto bpf_get_smp_processor_id_proto; @@ -820,6 +831,10 @@ extern const struct bpf_func_proto bpf_get_stack_proto; extern const struct bpf_func_proto bpf_sock_map_update_proto; extern const struct bpf_func_proto bpf_sock_hash_update_proto; extern const struct bpf_func_proto bpf_get_current_cgroup_id_proto; +extern const struct bpf_func_proto bpf_msg_redirect_hash_proto; +extern const struct bpf_func_proto bpf_msg_redirect_map_proto; +extern const struct bpf_func_proto bpf_sk_redirect_hash_proto; +extern const struct bpf_func_proto bpf_sk_redirect_map_proto; extern const struct bpf_func_proto bpf_get_local_storage_proto; @@ -827,4 +842,29 @@ extern const struct bpf_func_proto bpf_get_local_storage_proto; void bpf_user_rnd_init_once(void); u64 bpf_user_rnd_u32(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5); +#if defined(CONFIG_NET) +bool bpf_sock_is_valid_access(int off, int size, enum bpf_access_type type, + struct bpf_insn_access_aux *info); +u32 bpf_sock_convert_ctx_access(enum bpf_access_type type, + const struct bpf_insn *si, + struct bpf_insn *insn_buf, + struct bpf_prog *prog, + u32 *target_size); +#else +static inline bool bpf_sock_is_valid_access(int off, int size, + enum bpf_access_type type, + struct bpf_insn_access_aux *info) +{ + return false; +} +static inline u32 bpf_sock_convert_ctx_access(enum bpf_access_type type, + const struct bpf_insn *si, + struct bpf_insn *insn_buf, + struct bpf_prog *prog, + u32 *target_size) +{ + return 0; +} +#endif + #endif /* _LINUX_BPF_H */ diff --git a/include/linux/bpf_types.h b/include/linux/bpf_types.h index cd26c090e7c0..44d9ab4809bd 100644 --- a/include/linux/bpf_types.h +++ b/include/linux/bpf_types.h @@ -16,6 +16,7 @@ BPF_PROG_TYPE(BPF_PROG_TYPE_LWT_SEG6LOCAL, lwt_seg6local) BPF_PROG_TYPE(BPF_PROG_TYPE_SOCK_OPS, sock_ops) BPF_PROG_TYPE(BPF_PROG_TYPE_SK_SKB, sk_skb) BPF_PROG_TYPE(BPF_PROG_TYPE_SK_MSG, sk_msg) +BPF_PROG_TYPE(BPF_PROG_TYPE_FLOW_DISSECTOR, flow_dissector) #endif #ifdef CONFIG_BPF_EVENTS BPF_PROG_TYPE(BPF_PROG_TYPE_KPROBE, kprobe) @@ -42,6 +43,7 @@ BPF_MAP_TYPE(BPF_MAP_TYPE_CGROUP_ARRAY, cgroup_array_map_ops) #endif #ifdef CONFIG_CGROUP_BPF BPF_MAP_TYPE(BPF_MAP_TYPE_CGROUP_STORAGE, cgroup_storage_map_ops) +BPF_MAP_TYPE(BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE, cgroup_storage_map_ops) #endif BPF_MAP_TYPE(BPF_MAP_TYPE_HASH, htab_map_ops) BPF_MAP_TYPE(BPF_MAP_TYPE_PERCPU_HASH, htab_percpu_map_ops) @@ -49,13 +51,13 @@ BPF_MAP_TYPE(BPF_MAP_TYPE_LRU_HASH, htab_lru_map_ops) BPF_MAP_TYPE(BPF_MAP_TYPE_LRU_PERCPU_HASH, htab_lru_percpu_map_ops) BPF_MAP_TYPE(BPF_MAP_TYPE_LPM_TRIE, trie_map_ops) #ifdef CONFIG_PERF_EVENTS -BPF_MAP_TYPE(BPF_MAP_TYPE_STACK_TRACE, stack_map_ops) +BPF_MAP_TYPE(BPF_MAP_TYPE_STACK_TRACE, stack_trace_map_ops) #endif BPF_MAP_TYPE(BPF_MAP_TYPE_ARRAY_OF_MAPS, array_of_maps_map_ops) BPF_MAP_TYPE(BPF_MAP_TYPE_HASH_OF_MAPS, htab_of_maps_map_ops) #ifdef CONFIG_NET BPF_MAP_TYPE(BPF_MAP_TYPE_DEVMAP, dev_map_ops) -#if defined(CONFIG_STREAM_PARSER) && defined(CONFIG_INET) +#if defined(CONFIG_BPF_STREAM_PARSER) BPF_MAP_TYPE(BPF_MAP_TYPE_SOCKMAP, sock_map_ops) BPF_MAP_TYPE(BPF_MAP_TYPE_SOCKHASH, sock_hash_ops) #endif @@ -67,3 +69,5 @@ BPF_MAP_TYPE(BPF_MAP_TYPE_XSKMAP, xsk_map_ops) BPF_MAP_TYPE(BPF_MAP_TYPE_REUSEPORT_SOCKARRAY, reuseport_array_ops) #endif #endif +BPF_MAP_TYPE(BPF_MAP_TYPE_QUEUE, queue_map_ops) +BPF_MAP_TYPE(BPF_MAP_TYPE_STACK, stack_map_ops) diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h index 
38b04f559ad3..9e8056ec20fa 100644 --- a/include/linux/bpf_verifier.h +++ b/include/linux/bpf_verifier.h @@ -41,6 +41,7 @@ enum bpf_reg_liveness { }; struct bpf_reg_state { + /* Ordering of fields matters. See states_equal() */ enum bpf_reg_type type; union { /* valid when type == PTR_TO_PACKET */ @@ -57,9 +58,10 @@ struct bpf_reg_state { * offset, so they can share range knowledge. * For PTR_TO_MAP_VALUE_OR_NULL this is used to share which map value we * came from, when one is tested for != NULL. + * For PTR_TO_SOCKET this is used to share which pointers retain the + * same reference to the socket, to determine proper reference freeing. */ u32 id; - /* Ordering of fields matters. See states_equal() */ /* For scalar types (SCALAR_VALUE), this represents our knowledge of * the actual value. * For pointer types, this represents the variable part of the offset @@ -76,15 +78,15 @@ struct bpf_reg_state { s64 smax_value; /* maximum possible (s64)value */ u64 umin_value; /* minimum possible (u64)value */ u64 umax_value; /* maximum possible (u64)value */ + /* parentage chain for liveness checking */ + struct bpf_reg_state *parent; /* Inside the callee two registers can be both PTR_TO_STACK like * R1=fp-8 and R2=fp-8, but one of them points to this function stack * while another to the caller's stack. To differentiate them 'frameno' * is used which is an index in bpf_verifier_state->frame[] array * pointing to bpf_func_state. - * This field must be second to last, for states_equal() reasons. */ u32 frameno; - /* This field must be last, for states_equal() reasons. */ enum bpf_reg_liveness live; }; @@ -102,12 +104,22 @@ struct bpf_stack_state { u8 slot_type[BPF_REG_SIZE]; }; +struct bpf_reference_state { + /* Track each reference created with a unique id, even if the same + * instruction creates the reference multiple times (eg, via CALL). + */ + int id; + /* Instruction where the allocation of this reference occurred. This + * is used purely to inform the user of a reference leak. + */ + int insn_idx; +}; + /* state of the program: * type of all registers and stack info */ struct bpf_func_state { struct bpf_reg_state regs[MAX_BPF_REG]; - struct bpf_verifier_state *parent; /* index of call instruction that called into this func */ int callsite; /* stack frame number of this function state from pov of @@ -120,7 +132,9 @@ struct bpf_func_state { */ u32 subprogno; - /* should be second to last. See copy_func_state() */ + /* The following fields should be last. See copy_func_state() */ + int acquired_refs; + struct bpf_reference_state *refs; int allocated_stack; struct bpf_stack_state *stack; }; @@ -129,10 +143,20 @@ struct bpf_func_state { struct bpf_verifier_state { /* call stack tracking */ struct bpf_func_state *frame[MAX_CALL_FRAMES]; - struct bpf_verifier_state *parent; u32 curframe; }; +#define bpf_get_spilled_reg(slot, frame) \ + (((slot < frame->allocated_stack / BPF_REG_SIZE) && \ + (frame->stack[slot].slot_type[0] == STACK_SPILL)) \ + ? &frame->stack[slot].spilled_ptr : NULL) + +/* Iterate over 'frame', setting 'reg' to either NULL or a spilled register. 
*/ +#define bpf_for_each_spilled_reg(iter, frame, reg) \ + for (iter = 0, reg = bpf_get_spilled_reg(iter, frame); \ + iter < frame->allocated_stack / BPF_REG_SIZE; \ + iter++, reg = bpf_get_spilled_reg(iter, frame)) + /* linked list of verifier states used to prune search */ struct bpf_verifier_state_list { struct bpf_verifier_state state; @@ -206,15 +230,21 @@ __printf(2, 0) void bpf_verifier_vlog(struct bpf_verifier_log *log, __printf(2, 3) void bpf_verifier_log_write(struct bpf_verifier_env *env, const char *fmt, ...); -static inline struct bpf_reg_state *cur_regs(struct bpf_verifier_env *env) +static inline struct bpf_func_state *cur_func(struct bpf_verifier_env *env) { struct bpf_verifier_state *cur = env->cur_state; - return cur->frame[cur->curframe]->regs; + return cur->frame[cur->curframe]; +} + +static inline struct bpf_reg_state *cur_regs(struct bpf_verifier_env *env) +{ + return cur_func(env)->regs; } int bpf_prog_offload_verifier_prep(struct bpf_verifier_env *env); int bpf_prog_offload_verify_insn(struct bpf_verifier_env *env, int insn_idx, int prev_insn_idx); +int bpf_prog_offload_finalize(struct bpf_verifier_env *env); #endif /* _LINUX_BPF_VERIFIER_H */ diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h index 96225a77c112..7b73ef7f902d 100644 --- a/include/linux/buffer_head.h +++ b/include/linux/buffer_head.h @@ -242,7 +242,7 @@ int block_commit_write(struct page *page, unsigned from, unsigned to); int block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf, get_block_t get_block); /* Convert errno to return value from ->page_mkwrite() call */ -static inline int block_page_mkwrite_return(int err) +static inline vm_fault_t block_page_mkwrite_return(int err) { if (err == 0) return VM_FAULT_LOCKED; diff --git a/include/linux/bvec.h b/include/linux/bvec.h index fe7a22dd133b..02c73c6aa805 100644 --- a/include/linux/bvec.h +++ b/include/linux/bvec.h @@ -40,8 +40,6 @@ struct bvec_iter { unsigned int bi_idx; /* current index into bvl_vec */ - unsigned int bi_done; /* number of bytes completed */ - unsigned int bi_bvec_done; /* number of bytes completed in current bvec */ }; @@ -85,7 +83,6 @@ static inline bool bvec_iter_advance(const struct bio_vec *bv, bytes -= len; iter->bi_size -= len; iter->bi_bvec_done += len; - iter->bi_done += len; if (iter->bi_bvec_done == __bvec_iter_bvec(bv, *iter)->bv_len) { iter->bi_bvec_done = 0; diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h index ff20b677fb9f..5e1694fe035b 100644 --- a/include/linux/cgroup-defs.h +++ b/include/linux/cgroup-defs.h @@ -20,6 +20,7 @@ #include <linux/u64_stats_sync.h> #include <linux/workqueue.h> #include <linux/bpf-cgroup.h> +#include <linux/psi_types.h> #ifdef CONFIG_CGROUPS @@ -412,6 +413,7 @@ struct cgroup { * specific task are charged to the dom_cgrp. 
*/ struct cgroup *dom_cgrp; + struct cgroup *old_dom_cgrp; /* used while enabling threaded */ /* per-cpu recursive resource statistics */ struct cgroup_rstat_cpu __percpu *rstat_cpu; @@ -435,6 +437,9 @@ struct cgroup { /* used to schedule release agent */ struct work_struct release_agent_work; + /* used to track pressure stalls */ + struct psi_group psi; + /* used to store eBPF programs */ struct cgroup_bpf bpf; diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h index 32c553556bbd..9968332cceed 100644 --- a/include/linux/cgroup.h +++ b/include/linux/cgroup.h @@ -93,6 +93,8 @@ extern struct css_set init_css_set; bool css_has_online_children(struct cgroup_subsys_state *css); struct cgroup_subsys_state *css_from_id(int id, struct cgroup_subsys *ss); +struct cgroup_subsys_state *cgroup_e_css(struct cgroup *cgroup, + struct cgroup_subsys *ss); struct cgroup_subsys_state *cgroup_get_e_css(struct cgroup *cgroup, struct cgroup_subsys *ss); struct cgroup_subsys_state *css_tryget_online_from_dir(struct dentry *dentry, @@ -567,20 +569,11 @@ static inline bool cgroup_is_descendant(struct cgroup *cgrp, static inline struct cgroup *cgroup_ancestor(struct cgroup *cgrp, int ancestor_level) { - struct cgroup *ptr; - if (cgrp->level < ancestor_level) return NULL; - - for (ptr = cgrp; - ptr && ptr->level > ancestor_level; - ptr = cgroup_parent(ptr)) - ; - - if (ptr && ptr->level == ancestor_level) - return ptr; - - return NULL; + while (cgrp && cgrp->level > ancestor_level) + cgrp = cgroup_parent(cgrp); + return cgrp; } /** @@ -657,6 +650,11 @@ static inline void pr_cont_cgroup_path(struct cgroup *cgrp) pr_cont_kernfs_path(cgrp->kn); } +static inline struct psi_group *cgroup_psi(struct cgroup *cgrp) +{ + return &cgrp->psi; +} + static inline void cgroup_init_kthreadd(void) { /* @@ -710,6 +708,16 @@ static inline union kernfs_node_id *cgroup_get_kernfs_id(struct cgroup *cgrp) return NULL; } +static inline struct cgroup *cgroup_parent(struct cgroup *cgrp) +{ + return NULL; +} + +static inline struct psi_group *cgroup_psi(struct cgroup *cgrp) +{ + return NULL; +} + static inline bool task_under_cgroup_hierarchy(struct task_struct *task, struct cgroup *ancestor) { diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h index 308918928767..b21db536fd52 100644 --- a/include/linux/clocksource.h +++ b/include/linux/clocksource.h @@ -241,6 +241,11 @@ static inline void __clocksource_update_freq_khz(struct clocksource *cs, u32 khz __clocksource_update_freq_scale(cs, 1000, khz); } +#ifdef CONFIG_ARCH_CLOCKSOURCE_INIT +extern void clocksource_arch_init(struct clocksource *cs); +#else +static inline void clocksource_arch_init(struct clocksource *cs) { } +#endif extern int timekeeping_notify(struct clocksource *clock); @@ -257,9 +262,6 @@ extern int clocksource_i8253_init(void); #define TIMER_OF_DECLARE(name, compat, fn) \ OF_DECLARE_1_RET(timer, name, compat, fn) -#define CLOCKSOURCE_OF_DECLARE(name, compat, fn) \ - TIMER_OF_DECLARE(name, compat, fn) - #ifdef CONFIG_TIMER_PROBE extern void timer_probe(void); #else diff --git a/include/linux/compat.h b/include/linux/compat.h index 1a3c4f37e908..d30e4dbd4be2 100644 --- a/include/linux/compat.h +++ b/include/linux/compat.h @@ -7,7 +7,7 @@ */ #include <linux/types.h> -#include <linux/compat_time.h> +#include <linux/time.h> #include <linux/stat.h> #include <linux/param.h> /* for HZ */ @@ -103,6 +103,9 @@ typedef struct compat_sigaltstack { compat_size_t ss_size; } compat_stack_t; #endif +#ifndef COMPAT_MINSIGSTKSZ +#define COMPAT_MINSIGSTKSZ 
MINSIGSTKSZ +#endif #define compat_jiffies_to_clock_t(x) \ (((unsigned long)(x) * COMPAT_USER_HZ) / HZ) @@ -110,19 +113,12 @@ typedef struct compat_sigaltstack { typedef __compat_uid32_t compat_uid_t; typedef __compat_gid32_t compat_gid_t; -typedef compat_ulong_t compat_aio_context_t; - struct compat_sel_arg_struct; struct rusage; -struct compat_utimbuf { - compat_time_t actime; - compat_time_t modtime; -}; - struct compat_itimerval { - struct compat_timeval it_interval; - struct compat_timeval it_value; + struct old_timeval32 it_interval; + struct old_timeval32 it_value; }; struct itimerval; @@ -146,7 +142,7 @@ struct compat_timex { compat_long_t constant; compat_long_t precision; compat_long_t tolerance; - struct compat_timeval time; + struct old_timeval32 time; compat_long_t tick; compat_long_t ppsfreq; compat_long_t jitter; @@ -307,8 +303,8 @@ struct compat_rlimit { }; struct compat_rusage { - struct compat_timeval ru_utime; - struct compat_timeval ru_stime; + struct old_timeval32 ru_utime; + struct old_timeval32 ru_stime; compat_long_t ru_maxrss; compat_long_t ru_ixrss; compat_long_t ru_idrss; @@ -452,13 +448,13 @@ long compat_get_bitmap(unsigned long *mask, const compat_ulong_t __user *umask, unsigned long bitmap_size); long compat_put_bitmap(compat_ulong_t __user *umask, unsigned long *mask, unsigned long bitmap_size); -int copy_siginfo_from_user32(siginfo_t *to, const struct compat_siginfo __user *from); -int copy_siginfo_to_user32(struct compat_siginfo __user *to, const siginfo_t *from); +int copy_siginfo_from_user32(kernel_siginfo_t *to, const struct compat_siginfo __user *from); +int copy_siginfo_to_user32(struct compat_siginfo __user *to, const kernel_siginfo_t *from); int get_compat_sigevent(struct sigevent *event, const struct compat_sigevent __user *u_event); -static inline int compat_timeval_compare(struct compat_timeval *lhs, - struct compat_timeval *rhs) +static inline int old_timeval32_compare(struct old_timeval32 *lhs, + struct old_timeval32 *rhs) { if (lhs->tv_sec < rhs->tv_sec) return -1; @@ -467,8 +463,8 @@ static inline int compat_timeval_compare(struct compat_timeval *lhs, return lhs->tv_usec - rhs->tv_usec; } -static inline int compat_timespec_compare(struct compat_timespec *lhs, - struct compat_timespec *rhs) +static inline int old_timespec32_compare(struct old_timespec32 *lhs, + struct old_timespec32 *rhs) { if (lhs->tv_sec < rhs->tv_sec) return -1; @@ -552,12 +548,12 @@ asmlinkage long compat_sys_io_getevents(compat_aio_context_t ctx_id, compat_long_t min_nr, compat_long_t nr, struct io_event __user *events, - struct compat_timespec __user *timeout); + struct old_timespec32 __user *timeout); asmlinkage long compat_sys_io_pgetevents(compat_aio_context_t ctx_id, compat_long_t min_nr, compat_long_t nr, struct io_event __user *events, - struct compat_timespec __user *timeout, + struct old_timespec32 __user *timeout, const struct __compat_aio_sigset __user *usig); /* fs/cookies.c */ @@ -642,11 +638,11 @@ asmlinkage long compat_sys_sendfile64(int out_fd, int in_fd, asmlinkage long compat_sys_pselect6(int n, compat_ulong_t __user *inp, compat_ulong_t __user *outp, compat_ulong_t __user *exp, - struct compat_timespec __user *tsp, + struct old_timespec32 __user *tsp, void __user *sig); asmlinkage long compat_sys_ppoll(struct pollfd __user *ufds, unsigned int nfds, - struct compat_timespec __user *tsp, + struct old_timespec32 __user *tsp, const compat_sigset_t __user *sigmask, compat_size_t sigsetsize); @@ -671,15 +667,15 @@ asmlinkage long compat_sys_newfstat(unsigned 
int fd, /* fs/timerfd.c */ asmlinkage long compat_sys_timerfd_gettime(int ufd, - struct compat_itimerspec __user *otmr); + struct old_itimerspec32 __user *otmr); asmlinkage long compat_sys_timerfd_settime(int ufd, int flags, - const struct compat_itimerspec __user *utmr, - struct compat_itimerspec __user *otmr); + const struct old_itimerspec32 __user *utmr, + struct old_itimerspec32 __user *otmr); /* fs/utimes.c */ asmlinkage long compat_sys_utimensat(unsigned int dfd, const char __user *filename, - struct compat_timespec __user *t, + struct old_timespec32 __user *t, int flags); /* kernel/exit.c */ @@ -691,7 +687,7 @@ asmlinkage long compat_sys_waitid(int, compat_pid_t, /* kernel/futex.c */ asmlinkage long compat_sys_futex(u32 __user *uaddr, int op, u32 val, - struct compat_timespec __user *utime, u32 __user *uaddr2, + struct old_timespec32 __user *utime, u32 __user *uaddr2, u32 val3); asmlinkage long compat_sys_set_robust_list(struct compat_robust_list_head __user *head, @@ -701,8 +697,8 @@ compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr, compat_size_t __user *len_ptr); /* kernel/hrtimer.c */ -asmlinkage long compat_sys_nanosleep(struct compat_timespec __user *rqtp, - struct compat_timespec __user *rmtp); +asmlinkage long compat_sys_nanosleep(struct old_timespec32 __user *rqtp, + struct old_timespec32 __user *rmtp); /* kernel/itimer.c */ asmlinkage long compat_sys_getitimer(int which, @@ -722,19 +718,19 @@ asmlinkage long compat_sys_timer_create(clockid_t which_clock, struct compat_sigevent __user *timer_event_spec, timer_t __user *created_timer_id); asmlinkage long compat_sys_timer_gettime(timer_t timer_id, - struct compat_itimerspec __user *setting); + struct old_itimerspec32 __user *setting); asmlinkage long compat_sys_timer_settime(timer_t timer_id, int flags, - struct compat_itimerspec __user *new, - struct compat_itimerspec __user *old); + struct old_itimerspec32 __user *new, + struct old_itimerspec32 __user *old); asmlinkage long compat_sys_clock_settime(clockid_t which_clock, - struct compat_timespec __user *tp); + struct old_timespec32 __user *tp); asmlinkage long compat_sys_clock_gettime(clockid_t which_clock, - struct compat_timespec __user *tp); + struct old_timespec32 __user *tp); asmlinkage long compat_sys_clock_getres(clockid_t which_clock, - struct compat_timespec __user *tp); + struct old_timespec32 __user *tp); asmlinkage long compat_sys_clock_nanosleep(clockid_t which_clock, int flags, - struct compat_timespec __user *rqtp, - struct compat_timespec __user *rmtp); + struct old_timespec32 __user *rqtp, + struct old_timespec32 __user *rmtp); /* kernel/ptrace.c */ asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid, @@ -748,7 +744,7 @@ asmlinkage long compat_sys_sched_getaffinity(compat_pid_t pid, unsigned int len, compat_ulong_t __user *user_mask_ptr); asmlinkage long compat_sys_sched_rr_get_interval(compat_pid_t pid, - struct compat_timespec __user *interval); + struct old_timespec32 __user *interval); /* kernel/signal.c */ asmlinkage long compat_sys_sigaltstack(const compat_stack_t __user *uss_ptr, @@ -768,7 +764,7 @@ asmlinkage long compat_sys_rt_sigpending(compat_sigset_t __user *uset, compat_size_t sigsetsize); asmlinkage long compat_sys_rt_sigtimedwait(compat_sigset_t __user *uthese, struct compat_siginfo __user *uinfo, - struct compat_timespec __user *uts, compat_size_t sigsetsize); + struct old_timespec32 __user *uts, compat_size_t sigsetsize); asmlinkage long compat_sys_rt_sigqueueinfo(compat_pid_t pid, int sig, struct 
compat_siginfo __user *uinfo); /* No generic prototype for rt_sigreturn */ @@ -782,9 +778,9 @@ asmlinkage long compat_sys_setrlimit(unsigned int resource, asmlinkage long compat_sys_getrusage(int who, struct compat_rusage __user *ru); /* kernel/time.c */ -asmlinkage long compat_sys_gettimeofday(struct compat_timeval __user *tv, +asmlinkage long compat_sys_gettimeofday(struct old_timeval32 __user *tv, struct timezone __user *tz); -asmlinkage long compat_sys_settimeofday(struct compat_timeval __user *tv, +asmlinkage long compat_sys_settimeofday(struct old_timeval32 __user *tv, struct timezone __user *tz); asmlinkage long compat_sys_adjtimex(struct compat_timex __user *utp); @@ -798,11 +794,11 @@ asmlinkage long compat_sys_mq_open(const char __user *u_name, asmlinkage long compat_sys_mq_timedsend(mqd_t mqdes, const char __user *u_msg_ptr, compat_size_t msg_len, unsigned int msg_prio, - const struct compat_timespec __user *u_abs_timeout); + const struct old_timespec32 __user *u_abs_timeout); asmlinkage ssize_t compat_sys_mq_timedreceive(mqd_t mqdes, char __user *u_msg_ptr, compat_size_t msg_len, unsigned int __user *u_msg_prio, - const struct compat_timespec __user *u_abs_timeout); + const struct old_timespec32 __user *u_abs_timeout); asmlinkage long compat_sys_mq_notify(mqd_t mqdes, const struct compat_sigevent __user *u_notification); asmlinkage long compat_sys_mq_getsetattr(mqd_t mqdes, @@ -819,7 +815,7 @@ asmlinkage long compat_sys_msgsnd(int msqid, compat_uptr_t msgp, /* ipc/sem.c */ asmlinkage long compat_sys_semctl(int semid, int semnum, int cmd, int arg); asmlinkage long compat_sys_semtimedop(int semid, struct sembuf __user *tsems, - unsigned nsems, const struct compat_timespec __user *timeout); + unsigned nsems, const struct old_timespec32 __user *timeout); /* ipc/shm.c */ asmlinkage long compat_sys_shmctl(int first, int second, void __user *uptr); @@ -876,7 +872,7 @@ asmlinkage long compat_sys_rt_tgsigqueueinfo(compat_pid_t tgid, struct compat_siginfo __user *uinfo); asmlinkage long compat_sys_recvmmsg(int fd, struct compat_mmsghdr __user *mmsg, unsigned vlen, unsigned int flags, - struct compat_timespec __user *timeout); + struct old_timespec32 __user *timeout); asmlinkage long compat_sys_wait4(compat_pid_t pid, compat_uint_t __user *stat_addr, int options, struct compat_rusage __user *ru); @@ -928,7 +924,7 @@ asmlinkage long compat_sys_pwritev64v2(unsigned long fd, asmlinkage long compat_sys_open(const char __user *filename, int flags, umode_t mode); asmlinkage long compat_sys_utimes(const char __user *filename, - struct compat_timeval __user *t); + struct old_timeval32 __user *t); /* __ARCH_WANT_SYSCALL_NO_FLAGS */ asmlinkage long compat_sys_signalfd(int ufd, @@ -942,15 +938,15 @@ asmlinkage long compat_sys_newlstat(const char __user *filename, struct compat_stat __user *statbuf); /* __ARCH_WANT_SYSCALL_DEPRECATED */ -asmlinkage long compat_sys_time(compat_time_t __user *tloc); +asmlinkage long compat_sys_time(old_time32_t __user *tloc); asmlinkage long compat_sys_utime(const char __user *filename, - struct compat_utimbuf __user *t); + struct old_utimbuf32 __user *t); asmlinkage long compat_sys_futimesat(unsigned int dfd, const char __user *filename, - struct compat_timeval __user *t); + struct old_timeval32 __user *t); asmlinkage long compat_sys_select(int n, compat_ulong_t __user *inp, compat_ulong_t __user *outp, compat_ulong_t __user *exp, - struct compat_timeval __user *tvp); + struct old_timeval32 __user *tvp); asmlinkage long compat_sys_ustat(unsigned dev, struct 
compat_ustat __user *u32); asmlinkage long compat_sys_recv(int fd, void __user *buf, compat_size_t len, unsigned flags); @@ -983,7 +979,7 @@ asmlinkage long compat_sys_sigaction(int sig, #endif /* obsolete: kernel/time/time.c */ -asmlinkage long compat_sys_stime(compat_time_t __user *tptr); +asmlinkage long compat_sys_stime(old_time32_t __user *tptr); /* obsolete: net/socket.c */ asmlinkage long compat_sys_socketcall(int call, u32 __user *args); @@ -1002,15 +998,15 @@ static inline bool in_compat_syscall(void) { return is_compat_task(); } #endif /** - * ns_to_compat_timeval - Compat version of ns_to_timeval + * ns_to_old_timeval32 - Compat version of ns_to_timeval * @nsec: the nanoseconds value to be converted * - * Returns the compat_timeval representation of the nsec parameter. + * Returns the old_timeval32 representation of the nsec parameter. */ -static inline struct compat_timeval ns_to_compat_timeval(s64 nsec) +static inline struct old_timeval32 ns_to_old_timeval32(s64 nsec) { struct timeval tv; - struct compat_timeval ctv; + struct old_timeval32 ctv; tv = ns_to_timeval(nsec); ctv.tv_sec = tv.tv_sec; diff --git a/include/linux/compat_time.h b/include/linux/compat_time.h deleted file mode 100644 index e70bfd1d2c3f..000000000000 --- a/include/linux/compat_time.h +++ /dev/null @@ -1,32 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _LINUX_COMPAT_TIME_H -#define _LINUX_COMPAT_TIME_H - -#include <linux/types.h> -#include <linux/time64.h> - -typedef s32 compat_time_t; - -struct compat_timespec { - compat_time_t tv_sec; - s32 tv_nsec; -}; - -struct compat_timeval { - compat_time_t tv_sec; - s32 tv_usec; -}; - -struct compat_itimerspec { - struct compat_timespec it_interval; - struct compat_timespec it_value; -}; - -extern int compat_get_timespec64(struct timespec64 *, const void __user *); -extern int compat_put_timespec64(const struct timespec64 *, void __user *); -extern int get_compat_itimerspec64(struct itimerspec64 *its, - const struct compat_itimerspec __user *uits); -extern int put_compat_itimerspec64(const struct itimerspec64 *its, - struct compat_itimerspec __user *uits); - -#endif /* _LINUX_COMPAT_TIME_H */ diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h index 4d36b27214fd..90ddfefb6c2b 100644 --- a/include/linux/compiler-gcc.h +++ b/include/linux/compiler-gcc.h @@ -194,6 +194,12 @@ * Conflicts with inlining: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=67368 */ #define __no_sanitize_address __attribute__((no_sanitize_address)) +#ifdef CONFIG_KASAN +#define __no_sanitize_address_or_inline \ + __no_sanitize_address __maybe_unused notrace +#else +#define __no_sanitize_address_or_inline inline +#endif #endif #if GCC_VERSION >= 50100 @@ -211,6 +217,7 @@ #if !defined(__no_sanitize_address) #define __no_sanitize_address +#define __no_sanitize_address_or_inline inline #endif /* diff --git a/include/linux/compiler.h b/include/linux/compiler.h index 681d866efb1e..1921545c6351 100644 --- a/include/linux/compiler.h +++ b/include/linux/compiler.h @@ -99,22 +99,13 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val, * unique, to convince GCC not to merge duplicate inline asm statements. 
*/ #define annotate_reachable() ({ \ - asm volatile("%c0:\n\t" \ - ".pushsection .discard.reachable\n\t" \ - ".long %c0b - .\n\t" \ - ".popsection\n\t" : : "i" (__COUNTER__)); \ + asm volatile("ANNOTATE_REACHABLE counter=%c0" \ + : : "i" (__COUNTER__)); \ }) #define annotate_unreachable() ({ \ - asm volatile("%c0:\n\t" \ - ".pushsection .discard.unreachable\n\t" \ - ".long %c0b - .\n\t" \ - ".popsection\n\t" : : "i" (__COUNTER__)); \ + asm volatile("ANNOTATE_UNREACHABLE counter=%c0" \ + : : "i" (__COUNTER__)); \ }) -#define ASM_UNREACHABLE \ - "999:\n\t" \ - ".pushsection .discard.unreachable\n\t" \ - ".long 999b - .\n\t" \ - ".popsection\n\t" #else #define annotate_reachable() #define annotate_unreachable() @@ -299,6 +290,45 @@ static inline void *offset_to_ptr(const int *off) return (void *)((unsigned long)off + *off); } +#else /* __ASSEMBLY__ */ + +#ifdef __KERNEL__ +#ifndef LINKER_SCRIPT + +#ifdef CONFIG_STACK_VALIDATION +.macro ANNOTATE_UNREACHABLE counter:req +\counter: + .pushsection .discard.unreachable + .long \counter\()b -. + .popsection +.endm + +.macro ANNOTATE_REACHABLE counter:req +\counter: + .pushsection .discard.reachable + .long \counter\()b -. + .popsection +.endm + +.macro ASM_UNREACHABLE +999: + .pushsection .discard.unreachable + .long 999b - . + .popsection +.endm +#else /* CONFIG_STACK_VALIDATION */ +.macro ANNOTATE_UNREACHABLE counter:req +.endm + +.macro ANNOTATE_REACHABLE counter:req +.endm + +.macro ASM_UNREACHABLE +.endm +#endif /* CONFIG_STACK_VALIDATION */ + +#endif /* LINKER_SCRIPT */ +#endif /* __KERNEL__ */ #endif /* __ASSEMBLY__ */ #ifndef __optimize diff --git a/include/linux/compiler_types.h b/include/linux/compiler_types.h index db192becfec4..97cfe29b3f0a 100644 --- a/include/linux/compiler_types.h +++ b/include/linux/compiler_types.h @@ -198,7 +198,6 @@ struct ftrace_likely_data { */ #define __pure __attribute__((pure)) #define __aligned(x) __attribute__((aligned(x))) -#define __aligned_largest __attribute__((aligned)) #define __printf(a, b) __attribute__((format(printf, a, b))) #define __scanf(a, b) __attribute__((format(scanf, a, b))) #define __maybe_unused __attribute__((unused)) diff --git a/include/linux/coredump.h b/include/linux/coredump.h index 207aed96a5b7..abf4b4e65dbb 100644 --- a/include/linux/coredump.h +++ b/include/linux/coredump.h @@ -17,9 +17,9 @@ extern int dump_emit(struct coredump_params *cprm, const void *addr, int nr); extern int dump_align(struct coredump_params *cprm, int align); extern void dump_truncate(struct coredump_params *cprm); #ifdef CONFIG_COREDUMP -extern void do_coredump(const siginfo_t *siginfo); +extern void do_coredump(const kernel_siginfo_t *siginfo); #else -static inline void do_coredump(const siginfo_t *siginfo) {} +static inline void do_coredump(const kernel_siginfo_t *siginfo) {} #endif #endif /* _LINUX_COREDUMP_H */ diff --git a/include/linux/coresight.h b/include/linux/coresight.h index d828a6efe0b1..46c67a764877 100644 --- a/include/linux/coresight.h +++ b/include/linux/coresight.h @@ -94,20 +94,15 @@ union coresight_dev_subtype { * @cpu: the CPU a source belongs to. Only applicable for ETM/PTMs. * @name: name of the component as shown under sysfs. * @nr_inport: number of input ports for this component. - * @outports: list of remote endpoint port number. - * @child_names:name of all child components connected to this device. - * @child_ports:child component port number the current component is - connected to. * @nr_outport: number of output ports for this component. 
+ * @conns: Array of nr_outport connections from this component */ struct coresight_platform_data { int cpu; const char *name; int nr_inport; - int *outports; - const char **child_names; - int *child_ports; int nr_outport; + struct coresight_connection *conns; }; /** @@ -190,23 +185,15 @@ struct coresight_device { * @disable: disables the sink. * @alloc_buffer: initialises perf's ring buffer for trace collection. * @free_buffer: release memory allocated in @get_config. - * @set_buffer: initialises buffer mechanic before a trace session. - * @reset_buffer: finalises buffer mechanic after a trace session. * @update_buffer: update buffer pointers after a trace session. */ struct coresight_ops_sink { - int (*enable)(struct coresight_device *csdev, u32 mode); + int (*enable)(struct coresight_device *csdev, u32 mode, void *data); void (*disable)(struct coresight_device *csdev); void *(*alloc_buffer)(struct coresight_device *csdev, int cpu, void **pages, int nr_pages, bool overwrite); void (*free_buffer)(void *config); - int (*set_buffer)(struct coresight_device *csdev, - struct perf_output_handle *handle, - void *sink_config); - unsigned long (*reset_buffer)(struct coresight_device *csdev, - struct perf_output_handle *handle, - void *sink_config); - void (*update_buffer)(struct coresight_device *csdev, + unsigned long (*update_buffer)(struct coresight_device *csdev, struct perf_output_handle *handle, void *sink_config); }; @@ -270,6 +257,13 @@ extern int coresight_enable(struct coresight_device *csdev); extern void coresight_disable(struct coresight_device *csdev); extern int coresight_timeout(void __iomem *addr, u32 offset, int position, int value); + +extern int coresight_claim_device(void __iomem *base); +extern int coresight_claim_device_unlocked(void __iomem *base); + +extern void coresight_disclaim_device(void __iomem *base); +extern void coresight_disclaim_device_unlocked(void __iomem *base); + #else static inline struct coresight_device * coresight_register(struct coresight_desc *desc) { return NULL; } @@ -279,6 +273,19 @@ coresight_enable(struct coresight_device *csdev) { return -ENOSYS; } static inline void coresight_disable(struct coresight_device *csdev) {} static inline int coresight_timeout(void __iomem *addr, u32 offset, int position, int value) { return 1; } +static inline int coresight_claim_device_unlocked(void __iomem *base) +{ + return -EINVAL; +} + +static inline int coresight_claim_device(void __iomem *base) +{ + return -EINVAL; +} + +static inline void coresight_disclaim_device(void __iomem *base) {} +static inline void coresight_disclaim_device_unlocked(void __iomem *base) {} + #endif #ifdef CONFIG_OF diff --git a/include/linux/cpufeature.h b/include/linux/cpufeature.h index 986c06c88d81..84d3c81b5978 100644 --- a/include/linux/cpufeature.h +++ b/include/linux/cpufeature.h @@ -45,7 +45,7 @@ * 'asm/cpufeature.h' of your favorite architecture. 
*/ #define module_cpu_feature_match(x, __initfunc) \ -static struct cpu_feature const cpu_feature_match_ ## x[] = \ +static struct cpu_feature const __maybe_unused cpu_feature_match_ ## x[] = \ { { .feature = cpu_feature(x) }, { } }; \ MODULE_DEVICE_TABLE(cpu, cpu_feature_match_ ## x); \ \ diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h index 4325d6fdde9b..faed7a8977e8 100644 --- a/include/linux/cpuidle.h +++ b/include/linux/cpuidle.h @@ -81,6 +81,7 @@ struct cpuidle_device { unsigned int registered:1; unsigned int enabled:1; unsigned int use_deepest_state:1; + unsigned int poll_time_limit:1; unsigned int cpu; int last_residency; @@ -99,16 +100,6 @@ struct cpuidle_device { DECLARE_PER_CPU(struct cpuidle_device *, cpuidle_devices); DECLARE_PER_CPU(struct cpuidle_device, cpuidle_dev); -/** - * cpuidle_get_last_residency - retrieves the last state's residency time - * @dev: the target CPU - */ -static inline int cpuidle_get_last_residency(struct cpuidle_device *dev) -{ - return dev->last_residency; -} - - /**************************** * CPUIDLE DRIVER INTERFACE * ****************************/ diff --git a/include/linux/crash_dump.h b/include/linux/crash_dump.h index 3e4ba9d753c8..f774c5eb9e3c 100644 --- a/include/linux/crash_dump.h +++ b/include/linux/crash_dump.h @@ -26,6 +26,10 @@ extern int remap_oldmem_pfn_range(struct vm_area_struct *vma, extern ssize_t copy_oldmem_page(unsigned long, char *, size_t, unsigned long, int); +extern ssize_t copy_oldmem_page_encrypted(unsigned long pfn, char *buf, + size_t csize, unsigned long offset, + int userbuf); + void vmcore_cleanup(void); /* Architecture code defines this if there are other possible ELF diff --git a/include/linux/crc-t10dif.h b/include/linux/crc-t10dif.h index 1fe0cfcdea30..6bb0c0bf357b 100644 --- a/include/linux/crc-t10dif.h +++ b/include/linux/crc-t10dif.h @@ -6,6 +6,7 @@ #define CRC_T10DIF_DIGEST_SIZE 2 #define CRC_T10DIF_BLOCK_SIZE 1 +#define CRC_T10DIF_STRING "crct10dif" extern __u16 crc_t10dif_generic(__u16 crc, const unsigned char *buffer, size_t len); diff --git a/include/linux/crypto.h b/include/linux/crypto.h index e8839d3a7559..3634ad6fe202 100644 --- a/include/linux/crypto.h +++ b/include/linux/crypto.h @@ -454,6 +454,33 @@ struct compress_alg { * @cra_refcnt: internally used * @cra_destroy: internally used * + * All following statistics are for this crypto_alg + * @encrypt_cnt: number of encrypt requests + * @decrypt_cnt: number of decrypt requests + * @compress_cnt: number of compress requests + * @decompress_cnt: number of decompress requests + * @generate_cnt: number of RNG generate requests + * @seed_cnt: number of times the rng was seeded + * @hash_cnt: number of hash requests + * @sign_cnt: number of sign requests + * @setsecret_cnt: number of setsecret operations + * @generate_public_key_cnt: number of generate_public_key operations + * @verify_cnt: number of verify operations + * @compute_shared_secret_cnt: number of compute_shared_secret operations + * @encrypt_tlen: total data size handled by encrypt requests + * @decrypt_tlen: total data size handled by decrypt requests + * @compress_tlen: total data size handled by compress requests + * @decompress_tlen: total data size handled by decompress requests + * @generate_tlen: total data size of generated data by the RNG + * @hash_tlen: total data size hashed + * @akcipher_err_cnt: number of errors for akcipher requests + * @cipher_err_cnt: number of errors for cipher requests + * @compress_err_cnt: number of errors for compress requests + * @aead_err_cnt:
number of errors for aead requests + * @hash_err_cnt: number of errors for hash requests + * @rng_err_cnt: number of errors for rng requests + * @kpp_err_cnt: number of errors for kpp requests + * * The struct crypto_alg describes a generic Crypto API algorithm and is common * for all of the transformations. Any variable not documented here shall not * be used by a cipher implementation as it is internal to the Crypto API. @@ -487,6 +514,45 @@ struct crypto_alg { void (*cra_destroy)(struct crypto_alg *alg); struct module *cra_module; + + union { + atomic_t encrypt_cnt; + atomic_t compress_cnt; + atomic_t generate_cnt; + atomic_t hash_cnt; + atomic_t setsecret_cnt; + }; + union { + atomic64_t encrypt_tlen; + atomic64_t compress_tlen; + atomic64_t generate_tlen; + atomic64_t hash_tlen; + }; + union { + atomic_t akcipher_err_cnt; + atomic_t cipher_err_cnt; + atomic_t compress_err_cnt; + atomic_t aead_err_cnt; + atomic_t hash_err_cnt; + atomic_t rng_err_cnt; + atomic_t kpp_err_cnt; + }; + union { + atomic_t decrypt_cnt; + atomic_t decompress_cnt; + atomic_t seed_cnt; + atomic_t generate_public_key_cnt; + }; + union { + atomic64_t decrypt_tlen; + atomic64_t decompress_tlen; + }; + union { + atomic_t verify_cnt; + atomic_t compute_shared_secret_cnt; + }; + atomic_t sign_cnt; + } CRYPTO_MINALIGN_ATTR; /* @@ -907,6 +973,38 @@ static inline struct crypto_ablkcipher *crypto_ablkcipher_reqtfm( return __crypto_ablkcipher_cast(req->base.tfm); } +static inline void crypto_stat_ablkcipher_encrypt(struct ablkcipher_request *req, + int ret) +{ +#ifdef CONFIG_CRYPTO_STATS + struct ablkcipher_tfm *crt = + crypto_ablkcipher_crt(crypto_ablkcipher_reqtfm(req)); + + if (ret && ret != -EINPROGRESS && ret != -EBUSY) { + atomic_inc(&crt->base->base.__crt_alg->cipher_err_cnt); + } else { + atomic_inc(&crt->base->base.__crt_alg->encrypt_cnt); + atomic64_add(req->nbytes, &crt->base->base.__crt_alg->encrypt_tlen); + } +#endif +} + +static inline void crypto_stat_ablkcipher_decrypt(struct ablkcipher_request *req, + int ret) +{ +#ifdef CONFIG_CRYPTO_STATS + struct ablkcipher_tfm *crt = + crypto_ablkcipher_crt(crypto_ablkcipher_reqtfm(req)); + + if (ret && ret != -EINPROGRESS && ret != -EBUSY) { + atomic_inc(&crt->base->base.__crt_alg->cipher_err_cnt); + } else { + atomic_inc(&crt->base->base.__crt_alg->decrypt_cnt); + atomic64_add(req->nbytes, &crt->base->base.__crt_alg->decrypt_tlen); + } +#endif +} + /** * crypto_ablkcipher_encrypt() - encrypt plaintext * @req: reference to the ablkcipher_request handle that holds all information @@ -922,7 +1020,11 @@ static inline int crypto_ablkcipher_encrypt(struct ablkcipher_request *req) { struct ablkcipher_tfm *crt = crypto_ablkcipher_crt(crypto_ablkcipher_reqtfm(req)); - return crt->encrypt(req); + int ret; + + ret = crt->encrypt(req); + crypto_stat_ablkcipher_encrypt(req, ret); + return ret; } /** @@ -940,7 +1042,11 @@ static inline int crypto_ablkcipher_decrypt(struct ablkcipher_request *req) { struct ablkcipher_tfm *crt = crypto_ablkcipher_crt(crypto_ablkcipher_reqtfm(req)); - return crt->decrypt(req); + int ret; + + ret = crt->decrypt(req); + crypto_stat_ablkcipher_decrypt(req, ret); + return ret; } /** diff --git a/include/linux/cuda.h b/include/linux/cuda.h index 056867f09a01..45bfe9d61271 100644 --- a/include/linux/cuda.h +++ b/include/linux/cuda.h @@ -8,6 +8,7 @@ #ifndef _LINUX_CUDA_H #define _LINUX_CUDA_H +#include <linux/rtc.h> #include <uapi/linux/cuda.h> @@ -16,4 +17,7 @@ extern int cuda_request(struct adb_request *req, void (*done)(struct adb_request
*), int nbytes, ...); extern void cuda_poll(void); +extern time64_t cuda_get_time(void); +extern int cuda_set_rtc_time(struct rtc_time *tm); + #endif /* _LINUX_CUDA_H */ diff --git a/include/linux/debug_locks.h b/include/linux/debug_locks.h index 120225e9a366..257ab3c92cb8 100644 --- a/include/linux/debug_locks.h +++ b/include/linux/debug_locks.h @@ -8,8 +8,8 @@ struct task_struct; -extern int debug_locks; -extern int debug_locks_silent; +extern int debug_locks __read_mostly; +extern int debug_locks_silent __read_mostly; static inline int __debug_locks_off(void) diff --git a/include/linux/delayacct.h b/include/linux/delayacct.h index 31c865d1842e..577d1b25fccd 100644 --- a/include/linux/delayacct.h +++ b/include/linux/delayacct.h @@ -57,7 +57,12 @@ struct task_delay_info { u64 freepages_start; u64 freepages_delay; /* wait for memory reclaim */ + + u64 thrashing_start; + u64 thrashing_delay; /* wait for thrashing page */ + u32 freepages_count; /* total count of memory reclaim */ + u32 thrashing_count; /* total count of thrash waits */ }; #endif @@ -76,6 +81,8 @@ extern int __delayacct_add_tsk(struct taskstats *, struct task_struct *); extern __u64 __delayacct_blkio_ticks(struct task_struct *); extern void __delayacct_freepages_start(void); extern void __delayacct_freepages_end(void); +extern void __delayacct_thrashing_start(void); +extern void __delayacct_thrashing_end(void); static inline int delayacct_is_task_waiting_on_io(struct task_struct *p) { @@ -156,6 +163,18 @@ static inline void delayacct_freepages_end(void) __delayacct_freepages_end(); } +static inline void delayacct_thrashing_start(void) +{ + if (current->delays) + __delayacct_thrashing_start(); +} + +static inline void delayacct_thrashing_end(void) +{ + if (current->delays) + __delayacct_thrashing_end(); +} + #else static inline void delayacct_set_flag(int flag) {} @@ -182,6 +201,10 @@ static inline void delayacct_freepages_start(void) {} static inline void delayacct_freepages_end(void) {} +static inline void delayacct_thrashing_start(void) +{} +static inline void delayacct_thrashing_end(void) +{} #endif /* CONFIG_TASK_DELAY_ACCT */ diff --git a/include/linux/devfreq.h b/include/linux/devfreq.h index 3aae5b3af87c..e4963b0f45da 100644 --- a/include/linux/devfreq.h +++ b/include/linux/devfreq.h @@ -198,6 +198,14 @@ extern void devm_devfreq_remove_device(struct device *dev, extern int devfreq_suspend_device(struct devfreq *devfreq); extern int devfreq_resume_device(struct devfreq *devfreq); +/** + * update_devfreq() - Reevaluate the device and configure frequency + * @devfreq: the devfreq device + * + * Note: devfreq->lock must be held + */ +extern int update_devfreq(struct devfreq *devfreq); + /* Helper functions for devfreq user device driver with OPP. 
*/ extern struct dev_pm_opp *devfreq_recommended_opp(struct device *dev, unsigned long *freq, u32 flags); diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h index 6fb0808e87c8..e528baebad69 100644 --- a/include/linux/device-mapper.h +++ b/include/linux/device-mapper.h @@ -26,9 +26,8 @@ enum dm_queue_mode { DM_TYPE_NONE = 0, DM_TYPE_BIO_BASED = 1, DM_TYPE_REQUEST_BASED = 2, - DM_TYPE_MQ_REQUEST_BASED = 3, - DM_TYPE_DAX_BIO_BASED = 4, - DM_TYPE_NVME_BIO_BASED = 5, + DM_TYPE_DAX_BIO_BASED = 3, + DM_TYPE_NVME_BIO_BASED = 4, }; typedef enum { STATUSTYPE_INFO, STATUSTYPE_TABLE } status_type_t; @@ -92,6 +91,11 @@ typedef int (*dm_message_fn) (struct dm_target *ti, unsigned argc, char **argv, typedef int (*dm_prepare_ioctl_fn) (struct dm_target *ti, struct block_device **bdev); +typedef int (*dm_report_zones_fn) (struct dm_target *ti, sector_t sector, + struct blk_zone *zones, + unsigned int *nr_zones, + gfp_t gfp_mask); + /* * These iteration functions are typically used to check (and combine) * properties of underlying devices. @@ -180,6 +184,9 @@ struct target_type { dm_status_fn status; dm_message_fn message; dm_prepare_ioctl_fn prepare_ioctl; +#ifdef CONFIG_BLK_DEV_ZONED + dm_report_zones_fn report_zones; +#endif dm_busy_fn busy; dm_iterate_devices_fn iterate_devices; dm_io_hints_fn io_hints; @@ -420,8 +427,8 @@ struct gendisk *dm_disk(struct mapped_device *md); int dm_suspended(struct dm_target *ti); int dm_noflush_suspending(struct dm_target *ti); void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors); -void dm_remap_zone_report(struct dm_target *ti, struct bio *bio, - sector_t start); +void dm_remap_zone_report(struct dm_target *ti, sector_t start, + struct blk_zone *zones, unsigned int *nr_zones); union map_info *dm_get_rq_mapinfo(struct request *rq); struct queue_limits *dm_get_queue_limits(struct mapped_device *md); @@ -490,6 +497,7 @@ sector_t dm_table_get_size(struct dm_table *t); unsigned int dm_table_get_num_targets(struct dm_table *t); fmode_t dm_table_get_mode(struct dm_table *t); struct mapped_device *dm_table_get_md(struct dm_table *t); +const char *dm_table_device_name(struct dm_table *t); /* * Trigger an event. 
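The device-mapper hunk above introduces a per-target dm_report_zones_fn hook and reworks dm_remap_zone_report() to operate on a caller-supplied array of struct blk_zone rather than a bio. A minimal sketch (not taken from this patch) of how a zoned pass-through target could implement the new hook follows; the target structure, its dev/start fields and the name zoned_passthrough_report_zones() are hypothetical, and the call assumes the (bdev, sector, zones, nr_zones, gfp_mask) form of blkdev_report_zones() used by this kernel generation.

#include <linux/device-mapper.h>
#include <linux/blkdev.h>

/* Hypothetical per-target context; only 'dev' and 'start' matter here. */
struct zoned_passthrough_target {
	struct dm_dev *dev;	/* underlying zoned device */
	sector_t start;		/* start sector of the mapped region on that device */
};

static int zoned_passthrough_report_zones(struct dm_target *ti, sector_t sector,
					  struct blk_zone *zones,
					  unsigned int *nr_zones,
					  gfp_t gfp_mask)
{
	struct zoned_passthrough_target *zt = ti->private;
	int ret;

	/* Query the underlying device for the zones backing 'sector'. */
	ret = blkdev_report_zones(zt->dev->bdev,
				  zt->start + dm_target_offset(ti, sector),
				  zones, nr_zones, gfp_mask);
	if (ret)
		return ret;

	/* Translate zone start/write pointer back into the target's sector space. */
	dm_remap_zone_report(ti, zt->start, zones, nr_zones);
	return 0;
}

The hook would then be wired up as .report_zones in the target's struct target_type, under the same CONFIG_BLK_DEV_ZONED guard that the header change adds.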
diff --git a/include/linux/device.h b/include/linux/device.h index 8f882549edee..1b25c7a43f4c 100644 --- a/include/linux/device.h +++ b/include/linux/device.h @@ -55,6 +55,8 @@ struct bus_attribute { struct bus_attribute bus_attr_##_name = __ATTR_RW(_name) #define BUS_ATTR_RO(_name) \ struct bus_attribute bus_attr_##_name = __ATTR_RO(_name) +#define BUS_ATTR_WO(_name) \ + struct bus_attribute bus_attr_##_name = __ATTR_WO(_name) extern int __must_check bus_create_file(struct bus_type *, struct bus_attribute *); @@ -692,8 +694,10 @@ static inline void *devm_kcalloc(struct device *dev, { return devm_kmalloc_array(dev, n, size, flags | __GFP_ZERO); } -extern void devm_kfree(struct device *dev, void *p); +extern void devm_kfree(struct device *dev, const void *p); extern char *devm_kstrdup(struct device *dev, const char *s, gfp_t gfp) __malloc; +extern const char *devm_kstrdup_const(struct device *dev, + const char *s, gfp_t gfp); extern void *devm_kmemdup(struct device *dev, const void *src, size_t len, gfp_t gfp); @@ -774,6 +778,30 @@ void device_connection_add(struct device_connection *con); void device_connection_remove(struct device_connection *con); /** + * device_connections_add - Add multiple device connections at once + * @cons: Zero terminated array of device connection descriptors + */ +static inline void device_connections_add(struct device_connection *cons) +{ + struct device_connection *c; + + for (c = cons; c->endpoint[0]; c++) + device_connection_add(c); +} + +/** + * device_connections_remove - Remove multiple device connections at once + * @cons: Zero terminated array of device connection descriptors + */ +static inline void device_connections_remove(struct device_connection *cons) +{ + struct device_connection *c; + + for (c = cons; c->endpoint[0]; c++) + device_connection_remove(c); +} + +/** * enum device_link_state - Device link states. * @DL_STATE_NONE: The presence of the drivers is not being tracked. * @DL_STATE_DORMANT: None of the supplier/consumer drivers is present. @@ -927,6 +955,8 @@ struct dev_links_info { * @offline: Set after successful invocation of bus type's .offline(). * @of_node_reused: Set if the device-tree node is shared with an ancestor * device. + * @dma_coherent: this particular device is dma coherent, even if the + * architecture supports non-coherent devices. * * At the lowest level, every device in a Linux system is represented by an * instance of struct device. 
The device structure contains the information @@ -1016,6 +1046,11 @@ struct device { bool offline_disabled:1; bool offline:1; bool of_node_reused:1; +#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \ + defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \ + defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL) + bool dma_coherent:1; +#endif }; static inline struct device *kobj_to_dev(struct kobject *kobj) diff --git a/include/linux/dma-debug.h b/include/linux/dma-debug.h index a785f2507159..30213adbb6b9 100644 --- a/include/linux/dma-debug.h +++ b/include/linux/dma-debug.h @@ -32,6 +32,9 @@ extern void dma_debug_add_bus(struct bus_type *bus); extern int dma_debug_resize_entries(u32 num_entries); +extern void debug_dma_map_single(struct device *dev, const void *addr, + unsigned long len); + extern void debug_dma_map_page(struct device *dev, struct page *page, size_t offset, size_t size, int direction, dma_addr_t dma_addr, @@ -103,6 +106,11 @@ static inline int dma_debug_resize_entries(u32 num_entries) return 0; } +static inline void debug_dma_map_single(struct device *dev, const void *addr, + unsigned long len) +{ +} + static inline void debug_dma_map_page(struct device *dev, struct page *page, size_t offset, size_t size, int direction, dma_addr_t dma_addr, diff --git a/include/linux/dma-direct.h b/include/linux/dma-direct.h index 8d9f33febde5..bd73e7a91410 100644 --- a/include/linux/dma-direct.h +++ b/include/linux/dma-direct.h @@ -5,6 +5,8 @@ #include <linux/dma-mapping.h> #include <linux/mem_encrypt.h> +#define DIRECT_MAPPING_ERROR 0 + #ifdef CONFIG_ARCH_HAS_PHYS_TO_DMA #include <asm/dma-direct.h> #else @@ -27,7 +29,8 @@ static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size) if (!dev->dma_mask) return false; - return addr + size - 1 <= *dev->dma_mask; + return addr + size - 1 <= + min_not_zero(*dev->dma_mask, dev->bus_dma_mask); } #endif /* !CONFIG_ARCH_HAS_PHYS_TO_DMA */ @@ -55,10 +58,15 @@ static inline void dma_mark_clean(void *addr, size_t size) } #endif /* CONFIG_ARCH_HAS_DMA_MARK_CLEAN */ +u64 dma_direct_get_required_mask(struct device *dev); void *dma_direct_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs); void dma_direct_free(struct device *dev, size_t size, void *cpu_addr, dma_addr_t dma_addr, unsigned long attrs); +void *dma_direct_alloc_pages(struct device *dev, size_t size, + dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs); +void dma_direct_free_pages(struct device *dev, size_t size, void *cpu_addr, + dma_addr_t dma_addr, unsigned long attrs); dma_addr_t dma_direct_map_page(struct device *dev, struct page *page, unsigned long offset, size_t size, enum dma_data_direction dir, unsigned long attrs); diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h index 1db6a6b46d0d..15bd41447025 100644 --- a/include/linux/dma-mapping.h +++ b/include/linux/dma-mapping.h @@ -130,13 +130,10 @@ struct dma_map_ops { enum dma_data_direction direction); int (*mapping_error)(struct device *dev, dma_addr_t dma_addr); int (*dma_supported)(struct device *dev, u64 mask); -#ifdef ARCH_HAS_DMA_GET_REQUIRED_MASK u64 (*get_required_mask)(struct device *dev); -#endif }; extern const struct dma_map_ops dma_direct_ops; -extern const struct dma_map_ops dma_noncoherent_ops; extern const struct dma_map_ops dma_virt_ops; #define DMA_BIT_MASK(n) (((n) == 64) ? 
~0ULL : ((1ULL<<(n))-1)) @@ -232,6 +229,7 @@ static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr, dma_addr_t addr; BUG_ON(!valid_dma_direction(dir)); + debug_dma_map_single(dev, ptr, size); addr = ops->map_page(dev, virt_to_page(ptr), offset_in_page(ptr), size, dir, attrs); @@ -445,7 +443,8 @@ dma_cache_sync(struct device *dev, void *vaddr, size_t size, } extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma, - void *cpu_addr, dma_addr_t dma_addr, size_t size); + void *cpu_addr, dma_addr_t dma_addr, size_t size, + unsigned long attrs); void *dma_common_contiguous_remap(struct page *page, size_t size, unsigned long vm_flags, @@ -477,14 +476,14 @@ dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma, void *cpu_addr, BUG_ON(!ops); if (ops->mmap) return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs); - return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size); + return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size, attrs); } #define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, 0) int -dma_common_get_sgtable(struct device *dev, struct sg_table *sgt, - void *cpu_addr, dma_addr_t dma_addr, size_t size); +dma_common_get_sgtable(struct device *dev, struct sg_table *sgt, void *cpu_addr, + dma_addr_t dma_addr, size_t size, unsigned long attrs); static inline int dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt, void *cpu_addr, @@ -496,7 +495,8 @@ dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt, void *cpu_addr, if (ops->get_sgtable) return ops->get_sgtable(dev, sgt, cpu_addr, dma_addr, size, attrs); - return dma_common_get_sgtable(dev, sgt, cpu_addr, dma_addr, size); + return dma_common_get_sgtable(dev, sgt, cpu_addr, dma_addr, size, + attrs); } #define dma_get_sgtable(d, t, v, h, s) dma_get_sgtable_attrs(d, t, v, h, s, 0) @@ -558,9 +558,11 @@ static inline void dma_free_attrs(struct device *dev, size_t size, } static inline void *dma_alloc_coherent(struct device *dev, size_t size, - dma_addr_t *dma_handle, gfp_t flag) + dma_addr_t *dma_handle, gfp_t gfp) { - return dma_alloc_attrs(dev, size, dma_handle, flag, 0); + + return dma_alloc_attrs(dev, size, dma_handle, gfp, + (gfp & __GFP_NOWARN) ? 
DMA_ATTR_NO_WARN : 0); } static inline void dma_free_coherent(struct device *dev, size_t size, @@ -753,18 +755,6 @@ dma_mark_declared_memory_occupied(struct device *dev, } #endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */ -#ifdef CONFIG_HAS_DMA -int dma_configure(struct device *dev); -void dma_deconfigure(struct device *dev); -#else -static inline int dma_configure(struct device *dev) -{ - return 0; -} - -static inline void dma_deconfigure(struct device *dev) {} -#endif - /* * Managed DMA API */ @@ -806,8 +796,12 @@ static inline void dmam_release_declared_memory(struct device *dev) static inline void *dma_alloc_wc(struct device *dev, size_t size, dma_addr_t *dma_addr, gfp_t gfp) { - return dma_alloc_attrs(dev, size, dma_addr, gfp, - DMA_ATTR_WRITE_COMBINE); + unsigned long attrs = DMA_ATTR_NO_WARN; + + if (gfp & __GFP_NOWARN) + attrs |= DMA_ATTR_NO_WARN; + + return dma_alloc_attrs(dev, size, dma_addr, gfp, attrs); } #ifndef dma_alloc_writecombine #define dma_alloc_writecombine dma_alloc_wc diff --git a/include/linux/dma-noncoherent.h b/include/linux/dma-noncoherent.h index a0aa00cc909d..9051b055beec 100644 --- a/include/linux/dma-noncoherent.h +++ b/include/linux/dma-noncoherent.h @@ -4,18 +4,35 @@ #include <linux/dma-mapping.h> +#ifdef CONFIG_ARCH_HAS_DMA_COHERENCE_H +#include <asm/dma-coherence.h> +#elif defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \ + defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \ + defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL) +static inline bool dev_is_dma_coherent(struct device *dev) +{ + return dev->dma_coherent; +} +#else +static inline bool dev_is_dma_coherent(struct device *dev) +{ + return true; +} +#endif /* CONFIG_ARCH_HAS_DMA_COHERENCE_H */ + void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs); void arch_dma_free(struct device *dev, size_t size, void *cpu_addr, dma_addr_t dma_addr, unsigned long attrs); +long arch_dma_coherent_to_pfn(struct device *dev, void *cpu_addr, + dma_addr_t dma_addr); -#ifdef CONFIG_DMA_NONCOHERENT_MMAP -int arch_dma_mmap(struct device *dev, struct vm_area_struct *vma, - void *cpu_addr, dma_addr_t dma_addr, size_t size, +#ifdef CONFIG_ARCH_HAS_DMA_MMAP_PGPROT +pgprot_t arch_dma_mmap_pgprot(struct device *dev, pgprot_t prot, unsigned long attrs); #else -#define arch_dma_mmap NULL -#endif /* CONFIG_DMA_NONCOHERENT_MMAP */ +# define arch_dma_mmap_pgprot(dev, prot, attrs) pgprot_noncached(prot) +#endif #ifdef CONFIG_DMA_NONCOHERENT_CACHE_SYNC void arch_dma_cache_sync(struct device *dev, void *vaddr, size_t size, diff --git a/include/linux/dma/sprd-dma.h b/include/linux/dma/sprd-dma.h index b0115e340fbc..b42b80e52cc2 100644 --- a/include/linux/dma/sprd-dma.h +++ b/include/linux/dma/sprd-dma.h @@ -58,4 +58,73 @@ enum sprd_dma_int_type { SPRD_DMA_CFGERR_INT, }; +/* + * struct sprd_dma_linklist - DMA link-list address structure + * @virt_addr: link-list virtual address to configure link-list node + * @phy_addr: link-list physical address to link DMA transfer + * + * The Spreadtrum DMA controller supports the link-list mode, that means slaves + * can supply several groups configurations (each configuration represents one + * DMA transfer) saved in memory, and DMA controller will link these groups + * configurations by writing the physical address of each configuration into the + * link-list register. 
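For callers, the dma_alloc_coherent() change above means that __GFP_NOWARN in the gfp mask is now translated into DMA_ATTR_NO_WARN (dma_alloc_wc() gets the same __GFP_NOWARN handling). A minimal caller sketch; try_alloc_bounce() is a hypothetical helper, not part of the header:

#include <linux/dma-mapping.h>

/* Hypothetical driver helper: a failed attempt at an optional bounce
 * buffer should not warn, so pass __GFP_NOWARN and let the DMA core
 * turn it into DMA_ATTR_NO_WARN. */
static void *try_alloc_bounce(struct device *dev, size_t size,
			      dma_addr_t *handle)
{
	return dma_alloc_coherent(dev, size, handle,
				  GFP_KERNEL | __GFP_NOWARN);
}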
+ * + * Just as shown below, the link-list pointer register will be pointed to the + * physical address of 'configuration 1', and the 'configuration 1' link-list + * pointer will be pointed to 'configuration 2', and so on. + * Once trigger the DMA transfer, the DMA controller will load 'configuration + * 1' to its registers automatically, after 'configuration 1' transaction is + * done, DMA controller will load 'configuration 2' automatically, until all + * DMA transactions are done. + * + * Note: The last link-list pointer should point to the physical address + * of 'configuration 1', which can avoid DMA controller loads incorrect + * configuration when the last configuration transaction is done. + * + * DMA controller linklist memory + * ====================== ----------------------- + *| | | configuration 1 |<--- + *| DMA controller | ------->| | | + *| | | | | | + *| | | | | | + *| | | | | | + *| linklist pointer reg |---- ----| linklist pointer | | + * ====================== | ----------------------- | + * | | + * | ----------------------- | + * | | configuration 2 | | + * --->| | | + * | | | + * | | | + * | | | + * ----| linklist pointer | | + * | ----------------------- | + * | | + * | ----------------------- | + * | | configuration 3 | | + * --->| | | + * | | | + * | . | | + * . | + * . | + * . | + * | . | + * | ----------------------- | + * | | configuration n | | + * --->| | | + * | | | + * | | | + * | | | + * | linklist pointer |---- + * ----------------------- + * + * To support the link-list mode, DMA slaves should allocate one segment memory + * from always-on IRAM or dma coherent memory to store these groups of DMA + * configuration, and pass the virtual and physical address to DMA controller. + */ +struct sprd_dma_linklist { + unsigned long virt_addr; + phys_addr_t phy_addr; +}; + #endif diff --git a/include/linux/dns_resolver.h b/include/linux/dns_resolver.h index 6ac3cad9aef1..34a744a1bafc 100644 --- a/include/linux/dns_resolver.h +++ b/include/linux/dns_resolver.h @@ -24,11 +24,9 @@ #ifndef _LINUX_DNS_RESOLVER_H #define _LINUX_DNS_RESOLVER_H -#ifdef __KERNEL__ +#include <uapi/linux/dns_resolver.h> extern int dns_query(const char *type, const char *name, size_t namelen, const char *options, char **_result, time64_t *_expiry); -#endif /* KERNEL */ - #endif /* _LINUX_DNS_RESOLVER_H */ diff --git a/include/linux/edac.h b/include/linux/edac.h index bffb97828ed6..1d0c9ea8825d 100644 --- a/include/linux/edac.h +++ b/include/linux/edac.h @@ -17,6 +17,7 @@ #include <linux/completion.h> #include <linux/workqueue.h> #include <linux/debugfs.h> +#include <linux/numa.h> #define EDAC_DEVICE_NAME_LEN 31 @@ -451,6 +452,8 @@ struct dimm_info { u32 nr_pages; /* number of pages on this dimm */ unsigned csrow, cschannel; /* Points to the old API data */ + + u16 smbios_handle; /* Handle for SMBIOS type 17 */ }; /** @@ -670,6 +673,6 @@ struct mem_ctl_info { /* * Maximum number of memory controllers in the coherent fabric. 
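A slave driver using the sprd-dma link-list mode above ends up with something along these lines; sprd_client_prepare_linklist() is an invented name, and the sketch assumes the DMA address returned by dma_alloc_coherent() is usable as the link-list physical address on this SoC:

#include <linux/dma-mapping.h>
#include <linux/dma/sprd-dma.h>

/* Hypothetical slave-driver helper: put the node configurations in DMA
 * coherent memory and describe them with struct sprd_dma_linklist. */
static int sprd_client_prepare_linklist(struct device *dev, size_t cfg_size,
					struct sprd_dma_linklist *ll)
{
	dma_addr_t phys;
	void *virt = dma_alloc_coherent(dev, cfg_size, &phys, GFP_KERNEL);

	if (!virt)
		return -ENOMEM;

	ll->virt_addr = (unsigned long)virt;	/* CPU view of the nodes */
	ll->phy_addr = phys;			/* what the controller follows */
	return 0;
}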
*/ -#define EDAC_MAX_MCS 16 +#define EDAC_MAX_MCS 2 * MAX_NUMNODES #endif diff --git a/include/linux/efi.h b/include/linux/efi.h index 401e4b254e30..845174e113ce 100644 --- a/include/linux/efi.h +++ b/include/linux/efi.h @@ -672,6 +672,7 @@ void efi_native_runtime_setup(void); #define LINUX_EFI_LOADER_ENTRY_GUID EFI_GUID(0x4a67b082, 0x0a4c, 0x41cf, 0xb6, 0xc7, 0x44, 0x0b, 0x29, 0xbb, 0x8c, 0x4f) #define LINUX_EFI_RANDOM_SEED_TABLE_GUID EFI_GUID(0x1ce1e5bc, 0x7ceb, 0x42f2, 0x81, 0xe5, 0x8a, 0xad, 0xf1, 0x80, 0xf5, 0x7b) #define LINUX_EFI_TPM_EVENT_LOG_GUID EFI_GUID(0xb7799cb0, 0xeca2, 0x4943, 0x96, 0x67, 0x1f, 0xae, 0x07, 0xb7, 0x47, 0xfa) +#define LINUX_EFI_MEMRESERVE_TABLE_GUID EFI_GUID(0x888eb0c6, 0x8ede, 0x4ff5, 0xa8, 0xf0, 0x9a, 0xee, 0x5c, 0xb9, 0x77, 0xc2) typedef struct { efi_guid_t guid; @@ -957,6 +958,7 @@ extern struct efi { unsigned long mem_attr_table; /* memory attributes table */ unsigned long rng_seed; /* UEFI firmware random seed */ unsigned long tpm_log; /* TPM2 Event Log table */ + unsigned long mem_reserve; /* Linux EFI memreserve table */ efi_get_time_t *get_time; efi_set_time_t *set_time; efi_get_wakeup_time_t *get_wakeup_time; @@ -1041,6 +1043,7 @@ extern int __init efi_uart_console_only (void); extern u64 efi_mem_desc_end(efi_memory_desc_t *md); extern int efi_mem_desc_lookup(u64 phys_addr, efi_memory_desc_t *out_md); extern void efi_mem_reserve(phys_addr_t addr, u64 size); +extern int efi_mem_reserve_persistent(phys_addr_t addr, u64 size); extern void efi_initialize_iomem_resources(struct resource *code_resource, struct resource *data_resource, struct resource *bss_resource); extern void efi_reserve_boot_services(void); @@ -1659,7 +1662,55 @@ struct linux_efi_tpm_eventlog { extern int efi_tpm_eventlog_init(void); +/* + * efi_runtime_service() function identifiers. + * "NONE" is used by efi_recover_from_page_fault() to check if the page + * fault happened while executing an efi runtime service. 
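The new efi_mem_reserve_persistent() above complements efi_mem_reserve(): it is intended to record the range in the Linux-specific memreserve table (LINUX_EFI_MEMRESERVE_TABLE_GUID) so the reservation also survives kexec. A hedged sketch of a caller; reserve_fw_region() is a made-up name:

#include <linux/efi.h>

/* Hypothetical caller: keep a firmware-used region out of the kernel's
 * hands both in this kernel and in any kexec'd successor. */
static int reserve_fw_region(phys_addr_t addr, u64 size)
{
	efi_mem_reserve(addr, size);			/* current kernel's memmap */
	return efi_mem_reserve_persistent(addr, size);	/* memreserve table */
}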
+ */ +enum efi_rts_ids { + NONE, + GET_TIME, + SET_TIME, + GET_WAKEUP_TIME, + SET_WAKEUP_TIME, + GET_VARIABLE, + GET_NEXT_VARIABLE, + SET_VARIABLE, + QUERY_VARIABLE_INFO, + GET_NEXT_HIGH_MONO_COUNT, + RESET_SYSTEM, + UPDATE_CAPSULE, + QUERY_CAPSULE_CAPS, +}; + +/* + * efi_runtime_work: Details of EFI Runtime Service work + * @arg<1-5>: EFI Runtime Service function arguments + * @status: Status of executing EFI Runtime Service + * @efi_rts_id: EFI Runtime Service function identifier + * @efi_rts_comp: Struct used for handling completions + */ +struct efi_runtime_work { + void *arg1; + void *arg2; + void *arg3; + void *arg4; + void *arg5; + efi_status_t status; + struct work_struct work; + enum efi_rts_ids efi_rts_id; + struct completion efi_rts_comp; +}; + +extern struct efi_runtime_work efi_rts_work; + /* Workqueue to queue EFI Runtime Services */ extern struct workqueue_struct *efi_rts_wq; +struct linux_efi_memreserve { + phys_addr_t next; + phys_addr_t base; + phys_addr_t size; +}; + #endif /* _LINUX_EFI_H */ diff --git a/include/linux/elevator.h b/include/linux/elevator.h index a02deea30185..015bb59c0331 100644 --- a/include/linux/elevator.h +++ b/include/linux/elevator.h @@ -111,7 +111,7 @@ struct elevator_mq_ops { void (*insert_requests)(struct blk_mq_hw_ctx *, struct list_head *, bool); struct request *(*dispatch_request)(struct blk_mq_hw_ctx *); bool (*has_work)(struct blk_mq_hw_ctx *); - void (*completed_request)(struct request *); + void (*completed_request)(struct request *, u64); void (*started_request)(struct request *); void (*requeue_request)(struct request *); struct request *(*former_request)(struct request_queue *, struct request *); diff --git a/include/linux/elfcore-compat.h b/include/linux/elfcore-compat.h index b5f2efdd05e0..7a37f4ce9fd2 100644 --- a/include/linux/elfcore-compat.h +++ b/include/linux/elfcore-compat.h @@ -27,10 +27,10 @@ struct compat_elf_prstatus compat_pid_t pr_ppid; compat_pid_t pr_pgrp; compat_pid_t pr_sid; - struct compat_timeval pr_utime; - struct compat_timeval pr_stime; - struct compat_timeval pr_cutime; - struct compat_timeval pr_cstime; + struct old_timeval32 pr_utime; + struct old_timeval32 pr_stime; + struct old_timeval32 pr_cutime; + struct old_timeval32 pr_cstime; compat_elf_gregset_t pr_reg; #ifdef CONFIG_BINFMT_ELF_FDPIC compat_ulong_t pr_exec_fdpic_loadmap; diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h index f8a2245b70ac..afd9596ce636 100644 --- a/include/linux/ethtool.h +++ b/include/linux/ethtool.h @@ -183,14 +183,6 @@ bool ethtool_convert_link_mode_to_legacy_u32(u32 *legacy_u32, /** * struct ethtool_ops - optional netdev operations - * @get_settings: DEPRECATED, use %get_link_ksettings/%set_link_ksettings - * API. Get various device settings including Ethernet link - * settings. The @cmd parameter is expected to have been cleared - * before get_settings is called. Returns a negative error code - * or zero. - * @set_settings: DEPRECATED, use %get_link_ksettings/%set_link_ksettings - * API. Set various device settings including Ethernet link - * settings. Returns a negative error code or zero. * @get_drvinfo: Report driver/device information. Should only set the * @driver, @version, @fw_version and @bus_info fields. If not * implemented, the @driver and @bus_info fields will be filled in @@ -297,19 +289,16 @@ bool ethtool_convert_link_mode_to_legacy_u32(u32 *legacy_u32, * a TX queue has this number, return -EINVAL. If only a RX queue or a TX * queue has this number, ignore the inapplicable fields. 
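The efi_rts_work/efi_rts_wq pieces above let runtime services be executed from a workqueue; the real dispatcher lives in drivers/firmware/efi/runtime-wrappers.c, but a stripped-down sketch of its shape, using only the fields shown in struct efi_runtime_work (efi_call_rts_sketch() is an invented name), could look like this:

#include <linux/efi.h>
#include <linux/completion.h>
#include <linux/workqueue.h>

static void efi_call_rts_sketch(struct work_struct *work)
{
	struct efi_runtime_work *rts =
		container_of(work, struct efi_runtime_work, work);

	switch (rts->efi_rts_id) {
	case GET_TIME:
		rts->status = efi.get_time((efi_time_t *)rts->arg1,
					   (efi_time_cap_t *)rts->arg2);
		break;
	default:
		rts->status = EFI_UNSUPPORTED;
		break;
	}
	/* wake the task that queued this request on efi_rts_wq */
	complete(&rts->efi_rts_comp);
}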
* Returns a negative error code or zero. - * @get_link_ksettings: When defined, takes precedence over the - * %get_settings method. Get various device settings - * including Ethernet link settings. The %cmd and - * %link_mode_masks_nwords fields should be ignored (use - * %__ETHTOOL_LINK_MODE_MASK_NBITS instead of the latter), any - * change to them will be overwritten by kernel. Returns a - * negative error code or zero. - * @set_link_ksettings: When defined, takes precedence over the - * %set_settings method. Set various device settings including - * Ethernet link settings. The %cmd and %link_mode_masks_nwords - * fields should be ignored (use %__ETHTOOL_LINK_MODE_MASK_NBITS - * instead of the latter), any change to them will be overwritten - * by kernel. Returns a negative error code or zero. + * @get_link_ksettings: Get various device settings including Ethernet link + * settings. The %cmd and %link_mode_masks_nwords fields should be + * ignored (use %__ETHTOOL_LINK_MODE_MASK_NBITS instead of the latter), + * any change to them will be overwritten by kernel. Returns a negative + * error code or zero. + * @set_link_ksettings: Set various device settings including Ethernet link + * settings. The %cmd and %link_mode_masks_nwords fields should be + * ignored (use %__ETHTOOL_LINK_MODE_MASK_NBITS instead of the latter), + * any change to them will be overwritten by kernel. Returns a negative + * error code or zero. * @get_fecparam: Get the network device Forward Error Correction parameters. * @set_fecparam: Set the network device Forward Error Correction parameters. * @get_ethtool_phy_stats: Return extended statistics about the PHY device. @@ -329,8 +318,6 @@ bool ethtool_convert_link_mode_to_legacy_u32(u32 *legacy_u32, * of the generic netdev features interface. */ struct ethtool_ops { - int (*get_settings)(struct net_device *, struct ethtool_cmd *); - int (*set_settings)(struct net_device *, struct ethtool_cmd *); void (*get_drvinfo)(struct net_device *, struct ethtool_drvinfo *); int (*get_regs_len)(struct net_device *); void (*get_regs)(struct net_device *, struct ethtool_regs *, void *); diff --git a/include/linux/f2fs_fs.h b/include/linux/f2fs_fs.h index f70f8ac9c4f4..d7711048ef93 100644 --- a/include/linux/f2fs_fs.h +++ b/include/linux/f2fs_fs.h @@ -1,12 +1,9 @@ +// SPDX-License-Identifier: GPL-2.0 /** * include/linux/f2fs_fs.h * * Copyright (c) 2012 Samsung Electronics Co., Ltd. * http://www.samsung.com/ - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
*/ #ifndef _LINUX_F2FS_FS_H #define _LINUX_F2FS_FS_H @@ -112,12 +109,15 @@ struct f2fs_super_block { struct f2fs_device devs[MAX_DEVICES]; /* device list */ __le32 qf_ino[F2FS_MAX_QUOTAS]; /* quota inode numbers */ __u8 hot_ext_count; /* # of hot file extension */ - __u8 reserved[314]; /* valid reserved region */ + __u8 reserved[310]; /* valid reserved region */ + __le32 crc; /* checksum of superblock */ } __packed; /* * For checkpoint */ +#define CP_DISABLED_FLAG 0x00001000 +#define CP_QUOTA_NEED_FSCK_FLAG 0x00000800 #define CP_LARGE_NAT_BITMAP_FLAG 0x00000400 #define CP_NOCRC_RECOVERY_FLAG 0x00000200 #define CP_TRIMMED_FLAG 0x00000100 diff --git a/include/linux/filter.h b/include/linux/filter.h index 6791a0ac0139..91b4c934f02e 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@ -520,24 +520,6 @@ struct bpf_skb_data_end { void *data_end; }; -struct sk_msg_buff { - void *data; - void *data_end; - __u32 apply_bytes; - __u32 cork_bytes; - int sg_copybreak; - int sg_start; - int sg_curr; - int sg_end; - struct scatterlist sg_data[MAX_SKB_FRAGS]; - bool sg_copy[MAX_SKB_FRAGS]; - __u32 flags; - struct sock *sk_redir; - struct sock *sk; - struct sk_buff *skb; - struct list_head list; -}; - struct bpf_redirect_info { u32 ifindex; u32 flags; @@ -566,6 +548,27 @@ static inline void bpf_compute_data_pointers(struct sk_buff *skb) cb->data_end = skb->data + skb_headlen(skb); } +/* Similar to bpf_compute_data_pointers(), except that save orginal + * data in cb->data and cb->meta_data for restore. + */ +static inline void bpf_compute_and_save_data_end( + struct sk_buff *skb, void **saved_data_end) +{ + struct bpf_skb_data_end *cb = (struct bpf_skb_data_end *)skb->cb; + + *saved_data_end = cb->data_end; + cb->data_end = skb->data + skb_headlen(skb); +} + +/* Restore data saved by bpf_compute_data_pointers(). 
*/ +static inline void bpf_restore_data_end( + struct sk_buff *skb, void *saved_data_end) +{ + struct bpf_skb_data_end *cb = (struct bpf_skb_data_end *)skb->cb; + + cb->data_end = saved_data_end; +} + static inline u8 *bpf_skb_cb(struct sk_buff *skb) { /* eBPF programs may read/write skb->cb[] area to transfer meta @@ -833,9 +836,6 @@ void xdp_do_flush_map(void); void bpf_warn_invalid_xdp_action(u32 act); -struct sock *do_sk_redirect_map(struct sk_buff *skb); -struct sock *do_msg_redirect_map(struct sk_msg_buff *md); - #ifdef CONFIG_INET struct sock *bpf_run_sk_reuseport(struct sock_reuseport *reuse, struct sock *sk, struct bpf_prog *prog, struct sk_buff *skb, diff --git a/include/linux/fpga/fpga-bridge.h b/include/linux/fpga/fpga-bridge.h index ce550fcf6360..817600a32c93 100644 --- a/include/linux/fpga/fpga-bridge.h +++ b/include/linux/fpga/fpga-bridge.h @@ -69,4 +69,8 @@ void fpga_bridge_free(struct fpga_bridge *br); int fpga_bridge_register(struct fpga_bridge *br); void fpga_bridge_unregister(struct fpga_bridge *br); +struct fpga_bridge +*devm_fpga_bridge_create(struct device *dev, const char *name, + const struct fpga_bridge_ops *br_ops, void *priv); + #endif /* _LINUX_FPGA_BRIDGE_H */ diff --git a/include/linux/fpga/fpga-mgr.h b/include/linux/fpga/fpga-mgr.h index 8942e61f0028..e8ca62b2cb5b 100644 --- a/include/linux/fpga/fpga-mgr.h +++ b/include/linux/fpga/fpga-mgr.h @@ -53,12 +53,20 @@ enum fpga_mgr_states { FPGA_MGR_STATE_OPERATING, }; -/* - * FPGA Manager flags - * FPGA_MGR_PARTIAL_RECONFIG: do partial reconfiguration if supported - * FPGA_MGR_EXTERNAL_CONFIG: FPGA has been configured prior to Linux booting - * FPGA_MGR_BITSTREAM_LSB_FIRST: SPI bitstream bit order is LSB first - * FPGA_MGR_COMPRESSED_BITSTREAM: FPGA bitstream is compressed +/** + * DOC: FPGA Manager flags + * + * Flags used in the &fpga_image_info->flags field + * + * %FPGA_MGR_PARTIAL_RECONFIG: do partial reconfiguration if supported + * + * %FPGA_MGR_EXTERNAL_CONFIG: FPGA has been configured prior to Linux booting + * + * %FPGA_MGR_ENCRYPTED_BITSTREAM: indicates bitstream is encrypted + * + * %FPGA_MGR_BITSTREAM_LSB_FIRST: SPI bitstream bit order is LSB first + * + * %FPGA_MGR_COMPRESSED_BITSTREAM: FPGA bitstream is compressed */ #define FPGA_MGR_PARTIAL_RECONFIG BIT(0) #define FPGA_MGR_EXTERNAL_CONFIG BIT(1) @@ -190,4 +198,8 @@ void fpga_mgr_free(struct fpga_manager *mgr); int fpga_mgr_register(struct fpga_manager *mgr); void fpga_mgr_unregister(struct fpga_manager *mgr); +struct fpga_manager *devm_fpga_mgr_create(struct device *dev, const char *name, + const struct fpga_manager_ops *mops, + void *priv); + #endif /*_LINUX_FPGA_MGR_H */ diff --git a/include/linux/fpga/fpga-region.h b/include/linux/fpga/fpga-region.h index 0521b7f577a4..27cb706275db 100644 --- a/include/linux/fpga/fpga-region.h +++ b/include/linux/fpga/fpga-region.h @@ -44,4 +44,8 @@ void fpga_region_free(struct fpga_region *region); int fpga_region_register(struct fpga_region *region); void fpga_region_unregister(struct fpga_region *region); +struct fpga_region +*devm_fpga_region_create(struct device *dev, struct fpga_manager *mgr, + int (*get_bridges)(struct fpga_region *)); + #endif /* _FPGA_REGION_H */ diff --git a/include/linux/fs.h b/include/linux/fs.h index e10278e4db66..771341470bce 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -1847,8 +1847,10 @@ extern ssize_t vfs_copy_file_range(struct file *, loff_t , struct file *, extern int vfs_clone_file_prep_inodes(struct inode *inode_in, loff_t pos_in, struct inode *inode_out, 
loff_t pos_out, u64 *len, bool is_dedupe); +extern int do_clone_file_range(struct file *file_in, loff_t pos_in, + struct file *file_out, loff_t pos_out, u64 len); extern int vfs_clone_file_range(struct file *file_in, loff_t pos_in, - struct file *file_out, loff_t pos_out, u64 len); + struct file *file_out, loff_t pos_out, u64 len); extern int vfs_dedupe_file_range_compare(struct inode *src, loff_t srcoff, struct inode *dest, loff_t destoff, loff_t len, bool *is_same); @@ -2792,19 +2794,6 @@ static inline void file_end_write(struct file *file) __sb_end_write(file_inode(file)->i_sb, SB_FREEZE_WRITE); } -static inline int do_clone_file_range(struct file *file_in, loff_t pos_in, - struct file *file_out, loff_t pos_out, - u64 len) -{ - int ret; - - file_start_write(file_out); - ret = vfs_clone_file_range(file_in, pos_in, file_out, pos_out, len); - file_end_write(file_out); - - return ret; -} - /* * get_write_access() gets write permission for a file. * put_write_access() releases this write permission. diff --git a/include/linux/fsl/mc.h b/include/linux/fsl/mc.h index f27cb14088a4..9d3f668df7df 100644 --- a/include/linux/fsl/mc.h +++ b/include/linux/fsl/mc.h @@ -351,6 +351,14 @@ int mc_send_command(struct fsl_mc_io *mc_io, struct fsl_mc_command *cmd); #define dev_is_fsl_mc(_dev) (0) #endif +/* Macro to check if a device is a container device */ +#define fsl_mc_is_cont_dev(_dev) (to_fsl_mc_device(_dev)->flags & \ + FSL_MC_IS_DPRC) + +/* Macro to get the container device of a MC device */ +#define fsl_mc_cont_dev(_dev) (fsl_mc_is_cont_dev(_dev) ? \ + (_dev) : (_dev)->parent) + /* * module_fsl_mc_driver() - Helper macro for drivers that don't do * anything special in module init/exit. This eliminates a lot of @@ -405,6 +413,7 @@ extern struct device_type fsl_mc_bus_dpcon_type; extern struct device_type fsl_mc_bus_dpmcp_type; extern struct device_type fsl_mc_bus_dpmac_type; extern struct device_type fsl_mc_bus_dprtc_type; +extern struct device_type fsl_mc_bus_dpseci_type; static inline bool is_fsl_mc_bus_dprc(const struct fsl_mc_device *mc_dev) { @@ -451,6 +460,11 @@ static inline bool is_fsl_mc_bus_dprtc(const struct fsl_mc_device *mc_dev) return mc_dev->dev.type == &fsl_mc_bus_dprtc_type; } +static inline bool is_fsl_mc_bus_dpseci(const struct fsl_mc_device *mc_dev) +{ + return mc_dev->dev.type == &fsl_mc_bus_dpseci_type; +} + /* * Data Path Buffer Pool (DPBP) API * Contains initialization APIs and runtime control APIs for DPBP diff --git a/include/linux/fsl_ifc.h b/include/linux/fsl_ifc.h index 3fdfede2f0f3..5f343b796ad9 100644 --- a/include/linux/fsl_ifc.h +++ b/include/linux/fsl_ifc.h @@ -274,6 +274,8 @@ */ /* Auto Boot Mode */ #define IFC_NAND_NCFGR_BOOT 0x80000000 +/* SRAM Initialization */ +#define IFC_NAND_NCFGR_SRAM_INIT_EN 0x20000000 /* Addressing Mode-ROW0+n/COL0 */ #define IFC_NAND_NCFGR_ADDR_MODE_RC0 0x00000000 /* Addressing Mode-ROW0+n/COL0+n */ diff --git a/include/linux/genhd.h b/include/linux/genhd.h index 25c08c6c7f99..70fc838e6773 100644 --- a/include/linux/genhd.h +++ b/include/linux/genhd.h @@ -402,10 +402,11 @@ static inline void free_part_info(struct hd_struct *part) extern void part_round_stats(struct request_queue *q, int cpu, struct hd_struct *part); /* block/genhd.c */ -extern void device_add_disk(struct device *parent, struct gendisk *disk); +extern void device_add_disk(struct device *parent, struct gendisk *disk, + const struct attribute_group **groups); static inline void add_disk(struct gendisk *disk) { - device_add_disk(NULL, disk); + device_add_disk(NULL, disk, 
NULL); } extern void device_add_disk_no_queue_reg(struct device *parent, struct gendisk *disk); static inline void add_disk_no_queue_reg(struct gendisk *disk) diff --git a/include/linux/gpio/consumer.h b/include/linux/gpio/consumer.h index 21ddbe440030..f2f887795d43 100644 --- a/include/linux/gpio/consumer.h +++ b/include/linux/gpio/consumer.h @@ -18,10 +18,19 @@ struct device; struct gpio_desc; /** + * Opaque descriptor for a structure of GPIO array attributes. This structure + * is attached to struct gpiod_descs obtained from gpiod_get_array() and can be + * passed back to get/set array functions in order to activate fast processing + * path if applicable. + */ +struct gpio_array; + +/** * Struct containing an array of descriptors that can be obtained using * gpiod_get_array(). */ struct gpio_descs { + struct gpio_array *info; unsigned int ndescs; struct gpio_desc *desc[]; }; @@ -30,6 +39,7 @@ struct gpio_descs { #define GPIOD_FLAGS_BIT_DIR_OUT BIT(1) #define GPIOD_FLAGS_BIT_DIR_VAL BIT(2) #define GPIOD_FLAGS_BIT_OPEN_DRAIN BIT(3) +#define GPIOD_FLAGS_BIT_NONEXCLUSIVE BIT(4) /** * Optional flags that can be passed to one of gpiod_* to configure direction @@ -104,36 +114,46 @@ int gpiod_direction_output_raw(struct gpio_desc *desc, int value); /* Value get/set from non-sleeping context */ int gpiod_get_value(const struct gpio_desc *desc); int gpiod_get_array_value(unsigned int array_size, - struct gpio_desc **desc_array, int *value_array); + struct gpio_desc **desc_array, + struct gpio_array *array_info, + unsigned long *value_bitmap); void gpiod_set_value(struct gpio_desc *desc, int value); -void gpiod_set_array_value(unsigned int array_size, - struct gpio_desc **desc_array, int *value_array); +int gpiod_set_array_value(unsigned int array_size, + struct gpio_desc **desc_array, + struct gpio_array *array_info, + unsigned long *value_bitmap); int gpiod_get_raw_value(const struct gpio_desc *desc); int gpiod_get_raw_array_value(unsigned int array_size, struct gpio_desc **desc_array, - int *value_array); + struct gpio_array *array_info, + unsigned long *value_bitmap); void gpiod_set_raw_value(struct gpio_desc *desc, int value); int gpiod_set_raw_array_value(unsigned int array_size, - struct gpio_desc **desc_array, - int *value_array); + struct gpio_desc **desc_array, + struct gpio_array *array_info, + unsigned long *value_bitmap); /* Value get/set from sleeping context */ int gpiod_get_value_cansleep(const struct gpio_desc *desc); int gpiod_get_array_value_cansleep(unsigned int array_size, struct gpio_desc **desc_array, - int *value_array); + struct gpio_array *array_info, + unsigned long *value_bitmap); void gpiod_set_value_cansleep(struct gpio_desc *desc, int value); -void gpiod_set_array_value_cansleep(unsigned int array_size, - struct gpio_desc **desc_array, - int *value_array); +int gpiod_set_array_value_cansleep(unsigned int array_size, + struct gpio_desc **desc_array, + struct gpio_array *array_info, + unsigned long *value_bitmap); int gpiod_get_raw_value_cansleep(const struct gpio_desc *desc); int gpiod_get_raw_array_value_cansleep(unsigned int array_size, struct gpio_desc **desc_array, - int *value_array); + struct gpio_array *array_info, + unsigned long *value_bitmap); void gpiod_set_raw_value_cansleep(struct gpio_desc *desc, int value); int gpiod_set_raw_array_value_cansleep(unsigned int array_size, - struct gpio_desc **desc_array, - int *value_array); + struct gpio_desc **desc_array, + struct gpio_array *array_info, + unsigned long *value_bitmap); int gpiod_set_debounce(struct 
gpio_desc *desc, unsigned debounce); int gpiod_set_transitory(struct gpio_desc *desc, bool transitory); @@ -330,7 +350,8 @@ static inline int gpiod_get_value(const struct gpio_desc *desc) } static inline int gpiod_get_array_value(unsigned int array_size, struct gpio_desc **desc_array, - int *value_array) + struct gpio_array *array_info, + unsigned long *value_bitmap) { /* GPIO can never have been requested */ WARN_ON(1); @@ -341,12 +362,14 @@ static inline void gpiod_set_value(struct gpio_desc *desc, int value) /* GPIO can never have been requested */ WARN_ON(1); } -static inline void gpiod_set_array_value(unsigned int array_size, - struct gpio_desc **desc_array, - int *value_array) +static inline int gpiod_set_array_value(unsigned int array_size, + struct gpio_desc **desc_array, + struct gpio_array *array_info, + unsigned long *value_bitmap) { /* GPIO can never have been requested */ WARN_ON(1); + return 0; } static inline int gpiod_get_raw_value(const struct gpio_desc *desc) { @@ -356,7 +379,8 @@ static inline int gpiod_get_raw_value(const struct gpio_desc *desc) } static inline int gpiod_get_raw_array_value(unsigned int array_size, struct gpio_desc **desc_array, - int *value_array) + struct gpio_array *array_info, + unsigned long *value_bitmap) { /* GPIO can never have been requested */ WARN_ON(1); @@ -368,8 +392,9 @@ static inline void gpiod_set_raw_value(struct gpio_desc *desc, int value) WARN_ON(1); } static inline int gpiod_set_raw_array_value(unsigned int array_size, - struct gpio_desc **desc_array, - int *value_array) + struct gpio_desc **desc_array, + struct gpio_array *array_info, + unsigned long *value_bitmap) { /* GPIO can never have been requested */ WARN_ON(1); @@ -384,7 +409,8 @@ static inline int gpiod_get_value_cansleep(const struct gpio_desc *desc) } static inline int gpiod_get_array_value_cansleep(unsigned int array_size, struct gpio_desc **desc_array, - int *value_array) + struct gpio_array *array_info, + unsigned long *value_bitmap) { /* GPIO can never have been requested */ WARN_ON(1); @@ -395,12 +421,14 @@ static inline void gpiod_set_value_cansleep(struct gpio_desc *desc, int value) /* GPIO can never have been requested */ WARN_ON(1); } -static inline void gpiod_set_array_value_cansleep(unsigned int array_size, +static inline int gpiod_set_array_value_cansleep(unsigned int array_size, struct gpio_desc **desc_array, - int *value_array) + struct gpio_array *array_info, + unsigned long *value_bitmap) { /* GPIO can never have been requested */ WARN_ON(1); + return 0; } static inline int gpiod_get_raw_value_cansleep(const struct gpio_desc *desc) { @@ -410,7 +438,8 @@ static inline int gpiod_get_raw_value_cansleep(const struct gpio_desc *desc) } static inline int gpiod_get_raw_array_value_cansleep(unsigned int array_size, struct gpio_desc **desc_array, - int *value_array) + struct gpio_array *array_info, + unsigned long *value_bitmap) { /* GPIO can never have been requested */ WARN_ON(1); @@ -424,7 +453,8 @@ static inline void gpiod_set_raw_value_cansleep(struct gpio_desc *desc, } static inline int gpiod_set_raw_array_value_cansleep(unsigned int array_size, struct gpio_desc **desc_array, - int *value_array) + struct gpio_array *array_info, + unsigned long *value_bitmap) { /* GPIO can never have been requested */ WARN_ON(1); diff --git a/include/linux/gpio/driver.h b/include/linux/gpio/driver.h index 0ea328e71ec9..2db62b550b95 100644 --- a/include/linux/gpio/driver.h +++ b/include/linux/gpio/driver.h @@ -66,9 +66,15 @@ struct gpio_irq_chip { /** * @lock_key: * - * Per 
GPIO IRQ chip lockdep classes. + * Per GPIO IRQ chip lockdep class for IRQ lock. */ struct lock_class_key *lock_key; + + /** + * @request_key: + * + * Per GPIO IRQ chip lockdep class for IRQ request. + */ struct lock_class_key *request_key; /** @@ -95,6 +101,13 @@ struct gpio_irq_chip { unsigned int num_parents; /** + * @parent_irq: + * + * For use by gpiochip_set_cascaded_irqchip() + */ + unsigned int parent_irq; + + /** * @parents: * * A list of interrupt parents of a GPIO chip. This is owned by the @@ -138,6 +151,20 @@ struct gpio_irq_chip { * will allocate and map all IRQs during initialization. */ unsigned int first; + + /** + * @irq_enable: + * + * Store old irq_chip irq_enable callback + */ + void (*irq_enable)(struct irq_data *data); + + /** + * @irq_disable: + * + * Store old irq_chip irq_disable callback + */ + void (*irq_disable)(struct irq_data *data); }; static inline struct gpio_irq_chip *to_gpio_irq_chip(struct irq_chip *chip) @@ -158,9 +185,13 @@ static inline struct gpio_irq_chip *to_gpio_irq_chip(struct irq_chip *chip) * @free: optional hook for chip-specific deactivation, such as * disabling module power and clock; may sleep * @get_direction: returns direction for signal "offset", 0=out, 1=in, - * (same as GPIOF_DIR_XXX), or negative error + * (same as GPIOF_DIR_XXX), or negative error. + * It is recommended to always implement this function, even on + * input-only or output-only gpio chips. * @direction_input: configures signal "offset" as input, or returns error + * This can be omitted on input-only or output-only gpio chips. * @direction_output: configures signal "offset" as output, or returns error + * This can be omitted on input-only or output-only gpio chips. * @get: returns value for signal "offset", 0=low, 1=high, or negative error * @get_multiple: reads values for multiple signals defined by "mask" and * stores them in "bits", returns 0 on success or negative error @@ -256,6 +287,9 @@ struct gpio_chip { void (*dbg_show)(struct seq_file *s, struct gpio_chip *chip); + + int (*init_valid_mask)(struct gpio_chip *chip); + int base; u16 ngpio; const char *const *names; @@ -294,7 +328,9 @@ struct gpio_chip { /** * @need_valid_mask: * - * If set core allocates @valid_mask with all bits set to one. 
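The gpiod_get/set_array_value*() interfaces in the gpio/consumer.h hunk above now take the struct gpio_array info attached to gpiod_get_array() results and exchange values as a bitmap instead of an int array. A consumer-side sketch; set_bank() is hypothetical and assumes the bank has at most BITS_PER_LONG lines:

#include <linux/gpio/consumer.h>

/* Hypothetical consumer: write one value bit per descriptor in a bank
 * obtained from gpiod_get_array(); bit 0 corresponds to desc[0]. */
static int set_bank(struct gpio_descs *bank, unsigned long value)
{
	unsigned long bits = value;

	return gpiod_set_array_value(bank->ndescs, bank->desc, bank->info,
				     &bits);
}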
+ * If set core allocates @valid_mask with all its values initialized + * with init_valid_mask() or set to one if init_valid_mask() is not + * defined */ bool need_valid_mask; @@ -395,6 +431,10 @@ extern struct gpio_chip *gpiochip_find(void *data, int gpiochip_lock_as_irq(struct gpio_chip *chip, unsigned int offset); void gpiochip_unlock_as_irq(struct gpio_chip *chip, unsigned int offset); bool gpiochip_line_is_irq(struct gpio_chip *chip, unsigned int offset); +int gpiochip_reqres_irq(struct gpio_chip *chip, unsigned int offset); +void gpiochip_relres_irq(struct gpio_chip *chip, unsigned int offset); +void gpiochip_disable_irq(struct gpio_chip *chip, unsigned int offset); +void gpiochip_enable_irq(struct gpio_chip *chip, unsigned int offset); /* Line status inquiry for drivers */ bool gpiochip_line_is_open_drain(struct gpio_chip *chip, unsigned int offset); diff --git a/include/linux/hid.h b/include/linux/hid.h index d44a78362942..2827b87590d8 100644 --- a/include/linux/hid.h +++ b/include/linux/hid.h @@ -1139,6 +1139,34 @@ static inline u32 hid_report_len(struct hid_report *report) int hid_report_raw_event(struct hid_device *hid, int type, u8 *data, u32 size, int interrupt); + +/** + * struct hid_scroll_counter - Utility class for processing high-resolution + * scroll events. + * @dev: the input device for which events should be reported. + * @microns_per_hi_res_unit: the amount moved by the user's finger for each + * high-resolution unit reported by the mouse, in + * microns. + * @resolution_multiplier: the wheel's resolution in high-resolution mode as a + * multiple of its lower resolution. For example, if + * moving the wheel by one "notch" would result in a + * value of 1 in low-resolution mode but 8 in + * high-resolution, the multiplier is 8. + * @remainder: counts the number of high-resolution units moved since the last + * low-resolution event (REL_WHEEL or REL_HWHEEL) was sent. Should + * only be used by class methods. + */ +struct hid_scroll_counter { + struct input_dev *dev; + int microns_per_hi_res_unit; + int resolution_multiplier; + + int remainder; +}; + +void hid_scroll_counter_handle_scroll(struct hid_scroll_counter *counter, + int hi_res_value); + /* HID quirks API */ unsigned long hid_lookup_quirk(const struct hid_device *hdev); int hid_quirks_init(char **quirks_param, __u16 bus, int count); diff --git a/include/linux/hmm.h b/include/linux/hmm.h index 4c92e3ba3e16..dde947083d4e 100644 --- a/include/linux/hmm.h +++ b/include/linux/hmm.h @@ -107,7 +107,7 @@ enum hmm_pfn_flag_e { * HMM_PFN_ERROR: corresponding CPU page table entry points to poisoned memory * HMM_PFN_NONE: corresponding CPU page table entry is pte_none() * HMM_PFN_SPECIAL: corresponding CPU page table entry is special; i.e., the - * result of vm_insert_pfn() or vm_insert_page(). Therefore, it should not + * result of vmf_insert_pfn() or vm_insert_page(). Therefore, it should not * be mirrored by a device, because the entry will never have HMM_PFN_VALID * set and the pfn value is undefined. 
* diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h index 99c19b06d9a4..4663ee96cf59 100644 --- a/include/linux/huge_mm.h +++ b/include/linux/huge_mm.h @@ -43,7 +43,7 @@ extern int mincore_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, unsigned char *vec); extern bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr, unsigned long new_addr, unsigned long old_end, - pmd_t *old_pmd, pmd_t *new_pmd, bool *need_flush); + pmd_t *old_pmd, pmd_t *new_pmd); extern int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, unsigned long addr, pgprot_t newprot, int prot_numa); @@ -213,9 +213,9 @@ static inline int hpage_nr_pages(struct page *page) } struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr, - pmd_t *pmd, int flags); + pmd_t *pmd, int flags, struct dev_pagemap **pgmap); struct page *follow_devmap_pud(struct vm_area_struct *vma, unsigned long addr, - pud_t *pud, int flags); + pud_t *pud, int flags, struct dev_pagemap **pgmap); extern vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t orig_pmd); @@ -344,13 +344,13 @@ static inline void mm_put_huge_zero_page(struct mm_struct *mm) } static inline struct page *follow_devmap_pmd(struct vm_area_struct *vma, - unsigned long addr, pmd_t *pmd, int flags) + unsigned long addr, pmd_t *pmd, int flags, struct dev_pagemap **pgmap) { return NULL; } static inline struct page *follow_devmap_pud(struct vm_area_struct *vma, - unsigned long addr, pud_t *pud, int flags) + unsigned long addr, pud_t *pud, int flags, struct dev_pagemap **pgmap) { return NULL; } diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h index 6b68e345f0ca..087fd5f48c91 100644 --- a/include/linux/hugetlb.h +++ b/include/linux/hugetlb.h @@ -140,6 +140,8 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr, unsigned long sz); int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep); +void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma, + unsigned long *start, unsigned long *end); struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address, int write); struct page *follow_huge_pd(struct vm_area_struct *vma, @@ -170,6 +172,18 @@ static inline unsigned long hugetlb_total_pages(void) return 0; } +static inline int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, + pte_t *ptep) +{ + return 0; +} + +static inline void adjust_range_if_pmd_sharing_possible( + struct vm_area_struct *vma, + unsigned long *start, unsigned long *end) +{ +} + #define follow_hugetlb_page(m,v,p,vs,a,b,i,w,n) ({ BUG(); 0; }) #define follow_huge_addr(mm, addr, write) ERR_PTR(-EINVAL) #define copy_hugetlb_page_range(src, dst, vma) ({ BUG(); 0; }) diff --git a/include/linux/hw_random.h b/include/linux/hw_random.h index bee0827766a3..c0b93e0ff0c0 100644 --- a/include/linux/hw_random.h +++ b/include/linux/hw_random.h @@ -33,7 +33,8 @@ * and max is a multiple of 4 and >= 32 bytes. * @priv: Private data, for use by the RNG driver. * @quality: Estimation of true entropy in RNG's bitstream - * (per mill). + * (in bits of entropy per 1024 bits of input; + * valid values: 1 to 1024, or 0 for unknown). 
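With the clarified units above, the entropy credited for a read follows directly from @quality; a small illustration, where credited_entropy_bits() is an invented helper that scales the value roughly the way the hw_random core does:

#include <linux/types.h>

/* Invented helper: bits of entropy credited for "len" bytes read from a
 * generator with the given quality (quality = entropy bits per 1024 bits
 * of input, 0 meaning unknown). */
static size_t credited_entropy_bits(size_t len, unsigned short quality)
{
	return (len * 8 * quality) / 1024;
}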
*/ struct hwrng { const char *name; diff --git a/include/linux/hwmon.h b/include/linux/hwmon.h index 9493d4a388db..99e0c1b0b5fb 100644 --- a/include/linux/hwmon.h +++ b/include/linux/hwmon.h @@ -118,6 +118,7 @@ enum hwmon_in_attributes { hwmon_in_max_alarm, hwmon_in_lcrit_alarm, hwmon_in_crit_alarm, + hwmon_in_enable, }; #define HWMON_I_INPUT BIT(hwmon_in_input) @@ -135,6 +136,7 @@ enum hwmon_in_attributes { #define HWMON_I_MAX_ALARM BIT(hwmon_in_max_alarm) #define HWMON_I_LCRIT_ALARM BIT(hwmon_in_lcrit_alarm) #define HWMON_I_CRIT_ALARM BIT(hwmon_in_crit_alarm) +#define HWMON_I_ENABLE BIT(hwmon_in_enable) enum hwmon_curr_attributes { hwmon_curr_input, diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h index efda23cf32c7..b3e24368930a 100644 --- a/include/linux/hyperv.h +++ b/include/linux/hyperv.h @@ -739,8 +739,9 @@ struct vmbus_channel { u32 ringbuffer_gpadlhandle; /* Allocated memory for ring buffer */ - void *ringbuffer_pages; + struct page *ringbuffer_page; u32 ringbuffer_pagecount; + u32 ringbuffer_send_offset; struct hv_ring_buffer_info outbound; /* send to parent */ struct hv_ring_buffer_info inbound; /* receive from parent */ @@ -1021,6 +1022,14 @@ struct vmbus_packet_mpb_array { struct hv_mpb_array range; } __packed; +int vmbus_alloc_ring(struct vmbus_channel *channel, + u32 send_size, u32 recv_size); +void vmbus_free_ring(struct vmbus_channel *channel); + +int vmbus_connect_ring(struct vmbus_channel *channel, + void (*onchannel_callback)(void *context), + void *context); +int vmbus_disconnect_ring(struct vmbus_channel *channel); extern int vmbus_open(struct vmbus_channel *channel, u32 send_ringbuffersize, @@ -1125,6 +1134,7 @@ struct hv_device { u16 device_id; struct device device; + char *driver_override; /* Driver name to force a match */ struct vmbus_channel *channel; struct kset *channels_kset; @@ -1442,7 +1452,7 @@ extern bool vmbus_prep_negotiate_resp(struct icmsg_hdr *icmsghdrp, u8 *buf, const int *srv_version, int srv_vercnt, int *nego_fw_version, int *nego_srv_version); -void hv_process_channel_removal(u32 relid); +void hv_process_channel_removal(struct vmbus_channel *channel); void vmbus_setevent(struct vmbus_channel *channel); /* diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h index 9c03a7d5e400..0ef67f837ae1 100644 --- a/include/linux/ieee80211.h +++ b/include/linux/ieee80211.h @@ -1460,13 +1460,16 @@ struct ieee80211_ht_operation { * STA can receive. Rate expressed in units of 1 Mbps. * If this field is 0 this value should not be used to * consider the highest RX data rate supported. - * The top 3 bits of this field are reserved. + * The top 3 bits of this field indicate the Maximum NSTS,total + * (a beamformee capability.) * @tx_mcs_map: TX MCS map 2 bits for each stream, total 8 streams * @tx_highest: Indicates highest long GI VHT PPDU data rate * STA can transmit. Rate expressed in units of 1 Mbps. * If this field is 0 this value should not be used to * consider the highest TX data rate supported. - * The top 3 bits of this field are reserved. + * The top 2 bits of this field are reserved, the + * 3rd bit from the top indiciates VHT Extended NSS BW + * Capability. 
*/ struct ieee80211_vht_mcs_info { __le16 rx_mcs_map; @@ -1475,6 +1478,13 @@ struct ieee80211_vht_mcs_info { __le16 tx_highest; } __packed; +/* for rx_highest */ +#define IEEE80211_VHT_MAX_NSTS_TOTAL_SHIFT 13 +#define IEEE80211_VHT_MAX_NSTS_TOTAL_MASK (7 << IEEE80211_VHT_MAX_NSTS_TOTAL_SHIFT) + +/* for tx_highest */ +#define IEEE80211_VHT_EXT_NSS_BW_CAPABLE (1 << 13) + /** * enum ieee80211_vht_mcs_support - VHT MCS support definitions * @IEEE80211_VHT_MCS_SUPPORT_0_7: MCSes 0-7 are supported for the @@ -1545,11 +1555,11 @@ struct ieee80211_vht_operation { * struct ieee80211_he_cap_elem - HE capabilities element * * This structure is the "HE capabilities element" fixed fields as - * described in P802.11ax_D2.0 section 9.4.2.237.2 and 9.4.2.237.3 + * described in P802.11ax_D3.0 section 9.4.2.237.2 and 9.4.2.237.3 */ struct ieee80211_he_cap_elem { - u8 mac_cap_info[5]; - u8 phy_cap_info[9]; + u8 mac_cap_info[6]; + u8 phy_cap_info[11]; } __packed; #define IEEE80211_TX_RX_MCS_NSS_DESC_MAX_LEN 5 @@ -1650,6 +1660,7 @@ struct ieee80211_mu_edca_param_set { #define IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ 0x00000004 #define IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ 0x00000008 #define IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK 0x0000000C +#define IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_SHIFT 2 #define IEEE80211_VHT_CAP_RXLDPC 0x00000010 #define IEEE80211_VHT_CAP_SHORT_GI_80 0x00000020 #define IEEE80211_VHT_CAP_SHORT_GI_160 0x00000040 @@ -1659,6 +1670,7 @@ struct ieee80211_mu_edca_param_set { #define IEEE80211_VHT_CAP_RXSTBC_3 0x00000300 #define IEEE80211_VHT_CAP_RXSTBC_4 0x00000400 #define IEEE80211_VHT_CAP_RXSTBC_MASK 0x00000700 +#define IEEE80211_VHT_CAP_RXSTBC_SHIFT 8 #define IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE 0x00000800 #define IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE 0x00001000 #define IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT 13 @@ -1678,6 +1690,26 @@ struct ieee80211_mu_edca_param_set { #define IEEE80211_VHT_CAP_VHT_LINK_ADAPTATION_VHT_MRQ_MFB 0x0c000000 #define IEEE80211_VHT_CAP_RX_ANTENNA_PATTERN 0x10000000 #define IEEE80211_VHT_CAP_TX_ANTENNA_PATTERN 0x20000000 +#define IEEE80211_VHT_CAP_EXT_NSS_BW_SHIFT 30 +#define IEEE80211_VHT_CAP_EXT_NSS_BW_MASK 0xc0000000 + +/** + * ieee80211_get_vht_max_nss - return max NSS for a given bandwidth/MCS + * @cap: VHT capabilities of the peer + * @bw: bandwidth to use + * @mcs: MCS index to use + * @ext_nss_bw_capable: indicates whether or not the local transmitter + * (rate scaling algorithm) can deal with the new logic + * (dot11VHTExtendedNSSBWCapable) + * + * Due to the VHT Extended NSS Bandwidth Support, the maximum NSS can + * vary for a given BW/MCS. This function parses the data. + * + * Note: This function is exported by cfg80211. 
+ */ +int ieee80211_get_vht_max_nss(struct ieee80211_vht_cap *cap, + enum ieee80211_vht_chanwidth bw, + int mcs, bool ext_nss_bw_capable); /* 802.11ax HE MAC capabilities */ #define IEEE80211_HE_MAC_CAP0_HTC_HE 0x01 @@ -1707,15 +1739,15 @@ struct ieee80211_mu_edca_param_set { #define IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_8US 0x04 #define IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_16US 0x08 #define IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_MASK 0x0c -#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_QOS_1 0x00 -#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_QOS_2 0x10 -#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_QOS_3 0x20 -#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_QOS_4 0x30 -#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_QOS_5 0x40 -#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_QOS_6 0x50 -#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_QOS_7 0x60 -#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_QOS_8 0x70 -#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_QOS_MASK 0x70 +#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_1 0x00 +#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_2 0x10 +#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_3 0x20 +#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_4 0x30 +#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_5 0x40 +#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_6 0x50 +#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_7 0x60 +#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_8 0x70 +#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_MASK 0x70 /* Link adaptation is split between byte HE_MAC_CAP1 and * HE_MAC_CAP2. It should be set only if IEEE80211_HE_MAC_CAP0_HTC_HE @@ -1729,14 +1761,13 @@ struct ieee80211_mu_edca_param_set { #define IEEE80211_HE_MAC_CAP2_LINK_ADAPTATION 0x01 #define IEEE80211_HE_MAC_CAP2_ALL_ACK 0x02 -#define IEEE80211_HE_MAC_CAP2_UL_MU_RESP_SCHED 0x04 +#define IEEE80211_HE_MAC_CAP2_TRS 0x04 #define IEEE80211_HE_MAC_CAP2_BSR 0x08 #define IEEE80211_HE_MAC_CAP2_BCAST_TWT 0x10 #define IEEE80211_HE_MAC_CAP2_32BIT_BA_BITMAP 0x20 #define IEEE80211_HE_MAC_CAP2_MU_CASCADING 0x40 #define IEEE80211_HE_MAC_CAP2_ACK_EN 0x80 -#define IEEE80211_HE_MAC_CAP3_GRP_ADDR_MULTI_STA_BA_DL_MU 0x01 #define IEEE80211_HE_MAC_CAP3_OMI_CONTROL 0x02 #define IEEE80211_HE_MAC_CAP3_OFDMA_RA 0x04 @@ -1744,25 +1775,34 @@ struct ieee80211_mu_edca_param_set { * A-MDPU Length Exponent field in the HT capabilities, VHT capabilities and the * same field in the HE capabilities. 
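A rate-control caller of the ieee80211_get_vht_max_nss() helper declared above would look roughly like this; only the helper, struct ieee80211_vht_cap and the chanwidth enum come from the headers, the surrounding names are assumptions:

#include <linux/kernel.h>
#include <linux/ieee80211.h>

/* Hypothetical rate-scaling snippet: never try more spatial streams at
 * 160 MHz than the peer's (EXT_NSS_BW-aware) VHT capabilities allow. */
static int clamp_nss_160(struct ieee80211_vht_cap *peer_cap, int mcs,
			 int wanted_nss, bool ext_nss_bw_capable)
{
	int max_nss = ieee80211_get_vht_max_nss(peer_cap,
						IEEE80211_VHT_CHANWIDTH_160MHZ,
						mcs, ext_nss_bw_capable);

	return min(wanted_nss, max_nss);
}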
*/ -#define IEEE80211_HE_MAC_CAP3_MAX_A_AMPDU_LEN_EXP_USE_VHT 0x00 -#define IEEE80211_HE_MAC_CAP3_MAX_A_AMPDU_LEN_EXP_VHT_1 0x08 -#define IEEE80211_HE_MAC_CAP3_MAX_A_AMPDU_LEN_EXP_VHT_2 0x10 -#define IEEE80211_HE_MAC_CAP3_MAX_A_AMPDU_LEN_EXP_RESERVED 0x18 -#define IEEE80211_HE_MAC_CAP3_MAX_A_AMPDU_LEN_EXP_MASK 0x18 -#define IEEE80211_HE_MAC_CAP3_A_AMSDU_FRAG 0x20 +#define IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_USE_VHT 0x00 +#define IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_VHT_1 0x08 +#define IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_VHT_2 0x10 +#define IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_RESERVED 0x18 +#define IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_MASK 0x18 +#define IEEE80211_HE_MAC_CAP3_AMSDU_FRAG 0x20 #define IEEE80211_HE_MAC_CAP3_FLEX_TWT_SCHED 0x40 #define IEEE80211_HE_MAC_CAP3_RX_CTRL_FRAME_TO_MULTIBSS 0x80 #define IEEE80211_HE_MAC_CAP4_BSRP_BQRP_A_MPDU_AGG 0x01 #define IEEE80211_HE_MAC_CAP4_QTP 0x02 #define IEEE80211_HE_MAC_CAP4_BQR 0x04 -#define IEEE80211_HE_MAC_CAP4_SR_RESP 0x08 +#define IEEE80211_HE_MAC_CAP4_SRP_RESP 0x08 #define IEEE80211_HE_MAC_CAP4_NDP_FB_REP 0x10 #define IEEE80211_HE_MAC_CAP4_OPS 0x20 #define IEEE80211_HE_MAC_CAP4_AMDSU_IN_AMPDU 0x40 +/* Multi TID agg TX is split between byte #4 and #5 + * The value is a combination of B39,B40,B41 + */ +#define IEEE80211_HE_MAC_CAP4_MULTI_TID_AGG_TX_QOS_B39 0x80 + +#define IEEE80211_HE_MAC_CAP5_MULTI_TID_AGG_TX_QOS_B40 0x01 +#define IEEE80211_HE_MAC_CAP5_MULTI_TID_AGG_TX_QOS_B41 0x02 +#define IEEE80211_HE_MAC_CAP5_SUBCHAN_SELECVITE_TRANSMISSION 0x04 +#define IEEE80211_HE_MAC_CAP5_UL_2x996_TONE_RU 0x08 +#define IEEE80211_HE_MAC_CAP5_OM_CTRL_UL_MU_DATA_DIS_RX 0x10 /* 802.11ax HE PHY capabilities */ -#define IEEE80211_HE_PHY_CAP0_DUAL_BAND 0x01 #define IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G 0x02 #define IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G 0x04 #define IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G 0x08 @@ -1779,10 +1819,10 @@ struct ieee80211_mu_edca_param_set { #define IEEE80211_HE_PHY_CAP1_DEVICE_CLASS_A 0x10 #define IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD 0x20 #define IEEE80211_HE_PHY_CAP1_HE_LTF_AND_GI_FOR_HE_PPDUS_0_8US 0x40 -/* Midamble RX Max NSTS is split between byte #2 and byte #3 */ -#define IEEE80211_HE_PHY_CAP1_MIDAMBLE_RX_MAX_NSTS 0x80 +/* Midamble RX/TX Max NSTS is split between byte #2 and byte #3 */ +#define IEEE80211_HE_PHY_CAP1_MIDAMBLE_RX_TX_MAX_NSTS 0x80 -#define IEEE80211_HE_PHY_CAP2_MIDAMBLE_RX_MAX_NSTS 0x01 +#define IEEE80211_HE_PHY_CAP2_MIDAMBLE_RX_TX_MAX_NSTS 0x01 #define IEEE80211_HE_PHY_CAP2_NDP_4x_LTF_AND_3_2US 0x02 #define IEEE80211_HE_PHY_CAP2_STBC_TX_UNDER_80MHZ 0x04 #define IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ 0x08 @@ -1883,7 +1923,19 @@ struct ieee80211_mu_edca_param_set { #define IEEE80211_HE_PHY_CAP8_20MHZ_IN_160MHZ_HE_PPDU 0x04 #define IEEE80211_HE_PHY_CAP8_80MHZ_IN_160MHZ_HE_PPDU 0x08 #define IEEE80211_HE_PHY_CAP8_HE_ER_SU_1XLTF_AND_08_US_GI 0x10 -#define IEEE80211_HE_PHY_CAP8_MIDAMBLE_RX_2X_AND_1XLTF 0x20 +#define IEEE80211_HE_PHY_CAP8_MIDAMBLE_RX_TX_2X_AND_1XLTF 0x20 +#define IEEE80211_HE_PHY_CAP8_DCM_MAX_BW_20MHZ 0x00 +#define IEEE80211_HE_PHY_CAP8_DCM_MAX_BW_40MHZ 0x40 +#define IEEE80211_HE_PHY_CAP8_DCM_MAX_BW_80MHZ 0x80 +#define IEEE80211_HE_PHY_CAP8_DCM_MAX_BW_160_OR_80P80_MHZ 0xc0 +#define IEEE80211_HE_PHY_CAP8_DCM_MAX_BW_MASK 0xc0 + +#define IEEE80211_HE_PHY_CAP9_LONGER_THAN_16_SIGB_OFDM_SYM 0x01 +#define IEEE80211_HE_PHY_CAP9_NON_TRIGGERED_CQI_FEEDBACK 0x02 +#define IEEE80211_HE_PHY_CAP9_TX_1024_QAM_LESS_THAN_242_TONE_RU 0x04 
+#define IEEE80211_HE_PHY_CAP9_RX_1024_QAM_LESS_THAN_242_TONE_RU 0x08 +#define IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_COMP_SIGB 0x10 +#define IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_NON_COMP_SIGB 0x20 /* 802.11ax HE TX/RX MCS NSS Support */ #define IEEE80211_TX_RX_MCS_NSS_SUPP_HIGHEST_MCS_POS (3) @@ -1963,8 +2015,8 @@ ieee80211_he_ppe_size(u8 ppe_thres_hdr, const u8 *phy_cap_info) #define IEEE80211_HE_OPERATION_TWT_REQUIRED 0x00000200 #define IEEE80211_HE_OPERATION_RTS_THRESHOLD_MASK 0x000ffc00 #define IEEE80211_HE_OPERATION_RTS_THRESHOLD_OFFSET 10 -#define IEEE80211_HE_OPERATION_PARTIAL_BSS_COLOR 0x000100000 -#define IEEE80211_HE_OPERATION_VHT_OPER_INFO 0x000200000 +#define IEEE80211_HE_OPERATION_PARTIAL_BSS_COLOR 0x00100000 +#define IEEE80211_HE_OPERATION_VHT_OPER_INFO 0x00200000 #define IEEE80211_HE_OPERATION_MULTI_BSSID_AP 0x10000000 #define IEEE80211_HE_OPERATION_TX_BSSID_INDICATOR 0x20000000 #define IEEE80211_HE_OPERATION_BSS_COLOR_DISABLED 0x40000000 diff --git a/include/linux/if_tun.h b/include/linux/if_tun.h index 3d2996dc7d85..12e3eebf0ce6 100644 --- a/include/linux/if_tun.h +++ b/include/linux/if_tun.h @@ -16,9 +16,23 @@ #define __IF_TUN_H #include <uapi/linux/if_tun.h> +#include <uapi/linux/virtio_net.h> #define TUN_XDP_FLAG 0x1UL +#define TUN_MSG_UBUF 1 +#define TUN_MSG_PTR 2 +struct tun_msg_ctl { + unsigned short type; + unsigned short num; + void *ptr; +}; + +struct tun_xdp_hdr { + int buflen; + struct virtio_net_hdr gso; +}; + #if defined(CONFIG_TUN) || defined(CONFIG_TUN_MODULE) struct socket *tun_get_socket(struct file *); struct ptr_ring *tun_get_tx_ring(struct file *file); diff --git a/include/linux/init.h b/include/linux/init.h index 2538d176dd1f..9c2aba1dbabf 100644 --- a/include/linux/init.h +++ b/include/linux/init.h @@ -133,7 +133,6 @@ static inline initcall_t initcall_from_entry(initcall_entry_t *entry) #endif extern initcall_entry_t __con_initcall_start[], __con_initcall_end[]; -extern initcall_entry_t __security_initcall_start[], __security_initcall_end[]; /* Used for contructor calls. 
*/ typedef void (*ctor_fn_t)(void); @@ -236,7 +235,6 @@ extern bool initcall_debug; static exitcall_t __exitcall_##fn __exit_call = fn #define console_initcall(fn) ___define_initcall(fn,, .con_initcall) -#define security_initcall(fn) ___define_initcall(fn,, .security_initcall) struct obs_kernel_param { const char *str; diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h index 28004d74ae04..b0ae25837361 100644 --- a/include/linux/intel-iommu.h +++ b/include/linux/intel-iommu.h @@ -72,6 +72,42 @@ #define DMAR_PEDATA_REG 0xe4 /* Page request event interrupt data register */ #define DMAR_PEADDR_REG 0xe8 /* Page request event interrupt addr register */ #define DMAR_PEUADDR_REG 0xec /* Page request event Upper address register */ +#define DMAR_MTRRCAP_REG 0x100 /* MTRR capability register */ +#define DMAR_MTRRDEF_REG 0x108 /* MTRR default type register */ +#define DMAR_MTRR_FIX64K_00000_REG 0x120 /* MTRR Fixed range registers */ +#define DMAR_MTRR_FIX16K_80000_REG 0x128 +#define DMAR_MTRR_FIX16K_A0000_REG 0x130 +#define DMAR_MTRR_FIX4K_C0000_REG 0x138 +#define DMAR_MTRR_FIX4K_C8000_REG 0x140 +#define DMAR_MTRR_FIX4K_D0000_REG 0x148 +#define DMAR_MTRR_FIX4K_D8000_REG 0x150 +#define DMAR_MTRR_FIX4K_E0000_REG 0x158 +#define DMAR_MTRR_FIX4K_E8000_REG 0x160 +#define DMAR_MTRR_FIX4K_F0000_REG 0x168 +#define DMAR_MTRR_FIX4K_F8000_REG 0x170 +#define DMAR_MTRR_PHYSBASE0_REG 0x180 /* MTRR Variable range registers */ +#define DMAR_MTRR_PHYSMASK0_REG 0x188 +#define DMAR_MTRR_PHYSBASE1_REG 0x190 +#define DMAR_MTRR_PHYSMASK1_REG 0x198 +#define DMAR_MTRR_PHYSBASE2_REG 0x1a0 +#define DMAR_MTRR_PHYSMASK2_REG 0x1a8 +#define DMAR_MTRR_PHYSBASE3_REG 0x1b0 +#define DMAR_MTRR_PHYSMASK3_REG 0x1b8 +#define DMAR_MTRR_PHYSBASE4_REG 0x1c0 +#define DMAR_MTRR_PHYSMASK4_REG 0x1c8 +#define DMAR_MTRR_PHYSBASE5_REG 0x1d0 +#define DMAR_MTRR_PHYSMASK5_REG 0x1d8 +#define DMAR_MTRR_PHYSBASE6_REG 0x1e0 +#define DMAR_MTRR_PHYSMASK6_REG 0x1e8 +#define DMAR_MTRR_PHYSBASE7_REG 0x1f0 +#define DMAR_MTRR_PHYSMASK7_REG 0x1f8 +#define DMAR_MTRR_PHYSBASE8_REG 0x200 +#define DMAR_MTRR_PHYSMASK8_REG 0x208 +#define DMAR_MTRR_PHYSBASE9_REG 0x210 +#define DMAR_MTRR_PHYSMASK9_REG 0x218 +#define DMAR_VCCAP_REG 0xe00 /* Virtual command capability register */ +#define DMAR_VCMD_REG 0xe10 /* Virtual command register */ +#define DMAR_VCRSP_REG 0xe20 /* Virtual command response register */ #define OFFSET_STRIDE (9) @@ -389,6 +425,33 @@ struct pasid_entry; struct pasid_state_entry; struct page_req_dsc; +/* + * 0: Present + * 1-11: Reserved + * 12-63: Context Ptr (12 - (haw-1)) + * 64-127: Reserved + */ +struct root_entry { + u64 lo; + u64 hi; +}; + +/* + * low 64 bits: + * 0: present + * 1: fault processing disable + * 2-3: translation type + * 12-63: address space root + * high 64 bits: + * 0-2: address width + * 3-6: aval + * 8-23: domain id + */ +struct context_entry { + u64 lo; + u64 hi; +}; + struct dmar_domain { int nid; /* node id */ @@ -558,6 +621,15 @@ extern int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct intel_svm_ extern struct intel_iommu *intel_svm_device_to_iommu(struct device *dev); #endif +#ifdef CONFIG_INTEL_IOMMU_DEBUGFS +void intel_iommu_debugfs_init(void); +#else +static inline void intel_iommu_debugfs_init(void) {} +#endif /* CONFIG_INTEL_IOMMU_DEBUGFS */ + extern const struct attribute_group *intel_iommu_groups[]; +bool context_present(struct context_entry *context); +struct context_entry *iommu_context_addr(struct intel_iommu *iommu, u8 bus, + u8 devfn, int alloc); #endif diff --git 
a/include/linux/interrupt.h b/include/linux/interrupt.h index eeceac3376fc..1d6711c28271 100644 --- a/include/linux/interrupt.h +++ b/include/linux/interrupt.h @@ -45,7 +45,7 @@ * IRQF_PERCPU - Interrupt is per cpu * IRQF_NOBALANCING - Flag to exclude this interrupt from irq balancing * IRQF_IRQPOLL - Interrupt is used for polling (only the interrupt that is - * registered first in an shared interrupt is considered for + * registered first in a shared interrupt is considered for * performance reasons) * IRQF_ONESHOT - Interrupt is not reenabled after the hardirq handler finished. * Used by threaded interrupts which need to keep the diff --git a/include/linux/iomap.h b/include/linux/iomap.h index 3555d54bf79a..9a4258154b25 100644 --- a/include/linux/iomap.h +++ b/include/linux/iomap.h @@ -6,6 +6,7 @@ #include <linux/bitmap.h> #include <linux/mm.h> #include <linux/types.h> +#include <linux/mm_types.h> struct address_space; struct fiemap_extent_info; @@ -141,7 +142,8 @@ int iomap_zero_range(struct inode *inode, loff_t pos, loff_t len, bool *did_zero, const struct iomap_ops *ops); int iomap_truncate_page(struct inode *inode, loff_t pos, bool *did_zero, const struct iomap_ops *ops); -int iomap_page_mkwrite(struct vm_fault *vmf, const struct iomap_ops *ops); +vm_fault_t iomap_page_mkwrite(struct vm_fault *vmf, + const struct iomap_ops *ops); int iomap_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, loff_t start, loff_t len, const struct iomap_ops *ops); loff_t iomap_seek_hole(struct inode *inode, loff_t offset, diff --git a/include/linux/iommu.h b/include/linux/iommu.h index 87994c265bf5..a1d28f42cb77 100644 --- a/include/linux/iommu.h +++ b/include/linux/iommu.h @@ -124,6 +124,7 @@ enum iommu_attr { DOMAIN_ATTR_FSL_PAMU_ENABLE, DOMAIN_ATTR_FSL_PAMUV1, DOMAIN_ATTR_NESTING, /* two stages of translation */ + DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE, DOMAIN_ATTR_MAX, }; @@ -181,8 +182,6 @@ struct iommu_resv_region { * @apply_resv_region: Temporary helper call-back for iova reserved ranges * @domain_window_enable: Configure and enable a particular window for a domain * @domain_window_disable: Disable a particular window for a domain - * @domain_set_windows: Set the number of windows for a domain - * @domain_get_windows: Return the number of windows for a domain * @of_xlate: add OF master IDs to iommu grouping * @pgsize_bitmap: bitmap of all possible supported page sizes */ @@ -223,10 +222,6 @@ struct iommu_ops { int (*domain_window_enable)(struct iommu_domain *domain, u32 wnd_nr, phys_addr_t paddr, u64 size, int prot); void (*domain_window_disable)(struct iommu_domain *domain, u32 wnd_nr); - /* Set the number of windows per domain */ - int (*domain_set_windows)(struct iommu_domain *domain, u32 w_count); - /* Get the number of windows per domain */ - u32 (*domain_get_windows)(struct iommu_domain *domain); int (*of_xlate)(struct device *dev, struct of_phandle_args *args); bool (*is_attach_deferred)(struct iommu_domain *domain, struct device *dev); @@ -293,6 +288,7 @@ extern int iommu_attach_device(struct iommu_domain *domain, extern void iommu_detach_device(struct iommu_domain *domain, struct device *dev); extern struct iommu_domain *iommu_get_domain_for_dev(struct device *dev); +extern struct iommu_domain *iommu_get_dma_domain(struct device *dev); extern int iommu_map(struct iommu_domain *domain, unsigned long iova, phys_addr_t paddr, size_t size, int prot); extern size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova, @@ -377,6 +373,8 @@ static inline void iommu_tlb_sync(struct 
iommu_domain *domain) extern struct iommu_group *pci_device_group(struct device *dev); /* Generic device grouping function */ extern struct iommu_group *generic_device_group(struct device *dev); +/* FSL-MC device grouping function */ +struct iommu_group *fsl_mc_device_group(struct device *dev); /** * struct iommu_fwspec - per-device IOMMU instance data diff --git a/include/linux/iova.h b/include/linux/iova.h index 928442dda565..0b93bf96693e 100644 --- a/include/linux/iova.h +++ b/include/linux/iova.h @@ -75,6 +75,7 @@ struct iova_domain { unsigned long granule; /* pfn granularity for this domain */ unsigned long start_pfn; /* Lower limit for this domain */ unsigned long dma_32bit_pfn; + unsigned long max32_alloc_size; /* Size of last failed allocation */ struct iova anchor; /* rbtree lookup anchor */ struct iova_rcache rcaches[IOVA_RANGE_CACHE_MAX_SIZE]; /* IOVA range caches */ diff --git a/include/linux/ipmi.h b/include/linux/ipmi.h index 41f5c086f670..ef61676cfe05 100644 --- a/include/linux/ipmi.h +++ b/include/linux/ipmi.h @@ -27,7 +27,7 @@ struct device; * Opaque type for a IPMI message user. One of these is needed to * send and receive messages. */ -typedef struct ipmi_user *ipmi_user_t; +struct ipmi_user; /* * Stuff coming from the receive interface comes as one of these. diff --git a/include/linux/ipmi_smi.h b/include/linux/ipmi_smi.h index 7d5fd38d5282..8c4e2ab696c3 100644 --- a/include/linux/ipmi_smi.h +++ b/include/linux/ipmi_smi.h @@ -28,7 +28,7 @@ struct device; */ /* Structure for the low-level drivers. */ -typedef struct ipmi_smi *ipmi_smi_t; +struct ipmi_smi; /* * Messages to/from the lower layer. The smi interface will take one diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h index 8415bf1a9776..495e834c1367 100644 --- a/include/linux/ipv6.h +++ b/include/linux/ipv6.h @@ -274,7 +274,8 @@ struct ipv6_pinfo { */ dontfrag:1, autoflowlabel:1, - autoflowlabel_set:1; + autoflowlabel_set:1, + mc_all:1; __u8 min_hopcount; __u8 tclass; __be32 rcv_flowinfo; diff --git a/include/linux/irq.h b/include/linux/irq.h index 201de12a9957..c9bffda04a45 100644 --- a/include/linux/irq.h +++ b/include/linux/irq.h @@ -1151,7 +1151,8 @@ void irq_matrix_offline(struct irq_matrix *m); void irq_matrix_assign_system(struct irq_matrix *m, unsigned int bit, bool replace); int irq_matrix_reserve_managed(struct irq_matrix *m, const struct cpumask *msk); void irq_matrix_remove_managed(struct irq_matrix *m, const struct cpumask *msk); -int irq_matrix_alloc_managed(struct irq_matrix *m, unsigned int cpu); +int irq_matrix_alloc_managed(struct irq_matrix *m, const struct cpumask *msk, + unsigned int *mapped_cpu); void irq_matrix_reserve(struct irq_matrix *m); void irq_matrix_remove_reserved(struct irq_matrix *m); int irq_matrix_alloc(struct irq_matrix *m, const struct cpumask *msk, diff --git a/include/linux/irqchip/arm-gic-common.h b/include/linux/irqchip/arm-gic-common.h index 0a83b4379f34..9a1a479a2bf4 100644 --- a/include/linux/irqchip/arm-gic-common.h +++ b/include/linux/irqchip/arm-gic-common.h @@ -13,6 +13,12 @@ #include <linux/types.h> #include <linux/ioport.h> +#define GICD_INT_DEF_PRI 0xa0 +#define GICD_INT_DEF_PRI_X4 ((GICD_INT_DEF_PRI << 24) |\ + (GICD_INT_DEF_PRI << 16) |\ + (GICD_INT_DEF_PRI << 8) |\ + GICD_INT_DEF_PRI) + enum gic_type { GIC_V2, GIC_V3, diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h index 8bdbb5f29494..071b4cbdf010 100644 --- a/include/linux/irqchip/arm-gic-v3.h +++ b/include/linux/irqchip/arm-gic-v3.h @@ -357,6 +357,8 @@ #define 
GITS_CBASER_RaWaWt GIC_BASER_CACHEABILITY(GITS_CBASER, INNER, RaWaWt) #define GITS_CBASER_RaWaWb GIC_BASER_CACHEABILITY(GITS_CBASER, INNER, RaWaWb) +#define GITS_CBASER_ADDRESS(cbaser) ((cbaser) & GENMASK_ULL(51, 12)) + #define GITS_BASER_NR_REGS 8 #define GITS_BASER_VALID (1ULL << 63) @@ -388,6 +390,9 @@ #define GITS_BASER_ENTRY_SIZE_MASK GENMASK_ULL(52, 48) #define GITS_BASER_PHYS_52_to_48(phys) \ (((phys) & GENMASK_ULL(47, 16)) | (((phys) >> 48) & 0xf) << 12) +#define GITS_BASER_ADDR_48_to_52(baser) \ + (((baser) & GENMASK_ULL(47, 16)) | (((baser) >> 12) & 0xf) << 48) + #define GITS_BASER_SHAREABILITY_SHIFT (10) #define GITS_BASER_InnerShareable \ GIC_BASER_SHAREABILITY(GITS_BASER, InnerShareable) @@ -585,8 +590,10 @@ struct rdists { void __iomem *rd_base; struct page *pend_page; phys_addr_t phys_base; + bool lpi_enabled; } __percpu *rdist; - struct page *prop_page; + phys_addr_t prop_table_pa; + void *prop_table_va; u64 flags; u32 gicd_typer; bool has_vlpis; diff --git a/include/linux/irqchip/arm-gic.h b/include/linux/irqchip/arm-gic.h index 6c4aaf04046c..626179077bb0 100644 --- a/include/linux/irqchip/arm-gic.h +++ b/include/linux/irqchip/arm-gic.h @@ -65,11 +65,6 @@ #define GICD_INT_EN_CLR_X32 0xffffffff #define GICD_INT_EN_SET_SGI 0x0000ffff #define GICD_INT_EN_CLR_PPI 0xffff0000 -#define GICD_INT_DEF_PRI 0xa0 -#define GICD_INT_DEF_PRI_X4 ((GICD_INT_DEF_PRI << 24) |\ - (GICD_INT_DEF_PRI << 16) |\ - (GICD_INT_DEF_PRI << 8) |\ - GICD_INT_DEF_PRI) #define GICD_IIDR_IMPLEMENTER_SHIFT 0 #define GICD_IIDR_IMPLEMENTER_MASK (0xfff << GICD_IIDR_IMPLEMENTER_SHIFT) diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h index dccfa65aee96..068aa46f0d55 100644 --- a/include/linux/irqdomain.h +++ b/include/linux/irqdomain.h @@ -75,6 +75,7 @@ struct irq_fwspec { enum irq_domain_bus_token { DOMAIN_BUS_ANY = 0, DOMAIN_BUS_WIRED, + DOMAIN_BUS_GENERIC_MSI, DOMAIN_BUS_PCI_MSI, DOMAIN_BUS_PLATFORM_MSI, DOMAIN_BUS_NEXUS, diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h index 1a0b6f17a5d6..5df6a621e464 100644 --- a/include/linux/jump_label.h +++ b/include/linux/jump_label.h @@ -119,6 +119,68 @@ struct static_key { #ifdef HAVE_JUMP_LABEL #include <asm/jump_label.h> + +#ifndef __ASSEMBLY__ +#ifdef CONFIG_HAVE_ARCH_JUMP_LABEL_RELATIVE + +struct jump_entry { + s32 code; + s32 target; + long key; // key may be far away from the core kernel under KASLR +}; + +static inline unsigned long jump_entry_code(const struct jump_entry *entry) +{ + return (unsigned long)&entry->code + entry->code; +} + +static inline unsigned long jump_entry_target(const struct jump_entry *entry) +{ + return (unsigned long)&entry->target + entry->target; +} + +static inline struct static_key *jump_entry_key(const struct jump_entry *entry) +{ + long offset = entry->key & ~3L; + + return (struct static_key *)((unsigned long)&entry->key + offset); +} + +#else + +static inline unsigned long jump_entry_code(const struct jump_entry *entry) +{ + return entry->code; +} + +static inline unsigned long jump_entry_target(const struct jump_entry *entry) +{ + return entry->target; +} + +static inline struct static_key *jump_entry_key(const struct jump_entry *entry) +{ + return (struct static_key *)((unsigned long)entry->key & ~3UL); +} + +#endif + +static inline bool jump_entry_is_branch(const struct jump_entry *entry) +{ + return (unsigned long)entry->key & 1UL; +} + +static inline bool jump_entry_is_init(const struct jump_entry *entry) +{ + return (unsigned long)entry->key & 2UL; +} + +static inline void 
jump_entry_set_init(struct jump_entry *entry) +{ + entry->key |= 2; +} + +#endif #endif #ifndef __ASSEMBLY__ @@ -151,7 +213,6 @@ extern struct jump_entry __start___jump_table[]; extern struct jump_entry __stop___jump_table[]; extern void jump_label_init(void); -extern void jump_label_invalidate_initmem(void); extern void jump_label_lock(void); extern void jump_label_unlock(void); extern void arch_jump_label_transform(struct jump_entry *entry, @@ -199,8 +260,6 @@ static __always_inline void jump_label_init(void) static_key_initialized = true; } -static inline void jump_label_invalidate_initmem(void) {} - static __always_inline bool static_key_false(struct static_key *key) { if (unlikely(static_key_count(key) > 0)) diff --git a/include/linux/kernfs.h b/include/linux/kernfs.h index 814643f7ee52..5b36b1287a5a 100644 --- a/include/linux/kernfs.h +++ b/include/linux/kernfs.h @@ -477,10 +477,11 @@ static inline void kernfs_init(void) { } * @buf: buffer to copy @kn's name into * @buflen: size of @buf * - * Builds and returns the full path of @kn in @buf of @buflen bytes. The - * path is built from the end of @buf so the returned pointer usually - * doesn't match @buf. If @buf isn't long enough, @buf is nul terminated - * and %NULL is returned. + * If @kn is NULL result will be "(null)". + * + * Returns the length of the full path. If the full length is equal to or + * greater than @buflen, @buf contains the truncated path with the trailing + * '\0'. On error, -errno is returned. */ static inline int kernfs_path(struct kernfs_node *kn, char *buf, size_t buflen) { diff --git a/include/linux/leds.h b/include/linux/leds.h index 834683d603f9..7393a316d9fa 100644 --- a/include/linux/leds.h +++ b/include/linux/leds.h @@ -22,6 +22,7 @@ #include <linux/workqueue.h> struct device; +struct led_pattern; /* * LED Core */ @@ -88,6 +89,10 @@ struct led_classdev { unsigned long *delay_on, unsigned long *delay_off); + int (*pattern_set)(struct led_classdev *led_cdev, + struct led_pattern *pattern, u32 len, int repeat); + int (*pattern_clear)(struct led_classdev *led_cdev); + struct device *dev; const struct attribute_group **groups; @@ -472,4 +477,14 @@ static inline void led_classdev_notify_brightness_hw_changed( struct led_classdev *led_cdev, enum led_brightness brightness) { } #endif +/** + * struct led_pattern - pattern interval settings + * @delta_t: pattern interval delay, in milliseconds + * @brightness: pattern interval brightness + */ +struct led_pattern { + u32 delta_t; + int brightness; +}; + #endif /* __LINUX_LEDS_H_INCLUDED */ diff --git a/include/linux/libfdt_env.h b/include/linux/libfdt_env.h index c6ac1fe7ec68..edb0f0c30904 100644 --- a/include/linux/libfdt_env.h +++ b/include/linux/libfdt_env.h @@ -2,6 +2,7 @@ #ifndef LIBFDT_ENV_H #define LIBFDT_ENV_H +#include <linux/kernel.h> /* For INT_MAX */ #include <linux/string.h> #include <asm/byteorder.h> diff --git a/include/linux/lightnvm.h b/include/linux/lightnvm.h index e9e0d1c7eaf5..2fdeac1a420d 100644 --- a/include/linux/lightnvm.h +++ b/include/linux/lightnvm.h @@ -86,8 +86,8 @@ struct nvm_chk_meta; typedef int (nvm_id_fn)(struct nvm_dev *); typedef int (nvm_op_bb_tbl_fn)(struct nvm_dev *, struct ppa_addr, u8 *); typedef int (nvm_op_set_bb_fn)(struct nvm_dev *, struct ppa_addr *, int, int); -typedef int (nvm_get_chk_meta_fn)(struct nvm_dev *, struct nvm_chk_meta *, - sector_t, int); +typedef int (nvm_get_chk_meta_fn)(struct nvm_dev *, sector_t, int, + struct nvm_chk_meta *); typedef int (nvm_submit_io_fn)(struct nvm_dev *, struct nvm_rq *); 
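For the lightnvm callback change just above: nvm_get_chk_meta_fn now takes the starting sector and the chunk count ahead of the destination buffer. A hypothetical driver-side callback matching the reworked prototype might look roughly like the sketch below; demo_get_chk_meta is a made-up name used only for illustration, not an in-tree symbol.

/*
 * Hedged sketch of a callback matching the new prototype:
 * int (nvm_get_chk_meta_fn)(struct nvm_dev *, sector_t, int,
 *			      struct nvm_chk_meta *);
 */
static int demo_get_chk_meta(struct nvm_dev *dev, sector_t slba,
			     int nchks, struct nvm_chk_meta *meta)
{
	/* Fill meta[0..nchks-1] for the nchks chunks starting at slba. */
	return 0;
}
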
typedef int (nvm_submit_io_sync_fn)(struct nvm_dev *, struct nvm_rq *); typedef void *(nvm_create_dma_pool_fn)(struct nvm_dev *, char *); @@ -305,6 +305,8 @@ struct nvm_rq { u64 ppa_status; /* ppa media status */ int error; + int is_seq; /* Sequential hint flag. 1.2 only */ + void *private; }; @@ -318,6 +320,11 @@ static inline void *nvm_rq_to_pdu(struct nvm_rq *rqdata) return rqdata + 1; } +static inline struct ppa_addr *nvm_rq_to_ppa_list(struct nvm_rq *rqd) +{ + return (rqd->nr_ppas > 1) ? rqd->ppa_list : &rqd->ppa_addr; +} + enum { NVM_BLK_ST_FREE = 0x1, /* Free block */ NVM_BLK_ST_TGT = 0x2, /* Block in use by target */ @@ -485,6 +492,144 @@ static inline struct ppa_addr dev_to_generic_addr(struct nvm_dev *dev, return l; } +static inline u64 dev_to_chunk_addr(struct nvm_dev *dev, void *addrf, + struct ppa_addr p) +{ + struct nvm_geo *geo = &dev->geo; + u64 caddr; + + if (geo->version == NVM_OCSSD_SPEC_12) { + struct nvm_addrf_12 *ppaf = (struct nvm_addrf_12 *)addrf; + + caddr = (u64)p.g.pg << ppaf->pg_offset; + caddr |= (u64)p.g.pl << ppaf->pln_offset; + caddr |= (u64)p.g.sec << ppaf->sec_offset; + } else { + caddr = p.m.sec; + } + + return caddr; +} + +static inline struct ppa_addr nvm_ppa32_to_ppa64(struct nvm_dev *dev, + void *addrf, u32 ppa32) +{ + struct ppa_addr ppa64; + + ppa64.ppa = 0; + + if (ppa32 == -1) { + ppa64.ppa = ADDR_EMPTY; + } else if (ppa32 & (1U << 31)) { + ppa64.c.line = ppa32 & ((~0U) >> 1); + ppa64.c.is_cached = 1; + } else { + struct nvm_geo *geo = &dev->geo; + + if (geo->version == NVM_OCSSD_SPEC_12) { + struct nvm_addrf_12 *ppaf = addrf; + + ppa64.g.ch = (ppa32 & ppaf->ch_mask) >> + ppaf->ch_offset; + ppa64.g.lun = (ppa32 & ppaf->lun_mask) >> + ppaf->lun_offset; + ppa64.g.blk = (ppa32 & ppaf->blk_mask) >> + ppaf->blk_offset; + ppa64.g.pg = (ppa32 & ppaf->pg_mask) >> + ppaf->pg_offset; + ppa64.g.pl = (ppa32 & ppaf->pln_mask) >> + ppaf->pln_offset; + ppa64.g.sec = (ppa32 & ppaf->sec_mask) >> + ppaf->sec_offset; + } else { + struct nvm_addrf *lbaf = addrf; + + ppa64.m.grp = (ppa32 & lbaf->ch_mask) >> + lbaf->ch_offset; + ppa64.m.pu = (ppa32 & lbaf->lun_mask) >> + lbaf->lun_offset; + ppa64.m.chk = (ppa32 & lbaf->chk_mask) >> + lbaf->chk_offset; + ppa64.m.sec = (ppa32 & lbaf->sec_mask) >> + lbaf->sec_offset; + } + } + + return ppa64; +} + +static inline u32 nvm_ppa64_to_ppa32(struct nvm_dev *dev, + void *addrf, struct ppa_addr ppa64) +{ + u32 ppa32 = 0; + + if (ppa64.ppa == ADDR_EMPTY) { + ppa32 = ~0U; + } else if (ppa64.c.is_cached) { + ppa32 |= ppa64.c.line; + ppa32 |= 1U << 31; + } else { + struct nvm_geo *geo = &dev->geo; + + if (geo->version == NVM_OCSSD_SPEC_12) { + struct nvm_addrf_12 *ppaf = addrf; + + ppa32 |= ppa64.g.ch << ppaf->ch_offset; + ppa32 |= ppa64.g.lun << ppaf->lun_offset; + ppa32 |= ppa64.g.blk << ppaf->blk_offset; + ppa32 |= ppa64.g.pg << ppaf->pg_offset; + ppa32 |= ppa64.g.pl << ppaf->pln_offset; + ppa32 |= ppa64.g.sec << ppaf->sec_offset; + } else { + struct nvm_addrf *lbaf = addrf; + + ppa32 |= ppa64.m.grp << lbaf->ch_offset; + ppa32 |= ppa64.m.pu << lbaf->lun_offset; + ppa32 |= ppa64.m.chk << lbaf->chk_offset; + ppa32 |= ppa64.m.sec << lbaf->sec_offset; + } + } + + return ppa32; +} + +static inline int nvm_next_ppa_in_chk(struct nvm_tgt_dev *dev, + struct ppa_addr *ppa) +{ + struct nvm_geo *geo = &dev->geo; + int last = 0; + + if (geo->version == NVM_OCSSD_SPEC_12) { + int sec = ppa->g.sec; + + sec++; + if (sec == geo->ws_min) { + int pg = ppa->g.pg; + + sec = 0; + pg++; + if (pg == geo->num_pg) { + int pl = ppa->g.pl; + + pg = 0; + 
pl++; + if (pl == geo->num_pln) + last = 1; + + ppa->g.pl = pl; + } + ppa->g.pg = pg; + } + ppa->g.sec = sec; + } else { + ppa->m.sec++; + if (ppa->m.sec == geo->clba) + last = 1; + } + + return last; +} + typedef blk_qc_t (nvm_tgt_make_rq_fn)(struct request_queue *, struct bio *); typedef sector_t (nvm_tgt_capacity_fn)(void *); typedef void *(nvm_tgt_init_fn)(struct nvm_tgt_dev *, struct gendisk *, @@ -493,9 +638,15 @@ typedef void (nvm_tgt_exit_fn)(void *, bool); typedef int (nvm_tgt_sysfs_init_fn)(struct gendisk *); typedef void (nvm_tgt_sysfs_exit_fn)(struct gendisk *); +enum { + NVM_TGT_F_DEV_L2P = 0, + NVM_TGT_F_HOST_L2P = 1 << 0, +}; + struct nvm_tgt_type { const char *name; unsigned int version[3]; + int flags; /* target entry points */ nvm_tgt_make_rq_fn *make_rq; @@ -524,18 +675,13 @@ extern struct nvm_dev *nvm_alloc_dev(int); extern int nvm_register(struct nvm_dev *); extern void nvm_unregister(struct nvm_dev *); - -extern int nvm_get_chunk_meta(struct nvm_tgt_dev *tgt_dev, - struct nvm_chk_meta *meta, struct ppa_addr ppa, - int nchks); - -extern int nvm_set_tgt_bb_tbl(struct nvm_tgt_dev *, struct ppa_addr *, +extern int nvm_get_chunk_meta(struct nvm_tgt_dev *, struct ppa_addr, + int, struct nvm_chk_meta *); +extern int nvm_set_chunk_meta(struct nvm_tgt_dev *, struct ppa_addr *, int, int); extern int nvm_submit_io(struct nvm_tgt_dev *, struct nvm_rq *); extern int nvm_submit_io_sync(struct nvm_tgt_dev *, struct nvm_rq *); extern void nvm_end_io(struct nvm_rq *); -extern int nvm_bb_tbl_fold(struct nvm_dev *, u8 *, int); -extern int nvm_get_tgt_bb_tbl(struct nvm_tgt_dev *, struct ppa_addr, u8 *); #else /* CONFIG_NVM */ struct nvm_dev_ops; diff --git a/include/linux/linkage.h b/include/linux/linkage.h index d7618c41f74c..7c47b1a471d4 100644 --- a/include/linux/linkage.h +++ b/include/linux/linkage.h @@ -90,6 +90,7 @@ #ifndef WEAK #define WEAK(name) \ .weak name ASM_NL \ + ALIGN ASM_NL \ name: #endif diff --git a/include/linux/linkmode.h b/include/linux/linkmode.h new file mode 100644 index 000000000000..22443d7fb5cd --- /dev/null +++ b/include/linux/linkmode.h @@ -0,0 +1,76 @@ +#ifndef __LINKMODE_H +#define __LINKMODE_H + +#include <linux/bitmap.h> +#include <linux/ethtool.h> +#include <uapi/linux/ethtool.h> + +static inline void linkmode_zero(unsigned long *dst) +{ + bitmap_zero(dst, __ETHTOOL_LINK_MODE_MASK_NBITS); +} + +static inline void linkmode_copy(unsigned long *dst, const unsigned long *src) +{ + bitmap_copy(dst, src, __ETHTOOL_LINK_MODE_MASK_NBITS); +} + +static inline void linkmode_and(unsigned long *dst, const unsigned long *a, + const unsigned long *b) +{ + bitmap_and(dst, a, b, __ETHTOOL_LINK_MODE_MASK_NBITS); +} + +static inline void linkmode_or(unsigned long *dst, const unsigned long *a, + const unsigned long *b) +{ + bitmap_or(dst, a, b, __ETHTOOL_LINK_MODE_MASK_NBITS); +} + +static inline bool linkmode_empty(const unsigned long *src) +{ + return bitmap_empty(src, __ETHTOOL_LINK_MODE_MASK_NBITS); +} + +static inline int linkmode_andnot(unsigned long *dst, const unsigned long *src1, + const unsigned long *src2) +{ + return bitmap_andnot(dst, src1, src2, __ETHTOOL_LINK_MODE_MASK_NBITS); +} + +static inline void linkmode_set_bit(int nr, volatile unsigned long *addr) +{ + __set_bit(nr, addr); +} + +static inline void linkmode_set_bit_array(const int *array, int array_size, + unsigned long *addr) +{ + int i; + + for (i = 0; i < array_size; i++) + linkmode_set_bit(array[i], addr); +} + +static inline void linkmode_clear_bit(int nr, volatile unsigned long *addr) +{ + 
__clear_bit(nr, addr); +} + +static inline void linkmode_change_bit(int nr, volatile unsigned long *addr) +{ + __change_bit(nr, addr); +} + +static inline int linkmode_test_bit(int nr, volatile unsigned long *addr) +{ + return test_bit(nr, addr); +} + +static inline int linkmode_equal(const unsigned long *src1, + const unsigned long *src2) +{ + return bitmap_equal(src1, src2, __ETHTOOL_LINK_MODE_MASK_NBITS); +} + +#endif /* __LINKMODE_H */ diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h index b0d0b51c4d85..1fd82ff99c65 100644 --- a/include/linux/lockdep.h +++ b/include/linux/lockdep.h @@ -99,13 +99,8 @@ struct lock_class { */ unsigned int version; - /* - * Statistics counter: - */ - unsigned long ops; - - const char *name; int name_version; + const char *name; #ifdef CONFIG_LOCK_STAT unsigned long contention_point[LOCKSTAT_POINTS]; diff --git a/include/linux/lsm_hooks.h b/include/linux/lsm_hooks.h index 97a020c616ad..aaeb7fa24dc4 100644 --- a/include/linux/lsm_hooks.h +++ b/include/linux/lsm_hooks.h @@ -672,7 +672,7 @@ * Return 0 if permission is granted. * @task_kill: * Check permission before sending signal @sig to @p. @info can be NULL, - * the constant 1, or a pointer to a siginfo structure. If @info is 1 or + * the constant 1, or a pointer to a kernel_siginfo structure. If @info is 1 or * SI_FROMKERNEL(info) is true, then the signal should be viewed as coming * from the kernel and should typically be permitted. * SIGIO signals are handled separately by the send_sigiotask hook in @@ -1606,7 +1606,7 @@ union security_list_options { int (*task_setscheduler)(struct task_struct *p); int (*task_getscheduler)(struct task_struct *p); int (*task_movememory)(struct task_struct *p); - int (*task_kill)(struct task_struct *p, struct siginfo *info, + int (*task_kill)(struct task_struct *p, struct kernel_siginfo *info, int sig, const struct cred *cred); int (*task_prctl)(int option, unsigned long arg2, unsigned long arg3, unsigned long arg4, unsigned long arg5); @@ -2039,6 +2039,18 @@ extern char *lsm_names; extern void security_add_hooks(struct security_hook_list *hooks, int count, char *lsm); +struct lsm_info { + const char *name; /* Required. */ + int (*init)(void); /* Required. 
*/ +}; + +extern struct lsm_info __start_lsm_info[], __end_lsm_info[]; + +#define DEFINE_LSM(lsm) \ + static struct lsm_info __lsm_##lsm \ + __used __section(.lsm_info.init) \ + __aligned(sizeof(unsigned long)) + #ifdef CONFIG_SECURITY_SELINUX_DISABLE /* * Assuring the safety of deleting a security module is up to diff --git a/include/linux/math64.h b/include/linux/math64.h index 837f2f2d1d34..bb2c84afb80c 100644 --- a/include/linux/math64.h +++ b/include/linux/math64.h @@ -281,4 +281,7 @@ static inline u64 mul_u64_u32_div(u64 a, u32 mul, u32 divisor) } #endif /* mul_u64_u32_div */ +#define DIV64_U64_ROUND_UP(ll, d) \ + ({ u64 _tmp = (d); div64_u64((ll) + _tmp - 1, _tmp); }) + #endif /* _LINUX_MATH64_H */ diff --git a/include/linux/memblock.h b/include/linux/memblock.h index 516920549378..2acdd046df2d 100644 --- a/include/linux/memblock.h +++ b/include/linux/memblock.h @@ -265,21 +265,6 @@ void __next_mem_pfn_range(int *idx, int nid, unsigned long *out_start_pfn, for_each_mem_range_rev(i, &memblock.memory, &memblock.reserved, \ nid, flags, p_start, p_end, p_nid) -/** - * for_each_resv_unavail_range - iterate through reserved and unavailable memory - * @i: u64 used as loop variable - * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL - * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL - * - * Walks over unavailable but reserved (reserved && !memory) areas of memblock. - * Available as soon as memblock is initialized. - * Note: because this memory does not belong to any physical node, flags and - * nid arguments do not make sense and thus not exported as arguments. - */ -#define for_each_resv_unavail_range(i, p_start, p_end) \ - for_each_mem_range(i, &memblock.reserved, &memblock.memory, \ - NUMA_NO_NODE, MEMBLOCK_NONE, p_start, p_end, NULL) - static inline void memblock_set_region_flags(struct memblock_region *r, enum memblock_flags flags) { diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index 652f602167df..7ab2120155a4 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -78,7 +78,7 @@ struct mem_cgroup_reclaim_cookie { struct mem_cgroup_id { int id; - atomic_t ref; + refcount_t ref; }; /* @@ -1268,10 +1268,11 @@ struct kmem_cache *memcg_kmem_get_cache(struct kmem_cache *cachep); void memcg_kmem_put_cache(struct kmem_cache *cachep); int memcg_kmem_charge_memcg(struct page *page, gfp_t gfp, int order, struct mem_cgroup *memcg); + +#ifdef CONFIG_MEMCG_KMEM int memcg_kmem_charge(struct page *page, gfp_t gfp, int order); void memcg_kmem_uncharge(struct page *page, int order); -#ifdef CONFIG_MEMCG_KMEM extern struct static_key_false memcg_kmem_enabled_key; extern struct workqueue_struct *memcg_kmem_cache_wq; @@ -1307,6 +1308,16 @@ extern int memcg_expand_shrinker_maps(int new_id); extern void memcg_set_shrinker_bit(struct mem_cgroup *memcg, int nid, int shrinker_id); #else + +static inline int memcg_kmem_charge(struct page *page, gfp_t gfp, int order) +{ + return 0; +} + +static inline void memcg_kmem_uncharge(struct page *page, int order) +{ +} + #define for_each_memcg_cache_index(_idx) \ for (; NULL; ) diff --git a/include/linux/memremap.h b/include/linux/memremap.h index f91f9e763557..0ac69ddf5fc4 100644 --- a/include/linux/memremap.h +++ b/include/linux/memremap.h @@ -53,11 +53,16 @@ struct vmem_altmap { * wakeup event whenever a page is unpinned and becomes idle. This * wakeup is used to coordinate physical address space management (ex: * fs truncate/hole punch) vs pinned pages (ex: device dma). 
+ * + * MEMORY_DEVICE_PCI_P2PDMA: + * Device memory residing in a PCI BAR intended for use with Peer-to-Peer + * transactions. */ enum memory_type { MEMORY_DEVICE_PRIVATE = 1, MEMORY_DEVICE_PUBLIC, MEMORY_DEVICE_FS_DAX, + MEMORY_DEVICE_PCI_P2PDMA, }; /* @@ -120,6 +125,7 @@ struct dev_pagemap { struct device *dev; void *data; enum memory_type type; + u64 pci_p2pdma_bus_offset; }; #ifdef CONFIG_ZONE_DEVICE diff --git a/include/linux/mfd/cros_ec_commands.h b/include/linux/mfd/cros_ec_commands.h index 6e1ab9bead28..5fd0e429f472 100644 --- a/include/linux/mfd/cros_ec_commands.h +++ b/include/linux/mfd/cros_ec_commands.h @@ -2132,6 +2132,7 @@ struct ec_response_get_next_event_v1 { /* Switches */ #define EC_MKBP_LID_OPEN 0 #define EC_MKBP_TABLET_MODE 1 +#define EC_MKBP_BASE_ATTACHED 2 /*****************************************************************************/ /* Temperature sensor commands */ @@ -3102,6 +3103,16 @@ struct ec_params_usb_pd_info_request { uint8_t port; } __packed; +/* + * This command will return the number of USB PD charge port + the number + * of dedicated port present. + * EC_CMD_USB_PD_PORTS does NOT include the dedicated ports + */ +#define EC_CMD_CHARGE_PORT_COUNT 0x0105 +struct ec_response_charge_port_count { + uint8_t port_count; +} __packed; + /* Read USB-PD Device discovery info */ #define EC_CMD_USB_PD_DISCOVERY 0x0113 struct ec_params_usb_pd_discovery_entry { diff --git a/include/linux/mfd/ingenic-tcu.h b/include/linux/mfd/ingenic-tcu.h new file mode 100644 index 000000000000..ab16ad283def --- /dev/null +++ b/include/linux/mfd/ingenic-tcu.h @@ -0,0 +1,56 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Header file for the Ingenic JZ47xx TCU driver + */ +#ifndef __LINUX_MFD_INGENIC_TCU_H_ +#define __LINUX_MFD_INGENIC_TCU_H_ + +#include <linux/bitops.h> + +#define TCU_REG_WDT_TDR 0x00 +#define TCU_REG_WDT_TCER 0x04 +#define TCU_REG_WDT_TCNT 0x08 +#define TCU_REG_WDT_TCSR 0x0c +#define TCU_REG_TER 0x10 +#define TCU_REG_TESR 0x14 +#define TCU_REG_TECR 0x18 +#define TCU_REG_TSR 0x1c +#define TCU_REG_TFR 0x20 +#define TCU_REG_TFSR 0x24 +#define TCU_REG_TFCR 0x28 +#define TCU_REG_TSSR 0x2c +#define TCU_REG_TMR 0x30 +#define TCU_REG_TMSR 0x34 +#define TCU_REG_TMCR 0x38 +#define TCU_REG_TSCR 0x3c +#define TCU_REG_TDFR0 0x40 +#define TCU_REG_TDHR0 0x44 +#define TCU_REG_TCNT0 0x48 +#define TCU_REG_TCSR0 0x4c +#define TCU_REG_OST_DR 0xe0 +#define TCU_REG_OST_CNTL 0xe4 +#define TCU_REG_OST_CNTH 0xe8 +#define TCU_REG_OST_TCSR 0xec +#define TCU_REG_TSTR 0xf0 +#define TCU_REG_TSTSR 0xf4 +#define TCU_REG_TSTCR 0xf8 +#define TCU_REG_OST_CNTHBUF 0xfc + +#define TCU_TCSR_RESERVED_BITS 0x3f +#define TCU_TCSR_PARENT_CLOCK_MASK 0x07 +#define TCU_TCSR_PRESCALE_LSB 3 +#define TCU_TCSR_PRESCALE_MASK 0x38 + +#define TCU_TCSR_PWM_SD BIT(9) /* 0: Shutdown abruptly 1: gracefully */ +#define TCU_TCSR_PWM_INITL_HIGH BIT(8) /* Sets the initial output level */ +#define TCU_TCSR_PWM_EN BIT(7) /* PWM pin output enable */ + +#define TCU_WDT_TCER_TCEN BIT(0) /* Watchdog timer enable */ + +#define TCU_CHANNEL_STRIDE 0x10 +#define TCU_REG_TDFRc(c) (TCU_REG_TDFR0 + ((c) * TCU_CHANNEL_STRIDE)) +#define TCU_REG_TDHRc(c) (TCU_REG_TDHR0 + ((c) * TCU_CHANNEL_STRIDE)) +#define TCU_REG_TCNTc(c) (TCU_REG_TCNT0 + ((c) * TCU_CHANNEL_STRIDE)) +#define TCU_REG_TCSRc(c) (TCU_REG_TCSR0 + ((c) * TCU_CHANNEL_STRIDE)) + +#endif /* __LINUX_MFD_INGENIC_TCU_H_ */ diff --git a/include/linux/mfd/intel_msic.h b/include/linux/mfd/intel_msic.h index 439a7a617bc9..317e8608cf41 100644 --- a/include/linux/mfd/intel_msic.h +++ 
b/include/linux/mfd/intel_msic.h @@ -1,12 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0 */ /* - * include/linux/mfd/intel_msic.h - Core interface for Intel MSIC + * Core interface for Intel MSIC * * Copyright (C) 2011, Intel Corporation * Author: Mika Westerberg <mika.westerberg@linux.intel.com> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. */ #ifndef __LINUX_MFD_INTEL_MSIC_H__ diff --git a/include/linux/mfd/intel_soc_pmic.h b/include/linux/mfd/intel_soc_pmic.h index 5aacdb017a9f..ed1dfba5e5f9 100644 --- a/include/linux/mfd/intel_soc_pmic.h +++ b/include/linux/mfd/intel_soc_pmic.h @@ -1,17 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0 */ /* - * intel_soc_pmic.h - Intel SoC PMIC Driver + * Intel SoC PMIC Driver * * Copyright (C) 2012-2014 Intel Corporation. All rights reserved. * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License version - * 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * * Author: Yang, Bin <bin.yang@intel.com> * Author: Zhu, Lejun <lejun.zhu@linux.intel.com> */ @@ -25,6 +17,7 @@ struct intel_soc_pmic { int irq; struct regmap *regmap; struct regmap_irq_chip_data *irq_chip_data; + struct regmap_irq_chip_data *irq_chip_data_pwrbtn; struct regmap_irq_chip_data *irq_chip_data_tmu; struct regmap_irq_chip_data *irq_chip_data_bcu; struct regmap_irq_chip_data *irq_chip_data_adc; diff --git a/include/linux/mfd/intel_soc_pmic_bxtwc.h b/include/linux/mfd/intel_soc_pmic_bxtwc.h index 0c351bc85d2d..9be566cc58c6 100644 --- a/include/linux/mfd/intel_soc_pmic_bxtwc.h +++ b/include/linux/mfd/intel_soc_pmic_bxtwc.h @@ -1,16 +1,8 @@ +/* SPDX-License-Identifier: GPL-2.0 */ /* * Header file for Intel Broxton Whiskey Cove PMIC * * Copyright (C) 2015 Intel Corporation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
*/ #ifndef __INTEL_BXTWC_H__ diff --git a/include/linux/mfd/madera/core.h b/include/linux/mfd/madera/core.h index c332681848ef..fe69c0f4398f 100644 --- a/include/linux/mfd/madera/core.h +++ b/include/linux/mfd/madera/core.h @@ -148,6 +148,7 @@ struct snd_soc_dapm_context; * @internal_dcvdd: true if DCVDD is supplied from the internal LDO1 * @pdata: our pdata * @irq_dev: the irqchip child driver device + * @irq_data: pointer to irqchip data for the child irqchip driver * @irq: host irq number from SPI or I2C configuration * @out_clamp: indicates output clamp state for each analogue output * @out_shorted: indicates short circuit state for each analogue output @@ -175,6 +176,7 @@ struct madera { struct madera_pdata pdata; struct device *irq_dev; + struct regmap_irq_chip_data *irq_data; int irq; unsigned int num_micbias; diff --git a/include/linux/mfd/madera/pdata.h b/include/linux/mfd/madera/pdata.h index 0b311f39c8f4..8dc852402dbb 100644 --- a/include/linux/mfd/madera/pdata.h +++ b/include/linux/mfd/madera/pdata.h @@ -24,7 +24,6 @@ struct gpio_desc; struct pinctrl_map; -struct madera_irqchip_pdata; struct madera_codec_pdata; /** diff --git a/include/linux/mfd/max14577-private.h b/include/linux/mfd/max14577-private.h index df75234f979d..a21374f8ad26 100644 --- a/include/linux/mfd/max14577-private.h +++ b/include/linux/mfd/max14577-private.h @@ -1,19 +1,10 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ /* * max14577-private.h - Common API for the Maxim 14577/77836 internal sub chip * * Copyright (C) 2014 Samsung Electrnoics * Chanwoo Choi <cw00.choi@samsung.com> * Krzysztof Kozlowski <krzk@kernel.org> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. */ #ifndef __MAX14577_PRIVATE_H__ diff --git a/include/linux/mfd/max14577.h b/include/linux/mfd/max14577.h index d81b52bb8bee..8b3ef891ba42 100644 --- a/include/linux/mfd/max14577.h +++ b/include/linux/mfd/max14577.h @@ -1,3 +1,4 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ /* * max14577.h - Driver for the Maxim 14577/77836 * @@ -5,16 +6,6 @@ * Chanwoo Choi <cw00.choi@samsung.com> * Krzysztof Kozlowski <krzk@kernel.org> * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * * This driver is based on max8997.h * * MAX14577 has MUIC, Charger devices. 
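The MFD header hunks before and after this point share one pattern: the multi-paragraph GPL notice is removed and a single SPDX-License-Identifier line is added at the top of the file, while the copyright and author lines are kept. For a GPL-2.0-or-later header, the converted file top ends up looking roughly like the sketch below; the file name, guard macro, and author here are placeholders, not taken from the tree.

/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * example.h - interface for a hypothetical Example PMIC
 *
 * Copyright (C) 2014 Example Electronics
 * Jane Doe <jane.doe@example.com>
 */

#ifndef __LINUX_MFD_EXAMPLE_H
#define __LINUX_MFD_EXAMPLE_H

/* declarations are unchanged by the relicensing conversion */

#endif /* __LINUX_MFD_EXAMPLE_H */
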
diff --git a/include/linux/mfd/max77686-private.h b/include/linux/mfd/max77686-private.h index 643dae777b43..833e578e051e 100644 --- a/include/linux/mfd/max77686-private.h +++ b/include/linux/mfd/max77686-private.h @@ -1,22 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ /* * max77686-private.h - Voltage regulator driver for the Maxim 77686/802 * * Copyright (C) 2012 Samsung Electrnoics * Chiwoong Byun <woong.byun@samsung.com> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #ifndef __LINUX_MFD_MAX77686_PRIV_H diff --git a/include/linux/mfd/max77686.h b/include/linux/mfd/max77686.h index d4b72d519115..d0fb510875e6 100644 --- a/include/linux/mfd/max77686.h +++ b/include/linux/mfd/max77686.h @@ -1,23 +1,10 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ /* * max77686.h - Driver for the Maxim 77686/802 * * Copyright (C) 2012 Samsung Electrnoics * Chiwoong Byun <woong.byun@samsung.com> * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - * * This driver is based on max8997.h * * MAX77686 has PMIC, RTC devices. diff --git a/include/linux/mfd/max77693-common.h b/include/linux/mfd/max77693-common.h index 095b121aa725..a5bce099f1ed 100644 --- a/include/linux/mfd/max77693-common.h +++ b/include/linux/mfd/max77693-common.h @@ -1,12 +1,8 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ /* * Common data shared between Maxim 77693 and 77843 drivers * * Copyright (C) 2015 Samsung Electronics - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. */ #ifndef __LINUX_MFD_MAX77693_COMMON_H diff --git a/include/linux/mfd/max77693-private.h b/include/linux/mfd/max77693-private.h index 3c7a63b98ad6..e798c81aec31 100644 --- a/include/linux/mfd/max77693-private.h +++ b/include/linux/mfd/max77693-private.h @@ -1,3 +1,4 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ /* * max77693-private.h - Voltage regulator driver for the Maxim 77693 * @@ -5,20 +6,6 @@ * SangYoung Son <hello.son@samsung.com> * * This program is not provided / owned by Maxim Integrated Products. 
- * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #ifndef __LINUX_MFD_MAX77693_PRIV_H diff --git a/include/linux/mfd/max77693.h b/include/linux/mfd/max77693.h index d450f687301b..c67c16ba8649 100644 --- a/include/linux/mfd/max77693.h +++ b/include/linux/mfd/max77693.h @@ -1,3 +1,4 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ /* * max77693.h - Driver for the Maxim 77693 * @@ -6,20 +7,6 @@ * * This program is not provided / owned by Maxim Integrated Products. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - * * This driver is based on max8997.h * * MAX77693 has PMIC, Charger, Flash LED, Haptic, MUIC devices. diff --git a/include/linux/mfd/max77843-private.h b/include/linux/mfd/max77843-private.h index b8908bf8d315..0bc7454c4dbe 100644 --- a/include/linux/mfd/max77843-private.h +++ b/include/linux/mfd/max77843-private.h @@ -1,14 +1,10 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ /* * Common variables for the Maxim MAX77843 driver * * Copyright (C) 2015 Samsung Electronics * Author: Jaewon Kim <jaewon02.kim@samsung.com> * Author: Beomho Seo <beomho.seo@samsung.com> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. */ #ifndef __MAX77843_PRIVATE_H_ diff --git a/include/linux/mfd/max8997-private.h b/include/linux/mfd/max8997-private.h index 78c76cd4d37b..a10cd6945232 100644 --- a/include/linux/mfd/max8997-private.h +++ b/include/linux/mfd/max8997-private.h @@ -1,22 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ /* * max8997-private.h - Voltage regulator driver for the Maxim 8997 * * Copyright (C) 2010 Samsung Electrnoics * MyungJoo Ham <myungjoo.ham@samsung.com> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. 
- * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #ifndef __LINUX_MFD_MAX8997_PRIV_H diff --git a/include/linux/mfd/max8997.h b/include/linux/mfd/max8997.h index cf815577bd68..e955e2f0a2cc 100644 --- a/include/linux/mfd/max8997.h +++ b/include/linux/mfd/max8997.h @@ -1,23 +1,10 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ /* * max8997.h - Driver for the Maxim 8997/8966 * * Copyright (C) 2009-2010 Samsung Electrnoics * MyungJoo Ham <myungjoo.ham@samsung.com> * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - * * This driver is based on max8998.h * * MAX8997 has PMIC, MUIC, HAPTIC, RTC, FLASH, and Fuel Gauge devices. @@ -178,7 +165,6 @@ struct max8997_led_platform_data { struct max8997_platform_data { /* IRQ */ int ono; - int wakeup; /* ---- PMIC ---- */ struct max8997_regulator_data *regulators; diff --git a/include/linux/mfd/max8998-private.h b/include/linux/mfd/max8998-private.h index d68ada502ff3..6deb5f577602 100644 --- a/include/linux/mfd/max8998-private.h +++ b/include/linux/mfd/max8998-private.h @@ -1,23 +1,10 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ /* * max8998-private.h - Voltage regulator driver for the Maxim 8998 * * Copyright (C) 2009-2010 Samsung Electrnoics * Kyungmin Park <kyungmin.park@samsung.com> * Marek Szyprowski <m.szyprowski@samsung.com> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #ifndef __LINUX_MFD_MAX8998_PRIV_H diff --git a/include/linux/mfd/max8998.h b/include/linux/mfd/max8998.h index e3956a654cbc..061af220dcd3 100644 --- a/include/linux/mfd/max8998.h +++ b/include/linux/mfd/max8998.h @@ -1,23 +1,10 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ /* * max8998.h - Voltage regulator driver for the Maxim 8998 * * Copyright (C) 2009-2010 Samsung Electrnoics * Kyungmin Park <kyungmin.park@samsung.com> * Marek Szyprowski <m.szyprowski@samsung.com> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #ifndef __LINUX_MFD_MAX8998_H diff --git a/include/linux/mfd/mc13xxx.h b/include/linux/mfd/mc13xxx.h index 54a3cd808f9e..2ad9bdc0a5ec 100644 --- a/include/linux/mfd/mc13xxx.h +++ b/include/linux/mfd/mc13xxx.h @@ -249,6 +249,7 @@ struct mc13xxx_platform_data { #define MC13XXX_ADC0_TSMOD0 (1 << 12) #define MC13XXX_ADC0_TSMOD1 (1 << 13) #define MC13XXX_ADC0_TSMOD2 (1 << 14) +#define MC13XXX_ADC0_CHRGRAWDIV (1 << 15) #define MC13XXX_ADC0_ADINC1 (1 << 16) #define MC13XXX_ADC0_ADINC2 (1 << 17) diff --git a/include/linux/mfd/rohm-bd718x7.h b/include/linux/mfd/rohm-bd718x7.h index e8338e5dc10b..fd194bfc836f 100644 --- a/include/linux/mfd/rohm-bd718x7.h +++ b/include/linux/mfd/rohm-bd718x7.h @@ -1,112 +1,127 @@ /* SPDX-License-Identifier: GPL-2.0-or-later */ /* Copyright (C) 2018 ROHM Semiconductors */ -#ifndef __LINUX_MFD_BD71837_H__ -#define __LINUX_MFD_BD71837_H__ +#ifndef __LINUX_MFD_BD718XX_H__ +#define __LINUX_MFD_BD718XX_H__ #include <linux/regmap.h> enum { - BD71837_BUCK1 = 0, - BD71837_BUCK2, - BD71837_BUCK3, - BD71837_BUCK4, - BD71837_BUCK5, - BD71837_BUCK6, - BD71837_BUCK7, - BD71837_BUCK8, - BD71837_LDO1, - BD71837_LDO2, - BD71837_LDO3, - BD71837_LDO4, - BD71837_LDO5, - BD71837_LDO6, - BD71837_LDO7, - BD71837_REGULATOR_CNT, + BD718XX_TYPE_BD71837 = 0, + BD718XX_TYPE_BD71847, + BD718XX_TYPE_AMOUNT }; -#define BD71837_BUCK1_VOLTAGE_NUM 0x40 -#define BD71837_BUCK2_VOLTAGE_NUM 0x40 -#define BD71837_BUCK3_VOLTAGE_NUM 0x40 -#define BD71837_BUCK4_VOLTAGE_NUM 0x40 +enum { + BD718XX_BUCK1 = 0, + BD718XX_BUCK2, + BD718XX_BUCK3, + BD718XX_BUCK4, + BD718XX_BUCK5, + BD718XX_BUCK6, + BD718XX_BUCK7, + BD718XX_BUCK8, + BD718XX_LDO1, + BD718XX_LDO2, + BD718XX_LDO3, + BD718XX_LDO4, + BD718XX_LDO5, + BD718XX_LDO6, + BD718XX_LDO7, + BD718XX_REGULATOR_AMOUNT, +}; + +/* Common voltage configurations */ +#define BD718XX_DVS_BUCK_VOLTAGE_NUM 0x3D +#define BD718XX_4TH_NODVS_BUCK_VOLTAGE_NUM 0x3D -#define BD71837_BUCK5_VOLTAGE_NUM 0x08 +#define BD718XX_LDO1_VOLTAGE_NUM 0x08 +#define BD718XX_LDO2_VOLTAGE_NUM 0x02 +#define BD718XX_LDO3_VOLTAGE_NUM 0x10 +#define BD718XX_LDO4_VOLTAGE_NUM 0x0A +#define 
BD718XX_LDO6_VOLTAGE_NUM 0x0A + +/* BD71837 specific voltage configurations */ +#define BD71837_BUCK5_VOLTAGE_NUM 0x10 #define BD71837_BUCK6_VOLTAGE_NUM 0x04 #define BD71837_BUCK7_VOLTAGE_NUM 0x08 -#define BD71837_BUCK8_VOLTAGE_NUM 0x40 - -#define BD71837_LDO1_VOLTAGE_NUM 0x04 -#define BD71837_LDO2_VOLTAGE_NUM 0x02 -#define BD71837_LDO3_VOLTAGE_NUM 0x10 -#define BD71837_LDO4_VOLTAGE_NUM 0x10 #define BD71837_LDO5_VOLTAGE_NUM 0x10 -#define BD71837_LDO6_VOLTAGE_NUM 0x10 #define BD71837_LDO7_VOLTAGE_NUM 0x10 +/* BD71847 specific voltage configurations */ +#define BD71847_BUCK3_VOLTAGE_NUM 0x18 +#define BD71847_BUCK4_VOLTAGE_NUM 0x08 +#define BD71847_LDO5_VOLTAGE_NUM 0x20 + +/* Registers specific to BD71837 */ enum { - BD71837_REG_REV = 0x00, - BD71837_REG_SWRESET = 0x01, - BD71837_REG_I2C_DEV = 0x02, - BD71837_REG_PWRCTRL0 = 0x03, - BD71837_REG_PWRCTRL1 = 0x04, - BD71837_REG_BUCK1_CTRL = 0x05, - BD71837_REG_BUCK2_CTRL = 0x06, - BD71837_REG_BUCK3_CTRL = 0x07, - BD71837_REG_BUCK4_CTRL = 0x08, - BD71837_REG_BUCK5_CTRL = 0x09, - BD71837_REG_BUCK6_CTRL = 0x0A, - BD71837_REG_BUCK7_CTRL = 0x0B, - BD71837_REG_BUCK8_CTRL = 0x0C, - BD71837_REG_BUCK1_VOLT_RUN = 0x0D, - BD71837_REG_BUCK1_VOLT_IDLE = 0x0E, - BD71837_REG_BUCK1_VOLT_SUSP = 0x0F, - BD71837_REG_BUCK2_VOLT_RUN = 0x10, - BD71837_REG_BUCK2_VOLT_IDLE = 0x11, - BD71837_REG_BUCK3_VOLT_RUN = 0x12, - BD71837_REG_BUCK4_VOLT_RUN = 0x13, - BD71837_REG_BUCK5_VOLT = 0x14, - BD71837_REG_BUCK6_VOLT = 0x15, - BD71837_REG_BUCK7_VOLT = 0x16, - BD71837_REG_BUCK8_VOLT = 0x17, - BD71837_REG_LDO1_VOLT = 0x18, - BD71837_REG_LDO2_VOLT = 0x19, - BD71837_REG_LDO3_VOLT = 0x1A, - BD71837_REG_LDO4_VOLT = 0x1B, - BD71837_REG_LDO5_VOLT = 0x1C, - BD71837_REG_LDO6_VOLT = 0x1D, - BD71837_REG_LDO7_VOLT = 0x1E, - BD71837_REG_TRANS_COND0 = 0x1F, - BD71837_REG_TRANS_COND1 = 0x20, - BD71837_REG_VRFAULTEN = 0x21, - BD718XX_REG_MVRFLTMASK0 = 0x22, - BD718XX_REG_MVRFLTMASK1 = 0x23, - BD718XX_REG_MVRFLTMASK2 = 0x24, - BD71837_REG_RCVCFG = 0x25, - BD71837_REG_RCVNUM = 0x26, - BD71837_REG_PWRONCONFIG0 = 0x27, - BD71837_REG_PWRONCONFIG1 = 0x28, - BD71837_REG_RESETSRC = 0x29, - BD71837_REG_MIRQ = 0x2A, - BD71837_REG_IRQ = 0x2B, - BD71837_REG_IN_MON = 0x2C, - BD71837_REG_POW_STATE = 0x2D, - BD71837_REG_OUT32K = 0x2E, - BD71837_REG_REGLOCK = 0x2F, - BD71837_REG_OTPVER = 0xFF, - BD71837_MAX_REGISTER = 0x100, + BD71837_REG_BUCK3_CTRL = 0x07, + BD71837_REG_BUCK4_CTRL = 0x08, + BD71837_REG_BUCK3_VOLT_RUN = 0x12, + BD71837_REG_BUCK4_VOLT_RUN = 0x13, + BD71837_REG_LDO7_VOLT = 0x1E, +}; + +/* Registers common for BD71837 and BD71847 */ +enum { + BD718XX_REG_REV = 0x00, + BD718XX_REG_SWRESET = 0x01, + BD718XX_REG_I2C_DEV = 0x02, + BD718XX_REG_PWRCTRL0 = 0x03, + BD718XX_REG_PWRCTRL1 = 0x04, + BD718XX_REG_BUCK1_CTRL = 0x05, + BD718XX_REG_BUCK2_CTRL = 0x06, + BD718XX_REG_1ST_NODVS_BUCK_CTRL = 0x09, + BD718XX_REG_2ND_NODVS_BUCK_CTRL = 0x0A, + BD718XX_REG_3RD_NODVS_BUCK_CTRL = 0x0B, + BD718XX_REG_4TH_NODVS_BUCK_CTRL = 0x0C, + BD718XX_REG_BUCK1_VOLT_RUN = 0x0D, + BD718XX_REG_BUCK1_VOLT_IDLE = 0x0E, + BD718XX_REG_BUCK1_VOLT_SUSP = 0x0F, + BD718XX_REG_BUCK2_VOLT_RUN = 0x10, + BD718XX_REG_BUCK2_VOLT_IDLE = 0x11, + BD718XX_REG_1ST_NODVS_BUCK_VOLT = 0x14, + BD718XX_REG_2ND_NODVS_BUCK_VOLT = 0x15, + BD718XX_REG_3RD_NODVS_BUCK_VOLT = 0x16, + BD718XX_REG_4TH_NODVS_BUCK_VOLT = 0x17, + BD718XX_REG_LDO1_VOLT = 0x18, + BD718XX_REG_LDO2_VOLT = 0x19, + BD718XX_REG_LDO3_VOLT = 0x1A, + BD718XX_REG_LDO4_VOLT = 0x1B, + BD718XX_REG_LDO5_VOLT = 0x1C, + BD718XX_REG_LDO6_VOLT = 0x1D, + BD718XX_REG_TRANS_COND0 = 0x1F, + 
BD718XX_REG_TRANS_COND1 = 0x20, + BD718XX_REG_VRFAULTEN = 0x21, + BD718XX_REG_MVRFLTMASK0 = 0x22, + BD718XX_REG_MVRFLTMASK1 = 0x23, + BD718XX_REG_MVRFLTMASK2 = 0x24, + BD718XX_REG_RCVCFG = 0x25, + BD718XX_REG_RCVNUM = 0x26, + BD718XX_REG_PWRONCONFIG0 = 0x27, + BD718XX_REG_PWRONCONFIG1 = 0x28, + BD718XX_REG_RESETSRC = 0x29, + BD718XX_REG_MIRQ = 0x2A, + BD718XX_REG_IRQ = 0x2B, + BD718XX_REG_IN_MON = 0x2C, + BD718XX_REG_POW_STATE = 0x2D, + BD718XX_REG_OUT32K = 0x2E, + BD718XX_REG_REGLOCK = 0x2F, + BD718XX_REG_OTPVER = 0xFF, + BD718XX_MAX_REGISTER = 0x100, }; #define REGLOCK_PWRSEQ 0x1 #define REGLOCK_VREG 0x10 /* Generic BUCK control masks */ -#define BD71837_BUCK_SEL 0x02 -#define BD71837_BUCK_EN 0x01 -#define BD71837_BUCK_RUN_ON 0x04 +#define BD718XX_BUCK_SEL 0x02 +#define BD718XX_BUCK_EN 0x01 +#define BD718XX_BUCK_RUN_ON 0x04 /* Generic LDO masks */ -#define BD71837_LDO_SEL 0x80 -#define BD71837_LDO_EN 0x40 +#define BD718XX_LDO_SEL 0x80 +#define BD718XX_LDO_EN 0x40 /* BD71837 BUCK ramp rate CTRL reg bits */ #define BUCK_RAMPRATE_MASK 0xC0 @@ -115,49 +130,35 @@ enum { #define BUCK_RAMPRATE_2P50MV 0x2 #define BUCK_RAMPRATE_1P25MV 0x3 -/* BD71837_REG_BUCK1_VOLT_RUN bits */ -#define BUCK1_RUN_MASK 0x3F -#define BUCK1_RUN_DEFAULT 0x14 - -/* BD71837_REG_BUCK1_VOLT_SUSP bits */ -#define BUCK1_SUSP_MASK 0x3F -#define BUCK1_SUSP_DEFAULT 0x14 +#define DVS_BUCK_RUN_MASK 0x3F +#define DVS_BUCK_SUSP_MASK 0x3F +#define DVS_BUCK_IDLE_MASK 0x3F -/* BD71837_REG_BUCK1_VOLT_IDLE bits */ -#define BUCK1_IDLE_MASK 0x3F -#define BUCK1_IDLE_DEFAULT 0x14 +#define BD718XX_1ST_NODVS_BUCK_MASK 0x07 +#define BD718XX_3RD_NODVS_BUCK_MASK 0x07 +#define BD718XX_4TH_NODVS_BUCK_MASK 0x3F -/* BD71837_REG_BUCK2_VOLT_RUN bits */ -#define BUCK2_RUN_MASK 0x3F -#define BUCK2_RUN_DEFAULT 0x1E +#define BD71847_BUCK3_MASK 0x07 +#define BD71847_BUCK3_RANGE_MASK 0xC0 +#define BD71847_BUCK4_MASK 0x03 +#define BD71847_BUCK4_RANGE_MASK 0x40 -/* BD71837_REG_BUCK2_VOLT_IDLE bits */ -#define BUCK2_IDLE_MASK 0x3F -#define BUCK2_IDLE_DEFAULT 0x14 +#define BD71837_BUCK5_MASK 0x07 +#define BD71837_BUCK5_RANGE_MASK 0x80 +#define BD71837_BUCK6_MASK 0x03 -/* BD71837_REG_BUCK3_VOLT_RUN bits */ -#define BUCK3_RUN_MASK 0x3F -#define BUCK3_RUN_DEFAULT 0x1E +#define BD718XX_LDO1_MASK 0x03 +#define BD718XX_LDO1_RANGE_MASK 0x20 +#define BD718XX_LDO2_MASK 0x20 +#define BD718XX_LDO3_MASK 0x0F +#define BD718XX_LDO4_MASK 0x0F +#define BD718XX_LDO6_MASK 0x0F -/* BD71837_REG_BUCK4_VOLT_RUN bits */ -#define BUCK4_RUN_MASK 0x3F -#define BUCK4_RUN_DEFAULT 0x1E +#define BD71837_LDO5_MASK 0x0F +#define BD71847_LDO5_MASK 0x0F +#define BD71847_LDO5_RANGE_MASK 0x20 -/* BD71837_REG_BUCK5_VOLT bits */ -#define BUCK5_MASK 0x07 -#define BUCK5_DEFAULT 0x02 - -/* BD71837_REG_BUCK6_VOLT bits */ -#define BUCK6_MASK 0x03 -#define BUCK6_DEFAULT 0x03 - -/* BD71837_REG_BUCK7_VOLT bits */ -#define BUCK7_MASK 0x07 -#define BUCK7_DEFAULT 0x03 - -/* BD71837_REG_BUCK8_VOLT bits */ -#define BUCK8_MASK 0x3F -#define BUCK8_DEFAULT 0x1E +#define BD71837_LDO7_MASK 0x0F /* BD718XX Voltage monitoring masks */ #define BD718XX_BUCK1_VRMON80 0x1 @@ -186,7 +187,7 @@ enum { #define BD71837_BUCK4_VRMON130 0x80 #define BD71837_LDO7_VRMON80 0x40 -/* BD71837_REG_IRQ bits */ +/* BD718XX_REG_IRQ bits */ #define IRQ_SWRST 0x40 #define IRQ_PWRON_S 0x20 #define IRQ_PWRON_L 0x10 @@ -195,52 +196,31 @@ enum { #define IRQ_ON_REQ 0x02 #define IRQ_STBY_REQ 0x01 -/* BD71837_REG_OUT32K bits */ -#define BD71837_OUT32K_EN 0x01 +/* BD718XX_REG_OUT32K bits */ +#define BD718XX_OUT32K_EN 0x01 -/* BD71837 gated clock 
rate */ -#define BD71837_CLK_RATE 32768 +/* BD7183XX gated clock rate */ +#define BD718XX_CLK_RATE 32768 -/* ROHM BD71837 irqs */ +/* ROHM BD718XX irqs */ enum { - BD71837_INT_STBY_REQ, - BD71837_INT_ON_REQ, - BD71837_INT_WDOG, - BD71837_INT_PWRBTN, - BD71837_INT_PWRBTN_L, - BD71837_INT_PWRBTN_S, - BD71837_INT_SWRST + BD718XX_INT_STBY_REQ, + BD718XX_INT_ON_REQ, + BD718XX_INT_WDOG, + BD718XX_INT_PWRBTN, + BD718XX_INT_PWRBTN_L, + BD718XX_INT_PWRBTN_S, + BD718XX_INT_SWRST }; -/* ROHM BD71837 interrupt masks */ -#define BD71837_INT_SWRST_MASK 0x40 -#define BD71837_INT_PWRBTN_S_MASK 0x20 -#define BD71837_INT_PWRBTN_L_MASK 0x10 -#define BD71837_INT_PWRBTN_MASK 0x8 -#define BD71837_INT_WDOG_MASK 0x4 -#define BD71837_INT_ON_REQ_MASK 0x2 -#define BD71837_INT_STBY_REQ_MASK 0x1 - -/* BD71837_REG_LDO1_VOLT bits */ -#define LDO1_MASK 0x03 - -/* BD71837_REG_LDO1_VOLT bits */ -#define LDO2_MASK 0x20 - -/* BD71837_REG_LDO3_VOLT bits */ -#define LDO3_MASK 0x0F - -/* BD71837_REG_LDO4_VOLT bits */ -#define LDO4_MASK 0x0F - -/* BD71837_REG_LDO5_VOLT bits */ -#define LDO5_MASK 0x0F - -/* BD71837_REG_LDO6_VOLT bits */ -#define LDO6_MASK 0x0F - -/* BD71837_REG_LDO7_VOLT bits */ -#define LDO7_MASK 0x0F +/* ROHM BD718XX interrupt masks */ +#define BD718XX_INT_SWRST_MASK 0x40 +#define BD718XX_INT_PWRBTN_S_MASK 0x20 +#define BD718XX_INT_PWRBTN_L_MASK 0x10 +#define BD718XX_INT_PWRBTN_MASK 0x8 +#define BD718XX_INT_WDOG_MASK 0x4 +#define BD718XX_INT_ON_REQ_MASK 0x2 +#define BD718XX_INT_STBY_REQ_MASK 0x1 /* Register write induced reset settings */ @@ -250,13 +230,13 @@ enum { * write 1 to it we will trigger the action. So always write 0 to it when * changning SWRESET action - no matter what we read from it. */ -#define BD71837_SWRESET_TYPE_MASK 7 -#define BD71837_SWRESET_TYPE_DISABLED 0 -#define BD71837_SWRESET_TYPE_COLD 4 -#define BD71837_SWRESET_TYPE_WARM 6 +#define BD718XX_SWRESET_TYPE_MASK 7 +#define BD718XX_SWRESET_TYPE_DISABLED 0 +#define BD718XX_SWRESET_TYPE_COLD 4 +#define BD718XX_SWRESET_TYPE_WARM 6 -#define BD71837_SWRESET_RESET_MASK 1 -#define BD71837_SWRESET_RESET 1 +#define BD718XX_SWRESET_RESET_MASK 1 +#define BD718XX_SWRESET_RESET 1 /* Poweroff state transition conditions */ @@ -341,10 +321,10 @@ enum { BD718XX_PWRBTN_LONG_PRESS_15S }; -struct bd71837_pmic; -struct bd71837_clk; +struct bd718xx_clk; -struct bd71837 { +struct bd718xx { + unsigned int chip_type; struct device *dev; struct regmap *regmap; unsigned long int id; @@ -352,8 +332,7 @@ struct bd71837 { int chip_irq; struct regmap_irq_chip_data *irq_data; - struct bd71837_pmic *pmic; - struct bd71837_clk *clk; + struct bd718xx_clk *clk; }; -#endif /* __LINUX_MFD_BD71837_H__ */ +#endif /* __LINUX_MFD_BD718XX_H__ */ diff --git a/include/linux/mfd/samsung/core.h b/include/linux/mfd/samsung/core.h index 28f4ae76271d..3ca17eb89aa2 100644 --- a/include/linux/mfd/samsung/core.h +++ b/include/linux/mfd/samsung/core.h @@ -1,14 +1,7 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ /* - * core.h - * - * copyright (c) 2011 Samsung Electronics Co., Ltd + * Copyright (c) 2011 Samsung Electronics Co., Ltd * http://www.samsung.com - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation; either version 2 of the License, or (at your - * option) any later version. 
- * */ #ifndef __LINUX_MFD_SEC_CORE_H diff --git a/include/linux/mfd/samsung/irq.h b/include/linux/mfd/samsung/irq.h index 667aa40486dd..6cfe4201a106 100644 --- a/include/linux/mfd/samsung/irq.h +++ b/include/linux/mfd/samsung/irq.h @@ -1,13 +1,7 @@ -/* irq.h - * +/* SPDX-License-Identifier: GPL-2.0+ */ +/* * Copyright (c) 2012 Samsung Electronics Co., Ltd * http://www.samsung.com - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation; either version 2 of the License, or (at your - * option) any later version. - * */ #ifndef __LINUX_MFD_SEC_IRQ_H diff --git a/include/linux/mfd/samsung/rtc.h b/include/linux/mfd/samsung/rtc.h index 9ed2871ea335..0204decfc9aa 100644 --- a/include/linux/mfd/samsung/rtc.h +++ b/include/linux/mfd/samsung/rtc.h @@ -1,18 +1,7 @@ -/* rtc.h - * +/* SPDX-License-Identifier: GPL-2.0+ */ +/* * Copyright (c) 2011-2014 Samsung Electronics Co., Ltd * http://www.samsung.com - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation; either version 2 of the License, or (at your - * option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * */ #ifndef __LINUX_MFD_SEC_RTC_H diff --git a/include/linux/mfd/samsung/s2mpa01.h b/include/linux/mfd/samsung/s2mpa01.h index 2766108bca2f..0762e9de6f2f 100644 --- a/include/linux/mfd/samsung/s2mpa01.h +++ b/include/linux/mfd/samsung/s2mpa01.h @@ -1,12 +1,7 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ /* * Copyright (c) 2013 Samsung Electronics Co., Ltd * http://www.samsung.com - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation; either version 2 of the License, or (at your - * option) any later version. - * */ #ifndef __LINUX_MFD_S2MPA01_H diff --git a/include/linux/mfd/samsung/s2mps11.h b/include/linux/mfd/samsung/s2mps11.h index 2c14eeca46f0..6e7668a389a1 100644 --- a/include/linux/mfd/samsung/s2mps11.h +++ b/include/linux/mfd/samsung/s2mps11.h @@ -1,14 +1,7 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ /* - * s2mps11.h - * * Copyright (c) 2012 Samsung Electronics Co., Ltd * http://www.samsung.com - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation; either version 2 of the License, or (at your - * option) any later version. - * */ #ifndef __LINUX_MFD_S2MPS11_H diff --git a/include/linux/mfd/samsung/s2mps13.h b/include/linux/mfd/samsung/s2mps13.h index 239e977ba45d..b96d8a11dcd3 100644 --- a/include/linux/mfd/samsung/s2mps13.h +++ b/include/linux/mfd/samsung/s2mps13.h @@ -1,19 +1,7 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ /* - * s2mps13.h - * * Copyright (c) 2014 Samsung Electronics Co., Ltd * http://www.samsung.com - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation; either version 2 of the License, or (at your - * option) any later version. 
- * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * */ #ifndef __LINUX_MFD_S2MPS13_H diff --git a/include/linux/mfd/samsung/s2mps14.h b/include/linux/mfd/samsung/s2mps14.h index c92f4782afb5..f4afa0cfc24f 100644 --- a/include/linux/mfd/samsung/s2mps14.h +++ b/include/linux/mfd/samsung/s2mps14.h @@ -1,19 +1,7 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ /* - * s2mps14.h - * * Copyright (c) 2014 Samsung Electronics Co., Ltd * http://www.samsung.com - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation; either version 2 of the License, or (at your - * option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * */ #ifndef __LINUX_MFD_S2MPS14_H diff --git a/include/linux/mfd/samsung/s2mps15.h b/include/linux/mfd/samsung/s2mps15.h index 36d35287c3c0..eac6bf74b72e 100644 --- a/include/linux/mfd/samsung/s2mps15.h +++ b/include/linux/mfd/samsung/s2mps15.h @@ -1,16 +1,7 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ /* * Copyright (c) 2015 Samsung Electronics Co., Ltd * http://www.samsung.com - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation; either version 2 of the License, or (at your - * option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. */ #ifndef __LINUX_MFD_S2MPS15_H diff --git a/include/linux/mfd/samsung/s2mpu02.h b/include/linux/mfd/samsung/s2mpu02.h index 47ae9bc583a7..76cd5380cf0f 100644 --- a/include/linux/mfd/samsung/s2mpu02.h +++ b/include/linux/mfd/samsung/s2mpu02.h @@ -1,19 +1,7 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ /* - * s2mpu02.h - * * Copyright (c) 2014 Samsung Electronics Co., Ltd * http://www.samsung.com - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation; either version 2 of the License, or (at your - * option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * */ #ifndef __LINUX_MFD_S2MPU02_H diff --git a/include/linux/mfd/samsung/s5m8763.h b/include/linux/mfd/samsung/s5m8763.h index e025418e5589..c534f086ca16 100644 --- a/include/linux/mfd/samsung/s5m8763.h +++ b/include/linux/mfd/samsung/s5m8763.h @@ -1,13 +1,7 @@ -/* s5m8763.h - * +/* SPDX-License-Identifier: GPL-2.0+ */ +/* * Copyright (c) 2011 Samsung Electronics Co., Ltd * http://www.samsung.com - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation; either version 2 of the License, or (at your - * option) any later version. - * */ #ifndef __LINUX_MFD_S5M8763_H diff --git a/include/linux/mfd/samsung/s5m8767.h b/include/linux/mfd/samsung/s5m8767.h index 243b58fec33d..704f8d80e96e 100644 --- a/include/linux/mfd/samsung/s5m8767.h +++ b/include/linux/mfd/samsung/s5m8767.h @@ -1,13 +1,7 @@ -/* s5m8767.h - * +/* SPDX-License-Identifier: GPL-2.0+ */ +/* * Copyright (c) 2011 Samsung Electronics Co., Ltd * http://www.samsung.com - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation; either version 2 of the License, or (at your - * option) any later version. - * */ #ifndef __LINUX_MFD_S5M8767_H diff --git a/include/linux/mfd/ti-lmu.h b/include/linux/mfd/ti-lmu.h index 09d5f30384e5..1ef51ed36be5 100644 --- a/include/linux/mfd/ti-lmu.h +++ b/include/linux/mfd/ti-lmu.h @@ -16,6 +16,7 @@ #include <linux/gpio.h> #include <linux/notifier.h> #include <linux/regmap.h> +#include <linux/gpio/consumer.h> /* Notifier event */ #define LMU_EVENT_MONITOR_DONE 0x01 @@ -81,7 +82,7 @@ enum lm363x_regulator_id { struct ti_lmu { struct device *dev; struct regmap *regmap; - int en_gpio; + struct gpio_desc *en_gpio; struct blocking_notifier_head notifier; }; #endif diff --git a/include/linux/mfd/tmio.h b/include/linux/mfd/tmio.h index 77866214ab51..1e70060c92ce 100644 --- a/include/linux/mfd/tmio.h +++ b/include/linux/mfd/tmio.h @@ -62,13 +62,6 @@ #define TMIO_MMC_USE_GPIO_CD BIT(5) /* - * Some controllers doesn't have over 0x100 register. - * it is used to checking accessibility of - * CTL_SD_CARD_CLK_CTL / CTL_CLK_AND_WAIT_CTL - */ -#define TMIO_MMC_HAVE_HIGH_REG BIT(6) - -/* * Some controllers have CMD12 automatically * issue/non-issue register */ diff --git a/include/linux/micrel_phy.h b/include/linux/micrel_phy.h index 472fa4d4ea62..7361cd3fddc1 100644 --- a/include/linux/micrel_phy.h +++ b/include/linux/micrel_phy.h @@ -31,6 +31,7 @@ #define PHY_ID_KSZ8081 0x00221560 #define PHY_ID_KSZ8061 0x00221570 #define PHY_ID_KSZ9031 0x00221620 +#define PHY_ID_KSZ9131 0x00221640 #define PHY_ID_KSZ886X 0x00221430 #define PHY_ID_KSZ8863 0x00221435 diff --git a/include/linux/mii.h b/include/linux/mii.h index 55000ee5c6ad..2da85b02e1c0 100644 --- a/include/linux/mii.h +++ b/include/linux/mii.h @@ -10,6 +10,7 @@ #include <linux/if.h> +#include <linux/linkmode.h> #include <uapi/linux/mii.h> struct ethtool_cmd; @@ -132,6 +133,34 @@ static inline u32 ethtool_adv_to_mii_adv_t(u32 ethadv) } /** + * linkmode_adv_to_mii_adv_t + * @advertising: the linkmode advertisement settings + * + * A small helper function that translates linkmode advertisement + * settings to phy autonegotiation advertisements for the + * MII_ADVERTISE register. 
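With ti_lmu.en_gpio above now a struct gpio_desc * (and <linux/gpio/consumer.h> pulled in), the enable pin is handled through the descriptor API rather than a raw GPIO number. A hedged sketch of the probe-side pattern; the "enable" con_id is an assumption of this example.

#include <linux/gpio/consumer.h>
#include <linux/mfd/ti-lmu.h>

static int ti_lmu_enable_sketch(struct device *dev, struct ti_lmu *lmu)
{
	/* replaces of_get_named_gpio() + devm_gpio_request_one() on an int */
	lmu->en_gpio = devm_gpiod_get(dev, "enable", GPIOD_OUT_HIGH);
	if (IS_ERR(lmu->en_gpio))
		return PTR_ERR(lmu->en_gpio);

	/* polarity comes from firmware; no gpio_is_valid() check needed */
	gpiod_set_value_cansleep(lmu->en_gpio, 1);
	return 0;
}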
+ */ +static inline u32 linkmode_adv_to_mii_adv_t(unsigned long *advertising) +{ + u32 result = 0; + + if (linkmode_test_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, advertising)) + result |= ADVERTISE_10HALF; + if (linkmode_test_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, advertising)) + result |= ADVERTISE_10FULL; + if (linkmode_test_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, advertising)) + result |= ADVERTISE_100HALF; + if (linkmode_test_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, advertising)) + result |= ADVERTISE_100FULL; + if (linkmode_test_bit(ETHTOOL_LINK_MODE_Pause_BIT, advertising)) + result |= ADVERTISE_PAUSE_CAP; + if (linkmode_test_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, advertising)) + result |= ADVERTISE_PAUSE_ASYM; + + return result; +} + +/** * mii_adv_to_ethtool_adv_t * @adv: value of the MII_ADVERTISE register * @@ -179,6 +208,28 @@ static inline u32 ethtool_adv_to_mii_ctrl1000_t(u32 ethadv) } /** + * linkmode_adv_to_mii_ctrl1000_t + * advertising: the linkmode advertisement settings + * + * A small helper function that translates linkmode advertisement + * settings to phy autonegotiation advertisements for the + * MII_CTRL1000 register when in 1000T mode. + */ +static inline u32 linkmode_adv_to_mii_ctrl1000_t(unsigned long *advertising) +{ + u32 result = 0; + + if (linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT, + advertising)) + result |= ADVERTISE_1000HALF; + if (linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, + advertising)) + result |= ADVERTISE_1000FULL; + + return result; +} + +/** * mii_ctrl1000_to_ethtool_adv_t * @adv: value of the MII_CTRL1000 register * @@ -303,6 +354,56 @@ static inline u32 mii_lpa_to_ethtool_lpa_x(u32 lpa) } /** + * mii_adv_to_linkmode_adv_t + * @advertising:pointer to destination link mode. + * @adv: value of the MII_ADVERTISE register + * + * A small helper function that translates MII_ADVERTISE bits + * to linkmode advertisement settings. + */ +static inline void mii_adv_to_linkmode_adv_t(unsigned long *advertising, + u32 adv) +{ + linkmode_zero(advertising); + + if (adv & ADVERTISE_10HALF) + linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, + advertising); + if (adv & ADVERTISE_10FULL) + linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, + advertising); + if (adv & ADVERTISE_100HALF) + linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, + advertising); + if (adv & ADVERTISE_100FULL) + linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, + advertising); + if (adv & ADVERTISE_PAUSE_CAP) + linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, advertising); + if (adv & ADVERTISE_PAUSE_ASYM) + linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, advertising); +} + +/** + * ethtool_adv_to_lcl_adv_t + * @advertising:pointer to ethtool advertising + * + * A small helper function that translates ethtool advertising to LVL + * pause capabilities. 
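The linkmode_adv_to_mii_adv_t() and linkmode_adv_to_mii_ctrl1000_t() helpers added above translate an unsigned long linkmode bitmap into the MII_ADVERTISE / MII_CTRL1000 register layouts, mirroring the existing ethtool_adv_to_mii_*() converters. A minimal sketch of a PHY driver using them; building the bitmap locally is only for illustration, a real caller would typically start from an existing advertising mask.

#include <linux/linkmode.h>
#include <linux/mii.h>
#include <linux/phy.h>

static int example_write_advert(struct phy_device *phydev)
{
	__ETHTOOL_DECLARE_LINK_MODE_MASK(advertising) = { 0, };
	int ret;

	/* advertise 100BASE-T full duplex plus symmetric pause */
	linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, advertising);
	linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, advertising);

	/* translate the bitmap into the MII register layouts */
	ret = phy_write(phydev, MII_ADVERTISE,
			linkmode_adv_to_mii_adv_t(advertising));
	if (ret < 0)
		return ret;

	return phy_write(phydev, MII_CTRL1000,
			 linkmode_adv_to_mii_ctrl1000_t(advertising));
}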
+ */ +static inline u32 ethtool_adv_to_lcl_adv_t(u32 advertising) +{ + u32 lcl_adv = 0; + + if (advertising & ADVERTISED_Pause) + lcl_adv |= ADVERTISE_PAUSE_CAP; + if (advertising & ADVERTISED_Asym_Pause) + lcl_adv |= ADVERTISE_PAUSE_ASYM; + + return lcl_adv; +} + +/** * mii_advertise_flowctrl - get flow control advertisement flags * @cap: Flow control capabilities (FLOW_CTRL_RX, FLOW_CTRL_TX or both) */ diff --git a/include/linux/mlx5/cq.h b/include/linux/mlx5/cq.h index 0ef6138eca49..31a750570c38 100644 --- a/include/linux/mlx5/cq.h +++ b/include/linux/mlx5/cq.h @@ -61,6 +61,7 @@ struct mlx5_core_cq { int reset_notify_added; struct list_head reset_notify; struct mlx5_eq *eq; + u16 uid; }; diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h index 11fa4e66afc5..b4c0457fbebd 100644 --- a/include/linux/mlx5/device.h +++ b/include/linux/mlx5/device.h @@ -504,6 +504,10 @@ struct health_buffer { __be16 ext_synd; }; +enum mlx5_cmd_addr_l_sz_offset { + MLX5_NIC_IFC_OFFSET = 8, +}; + struct mlx5_init_seg { __be32 fw_rev; __be32 cmdif_rev_fw_sub; @@ -1120,6 +1124,12 @@ enum mlx5_qcam_feature_groups { #define MLX5_CAP_FLOWTABLE_NIC_RX_MAX(mdev, cap) \ MLX5_CAP_FLOWTABLE_MAX(mdev, flow_table_properties_nic_receive.cap) +#define MLX5_CAP_FLOWTABLE_NIC_TX(mdev, cap) \ + MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_transmit.cap) + +#define MLX5_CAP_FLOWTABLE_NIC_TX_MAX(mdev, cap) \ + MLX5_CAP_FLOWTABLE_MAX(mdev, flow_table_properties_nic_transmit.cap) + #define MLX5_CAP_FLOWTABLE_SNIFFER_RX(mdev, cap) \ MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_receive_sniffer.cap) diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index 66d94b4557cf..aa5963b5d38e 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -97,14 +97,15 @@ enum { }; enum { - MLX5_ATOMIC_MODE_IB_COMP = 1 << 16, - MLX5_ATOMIC_MODE_CX = 2 << 16, - MLX5_ATOMIC_MODE_8B = 3 << 16, - MLX5_ATOMIC_MODE_16B = 4 << 16, - MLX5_ATOMIC_MODE_32B = 5 << 16, - MLX5_ATOMIC_MODE_64B = 6 << 16, - MLX5_ATOMIC_MODE_128B = 7 << 16, - MLX5_ATOMIC_MODE_256B = 8 << 16, + MLX5_ATOMIC_MODE_OFFSET = 16, + MLX5_ATOMIC_MODE_IB_COMP = 1, + MLX5_ATOMIC_MODE_CX = 2, + MLX5_ATOMIC_MODE_8B = 3, + MLX5_ATOMIC_MODE_16B = 4, + MLX5_ATOMIC_MODE_32B = 5, + MLX5_ATOMIC_MODE_64B = 6, + MLX5_ATOMIC_MODE_128B = 7, + MLX5_ATOMIC_MODE_256B = 8, }; enum { @@ -133,6 +134,7 @@ enum { MLX5_REG_PVLC = 0x500f, MLX5_REG_PCMR = 0x5041, MLX5_REG_PMLP = 0x5002, + MLX5_REG_PPLM = 0x5023, MLX5_REG_PCAM = 0x507f, MLX5_REG_NODE_DESC = 0x6001, MLX5_REG_HOST_ENDIANNESS = 0x7004, @@ -162,16 +164,11 @@ enum mlx5_dcbx_oper_mode { MLX5E_DCBX_PARAM_VER_OPER_AUTO = 0x3, }; -enum mlx5_dct_atomic_mode { - MLX5_ATOMIC_MODE_DCT_OFF = 20, - MLX5_ATOMIC_MODE_DCT_NONE = 0 << MLX5_ATOMIC_MODE_DCT_OFF, - MLX5_ATOMIC_MODE_DCT_IB_COMP = 1 << MLX5_ATOMIC_MODE_DCT_OFF, - MLX5_ATOMIC_MODE_DCT_CX = 2 << MLX5_ATOMIC_MODE_DCT_OFF, -}; - enum { MLX5_ATOMIC_OPS_CMP_SWAP = 1 << 0, MLX5_ATOMIC_OPS_FETCH_ADD = 1 << 1, + MLX5_ATOMIC_OPS_EXTENDED_CMP_SWAP = 1 << 2, + MLX5_ATOMIC_OPS_EXTENDED_FETCH_ADD = 1 << 3, }; enum mlx5_page_fault_resume_flags { @@ -360,7 +357,7 @@ struct mlx5_frag_buf { }; struct mlx5_frag_buf_ctrl { - struct mlx5_frag_buf frag_buf; + struct mlx5_buf_list *frags; u32 sz_m1; u16 frag_sz_m1; u16 strides_offset; @@ -477,6 +474,7 @@ struct mlx5_core_srq { atomic_t refcount; struct completion free; + u16 uid; }; struct mlx5_eq_table { @@ -583,10 +581,11 @@ struct mlx5_irq_info { }; struct mlx5_fc_stats { - struct rb_root counters; - struct 
list_head addlist; - /* protect addlist add/splice operations */ - spinlock_t addlist_lock; + spinlock_t counters_idr_lock; /* protects counters_idr */ + struct idr counters_idr; + struct list_head counters; + struct llist_head addlist; + struct llist_head dellist; struct workqueue_struct *wq; struct delayed_work work; @@ -804,7 +803,7 @@ struct mlx5_pps { }; struct mlx5_clock { - rwlock_t lock; + seqlock_t lock; struct cyclecounter cycles; struct timecounter tc; struct hwtstamp_config hwtstamp_config; @@ -837,6 +836,7 @@ struct mlx5_core_dev { u32 fpga[MLX5_ST_SZ_DW(fpga_cap)]; u32 qcam[MLX5_ST_SZ_DW(qcam_reg)]; } caps; + u64 sys_image_guid; phys_addr_t iseg_base; struct mlx5_init_seg __iomem *iseg; enum mlx5_device_state state; @@ -994,10 +994,12 @@ static inline u32 mlx5_base_mkey(const u32 key) return key & 0xffffff00u; } -static inline void mlx5_fill_fbc_offset(u8 log_stride, u8 log_sz, +static inline void mlx5_init_fbc_offset(struct mlx5_buf_list *frags, + u8 log_stride, u8 log_sz, u16 strides_offset, struct mlx5_frag_buf_ctrl *fbc) { + fbc->frags = frags; fbc->log_stride = log_stride; fbc->log_sz = log_sz; fbc->sz_m1 = (1 << fbc->log_sz) - 1; @@ -1006,18 +1008,11 @@ static inline void mlx5_fill_fbc_offset(u8 log_stride, u8 log_sz, fbc->strides_offset = strides_offset; } -static inline void mlx5_fill_fbc(u8 log_stride, u8 log_sz, +static inline void mlx5_init_fbc(struct mlx5_buf_list *frags, + u8 log_stride, u8 log_sz, struct mlx5_frag_buf_ctrl *fbc) { - mlx5_fill_fbc_offset(log_stride, log_sz, 0, fbc); -} - -static inline void mlx5_core_init_cq_frag_buf(struct mlx5_frag_buf_ctrl *fbc, - void *cqc) -{ - mlx5_fill_fbc(6 + MLX5_GET(cqc, cqc, cqe_sz), - MLX5_GET(cqc, cqc, log_cq_size), - fbc); + mlx5_init_fbc_offset(frags, log_stride, log_sz, 0, fbc); } static inline void *mlx5_frag_buf_get_wqe(struct mlx5_frag_buf_ctrl *fbc, @@ -1028,8 +1023,15 @@ static inline void *mlx5_frag_buf_get_wqe(struct mlx5_frag_buf_ctrl *fbc, ix += fbc->strides_offset; frag = ix >> fbc->log_frag_strides; - return fbc->frag_buf.frags[frag].buf + - ((fbc->frag_sz_m1 & ix) << fbc->log_stride); + return fbc->frags[frag].buf + ((fbc->frag_sz_m1 & ix) << fbc->log_stride); +} + +static inline u32 +mlx5_frag_buf_get_idx_last_contig_stride(struct mlx5_frag_buf_ctrl *fbc, u32 ix) +{ + u32 last_frag_stride_idx = (ix + fbc->strides_offset) | fbc->frag_sz_m1; + + return min_t(u32, last_frag_stride_idx - fbc->strides_offset, fbc->sz_m1); } int mlx5_cmd_init(struct mlx5_core_dev *dev); @@ -1226,21 +1228,15 @@ int mlx5_lag_query_cong_counters(struct mlx5_core_dev *dev, struct mlx5_uars_page *mlx5_get_uars_page(struct mlx5_core_dev *mdev); void mlx5_put_uars_page(struct mlx5_core_dev *mdev, struct mlx5_uars_page *up); -#ifndef CONFIG_MLX5_CORE_IPOIB -static inline -struct net_device *mlx5_rdma_netdev_alloc(struct mlx5_core_dev *mdev, - struct ib_device *ibdev, - const char *name, - void (*setup)(struct net_device *)) -{ - return ERR_PTR(-EOPNOTSUPP); -} -#else +#ifdef CONFIG_MLX5_CORE_IPOIB struct net_device *mlx5_rdma_netdev_alloc(struct mlx5_core_dev *mdev, struct ib_device *ibdev, const char *name, void (*setup)(struct net_device *)); #endif /* CONFIG_MLX5_CORE_IPOIB */ +int mlx5_rdma_rn_get_params(struct mlx5_core_dev *mdev, + struct ib_device *device, + struct rdma_netdev_alloc_params *params); struct mlx5_profile { u64 mask; diff --git a/include/linux/mlx5/fs.h b/include/linux/mlx5/fs.h index 804516e4f483..5660f07d3be0 100644 --- a/include/linux/mlx5/fs.h +++ b/include/linux/mlx5/fs.h @@ -45,7 +45,8 @@ enum { }; enum { 
- MLX5_FLOW_TABLE_TUNNEL_EN = BIT(0), + MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT = BIT(0), + MLX5_FLOW_TABLE_TUNNEL_EN_DECAP = BIT(1), }; #define LEFTOVERS_RULE_NUM 2 @@ -91,7 +92,7 @@ struct mlx5_flow_destination { u32 tir_num; u32 ft_num; struct mlx5_flow_table *ft; - struct mlx5_fc *counter; + u32 counter_id; struct { u16 num; u16 vhca_id; @@ -101,6 +102,8 @@ struct mlx5_flow_destination { }; struct mlx5_flow_namespace * +mlx5_get_fdb_sub_ns(struct mlx5_core_dev *dev, int n); +struct mlx5_flow_namespace * mlx5_get_flow_namespace(struct mlx5_core_dev *dev, enum mlx5_flow_namespace_type type); struct mlx5_flow_namespace * @@ -155,20 +158,28 @@ struct mlx5_fs_vlan { #define MLX5_FS_VLAN_DEPTH 2 +enum { + FLOW_ACT_HAS_TAG = BIT(0), + FLOW_ACT_NO_APPEND = BIT(1), +}; + struct mlx5_flow_act { u32 action; - bool has_flow_tag; u32 flow_tag; - u32 encap_id; + u32 reformat_id; u32 modify_id; uintptr_t esp_id; + u32 flags; struct mlx5_fs_vlan vlan[MLX5_FS_VLAN_DEPTH]; struct ib_counters *counters; }; #define MLX5_DECLARE_FLOW_ACT(name) \ - struct mlx5_flow_act name = {MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,\ - MLX5_FS_DEFAULT_FLOW_TAG, 0, 0} + struct mlx5_flow_act name = { .action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,\ + .flow_tag = MLX5_FS_DEFAULT_FLOW_TAG, \ + .reformat_id = 0, \ + .modify_id = 0, \ + .flags = 0, } /* Single destination per rule. * Group ID is implied by the match criteria. @@ -185,15 +196,30 @@ int mlx5_modify_rule_destination(struct mlx5_flow_handle *handler, struct mlx5_flow_destination *new_dest, struct mlx5_flow_destination *old_dest); -struct mlx5_fc *mlx5_flow_rule_counter(struct mlx5_flow_handle *handler); struct mlx5_fc *mlx5_fc_create(struct mlx5_core_dev *dev, bool aging); void mlx5_fc_destroy(struct mlx5_core_dev *dev, struct mlx5_fc *counter); void mlx5_fc_query_cached(struct mlx5_fc *counter, u64 *bytes, u64 *packets, u64 *lastuse); int mlx5_fc_query(struct mlx5_core_dev *dev, struct mlx5_fc *counter, u64 *packets, u64 *bytes); +u32 mlx5_fc_id(struct mlx5_fc *counter); int mlx5_fs_add_rx_underlay_qpn(struct mlx5_core_dev *dev, u32 underlay_qpn); int mlx5_fs_remove_rx_underlay_qpn(struct mlx5_core_dev *dev, u32 underlay_qpn); +int mlx5_modify_header_alloc(struct mlx5_core_dev *dev, + u8 namespace, u8 num_actions, + void *modify_actions, u32 *modify_header_id); +void mlx5_modify_header_dealloc(struct mlx5_core_dev *dev, + u32 modify_header_id); + +int mlx5_packet_reformat_alloc(struct mlx5_core_dev *dev, + int reformat_type, + size_t size, + void *reformat_data, + enum mlx5_flow_namespace_type namespace, + u32 *packet_reformat_id); +void mlx5_packet_reformat_dealloc(struct mlx5_core_dev *dev, + u32 packet_reformat_id); + #endif diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index f043d65b9bac..dbff9ff28f2c 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h @@ -243,8 +243,8 @@ enum { MLX5_CMD_OP_DEALLOC_FLOW_COUNTER = 0x93a, MLX5_CMD_OP_QUERY_FLOW_COUNTER = 0x93b, MLX5_CMD_OP_MODIFY_FLOW_TABLE = 0x93c, - MLX5_CMD_OP_ALLOC_ENCAP_HEADER = 0x93d, - MLX5_CMD_OP_DEALLOC_ENCAP_HEADER = 0x93e, + MLX5_CMD_OP_ALLOC_PACKET_REFORMAT_CONTEXT = 0x93d, + MLX5_CMD_OP_DEALLOC_PACKET_REFORMAT_CONTEXT = 0x93e, MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT = 0x940, MLX5_CMD_OP_DEALLOC_MODIFY_HEADER_CONTEXT = 0x941, MLX5_CMD_OP_QUERY_MODIFY_HEADER_CONTEXT = 0x942, @@ -336,7 +336,7 @@ struct mlx5_ifc_flow_table_prop_layout_bits { u8 modify_root[0x1]; u8 identified_miss_table_mode[0x1]; u8 flow_table_modify[0x1]; - u8 encap[0x1]; + u8 reformat[0x1]; u8 
decap[0x1]; u8 reserved_at_9[0x1]; u8 pop_vlan[0x1]; @@ -344,8 +344,12 @@ struct mlx5_ifc_flow_table_prop_layout_bits { u8 reserved_at_c[0x1]; u8 pop_vlan_2[0x1]; u8 push_vlan_2[0x1]; - u8 reserved_at_f[0x11]; - + u8 reformat_and_vlan_action[0x1]; + u8 reserved_at_10[0x2]; + u8 reformat_l3_tunnel_to_l2[0x1]; + u8 reformat_l2_to_l3_tunnel[0x1]; + u8 reformat_and_modify_action[0x1]; + u8 reserved_at_14[0xb]; u8 reserved_at_20[0x2]; u8 log_max_ft_size[0x6]; u8 log_max_modify_header_context[0x8]; @@ -554,7 +558,13 @@ struct mlx5_ifc_flow_table_nic_cap_bits { u8 nic_rx_multi_path_tirs[0x1]; u8 nic_rx_multi_path_tirs_fts[0x1]; u8 allow_sniffer_and_nic_rx_shared_tir[0x1]; - u8 reserved_at_3[0x1fd]; + u8 reserved_at_3[0x1d]; + u8 encap_general_header[0x1]; + u8 reserved_at_21[0xa]; + u8 log_max_packet_reformat_context[0x5]; + u8 reserved_at_30[0x6]; + u8 max_encap_header_size[0xa]; + u8 reserved_at_40[0x1c0]; struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_receive; @@ -574,7 +584,9 @@ struct mlx5_ifc_flow_table_nic_cap_bits { struct mlx5_ifc_flow_table_eswitch_cap_bits { u8 reserved_at_0[0x1c]; u8 fdb_multi_path_to_table[0x1]; - u8 reserved_at_1d[0x1e3]; + u8 reserved_at_1d[0x1]; + u8 multi_fdb_encap[0x1]; + u8 reserved_at_1e[0x1e1]; struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_esw_fdb; @@ -599,7 +611,7 @@ struct mlx5_ifc_e_switch_cap_bits { u8 vxlan_encap_decap[0x1]; u8 nvgre_encap_decap[0x1]; u8 reserved_at_22[0x9]; - u8 log_max_encap_headers[0x5]; + u8 log_max_packet_reformat_context[0x5]; u8 reserved_2b[0x6]; u8 max_encap_header_size[0xa]; @@ -896,7 +908,8 @@ struct mlx5_ifc_cmd_hca_cap_bits { u8 log_max_mkey[0x6]; u8 reserved_at_f0[0x8]; u8 dump_fill_mkey[0x1]; - u8 reserved_at_f9[0x3]; + u8 reserved_at_f9[0x2]; + u8 fast_teardown[0x1]; u8 log_max_eq[0x4]; u8 max_indirection[0x8]; @@ -995,7 +1008,8 @@ struct mlx5_ifc_cmd_hca_cap_bits { u8 umr_modify_atomic_disabled[0x1]; u8 umr_indirect_mkey_disabled[0x1]; u8 umr_fence[0x2]; - u8 reserved_at_20c[0x3]; + u8 dc_req_scat_data_cqe[0x1]; + u8 reserved_at_20d[0x2]; u8 drain_sigerr[0x1]; u8 cmdif_checksum[0x2]; u8 sigerr_cqe[0x1]; @@ -1280,7 +1294,9 @@ struct mlx5_ifc_wq_bits { u8 reserved_at_118[0x3]; u8 log_wq_sz[0x5]; - u8 reserved_at_120[0x3]; + u8 dbr_umem_valid[0x1]; + u8 wq_umem_valid[0x1]; + u8 reserved_at_122[0x1]; u8 log_hairpin_num_packets[0x5]; u8 reserved_at_128[0x3]; u8 log_hairpin_data_sz[0x5]; @@ -2354,7 +2370,10 @@ struct mlx5_ifc_qpc_bits { u8 dc_access_key[0x40]; - u8 reserved_at_680[0xc0]; + u8 reserved_at_680[0x3]; + u8 dbr_umem_valid[0x1]; + + u8 reserved_at_684[0xbc]; }; struct mlx5_ifc_roce_addr_layout_bits { @@ -2394,7 +2413,7 @@ enum { MLX5_FLOW_CONTEXT_ACTION_DROP = 0x2, MLX5_FLOW_CONTEXT_ACTION_FWD_DEST = 0x4, MLX5_FLOW_CONTEXT_ACTION_COUNT = 0x8, - MLX5_FLOW_CONTEXT_ACTION_ENCAP = 0x10, + MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT = 0x10, MLX5_FLOW_CONTEXT_ACTION_DECAP = 0x20, MLX5_FLOW_CONTEXT_ACTION_MOD_HDR = 0x40, MLX5_FLOW_CONTEXT_ACTION_VLAN_POP = 0x80, @@ -2427,7 +2446,7 @@ struct mlx5_ifc_flow_context_bits { u8 reserved_at_a0[0x8]; u8 flow_counter_list_size[0x18]; - u8 encap_id[0x20]; + u8 packet_reformat_id[0x20]; u8 modify_header_id[0x20]; @@ -2454,7 +2473,7 @@ struct mlx5_ifc_xrc_srqc_bits { u8 wq_signature[0x1]; u8 cont_srq[0x1]; - u8 reserved_at_22[0x1]; + u8 dbr_umem_valid[0x1]; u8 rlky[0x1]; u8 basic_cyclic_rcv_wqe[0x1]; u8 log_rq_stride[0x3]; @@ -2549,8 +2568,8 @@ enum { }; enum { - MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST_ = 0x1, - 
MLX5_TIRC_SELF_LB_BLOCK_BLOCK_MULTICAST_ = 0x2, + MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST = 0x1, + MLX5_TIRC_SELF_LB_BLOCK_BLOCK_MULTICAST = 0x2, }; struct mlx5_ifc_tirc_bits { @@ -3118,7 +3137,9 @@ enum { struct mlx5_ifc_cqc_bits { u8 status[0x4]; - u8 reserved_at_4[0x4]; + u8 reserved_at_4[0x2]; + u8 dbr_umem_valid[0x1]; + u8 reserved_at_7[0x1]; u8 cqe_sz[0x3]; u8 cc[0x1]; u8 reserved_at_c[0x1]; @@ -3352,12 +3373,13 @@ struct mlx5_ifc_teardown_hca_out_bits { u8 reserved_at_40[0x3f]; - u8 force_state[0x1]; + u8 state[0x1]; }; enum { MLX5_TEARDOWN_HCA_IN_PROFILE_GRACEFUL_CLOSE = 0x0, MLX5_TEARDOWN_HCA_IN_PROFILE_FORCE_CLOSE = 0x1, + MLX5_TEARDOWN_HCA_IN_PROFILE_PREPARE_FAST_TEARDOWN = 0x2, }; struct mlx5_ifc_teardown_hca_in_bits { @@ -3384,7 +3406,7 @@ struct mlx5_ifc_sqerr2rts_qp_out_bits { struct mlx5_ifc_sqerr2rts_qp_in_bits { u8 opcode[0x10]; - u8 reserved_at_10[0x10]; + u8 uid[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; @@ -3414,7 +3436,7 @@ struct mlx5_ifc_sqd2rts_qp_out_bits { struct mlx5_ifc_sqd2rts_qp_in_bits { u8 opcode[0x10]; - u8 reserved_at_10[0x10]; + u8 uid[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; @@ -3619,7 +3641,7 @@ struct mlx5_ifc_rts2rts_qp_out_bits { struct mlx5_ifc_rts2rts_qp_in_bits { u8 opcode[0x10]; - u8 reserved_at_10[0x10]; + u8 uid[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; @@ -3649,7 +3671,7 @@ struct mlx5_ifc_rtr2rts_qp_out_bits { struct mlx5_ifc_rtr2rts_qp_in_bits { u8 opcode[0x10]; - u8 reserved_at_10[0x10]; + u8 uid[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; @@ -3679,7 +3701,7 @@ struct mlx5_ifc_rst2init_qp_out_bits { struct mlx5_ifc_rst2init_qp_in_bits { u8 opcode[0x10]; - u8 reserved_at_10[0x10]; + u8 uid[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; @@ -4802,19 +4824,19 @@ struct mlx5_ifc_query_eq_in_bits { u8 reserved_at_60[0x20]; }; -struct mlx5_ifc_encap_header_in_bits { +struct mlx5_ifc_packet_reformat_context_in_bits { u8 reserved_at_0[0x5]; - u8 header_type[0x3]; + u8 reformat_type[0x3]; u8 reserved_at_8[0xe]; - u8 encap_header_size[0xa]; + u8 reformat_data_size[0xa]; u8 reserved_at_20[0x10]; - u8 encap_header[2][0x8]; + u8 reformat_data[2][0x8]; - u8 more_encap_header[0][0x8]; + u8 more_reformat_data[0][0x8]; }; -struct mlx5_ifc_query_encap_header_out_bits { +struct mlx5_ifc_query_packet_reformat_context_out_bits { u8 status[0x8]; u8 reserved_at_8[0x18]; @@ -4822,33 +4844,41 @@ struct mlx5_ifc_query_encap_header_out_bits { u8 reserved_at_40[0xa0]; - struct mlx5_ifc_encap_header_in_bits encap_header[0]; + struct mlx5_ifc_packet_reformat_context_in_bits packet_reformat_context[0]; }; -struct mlx5_ifc_query_encap_header_in_bits { +struct mlx5_ifc_query_packet_reformat_context_in_bits { u8 opcode[0x10]; u8 reserved_at_10[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; - u8 encap_id[0x20]; + u8 packet_reformat_id[0x20]; u8 reserved_at_60[0xa0]; }; -struct mlx5_ifc_alloc_encap_header_out_bits { +struct mlx5_ifc_alloc_packet_reformat_context_out_bits { u8 status[0x8]; u8 reserved_at_8[0x18]; u8 syndrome[0x20]; - u8 encap_id[0x20]; + u8 packet_reformat_id[0x20]; u8 reserved_at_60[0x20]; }; -struct mlx5_ifc_alloc_encap_header_in_bits { +enum { + MLX5_REFORMAT_TYPE_L2_TO_VXLAN = 0x0, + MLX5_REFORMAT_TYPE_L2_TO_NVGRE = 0x1, + MLX5_REFORMAT_TYPE_L2_TO_L2_TUNNEL = 0x2, + MLX5_REFORMAT_TYPE_L3_TUNNEL_TO_L2 = 0x3, + MLX5_REFORMAT_TYPE_L2_TO_L3_TUNNEL = 0x4, +}; + +struct mlx5_ifc_alloc_packet_reformat_context_in_bits { u8 opcode[0x10]; u8 reserved_at_10[0x10]; @@ -4857,10 +4887,10 @@ struct mlx5_ifc_alloc_encap_header_in_bits { u8 
reserved_at_40[0xa0]; - struct mlx5_ifc_encap_header_in_bits encap_header; + struct mlx5_ifc_packet_reformat_context_in_bits packet_reformat_context; }; -struct mlx5_ifc_dealloc_encap_header_out_bits { +struct mlx5_ifc_dealloc_packet_reformat_context_out_bits { u8 status[0x8]; u8 reserved_at_8[0x18]; @@ -4869,14 +4899,14 @@ struct mlx5_ifc_dealloc_encap_header_out_bits { u8 reserved_at_40[0x40]; }; -struct mlx5_ifc_dealloc_encap_header_in_bits { +struct mlx5_ifc_dealloc_packet_reformat_context_in_bits { u8 opcode[0x10]; u8 reserved_at_10[0x10]; u8 reserved_20[0x10]; u8 op_mod[0x10]; - u8 encap_id[0x20]; + u8 packet_reformat_id[0x20]; u8 reserved_60[0x20]; }; @@ -5174,7 +5204,7 @@ struct mlx5_ifc_qp_2rst_out_bits { struct mlx5_ifc_qp_2rst_in_bits { u8 opcode[0x10]; - u8 reserved_at_10[0x10]; + u8 uid[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; @@ -5196,7 +5226,7 @@ struct mlx5_ifc_qp_2err_out_bits { struct mlx5_ifc_qp_2err_in_bits { u8 opcode[0x10]; - u8 reserved_at_10[0x10]; + u8 uid[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; @@ -5296,7 +5326,7 @@ struct mlx5_ifc_modify_tis_bitmask_bits { struct mlx5_ifc_modify_tis_in_bits { u8 opcode[0x10]; - u8 reserved_at_10[0x10]; + u8 uid[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; @@ -5335,7 +5365,7 @@ struct mlx5_ifc_modify_tir_out_bits { struct mlx5_ifc_modify_tir_in_bits { u8 opcode[0x10]; - u8 reserved_at_10[0x10]; + u8 uid[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; @@ -5363,7 +5393,7 @@ struct mlx5_ifc_modify_sq_out_bits { struct mlx5_ifc_modify_sq_in_bits { u8 opcode[0x10]; - u8 reserved_at_10[0x10]; + u8 uid[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; @@ -5436,7 +5466,7 @@ struct mlx5_ifc_rqt_bitmask_bits { struct mlx5_ifc_modify_rqt_in_bits { u8 opcode[0x10]; - u8 reserved_at_10[0x10]; + u8 uid[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; @@ -5470,7 +5500,7 @@ enum { struct mlx5_ifc_modify_rq_in_bits { u8 opcode[0x10]; - u8 reserved_at_10[0x10]; + u8 uid[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; @@ -5506,7 +5536,7 @@ struct mlx5_ifc_rmp_bitmask_bits { struct mlx5_ifc_modify_rmp_in_bits { u8 opcode[0x10]; - u8 reserved_at_10[0x10]; + u8 uid[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; @@ -5611,7 +5641,7 @@ enum { struct mlx5_ifc_modify_cq_in_bits { u8 opcode[0x10]; - u8 reserved_at_10[0x10]; + u8 uid[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; @@ -5623,7 +5653,10 @@ struct mlx5_ifc_modify_cq_in_bits { struct mlx5_ifc_cqc_bits cq_context; - u8 reserved_at_280[0x600]; + u8 reserved_at_280[0x40]; + + u8 cq_umem_valid[0x1]; + u8 reserved_at_2c1[0x5bf]; u8 pas[0][0x40]; }; @@ -5771,7 +5804,7 @@ struct mlx5_ifc_init2rtr_qp_out_bits { struct mlx5_ifc_init2rtr_qp_in_bits { u8 opcode[0x10]; - u8 reserved_at_10[0x10]; + u8 uid[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; @@ -5801,7 +5834,7 @@ struct mlx5_ifc_init2init_qp_out_bits { struct mlx5_ifc_init2init_qp_in_bits { u8 opcode[0x10]; - u8 reserved_at_10[0x10]; + u8 uid[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; @@ -5900,7 +5933,7 @@ struct mlx5_ifc_drain_dct_out_bits { struct mlx5_ifc_drain_dct_in_bits { u8 opcode[0x10]; - u8 reserved_at_10[0x10]; + u8 uid[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; @@ -5944,7 +5977,7 @@ struct mlx5_ifc_detach_from_mcg_out_bits { struct mlx5_ifc_detach_from_mcg_in_bits { u8 opcode[0x10]; - u8 reserved_at_10[0x10]; + u8 uid[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; @@ -5968,7 +6001,7 @@ struct mlx5_ifc_destroy_xrq_out_bits { struct mlx5_ifc_destroy_xrq_in_bits { u8 opcode[0x10]; - u8 
reserved_at_10[0x10]; + u8 uid[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; @@ -5990,7 +6023,7 @@ struct mlx5_ifc_destroy_xrc_srq_out_bits { struct mlx5_ifc_destroy_xrc_srq_in_bits { u8 opcode[0x10]; - u8 reserved_at_10[0x10]; + u8 uid[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; @@ -6012,7 +6045,7 @@ struct mlx5_ifc_destroy_tis_out_bits { struct mlx5_ifc_destroy_tis_in_bits { u8 opcode[0x10]; - u8 reserved_at_10[0x10]; + u8 uid[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; @@ -6034,7 +6067,7 @@ struct mlx5_ifc_destroy_tir_out_bits { struct mlx5_ifc_destroy_tir_in_bits { u8 opcode[0x10]; - u8 reserved_at_10[0x10]; + u8 uid[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; @@ -6056,7 +6089,7 @@ struct mlx5_ifc_destroy_srq_out_bits { struct mlx5_ifc_destroy_srq_in_bits { u8 opcode[0x10]; - u8 reserved_at_10[0x10]; + u8 uid[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; @@ -6078,7 +6111,7 @@ struct mlx5_ifc_destroy_sq_out_bits { struct mlx5_ifc_destroy_sq_in_bits { u8 opcode[0x10]; - u8 reserved_at_10[0x10]; + u8 uid[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; @@ -6124,7 +6157,7 @@ struct mlx5_ifc_destroy_rqt_out_bits { struct mlx5_ifc_destroy_rqt_in_bits { u8 opcode[0x10]; - u8 reserved_at_10[0x10]; + u8 uid[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; @@ -6146,7 +6179,7 @@ struct mlx5_ifc_destroy_rq_out_bits { struct mlx5_ifc_destroy_rq_in_bits { u8 opcode[0x10]; - u8 reserved_at_10[0x10]; + u8 uid[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; @@ -6190,7 +6223,7 @@ struct mlx5_ifc_destroy_rmp_out_bits { struct mlx5_ifc_destroy_rmp_in_bits { u8 opcode[0x10]; - u8 reserved_at_10[0x10]; + u8 uid[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; @@ -6212,7 +6245,7 @@ struct mlx5_ifc_destroy_qp_out_bits { struct mlx5_ifc_destroy_qp_in_bits { u8 opcode[0x10]; - u8 reserved_at_10[0x10]; + u8 uid[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; @@ -6364,7 +6397,7 @@ struct mlx5_ifc_destroy_dct_out_bits { struct mlx5_ifc_destroy_dct_in_bits { u8 opcode[0x10]; - u8 reserved_at_10[0x10]; + u8 uid[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; @@ -6386,7 +6419,7 @@ struct mlx5_ifc_destroy_cq_out_bits { struct mlx5_ifc_destroy_cq_in_bits { u8 opcode[0x10]; - u8 reserved_at_10[0x10]; + u8 uid[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; @@ -6489,7 +6522,7 @@ struct mlx5_ifc_dealloc_xrcd_out_bits { struct mlx5_ifc_dealloc_xrcd_in_bits { u8 opcode[0x10]; - u8 reserved_at_10[0x10]; + u8 uid[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; @@ -6577,7 +6610,7 @@ struct mlx5_ifc_dealloc_pd_out_bits { struct mlx5_ifc_dealloc_pd_in_bits { u8 opcode[0x10]; - u8 reserved_at_10[0x10]; + u8 uid[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; @@ -6623,7 +6656,7 @@ struct mlx5_ifc_create_xrq_out_bits { struct mlx5_ifc_create_xrq_in_bits { u8 opcode[0x10]; - u8 reserved_at_10[0x10]; + u8 uid[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; @@ -6647,7 +6680,7 @@ struct mlx5_ifc_create_xrc_srq_out_bits { struct mlx5_ifc_create_xrc_srq_in_bits { u8 opcode[0x10]; - u8 reserved_at_10[0x10]; + u8 uid[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; @@ -6656,7 +6689,9 @@ struct mlx5_ifc_create_xrc_srq_in_bits { struct mlx5_ifc_xrc_srqc_bits xrc_srq_context_entry; - u8 reserved_at_280[0x600]; + u8 reserved_at_280[0x40]; + u8 xrc_srq_umem_valid[0x1]; + u8 reserved_at_2c1[0x5bf]; u8 pas[0][0x40]; }; @@ -6675,7 +6710,7 @@ struct mlx5_ifc_create_tis_out_bits { struct mlx5_ifc_create_tis_in_bits { u8 opcode[0x10]; - u8 reserved_at_10[0x10]; + u8 uid[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; @@ 
-6699,7 +6734,7 @@ struct mlx5_ifc_create_tir_out_bits { struct mlx5_ifc_create_tir_in_bits { u8 opcode[0x10]; - u8 reserved_at_10[0x10]; + u8 uid[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; @@ -6723,7 +6758,7 @@ struct mlx5_ifc_create_srq_out_bits { struct mlx5_ifc_create_srq_in_bits { u8 opcode[0x10]; - u8 reserved_at_10[0x10]; + u8 uid[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; @@ -6751,7 +6786,7 @@ struct mlx5_ifc_create_sq_out_bits { struct mlx5_ifc_create_sq_in_bits { u8 opcode[0x10]; - u8 reserved_at_10[0x10]; + u8 uid[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; @@ -6805,7 +6840,7 @@ struct mlx5_ifc_create_rqt_out_bits { struct mlx5_ifc_create_rqt_in_bits { u8 opcode[0x10]; - u8 reserved_at_10[0x10]; + u8 uid[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; @@ -6829,7 +6864,7 @@ struct mlx5_ifc_create_rq_out_bits { struct mlx5_ifc_create_rq_in_bits { u8 opcode[0x10]; - u8 reserved_at_10[0x10]; + u8 uid[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; @@ -6853,7 +6888,7 @@ struct mlx5_ifc_create_rmp_out_bits { struct mlx5_ifc_create_rmp_in_bits { u8 opcode[0x10]; - u8 reserved_at_10[0x10]; + u8 uid[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; @@ -6877,7 +6912,7 @@ struct mlx5_ifc_create_qp_out_bits { struct mlx5_ifc_create_qp_in_bits { u8 opcode[0x10]; - u8 reserved_at_10[0x10]; + u8 uid[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; @@ -6890,7 +6925,10 @@ struct mlx5_ifc_create_qp_in_bits { struct mlx5_ifc_qpc_bits qpc; - u8 reserved_at_800[0x80]; + u8 reserved_at_800[0x60]; + + u8 wq_umem_valid[0x1]; + u8 reserved_at_861[0x1f]; u8 pas[0][0x40]; }; @@ -6952,7 +6990,8 @@ struct mlx5_ifc_create_mkey_in_bits { u8 reserved_at_40[0x20]; u8 pg_access[0x1]; - u8 reserved_at_61[0x1f]; + u8 mkey_umem_valid[0x1]; + u8 reserved_at_62[0x1e]; struct mlx5_ifc_mkc_bits memory_key_mkey_entry; @@ -6978,7 +7017,7 @@ struct mlx5_ifc_create_flow_table_out_bits { }; struct mlx5_ifc_flow_table_context_bits { - u8 encap_en[0x1]; + u8 reformat_en[0x1]; u8 decap_en[0x1]; u8 reserved_at_2[0x2]; u8 table_miss_action[0x4]; @@ -7120,7 +7159,7 @@ struct mlx5_ifc_create_dct_out_bits { struct mlx5_ifc_create_dct_in_bits { u8 opcode[0x10]; - u8 reserved_at_10[0x10]; + u8 uid[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; @@ -7146,7 +7185,7 @@ struct mlx5_ifc_create_cq_out_bits { struct mlx5_ifc_create_cq_in_bits { u8 opcode[0x10]; - u8 reserved_at_10[0x10]; + u8 uid[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; @@ -7155,7 +7194,10 @@ struct mlx5_ifc_create_cq_in_bits { struct mlx5_ifc_cqc_bits cq_context; - u8 reserved_at_280[0x600]; + u8 reserved_at_280[0x60]; + + u8 cq_umem_valid[0x1]; + u8 reserved_at_2e1[0x59f]; u8 pas[0][0x40]; }; @@ -7203,7 +7245,7 @@ struct mlx5_ifc_attach_to_mcg_out_bits { struct mlx5_ifc_attach_to_mcg_in_bits { u8 opcode[0x10]; - u8 reserved_at_10[0x10]; + u8 uid[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; @@ -7254,7 +7296,7 @@ enum { struct mlx5_ifc_arm_xrc_srq_in_bits { u8 opcode[0x10]; - u8 reserved_at_10[0x10]; + u8 uid[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; @@ -7282,7 +7324,7 @@ enum { struct mlx5_ifc_arm_rq_in_bits { u8 opcode[0x10]; - u8 reserved_at_10[0x10]; + u8 uid[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; @@ -7330,7 +7372,7 @@ struct mlx5_ifc_alloc_xrcd_out_bits { struct mlx5_ifc_alloc_xrcd_in_bits { u8 opcode[0x10]; - u8 reserved_at_10[0x10]; + u8 uid[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; @@ -7418,7 +7460,7 @@ struct mlx5_ifc_alloc_pd_out_bits { struct mlx5_ifc_alloc_pd_in_bits { u8 opcode[0x10]; - u8 
reserved_at_10[0x10]; + u8 uid[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; @@ -7786,20 +7828,34 @@ struct mlx5_ifc_pplr_reg_bits { struct mlx5_ifc_pplm_reg_bits { u8 reserved_at_0[0x8]; - u8 local_port[0x8]; - u8 reserved_at_10[0x10]; + u8 local_port[0x8]; + u8 reserved_at_10[0x10]; - u8 reserved_at_20[0x20]; + u8 reserved_at_20[0x20]; - u8 port_profile_mode[0x8]; - u8 static_port_profile[0x8]; - u8 active_port_profile[0x8]; - u8 reserved_at_58[0x8]; + u8 port_profile_mode[0x8]; + u8 static_port_profile[0x8]; + u8 active_port_profile[0x8]; + u8 reserved_at_58[0x8]; - u8 retransmission_active[0x8]; - u8 fec_mode_active[0x18]; + u8 retransmission_active[0x8]; + u8 fec_mode_active[0x18]; - u8 reserved_at_80[0x20]; + u8 rs_fec_correction_bypass_cap[0x4]; + u8 reserved_at_84[0x8]; + u8 fec_override_cap_56g[0x4]; + u8 fec_override_cap_100g[0x4]; + u8 fec_override_cap_50g[0x4]; + u8 fec_override_cap_25g[0x4]; + u8 fec_override_cap_10g_40g[0x4]; + + u8 rs_fec_correction_bypass_admin[0x4]; + u8 reserved_at_a4[0x8]; + u8 fec_override_admin_56g[0x4]; + u8 fec_override_admin_100g[0x4]; + u8 fec_override_admin_50g[0x4]; + u8 fec_override_admin_25g[0x4]; + u8 fec_override_admin_10g_40g[0x4]; }; struct mlx5_ifc_ppcnt_reg_bits { @@ -8084,7 +8140,8 @@ struct mlx5_ifc_pcam_enhanced_features_bits { u8 rx_icrc_encapsulated_counter[0x1]; u8 reserved_at_6e[0x8]; u8 pfcc_mask[0x1]; - u8 reserved_at_77[0x4]; + u8 reserved_at_77[0x3]; + u8 per_lane_error_counters[0x1]; u8 rx_buffer_fullness_counters[0x1]; u8 ptys_connector_type[0x1]; u8 reserved_at_7d[0x1]; @@ -8095,7 +8152,10 @@ struct mlx5_ifc_pcam_enhanced_features_bits { struct mlx5_ifc_pcam_regs_5000_to_507f_bits { u8 port_access_reg_cap_mask_127_to_96[0x20]; u8 port_access_reg_cap_mask_95_to_64[0x20]; - u8 port_access_reg_cap_mask_63_to_32[0x20]; + + u8 port_access_reg_cap_mask_63_to_36[0x1c]; + u8 pplm[0x1]; + u8 port_access_reg_cap_mask_34_to_32[0x3]; u8 port_access_reg_cap_mask_31_to_13[0x13]; u8 pbmc[0x1]; diff --git a/include/linux/mlx5/qp.h b/include/linux/mlx5/qp.h index 4778d41085d4..fbe322c966bc 100644 --- a/include/linux/mlx5/qp.h +++ b/include/linux/mlx5/qp.h @@ -471,6 +471,7 @@ struct mlx5_core_qp { int qpn; struct mlx5_rsc_debug *dbg; int pid; + u16 uid; }; struct mlx5_core_dct { diff --git a/include/linux/mlx5/srq.h b/include/linux/mlx5/srq.h index 24ff23e27c8a..1b1f3c20c6a3 100644 --- a/include/linux/mlx5/srq.h +++ b/include/linux/mlx5/srq.h @@ -61,6 +61,7 @@ struct mlx5_srq_attr { u32 tm_next_tag; u32 tm_hw_phase_cnt; u32 tm_sw_phase_cnt; + u16 uid; }; struct mlx5_core_dev; diff --git a/include/linux/mlx5/transobj.h b/include/linux/mlx5/transobj.h index 83a33a1873a6..7f5ca2cd3a32 100644 --- a/include/linux/mlx5/transobj.h +++ b/include/linux/mlx5/transobj.h @@ -90,6 +90,8 @@ struct mlx5_hairpin { u32 *rqn; u32 *sqn; + + bool peer_gone; }; struct mlx5_hairpin * diff --git a/include/linux/mlx5/vport.h b/include/linux/mlx5/vport.h index 7e7c6dfcfb09..9c694808c212 100644 --- a/include/linux/mlx5/vport.h +++ b/include/linux/mlx5/vport.h @@ -121,4 +121,6 @@ int mlx5_nic_vport_query_local_lb(struct mlx5_core_dev *mdev, bool *status); int mlx5_nic_vport_affiliate_multiport(struct mlx5_core_dev *master_mdev, struct mlx5_core_dev *port_mdev); int mlx5_nic_vport_unaffiliate_multiport(struct mlx5_core_dev *port_mdev); + +u64 mlx5_query_nic_system_image_guid(struct mlx5_core_dev *mdev); #endif /* __MLX5_VPORT_H__ */ diff --git a/include/linux/mm.h b/include/linux/mm.h index a61ebe8ad4ca..1e52b8fd1685 100644 --- a/include/linux/mm.h +++ 
b/include/linux/mm.h @@ -848,6 +848,8 @@ static inline bool is_zone_device_page(const struct page *page) { return page_zonenum(page) == ZONE_DEVICE; } +extern void memmap_init_zone_device(struct zone *, unsigned long, + unsigned long, struct dev_pagemap *); #else static inline bool is_zone_device_page(const struct page *page) { @@ -890,6 +892,19 @@ static inline bool is_device_public_page(const struct page *page) page->pgmap->type == MEMORY_DEVICE_PUBLIC; } +#ifdef CONFIG_PCI_P2PDMA +static inline bool is_pci_p2pdma_page(const struct page *page) +{ + return is_zone_device_page(page) && + page->pgmap->type == MEMORY_DEVICE_PCI_P2PDMA; +} +#else /* CONFIG_PCI_P2PDMA */ +static inline bool is_pci_p2pdma_page(const struct page *page) +{ + return false; +} +#endif /* CONFIG_PCI_P2PDMA */ + #else /* CONFIG_DEV_PAGEMAP_OPS */ static inline void dev_pagemap_get_ops(void) { @@ -913,6 +928,11 @@ static inline bool is_device_public_page(const struct page *page) { return false; } + +static inline bool is_pci_p2pdma_page(const struct page *page) +{ + return false; +} #endif /* CONFIG_DEV_PAGEMAP_OPS */ static inline void get_page(struct page *page) @@ -2286,6 +2306,8 @@ extern unsigned long do_mmap(struct file *file, unsigned long addr, unsigned long len, unsigned long prot, unsigned long flags, vm_flags_t vm_flags, unsigned long pgoff, unsigned long *populate, struct list_head *uf); +extern int __do_munmap(struct mm_struct *, unsigned long, size_t, + struct list_head *uf, bool downgrade); extern int do_munmap(struct mm_struct *, unsigned long, size_t, struct list_head *uf); @@ -2455,6 +2477,12 @@ static inline struct vm_area_struct *find_exact_vma(struct mm_struct *mm, return vma; } +static inline bool range_in_vma(struct vm_area_struct *vma, + unsigned long start, unsigned long end) +{ + return (vma && vma->vm_start <= start && end <= vma->vm_end); +} + #ifdef CONFIG_MMU pgprot_t vm_get_page_prot(unsigned long vm_flags); void vma_set_page_prot(struct vm_area_struct *vma); @@ -2478,11 +2506,11 @@ struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr); int remap_pfn_range(struct vm_area_struct *, unsigned long addr, unsigned long pfn, unsigned long size, pgprot_t); int vm_insert_page(struct vm_area_struct *, unsigned long addr, struct page *); -int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr, +vm_fault_t vmf_insert_pfn(struct vm_area_struct *vma, unsigned long addr, unsigned long pfn); -int vm_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr, +vm_fault_t vmf_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr, unsigned long pfn, pgprot_t pgprot); -int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr, +vm_fault_t vmf_insert_mixed(struct vm_area_struct *vma, unsigned long addr, pfn_t pfn); vm_fault_t vmf_insert_mixed_mkwrite(struct vm_area_struct *vma, unsigned long addr, pfn_t pfn); @@ -2501,32 +2529,6 @@ static inline vm_fault_t vmf_insert_page(struct vm_area_struct *vma, return VM_FAULT_NOPAGE; } -static inline vm_fault_t vmf_insert_mixed(struct vm_area_struct *vma, - unsigned long addr, pfn_t pfn) -{ - int err = vm_insert_mixed(vma, addr, pfn); - - if (err == -ENOMEM) - return VM_FAULT_OOM; - if (err < 0 && err != -EBUSY) - return VM_FAULT_SIGBUS; - - return VM_FAULT_NOPAGE; -} - -static inline vm_fault_t vmf_insert_pfn(struct vm_area_struct *vma, - unsigned long addr, unsigned long pfn) -{ - int err = vm_insert_pfn(vma, addr, pfn); - - if (err == -ENOMEM) - return VM_FAULT_OOM; - if (err < 0 && err != -EBUSY) - return 
VM_FAULT_SIGBUS; - - return VM_FAULT_NOPAGE; -} - static inline vm_fault_t vmf_error(int err) { if (err == -ENOMEM) @@ -2534,16 +2536,8 @@ static inline vm_fault_t vmf_error(int err) return VM_FAULT_SIGBUS; } -struct page *follow_page_mask(struct vm_area_struct *vma, - unsigned long address, unsigned int foll_flags, - unsigned int *page_mask); - -static inline struct page *follow_page(struct vm_area_struct *vma, - unsigned long address, unsigned int foll_flags) -{ - unsigned int unused_page_mask; - return follow_page_mask(vma, address, foll_flags, &unused_page_mask); -} +struct page *follow_page(struct vm_area_struct *vma, unsigned long address, + unsigned int foll_flags); #define FOLL_WRITE 0x01 /* check pte is writable */ #define FOLL_TOUCH 0x02 /* mark page accessed */ diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h index beed7121c781..2a5fe75dd082 100644 --- a/include/linux/mmc/host.h +++ b/include/linux/mmc/host.h @@ -569,6 +569,11 @@ static inline bool mmc_can_retune(struct mmc_host *host) return host->can_retune == 1; } +static inline bool mmc_doing_retune(struct mmc_host *host) +{ + return host->doing_retune == 1; +} + static inline enum dma_data_direction mmc_get_dma_dir(struct mmc_data *data) { return data->flags & MMC_DATA_WRITE ? DMA_TO_DEVICE : DMA_FROM_DEVICE; diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h index 133ba78820ee..9893a6432adf 100644 --- a/include/linux/mmu_notifier.h +++ b/include/linux/mmu_notifier.h @@ -2,7 +2,6 @@ #ifndef _LINUX_MMU_NOTIFIER_H #define _LINUX_MMU_NOTIFIER_H -#include <linux/types.h> #include <linux/list.h> #include <linux/spinlock.h> #include <linux/mm_types.h> @@ -11,9 +10,6 @@ struct mmu_notifier; struct mmu_notifier_ops; -/* mmu_notifier_ops flags */ -#define MMU_INVALIDATE_DOES_NOT_BLOCK (0x01) - #ifdef CONFIG_MMU_NOTIFIER /* @@ -31,15 +27,6 @@ struct mmu_notifier_mm { struct mmu_notifier_ops { /* - * Flags to specify behavior of callbacks for this MMU notifier. - * Used to determine which context an operation may be called. - * - * MMU_INVALIDATE_DOES_NOT_BLOCK: invalidate_range_* callbacks do not - * block - */ - int flags; - - /* * Called either by mmu_notifier_unregister or when the mm is * being destroyed by exit_mmap, always before all pages are * freed. This can run concurrently with other mmu notifier @@ -153,7 +140,9 @@ struct mmu_notifier_ops { * * If blockable argument is set to false then the callback cannot * sleep and has to return with -EAGAIN. 0 should be returned - * otherwise. + * otherwise. Please note that if invalidate_range_start approves + * a non-blocking behavior then the same applies to + * invalidate_range_end. * */ int (*invalidate_range_start)(struct mmu_notifier *mn, @@ -181,10 +170,6 @@ struct mmu_notifier_ops { * Note that this function might be called with just a sub-range * of what was passed to invalidate_range_start()/end(), if * called between those functions. - * - * If this callback cannot block, and invalidate_range_{start,end} - * cannot block, mmu_notifier_ops.flags should have - * MMU_INVALIDATE_DOES_NOT_BLOCK set. 
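The mm.h hunk above removes the errno-returning vm_insert_pfn()/vm_insert_mixed() wrappers; vmf_insert_pfn() and vmf_insert_mixed() now return a vm_fault_t directly, so fault handlers no longer translate error codes by hand. A hedged before/after sketch; the helper names around the calls are illustrative only.

#include <linux/mm.h>

/* pre-patch pattern: map the errno to a vm_fault_t manually */
static vm_fault_t example_fault_old(struct vm_fault *vmf, unsigned long pfn)
{
	int err = vm_insert_pfn(vmf->vma, vmf->address, pfn);

	if (err == -ENOMEM)
		return VM_FAULT_OOM;
	if (err < 0 && err != -EBUSY)
		return VM_FAULT_SIGBUS;
	return VM_FAULT_NOPAGE;
}

/* post-patch pattern: the helper already returns a vm_fault_t */
static vm_fault_t example_fault_new(struct vm_fault *vmf, unsigned long pfn)
{
	return vmf_insert_pfn(vmf->vma, vmf->address, pfn);
}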
*/ void (*invalidate_range)(struct mmu_notifier *mn, struct mm_struct *mm, unsigned long start, unsigned long end); @@ -239,7 +224,6 @@ extern void __mmu_notifier_invalidate_range_end(struct mm_struct *mm, bool only_end); extern void __mmu_notifier_invalidate_range(struct mm_struct *mm, unsigned long start, unsigned long end); -extern bool mm_has_blockable_invalidate_notifiers(struct mm_struct *mm); static inline void mmu_notifier_release(struct mm_struct *mm) { @@ -493,11 +477,6 @@ static inline void mmu_notifier_invalidate_range(struct mm_struct *mm, { } -static inline bool mm_has_blockable_invalidate_notifiers(struct mm_struct *mm) -{ - return false; -} - static inline void mmu_notifier_mm_init(struct mm_struct *mm) { } diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index 1e22d96734e0..9f0caccd5833 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -161,8 +161,10 @@ enum node_stat_item { NR_SLAB_UNRECLAIMABLE, NR_ISOLATED_ANON, /* Temporary isolated pages from anon lru */ NR_ISOLATED_FILE, /* Temporary isolated pages from file lru */ + WORKINGSET_NODES, WORKINGSET_REFAULT, WORKINGSET_ACTIVATE, + WORKINGSET_RESTORE, WORKINGSET_NODERECLAIM, NR_ANON_MAPPED, /* Mapped anonymous pages */ NR_FILE_MAPPED, /* pagecache pages mapped into pagetables. @@ -180,7 +182,7 @@ enum node_stat_item { NR_VMSCAN_IMMEDIATE, /* Prioritise for reclaim when writeback ends */ NR_DIRTIED, /* page dirtyings since bootup */ NR_WRITTEN, /* page writings since bootup */ - NR_INDIRECTLY_RECLAIMABLE_BYTES, /* measured in bytes */ + NR_KERNEL_MISC_RECLAIMABLE, /* reclaimable non-slab kernel pages */ NR_VM_NODE_STAT_ITEMS }; @@ -668,16 +670,6 @@ typedef struct pglist_data { wait_queue_head_t kcompactd_wait; struct task_struct *kcompactd; #endif -#ifdef CONFIG_NUMA_BALANCING - /* Lock serializing the migrate rate limiting window */ - spinlock_t numabalancing_migrate_lock; - - /* Rate limiting time interval */ - unsigned long numabalancing_migrate_next_window; - - /* Number of pages migrated during the rate limiting time interval */ - unsigned long numabalancing_migrate_nr_pages; -#endif /* * This is a per-node reserve of pages that are not available * to userspace allocations. diff --git a/include/linux/module.h b/include/linux/module.h index f807f15bebbe..fce6b4335e36 100644 --- a/include/linux/module.h +++ b/include/linux/module.h @@ -20,6 +20,7 @@ #include <linux/export.h> #include <linux/rbtree_latch.h> #include <linux/error-injection.h> +#include <linux/tracepoint-defs.h> #include <linux/percpu.h> #include <asm/module.h> @@ -123,7 +124,6 @@ extern void cleanup_module(void); #define late_initcall_sync(fn) module_init(fn) #define console_initcall(fn) module_init(fn) -#define security_initcall(fn) module_init(fn) /* Each module must use one module_init(). 
*/ #define module_init(initfn) \ @@ -430,7 +430,7 @@ struct module { #ifdef CONFIG_TRACEPOINTS unsigned int num_tracepoints; - struct tracepoint * const *tracepoints_ptrs; + tracepoint_ptr_t *tracepoints_ptrs; #endif #ifdef HAVE_JUMP_LABEL struct jump_entry *jump_entries; diff --git a/include/linux/mroute_base.h b/include/linux/mroute_base.h index 6675b9f81979..34de06b426ef 100644 --- a/include/linux/mroute_base.h +++ b/include/linux/mroute_base.h @@ -7,6 +7,7 @@ #include <net/net_namespace.h> #include <net/sock.h> #include <net/fib_notifier.h> +#include <net/ip_fib.h> /** * struct vif_device - interface representor for multicast routing @@ -283,6 +284,12 @@ void *mr_mfc_find_any(struct mr_table *mrt, int vifi, void *hasharg); int mr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb, struct mr_mfc *c, struct rtmsg *rtm); +int mr_table_dump(struct mr_table *mrt, struct sk_buff *skb, + struct netlink_callback *cb, + int (*fill)(struct mr_table *mrt, struct sk_buff *skb, + u32 portid, u32 seq, struct mr_mfc *c, + int cmd, int flags), + spinlock_t *lock, struct fib_dump_filter *filter); int mr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb, struct mr_table *(*iter)(struct net *net, struct mr_table *mrt), @@ -290,7 +297,7 @@ int mr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb, struct sk_buff *skb, u32 portid, u32 seq, struct mr_mfc *c, int cmd, int flags), - spinlock_t *lock); + spinlock_t *lock, struct fib_dump_filter *filter); int mr_dump(struct net *net, struct notifier_block *nb, unsigned short family, int (*rules_dump)(struct net *net, @@ -340,7 +347,7 @@ mr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb, struct sk_buff *skb, u32 portid, u32 seq, struct mr_mfc *c, int cmd, int flags), - spinlock_t *lock) + spinlock_t *lock, struct fib_dump_filter *filter) { return -EINVAL; } diff --git a/include/linux/msi.h b/include/linux/msi.h index 5839d8062dfc..0e9c50052ff3 100644 --- a/include/linux/msi.h +++ b/include/linux/msi.h @@ -317,11 +317,18 @@ int msi_domain_prepare_irqs(struct irq_domain *domain, struct device *dev, int msi_domain_populate_irqs(struct irq_domain *domain, struct device *dev, int virq, int nvec, msi_alloc_info_t *args); struct irq_domain * -platform_msi_create_device_domain(struct device *dev, - unsigned int nvec, - irq_write_msi_msg_t write_msi_msg, - const struct irq_domain_ops *ops, - void *host_data); +__platform_msi_create_device_domain(struct device *dev, + unsigned int nvec, + bool is_tree, + irq_write_msi_msg_t write_msi_msg, + const struct irq_domain_ops *ops, + void *host_data); + +#define platform_msi_create_device_domain(dev, nvec, write, ops, data) \ + __platform_msi_create_device_domain(dev, nvec, false, write, ops, data) +#define platform_msi_create_device_tree_domain(dev, nvec, write, ops, data) \ + __platform_msi_create_device_domain(dev, nvec, true, write, ops, data) + int platform_msi_domain_alloc(struct irq_domain *domain, unsigned int virq, unsigned int nr_irqs); void platform_msi_domain_free(struct irq_domain *domain, unsigned int virq, diff --git a/include/linux/mtd/blktrans.h b/include/linux/mtd/blktrans.h index e93837f647de..1d3ade69d39a 100644 --- a/include/linux/mtd/blktrans.h +++ b/include/linux/mtd/blktrans.h @@ -23,7 +23,6 @@ #include <linux/mutex.h> #include <linux/kref.h> #include <linux/sysfs.h> -#include <linux/workqueue.h> struct hd_geometry; struct mtd_info; @@ -44,9 +43,9 @@ struct mtd_blktrans_dev { struct kref ref; struct gendisk *disk; struct attribute_group *disk_attributes; - struct 
workqueue_struct *wq; - struct work_struct work; struct request_queue *rq; + struct list_head rq_list; + struct blk_mq_tag_set *tag_set; spinlock_t queue_lock; void *priv; fmode_t file_mode; diff --git a/include/linux/mtd/jedec.h b/include/linux/mtd/jedec.h new file mode 100644 index 000000000000..0b6b59f7cfbd --- /dev/null +++ b/include/linux/mtd/jedec.h @@ -0,0 +1,91 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright © 2000-2010 David Woodhouse <dwmw2@infradead.org> + * Steven J. Hill <sjhill@realitydiluted.com> + * Thomas Gleixner <tglx@linutronix.de> + * + * Contains all JEDEC related definitions + */ + +#ifndef __LINUX_MTD_JEDEC_H +#define __LINUX_MTD_JEDEC_H + +struct jedec_ecc_info { + u8 ecc_bits; + u8 codeword_size; + __le16 bb_per_lun; + __le16 block_endurance; + u8 reserved[2]; +} __packed; + +/* JEDEC features */ +#define JEDEC_FEATURE_16_BIT_BUS (1 << 0) + +struct nand_jedec_params { + /* rev info and features block */ + /* 'J' 'E' 'S' 'D' */ + u8 sig[4]; + __le16 revision; + __le16 features; + u8 opt_cmd[3]; + __le16 sec_cmd; + u8 num_of_param_pages; + u8 reserved0[18]; + + /* manufacturer information block */ + char manufacturer[12]; + char model[20]; + u8 jedec_id[6]; + u8 reserved1[10]; + + /* memory organization block */ + __le32 byte_per_page; + __le16 spare_bytes_per_page; + u8 reserved2[6]; + __le32 pages_per_block; + __le32 blocks_per_lun; + u8 lun_count; + u8 addr_cycles; + u8 bits_per_cell; + u8 programs_per_page; + u8 multi_plane_addr; + u8 multi_plane_op_attr; + u8 reserved3[38]; + + /* electrical parameter block */ + __le16 async_sdr_speed_grade; + __le16 toggle_ddr_speed_grade; + __le16 sync_ddr_speed_grade; + u8 async_sdr_features; + u8 toggle_ddr_features; + u8 sync_ddr_features; + __le16 t_prog; + __le16 t_bers; + __le16 t_r; + __le16 t_r_multi_plane; + __le16 t_ccs; + __le16 io_pin_capacitance_typ; + __le16 input_pin_capacitance_typ; + __le16 clk_pin_capacitance_typ; + u8 driver_strength_support; + __le16 t_adl; + u8 reserved4[36]; + + /* ECC and endurance block */ + u8 guaranteed_good_blocks; + __le16 guaranteed_block_endurance; + struct jedec_ecc_info ecc_info[4]; + u8 reserved5[29]; + + /* reserved */ + u8 reserved6[148]; + + /* vendor */ + __le16 vendor_rev_num; + u8 reserved7[88]; + + /* CRC for Parameter Page */ + __le16 crc; +} __packed; + +#endif /* __LINUX_MTD_JEDEC_H */ diff --git a/include/linux/mtd/nand_bch.h b/include/linux/mtd/nand_bch.h index 98f20ef05d60..b8106651f807 100644 --- a/include/linux/mtd/nand_bch.h +++ b/include/linux/mtd/nand_bch.h @@ -12,6 +12,7 @@ #define __MTD_NAND_BCH_H__ struct mtd_info; +struct nand_chip; struct nand_bch_control; #if defined(CONFIG_MTD_NAND_ECC_BCH) @@ -21,14 +22,14 @@ static inline int mtd_nand_has_bch(void) { return 1; } /* * Calculate BCH ecc code */ -int nand_bch_calculate_ecc(struct mtd_info *mtd, const u_char *dat, +int nand_bch_calculate_ecc(struct nand_chip *chip, const u_char *dat, u_char *ecc_code); /* * Detect and correct bit errors */ -int nand_bch_correct_data(struct mtd_info *mtd, u_char *dat, u_char *read_ecc, - u_char *calc_ecc); +int nand_bch_correct_data(struct nand_chip *chip, u_char *dat, + u_char *read_ecc, u_char *calc_ecc); /* * Initialize BCH encoder/decoder */ @@ -43,14 +44,14 @@ void nand_bch_free(struct nand_bch_control *nbc); static inline int mtd_nand_has_bch(void) { return 0; } static inline int -nand_bch_calculate_ecc(struct mtd_info *mtd, const u_char *dat, +nand_bch_calculate_ecc(struct nand_chip *chip, const u_char *dat, u_char *ecc_code) { return -1; } static 
inline int -nand_bch_correct_data(struct mtd_info *mtd, unsigned char *buf, +nand_bch_correct_data(struct nand_chip *chip, unsigned char *buf, unsigned char *read_ecc, unsigned char *calc_ecc) { return -ENOTSUPP; diff --git a/include/linux/mtd/nand_ecc.h b/include/linux/mtd/nand_ecc.h index 8a2decf7462c..0b3bb156c344 100644 --- a/include/linux/mtd/nand_ecc.h +++ b/include/linux/mtd/nand_ecc.h @@ -13,28 +13,30 @@ #ifndef __MTD_NAND_ECC_H__ #define __MTD_NAND_ECC_H__ -struct mtd_info; +struct nand_chip; /* * Calculate 3 byte ECC code for eccsize byte block */ void __nand_calculate_ecc(const u_char *dat, unsigned int eccsize, - u_char *ecc_code); + u_char *ecc_code, bool sm_order); /* * Calculate 3 byte ECC code for 256/512 byte block */ -int nand_calculate_ecc(struct mtd_info *mtd, const u_char *dat, u_char *ecc_code); +int nand_calculate_ecc(struct nand_chip *chip, const u_char *dat, + u_char *ecc_code); /* * Detect and correct a 1 bit error for eccsize byte block */ int __nand_correct_data(u_char *dat, u_char *read_ecc, u_char *calc_ecc, - unsigned int eccsize); + unsigned int eccsize, bool sm_order); /* * Detect and correct a 1 bit error for 256/512 byte block */ -int nand_correct_data(struct mtd_info *mtd, u_char *dat, u_char *read_ecc, u_char *calc_ecc); +int nand_correct_data(struct nand_chip *chip, u_char *dat, u_char *read_ecc, + u_char *calc_ecc); #endif /* __MTD_NAND_ECC_H__ */ diff --git a/include/linux/mtd/onfi.h b/include/linux/mtd/onfi.h new file mode 100644 index 000000000000..339ac798568e --- /dev/null +++ b/include/linux/mtd/onfi.h @@ -0,0 +1,178 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright © 2000-2010 David Woodhouse <dwmw2@infradead.org> + * Steven J. Hill <sjhill@realitydiluted.com> + * Thomas Gleixner <tglx@linutronix.de> + * + * Contains all ONFI related definitions + */ + +#ifndef __LINUX_MTD_ONFI_H +#define __LINUX_MTD_ONFI_H + +#include <linux/types.h> + +/* ONFI version bits */ +#define ONFI_VERSION_1_0 BIT(1) +#define ONFI_VERSION_2_0 BIT(2) +#define ONFI_VERSION_2_1 BIT(3) +#define ONFI_VERSION_2_2 BIT(4) +#define ONFI_VERSION_2_3 BIT(5) +#define ONFI_VERSION_3_0 BIT(6) +#define ONFI_VERSION_3_1 BIT(7) +#define ONFI_VERSION_3_2 BIT(8) +#define ONFI_VERSION_4_0 BIT(9) + +/* ONFI features */ +#define ONFI_FEATURE_16_BIT_BUS (1 << 0) +#define ONFI_FEATURE_EXT_PARAM_PAGE (1 << 7) + +/* ONFI timing mode, used in both asynchronous and synchronous mode */ +#define ONFI_TIMING_MODE_0 (1 << 0) +#define ONFI_TIMING_MODE_1 (1 << 1) +#define ONFI_TIMING_MODE_2 (1 << 2) +#define ONFI_TIMING_MODE_3 (1 << 3) +#define ONFI_TIMING_MODE_4 (1 << 4) +#define ONFI_TIMING_MODE_5 (1 << 5) +#define ONFI_TIMING_MODE_UNKNOWN (1 << 6) + +/* ONFI feature number/address */ +#define ONFI_FEATURE_NUMBER 256 +#define ONFI_FEATURE_ADDR_TIMING_MODE 0x1 + +/* Vendor-specific feature address (Micron) */ +#define ONFI_FEATURE_ADDR_READ_RETRY 0x89 +#define ONFI_FEATURE_ON_DIE_ECC 0x90 +#define ONFI_FEATURE_ON_DIE_ECC_EN BIT(3) + +/* ONFI subfeature parameters length */ +#define ONFI_SUBFEATURE_PARAM_LEN 4 + +/* ONFI optional commands SET/GET FEATURES supported? 
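Hedged example of interpreting the ONFI_TIMING_MODE_* masks that now live in <linux/mtd/onfi.h>: async_timing_mode is a bitmask of supported modes, so the best supported mode is the highest bit set. The helper name is made up.

#include <linux/bitops.h>
#include <linux/mtd/onfi.h>
#include <linux/types.h>

static int foo_best_onfi_timing_mode(u16 async_timing_mode)
{
	if (!async_timing_mode ||
	    (async_timing_mode & ONFI_TIMING_MODE_UNKNOWN))
		return -1;

	/* fls() returns 1 for BIT(0), so subtract one to get the mode number */
	return fls(async_timing_mode) - 1;
}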
*/ +#define ONFI_OPT_CMD_SET_GET_FEATURES (1 << 2) + +struct nand_onfi_params { + /* rev info and features block */ + /* 'O' 'N' 'F' 'I' */ + u8 sig[4]; + __le16 revision; + __le16 features; + __le16 opt_cmd; + u8 reserved0[2]; + __le16 ext_param_page_length; /* since ONFI 2.1 */ + u8 num_of_param_pages; /* since ONFI 2.1 */ + u8 reserved1[17]; + + /* manufacturer information block */ + char manufacturer[12]; + char model[20]; + u8 jedec_id; + __le16 date_code; + u8 reserved2[13]; + + /* memory organization block */ + __le32 byte_per_page; + __le16 spare_bytes_per_page; + __le32 data_bytes_per_ppage; + __le16 spare_bytes_per_ppage; + __le32 pages_per_block; + __le32 blocks_per_lun; + u8 lun_count; + u8 addr_cycles; + u8 bits_per_cell; + __le16 bb_per_lun; + __le16 block_endurance; + u8 guaranteed_good_blocks; + __le16 guaranteed_block_endurance; + u8 programs_per_page; + u8 ppage_attr; + u8 ecc_bits; + u8 interleaved_bits; + u8 interleaved_ops; + u8 reserved3[13]; + + /* electrical parameter block */ + u8 io_pin_capacitance_max; + __le16 async_timing_mode; + __le16 program_cache_timing_mode; + __le16 t_prog; + __le16 t_bers; + __le16 t_r; + __le16 t_ccs; + __le16 src_sync_timing_mode; + u8 src_ssync_features; + __le16 clk_pin_capacitance_typ; + __le16 io_pin_capacitance_typ; + __le16 input_pin_capacitance_typ; + u8 input_pin_capacitance_max; + u8 driver_strength_support; + __le16 t_int_r; + __le16 t_adl; + u8 reserved4[8]; + + /* vendor */ + __le16 vendor_revision; + u8 vendor[88]; + + __le16 crc; +} __packed; + +#define ONFI_CRC_BASE 0x4F4E + +/* Extended ECC information Block Definition (since ONFI 2.1) */ +struct onfi_ext_ecc_info { + u8 ecc_bits; + u8 codeword_size; + __le16 bb_per_lun; + __le16 block_endurance; + u8 reserved[2]; +} __packed; + +#define ONFI_SECTION_TYPE_0 0 /* Unused section. */ +#define ONFI_SECTION_TYPE_1 1 /* for additional sections. */ +#define ONFI_SECTION_TYPE_2 2 /* for ECC information. */ +struct onfi_ext_section { + u8 type; + u8 length; +} __packed; + +#define ONFI_EXT_SECTION_MAX 8 + +/* Extended Parameter Page Definition (since ONFI 2.1) */ +struct onfi_ext_param_page { + __le16 crc; + u8 sig[4]; /* 'E' 'P' 'P' 'S' */ + u8 reserved0[10]; + struct onfi_ext_section sections[ONFI_EXT_SECTION_MAX]; + + /* + * The actual size of the Extended Parameter Page is in + * @ext_param_page_length of nand_onfi_params{}. + * The following are the variable length sections. + * So we do not add any fields below. Please see the ONFI spec. + */ +} __packed; + +/** + * struct onfi_params - ONFI specific parameters that will be reused + * @version: ONFI version (BCD encoded), 0 if ONFI is not supported + * @tPROG: Page program time + * @tBERS: Block erase time + * @tR: Page read time + * @tCCS: Change column setup time + * @async_timing_mode: Supported asynchronous timing mode + * @vendor_revision: Vendor specific revision number + * @vendor: Vendor specific data + */ +struct onfi_params { + int version; + u16 tPROG; + u16 tBERS; + u16 tR; + u16 tCCS; + u16 async_timing_mode; + u16 vendor_revision; + u8 vendor[88]; +}; + +#endif /* __LINUX_MTD_ONFI_H */ diff --git a/include/linux/mtd/platnand.h b/include/linux/mtd/platnand.h new file mode 100644 index 000000000000..bc11eb6b593b --- /dev/null +++ b/include/linux/mtd/platnand.h @@ -0,0 +1,74 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright © 2000-2010 David Woodhouse <dwmw2@infradead.org> + * Steven J. 
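Another small sketch against the relocated ONFI structures: checking two capability bits of a parsed parameter page using the feature/opt_cmd masks defined above. The foo_* wrappers are illustrative only.

#include <linux/mtd/onfi.h>
#include <linux/types.h>

static bool foo_onfi_supports_set_get_features(const struct nand_onfi_params *p)
{
	return le16_to_cpu(p->opt_cmd) & ONFI_OPT_CMD_SET_GET_FEATURES;
}

static bool foo_onfi_has_ext_param_page(const struct nand_onfi_params *p)
{
	return le16_to_cpu(p->features) & ONFI_FEATURE_EXT_PARAM_PAGE;
}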
Hill <sjhill@realitydiluted.com> + * Thomas Gleixner <tglx@linutronix.de> + * + * Contains all platform NAND related definitions. + */ + +#ifndef __LINUX_MTD_PLATNAND_H +#define __LINUX_MTD_PLATNAND_H + +#include <linux/mtd/partitions.h> +#include <linux/mtd/rawnand.h> +#include <linux/platform_device.h> + +/** + * struct platform_nand_chip - chip level device structure + * @nr_chips: max. number of chips to scan for + * @chip_offset: chip number offset + * @nr_partitions: number of partitions pointed to by partitions (or zero) + * @partitions: mtd partition list + * @chip_delay: R/B delay value in us + * @options: Option flags, e.g. 16bit buswidth + * @bbt_options: BBT option flags, e.g. NAND_BBT_USE_FLASH + * @part_probe_types: NULL-terminated array of probe types + */ +struct platform_nand_chip { + int nr_chips; + int chip_offset; + int nr_partitions; + struct mtd_partition *partitions; + int chip_delay; + unsigned int options; + unsigned int bbt_options; + const char **part_probe_types; +}; + +/** + * struct platform_nand_ctrl - controller level device structure + * @probe: platform specific function to probe/setup hardware + * @remove: platform specific function to remove/teardown hardware + * @dev_ready: platform specific function to read ready/busy pin + * @select_chip: platform specific chip select function + * @cmd_ctrl: platform specific function for controlling + * ALE/CLE/nCE. Also used to write command and address + * @write_buf: platform specific function for write buffer + * @read_buf: platform specific function for read buffer + * @priv: private data to transport driver specific settings + * + * All fields are optional and depend on the hardware driver requirements + */ +struct platform_nand_ctrl { + int (*probe)(struct platform_device *pdev); + void (*remove)(struct platform_device *pdev); + int (*dev_ready)(struct nand_chip *chip); + void (*select_chip)(struct nand_chip *chip, int cs); + void (*cmd_ctrl)(struct nand_chip *chip, int dat, unsigned int ctrl); + void (*write_buf)(struct nand_chip *chip, const uint8_t *buf, int len); + void (*read_buf)(struct nand_chip *chip, uint8_t *buf, int len); + void *priv; +}; + +/** + * struct platform_nand_data - container structure for platform-specific data + * @chip: chip level chip structure + * @ctrl: controller level device structure + */ +struct platform_nand_data { + struct platform_nand_chip chip; + struct platform_nand_ctrl ctrl; +}; + +#endif /* __LINUX_MTD_PLATNAND_H */ diff --git a/include/linux/mtd/rawnand.h b/include/linux/mtd/rawnand.h index efb2345359bb..e10b126e148f 100644 --- a/include/linux/mtd/rawnand.h +++ b/include/linux/mtd/rawnand.h @@ -21,22 +21,12 @@ #include <linux/mtd/mtd.h> #include <linux/mtd/flashchip.h> #include <linux/mtd/bbm.h> +#include <linux/mtd/jedec.h> +#include <linux/mtd/onfi.h> #include <linux/of.h> #include <linux/types.h> -struct nand_flash_dev; - -/* Scan and identify a NAND device */ -int nand_scan_with_ids(struct mtd_info *mtd, int max_chips, - struct nand_flash_dev *ids); - -static inline int nand_scan(struct mtd_info *mtd, int max_chips) -{ - return nand_scan_with_ids(mtd, max_chips, NULL); -} - -/* Internal helper for board drivers which need to override command function */ -void nand_wait_ready(struct mtd_info *mtd); +struct nand_chip; /* The maximum number of NAND chips in an array */ #define NAND_MAX_CHIPS 8 @@ -131,9 +121,11 @@ enum nand_ecc_algo { #define NAND_ECC_GENERIC_ERASED_CHECK BIT(0) #define NAND_ECC_MAXIMIZE BIT(1) -/* Bit mask for flags passed to do_nand_read_ecc */ 
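Board-file sketch for the structures that moved into <linux/mtd/platnand.h>; the partition layout, delay value and names are hypothetical, and the controller hooks are left to the board code.

#include <linux/kernel.h>
#include <linux/sizes.h>
#include <linux/mtd/platnand.h>

static struct mtd_partition foo_nand_parts[] = {
	{ .name = "boot",   .offset = 0,                  .size = SZ_1M },
	{ .name = "rootfs", .offset = MTDPART_OFS_APPEND, .size = MTDPART_SIZ_FULL },
};

static struct platform_nand_data foo_nand_data = {
	.chip = {
		.nr_chips      = 1,
		.nr_partitions = ARRAY_SIZE(foo_nand_parts),
		.partitions    = foo_nand_parts,
		.chip_delay    = 25,	/* R/B delay in us */
	},
	.ctrl = {
		/* .cmd_ctrl, .dev_ready, ... wired up by the board */
	},
};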
-#define NAND_GET_DEVICE 0x80 - +/* + * When using software implementation of Hamming, we can specify which byte + * ordering should be used. + */ +#define NAND_ECC_SOFT_HAMMING_SM_ORDER BIT(2) /* * Option constants for bizarre disfunctionality and real @@ -175,9 +167,7 @@ enum nand_ecc_algo { #define NAND_SAMSUNG_LP_OPTIONS NAND_CACHEPRG /* Macros to identify the above */ -#define NAND_HAS_CACHEPROG(chip) ((chip->options & NAND_CACHEPRG)) #define NAND_HAS_SUBPAGE_READ(chip) ((chip->options & NAND_SUBPAGE_READ)) -#define NAND_HAS_SUBPAGE_WRITE(chip) !((chip)->options & NAND_NO_SUBPAGE_WRITE) /* Non chip related options */ /* This option skips the bbt scan during initialization. */ @@ -198,10 +188,10 @@ enum nand_ecc_algo { #define NAND_USE_BOUNCE_BUFFER 0x00100000 /* - * In case your controller is implementing ->cmd_ctrl() and is relying on the - * default ->cmdfunc() implementation, you may want to let the core handle the - * tCCS delay which is required when a column change (RNDIN or RNDOUT) is - * requested. + * In case your controller is implementing ->legacy.cmd_ctrl() and is relying + * on the default ->cmdfunc() implementation, you may want to let the core + * handle the tCCS delay which is required when a column change (RNDIN or + * RNDOUT) is requested. * If your controller already takes care of this delay, you don't need to set * this flag. */ @@ -222,250 +212,6 @@ enum nand_ecc_algo { #define NAND_CI_CELLTYPE_MSK 0x0C #define NAND_CI_CELLTYPE_SHIFT 2 -/* Keep gcc happy */ -struct nand_chip; - -/* ONFI version bits */ -#define ONFI_VERSION_1_0 BIT(1) -#define ONFI_VERSION_2_0 BIT(2) -#define ONFI_VERSION_2_1 BIT(3) -#define ONFI_VERSION_2_2 BIT(4) -#define ONFI_VERSION_2_3 BIT(5) -#define ONFI_VERSION_3_0 BIT(6) -#define ONFI_VERSION_3_1 BIT(7) -#define ONFI_VERSION_3_2 BIT(8) -#define ONFI_VERSION_4_0 BIT(9) - -/* ONFI features */ -#define ONFI_FEATURE_16_BIT_BUS (1 << 0) -#define ONFI_FEATURE_EXT_PARAM_PAGE (1 << 7) - -/* ONFI timing mode, used in both asynchronous and synchronous mode */ -#define ONFI_TIMING_MODE_0 (1 << 0) -#define ONFI_TIMING_MODE_1 (1 << 1) -#define ONFI_TIMING_MODE_2 (1 << 2) -#define ONFI_TIMING_MODE_3 (1 << 3) -#define ONFI_TIMING_MODE_4 (1 << 4) -#define ONFI_TIMING_MODE_5 (1 << 5) -#define ONFI_TIMING_MODE_UNKNOWN (1 << 6) - -/* ONFI feature number/address */ -#define ONFI_FEATURE_NUMBER 256 -#define ONFI_FEATURE_ADDR_TIMING_MODE 0x1 - -/* Vendor-specific feature address (Micron) */ -#define ONFI_FEATURE_ADDR_READ_RETRY 0x89 -#define ONFI_FEATURE_ON_DIE_ECC 0x90 -#define ONFI_FEATURE_ON_DIE_ECC_EN BIT(3) - -/* ONFI subfeature parameters length */ -#define ONFI_SUBFEATURE_PARAM_LEN 4 - -/* ONFI optional commands SET/GET FEATURES supported? 
*/ -#define ONFI_OPT_CMD_SET_GET_FEATURES (1 << 2) - -struct nand_onfi_params { - /* rev info and features block */ - /* 'O' 'N' 'F' 'I' */ - u8 sig[4]; - __le16 revision; - __le16 features; - __le16 opt_cmd; - u8 reserved0[2]; - __le16 ext_param_page_length; /* since ONFI 2.1 */ - u8 num_of_param_pages; /* since ONFI 2.1 */ - u8 reserved1[17]; - - /* manufacturer information block */ - char manufacturer[12]; - char model[20]; - u8 jedec_id; - __le16 date_code; - u8 reserved2[13]; - - /* memory organization block */ - __le32 byte_per_page; - __le16 spare_bytes_per_page; - __le32 data_bytes_per_ppage; - __le16 spare_bytes_per_ppage; - __le32 pages_per_block; - __le32 blocks_per_lun; - u8 lun_count; - u8 addr_cycles; - u8 bits_per_cell; - __le16 bb_per_lun; - __le16 block_endurance; - u8 guaranteed_good_blocks; - __le16 guaranteed_block_endurance; - u8 programs_per_page; - u8 ppage_attr; - u8 ecc_bits; - u8 interleaved_bits; - u8 interleaved_ops; - u8 reserved3[13]; - - /* electrical parameter block */ - u8 io_pin_capacitance_max; - __le16 async_timing_mode; - __le16 program_cache_timing_mode; - __le16 t_prog; - __le16 t_bers; - __le16 t_r; - __le16 t_ccs; - __le16 src_sync_timing_mode; - u8 src_ssync_features; - __le16 clk_pin_capacitance_typ; - __le16 io_pin_capacitance_typ; - __le16 input_pin_capacitance_typ; - u8 input_pin_capacitance_max; - u8 driver_strength_support; - __le16 t_int_r; - __le16 t_adl; - u8 reserved4[8]; - - /* vendor */ - __le16 vendor_revision; - u8 vendor[88]; - - __le16 crc; -} __packed; - -#define ONFI_CRC_BASE 0x4F4E - -/* Extended ECC information Block Definition (since ONFI 2.1) */ -struct onfi_ext_ecc_info { - u8 ecc_bits; - u8 codeword_size; - __le16 bb_per_lun; - __le16 block_endurance; - u8 reserved[2]; -} __packed; - -#define ONFI_SECTION_TYPE_0 0 /* Unused section. */ -#define ONFI_SECTION_TYPE_1 1 /* for additional sections. */ -#define ONFI_SECTION_TYPE_2 2 /* for ECC information. */ -struct onfi_ext_section { - u8 type; - u8 length; -} __packed; - -#define ONFI_EXT_SECTION_MAX 8 - -/* Extended Parameter Page Definition (since ONFI 2.1) */ -struct onfi_ext_param_page { - __le16 crc; - u8 sig[4]; /* 'E' 'P' 'P' 'S' */ - u8 reserved0[10]; - struct onfi_ext_section sections[ONFI_EXT_SECTION_MAX]; - - /* - * The actual size of the Extended Parameter Page is in - * @ext_param_page_length of nand_onfi_params{}. - * The following are the variable length sections. - * So we do not add any fields below. Please see the ONFI spec. 
- */ -} __packed; - -struct jedec_ecc_info { - u8 ecc_bits; - u8 codeword_size; - __le16 bb_per_lun; - __le16 block_endurance; - u8 reserved[2]; -} __packed; - -/* JEDEC features */ -#define JEDEC_FEATURE_16_BIT_BUS (1 << 0) - -struct nand_jedec_params { - /* rev info and features block */ - /* 'J' 'E' 'S' 'D' */ - u8 sig[4]; - __le16 revision; - __le16 features; - u8 opt_cmd[3]; - __le16 sec_cmd; - u8 num_of_param_pages; - u8 reserved0[18]; - - /* manufacturer information block */ - char manufacturer[12]; - char model[20]; - u8 jedec_id[6]; - u8 reserved1[10]; - - /* memory organization block */ - __le32 byte_per_page; - __le16 spare_bytes_per_page; - u8 reserved2[6]; - __le32 pages_per_block; - __le32 blocks_per_lun; - u8 lun_count; - u8 addr_cycles; - u8 bits_per_cell; - u8 programs_per_page; - u8 multi_plane_addr; - u8 multi_plane_op_attr; - u8 reserved3[38]; - - /* electrical parameter block */ - __le16 async_sdr_speed_grade; - __le16 toggle_ddr_speed_grade; - __le16 sync_ddr_speed_grade; - u8 async_sdr_features; - u8 toggle_ddr_features; - u8 sync_ddr_features; - __le16 t_prog; - __le16 t_bers; - __le16 t_r; - __le16 t_r_multi_plane; - __le16 t_ccs; - __le16 io_pin_capacitance_typ; - __le16 input_pin_capacitance_typ; - __le16 clk_pin_capacitance_typ; - u8 driver_strength_support; - __le16 t_adl; - u8 reserved4[36]; - - /* ECC and endurance block */ - u8 guaranteed_good_blocks; - __le16 guaranteed_block_endurance; - struct jedec_ecc_info ecc_info[4]; - u8 reserved5[29]; - - /* reserved */ - u8 reserved6[148]; - - /* vendor */ - __le16 vendor_rev_num; - u8 reserved7[88]; - - /* CRC for Parameter Page */ - __le16 crc; -} __packed; - -/** - * struct onfi_params - ONFI specific parameters that will be reused - * @version: ONFI version (BCD encoded), 0 if ONFI is not supported - * @tPROG: Page program time - * @tBERS: Block erase time - * @tR: Page read time - * @tCCS: Change column setup time - * @async_timing_mode: Supported asynchronous timing mode - * @vendor_revision: Vendor specific revision number - * @vendor: Vendor specific data - */ -struct onfi_params { - int version; - u16 tPROG; - u16 tBERS; - u16 tR; - u16 tCCS; - u16 async_timing_mode; - u16 vendor_revision; - u8 vendor[88]; -}; - /** * struct nand_parameters - NAND generic parameters from the parameter page * @model: Model name @@ -646,31 +392,28 @@ struct nand_ecc_ctrl { void *priv; u8 *calc_buf; u8 *code_buf; - void (*hwctl)(struct mtd_info *mtd, int mode); - int (*calculate)(struct mtd_info *mtd, const uint8_t *dat, - uint8_t *ecc_code); - int (*correct)(struct mtd_info *mtd, uint8_t *dat, uint8_t *read_ecc, - uint8_t *calc_ecc); - int (*read_page_raw)(struct mtd_info *mtd, struct nand_chip *chip, - uint8_t *buf, int oob_required, int page); - int (*write_page_raw)(struct mtd_info *mtd, struct nand_chip *chip, - const uint8_t *buf, int oob_required, int page); - int (*read_page)(struct mtd_info *mtd, struct nand_chip *chip, - uint8_t *buf, int oob_required, int page); - int (*read_subpage)(struct mtd_info *mtd, struct nand_chip *chip, - uint32_t offs, uint32_t len, uint8_t *buf, int page); - int (*write_subpage)(struct mtd_info *mtd, struct nand_chip *chip, - uint32_t offset, uint32_t data_len, - const uint8_t *data_buf, int oob_required, int page); - int (*write_page)(struct mtd_info *mtd, struct nand_chip *chip, - const uint8_t *buf, int oob_required, int page); - int (*write_oob_raw)(struct mtd_info *mtd, struct nand_chip *chip, - int page); - int (*read_oob_raw)(struct mtd_info *mtd, struct nand_chip *chip, - int 
page); - int (*read_oob)(struct mtd_info *mtd, struct nand_chip *chip, int page); - int (*write_oob)(struct mtd_info *mtd, struct nand_chip *chip, - int page); + void (*hwctl)(struct nand_chip *chip, int mode); + int (*calculate)(struct nand_chip *chip, const uint8_t *dat, + uint8_t *ecc_code); + int (*correct)(struct nand_chip *chip, uint8_t *dat, uint8_t *read_ecc, + uint8_t *calc_ecc); + int (*read_page_raw)(struct nand_chip *chip, uint8_t *buf, + int oob_required, int page); + int (*write_page_raw)(struct nand_chip *chip, const uint8_t *buf, + int oob_required, int page); + int (*read_page)(struct nand_chip *chip, uint8_t *buf, + int oob_required, int page); + int (*read_subpage)(struct nand_chip *chip, uint32_t offs, + uint32_t len, uint8_t *buf, int page); + int (*write_subpage)(struct nand_chip *chip, uint32_t offset, + uint32_t data_len, const uint8_t *data_buf, + int oob_required, int page); + int (*write_page)(struct nand_chip *chip, const uint8_t *buf, + int oob_required, int page); + int (*write_oob_raw)(struct nand_chip *chip, int page); + int (*read_oob_raw)(struct nand_chip *chip, int page); + int (*read_oob)(struct nand_chip *chip, int page); + int (*write_oob)(struct nand_chip *chip, int page); }; /** @@ -800,24 +543,6 @@ nand_get_sdr_timings(const struct nand_data_interface *conf) } /** - * struct nand_manufacturer_ops - NAND Manufacturer operations - * @detect: detect the NAND memory organization and capabilities - * @init: initialize all vendor specific fields (like the ->read_retry() - * implementation) if any. - * @cleanup: the ->init() function may have allocated resources, ->cleanup() - * is here to let vendor specific code release those resources. - * @fixup_onfi_param_page: apply vendor specific fixups to the ONFI parameter - * page. This is called after the checksum is verified. - */ -struct nand_manufacturer_ops { - void (*detect)(struct nand_chip *chip); - int (*init)(struct nand_chip *chip); - void (*cleanup)(struct nand_chip *chip); - void (*fixup_onfi_param_page)(struct nand_chip *chip, - struct nand_onfi_params *p); -}; - -/** * struct nand_op_cmd_instr - Definition of a command instruction * @opcode: the command to issue in one cycle */ @@ -1175,44 +900,72 @@ int nand_op_parser_exec_op(struct nand_chip *chip, const struct nand_operation *op, bool check_only); /** + * struct nand_legacy - NAND chip legacy fields/hooks + * @IO_ADDR_R: address to read the 8 I/O lines of the flash device + * @IO_ADDR_W: address to write the 8 I/O lines of the flash device + * @read_byte: read one byte from the chip + * @write_byte: write a single byte to the chip on the low 8 I/O lines + * @write_buf: write data from the buffer to the chip + * @read_buf: read data from the chip into the buffer + * @cmd_ctrl: hardware specific function for controlling ALE/CLE/nCE. Also used + * to write command and address + * @cmdfunc: hardware specific function for writing commands to the chip. + * @dev_ready: hardware specific function for accessing device ready/busy line. + * If set to NULL no access to ready/busy is available and the + * ready/busy information is read from the chip status register. + * @waitfunc: hardware specific function for wait on ready. + * @block_bad: check if a block is bad, using OOB markers + * @block_markbad: mark a block bad + * @erase: erase function + * @set_features: set the NAND chip features + * @get_features: get the NAND chip features + * @chip_delay: chip dependent delay for transferring data from array to read + * regs (tR). 
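Sketch of what the nand_ecc_ctrl prototype changes above mean for a driver: the mtd_info argument disappears, and code that still needs the mtd can derive it from the chip. The body of foo_read_page() is invented; nand_to_mtd() is the existing helper in rawnand.h.

static int foo_read_page(struct nand_chip *chip, uint8_t *buf,
			 int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);

	/* issue the page read and copy mtd->writesize bytes into buf */
	return 0;
}

static void foo_attach_ecc(struct nand_chip *chip)
{
	chip->ecc.read_page = foo_read_page;
}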
+ * + * If you look at this structure you're already wrong. These fields/hooks are + * all deprecated. + */ +struct nand_legacy { + void __iomem *IO_ADDR_R; + void __iomem *IO_ADDR_W; + u8 (*read_byte)(struct nand_chip *chip); + void (*write_byte)(struct nand_chip *chip, u8 byte); + void (*write_buf)(struct nand_chip *chip, const u8 *buf, int len); + void (*read_buf)(struct nand_chip *chip, u8 *buf, int len); + void (*cmd_ctrl)(struct nand_chip *chip, int dat, unsigned int ctrl); + void (*cmdfunc)(struct nand_chip *chip, unsigned command, int column, + int page_addr); + int (*dev_ready)(struct nand_chip *chip); + int (*waitfunc)(struct nand_chip *chip); + int (*block_bad)(struct nand_chip *chip, loff_t ofs); + int (*block_markbad)(struct nand_chip *chip, loff_t ofs); + int (*erase)(struct nand_chip *chip, int page); + int (*set_features)(struct nand_chip *chip, int feature_addr, + u8 *subfeature_para); + int (*get_features)(struct nand_chip *chip, int feature_addr, + u8 *subfeature_para); + int chip_delay; +}; + +/** * struct nand_chip - NAND Private Flash Chip Data * @mtd: MTD device registered to the MTD framework - * @IO_ADDR_R: [BOARDSPECIFIC] address to read the 8 I/O lines of the - * flash device - * @IO_ADDR_W: [BOARDSPECIFIC] address to write the 8 I/O lines of the - * flash device. - * @read_byte: [REPLACEABLE] read one byte from the chip - * @read_word: [REPLACEABLE] read one word from the chip - * @write_byte: [REPLACEABLE] write a single byte to the chip on the - * low 8 I/O lines - * @write_buf: [REPLACEABLE] write data from the buffer to the chip - * @read_buf: [REPLACEABLE] read data from the chip into the buffer + * @legacy: All legacy fields/hooks. If you develop a new driver, + * don't even try to use any of these fields/hooks, and if + * you're modifying an existing driver that is using those + * fields/hooks, you should consider reworking the driver + * avoid using them. * @select_chip: [REPLACEABLE] select chip nr - * @block_bad: [REPLACEABLE] check if a block is bad, using OOB markers - * @block_markbad: [REPLACEABLE] mark a block bad - * @cmd_ctrl: [BOARDSPECIFIC] hardwarespecific function for controlling - * ALE/CLE/nCE. Also used to write command and address - * @dev_ready: [BOARDSPECIFIC] hardwarespecific function for accessing - * device ready/busy line. If set to NULL no access to - * ready/busy is available and the ready/busy information - * is read from the chip status register. - * @cmdfunc: [REPLACEABLE] hardwarespecific function for writing - * commands to the chip. - * @waitfunc: [REPLACEABLE] hardwarespecific function for wait on - * ready. * @exec_op: controller specific method to execute NAND operations. * This method replaces ->cmdfunc(), - * ->{read,write}_{buf,byte,word}(), ->dev_ready() and - * ->waifunc(). + * ->legacy.{read,write}_{buf,byte,word}(), + * ->legacy.dev_ready() and ->waifunc(). * @setup_read_retry: [FLASHSPECIFIC] flash (vendor) specific function for * setting the read-retry mode. Mostly needed for MLC NAND. * @ecc: [BOARDSPECIFIC] ECC control structure * @buf_align: minimum buffer alignment required by a platform * @dummy_controller: dummy controller implementation for drivers that can * only control a single chip - * @erase: [REPLACEABLE] erase function - * @chip_delay: [BOARDSPECIFIC] chip dependent delay for transferring - * data from array to read regs (tR). 
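Illustration of the relocation into chip->legacy: a driver that previously assigned chip->cmd_ctrl and chip->dev_ready now fills the same hooks, with nand_chip-based prototypes, under the legacy sub-structure. The foo_* driver is hypothetical.

static void foo_cmd_ctrl(struct nand_chip *chip, int dat, unsigned int ctrl)
{
	/* drive ALE/CLE/nCE and, when requested, write dat to the bus */
}

static int foo_dev_ready(struct nand_chip *chip)
{
	/* a real driver would sample the R/B# line here */
	return 1;
}

static void foo_init_chip(struct nand_chip *chip)
{
	chip->legacy.cmd_ctrl   = foo_cmd_ctrl;
	chip->legacy.dev_ready  = foo_dev_ready;
	chip->legacy.chip_delay = 20;	/* tR in us */
}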
* @state: [INTERN] the current state of the NAND device * @oob_poi: "poison value buffer," used for laying out OOB data * before writing @@ -1260,8 +1013,6 @@ int nand_op_parser_exec_op(struct nand_chip *chip, * @blocks_per_die: [INTERN] The number of PEBs in a die * @data_interface: [INTERN] NAND interface timing information * @read_retries: [INTERN] the number of read retry modes supported - * @set_features: [REPLACEABLE] set the NAND chip features - * @get_features: [REPLACEABLE] get the NAND chip features * @setup_data_interface: [OPTIONAL] setup the data interface and timing. If * chipnr is set to %NAND_DATA_IFACE_CHECK_ONLY this * means the configuration should not be applied but @@ -1283,35 +1034,17 @@ int nand_op_parser_exec_op(struct nand_chip *chip, struct nand_chip { struct mtd_info mtd; - void __iomem *IO_ADDR_R; - void __iomem *IO_ADDR_W; - uint8_t (*read_byte)(struct mtd_info *mtd); - u16 (*read_word)(struct mtd_info *mtd); - void (*write_byte)(struct mtd_info *mtd, uint8_t byte); - void (*write_buf)(struct mtd_info *mtd, const uint8_t *buf, int len); - void (*read_buf)(struct mtd_info *mtd, uint8_t *buf, int len); - void (*select_chip)(struct mtd_info *mtd, int chip); - int (*block_bad)(struct mtd_info *mtd, loff_t ofs); - int (*block_markbad)(struct mtd_info *mtd, loff_t ofs); - void (*cmd_ctrl)(struct mtd_info *mtd, int dat, unsigned int ctrl); - int (*dev_ready)(struct mtd_info *mtd); - void (*cmdfunc)(struct mtd_info *mtd, unsigned command, int column, - int page_addr); - int(*waitfunc)(struct mtd_info *mtd, struct nand_chip *this); + struct nand_legacy legacy; + + void (*select_chip)(struct nand_chip *chip, int cs); int (*exec_op)(struct nand_chip *chip, const struct nand_operation *op, bool check_only); - int (*erase)(struct mtd_info *mtd, int page); - int (*set_features)(struct mtd_info *mtd, struct nand_chip *chip, - int feature_addr, uint8_t *subfeature_para); - int (*get_features)(struct mtd_info *mtd, struct nand_chip *chip, - int feature_addr, uint8_t *subfeature_para); - int (*setup_read_retry)(struct mtd_info *mtd, int retry_mode); - int (*setup_data_interface)(struct mtd_info *mtd, int chipnr, + int (*setup_read_retry)(struct nand_chip *chip, int retry_mode); + int (*setup_data_interface)(struct nand_chip *chip, int chipnr, const struct nand_data_interface *conf); - int chip_delay; unsigned int options; unsigned int bbt_options; @@ -1420,27 +1153,6 @@ static inline void *nand_get_manufacturer_data(struct nand_chip *chip) } /* - * NAND Flash Manufacturer ID Codes - */ -#define NAND_MFR_TOSHIBA 0x98 -#define NAND_MFR_ESMT 0xc8 -#define NAND_MFR_SAMSUNG 0xec -#define NAND_MFR_FUJITSU 0x04 -#define NAND_MFR_NATIONAL 0x8f -#define NAND_MFR_RENESAS 0x07 -#define NAND_MFR_STMICRO 0x20 -#define NAND_MFR_HYNIX 0xad -#define NAND_MFR_MICRON 0x2c -#define NAND_MFR_AMD 0x01 -#define NAND_MFR_MACRONIX 0xc2 -#define NAND_MFR_EON 0x92 -#define NAND_MFR_SANDISK 0x45 -#define NAND_MFR_INTEL 0x89 -#define NAND_MFR_ATO 0x9b -#define NAND_MFR_WINBOND 0xef - - -/* * A helper for defining older NAND chips where the second ID byte fully * defined the chip, including the geometry (chip size, eraseblock size, page * size). All these chips have 512 bytes NAND page size. @@ -1519,114 +1231,7 @@ struct nand_flash_dev { int onfi_timing_mode_default; }; -/** - * struct nand_manufacturer - NAND Flash Manufacturer structure - * @name: Manufacturer name - * @id: manufacturer ID code of device. 
- * @ops: manufacturer operations -*/ -struct nand_manufacturer { - int id; - char *name; - const struct nand_manufacturer_ops *ops; -}; - -const struct nand_manufacturer *nand_get_manufacturer(u8 id); - -static inline const char * -nand_manufacturer_name(const struct nand_manufacturer *manufacturer) -{ - return manufacturer ? manufacturer->name : "Unknown"; -} - -extern struct nand_flash_dev nand_flash_ids[]; - -extern const struct nand_manufacturer_ops toshiba_nand_manuf_ops; -extern const struct nand_manufacturer_ops samsung_nand_manuf_ops; -extern const struct nand_manufacturer_ops hynix_nand_manuf_ops; -extern const struct nand_manufacturer_ops micron_nand_manuf_ops; -extern const struct nand_manufacturer_ops amd_nand_manuf_ops; -extern const struct nand_manufacturer_ops macronix_nand_manuf_ops; - int nand_create_bbt(struct nand_chip *chip); -int nand_markbad_bbt(struct mtd_info *mtd, loff_t offs); -int nand_isreserved_bbt(struct mtd_info *mtd, loff_t offs); -int nand_isbad_bbt(struct mtd_info *mtd, loff_t offs, int allowbbt); -int nand_erase_nand(struct mtd_info *mtd, struct erase_info *instr, - int allowbbt); - -/** - * struct platform_nand_chip - chip level device structure - * @nr_chips: max. number of chips to scan for - * @chip_offset: chip number offset - * @nr_partitions: number of partitions pointed to by partitions (or zero) - * @partitions: mtd partition list - * @chip_delay: R/B delay value in us - * @options: Option flags, e.g. 16bit buswidth - * @bbt_options: BBT option flags, e.g. NAND_BBT_USE_FLASH - * @part_probe_types: NULL-terminated array of probe types - */ -struct platform_nand_chip { - int nr_chips; - int chip_offset; - int nr_partitions; - struct mtd_partition *partitions; - int chip_delay; - unsigned int options; - unsigned int bbt_options; - const char **part_probe_types; -}; - -/* Keep gcc happy */ -struct platform_device; - -/** - * struct platform_nand_ctrl - controller level device structure - * @probe: platform specific function to probe/setup hardware - * @remove: platform specific function to remove/teardown hardware - * @dev_ready: platform specific function to read ready/busy pin - * @select_chip: platform specific chip select function - * @cmd_ctrl: platform specific function for controlling - * ALE/CLE/nCE. Also used to write command and address - * @write_buf: platform specific function for write buffer - * @read_buf: platform specific function for read buffer - * @priv: private data to transport driver specific settings - * - * All fields are optional and depend on the hardware driver requirements - */ -struct platform_nand_ctrl { - int (*probe)(struct platform_device *pdev); - void (*remove)(struct platform_device *pdev); - int (*dev_ready)(struct mtd_info *mtd); - void (*select_chip)(struct mtd_info *mtd, int chip); - void (*cmd_ctrl)(struct mtd_info *mtd, int dat, unsigned int ctrl); - void (*write_buf)(struct mtd_info *mtd, const uint8_t *buf, int len); - void (*read_buf)(struct mtd_info *mtd, uint8_t *buf, int len); - void *priv; -}; - -/** - * struct platform_nand_data - container structure for platform-specific data - * @chip: chip level chip structure - * @ctrl: controller level device structure - */ -struct platform_nand_data { - struct platform_nand_chip chip; - struct platform_nand_ctrl ctrl; -}; - -/* return the supported asynchronous timing mode. 
*/ -static inline int onfi_get_async_timing_mode(struct nand_chip *chip) -{ - if (!chip->parameters.onfi) - return ONFI_TIMING_MODE_UNKNOWN; - - return chip->parameters.onfi->async_timing_mode; -} - -int onfi_fill_data_interface(struct nand_chip *chip, - enum nand_data_interface_type type, - int timing_mode); /* * Check if it is a SLC nand. @@ -1658,9 +1263,6 @@ static inline int nand_opcode_8bits(unsigned int command) return 0; } -/* get timing characteristics from ONFI timing mode. */ -const struct nand_sdr_timings *onfi_async_timing_mode_to_sdr_timings(int mode); - int nand_check_erased_ecc_chunk(void *data, int datalen, void *ecc, int ecclen, void *extraoob, int extraooblen, @@ -1670,37 +1272,22 @@ int nand_ecc_choose_conf(struct nand_chip *chip, const struct nand_ecc_caps *caps, int oobavail); /* Default write_oob implementation */ -int nand_write_oob_std(struct mtd_info *mtd, struct nand_chip *chip, int page); - -/* Default write_oob syndrome implementation */ -int nand_write_oob_syndrome(struct mtd_info *mtd, struct nand_chip *chip, - int page); +int nand_write_oob_std(struct nand_chip *chip, int page); /* Default read_oob implementation */ -int nand_read_oob_std(struct mtd_info *mtd, struct nand_chip *chip, int page); +int nand_read_oob_std(struct nand_chip *chip, int page); -/* Default read_oob syndrome implementation */ -int nand_read_oob_syndrome(struct mtd_info *mtd, struct nand_chip *chip, - int page); - -/* Wrapper to use in order for controllers/vendors to GET/SET FEATURES */ -int nand_get_features(struct nand_chip *chip, int addr, u8 *subfeature_param); -int nand_set_features(struct nand_chip *chip, int addr, u8 *subfeature_param); /* Stub used by drivers that do not support GET/SET FEATURES operations */ -int nand_get_set_features_notsupp(struct mtd_info *mtd, struct nand_chip *chip, - int addr, u8 *subfeature_param); +int nand_get_set_features_notsupp(struct nand_chip *chip, int addr, + u8 *subfeature_param); /* Default read_page_raw implementation */ -int nand_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip, - uint8_t *buf, int oob_required, int page); -int nand_read_page_raw_notsupp(struct mtd_info *mtd, struct nand_chip *chip, - u8 *buf, int oob_required, int page); +int nand_read_page_raw(struct nand_chip *chip, uint8_t *buf, int oob_required, + int page); /* Default write_page_raw implementation */ -int nand_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip, - const uint8_t *buf, int oob_required, int page); -int nand_write_page_raw_notsupp(struct mtd_info *mtd, struct nand_chip *chip, - const u8 *buf, int oob_required, int page); +int nand_write_page_raw(struct nand_chip *chip, const uint8_t *buf, + int oob_required, int page); /* Reset and initialize a NAND device */ int nand_reset(struct nand_chip *chip, int chipnr); @@ -1710,7 +1297,6 @@ int nand_reset_op(struct nand_chip *chip); int nand_readid_op(struct nand_chip *chip, u8 addr, void *buf, unsigned int len); int nand_status_op(struct nand_chip *chip, u8 *status); -int nand_exit_status_op(struct nand_chip *chip); int nand_erase_op(struct nand_chip *chip, unsigned int eraseblock); int nand_read_page_op(struct nand_chip *chip, unsigned int page, unsigned int offset_in_page, void *buf, unsigned int len); @@ -1734,16 +1320,25 @@ int nand_read_data_op(struct nand_chip *chip, void *buf, unsigned int len, int nand_write_data_op(struct nand_chip *chip, const void *buf, unsigned int len, bool force_8bit); +/* Scan and identify a NAND device */ +int nand_scan_with_ids(struct nand_chip *chip, unsigned 
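With the prototype changes above, wiring the default OOB/raw-page helpers into a driver's ECC ops no longer goes through mtd_info; a minimal, hypothetical init helper might read:

static void foo_init_ecc_ops(struct nand_chip *chip)
{
	chip->ecc.read_oob       = nand_read_oob_std;	/* int (*)(struct nand_chip *, int) */
	chip->ecc.write_oob      = nand_write_oob_std;
	chip->ecc.read_page_raw  = nand_read_page_raw;
	chip->ecc.write_page_raw = nand_write_page_raw;
}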
int max_chips, + struct nand_flash_dev *ids); + +static inline int nand_scan(struct nand_chip *chip, unsigned int max_chips) +{ + return nand_scan_with_ids(chip, max_chips, NULL); +} + +/* Internal helper for board drivers which need to override command function */ +void nand_wait_ready(struct nand_chip *chip); + /* * Free resources held by the NAND device, must be called on error after a * sucessful nand_scan(). */ void nand_cleanup(struct nand_chip *chip); /* Unregister the MTD device and calls nand_cleanup() */ -void nand_release(struct mtd_info *mtd); - -/* Default extended ID decoding function */ -void nand_decode_ext_id(struct nand_chip *chip); +void nand_release(struct nand_chip *chip); /* * External helper for controller drivers that have to implement the WAITRDY diff --git a/include/linux/mtd/spi-nor.h b/include/linux/mtd/spi-nor.h index c922e97f205a..7f0c7303575e 100644 --- a/include/linux/mtd/spi-nor.h +++ b/include/linux/mtd/spi-nor.h @@ -239,6 +239,94 @@ enum spi_nor_option_flags { }; /** + * struct spi_nor_erase_type - Structure to describe a SPI NOR erase type + * @size: the size of the sector/block erased by the erase type. + * JEDEC JESD216B imposes erase sizes to be a power of 2. + * @size_shift: @size is a power of 2, the shift is stored in + * @size_shift. + * @size_mask: the size mask based on @size_shift. + * @opcode: the SPI command op code to erase the sector/block. + * @idx: Erase Type index as sorted in the Basic Flash Parameter + * Table. It will be used to synchronize the supported + * Erase Types with the ones identified in the SFDP + * optional tables. + */ +struct spi_nor_erase_type { + u32 size; + u32 size_shift; + u32 size_mask; + u8 opcode; + u8 idx; +}; + +/** + * struct spi_nor_erase_command - Used for non-uniform erases + * The structure is used to describe a list of erase commands to be executed + * once we validate that the erase can be performed. The elements in the list + * are run-length encoded. + * @list: for inclusion into the list of erase commands. + * @count: how many times the same erase command should be + * consecutively used. + * @size: the size of the sector/block erased by the command. + * @opcode: the SPI command op code to erase the sector/block. + */ +struct spi_nor_erase_command { + struct list_head list; + u32 count; + u32 size; + u8 opcode; +}; + +/** + * struct spi_nor_erase_region - Structure to describe a SPI NOR erase region + * @offset: the offset in the data array of erase region start. + * LSB bits are used as a bitmask encoding flags to + * determine if this region is overlaid, if this region is + * the last in the SPI NOR flash memory and to indicate + * all the supported erase commands inside this region. + * The erase types are sorted in ascending order with the + * smallest Erase Type size being at BIT(0). + * @size: the size of the region in bytes. + */ +struct spi_nor_erase_region { + u64 offset; + u64 size; +}; + +#define SNOR_ERASE_TYPE_MAX 4 +#define SNOR_ERASE_TYPE_MASK GENMASK_ULL(SNOR_ERASE_TYPE_MAX - 1, 0) + +#define SNOR_LAST_REGION BIT(4) +#define SNOR_OVERLAID_REGION BIT(5) + +#define SNOR_ERASE_FLAGS_MAX 6 +#define SNOR_ERASE_FLAGS_MASK GENMASK_ULL(SNOR_ERASE_FLAGS_MAX - 1, 0) + +/** + * struct spi_nor_erase_map - Structure to describe the SPI NOR erase map + * @regions: array of erase regions. The regions are consecutive in + * address space. Walking through the regions is done + * incrementally. 
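Probe/remove sketch for the chip-based nand_scan()/nand_release() prototypes introduced above (error handling trimmed, foo_* names invented):

static int foo_nand_probe(struct nand_chip *chip)
{
	int ret;

	ret = nand_scan(chip, 1);	/* now takes the chip, not the mtd */
	if (ret)
		return ret;

	ret = mtd_device_register(nand_to_mtd(chip), NULL, 0);
	if (ret)
		nand_cleanup(chip);

	return ret;
}

static void foo_nand_remove(struct nand_chip *chip)
{
	nand_release(chip);	/* unregisters the MTD device and calls nand_cleanup() */
}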
+ * @uniform_region: a pre-allocated erase region for SPI NOR with a uniform + * sector size (legacy implementation). + * @erase_type: an array of erase types shared by all the regions. + * The erase types are sorted in ascending order, with the + * smallest Erase Type size being the first member in the + * erase_type array. + * @uniform_erase_type: bitmask encoding erase types that can erase the + * entire memory. This member is completed at init by + * uniform and non-uniform SPI NOR flash memories if they + * support at least one erase type that can erase the + * entire memory. + */ +struct spi_nor_erase_map { + struct spi_nor_erase_region *regions; + struct spi_nor_erase_region uniform_region; + struct spi_nor_erase_type erase_type[SNOR_ERASE_TYPE_MAX]; + u8 uniform_erase_type; +}; + +/** * struct flash_info - Forward declaration of a structure used internally by * spi_nor_scan() */ @@ -262,6 +350,7 @@ struct flash_info; * @write_proto: the SPI protocol for write operations * @reg_proto the SPI protocol for read_reg/write_reg/erase operations * @cmd_buf: used by the write_reg + * @erase_map: the erase map of the SPI NOR * @prepare: [OPTIONAL] do some preparations for the * read/write/erase/lock/unlock operations * @unprepare: [OPTIONAL] do some post work after the @@ -297,6 +386,7 @@ struct spi_nor { bool sst_write_second; u32 flags; u8 cmd_buf[SPI_NOR_MAX_CMD_SIZE]; + struct spi_nor_erase_map erase_map; int (*prepare)(struct spi_nor *nor, enum spi_nor_ops ops); void (*unprepare)(struct spi_nor *nor, enum spi_nor_ops ops); @@ -317,6 +407,35 @@ struct spi_nor { void *priv; }; +static u64 __maybe_unused +spi_nor_region_is_last(const struct spi_nor_erase_region *region) +{ + return region->offset & SNOR_LAST_REGION; +} + +static u64 __maybe_unused +spi_nor_region_end(const struct spi_nor_erase_region *region) +{ + return (region->offset & ~SNOR_ERASE_FLAGS_MASK) + region->size; +} + +static void __maybe_unused +spi_nor_region_mark_end(struct spi_nor_erase_region *region) +{ + region->offset |= SNOR_LAST_REGION; +} + +static void __maybe_unused +spi_nor_region_mark_overlay(struct spi_nor_erase_region *region) +{ + region->offset |= SNOR_OVERLAID_REGION; +} + +static bool __maybe_unused spi_nor_has_uniform_erase(const struct spi_nor *nor) +{ + return !!nor->erase_map.uniform_erase_type; +} + static inline void spi_nor_set_flash_node(struct spi_nor *nor, struct device_node *np) { diff --git a/include/linux/ndctl.h b/include/linux/ndctl.h new file mode 100644 index 000000000000..cd5a293ce3ae --- /dev/null +++ b/include/linux/ndctl.h @@ -0,0 +1,22 @@ +/* + * Copyright (c) 2014-2016, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU Lesser General Public License, + * version 2.1, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT ANY + * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS + * FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for + * more details. 
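Sketch of how code holding a struct spi_nor might consume the new erase-map helpers declared above; the dump loop is illustrative and assumes spi_nor_scan() has already populated nor->erase_map.

#include <linux/kernel.h>
#include <linux/mtd/spi-nor.h>

static void foo_dump_erase_regions(struct spi_nor *nor)
{
	struct spi_nor_erase_region *region = nor->erase_map.regions;

	if (spi_nor_has_uniform_erase(nor)) {
		pr_info("uniform erase map\n");
		return;
	}

	for (;;) {
		pr_info("region ends at 0x%llx\n",
			(unsigned long long)spi_nor_region_end(region));
		if (spi_nor_region_is_last(region))
			break;
		region++;
	}
}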
+ */ +#ifndef _LINUX_NDCTL_H +#define _LINUX_NDCTL_H + +#include <uapi/linux/ndctl.h> + +enum { + ND_MIN_NAMESPACE_SIZE = PAGE_SIZE, +}; + +#endif /* _LINUX_NDCTL_H */ diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index ca5ab98053c8..dc1d9ed33b31 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -535,6 +535,32 @@ static inline void napi_synchronize(const struct napi_struct *n) barrier(); } +/** + * napi_if_scheduled_mark_missed - if napi is running, set the + * NAPIF_STATE_MISSED + * @n: NAPI context + * + * If napi is running, set the NAPIF_STATE_MISSED, and return true if + * NAPI is scheduled. + **/ +static inline bool napi_if_scheduled_mark_missed(struct napi_struct *n) +{ + unsigned long val, new; + + do { + val = READ_ONCE(n->state); + if (val & NAPIF_STATE_DISABLE) + return true; + + if (!(val & NAPIF_STATE_SCHED)) + return false; + + new = val | NAPIF_STATE_MISSED; + } while (cmpxchg(&n->state, val, new) != val); + + return true; +} + enum netdev_queue_state_t { __QUEUE_STATE_DRV_XOFF, __QUEUE_STATE_STACK_XOFF, @@ -583,6 +609,9 @@ struct netdev_queue { /* Subordinate device that the queue has been assigned to */ struct net_device *sb_dev; +#ifdef CONFIG_XDP_SOCKETS + struct xdp_umem *umem; +#endif /* * write-mostly part */ @@ -712,6 +741,9 @@ struct netdev_rx_queue { struct kobject kobj; struct net_device *dev; struct xdp_rxq_info xdp_rxq; +#ifdef CONFIG_XDP_SOCKETS + struct xdp_umem *umem; +#endif } ____cacheline_aligned_in_smp; /* @@ -1730,6 +1762,8 @@ enum netdev_priv_flags { * switch driver and used to set the phys state of the * switch port. * + * @wol_enabled: Wake-on-LAN is enabled + * * FIXME: cleanup struct net_device such that network protocol info * moves out. */ @@ -1974,7 +2008,6 @@ struct net_device { struct pcpu_lstats __percpu *lstats; struct pcpu_sw_netstats __percpu *tstats; struct pcpu_dstats __percpu *dstats; - struct pcpu_vstats __percpu *vstats; }; #if IS_ENABLED(CONFIG_GARP) @@ -2014,6 +2047,7 @@ struct net_device { struct lock_class_key *qdisc_tx_busylock; struct lock_class_key *qdisc_running_key; bool proto_down; + unsigned wol_enabled:1; }; #define to_net_dev(d) container_of(d, struct net_device, dev) @@ -2317,6 +2351,7 @@ static inline struct sk_buff *call_gro_receive_sk(gro_receive_sk_t cb, struct packet_type { __be16 type; /* This is really htons(ether_type). 
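Calling-pattern sketch for the new napi_if_scheduled_mark_missed() helper above: if it returns false, NAPI was neither scheduled nor disabled, so the caller has to kick the poll itself (here with napi_schedule(); a real driver may rearm an interrupt instead).

#include <linux/netdevice.h>

static void foo_trigger_poll(struct napi_struct *napi)
{
	if (!napi_if_scheduled_mark_missed(napi))
		napi_schedule(napi);
}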
*/ + bool ignore_outgoing; struct net_device *dev; /* NULL is wildcarded here */ int (*func) (struct sk_buff *, struct net_device *, @@ -2355,6 +2390,12 @@ struct pcpu_sw_netstats { struct u64_stats_sync syncp; }; +struct pcpu_lstats { + u64 packets; + u64 bytes; + struct u64_stats_sync syncp; +}; + #define __netdev_alloc_pcpu_stats(type, gfp) \ ({ \ typeof(type) __percpu *pcpu_stats = alloc_percpu_gfp(type, gfp);\ @@ -2455,6 +2496,13 @@ struct netdev_notifier_info { struct netlink_ext_ack *extack; }; +struct netdev_notifier_info_ext { + struct netdev_notifier_info info; /* must be first */ + union { + u32 mtu; + } ext; +}; + struct netdev_notifier_change_info { struct netdev_notifier_info info; /* must be first */ unsigned int flags_changed; @@ -3597,6 +3645,7 @@ static __always_inline int ____dev_forward_skb(struct net_device *dev, return 0; } +bool dev_nit_active(struct net_device *dev); void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev); extern int netdev_budget; diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h index 07efffd0c759..bbe99d2b28b4 100644 --- a/include/linux/netfilter.h +++ b/include/linux/netfilter.h @@ -215,6 +215,8 @@ static inline int nf_hook(u_int8_t pf, unsigned int hook, struct net *net, break; case NFPROTO_ARP: #ifdef CONFIG_NETFILTER_FAMILY_ARP + if (WARN_ON_ONCE(hook >= ARRAY_SIZE(net->nf.hooks_arp))) + break; hook_head = rcu_dereference(net->nf.hooks_arp[hook]); #endif break; diff --git a/include/linux/netfilter/nf_conntrack_common.h b/include/linux/netfilter/nf_conntrack_common.h index 03097fa70975..e142b2b5f1ea 100644 --- a/include/linux/netfilter/nf_conntrack_common.h +++ b/include/linux/netfilter/nf_conntrack_common.h @@ -19,7 +19,4 @@ struct ip_conntrack_stat { unsigned int search_restart; }; -/* call to create an explicit dependency on nf_conntrack. 
*/ -void need_conntrack(void); - #endif /* _NF_CONNTRACK_COMMON_H */ diff --git a/include/linux/netfilter/nfnetlink_osf.h b/include/linux/netfilter/nfnetlink_osf.h index ecf7dab81e9e..c6000046c966 100644 --- a/include/linux/netfilter/nfnetlink_osf.h +++ b/include/linux/netfilter/nfnetlink_osf.h @@ -27,6 +27,7 @@ bool nf_osf_match(const struct sk_buff *skb, u_int8_t family, const struct list_head *nf_osf_fingers); const char *nf_osf_find(const struct sk_buff *skb, - const struct list_head *nf_osf_fingers); + const struct list_head *nf_osf_fingers, + const int ttl_check); #endif /* _NFOSF_H */ diff --git a/include/linux/netlink.h b/include/linux/netlink.h index 71f121b66ca8..4da90a6ab536 100644 --- a/include/linux/netlink.h +++ b/include/linux/netlink.h @@ -176,8 +176,11 @@ struct netlink_callback { void *data; /* the module that dump function belong to */ struct module *module; + struct netlink_ext_ack *extack; u16 family; u16 min_dump_alloc; + bool strict_check; + u16 answer_flags; unsigned int prev_seq, seq; long args[6]; }; diff --git a/include/linux/netpoll.h b/include/linux/netpoll.h index 3ef82d3a78db..676f1ff161a9 100644 --- a/include/linux/netpoll.h +++ b/include/linux/netpoll.h @@ -31,8 +31,6 @@ struct netpoll { bool ipv6; u16 local_port, remote_port; u8 remote_mac[ETH_ALEN]; - - struct work_struct cleanup_work; }; struct netpoll_info { @@ -63,7 +61,7 @@ int netpoll_parse_options(struct netpoll *np, char *opt); int __netpoll_setup(struct netpoll *np, struct net_device *ndev); int netpoll_setup(struct netpoll *np); void __netpoll_cleanup(struct netpoll *np); -void __netpoll_free_async(struct netpoll *np); +void __netpoll_free(struct netpoll *np); void netpoll_cleanup(struct netpoll *np); void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb, struct net_device *dev); diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h index a0831e9d19c9..6e0417c02279 100644 --- a/include/linux/nfs_fs.h +++ b/include/linux/nfs_fs.h @@ -62,6 +62,7 @@ struct nfs_lock_context { struct nfs_open_context *open_context; fl_owner_t lockowner; atomic_t io_count; + struct rcu_head rcu_head; }; struct nfs4_state; @@ -82,6 +83,7 @@ struct nfs_open_context { struct list_head list; struct nfs4_threshold *mdsthreshold; + struct rcu_head rcu_head; }; struct nfs_open_dir_context { diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h index bf39d9c92201..0fc0b9135d46 100644 --- a/include/linux/nfs_fs_sb.h +++ b/include/linux/nfs_fs_sb.h @@ -228,6 +228,9 @@ struct nfs_server { unsigned short mountd_port; unsigned short mountd_protocol; struct rpc_wait_queue uoc_rpcwaitq; + + /* XDR related information */ + unsigned int read_hdrsize; }; /* Server capabilities */ diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h index bd1c889a9ed9..0e016252cfc6 100644 --- a/include/linux/nfs_xdr.h +++ b/include/linux/nfs_xdr.h @@ -608,8 +608,13 @@ struct nfs_pgio_args { __u32 count; unsigned int pgbase; struct page ** pages; - const u32 * bitmask; /* used by write */ - enum nfs3_stable_how stable; /* used by write */ + union { + unsigned int replen; /* used by read */ + struct { + const u32 * bitmask; /* used by write */ + enum nfs3_stable_how stable; /* used by write */ + }; + }; }; struct nfs_pgio_res { @@ -617,10 +622,16 @@ struct nfs_pgio_res { struct nfs_fattr * fattr; __u32 count; __u32 op_status; - int eof; /* used by read */ - struct nfs_writeverf * verf; /* used by write */ - const struct nfs_server *server; /* used by write */ - + union { + struct { + unsigned int replen; /* used 
by read */ + int eof; /* used by read */ + }; + struct { + struct nfs_writeverf * verf; /* used by write */ + const struct nfs_server *server; /* used by write */ + }; + }; }; /* @@ -1471,11 +1482,10 @@ struct nfs_pgio_header { const struct nfs_rw_ops *rw_ops; struct nfs_io_completion *io_completion; struct nfs_direct_req *dreq; - spinlock_t lock; - /* fields protected by lock */ + int pnfs_error; int error; /* merge with pnfs_error */ - unsigned long good_bytes; /* boundary of good data */ + unsigned int good_bytes; /* boundary of good data */ unsigned long flags; /* diff --git a/include/linux/nvme.h b/include/linux/nvme.h index 68e91ef5494c..818dbe9331be 100644 --- a/include/linux/nvme.h +++ b/include/linux/nvme.h @@ -1241,6 +1241,7 @@ enum { NVME_SC_ANA_PERSISTENT_LOSS = 0x301, NVME_SC_ANA_INACCESSIBLE = 0x302, NVME_SC_ANA_TRANSITION = 0x303, + NVME_SC_HOST_PATH_ERROR = 0x370, NVME_SC_DNR = 0x4000, }; diff --git a/include/linux/nvmem-consumer.h b/include/linux/nvmem-consumer.h index 4e85447f7860..312bfa5efd80 100644 --- a/include/linux/nvmem-consumer.h +++ b/include/linux/nvmem-consumer.h @@ -1,12 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0 */ /* * nvmem framework consumer. * * Copyright (C) 2015 Srinivas Kandagatla <srinivas.kandagatla@linaro.org> * Copyright (C) 2013 Maxime Ripard <maxime.ripard@free-electrons.com> - * - * This file is licensed under the terms of the GNU General Public - * License version 2. This program is licensed "as is" without any - * warranty of any kind, whether express or implied. */ #ifndef _LINUX_NVMEM_CONSUMER_H @@ -14,6 +11,7 @@ #include <linux/err.h> #include <linux/errno.h> +#include <linux/notifier.h> struct device; struct device_node; @@ -29,11 +27,36 @@ struct nvmem_cell_info { unsigned int nbits; }; +/** + * struct nvmem_cell_lookup - cell lookup entry + * + * @nvmem_name: Name of the provider. + * @cell_name: Name of the nvmem cell as defined in the name field of + * struct nvmem_cell_info. + * @dev_id: Name of the consumer device that will be associated with + * this cell. + * @con_id: Connector id for this cell lookup. 
+ */ +struct nvmem_cell_lookup { + const char *nvmem_name; + const char *cell_name; + const char *dev_id; + const char *con_id; + struct list_head node; +}; + +enum { + NVMEM_ADD = 1, + NVMEM_REMOVE, + NVMEM_CELL_ADD, + NVMEM_CELL_REMOVE, +}; + #if IS_ENABLED(CONFIG_NVMEM) /* Cell based interface */ -struct nvmem_cell *nvmem_cell_get(struct device *dev, const char *name); -struct nvmem_cell *devm_nvmem_cell_get(struct device *dev, const char *name); +struct nvmem_cell *nvmem_cell_get(struct device *dev, const char *id); +struct nvmem_cell *devm_nvmem_cell_get(struct device *dev, const char *id); void nvmem_cell_put(struct nvmem_cell *cell); void devm_nvmem_cell_put(struct device *dev, struct nvmem_cell *cell); void *nvmem_cell_read(struct nvmem_cell *cell, size_t *len); @@ -55,18 +78,28 @@ ssize_t nvmem_device_cell_read(struct nvmem_device *nvmem, int nvmem_device_cell_write(struct nvmem_device *nvmem, struct nvmem_cell_info *info, void *buf); +const char *nvmem_dev_name(struct nvmem_device *nvmem); + +void nvmem_add_cell_lookups(struct nvmem_cell_lookup *entries, + size_t nentries); +void nvmem_del_cell_lookups(struct nvmem_cell_lookup *entries, + size_t nentries); + +int nvmem_register_notifier(struct notifier_block *nb); +int nvmem_unregister_notifier(struct notifier_block *nb); + #else static inline struct nvmem_cell *nvmem_cell_get(struct device *dev, - const char *name) + const char *id) { - return ERR_PTR(-ENOSYS); + return ERR_PTR(-EOPNOTSUPP); } static inline struct nvmem_cell *devm_nvmem_cell_get(struct device *dev, - const char *name) + const char *id) { - return ERR_PTR(-ENOSYS); + return ERR_PTR(-EOPNOTSUPP); } static inline void devm_nvmem_cell_put(struct device *dev, @@ -80,31 +113,31 @@ static inline void nvmem_cell_put(struct nvmem_cell *cell) static inline void *nvmem_cell_read(struct nvmem_cell *cell, size_t *len) { - return ERR_PTR(-ENOSYS); + return ERR_PTR(-EOPNOTSUPP); } static inline int nvmem_cell_write(struct nvmem_cell *cell, const char *buf, size_t len) { - return -ENOSYS; + return -EOPNOTSUPP; } static inline int nvmem_cell_read_u32(struct device *dev, const char *cell_id, u32 *val) { - return -ENOSYS; + return -EOPNOTSUPP; } static inline struct nvmem_device *nvmem_device_get(struct device *dev, const char *name) { - return ERR_PTR(-ENOSYS); + return ERR_PTR(-EOPNOTSUPP); } static inline struct nvmem_device *devm_nvmem_device_get(struct device *dev, const char *name) { - return ERR_PTR(-ENOSYS); + return ERR_PTR(-EOPNOTSUPP); } static inline void nvmem_device_put(struct nvmem_device *nvmem) @@ -120,47 +153,68 @@ static inline ssize_t nvmem_device_cell_read(struct nvmem_device *nvmem, struct nvmem_cell_info *info, void *buf) { - return -ENOSYS; + return -EOPNOTSUPP; } static inline int nvmem_device_cell_write(struct nvmem_device *nvmem, struct nvmem_cell_info *info, void *buf) { - return -ENOSYS; + return -EOPNOTSUPP; } static inline int nvmem_device_read(struct nvmem_device *nvmem, unsigned int offset, size_t bytes, void *buf) { - return -ENOSYS; + return -EOPNOTSUPP; } static inline int nvmem_device_write(struct nvmem_device *nvmem, unsigned int offset, size_t bytes, void *buf) { - return -ENOSYS; + return -EOPNOTSUPP; } + +static inline const char *nvmem_dev_name(struct nvmem_device *nvmem) +{ + return NULL; +} + +static inline void +nvmem_add_cell_lookups(struct nvmem_cell_lookup *entries, size_t nentries) {} +static inline void +nvmem_del_cell_lookups(struct nvmem_cell_lookup *entries, size_t nentries) {} + +static inline int nvmem_register_notifier(struct 
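Board-file sketch for the new consumer-side lookup API above: after registering the entries, the consumer device can call nvmem_cell_get(dev, "mac-address") without any DT description. Provider, cell and device names are made up.

#include <linux/kernel.h>
#include <linux/nvmem-consumer.h>

static struct nvmem_cell_lookup foo_cell_lookups[] = {
	{
		.nvmem_name = "eeprom0",
		.cell_name  = "mac-address",
		.dev_id     = "foo-eth.0",
		.con_id     = "mac-address",
	},
};

static void foo_register_nvmem_lookups(void)
{
	nvmem_add_cell_lookups(foo_cell_lookups, ARRAY_SIZE(foo_cell_lookups));
}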
notifier_block *nb) +{ + return -EOPNOTSUPP; +} + +static inline int nvmem_unregister_notifier(struct notifier_block *nb) +{ + return -EOPNOTSUPP; +} + #endif /* CONFIG_NVMEM */ #if IS_ENABLED(CONFIG_NVMEM) && IS_ENABLED(CONFIG_OF) struct nvmem_cell *of_nvmem_cell_get(struct device_node *np, - const char *name); + const char *id); struct nvmem_device *of_nvmem_device_get(struct device_node *np, const char *name); #else static inline struct nvmem_cell *of_nvmem_cell_get(struct device_node *np, - const char *name) + const char *id) { - return ERR_PTR(-ENOSYS); + return ERR_PTR(-EOPNOTSUPP); } static inline struct nvmem_device *of_nvmem_device_get(struct device_node *np, const char *name) { - return ERR_PTR(-ENOSYS); + return ERR_PTR(-EOPNOTSUPP); } #endif /* CONFIG_NVMEM && CONFIG_OF */ diff --git a/include/linux/nvmem-provider.h b/include/linux/nvmem-provider.h index 24def6ad09bb..1e3283c2af77 100644 --- a/include/linux/nvmem-provider.h +++ b/include/linux/nvmem-provider.h @@ -1,12 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0 */ /* * nvmem framework provider. * * Copyright (C) 2015 Srinivas Kandagatla <srinivas.kandagatla@linaro.org> * Copyright (C) 2013 Maxime Ripard <maxime.ripard@free-electrons.com> - * - * This file is licensed under the terms of the GNU General Public - * License version 2. This program is licensed "as is" without any - * warranty of any kind, whether express or implied. */ #ifndef _LINUX_NVMEM_PROVIDER_H @@ -67,30 +64,46 @@ struct nvmem_config { struct device *base_dev; }; +/** + * struct nvmem_cell_table - NVMEM cell definitions for given provider + * + * @nvmem_name: Provider name. + * @cells: Array of cell definitions. + * @ncells: Number of cell definitions in the array. + * @node: List node. + * + * This structure together with related helper functions is provided for users + * that don't can't access the nvmem provided structure but wish to register + * cell definitions for it e.g. board files registering an EEPROM device. 
+ */ +struct nvmem_cell_table { + const char *nvmem_name; + const struct nvmem_cell_info *cells; + size_t ncells; + struct list_head node; +}; + #if IS_ENABLED(CONFIG_NVMEM) struct nvmem_device *nvmem_register(const struct nvmem_config *cfg); -int nvmem_unregister(struct nvmem_device *nvmem); +void nvmem_unregister(struct nvmem_device *nvmem); struct nvmem_device *devm_nvmem_register(struct device *dev, const struct nvmem_config *cfg); int devm_nvmem_unregister(struct device *dev, struct nvmem_device *nvmem); -int nvmem_add_cells(struct nvmem_device *nvmem, - const struct nvmem_cell_info *info, - int ncells); +void nvmem_add_cell_table(struct nvmem_cell_table *table); +void nvmem_del_cell_table(struct nvmem_cell_table *table); + #else static inline struct nvmem_device *nvmem_register(const struct nvmem_config *c) { - return ERR_PTR(-ENOSYS); + return ERR_PTR(-EOPNOTSUPP); } -static inline int nvmem_unregister(struct nvmem_device *nvmem) -{ - return -ENOSYS; -} +static inline void nvmem_unregister(struct nvmem_device *nvmem) {} static inline struct nvmem_device * devm_nvmem_register(struct device *dev, const struct nvmem_config *c) @@ -101,16 +114,11 @@ devm_nvmem_register(struct device *dev, const struct nvmem_config *c) static inline int devm_nvmem_unregister(struct device *dev, struct nvmem_device *nvmem) { - return nvmem_unregister(nvmem); - + return -EOPNOTSUPP; } -static inline int nvmem_add_cells(struct nvmem_device *nvmem, - const struct nvmem_cell_info *info, - int ncells) -{ - return -ENOSYS; -} +static inline void nvmem_add_cell_table(struct nvmem_cell_table *table) {} +static inline void nvmem_del_cell_table(struct nvmem_cell_table *table) {} #endif /* CONFIG_NVMEM */ #endif /* ifndef _LINUX_NVMEM_PROVIDER_H */ diff --git a/include/linux/of.h b/include/linux/of.h index 99b0ebf49632..ab96025b2382 100644 --- a/include/linux/of.h +++ b/include/linux/of.h @@ -247,12 +247,6 @@ static inline unsigned long of_read_ulong(const __be32 *cell, int size) #include <asm/prom.h> #endif -/* Default #address and #size cells. 
Allow arch asm/prom.h to override */ -#if !defined(OF_ROOT_NODE_ADDR_CELLS_DEFAULT) -#define OF_ROOT_NODE_ADDR_CELLS_DEFAULT 1 -#define OF_ROOT_NODE_SIZE_CELLS_DEFAULT 1 -#endif - #define OF_IS_DYNAMIC(x) test_bit(OF_DYNAMIC, &x->_flags) #define OF_MARK_DYNAMIC(x) set_bit(OF_DYNAMIC, &x->_flags) @@ -353,6 +347,8 @@ extern const void *of_get_property(const struct device_node *node, const char *name, int *lenp); extern struct device_node *of_get_cpu_node(int cpu, unsigned int *thread); +extern struct device_node *of_get_next_cpu_node(struct device_node *prev); + #define for_each_property_of_node(dn, pp) \ for (pp = dn->properties; pp != NULL; pp = pp->next) @@ -550,6 +546,10 @@ bool of_console_check(struct device_node *dn, char *name, int index); extern int of_cpu_node_to_id(struct device_node *np); +int of_map_rid(struct device_node *np, u32 rid, + const char *map_name, const char *map_mask_name, + struct device_node **target, u32 *id_out); + #else /* CONFIG_OF */ static inline void of_core_init(void) @@ -754,6 +754,11 @@ static inline struct device_node *of_get_cpu_node(int cpu, return NULL; } +static inline struct device_node *of_get_next_cpu_node(struct device_node *prev) +{ + return NULL; +} + static inline int of_n_addr_cells(struct device_node *np) { return 0; @@ -952,6 +957,13 @@ static inline int of_cpu_node_to_id(struct device_node *np) return -ENODEV; } +static inline int of_map_rid(struct device_node *np, u32 rid, + const char *map_name, const char *map_mask_name, + struct device_node **target, u32 *id_out) +{ + return -EINVAL; +} + #define of_match_ptr(_ptr) NULL #define of_match_node(_matches, _node) NULL #endif /* CONFIG_OF */ @@ -990,7 +1002,7 @@ static inline struct device_node *of_find_matching_node( static inline const char *of_node_get_device_type(const struct device_node *np) { - return of_get_property(np, "type", NULL); + return of_get_property(np, "device_type", NULL); } static inline bool of_node_is_type(const struct device_node *np, const char *type) @@ -1217,6 +1229,10 @@ static inline int of_property_read_s32(const struct device_node *np, for (child = of_get_next_available_child(parent, NULL); child != NULL; \ child = of_get_next_available_child(parent, child)) +#define for_each_of_cpu_node(cpu) \ + for (cpu = of_get_next_cpu_node(NULL); cpu != NULL; \ + cpu = of_get_next_cpu_node(cpu)) + #define for_each_node_with_property(dn, prop_name) \ for (dn = of_find_node_with_property(NULL, prop_name); dn; \ dn = of_find_node_with_property(dn, prop_name)) diff --git a/include/linux/of_device.h b/include/linux/of_device.h index 165fd302b442..8d31e39dd564 100644 --- a/include/linux/of_device.h +++ b/include/linux/of_device.h @@ -58,7 +58,6 @@ static inline struct device_node *of_cpu_device_node_get(int cpu) int of_dma_configure(struct device *dev, struct device_node *np, bool force_dma); -void of_dma_deconfigure(struct device *dev); #else /* CONFIG_OF */ static inline int of_driver_match_device(struct device *dev, @@ -113,8 +112,6 @@ static inline int of_dma_configure(struct device *dev, { return 0; } -static inline void of_dma_deconfigure(struct device *dev) -{} #endif /* CONFIG_OF */ #endif /* _LINUX_OF_DEVICE_H */ diff --git a/include/linux/of_pci.h b/include/linux/of_pci.h index e83d87fc5673..21a89c4880fa 100644 --- a/include/linux/of_pci.h +++ b/include/linux/of_pci.h @@ -14,9 +14,6 @@ struct device_node *of_pci_find_child_device(struct device_node *parent, unsigned int devfn); int of_pci_get_devfn(struct device_node *np); void of_pci_check_probe_only(void); -int 
of_pci_map_rid(struct device_node *np, u32 rid, - const char *map_name, const char *map_mask_name, - struct device_node **target, u32 *id_out); #else static inline struct device_node *of_pci_find_child_device(struct device_node *parent, unsigned int devfn) @@ -29,13 +26,6 @@ static inline int of_pci_get_devfn(struct device_node *np) return -EINVAL; } -static inline int of_pci_map_rid(struct device_node *np, u32 rid, - const char *map_name, const char *map_mask_name, - struct device_node **target, u32 *id_out) -{ - return -EINVAL; -} - static inline void of_pci_check_probe_only(void) { } #endif diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h index 74bee8cecf4c..50ce1bddaf56 100644 --- a/include/linux/page-flags.h +++ b/include/linux/page-flags.h @@ -69,13 +69,14 @@ */ enum pageflags { PG_locked, /* Page is locked. Don't touch. */ - PG_error, PG_referenced, PG_uptodate, PG_dirty, PG_lru, PG_active, + PG_workingset, PG_waiters, /* Page has waiters, check its waitqueue. Must be bit #7 and in the same byte as "PG_locked" */ + PG_error, PG_slab, PG_owner_priv_1, /* Owner use. If pagecache, fs may use*/ PG_arch_1, @@ -162,6 +163,14 @@ static inline int PagePoisoned(const struct page *page) return page->flags == PAGE_POISON_PATTERN; } +#ifdef CONFIG_DEBUG_VM +void page_init_poison(struct page *page, size_t size); +#else +static inline void page_init_poison(struct page *page, size_t size) +{ +} +#endif + /* * Page flags policies wrt compound pages * @@ -280,6 +289,8 @@ PAGEFLAG(Dirty, dirty, PF_HEAD) TESTSCFLAG(Dirty, dirty, PF_HEAD) PAGEFLAG(LRU, lru, PF_HEAD) __CLEARPAGEFLAG(LRU, lru, PF_HEAD) PAGEFLAG(Active, active, PF_HEAD) __CLEARPAGEFLAG(Active, active, PF_HEAD) TESTCLEARFLAG(Active, active, PF_HEAD) +PAGEFLAG(Workingset, workingset, PF_HEAD) + TESTCLEARFLAG(Workingset, workingset, PF_HEAD) __PAGEFLAG(Slab, slab, PF_NO_TAIL) __PAGEFLAG(SlobFree, slob_free, PF_NO_TAIL) PAGEFLAG(Checked, checked, PF_NO_COMPOUND) /* Used by some filesystems */ @@ -292,6 +303,7 @@ PAGEFLAG(Foreign, foreign, PF_NO_COMPOUND); PAGEFLAG(Reserved, reserved, PF_NO_COMPOUND) __CLEARPAGEFLAG(Reserved, reserved, PF_NO_COMPOUND) + __SETPAGEFLAG(Reserved, reserved, PF_NO_COMPOUND) PAGEFLAG(SwapBacked, swapbacked, PF_NO_TAIL) __CLEARPAGEFLAG(SwapBacked, swapbacked, PF_NO_TAIL) __SETPAGEFLAG(SwapBacked, swapbacked, PF_NO_TAIL) diff --git a/include/linux/pci-dma-compat.h b/include/linux/pci-dma-compat.h index c3f1b44ade29..cb1adf0b78a9 100644 --- a/include/linux/pci-dma-compat.h +++ b/include/linux/pci-dma-compat.h @@ -119,29 +119,11 @@ static inline int pci_set_consistent_dma_mask(struct pci_dev *dev, u64 mask) { return dma_set_coherent_mask(&dev->dev, mask); } - -static inline int pci_set_dma_max_seg_size(struct pci_dev *dev, - unsigned int size) -{ - return dma_set_max_seg_size(&dev->dev, size); -} - -static inline int pci_set_dma_seg_boundary(struct pci_dev *dev, - unsigned long mask) -{ - return dma_set_seg_boundary(&dev->dev, mask); -} #else static inline int pci_set_dma_mask(struct pci_dev *dev, u64 mask) { return -EIO; } static inline int pci_set_consistent_dma_mask(struct pci_dev *dev, u64 mask) { return -EIO; } -static inline int pci_set_dma_max_seg_size(struct pci_dev *dev, - unsigned int size) -{ return -EIO; } -static inline int pci_set_dma_seg_boundary(struct pci_dev *dev, - unsigned long mask) -{ return -EIO; } #endif #endif diff --git a/include/linux/pci-dma.h b/include/linux/pci-dma.h deleted file mode 100644 index 0f7aa7353ca3..000000000000 --- a/include/linux/pci-dma.h +++ /dev/null @@ 
-1,12 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _LINUX_PCI_DMA_H -#define _LINUX_PCI_DMA_H - -#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) DEFINE_DMA_UNMAP_ADDR(ADDR_NAME); -#define DECLARE_PCI_UNMAP_LEN(LEN_NAME) DEFINE_DMA_UNMAP_LEN(LEN_NAME); -#define pci_unmap_addr dma_unmap_addr -#define pci_unmap_addr_set dma_unmap_addr_set -#define pci_unmap_len dma_unmap_len -#define pci_unmap_len_set dma_unmap_len_set - -#endif diff --git a/include/linux/pci-p2pdma.h b/include/linux/pci-p2pdma.h new file mode 100644 index 000000000000..bca9bc3e5be7 --- /dev/null +++ b/include/linux/pci-p2pdma.h @@ -0,0 +1,114 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * PCI Peer 2 Peer DMA support. + * + * Copyright (c) 2016-2018, Logan Gunthorpe + * Copyright (c) 2016-2017, Microsemi Corporation + * Copyright (c) 2017, Christoph Hellwig + * Copyright (c) 2018, Eideticom Inc. + */ + +#ifndef _LINUX_PCI_P2PDMA_H +#define _LINUX_PCI_P2PDMA_H + +#include <linux/pci.h> + +struct block_device; +struct scatterlist; + +#ifdef CONFIG_PCI_P2PDMA +int pci_p2pdma_add_resource(struct pci_dev *pdev, int bar, size_t size, + u64 offset); +int pci_p2pdma_distance_many(struct pci_dev *provider, struct device **clients, + int num_clients, bool verbose); +bool pci_has_p2pmem(struct pci_dev *pdev); +struct pci_dev *pci_p2pmem_find_many(struct device **clients, int num_clients); +void *pci_alloc_p2pmem(struct pci_dev *pdev, size_t size); +void pci_free_p2pmem(struct pci_dev *pdev, void *addr, size_t size); +pci_bus_addr_t pci_p2pmem_virt_to_bus(struct pci_dev *pdev, void *addr); +struct scatterlist *pci_p2pmem_alloc_sgl(struct pci_dev *pdev, + unsigned int *nents, u32 length); +void pci_p2pmem_free_sgl(struct pci_dev *pdev, struct scatterlist *sgl); +void pci_p2pmem_publish(struct pci_dev *pdev, bool publish); +int pci_p2pdma_map_sg(struct device *dev, struct scatterlist *sg, int nents, + enum dma_data_direction dir); +int pci_p2pdma_enable_store(const char *page, struct pci_dev **p2p_dev, + bool *use_p2pdma); +ssize_t pci_p2pdma_enable_show(char *page, struct pci_dev *p2p_dev, + bool use_p2pdma); +#else /* CONFIG_PCI_P2PDMA */ +static inline int pci_p2pdma_add_resource(struct pci_dev *pdev, int bar, + size_t size, u64 offset) +{ + return -EOPNOTSUPP; +} +static inline int pci_p2pdma_distance_many(struct pci_dev *provider, + struct device **clients, int num_clients, bool verbose) +{ + return -1; +} +static inline bool pci_has_p2pmem(struct pci_dev *pdev) +{ + return false; +} +static inline struct pci_dev *pci_p2pmem_find_many(struct device **clients, + int num_clients) +{ + return NULL; +} +static inline void *pci_alloc_p2pmem(struct pci_dev *pdev, size_t size) +{ + return NULL; +} +static inline void pci_free_p2pmem(struct pci_dev *pdev, void *addr, + size_t size) +{ +} +static inline pci_bus_addr_t pci_p2pmem_virt_to_bus(struct pci_dev *pdev, + void *addr) +{ + return 0; +} +static inline struct scatterlist *pci_p2pmem_alloc_sgl(struct pci_dev *pdev, + unsigned int *nents, u32 length) +{ + return NULL; +} +static inline void pci_p2pmem_free_sgl(struct pci_dev *pdev, + struct scatterlist *sgl) +{ +} +static inline void pci_p2pmem_publish(struct pci_dev *pdev, bool publish) +{ +} +static inline int pci_p2pdma_map_sg(struct device *dev, + struct scatterlist *sg, int nents, enum dma_data_direction dir) +{ + return 0; +} +static inline int pci_p2pdma_enable_store(const char *page, + struct pci_dev **p2p_dev, bool *use_p2pdma) +{ + *use_p2pdma = false; + return 0; +} +static inline ssize_t pci_p2pdma_enable_show(char 
*page, + struct pci_dev *p2p_dev, bool use_p2pdma) +{ + return sprintf(page, "none\n"); +} +#endif /* CONFIG_PCI_P2PDMA */ + + +static inline int pci_p2pdma_distance(struct pci_dev *provider, + struct device *client, bool verbose) +{ + return pci_p2pdma_distance_many(provider, &client, 1, verbose); +} + +static inline struct pci_dev *pci_p2pmem_find(struct device *client) +{ + return pci_p2pmem_find_many(&client, 1); +} + +#endif /* _LINUX_PCI_P2P_H */ diff --git a/include/linux/pci.h b/include/linux/pci.h index 6925828f9f25..11c71c4ecf75 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -281,6 +281,7 @@ struct pcie_link_state; struct pci_vpd; struct pci_sriov; struct pci_ats; +struct pci_p2pdma; /* The pci_dev structure describes PCI devices */ struct pci_dev { @@ -325,6 +326,7 @@ struct pci_dev { pci_power_t current_state; /* Current operating state. In ACPI, this is D0-D3, D0 being fully functional, and D3 being off. */ + unsigned int imm_ready:1; /* Supports Immediate Readiness */ u8 pm_cap; /* PM capability offset */ unsigned int pme_support:5; /* Bitmask of states from which PME# can be generated */ @@ -402,6 +404,7 @@ struct pci_dev { unsigned int has_secondary_link:1; unsigned int non_compliant_bars:1; /* Broken BARs; ignore them */ unsigned int is_probed:1; /* Device probing in progress */ + unsigned int link_active_reporting:1;/* Device capable of reporting link active */ pci_dev_flags_t dev_flags; atomic_t enable_cnt; /* pci_enable_device has been called */ @@ -439,6 +442,9 @@ struct pci_dev { #ifdef CONFIG_PCI_PASID u16 pasid_features; #endif +#ifdef CONFIG_PCI_P2PDMA + struct pci_p2pdma *p2pdma; +#endif phys_addr_t rom; /* Physical address if not from BAR */ size_t romlen; /* Length if not from BAR */ char *driver_override; /* Driver name to force a match */ @@ -1342,7 +1348,6 @@ int pci_set_vga_state(struct pci_dev *pdev, bool decode, /* kmem_cache style wrapper around pci_alloc_consistent() */ -#include <linux/pci-dma.h> #include <linux/dmapool.h> #define pci_pool dma_pool @@ -1705,6 +1710,10 @@ static inline int pci_irqd_intx_xlate(struct irq_domain *d, unsigned long *out_hwirq, unsigned int *out_type) { return -EINVAL; } + +static inline const struct pci_device_id *pci_match_id(const struct pci_device_id *ids, + struct pci_dev *dev) +{ return NULL; } #endif /* CONFIG_PCI */ /* Include architecture-dependent settings and functions */ diff --git a/include/linux/pci_hotplug.h b/include/linux/pci_hotplug.h index a6d6650a0490..7acc9f91e72b 100644 --- a/include/linux/pci_hotplug.h +++ b/include/linux/pci_hotplug.h @@ -16,8 +16,6 @@ /** * struct hotplug_slot_ops -the callbacks that the hotplug pci core can use - * @owner: The module owner of this structure - * @mod_name: The module name (KBUILD_MODNAME) of this structure * @enable_slot: Called when the user wants to enable a specific pci slot * @disable_slot: Called when the user wants to disable a specific pci slot * @set_attention_status: Called to set the specific slot's attention LED to @@ -25,17 +23,9 @@ * @hardware_test: Called to run a specified hardware test on the specified * slot. * @get_power_status: Called to get the current power status of a slot. - * If this field is NULL, the value passed in the struct hotplug_slot_info - * will be used when this value is requested by a user. * @get_attention_status: Called to get the current attention status of a slot. - * If this field is NULL, the value passed in the struct hotplug_slot_info - * will be used when this value is requested by a user. 
* @get_latch_status: Called to get the current latch status of a slot. - * If this field is NULL, the value passed in the struct hotplug_slot_info - * will be used when this value is requested by a user. * @get_adapter_status: Called to get see if an adapter is present in the slot or not. - * If this field is NULL, the value passed in the struct hotplug_slot_info - * will be used when this value is requested by a user. * @reset_slot: Optional interface to allow override of a bus reset for the * slot for cases where a secondary bus reset can result in spurious * hotplug events or where a slot can be reset independent of the bus. @@ -46,8 +36,6 @@ * set an LED, enable / disable power, etc.) */ struct hotplug_slot_ops { - struct module *owner; - const char *mod_name; int (*enable_slot) (struct hotplug_slot *slot); int (*disable_slot) (struct hotplug_slot *slot); int (*set_attention_status) (struct hotplug_slot *slot, u8 value); @@ -60,37 +48,19 @@ struct hotplug_slot_ops { }; /** - * struct hotplug_slot_info - used to notify the hotplug pci core of the state of the slot - * @power_status: if power is enabled or not (1/0) - * @attention_status: if the attention light is enabled or not (1/0) - * @latch_status: if the latch (if any) is open or closed (1/0) - * @adapter_status: if there is a pci board present in the slot or not (1/0) - * - * Used to notify the hotplug pci core of the status of a specific slot. - */ -struct hotplug_slot_info { - u8 power_status; - u8 attention_status; - u8 latch_status; - u8 adapter_status; -}; - -/** * struct hotplug_slot - used to register a physical slot with the hotplug pci core * @ops: pointer to the &struct hotplug_slot_ops to be used for this slot - * @info: pointer to the &struct hotplug_slot_info for the initial values for - * this slot. - * @private: used by the hotplug pci controller driver to store whatever it - * needs. + * @owner: The module owner of this structure + * @mod_name: The module name (KBUILD_MODNAME) of this structure */ struct hotplug_slot { - struct hotplug_slot_ops *ops; - struct hotplug_slot_info *info; - void *private; + const struct hotplug_slot_ops *ops; /* Variables below this are for use only by the hotplug pci core. 
*/ struct list_head slot_list; struct pci_slot *pci_slot; + struct module *owner; + const char *mod_name; }; static inline const char *hotplug_slot_name(const struct hotplug_slot *slot) @@ -110,9 +80,6 @@ void pci_hp_del(struct hotplug_slot *slot); void pci_hp_destroy(struct hotplug_slot *slot); void pci_hp_deregister(struct hotplug_slot *slot); -int __must_check pci_hp_change_slot_info(struct hotplug_slot *slot, - struct hotplug_slot_info *info); - /* use a define to avoid include chaining to get THIS_MODULE & friends */ #define pci_hp_register(slot, pbus, devnr, name) \ __pci_hp_register(slot, pbus, devnr, name, THIS_MODULE, KBUILD_MODNAME) diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index d157983b84cf..69f0abe1ba1a 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h @@ -117,6 +117,10 @@ #define PCI_CLASS_SERIAL_USB_DEVICE 0x0c03fe #define PCI_CLASS_SERIAL_FIBER 0x0c04 #define PCI_CLASS_SERIAL_SMBUS 0x0c05 +#define PCI_CLASS_SERIAL_IPMI 0x0c07 +#define PCI_CLASS_SERIAL_IPMI_SMIC 0x0c0700 +#define PCI_CLASS_SERIAL_IPMI_KCS 0x0c0701 +#define PCI_CLASS_SERIAL_IPMI_BT 0x0c0702 #define PCI_BASE_CLASS_WIRELESS 0x0d #define PCI_CLASS_WIRELESS_RF_CONTROLLER 0x0d10 @@ -2539,8 +2543,6 @@ #define PCI_VENDOR_ID_HUAWEI 0x19e5 #define PCI_VENDOR_ID_NETRONOME 0x19ee -#define PCI_DEVICE_ID_NETRONOME_NFP3200 0x3200 -#define PCI_DEVICE_ID_NETRONOME_NFP3240 0x3240 #define PCI_DEVICE_ID_NETRONOME_NFP4000 0x4000 #define PCI_DEVICE_ID_NETRONOME_NFP5000 0x5000 #define PCI_DEVICE_ID_NETRONOME_NFP6000 0x6000 @@ -2561,6 +2563,8 @@ #define PCI_VENDOR_ID_AMAZON 0x1d0f +#define PCI_VENDOR_ID_HYGON 0x1d94 + #define PCI_VENDOR_ID_TEKRAM 0x1de1 #define PCI_DEVICE_ID_TEKRAM_DC290 0xdc29 diff --git a/include/linux/percpu-refcount.h b/include/linux/percpu-refcount.h index 009cdf3d65b6..b297cd1cd4f1 100644 --- a/include/linux/percpu-refcount.h +++ b/include/linux/percpu-refcount.h @@ -108,6 +108,7 @@ void percpu_ref_switch_to_atomic_sync(struct percpu_ref *ref); void percpu_ref_switch_to_percpu(struct percpu_ref *ref); void percpu_ref_kill_and_confirm(struct percpu_ref *ref, percpu_ref_func_t *confirm_kill); +void percpu_ref_resurrect(struct percpu_ref *ref); void percpu_ref_reinit(struct percpu_ref *ref); /** diff --git a/include/linux/perf/arm_pmu.h b/include/linux/perf/arm_pmu.h index 10f92e1d8e7b..bf309ff6f244 100644 --- a/include/linux/perf/arm_pmu.h +++ b/include/linux/perf/arm_pmu.h @@ -99,6 +99,7 @@ struct arm_pmu { void (*stop)(struct arm_pmu *); void (*reset)(void *); int (*map_event)(struct perf_event *event); + int (*filter_match)(struct perf_event *event); int num_events; bool secure_access; /* 32-bit ARM only */ #define ARMV8_PMUV3_MAX_COMMON_EVENTS 0x40 diff --git a/include/linux/pfn_t.h b/include/linux/pfn_t.h index 21713dc14ce2..7bb77850c65a 100644 --- a/include/linux/pfn_t.h +++ b/include/linux/pfn_t.h @@ -9,8 +9,10 @@ * PFN_SG_LAST - pfn references a page and is the last scatterlist entry * PFN_DEV - pfn is not covered by system memmap by default * PFN_MAP - pfn has a dynamic page mapping established by a device driver + * PFN_SPECIAL - for CONFIG_FS_DAX_LIMITED builds to allow XIP, but not + * get_user_pages */ -#define PFN_FLAGS_MASK (((u64) ~PAGE_MASK) << (BITS_PER_LONG_LONG - PAGE_SHIFT)) +#define PFN_FLAGS_MASK (((u64) (~PAGE_MASK)) << (BITS_PER_LONG_LONG - PAGE_SHIFT)) #define PFN_SG_CHAIN (1ULL << (BITS_PER_LONG_LONG - 1)) #define PFN_SG_LAST (1ULL << (BITS_PER_LONG_LONG - 2)) #define PFN_DEV (1ULL << (BITS_PER_LONG_LONG - 3)) diff --git a/include/linux/phy.h 
b/include/linux/phy.h index cd6f637cbbfb..3ea87f774a76 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h @@ -19,6 +19,7 @@ #include <linux/compiler.h> #include <linux/spinlock.h> #include <linux/ethtool.h> +#include <linux/linkmode.h> #include <linux/mdio.h> #include <linux/mii.h> #include <linux/module.h> @@ -41,13 +42,21 @@ #define PHY_1000BT_FEATURES (SUPPORTED_1000baseT_Half | \ SUPPORTED_1000baseT_Full) -#define PHY_BASIC_FEATURES (PHY_10BT_FEATURES | \ - PHY_100BT_FEATURES | \ - PHY_DEFAULT_FEATURES) - -#define PHY_GBIT_FEATURES (PHY_BASIC_FEATURES | \ - PHY_1000BT_FEATURES) - +extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_basic_features) __ro_after_init; +extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_basic_t1_features) __ro_after_init; +extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_gbit_features) __ro_after_init; +extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_gbit_fibre_features) __ro_after_init; +extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_gbit_all_ports_features) __ro_after_init; +extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_10gbit_features) __ro_after_init; +extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_10gbit_full_features) __ro_after_init; + +#define PHY_BASIC_FEATURES ((unsigned long *)&phy_basic_features) +#define PHY_BASIC_T1_FEATURES ((unsigned long *)&phy_basic_t1_features) +#define PHY_GBIT_FEATURES ((unsigned long *)&phy_gbit_features) +#define PHY_GBIT_FIBRE_FEATURES ((unsigned long *)&phy_gbit_fibre_features) +#define PHY_GBIT_ALL_PORTS_FEATURES ((unsigned long *)&phy_gbit_all_ports_features) +#define PHY_10GBIT_FEATURES ((unsigned long *)&phy_10gbit_features) +#define PHY_10GBIT_FULL_FEATURES ((unsigned long *)&phy_10gbit_full_features) /* * Set phydev->irq to PHY_POLL if interrupts are not supported, @@ -509,7 +518,7 @@ struct phy_driver { u32 phy_id; char *name; u32 phy_id_mask; - u32 features; + const unsigned long * const features; u32 flags; const void *driver_data; @@ -967,6 +976,12 @@ static inline void phy_device_reset(struct phy_device *phydev, int value) #define phydev_err(_phydev, format, args...) \ dev_err(&_phydev->mdio.dev, format, ##args) +#define phydev_info(_phydev, format, args...) \ + dev_info(&_phydev->mdio.dev, format, ##args) + +#define phydev_warn(_phydev, format, args...) \ + dev_warn(&_phydev->mdio.dev, format, ##args) + #define phydev_dbg(_phydev, format, args...) 
\ dev_dbg(&_phydev->mdio.dev, format, ##args) @@ -1039,7 +1054,7 @@ void phy_change_work(struct work_struct *work); void phy_mac_interrupt(struct phy_device *phydev); void phy_start_machine(struct phy_device *phydev); void phy_stop_machine(struct phy_device *phydev); -void phy_trigger_machine(struct phy_device *phydev, bool sync); +void phy_trigger_machine(struct phy_device *phydev); int phy_ethtool_sset(struct phy_device *phydev, struct ethtool_cmd *cmd); void phy_ethtool_ksettings_get(struct phy_device *phydev, struct ethtool_link_ksettings *cmd); @@ -1049,6 +1064,14 @@ int phy_mii_ioctl(struct phy_device *phydev, struct ifreq *ifr, int cmd); int phy_start_interrupts(struct phy_device *phydev); void phy_print_status(struct phy_device *phydev); int phy_set_max_speed(struct phy_device *phydev, u32 max_speed); +void phy_remove_link_mode(struct phy_device *phydev, u32 link_mode); +void phy_support_sym_pause(struct phy_device *phydev); +void phy_support_asym_pause(struct phy_device *phydev); +void phy_set_sym_pause(struct phy_device *phydev, bool rx, bool tx, + bool autoneg); +void phy_set_asym_pause(struct phy_device *phydev, bool rx, bool tx); +bool phy_validate_pause(struct phy_device *phydev, + struct ethtool_pauseparam *pp); int phy_register_fixup(const char *bus_id, u32 phy_uid, u32 phy_uid_mask, int (*run)(struct phy_device *)); diff --git a/include/linux/phy/phy-qcom-ufs.h b/include/linux/phy/phy-qcom-ufs.h deleted file mode 100644 index 0a2c18a9771d..000000000000 --- a/include/linux/phy/phy-qcom-ufs.h +++ /dev/null @@ -1,38 +0,0 @@ -/* - * Copyright (c) 2013-2015, Linux Foundation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 and - * only version 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - */ - -#ifndef PHY_QCOM_UFS_H_ -#define PHY_QCOM_UFS_H_ - -#include "phy.h" - -/** - * ufs_qcom_phy_enable_dev_ref_clk() - Enable the device - * ref clock. - * @phy: reference to a generic phy. - */ -void ufs_qcom_phy_enable_dev_ref_clk(struct phy *phy); - -/** - * ufs_qcom_phy_disable_dev_ref_clk() - Disable the device - * ref clock. - * @phy: reference to a generic phy. 
- */ -void ufs_qcom_phy_disable_dev_ref_clk(struct phy *phy); - -int ufs_qcom_phy_set_tx_lane_enable(struct phy *phy, u32 tx_lanes); -void ufs_qcom_phy_save_controller_version(struct phy *phy, - u8 major, u16 minor, u16 step); - -#endif /* PHY_QCOM_UFS_H_ */ diff --git a/include/linux/phy/phy.h b/include/linux/phy/phy.h index 9713aebdd348..03b319f89a34 100644 --- a/include/linux/phy/phy.h +++ b/include/linux/phy/phy.h @@ -37,9 +37,11 @@ enum phy_mode { PHY_MODE_USB_OTG, PHY_MODE_SGMII, PHY_MODE_2500SGMII, + PHY_MODE_QSGMII, PHY_MODE_10GKR, PHY_MODE_UFS_HS_A, PHY_MODE_UFS_HS_B, + PHY_MODE_PCIE, }; /** diff --git a/include/linux/platform_data/dma-ep93xx.h b/include/linux/platform_data/dma-ep93xx.h index f8f1f6b952a6..eb9805bb3fe8 100644 --- a/include/linux/platform_data/dma-ep93xx.h +++ b/include/linux/platform_data/dma-ep93xx.h @@ -85,7 +85,7 @@ static inline enum dma_transfer_direction ep93xx_dma_chan_direction(struct dma_chan *chan) { if (!ep93xx_dma_chan_is_m2p(chan)) - return DMA_NONE; + return DMA_TRANS_NONE; /* even channels are for TX, odd for RX */ return (chan->chan_id % 2 == 0) ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM; diff --git a/include/linux/platform_data/dma-mcf-edma.h b/include/linux/platform_data/dma-mcf-edma.h new file mode 100644 index 000000000000..d718ccfa3421 --- /dev/null +++ b/include/linux/platform_data/dma-mcf-edma.h @@ -0,0 +1,38 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Freescale eDMA platform data, ColdFire SoC's family. + * + * Copyright (c) 2017 Angelo Dureghello <angelo@sysam.it> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef __LINUX_PLATFORM_DATA_MCF_EDMA_H__ +#define __LINUX_PLATFORM_DATA_MCF_EDMA_H__ + +struct dma_slave_map; + +bool mcf_edma_filter_fn(struct dma_chan *chan, void *param); + +#define MCF_EDMA_FILTER_PARAM(ch) ((void *)ch) + +/** + * struct mcf_edma_platform_data - platform specific data for eDMA engine + * + * @ver The eDMA module version. + * @dma_channels The number of eDMA channels. + */ +struct mcf_edma_platform_data { + int dma_channels; + const struct dma_slave_map *slave_map; + int slavecnt; +}; + +#endif /* __LINUX_PLATFORM_DATA_MCF_EDMA_H__ */ diff --git a/include/linux/platform_data/ehci-sh.h b/include/linux/platform_data/ehci-sh.h index 5c15a738e116..219bd79dabfc 100644 --- a/include/linux/platform_data/ehci-sh.h +++ b/include/linux/platform_data/ehci-sh.h @@ -1,21 +1,9 @@ -/* +/* SPDX-License-Identifier: GPL-2.0 + * * EHCI SuperH driver platform data * * Copyright (C) 2012 Nobuhiro Iwamatsu <nobuhiro.iwamatsu.yj@renesas.com> * Copyright (C) 2012 Renesas Solutions Corp. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; version 2 of the License. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #ifndef __USB_EHCI_SH_H diff --git a/include/linux/platform_data/gpio-davinci.h b/include/linux/platform_data/gpio-davinci.h index 57a5a35e0073..f92a47e18034 100644 --- a/include/linux/platform_data/gpio-davinci.h +++ b/include/linux/platform_data/gpio-davinci.h @@ -16,46 +16,12 @@ #ifndef __DAVINCI_GPIO_PLATFORM_H #define __DAVINCI_GPIO_PLATFORM_H -#include <linux/io.h> -#include <linux/spinlock.h> - -#include <asm-generic/gpio.h> - -#define MAX_REGS_BANKS 5 -#define MAX_INT_PER_BANK 32 - struct davinci_gpio_platform_data { u32 ngpio; u32 gpio_unbanked; }; -struct davinci_gpio_irq_data { - void __iomem *regs; - struct davinci_gpio_controller *chip; - int bank_num; -}; - -struct davinci_gpio_controller { - struct gpio_chip chip; - struct irq_domain *irq_domain; - /* Serialize access to GPIO registers */ - spinlock_t lock; - void __iomem *regs[MAX_REGS_BANKS]; - int gpio_unbanked; - int irqs[MAX_INT_PER_BANK]; - unsigned int base; -}; - -/* - * basic gpio routines - */ -#define GPIO(X) (X) /* 0 <= X <= (DAVINCI_N_GPIO - 1) */ - /* Convert GPIO signal to GPIO pin number */ #define GPIO_TO_PIN(bank, gpio) (16 * (bank) + (gpio)) -static inline u32 __gpio_mask(unsigned gpio) -{ - return 1 << (gpio % 32); -} #endif diff --git a/include/linux/platform_data/gpio-omap.h b/include/linux/platform_data/gpio-omap.h index 8612855691b2..8485c6a9a383 100644 --- a/include/linux/platform_data/gpio-omap.h +++ b/include/linux/platform_data/gpio-omap.h @@ -197,23 +197,12 @@ struct omap_gpio_platform_data { bool is_mpuio; /* whether the bank is of type MPUIO */ u32 non_wakeup_gpios; + u32 quirks; /* Version specific quirks mask */ + struct omap_gpio_reg_offs *regs; /* Return context loss count due to PM states changing */ int (*get_context_loss_count)(struct device *dev); }; -#if IS_BUILTIN(CONFIG_GPIO_OMAP) -extern void omap2_gpio_prepare_for_idle(int off_mode); -extern void omap2_gpio_resume_after_idle(void); -#else -static inline void omap2_gpio_prepare_for_idle(int off_mode) -{ -} - -static inline void omap2_gpio_resume_after_idle(void) -{ -} -#endif - #endif diff --git a/include/linux/platform_data/gpio-ts5500.h b/include/linux/platform_data/gpio-ts5500.h deleted file mode 100644 index b10d11c9bb49..000000000000 --- a/include/linux/platform_data/gpio-ts5500.h +++ /dev/null @@ -1,27 +0,0 @@ -/* - * GPIO (DIO) header for Technologic Systems TS-5500 - * - * Copyright (c) 2012 Savoir-faire Linux Inc. - * Vivien Didelot <vivien.didelot@savoirfairelinux.com> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ - -#ifndef _PDATA_GPIO_TS5500_H -#define _PDATA_GPIO_TS5500_H - -/** - * struct ts5500_dio_platform_data - TS-5500 pin block configuration - * @base: The GPIO base number to use. - * @strap: The only pin connected to an interrupt in a block is input-only. - * If you need a bidirectional line which can trigger an IRQ, you - * may strap it with an in/out pin. This flag indicates this case. 
- */ -struct ts5500_dio_platform_data { - int base; - bool strap; -}; - -#endif /* _PDATA_GPIO_TS5500_H */ diff --git a/include/linux/platform_data/hsmmc-omap.h b/include/linux/platform_data/hsmmc-omap.h index 73d9098ada2d..85da11916bd5 100644 --- a/include/linux/platform_data/hsmmc-omap.h +++ b/include/linux/platform_data/hsmmc-omap.h @@ -70,9 +70,6 @@ struct omap_hsmmc_platform_data { /* string specifying a particular variant of hardware */ char *version; - int gpio_cd; /* gpio (card detect) */ - int gpio_cod; /* gpio (cover detect) */ - int gpio_wp; /* gpio (write protect) */ /* if we have special card, init it using this callback */ void (*init_card)(struct mmc_card *card); diff --git a/include/linux/platform_data/mv_usb.h b/include/linux/platform_data/mv_usb.h index 98b7925f1a2d..c0f624aca81c 100644 --- a/include/linux/platform_data/mv_usb.h +++ b/include/linux/platform_data/mv_usb.h @@ -48,6 +48,5 @@ struct mv_usb_platform_data { int (*phy_init)(void __iomem *regbase); void (*phy_deinit)(void __iomem *regbase); int (*set_vbus)(unsigned int vbus); - int (*private_init)(void __iomem *opregs, void __iomem *phyregs); }; #endif diff --git a/include/linux/platform_data/pxa_sdhci.h b/include/linux/platform_data/pxa_sdhci.h index 9e20c2fb4ffd..4977c06d8a86 100644 --- a/include/linux/platform_data/pxa_sdhci.h +++ b/include/linux/platform_data/pxa_sdhci.h @@ -33,8 +33,6 @@ * 1: choose feedback clk + delay value * 2: choose internal clk * @clk_delay_enable: enable clk_delay or not, used on pxa910 - * @ext_cd_gpio: gpio pin used for external CD line - * @ext_cd_gpio_invert: invert values for external CD gpio line * @max_speed: the maximum speed supported * @host_caps: Standard MMC host capabilities bit field. * @quirks: quirks of platfrom @@ -46,8 +44,6 @@ struct sdhci_pxa_platdata { unsigned int clk_delay_cycles; unsigned int clk_delay_sel; bool clk_delay_enable; - unsigned int ext_cd_gpio; - bool ext_cd_gpio_invert; unsigned int max_speed; u32 host_caps; u32 host_caps2; diff --git a/include/linux/platform_data/spi-davinci.h b/include/linux/platform_data/spi-davinci.h index f4edcb03c40c..0638fb6353bc 100644 --- a/include/linux/platform_data/spi-davinci.h +++ b/include/linux/platform_data/spi-davinci.h @@ -36,9 +36,6 @@ enum { * @num_chipselect: number of chipselects supported by this SPI master * @intr_line: interrupt line used to connect the SPI IP to the ARM interrupt * controller withn the SoC. Possible values are 0 and 1. - * @chip_sel: list of GPIOs which can act as chip-selects for the SPI. - * SPI_INTERN_CS denotes internal SPI chip-select. Not necessary - * to populate if all chip-selects are internal. * @cshold_bug: set this to true if the SPI controller on your chip requires * a write to CSHOLD bit in between transfers (like in DM355). 
 * @dma_event_q: DMA event queue to use if SPI_IO_TYPE_DMA is used for any @@ -48,7 +45,6 @@ struct davinci_spi_platform_data { u8 version; u8 num_chipselect; u8 intr_line; - u8 *chip_sel; u8 prescaler_limit; bool cshold_bug; enum dma_event_q dma_event_q; diff --git a/include/linux/platform_device.h b/include/linux/platform_device.h index 1a9f38f27f65..c7c081dc6034 100644 --- a/include/linux/platform_device.h +++ b/include/linux/platform_device.h @@ -40,6 +40,7 @@ struct platform_device { #define platform_get_device_id(pdev) ((pdev)->id_entry) +#define dev_is_platform(dev) ((dev)->bus == &platform_bus_type) #define to_platform_device(x) container_of((x), struct platform_device, dev) extern int platform_device_register(struct platform_device *); diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h index 776c546d581a..3b5d7280e52e 100644 --- a/include/linux/pm_domain.h +++ b/include/linux/pm_domain.h @@ -17,11 +17,36 @@ #include <linux/notifier.h> #include <linux/spinlock.h> -/* Defines used for the flags field in the struct generic_pm_domain */ -#define GENPD_FLAG_PM_CLK (1U << 0) /* PM domain uses PM clk */ -#define GENPD_FLAG_IRQ_SAFE (1U << 1) /* PM domain operates in atomic */ -#define GENPD_FLAG_ALWAYS_ON (1U << 2) /* PM domain is always powered on */ -#define GENPD_FLAG_ACTIVE_WAKEUP (1U << 3) /* Keep devices active if wakeup */ +/* + * Flags to control the behaviour of a genpd. + * + * These flags may be set in the struct generic_pm_domain's flags field by a + * genpd backend driver. The flags must be set before it calls pm_genpd_init(), + * which initializes a genpd. + * + * GENPD_FLAG_PM_CLK: Instructs genpd to use the PM clk framework + * while powering on/off attached devices. + * + * GENPD_FLAG_IRQ_SAFE: This informs genpd that its backend callbacks, + * ->power_on|off(), don't sleep. Hence, these + * can be invoked from within atomic context, which + * enables genpd to power on/off the PM domain, + * even when pm_runtime_is_irq_safe() returns true, + * for any of its attached devices. Note that a + * genpd having this flag set requires its + * master domains to also have it set. + * + * GENPD_FLAG_ALWAYS_ON: Instructs genpd to always keep the PM domain + * powered on. + * + * GENPD_FLAG_ACTIVE_WAKEUP: Instructs genpd to keep the PM domain powered + * on, in case any of its attached devices is used + * in the wakeup path to serve system wakeups.
+ */ +#define GENPD_FLAG_PM_CLK (1U << 0) +#define GENPD_FLAG_IRQ_SAFE (1U << 1) +#define GENPD_FLAG_ALWAYS_ON (1U << 2) +#define GENPD_FLAG_ACTIVE_WAKEUP (1U << 3) enum gpd_status { GPD_STATE_ACTIVE = 0, /* PM domain is active */ diff --git a/include/linux/pm_opp.h b/include/linux/pm_opp.h index 099b31960dec..5d399eeef172 100644 --- a/include/linux/pm_opp.h +++ b/include/linux/pm_opp.h @@ -79,6 +79,7 @@ struct dev_pm_set_opp_data { #if defined(CONFIG_PM_OPP) struct opp_table *dev_pm_opp_get_opp_table(struct device *dev); +struct opp_table *dev_pm_opp_get_opp_table_indexed(struct device *dev, int index); void dev_pm_opp_put_opp_table(struct opp_table *opp_table); unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp); @@ -136,6 +137,11 @@ static inline struct opp_table *dev_pm_opp_get_opp_table(struct device *dev) return ERR_PTR(-ENOTSUPP); } +static inline struct opp_table *dev_pm_opp_get_opp_table_indexed(struct device *dev, int index) +{ + return ERR_PTR(-ENOTSUPP); +} + static inline void dev_pm_opp_put_opp_table(struct opp_table *opp_table) {} static inline unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp) diff --git a/include/linux/pmu.h b/include/linux/pmu.h index 9ac8fc60ad49..52453a24a24f 100644 --- a/include/linux/pmu.h +++ b/include/linux/pmu.h @@ -9,6 +9,7 @@ #ifndef _LINUX_PMU_H #define _LINUX_PMU_H +#include <linux/rtc.h> #include <uapi/linux/pmu.h> @@ -36,6 +37,9 @@ static inline void pmu_resume(void) extern void pmu_enable_irled(int on); +extern time64_t pmu_get_time(void); +extern int pmu_set_rtc_time(struct rtc_time *tm); + extern void pmu_restart(void); extern void pmu_shutdown(void); extern void pmu_unlock(void); diff --git a/include/linux/posix-timers.h b/include/linux/posix-timers.h index ee7e987ea1b4..e96581ca7c9d 100644 --- a/include/linux/posix-timers.h +++ b/include/linux/posix-timers.h @@ -126,5 +126,5 @@ void set_process_cpu_timer(struct task_struct *task, unsigned int clock_idx, void update_rlimit_cpu(struct task_struct *task, unsigned long rlim_new); -void posixtimer_rearm(struct siginfo *info); +void posixtimer_rearm(struct kernel_siginfo *info); #endif diff --git a/include/linux/power/bq27xxx_battery.h b/include/linux/power/bq27xxx_battery.h index d6355f49fbae..507c5e214c42 100644 --- a/include/linux/power/bq27xxx_battery.h +++ b/include/linux/power/bq27xxx_battery.h @@ -24,6 +24,7 @@ enum bq27xxx_chip { BQ27546, BQ27742, BQ27545, /* bq27545 */ + BQ27411, BQ27421, /* bq27421, bq27441, bq27621 */ BQ27425, BQ27426, diff --git a/include/linux/psi.h b/include/linux/psi.h new file mode 100644 index 000000000000..8e0725aac0aa --- /dev/null +++ b/include/linux/psi.h @@ -0,0 +1,53 @@ +#ifndef _LINUX_PSI_H +#define _LINUX_PSI_H + +#include <linux/psi_types.h> +#include <linux/sched.h> + +struct seq_file; +struct css_set; + +#ifdef CONFIG_PSI + +extern bool psi_disabled; + +void psi_init(void); + +void psi_task_change(struct task_struct *task, int clear, int set); + +void psi_memstall_tick(struct task_struct *task, int cpu); +void psi_memstall_enter(unsigned long *flags); +void psi_memstall_leave(unsigned long *flags); + +int psi_show(struct seq_file *s, struct psi_group *group, enum psi_res res); + +#ifdef CONFIG_CGROUPS +int psi_cgroup_alloc(struct cgroup *cgrp); +void psi_cgroup_free(struct cgroup *cgrp); +void cgroup_move_task(struct task_struct *p, struct css_set *to); +#endif + +#else /* CONFIG_PSI */ + +static inline void psi_init(void) {} + +static inline void psi_memstall_enter(unsigned long *flags) {} +static inline void 
psi_memstall_leave(unsigned long *flags) {} + +#ifdef CONFIG_CGROUPS +static inline int psi_cgroup_alloc(struct cgroup *cgrp) +{ + return 0; +} +static inline void psi_cgroup_free(struct cgroup *cgrp) +{ +} +static inline void cgroup_move_task(struct task_struct *p, struct css_set *to) +{ + rcu_assign_pointer(p->cgroups, to); +} +#endif + +#endif /* CONFIG_PSI */ + +#endif /* _LINUX_PSI_H */ diff --git a/include/linux/psi_types.h b/include/linux/psi_types.h new file mode 100644 index 000000000000..2cf422db5d18 --- /dev/null +++ b/include/linux/psi_types.h @@ -0,0 +1,92 @@ +#ifndef _LINUX_PSI_TYPES_H +#define _LINUX_PSI_TYPES_H + +#include <linux/seqlock.h> +#include <linux/types.h> + +#ifdef CONFIG_PSI + +/* Tracked task states */ +enum psi_task_count { + NR_IOWAIT, + NR_MEMSTALL, + NR_RUNNING, + NR_PSI_TASK_COUNTS, +}; + +/* Task state bitmasks */ +#define TSK_IOWAIT (1 << NR_IOWAIT) +#define TSK_MEMSTALL (1 << NR_MEMSTALL) +#define TSK_RUNNING (1 << NR_RUNNING) + +/* Resources that workloads could be stalled on */ +enum psi_res { + PSI_IO, + PSI_MEM, + PSI_CPU, + NR_PSI_RESOURCES, +}; + +/* + * Pressure states for each resource: + * + * SOME: Stalled tasks & working tasks + * FULL: Stalled tasks & no working tasks + */ +enum psi_states { + PSI_IO_SOME, + PSI_IO_FULL, + PSI_MEM_SOME, + PSI_MEM_FULL, + PSI_CPU_SOME, + /* Only per-CPU, to weigh the CPU in the global average: */ + PSI_NONIDLE, + NR_PSI_STATES, +}; + +struct psi_group_cpu { + /* 1st cacheline updated by the scheduler */ + + /* Aggregator needs to know of concurrent changes */ + seqcount_t seq ____cacheline_aligned_in_smp; + + /* States of the tasks belonging to this group */ + unsigned int tasks[NR_PSI_TASK_COUNTS]; + + /* Period time sampling buckets for each state of interest (ns) */ + u32 times[NR_PSI_STATES]; + + /* Time of last task change in this group (rq_clock) */ + u64 state_start; + + /* 2nd cacheline updated by the aggregator */ + + /* Delta detection against the sampling buckets */ + u32 times_prev[NR_PSI_STATES] ____cacheline_aligned_in_smp; +}; + +struct psi_group { + /* Protects data updated during an aggregation */ + struct mutex stat_lock; + + /* Per-cpu task state & time tracking */ + struct psi_group_cpu __percpu *pcpu; + + /* Periodic aggregation state */ + u64 total_prev[NR_PSI_STATES - 1]; + u64 last_update; + u64 next_update; + struct delayed_work clock_work; + + /* Total stall times and sampled pressure averages */ + u64 total[NR_PSI_STATES - 1]; + unsigned long avg[NR_PSI_STATES - 1][3]; +}; + +#else /* CONFIG_PSI */ + +struct psi_group { }; + +#endif /* CONFIG_PSI */ + +#endif /* _LINUX_PSI_TYPES_H */ diff --git a/include/linux/pstore_ram.h b/include/linux/pstore_ram.h index e6d226464838..602d64725222 100644 --- a/include/linux/pstore_ram.h +++ b/include/linux/pstore_ram.h @@ -46,6 +46,7 @@ struct persistent_ram_zone { phys_addr_t paddr; size_t size; void *vaddr; + char *label; struct persistent_ram_buffer *buffer; size_t buffer_size; u32 flags; @@ -65,7 +66,7 @@ struct persistent_ram_zone { struct persistent_ram_zone *persistent_ram_new(phys_addr_t start, size_t size, u32 sig, struct persistent_ram_ecc_info *ecc_info, - unsigned int memtype, u32 flags); + unsigned int memtype, u32 flags, char *label); void persistent_ram_free(struct persistent_ram_zone *prz); void persistent_ram_zap(struct persistent_ram_zone *prz); diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h index 4f36431c380b..6c2ffed907f5 100644 --- a/include/linux/ptrace.h +++ b/include/linux/ptrace.h @@ -62,14 +62,17 @@ extern 
void exit_ptrace(struct task_struct *tracer, struct list_head *dead); #define PTRACE_MODE_READ 0x01 #define PTRACE_MODE_ATTACH 0x02 #define PTRACE_MODE_NOAUDIT 0x04 -#define PTRACE_MODE_FSCREDS 0x08 -#define PTRACE_MODE_REALCREDS 0x10 +#define PTRACE_MODE_FSCREDS 0x08 +#define PTRACE_MODE_REALCREDS 0x10 +#define PTRACE_MODE_SCHED 0x20 +#define PTRACE_MODE_IBPB 0x40 /* shorthands for READ/ATTACH and FSCREDS/REALCREDS combinations */ #define PTRACE_MODE_READ_FSCREDS (PTRACE_MODE_READ | PTRACE_MODE_FSCREDS) #define PTRACE_MODE_READ_REALCREDS (PTRACE_MODE_READ | PTRACE_MODE_REALCREDS) #define PTRACE_MODE_ATTACH_FSCREDS (PTRACE_MODE_ATTACH | PTRACE_MODE_FSCREDS) #define PTRACE_MODE_ATTACH_REALCREDS (PTRACE_MODE_ATTACH | PTRACE_MODE_REALCREDS) +#define PTRACE_MODE_SPEC_IBPB (PTRACE_MODE_ATTACH_REALCREDS | PTRACE_MODE_IBPB) /** * ptrace_may_access - check whether the caller is permitted to access @@ -87,6 +90,20 @@ extern void exit_ptrace(struct task_struct *tracer, struct list_head *dead); */ extern bool ptrace_may_access(struct task_struct *task, unsigned int mode); +/** + * ptrace_may_access_sched - check whether the caller is permitted to access + * a target task. + * @task: target task + * @mode: selects type of access and caller credentials + * + * Returns true on success, false on denial. + * + * Similar to ptrace_may_access(). Only to be called from context switch + * code. Does not call into audit and the regular LSM hooks due to locking + * constraints. + */ +extern bool ptrace_may_access_sched(struct task_struct *task, unsigned int mode); + static inline int ptrace_reparented(struct task_struct *child) { return !same_thread_group(child->real_parent, child->parent); @@ -336,14 +353,19 @@ static inline void user_enable_block_step(struct task_struct *task) extern void user_enable_block_step(struct task_struct *); #endif /* arch_has_block_step */ -#ifdef ARCH_HAS_USER_SINGLE_STEP_INFO -extern void user_single_step_siginfo(struct task_struct *tsk, - struct pt_regs *regs, siginfo_t *info); +#ifdef ARCH_HAS_USER_SINGLE_STEP_REPORT +extern void user_single_step_report(struct pt_regs *regs); #else -static inline void user_single_step_siginfo(struct task_struct *tsk, - struct pt_regs *regs, siginfo_t *info) +static inline void user_single_step_report(struct pt_regs *regs) { - info->si_signo = SIGTRAP; + kernel_siginfo_t info; + clear_siginfo(&info); + info.si_signo = SIGTRAP; + info.si_errno = 0; + info.si_code = SI_USER; + info.si_pid = 0; + info.si_uid = 0; + force_sig_info(info.si_signo, &info, current); } #endif diff --git a/include/linux/pxa2xx_ssp.h b/include/linux/pxa2xx_ssp.h index 13b4244d44c1..979087e021f3 100644 --- a/include/linux/pxa2xx_ssp.h +++ b/include/linux/pxa2xx_ssp.h @@ -196,6 +196,7 @@ enum pxa_ssp_type { PXA27x_SSP, PXA3xx_SSP, PXA168_SSP, + MMP2_SSP, PXA910_SSP, CE4100_SSP, QUARK_X1000_SSP, @@ -217,7 +218,7 @@ struct ssp_device { const char *label; int port_id; - int type; + enum pxa_ssp_type type; int use_count; int irq; diff --git a/include/linux/qcom-geni-se.h b/include/linux/qcom-geni-se.h index 5d6144977828..3bcd67fd5548 100644 --- a/include/linux/qcom-geni-se.h +++ b/include/linux/qcom-geni-se.h @@ -225,19 +225,14 @@ struct geni_se { #define HW_VER_MINOR_SHFT 16 #define HW_VER_STEP_MASK GENMASK(15, 0) +#define GENI_SE_VERSION_MAJOR(ver) ((ver & HW_VER_MAJOR_MASK) >> HW_VER_MAJOR_SHFT) +#define GENI_SE_VERSION_MINOR(ver) ((ver & HW_VER_MINOR_MASK) >> HW_VER_MINOR_SHFT) +#define GENI_SE_VERSION_STEP(ver) (ver & HW_VER_STEP_MASK) + #if IS_ENABLED(CONFIG_QCOM_GENI_SE) u32 
geni_se_get_qup_hw_version(struct geni_se *se); -#define geni_se_get_wrapper_version(se, major, minor, step) do { \ - u32 ver; \ -\ - ver = geni_se_get_qup_hw_version(se); \ - major = (ver & HW_VER_MAJOR_MASK) >> HW_VER_MAJOR_SHFT; \ - minor = (ver & HW_VER_MINOR_MASK) >> HW_VER_MINOR_SHFT; \ - step = version & HW_VER_STEP_MASK; \ -} while (0) - /** * geni_se_read_proto() - Read the protocol configured for a serial engine * @se: Pointer to the concerned serial engine. diff --git a/include/linux/qcom_scm.h b/include/linux/qcom_scm.h index 5d65521260b3..06996ad4f2bc 100644 --- a/include/linux/qcom_scm.h +++ b/include/linux/qcom_scm.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2010-2015, The Linux Foundation. All rights reserved. +/* Copyright (c) 2010-2015, 2018, The Linux Foundation. All rights reserved. * Copyright (C) 2015 Linaro Ltd. * * This program is free software; you can redistribute it and/or modify @@ -33,6 +33,8 @@ struct qcom_scm_vmperm { #define QCOM_SCM_VMID_HLOS 0x3 #define QCOM_SCM_VMID_MSS_MSA 0xF +#define QCOM_SCM_VMID_WLAN 0x18 +#define QCOM_SCM_VMID_WLAN_CE 0x19 #define QCOM_SCM_PERM_READ 0x4 #define QCOM_SCM_PERM_WRITE 0x2 #define QCOM_SCM_PERM_EXEC 0x1 diff --git a/include/linux/qed/common_hsi.h b/include/linux/qed/common_hsi.h index 0081fa6d1268..03f59a28fefd 100644 --- a/include/linux/qed/common_hsi.h +++ b/include/linux/qed/common_hsi.h @@ -110,7 +110,7 @@ #define FW_MAJOR_VERSION 8 #define FW_MINOR_VERSION 37 -#define FW_REVISION_VERSION 2 +#define FW_REVISION_VERSION 7 #define FW_ENGINEERING_VERSION 0 /***********************/ @@ -931,12 +931,12 @@ struct db_rdma_dpm_params { #define DB_RDMA_DPM_PARAMS_WQE_SIZE_SHIFT 16 #define DB_RDMA_DPM_PARAMS_RESERVED0_MASK 0x1 #define DB_RDMA_DPM_PARAMS_RESERVED0_SHIFT 27 -#define DB_RDMA_DPM_PARAMS_COMPLETION_FLG_MASK 0x1 -#define DB_RDMA_DPM_PARAMS_COMPLETION_FLG_SHIFT 28 +#define DB_RDMA_DPM_PARAMS_ACK_REQUEST_MASK 0x1 +#define DB_RDMA_DPM_PARAMS_ACK_REQUEST_SHIFT 28 #define DB_RDMA_DPM_PARAMS_S_FLG_MASK 0x1 #define DB_RDMA_DPM_PARAMS_S_FLG_SHIFT 29 -#define DB_RDMA_DPM_PARAMS_RESERVED1_MASK 0x1 -#define DB_RDMA_DPM_PARAMS_RESERVED1_SHIFT 30 +#define DB_RDMA_DPM_PARAMS_COMPLETION_FLG_MASK 0x1 +#define DB_RDMA_DPM_PARAMS_COMPLETION_FLG_SHIFT 30 #define DB_RDMA_DPM_PARAMS_CONN_TYPE_IS_IWARP_MASK 0x1 #define DB_RDMA_DPM_PARAMS_CONN_TYPE_IS_IWARP_SHIFT 31 }; diff --git a/include/linux/qed/iscsi_common.h b/include/linux/qed/iscsi_common.h index b34c573f2b30..66aba505ec56 100644 --- a/include/linux/qed/iscsi_common.h +++ b/include/linux/qed/iscsi_common.h @@ -896,7 +896,7 @@ struct e4_ustorm_iscsi_task_ag_ctx { __le32 exp_cont_len; __le32 total_data_acked; __le32 exp_data_acked; - u8 next_tid_valid; + u8 byte2; u8 byte3; __le16 word1; __le16 next_tid; diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h index 8cd34645e892..a47321a0d572 100644 --- a/include/linux/qed/qed_if.h +++ b/include/linux/qed/qed_if.h @@ -667,14 +667,35 @@ enum qed_link_mode_bits { QED_LM_Autoneg_BIT = BIT(1), QED_LM_Asym_Pause_BIT = BIT(2), QED_LM_Pause_BIT = BIT(3), - QED_LM_1000baseT_Half_BIT = BIT(4), - QED_LM_1000baseT_Full_BIT = BIT(5), + QED_LM_1000baseT_Full_BIT = BIT(4), + QED_LM_10000baseT_Full_BIT = BIT(5), QED_LM_10000baseKR_Full_BIT = BIT(6), - QED_LM_25000baseKR_Full_BIT = BIT(7), - QED_LM_40000baseLR4_Full_BIT = BIT(8), - QED_LM_50000baseKR2_Full_BIT = BIT(9), - QED_LM_100000baseKR4_Full_BIT = BIT(10), - QED_LM_COUNT = 11 + QED_LM_20000baseKR2_Full_BIT = BIT(7), + QED_LM_25000baseKR_Full_BIT = BIT(8), + QED_LM_40000baseLR4_Full_BIT = 
BIT(9), + QED_LM_50000baseKR2_Full_BIT = BIT(10), + QED_LM_100000baseKR4_Full_BIT = BIT(11), + QED_LM_2500baseX_Full_BIT = BIT(12), + QED_LM_Backplane_BIT = BIT(13), + QED_LM_1000baseKX_Full_BIT = BIT(14), + QED_LM_10000baseKX4_Full_BIT = BIT(15), + QED_LM_10000baseR_FEC_BIT = BIT(16), + QED_LM_40000baseKR4_Full_BIT = BIT(17), + QED_LM_40000baseCR4_Full_BIT = BIT(18), + QED_LM_40000baseSR4_Full_BIT = BIT(19), + QED_LM_25000baseCR_Full_BIT = BIT(20), + QED_LM_25000baseSR_Full_BIT = BIT(21), + QED_LM_50000baseCR2_Full_BIT = BIT(22), + QED_LM_100000baseSR4_Full_BIT = BIT(23), + QED_LM_100000baseCR4_Full_BIT = BIT(24), + QED_LM_100000baseLR4_ER4_Full_BIT = BIT(25), + QED_LM_50000baseSR2_Full_BIT = BIT(26), + QED_LM_1000baseX_Full_BIT = BIT(27), + QED_LM_10000baseCR_Full_BIT = BIT(28), + QED_LM_10000baseSR_Full_BIT = BIT(29), + QED_LM_10000baseLR_Full_BIT = BIT(30), + QED_LM_10000baseLRM_Full_BIT = BIT(31), + QED_LM_COUNT = 32 }; struct qed_link_params { diff --git a/include/linux/qed/qed_rdma_if.h b/include/linux/qed/qed_rdma_if.h index df4d13f7e191..d15f8e4815e3 100644 --- a/include/linux/qed/qed_rdma_if.h +++ b/include/linux/qed/qed_rdma_if.h @@ -39,15 +39,6 @@ #include <linux/qed/qed_ll2_if.h> #include <linux/qed/rdma_common.h> -enum qed_roce_ll2_tx_dest { - /* Light L2 TX Destination to the Network */ - QED_ROCE_LL2_TX_DEST_NW, - - /* Light L2 TX Destination to the Loopback */ - QED_ROCE_LL2_TX_DEST_LB, - QED_ROCE_LL2_TX_DEST_MAX -}; - #define QED_RDMA_MAX_CNQ_SIZE (0xFFFF) /* rdma interface */ @@ -581,7 +572,7 @@ struct qed_roce_ll2_packet { int n_seg; struct qed_roce_ll2_buffer payload[RDMA_MAX_SGE_PER_SQ_WQE]; int roce_mode; - enum qed_roce_ll2_tx_dest tx_dest; + enum qed_ll2_tx_dest tx_dest; }; enum qed_rdma_type { diff --git a/include/linux/rculist.h b/include/linux/rculist.h index 4786c2235b98..e91ec9ddcd30 100644 --- a/include/linux/rculist.h +++ b/include/linux/rculist.h @@ -182,7 +182,7 @@ static inline void list_replace_rcu(struct list_head *old, * @list: the RCU-protected list to splice * @prev: points to the last element of the existing list * @next: points to the first element of the existing list - * @sync: function to sync: synchronize_rcu(), synchronize_sched(), ... + * @sync: synchronize_rcu, synchronize_rcu_expedited, ... * * The list pointed to by @prev and @next can be RCU-read traversed * concurrently with this function. @@ -240,7 +240,7 @@ static inline void __list_splice_init_rcu(struct list_head *list, * designed for stacks. * @list: the RCU-protected list to splice * @head: the place in the existing list to splice the first list into - * @sync: function to sync: synchronize_rcu(), synchronize_sched(), ... + * @sync: synchronize_rcu, synchronize_rcu_expedited, ... */ static inline void list_splice_init_rcu(struct list_head *list, struct list_head *head, @@ -255,7 +255,7 @@ static inline void list_splice_init_rcu(struct list_head *list, * list, designed for queues. * @list: the RCU-protected list to splice * @head: the place in the existing list to splice the first list into - * @sync: function to sync: synchronize_rcu(), synchronize_sched(), ... + * @sync: synchronize_rcu, synchronize_rcu_expedited, ... */ static inline void list_splice_tail_init_rcu(struct list_head *list, struct list_head *head, @@ -359,13 +359,12 @@ static inline void list_splice_tail_init_rcu(struct list_head *list, * @type: the type of the struct this is embedded in. * @member: the name of the list_head within the struct. 
* - * This primitive may safely run concurrently with the _rcu list-mutation - * primitives such as list_add_rcu(), but requires some implicit RCU - * read-side guarding. One example is running within a special - * exception-time environment where preemption is disabled and where - * lockdep cannot be invoked (in which case updaters must use RCU-sched, - * as in synchronize_sched(), call_rcu_sched(), and friends). Another - * example is when items are added to the list, but never deleted. + * This primitive may safely run concurrently with the _rcu + * list-mutation primitives such as list_add_rcu(), but requires some + * implicit RCU read-side guarding. One example is running within a special + * exception-time environment where preemption is disabled and where lockdep + * cannot be invoked. Another example is when items are added to the list, + * but never deleted. */ #define list_entry_lockless(ptr, type, member) \ container_of((typeof(ptr))READ_ONCE(ptr), type, member) @@ -376,13 +375,12 @@ static inline void list_splice_tail_init_rcu(struct list_head *list, * @head: the head for your list. * @member: the name of the list_struct within the struct. * - * This primitive may safely run concurrently with the _rcu list-mutation - * primitives such as list_add_rcu(), but requires some implicit RCU - * read-side guarding. One example is running within a special - * exception-time environment where preemption is disabled and where - * lockdep cannot be invoked (in which case updaters must use RCU-sched, - * as in synchronize_sched(), call_rcu_sched(), and friends). Another - * example is when items are added to the list, but never deleted. + * This primitive may safely run concurrently with the _rcu + * list-mutation primitives such as list_add_rcu(), but requires some + * implicit RCU read-side guarding. One example is running within a special + * exception-time environment where preemption is disabled and where lockdep + * cannot be invoked. Another example is when items are added to the list, + * but never deleted. 
*/ #define list_for_each_entry_lockless(pos, head, member) \ for (pos = list_entry_lockless((head)->next, typeof(*pos), member); \ diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h index 75e5b393cf44..4db8bcacc51a 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h @@ -48,23 +48,14 @@ #define ulong2long(a) (*(long *)(&(a))) /* Exported common interfaces */ - -#ifdef CONFIG_PREEMPT_RCU void call_rcu(struct rcu_head *head, rcu_callback_t func); -#else /* #ifdef CONFIG_PREEMPT_RCU */ -#define call_rcu call_rcu_sched -#endif /* #else #ifdef CONFIG_PREEMPT_RCU */ - -void call_rcu_bh(struct rcu_head *head, rcu_callback_t func); -void call_rcu_sched(struct rcu_head *head, rcu_callback_t func); -void synchronize_sched(void); void rcu_barrier_tasks(void); +void synchronize_rcu(void); #ifdef CONFIG_PREEMPT_RCU void __rcu_read_lock(void); void __rcu_read_unlock(void); -void synchronize_rcu(void); /* * Defined as a macro as it is a very low level header included from @@ -88,11 +79,6 @@ static inline void __rcu_read_unlock(void) preempt_enable(); } -static inline void synchronize_rcu(void) -{ - synchronize_sched(); -} - static inline int rcu_preempt_depth(void) { return 0; @@ -103,8 +89,6 @@ static inline int rcu_preempt_depth(void) /* Internal to kernel */ void rcu_init(void); extern int rcu_scheduler_active __read_mostly; -void rcu_sched_qs(void); -void rcu_bh_qs(void); void rcu_check_callbacks(int user); void rcu_report_dead(unsigned int cpu); void rcutree_migrate_callbacks(int cpu); @@ -135,11 +119,10 @@ static inline void rcu_init_nohz(void) { } * RCU_NONIDLE - Indicate idle-loop code that needs RCU readers * @a: Code that RCU needs to pay attention to. * - * RCU, RCU-bh, and RCU-sched read-side critical sections are forbidden - * in the inner idle loop, that is, between the rcu_idle_enter() and - * the rcu_idle_exit() -- RCU will happily ignore any such read-side - * critical sections. However, things like powertop need tracepoints - * in the inner idle loop. + * RCU read-side critical sections are forbidden in the inner idle loop, + * that is, between the rcu_idle_enter() and the rcu_idle_exit() -- RCU + * will happily ignore any such read-side critical sections. However, + * things like powertop need tracepoints in the inner idle loop. 
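With the flavors consolidated, the unconditional call_rcu()/synchronize_rcu() declarations above are the only update-side primitives most code needs. A short sketch (the blob structure and callback are made up) of freeing an object after a grace period:

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct blob {
	int data;
	struct rcu_head rcu;
};

static void blob_free_cb(struct rcu_head *head)
{
	kfree(container_of(head, struct blob, rcu));
}

static void blob_retire(struct blob *b)
{
	/* One call_rcu() now covers what call_rcu_bh()/call_rcu_sched() did. */
	call_rcu(&b->rcu, blob_free_cb);
}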
* * This macro provides the way out: RCU_NONIDLE(do_something_with_RCU()) * will tell RCU that it needs to pay attention, invoke its argument @@ -167,20 +150,16 @@ static inline void rcu_init_nohz(void) { } if (READ_ONCE((t)->rcu_tasks_holdout)) \ WRITE_ONCE((t)->rcu_tasks_holdout, false); \ } while (0) -#define rcu_note_voluntary_context_switch(t) \ - do { \ - rcu_all_qs(); \ - rcu_tasks_qs(t); \ - } while (0) +#define rcu_note_voluntary_context_switch(t) rcu_tasks_qs(t) void call_rcu_tasks(struct rcu_head *head, rcu_callback_t func); void synchronize_rcu_tasks(void); void exit_tasks_rcu_start(void); void exit_tasks_rcu_finish(void); #else /* #ifdef CONFIG_TASKS_RCU */ #define rcu_tasks_qs(t) do { } while (0) -#define rcu_note_voluntary_context_switch(t) rcu_all_qs() -#define call_rcu_tasks call_rcu_sched -#define synchronize_rcu_tasks synchronize_sched +#define rcu_note_voluntary_context_switch(t) do { } while (0) +#define call_rcu_tasks call_rcu +#define synchronize_rcu_tasks synchronize_rcu static inline void exit_tasks_rcu_start(void) { } static inline void exit_tasks_rcu_finish(void) { } #endif /* #else #ifdef CONFIG_TASKS_RCU */ @@ -325,9 +304,8 @@ static inline void rcu_preempt_sleep_check(void) { } * Helper functions for rcu_dereference_check(), rcu_dereference_protected() * and rcu_assign_pointer(). Some of these could be folded into their * callers, but they are left separate in order to ease introduction of - * multiple flavors of pointers to match the multiple flavors of RCU - * (e.g., __rcu_bh, * __rcu_sched, and __srcu), should this make sense in - * the future. + * multiple pointers markings to match different RCU implementations + * (e.g., __srcu), should this make sense in the future. */ #ifdef __CHECKER__ @@ -686,14 +664,9 @@ static inline void rcu_read_unlock(void) /** * rcu_read_lock_bh() - mark the beginning of an RCU-bh critical section * - * This is equivalent of rcu_read_lock(), but to be used when updates - * are being done using call_rcu_bh() or synchronize_rcu_bh(). Since - * both call_rcu_bh() and synchronize_rcu_bh() consider completion of a - * softirq handler to be a quiescent state, a process in RCU read-side - * critical section must be protected by disabling softirqs. Read-side - * critical sections in interrupt context can use just rcu_read_lock(), - * though this should at least be commented to avoid confusing people - * reading the code. + * This is equivalent of rcu_read_lock(), but also disables softirqs. + * Note that anything else that disables softirqs can also serve as + * an RCU read-side critical section. * * Note that rcu_read_lock_bh() and the matching rcu_read_unlock_bh() * must occur in the same context, for example, it is illegal to invoke @@ -726,10 +699,9 @@ static inline void rcu_read_unlock_bh(void) /** * rcu_read_lock_sched() - mark the beginning of a RCU-sched critical section * - * This is equivalent of rcu_read_lock(), but to be used when updates - * are being done using call_rcu_sched() or synchronize_rcu_sched(). - * Read-side critical sections can also be introduced by anything that - * disables preemption, including local_irq_disable() and friends. + * This is equivalent of rcu_read_lock(), but disables preemption. + * Read-side critical sections can also be introduced by anything else + * that disables preemption, including local_irq_disable() and friends. 
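Read-side usage is unaffected by the rewording above; a preemption-disabling reader can be sketched as follows (struct config and the global pointer are assumptions for the example):

#include <linux/rcupdate.h>

struct config {
	int value;
};

static struct config __rcu *cur_config;

static int read_config_value(void)
{
	struct config *c;
	int val;

	rcu_read_lock_sched();			/* disables preemption */
	c = rcu_dereference_sched(cur_config);
	val = c ? c->value : -1;
	rcu_read_unlock_sched();
	return val;
}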
* * Note that rcu_read_lock_sched() and the matching rcu_read_unlock_sched() * must occur in the same context, for example, it is illegal to invoke @@ -885,4 +857,96 @@ static inline notrace void rcu_read_unlock_sched_notrace(void) #endif /* #else #ifdef CONFIG_ARCH_WEAK_RELEASE_ACQUIRE */ +/* Has the specified rcu_head structure been handed to call_rcu()? */ + +/* + * rcu_head_init - Initialize rcu_head for rcu_head_after_call_rcu() + * @rhp: The rcu_head structure to initialize. + * + * If you intend to invoke rcu_head_after_call_rcu() to test whether a + * given rcu_head structure has already been passed to call_rcu(), then + * you must also invoke this rcu_head_init() function on it just after + * allocating that structure. Calls to this function must not race with + * calls to call_rcu(), rcu_head_after_call_rcu(), or callback invocation. + */ +static inline void rcu_head_init(struct rcu_head *rhp) +{ + rhp->func = (rcu_callback_t)~0L; +} + +/* + * rcu_head_after_call_rcu - Has this rcu_head been passed to call_rcu()? + * @rhp: The rcu_head structure to test. + * @func: The function passed to call_rcu() along with @rhp. + * + * Returns @true if the @rhp has been passed to call_rcu() with @func, + * and @false otherwise. Emits a warning in any other case, including + * the case where @rhp has already been invoked after a grace period. + * Calls to this function must not race with callback invocation. One way + * to avoid such races is to enclose the call to rcu_head_after_call_rcu() + * in an RCU read-side critical section that includes a read-side fetch + * of the pointer to the structure containing @rhp. + */ +static inline bool +rcu_head_after_call_rcu(struct rcu_head *rhp, rcu_callback_t f) +{ + if (READ_ONCE(rhp->func) == f) + return true; + WARN_ON_ONCE(READ_ONCE(rhp->func) != (rcu_callback_t)~0L); + return false; +} + + +/* Transitional pre-consolidation compatibility definitions. */ + +static inline void synchronize_rcu_bh(void) +{ + synchronize_rcu(); +} + +static inline void synchronize_rcu_bh_expedited(void) +{ + synchronize_rcu_expedited(); +} + +static inline void call_rcu_bh(struct rcu_head *head, rcu_callback_t func) +{ + call_rcu(head, func); +} + +static inline void rcu_barrier_bh(void) +{ + rcu_barrier(); +} + +static inline void synchronize_sched(void) +{ + synchronize_rcu(); +} + +static inline void synchronize_sched_expedited(void) +{ + synchronize_rcu_expedited(); +} + +static inline void call_rcu_sched(struct rcu_head *head, rcu_callback_t func) +{ + call_rcu(head, func); +} + +static inline void rcu_barrier_sched(void) +{ + rcu_barrier(); +} + +static inline unsigned long get_state_synchronize_sched(void) +{ + return get_state_synchronize_rcu(); +} + +static inline void cond_synchronize_sched(unsigned long oldstate) +{ + cond_synchronize_rcu(oldstate); +} + #endif /* __LINUX_RCUPDATE_H */ diff --git a/include/linux/rcupdate_wait.h b/include/linux/rcupdate_wait.h index 57f371344152..8a16c3eb3dd0 100644 --- a/include/linux/rcupdate_wait.h +++ b/include/linux/rcupdate_wait.h @@ -33,17 +33,17 @@ do { \ /** * synchronize_rcu_mult - Wait concurrently for multiple grace periods - * @...: List of call_rcu() functions for the flavors to wait on. + * @...: List of call_rcu() functions for different grace periods to wait on * - * This macro waits concurrently for multiple flavors of RCU grace periods. - * For example, synchronize_rcu_mult(call_rcu, call_rcu_bh) would wait - * on concurrent RCU and RCU-bh grace periods. 
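The protocol behind the two new rcu_head helpers above is: invoke rcu_head_init() immediately after allocating the enclosing structure, and test with rcu_head_after_call_rcu() only from a context that cannot race with callback invocation, for example under rcu_read_lock(). A hedged sketch with an invented item structure:

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct item {
	struct rcu_head rh;
	int payload;
};

static void item_free_cb(struct rcu_head *rhp)
{
	kfree(container_of(rhp, struct item, rh));
}

static struct item *item_alloc(void)
{
	struct item *it = kzalloc(sizeof(*it), GFP_KERNEL);

	if (it)
		rcu_head_init(&it->rh);	/* required before rcu_head_after_call_rcu() */
	return it;
}

/* Caller must hold rcu_read_lock() spanning its fetch of @it. */
static bool item_being_freed(struct item *it)
{
	return rcu_head_after_call_rcu(&it->rh, item_free_cb);
}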
Waiting on a given SRCU + * This macro waits concurrently for multiple types of RCU grace periods. + * For example, synchronize_rcu_mult(call_rcu, call_rcu_tasks) would wait + * on concurrent RCU and RCU-tasks grace periods. Waiting on a given SRCU + * domain requires you to write a wrapper function for that SRCU domain's + * call_srcu() function, supplying the corresponding srcu_struct. * - * If Tiny RCU, tell _wait_rcu_gp() not to bother waiting for RCU - * or RCU-bh, given that anywhere synchronize_rcu_mult() can be called - * is automatically a grace period. + * If Tiny RCU, tell _wait_rcu_gp() not to bother waiting for RCU, + * given that anywhere synchronize_rcu_mult() can be called is automatically + * a grace period. */ #define synchronize_rcu_mult(...) \ _wait_rcu_gp(IS_ENABLED(CONFIG_TINY_RCU), __VA_ARGS__) diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h index 8d9a0ea8f0b5..af65d1f36ddb 100644 --- a/include/linux/rcutiny.h +++ b/include/linux/rcutiny.h @@ -27,12 +27,6 @@ #include <linux/ktime.h> -struct rcu_dynticks; -static inline int rcu_dynticks_snap(struct rcu_dynticks *rdtp) -{ - return 0; -} - /* Never flag non-existent other CPUs! */ static inline bool rcu_eqs_special_set(int cpu) { return false; } @@ -46,53 +40,28 @@ static inline void cond_synchronize_rcu(unsigned long oldstate) might_sleep(); } -static inline unsigned long get_state_synchronize_sched(void) -{ - return 0; -} - -static inline void cond_synchronize_sched(unsigned long oldstate) -{ - might_sleep(); -} - -extern void rcu_barrier_bh(void); -extern void rcu_barrier_sched(void); +extern void rcu_barrier(void); static inline void synchronize_rcu_expedited(void) { - synchronize_sched(); /* Only one CPU, so pretty fast anyway!!! */ + synchronize_rcu(); } -static inline void rcu_barrier(void) +static inline void kfree_call_rcu(struct rcu_head *head, rcu_callback_t func) { - rcu_barrier_sched(); /* Only one CPU, so only one list of callbacks!
*/ -} - -static inline void synchronize_rcu_bh(void) -{ - synchronize_sched(); -} - -static inline void synchronize_rcu_bh_expedited(void) -{ - synchronize_sched(); + call_rcu(head, func); } -static inline void synchronize_sched_expedited(void) -{ - synchronize_sched(); -} +void rcu_qs(void); -static inline void kfree_call_rcu(struct rcu_head *head, - rcu_callback_t func) +static inline void rcu_softirq_qs(void) { - call_rcu(head, func); + rcu_qs(); } #define rcu_note_context_switch(preempt) \ do { \ - rcu_sched_qs(); \ + rcu_qs(); \ rcu_tasks_qs(current); \ } while (0) @@ -108,6 +77,7 @@ static inline int rcu_needs_cpu(u64 basemono, u64 *nextevt) */ static inline void rcu_virt_note_context_switch(int cpu) { } static inline void rcu_cpu_stall_reset(void) { } +static inline int rcu_jiffies_till_stall_check(void) { return 21 * HZ; } static inline void rcu_idle_enter(void) { } static inline void rcu_idle_exit(void) { } static inline void rcu_irq_enter(void) { } @@ -115,6 +85,11 @@ static inline void rcu_irq_exit_irqson(void) { } static inline void rcu_irq_enter_irqson(void) { } static inline void rcu_irq_exit(void) { } static inline void exit_rcu(void) { } +static inline bool rcu_preempt_need_deferred_qs(struct task_struct *t) +{ + return false; +} +static inline void rcu_preempt_deferred_qs(struct task_struct *t) { } #ifdef CONFIG_SRCU void rcu_scheduler_starting(void); #else /* #ifndef CONFIG_SRCU */ diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h index 914655848ef6..7f83179177d1 100644 --- a/include/linux/rcutree.h +++ b/include/linux/rcutree.h @@ -30,6 +30,7 @@ #ifndef __LINUX_RCUTREE_H #define __LINUX_RCUTREE_H +void rcu_softirq_qs(void); void rcu_note_context_switch(bool preempt); int rcu_needs_cpu(u64 basem, u64 *nextevt); void rcu_cpu_stall_reset(void); @@ -44,41 +45,13 @@ static inline void rcu_virt_note_context_switch(int cpu) rcu_note_context_switch(false); } -void synchronize_rcu_bh(void); -void synchronize_sched_expedited(void); void synchronize_rcu_expedited(void); - void kfree_call_rcu(struct rcu_head *head, rcu_callback_t func); -/** - * synchronize_rcu_bh_expedited - Brute-force RCU-bh grace period - * - * Wait for an RCU-bh grace period to elapse, but use a "big hammer" - * approach to force the grace period to end quickly. This consumes - * significant time on all CPUs and is unfriendly to real-time workloads, - * so is thus not recommended for any sort of common-case code. In fact, - * if you are using synchronize_rcu_bh_expedited() in a loop, please - * restructure your code to batch your updates, and then use a single - * synchronize_rcu_bh() instead. - * - * Note that it is illegal to call this function while holding any lock - * that is acquired by a CPU-hotplug notifier. And yes, it is also illegal - * to call this function from a CPU-hotplug notifier. Failing to observe - * these restriction will result in deadlock. 
- */ -static inline void synchronize_rcu_bh_expedited(void) -{ - synchronize_sched_expedited(); -} - void rcu_barrier(void); -void rcu_barrier_bh(void); -void rcu_barrier_sched(void); bool rcu_eqs_special_set(int cpu); unsigned long get_state_synchronize_rcu(void); void cond_synchronize_rcu(unsigned long oldstate); -unsigned long get_state_synchronize_sched(void); -void cond_synchronize_sched(unsigned long oldstate); void rcu_idle_enter(void); void rcu_idle_exit(void); @@ -93,7 +66,9 @@ void rcu_scheduler_starting(void); extern int rcu_scheduler_active __read_mostly; void rcu_end_inkernel_boot(void); bool rcu_is_watching(void); +#ifndef CONFIG_PREEMPT void rcu_all_qs(void); +#endif /* RCUtree hotplug events */ int rcutree_prepare_cpu(unsigned int cpu); diff --git a/include/linux/regmap.h b/include/linux/regmap.h index 379505a53722..a367d59c301d 100644 --- a/include/linux/regmap.h +++ b/include/linux/regmap.h @@ -268,6 +268,13 @@ typedef void (*regmap_unlock)(void *); * field is NULL but precious_table (see below) is not, the * check is performed on such table (a register is precious if * it belongs to one of the ranges specified by precious_table). + * @writeable_noinc_reg: Optional callback returning true if the register + * supports multiple write operations without incrementing + * the register number. If this field is NULL but + * wr_noinc_table (see below) is not, the check is + * performed on such table (a register is no increment + * writeable if it belongs to one of the ranges specified + * by wr_noinc_table). * @readable_noinc_reg: Optional callback returning true if the register * supports multiple read operations without incrementing * the register number. If this field is NULL but @@ -302,6 +309,7 @@ typedef void (*regmap_unlock)(void *); * @rd_table: As above, for read access. * @volatile_table: As above, for volatile registers. * @precious_table: As above, for precious registers. + * @wr_noinc_table: As above, for no increment writeable registers. * @rd_noinc_table: As above, for no increment readable registers. * @reg_defaults: Power on reset values for registers (for use with * register cache support). @@ -315,9 +323,12 @@ typedef void (*regmap_unlock)(void *); * masks are used. * @zero_flag_mask: If set, read_flag_mask and write_flag_mask are used even * if they are both empty. - * @use_single_rw: If set, converts the bulk read and write operations into - * a series of single read and write operations. This is useful - * for device that does not support bulk read and write. + * @use_single_read: If set, converts the bulk read operation into a series of + * single read operations. This is useful for a device that + * does not support bulk read. + * @use_single_write: If set, converts the bulk write operation into a series of + * single write operations. This is useful for a device that + * does not support bulk write. 
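The new writeable_noinc_reg()/wr_noinc_table hooks mirror the read-side ones: they gate regmap_noinc_write(), which streams a buffer into a single non-incrementing register such as a FIFO. A sketch with a made-up register map and chip prefix:

#include <linux/regmap.h>

#define CHIP_FIFO_REG	0x40	/* hypothetical FIFO data register */

static bool chip_writeable_noinc(struct device *dev, unsigned int reg)
{
	return reg == CHIP_FIFO_REG;
}

static const struct regmap_config chip_regmap_cfg = {
	.reg_bits		= 8,
	.val_bits		= 8,
	.writeable_noinc_reg	= chip_writeable_noinc,
	.use_single_write	= true,	/* was use_single_rw */
};

static int chip_push_fifo(struct regmap *map, const u8 *buf, size_t len)
{
	return regmap_noinc_write(map, CHIP_FIFO_REG, buf, len);
}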
* @can_multi_write: If set, the device supports the multi write mode of bulk * write operations, if clear multi write requests will be * split into individual write operations @@ -352,6 +363,7 @@ struct regmap_config { bool (*readable_reg)(struct device *dev, unsigned int reg); bool (*volatile_reg)(struct device *dev, unsigned int reg); bool (*precious_reg)(struct device *dev, unsigned int reg); + bool (*writeable_noinc_reg)(struct device *dev, unsigned int reg); bool (*readable_noinc_reg)(struct device *dev, unsigned int reg); bool disable_locking; @@ -369,6 +381,7 @@ struct regmap_config { const struct regmap_access_table *rd_table; const struct regmap_access_table *volatile_table; const struct regmap_access_table *precious_table; + const struct regmap_access_table *wr_noinc_table; const struct regmap_access_table *rd_noinc_table; const struct reg_default *reg_defaults; unsigned int num_reg_defaults; @@ -380,7 +393,8 @@ struct regmap_config { unsigned long write_flag_mask; bool zero_flag_mask; - bool use_single_rw; + bool use_single_read; + bool use_single_write; bool can_multi_write; enum regmap_endian reg_format_endian; @@ -979,6 +993,8 @@ int regmap_write(struct regmap *map, unsigned int reg, unsigned int val); int regmap_write_async(struct regmap *map, unsigned int reg, unsigned int val); int regmap_raw_write(struct regmap *map, unsigned int reg, const void *val, size_t val_len); +int regmap_noinc_write(struct regmap *map, unsigned int reg, + const void *val, size_t val_len); int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val, size_t val_count); int regmap_multi_reg_write(struct regmap *map, const struct reg_sequence *regs, @@ -1222,6 +1238,13 @@ static inline int regmap_raw_write_async(struct regmap *map, unsigned int reg, return -EINVAL; } +static inline int regmap_noinc_write(struct regmap *map, unsigned int reg, + const void *val, size_t val_len) +{ + WARN_ONCE(1, "regmap API is disabled"); + return -EINVAL; +} + static inline int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val, size_t val_count) { diff --git a/include/linux/regulator/driver.h b/include/linux/regulator/driver.h index 0fd8fbb74763..a9c030192147 100644 --- a/include/linux/regulator/driver.h +++ b/include/linux/regulator/driver.h @@ -271,9 +271,16 @@ enum regulator_type { * @ramp_delay: Time to settle down after voltage change (unit: uV/us) * @min_dropout_uV: The minimum dropout voltage this regulator can handle * @linear_ranges: A constant table of possible voltage ranges. - * @n_linear_ranges: Number of entries in the @linear_ranges table. + * @linear_range_selectors: A constant table of voltage range selectors. + * If pickable ranges are used each range must + * have corresponding selector here. + * @n_linear_ranges: Number of entries in the @linear_ranges (and in + * linear_range_selectors if used) table(s). * @volt_table: Voltage mapping table (if table based mapping) * + * @vsel_range_reg: Register for range selector when using pickable ranges + * and regulator_regmap_X_voltage_X_pickable functions. 
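Putting the pickable-range fields described above together, a regulator with two selectable voltage ranges might be wired up roughly as below; the register layout, step sizes and chip name are invented, and the _pickable helpers are the ones added later in this series:

#include <linux/regulator/driver.h>

static const struct regulator_linear_range chip_buck_ranges[] = {
	REGULATOR_LINEAR_RANGE(600000, 0x0, 0x3f, 12500),
	REGULATOR_LINEAR_RANGE(1500000, 0x0, 0x3f, 25000),
};

static const unsigned int chip_buck_range_sel[] = { 0x0, 0x1 };

static const struct regulator_ops chip_buck_ops = {
	.list_voltage		= regulator_list_voltage_pickable_linear_range,
	.map_voltage		= regulator_map_voltage_pickable_linear_range,
	.get_voltage_sel	= regulator_get_voltage_sel_pickable_regmap,
	.set_voltage_sel	= regulator_set_voltage_sel_pickable_regmap,
};

static const struct regulator_desc chip_buck_desc = {
	.name			= "chip-buck1",
	.ops			= &chip_buck_ops,
	.linear_ranges		= chip_buck_ranges,
	.linear_range_selectors	= chip_buck_range_sel,
	.n_linear_ranges	= ARRAY_SIZE(chip_buck_ranges),
	.vsel_reg		= 0x10,
	.vsel_mask		= 0x3f,
	.vsel_range_reg		= 0x10,
	.vsel_range_mask	= 0xc0,
};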
+ * @vsel_range_mask: Mask for register bitfield used for range selector * @vsel_reg: Register for selector when using regulator_regmap_X_voltage_ * @vsel_mask: Mask for register bitfield used for selector * @csel_reg: Register for TPS65218 LS3 current regulator @@ -338,10 +345,14 @@ struct regulator_desc { int min_dropout_uV; const struct regulator_linear_range *linear_ranges; + const unsigned int *linear_range_selectors; + int n_linear_ranges; const unsigned int *volt_table; + unsigned int vsel_range_reg; + unsigned int vsel_range_mask; unsigned int vsel_reg; unsigned int vsel_mask; unsigned int csel_reg; @@ -498,18 +509,25 @@ int regulator_mode_to_status(unsigned int); int regulator_list_voltage_linear(struct regulator_dev *rdev, unsigned int selector); +int regulator_list_voltage_pickable_linear_range(struct regulator_dev *rdev, + unsigned int selector); int regulator_list_voltage_linear_range(struct regulator_dev *rdev, unsigned int selector); int regulator_list_voltage_table(struct regulator_dev *rdev, unsigned int selector); int regulator_map_voltage_linear(struct regulator_dev *rdev, int min_uV, int max_uV); +int regulator_map_voltage_pickable_linear_range(struct regulator_dev *rdev, + int min_uV, int max_uV); int regulator_map_voltage_linear_range(struct regulator_dev *rdev, int min_uV, int max_uV); int regulator_map_voltage_iterate(struct regulator_dev *rdev, int min_uV, int max_uV); int regulator_map_voltage_ascend(struct regulator_dev *rdev, int min_uV, int max_uV); +int regulator_get_voltage_sel_pickable_regmap(struct regulator_dev *rdev); +int regulator_set_voltage_sel_pickable_regmap(struct regulator_dev *rdev, + unsigned int sel); int regulator_get_voltage_sel_regmap(struct regulator_dev *rdev); int regulator_set_voltage_sel_regmap(struct regulator_dev *rdev, unsigned sel); int regulator_is_enabled_regmap(struct regulator_dev *rdev); diff --git a/include/linux/regulator/fixed.h b/include/linux/regulator/fixed.h index 48918be649d4..1a4340ed8e2b 100644 --- a/include/linux/regulator/fixed.h +++ b/include/linux/regulator/fixed.h @@ -24,8 +24,6 @@ struct regulator_init_data; * @supply_name: Name of the regulator supply * @input_supply: Name of the input regulator supply * @microvolts: Output voltage of regulator - * @gpio: GPIO to use for enable control - * set to -EINVAL if not used * @startup_delay: Start-up time in microseconds * @gpio_is_open_drain: Gpio pin is open drain or normal type. 
* If it is open drain type then HIGH will be set @@ -49,7 +47,6 @@ struct fixed_voltage_config { const char *supply_name; const char *input_supply; int microvolts; - int gpio; unsigned startup_delay; unsigned gpio_is_open_drain:1; unsigned enable_high:1; diff --git a/include/linux/restart_block.h b/include/linux/restart_block.h index 5d83d0c1d06c..bba2920e9c05 100644 --- a/include/linux/restart_block.h +++ b/include/linux/restart_block.h @@ -10,7 +10,7 @@ #include <linux/time64.h> struct timespec; -struct compat_timespec; +struct old_timespec32; struct pollfd; enum timespec_type { @@ -40,7 +40,7 @@ struct restart_block { enum timespec_type type; union { struct __kernel_timespec __user *rmtp; - struct compat_timespec __user *compat_rmtp; + struct old_timespec32 __user *compat_rmtp; }; u64 expires; } nanosleep; diff --git a/include/linux/rtc.h b/include/linux/rtc.h index 6aedc30003e7..c8bb4a2b48c3 100644 --- a/include/linux/rtc.h +++ b/include/linux/rtc.h @@ -167,17 +167,12 @@ struct rtc_device { #define RTC_TIMESTAMP_BEGIN_2000 946684800LL /* 2000-01-01 00:00:00 */ #define RTC_TIMESTAMP_END_2099 4102444799LL /* 2099-12-31 23:59:59 */ -extern struct rtc_device *rtc_device_register(const char *name, - struct device *dev, - const struct rtc_class_ops *ops, - struct module *owner); extern struct rtc_device *devm_rtc_device_register(struct device *dev, const char *name, const struct rtc_class_ops *ops, struct module *owner); struct rtc_device *devm_rtc_allocate_device(struct device *dev); int __rtc_register_device(struct module *owner, struct rtc_device *rtc); -extern void rtc_device_unregister(struct rtc_device *rtc); extern void devm_rtc_device_unregister(struct device *dev, struct rtc_device *rtc); @@ -277,4 +272,20 @@ static inline int rtc_nvmem_register(struct rtc_device *rtc, static inline void rtc_nvmem_unregister(struct rtc_device *rtc) {} #endif +#ifdef CONFIG_RTC_INTF_SYSFS +int rtc_add_group(struct rtc_device *rtc, const struct attribute_group *grp); +int rtc_add_groups(struct rtc_device *rtc, const struct attribute_group **grps); +#else +static inline +int rtc_add_group(struct rtc_device *rtc, const struct attribute_group *grp) +{ + return 0; +} + +static inline +int rtc_add_groups(struct rtc_device *rtc, const struct attribute_group **grps) +{ + return 0; +} +#endif #endif /* _LINUX_RTC_H_ */ diff --git a/include/linux/rtnetlink.h b/include/linux/rtnetlink.h index 5225832bd6ff..bb9cb84114c1 100644 --- a/include/linux/rtnetlink.h +++ b/include/linux/rtnetlink.h @@ -6,6 +6,7 @@ #include <linux/mutex.h> #include <linux/netdevice.h> #include <linux/wait.h> +#include <linux/refcount.h> #include <uapi/linux/rtnetlink.h> extern int rtnetlink_send(struct sk_buff *skb, struct net *net, u32 pid, u32 group, int echo); @@ -34,6 +35,7 @@ extern void rtnl_unlock(void); extern int rtnl_trylock(void); extern int rtnl_is_locked(void); extern int rtnl_lock_killable(void); +extern bool refcount_dec_and_rtnl_lock(refcount_t *r); extern wait_queue_head_t netdev_unregistering_wq; extern struct rw_semaphore pernet_ops_rwsem; @@ -83,6 +85,11 @@ static inline struct netdev_queue *dev_ingress_queue(struct net_device *dev) return rtnl_dereference(dev->ingress_queue); } +static inline struct netdev_queue *dev_ingress_queue_rcu(struct net_device *dev) +{ + return rcu_dereference(dev->ingress_queue); +} + struct netdev_queue *dev_ingress_queue_create(struct net_device *dev); #ifdef CONFIG_NET_INGRESS diff --git a/include/linux/rwsem.h b/include/linux/rwsem.h index ab93b6eae696..67dbb57508b1 100644 --- 
a/include/linux/rwsem.h +++ b/include/linux/rwsem.h @@ -45,10 +45,10 @@ struct rw_semaphore { }; /* - * Setting bit 0 of the owner field with other non-zero bits will indicate + * Setting bit 1 of the owner field but not bit 0 will indicate * that the rwsem is writer-owned with an unknown owner. */ -#define RWSEM_OWNER_UNKNOWN ((struct task_struct *)-1L) +#define RWSEM_OWNER_UNKNOWN ((struct task_struct *)-2L) extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem); extern struct rw_semaphore *rwsem_down_read_failed_killable(struct rw_semaphore *sem); diff --git a/include/linux/sched.h b/include/linux/sched.h index 977cb57d7bc9..8f8a5418b627 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -25,6 +25,7 @@ #include <linux/latencytop.h> #include <linux/sched/prio.h> #include <linux/signal_types.h> +#include <linux/psi_types.h> #include <linux/mm_types_task.h> #include <linux/task_io_accounting.h> #include <linux/rseq.h> @@ -571,12 +572,8 @@ union rcu_special { struct { u8 blocked; u8 need_qs; - u8 exp_need_qs; - - /* Otherwise the compiler can store garbage here: */ - u8 pad; } b; /* Bits. */ - u32 s; /* Set of bits. */ + u16 s; /* Set of bits. */ }; enum perf_event_task_context { @@ -710,6 +707,10 @@ struct task_struct { unsigned sched_contributes_to_load:1; unsigned sched_migrated:1; unsigned sched_remote_wakeup:1; +#ifdef CONFIG_PSI + unsigned sched_psi_wake_requeue:1; +#endif + /* Force alignment to the next boundary: */ unsigned :0; @@ -723,9 +724,6 @@ struct task_struct { #endif #ifdef CONFIG_MEMCG unsigned in_user_fault:1; -#ifdef CONFIG_MEMCG_KMEM - unsigned memcg_kmem_skip_account:1; -#endif #endif #ifdef CONFIG_COMPAT_BRK unsigned brk_randomized:1; @@ -739,6 +737,12 @@ struct task_struct { unsigned use_memdelay:1; #endif + /* + * May usercopy functions fault on kernel addresses? + * This is not just a single bit because this can potentially nest. + */ + unsigned int kernel_uaccess_faults_ok; + unsigned long atomic_flags; /* Flags requiring atomic access. 
*/ struct restart_block restart_block; @@ -960,9 +964,13 @@ struct task_struct { /* Ptrace state: */ unsigned long ptrace_message; - siginfo_t *last_siginfo; + kernel_siginfo_t *last_siginfo; struct task_io_accounting ioac; +#ifdef CONFIG_PSI + /* Pressure stall state */ + unsigned int psi_flags; +#endif #ifdef CONFIG_TASK_XACCT /* Accumulated RSS usage: */ u64 acct_rss_mem1; @@ -1389,6 +1397,7 @@ extern struct pid *cad_pid; #define PF_KTHREAD 0x00200000 /* I am a kernel thread */ #define PF_RANDOMIZE 0x00400000 /* Randomize virtual address space */ #define PF_SWAPWRITE 0x00800000 /* Allowed to write to swap */ +#define PF_MEMSTALL 0x01000000 /* Stalled due to lack of memory */ #define PF_NO_SETAFFINITY 0x04000000 /* Userland is not allowed to meddle with cpus_allowed */ #define PF_MCE_EARLY 0x08000000 /* Early kill for mce process policy */ #define PF_MUTEX_TESTER 0x20000000 /* Thread belongs to the rt mutex tester */ diff --git a/include/linux/sched/loadavg.h b/include/linux/sched/loadavg.h index 80bc84ba5d2a..4859bea47a7b 100644 --- a/include/linux/sched/loadavg.h +++ b/include/linux/sched/loadavg.h @@ -22,10 +22,26 @@ extern void get_avenrun(unsigned long *loads, unsigned long offset, int shift); #define EXP_5 2014 /* 1/exp(5sec/5min) */ #define EXP_15 2037 /* 1/exp(5sec/15min) */ -#define CALC_LOAD(load,exp,n) \ - load *= exp; \ - load += n*(FIXED_1-exp); \ - load >>= FSHIFT; +/* + * a1 = a0 * e + a * (1 - e) + */ +static inline unsigned long +calc_load(unsigned long load, unsigned long exp, unsigned long active) +{ + unsigned long newload; + + newload = load * exp + active * (FIXED_1 - exp); + if (active >= load) + newload += FIXED_1-1; + + return newload / FIXED_1; +} + +extern unsigned long calc_load_n(unsigned long load, unsigned long exp, + unsigned long active, unsigned int n); + +#define LOAD_INT(x) ((x) >> FSHIFT) +#define LOAD_FRAC(x) LOAD_INT(((x) & (FIXED_1-1)) * 100) extern void calc_global_load(unsigned long ticks); diff --git a/include/linux/sched/signal.h b/include/linux/sched/signal.h index 1be35729c2c5..13789d10a50e 100644 --- a/include/linux/sched/signal.h +++ b/include/linux/sched/signal.h @@ -270,16 +270,16 @@ static inline int signal_group_exit(const struct signal_struct *sig) extern void flush_signals(struct task_struct *); extern void ignore_signals(struct task_struct *); extern void flush_signal_handlers(struct task_struct *, int force_default); -extern int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info); +extern int dequeue_signal(struct task_struct *tsk, sigset_t *mask, kernel_siginfo_t *info); -static inline int kernel_dequeue_signal(siginfo_t *info) +static inline int kernel_dequeue_signal(void) { struct task_struct *tsk = current; - siginfo_t __info; + kernel_siginfo_t __info; int ret; spin_lock_irq(&tsk->sighand->siglock); - ret = dequeue_signal(tsk, &tsk->blocked, info ?: &__info); + ret = dequeue_signal(tsk, &tsk->blocked, &__info); spin_unlock_irq(&tsk->sighand->siglock); return ret; @@ -322,12 +322,12 @@ int force_sig_pkuerr(void __user *addr, u32 pkey); int force_sig_ptrace_errno_trap(int errno, void __user *addr); -extern int send_sig_info(int, struct siginfo *, struct task_struct *); +extern int send_sig_info(int, struct kernel_siginfo *, struct task_struct *); extern void force_sigsegv(int sig, struct task_struct *p); -extern int force_sig_info(int, struct siginfo *, struct task_struct *); -extern int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp); -extern int kill_pid_info(int sig, struct siginfo *info, 
struct pid *pid); -extern int kill_pid_info_as_cred(int, struct siginfo *, struct pid *, +extern int force_sig_info(int, struct kernel_siginfo *, struct task_struct *); +extern int __kill_pgrp_info(int sig, struct kernel_siginfo *info, struct pid *pgrp); +extern int kill_pid_info(int sig, struct kernel_siginfo *info, struct pid *pid); +extern int kill_pid_info_as_cred(int, struct kernel_siginfo *, struct pid *, const struct cred *); extern int kill_pgrp(struct pid *pid, int sig, int priv); extern int kill_pid(struct pid *pid, int sig, int priv); @@ -475,9 +475,8 @@ static inline int kill_cad_pid(int sig, int priv) } /* These can be the second arg to send_sig_info/send_group_sig_info. */ -#define SEND_SIG_NOINFO ((struct siginfo *) 0) -#define SEND_SIG_PRIV ((struct siginfo *) 1) -#define SEND_SIG_FORCED ((struct siginfo *) 2) +#define SEND_SIG_NOINFO ((struct kernel_siginfo *) 0) +#define SEND_SIG_PRIV ((struct kernel_siginfo *) 1) /* * True if we are on the alternate signal stack. diff --git a/include/linux/sched/topology.h b/include/linux/sched/topology.h index 26347741ba50..6b9976180c1e 100644 --- a/include/linux/sched/topology.h +++ b/include/linux/sched/topology.h @@ -23,10 +23,10 @@ #define SD_BALANCE_FORK 0x0008 /* Balance on fork, clone */ #define SD_BALANCE_WAKE 0x0010 /* Balance on wakeup */ #define SD_WAKE_AFFINE 0x0020 /* Wake task to waking CPU */ -#define SD_ASYM_CPUCAPACITY 0x0040 /* Groups have different max cpu capacities */ -#define SD_SHARE_CPUCAPACITY 0x0080 /* Domain members share cpu capacity */ +#define SD_ASYM_CPUCAPACITY 0x0040 /* Domain members have different CPU capacities */ +#define SD_SHARE_CPUCAPACITY 0x0080 /* Domain members share CPU capacity */ #define SD_SHARE_POWERDOMAIN 0x0100 /* Domain members share power domain */ -#define SD_SHARE_PKG_RESOURCES 0x0200 /* Domain members share cpu pkg resources */ +#define SD_SHARE_PKG_RESOURCES 0x0200 /* Domain members share CPU pkg resources */ #define SD_SERIALIZE 0x0400 /* Only a single load balancing instance */ #define SD_ASYM_PACKING 0x0800 /* Place busy groups earlier in the domain */ #define SD_PREFER_SIBLING 0x1000 /* Prefer to place tasks in a sibling domain */ diff --git a/include/linux/security.h b/include/linux/security.h index 75f4156c84d7..d170a5b031f3 100644 --- a/include/linux/security.h +++ b/include/linux/security.h @@ -35,7 +35,7 @@ struct linux_binprm; struct cred; struct rlimit; -struct siginfo; +struct kernel_siginfo; struct sembuf; struct kern_ipc_perm; struct audit_context; @@ -361,7 +361,7 @@ int security_task_setrlimit(struct task_struct *p, unsigned int resource, int security_task_setscheduler(struct task_struct *p); int security_task_getscheduler(struct task_struct *p); int security_task_movememory(struct task_struct *p); -int security_task_kill(struct task_struct *p, struct siginfo *info, +int security_task_kill(struct task_struct *p, struct kernel_siginfo *info, int sig, const struct cred *cred); int security_task_prctl(int option, unsigned long arg2, unsigned long arg3, unsigned long arg4, unsigned long arg5); @@ -1020,7 +1020,7 @@ static inline int security_task_movememory(struct task_struct *p) } static inline int security_task_kill(struct task_struct *p, - struct siginfo *info, int sig, + struct kernel_siginfo *info, int sig, const struct cred *cred) { return 0; diff --git a/include/linux/serial_sci.h b/include/linux/serial_sci.h index c0e795d95477..1c89611e0e06 100644 --- a/include/linux/serial_sci.h +++ b/include/linux/serial_sci.h @@ -36,6 +36,7 @@ enum { 
SCIx_SH4_SCIF_FIFODATA_REGTYPE, SCIx_SH7705_SCIF_REGTYPE, SCIx_HSCIF_REGTYPE, + SCIx_RZ_SCIFA_REGTYPE, SCIx_NR_REGTYPES, }; diff --git a/include/linux/signal.h b/include/linux/signal.h index 3d4cd5db30a9..200ed96a05af 100644 --- a/include/linux/signal.h +++ b/include/linux/signal.h @@ -11,17 +11,21 @@ struct task_struct; /* for sysctl */ extern int print_fatal_signals; -static inline void copy_siginfo(struct siginfo *to, const struct siginfo *from) +static inline void copy_siginfo(kernel_siginfo_t *to, + const kernel_siginfo_t *from) { memcpy(to, from, sizeof(*to)); } -static inline void clear_siginfo(struct siginfo *info) +static inline void clear_siginfo(kernel_siginfo_t *info) { memset(info, 0, sizeof(*info)); } -int copy_siginfo_to_user(struct siginfo __user *to, const struct siginfo *from); +#define SI_EXPANSION_SIZE (sizeof(struct siginfo) - sizeof(struct kernel_siginfo)) + +int copy_siginfo_to_user(siginfo_t __user *to, const kernel_siginfo_t *from); +int copy_siginfo_from_user(kernel_siginfo_t *to, const siginfo_t __user *from); enum siginfo_layout { SIL_KILL, @@ -36,7 +40,7 @@ enum siginfo_layout { SIL_SYS, }; -enum siginfo_layout siginfo_layout(int sig, int si_code); +enum siginfo_layout siginfo_layout(unsigned sig, int si_code); /* * Define some primitives to manipulate sigset_t. @@ -257,11 +261,11 @@ struct pt_regs; enum pid_type; extern int next_signal(struct sigpending *pending, sigset_t *mask); -extern int do_send_sig_info(int sig, struct siginfo *info, +extern int do_send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p, enum pid_type type); -extern int group_send_sig_info(int sig, struct siginfo *info, +extern int group_send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p, enum pid_type type); -extern int __group_send_sig_info(int, struct siginfo *, struct task_struct *); +extern int __group_send_sig_info(int, struct kernel_siginfo *, struct task_struct *); extern int sigprocmask(int, sigset_t *, sigset_t *); extern void set_current_blocked(sigset_t *); extern void __set_current_blocked(const sigset_t *); diff --git a/include/linux/signal_types.h b/include/linux/signal_types.h index 222ae696000b..f8a90ae9c6ec 100644 --- a/include/linux/signal_types.h +++ b/include/linux/signal_types.h @@ -9,6 +9,10 @@ #include <linux/list.h> #include <uapi/linux/signal.h> +typedef struct kernel_siginfo { + __SIGINFO; +} kernel_siginfo_t; + /* * Real Time signals may be queued. */ @@ -16,7 +20,7 @@ struct sigqueue { struct list_head list; int flags; - siginfo_t info; + kernel_siginfo_t info; struct user_struct *user; }; @@ -60,7 +64,7 @@ struct old_sigaction { struct ksignal { struct k_sigaction ka; - siginfo_t info; + kernel_siginfo_t info; int sig; }; diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 17a13e4785fc..0ba687454267 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -243,6 +243,8 @@ struct scatterlist; struct pipe_inode_info; struct iov_iter; struct napi_struct; +struct bpf_prog; +union bpf_attr; #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) struct nf_conntrack { @@ -689,7 +691,7 @@ struct sk_buff { union { ktime_t tstamp; - u64 skb_mstamp; + u64 skb_mstamp_ns; /* earliest departure time */ }; /* * This is the control buffer. 
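Tying the kernel_siginfo_t conversion above together: in-kernel senders now clear and fill a kernel_siginfo_t before handing it to send_sig_info(). A hypothetical sketch:

#include <linux/sched/signal.h>
#include <linux/signal.h>

static int notify_task(struct task_struct *t)
{
	kernel_siginfo_t info;

	clear_siginfo(&info);
	info.si_signo = SIGUSR1;
	info.si_code  = SI_KERNEL;

	return send_sig_info(SIGUSR1, &info, t);
}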
It is free to use for every @@ -1080,11 +1082,6 @@ static inline int skb_pad(struct sk_buff *skb, int pad) } #define dev_kfree_skb(a) consume_skb(a) -int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb, - int getfrag(void *from, char *to, int offset, - int len, int odd, struct sk_buff *skb), - void *from, int length); - int skb_append_pagefrags(struct sk_buff *skb, struct page *page, int offset, size_t size); @@ -1192,6 +1189,24 @@ void skb_flow_dissector_init(struct flow_dissector *flow_dissector, const struct flow_dissector_key *key, unsigned int key_count); +#ifdef CONFIG_NET +int skb_flow_dissector_bpf_prog_attach(const union bpf_attr *attr, + struct bpf_prog *prog); + +int skb_flow_dissector_bpf_prog_detach(const union bpf_attr *attr); +#else +static inline int skb_flow_dissector_bpf_prog_attach(const union bpf_attr *attr, + struct bpf_prog *prog) +{ + return -EOPNOTSUPP; +} + +static inline int skb_flow_dissector_bpf_prog_detach(const union bpf_attr *attr) +{ + return -EOPNOTSUPP; +} +#endif + bool __skb_flow_dissect(const struct sk_buff *skb, struct flow_dissector *flow_dissector, void *target_container, @@ -1339,6 +1354,17 @@ static inline void skb_zcopy_abort(struct sk_buff *skb) } } +static inline void skb_mark_not_on_list(struct sk_buff *skb) +{ + skb->next = NULL; +} + +static inline void skb_list_del_init(struct sk_buff *skb) +{ + __list_del_entry(&skb->list); + skb_mark_not_on_list(skb); +} + /** * skb_queue_empty - check if a queue is empty * @list: queue head @@ -1593,6 +1619,17 @@ static inline struct sk_buff *skb_peek(const struct sk_buff_head *list_) } /** + * __skb_peek - peek at the head of a non-empty &sk_buff_head + * @list_: list to peek at + * + * Like skb_peek(), but the caller knows that the list is not empty. + */ +static inline struct sk_buff *__skb_peek(const struct sk_buff_head *list_) +{ + return list_->next; +} + +/** * skb_peek_next - peek skb following the given one from a queue * @skb: skb to start from * @list_: list to peek at @@ -3468,13 +3505,19 @@ static inline bool __skb_metadata_differs(const struct sk_buff *skb_a, #define __it(x, op) (x -= sizeof(u##op)) #define __it_diff(a, b, op) (*(u##op *)__it(a, op)) ^ (*(u##op *)__it(b, op)) case 32: diffs |= __it_diff(a, b, 64); + /* fall through */ case 24: diffs |= __it_diff(a, b, 64); + /* fall through */ case 16: diffs |= __it_diff(a, b, 64); + /* fall through */ case 8: diffs |= __it_diff(a, b, 64); break; case 28: diffs |= __it_diff(a, b, 64); + /* fall through */ case 20: diffs |= __it_diff(a, b, 64); + /* fall through */ case 12: diffs |= __it_diff(a, b, 64); + /* fall through */ case 4: diffs |= __it_diff(a, b, 32); break; } diff --git a/include/linux/skmsg.h b/include/linux/skmsg.h new file mode 100644 index 000000000000..2a11e9d91dfa --- /dev/null +++ b/include/linux/skmsg.h @@ -0,0 +1,434 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2017 - 2018 Covalent IO, Inc. http://covalent.io */ + +#ifndef _LINUX_SKMSG_H +#define _LINUX_SKMSG_H + +#include <linux/bpf.h> +#include <linux/filter.h> +#include <linux/scatterlist.h> +#include <linux/skbuff.h> + +#include <net/sock.h> +#include <net/tcp.h> +#include <net/strparser.h> + +#define MAX_MSG_FRAGS MAX_SKB_FRAGS + +enum __sk_action { + __SK_DROP = 0, + __SK_PASS, + __SK_REDIRECT, + __SK_NONE, +}; + +struct sk_msg_sg { + u32 start; + u32 curr; + u32 end; + u32 size; + u32 copybreak; + bool copy[MAX_MSG_FRAGS]; + /* The extra element is used for chaining the front and sections when + * the list becomes partitioned (e.g. 
end < start). The crypto APIs + * require the chaining. + */ + struct scatterlist data[MAX_MSG_FRAGS + 1]; +}; + +struct sk_msg { + struct sk_msg_sg sg; + void *data; + void *data_end; + u32 apply_bytes; + u32 cork_bytes; + u32 flags; + struct sk_buff *skb; + struct sock *sk_redir; + struct sock *sk; + struct list_head list; +}; + +struct sk_psock_progs { + struct bpf_prog *msg_parser; + struct bpf_prog *skb_parser; + struct bpf_prog *skb_verdict; +}; + +enum sk_psock_state_bits { + SK_PSOCK_TX_ENABLED, +}; + +struct sk_psock_link { + struct list_head list; + struct bpf_map *map; + void *link_raw; +}; + +struct sk_psock_parser { + struct strparser strp; + bool enabled; + void (*saved_data_ready)(struct sock *sk); +}; + +struct sk_psock_work_state { + struct sk_buff *skb; + u32 len; + u32 off; +}; + +struct sk_psock { + struct sock *sk; + struct sock *sk_redir; + u32 apply_bytes; + u32 cork_bytes; + u32 eval; + struct sk_msg *cork; + struct sk_psock_progs progs; + struct sk_psock_parser parser; + struct sk_buff_head ingress_skb; + struct list_head ingress_msg; + unsigned long state; + struct list_head link; + spinlock_t link_lock; + refcount_t refcnt; + void (*saved_unhash)(struct sock *sk); + void (*saved_close)(struct sock *sk, long timeout); + void (*saved_write_space)(struct sock *sk); + struct proto *sk_proto; + struct sk_psock_work_state work_state; + struct work_struct work; + union { + struct rcu_head rcu; + struct work_struct gc; + }; +}; + +int sk_msg_alloc(struct sock *sk, struct sk_msg *msg, int len, + int elem_first_coalesce); +int sk_msg_clone(struct sock *sk, struct sk_msg *dst, struct sk_msg *src, + u32 off, u32 len); +void sk_msg_trim(struct sock *sk, struct sk_msg *msg, int len); +int sk_msg_free(struct sock *sk, struct sk_msg *msg); +int sk_msg_free_nocharge(struct sock *sk, struct sk_msg *msg); +void sk_msg_free_partial(struct sock *sk, struct sk_msg *msg, u32 bytes); +void sk_msg_free_partial_nocharge(struct sock *sk, struct sk_msg *msg, + u32 bytes); + +void sk_msg_return(struct sock *sk, struct sk_msg *msg, int bytes); +void sk_msg_return_zero(struct sock *sk, struct sk_msg *msg, int bytes); + +int sk_msg_zerocopy_from_iter(struct sock *sk, struct iov_iter *from, + struct sk_msg *msg, u32 bytes); +int sk_msg_memcopy_from_iter(struct sock *sk, struct iov_iter *from, + struct sk_msg *msg, u32 bytes); + +static inline void sk_msg_check_to_free(struct sk_msg *msg, u32 i, u32 bytes) +{ + WARN_ON(i == msg->sg.end && bytes); +} + +static inline void sk_msg_apply_bytes(struct sk_psock *psock, u32 bytes) +{ + if (psock->apply_bytes) { + if (psock->apply_bytes < bytes) + psock->apply_bytes = 0; + else + psock->apply_bytes -= bytes; + } +} + +#define sk_msg_iter_var_prev(var) \ + do { \ + if (var == 0) \ + var = MAX_MSG_FRAGS - 1; \ + else \ + var--; \ + } while (0) + +#define sk_msg_iter_var_next(var) \ + do { \ + var++; \ + if (var == MAX_MSG_FRAGS) \ + var = 0; \ + } while (0) + +#define sk_msg_iter_prev(msg, which) \ + sk_msg_iter_var_prev(msg->sg.which) + +#define sk_msg_iter_next(msg, which) \ + sk_msg_iter_var_next(msg->sg.which) + +static inline void sk_msg_clear_meta(struct sk_msg *msg) +{ + memset(&msg->sg, 0, offsetofend(struct sk_msg_sg, copy)); +} + +static inline void sk_msg_init(struct sk_msg *msg) +{ + BUILD_BUG_ON(ARRAY_SIZE(msg->sg.data) - 1 != MAX_MSG_FRAGS); + memset(msg, 0, sizeof(*msg)); + sg_init_marker(msg->sg.data, MAX_MSG_FRAGS); +} + +static inline void sk_msg_xfer(struct sk_msg *dst, struct sk_msg *src, + int which, u32 size) +{ + dst->sg.data[which] 
= src->sg.data[which]; + dst->sg.data[which].length = size; + dst->sg.size += size; + src->sg.data[which].length -= size; + src->sg.data[which].offset += size; +} + +static inline void sk_msg_xfer_full(struct sk_msg *dst, struct sk_msg *src) +{ + memcpy(dst, src, sizeof(*src)); + sk_msg_init(src); +} + +static inline bool sk_msg_full(const struct sk_msg *msg) +{ + return (msg->sg.end == msg->sg.start) && msg->sg.size; +} + +static inline u32 sk_msg_elem_used(const struct sk_msg *msg) +{ + if (sk_msg_full(msg)) + return MAX_MSG_FRAGS; + + return msg->sg.end >= msg->sg.start ? + msg->sg.end - msg->sg.start : + msg->sg.end + (MAX_MSG_FRAGS - msg->sg.start); +} + +static inline struct scatterlist *sk_msg_elem(struct sk_msg *msg, int which) +{ + return &msg->sg.data[which]; +} + +static inline struct scatterlist sk_msg_elem_cpy(struct sk_msg *msg, int which) +{ + return msg->sg.data[which]; +} + +static inline struct page *sk_msg_page(struct sk_msg *msg, int which) +{ + return sg_page(sk_msg_elem(msg, which)); +} + +static inline bool sk_msg_to_ingress(const struct sk_msg *msg) +{ + return msg->flags & BPF_F_INGRESS; +} + +static inline void sk_msg_compute_data_pointers(struct sk_msg *msg) +{ + struct scatterlist *sge = sk_msg_elem(msg, msg->sg.start); + + if (msg->sg.copy[msg->sg.start]) { + msg->data = NULL; + msg->data_end = NULL; + } else { + msg->data = sg_virt(sge); + msg->data_end = msg->data + sge->length; + } +} + +static inline void sk_msg_page_add(struct sk_msg *msg, struct page *page, + u32 len, u32 offset) +{ + struct scatterlist *sge; + + get_page(page); + sge = sk_msg_elem(msg, msg->sg.end); + sg_set_page(sge, page, len, offset); + sg_unmark_end(sge); + + msg->sg.copy[msg->sg.end] = true; + msg->sg.size += len; + sk_msg_iter_next(msg, end); +} + +static inline void sk_msg_sg_copy(struct sk_msg *msg, u32 i, bool copy_state) +{ + do { + msg->sg.copy[i] = copy_state; + sk_msg_iter_var_next(i); + if (i == msg->sg.end) + break; + } while (1); +} + +static inline void sk_msg_sg_copy_set(struct sk_msg *msg, u32 start) +{ + sk_msg_sg_copy(msg, start, true); +} + +static inline void sk_msg_sg_copy_clear(struct sk_msg *msg, u32 start) +{ + sk_msg_sg_copy(msg, start, false); +} + +static inline struct sk_psock *sk_psock(const struct sock *sk) +{ + return rcu_dereference_sk_user_data(sk); +} + +static inline void sk_psock_queue_msg(struct sk_psock *psock, + struct sk_msg *msg) +{ + list_add_tail(&msg->list, &psock->ingress_msg); +} + +static inline bool sk_psock_queue_empty(const struct sk_psock *psock) +{ + return psock ? 
list_empty(&psock->ingress_msg) : true; +} + +static inline void sk_psock_report_error(struct sk_psock *psock, int err) +{ + struct sock *sk = psock->sk; + + sk->sk_err = err; + sk->sk_error_report(sk); +} + +struct sk_psock *sk_psock_init(struct sock *sk, int node); + +int sk_psock_init_strp(struct sock *sk, struct sk_psock *psock); +void sk_psock_start_strp(struct sock *sk, struct sk_psock *psock); +void sk_psock_stop_strp(struct sock *sk, struct sk_psock *psock); + +int sk_psock_msg_verdict(struct sock *sk, struct sk_psock *psock, + struct sk_msg *msg); + +static inline struct sk_psock_link *sk_psock_init_link(void) +{ + return kzalloc(sizeof(struct sk_psock_link), + GFP_ATOMIC | __GFP_NOWARN); +} + +static inline void sk_psock_free_link(struct sk_psock_link *link) +{ + kfree(link); +} + +struct sk_psock_link *sk_psock_link_pop(struct sk_psock *psock); +#if defined(CONFIG_BPF_STREAM_PARSER) +void sk_psock_unlink(struct sock *sk, struct sk_psock_link *link); +#else +static inline void sk_psock_unlink(struct sock *sk, + struct sk_psock_link *link) +{ +} +#endif + +void __sk_psock_purge_ingress_msg(struct sk_psock *psock); + +static inline void sk_psock_cork_free(struct sk_psock *psock) +{ + if (psock->cork) { + sk_msg_free(psock->sk, psock->cork); + kfree(psock->cork); + psock->cork = NULL; + } +} + +static inline void sk_psock_update_proto(struct sock *sk, + struct sk_psock *psock, + struct proto *ops) +{ + psock->saved_unhash = sk->sk_prot->unhash; + psock->saved_close = sk->sk_prot->close; + psock->saved_write_space = sk->sk_write_space; + + psock->sk_proto = sk->sk_prot; + sk->sk_prot = ops; +} + +static inline void sk_psock_restore_proto(struct sock *sk, + struct sk_psock *psock) +{ + if (psock->sk_proto) { + sk->sk_prot = psock->sk_proto; + psock->sk_proto = NULL; + } +} + +static inline void sk_psock_set_state(struct sk_psock *psock, + enum sk_psock_state_bits bit) +{ + set_bit(bit, &psock->state); +} + +static inline void sk_psock_clear_state(struct sk_psock *psock, + enum sk_psock_state_bits bit) +{ + clear_bit(bit, &psock->state); +} + +static inline bool sk_psock_test_state(const struct sk_psock *psock, + enum sk_psock_state_bits bit) +{ + return test_bit(bit, &psock->state); +} + +static inline struct sk_psock *sk_psock_get_checked(struct sock *sk) +{ + struct sk_psock *psock; + + rcu_read_lock(); + psock = sk_psock(sk); + if (psock) { + if (sk->sk_prot->recvmsg != tcp_bpf_recvmsg) { + psock = ERR_PTR(-EBUSY); + goto out; + } + + if (!refcount_inc_not_zero(&psock->refcnt)) + psock = ERR_PTR(-EBUSY); + } +out: + rcu_read_unlock(); + return psock; +} + +static inline struct sk_psock *sk_psock_get(struct sock *sk) +{ + struct sk_psock *psock; + + rcu_read_lock(); + psock = sk_psock(sk); + if (psock && !refcount_inc_not_zero(&psock->refcnt)) + psock = NULL; + rcu_read_unlock(); + return psock; +} + +void sk_psock_stop(struct sock *sk, struct sk_psock *psock); +void sk_psock_destroy(struct rcu_head *rcu); +void sk_psock_drop(struct sock *sk, struct sk_psock *psock); + +static inline void sk_psock_put(struct sock *sk, struct sk_psock *psock) +{ + if (refcount_dec_and_test(&psock->refcnt)) + sk_psock_drop(sk, psock); +} + +static inline void psock_set_prog(struct bpf_prog **pprog, + struct bpf_prog *prog) +{ + prog = xchg(pprog, prog); + if (prog) + bpf_prog_put(prog); +} + +static inline void psock_progs_drop(struct sk_psock_progs *progs) +{ + psock_set_prog(&progs->msg_parser, NULL); + psock_set_prog(&progs->skb_parser, NULL); + psock_set_prog(&progs->skb_verdict, NULL); +} + 
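As a usage sketch of the reference-counting discipline encoded by the helpers above (the wrapper function is invented), a caller looks up the psock under RCU, takes a reference, and drops it when done:

#include <linux/skmsg.h>

static bool psock_tx_enabled(struct sock *sk)
{
	struct sk_psock *psock;
	bool enabled = false;

	psock = sk_psock_get(sk);	/* NULL if no psock or refcount already zero */
	if (psock) {
		enabled = sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED);
		sk_psock_put(sk, psock);
	}
	return enabled;
}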
+#endif /* _LINUX_SKMSG_H */ diff --git a/include/linux/slab.h b/include/linux/slab.h index ed9cbddeb4a6..918f374e7156 100644 --- a/include/linux/slab.h +++ b/include/linux/slab.h @@ -295,12 +295,43 @@ static inline void __check_heap_object(const void *ptr, unsigned long n, #define SLAB_OBJ_MIN_SIZE (KMALLOC_MIN_SIZE < 16 ? \ (KMALLOC_MIN_SIZE) : 16) +/* + * Whenever changing this, take care of that kmalloc_type() and + * create_kmalloc_caches() still work as intended. + */ +enum kmalloc_cache_type { + KMALLOC_NORMAL = 0, + KMALLOC_RECLAIM, +#ifdef CONFIG_ZONE_DMA + KMALLOC_DMA, +#endif + NR_KMALLOC_TYPES +}; + #ifndef CONFIG_SLOB -extern struct kmem_cache *kmalloc_caches[KMALLOC_SHIFT_HIGH + 1]; +extern struct kmem_cache * +kmalloc_caches[NR_KMALLOC_TYPES][KMALLOC_SHIFT_HIGH + 1]; + +static __always_inline enum kmalloc_cache_type kmalloc_type(gfp_t flags) +{ + int is_dma = 0; + int type_dma = 0; + int is_reclaimable; + #ifdef CONFIG_ZONE_DMA -extern struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1]; + is_dma = !!(flags & __GFP_DMA); + type_dma = is_dma * KMALLOC_DMA; #endif + is_reclaimable = !!(flags & __GFP_RECLAIMABLE); + + /* + * If an allocation is both __GFP_DMA and __GFP_RECLAIMABLE, return + * KMALLOC_DMA and effectively ignore __GFP_RECLAIMABLE + */ + return type_dma + (is_reclaimable & !is_dma) * KMALLOC_RECLAIM; +} + /* * Figure out which kmalloc slab an allocation of a certain size * belongs to. @@ -501,18 +532,20 @@ static __always_inline void *kmalloc_large(size_t size, gfp_t flags) static __always_inline void *kmalloc(size_t size, gfp_t flags) { if (__builtin_constant_p(size)) { +#ifndef CONFIG_SLOB + unsigned int index; +#endif if (size > KMALLOC_MAX_CACHE_SIZE) return kmalloc_large(size, flags); #ifndef CONFIG_SLOB - if (!(flags & GFP_DMA)) { - unsigned int index = kmalloc_index(size); + index = kmalloc_index(size); - if (!index) - return ZERO_SIZE_PTR; + if (!index) + return ZERO_SIZE_PTR; - return kmem_cache_alloc_trace(kmalloc_caches[index], - flags, size); - } + return kmem_cache_alloc_trace( + kmalloc_caches[kmalloc_type(flags)][index], + flags, size); #endif } return __kmalloc(size, flags); @@ -542,13 +575,14 @@ static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node) { #ifndef CONFIG_SLOB if (__builtin_constant_p(size) && - size <= KMALLOC_MAX_CACHE_SIZE && !(flags & GFP_DMA)) { + size <= KMALLOC_MAX_CACHE_SIZE) { unsigned int i = kmalloc_index(size); if (!i) return ZERO_SIZE_PTR; - return kmem_cache_alloc_node_trace(kmalloc_caches[i], + return kmem_cache_alloc_node_trace( + kmalloc_caches[kmalloc_type(flags)][i], flags, node, size); } #endif diff --git a/include/linux/smp.h b/include/linux/smp.h index 9fb239e12b82..a56f08ff3097 100644 --- a/include/linux/smp.h +++ b/include/linux/smp.h @@ -53,6 +53,10 @@ void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info), smp_call_func_t func, void *info, bool wait, gfp_t gfp_flags); +void on_each_cpu_cond_mask(bool (*cond_func)(int cpu, void *info), + smp_call_func_t func, void *info, bool wait, + gfp_t gfp_flags, const struct cpumask *mask); + int smp_call_function_single_async(int cpu, call_single_data_t *csd); #ifdef CONFIG_SMP diff --git a/include/linux/socket.h b/include/linux/socket.h index 7ed4713d5337..8b571e9b9f76 100644 --- a/include/linux/socket.h +++ b/include/linux/socket.h @@ -348,7 +348,7 @@ struct ucred { extern int move_addr_to_kernel(void __user *uaddr, int ulen, struct sockaddr_storage *kaddr); extern int put_cmsg(struct msghdr*, int level, int type, int len, void 
*data); -struct timespec; +struct timespec64; /* The __sys_...msg variants allow MSG_CMSG_COMPAT iff * forbid_cmsg_compat==false @@ -358,7 +358,7 @@ extern long __sys_recvmsg(int fd, struct user_msghdr __user *msg, extern long __sys_sendmsg(int fd, struct user_msghdr __user *msg, unsigned int flags, bool forbid_cmsg_compat); extern int __sys_recvmmsg(int fd, struct mmsghdr __user *mmsg, unsigned int vlen, - unsigned int flags, struct timespec *timeout); + unsigned int flags, struct timespec64 *timeout); extern int __sys_sendmmsg(int fd, struct mmsghdr __user *mmsg, unsigned int vlen, unsigned int flags, bool forbid_cmsg_compat); diff --git a/include/linux/soundwire/sdw.h b/include/linux/soundwire/sdw.h index 962971e6a9c7..df313913e856 100644 --- a/include/linux/soundwire/sdw.h +++ b/include/linux/soundwire/sdw.h @@ -678,6 +678,9 @@ struct sdw_master_ops { * @defer_msg: Defer message * @clk_stop_timeout: Clock stop timeout computed * @bank_switch_timeout: Bank switch timeout computed + * @multi_link: Store bus property that indicates if multi links + * are supported. This flag is populated by drivers after reading + * appropriate firmware (ACPI/DT). */ struct sdw_bus { struct device *dev; @@ -694,6 +697,7 @@ struct sdw_bus { struct sdw_defer defer_msg; unsigned int clk_stop_timeout; u32 bank_switch_timeout; + bool multi_link; }; int sdw_add_bus_master(struct sdw_bus *bus); @@ -768,14 +772,18 @@ struct sdw_stream_params { * @params: Stream parameters * @state: Current state of the stream * @type: Stream type PCM or PDM - * @m_rt: Master runtime + * @master_list: List of Master runtime(s) in this stream. + * master_list can contain only one m_rt per Master instance + * for a stream + * @m_rt_count: Count of Master runtime(s) in this stream */ struct sdw_stream_runtime { char *name; struct sdw_stream_params params; enum sdw_stream_state state; enum sdw_stream_type type; - struct sdw_master_runtime *m_rt; + struct list_head master_list; + int m_rt_count; }; struct sdw_stream_runtime *sdw_alloc_stream(char *stream_name); diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h index a64235e05321..6be77fa5ab90 100644 --- a/include/linux/spi/spi.h +++ b/include/linux/spi/spi.h @@ -1,15 +1,6 @@ -/* - * Copyright (C) 2005 David Brownell +/* SPDX-License-Identifier: GPL-2.0-or-later * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
+ * Copyright (C) 2005 David Brownell */ #ifndef __LINUX_SPI_H @@ -163,10 +154,12 @@ struct spi_device { #define SPI_TX_QUAD 0x200 /* transmit with 4 wires */ #define SPI_RX_DUAL 0x400 /* receive with 2 wires */ #define SPI_RX_QUAD 0x800 /* receive with 4 wires */ +#define SPI_CS_WORD 0x1000 /* toggle cs after each word */ int irq; void *controller_state; void *controller_data; char modalias[SPI_NAME_SIZE]; + const char *driver_override; int cs_gpio; /* chip select gpio */ /* the statistics */ @@ -177,7 +170,6 @@ struct spi_device { * the controller talks to each chip, like: * - memory packing (12 bit samples into low bits, others zeroed) * - priority - * - drop chipselect after each word * - chipselect delays * - ... */ @@ -711,6 +703,8 @@ extern void spi_res_release(struct spi_controller *ctlr, * @delay_usecs: microseconds to delay after this transfer before * (optionally) changing the chipselect status, then starting * the next transfer or completing this @spi_message. + * @word_delay: clock cycles to inter word delay after each word size + * (set by bits_per_word) transmission. * @transfer_list: transfers are sequenced through @spi_message.transfers * @tx_sg: Scatterlist for transmit, currently not for client use * @rx_sg: Scatterlist for receive, currently not for client use @@ -793,6 +787,7 @@ struct spi_transfer { u8 bits_per_word; u16 delay_usecs; u32 speed_hz; + u16 word_delay; struct list_head transfer_list; }; @@ -1277,7 +1272,6 @@ spi_register_board_info(struct spi_board_info const *info, unsigned n) { return 0; } #endif - /* If you're hotplugging an adapter with devices (parport, usb, etc) * use spi_new_device() to describe each device. You can also call * spi_unregister_device() to start making that device vanish, but @@ -1309,6 +1303,22 @@ spi_transfer_is_last(struct spi_controller *ctlr, struct spi_transfer *xfer) return list_is_last(&xfer->transfer_list, &ctlr->cur_msg->transfers); } +/* OF support code */ +#if IS_ENABLED(CONFIG_OF) + +/* must call put_device() when done with returned spi_device device */ +extern struct spi_device * +of_find_spi_device_by_node(struct device_node *node); + +#else + +static inline struct spi_device * +of_find_spi_device_by_node(struct device_node *node) +{ + return NULL; +} + +#endif /* IS_ENABLED(CONFIG_OF) */ /* Compatibility layer */ #define spi_master spi_controller diff --git a/include/linux/srcutree.h b/include/linux/srcutree.h index 745d4ca4dd50..0ae91b3a7406 100644 --- a/include/linux/srcutree.h +++ b/include/linux/srcutree.h @@ -105,12 +105,13 @@ struct srcu_struct { #define SRCU_STATE_SCAN2 2 #define __SRCU_STRUCT_INIT(name, pcpu_name) \ - { \ - .sda = &pcpu_name, \ - .lock = __SPIN_LOCK_UNLOCKED(name.lock), \ - .srcu_gp_seq_needed = 0 - 1, \ - __SRCU_DEP_MAP_INIT(name) \ - } +{ \ + .sda = &pcpu_name, \ + .lock = __SPIN_LOCK_UNLOCKED(name.lock), \ + .srcu_gp_seq_needed = -1UL, \ + .work = __DELAYED_WORK_INITIALIZER(name.work, NULL, 0), \ + __SRCU_DEP_MAP_INIT(name) \ +} /* * Define and initialize a srcu struct at build time. diff --git a/include/linux/start_kernel.h b/include/linux/start_kernel.h index 4b268d86a784..8b369a41c03c 100644 --- a/include/linux/start_kernel.h +++ b/include/linux/start_kernel.h @@ -9,5 +9,7 @@ up something else. 
*/ extern asmlinkage void __init start_kernel(void); +extern void __init arch_call_rest_init(void); +extern void __ref rest_init(void); #endif /* _LINUX_START_KERNEL_H */ diff --git a/include/linux/string.h b/include/linux/string.h index 4a5a0eb7df51..27d0482e5e05 100644 --- a/include/linux/string.h +++ b/include/linux/string.h @@ -131,6 +131,13 @@ static inline void *memset_p(void **p, void *v, __kernel_size_t n) return memset64((uint64_t *)p, (uintptr_t)v, n); } +extern void **__memcat_p(void **a, void **b); +#define memcat_p(a, b) ({ \ + BUILD_BUG_ON_MSG(!__same_type(*(a), *(b)), \ + "type mismatch in memcat_p()"); \ + (typeof(*a) *)__memcat_p((void **)(a), (void **)(b)); \ +}) + #ifndef __HAVE_ARCH_MEMCPY extern void * memcpy(void *,const void *,__kernel_size_t); #endif diff --git a/include/linux/sunrpc/auth.h b/include/linux/sunrpc/auth.h index 58a6765c1c5e..c4db9424b63b 100644 --- a/include/linux/sunrpc/auth.h +++ b/include/linux/sunrpc/auth.h @@ -67,7 +67,7 @@ struct rpc_cred { const struct rpc_credops *cr_ops; unsigned long cr_expire; /* when to gc */ unsigned long cr_flags; /* various flags */ - atomic_t cr_count; /* ref count */ + refcount_t cr_count; /* ref count */ kuid_t cr_uid; @@ -100,7 +100,7 @@ struct rpc_auth { * differ from the flavor in * au_ops->au_flavor in gss * case) */ - atomic_t au_count; /* Reference counter */ + refcount_t au_count; /* Reference counter */ struct rpc_cred_cache * au_credcache; /* per-flavor data */ @@ -157,6 +157,7 @@ struct rpc_credops { int (*crkey_timeout)(struct rpc_cred *); bool (*crkey_to_expire)(struct rpc_cred *); char * (*crstringify_acceptor)(struct rpc_cred *); + bool (*crneed_reencode)(struct rpc_task *); }; extern const struct rpc_authops authunix_ops; @@ -192,6 +193,7 @@ __be32 * rpcauth_marshcred(struct rpc_task *, __be32 *); __be32 * rpcauth_checkverf(struct rpc_task *, __be32 *); int rpcauth_wrap_req(struct rpc_task *task, kxdreproc_t encode, void *rqstp, __be32 *data, void *obj); int rpcauth_unwrap_resp(struct rpc_task *task, kxdrdproc_t decode, void *rqstp, __be32 *data, void *obj); +bool rpcauth_xmit_need_reencode(struct rpc_task *task); int rpcauth_refreshcred(struct rpc_task *); void rpcauth_invalcred(struct rpc_task *); int rpcauth_uptodatecred(struct rpc_task *); @@ -204,11 +206,11 @@ bool rpcauth_cred_key_to_expire(struct rpc_auth *, struct rpc_cred *); char * rpcauth_stringify_acceptor(struct rpc_cred *); static inline -struct rpc_cred * get_rpccred(struct rpc_cred *cred) +struct rpc_cred *get_rpccred(struct rpc_cred *cred) { - if (cred != NULL) - atomic_inc(&cred->cr_count); - return cred; + if (cred != NULL && refcount_inc_not_zero(&cred->cr_count)) + return cred; + return NULL; } /** @@ -224,9 +226,7 @@ struct rpc_cred * get_rpccred(struct rpc_cred *cred) static inline struct rpc_cred * get_rpccred_rcu(struct rpc_cred *cred) { - if (atomic_inc_not_zero(&cred->cr_count)) - return cred; - return NULL; + return get_rpccred(cred); } #endif /* __KERNEL__ */ diff --git a/include/linux/sunrpc/auth_gss.h b/include/linux/sunrpc/auth_gss.h index 0c9eac351aab..30427b729070 100644 --- a/include/linux/sunrpc/auth_gss.h +++ b/include/linux/sunrpc/auth_gss.h @@ -70,6 +70,7 @@ struct gss_cl_ctx { refcount_t count; enum rpc_gss_proc gc_proc; u32 gc_seq; + u32 gc_seq_xmit; spinlock_t gc_seq_lock; struct gss_ctx *gc_gss_ctx; struct xdr_netobj gc_wire_ctx; diff --git a/include/linux/sunrpc/bc_xprt.h b/include/linux/sunrpc/bc_xprt.h index 4397a4824c81..28721cf73ec3 100644 --- a/include/linux/sunrpc/bc_xprt.h +++ 
b/include/linux/sunrpc/bc_xprt.h @@ -34,6 +34,7 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #ifdef CONFIG_SUNRPC_BACKCHANNEL struct rpc_rqst *xprt_lookup_bc_request(struct rpc_xprt *xprt, __be32 xid); void xprt_complete_bc_request(struct rpc_rqst *req, uint32_t copied); +void xprt_init_bc_request(struct rpc_rqst *req, struct rpc_task *task); void xprt_free_bc_request(struct rpc_rqst *req); int xprt_setup_backchannel(struct rpc_xprt *, unsigned int min_reqs); void xprt_destroy_backchannel(struct rpc_xprt *, unsigned int max_reqs); diff --git a/include/linux/sunrpc/gss_krb5.h b/include/linux/sunrpc/gss_krb5.h index 7df625d41e35..131424cefc6a 100644 --- a/include/linux/sunrpc/gss_krb5.h +++ b/include/linux/sunrpc/gss_krb5.h @@ -71,10 +71,10 @@ struct gss_krb5_enctype { const u32 keyed_cksum; /* is it a keyed cksum? */ const u32 keybytes; /* raw key len, in bytes */ const u32 keylength; /* final key len, in bytes */ - u32 (*encrypt) (struct crypto_skcipher *tfm, + u32 (*encrypt) (struct crypto_sync_skcipher *tfm, void *iv, void *in, void *out, int length); /* encryption function */ - u32 (*decrypt) (struct crypto_skcipher *tfm, + u32 (*decrypt) (struct crypto_sync_skcipher *tfm, void *iv, void *in, void *out, int length); /* decryption function */ u32 (*mk_key) (const struct gss_krb5_enctype *gk5e, @@ -98,12 +98,12 @@ struct krb5_ctx { u32 enctype; u32 flags; const struct gss_krb5_enctype *gk5e; /* enctype-specific info */ - struct crypto_skcipher *enc; - struct crypto_skcipher *seq; - struct crypto_skcipher *acceptor_enc; - struct crypto_skcipher *initiator_enc; - struct crypto_skcipher *acceptor_enc_aux; - struct crypto_skcipher *initiator_enc_aux; + struct crypto_sync_skcipher *enc; + struct crypto_sync_skcipher *seq; + struct crypto_sync_skcipher *acceptor_enc; + struct crypto_sync_skcipher *initiator_enc; + struct crypto_sync_skcipher *acceptor_enc_aux; + struct crypto_sync_skcipher *initiator_enc_aux; u8 Ksess[GSS_KRB5_MAX_KEYLEN]; /* session key */ u8 cksum[GSS_KRB5_MAX_KEYLEN]; s32 endtime; @@ -118,7 +118,8 @@ struct krb5_ctx { u8 acceptor_integ[GSS_KRB5_MAX_KEYLEN]; }; -extern spinlock_t krb5_seq_lock; +extern u32 gss_seq_send_fetch_and_inc(struct krb5_ctx *ctx); +extern u64 gss_seq_send64_fetch_and_inc(struct krb5_ctx *ctx); /* The length of the Kerberos GSS token header */ #define GSS_KRB5_TOK_HDR_LEN (16) @@ -262,24 +263,24 @@ gss_unwrap_kerberos(struct gss_ctx *ctx_id, int offset, u32 -krb5_encrypt(struct crypto_skcipher *key, +krb5_encrypt(struct crypto_sync_skcipher *key, void *iv, void *in, void *out, int length); u32 -krb5_decrypt(struct crypto_skcipher *key, +krb5_decrypt(struct crypto_sync_skcipher *key, void *iv, void *in, void *out, int length); int -gss_encrypt_xdr_buf(struct crypto_skcipher *tfm, struct xdr_buf *outbuf, +gss_encrypt_xdr_buf(struct crypto_sync_skcipher *tfm, struct xdr_buf *outbuf, int offset, struct page **pages); int -gss_decrypt_xdr_buf(struct crypto_skcipher *tfm, struct xdr_buf *inbuf, +gss_decrypt_xdr_buf(struct crypto_sync_skcipher *tfm, struct xdr_buf *inbuf, int offset); s32 krb5_make_seq_num(struct krb5_ctx *kctx, - struct crypto_skcipher *key, + struct crypto_sync_skcipher *key, int direction, u32 seqnum, unsigned char *cksum, unsigned char *buf); @@ -320,12 +321,12 @@ gss_krb5_aes_decrypt(struct krb5_ctx *kctx, u32 offset, int krb5_rc4_setup_seq_key(struct krb5_ctx *kctx, - struct crypto_skcipher *cipher, + struct crypto_sync_skcipher *cipher, unsigned char *cksum); int krb5_rc4_setup_enc_key(struct krb5_ctx *kctx, - struct 
crypto_skcipher *cipher, + struct crypto_sync_skcipher *cipher, s32 seqnum); void gss_krb5_make_confounder(char *p, u32 conflen); diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h index 592653becd91..7b540c066594 100644 --- a/include/linux/sunrpc/sched.h +++ b/include/linux/sunrpc/sched.h @@ -140,8 +140,9 @@ struct rpc_task_setup { #define RPC_TASK_RUNNING 0 #define RPC_TASK_QUEUED 1 #define RPC_TASK_ACTIVE 2 -#define RPC_TASK_MSG_RECV 3 -#define RPC_TASK_MSG_RECV_WAIT 4 +#define RPC_TASK_NEED_XMIT 3 +#define RPC_TASK_NEED_RECV 4 +#define RPC_TASK_MSG_PIN_WAIT 5 #define RPC_IS_RUNNING(t) test_bit(RPC_TASK_RUNNING, &(t)->tk_runstate) #define rpc_set_running(t) set_bit(RPC_TASK_RUNNING, &(t)->tk_runstate) @@ -188,7 +189,6 @@ struct rpc_timer { struct rpc_wait_queue { spinlock_t lock; struct list_head tasks[RPC_NR_PRIORITY]; /* task queue for each priority level */ - pid_t owner; /* process id of last task serviced */ unsigned char maxpriority; /* maximum priority (0 if queue is not a priority queue) */ unsigned char priority; /* current priority */ unsigned char nr; /* # tasks remaining for cookie */ @@ -204,7 +204,6 @@ struct rpc_wait_queue { * from a single cookie. The aim is to improve * performance of NFS operations such as read/write. */ -#define RPC_BATCH_COUNT 16 #define RPC_IS_PRIORITY(q) ((q)->maxpriority > 0) /* @@ -234,6 +233,9 @@ void rpc_wake_up_queued_task_on_wq(struct workqueue_struct *wq, struct rpc_task *task); void rpc_wake_up_queued_task(struct rpc_wait_queue *, struct rpc_task *); +void rpc_wake_up_queued_task_set_status(struct rpc_wait_queue *, + struct rpc_task *, + int); void rpc_wake_up(struct rpc_wait_queue *); struct rpc_task *rpc_wake_up_next(struct rpc_wait_queue *); struct rpc_task *rpc_wake_up_first_on_wq(struct workqueue_struct *wq, diff --git a/include/linux/sunrpc/svc_xprt.h b/include/linux/sunrpc/svc_xprt.h index c3d72066d4b1..6b7a86c4d6e6 100644 --- a/include/linux/sunrpc/svc_xprt.h +++ b/include/linux/sunrpc/svc_xprt.h @@ -84,7 +84,6 @@ struct svc_xprt { struct sockaddr_storage xpt_remote; /* remote peer's address */ size_t xpt_remotelen; /* length of address */ char xpt_remotebuf[INET6_ADDRSTRLEN + 10]; - struct rpc_wait_queue xpt_bc_pending; /* backchannel wait queue */ struct list_head xpt_users; /* callbacks on free */ struct net *xpt_net; diff --git a/include/linux/sunrpc/xdr.h b/include/linux/sunrpc/xdr.h index 2bd68177a442..43106ffa6788 100644 --- a/include/linux/sunrpc/xdr.h +++ b/include/linux/sunrpc/xdr.h @@ -18,6 +18,7 @@ #include <asm/unaligned.h> #include <linux/scatterlist.h> +struct bio_vec; struct rpc_rqst; /* @@ -52,12 +53,14 @@ struct xdr_buf { struct kvec head[1], /* RPC header + non-page data */ tail[1]; /* Appended after page data */ + struct bio_vec *bvec; struct page ** pages; /* Array of pages */ unsigned int page_base, /* Start of page data */ page_len, /* Length of page data */ flags; /* Flags for data disposition */ #define XDRBUF_READ 0x01 /* target of file read */ #define XDRBUF_WRITE 0x02 /* source of file write */ +#define XDRBUF_SPARSE_PAGES 0x04 /* Page array is sparse */ unsigned int buflen, /* Total length of storage buffer */ len; /* Length of XDR encoded message */ @@ -69,6 +72,8 @@ xdr_buf_init(struct xdr_buf *buf, void *start, size_t len) buf->head[0].iov_base = start; buf->head[0].iov_len = len; buf->tail[0].iov_len = 0; + buf->bvec = NULL; + buf->pages = NULL; buf->page_len = 0; buf->flags = 0; buf->len = 0; @@ -115,6 +120,9 @@ __be32 *xdr_decode_netobj(__be32 *p, struct xdr_netobj *); void 
xdr_inline_pages(struct xdr_buf *, unsigned int, struct page **, unsigned int, unsigned int); void xdr_terminate_string(struct xdr_buf *, const u32); +size_t xdr_buf_pagecount(struct xdr_buf *buf); +int xdr_alloc_bvec(struct xdr_buf *buf, gfp_t gfp); +void xdr_free_bvec(struct xdr_buf *buf); static inline __be32 *xdr_encode_array(__be32 *p, const void *s, unsigned int len) { @@ -177,10 +185,7 @@ struct xdr_skb_reader { typedef size_t (*xdr_skb_read_actor)(struct xdr_skb_reader *desc, void *to, size_t len); -size_t xdr_skb_read_bits(struct xdr_skb_reader *desc, void *to, size_t len); extern int csum_partial_copy_to_xdr(struct xdr_buf *, struct sk_buff *); -extern ssize_t xdr_partial_copy_from_skb(struct xdr_buf *, unsigned int, - struct xdr_skb_reader *, xdr_skb_read_actor); extern int xdr_encode_word(struct xdr_buf *, unsigned int, u32); extern int xdr_decode_word(struct xdr_buf *, unsigned int, u32 *); diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h index 336fd1a19cca..a4ab4f8d9140 100644 --- a/include/linux/sunrpc/xprt.h +++ b/include/linux/sunrpc/xprt.h @@ -82,7 +82,14 @@ struct rpc_rqst { struct page **rq_enc_pages; /* scratch pages for use by gss privacy code */ void (*rq_release_snd_buf)(struct rpc_rqst *); /* release rq_enc_pages */ - struct list_head rq_list; + + union { + struct list_head rq_list; /* Slot allocation list */ + struct rb_node rq_recv; /* Receive queue */ + }; + + struct list_head rq_xmit; /* Send queue */ + struct list_head rq_xmit2; /* Send queue */ void *rq_buffer; /* Call XDR encode buffer */ size_t rq_callsize; @@ -103,6 +110,7 @@ struct rpc_rqst { /* A cookie used to track the state of the transport connection */ + atomic_t rq_pin; /* * Partial send handling @@ -133,7 +141,8 @@ struct rpc_xprt_ops { void (*connect)(struct rpc_xprt *xprt, struct rpc_task *task); int (*buf_alloc)(struct rpc_task *task); void (*buf_free)(struct rpc_task *task); - int (*send_request)(struct rpc_task *task); + void (*prepare_request)(struct rpc_rqst *req); + int (*send_request)(struct rpc_rqst *req); void (*set_retrans_timeout)(struct rpc_task *task); void (*timer)(struct rpc_xprt *xprt, struct rpc_task *task); void (*release_request)(struct rpc_task *task); @@ -234,9 +243,12 @@ struct rpc_xprt { */ spinlock_t transport_lock; /* lock transport info */ spinlock_t reserve_lock; /* lock slot table */ - spinlock_t recv_lock; /* lock receive list */ + spinlock_t queue_lock; /* send/receive queue lock */ u32 xid; /* Next XID value to use */ struct rpc_task * snd_task; /* Task blocked in send */ + + struct list_head xmit_queue; /* Send queue */ + struct svc_xprt *bc_xprt; /* NFSv4.1 backchannel */ #if defined(CONFIG_SUNRPC_BACKCHANNEL) struct svc_serv *bc_serv; /* The RPC service which will */ @@ -248,7 +260,8 @@ struct rpc_xprt { struct list_head bc_pa_list; /* List of preallocated * backchannel rpc_rqst's */ #endif /* CONFIG_SUNRPC_BACKCHANNEL */ - struct list_head recv; + + struct rb_root recv_queue; /* Receive queue */ struct { unsigned long bind_count, /* total number of binds */ @@ -325,15 +338,18 @@ struct xprt_class { struct rpc_xprt *xprt_create_transport(struct xprt_create *args); void xprt_connect(struct rpc_task *task); void xprt_reserve(struct rpc_task *task); -void xprt_request_init(struct rpc_task *task); void xprt_retry_reserve(struct rpc_task *task); int xprt_reserve_xprt(struct rpc_xprt *xprt, struct rpc_task *task); int xprt_reserve_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task); void xprt_alloc_slot(struct rpc_xprt *xprt, struct rpc_task 
*task); void xprt_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req); -void xprt_lock_and_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task); +void xprt_request_prepare(struct rpc_rqst *req); bool xprt_prepare_transmit(struct rpc_task *task); +void xprt_request_enqueue_transmit(struct rpc_task *task); +void xprt_request_enqueue_receive(struct rpc_task *task); +void xprt_request_wait_receive(struct rpc_task *task); +bool xprt_request_need_retransmit(struct rpc_task *task); void xprt_transmit(struct rpc_task *task); void xprt_end_transmit(struct rpc_task *task); int xprt_adjust_timeout(struct rpc_rqst *req); @@ -373,8 +389,8 @@ int xprt_load_transport(const char *); void xprt_set_retrans_timeout_def(struct rpc_task *task); void xprt_set_retrans_timeout_rtt(struct rpc_task *task); void xprt_wake_pending_tasks(struct rpc_xprt *xprt, int status); -void xprt_wait_for_buffer_space(struct rpc_task *task, rpc_action action); -void xprt_write_space(struct rpc_xprt *xprt); +void xprt_wait_for_buffer_space(struct rpc_xprt *xprt); +bool xprt_write_space(struct rpc_xprt *xprt); void xprt_adjust_cwnd(struct rpc_xprt *xprt, struct rpc_task *task, int result); struct rpc_rqst * xprt_lookup_rqst(struct rpc_xprt *xprt, __be32 xid); void xprt_update_rtt(struct rpc_task *task); @@ -382,6 +398,7 @@ void xprt_complete_rqst(struct rpc_task *task, int copied); void xprt_pin_rqst(struct rpc_rqst *req); void xprt_unpin_rqst(struct rpc_rqst *req); void xprt_release_rqst_cong(struct rpc_task *task); +bool xprt_request_get_cong(struct rpc_xprt *xprt, struct rpc_rqst *req); void xprt_disconnect_done(struct rpc_xprt *xprt); void xprt_force_disconnect(struct rpc_xprt *xprt); void xprt_conditional_disconnect(struct rpc_xprt *xprt, unsigned int cookie); @@ -400,6 +417,8 @@ void xprt_unlock_connect(struct rpc_xprt *, void *); #define XPRT_BINDING (5) #define XPRT_CLOSING (6) #define XPRT_CONGESTED (9) +#define XPRT_CWND_WAIT (10) +#define XPRT_WRITE_SPACE (11) static inline void xprt_set_connected(struct rpc_xprt *xprt) { diff --git a/include/linux/sunrpc/xprtsock.h b/include/linux/sunrpc/xprtsock.h index ae0f99b9b965..458bfe0137f5 100644 --- a/include/linux/sunrpc/xprtsock.h +++ b/include/linux/sunrpc/xprtsock.h @@ -30,15 +30,25 @@ struct sock_xprt { /* * State of TCP reply receive */ - __be32 tcp_fraghdr, - tcp_xid, - tcp_calldir; + struct { + struct { + __be32 fraghdr, + xid, + calldir; + } __attribute__((packed)); - u32 tcp_offset, - tcp_reclen; + u32 offset, + len; - unsigned long tcp_copied, - tcp_flags; + unsigned long copied; + } recv; + + /* + * State of TCP transmit queue + */ + struct { + u32 offset; + } xmit; /* * Connection of transports @@ -68,20 +78,8 @@ struct sock_xprt { }; /* - * TCP receive state flags - */ -#define TCP_RCV_LAST_FRAG (1UL << 0) -#define TCP_RCV_COPY_FRAGHDR (1UL << 1) -#define TCP_RCV_COPY_XID (1UL << 2) -#define TCP_RCV_COPY_DATA (1UL << 3) -#define TCP_RCV_READ_CALLDIR (1UL << 4) -#define TCP_RCV_COPY_CALLDIR (1UL << 5) - -/* * TCP RPC flags */ -#define TCP_RPC_REPLY (1UL << 6) - #define XPRT_SOCK_CONNECTING 1U #define XPRT_SOCK_DATA_READY (2) #define XPRT_SOCK_UPD_TIMEOUT (3) diff --git a/include/linux/suspend.h b/include/linux/suspend.h index 5a28ac9284f0..3f529ad9a9d2 100644 --- a/include/linux/suspend.h +++ b/include/linux/suspend.h @@ -251,6 +251,7 @@ static inline bool idle_should_enter_s2idle(void) return unlikely(s2idle_state == S2IDLE_STATE_ENTER); } +extern bool pm_suspend_via_s2idle(void); extern void __init pm_states_init(void); extern void s2idle_set_ops(const 
struct platform_s2idle_ops *ops); extern void s2idle_wake(void); @@ -282,6 +283,7 @@ static inline void pm_set_suspend_via_firmware(void) {} static inline void pm_set_resume_via_firmware(void) {} static inline bool pm_suspend_via_firmware(void) { return false; } static inline bool pm_resume_via_firmware(void) { return false; } +static inline bool pm_suspend_via_s2idle(void) { return false; } static inline void suspend_set_ops(const struct platform_suspend_ops *ops) {} static inline int pm_suspend(suspend_state_t state) { return -ENOSYS; } diff --git a/include/linux/swap.h b/include/linux/swap.h index cb479bf5842e..d8a07a4f171d 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -167,13 +167,14 @@ enum { SWP_SOLIDSTATE = (1 << 4), /* blkdev seeks are cheap */ SWP_CONTINUED = (1 << 5), /* swap_map has count continuation */ SWP_BLKDEV = (1 << 6), /* its a block device */ - SWP_FILE = (1 << 7), /* set after swap_activate success */ - SWP_AREA_DISCARD = (1 << 8), /* single-time swap area discards */ - SWP_PAGE_DISCARD = (1 << 9), /* freed swap page-cluster discards */ - SWP_STABLE_WRITES = (1 << 10), /* no overwrite PG_writeback pages */ - SWP_SYNCHRONOUS_IO = (1 << 11), /* synchronous IO is efficient */ + SWP_ACTIVATED = (1 << 7), /* set after swap_activate success */ + SWP_FS = (1 << 8), /* swap file goes through fs */ + SWP_AREA_DISCARD = (1 << 9), /* single-time swap area discards */ + SWP_PAGE_DISCARD = (1 << 10), /* freed swap page-cluster discards */ + SWP_STABLE_WRITES = (1 << 11), /* no overwrite PG_writeback pages */ + SWP_SYNCHRONOUS_IO = (1 << 12), /* synchronous IO is efficient */ /* add others here before... */ - SWP_SCANNING = (1 << 12), /* refcount in scan_swap_map */ + SWP_SCANNING = (1 << 13), /* refcount in scan_swap_map */ }; #define SWAP_CLUSTER_MAX 32UL @@ -296,7 +297,7 @@ struct vma_swap_readahead { /* linux/mm/workingset.c */ void *workingset_eviction(struct address_space *mapping, struct page *page); -bool workingset_refault(void *shadow); +void workingset_refault(struct page *page, void *shadow); void workingset_activation(struct page *page); /* Only track the nodes of mappings with shadow entries */ diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h index 965be92c33b5..a387b59640a4 100644 --- a/include/linux/swiotlb.h +++ b/include/linux/swiotlb.h @@ -67,11 +67,6 @@ extern void swiotlb_tbl_sync_single(struct device *hwdev, /* Accessory functions. 
*/ -void *swiotlb_alloc(struct device *hwdev, size_t size, dma_addr_t *dma_handle, - gfp_t flags, unsigned long attrs); -void swiotlb_free(struct device *dev, size_t size, void *vaddr, - dma_addr_t dma_addr, unsigned long attrs); - extern dma_addr_t swiotlb_map_page(struct device *dev, struct page *page, unsigned long offset, size_t size, enum dma_data_direction dir, @@ -107,9 +102,6 @@ swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg, int nelems, enum dma_data_direction dir); extern int -swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr); - -extern int swiotlb_dma_supported(struct device *hwdev, u64 mask); #ifdef CONFIG_SWIOTLB @@ -121,7 +113,6 @@ static inline unsigned int swiotlb_max_segment(void) { return 0; } #endif extern void swiotlb_print_info(void); -extern int is_swiotlb_buffer(phys_addr_t paddr); extern void swiotlb_set_max_segment(unsigned int); extern const struct dma_map_ops swiotlb_dma_ops; diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h index 2ff814c92f7f..2ac3d13a915b 100644 --- a/include/linux/syscalls.h +++ b/include/linux/syscalls.h @@ -60,7 +60,7 @@ struct tms; struct utimbuf; struct mq_attr; struct compat_stat; -struct compat_timeval; +struct old_timeval32; struct robust_list_head; struct getcpu_cache; struct old_linux_dirent; @@ -513,7 +513,8 @@ asmlinkage long sys_timerfd_gettime(int ufd, struct __kernel_itimerspec __user * /* fs/utimes.c */ asmlinkage long sys_utimensat(int dfd, const char __user *filename, - struct timespec __user *utimes, int flags); + struct __kernel_timespec __user *utimes, + int flags); /* kernel/acct.c */ asmlinkage long sys_acct(const char __user *name); @@ -613,7 +614,7 @@ asmlinkage long sys_sched_yield(void); asmlinkage long sys_sched_get_priority_max(int policy); asmlinkage long sys_sched_get_priority_min(int policy); asmlinkage long sys_sched_rr_get_interval(pid_t pid, - struct timespec __user *interval); + struct __kernel_timespec __user *interval); /* kernel/signal.c */ asmlinkage long sys_restart_syscall(void); @@ -634,7 +635,7 @@ asmlinkage long sys_rt_sigprocmask(int how, sigset_t __user *set, asmlinkage long sys_rt_sigpending(sigset_t __user *set, size_t sigsetsize); asmlinkage long sys_rt_sigtimedwait(const sigset_t __user *uthese, siginfo_t __user *uinfo, - const struct timespec __user *uts, + const struct __kernel_timespec __user *uts, size_t sigsetsize); asmlinkage long sys_rt_sigqueueinfo(pid_t pid, int sig, siginfo_t __user *uinfo); @@ -829,7 +830,7 @@ asmlinkage long sys_perf_event_open( asmlinkage long sys_accept4(int, struct sockaddr __user *, int __user *, int); asmlinkage long sys_recvmmsg(int fd, struct mmsghdr __user *msg, unsigned int vlen, unsigned flags, - struct timespec __user *timeout); + struct __kernel_timespec __user *timeout); asmlinkage long sys_wait4(pid_t pid, int __user *stat_addr, int options, struct rusage __user *ru); @@ -954,8 +955,6 @@ asmlinkage long sys_access(const char __user *filename, int mode); asmlinkage long sys_rename(const char __user *oldname, const char __user *newname); asmlinkage long sys_symlink(const char __user *old, const char __user *new); -asmlinkage long sys_utimes(char __user *filename, - struct timeval __user *utimes); #if defined(__ARCH_WANT_STAT64) || defined(__ARCH_WANT_COMPAT_STAT64) asmlinkage long sys_stat64(const char __user *filename, struct stat64 __user *statbuf); @@ -985,14 +984,18 @@ asmlinkage long sys_alarm(unsigned int seconds); asmlinkage long sys_getpgrp(void); asmlinkage long sys_pause(void); 
asmlinkage long sys_time(time_t __user *tloc); +#ifdef __ARCH_WANT_SYS_UTIME asmlinkage long sys_utime(char __user *filename, struct utimbuf __user *times); +asmlinkage long sys_utimes(char __user *filename, + struct timeval __user *utimes); +asmlinkage long sys_futimesat(int dfd, const char __user *filename, + struct timeval __user *utimes); +#endif asmlinkage long sys_creat(const char __user *pathname, umode_t mode); asmlinkage long sys_getdents(unsigned int fd, struct linux_dirent __user *dirent, unsigned int count); -asmlinkage long sys_futimesat(int dfd, const char __user *filename, - struct timeval __user *utimes); asmlinkage long sys_select(int n, fd_set __user *inp, fd_set __user *outp, fd_set __user *exp, struct timeval __user *tvp); asmlinkage long sys_poll(struct pollfd __user *ufds, unsigned int nfds, diff --git a/include/linux/tc.h b/include/linux/tc.h index f92511e57cdb..a60639f37963 100644 --- a/include/linux/tc.h +++ b/include/linux/tc.h @@ -84,6 +84,7 @@ struct tc_dev { device. */ struct device dev; /* Generic device interface. */ struct resource resource; /* Address space of this device. */ + u64 dma_mask; /* DMA addressable range. */ char vendor[9]; char name[9]; char firmware[9]; diff --git a/include/linux/tcp.h b/include/linux/tcp.h index 263e37271afd..8ed77bb4ed86 100644 --- a/include/linux/tcp.h +++ b/include/linux/tcp.h @@ -248,6 +248,9 @@ struct tcp_sock { syn_smc:1; /* SYN includes SMC */ u32 tlp_high_seq; /* snd_nxt at the time of TLP retransmit. */ + u64 tcp_wstamp_ns; /* departure time for next sent data packet */ + u64 tcp_clock_cache; /* cache last tcp_clock_ns() (see tcp_mstamp_refresh()) */ + /* RTT measurement */ u64 tcp_mstamp; /* most recent packet received/sent */ u32 srtt_us; /* smoothed round trip time << 3 in usecs */ diff --git a/include/linux/thunderbolt.h b/include/linux/thunderbolt.h index a3ed26082bc1..bf6ec83e60ee 100644 --- a/include/linux/thunderbolt.h +++ b/include/linux/thunderbolt.h @@ -1,3 +1,4 @@ +/* SPDX-License-Identifier: GPL-2.0 */ /* * Thunderbolt service API * @@ -5,10 +6,6 @@ * Copyright (C) 2017, Intel Corporation * Authors: Michael Jamet <michael.jamet@intel.com> * Mika Westerberg <mika.westerberg@linux.intel.com> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
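
The __ARCH_WANT_SYS_UTIME guard added to syscalls.h above is opt-in per architecture. A sketch of the consumer side, assuming a hypothetical arch header that is not part of this patch:

/* arch/example/include/asm/unistd.h (hypothetical) */
#define __ARCH_WANT_SYS_UTIME	/* keep legacy utime()/utimes()/futimesat() */

/* Architectures that only ever had 64-bit time_t can leave the macro
 * undefined, so utimensat() remains the sole file-timestamp syscall. */
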
*/ #ifndef THUNDERBOLT_H_ diff --git a/include/linux/time32.h b/include/linux/time32.h index d1ae43c13e25..61904a6c098f 100644 --- a/include/linux/time32.h +++ b/include/linux/time32.h @@ -13,6 +13,36 @@ #define TIME_T_MAX (time_t)((1UL << ((sizeof(time_t) << 3) - 1)) - 1) +typedef s32 old_time32_t; + +struct old_timespec32 { + old_time32_t tv_sec; + s32 tv_nsec; +}; + +struct old_timeval32 { + old_time32_t tv_sec; + s32 tv_usec; +}; + +struct old_itimerspec32 { + struct old_timespec32 it_interval; + struct old_timespec32 it_value; +}; + +struct old_utimbuf32 { + old_time32_t actime; + old_time32_t modtime; +}; + +extern int get_old_timespec32(struct timespec64 *, const void __user *); +extern int put_old_timespec32(const struct timespec64 *, void __user *); +extern int get_old_itimerspec32(struct itimerspec64 *its, + const struct old_itimerspec32 __user *uits); +extern int put_old_itimerspec32(const struct itimerspec64 *its, + struct old_itimerspec32 __user *uits); + + #if __BITS_PER_LONG == 64 /* timespec64 is defined as timespec here */ @@ -105,16 +135,6 @@ static inline bool timespec_valid(const struct timespec *ts) return true; } -static inline bool timespec_valid_strict(const struct timespec *ts) -{ - if (!timespec_valid(ts)) - return false; - /* Disallow values that could overflow ktime_t */ - if ((unsigned long long)ts->tv_sec >= KTIME_SEC_MAX) - return false; - return true; -} - /** * timespec_to_ns - Convert timespec to nanoseconds * @ts: pointer to the timespec variable to be converted @@ -149,19 +169,6 @@ static __always_inline void timespec_add_ns(struct timespec *a, u64 ns) a->tv_nsec = ns; } -/** - * time_to_tm - converts the calendar time to local broken-down time - * - * @totalsecs the number of seconds elapsed since 00:00:00 on January 1, 1970, - * Coordinated Universal Time (UTC). - * @offset offset seconds adding to totalsecs. - * @result pointer to struct tm variable to receive broken-down time - */ -static inline void time_to_tm(time_t totalsecs, int offset, struct tm *result) -{ - time64_to_tm(totalsecs, offset, result); -} - static inline unsigned long mktime(const unsigned int year, const unsigned int mon, const unsigned int day, const unsigned int hour, const unsigned int min, @@ -183,8 +190,6 @@ static inline bool timeval_valid(const struct timeval *tv) return true; } -extern struct timespec timespec_trunc(struct timespec t, unsigned int gran); - /** * timeval_to_ns - Convert timeval to nanoseconds * @ts: pointer to the timeval variable to be converted @@ -208,18 +213,17 @@ extern struct timeval ns_to_timeval(const s64 nsec); extern struct __kernel_old_timeval ns_to_kernel_old_timeval(s64 nsec); /* - * New aliases for compat time functions. These will be used to replace - * the compat code so it can be shared between 32-bit and 64-bit builds - * both of which provide compatibility with old 32-bit tasks. + * Old names for the 32-bit time_t interfaces, these will be removed + * when everything uses the new names. 
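
A rough sketch of how the old_timespec32 helpers declared above are typically used on a 32-bit or compat syscall path; everything except get_old_timespec32() and timespec64_valid() is made up for the example:

#include <linux/time32.h>
#include <linux/time64.h>
#include <linux/errno.h>

/* stand-in for the shared 64-bit implementation */
static long example_wait_ts64(const struct timespec64 *ts)
{
	return 0;
}

static long example_wait_time32(const struct old_timespec32 __user *uts)
{
	struct timespec64 ts;

	if (get_old_timespec32(&ts, uts))
		return -EFAULT;
	if (!timespec64_valid(&ts))
		return -EINVAL;

	/* from here on, only the 64-bit representation is used */
	return example_wait_ts64(&ts);
}
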
*/ -#define old_time32_t compat_time_t -#define old_timeval32 compat_timeval -#define old_timespec32 compat_timespec -#define old_itimerspec32 compat_itimerspec -#define ns_to_old_timeval32 ns_to_compat_timeval -#define get_old_itimerspec32 get_compat_itimerspec64 -#define put_old_itimerspec32 put_compat_itimerspec64 -#define get_old_timespec32 compat_get_timespec64 -#define put_old_timespec32 compat_put_timespec64 +#define compat_time_t old_time32_t +#define compat_timeval old_timeval32 +#define compat_timespec old_timespec32 +#define compat_itimerspec old_itimerspec32 +#define ns_to_compat_timeval ns_to_old_timeval32 +#define get_compat_itimerspec64 get_old_itimerspec32 +#define put_compat_itimerspec64 put_old_itimerspec32 +#define compat_get_timespec64 get_old_timespec32 +#define compat_put_timespec64 put_old_timespec32 #endif diff --git a/include/linux/timekeeping.h b/include/linux/timekeeping.h index a5a3cfc3c2fa..29975e93fcb8 100644 --- a/include/linux/timekeeping.h +++ b/include/linux/timekeeping.h @@ -266,9 +266,6 @@ extern int update_persistent_clock64(struct timespec64 now); * deprecated aliases, don't use in new code */ #define getnstimeofday64(ts) ktime_get_real_ts64(ts) -#define get_monotonic_boottime64(ts) ktime_get_boottime_ts64(ts) -#define getrawmonotonic64(ts) ktime_get_raw_ts64(ts) -#define timekeeping_clocktai64(ts) ktime_get_clocktai_ts64(ts) static inline struct timespec64 current_kernel_time64(void) { @@ -279,13 +276,4 @@ static inline struct timespec64 current_kernel_time64(void) return ts; } -static inline struct timespec64 get_monotonic_coarse64(void) -{ - struct timespec64 ts; - - ktime_get_coarse_ts64(&ts); - - return ts; -} - #endif diff --git a/include/linux/timekeeping32.h b/include/linux/timekeeping32.h index 8762c2f45f8b..a502616f7e1c 100644 --- a/include/linux/timekeeping32.h +++ b/include/linux/timekeeping32.h @@ -6,27 +6,18 @@ * over time so we can remove the file here. */ -extern void do_gettimeofday(struct timeval *tv); -unsigned long get_seconds(void); - -static inline struct timespec current_kernel_time(void) +static inline void do_gettimeofday(struct timeval *tv) { - struct timespec64 ts64; + struct timespec64 now; - ktime_get_coarse_real_ts64(&ts64); - - return timespec64_to_timespec(ts64); + ktime_get_real_ts64(&now); + tv->tv_sec = now.tv_sec; + tv->tv_usec = now.tv_nsec/1000; } -/** - * Deprecated. Use do_settimeofday64(). 
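
For the deprecated wrappers being trimmed from timekeeping32.h here, the intended replacements are the timespec64-based interfaces. A small illustrative pair (the function names are just for the example):

#include <linux/timekeeping.h>
#include <linux/time64.h>
#include <linux/printk.h>

static void example_read_wall_clock(void)
{
	struct timespec64 now;

	ktime_get_real_ts64(&now);	/* instead of do_gettimeofday()/getnstimeofday() */
	pr_info("now: %lld.%09ld\n", (long long)now.tv_sec, now.tv_nsec);
}

static int example_set_wall_clock(time64_t sec, long nsec)
{
	struct timespec64 ts = { .tv_sec = sec, .tv_nsec = nsec };

	return do_settimeofday64(&ts);	/* instead of do_settimeofday() */
}
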
- */ -static inline int do_settimeofday(const struct timespec *ts) +static inline unsigned long get_seconds(void) { - struct timespec64 ts64; - - ts64 = timespec_to_timespec64(*ts); - return do_settimeofday64(&ts64); + return ktime_get_real_seconds(); } static inline void getnstimeofday(struct timespec *ts) @@ -45,14 +36,6 @@ static inline void ktime_get_ts(struct timespec *ts) *ts = timespec64_to_timespec(ts64); } -static inline void ktime_get_real_ts(struct timespec *ts) -{ - struct timespec64 ts64; - - ktime_get_real_ts64(&ts64); - *ts = timespec64_to_timespec(ts64); -} - static inline void getrawmonotonic(struct timespec *ts) { struct timespec64 ts64; @@ -61,15 +44,6 @@ static inline void getrawmonotonic(struct timespec *ts) *ts = timespec64_to_timespec(ts64); } -static inline struct timespec get_monotonic_coarse(void) -{ - struct timespec64 ts64; - - ktime_get_coarse_ts64(&ts64); - - return timespec64_to_timespec(ts64); -} - static inline void getboottime(struct timespec *ts) { struct timespec64 ts64; @@ -79,19 +53,6 @@ static inline void getboottime(struct timespec *ts) } /* - * Timespec interfaces utilizing the ktime based ones - */ -static inline void get_monotonic_boottime(struct timespec *ts) -{ - *ts = ktime_to_timespec(ktime_get_boottime()); -} - -static inline void timekeeping_clocktai(struct timespec *ts) -{ - *ts = ktime_to_timespec(ktime_get_clocktai()); -} - -/* * Persistent clock related interfaces */ extern void read_persistent_clock(struct timespec *ts); diff --git a/include/linux/torture.h b/include/linux/torture.h index 61dfd93b6ee4..48fad21109fc 100644 --- a/include/linux/torture.h +++ b/include/linux/torture.h @@ -77,7 +77,7 @@ void torture_shutdown_absorb(const char *title); int torture_shutdown_init(int ssecs, void (*cleanup)(void)); /* Task stuttering, which forces load/no-load transitions. */ -void stutter_wait(const char *title); +bool stutter_wait(const char *title); int torture_stutter_init(int s); /* Initialization and cleanup. 
*/ diff --git a/include/linux/tracehook.h b/include/linux/tracehook.h index 05589a3e37f4..40b0b4c1bf7b 100644 --- a/include/linux/tracehook.h +++ b/include/linux/tracehook.h @@ -123,15 +123,10 @@ static inline __must_check int tracehook_report_syscall_entry( */ static inline void tracehook_report_syscall_exit(struct pt_regs *regs, int step) { - if (step) { - siginfo_t info; - clear_siginfo(&info); - user_single_step_siginfo(current, regs, &info); - force_sig_info(SIGTRAP, &info, current); - return; - } - - ptrace_report_syscall(regs); + if (step) + user_single_step_report(regs); + else + ptrace_report_syscall(regs); } /** diff --git a/include/linux/tracepoint-defs.h b/include/linux/tracepoint-defs.h index 22c5a46e9693..49ba9cde7e4b 100644 --- a/include/linux/tracepoint-defs.h +++ b/include/linux/tracepoint-defs.h @@ -35,6 +35,12 @@ struct tracepoint { struct tracepoint_func __rcu *funcs; }; +#ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS +typedef const int tracepoint_ptr_t; +#else +typedef struct tracepoint * const tracepoint_ptr_t; +#endif + struct bpf_raw_event_map { struct tracepoint *tp; void *bpf_func; diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h index 041f7e56a289..538ba1a58f5b 100644 --- a/include/linux/tracepoint.h +++ b/include/linux/tracepoint.h @@ -99,6 +99,29 @@ extern void syscall_unregfunc(void); #define TRACE_DEFINE_ENUM(x) #define TRACE_DEFINE_SIZEOF(x) +#ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS +static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p) +{ + return offset_to_ptr(p); +} + +#define __TRACEPOINT_ENTRY(name) \ + asm(" .section \"__tracepoints_ptrs\", \"a\" \n" \ + " .balign 4 \n" \ + " .long __tracepoint_" #name " - . \n" \ + " .previous \n") +#else +static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p) +{ + return *p; +} + +#define __TRACEPOINT_ENTRY(name) \ + static tracepoint_ptr_t __tracepoint_ptr_##name __used \ + __attribute__((section("__tracepoints_ptrs"))) = \ + &__tracepoint_##name +#endif + #endif /* _LINUX_TRACEPOINT_H */ /* @@ -253,19 +276,6 @@ extern void syscall_unregfunc(void); return static_key_false(&__tracepoint_##name.key); \ } -#ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS -#define __TRACEPOINT_ENTRY(name) \ - asm(" .section \"__tracepoints_ptrs\", \"a\" \n" \ - " .balign 4 \n" \ - " .long __tracepoint_" #name " - . 
\n" \ - " .previous \n") -#else -#define __TRACEPOINT_ENTRY(name) \ - static struct tracepoint * const __tracepoint_ptr_##name __used \ - __attribute__((section("__tracepoints_ptrs"))) = \ - &__tracepoint_##name -#endif - /* * We have no guarantee that gcc and the linker won't up-align the tracepoint * structures, so we create an array of pointers that will be used for iteration diff --git a/include/linux/tty.h b/include/linux/tty.h index c56e3978b00f..414db2bce715 100644 --- a/include/linux/tty.h +++ b/include/linux/tty.h @@ -746,8 +746,6 @@ static inline int tty_audit_push(void) /* tty_ioctl.c */ extern int n_tty_ioctl_helper(struct tty_struct *tty, struct file *file, unsigned int cmd, unsigned long arg); -extern long n_tty_compat_ioctl_helper(struct tty_struct *tty, struct file *file, - unsigned int cmd, unsigned long arg); /* vt.c */ diff --git a/include/linux/tty_driver.h b/include/linux/tty_driver.h index 71dbc891851a..358446247ccd 100644 --- a/include/linux/tty_driver.h +++ b/include/linux/tty_driver.h @@ -249,6 +249,7 @@ struct tty_struct; struct tty_driver; struct serial_icounter_struct; +struct serial_struct; struct tty_operations { struct tty_struct * (*lookup)(struct tty_driver *driver, @@ -287,6 +288,8 @@ struct tty_operations { int (*set_termiox)(struct tty_struct *tty, struct termiox *tnew); int (*get_icount)(struct tty_struct *tty, struct serial_icounter_struct *icount); + int (*get_serial)(struct tty_struct *tty, struct serial_struct *p); + int (*set_serial)(struct tty_struct *tty, struct serial_struct *p); void (*show_fdinfo)(struct tty_struct *tty, struct seq_file *m); #ifdef CONFIG_CONSOLE_POLL int (*poll_init)(struct tty_driver *driver, int line, char *options); diff --git a/include/linux/tty_ldisc.h b/include/linux/tty_ldisc.h index 840894ca3fc0..b1e6043e9917 100644 --- a/include/linux/tty_ldisc.h +++ b/include/linux/tty_ldisc.h @@ -54,11 +54,17 @@ * low-level driver can "grab" an ioctl request before the line * discpline has a chance to see it. * - * long (*compat_ioctl)(struct tty_struct * tty, struct file * file, + * int (*compat_ioctl)(struct tty_struct * tty, struct file * file, * unsigned int cmd, unsigned long arg); * * Process ioctl calls from 32-bit process on 64-bit system * + * NOTE: only ioctls that are neither "pointer to compatible + * structure" nor tty-generic. Something private that takes + * an integer or a pointer to wordsize-sensitive structure + * belongs here, but most of ldiscs will happily leave + * it NULL. 
+ * * void (*set_termios)(struct tty_struct *tty, struct ktermios * old); * * This function notifies the line discpline that a change has @@ -184,7 +190,7 @@ struct tty_ldisc_ops { const unsigned char *buf, size_t nr); int (*ioctl)(struct tty_struct *tty, struct file *file, unsigned int cmd, unsigned long arg); - long (*compat_ioctl)(struct tty_struct *tty, struct file *file, + int (*compat_ioctl)(struct tty_struct *tty, struct file *file, unsigned int cmd, unsigned long arg); void (*set_termios)(struct tty_struct *tty, struct ktermios *old); __poll_t (*poll)(struct tty_struct *, struct file *, diff --git a/include/linux/uio_driver.h b/include/linux/uio_driver.h index 6f8b68cd460f..a3cd7cb67a69 100644 --- a/include/linux/uio_driver.h +++ b/include/linux/uio_driver.h @@ -133,6 +133,7 @@ extern void uio_event_notify(struct uio_info *info); #define UIO_MEM_PHYS 1 #define UIO_MEM_LOGICAL 2 #define UIO_MEM_VIRTUAL 3 +#define UIO_MEM_IOVA 4 /* defines for uio_port->porttype */ #define UIO_PORT_NONE 0 diff --git a/include/linux/umh.h b/include/linux/umh.h index 5c812acbb80a..235f51b62c71 100644 --- a/include/linux/umh.h +++ b/include/linux/umh.h @@ -44,6 +44,7 @@ struct subprocess_info *call_usermodehelper_setup_file(struct file *file, int (*init)(struct subprocess_info *info, struct cred *new), void (*cleanup)(struct subprocess_info *), void *data); struct umh_info { + const char *cmdline; struct file *pipe_to_umh; struct file *pipe_from_umh; pid_t pid; diff --git a/include/linux/usb/chipidea.h b/include/linux/usb/chipidea.h index 07f99362bc90..63758c399e4e 100644 --- a/include/linux/usb/chipidea.h +++ b/include/linux/usb/chipidea.h @@ -77,6 +77,12 @@ struct ci_hdrc_platform_data { struct ci_hdrc_cable vbus_extcon; struct ci_hdrc_cable id_extcon; u32 phy_clkgate_delay_us; + + /* pins */ + struct pinctrl *pctl; + struct pinctrl_state *pins_default; + struct pinctrl_state *pins_host; + struct pinctrl_state *pins_device; }; /* Default offset of capability registers */ diff --git a/include/linux/usb/serial.h b/include/linux/usb/serial.h index 106551a5616e..1c19f77ed541 100644 --- a/include/linux/usb/serial.h +++ b/include/linux/usb/serial.h @@ -285,6 +285,8 @@ struct usb_serial_driver { int (*write_room)(struct tty_struct *tty); int (*ioctl)(struct tty_struct *tty, unsigned int cmd, unsigned long arg); + int (*get_serial)(struct tty_struct *tty, struct serial_struct *ss); + int (*set_serial)(struct tty_struct *tty, struct serial_struct *ss); void (*set_termios)(struct tty_struct *tty, struct usb_serial_port *port, struct ktermios *old); void (*break_ctl)(struct tty_struct *tty, int break_state); diff --git a/include/linux/usb/usbnet.h b/include/linux/usb/usbnet.h index e2ec3582e549..d8860f2d0976 100644 --- a/include/linux/usb/usbnet.h +++ b/include/linux/usb/usbnet.h @@ -28,7 +28,7 @@ struct usbnet { /* housekeeping */ struct usb_device *udev; struct usb_interface *intf; - struct driver_info *driver_info; + const struct driver_info *driver_info; const char *driver_name; void *driver_priv; wait_queue_head_t wait; diff --git a/include/linux/virtio_net.h b/include/linux/virtio_net.h index 9397628a1967..cb462f9ab7dd 100644 --- a/include/linux/virtio_net.h +++ b/include/linux/virtio_net.h @@ -5,6 +5,24 @@ #include <linux/if_vlan.h> #include <uapi/linux/virtio_net.h> +static inline int virtio_net_hdr_set_proto(struct sk_buff *skb, + const struct virtio_net_hdr *hdr) +{ + switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) { + case VIRTIO_NET_HDR_GSO_TCPV4: + case VIRTIO_NET_HDR_GSO_UDP: + skb->protocol = 
cpu_to_be16(ETH_P_IP); + break; + case VIRTIO_NET_HDR_GSO_TCPV6: + skb->protocol = cpu_to_be16(ETH_P_IPV6); + break; + default: + return -EINVAL; + } + + return 0; +} + static inline int virtio_net_hdr_to_skb(struct sk_buff *skb, const struct virtio_net_hdr *hdr, bool little_endian) diff --git a/include/linux/wait.h b/include/linux/wait.h index d9f131ecf708..ed7c122cb31f 100644 --- a/include/linux/wait.h +++ b/include/linux/wait.h @@ -1052,10 +1052,9 @@ do { \ __ret; \ }) -#define __wait_event_interruptible_lock_irq_timeout(wq_head, condition, \ - lock, timeout) \ +#define __wait_event_lock_irq_timeout(wq_head, condition, lock, timeout, state) \ ___wait_event(wq_head, ___wait_cond_timeout(condition), \ - TASK_INTERRUPTIBLE, 0, timeout, \ + state, 0, timeout, \ spin_unlock_irq(&lock); \ __ret = schedule_timeout(__ret); \ spin_lock_irq(&lock)); @@ -1089,8 +1088,19 @@ do { \ ({ \ long __ret = timeout; \ if (!___wait_cond_timeout(condition)) \ - __ret = __wait_event_interruptible_lock_irq_timeout( \ - wq_head, condition, lock, timeout); \ + __ret = __wait_event_lock_irq_timeout( \ + wq_head, condition, lock, timeout, \ + TASK_INTERRUPTIBLE); \ + __ret; \ +}) + +#define wait_event_lock_irq_timeout(wq_head, condition, lock, timeout) \ +({ \ + long __ret = timeout; \ + if (!___wait_cond_timeout(condition)) \ + __ret = __wait_event_lock_irq_timeout( \ + wq_head, condition, lock, timeout, \ + TASK_UNINTERRUPTIBLE); \ __ret; \ }) diff --git a/include/linux/writeback.h b/include/linux/writeback.h index fdfd04e348f6..738a0c24874f 100644 --- a/include/linux/writeback.h +++ b/include/linux/writeback.h @@ -246,7 +246,8 @@ static inline void wbc_attach_fdatawrite_inode(struct writeback_control *wbc, * * @bio is a part of the writeback in progress controlled by @wbc. Perform * writeback specific initialization. This is used to apply the cgroup - * writeback context. + * writeback context. Must be called after the bio has been associated with + * a device. */ static inline void wbc_init_bio(struct writeback_control *wbc, struct bio *bio) { @@ -257,7 +258,7 @@ static inline void wbc_init_bio(struct writeback_control *wbc, struct bio *bio) * regular writeback instead of writing things out itself. 
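
wait_event_lock_irq_timeout() above adds an uninterruptible counterpart to the existing interruptible variant. A self-contained sketch of a caller, with all names invented for the example:

#include <linux/wait.h>
#include <linux/spinlock.h>
#include <linux/jiffies.h>
#include <linux/errno.h>

static DEFINE_SPINLOCK(foo_lock);
static DECLARE_WAIT_QUEUE_HEAD(foo_wq);
static bool foo_ready;

static int foo_wait_ready(void)
{
	long left;

	spin_lock_irq(&foo_lock);
	/* drops foo_lock while sleeping, retakes it before re-checking */
	left = wait_event_lock_irq_timeout(foo_wq, foo_ready, foo_lock,
					   msecs_to_jiffies(500));
	spin_unlock_irq(&foo_lock);

	return left ? 0 : -ETIMEDOUT;
}

The waking side would set foo_ready under foo_lock and call wake_up(&foo_wq); the interruptible variant is used the same way but can also return early on a signal.
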
*/ if (wbc->wb) - bio_associate_blkcg(bio, wbc->wb->blkcg_css); + bio_associate_blkg_from_css(bio, wbc->wb->blkcg_css); } #else /* CONFIG_CGROUP_WRITEBACK */ diff --git a/include/media/v4l2-fh.h b/include/media/v4l2-fh.h index ea73fef8bdc0..8586cfb49828 100644 --- a/include/media/v4l2-fh.h +++ b/include/media/v4l2-fh.h @@ -38,10 +38,13 @@ struct v4l2_ctrl_handler; * @prio: priority of the file handler, as defined by &enum v4l2_priority * * @wait: event' s wait queue + * @subscribe_lock: serialise changes to the subscribed list; guarantee that + * the add and del event callbacks are orderly called * @subscribed: list of subscribed events * @available: list of events waiting to be dequeued * @navailable: number of available events at @available list * @sequence: event sequence number + * * @m2m_ctx: pointer to &struct v4l2_m2m_ctx */ struct v4l2_fh { @@ -52,6 +55,7 @@ struct v4l2_fh { /* Events */ wait_queue_head_t wait; + struct mutex subscribe_lock; struct list_head subscribed; struct list_head available; unsigned int navailable; diff --git a/include/net/act_api.h b/include/net/act_api.h index 970303448c90..05c7df41d737 100644 --- a/include/net/act_api.h +++ b/include/net/act_api.h @@ -13,7 +13,7 @@ #include <net/netns/generic.h> struct tcf_idrinfo { - spinlock_t lock; + struct mutex lock; struct idr action_idr; }; @@ -31,10 +31,12 @@ struct tc_action { int tcfa_action; struct tcf_t tcfa_tm; struct gnet_stats_basic_packed tcfa_bstats; + struct gnet_stats_basic_packed tcfa_bstats_hw; struct gnet_stats_queue tcfa_qstats; struct net_rate_estimator __rcu *tcfa_rate_est; spinlock_t tcfa_lock; struct gnet_stats_basic_cpu __percpu *cpu_bstats; + struct gnet_stats_basic_cpu __percpu *cpu_bstats_hw; struct gnet_stats_queue __percpu *cpu_qstats; struct tc_cookie __rcu *act_cookie; struct tcf_chain *goto_chain; @@ -85,8 +87,7 @@ struct tc_action_ops { struct tcf_result *); /* called under RCU BH lock*/ int (*dump)(struct sk_buff *, struct tc_action *, int, int); void (*cleanup)(struct tc_action *); - int (*lookup)(struct net *net, struct tc_action **a, u32 index, - struct netlink_ext_ack *extack); + int (*lookup)(struct net *net, struct tc_action **a, u32 index); int (*init)(struct net *net, struct nlattr *nla, struct nlattr *est, struct tc_action **act, int ovr, int bind, bool rtnl_held, @@ -95,7 +96,7 @@ struct tc_action_ops { struct netlink_callback *, int, const struct tc_action_ops *, struct netlink_ext_ack *); - void (*stats_update)(struct tc_action *, u64, u32, u64); + void (*stats_update)(struct tc_action *, u64, u32, u64, bool); size_t (*get_fill_size)(const struct tc_action *act); struct net_device *(*get_dev)(const struct tc_action *a); void (*put_dev)(struct net_device *dev); @@ -116,7 +117,7 @@ int tc_action_net_init(struct tc_action_net *tn, if (!tn->idrinfo) return -ENOMEM; tn->ops = ops; - spin_lock_init(&tn->idrinfo->lock); + mutex_init(&tn->idrinfo->lock); idr_init(&tn->idrinfo->action_idr); return err; } @@ -183,13 +184,13 @@ int tcf_action_copy_stats(struct sk_buff *, struct tc_action *, int); #endif /* CONFIG_NET_CLS_ACT */ static inline void tcf_action_stats_update(struct tc_action *a, u64 bytes, - u64 packets, u64 lastuse) + u64 packets, u64 lastuse, bool hw) { #ifdef CONFIG_NET_CLS_ACT if (!a->ops->stats_update) return; - a->ops->stats_update(a, bytes, packets, lastuse); + a->ops->stats_update(a, bytes, packets, lastuse, hw); #endif } diff --git a/include/net/addrconf.h b/include/net/addrconf.h index 6def0351bcc3..14b789a123e7 100644 --- a/include/net/addrconf.h +++ 
b/include/net/addrconf.h @@ -265,6 +265,11 @@ extern const struct ipv6_stub *ipv6_stub __read_mostly; struct ipv6_bpf_stub { int (*inet6_bind)(struct sock *sk, struct sockaddr *uaddr, int addr_len, bool force_bind_address_no_port, bool with_lock); + struct sock *(*udp6_lib_lookup)(struct net *net, + const struct in6_addr *saddr, __be16 sport, + const struct in6_addr *daddr, __be16 dport, + int dif, int sdif, struct udp_table *tbl, + struct sk_buff *skb); }; extern const struct ipv6_bpf_stub *ipv6_bpf_stub __read_mostly; diff --git a/include/net/af_rxrpc.h b/include/net/af_rxrpc.h index f53edb3754bc..de587948042a 100644 --- a/include/net/af_rxrpc.h +++ b/include/net/af_rxrpc.h @@ -13,6 +13,7 @@ #define _NET_RXRPC_H #include <linux/rxrpc.h> +#include <linux/ktime.h> struct key; struct sock; @@ -77,5 +78,8 @@ int rxrpc_kernel_retry_call(struct socket *, struct rxrpc_call *, int rxrpc_kernel_check_call(struct socket *, struct rxrpc_call *, enum rxrpc_call_completion *, u32 *); u32 rxrpc_kernel_check_life(struct socket *, struct rxrpc_call *); +u32 rxrpc_kernel_get_epoch(struct socket *, struct rxrpc_call *); +bool rxrpc_kernel_get_reply_time(struct socket *, struct rxrpc_call *, + ktime_t *); #endif /* _NET_RXRPC_H */ diff --git a/include/net/af_unix.h b/include/net/af_unix.h index a5ba41b3b867..e2695c4bf358 100644 --- a/include/net/af_unix.h +++ b/include/net/af_unix.h @@ -52,7 +52,7 @@ struct unix_skb_parms { struct unix_sock { /* WARNING: sk has to be the first member */ struct sock sk; - struct unix_address *addr; + struct unix_address *addr; struct path path; struct mutex iolock, bindlock; struct sock *peer; @@ -63,7 +63,7 @@ struct unix_sock { #define UNIX_GC_CANDIDATE 0 #define UNIX_GC_MAYBE_CYCLE 1 struct socket_wq peer_wq; - wait_queue_entry_t peer_wake; + wait_queue_entry_t peer_wake; }; static inline struct unix_sock *unix_sk(const struct sock *sk) diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h index cdd9f1fe7cfa..c36dc1e20556 100644 --- a/include/net/bluetooth/hci.h +++ b/include/net/bluetooth/hci.h @@ -1517,6 +1517,20 @@ struct hci_cp_le_write_def_data_len { __le16 tx_time; } __packed; +#define HCI_OP_LE_ADD_TO_RESOLV_LIST 0x2027 +struct hci_cp_le_add_to_resolv_list { + __u8 bdaddr_type; + bdaddr_t bdaddr; + __u8 peer_irk[16]; + __u8 local_irk[16]; +} __packed; + +#define HCI_OP_LE_DEL_FROM_RESOLV_LIST 0x2028 +struct hci_cp_le_del_from_resolv_list { + __u8 bdaddr_type; + bdaddr_t bdaddr; +} __packed; + #define HCI_OP_LE_CLEAR_RESOLV_LIST 0x2029 #define HCI_OP_LE_READ_RESOLV_LIST_SIZE 0x202a diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h index 0db1b9b428b7..e5ea633ea368 100644 --- a/include/net/bluetooth/hci_core.h +++ b/include/net/bluetooth/hci_core.h @@ -103,6 +103,14 @@ struct bdaddr_list { u8 bdaddr_type; }; +struct bdaddr_list_with_irk { + struct list_head list; + bdaddr_t bdaddr; + u8 bdaddr_type; + u8 peer_irk[16]; + u8 local_irk[16]; +}; + struct bt_uuid { struct list_head list; u8 uuid[16]; @@ -259,6 +267,8 @@ struct hci_dev { __u16 le_max_tx_time; __u16 le_max_rx_len; __u16 le_max_rx_time; + __u8 le_max_key_size; + __u8 le_min_key_size; __u16 discov_interleaved_timeout; __u16 conn_info_min_age; __u16 conn_info_max_age; @@ -1058,8 +1068,15 @@ int hci_inquiry(void __user *arg); struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *list, bdaddr_t *bdaddr, u8 type); +struct bdaddr_list_with_irk *hci_bdaddr_list_lookup_with_irk( + struct list_head *list, bdaddr_t *bdaddr, + u8 type); int 
hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type); +int hci_bdaddr_list_add_with_irk(struct list_head *list, bdaddr_t *bdaddr, + u8 type, u8 *peer_irk, u8 *local_irk); int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type); +int hci_bdaddr_list_del_with_irk(struct list_head *list, bdaddr_t *bdaddr, + u8 type); void hci_bdaddr_list_clear(struct list_head *list); struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev, diff --git a/include/net/bluetooth/l2cap.h b/include/net/bluetooth/l2cap.h index 0697fd413087..093aedebdf0c 100644 --- a/include/net/bluetooth/l2cap.h +++ b/include/net/bluetooth/l2cap.h @@ -277,12 +277,19 @@ struct l2cap_conn_rsp { #define L2CAP_CR_SEC_BLOCK 0x0003 #define L2CAP_CR_NO_MEM 0x0004 #define L2CAP_CR_BAD_AMP 0x0005 -#define L2CAP_CR_AUTHENTICATION 0x0005 -#define L2CAP_CR_AUTHORIZATION 0x0006 -#define L2CAP_CR_BAD_KEY_SIZE 0x0007 -#define L2CAP_CR_ENCRYPTION 0x0008 -#define L2CAP_CR_INVALID_SCID 0x0009 -#define L2CAP_CR_SCID_IN_USE 0x000A +#define L2CAP_CR_INVALID_SCID 0x0006 +#define L2CAP_CR_SCID_IN_USE 0x0007 + +/* credit based connect results */ +#define L2CAP_CR_LE_SUCCESS 0x0000 +#define L2CAP_CR_LE_BAD_PSM 0x0002 +#define L2CAP_CR_LE_NO_MEM 0x0004 +#define L2CAP_CR_LE_AUTHENTICATION 0x0005 +#define L2CAP_CR_LE_AUTHORIZATION 0x0006 +#define L2CAP_CR_LE_BAD_KEY_SIZE 0x0007 +#define L2CAP_CR_LE_ENCRYPTION 0x0008 +#define L2CAP_CR_LE_INVALID_SCID 0x0009 +#define L2CAP_CR_LE_SCID_IN_USE 0X000A /* connect/create channel status */ #define L2CAP_CS_NO_INFO 0x0000 @@ -455,9 +462,6 @@ struct l2cap_conn_param_update_rsp { #define L2CAP_CONN_PARAM_ACCEPTED 0x0000 #define L2CAP_CONN_PARAM_REJECTED 0x0001 -#define L2CAP_LE_MAX_CREDITS 10 -#define L2CAP_LE_DEFAULT_MPS 230 - struct l2cap_le_conn_req { __le16 psm; __le16 scid; diff --git a/include/net/bonding.h b/include/net/bonding.h index a2d058170ea3..b46d68acf701 100644 --- a/include/net/bonding.h +++ b/include/net/bonding.h @@ -139,12 +139,6 @@ struct bond_parm_tbl { int mode; }; -struct netdev_notify_work { - struct delayed_work work; - struct net_device *dev; - struct netdev_bonding_info bonding_info; -}; - struct slave { struct net_device *dev; /* first - useful for panic debug */ struct bonding *bond; /* our master */ @@ -172,6 +166,7 @@ struct slave { #ifdef CONFIG_NET_POLL_CONTROLLER struct netpoll *np; #endif + struct delayed_work notify_work; struct kobject kobj; struct rtnl_link_stats64 slave_stats; }; diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h index 8ebabc9873d1..1fa41b7a1be3 100644 --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h @@ -149,7 +149,7 @@ enum ieee80211_channel_flags { */ struct ieee80211_channel { enum nl80211_band band; - u16 center_freq; + u32 center_freq; u16 hw_value; u32 flags; int max_antenna_gain; @@ -775,6 +775,12 @@ struct cfg80211_crypto_settings { * @assocresp_ies_len: length of assocresp_ies in octets * @probe_resp_len: length of probe response template (@probe_resp) * @probe_resp: probe response template (AP mode only) + * @ftm_responder: enable FTM responder functionality; -1 for no change + * (which also implies no change in LCI/civic location data) + * @lci: LCI subelement content + * @civicloc: Civic location subelement content + * @lci_len: LCI data length + * @civicloc_len: Civic location data length */ struct cfg80211_beacon_data { const u8 *head, *tail; @@ -782,12 +788,17 @@ struct cfg80211_beacon_data { const u8 *proberesp_ies; const u8 *assocresp_ies; const u8 *probe_resp; + const u8 *lci; + 
const u8 *civicloc; + s8 ftm_responder; size_t head_len, tail_len; size_t beacon_ies_len; size_t proberesp_ies_len; size_t assocresp_ies_len; size_t probe_resp_len; + size_t lci_len; + size_t civicloc_len; }; struct mac_address { @@ -849,6 +860,7 @@ struct cfg80211_bitrate_mask { * @beacon_rate: bitrate to be used for beacons * @ht_cap: HT capabilities (or %NULL if HT isn't enabled) * @vht_cap: VHT capabilities (or %NULL if VHT isn't enabled) + * @he_cap: HE capabilities (or %NULL if HE isn't enabled) * @ht_required: stations must support HT * @vht_required: stations must support VHT */ @@ -874,6 +886,7 @@ struct cfg80211_ap_settings { const struct ieee80211_ht_cap *ht_cap; const struct ieee80211_vht_cap *vht_cap; + const struct ieee80211_he_cap_elem *he_cap; bool ht_required, vht_required; }; @@ -1290,6 +1303,10 @@ struct cfg80211_tid_stats { * @ack_signal: signal strength (in dBm) of the last ACK frame. * @avg_ack_signal: average rssi value of ack packet for the no of msdu's has * been sent. + * @rx_mpdu_count: number of MPDUs received from this station + * @fcs_err_count: number of packets (MPDUs) received from this station with + * an FCS error. This counter should be incremented only when TA of the + * received packet with an FCS error matches the peer MAC address. */ struct station_info { u64 filled; @@ -1336,6 +1353,9 @@ struct station_info { struct cfg80211_tid_stats *pertid; s8 ack_signal; s8 avg_ack_signal; + + u32 rx_mpdu_count; + u32 fcs_err_count; }; #if IS_ENABLED(CONFIG_CFG80211) @@ -2795,6 +2815,40 @@ struct cfg80211_external_auth_params { }; /** + * cfg80211_ftm_responder_stats - FTM responder statistics + * + * @filled: bitflag of flags using the bits of &enum nl80211_ftm_stats to + * indicate the relevant values in this struct for them + * @success_num: number of FTM sessions in which all frames were successfully + * answered + * @partial_num: number of FTM sessions in which part of frames were + * successfully answered + * @failed_num: number of failed FTM sessions + * @asap_num: number of ASAP FTM sessions + * @non_asap_num: number of non-ASAP FTM sessions + * @total_duration_ms: total sessions durations - gives an indication + * of how much time the responder was busy + * @unknown_triggers_num: number of unknown FTM triggers - triggers from + * initiators that didn't finish successfully the negotiation phase with + * the responder + * @reschedule_requests_num: number of FTM reschedule requests - initiator asks + * for a new scheduling although it already has scheduled FTM slot + * @out_of_window_triggers_num: total FTM triggers out of scheduled window + */ +struct cfg80211_ftm_responder_stats { + u32 filled; + u32 success_num; + u32 partial_num; + u32 failed_num; + u32 asap_num; + u32 non_asap_num; + u64 total_duration_ms; + u32 unknown_triggers_num; + u32 reschedule_requests_num; + u32 out_of_window_triggers_num; +}; + +/** * struct cfg80211_ops - backend description for wireless configuration * * This struct is registered by fullmac card drivers and/or wireless stacks @@ -3126,6 +3180,9 @@ struct cfg80211_external_auth_params { * * @tx_control_port: TX a control port frame (EAPoL). The noencrypt parameter * tells the driver that the frame should not be encrypted. + * + * @get_ftm_responder_stats: Retrieve FTM responder statistics, if available. + * Statistics should be cumulative, currently no way to reset is provided. 
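To make the .get_ftm_responder_stats op documented above concrete, here is a rough, hedged sketch of a fullmac driver filling struct cfg80211_ftm_responder_stats; the demo_* names are purely illustrative, and the NL80211_FTM_STATS_* bit names are assumed to match enum nl80211_ftm_stats in nl80211.h:

static int demo_get_ftm_responder_stats(struct wiphy *wiphy,
					struct net_device *dev,
					struct cfg80211_ftm_responder_stats *ftm_stats)
{
	struct demo_priv *priv = wiphy_priv(wiphy);	/* hypothetical driver private data */

	ftm_stats->success_num = priv->ftm_success;
	ftm_stats->asap_num = priv->ftm_asap;
	ftm_stats->total_duration_ms = priv->ftm_busy_ms;

	/* Advertise only the counters that were actually filled. */
	ftm_stats->filled = BIT(NL80211_FTM_STATS_SUCCESS_NUM) |
			    BIT(NL80211_FTM_STATS_ASAP_NUM) |
			    BIT(NL80211_FTM_STATS_TOTAL_DURATION_MSEC);

	return 0;
}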
*/ struct cfg80211_ops { int (*suspend)(struct wiphy *wiphy, struct cfg80211_wowlan *wow); @@ -3431,6 +3488,10 @@ struct cfg80211_ops { const u8 *buf, size_t len, const u8 *dest, const __be16 proto, const bool noencrypt); + + int (*get_ftm_responder_stats)(struct wiphy *wiphy, + struct net_device *dev, + struct cfg80211_ftm_responder_stats *ftm_stats); }; /* @@ -3958,7 +4019,6 @@ struct wiphy_iftype_ext_capab { * by the driver in the .connect() callback. The bit position maps to the * attribute indices defined in &enum nl80211_bss_select_attr. * - * @cookie_counter: unique generic cookie counter, used to identify objects. * @nan_supported_bands: bands supported by the device in NAN mode, a * bitmap of &enum nl80211_band values. For instance, for * NL80211_BAND_2GHZ, bit 0 would be set @@ -4097,8 +4157,6 @@ struct wiphy { u32 bss_select_support; - u64 cookie_counter; - u8 nan_supported_bands; u32 txq_limit; @@ -4733,6 +4791,17 @@ const u8 *cfg80211_find_vendor_ie(unsigned int oui, int oui_type, const u8 *ies, int len); /** + * cfg80211_send_layer2_update - send layer 2 update frame + * + * @dev: network device + * @addr: STA MAC address + * + * Wireless drivers can use this function to update forwarding tables in bridge + * devices upon STA association. + */ +void cfg80211_send_layer2_update(struct net_device *dev, const u8 *addr); + +/** * DOC: Regulatory enforcement infrastructure * * TODO @@ -4852,8 +4921,6 @@ const char *reg_initiator_name(enum nl80211_reg_initiator initiator); * * @alpha2: the ISO/IEC 3166 alpha2 wmm rule to be queried. * @freq: the freqency(in MHz) to be queried. - * @ptr: pointer where the regdb wmm data is to be stored (or %NULL if - * irrelevant). This can be used later for deduplication. * @rule: pointer to store the wmm rule from the regulatory db. 
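A minimal usage sketch for cfg80211_send_layer2_update() as documented above; demo_sta_added is a hypothetical hook a fullmac AP driver might call once a station entry has been created:

static void demo_sta_added(struct net_device *dev, const u8 *sta_addr)
{
	/* Refresh bridge forwarding tables so traffic for the newly
	 * associated station is switched towards this AP interface. */
	cfg80211_send_layer2_update(dev, sta_addr);
}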
* * Self-managed wireless drivers can use this function to query diff --git a/include/net/devlink.h b/include/net/devlink.h index b9b89d6604d4..45db0c79462d 100644 --- a/include/net/devlink.h +++ b/include/net/devlink.h @@ -298,7 +298,7 @@ struct devlink_resource { #define DEVLINK_RESOURCE_ID_PARENT_TOP 0 -#define DEVLINK_PARAM_MAX_STRING_VALUE 32 +#define __DEVLINK_PARAM_MAX_STRING_VALUE 32 enum devlink_param_type { DEVLINK_PARAM_TYPE_U8, DEVLINK_PARAM_TYPE_U16, @@ -311,7 +311,7 @@ union devlink_param_value { u8 vu8; u16 vu16; u32 vu32; - const char *vstr; + char vstr[__DEVLINK_PARAM_MAX_STRING_VALUE]; bool vbool; }; @@ -362,6 +362,9 @@ enum devlink_param_generic_id { DEVLINK_PARAM_GENERIC_ID_MAX_MACS, DEVLINK_PARAM_GENERIC_ID_ENABLE_SRIOV, DEVLINK_PARAM_GENERIC_ID_REGION_SNAPSHOT, + DEVLINK_PARAM_GENERIC_ID_IGNORE_ARI, + DEVLINK_PARAM_GENERIC_ID_MSIX_VEC_PER_PF_MAX, + DEVLINK_PARAM_GENERIC_ID_MSIX_VEC_PER_PF_MIN, /* add new param generic ids above here*/ __DEVLINK_PARAM_GENERIC_ID_MAX, @@ -380,6 +383,15 @@ enum devlink_param_generic_id { #define DEVLINK_PARAM_GENERIC_REGION_SNAPSHOT_NAME "region_snapshot_enable" #define DEVLINK_PARAM_GENERIC_REGION_SNAPSHOT_TYPE DEVLINK_PARAM_TYPE_BOOL +#define DEVLINK_PARAM_GENERIC_IGNORE_ARI_NAME "ignore_ari" +#define DEVLINK_PARAM_GENERIC_IGNORE_ARI_TYPE DEVLINK_PARAM_TYPE_BOOL + +#define DEVLINK_PARAM_GENERIC_MSIX_VEC_PER_PF_MAX_NAME "msix_vec_per_pf_max" +#define DEVLINK_PARAM_GENERIC_MSIX_VEC_PER_PF_MAX_TYPE DEVLINK_PARAM_TYPE_U32 + +#define DEVLINK_PARAM_GENERIC_MSIX_VEC_PER_PF_MIN_NAME "msix_vec_per_pf_min" +#define DEVLINK_PARAM_GENERIC_MSIX_VEC_PER_PF_MIN_TYPE DEVLINK_PARAM_TYPE_U32 + #define DEVLINK_PARAM_GENERIC(_id, _cmodes, _get, _set, _validate) \ { \ .id = DEVLINK_PARAM_GENERIC_ID_##_id, \ @@ -451,11 +463,14 @@ struct devlink_ops { u32 *p_cur, u32 *p_max); int (*eswitch_mode_get)(struct devlink *devlink, u16 *p_mode); - int (*eswitch_mode_set)(struct devlink *devlink, u16 mode); + int (*eswitch_mode_set)(struct devlink *devlink, u16 mode, + struct netlink_ext_ack *extack); int (*eswitch_inline_mode_get)(struct devlink *devlink, u8 *p_inline_mode); - int (*eswitch_inline_mode_set)(struct devlink *devlink, u8 inline_mode); + int (*eswitch_inline_mode_set)(struct devlink *devlink, u8 inline_mode, + struct netlink_ext_ack *extack); int (*eswitch_encap_mode_get)(struct devlink *devlink, u8 *p_encap_mode); - int (*eswitch_encap_mode_set)(struct devlink *devlink, u8 encap_mode); + int (*eswitch_encap_mode_set)(struct devlink *devlink, u8 encap_mode, + struct netlink_ext_ack *extack); }; static inline void *devlink_priv(struct devlink *devlink) @@ -553,6 +568,8 @@ int devlink_param_driverinit_value_get(struct devlink *devlink, u32 param_id, int devlink_param_driverinit_value_set(struct devlink *devlink, u32 param_id, union devlink_param_value init_val); void devlink_param_value_changed(struct devlink *devlink, u32 param_id); +void devlink_param_value_str_fill(union devlink_param_value *dst_val, + const char *src); struct devlink_region *devlink_region_create(struct devlink *devlink, const char *region_name, u32 region_max_snapshots, @@ -789,6 +806,12 @@ devlink_param_value_changed(struct devlink *devlink, u32 param_id) { } +static inline void +devlink_param_value_str_fill(union devlink_param_value *dst_val, + const char *src) +{ +} + static inline struct devlink_region * devlink_region_create(struct devlink *devlink, const char *region_name, diff --git a/include/net/dsa.h b/include/net/dsa.h index 461e8a7661b7..23690c44e167 100644 --- 
a/include/net/dsa.h +++ b/include/net/dsa.h @@ -35,6 +35,7 @@ enum dsa_tag_protocol { DSA_TAG_PROTO_BRCM_PREPEND, DSA_TAG_PROTO_DSA, DSA_TAG_PROTO_EDSA, + DSA_TAG_PROTO_GSWIP, DSA_TAG_PROTO_KSZ, DSA_TAG_PROTO_LAN9303, DSA_TAG_PROTO_MTK, diff --git a/include/net/dst.h b/include/net/dst.h index 7f735e76ca73..6cf0870414c7 100644 --- a/include/net/dst.h +++ b/include/net/dst.h @@ -527,4 +527,14 @@ static inline void skb_dst_update_pmtu(struct sk_buff *skb, u32 mtu) dst->ops->update_pmtu(dst, NULL, skb, mtu); } +static inline void skb_tunnel_check_pmtu(struct sk_buff *skb, + struct dst_entry *encap_dst, + int headroom) +{ + u32 encap_mtu = dst_mtu(encap_dst); + + if (skb->len > encap_mtu - headroom) + skb_dst_update_pmtu(skb, encap_mtu - headroom); +} + #endif /* _NET_DST_H */ diff --git a/include/net/gen_stats.h b/include/net/gen_stats.h index 883bb9085f15..946bd53a9f81 100644 --- a/include/net/gen_stats.h +++ b/include/net/gen_stats.h @@ -44,6 +44,10 @@ void __gnet_stats_copy_basic(const seqcount_t *running, struct gnet_stats_basic_packed *bstats, struct gnet_stats_basic_cpu __percpu *cpu, struct gnet_stats_basic_packed *b); +int gnet_stats_copy_basic_hw(const seqcount_t *running, + struct gnet_dump *d, + struct gnet_stats_basic_cpu __percpu *cpu, + struct gnet_stats_basic_packed *b); int gnet_stats_copy_rate_est(struct gnet_dump *d, struct net_rate_estimator __rcu **ptr); int gnet_stats_copy_queue(struct gnet_dump *d, diff --git a/include/net/genetlink.h b/include/net/genetlink.h index decf6012a401..aa2e5888f18d 100644 --- a/include/net/genetlink.h +++ b/include/net/genetlink.h @@ -112,7 +112,7 @@ static inline void genl_info_net_set(struct genl_info *info, struct net *net) #define GENL_SET_ERR_MSG(info, msg) NL_SET_ERR_MSG((info)->extack, msg) static inline int genl_err_attr(struct genl_info *info, int err, - struct nlattr *attr) + const struct nlattr *attr) { info->extack->bad_attr = attr; diff --git a/include/net/ieee80211_radiotap.h b/include/net/ieee80211_radiotap.h index feef706e1158..8014153bdd49 100644 --- a/include/net/ieee80211_radiotap.h +++ b/include/net/ieee80211_radiotap.h @@ -75,6 +75,8 @@ enum ieee80211_radiotap_presence { IEEE80211_RADIOTAP_TIMESTAMP = 22, IEEE80211_RADIOTAP_HE = 23, IEEE80211_RADIOTAP_HE_MU = 24, + IEEE80211_RADIOTAP_ZERO_LEN_PSDU = 26, + IEEE80211_RADIOTAP_LSIG = 27, /* valid in every it_present bitmap, even vendor namespaces */ IEEE80211_RADIOTAP_RADIOTAP_NAMESPACE = 29, @@ -325,6 +327,25 @@ enum ieee80211_radiotap_he_mu_bits { IEEE80211_RADIOTAP_HE_MU_FLAGS2_CH2_CTR_26T_RU = 0x0800, }; +enum ieee80211_radiotap_lsig_data1 { + IEEE80211_RADIOTAP_LSIG_DATA1_RATE_KNOWN = 0x0001, + IEEE80211_RADIOTAP_LSIG_DATA1_LENGTH_KNOWN = 0x0002, +}; + +enum ieee80211_radiotap_lsig_data2 { + IEEE80211_RADIOTAP_LSIG_DATA2_RATE = 0x000f, + IEEE80211_RADIOTAP_LSIG_DATA2_LENGTH = 0xfff0, +}; + +struct ieee80211_radiotap_lsig { + __le16 data1, data2; +}; + +enum ieee80211_radiotap_zero_len_psdu_type { + IEEE80211_RADIOTAP_ZERO_LEN_PSDU_SOUNDING = 0, + IEEE80211_RADIOTAP_ZERO_LEN_PSDU_VENDOR = 0xff, +}; + /** * ieee80211_get_radiotap_len - get radiotap header length */ diff --git a/include/net/inet_ecn.h b/include/net/inet_ecn.h index 482a1b705362..c8e2bebd8d93 100644 --- a/include/net/inet_ecn.h +++ b/include/net/inet_ecn.h @@ -183,8 +183,7 @@ static inline int INET_ECN_set_ce(struct sk_buff *skb) * 1 if something is broken and should be logged (!!! 
above) * 2 if packet should be dropped */ -static inline int INET_ECN_decapsulate(struct sk_buff *skb, - __u8 outer, __u8 inner) +static inline int __INET_ECN_decapsulate(__u8 outer, __u8 inner, bool *set_ce) { if (INET_ECN_is_not_ect(inner)) { switch (outer & INET_ECN_MASK) { @@ -198,10 +197,21 @@ static inline int INET_ECN_decapsulate(struct sk_buff *skb, } } - if (INET_ECN_is_ce(outer)) + *set_ce = INET_ECN_is_ce(outer); + return 0; +} + +static inline int INET_ECN_decapsulate(struct sk_buff *skb, + __u8 outer, __u8 inner) +{ + bool set_ce = false; + int rc; + + rc = __INET_ECN_decapsulate(outer, inner, &set_ce); + if (!rc && set_ce) INET_ECN_set_ce(skb); - return 0; + return rc; } static inline int IP_ECN_decapsulate(const struct iphdr *oiph, diff --git a/include/net/inet_sock.h b/include/net/inet_sock.h index e03b93360f33..a80fd0ac4563 100644 --- a/include/net/inet_sock.h +++ b/include/net/inet_sock.h @@ -130,12 +130,6 @@ static inline int inet_request_bound_dev_if(const struct sock *sk, return sk->sk_bound_dev_if; } -static inline struct ip_options_rcu *ireq_opt_deref(const struct inet_request_sock *ireq) -{ - return rcu_dereference_check(ireq->ireq_opt, - refcount_read(&ireq->req.rsk_refcnt) > 0); -} - struct inet_cork { unsigned int flags; __be32 addr; diff --git a/include/net/ip.h b/include/net/ip.h index e44b1a44f67a..72593e171d14 100644 --- a/include/net/ip.h +++ b/include/net/ip.h @@ -420,8 +420,35 @@ static inline unsigned int ip_skb_dst_mtu(struct sock *sk, return min(READ_ONCE(skb_dst(skb)->dev->mtu), IP_MAX_MTU); } -int ip_metrics_convert(struct net *net, struct nlattr *fc_mx, int fc_mx_len, - u32 *metrics); +struct dst_metrics *ip_fib_metrics_init(struct net *net, struct nlattr *fc_mx, + int fc_mx_len); +static inline void ip_fib_metrics_put(struct dst_metrics *fib_metrics) +{ + if (fib_metrics != &dst_default_metrics && + refcount_dec_and_test(&fib_metrics->refcnt)) + kfree(fib_metrics); +} + +/* ipv4 and ipv6 both use refcounted metrics if it is not the default */ +static inline +void ip_dst_init_metrics(struct dst_entry *dst, struct dst_metrics *fib_metrics) +{ + dst_init_metrics(dst, fib_metrics->metrics, true); + + if (fib_metrics != &dst_default_metrics) { + dst->_metrics |= DST_METRICS_REFCOUNTED; + refcount_inc(&fib_metrics->refcnt); + } +} + +static inline +void ip_dst_metrics_put(struct dst_entry *dst) +{ + struct dst_metrics *p = (struct dst_metrics *)DST_METRICS_PTR(dst); + + if (p != &dst_default_metrics && refcount_dec_and_test(&p->refcnt)) + kfree(p); +} u32 ip_idents_reserve(u32 hash, int segs); void __ip_select_ident(struct net *net, struct iphdr *iph, int segs); diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h index 3d4930528db0..84097010237c 100644 --- a/include/net/ip6_fib.h +++ b/include/net/ip6_fib.h @@ -159,6 +159,10 @@ struct fib6_info { struct rt6_info * __percpu *rt6i_pcpu; struct rt6_exception_bucket __rcu *rt6i_exception_bucket; +#ifdef CONFIG_IPV6_ROUTER_PREF + unsigned long last_probe; +#endif + u32 fib6_metric; u8 fib6_protocol; u8 fib6_type; @@ -182,7 +186,6 @@ struct rt6_info { struct in6_addr rt6i_gateway; struct inet6_dev *rt6i_idev; u32 rt6i_flags; - struct rt6key rt6i_prefsrc; struct list_head rt6i_uncached; struct uncached_list *rt6i_uncached_list; @@ -408,11 +411,33 @@ struct fib6_node *fib6_locate(struct fib6_node *root, void fib6_clean_all(struct net *net, int (*func)(struct fib6_info *, void *arg), void *arg); +void fib6_clean_all_skip_notify(struct net *net, + int (*func)(struct fib6_info *, void *arg), + void *arg); int 
fib6_add(struct fib6_node *root, struct fib6_info *rt, struct nl_info *info, struct netlink_ext_ack *extack); int fib6_del(struct fib6_info *rt, struct nl_info *info); +static inline +void rt6_get_prefsrc(const struct rt6_info *rt, struct in6_addr *addr) +{ + const struct fib6_info *from; + + rcu_read_lock(); + + from = rcu_dereference(rt->from); + if (from) { + *addr = from->fib6_prefsrc.addr; + } else { + struct in6_addr in6_zero = {}; + + *addr = in6_zero; + } + + rcu_read_unlock(); +} + static inline struct net_device *fib6_info_nh_dev(const struct fib6_info *f6i) { return f6i->fib6_nh.nh_dev; diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h index 7b9c82de11cc..7ab119936e69 100644 --- a/include/net/ip6_route.h +++ b/include/net/ip6_route.h @@ -165,8 +165,7 @@ void ip6_update_pmtu(struct sk_buff *skb, struct net *net, __be32 mtu, int oif, void ip6_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, __be32 mtu); void ip6_redirect(struct sk_buff *skb, struct net *net, int oif, u32 mark, kuid_t uid); -void ip6_redirect_no_header(struct sk_buff *skb, struct net *net, int oif, - u32 mark); +void ip6_redirect_no_header(struct sk_buff *skb, struct net *net, int oif); void ip6_sk_redirect(struct sk_buff *skb, struct sock *sk); struct netlink_callback; @@ -175,6 +174,7 @@ struct rt6_rtnl_dump_arg { struct sk_buff *skb; struct netlink_callback *cb; struct net *net; + struct fib_dump_filter filter; }; int rt6_dump_route(struct fib6_info *f6i, void *p_arg); diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h index 69c91d1934c1..c5969762a8f4 100644 --- a/include/net/ip_fib.h +++ b/include/net/ip_fib.h @@ -222,6 +222,17 @@ struct fib_table { unsigned long __data[0]; }; +struct fib_dump_filter { + u32 table_id; + /* filter_set is an optimization that an entry is set */ + bool filter_set; + bool dump_all_families; + unsigned char protocol; + unsigned char rt_type; + unsigned int flags; + struct net_device *dev; +}; + int fib_table_lookup(struct fib_table *tb, const struct flowi4 *flp, struct fib_result *res, int fib_flags); int fib_table_insert(struct net *, struct fib_table *, struct fib_config *, @@ -229,7 +240,7 @@ int fib_table_insert(struct net *, struct fib_table *, struct fib_config *, int fib_table_delete(struct net *, struct fib_table *, struct fib_config *, struct netlink_ext_ack *extack); int fib_table_dump(struct fib_table *table, struct sk_buff *skb, - struct netlink_callback *cb); + struct netlink_callback *cb, struct fib_dump_filter *filter); int fib_table_flush(struct net *net, struct fib_table *table); struct fib_table *fib_trie_unmerge(struct fib_table *main_tb); void fib_table_flush_external(struct fib_table *table); @@ -373,6 +384,7 @@ static inline bool fib4_rules_early_flow_dissect(struct net *net, extern const struct nla_policy rtm_ipv4_policy[]; void ip_fib_init(void); __be32 fib_compute_spec_dst(struct sk_buff *skb); +bool fib_info_nh_uses_dev(struct fib_info *fi, const struct net_device *dev); int fib_validate_source(struct sk_buff *skb, __be32 src, __be32 dst, u8 tos, int oif, struct net_device *dev, struct in_device *idev, u32 *itag); @@ -394,6 +406,7 @@ int ip_fib_check_default(__be32 gw, struct net_device *dev); int fib_sync_down_dev(struct net_device *dev, unsigned long event, bool force); int fib_sync_down_addr(struct net_device *dev, __be32 local); int fib_sync_up(struct net_device *dev, unsigned int nh_flags); +void fib_sync_mtu(struct net_device *dev, u32 orig_mtu); #ifdef CONFIG_IP_ROUTE_MULTIPATH int fib_multipath_hash(const struct net *net, 
const struct flowi4 *fl4, @@ -451,4 +464,7 @@ static inline void fib_proc_exit(struct net *net) u32 ip_mtu_from_fib_result(struct fib_result *res, __be32 daddr); +int ip_valid_fib_dump_req(struct net *net, const struct nlmsghdr *nlh, + struct fib_dump_filter *filter, + struct netlink_callback *cb); #endif /* _NET_FIB_H */ diff --git a/include/net/ipv6.h b/include/net/ipv6.h index ff33f498c137..829650540780 100644 --- a/include/net/ipv6.h +++ b/include/net/ipv6.h @@ -1089,8 +1089,6 @@ static inline int snmp6_unregister_dev(struct inet6_dev *idev) { return 0; } #endif #ifdef CONFIG_SYSCTL -extern struct ctl_table ipv6_route_table_template[]; - struct ctl_table *ipv6_icmp_sysctl_init(struct net *net); struct ctl_table *ipv6_route_sysctl_init(struct net *net); int ipv6_sysctl_register(void); diff --git a/include/net/iucv/af_iucv.h b/include/net/iucv/af_iucv.h index f4c21b5a1242..14a490246be9 100644 --- a/include/net/iucv/af_iucv.h +++ b/include/net/iucv/af_iucv.h @@ -80,6 +80,11 @@ struct af_iucv_trans_hdr { u8 pad; /* total 104 bytes */ } __packed; +static inline struct af_iucv_trans_hdr *iucv_trans_hdr(struct sk_buff *skb) +{ + return (struct af_iucv_trans_hdr *)skb_network_header(skb); +} + enum iucv_tx_notify { /* transmission of skb is completed and was successful */ TX_NOTIFY_OK = 0, diff --git a/include/net/llc.h b/include/net/llc.h index 890a87318014..df282d9b4017 100644 --- a/include/net/llc.h +++ b/include/net/llc.h @@ -66,6 +66,7 @@ struct llc_sap { int sk_count; struct hlist_nulls_head sk_laddr_hash[LLC_SK_LADDR_HASH_ENTRIES]; struct hlist_head sk_dev_hash[LLC_SK_DEV_HASH_ENTRIES]; + struct rcu_head rcu; }; static inline diff --git a/include/net/mac80211.h b/include/net/mac80211.h index 5790f55c241d..71985e95d2d9 100644 --- a/include/net/mac80211.h +++ b/include/net/mac80211.h @@ -101,8 +101,9 @@ * Drivers indicate that they use this model by implementing the .wake_tx_queue * driver operation. * - * Intermediate queues (struct ieee80211_txq) are kept per-sta per-tid, with a - * single per-vif queue for multicast data frames. + * Intermediate queues (struct ieee80211_txq) are kept per-sta per-tid, with + * another per-sta for non-data/non-mgmt and bufferable management frames, and + * a single per-vif queue for multicast data frames. * * The driver is expected to initialize its private per-queue data for stations * and interfaces in the .add_interface and .sta_add ops. @@ -308,6 +309,8 @@ struct ieee80211_vif_chanctx_switch { * @BSS_CHANGED_KEEP_ALIVE: keep alive options (idle period or protected * keep alive) changed. * @BSS_CHANGED_MCAST_RATE: Multicast Rate setting changed for this interface + * @BSS_CHANGED_FTM_RESPONDER: fine timing measurement request responder + * functionality changed for this BSS (AP mode).
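For the intermediate TXQ model described in the mac80211.h hunk above, a driver's .wake_tx_queue handler typically just pulls frames from the given queue; a minimal sketch, with demo_hw_xmit standing in for the driver's low-level transmit path:

static void demo_wake_tx_queue(struct ieee80211_hw *hw,
			       struct ieee80211_txq *txq)
{
	struct sk_buff *skb;

	/* txq may be a per-STA/per-TID queue, the new per-STA queue for
	 * bufferable management frames, or the per-vif multicast queue. */
	while ((skb = ieee80211_tx_dequeue(hw, txq)) != NULL)
		demo_hw_xmit(hw, txq->sta, skb);	/* hypothetical */
}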
* */ enum ieee80211_bss_change { @@ -337,6 +340,7 @@ enum ieee80211_bss_change { BSS_CHANGED_MU_GROUPS = 1<<23, BSS_CHANGED_KEEP_ALIVE = 1<<24, BSS_CHANGED_MCAST_RATE = 1<<25, + BSS_CHANGED_FTM_RESPONDER = 1<<26, /* when adding here, make sure to change ieee80211_reconfig */ }; @@ -463,6 +467,21 @@ struct ieee80211_mu_group_data { }; /** + * ieee80211_ftm_responder_params - FTM responder parameters + * + * @lci: LCI subelement content + * @civicloc: CIVIC location subelement content + * @lci_len: LCI data length + * @civicloc_len: Civic data length + */ +struct ieee80211_ftm_responder_params { + const u8 *lci; + const u8 *civicloc; + size_t lci_len; + size_t civicloc_len; +}; + +/** * struct ieee80211_bss_conf - holds the BSS's changing parameters * * This structure keeps information about a BSS (and an association @@ -561,6 +580,9 @@ struct ieee80211_mu_group_data { * @protected_keep_alive: if set, indicates that the station should send an RSN * protected frame to the AP to reset the idle timer at the AP for the * station. + * @ftm_responder: whether to enable or disable fine timing measurement FTM + * responder functionality. + * @ftmr_params: configurable lci/civic parameter when enabling FTM responder. */ struct ieee80211_bss_conf { const u8 *bssid; @@ -611,6 +633,8 @@ struct ieee80211_bss_conf { bool allow_p2p_go_ps; u16 max_idle_period; bool protected_keep_alive; + bool ftm_responder; + struct ieee80211_ftm_responder_params *ftmr_params; }; /** @@ -1140,6 +1164,11 @@ ieee80211_tx_info_clear_status(struct ieee80211_tx_info *info) * from the RX info data, so leave those zeroed when building this data) * @RX_FLAG_RADIOTAP_HE_MU: HE MU radiotap data is present * (&struct ieee80211_radiotap_he_mu) + * @RX_FLAG_RADIOTAP_LSIG: L-SIG radiotap data is present + * @RX_FLAG_NO_PSDU: use the frame only for radiotap reporting, with + * the "0-length PSDU" field included there. The value for it is + * in &struct ieee80211_rx_status. Note that if this value isn't + * known the frame shouldn't be reported. */ enum mac80211_rx_flags { RX_FLAG_MMIC_ERROR = BIT(0), @@ -1170,6 +1199,8 @@ enum mac80211_rx_flags { RX_FLAG_AMPDU_EOF_BIT_KNOWN = BIT(25), RX_FLAG_RADIOTAP_HE = BIT(26), RX_FLAG_RADIOTAP_HE_MU = BIT(27), + RX_FLAG_RADIOTAP_LSIG = BIT(28), + RX_FLAG_NO_PSDU = BIT(29), }; /** @@ -1242,6 +1273,7 @@ enum mac80211_rx_encoding { * @ampdu_reference: A-MPDU reference number, must be a different value for * each A-MPDU but the same for each subframe within one A-MPDU * @ampdu_delimiter_crc: A-MPDU delimiter CRC + * @zero_length_psdu_type: radiotap type of the 0-length PSDU */ struct ieee80211_rx_status { u64 mactime; @@ -1262,6 +1294,7 @@ struct ieee80211_rx_status { u8 chains; s8 chain_signal[IEEE80211_MAX_CHAINS]; u8 ampdu_delimiter_crc; + u8 zero_length_psdu_type; }; /** @@ -1504,6 +1537,8 @@ enum ieee80211_vif_flags { * @drv_priv: data area for driver use, will always be aligned to * sizeof(void \*). * @txq: the multicast data TX queue (if driver uses the TXQ abstraction) + * @txqs_stopped: per AC flag to indicate that intermediate TXQs are stopped, + * protected by fq->lock. */ struct ieee80211_vif { enum nl80211_iftype type; @@ -1528,6 +1563,8 @@ struct ieee80211_vif { unsigned int probe_req_reg; + bool txqs_stopped[IEEE80211_NUM_ACS]; + /* must be last */ u8 drv_priv[0] __aligned(sizeof(void *)); }; @@ -1839,7 +1876,9 @@ struct ieee80211_sta_rates { * unlimited. * @support_p2p_ps: indicates whether the STA supports P2P PS mechanism or not. 
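Tying the new ftm_responder/ftmr_params fields of struct ieee80211_bss_conf (introduced earlier in this hunk) to a driver: a hedged sketch of a bss_info_changed handler reacting to BSS_CHANGED_FTM_RESPONDER, where the demo_fw_* calls are hypothetical firmware hooks:

static void demo_bss_info_changed(struct ieee80211_hw *hw,
				  struct ieee80211_vif *vif,
				  struct ieee80211_bss_conf *info,
				  u32 changed)
{
	if (!(changed & BSS_CHANGED_FTM_RESPONDER))
		return;

	if (info->ftm_responder && info->ftmr_params)
		demo_fw_enable_ftm_responder(hw,
					     info->ftmr_params->lci,
					     info->ftmr_params->lci_len,
					     info->ftmr_params->civicloc,
					     info->ftmr_params->civicloc_len);
	else
		demo_fw_disable_ftm_responder(hw);
}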
* @max_rc_amsdu_len: Maximum A-MSDU size in bytes recommended by rate control. - * @txq: per-TID data TX queues (if driver uses the TXQ abstraction) + * @max_tid_amsdu_len: Maximum A-MSDU size in bytes for this TID + * @txq: per-TID data TX queues (if driver uses the TXQ abstraction); note that + * the last entry (%IEEE80211_NUM_TIDS) is used for non-data frames */ struct ieee80211_sta { u32 supp_rates[NUM_NL80211_BANDS]; @@ -1879,8 +1918,9 @@ struct ieee80211_sta { u16 max_amsdu_len; bool support_p2p_ps; u16 max_rc_amsdu_len; + u16 max_tid_amsdu_len[IEEE80211_NUM_TIDS]; - struct ieee80211_txq *txq[IEEE80211_NUM_TIDS]; + struct ieee80211_txq *txq[IEEE80211_NUM_TIDS + 1]; /* must be last */ u8 drv_priv[0] __aligned(sizeof(void *)); @@ -1914,7 +1954,8 @@ struct ieee80211_tx_control { * * @vif: &struct ieee80211_vif pointer from the add_interface callback. * @sta: station table entry, %NULL for per-vif queue - * @tid: the TID for this queue (unused for per-vif queue) + * @tid: the TID for this queue (unused for per-vif queue), + * %IEEE80211_NUM_TIDS for non-data (if enabled) * @ac: the AC for this queue * @drv_priv: driver private area, sized by hw->txq_data_size * @@ -2127,6 +2168,19 @@ struct ieee80211_txq { * @IEEE80211_HW_DOESNT_SUPPORT_QOS_NDP: The driver (or firmware) doesn't * support QoS NDP for AP probing - that's most likely a driver bug. * + * @IEEE80211_HW_BUFF_MMPDU_TXQ: use the TXQ for bufferable MMPDUs, this of + * course requires the driver to use TXQs to start with. + * + * @IEEE80211_HW_SUPPORTS_VHT_EXT_NSS_BW: (Hardware) rate control supports VHT + * extended NSS BW (dot11VHTExtendedNSSBWCapable). This flag will be set if + * the selected rate control algorithm sets %RATE_CTRL_CAPA_VHT_EXT_NSS_BW + * but if the rate control is built-in then it must be set by the driver. + * See also the documentation for that flag. + * + * @IEEE80211_HW_STA_MMPDU_TXQ: use the extra non-TID per-station TXQ for all + * MMPDUs on station interfaces. This of course requires the driver to use + * TXQs to start with. + * * @NUM_IEEE80211_HW_FLAGS: number of hardware flags, used for sizing arrays */ enum ieee80211_hw_flags { @@ -2172,6 +2226,9 @@ enum ieee80211_hw_flags { IEEE80211_HW_SUPPORTS_TDLS_BUFFER_STA, IEEE80211_HW_DEAUTH_NEED_MGD_TX_PREP, IEEE80211_HW_DOESNT_SUPPORT_QOS_NDP, + IEEE80211_HW_BUFF_MMPDU_TXQ, + IEEE80211_HW_SUPPORTS_VHT_EXT_NSS_BW, + IEEE80211_HW_STA_MMPDU_TXQ, /* keep last, obviously */ NUM_IEEE80211_HW_FLAGS @@ -2290,6 +2347,10 @@ enum ieee80211_hw_flags { * supported by HW. * @max_nan_de_entries: maximum number of NAN DE functions supported by the * device. + * + * @tx_sk_pacing_shift: Pacing shift to set on TCP sockets when frames from + * them are encountered. The default should typically not be changed, + * unless the driver has good reasons for needing more buffers. */ struct ieee80211_hw { struct ieee80211_conf conf; @@ -2325,6 +2386,7 @@ struct ieee80211_hw { u8 n_cipher_schemes; const struct ieee80211_cipher_scheme *cipher_schemes; u8 max_nan_de_entries; + u8 tx_sk_pacing_shift; }; static inline bool _ieee80211_hw_check(struct ieee80211_hw *hw, @@ -2506,6 +2568,19 @@ void ieee80211_free_txskb(struct ieee80211_hw *hw, struct sk_buff *skb); * The set_default_unicast_key() call updates the default WEP key index * configured to the hardware for WEP encryption type. This is required * for devices that support offload of data packets (e.g. ARP responses). 
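A hedged sketch of a driver opting in to the new MMPDU TXQ hardware flags and tuning the TCP pacing shift before registration; the value 6 is only an illustration, and the default set by mac80211 is normally left untouched:

static int demo_register(struct ieee80211_hw *hw)
{
	/* Route bufferable MMPDUs through the new per-STA TXQ. */
	ieee80211_hw_set(hw, STA_MMPDU_TXQ);
	ieee80211_hw_set(hw, BUFF_MMPDU_TXQ);

	/* Hypothetical tuning: a smaller shift lets each TCP socket keep
	 * more data queued towards this device. */
	hw->tx_sk_pacing_shift = 6;

	return ieee80211_register_hw(hw);
}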
+ * + * Mac80211 drivers should set the @NL80211_EXT_FEATURE_CAN_REPLACE_PTK0 flag + * when they are able to replace in-use PTK keys according to the following + * requirements: + * 1) They do not hand over frames decrypted with the old key to + mac80211 once the call to set_key() with command %DISABLE_KEY has been + completed when also setting @IEEE80211_KEY_FLAG_GENERATE_IV for any key, + 2) either drop or continue to use the old key for any outgoing frames queued + at the time of the key deletion (including re-transmits), + 3) never send out a frame queued prior to the set_key() %SET_KEY command + encrypted with the new key and + 4) never send out a frame unencrypted when it should be encrypted. + Mac80211 will not queue any new frames for a deleted key to the driver. */ /** @@ -3542,6 +3617,12 @@ enum ieee80211_reconfig_type { * @del_nan_func: Remove a NAN function. The driver must call * ieee80211_nan_func_terminated() with * NL80211_NAN_FUNC_TERM_REASON_USER_REQUEST reason code upon removal. + * @can_aggregate_in_amsdu: Called in order to determine if HW supports + * aggregating two specific frames in the same A-MSDU. The relation + * between the skbs should be symmetric and transitive. Note that while + * skb is always a real frame, head may or may not be an A-MSDU. + * @get_ftm_responder_stats: Retrieve FTM responder statistics, if available. + * Statistics should be cumulative, currently no way to reset is provided. */ struct ieee80211_ops { void (*tx)(struct ieee80211_hw *hw, @@ -3824,6 +3905,12 @@ struct ieee80211_ops { void (*del_nan_func)(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u8 instance_id); + bool (*can_aggregate_in_amsdu)(struct ieee80211_hw *hw, + struct sk_buff *head, + struct sk_buff *skb); + int (*get_ftm_responder_stats)(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct cfg80211_ftm_responder_stats *ftm_stats); }; /** @@ -4293,6 +4380,21 @@ void ieee80211_sta_set_expected_throughput(struct ieee80211_sta *pubsta, u32 thr); /** + * ieee80211_tx_rate_update - transmit rate update callback + * + * Drivers should call this function with a non-NULL pubsta. + * This function can be used by drivers that do not have provision + * for updating the tx rate in the data path. + * + * @hw: the hardware the frame was transmitted by + * @pubsta: the station to update the tx rate for. + * @info: tx status information + */ +void ieee80211_tx_rate_update(struct ieee80211_hw *hw, + struct ieee80211_sta *pubsta, + struct ieee80211_tx_info *info); + +/** * ieee80211_tx_status - transmit status callback * * Call this function for all transmitted frames after they have been @@ -5644,7 +5746,22 @@ struct ieee80211_tx_rate_control { bool bss; }; +/** + * enum rate_control_capabilities - rate control capabilities + */ +enum rate_control_capabilities { + /** + * @RATE_CTRL_CAPA_VHT_EXT_NSS_BW: + * Support for extended NSS BW support (dot11VHTExtendedNSSCapable) + * Note that this is only looked at if the minimum number of chains + * that the AP uses is < the number of TX chains the hardware has, + * otherwise the NSS difference doesn't bother us.
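A small hedged sketch of ieee80211_tx_rate_update() as declared above, for a driver whose firmware reports the current rate per station out of band; demo_fill_rate is a hypothetical decoder into struct ieee80211_tx_rate:

static void demo_report_fw_rate(struct ieee80211_hw *hw,
				struct ieee80211_sta *pubsta,
				u32 fw_rate_code)
{
	struct ieee80211_tx_info info = {};

	demo_fill_rate(fw_rate_code, &info.status.rates[0]);	/* hypothetical */
	info.status.rates[0].count = 1;
	ieee80211_tx_rate_update(hw, pubsta, &info);
}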
+ */ + RATE_CTRL_CAPA_VHT_EXT_NSS_BW = BIT(0), +}; + struct rate_control_ops { + unsigned long capa; const char *name; void *(*alloc)(struct ieee80211_hw *hw, struct dentry *debugfsdir); void (*free)(void *priv); diff --git a/include/net/neighbour.h b/include/net/neighbour.h index 6c1eecd56a4d..f58b384aa6c9 100644 --- a/include/net/neighbour.h +++ b/include/net/neighbour.h @@ -323,6 +323,7 @@ void __neigh_set_probe_once(struct neighbour *neigh); bool neigh_remove_one(struct neighbour *ndel, struct neigh_table *tbl); void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev); int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev); +int neigh_carrier_down(struct neigh_table *tbl, struct net_device *dev); int neigh_resolve_output(struct neighbour *neigh, struct sk_buff *skb); int neigh_connected_output(struct neighbour *neigh, struct sk_buff *skb); int neigh_direct_output(struct neighbour *neigh, struct sk_buff *skb); @@ -544,4 +545,19 @@ static inline void neigh_update_ext_learned(struct neighbour *neigh, u32 flags, *notify = 1; } } + +static inline void neigh_update_is_router(struct neighbour *neigh, u32 flags, + int *notify) +{ + u8 ndm_flags = 0; + + ndm_flags |= (flags & NEIGH_UPDATE_F_ISROUTER) ? NTF_ROUTER : 0; + if ((neigh->flags ^ ndm_flags) & NTF_ROUTER) { + if (ndm_flags & NTF_ROUTER) + neigh->flags |= NTF_ROUTER; + else + neigh->flags &= ~NTF_ROUTER; + *notify = 1; + } +} #endif diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h index 9b5fdc50519a..99d4148e0f90 100644 --- a/include/net/net_namespace.h +++ b/include/net/net_namespace.h @@ -43,6 +43,7 @@ struct ctl_table_header; struct net_generic; struct uevent_sock; struct netns_ipvs; +struct bpf_prog; #define NETDEV_HASHBITS 8 @@ -145,6 +146,8 @@ struct net { #endif struct net_generic __rcu *gen; + struct bpf_prog __rcu *flow_dissector_prog; + /* Note : following structs are cache line aligned */ #ifdef CONFIG_XFRM struct netns_xfrm xfrm; diff --git a/include/net/netfilter/ipv4/nf_conntrack_ipv4.h b/include/net/netfilter/ipv4/nf_conntrack_ipv4.h index c84b51682f08..135ee702c7b0 100644 --- a/include/net/netfilter/ipv4/nf_conntrack_ipv4.h +++ b/include/net/netfilter/ipv4/nf_conntrack_ipv4.h @@ -10,20 +10,17 @@ #ifndef _NF_CONNTRACK_IPV4_H #define _NF_CONNTRACK_IPV4_H -extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp4; -extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_udp4; +extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp; +extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_udp; extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_icmp; #ifdef CONFIG_NF_CT_PROTO_DCCP -extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_dccp4; +extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_dccp; #endif #ifdef CONFIG_NF_CT_PROTO_SCTP -extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp4; +extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp; #endif #ifdef CONFIG_NF_CT_PROTO_UDPLITE -extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite4; +extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite; #endif -int nf_conntrack_ipv4_compat_init(void); -void nf_conntrack_ipv4_compat_fini(void); - #endif /*_NF_CONNTRACK_IPV4_H*/ diff --git a/include/net/netfilter/ipv6/nf_conntrack_ipv6.h b/include/net/netfilter/ipv6/nf_conntrack_ipv6.h index effa8dfba68c..7b3c873f8839 100644 --- a/include/net/netfilter/ipv6/nf_conntrack_ipv6.h +++ 
b/include/net/netfilter/ipv6/nf_conntrack_ipv6.h @@ -2,20 +2,7 @@ #ifndef _NF_CONNTRACK_IPV6_H #define _NF_CONNTRACK_IPV6_H -extern const struct nf_conntrack_l3proto nf_conntrack_l3proto_ipv6; - -extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp6; -extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_udp6; extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_icmpv6; -#ifdef CONFIG_NF_CT_PROTO_DCCP -extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_dccp6; -#endif -#ifdef CONFIG_NF_CT_PROTO_SCTP -extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp6; -#endif -#ifdef CONFIG_NF_CT_PROTO_UDPLITE -extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite6; -#endif #include <linux/sysctl.h> extern struct ctl_table nf_ct_ipv6_sysctl_table[]; diff --git a/include/net/netfilter/nf_conntrack_core.h b/include/net/netfilter/nf_conntrack_core.h index 2a3e0974a6af..afc9b3620473 100644 --- a/include/net/netfilter/nf_conntrack_core.h +++ b/include/net/netfilter/nf_conntrack_core.h @@ -20,8 +20,7 @@ /* This header is used to share core functionality between the standalone connection tracking module, and the compatibility layer's use of connection tracking. */ -unsigned int nf_conntrack_in(struct net *net, u_int8_t pf, unsigned int hooknum, - struct sk_buff *skb); +unsigned int nf_conntrack_in(struct sk_buff *skb, const struct nf_hook_state *state); int nf_conntrack_init_net(struct net *net); void nf_conntrack_cleanup_net(struct net *net); diff --git a/include/net/netfilter/nf_conntrack_l4proto.h b/include/net/netfilter/nf_conntrack_l4proto.h index 8465263b297d..eed04af9b75e 100644 --- a/include/net/netfilter/nf_conntrack_l4proto.h +++ b/include/net/netfilter/nf_conntrack_l4proto.h @@ -18,9 +18,6 @@ struct seq_file; struct nf_conntrack_l4proto { - /* L3 Protocol number. */ - u_int16_t l3proto; - /* L4 Protocol number. */ u_int8_t l4proto; @@ -43,22 +40,14 @@ struct nf_conntrack_l4proto { /* Returns verdict for packet, or -1 for invalid. */ int (*packet)(struct nf_conn *ct, - const struct sk_buff *skb, + struct sk_buff *skb, unsigned int dataoff, - enum ip_conntrack_info ctinfo); - - /* Called when a new connection for this protocol found; - * returns TRUE if it's OK. If so, packet() called next. */ - bool (*new)(struct nf_conn *ct, const struct sk_buff *skb, - unsigned int dataoff); + enum ip_conntrack_info ctinfo, + const struct nf_hook_state *state); /* Called when a conntrack entry is destroyed */ void (*destroy)(struct nf_conn *ct); - int (*error)(struct net *net, struct nf_conn *tmpl, struct sk_buff *skb, - unsigned int dataoff, - u_int8_t pf, unsigned int hooknum); - /* called by gc worker if table is full */ bool (*can_early_drop)(const struct nf_conn *ct); @@ -92,7 +81,7 @@ struct nf_conntrack_l4proto { #endif unsigned int *net_id; /* Init l4proto pernet data */ - int (*init_net)(struct net *net, u_int16_t proto); + int (*init_net)(struct net *net); /* Return the per-net protocol part. 
*/ struct nf_proto_net *(*get_net_proto)(struct net *net); @@ -101,16 +90,23 @@ struct nf_conntrack_l4proto { struct module *me; }; +int nf_conntrack_icmpv4_error(struct nf_conn *tmpl, + struct sk_buff *skb, + unsigned int dataoff, + const struct nf_hook_state *state); + +int nf_conntrack_icmpv6_error(struct nf_conn *tmpl, + struct sk_buff *skb, + unsigned int dataoff, + const struct nf_hook_state *state); /* Existing built-in generic protocol */ extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_generic; -#define MAX_NF_CT_PROTO 256 +#define MAX_NF_CT_PROTO IPPROTO_UDPLITE -const struct nf_conntrack_l4proto *__nf_ct_l4proto_find(u_int16_t l3proto, - u_int8_t l4proto); +const struct nf_conntrack_l4proto *__nf_ct_l4proto_find(u8 l4proto); -const struct nf_conntrack_l4proto *nf_ct_l4proto_find_get(u_int16_t l3proto, - u_int8_t l4proto); +const struct nf_conntrack_l4proto *nf_ct_l4proto_find_get(u8 l4proto); void nf_ct_l4proto_put(const struct nf_conntrack_l4proto *p); /* Protocol pernet registration. */ diff --git a/include/net/netfilter/nf_flow_table.h b/include/net/netfilter/nf_flow_table.h index 0e355f4a3d76..77e2761d4f2f 100644 --- a/include/net/netfilter/nf_flow_table.h +++ b/include/net/netfilter/nf_flow_table.h @@ -99,7 +99,7 @@ int nf_flow_table_iterate(struct nf_flowtable *flow_table, void (*iter)(struct flow_offload *flow, void *data), void *data); -void nf_flow_table_cleanup(struct net *net, struct net_device *dev); +void nf_flow_table_cleanup(struct net_device *dev); int nf_flow_table_init(struct nf_flowtable *flow_table); void nf_flow_table_free(struct nf_flowtable *flow_table); diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h index 0f39ac487012..841835a387e1 100644 --- a/include/net/netfilter/nf_tables.h +++ b/include/net/netfilter/nf_tables.h @@ -470,6 +470,9 @@ int nf_tables_bind_set(const struct nft_ctx *ctx, struct nft_set *set, struct nft_set_binding *binding); void nf_tables_unbind_set(const struct nft_ctx *ctx, struct nft_set *set, struct nft_set_binding *binding); +void nf_tables_rebind_set(const struct nft_ctx *ctx, struct nft_set *set, + struct nft_set_binding *binding); +void nf_tables_destroy_set(const struct nft_ctx *ctx, struct nft_set *set); /** * enum nft_set_extensions - set extension type IDs @@ -724,7 +727,9 @@ struct nft_expr_type { * @eval: Expression evaluation function * @size: full expression size, including private data size * @init: initialization function - * @destroy: destruction function + * @activate: activate expression in the next generation + * @deactivate: deactivate expression in next generation + * @destroy: destruction function, called after synchronize_rcu * @dump: function to dump parameters * @type: expression type * @validate: validate expression, called during loop detection @@ -1293,12 +1298,14 @@ static inline void nft_set_elem_clear_busy(struct nft_set_ext *ext) * * @list: used internally * @msg_type: message type + * @put_net: ctx->net needs to be put * @ctx: transaction context * @data: internal information related to the transaction */ struct nft_trans { struct list_head list; int msg_type; + bool put_net; struct nft_ctx ctx; char data[0]; }; diff --git a/include/net/netfilter/nf_tables_core.h b/include/net/netfilter/nf_tables_core.h index 8da837d2aaf9..2046d104f323 100644 --- a/include/net/netfilter/nf_tables_core.h +++ b/include/net/netfilter/nf_tables_core.h @@ -16,6 +16,10 @@ extern struct nft_expr_type nft_meta_type; extern struct nft_expr_type nft_rt_type; extern struct 
nft_expr_type nft_exthdr_type; +#ifdef CONFIG_NETWORK_SECMARK +extern struct nft_object_type nft_secmark_obj_type; +#endif + int nf_tables_core_module_init(void); void nf_tables_core_module_exit(void); diff --git a/include/net/netfilter/nfnetlink_log.h b/include/net/netfilter/nfnetlink_log.h deleted file mode 100644 index ea32a7d3cf1b..000000000000 --- a/include/net/netfilter/nfnetlink_log.h +++ /dev/null @@ -1 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ diff --git a/include/net/netlink.h b/include/net/netlink.h index 0c154f98e987..4c1e99303b5a 100644 --- a/include/net/netlink.h +++ b/include/net/netlink.h @@ -153,7 +153,7 @@ * nla_find() find attribute in stream of attributes * nla_find_nested() find attribute in nested attributes * nla_parse() parse and validate stream of attrs - * nla_parse_nested() parse nested attribuets + * nla_parse_nested() parse nested attributes * nla_for_each_attr() loop over all attributes * nla_for_each_nested() loop over the nested attributes *========================================================================= @@ -172,7 +172,7 @@ enum { NLA_FLAG, NLA_MSECS, NLA_NESTED, - NLA_NESTED_COMPAT, + NLA_NESTED_ARRAY, NLA_NUL_STRING, NLA_BINARY, NLA_S8, @@ -180,14 +180,28 @@ enum { NLA_S32, NLA_S64, NLA_BITFIELD32, + NLA_REJECT, + NLA_EXACT_LEN, + NLA_EXACT_LEN_WARN, __NLA_TYPE_MAX, }; #define NLA_TYPE_MAX (__NLA_TYPE_MAX - 1) +enum nla_policy_validation { + NLA_VALIDATE_NONE, + NLA_VALIDATE_RANGE, + NLA_VALIDATE_MIN, + NLA_VALIDATE_MAX, + NLA_VALIDATE_FUNCTION, +}; + /** * struct nla_policy - attribute validation policy * @type: Type of attribute or NLA_UNSPEC + * @validation_type: type of attribute validation done in addition to + * type-specific validation (e.g. range, function call), see + * &enum nla_policy_validation * @len: Type specific length of payload * * Policies are defined as arrays of this struct, the array must be @@ -198,9 +212,11 @@ enum { * NLA_NUL_STRING Maximum length of string (excluding NUL) * NLA_FLAG Unused * NLA_BINARY Maximum length of attribute payload - * NLA_NESTED Don't use `len' field -- length verification is - * done by checking len of nested header (or empty) - * NLA_NESTED_COMPAT Minimum length of structure payload + * NLA_NESTED, + * NLA_NESTED_ARRAY Length verification is done by checking len of + * nested header (or empty); len field is used if + * validation_data is also used, for the max attr + * number in the nested policy. * NLA_U8, NLA_U16, * NLA_U32, NLA_U64, * NLA_S8, NLA_S16, @@ -208,9 +224,59 @@ enum { * NLA_MSECS Leaving the length field zero will verify the * given type fits, using it verifies minimum length * just like "All other" - * NLA_BITFIELD32 A 32-bit bitmap/bitselector attribute + * NLA_BITFIELD32 Unused + * NLA_REJECT Unused + * NLA_EXACT_LEN Attribute must have exactly this length, otherwise + * it is rejected. + * NLA_EXACT_LEN_WARN Attribute should have exactly this length, a warning + * is logged if it is longer, shorter is rejected. * All other Minimum length of attribute payload * + * Meaning of `validation_data' field: + * NLA_BITFIELD32 This is a 32-bit bitmap/bitselector attribute and + * validation data must point to a u32 value of valid + * flags + * NLA_REJECT This attribute is always rejected and validation data + * may point to a string to report as the error instead + * of the generic one in extended ACK. + * NLA_NESTED Points to a nested policy to validate, must also set + * `len' to the max attribute number. 
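The new policy features are easiest to see in an example; all DEMO_* names below are hypothetical, only the NLA_POLICY_* helpers (defined later in this hunk) come from the patch:

enum {
	DEMO_ATTR_UNSPEC,
	DEMO_ATTR_MODE,		/* u8, only 0..3 accepted */
	DEMO_ATTR_MAC,		/* exact-length ethernet address */
	DEMO_ATTR_LEGACY,	/* always rejected with a message */
	DEMO_ATTR_CONFIG,	/* nested attributes, validated recursively */
	__DEMO_ATTR_MAX,
};
#define DEMO_ATTR_MAX (__DEMO_ATTR_MAX - 1)

static const struct nla_policy demo_config_policy[DEMO_ATTR_MAX + 1] = {
	[DEMO_ATTR_MODE]   = NLA_POLICY_RANGE(NLA_U8, 0, 3),
};

static const struct nla_policy demo_policy[DEMO_ATTR_MAX + 1] = {
	[DEMO_ATTR_MODE]   = NLA_POLICY_RANGE(NLA_U8, 0, 3),
	[DEMO_ATTR_MAC]    = NLA_POLICY_ETH_ADDR,
	[DEMO_ATTR_LEGACY] = { .type = NLA_REJECT,
			       .validation_data = "demo: this attribute is obsolete" },
	[DEMO_ATTR_CONFIG] = NLA_POLICY_NESTED(DEMO_ATTR_MAX, demo_config_policy),
};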
+ * Note that nla_parse() will validate, but of course not + * parse, the nested sub-policies. + * NLA_NESTED_ARRAY Points to a nested policy to validate, must also set + * `len' to the max attribute number. The difference to + * NLA_NESTED is the structure - NLA_NESTED has the + * nested attributes directly inside, while an array has + * the nested attributes at another level down and the + * attributes directly in the nesting don't matter. + * All other Unused - but note that it's a union + * + * Meaning of `min' and `max' fields, use via NLA_POLICY_MIN, NLA_POLICY_MAX + * and NLA_POLICY_RANGE: + * NLA_U8, + * NLA_U16, + * NLA_U32, + * NLA_U64, + * NLA_S8, + * NLA_S16, + * NLA_S32, + * NLA_S64 These are used depending on the validation_type + * field, if that is min/max/range then the minimum, + * maximum and both are used (respectively) to check + * the value of the integer attribute. + * Note that in the interest of code simplicity and + * struct size both limits are s16, so you cannot + * enforce a range that doesn't fall within the range + * of s16 - do that as usual in the code instead. + * All other Unused - but note that it's a union + * + * Meaning of `validate' field, use via NLA_POLICY_VALIDATE_FN: + * NLA_BINARY Validation function called for the attribute, + * not compatible with use of the validation_data + * as in NLA_BITFIELD32, NLA_REJECT, NLA_NESTED and + * NLA_NESTED_ARRAY. + * All other Unused - but note that it's a union + * * Example: * static const struct nla_policy my_policy[ATTR_MAX+1] = { * [ATTR_FOO] = { .type = NLA_U16 }, @@ -220,11 +286,69 @@ enum { * }; */ struct nla_policy { - u16 type; + u8 type; + u8 validation_type; u16 len; - void *validation_data; + union { + const void *validation_data; + struct { + s16 min, max; + }; + int (*validate)(const struct nlattr *attr, + struct netlink_ext_ack *extack); + }; }; +#define NLA_POLICY_EXACT_LEN(_len) { .type = NLA_EXACT_LEN, .len = _len } +#define NLA_POLICY_EXACT_LEN_WARN(_len) { .type = NLA_EXACT_LEN_WARN, \ + .len = _len } + +#define NLA_POLICY_ETH_ADDR NLA_POLICY_EXACT_LEN(ETH_ALEN) +#define NLA_POLICY_ETH_ADDR_COMPAT NLA_POLICY_EXACT_LEN_WARN(ETH_ALEN) + +#define NLA_POLICY_NESTED(maxattr, policy) \ + { .type = NLA_NESTED, .validation_data = policy, .len = maxattr } +#define NLA_POLICY_NESTED_ARRAY(maxattr, policy) \ + { .type = NLA_NESTED_ARRAY, .validation_data = policy, .len = maxattr } + +#define __NLA_ENSURE(condition) BUILD_BUG_ON_ZERO(!(condition)) +#define NLA_ENSURE_INT_TYPE(tp) \ + (__NLA_ENSURE(tp == NLA_S8 || tp == NLA_U8 || \ + tp == NLA_S16 || tp == NLA_U16 || \ + tp == NLA_S32 || tp == NLA_U32 || \ + tp == NLA_S64 || tp == NLA_U64) + tp) +#define NLA_ENSURE_NO_VALIDATION_PTR(tp) \ + (__NLA_ENSURE(tp != NLA_BITFIELD32 && \ + tp != NLA_REJECT && \ + tp != NLA_NESTED && \ + tp != NLA_NESTED_ARRAY) + tp) + +#define NLA_POLICY_RANGE(tp, _min, _max) { \ + .type = NLA_ENSURE_INT_TYPE(tp), \ + .validation_type = NLA_VALIDATE_RANGE, \ + .min = _min, \ + .max = _max \ +} + +#define NLA_POLICY_MIN(tp, _min) { \ + .type = NLA_ENSURE_INT_TYPE(tp), \ + .validation_type = NLA_VALIDATE_MIN, \ + .min = _min, \ +} + +#define NLA_POLICY_MAX(tp, _max) { \ + .type = NLA_ENSURE_INT_TYPE(tp), \ + .validation_type = NLA_VALIDATE_MAX, \ + .max = _max, \ +} + +#define NLA_POLICY_VALIDATE_FN(tp, fn, ...) 
{ \ + .type = NLA_ENSURE_NO_VALIDATION_PTR(tp), \ + .validation_type = NLA_VALIDATE_FUNCTION, \ + .validate = fn, \ + .len = __VA_ARGS__ + 0, \ +} + /** * struct nl_info - netlink source information * @nlh: Netlink message header of original request @@ -249,6 +373,9 @@ int nla_validate(const struct nlattr *head, int len, int maxtype, int nla_parse(struct nlattr **tb, int maxtype, const struct nlattr *head, int len, const struct nla_policy *policy, struct netlink_ext_ack *extack); +int nla_parse_strict(struct nlattr **tb, int maxtype, const struct nlattr *head, + int len, const struct nla_policy *policy, + struct netlink_ext_ack *extack); int nla_policy_len(const struct nla_policy *, int); struct nlattr *nla_find(const struct nlattr *head, int len, int attrtype); size_t nla_strlcpy(char *dst, const struct nlattr *nla, size_t dstsize); @@ -392,13 +519,29 @@ static inline int nlmsg_parse(const struct nlmsghdr *nlh, int hdrlen, const struct nla_policy *policy, struct netlink_ext_ack *extack) { - if (nlh->nlmsg_len < nlmsg_msg_size(hdrlen)) + if (nlh->nlmsg_len < nlmsg_msg_size(hdrlen)) { + NL_SET_ERR_MSG(extack, "Invalid header length"); return -EINVAL; + } return nla_parse(tb, maxtype, nlmsg_attrdata(nlh, hdrlen), nlmsg_attrlen(nlh, hdrlen), policy, extack); } +static inline int nlmsg_parse_strict(const struct nlmsghdr *nlh, int hdrlen, + struct nlattr *tb[], int maxtype, + const struct nla_policy *policy, + struct netlink_ext_ack *extack) +{ + if (nlh->nlmsg_len < nlmsg_msg_size(hdrlen)) { + NL_SET_ERR_MSG(extack, "Invalid header length"); + return -EINVAL; + } + + return nla_parse_strict(tb, maxtype, nlmsg_attrdata(nlh, hdrlen), + nlmsg_attrlen(nlh, hdrlen), policy, extack); +} + /** * nlmsg_find_attr - find a specific attribute in a netlink message * @nlh: netlink message header diff --git a/include/net/netns/ipv6.h b/include/net/netns/ipv6.h index f0e396ab9bec..ef1ed529f33c 100644 --- a/include/net/netns/ipv6.h +++ b/include/net/netns/ipv6.h @@ -45,6 +45,7 @@ struct netns_sysctl_ipv6 { int max_dst_opts_len; int max_hbh_opts_len; int seg6_flowlabel; + bool skip_notify_on_dev_down; }; struct netns_ipv6 { diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h index 75a3f3fdb359..72ffb3120ced 100644 --- a/include/net/pkt_cls.h +++ b/include/net/pkt_cls.h @@ -65,11 +65,6 @@ static inline struct Qdisc *tcf_block_q(struct tcf_block *block) return block->q; } -static inline struct net_device *tcf_block_dev(struct tcf_block *block) -{ - return tcf_block_q(block)->dev_queue->dev; -} - void *tcf_block_cb_priv(struct tcf_block_cb *block_cb); struct tcf_block_cb *tcf_block_cb_lookup(struct tcf_block *block, tc_setup_cb_t *cb, void *cb_ident); @@ -122,11 +117,6 @@ static inline struct Qdisc *tcf_block_q(struct tcf_block *block) return NULL; } -static inline struct net_device *tcf_block_dev(struct tcf_block *block) -{ - return NULL; -} - static inline int tc_setup_cb_block_register(struct tcf_block *block, tc_setup_cb_t *cb, void *cb_priv) @@ -318,7 +308,7 @@ tcf_exts_stats_update(const struct tcf_exts *exts, for (i = 0; i < exts->nr_actions; i++) { struct tc_action *a = exts->actions[i]; - tcf_action_stats_update(a, bytes, packets, lastuse); + tcf_action_stats_update(a, bytes, packets, lastuse, true); } preempt_enable(); diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h index 7dc769e5452b..a16fbe9a2a67 100644 --- a/include/net/pkt_sched.h +++ b/include/net/pkt_sched.h @@ -102,6 +102,7 @@ int qdisc_set_default(const char *id); void qdisc_hash_add(struct Qdisc *q, bool invisible); void 
qdisc_hash_del(struct Qdisc *q); struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle); +struct Qdisc *qdisc_lookup_rcu(struct net_device *dev, u32 handle); struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r, struct nlattr *tab, struct netlink_ext_ack *extack); diff --git a/include/net/route.h b/include/net/route.h index bb53cdba38dc..9883dc82f723 100644 --- a/include/net/route.h +++ b/include/net/route.h @@ -201,10 +201,9 @@ static inline int ip_route_input(struct sk_buff *skb, __be32 dst, __be32 src, } void ipv4_update_pmtu(struct sk_buff *skb, struct net *net, u32 mtu, int oif, - u32 mark, u8 protocol, int flow_flags); + u8 protocol); void ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu); -void ipv4_redirect(struct sk_buff *skb, struct net *net, int oif, u32 mark, - u8 protocol, int flow_flags); +void ipv4_redirect(struct sk_buff *skb, struct net *net, int oif, u8 protocol); void ipv4_sk_redirect(struct sk_buff *skb, struct sock *sk); void ip_rt_send_redirect(struct sk_buff *skb); diff --git a/include/net/rtnetlink.h b/include/net/rtnetlink.h index 0bbaa5488423..cf26e5aacac4 100644 --- a/include/net/rtnetlink.h +++ b/include/net/rtnetlink.h @@ -165,6 +165,7 @@ int rtnl_configure_link(struct net_device *dev, const struct ifinfomsg *ifm); int rtnl_nla_parse_ifla(struct nlattr **tb, const struct nlattr *head, int len, struct netlink_ext_ack *exterr); +struct net *rtnl_get_net_ns_capable(struct sock *sk, int netnsid); #define MODULE_ALIAS_RTNL_LINK(kind) MODULE_ALIAS("rtnl-link-" kind) diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h index a6d00093f35e..4d736427a4cb 100644 --- a/include/net/sch_generic.h +++ b/include/net/sch_generic.h @@ -19,6 +19,7 @@ struct Qdisc_ops; struct qdisc_walker; struct tcf_walker; struct module; +struct bpf_flow_keys; typedef int tc_setup_cb_t(enum tc_setup_type type, void *type_data, void *cb_priv); @@ -105,6 +106,7 @@ struct Qdisc { spinlock_t busylock ____cacheline_aligned_in_smp; spinlock_t seqlock; + struct rcu_head rcu; }; static inline void qdisc_refcount_inc(struct Qdisc *qdisc) @@ -114,6 +116,19 @@ static inline void qdisc_refcount_inc(struct Qdisc *qdisc) refcount_inc(&qdisc->refcnt); } +/* Intended to be used by unlocked users, when concurrent qdisc release is + * possible. 
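The comment above (continued below) introduces qdisc_refcount_inc_nz(); together with qdisc_lookup_rcu() and qdisc_put() from this series it enables an unlocked lookup pattern roughly like the following sketch, where demo_qdisc_get is illustrative:

static struct Qdisc *demo_qdisc_get(struct net_device *dev, u32 handle)
{
	struct Qdisc *q;

	rcu_read_lock();
	q = qdisc_lookup_rcu(dev, handle);
	if (q)
		q = qdisc_refcount_inc_nz(q);	/* NULL if a release is in flight */
	rcu_read_unlock();

	return q;	/* a non-NULL result is later dropped with qdisc_put() */
}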
+ */ + +static inline struct Qdisc *qdisc_refcount_inc_nz(struct Qdisc *qdisc) +{ + if (qdisc->flags & TCQ_F_BUILTIN) + return qdisc; + if (refcount_inc_not_zero(&qdisc->refcnt)) + return qdisc; + return NULL; +} + static inline bool qdisc_is_running(struct Qdisc *qdisc) { if (qdisc->flags & TCQ_F_NOLOCK) @@ -307,9 +322,14 @@ struct tcf_proto { }; struct qdisc_skb_cb { - unsigned int pkt_len; - u16 slave_dev_queue_mapping; - u16 tc_classid; + union { + struct { + unsigned int pkt_len; + u16 slave_dev_queue_mapping; + u16 tc_classid; + }; + struct bpf_flow_keys *flow_keys; + }; #define QDISC_CB_PRIV_LEN 20 unsigned char data[QDISC_CB_PRIV_LEN]; }; @@ -331,7 +351,7 @@ struct tcf_chain { struct tcf_block { struct list_head chain_list; u32 index; /* block index for shared blocks */ - unsigned int refcnt; + refcount_t refcnt; struct net *net; struct Qdisc *q; struct list_head cb_list; @@ -343,6 +363,7 @@ struct tcf_block { struct tcf_chain *chain; struct list_head filter_chain_list; } chain0; + struct rcu_head rcu; }; static inline void tcf_block_offload_inc(struct tcf_block *block, u32 *flags) @@ -362,7 +383,7 @@ static inline void tcf_block_offload_dec(struct tcf_block *block, u32 *flags) } static inline void -tc_cls_offload_cnt_update(struct tcf_block *block, unsigned int *cnt, +tc_cls_offload_cnt_update(struct tcf_block *block, u32 *cnt, u32 *flags, bool add) { if (add) { @@ -554,7 +575,8 @@ void dev_deactivate_many(struct list_head *head); struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue, struct Qdisc *qdisc); void qdisc_reset(struct Qdisc *qdisc); -void qdisc_destroy(struct Qdisc *qdisc); +void qdisc_put(struct Qdisc *qdisc); +void qdisc_put_unlocked(struct Qdisc *qdisc); void qdisc_tree_reduce_backlog(struct Qdisc *qdisc, unsigned int n, unsigned int len); struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue, @@ -828,8 +850,8 @@ static inline void qdisc_skb_head_init(struct qdisc_skb_head *qh) qh->qlen = 0; } -static inline int __qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch, - struct qdisc_skb_head *qh) +static inline void __qdisc_enqueue_tail(struct sk_buff *skb, + struct qdisc_skb_head *qh) { struct sk_buff *last = qh->tail; @@ -842,14 +864,24 @@ static inline int __qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch, qh->head = skb; } qh->qlen++; - qdisc_qstats_backlog_inc(sch, skb); +} +static inline int qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch) +{ + __qdisc_enqueue_tail(skb, &sch->q); + qdisc_qstats_backlog_inc(sch, skb); return NET_XMIT_SUCCESS; } -static inline int qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch) +static inline void __qdisc_enqueue_head(struct sk_buff *skb, + struct qdisc_skb_head *qh) { - return __qdisc_enqueue_tail(skb, sch, &sch->q); + skb->next = qh->head; + + if (!qh->head) + qh->tail = skb; + qh->head = skb; + qh->qlen++; } static inline struct sk_buff *__qdisc_dequeue_head(struct qdisc_skb_head *qh) diff --git a/include/net/sctp/constants.h b/include/net/sctp/constants.h index 86f034b524d4..8dadc74c22e7 100644 --- a/include/net/sctp/constants.h +++ b/include/net/sctp/constants.h @@ -148,11 +148,6 @@ SCTP_SUBTYPE_CONSTRUCTOR(PRIMITIVE, enum sctp_event_primitive, primitive) #define sctp_chunk_is_data(a) (a->chunk_hdr->type == SCTP_CID_DATA || \ a->chunk_hdr->type == SCTP_CID_I_DATA) -/* Calculate the actual data size in a data chunk */ -#define SCTP_DATA_SNDSIZE(c) ((int)((unsigned long)(c->chunk_end) - \ - (unsigned long)(c->chunk_hdr) - \ - sctp_datachk_len(&c->asoc->stream))) - /* Internal error 
codes */ enum sctp_ierror { SCTP_IERROR_NO_ERROR = 0, diff --git a/include/net/sctp/sm.h b/include/net/sctp/sm.h index 5ef1bad81ef5..9e3d32746430 100644 --- a/include/net/sctp/sm.h +++ b/include/net/sctp/sm.h @@ -347,7 +347,7 @@ static inline __u16 sctp_data_size(struct sctp_chunk *chunk) __u16 size; size = ntohs(chunk->chunk_hdr->length); - size -= sctp_datahdr_len(&chunk->asoc->stream); + size -= sctp_datachk_len(&chunk->asoc->stream); return size; } diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h index 28a7c8e44636..a11f93790476 100644 --- a/include/net/sctp/structs.h +++ b/include/net/sctp/structs.h @@ -876,6 +876,8 @@ struct sctp_transport { unsigned long sackdelay; __u32 sackfreq; + atomic_t mtu_info; + /* When was the last time that we heard from this transport? We use * this to pick new active and retran paths. */ diff --git a/include/net/sock.h b/include/net/sock.h index 433f45fc2d68..f665d74ae509 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -422,8 +422,8 @@ struct sock { struct timer_list sk_timer; __u32 sk_priority; __u32 sk_mark; - u32 sk_pacing_rate; /* bytes per second */ - u32 sk_max_pacing_rate; + unsigned long sk_pacing_rate; /* bytes per second */ + unsigned long sk_max_pacing_rate; struct page_frag sk_frag; netdev_features_t sk_route_caps; netdev_features_t sk_route_nocaps; @@ -800,6 +800,7 @@ enum sock_flags { SOCK_SELECT_ERR_QUEUE, /* Wake select on error queue */ SOCK_RCU_FREE, /* wait rcu grace period in sk_destruct() */ SOCK_TXTIME, + SOCK_XDP, /* XDP is attached */ }; #define SK_FLAGS_TIMESTAMP ((1UL << SOCK_TIMESTAMP) | (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE)) @@ -1491,6 +1492,7 @@ static inline void lock_sock(struct sock *sk) lock_sock_nested(sk, 0); } +void __release_sock(struct sock *sk); void release_sock(struct sock *sk); /* BH context may only use the following locking interface. */ @@ -2057,14 +2059,20 @@ static inline bool skwq_has_sleeper(struct socket_wq *wq) /** * sock_poll_wait - place memory barrier behind the poll_wait call. * @filp: file + * @sock: socket to wait on * @p: poll_table * * See the comments in the wq_has_sleeper function. + * + * Do not derive sock from filp->private_data here. An SMC socket establishes + * an internal TCP socket that is used in the fallback case. All socket + * operations on the SMC socket are then forwarded to the TCP socket. In case of + * poll, the filp->private_data pointer references the SMC socket because the + * TCP socket has no file assigned. 
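As an illustration of the new calling convention, a protocol's poll handler now passes its own struct socket to sock_poll_wait() instead of letting the helper dig it out of filp->private_data. A minimal sketch (my_proto_poll and its readiness checks are hypothetical, not part of this commit):

#include <linux/poll.h>
#include <net/sock.h>

static __poll_t my_proto_poll(struct file *file, struct socket *sock,
                              poll_table *wait)
{
        struct sock *sk = sock->sk;
        __poll_t mask = 0;

        /* Wait on the socket handed in by the caller; for SMC fallback
         * sockets this is not the same object as filp->private_data. */
        sock_poll_wait(file, sock, wait);

        if (!skb_queue_empty(&sk->sk_receive_queue))
                mask |= EPOLLIN | EPOLLRDNORM;
        if (sk_stream_is_writeable(sk))
                mask |= EPOLLOUT | EPOLLWRNORM;

        return mask;
}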
*/ -static inline void sock_poll_wait(struct file *filp, poll_table *p) +static inline void sock_poll_wait(struct file *filp, struct socket *sock, + poll_table *p) { - struct socket *sock = filp->private_data; - if (!poll_does_not_wait(p)) { poll_wait(filp, &sock->wq->wait, p); /* We need to be sure we are in sync with the @@ -2212,10 +2220,6 @@ static inline struct page_frag *sk_page_frag(struct sock *sk) bool sk_page_frag_refill(struct sock *sk, struct page_frag *pfrag); -int sk_alloc_sg(struct sock *sk, int len, struct scatterlist *sg, - int sg_start, int *sg_curr, unsigned int *sg_size, - int first_coalesce); - /* * Default write policy as shown to user space via poll/select/SIGIO */ diff --git a/include/net/switchdev.h b/include/net/switchdev.h index d574ce63bf22..881ecb1555bf 100644 --- a/include/net/switchdev.h +++ b/include/net/switchdev.h @@ -145,6 +145,10 @@ enum switchdev_notifier_type { SWITCHDEV_FDB_ADD_TO_DEVICE, SWITCHDEV_FDB_DEL_TO_DEVICE, SWITCHDEV_FDB_OFFLOADED, + + SWITCHDEV_VXLAN_FDB_ADD_TO_DEVICE, + SWITCHDEV_VXLAN_FDB_DEL_TO_DEVICE, + SWITCHDEV_VXLAN_FDB_OFFLOADED, }; struct switchdev_notifier_info { @@ -155,7 +159,8 @@ struct switchdev_notifier_fdb_info { struct switchdev_notifier_info info; /* must be first */ const unsigned char *addr; u16 vid; - bool added_by_user; + u8 added_by_user:1, + offloaded:1; }; static inline struct net_device * diff --git a/include/net/tcp.h b/include/net/tcp.h index 770917d0caa7..a18914d20486 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -732,7 +732,7 @@ void tcp_send_window_probe(struct sock *sk); static inline u64 tcp_clock_ns(void) { - return local_clock(); + return ktime_get_ns(); } static inline u64 tcp_clock_us(void) @@ -752,17 +752,7 @@ static inline u32 tcp_time_stamp_raw(void) return div_u64(tcp_clock_ns(), NSEC_PER_SEC / TCP_TS_HZ); } - -/* Refresh 1us clock of a TCP socket, - * ensuring monotically increasing values. 
- */ -static inline void tcp_mstamp_refresh(struct tcp_sock *tp) -{ - u64 val = tcp_clock_us(); - - if (val > tp->tcp_mstamp) - tp->tcp_mstamp = val; -} +void tcp_mstamp_refresh(struct tcp_sock *tp); static inline u32 tcp_stamp_us_delta(u64 t1, u64 t0) { @@ -771,7 +761,13 @@ static inline u32 tcp_stamp_us_delta(u64 t1, u64 t0) static inline u32 tcp_skb_timestamp(const struct sk_buff *skb) { - return div_u64(skb->skb_mstamp, USEC_PER_SEC / TCP_TS_HZ); + return div_u64(skb->skb_mstamp_ns, NSEC_PER_SEC / TCP_TS_HZ); +} + +/* provide the departure time in us unit */ +static inline u64 tcp_skb_timestamp_us(const struct sk_buff *skb) +{ + return div_u64(skb->skb_mstamp_ns, NSEC_PER_USEC); } @@ -817,7 +813,7 @@ struct tcp_skb_cb { #define TCPCB_SACKED_RETRANS 0x02 /* SKB retransmitted */ #define TCPCB_LOST 0x04 /* SKB is lost */ #define TCPCB_TAGBITS 0x07 /* All tag bits */ -#define TCPCB_REPAIRED 0x10 /* SKB repaired (no skb_mstamp) */ +#define TCPCB_REPAIRED 0x10 /* SKB repaired (no skb_mstamp_ns) */ #define TCPCB_EVER_RETRANS 0x80 /* Ever retransmitted frame */ #define TCPCB_RETRANS (TCPCB_SACKED_RETRANS|TCPCB_EVER_RETRANS| \ TCPCB_REPAIRED) @@ -862,6 +858,21 @@ static inline void bpf_compute_data_end_sk_skb(struct sk_buff *skb) TCP_SKB_CB(skb)->bpf.data_end = skb->data + skb_headlen(skb); } +static inline bool tcp_skb_bpf_ingress(const struct sk_buff *skb) +{ + return TCP_SKB_CB(skb)->bpf.flags & BPF_F_INGRESS; +} + +static inline struct sock *tcp_skb_bpf_redirect_fetch(struct sk_buff *skb) +{ + return TCP_SKB_CB(skb)->bpf.sk_redir; +} + +static inline void tcp_skb_bpf_redirect_clear(struct sk_buff *skb) +{ + TCP_SKB_CB(skb)->bpf.sk_redir = NULL; +} + #if IS_ENABLED(CONFIG_IPV6) /* This is the variant of inet6_iif() that must be used by TCP, * as TCP moves IP6CB into a different location in skb->cb[] @@ -1234,8 +1245,31 @@ static inline bool tcp_needs_internal_pacing(const struct sock *sk) return smp_load_acquire(&sk->sk_pacing_status) == SK_PACING_NEEDED; } +/* Return in jiffies the delay before one skb is sent. + * If @skb is NULL, we look at EDT for next packet being sent on the socket. + */ +static inline unsigned long tcp_pacing_delay(const struct sock *sk, + const struct sk_buff *skb) +{ + s64 pacing_delay = skb ? skb->tstamp : tcp_sk(sk)->tcp_wstamp_ns; + + pacing_delay -= tcp_sk(sk)->tcp_clock_cache; + + return pacing_delay > 0 ? nsecs_to_jiffies(pacing_delay) : 0; +} + +static inline void tcp_reset_xmit_timer(struct sock *sk, + const int what, + unsigned long when, + const unsigned long max_when, + const struct sk_buff *skb) +{ + inet_csk_reset_xmit_timer(sk, what, when + tcp_pacing_delay(sk, skb), + max_when); +} + /* Something is really bad, we could not queue an additional packet, - * because qdisc is full or receiver sent a 0 window. + * because qdisc is full or receiver sent a 0 window, or we are paced. 
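To show how the pacing-aware timer helper is meant to be used, here is a sketch of rearming the retransmit timer so it cannot fire before the earliest departure time of the packet at the head of the retransmit queue (my_rearm_rto is a hypothetical caller, not code from this commit):

#include <net/tcp.h>

static void my_rearm_rto(struct sock *sk)
{
        /* Head of the retransmit queue; its EDT (skb->tstamp) is what
         * tcp_pacing_delay() adds on top of the requested timeout. */
        struct sk_buff *skb = tcp_rtx_queue_head(sk);

        tcp_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
                             inet_csk(sk)->icsk_rto, TCP_RTO_MAX, skb);
}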
* We do not want to add fuel to the fire, or abort too early, * so make sure the timer we arm now is at least 200ms in the future, * regardless of current icsk_rto value (as it could be ~2ms) @@ -1257,8 +1291,9 @@ static inline unsigned long tcp_probe0_when(const struct sock *sk, static inline void tcp_check_probe_timer(struct sock *sk) { if (!tcp_sk(sk)->packets_out && !inet_csk(sk)->icsk_pending) - inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0, - tcp_probe0_base(sk), TCP_RTO_MAX); + tcp_reset_xmit_timer(sk, ICSK_TIME_PROBE0, + tcp_probe0_base(sk), TCP_RTO_MAX, + NULL); } static inline void tcp_init_wl(struct tcp_sock *tp, u32 seq) @@ -1940,7 +1975,7 @@ static inline s64 tcp_rto_delta_us(const struct sock *sk) { const struct sk_buff *skb = tcp_rtx_queue_head(sk); u32 rto = inet_csk(sk)->icsk_rto; - u64 rto_time_stamp_us = skb->skb_mstamp + jiffies_to_usecs(rto); + u64 rto_time_stamp_us = tcp_skb_timestamp_us(skb) + jiffies_to_usecs(rto); return rto_time_stamp_us - tcp_sk(sk)->tcp_mstamp; } @@ -2040,11 +2075,6 @@ enum hrtimer_restart tcp_pace_kick(struct hrtimer *timer); #define TCP_ULP_MAX 128 #define TCP_ULP_BUF_MAX (TCP_ULP_NAME_MAX*TCP_ULP_MAX) -enum { - TCP_ULP_TLS, - TCP_ULP_BPF, -}; - struct tcp_ulp_ops { struct list_head list; @@ -2053,15 +2083,12 @@ struct tcp_ulp_ops { /* cleanup ulp */ void (*release)(struct sock *sk); - int uid; char name[TCP_ULP_NAME_MAX]; - bool user_visible; struct module *owner; }; int tcp_register_ulp(struct tcp_ulp_ops *type); void tcp_unregister_ulp(struct tcp_ulp_ops *type); int tcp_set_ulp(struct sock *sk, const char *name); -int tcp_set_ulp_id(struct sock *sk, const int ulp); void tcp_get_available_ulp(char *buf, size_t len); void tcp_cleanup_ulp(struct sock *sk); @@ -2069,6 +2096,18 @@ void tcp_cleanup_ulp(struct sock *sk); __MODULE_INFO(alias, alias_userspace, name); \ __MODULE_INFO(alias, alias_tcp_ulp, "tcp-ulp-" name) +struct sk_msg; +struct sk_psock; + +int tcp_bpf_init(struct sock *sk); +void tcp_bpf_reinit(struct sock *sk); +int tcp_bpf_sendmsg_redir(struct sock *sk, struct sk_msg *msg, u32 bytes, + int flags); +int tcp_bpf_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, + int nonblock, int flags, int *addr_len); +int __tcp_bpf_recvmsg(struct sock *sk, struct sk_psock *psock, + struct msghdr *msg, int len, int flags); + /* Call BPF_SOCK_OPS program that returns an int. If the return value * is < 0, then the BPF op failed (for example if the loaded BPF * program does not support the chosen operation or there is no BPF diff --git a/include/net/tls.h b/include/net/tls.h index 0a769cf2f5f3..bab5627ff5e3 100644 --- a/include/net/tls.h +++ b/include/net/tls.h @@ -39,9 +39,11 @@ #include <linux/crypto.h> #include <linux/socket.h> #include <linux/tcp.h> +#include <linux/skmsg.h> + #include <net/tcp.h> #include <net/strparser.h> - +#include <crypto/aead.h> #include <uapi/linux/tls.h> @@ -93,24 +95,45 @@ enum { TLS_NUM_CONFIG, }; -struct tls_sw_context_tx { - struct crypto_aead *aead_send; - struct crypto_wait async_wait; - - char aad_space[TLS_AAD_SPACE_SIZE]; - - unsigned int sg_plaintext_size; - int sg_plaintext_num_elem; - struct scatterlist sg_plaintext_data[MAX_SKB_FRAGS]; +/* TLS records are maintained in 'struct tls_rec'. It stores the memory pages + * allocated or mapped for each TLS record. After encryption, the records are + * stores in a linked list. 
+ */ +struct tls_rec { + struct list_head list; + int tx_ready; + int tx_flags; + int inplace_crypto; - unsigned int sg_encrypted_size; - int sg_encrypted_num_elem; - struct scatterlist sg_encrypted_data[MAX_SKB_FRAGS]; + struct sk_msg msg_plaintext; + struct sk_msg msg_encrypted; - /* AAD | sg_plaintext_data | sg_tag */ + /* AAD | msg_plaintext.sg.data | sg_tag */ struct scatterlist sg_aead_in[2]; - /* AAD | sg_encrypted_data (data contain overhead for hdr&iv&tag) */ + /* AAD | msg_encrypted.sg.data (data contains overhead for hdr & iv & tag) */ struct scatterlist sg_aead_out[2]; + + char aad_space[TLS_AAD_SPACE_SIZE]; + struct aead_request aead_req; + u8 aead_req_ctx[]; +}; + +struct tx_work { + struct delayed_work work; + struct sock *sk; +}; + +struct tls_sw_context_tx { + struct crypto_aead *aead_send; + struct crypto_wait async_wait; + struct tx_work tx_work; + struct tls_rec *open_rec; + struct list_head tx_list; + atomic_t encrypt_pending; + int async_notify; + +#define BIT_TX_SCHEDULED 0 + unsigned long tx_bitmask; }; struct tls_sw_context_rx { @@ -119,11 +142,12 @@ struct tls_sw_context_rx { struct strparser strp; void (*saved_data_ready)(struct sock *sk); - unsigned int (*sk_poll)(struct file *file, struct socket *sock, - struct poll_table_struct *wait); + struct sk_buff *recv_pkt; u8 control; bool decrypted; + atomic_t decrypt_pending; + bool async_notify; }; struct tls_record_info { @@ -195,10 +219,11 @@ struct tls_context { struct scatterlist *partially_sent_record; u16 partially_sent_offset; + unsigned long flags; bool in_tcp_sendpages; + bool pending_open_record_frags; - u16 pending_open_record_frags; int (*push_pending_record)(struct sock *sk, int flags); void (*sk_write_space)(struct sock *sk); @@ -246,8 +271,7 @@ void tls_sw_free_resources_rx(struct sock *sk); void tls_sw_release_resources_rx(struct sock *sk); int tls_sw_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock, int flags, int *addr_len); -unsigned int tls_sw_poll(struct file *file, struct socket *sock, - struct poll_table_struct *wait); +bool tls_sw_stream_read(const struct sock *sk); ssize_t tls_sw_splice_read(struct socket *sock, loff_t *ppos, struct pipe_inode_info *pipe, size_t len, unsigned int flags); @@ -259,6 +283,7 @@ int tls_device_sendpage(struct sock *sk, struct page *page, void tls_device_sk_destruct(struct sock *sk); void tls_device_init(void); void tls_device_cleanup(void); +int tls_tx_records(struct sock *sk, int flags); struct tls_record_info *tls_get_record(struct tls_offload_context_tx *context, u32 seq, u64 *p_record_sn); @@ -277,6 +302,9 @@ void tls_sk_destruct(struct sock *sk, struct tls_context *ctx); int tls_push_sg(struct sock *sk, struct tls_context *ctx, struct scatterlist *sg, u16 first_offset, int flags); +int tls_push_partial_record(struct sock *sk, struct tls_context *ctx, + int flags); + int tls_push_pending_closed_record(struct sock *sk, struct tls_context *ctx, int flags, long *timeo); @@ -310,6 +338,17 @@ static inline bool tls_is_pending_open_record(struct tls_context *tls_ctx) return tls_ctx->pending_open_record_frags; } +static inline bool is_tx_ready(struct tls_sw_context_tx *ctx) +{ + struct tls_rec *rec; + + rec = list_first_entry(&ctx->tx_list, struct tls_rec, list); + if (!rec) + return false; + + return READ_ONCE(rec->tx_ready); +} + struct sk_buff * tls_validate_xmit_skb(struct sock *sk, struct net_device *dev, struct sk_buff *skb); diff --git a/include/net/udp.h b/include/net/udp.h index 8482a990b0bb..9e82cb391dea 100644 --- a/include/net/udp.h 
+++ b/include/net/udp.h @@ -443,8 +443,10 @@ int udpv4_offload_init(void); void udp_init(void); +DECLARE_STATIC_KEY_FALSE(udp_encap_needed_key); void udp_encap_enable(void); #if IS_ENABLED(CONFIG_IPV6) +DECLARE_STATIC_KEY_FALSE(udpv6_encap_needed_key); void udpv6_encap_enable(void); #endif diff --git a/include/net/vxlan.h b/include/net/vxlan.h index b99a02ae3934..03431c148e16 100644 --- a/include/net/vxlan.h +++ b/include/net/vxlan.h @@ -5,7 +5,8 @@ #include <linux/if_vlan.h> #include <net/udp_tunnel.h> #include <net/dst_metadata.h> -#include <net/udp_tunnel.h> +#include <net/rtnetlink.h> +#include <net/switchdev.h> /* VXLAN protocol (RFC 7348) header: * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ @@ -191,6 +192,7 @@ union vxlan_addr { struct vxlan_rdst { union vxlan_addr remote_ip; __be16 remote_port; + u8 offloaded:1; __be32 remote_vni; u32 remote_ifindex; struct list_head list; @@ -371,4 +373,65 @@ static inline unsigned short vxlan_get_sk_family(struct vxlan_sock *vs) return vs->sock->sk->sk_family; } +#if IS_ENABLED(CONFIG_IPV6) + +static inline bool vxlan_addr_any(const union vxlan_addr *ipa) +{ + if (ipa->sa.sa_family == AF_INET6) + return ipv6_addr_any(&ipa->sin6.sin6_addr); + else + return ipa->sin.sin_addr.s_addr == htonl(INADDR_ANY); +} + +static inline bool vxlan_addr_multicast(const union vxlan_addr *ipa) +{ + if (ipa->sa.sa_family == AF_INET6) + return ipv6_addr_is_multicast(&ipa->sin6.sin6_addr); + else + return IN_MULTICAST(ntohl(ipa->sin.sin_addr.s_addr)); +} + +#else /* !IS_ENABLED(CONFIG_IPV6) */ + +static inline bool vxlan_addr_any(const union vxlan_addr *ipa) +{ + return ipa->sin.sin_addr.s_addr == htonl(INADDR_ANY); +} + +static inline bool vxlan_addr_multicast(const union vxlan_addr *ipa) +{ + return IN_MULTICAST(ntohl(ipa->sin.sin_addr.s_addr)); +} + +#endif /* IS_ENABLED(CONFIG_IPV6) */ + +static inline bool netif_is_vxlan(const struct net_device *dev) +{ + return dev->rtnl_link_ops && + !strcmp(dev->rtnl_link_ops->kind, "vxlan"); +} + +struct switchdev_notifier_vxlan_fdb_info { + struct switchdev_notifier_info info; /* must be first */ + union vxlan_addr remote_ip; + __be16 remote_port; + __be32 remote_vni; + u32 remote_ifindex; + u8 eth_addr[ETH_ALEN]; + __be32 vni; + bool offloaded; +}; + +#if IS_ENABLED(CONFIG_VXLAN) +int vxlan_fdb_find_uc(struct net_device *dev, const u8 *mac, __be32 vni, + struct switchdev_notifier_vxlan_fdb_info *fdb_info); +#else +static inline int +vxlan_fdb_find_uc(struct net_device *dev, const u8 *mac, __be32 vni, + struct switchdev_notifier_vxlan_fdb_info *fdb_info) +{ + return -ENOENT; +} +#endif + #endif diff --git a/include/net/xdp.h b/include/net/xdp.h index 76b95256c266..0f25b3675c5c 100644 --- a/include/net/xdp.h +++ b/include/net/xdp.h @@ -91,6 +91,8 @@ static inline void xdp_scrub_frame(struct xdp_frame *frame) frame->dev_rx = NULL; } +struct xdp_frame *xdp_convert_zc_to_xdp_frame(struct xdp_buff *xdp); + /* Convert xdp_buff to xdp_frame */ static inline struct xdp_frame *convert_to_xdp_frame(struct xdp_buff *xdp) @@ -99,9 +101,8 @@ struct xdp_frame *convert_to_xdp_frame(struct xdp_buff *xdp) int metasize; int headroom; - /* TODO: implement clone, copy, use "native" MEM_TYPE */ if (xdp->rxq->mem.type == MEM_TYPE_ZERO_COPY) - return NULL; + return xdp_convert_zc_to_xdp_frame(xdp); /* Assure headroom is available for storing info */ headroom = xdp->data - xdp->data_hard_start; @@ -135,6 +136,7 @@ void xdp_rxq_info_unused(struct xdp_rxq_info *xdp_rxq); bool xdp_rxq_info_is_reg(struct xdp_rxq_info *xdp_rxq); 
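The VXLAN helpers added above give switchdev drivers a way to recognise a VXLAN upper device and query its FDB before offloading a MAC address. A minimal sketch (my_offload_vxlan_mac is hypothetical; only netif_is_vxlan() and vxlan_fdb_find_uc() come from this change):

#include <net/vxlan.h>

static int my_offload_vxlan_mac(struct net_device *dev, const u8 *mac,
                                __be32 vni)
{
        struct switchdev_notifier_vxlan_fdb_info fdb_info;
        int err;

        if (!netif_is_vxlan(dev))
                return -EOPNOTSUPP;

        err = vxlan_fdb_find_uc(dev, mac, vni, &fdb_info);
        if (err)
                return err;     /* -ENOENT if no entry or VXLAN is disabled */

        /* fdb_info.remote_ip/remote_port/remote_vni now describe the
         * tunnel endpoint to program into hardware. */
        return 0;
}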
int xdp_rxq_info_reg_mem_model(struct xdp_rxq_info *xdp_rxq, enum xdp_mem_type type, void *allocator); +void xdp_rxq_info_unreg_mem_model(struct xdp_rxq_info *xdp_rxq); /* Drivers not supporting XDP metadata can use this helper, which * rejects any room expansion for metadata as a result. diff --git a/include/net/xdp_sock.h b/include/net/xdp_sock.h index 7161856bcf9c..13acb9803a6d 100644 --- a/include/net/xdp_sock.h +++ b/include/net/xdp_sock.h @@ -16,21 +16,23 @@ struct net_device; struct xsk_queue; -struct xdp_umem_props { - u64 chunk_mask; - u64 size; -}; - struct xdp_umem_page { void *addr; dma_addr_t dma; }; +struct xdp_umem_fq_reuse { + u32 nentries; + u32 length; + u64 handles[]; +}; + struct xdp_umem { struct xsk_queue *fq; struct xsk_queue *cq; struct xdp_umem_page *pages; - struct xdp_umem_props props; + u64 chunk_mask; + u64 size; u32 headroom; u32 chunk_size_nohr; struct user_struct *user; @@ -41,6 +43,7 @@ struct xdp_umem { struct page **pgs; u32 npgs; struct net_device *dev; + struct xdp_umem_fq_reuse *fq_reuse; u16 queue_id; bool zc; spinlock_t xsk_list_lock; @@ -79,6 +82,50 @@ void xsk_umem_discard_addr(struct xdp_umem *umem); void xsk_umem_complete_tx(struct xdp_umem *umem, u32 nb_entries); bool xsk_umem_consume_tx(struct xdp_umem *umem, dma_addr_t *dma, u32 *len); void xsk_umem_consume_tx_done(struct xdp_umem *umem); +struct xdp_umem_fq_reuse *xsk_reuseq_prepare(u32 nentries); +struct xdp_umem_fq_reuse *xsk_reuseq_swap(struct xdp_umem *umem, + struct xdp_umem_fq_reuse *newq); +void xsk_reuseq_free(struct xdp_umem_fq_reuse *rq); +struct xdp_umem *xdp_get_umem_from_qid(struct net_device *dev, u16 queue_id); + +static inline char *xdp_umem_get_data(struct xdp_umem *umem, u64 addr) +{ + return umem->pages[addr >> PAGE_SHIFT].addr + (addr & (PAGE_SIZE - 1)); +} + +static inline dma_addr_t xdp_umem_get_dma(struct xdp_umem *umem, u64 addr) +{ + return umem->pages[addr >> PAGE_SHIFT].dma + (addr & (PAGE_SIZE - 1)); +} + +/* Reuse-queue aware version of FILL queue helpers */ +static inline u64 *xsk_umem_peek_addr_rq(struct xdp_umem *umem, u64 *addr) +{ + struct xdp_umem_fq_reuse *rq = umem->fq_reuse; + + if (!rq->length) + return xsk_umem_peek_addr(umem, addr); + + *addr = rq->handles[rq->length - 1]; + return addr; +} + +static inline void xsk_umem_discard_addr_rq(struct xdp_umem *umem) +{ + struct xdp_umem_fq_reuse *rq = umem->fq_reuse; + + if (!rq->length) + xsk_umem_discard_addr(umem); + else + rq->length--; +} + +static inline void xsk_umem_fq_reuse(struct xdp_umem *umem, u64 addr) +{ + struct xdp_umem_fq_reuse *rq = umem->fq_reuse; + + rq->handles[rq->length++] = addr; +} #else static inline int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp) { @@ -98,6 +145,74 @@ static inline bool xsk_is_setup_for_bpf_map(struct xdp_sock *xs) { return false; } + +static inline u64 *xsk_umem_peek_addr(struct xdp_umem *umem, u64 *addr) +{ + return NULL; +} + +static inline void xsk_umem_discard_addr(struct xdp_umem *umem) +{ +} + +static inline void xsk_umem_complete_tx(struct xdp_umem *umem, u32 nb_entries) +{ +} + +static inline bool xsk_umem_consume_tx(struct xdp_umem *umem, dma_addr_t *dma, + u32 *len) +{ + return false; +} + +static inline void xsk_umem_consume_tx_done(struct xdp_umem *umem) +{ +} + +static inline struct xdp_umem_fq_reuse *xsk_reuseq_prepare(u32 nentries) +{ + return NULL; +} + +static inline struct xdp_umem_fq_reuse *xsk_reuseq_swap( + struct xdp_umem *umem, + struct xdp_umem_fq_reuse *newq) +{ + return NULL; +} +static inline void xsk_reuseq_free(struct 
xdp_umem_fq_reuse *rq) +{ +} + +static inline struct xdp_umem *xdp_get_umem_from_qid(struct net_device *dev, + u16 queue_id) +{ + return NULL; +} + +static inline char *xdp_umem_get_data(struct xdp_umem *umem, u64 addr) +{ + return NULL; +} + +static inline dma_addr_t xdp_umem_get_dma(struct xdp_umem *umem, u64 addr) +{ + return 0; +} + +static inline u64 *xsk_umem_peek_addr_rq(struct xdp_umem *umem, u64 *addr) +{ + return NULL; +} + +static inline void xsk_umem_discard_addr_rq(struct xdp_umem *umem) +{ +} + +static inline void xsk_umem_fq_reuse(struct xdp_umem *umem, u64 addr) +{ +} + #endif /* CONFIG_XDP_SOCKETS */ #endif /* _LINUX_XDP_SOCK_H */ diff --git a/include/rdma/ib_addr.h b/include/rdma/ib_addr.h index 77c7908b7d73..2734c895c1bf 100644 --- a/include/rdma/ib_addr.h +++ b/include/rdma/ib_addr.h @@ -46,7 +46,6 @@ #include <net/ip.h> #include <rdma/ib_verbs.h> #include <rdma/ib_pack.h> -#include <net/ipv6.h> #include <net/net_namespace.h> /** @@ -95,20 +94,18 @@ int rdma_translate_ip(const struct sockaddr *addr, * @timeout_ms: Amount of time to wait for the address resolution to complete. * @callback: Call invoked once address resolution has completed, timed out, * or been canceled. A status of 0 indicates success. + * @resolve_by_gid_attr: Resolve the ip based on the GID attribute from + * rdma_dev_addr. * @context: User-specified context associated with the call. */ int rdma_resolve_ip(struct sockaddr *src_addr, const struct sockaddr *dst_addr, - struct rdma_dev_addr *addr, int timeout_ms, + struct rdma_dev_addr *addr, unsigned long timeout_ms, void (*callback)(int status, struct sockaddr *src_addr, struct rdma_dev_addr *addr, void *context), - void *context); + bool resolve_by_gid_attr, void *context); void rdma_addr_cancel(struct rdma_dev_addr *addr); -void rdma_copy_addr(struct rdma_dev_addr *dev_addr, - const struct net_device *dev, - const unsigned char *dst_dev_addr); - int rdma_addr_size(const struct sockaddr *addr); int rdma_addr_size_in6(struct sockaddr_in6 *addr); int rdma_addr_size_kss(struct __kernel_sockaddr_storage *addr); diff --git a/include/rdma/ib_cm.h b/include/rdma/ib_cm.h index c10f4b5ea8ab..49f4f75499b3 100644 --- a/include/rdma/ib_cm.h +++ b/include/rdma/ib_cm.h @@ -583,7 +583,7 @@ struct ib_cm_sidr_req_param { struct sa_path_rec *path; const struct ib_gid_attr *sgid_attr; __be64 service_id; - int timeout_ms; + unsigned long timeout_ms; const void *private_data; u8 private_data_len; u8 max_cm_retries; diff --git a/include/rdma/ib_sa.h b/include/rdma/ib_sa.h index b6ddf2a1b9d8..19520979b84c 100644 --- a/include/rdma/ib_sa.h +++ b/include/rdma/ib_sa.h @@ -449,28 +449,23 @@ struct ib_sa_query; void ib_sa_cancel_query(int id, struct ib_sa_query *query); -int ib_sa_path_rec_get(struct ib_sa_client *client, - struct ib_device *device, u8 port_num, - struct sa_path_rec *rec, - ib_sa_comp_mask comp_mask, - int timeout_ms, gfp_t gfp_mask, - void (*callback)(int status, - struct sa_path_rec *resp, +int ib_sa_path_rec_get(struct ib_sa_client *client, struct ib_device *device, + u8 port_num, struct sa_path_rec *rec, + ib_sa_comp_mask comp_mask, unsigned long timeout_ms, + gfp_t gfp_mask, + void (*callback)(int status, struct sa_path_rec *resp, void *context), - void *context, - struct ib_sa_query **query); + void *context, struct ib_sa_query **query); int ib_sa_service_rec_query(struct ib_sa_client *client, - struct ib_device *device, u8 port_num, - u8 method, - struct ib_sa_service_rec *rec, - ib_sa_comp_mask comp_mask, - int timeout_ms, gfp_t gfp_mask, - void 
(*callback)(int status, - struct ib_sa_service_rec *resp, - void *context), - void *context, - struct ib_sa_query **sa_query); + struct ib_device *device, u8 port_num, u8 method, + struct ib_sa_service_rec *rec, + ib_sa_comp_mask comp_mask, unsigned long timeout_ms, + gfp_t gfp_mask, + void (*callback)(int status, + struct ib_sa_service_rec *resp, + void *context), + void *context, struct ib_sa_query **sa_query); struct ib_sa_multicast { struct ib_sa_mcmember_rec rec; @@ -573,12 +568,11 @@ int ib_sa_guid_info_rec_query(struct ib_sa_client *client, struct ib_device *device, u8 port_num, struct ib_sa_guidinfo_rec *rec, ib_sa_comp_mask comp_mask, u8 method, - int timeout_ms, gfp_t gfp_mask, + unsigned long timeout_ms, gfp_t gfp_mask, void (*callback)(int status, struct ib_sa_guidinfo_rec *resp, void *context), - void *context, - struct ib_sa_query **sa_query); + void *context, struct ib_sa_query **sa_query); bool ib_sa_sendonly_fullmem_support(struct ib_sa_client *client, struct ib_device *device, diff --git a/include/rdma/ib_umem.h b/include/rdma/ib_umem.h index a1fd63871d17..5d3755ec5afa 100644 --- a/include/rdma/ib_umem.h +++ b/include/rdma/ib_umem.h @@ -42,15 +42,14 @@ struct ib_umem_odp; struct ib_umem { struct ib_ucontext *context; + struct mm_struct *owning_mm; size_t length; unsigned long address; int page_shift; - int writable; - int hugetlb; + u32 writable : 1; + u32 hugetlb : 1; + u32 is_odp : 1; struct work_struct work; - struct mm_struct *mm; - unsigned long diff; - struct ib_umem_odp *odp_data; struct sg_table sg_head; int nmap; int npages; diff --git a/include/rdma/ib_umem_odp.h b/include/rdma/ib_umem_odp.h index 381cdf5a9bd1..0b1446fe2fab 100644 --- a/include/rdma/ib_umem_odp.h +++ b/include/rdma/ib_umem_odp.h @@ -43,6 +43,9 @@ struct umem_odp_node { }; struct ib_umem_odp { + struct ib_umem umem; + struct ib_ucontext_per_mm *per_mm; + /* * An array of the pages included in the on-demand paging umem. * Indices of pages that are currently not mapped into the device will @@ -64,16 +67,9 @@ struct ib_umem_odp { struct mutex umem_mutex; void *private; /* for the HW driver to use. */ - /* When false, use the notifier counter in the ucontext struct. */ - bool mn_counters_active; int notifiers_seq; int notifiers_count; - /* A linked list of umems that don't have private mmu notifier - * counters yet. 
*/ - struct list_head no_private_counters; - struct ib_umem *umem; - /* Tree tracking */ struct umem_odp_node interval_tree; @@ -82,15 +78,34 @@ struct ib_umem_odp { struct work_struct work; }; +static inline struct ib_umem_odp *to_ib_umem_odp(struct ib_umem *umem) +{ + return container_of(umem, struct ib_umem_odp, umem); +} + #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING -int ib_umem_odp_get(struct ib_ucontext *context, struct ib_umem *umem, - int access); -struct ib_umem *ib_alloc_odp_umem(struct ib_ucontext *context, - unsigned long addr, - size_t size); +struct ib_ucontext_per_mm { + struct ib_ucontext *context; + struct mm_struct *mm; + struct pid *tgid; + bool active; + + struct rb_root_cached umem_tree; + /* Protects umem_tree */ + struct rw_semaphore umem_rwsem; -void ib_umem_odp_release(struct ib_umem *umem); + struct mmu_notifier mn; + unsigned int odp_mrs_count; + + struct list_head ucontext_list; + struct rcu_head rcu; +}; + +int ib_umem_odp_get(struct ib_umem_odp *umem_odp, int access); +struct ib_umem_odp *ib_alloc_odp_umem(struct ib_ucontext_per_mm *per_mm, + unsigned long addr, size_t size); +void ib_umem_odp_release(struct ib_umem_odp *umem_odp); /* * The lower 2 bits of the DMA address signal the R/W permissions for @@ -105,13 +120,14 @@ void ib_umem_odp_release(struct ib_umem *umem); #define ODP_DMA_ADDR_MASK (~(ODP_READ_ALLOWED_BIT | ODP_WRITE_ALLOWED_BIT)) -int ib_umem_odp_map_dma_pages(struct ib_umem *umem, u64 start_offset, u64 bcnt, - u64 access_mask, unsigned long current_seq); +int ib_umem_odp_map_dma_pages(struct ib_umem_odp *umem_odp, u64 start_offset, + u64 bcnt, u64 access_mask, + unsigned long current_seq); -void ib_umem_odp_unmap_dma_pages(struct ib_umem *umem, u64 start_offset, +void ib_umem_odp_unmap_dma_pages(struct ib_umem_odp *umem_odp, u64 start_offset, u64 bound); -typedef int (*umem_call_back)(struct ib_umem *item, u64 start, u64 end, +typedef int (*umem_call_back)(struct ib_umem_odp *item, u64 start, u64 end, void *cookie); /* * Call the callback on each ib_umem in the range. Returns the logical or of @@ -129,46 +145,37 @@ int rbt_ib_umem_for_each_in_range(struct rb_root_cached *root, struct ib_umem_odp *rbt_ib_umem_lookup(struct rb_root_cached *root, u64 addr, u64 length); -static inline int ib_umem_mmu_notifier_retry(struct ib_umem *item, +static inline int ib_umem_mmu_notifier_retry(struct ib_umem_odp *umem_odp, unsigned long mmu_seq) { /* * This code is strongly based on the KVM code from * mmu_notifier_retry. Should be called with - * the relevant locks taken (item->odp_data->umem_mutex + * the relevant locks taken (umem_odp->umem_mutex * and the ucontext umem_mutex semaphore locked for read). */ - /* Do not allow page faults while the new ib_umem hasn't seen a state - * with zero notifiers yet, and doesn't have its own valid set of - * private counters. 
*/ - if (!item->odp_data->mn_counters_active) - return 1; - - if (unlikely(item->odp_data->notifiers_count)) + if (unlikely(umem_odp->notifiers_count)) return 1; - if (item->odp_data->notifiers_seq != mmu_seq) + if (umem_odp->notifiers_seq != mmu_seq) return 1; return 0; } #else /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */ -static inline int ib_umem_odp_get(struct ib_ucontext *context, - struct ib_umem *umem, - int access) +static inline int ib_umem_odp_get(struct ib_umem_odp *umem_odp, int access) { return -EINVAL; } -static inline struct ib_umem *ib_alloc_odp_umem(struct ib_ucontext *context, - unsigned long addr, - size_t size) +static inline struct ib_umem_odp * +ib_alloc_odp_umem(struct ib_ucontext *context, unsigned long addr, size_t size) { return ERR_PTR(-EINVAL); } -static inline void ib_umem_odp_release(struct ib_umem *umem) {} +static inline void ib_umem_odp_release(struct ib_umem_odp *umem_odp) {} #endif /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */ diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h index e950c2a68f06..9c0c2132a2d6 100644 --- a/include/rdma/ib_verbs.h +++ b/include/rdma/ib_verbs.h @@ -69,8 +69,11 @@ #define IB_FW_VERSION_NAME_MAX ETHTOOL_FWVERS_LEN +struct ib_umem_odp; + extern struct workqueue_struct *ib_wq; extern struct workqueue_struct *ib_comp_wq; +extern struct workqueue_struct *ib_comp_unbound_wq; union ib_gid { u8 raw[16]; @@ -1137,7 +1140,9 @@ enum ib_qp_create_flags { */ struct ib_qp_init_attr { + /* Consumer's event_handler callback must not block */ void (*event_handler)(struct ib_event *, void *); + void *qp_context; struct ib_cq *send_cq; struct ib_cq *recv_cq; @@ -1146,7 +1151,7 @@ struct ib_qp_init_attr { struct ib_qp_cap cap; enum ib_sig_type sq_sig_type; enum ib_qp_type qp_type; - enum ib_qp_create_flags create_flags; + u32 create_flags; /* * Only needed for special QP types, or when using the RW API. @@ -1278,21 +1283,27 @@ struct ib_qp_attr { }; enum ib_wr_opcode { - IB_WR_RDMA_WRITE, - IB_WR_RDMA_WRITE_WITH_IMM, - IB_WR_SEND, - IB_WR_SEND_WITH_IMM, - IB_WR_RDMA_READ, - IB_WR_ATOMIC_CMP_AND_SWP, - IB_WR_ATOMIC_FETCH_AND_ADD, - IB_WR_LSO, - IB_WR_SEND_WITH_INV, - IB_WR_RDMA_READ_WITH_INV, - IB_WR_LOCAL_INV, - IB_WR_REG_MR, - IB_WR_MASKED_ATOMIC_CMP_AND_SWP, - IB_WR_MASKED_ATOMIC_FETCH_AND_ADD, + /* These are shared with userspace */ + IB_WR_RDMA_WRITE = IB_UVERBS_WR_RDMA_WRITE, + IB_WR_RDMA_WRITE_WITH_IMM = IB_UVERBS_WR_RDMA_WRITE_WITH_IMM, + IB_WR_SEND = IB_UVERBS_WR_SEND, + IB_WR_SEND_WITH_IMM = IB_UVERBS_WR_SEND_WITH_IMM, + IB_WR_RDMA_READ = IB_UVERBS_WR_RDMA_READ, + IB_WR_ATOMIC_CMP_AND_SWP = IB_UVERBS_WR_ATOMIC_CMP_AND_SWP, + IB_WR_ATOMIC_FETCH_AND_ADD = IB_UVERBS_WR_ATOMIC_FETCH_AND_ADD, + IB_WR_LSO = IB_UVERBS_WR_TSO, + IB_WR_SEND_WITH_INV = IB_UVERBS_WR_SEND_WITH_INV, + IB_WR_RDMA_READ_WITH_INV = IB_UVERBS_WR_RDMA_READ_WITH_INV, + IB_WR_LOCAL_INV = IB_UVERBS_WR_LOCAL_INV, + IB_WR_MASKED_ATOMIC_CMP_AND_SWP = + IB_UVERBS_WR_MASKED_ATOMIC_CMP_AND_SWP, + IB_WR_MASKED_ATOMIC_FETCH_AND_ADD = + IB_UVERBS_WR_MASKED_ATOMIC_FETCH_AND_ADD, + + /* These are kernel only and can not be issued by userspace */ + IB_WR_REG_MR = 0x20, IB_WR_REG_SIG_MR, + /* reserve values for low level drivers' internal use. * These values will not be used at all in the ib core layer. */ @@ -1485,26 +1496,15 @@ struct ib_ucontext { * it is set when we are closing the file descriptor and indicates * that mm_sem may be locked. 
*/ - int closing; + bool closing; bool cleanup_retryable; - struct pid *tgid; #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING - struct rb_root_cached umem_tree; - /* - * Protects .umem_rbroot and tree, as well as odp_mrs_count and - * mmu notifiers registration. - */ - struct rw_semaphore umem_rwsem; - void (*invalidate_range)(struct ib_umem *umem, + void (*invalidate_range)(struct ib_umem_odp *umem_odp, unsigned long start, unsigned long end); - - struct mmu_notifier mn; - atomic_t notifier_count; - /* A list of umems that don't have private mmu notifier counters yet. */ - struct list_head no_private_counters; - int odp_mrs_count; + struct mutex per_mm_list_lock; + struct list_head per_mm_list; #endif struct ib_rdmacg_object cg_obj; @@ -1570,9 +1570,10 @@ struct ib_ah { typedef void (*ib_comp_handler)(struct ib_cq *cq, void *cq_context); enum ib_poll_context { - IB_POLL_DIRECT, /* caller context, no hw completions */ - IB_POLL_SOFTIRQ, /* poll from softirq context */ - IB_POLL_WORKQUEUE, /* poll from workqueue */ + IB_POLL_DIRECT, /* caller context, no hw completions */ + IB_POLL_SOFTIRQ, /* poll from softirq context */ + IB_POLL_WORKQUEUE, /* poll from workqueue */ + IB_POLL_UNBOUND_WORKQUEUE, /* poll from unbound workqueue */ }; struct ib_cq { @@ -1589,6 +1590,7 @@ struct ib_cq { struct irq_poll iop; struct work_struct work; }; + struct workqueue_struct *comp_wq; /* * Implementation details of the RDMA core, don't use in drivers: */ @@ -2223,6 +2225,16 @@ struct rdma_netdev { union ib_gid *gid, u16 mlid); }; +struct rdma_netdev_alloc_params { + size_t sizeof_priv; + unsigned int txqs; + unsigned int rxqs; + void *param; + + int (*initialize_rdma_netdev)(struct ib_device *device, u8 port_num, + struct net_device *netdev, void *param); +}; + struct ib_port_pkey_list { /* Lock to hold while modifying the list. */ spinlock_t list_lock; @@ -2253,10 +2265,11 @@ struct ib_device { struct list_head event_handler_list; spinlock_t event_handler_lock; - spinlock_t client_data_lock; + rwlock_t client_data_lock; struct list_head core_list; /* Access to the client_data_list is protected by the client_data_lock - * spinlock and the lists_rwsem read-write semaphore */ + * rwlock and the lists_rwsem read-write semaphore + */ struct list_head client_data_list; struct ib_cache cache; @@ -2523,8 +2536,8 @@ struct ib_device { /** * rdma netdev operation * - * Driver implementing alloc_rdma_netdev must return -EOPNOTSUPP if it - * doesn't support the specified rdma netdev type. + * Driver implementing alloc_rdma_netdev or rdma_netdev_get_params + * must return -EOPNOTSUPP if it doesn't support the specified type. */ struct net_device *(*alloc_rdma_netdev)( struct ib_device *device, @@ -2534,9 +2547,19 @@ struct ib_device { unsigned char name_assign_type, void (*setup)(struct net_device *)); + int (*rdma_netdev_get_params)(struct ib_device *device, u8 port_num, + enum rdma_netdev_t type, + struct rdma_netdev_alloc_params *params); + struct module *owner; struct device dev; - struct kobject *ports_parent; + /* First group for device attributes, + * Second group for driver provided attributes (optional). + * It is NULL terminated array. 
+ */ + const struct attribute_group *groups[3]; + + struct kobject *ports_kobj; struct list_head port_list; enum { @@ -2619,9 +2642,9 @@ void ib_dealloc_device(struct ib_device *device); void ib_get_device_fw_str(struct ib_device *device, char *str); -int ib_register_device(struct ib_device *device, - int (*port_callback)(struct ib_device *, - u8, struct kobject *)); +int ib_register_device(struct ib_device *device, const char *name, + int (*port_callback)(struct ib_device *, u8, + struct kobject *)); void ib_unregister_device(struct ib_device *device); int ib_register_client (struct ib_client *client); @@ -2631,6 +2654,28 @@ void *ib_get_client_data(struct ib_device *device, struct ib_client *client); void ib_set_client_data(struct ib_device *device, struct ib_client *client, void *data); +#if IS_ENABLED(CONFIG_INFINIBAND_USER_ACCESS) +int rdma_user_mmap_io(struct ib_ucontext *ucontext, struct vm_area_struct *vma, + unsigned long pfn, unsigned long size, pgprot_t prot); +int rdma_user_mmap_page(struct ib_ucontext *ucontext, + struct vm_area_struct *vma, struct page *page, + unsigned long size); +#else +static inline int rdma_user_mmap_io(struct ib_ucontext *ucontext, + struct vm_area_struct *vma, + unsigned long pfn, unsigned long size, + pgprot_t prot) +{ + return -EINVAL; +} +static inline int rdma_user_mmap_page(struct ib_ucontext *ucontext, + struct vm_area_struct *vma, struct page *page, + unsigned long size) +{ + return -EINVAL; +} +#endif + static inline int ib_copy_from_udata(void *dest, struct ib_udata *udata, size_t len) { return copy_from_user(dest, udata->inbuf, len) ? -EFAULT : 0; @@ -2714,7 +2759,6 @@ static inline int ib_destroy_usecnt(atomic_t *usecnt, * @next_state: Next QP state * @type: QP type * @mask: Mask of supplied QP attributes - * @ll : link layer of port * * This function is a helper function that a low-level driver's * modify_qp method can use to validate the consumer's input. It @@ -2723,8 +2767,7 @@ static inline int ib_destroy_usecnt(atomic_t *usecnt, * and that the attribute mask supplied is allowed for the transition. */ bool ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state, - enum ib_qp_type type, enum ib_qp_attr_mask mask, - enum rdma_link_layer ll); + enum ib_qp_type type, enum ib_qp_attr_mask mask); void ib_register_event_handler(struct ib_event_handler *event_handler); void ib_unregister_event_handler(struct ib_event_handler *event_handler); @@ -4153,20 +4196,6 @@ ib_get_vector_affinity(struct ib_device *device, int comp_vector) } -static inline void ib_set_flow(struct ib_uobject *uobj, struct ib_flow *ibflow, - struct ib_qp *qp, struct ib_device *device) -{ - uobj->object = ibflow; - ibflow->uobject = uobj; - - if (qp) { - atomic_inc(&qp->usecnt); - ibflow->qp = qp; - } - - ibflow->device = device; -} - /** * rdma_roce_rescan_device - Rescan all of the network devices in the system * and add their gids, as needed, to the relevant RoCE devices. 
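rdma_user_mmap_io() wraps the io_remap_pfn_range()-style mapping that drivers previously open-coded in their mmap methods. A minimal sketch of a driver mmap handler exposing one doorbell page (my_drv_mmap and my_db_phys_addr are hypothetical):

#include <rdma/ib_verbs.h>

static phys_addr_t my_db_phys_addr;     /* hypothetical doorbell bus address */

static int my_drv_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
{
        if (vma->vm_end - vma->vm_start != PAGE_SIZE)
                return -EINVAL;

        return rdma_user_mmap_io(context, vma,
                                 my_db_phys_addr >> PAGE_SHIFT, PAGE_SIZE,
                                 pgprot_noncached(vma->vm_page_prot));
}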
@@ -4179,4 +4208,38 @@ struct ib_ucontext *ib_uverbs_get_ucontext(struct ib_uverbs_file *ufile); int uverbs_destroy_def_handler(struct ib_uverbs_file *file, struct uverbs_attr_bundle *attrs); + +struct net_device *rdma_alloc_netdev(struct ib_device *device, u8 port_num, + enum rdma_netdev_t type, const char *name, + unsigned char name_assign_type, + void (*setup)(struct net_device *)); + +int rdma_init_netdev(struct ib_device *device, u8 port_num, + enum rdma_netdev_t type, const char *name, + unsigned char name_assign_type, + void (*setup)(struct net_device *), + struct net_device *netdev); + +/** + * rdma_set_device_sysfs_group - Set device attributes group to have + * driver specific sysfs entries at + * for infiniband class. + * + * @device: device pointer for which attributes to be created + * @group: Pointer to group which should be added when device + * is registered with sysfs. + * rdma_set_device_sysfs_group() allows existing drivers to expose one + * group per device to have sysfs attributes. + * + * NOTE: New drivers should not make use of this API; instead new device + * parameter should be exposed via netlink command. This API and mechanism + * exist only for existing drivers. + */ +static inline void +rdma_set_device_sysfs_group(struct ib_device *dev, + const struct attribute_group *group) +{ + dev->groups[1] = group; +} + #endif /* IB_VERBS_H */ diff --git a/include/rdma/rdma_cm.h b/include/rdma/rdma_cm.h index 5d71a7f51a9f..60987a5903b7 100644 --- a/include/rdma/rdma_cm.h +++ b/include/rdma/rdma_cm.h @@ -152,7 +152,11 @@ struct rdma_cm_id *__rdma_create_id(struct net *net, * @ps: RDMA port space. * @qp_type: type of queue pair associated with the id. * - * The id holds a reference on the network namespace until it is destroyed. + * Returns a new rdma_cm_id. The id holds a reference on the network + * namespace until it is destroyed. + * + * The event handler callback serializes on the id's mutex and is + * allowed to sleep. */ #define rdma_create_id(net, event_handler, context, ps, qp_type) \ __rdma_create_id((net), (event_handler), (context), (ps), (qp_type), \ @@ -192,7 +196,8 @@ int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr); * @timeout_ms: Time to wait for resolution to complete. */ int rdma_resolve_addr(struct rdma_cm_id *id, struct sockaddr *src_addr, - const struct sockaddr *dst_addr, int timeout_ms); + const struct sockaddr *dst_addr, + unsigned long timeout_ms); /** * rdma_resolve_route - Resolve the RDMA address bound to the RDMA identifier @@ -202,7 +207,7 @@ int rdma_resolve_addr(struct rdma_cm_id *id, struct sockaddr *src_addr, * Users must have first called rdma_resolve_addr to resolve a dst_addr * into an RDMA address before calling this routine. */ -int rdma_resolve_route(struct rdma_cm_id *id, int timeout_ms); +int rdma_resolve_route(struct rdma_cm_id *id, unsigned long timeout_ms); /** * rdma_create_qp - Allocate a QP and associate it with the specified RDMA diff --git a/include/rdma/rdma_netlink.h b/include/rdma/rdma_netlink.h index c369703fcd69..70218e6b5187 100644 --- a/include/rdma/rdma_netlink.h +++ b/include/rdma/rdma_netlink.h @@ -96,7 +96,7 @@ int rdma_nl_multicast(struct sk_buff *skb, unsigned int group, gfp_t flags); /** * Check if there are any listeners to the netlink group * @group: the netlink group ID - * Returns 0 on success or a negative for no listeners. + * Returns true on success or false if no listeners. 
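Taken together with the renamed ib_register_device() above, the groups[] slot lets a driver publish its sysfs attributes without touching kobjects directly. A sketch under hypothetical names (hca_type_show, my_attr_group, "mydrv%d"):

#include <linux/device.h>
#include <rdma/ib_verbs.h>

static ssize_t hca_type_show(struct device *dev,
                             struct device_attribute *attr, char *buf)
{
        return sprintf(buf, "MY-HCA\n");
}
static DEVICE_ATTR_RO(hca_type);

static struct attribute *my_dev_attrs[] = {
        &dev_attr_hca_type.attr,
        NULL,
};

static const struct attribute_group my_attr_group = {
        .attrs = my_dev_attrs,
};

static int my_register(struct ib_device *ibdev)
{
        /* Group lands in ibdev->groups[1]; slot 0 holds the core's
         * device attributes, per the comment in struct ib_device. */
        rdma_set_device_sysfs_group(ibdev, &my_attr_group);
        return ib_register_device(ibdev, "mydrv%d", NULL);
}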
*/ -int rdma_nl_chk_listeners(unsigned int group); +bool rdma_nl_chk_listeners(unsigned int group); #endif /* _RDMA_NETLINK_H */ diff --git a/include/rdma/rdma_vt.h b/include/rdma/rdma_vt.h index e79229a0cf01..3584d0816fcd 100644 --- a/include/rdma/rdma_vt.h +++ b/include/rdma/rdma_vt.h @@ -149,6 +149,10 @@ struct rvt_ibport { #define RVT_CQN_MAX 16 /* maximum length of cq name */ +#define RVT_SGE_COPY_MEMCPY 0 +#define RVT_SGE_COPY_CACHELESS 1 +#define RVT_SGE_COPY_ADAPTIVE 2 + /* * Things that are driver specific, module parameters in hfi1 and qib */ @@ -161,6 +165,9 @@ struct rvt_driver_params { */ unsigned int lkey_table_size; unsigned int qp_table_size; + unsigned int sge_copy_mode; + unsigned int wss_threshold; + unsigned int wss_clean_period; int qpn_start; int qpn_inc; int qpn_res_start; @@ -193,6 +200,19 @@ struct rvt_ah { u8 log_pmtu; }; +/* memory working set size */ +struct rvt_wss { + unsigned long *entries; + atomic_t total_count; + atomic_t clean_counter; + atomic_t clean_entry; + + int threshold; + int num_entries; + long pages_mask; + unsigned int clean_period; +}; + struct rvt_dev_info; struct rvt_swqe; struct rvt_driver_provided { @@ -211,11 +231,18 @@ struct rvt_driver_provided { * version requires the s_lock not to be held. The other assumes the * s_lock is held. */ - void (*schedule_send)(struct rvt_qp *qp); - void (*schedule_send_no_lock)(struct rvt_qp *qp); + bool (*schedule_send)(struct rvt_qp *qp); + bool (*schedule_send_no_lock)(struct rvt_qp *qp); - /* Driver specific work request checking */ - int (*check_send_wqe)(struct rvt_qp *qp, struct rvt_swqe *wqe); + /* + * Driver specific work request setup and checking. + * This function is allowed to perform any setup, checks, or + * adjustments required to the SWQE in order to be usable by + * underlying protocols. This includes private data structure + * allocations. + */ + int (*setup_wqe)(struct rvt_qp *qp, struct rvt_swqe *wqe, + bool *call_send); /* * Sometimes rdmavt needs to kick the driver's send progress. That is @@ -371,6 +398,9 @@ struct rvt_dev_info { /* post send table */ const struct rvt_operation_params *post_parms; + /* opcode translation table */ + const enum ib_wc_opcode *wc_opcode; + /* Driver specific helper functions */ struct rvt_driver_provided driver_f; @@ -411,6 +441,8 @@ struct rvt_dev_info { u32 n_mcast_grps_allocated; /* number of mcast groups allocated */ spinlock_t n_mcast_grps_lock; + /* Memory Working Set Size */ + struct rvt_wss *wss; }; /** @@ -423,7 +455,14 @@ static inline void rvt_set_ibdev_name(struct rvt_dev_info *rdi, const char *fmt, const char *name, const int unit) { - snprintf(rdi->ibdev.name, sizeof(rdi->ibdev.name), fmt, name, unit); + /* + * FIXME: rvt and its users want to touch the ibdev before + * registration and have things like the name work. We don't have the + * infrastructure in the core to support this directly today, hack it + * to work by setting the name manually here. 
+ */ + dev_set_name(&rdi->ibdev.dev, fmt, name, unit); + strlcpy(rdi->ibdev.name, dev_name(&rdi->ibdev.dev), IB_DEVICE_NAME_MAX); } /** @@ -434,7 +473,7 @@ static inline void rvt_set_ibdev_name(struct rvt_dev_info *rdi, */ static inline const char *rvt_get_ibdev_name(const struct rvt_dev_info *rdi) { - return rdi->ibdev.name; + return dev_name(&rdi->ibdev.dev); } static inline struct rvt_pd *ibpd_to_rvtpd(struct ib_pd *ibpd) diff --git a/include/rdma/rdmavt_qp.h b/include/rdma/rdmavt_qp.h index 927f6d5b6d0f..cbafb1878669 100644 --- a/include/rdma/rdmavt_qp.h +++ b/include/rdma/rdmavt_qp.h @@ -678,6 +678,13 @@ void rvt_del_timers_sync(struct rvt_qp *qp); void rvt_stop_rc_timers(struct rvt_qp *qp); void rvt_add_retry_timer(struct rvt_qp *qp); +void rvt_copy_sge(struct rvt_qp *qp, struct rvt_sge_state *ss, + void *data, u32 length, + bool release, bool copy_last); +void rvt_send_complete(struct rvt_qp *qp, struct rvt_swqe *wqe, + enum ib_wc_status status); +void rvt_ruc_loopback(struct rvt_qp *qp); + /** * struct rvt_qp_iter - the iterator for QPs * @qp - the current QP diff --git a/include/rdma/restrack.h b/include/rdma/restrack.h index 9654d33edd98..2638fa7cd702 100644 --- a/include/rdma/restrack.h +++ b/include/rdma/restrack.h @@ -173,16 +173,10 @@ int rdma_restrack_put(struct rdma_restrack_entry *res); /** * rdma_restrack_set_task() - set the task for this resource * @res: resource entry - * @task: task struct + * @caller: kernel name, the current task will be used if the caller is NULL. */ -static inline void rdma_restrack_set_task(struct rdma_restrack_entry *res, - struct task_struct *task) -{ - if (res->task) - put_task_struct(res->task); - get_task_struct(task); - res->task = task; -} +void rdma_restrack_set_task(struct rdma_restrack_entry *res, + const char *caller); /* * Helper functions for rdma drivers when filling out diff --git a/include/rdma/uverbs_ioctl.h b/include/rdma/uverbs_ioctl.h index 9e997c3c2f04..84d3d15f1f38 100644 --- a/include/rdma/uverbs_ioctl.h +++ b/include/rdma/uverbs_ioctl.h @@ -52,6 +52,7 @@ enum uverbs_attr_type { UVERBS_ATTR_TYPE_IDR, UVERBS_ATTR_TYPE_FD, UVERBS_ATTR_TYPE_ENUM_IN, + UVERBS_ATTR_TYPE_IDRS_ARRAY, }; enum uverbs_obj_access { @@ -101,7 +102,7 @@ struct uverbs_attr_spec { } enum_def; } u; - /* This weird split of the enum lets us remove some padding */ + /* This weird split lets us remove some padding */ union { struct { /* @@ -111,6 +112,17 @@ struct uverbs_attr_spec { */ const struct uverbs_attr_spec *ids; } enum_def; + + struct { + /* + * higher bits mean the namespace and lower bits mean + * the type id within the namespace. + */ + u16 obj_type; + u16 min_len; + u16 max_len; + u8 access; + } objs_arr; } u2; }; @@ -251,6 +263,11 @@ static inline __attribute_const__ u32 uapi_bkey_attr(u32 attr_key) return attr_key - 1; } +static inline __attribute_const__ u32 uapi_bkey_to_key_attr(u32 attr_bkey) +{ + return attr_bkey + 1; +} + /* * ======================================= * Verbs definitions @@ -323,6 +340,27 @@ struct uverbs_object_tree_def { #define UA_MANDATORY .mandatory = 1 #define UA_OPTIONAL .mandatory = 0 +/* + * min_len must be bigger than 0 and _max_len must be smaller than 4095. Only + * READ\WRITE accesses are supported. + */ +#define UVERBS_ATTR_IDRS_ARR(_attr_id, _idr_type, _access, _min_len, _max_len, \ + ...) 
\ + (&(const struct uverbs_attr_def){ \ + .id = (_attr_id) + \ + BUILD_BUG_ON_ZERO((_min_len) == 0 || \ + (_max_len) > \ + PAGE_SIZE / sizeof(void *) || \ + (_min_len) > (_max_len) || \ + (_access) == UVERBS_ACCESS_NEW || \ + (_access) == UVERBS_ACCESS_DESTROY), \ + .attr = { .type = UVERBS_ATTR_TYPE_IDRS_ARRAY, \ + .u2.objs_arr.obj_type = _idr_type, \ + .u2.objs_arr.access = _access, \ + .u2.objs_arr.min_len = _min_len, \ + .u2.objs_arr.max_len = _max_len, \ + __VA_ARGS__ } }) + #define UVERBS_ATTR_IDR(_attr_id, _idr_type, _access, ...) \ (&(const struct uverbs_attr_def){ \ .id = _attr_id, \ @@ -365,6 +403,15 @@ struct uverbs_object_tree_def { __VA_ARGS__ }, \ }) +/* An input value that is a member in the enum _enum_type. */ +#define UVERBS_ATTR_CONST_IN(_attr_id, _enum_type, ...) \ + UVERBS_ATTR_PTR_IN( \ + _attr_id, \ + UVERBS_ATTR_SIZE( \ + sizeof(u64) + BUILD_BUG_ON_ZERO(!sizeof(_enum_type)), \ + sizeof(u64)), \ + __VA_ARGS__) + /* * An input value that is a bitwise combination of values of _enum_type. * This permits the flag value to be passed as either a u32 or u64, it must @@ -431,10 +478,16 @@ struct uverbs_obj_attr { const struct uverbs_api_attr *attr_elm; }; +struct uverbs_objs_arr_attr { + struct ib_uobject **uobjects; + u16 len; +}; + struct uverbs_attr { union { struct uverbs_ptr_attr ptr_attr; struct uverbs_obj_attr obj_attr; + struct uverbs_objs_arr_attr objs_arr_attr; }; }; @@ -507,6 +560,31 @@ uverbs_attr_get_len(const struct uverbs_attr_bundle *attrs_bundle, u16 idx) return attr->ptr_attr.len; } +/** + * uverbs_attr_get_uobjs_arr() - Provides array's properties for attribute for + * UVERBS_ATTR_TYPE_IDRS_ARRAY. + * @arr: Returned pointer to array of pointers for uobjects or NULL if + * the attribute isn't provided. + * + * Return: The array length or 0 if no attribute was provided. 
+ */ +static inline int uverbs_attr_get_uobjs_arr( + const struct uverbs_attr_bundle *attrs_bundle, u16 attr_idx, + struct ib_uobject ***arr) +{ + const struct uverbs_attr *attr = + uverbs_attr_get(attrs_bundle, attr_idx); + + if (IS_ERR(attr)) { + *arr = NULL; + return 0; + } + + *arr = attr->objs_arr_attr.uobjects; + + return attr->objs_arr_attr.len; +} + static inline bool uverbs_attr_ptr_is_inline(const struct uverbs_attr *attr) { return attr->ptr_attr.len <= sizeof(attr->ptr_attr.data); @@ -603,6 +681,9 @@ static inline __malloc void *uverbs_zalloc(struct uverbs_attr_bundle *bundle, { return _uverbs_alloc(bundle, size, GFP_KERNEL | __GFP_ZERO); } +int _uverbs_get_const(s64 *to, const struct uverbs_attr_bundle *attrs_bundle, + size_t idx, s64 lower_bound, u64 upper_bound, + s64 *def_val); #else static inline int uverbs_get_flags64(u64 *to, const struct uverbs_attr_bundle *attrs_bundle, @@ -631,6 +712,34 @@ static inline __malloc void *uverbs_zalloc(struct uverbs_attr_bundle *bundle, { return ERR_PTR(-EINVAL); } +static inline int +_uverbs_get_const(s64 *to, const struct uverbs_attr_bundle *attrs_bundle, + size_t idx, s64 lower_bound, u64 upper_bound, + s64 *def_val) +{ + return -EINVAL; +} #endif +#define uverbs_get_const(_to, _attrs_bundle, _idx) \ + ({ \ + s64 _val; \ + int _ret = _uverbs_get_const(&_val, _attrs_bundle, _idx, \ + type_min(typeof(*_to)), \ + type_max(typeof(*_to)), NULL); \ + (*_to) = _val; \ + _ret; \ + }) + +#define uverbs_get_const_default(_to, _attrs_bundle, _idx, _default) \ + ({ \ + s64 _val; \ + s64 _def_val = _default; \ + int _ret = \ + _uverbs_get_const(&_val, _attrs_bundle, _idx, \ + type_min(typeof(*_to)), \ + type_max(typeof(*_to)), &_def_val); \ + (*_to) = _val; \ + _ret; \ + }) #endif diff --git a/include/rdma/uverbs_std_types.h b/include/rdma/uverbs_std_types.h index 3b00231cc084..3db2802fbc68 100644 --- a/include/rdma/uverbs_std_types.h +++ b/include/rdma/uverbs_std_types.h @@ -140,5 +140,56 @@ __uobj_alloc(const struct uverbs_api_object *obj, struct ib_uverbs_file *ufile, #define uobj_alloc(_type, _ufile, _ib_dev) \ __uobj_alloc(uobj_get_type(_ufile, _type), _ufile, _ib_dev) +static inline void uverbs_flow_action_fill_action(struct ib_flow_action *action, + struct ib_uobject *uobj, + struct ib_device *ib_dev, + enum ib_flow_action_type type) +{ + atomic_set(&action->usecnt, 0); + action->device = ib_dev; + action->type = type; + action->uobject = uobj; + uobj->object = action; +} + +struct ib_uflow_resources { + size_t max; + size_t num; + size_t collection_num; + size_t counters_num; + struct ib_counters **counters; + struct ib_flow_action **collection; +}; + +struct ib_uflow_object { + struct ib_uobject uobject; + struct ib_uflow_resources *resources; +}; + +struct ib_uflow_resources *flow_resources_alloc(size_t num_specs); +void flow_resources_add(struct ib_uflow_resources *uflow_res, + enum ib_flow_spec_type type, + void *ibobj); +void ib_uverbs_flow_resources_free(struct ib_uflow_resources *uflow_res); + +static inline void ib_set_flow(struct ib_uobject *uobj, struct ib_flow *ibflow, + struct ib_qp *qp, struct ib_device *device, + struct ib_uflow_resources *uflow_res) +{ + struct ib_uflow_object *uflow; + + uobj->object = ibflow; + ibflow->uobject = uobj; + + if (qp) { + atomic_inc(&qp->usecnt); + ibflow->qp = qp; + } + + ibflow->device = device; + uflow = container_of(uobj, typeof(*uflow), uobject); + uflow->resources = uflow_res; +} + #endif diff --git a/include/soc/fsl/bman.h b/include/soc/fsl/bman.h index eaaf56df4086..5b99cb2ea5ef 100644 
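uverbs_get_const() gives method handlers range-checked extraction of enum-typed attributes declared with UVERBS_ATTR_CONST_IN(). A sketch with hypothetical attribute id and enum (MY_METHOD_ATTR_MODE, enum my_hw_mode):

#include <rdma/uverbs_ioctl.h>

enum my_hw_mode {                       /* hypothetical driver enum */
        MY_HW_MODE_POLLING,
        MY_HW_MODE_EVENTS,
};

#define MY_METHOD_ATTR_MODE 1           /* hypothetical attribute id */

static int my_method_handler(struct ib_uverbs_file *file,
                             struct uverbs_attr_bundle *attrs)
{
        enum my_hw_mode mode;
        int ret;

        /* Extracts the attribute and rejects values outside the range
         * representable by 'mode'. */
        ret = uverbs_get_const(&mode, attrs, MY_METHOD_ATTR_MODE);
        if (ret)
                return ret;

        return mode == MY_HW_MODE_EVENTS ? 0 : -EOPNOTSUPP;
}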
--- a/include/soc/fsl/bman.h +++ b/include/soc/fsl/bman.h @@ -126,4 +126,12 @@ int bman_release(struct bman_pool *pool, const struct bm_buffer *bufs, u8 num); */ int bman_acquire(struct bman_pool *pool, struct bm_buffer *bufs, u8 num); +/** + * bman_is_probed - Check if bman is probed + * + * Returns 1 if the bman driver successfully probed, -1 if the bman driver + * failed to probe or 0 if the bman driver did not probed yet. + */ +int bman_is_probed(void); + #endif /* __FSL_BMAN_H */ diff --git a/include/soc/fsl/dpaa2-fd.h b/include/soc/fsl/dpaa2-fd.h index 2576abaa7779..90ae8d191f1a 100644 --- a/include/soc/fsl/dpaa2-fd.h +++ b/include/soc/fsl/dpaa2-fd.h @@ -66,6 +66,15 @@ struct dpaa2_fd { #define SG_BPID_MASK 0x3FFF #define SG_FINAL_FLAG_MASK 0x1 #define SG_FINAL_FLAG_SHIFT 15 +#define FL_SHORT_LEN_FLAG_MASK 0x1 +#define FL_SHORT_LEN_FLAG_SHIFT 14 +#define FL_SHORT_LEN_MASK 0x3FFFF +#define FL_OFFSET_MASK 0x0FFF +#define FL_FORMAT_MASK 0x3 +#define FL_FORMAT_SHIFT 12 +#define FL_BPID_MASK 0x3FFF +#define FL_FINAL_FLAG_MASK 0x1 +#define FL_FINAL_FLAG_SHIFT 15 /* Error bits in FD CTRL */ #define FD_CTRL_ERR_MASK 0x000000FF @@ -435,4 +444,237 @@ static inline void dpaa2_sg_set_final(struct dpaa2_sg_entry *sg, bool final) sg->format_offset |= cpu_to_le16(final << SG_FINAL_FLAG_SHIFT); } +/** + * struct dpaa2_fl_entry - structure for frame list entry. + * @addr: address in the FLE + * @len: length in the FLE + * @bpid: buffer pool ID + * @format_offset: format, offset, and short-length fields + * @frc: frame context + * @ctrl: control bits...including pta, pvt1, pvt2, err, etc + * @flc: flow context address + */ +struct dpaa2_fl_entry { + __le64 addr; + __le32 len; + __le16 bpid; + __le16 format_offset; + __le32 frc; + __le32 ctrl; + __le64 flc; +}; + +enum dpaa2_fl_format { + dpaa2_fl_single = 0, + dpaa2_fl_res, + dpaa2_fl_sg +}; + +/** + * dpaa2_fl_get_addr() - get the addr field of FLE + * @fle: the given frame list entry + * + * Return the address in the frame list entry. + */ +static inline dma_addr_t dpaa2_fl_get_addr(const struct dpaa2_fl_entry *fle) +{ + return (dma_addr_t)le64_to_cpu(fle->addr); +} + +/** + * dpaa2_fl_set_addr() - Set the addr field of FLE + * @fle: the given frame list entry + * @addr: the address needs to be set in frame list entry + */ +static inline void dpaa2_fl_set_addr(struct dpaa2_fl_entry *fle, + dma_addr_t addr) +{ + fle->addr = cpu_to_le64(addr); +} + +/** + * dpaa2_fl_get_frc() - Get the frame context in the FLE + * @fle: the given frame list entry + * + * Return the frame context field in the frame lsit entry. + */ +static inline u32 dpaa2_fl_get_frc(const struct dpaa2_fl_entry *fle) +{ + return le32_to_cpu(fle->frc); +} + +/** + * dpaa2_fl_set_frc() - Set the frame context in the FLE + * @fle: the given frame list entry + * @frc: the frame context needs to be set in frame list entry + */ +static inline void dpaa2_fl_set_frc(struct dpaa2_fl_entry *fle, u32 frc) +{ + fle->frc = cpu_to_le32(frc); +} + +/** + * dpaa2_fl_get_ctrl() - Get the control bits in the FLE + * @fle: the given frame list entry + * + * Return the control bits field in the frame list entry. 
+ */ +static inline u32 dpaa2_fl_get_ctrl(const struct dpaa2_fl_entry *fle) +{ + return le32_to_cpu(fle->ctrl); +} + +/** + * dpaa2_fl_set_ctrl() - Set the control bits in the FLE + * @fle: the given frame list entry + * @ctrl: the control bits to be set in the frame list entry + */ +static inline void dpaa2_fl_set_ctrl(struct dpaa2_fl_entry *fle, u32 ctrl) +{ + fle->ctrl = cpu_to_le32(ctrl); +} + +/** + * dpaa2_fl_get_flc() - Get the flow context in the FLE + * @fle: the given frame list entry + * + * Return the flow context in the frame list entry. + */ +static inline dma_addr_t dpaa2_fl_get_flc(const struct dpaa2_fl_entry *fle) +{ + return (dma_addr_t)le64_to_cpu(fle->flc); +} + +/** + * dpaa2_fl_set_flc() - Set the flow context field of FLE + * @fle: the given frame list entry + * @flc_addr: the flow context needs to be set in frame list entry + */ +static inline void dpaa2_fl_set_flc(struct dpaa2_fl_entry *fle, + dma_addr_t flc_addr) +{ + fle->flc = cpu_to_le64(flc_addr); +} + +static inline bool dpaa2_fl_short_len(const struct dpaa2_fl_entry *fle) +{ + return !!((le16_to_cpu(fle->format_offset) >> + FL_SHORT_LEN_FLAG_SHIFT) & FL_SHORT_LEN_FLAG_MASK); +} + +/** + * dpaa2_fl_get_len() - Get the length in the FLE + * @fle: the given frame list entry + * + * Return the length field in the frame list entry. + */ +static inline u32 dpaa2_fl_get_len(const struct dpaa2_fl_entry *fle) +{ + if (dpaa2_fl_short_len(fle)) + return le32_to_cpu(fle->len) & FL_SHORT_LEN_MASK; + + return le32_to_cpu(fle->len); +} + +/** + * dpaa2_fl_set_len() - Set the length field of FLE + * @fle: the given frame list entry + * @len: the length needs to be set in frame list entry + */ +static inline void dpaa2_fl_set_len(struct dpaa2_fl_entry *fle, u32 len) +{ + fle->len = cpu_to_le32(len); +} + +/** + * dpaa2_fl_get_offset() - Get the offset field in the frame list entry + * @fle: the given frame list entry + * + * Return the offset. + */ +static inline u16 dpaa2_fl_get_offset(const struct dpaa2_fl_entry *fle) +{ + return le16_to_cpu(fle->format_offset) & FL_OFFSET_MASK; +} + +/** + * dpaa2_fl_set_offset() - Set the offset field of FLE + * @fle: the given frame list entry + * @offset: the offset needs to be set in frame list entry + */ +static inline void dpaa2_fl_set_offset(struct dpaa2_fl_entry *fle, u16 offset) +{ + fle->format_offset &= cpu_to_le16(~FL_OFFSET_MASK); + fle->format_offset |= cpu_to_le16(offset); +} + +/** + * dpaa2_fl_get_format() - Get the format field in the FLE + * @fle: the given frame list entry + * + * Return the format. + */ +static inline enum dpaa2_fl_format dpaa2_fl_get_format(const struct dpaa2_fl_entry *fle) +{ + return (enum dpaa2_fl_format)((le16_to_cpu(fle->format_offset) >> + FL_FORMAT_SHIFT) & FL_FORMAT_MASK); +} + +/** + * dpaa2_fl_set_format() - Set the format field of FLE + * @fle: the given frame list entry + * @format: the format needs to be set in frame list entry + */ +static inline void dpaa2_fl_set_format(struct dpaa2_fl_entry *fle, + enum dpaa2_fl_format format) +{ + fle->format_offset &= cpu_to_le16(~(FL_FORMAT_MASK << FL_FORMAT_SHIFT)); + fle->format_offset |= cpu_to_le16(format << FL_FORMAT_SHIFT); +} + +/** + * dpaa2_fl_get_bpid() - Get the bpid field in the FLE + * @fle: the given frame list entry + * + * Return the buffer pool id. 
+ */ +static inline u16 dpaa2_fl_get_bpid(const struct dpaa2_fl_entry *fle) +{ + return le16_to_cpu(fle->bpid) & FL_BPID_MASK; +} + +/** + * dpaa2_fl_set_bpid() - Set the bpid field of FLE + * @fle: the given frame list entry + * @bpid: buffer pool id to be set + */ +static inline void dpaa2_fl_set_bpid(struct dpaa2_fl_entry *fle, u16 bpid) +{ + fle->bpid &= cpu_to_le16(~(FL_BPID_MASK)); + fle->bpid |= cpu_to_le16(bpid); +} + +/** + * dpaa2_fl_is_final() - Check final bit in FLE + * @fle: the given frame list entry + * + * Return true if the final bit is set. + */ +static inline bool dpaa2_fl_is_final(const struct dpaa2_fl_entry *fle) +{ + return !!(le16_to_cpu(fle->format_offset) >> FL_FINAL_FLAG_SHIFT); +} + +/** + * dpaa2_fl_set_final() - Set the final bit in FLE + * @fle: the given frame list entry + * @final: the final boolean to be set + */ +static inline void dpaa2_fl_set_final(struct dpaa2_fl_entry *fle, bool final) +{ + fle->format_offset &= cpu_to_le16((~(FL_FINAL_FLAG_MASK << + FL_FINAL_FLAG_SHIFT)) & 0xFFFF); + fle->format_offset |= cpu_to_le16(final << FL_FINAL_FLAG_SHIFT); +} + #endif /* __FSL_DPAA2_FD_H */ diff --git a/include/soc/fsl/dpaa2-global.h b/include/soc/fsl/dpaa2-global.h index 9bc0713346a8..2bfc379d3dc9 100644 --- a/include/soc/fsl/dpaa2-global.h +++ b/include/soc/fsl/dpaa2-global.h @@ -174,4 +174,19 @@ static inline const struct dpaa2_fd *dpaa2_dq_fd(const struct dpaa2_dq *dq) return (const struct dpaa2_fd *)&dq->dq.fd[0]; } +#define DPAA2_CSCN_SIZE sizeof(struct dpaa2_dq) +#define DPAA2_CSCN_ALIGN 16 +#define DPAA2_CSCN_STATE_CG BIT(0) + +/** + * dpaa2_cscn_state_congested() - Check congestion state + * @cscn: congestion SCN (delivered to WQ or memory) + * + * Return true if congested. + */ +static inline bool dpaa2_cscn_state_congested(struct dpaa2_dq *cscn) +{ + return !!(cscn->scn.state & DPAA2_CSCN_STATE_CG); +} + #endif /* __FSL_DPAA2_GLOBAL_H */ diff --git a/include/soc/fsl/dpaa2-io.h b/include/soc/fsl/dpaa2-io.h index ab51e40d11db..70997ab2146c 100644 --- a/include/soc/fsl/dpaa2-io.h +++ b/include/soc/fsl/dpaa2-io.h @@ -97,9 +97,13 @@ void dpaa2_io_service_deregister(struct dpaa2_io *service, int dpaa2_io_service_rearm(struct dpaa2_io *service, struct dpaa2_io_notification_ctx *ctx); +int dpaa2_io_service_pull_fq(struct dpaa2_io *d, u32 fqid, + struct dpaa2_io_store *s); int dpaa2_io_service_pull_channel(struct dpaa2_io *d, u32 channelid, struct dpaa2_io_store *s); +int dpaa2_io_service_enqueue_fq(struct dpaa2_io *d, u32 fqid, + const struct dpaa2_fd *fd); int dpaa2_io_service_enqueue_qd(struct dpaa2_io *d, u32 qdid, u8 prio, u16 qdbin, const struct dpaa2_fd *fd); int dpaa2_io_service_release(struct dpaa2_io *d, u32 bpid, diff --git a/include/soc/fsl/qe/ucc_fast.h b/include/soc/fsl/qe/ucc_fast.h index 3ee9e7c1a7d7..dcd6b865b590 100644 --- a/include/soc/fsl/qe/ucc_fast.h +++ b/include/soc/fsl/qe/ucc_fast.h @@ -41,8 +41,12 @@ #define R_L_S 0x0800 /* last */ #define R_F_S 0x0400 /* first */ #define R_CM_S 0x0200 /* continuous mode */ +#define R_LG_S 0x0020 /* frame length */ +#define R_NO_S 0x0010 /* nonoctet */ +#define R_AB_S 0x0008 /* abort */ #define R_CR_S 0x0004 /* crc */ -#define R_OV_S 0x0002 /* crc */ +#define R_OV_S 0x0002 /* overrun */ +#define R_CD_S 0x0001 /* carrier detect */ /* transmit BD's status */ #define T_R_S 0x8000 /* ready bit */ @@ -51,6 +55,8 @@ #define T_L_S 0x0800 /* last */ #define T_TC_S 0x0400 /* crc */ #define T_TM_S 0x0200 /* continuous mode */ +#define T_UN_S 0x0002 /* hdlc underrun */ +#define T_CT_S 0x0001 /* hdlc carrier lost */ /* Rx Data
buffer must be 4 bytes aligned in most cases */ #define UCC_FAST_RX_ALIGN 4 diff --git a/include/soc/fsl/qman.h b/include/soc/fsl/qman.h index d4dfefdee6c1..597783b8a3a0 100644 --- a/include/soc/fsl/qman.h +++ b/include/soc/fsl/qman.h @@ -1186,4 +1186,12 @@ int qman_alloc_cgrid_range(u32 *result, u32 count); */ int qman_release_cgrid(u32 id); +/** + * qman_is_probed - Check if qman is probed + * + * Returns 1 if the qman driver successfully probed, -1 if the qman driver + * failed to probe or 0 if the qman driver did not probed yet. + */ +int qman_is_probed(void); + #endif /* __FSL_QMAN_H */ diff --git a/include/soc/mscc/ocelot_hsio.h b/include/soc/mscc/ocelot_hsio.h new file mode 100644 index 000000000000..43112dd7313a --- /dev/null +++ b/include/soc/mscc/ocelot_hsio.h @@ -0,0 +1,859 @@ +/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */ +/* + * Microsemi Ocelot Switch driver + * + * Copyright (c) 2017 Microsemi Corporation + */ + +#ifndef _MSCC_OCELOT_HSIO_H_ +#define _MSCC_OCELOT_HSIO_H_ + +#define HSIO_PLL5G_CFG0 0x0000 +#define HSIO_PLL5G_CFG1 0x0004 +#define HSIO_PLL5G_CFG2 0x0008 +#define HSIO_PLL5G_CFG3 0x000c +#define HSIO_PLL5G_CFG4 0x0010 +#define HSIO_PLL5G_CFG5 0x0014 +#define HSIO_PLL5G_CFG6 0x0018 +#define HSIO_PLL5G_STATUS0 0x001c +#define HSIO_PLL5G_STATUS1 0x0020 +#define HSIO_PLL5G_BIST_CFG0 0x0024 +#define HSIO_PLL5G_BIST_CFG1 0x0028 +#define HSIO_PLL5G_BIST_CFG2 0x002c +#define HSIO_PLL5G_BIST_STAT0 0x0030 +#define HSIO_PLL5G_BIST_STAT1 0x0034 +#define HSIO_RCOMP_CFG0 0x0038 +#define HSIO_RCOMP_STATUS 0x003c +#define HSIO_SYNC_ETH_CFG 0x0040 +#define HSIO_SYNC_ETH_PLL_CFG 0x0048 +#define HSIO_S1G_DES_CFG 0x004c +#define HSIO_S1G_IB_CFG 0x0050 +#define HSIO_S1G_OB_CFG 0x0054 +#define HSIO_S1G_SER_CFG 0x0058 +#define HSIO_S1G_COMMON_CFG 0x005c +#define HSIO_S1G_PLL_CFG 0x0060 +#define HSIO_S1G_PLL_STATUS 0x0064 +#define HSIO_S1G_DFT_CFG0 0x0068 +#define HSIO_S1G_DFT_CFG1 0x006c +#define HSIO_S1G_DFT_CFG2 0x0070 +#define HSIO_S1G_TP_CFG 0x0074 +#define HSIO_S1G_RC_PLL_BIST_CFG 0x0078 +#define HSIO_S1G_MISC_CFG 0x007c +#define HSIO_S1G_DFT_STATUS 0x0080 +#define HSIO_S1G_MISC_STATUS 0x0084 +#define HSIO_MCB_S1G_ADDR_CFG 0x0088 +#define HSIO_S6G_DIG_CFG 0x008c +#define HSIO_S6G_DFT_CFG0 0x0090 +#define HSIO_S6G_DFT_CFG1 0x0094 +#define HSIO_S6G_DFT_CFG2 0x0098 +#define HSIO_S6G_TP_CFG0 0x009c +#define HSIO_S6G_TP_CFG1 0x00a0 +#define HSIO_S6G_RC_PLL_BIST_CFG 0x00a4 +#define HSIO_S6G_MISC_CFG 0x00a8 +#define HSIO_S6G_OB_ANEG_CFG 0x00ac +#define HSIO_S6G_DFT_STATUS 0x00b0 +#define HSIO_S6G_ERR_CNT 0x00b4 +#define HSIO_S6G_MISC_STATUS 0x00b8 +#define HSIO_S6G_DES_CFG 0x00bc +#define HSIO_S6G_IB_CFG 0x00c0 +#define HSIO_S6G_IB_CFG1 0x00c4 +#define HSIO_S6G_IB_CFG2 0x00c8 +#define HSIO_S6G_IB_CFG3 0x00cc +#define HSIO_S6G_IB_CFG4 0x00d0 +#define HSIO_S6G_IB_CFG5 0x00d4 +#define HSIO_S6G_OB_CFG 0x00d8 +#define HSIO_S6G_OB_CFG1 0x00dc +#define HSIO_S6G_SER_CFG 0x00e0 +#define HSIO_S6G_COMMON_CFG 0x00e4 +#define HSIO_S6G_PLL_CFG 0x00e8 +#define HSIO_S6G_ACJTAG_CFG 0x00ec +#define HSIO_S6G_GP_CFG 0x00f0 +#define HSIO_S6G_IB_STATUS0 0x00f4 +#define HSIO_S6G_IB_STATUS1 0x00f8 +#define HSIO_S6G_ACJTAG_STATUS 0x00fc +#define HSIO_S6G_PLL_STATUS 0x0100 +#define HSIO_S6G_REVID 0x0104 +#define HSIO_MCB_S6G_ADDR_CFG 0x0108 +#define HSIO_HW_CFG 0x010c +#define HSIO_HW_QSGMII_CFG 0x0110 +#define HSIO_HW_QSGMII_STAT 0x0114 +#define HSIO_CLK_CFG 0x0118 +#define HSIO_TEMP_SENSOR_CTRL 0x011c +#define HSIO_TEMP_SENSOR_CFG 0x0120 +#define HSIO_TEMP_SENSOR_STAT 0x0124 + +#define HSIO_PLL5G_CFG0_ENA_ROT 
BIT(31) +#define HSIO_PLL5G_CFG0_ENA_LANE BIT(30) +#define HSIO_PLL5G_CFG0_ENA_CLKTREE BIT(29) +#define HSIO_PLL5G_CFG0_DIV4 BIT(28) +#define HSIO_PLL5G_CFG0_ENA_LOCK_FINE BIT(27) +#define HSIO_PLL5G_CFG0_SELBGV820(x) (((x) << 23) & GENMASK(26, 23)) +#define HSIO_PLL5G_CFG0_SELBGV820_M GENMASK(26, 23) +#define HSIO_PLL5G_CFG0_SELBGV820_X(x) (((x) & GENMASK(26, 23)) >> 23) +#define HSIO_PLL5G_CFG0_LOOP_BW_RES(x) (((x) << 18) & GENMASK(22, 18)) +#define HSIO_PLL5G_CFG0_LOOP_BW_RES_M GENMASK(22, 18) +#define HSIO_PLL5G_CFG0_LOOP_BW_RES_X(x) (((x) & GENMASK(22, 18)) >> 18) +#define HSIO_PLL5G_CFG0_SELCPI(x) (((x) << 16) & GENMASK(17, 16)) +#define HSIO_PLL5G_CFG0_SELCPI_M GENMASK(17, 16) +#define HSIO_PLL5G_CFG0_SELCPI_X(x) (((x) & GENMASK(17, 16)) >> 16) +#define HSIO_PLL5G_CFG0_ENA_VCO_CONTRH BIT(15) +#define HSIO_PLL5G_CFG0_ENA_CP1 BIT(14) +#define HSIO_PLL5G_CFG0_ENA_VCO_BUF BIT(13) +#define HSIO_PLL5G_CFG0_ENA_BIAS BIT(12) +#define HSIO_PLL5G_CFG0_CPU_CLK_DIV(x) (((x) << 6) & GENMASK(11, 6)) +#define HSIO_PLL5G_CFG0_CPU_CLK_DIV_M GENMASK(11, 6) +#define HSIO_PLL5G_CFG0_CPU_CLK_DIV_X(x) (((x) & GENMASK(11, 6)) >> 6) +#define HSIO_PLL5G_CFG0_CORE_CLK_DIV(x) ((x) & GENMASK(5, 0)) +#define HSIO_PLL5G_CFG0_CORE_CLK_DIV_M GENMASK(5, 0) + +#define HSIO_PLL5G_CFG1_ENA_DIRECT BIT(18) +#define HSIO_PLL5G_CFG1_ROT_SPEED BIT(17) +#define HSIO_PLL5G_CFG1_ROT_DIR BIT(16) +#define HSIO_PLL5G_CFG1_READBACK_DATA_SEL BIT(15) +#define HSIO_PLL5G_CFG1_RC_ENABLE BIT(14) +#define HSIO_PLL5G_CFG1_RC_CTRL_DATA(x) (((x) << 6) & GENMASK(13, 6)) +#define HSIO_PLL5G_CFG1_RC_CTRL_DATA_M GENMASK(13, 6) +#define HSIO_PLL5G_CFG1_RC_CTRL_DATA_X(x) (((x) & GENMASK(13, 6)) >> 6) +#define HSIO_PLL5G_CFG1_QUARTER_RATE BIT(5) +#define HSIO_PLL5G_CFG1_PWD_TX BIT(4) +#define HSIO_PLL5G_CFG1_PWD_RX BIT(3) +#define HSIO_PLL5G_CFG1_OUT_OF_RANGE_RECAL_ENA BIT(2) +#define HSIO_PLL5G_CFG1_HALF_RATE BIT(1) +#define HSIO_PLL5G_CFG1_FORCE_SET_ENA BIT(0) + +#define HSIO_PLL5G_CFG2_ENA_TEST_MODE BIT(30) +#define HSIO_PLL5G_CFG2_ENA_PFD_IN_FLIP BIT(29) +#define HSIO_PLL5G_CFG2_ENA_VCO_NREF_TESTOUT BIT(28) +#define HSIO_PLL5G_CFG2_ENA_FBTESTOUT BIT(27) +#define HSIO_PLL5G_CFG2_ENA_RCPLL BIT(26) +#define HSIO_PLL5G_CFG2_ENA_CP2 BIT(25) +#define HSIO_PLL5G_CFG2_ENA_CLK_BYPASS1 BIT(24) +#define HSIO_PLL5G_CFG2_AMPC_SEL(x) (((x) << 16) & GENMASK(23, 16)) +#define HSIO_PLL5G_CFG2_AMPC_SEL_M GENMASK(23, 16) +#define HSIO_PLL5G_CFG2_AMPC_SEL_X(x) (((x) & GENMASK(23, 16)) >> 16) +#define HSIO_PLL5G_CFG2_ENA_CLK_BYPASS BIT(15) +#define HSIO_PLL5G_CFG2_PWD_AMPCTRL_N BIT(14) +#define HSIO_PLL5G_CFG2_ENA_AMPCTRL BIT(13) +#define HSIO_PLL5G_CFG2_ENA_AMP_CTRL_FORCE BIT(12) +#define HSIO_PLL5G_CFG2_FRC_FSM_POR BIT(11) +#define HSIO_PLL5G_CFG2_DISABLE_FSM_POR BIT(10) +#define HSIO_PLL5G_CFG2_GAIN_TEST(x) (((x) << 5) & GENMASK(9, 5)) +#define HSIO_PLL5G_CFG2_GAIN_TEST_M GENMASK(9, 5) +#define HSIO_PLL5G_CFG2_GAIN_TEST_X(x) (((x) & GENMASK(9, 5)) >> 5) +#define HSIO_PLL5G_CFG2_EN_RESET_OVERRUN BIT(4) +#define HSIO_PLL5G_CFG2_EN_RESET_LIM_DET BIT(3) +#define HSIO_PLL5G_CFG2_EN_RESET_FRQ_DET BIT(2) +#define HSIO_PLL5G_CFG2_DISABLE_FSM BIT(1) +#define HSIO_PLL5G_CFG2_ENA_GAIN_TEST BIT(0) + +#define HSIO_PLL5G_CFG3_TEST_ANA_OUT_SEL(x) (((x) << 22) & GENMASK(23, 22)) +#define HSIO_PLL5G_CFG3_TEST_ANA_OUT_SEL_M GENMASK(23, 22) +#define HSIO_PLL5G_CFG3_TEST_ANA_OUT_SEL_X(x) (((x) & GENMASK(23, 22)) >> 22) +#define HSIO_PLL5G_CFG3_TESTOUT_SEL(x) (((x) << 19) & GENMASK(21, 19)) +#define HSIO_PLL5G_CFG3_TESTOUT_SEL_M GENMASK(21, 19) +#define 
HSIO_PLL5G_CFG3_TESTOUT_SEL_X(x) (((x) & GENMASK(21, 19)) >> 19) +#define HSIO_PLL5G_CFG3_ENA_ANA_TEST_OUT BIT(18) +#define HSIO_PLL5G_CFG3_ENA_TEST_OUT BIT(17) +#define HSIO_PLL5G_CFG3_SEL_FBDCLK BIT(16) +#define HSIO_PLL5G_CFG3_SEL_CML_CMOS_PFD BIT(15) +#define HSIO_PLL5G_CFG3_RST_FB_N BIT(14) +#define HSIO_PLL5G_CFG3_FORCE_VCO_CONTRH BIT(13) +#define HSIO_PLL5G_CFG3_FORCE_LO BIT(12) +#define HSIO_PLL5G_CFG3_FORCE_HI BIT(11) +#define HSIO_PLL5G_CFG3_FORCE_ENA BIT(10) +#define HSIO_PLL5G_CFG3_FORCE_CP BIT(9) +#define HSIO_PLL5G_CFG3_FBDIVSEL_TST_ENA BIT(8) +#define HSIO_PLL5G_CFG3_FBDIVSEL(x) ((x) & GENMASK(7, 0)) +#define HSIO_PLL5G_CFG3_FBDIVSEL_M GENMASK(7, 0) + +#define HSIO_PLL5G_CFG4_IB_BIAS_CTRL(x) (((x) << 16) & GENMASK(23, 16)) +#define HSIO_PLL5G_CFG4_IB_BIAS_CTRL_M GENMASK(23, 16) +#define HSIO_PLL5G_CFG4_IB_BIAS_CTRL_X(x) (((x) & GENMASK(23, 16)) >> 16) +#define HSIO_PLL5G_CFG4_IB_CTRL(x) ((x) & GENMASK(15, 0)) +#define HSIO_PLL5G_CFG4_IB_CTRL_M GENMASK(15, 0) + +#define HSIO_PLL5G_CFG5_OB_BIAS_CTRL(x) (((x) << 16) & GENMASK(23, 16)) +#define HSIO_PLL5G_CFG5_OB_BIAS_CTRL_M GENMASK(23, 16) +#define HSIO_PLL5G_CFG5_OB_BIAS_CTRL_X(x) (((x) & GENMASK(23, 16)) >> 16) +#define HSIO_PLL5G_CFG5_OB_CTRL(x) ((x) & GENMASK(15, 0)) +#define HSIO_PLL5G_CFG5_OB_CTRL_M GENMASK(15, 0) + +#define HSIO_PLL5G_CFG6_REFCLK_SEL_SRC BIT(23) +#define HSIO_PLL5G_CFG6_REFCLK_SEL(x) (((x) << 20) & GENMASK(22, 20)) +#define HSIO_PLL5G_CFG6_REFCLK_SEL_M GENMASK(22, 20) +#define HSIO_PLL5G_CFG6_REFCLK_SEL_X(x) (((x) & GENMASK(22, 20)) >> 20) +#define HSIO_PLL5G_CFG6_REFCLK_SRC BIT(19) +#define HSIO_PLL5G_CFG6_POR_DEL_SEL(x) (((x) << 16) & GENMASK(17, 16)) +#define HSIO_PLL5G_CFG6_POR_DEL_SEL_M GENMASK(17, 16) +#define HSIO_PLL5G_CFG6_POR_DEL_SEL_X(x) (((x) & GENMASK(17, 16)) >> 16) +#define HSIO_PLL5G_CFG6_DIV125REF_SEL(x) (((x) << 8) & GENMASK(15, 8)) +#define HSIO_PLL5G_CFG6_DIV125REF_SEL_M GENMASK(15, 8) +#define HSIO_PLL5G_CFG6_DIV125REF_SEL_X(x) (((x) & GENMASK(15, 8)) >> 8) +#define HSIO_PLL5G_CFG6_ENA_REFCLKC2 BIT(7) +#define HSIO_PLL5G_CFG6_ENA_FBCLKC2 BIT(6) +#define HSIO_PLL5G_CFG6_DDR_CLK_DIV(x) ((x) & GENMASK(5, 0)) +#define HSIO_PLL5G_CFG6_DDR_CLK_DIV_M GENMASK(5, 0) + +#define HSIO_PLL5G_STATUS0_RANGE_LIM BIT(12) +#define HSIO_PLL5G_STATUS0_OUT_OF_RANGE_ERR BIT(11) +#define HSIO_PLL5G_STATUS0_CALIBRATION_ERR BIT(10) +#define HSIO_PLL5G_STATUS0_CALIBRATION_DONE BIT(9) +#define HSIO_PLL5G_STATUS0_READBACK_DATA(x) (((x) << 1) & GENMASK(8, 1)) +#define HSIO_PLL5G_STATUS0_READBACK_DATA_M GENMASK(8, 1) +#define HSIO_PLL5G_STATUS0_READBACK_DATA_X(x) (((x) & GENMASK(8, 1)) >> 1) +#define HSIO_PLL5G_STATUS0_LOCK_STATUS BIT(0) + +#define HSIO_PLL5G_STATUS1_SIG_DEL(x) (((x) << 21) & GENMASK(28, 21)) +#define HSIO_PLL5G_STATUS1_SIG_DEL_M GENMASK(28, 21) +#define HSIO_PLL5G_STATUS1_SIG_DEL_X(x) (((x) & GENMASK(28, 21)) >> 21) +#define HSIO_PLL5G_STATUS1_GAIN_STAT(x) (((x) << 16) & GENMASK(20, 16)) +#define HSIO_PLL5G_STATUS1_GAIN_STAT_M GENMASK(20, 16) +#define HSIO_PLL5G_STATUS1_GAIN_STAT_X(x) (((x) & GENMASK(20, 16)) >> 16) +#define HSIO_PLL5G_STATUS1_FBCNT_DIF(x) (((x) << 4) & GENMASK(13, 4)) +#define HSIO_PLL5G_STATUS1_FBCNT_DIF_M GENMASK(13, 4) +#define HSIO_PLL5G_STATUS1_FBCNT_DIF_X(x) (((x) & GENMASK(13, 4)) >> 4) +#define HSIO_PLL5G_STATUS1_FSM_STAT(x) (((x) << 1) & GENMASK(3, 1)) +#define HSIO_PLL5G_STATUS1_FSM_STAT_M GENMASK(3, 1) +#define HSIO_PLL5G_STATUS1_FSM_STAT_X(x) (((x) & GENMASK(3, 1)) >> 1) +#define HSIO_PLL5G_STATUS1_FSM_LOCK BIT(0) + +#define HSIO_PLL5G_BIST_CFG0_PLLB_START_BIST 
BIT(31) +#define HSIO_PLL5G_BIST_CFG0_PLLB_MEAS_MODE BIT(30) +#define HSIO_PLL5G_BIST_CFG0_PLLB_LOCK_REPEAT(x) (((x) << 20) & GENMASK(23, 20)) +#define HSIO_PLL5G_BIST_CFG0_PLLB_LOCK_REPEAT_M GENMASK(23, 20) +#define HSIO_PLL5G_BIST_CFG0_PLLB_LOCK_REPEAT_X(x) (((x) & GENMASK(23, 20)) >> 20) +#define HSIO_PLL5G_BIST_CFG0_PLLB_LOCK_UNCERT(x) (((x) << 16) & GENMASK(19, 16)) +#define HSIO_PLL5G_BIST_CFG0_PLLB_LOCK_UNCERT_M GENMASK(19, 16) +#define HSIO_PLL5G_BIST_CFG0_PLLB_LOCK_UNCERT_X(x) (((x) & GENMASK(19, 16)) >> 16) +#define HSIO_PLL5G_BIST_CFG0_PLLB_DIV_FACTOR_PRE(x) ((x) & GENMASK(15, 0)) +#define HSIO_PLL5G_BIST_CFG0_PLLB_DIV_FACTOR_PRE_M GENMASK(15, 0) + +#define HSIO_PLL5G_BIST_STAT0_PLLB_FSM_STAT(x) (((x) << 4) & GENMASK(7, 4)) +#define HSIO_PLL5G_BIST_STAT0_PLLB_FSM_STAT_M GENMASK(7, 4) +#define HSIO_PLL5G_BIST_STAT0_PLLB_FSM_STAT_X(x) (((x) & GENMASK(7, 4)) >> 4) +#define HSIO_PLL5G_BIST_STAT0_PLLB_BUSY BIT(2) +#define HSIO_PLL5G_BIST_STAT0_PLLB_DONE_N BIT(1) +#define HSIO_PLL5G_BIST_STAT0_PLLB_FAIL BIT(0) + +#define HSIO_PLL5G_BIST_STAT1_PLLB_CNT_OUT(x) (((x) << 16) & GENMASK(31, 16)) +#define HSIO_PLL5G_BIST_STAT1_PLLB_CNT_OUT_M GENMASK(31, 16) +#define HSIO_PLL5G_BIST_STAT1_PLLB_CNT_OUT_X(x) (((x) & GENMASK(31, 16)) >> 16) +#define HSIO_PLL5G_BIST_STAT1_PLLB_CNT_REF_DIFF(x) ((x) & GENMASK(15, 0)) +#define HSIO_PLL5G_BIST_STAT1_PLLB_CNT_REF_DIFF_M GENMASK(15, 0) + +#define HSIO_RCOMP_CFG0_PWD_ENA BIT(13) +#define HSIO_RCOMP_CFG0_RUN_CAL BIT(12) +#define HSIO_RCOMP_CFG0_SPEED_SEL(x) (((x) << 10) & GENMASK(11, 10)) +#define HSIO_RCOMP_CFG0_SPEED_SEL_M GENMASK(11, 10) +#define HSIO_RCOMP_CFG0_SPEED_SEL_X(x) (((x) & GENMASK(11, 10)) >> 10) +#define HSIO_RCOMP_CFG0_MODE_SEL(x) (((x) << 8) & GENMASK(9, 8)) +#define HSIO_RCOMP_CFG0_MODE_SEL_M GENMASK(9, 8) +#define HSIO_RCOMP_CFG0_MODE_SEL_X(x) (((x) & GENMASK(9, 8)) >> 8) +#define HSIO_RCOMP_CFG0_FORCE_ENA BIT(4) +#define HSIO_RCOMP_CFG0_RCOMP_VAL(x) ((x) & GENMASK(3, 0)) +#define HSIO_RCOMP_CFG0_RCOMP_VAL_M GENMASK(3, 0) + +#define HSIO_RCOMP_STATUS_BUSY BIT(12) +#define HSIO_RCOMP_STATUS_DELTA_ALERT BIT(7) +#define HSIO_RCOMP_STATUS_RCOMP(x) ((x) & GENMASK(3, 0)) +#define HSIO_RCOMP_STATUS_RCOMP_M GENMASK(3, 0) + +#define HSIO_SYNC_ETH_CFG_RSZ 0x4 + +#define HSIO_SYNC_ETH_CFG_SEL_RECO_CLK_SRC(x) (((x) << 4) & GENMASK(7, 4)) +#define HSIO_SYNC_ETH_CFG_SEL_RECO_CLK_SRC_M GENMASK(7, 4) +#define HSIO_SYNC_ETH_CFG_SEL_RECO_CLK_SRC_X(x) (((x) & GENMASK(7, 4)) >> 4) +#define HSIO_SYNC_ETH_CFG_SEL_RECO_CLK_DIV(x) (((x) << 1) & GENMASK(3, 1)) +#define HSIO_SYNC_ETH_CFG_SEL_RECO_CLK_DIV_M GENMASK(3, 1) +#define HSIO_SYNC_ETH_CFG_SEL_RECO_CLK_DIV_X(x) (((x) & GENMASK(3, 1)) >> 1) +#define HSIO_SYNC_ETH_CFG_RECO_CLK_ENA BIT(0) + +#define HSIO_SYNC_ETH_PLL_CFG_PLL_AUTO_SQUELCH_ENA BIT(0) + +#define HSIO_S1G_DES_CFG_DES_PHS_CTRL(x) (((x) << 13) & GENMASK(16, 13)) +#define HSIO_S1G_DES_CFG_DES_PHS_CTRL_M GENMASK(16, 13) +#define HSIO_S1G_DES_CFG_DES_PHS_CTRL_X(x) (((x) & GENMASK(16, 13)) >> 13) +#define HSIO_S1G_DES_CFG_DES_CPMD_SEL(x) (((x) << 11) & GENMASK(12, 11)) +#define HSIO_S1G_DES_CFG_DES_CPMD_SEL_M GENMASK(12, 11) +#define HSIO_S1G_DES_CFG_DES_CPMD_SEL_X(x) (((x) & GENMASK(12, 11)) >> 11) +#define HSIO_S1G_DES_CFG_DES_MBTR_CTRL(x) (((x) << 8) & GENMASK(10, 8)) +#define HSIO_S1G_DES_CFG_DES_MBTR_CTRL_M GENMASK(10, 8) +#define HSIO_S1G_DES_CFG_DES_MBTR_CTRL_X(x) (((x) & GENMASK(10, 8)) >> 8) +#define HSIO_S1G_DES_CFG_DES_BW_ANA(x) (((x) << 5) & GENMASK(7, 5)) +#define HSIO_S1G_DES_CFG_DES_BW_ANA_M GENMASK(7, 5) +#define 
HSIO_S1G_DES_CFG_DES_BW_ANA_X(x) (((x) & GENMASK(7, 5)) >> 5) +#define HSIO_S1G_DES_CFG_DES_SWAP_ANA BIT(4) +#define HSIO_S1G_DES_CFG_DES_BW_HYST(x) (((x) << 1) & GENMASK(3, 1)) +#define HSIO_S1G_DES_CFG_DES_BW_HYST_M GENMASK(3, 1) +#define HSIO_S1G_DES_CFG_DES_BW_HYST_X(x) (((x) & GENMASK(3, 1)) >> 1) +#define HSIO_S1G_DES_CFG_DES_SWAP_HYST BIT(0) + +#define HSIO_S1G_IB_CFG_IB_FX100_ENA BIT(27) +#define HSIO_S1G_IB_CFG_ACJTAG_HYST(x) (((x) << 24) & GENMASK(26, 24)) +#define HSIO_S1G_IB_CFG_ACJTAG_HYST_M GENMASK(26, 24) +#define HSIO_S1G_IB_CFG_ACJTAG_HYST_X(x) (((x) & GENMASK(26, 24)) >> 24) +#define HSIO_S1G_IB_CFG_IB_DET_LEV(x) (((x) << 19) & GENMASK(21, 19)) +#define HSIO_S1G_IB_CFG_IB_DET_LEV_M GENMASK(21, 19) +#define HSIO_S1G_IB_CFG_IB_DET_LEV_X(x) (((x) & GENMASK(21, 19)) >> 19) +#define HSIO_S1G_IB_CFG_IB_HYST_LEV BIT(14) +#define HSIO_S1G_IB_CFG_IB_ENA_CMV_TERM BIT(13) +#define HSIO_S1G_IB_CFG_IB_ENA_DC_COUPLING BIT(12) +#define HSIO_S1G_IB_CFG_IB_ENA_DETLEV BIT(11) +#define HSIO_S1G_IB_CFG_IB_ENA_HYST BIT(10) +#define HSIO_S1G_IB_CFG_IB_ENA_OFFSET_COMP BIT(9) +#define HSIO_S1G_IB_CFG_IB_EQ_GAIN(x) (((x) << 6) & GENMASK(8, 6)) +#define HSIO_S1G_IB_CFG_IB_EQ_GAIN_M GENMASK(8, 6) +#define HSIO_S1G_IB_CFG_IB_EQ_GAIN_X(x) (((x) & GENMASK(8, 6)) >> 6) +#define HSIO_S1G_IB_CFG_IB_SEL_CORNER_FREQ(x) (((x) << 4) & GENMASK(5, 4)) +#define HSIO_S1G_IB_CFG_IB_SEL_CORNER_FREQ_M GENMASK(5, 4) +#define HSIO_S1G_IB_CFG_IB_SEL_CORNER_FREQ_X(x) (((x) & GENMASK(5, 4)) >> 4) +#define HSIO_S1G_IB_CFG_IB_RESISTOR_CTRL(x) ((x) & GENMASK(3, 0)) +#define HSIO_S1G_IB_CFG_IB_RESISTOR_CTRL_M GENMASK(3, 0) + +#define HSIO_S1G_OB_CFG_OB_SLP(x) (((x) << 17) & GENMASK(18, 17)) +#define HSIO_S1G_OB_CFG_OB_SLP_M GENMASK(18, 17) +#define HSIO_S1G_OB_CFG_OB_SLP_X(x) (((x) & GENMASK(18, 17)) >> 17) +#define HSIO_S1G_OB_CFG_OB_AMP_CTRL(x) (((x) << 13) & GENMASK(16, 13)) +#define HSIO_S1G_OB_CFG_OB_AMP_CTRL_M GENMASK(16, 13) +#define HSIO_S1G_OB_CFG_OB_AMP_CTRL_X(x) (((x) & GENMASK(16, 13)) >> 13) +#define HSIO_S1G_OB_CFG_OB_CMM_BIAS_CTRL(x) (((x) << 10) & GENMASK(12, 10)) +#define HSIO_S1G_OB_CFG_OB_CMM_BIAS_CTRL_M GENMASK(12, 10) +#define HSIO_S1G_OB_CFG_OB_CMM_BIAS_CTRL_X(x) (((x) & GENMASK(12, 10)) >> 10) +#define HSIO_S1G_OB_CFG_OB_DIS_VCM_CTRL BIT(9) +#define HSIO_S1G_OB_CFG_OB_EN_MEAS_VREG BIT(8) +#define HSIO_S1G_OB_CFG_OB_VCM_CTRL(x) (((x) << 4) & GENMASK(7, 4)) +#define HSIO_S1G_OB_CFG_OB_VCM_CTRL_M GENMASK(7, 4) +#define HSIO_S1G_OB_CFG_OB_VCM_CTRL_X(x) (((x) & GENMASK(7, 4)) >> 4) +#define HSIO_S1G_OB_CFG_OB_RESISTOR_CTRL(x) ((x) & GENMASK(3, 0)) +#define HSIO_S1G_OB_CFG_OB_RESISTOR_CTRL_M GENMASK(3, 0) + +#define HSIO_S1G_SER_CFG_SER_IDLE BIT(9) +#define HSIO_S1G_SER_CFG_SER_DEEMPH BIT(8) +#define HSIO_S1G_SER_CFG_SER_CPMD_SEL BIT(7) +#define HSIO_S1G_SER_CFG_SER_SWAP_CPMD BIT(6) +#define HSIO_S1G_SER_CFG_SER_ALISEL(x) (((x) << 4) & GENMASK(5, 4)) +#define HSIO_S1G_SER_CFG_SER_ALISEL_M GENMASK(5, 4) +#define HSIO_S1G_SER_CFG_SER_ALISEL_X(x) (((x) & GENMASK(5, 4)) >> 4) +#define HSIO_S1G_SER_CFG_SER_ENHYS BIT(3) +#define HSIO_S1G_SER_CFG_SER_BIG_WIN BIT(2) +#define HSIO_S1G_SER_CFG_SER_EN_WIN BIT(1) +#define HSIO_S1G_SER_CFG_SER_ENALI BIT(0) + +#define HSIO_S1G_COMMON_CFG_SYS_RST BIT(31) +#define HSIO_S1G_COMMON_CFG_SE_AUTO_SQUELCH_ENA BIT(21) +#define HSIO_S1G_COMMON_CFG_ENA_LANE BIT(18) +#define HSIO_S1G_COMMON_CFG_PWD_RX BIT(17) +#define HSIO_S1G_COMMON_CFG_PWD_TX BIT(16) +#define HSIO_S1G_COMMON_CFG_LANE_CTRL(x) (((x) << 13) & GENMASK(15, 13)) +#define HSIO_S1G_COMMON_CFG_LANE_CTRL_M GENMASK(15, 13) 
+#define HSIO_S1G_COMMON_CFG_LANE_CTRL_X(x) (((x) & GENMASK(15, 13)) >> 13) +#define HSIO_S1G_COMMON_CFG_ENA_DIRECT BIT(12) +#define HSIO_S1G_COMMON_CFG_ENA_ELOOP BIT(11) +#define HSIO_S1G_COMMON_CFG_ENA_FLOOP BIT(10) +#define HSIO_S1G_COMMON_CFG_ENA_ILOOP BIT(9) +#define HSIO_S1G_COMMON_CFG_ENA_PLOOP BIT(8) +#define HSIO_S1G_COMMON_CFG_HRATE BIT(7) +#define HSIO_S1G_COMMON_CFG_IF_MODE BIT(0) + +#define HSIO_S1G_PLL_CFG_PLL_ENA_FB_DIV2 BIT(22) +#define HSIO_S1G_PLL_CFG_PLL_ENA_RC_DIV2 BIT(21) +#define HSIO_S1G_PLL_CFG_PLL_FSM_CTRL_DATA(x) (((x) << 8) & GENMASK(15, 8)) +#define HSIO_S1G_PLL_CFG_PLL_FSM_CTRL_DATA_M GENMASK(15, 8) +#define HSIO_S1G_PLL_CFG_PLL_FSM_CTRL_DATA_X(x) (((x) & GENMASK(15, 8)) >> 8) +#define HSIO_S1G_PLL_CFG_PLL_FSM_ENA BIT(7) +#define HSIO_S1G_PLL_CFG_PLL_FSM_FORCE_SET_ENA BIT(6) +#define HSIO_S1G_PLL_CFG_PLL_FSM_OOR_RECAL_ENA BIT(5) +#define HSIO_S1G_PLL_CFG_PLL_RB_DATA_SEL BIT(3) + +#define HSIO_S1G_PLL_STATUS_PLL_CAL_NOT_DONE BIT(12) +#define HSIO_S1G_PLL_STATUS_PLL_CAL_ERR BIT(11) +#define HSIO_S1G_PLL_STATUS_PLL_OUT_OF_RANGE_ERR BIT(10) +#define HSIO_S1G_PLL_STATUS_PLL_RB_DATA(x) ((x) & GENMASK(7, 0)) +#define HSIO_S1G_PLL_STATUS_PLL_RB_DATA_M GENMASK(7, 0) + +#define HSIO_S1G_DFT_CFG0_LAZYBIT BIT(31) +#define HSIO_S1G_DFT_CFG0_INV_DIS BIT(23) +#define HSIO_S1G_DFT_CFG0_PRBS_SEL(x) (((x) << 20) & GENMASK(21, 20)) +#define HSIO_S1G_DFT_CFG0_PRBS_SEL_M GENMASK(21, 20) +#define HSIO_S1G_DFT_CFG0_PRBS_SEL_X(x) (((x) & GENMASK(21, 20)) >> 20) +#define HSIO_S1G_DFT_CFG0_TEST_MODE(x) (((x) << 16) & GENMASK(18, 16)) +#define HSIO_S1G_DFT_CFG0_TEST_MODE_M GENMASK(18, 16) +#define HSIO_S1G_DFT_CFG0_TEST_MODE_X(x) (((x) & GENMASK(18, 16)) >> 16) +#define HSIO_S1G_DFT_CFG0_RX_PHS_CORR_DIS BIT(4) +#define HSIO_S1G_DFT_CFG0_RX_PDSENS_ENA BIT(3) +#define HSIO_S1G_DFT_CFG0_RX_DFT_ENA BIT(2) +#define HSIO_S1G_DFT_CFG0_TX_DFT_ENA BIT(0) + +#define HSIO_S1G_DFT_CFG1_TX_JITTER_AMPL(x) (((x) << 8) & GENMASK(17, 8)) +#define HSIO_S1G_DFT_CFG1_TX_JITTER_AMPL_M GENMASK(17, 8) +#define HSIO_S1G_DFT_CFG1_TX_JITTER_AMPL_X(x) (((x) & GENMASK(17, 8)) >> 8) +#define HSIO_S1G_DFT_CFG1_TX_STEP_FREQ(x) (((x) << 4) & GENMASK(7, 4)) +#define HSIO_S1G_DFT_CFG1_TX_STEP_FREQ_M GENMASK(7, 4) +#define HSIO_S1G_DFT_CFG1_TX_STEP_FREQ_X(x) (((x) & GENMASK(7, 4)) >> 4) +#define HSIO_S1G_DFT_CFG1_TX_JI_ENA BIT(3) +#define HSIO_S1G_DFT_CFG1_TX_WAVEFORM_SEL BIT(2) +#define HSIO_S1G_DFT_CFG1_TX_FREQOFF_DIR BIT(1) +#define HSIO_S1G_DFT_CFG1_TX_FREQOFF_ENA BIT(0) + +#define HSIO_S1G_DFT_CFG2_RX_JITTER_AMPL(x) (((x) << 8) & GENMASK(17, 8)) +#define HSIO_S1G_DFT_CFG2_RX_JITTER_AMPL_M GENMASK(17, 8) +#define HSIO_S1G_DFT_CFG2_RX_JITTER_AMPL_X(x) (((x) & GENMASK(17, 8)) >> 8) +#define HSIO_S1G_DFT_CFG2_RX_STEP_FREQ(x) (((x) << 4) & GENMASK(7, 4)) +#define HSIO_S1G_DFT_CFG2_RX_STEP_FREQ_M GENMASK(7, 4) +#define HSIO_S1G_DFT_CFG2_RX_STEP_FREQ_X(x) (((x) & GENMASK(7, 4)) >> 4) +#define HSIO_S1G_DFT_CFG2_RX_JI_ENA BIT(3) +#define HSIO_S1G_DFT_CFG2_RX_WAVEFORM_SEL BIT(2) +#define HSIO_S1G_DFT_CFG2_RX_FREQOFF_DIR BIT(1) +#define HSIO_S1G_DFT_CFG2_RX_FREQOFF_ENA BIT(0) + +#define HSIO_S1G_RC_PLL_BIST_CFG_PLL_BIST_ENA BIT(20) +#define HSIO_S1G_RC_PLL_BIST_CFG_PLL_BIST_FBS_HIGH(x) (((x) << 16) & GENMASK(17, 16)) +#define HSIO_S1G_RC_PLL_BIST_CFG_PLL_BIST_FBS_HIGH_M GENMASK(17, 16) +#define HSIO_S1G_RC_PLL_BIST_CFG_PLL_BIST_FBS_HIGH_X(x) (((x) & GENMASK(17, 16)) >> 16) +#define HSIO_S1G_RC_PLL_BIST_CFG_PLL_BIST_HIGH(x) (((x) << 8) & GENMASK(15, 8)) +#define HSIO_S1G_RC_PLL_BIST_CFG_PLL_BIST_HIGH_M GENMASK(15, 8) +#define 
HSIO_S1G_RC_PLL_BIST_CFG_PLL_BIST_HIGH_X(x) (((x) & GENMASK(15, 8)) >> 8) +#define HSIO_S1G_RC_PLL_BIST_CFG_PLL_BIST_LOW(x) ((x) & GENMASK(7, 0)) +#define HSIO_S1G_RC_PLL_BIST_CFG_PLL_BIST_LOW_M GENMASK(7, 0) + +#define HSIO_S1G_MISC_CFG_DES_100FX_KICK_MODE(x) (((x) << 11) & GENMASK(12, 11)) +#define HSIO_S1G_MISC_CFG_DES_100FX_KICK_MODE_M GENMASK(12, 11) +#define HSIO_S1G_MISC_CFG_DES_100FX_KICK_MODE_X(x) (((x) & GENMASK(12, 11)) >> 11) +#define HSIO_S1G_MISC_CFG_DES_100FX_CPMD_SWAP BIT(10) +#define HSIO_S1G_MISC_CFG_DES_100FX_CPMD_MODE BIT(9) +#define HSIO_S1G_MISC_CFG_DES_100FX_CPMD_ENA BIT(8) +#define HSIO_S1G_MISC_CFG_RX_LPI_MODE_ENA BIT(5) +#define HSIO_S1G_MISC_CFG_TX_LPI_MODE_ENA BIT(4) +#define HSIO_S1G_MISC_CFG_RX_DATA_INV_ENA BIT(3) +#define HSIO_S1G_MISC_CFG_TX_DATA_INV_ENA BIT(2) +#define HSIO_S1G_MISC_CFG_LANE_RST BIT(0) + +#define HSIO_S1G_DFT_STATUS_PLL_BIST_NOT_DONE BIT(7) +#define HSIO_S1G_DFT_STATUS_PLL_BIST_FAILED BIT(6) +#define HSIO_S1G_DFT_STATUS_PLL_BIST_TIMEOUT_ERR BIT(5) +#define HSIO_S1G_DFT_STATUS_BIST_ACTIVE BIT(3) +#define HSIO_S1G_DFT_STATUS_BIST_NOSYNC BIT(2) +#define HSIO_S1G_DFT_STATUS_BIST_COMPLETE_N BIT(1) +#define HSIO_S1G_DFT_STATUS_BIST_ERROR BIT(0) + +#define HSIO_S1G_MISC_STATUS_DES_100FX_PHASE_SEL BIT(0) + +#define HSIO_MCB_S1G_ADDR_CFG_SERDES1G_WR_ONE_SHOT BIT(31) +#define HSIO_MCB_S1G_ADDR_CFG_SERDES1G_RD_ONE_SHOT BIT(30) +#define HSIO_MCB_S1G_ADDR_CFG_SERDES1G_ADDR(x) ((x) & GENMASK(8, 0)) +#define HSIO_MCB_S1G_ADDR_CFG_SERDES1G_ADDR_M GENMASK(8, 0) + +#define HSIO_S6G_DIG_CFG_GP(x) (((x) << 16) & GENMASK(18, 16)) +#define HSIO_S6G_DIG_CFG_GP_M GENMASK(18, 16) +#define HSIO_S6G_DIG_CFG_GP_X(x) (((x) & GENMASK(18, 16)) >> 16) +#define HSIO_S6G_DIG_CFG_TX_BIT_DOUBLING_MODE_ENA BIT(7) +#define HSIO_S6G_DIG_CFG_SIGDET_TESTMODE BIT(6) +#define HSIO_S6G_DIG_CFG_SIGDET_AST(x) (((x) << 3) & GENMASK(5, 3)) +#define HSIO_S6G_DIG_CFG_SIGDET_AST_M GENMASK(5, 3) +#define HSIO_S6G_DIG_CFG_SIGDET_AST_X(x) (((x) & GENMASK(5, 3)) >> 3) +#define HSIO_S6G_DIG_CFG_SIGDET_DST(x) ((x) & GENMASK(2, 0)) +#define HSIO_S6G_DIG_CFG_SIGDET_DST_M GENMASK(2, 0) + +#define HSIO_S6G_DFT_CFG0_LAZYBIT BIT(31) +#define HSIO_S6G_DFT_CFG0_INV_DIS BIT(23) +#define HSIO_S6G_DFT_CFG0_PRBS_SEL(x) (((x) << 20) & GENMASK(21, 20)) +#define HSIO_S6G_DFT_CFG0_PRBS_SEL_M GENMASK(21, 20) +#define HSIO_S6G_DFT_CFG0_PRBS_SEL_X(x) (((x) & GENMASK(21, 20)) >> 20) +#define HSIO_S6G_DFT_CFG0_TEST_MODE(x) (((x) << 16) & GENMASK(18, 16)) +#define HSIO_S6G_DFT_CFG0_TEST_MODE_M GENMASK(18, 16) +#define HSIO_S6G_DFT_CFG0_TEST_MODE_X(x) (((x) & GENMASK(18, 16)) >> 16) +#define HSIO_S6G_DFT_CFG0_RX_PHS_CORR_DIS BIT(4) +#define HSIO_S6G_DFT_CFG0_RX_PDSENS_ENA BIT(3) +#define HSIO_S6G_DFT_CFG0_RX_DFT_ENA BIT(2) +#define HSIO_S6G_DFT_CFG0_TX_DFT_ENA BIT(0) + +#define HSIO_S6G_DFT_CFG1_TX_JITTER_AMPL(x) (((x) << 8) & GENMASK(17, 8)) +#define HSIO_S6G_DFT_CFG1_TX_JITTER_AMPL_M GENMASK(17, 8) +#define HSIO_S6G_DFT_CFG1_TX_JITTER_AMPL_X(x) (((x) & GENMASK(17, 8)) >> 8) +#define HSIO_S6G_DFT_CFG1_TX_STEP_FREQ(x) (((x) << 4) & GENMASK(7, 4)) +#define HSIO_S6G_DFT_CFG1_TX_STEP_FREQ_M GENMASK(7, 4) +#define HSIO_S6G_DFT_CFG1_TX_STEP_FREQ_X(x) (((x) & GENMASK(7, 4)) >> 4) +#define HSIO_S6G_DFT_CFG1_TX_JI_ENA BIT(3) +#define HSIO_S6G_DFT_CFG1_TX_WAVEFORM_SEL BIT(2) +#define HSIO_S6G_DFT_CFG1_TX_FREQOFF_DIR BIT(1) +#define HSIO_S6G_DFT_CFG1_TX_FREQOFF_ENA BIT(0) + +#define HSIO_S6G_DFT_CFG2_RX_JITTER_AMPL(x) (((x) << 8) & GENMASK(17, 8)) +#define HSIO_S6G_DFT_CFG2_RX_JITTER_AMPL_M GENMASK(17, 8) +#define 
HSIO_S6G_DFT_CFG2_RX_JITTER_AMPL_X(x) (((x) & GENMASK(17, 8)) >> 8) +#define HSIO_S6G_DFT_CFG2_RX_STEP_FREQ(x) (((x) << 4) & GENMASK(7, 4)) +#define HSIO_S6G_DFT_CFG2_RX_STEP_FREQ_M GENMASK(7, 4) +#define HSIO_S6G_DFT_CFG2_RX_STEP_FREQ_X(x) (((x) & GENMASK(7, 4)) >> 4) +#define HSIO_S6G_DFT_CFG2_RX_JI_ENA BIT(3) +#define HSIO_S6G_DFT_CFG2_RX_WAVEFORM_SEL BIT(2) +#define HSIO_S6G_DFT_CFG2_RX_FREQOFF_DIR BIT(1) +#define HSIO_S6G_DFT_CFG2_RX_FREQOFF_ENA BIT(0) + +#define HSIO_S6G_RC_PLL_BIST_CFG_PLL_BIST_ENA BIT(20) +#define HSIO_S6G_RC_PLL_BIST_CFG_PLL_BIST_FBS_HIGH(x) (((x) << 16) & GENMASK(19, 16)) +#define HSIO_S6G_RC_PLL_BIST_CFG_PLL_BIST_FBS_HIGH_M GENMASK(19, 16) +#define HSIO_S6G_RC_PLL_BIST_CFG_PLL_BIST_FBS_HIGH_X(x) (((x) & GENMASK(19, 16)) >> 16) +#define HSIO_S6G_RC_PLL_BIST_CFG_PLL_BIST_HIGH(x) (((x) << 8) & GENMASK(15, 8)) +#define HSIO_S6G_RC_PLL_BIST_CFG_PLL_BIST_HIGH_M GENMASK(15, 8) +#define HSIO_S6G_RC_PLL_BIST_CFG_PLL_BIST_HIGH_X(x) (((x) & GENMASK(15, 8)) >> 8) +#define HSIO_S6G_RC_PLL_BIST_CFG_PLL_BIST_LOW(x) ((x) & GENMASK(7, 0)) +#define HSIO_S6G_RC_PLL_BIST_CFG_PLL_BIST_LOW_M GENMASK(7, 0) + +#define HSIO_S6G_MISC_CFG_SEL_RECO_CLK(x) (((x) << 13) & GENMASK(14, 13)) +#define HSIO_S6G_MISC_CFG_SEL_RECO_CLK_M GENMASK(14, 13) +#define HSIO_S6G_MISC_CFG_SEL_RECO_CLK_X(x) (((x) & GENMASK(14, 13)) >> 13) +#define HSIO_S6G_MISC_CFG_DES_100FX_KICK_MODE(x) (((x) << 11) & GENMASK(12, 11)) +#define HSIO_S6G_MISC_CFG_DES_100FX_KICK_MODE_M GENMASK(12, 11) +#define HSIO_S6G_MISC_CFG_DES_100FX_KICK_MODE_X(x) (((x) & GENMASK(12, 11)) >> 11) +#define HSIO_S6G_MISC_CFG_DES_100FX_CPMD_SWAP BIT(10) +#define HSIO_S6G_MISC_CFG_DES_100FX_CPMD_MODE BIT(9) +#define HSIO_S6G_MISC_CFG_DES_100FX_CPMD_ENA BIT(8) +#define HSIO_S6G_MISC_CFG_RX_BUS_FLIP_ENA BIT(7) +#define HSIO_S6G_MISC_CFG_TX_BUS_FLIP_ENA BIT(6) +#define HSIO_S6G_MISC_CFG_RX_LPI_MODE_ENA BIT(5) +#define HSIO_S6G_MISC_CFG_TX_LPI_MODE_ENA BIT(4) +#define HSIO_S6G_MISC_CFG_RX_DATA_INV_ENA BIT(3) +#define HSIO_S6G_MISC_CFG_TX_DATA_INV_ENA BIT(2) +#define HSIO_S6G_MISC_CFG_LANE_RST BIT(0) + +#define HSIO_S6G_OB_ANEG_CFG_AN_OB_POST0(x) (((x) << 23) & GENMASK(28, 23)) +#define HSIO_S6G_OB_ANEG_CFG_AN_OB_POST0_M GENMASK(28, 23) +#define HSIO_S6G_OB_ANEG_CFG_AN_OB_POST0_X(x) (((x) & GENMASK(28, 23)) >> 23) +#define HSIO_S6G_OB_ANEG_CFG_AN_OB_POST1(x) (((x) << 18) & GENMASK(22, 18)) +#define HSIO_S6G_OB_ANEG_CFG_AN_OB_POST1_M GENMASK(22, 18) +#define HSIO_S6G_OB_ANEG_CFG_AN_OB_POST1_X(x) (((x) & GENMASK(22, 18)) >> 18) +#define HSIO_S6G_OB_ANEG_CFG_AN_OB_PREC(x) (((x) << 13) & GENMASK(17, 13)) +#define HSIO_S6G_OB_ANEG_CFG_AN_OB_PREC_M GENMASK(17, 13) +#define HSIO_S6G_OB_ANEG_CFG_AN_OB_PREC_X(x) (((x) & GENMASK(17, 13)) >> 13) +#define HSIO_S6G_OB_ANEG_CFG_AN_OB_ENA_CAS(x) (((x) << 6) & GENMASK(8, 6)) +#define HSIO_S6G_OB_ANEG_CFG_AN_OB_ENA_CAS_M GENMASK(8, 6) +#define HSIO_S6G_OB_ANEG_CFG_AN_OB_ENA_CAS_X(x) (((x) & GENMASK(8, 6)) >> 6) +#define HSIO_S6G_OB_ANEG_CFG_AN_OB_LEV(x) ((x) & GENMASK(5, 0)) +#define HSIO_S6G_OB_ANEG_CFG_AN_OB_LEV_M GENMASK(5, 0) + +#define HSIO_S6G_DFT_STATUS_PRBS_SYNC_STAT BIT(8) +#define HSIO_S6G_DFT_STATUS_PLL_BIST_NOT_DONE BIT(7) +#define HSIO_S6G_DFT_STATUS_PLL_BIST_FAILED BIT(6) +#define HSIO_S6G_DFT_STATUS_PLL_BIST_TIMEOUT_ERR BIT(5) +#define HSIO_S6G_DFT_STATUS_BIST_ACTIVE BIT(3) +#define HSIO_S6G_DFT_STATUS_BIST_NOSYNC BIT(2) +#define HSIO_S6G_DFT_STATUS_BIST_COMPLETE_N BIT(1) +#define HSIO_S6G_DFT_STATUS_BIST_ERROR BIT(0) + +#define HSIO_S6G_MISC_STATUS_DES_100FX_PHASE_SEL BIT(0) + +#define 
HSIO_S6G_DES_CFG_DES_PHS_CTRL(x) (((x) << 13) & GENMASK(16, 13)) +#define HSIO_S6G_DES_CFG_DES_PHS_CTRL_M GENMASK(16, 13) +#define HSIO_S6G_DES_CFG_DES_PHS_CTRL_X(x) (((x) & GENMASK(16, 13)) >> 13) +#define HSIO_S6G_DES_CFG_DES_MBTR_CTRL(x) (((x) << 10) & GENMASK(12, 10)) +#define HSIO_S6G_DES_CFG_DES_MBTR_CTRL_M GENMASK(12, 10) +#define HSIO_S6G_DES_CFG_DES_MBTR_CTRL_X(x) (((x) & GENMASK(12, 10)) >> 10) +#define HSIO_S6G_DES_CFG_DES_CPMD_SEL(x) (((x) << 8) & GENMASK(9, 8)) +#define HSIO_S6G_DES_CFG_DES_CPMD_SEL_M GENMASK(9, 8) +#define HSIO_S6G_DES_CFG_DES_CPMD_SEL_X(x) (((x) & GENMASK(9, 8)) >> 8) +#define HSIO_S6G_DES_CFG_DES_BW_HYST(x) (((x) << 5) & GENMASK(7, 5)) +#define HSIO_S6G_DES_CFG_DES_BW_HYST_M GENMASK(7, 5) +#define HSIO_S6G_DES_CFG_DES_BW_HYST_X(x) (((x) & GENMASK(7, 5)) >> 5) +#define HSIO_S6G_DES_CFG_DES_SWAP_HYST BIT(4) +#define HSIO_S6G_DES_CFG_DES_BW_ANA(x) (((x) << 1) & GENMASK(3, 1)) +#define HSIO_S6G_DES_CFG_DES_BW_ANA_M GENMASK(3, 1) +#define HSIO_S6G_DES_CFG_DES_BW_ANA_X(x) (((x) & GENMASK(3, 1)) >> 1) +#define HSIO_S6G_DES_CFG_DES_SWAP_ANA BIT(0) + +#define HSIO_S6G_IB_CFG_IB_SOFSI(x) (((x) << 29) & GENMASK(30, 29)) +#define HSIO_S6G_IB_CFG_IB_SOFSI_M GENMASK(30, 29) +#define HSIO_S6G_IB_CFG_IB_SOFSI_X(x) (((x) & GENMASK(30, 29)) >> 29) +#define HSIO_S6G_IB_CFG_IB_VBULK_SEL BIT(28) +#define HSIO_S6G_IB_CFG_IB_RTRM_ADJ(x) (((x) << 24) & GENMASK(27, 24)) +#define HSIO_S6G_IB_CFG_IB_RTRM_ADJ_M GENMASK(27, 24) +#define HSIO_S6G_IB_CFG_IB_RTRM_ADJ_X(x) (((x) & GENMASK(27, 24)) >> 24) +#define HSIO_S6G_IB_CFG_IB_ICML_ADJ(x) (((x) << 20) & GENMASK(23, 20)) +#define HSIO_S6G_IB_CFG_IB_ICML_ADJ_M GENMASK(23, 20) +#define HSIO_S6G_IB_CFG_IB_ICML_ADJ_X(x) (((x) & GENMASK(23, 20)) >> 20) +#define HSIO_S6G_IB_CFG_IB_TERM_MODE_SEL(x) (((x) << 18) & GENMASK(19, 18)) +#define HSIO_S6G_IB_CFG_IB_TERM_MODE_SEL_M GENMASK(19, 18) +#define HSIO_S6G_IB_CFG_IB_TERM_MODE_SEL_X(x) (((x) & GENMASK(19, 18)) >> 18) +#define HSIO_S6G_IB_CFG_IB_SIG_DET_CLK_SEL(x) (((x) << 15) & GENMASK(17, 15)) +#define HSIO_S6G_IB_CFG_IB_SIG_DET_CLK_SEL_M GENMASK(17, 15) +#define HSIO_S6G_IB_CFG_IB_SIG_DET_CLK_SEL_X(x) (((x) & GENMASK(17, 15)) >> 15) +#define HSIO_S6G_IB_CFG_IB_REG_PAT_SEL_HP(x) (((x) << 13) & GENMASK(14, 13)) +#define HSIO_S6G_IB_CFG_IB_REG_PAT_SEL_HP_M GENMASK(14, 13) +#define HSIO_S6G_IB_CFG_IB_REG_PAT_SEL_HP_X(x) (((x) & GENMASK(14, 13)) >> 13) +#define HSIO_S6G_IB_CFG_IB_REG_PAT_SEL_MID(x) (((x) << 11) & GENMASK(12, 11)) +#define HSIO_S6G_IB_CFG_IB_REG_PAT_SEL_MID_M GENMASK(12, 11) +#define HSIO_S6G_IB_CFG_IB_REG_PAT_SEL_MID_X(x) (((x) & GENMASK(12, 11)) >> 11) +#define HSIO_S6G_IB_CFG_IB_REG_PAT_SEL_LP(x) (((x) << 9) & GENMASK(10, 9)) +#define HSIO_S6G_IB_CFG_IB_REG_PAT_SEL_LP_M GENMASK(10, 9) +#define HSIO_S6G_IB_CFG_IB_REG_PAT_SEL_LP_X(x) (((x) & GENMASK(10, 9)) >> 9) +#define HSIO_S6G_IB_CFG_IB_REG_PAT_SEL_OFFSET(x) (((x) << 7) & GENMASK(8, 7)) +#define HSIO_S6G_IB_CFG_IB_REG_PAT_SEL_OFFSET_M GENMASK(8, 7) +#define HSIO_S6G_IB_CFG_IB_REG_PAT_SEL_OFFSET_X(x) (((x) & GENMASK(8, 7)) >> 7) +#define HSIO_S6G_IB_CFG_IB_ANA_TEST_ENA BIT(6) +#define HSIO_S6G_IB_CFG_IB_SIG_DET_ENA BIT(5) +#define HSIO_S6G_IB_CFG_IB_CONCUR BIT(4) +#define HSIO_S6G_IB_CFG_IB_CAL_ENA BIT(3) +#define HSIO_S6G_IB_CFG_IB_SAM_ENA BIT(2) +#define HSIO_S6G_IB_CFG_IB_EQZ_ENA BIT(1) +#define HSIO_S6G_IB_CFG_IB_REG_ENA BIT(0) + +#define HSIO_S6G_IB_CFG1_IB_TJTAG(x) (((x) << 17) & GENMASK(21, 17)) +#define HSIO_S6G_IB_CFG1_IB_TJTAG_M GENMASK(21, 17) +#define HSIO_S6G_IB_CFG1_IB_TJTAG_X(x) (((x) & GENMASK(21, 17)) >> 17) 
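(Illustrative note, not part of the diff.) Every multi-bit field in this header comes as a trio of macros: FIELD(x) shifts a value into position and masks it, FIELD_M is the in-place mask, and FIELD_X(x) extracts the field from a read-back register word. A minimal sketch of a read-modify-write of the IB_TJTAG field in S6G_IB_CFG1, assuming 'base' is the ioremapped HSIO register block (the real driver may go through regmap instead):

/* Example only -- not part of this patch. */
#include <linux/io.h>

static void example_set_ib_tjtag(void __iomem *base, u32 tjtag)
{
	u32 val = readl(base + HSIO_S6G_IB_CFG1);

	val &= ~HSIO_S6G_IB_CFG1_IB_TJTAG_M;		/* clear the old field */
	val |= HSIO_S6G_IB_CFG1_IB_TJTAG(tjtag);	/* shift in the new value */
	writel(val, base + HSIO_S6G_IB_CFG1);
}

HSIO_S6G_IB_CFG1_IB_TJTAG_X(val) would recover the programmed value from a subsequent read.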
+#define HSIO_S6G_IB_CFG1_IB_TSDET(x) (((x) << 12) & GENMASK(16, 12)) +#define HSIO_S6G_IB_CFG1_IB_TSDET_M GENMASK(16, 12) +#define HSIO_S6G_IB_CFG1_IB_TSDET_X(x) (((x) & GENMASK(16, 12)) >> 12) +#define HSIO_S6G_IB_CFG1_IB_SCALY(x) (((x) << 8) & GENMASK(11, 8)) +#define HSIO_S6G_IB_CFG1_IB_SCALY_M GENMASK(11, 8) +#define HSIO_S6G_IB_CFG1_IB_SCALY_X(x) (((x) & GENMASK(11, 8)) >> 8) +#define HSIO_S6G_IB_CFG1_IB_FILT_HP BIT(7) +#define HSIO_S6G_IB_CFG1_IB_FILT_MID BIT(6) +#define HSIO_S6G_IB_CFG1_IB_FILT_LP BIT(5) +#define HSIO_S6G_IB_CFG1_IB_FILT_OFFSET BIT(4) +#define HSIO_S6G_IB_CFG1_IB_FRC_HP BIT(3) +#define HSIO_S6G_IB_CFG1_IB_FRC_MID BIT(2) +#define HSIO_S6G_IB_CFG1_IB_FRC_LP BIT(1) +#define HSIO_S6G_IB_CFG1_IB_FRC_OFFSET BIT(0) + +#define HSIO_S6G_IB_CFG2_IB_TINFV(x) (((x) << 27) & GENMASK(29, 27)) +#define HSIO_S6G_IB_CFG2_IB_TINFV_M GENMASK(29, 27) +#define HSIO_S6G_IB_CFG2_IB_TINFV_X(x) (((x) & GENMASK(29, 27)) >> 27) +#define HSIO_S6G_IB_CFG2_IB_OINFI(x) (((x) << 22) & GENMASK(26, 22)) +#define HSIO_S6G_IB_CFG2_IB_OINFI_M GENMASK(26, 22) +#define HSIO_S6G_IB_CFG2_IB_OINFI_X(x) (((x) & GENMASK(26, 22)) >> 22) +#define HSIO_S6G_IB_CFG2_IB_TAUX(x) (((x) << 19) & GENMASK(21, 19)) +#define HSIO_S6G_IB_CFG2_IB_TAUX_M GENMASK(21, 19) +#define HSIO_S6G_IB_CFG2_IB_TAUX_X(x) (((x) & GENMASK(21, 19)) >> 19) +#define HSIO_S6G_IB_CFG2_IB_OINFS(x) (((x) << 16) & GENMASK(18, 16)) +#define HSIO_S6G_IB_CFG2_IB_OINFS_M GENMASK(18, 16) +#define HSIO_S6G_IB_CFG2_IB_OINFS_X(x) (((x) & GENMASK(18, 16)) >> 16) +#define HSIO_S6G_IB_CFG2_IB_OCALS(x) (((x) << 10) & GENMASK(15, 10)) +#define HSIO_S6G_IB_CFG2_IB_OCALS_M GENMASK(15, 10) +#define HSIO_S6G_IB_CFG2_IB_OCALS_X(x) (((x) & GENMASK(15, 10)) >> 10) +#define HSIO_S6G_IB_CFG2_IB_TCALV(x) (((x) << 5) & GENMASK(9, 5)) +#define HSIO_S6G_IB_CFG2_IB_TCALV_M GENMASK(9, 5) +#define HSIO_S6G_IB_CFG2_IB_TCALV_X(x) (((x) & GENMASK(9, 5)) >> 5) +#define HSIO_S6G_IB_CFG2_IB_UMAX(x) (((x) << 3) & GENMASK(4, 3)) +#define HSIO_S6G_IB_CFG2_IB_UMAX_M GENMASK(4, 3) +#define HSIO_S6G_IB_CFG2_IB_UMAX_X(x) (((x) & GENMASK(4, 3)) >> 3) +#define HSIO_S6G_IB_CFG2_IB_UREG(x) ((x) & GENMASK(2, 0)) +#define HSIO_S6G_IB_CFG2_IB_UREG_M GENMASK(2, 0) + +#define HSIO_S6G_IB_CFG3_IB_INI_HP(x) (((x) << 18) & GENMASK(23, 18)) +#define HSIO_S6G_IB_CFG3_IB_INI_HP_M GENMASK(23, 18) +#define HSIO_S6G_IB_CFG3_IB_INI_HP_X(x) (((x) & GENMASK(23, 18)) >> 18) +#define HSIO_S6G_IB_CFG3_IB_INI_MID(x) (((x) << 12) & GENMASK(17, 12)) +#define HSIO_S6G_IB_CFG3_IB_INI_MID_M GENMASK(17, 12) +#define HSIO_S6G_IB_CFG3_IB_INI_MID_X(x) (((x) & GENMASK(17, 12)) >> 12) +#define HSIO_S6G_IB_CFG3_IB_INI_LP(x) (((x) << 6) & GENMASK(11, 6)) +#define HSIO_S6G_IB_CFG3_IB_INI_LP_M GENMASK(11, 6) +#define HSIO_S6G_IB_CFG3_IB_INI_LP_X(x) (((x) & GENMASK(11, 6)) >> 6) +#define HSIO_S6G_IB_CFG3_IB_INI_OFFSET(x) ((x) & GENMASK(5, 0)) +#define HSIO_S6G_IB_CFG3_IB_INI_OFFSET_M GENMASK(5, 0) + +#define HSIO_S6G_IB_CFG4_IB_MAX_HP(x) (((x) << 18) & GENMASK(23, 18)) +#define HSIO_S6G_IB_CFG4_IB_MAX_HP_M GENMASK(23, 18) +#define HSIO_S6G_IB_CFG4_IB_MAX_HP_X(x) (((x) & GENMASK(23, 18)) >> 18) +#define HSIO_S6G_IB_CFG4_IB_MAX_MID(x) (((x) << 12) & GENMASK(17, 12)) +#define HSIO_S6G_IB_CFG4_IB_MAX_MID_M GENMASK(17, 12) +#define HSIO_S6G_IB_CFG4_IB_MAX_MID_X(x) (((x) & GENMASK(17, 12)) >> 12) +#define HSIO_S6G_IB_CFG4_IB_MAX_LP(x) (((x) << 6) & GENMASK(11, 6)) +#define HSIO_S6G_IB_CFG4_IB_MAX_LP_M GENMASK(11, 6) +#define HSIO_S6G_IB_CFG4_IB_MAX_LP_X(x) (((x) & GENMASK(11, 6)) >> 6) +#define HSIO_S6G_IB_CFG4_IB_MAX_OFFSET(x) ((x) 
& GENMASK(5, 0)) +#define HSIO_S6G_IB_CFG4_IB_MAX_OFFSET_M GENMASK(5, 0) + +#define HSIO_S6G_IB_CFG5_IB_MIN_HP(x) (((x) << 18) & GENMASK(23, 18)) +#define HSIO_S6G_IB_CFG5_IB_MIN_HP_M GENMASK(23, 18) +#define HSIO_S6G_IB_CFG5_IB_MIN_HP_X(x) (((x) & GENMASK(23, 18)) >> 18) +#define HSIO_S6G_IB_CFG5_IB_MIN_MID(x) (((x) << 12) & GENMASK(17, 12)) +#define HSIO_S6G_IB_CFG5_IB_MIN_MID_M GENMASK(17, 12) +#define HSIO_S6G_IB_CFG5_IB_MIN_MID_X(x) (((x) & GENMASK(17, 12)) >> 12) +#define HSIO_S6G_IB_CFG5_IB_MIN_LP(x) (((x) << 6) & GENMASK(11, 6)) +#define HSIO_S6G_IB_CFG5_IB_MIN_LP_M GENMASK(11, 6) +#define HSIO_S6G_IB_CFG5_IB_MIN_LP_X(x) (((x) & GENMASK(11, 6)) >> 6) +#define HSIO_S6G_IB_CFG5_IB_MIN_OFFSET(x) ((x) & GENMASK(5, 0)) +#define HSIO_S6G_IB_CFG5_IB_MIN_OFFSET_M GENMASK(5, 0) + +#define HSIO_S6G_OB_CFG_OB_IDLE BIT(31) +#define HSIO_S6G_OB_CFG_OB_ENA1V_MODE BIT(30) +#define HSIO_S6G_OB_CFG_OB_POL BIT(29) +#define HSIO_S6G_OB_CFG_OB_POST0(x) (((x) << 23) & GENMASK(28, 23)) +#define HSIO_S6G_OB_CFG_OB_POST0_M GENMASK(28, 23) +#define HSIO_S6G_OB_CFG_OB_POST0_X(x) (((x) & GENMASK(28, 23)) >> 23) +#define HSIO_S6G_OB_CFG_OB_PREC(x) (((x) << 18) & GENMASK(22, 18)) +#define HSIO_S6G_OB_CFG_OB_PREC_M GENMASK(22, 18) +#define HSIO_S6G_OB_CFG_OB_PREC_X(x) (((x) & GENMASK(22, 18)) >> 18) +#define HSIO_S6G_OB_CFG_OB_R_ADJ_MUX BIT(17) +#define HSIO_S6G_OB_CFG_OB_R_ADJ_PDR BIT(16) +#define HSIO_S6G_OB_CFG_OB_POST1(x) (((x) << 11) & GENMASK(15, 11)) +#define HSIO_S6G_OB_CFG_OB_POST1_M GENMASK(15, 11) +#define HSIO_S6G_OB_CFG_OB_POST1_X(x) (((x) & GENMASK(15, 11)) >> 11) +#define HSIO_S6G_OB_CFG_OB_R_COR BIT(10) +#define HSIO_S6G_OB_CFG_OB_SEL_RCTRL BIT(9) +#define HSIO_S6G_OB_CFG_OB_SR_H BIT(8) +#define HSIO_S6G_OB_CFG_OB_SR(x) (((x) << 4) & GENMASK(7, 4)) +#define HSIO_S6G_OB_CFG_OB_SR_M GENMASK(7, 4) +#define HSIO_S6G_OB_CFG_OB_SR_X(x) (((x) & GENMASK(7, 4)) >> 4) +#define HSIO_S6G_OB_CFG_OB_RESISTOR_CTRL(x) ((x) & GENMASK(3, 0)) +#define HSIO_S6G_OB_CFG_OB_RESISTOR_CTRL_M GENMASK(3, 0) + +#define HSIO_S6G_OB_CFG1_OB_ENA_CAS(x) (((x) << 6) & GENMASK(8, 6)) +#define HSIO_S6G_OB_CFG1_OB_ENA_CAS_M GENMASK(8, 6) +#define HSIO_S6G_OB_CFG1_OB_ENA_CAS_X(x) (((x) & GENMASK(8, 6)) >> 6) +#define HSIO_S6G_OB_CFG1_OB_LEV(x) ((x) & GENMASK(5, 0)) +#define HSIO_S6G_OB_CFG1_OB_LEV_M GENMASK(5, 0) + +#define HSIO_S6G_SER_CFG_SER_4TAP_ENA BIT(8) +#define HSIO_S6G_SER_CFG_SER_CPMD_SEL BIT(7) +#define HSIO_S6G_SER_CFG_SER_SWAP_CPMD BIT(6) +#define HSIO_S6G_SER_CFG_SER_ALISEL(x) (((x) << 4) & GENMASK(5, 4)) +#define HSIO_S6G_SER_CFG_SER_ALISEL_M GENMASK(5, 4) +#define HSIO_S6G_SER_CFG_SER_ALISEL_X(x) (((x) & GENMASK(5, 4)) >> 4) +#define HSIO_S6G_SER_CFG_SER_ENHYS BIT(3) +#define HSIO_S6G_SER_CFG_SER_BIG_WIN BIT(2) +#define HSIO_S6G_SER_CFG_SER_EN_WIN BIT(1) +#define HSIO_S6G_SER_CFG_SER_ENALI BIT(0) + +#define HSIO_S6G_COMMON_CFG_SYS_RST BIT(17) +#define HSIO_S6G_COMMON_CFG_SE_DIV2_ENA BIT(16) +#define HSIO_S6G_COMMON_CFG_SE_AUTO_SQUELCH_ENA BIT(15) +#define HSIO_S6G_COMMON_CFG_ENA_LANE BIT(14) +#define HSIO_S6G_COMMON_CFG_PWD_RX BIT(13) +#define HSIO_S6G_COMMON_CFG_PWD_TX BIT(12) +#define HSIO_S6G_COMMON_CFG_LANE_CTRL(x) (((x) << 9) & GENMASK(11, 9)) +#define HSIO_S6G_COMMON_CFG_LANE_CTRL_M GENMASK(11, 9) +#define HSIO_S6G_COMMON_CFG_LANE_CTRL_X(x) (((x) & GENMASK(11, 9)) >> 9) +#define HSIO_S6G_COMMON_CFG_ENA_DIRECT BIT(8) +#define HSIO_S6G_COMMON_CFG_ENA_ELOOP BIT(7) +#define HSIO_S6G_COMMON_CFG_ENA_FLOOP BIT(6) +#define HSIO_S6G_COMMON_CFG_ENA_ILOOP BIT(5) +#define HSIO_S6G_COMMON_CFG_ENA_PLOOP BIT(4) +#define 
HSIO_S6G_COMMON_CFG_HRATE BIT(3) +#define HSIO_S6G_COMMON_CFG_QRATE BIT(2) +#define HSIO_S6G_COMMON_CFG_IF_MODE(x) ((x) & GENMASK(1, 0)) +#define HSIO_S6G_COMMON_CFG_IF_MODE_M GENMASK(1, 0) + +#define HSIO_S6G_PLL_CFG_PLL_ENA_OFFS(x) (((x) << 16) & GENMASK(17, 16)) +#define HSIO_S6G_PLL_CFG_PLL_ENA_OFFS_M GENMASK(17, 16) +#define HSIO_S6G_PLL_CFG_PLL_ENA_OFFS_X(x) (((x) & GENMASK(17, 16)) >> 16) +#define HSIO_S6G_PLL_CFG_PLL_DIV4 BIT(15) +#define HSIO_S6G_PLL_CFG_PLL_ENA_ROT BIT(14) +#define HSIO_S6G_PLL_CFG_PLL_FSM_CTRL_DATA(x) (((x) << 6) & GENMASK(13, 6)) +#define HSIO_S6G_PLL_CFG_PLL_FSM_CTRL_DATA_M GENMASK(13, 6) +#define HSIO_S6G_PLL_CFG_PLL_FSM_CTRL_DATA_X(x) (((x) & GENMASK(13, 6)) >> 6) +#define HSIO_S6G_PLL_CFG_PLL_FSM_ENA BIT(5) +#define HSIO_S6G_PLL_CFG_PLL_FSM_FORCE_SET_ENA BIT(4) +#define HSIO_S6G_PLL_CFG_PLL_FSM_OOR_RECAL_ENA BIT(3) +#define HSIO_S6G_PLL_CFG_PLL_RB_DATA_SEL BIT(2) +#define HSIO_S6G_PLL_CFG_PLL_ROT_DIR BIT(1) +#define HSIO_S6G_PLL_CFG_PLL_ROT_FRQ BIT(0) + +#define HSIO_S6G_ACJTAG_CFG_ACJTAG_INIT_DATA_N BIT(5) +#define HSIO_S6G_ACJTAG_CFG_ACJTAG_INIT_DATA_P BIT(4) +#define HSIO_S6G_ACJTAG_CFG_ACJTAG_INIT_CLK BIT(3) +#define HSIO_S6G_ACJTAG_CFG_OB_DIRECT BIT(2) +#define HSIO_S6G_ACJTAG_CFG_ACJTAG_ENA BIT(1) +#define HSIO_S6G_ACJTAG_CFG_JTAG_CTRL_ENA BIT(0) + +#define HSIO_S6G_GP_CFG_GP_MSB(x) (((x) << 16) & GENMASK(31, 16)) +#define HSIO_S6G_GP_CFG_GP_MSB_M GENMASK(31, 16) +#define HSIO_S6G_GP_CFG_GP_MSB_X(x) (((x) & GENMASK(31, 16)) >> 16) +#define HSIO_S6G_GP_CFG_GP_LSB(x) ((x) & GENMASK(15, 0)) +#define HSIO_S6G_GP_CFG_GP_LSB_M GENMASK(15, 0) + +#define HSIO_S6G_IB_STATUS0_IB_CAL_DONE BIT(8) +#define HSIO_S6G_IB_STATUS0_IB_HP_GAIN_ACT BIT(7) +#define HSIO_S6G_IB_STATUS0_IB_MID_GAIN_ACT BIT(6) +#define HSIO_S6G_IB_STATUS0_IB_LP_GAIN_ACT BIT(5) +#define HSIO_S6G_IB_STATUS0_IB_OFFSET_ACT BIT(4) +#define HSIO_S6G_IB_STATUS0_IB_OFFSET_VLD BIT(3) +#define HSIO_S6G_IB_STATUS0_IB_OFFSET_ERR BIT(2) +#define HSIO_S6G_IB_STATUS0_IB_OFFSDIR BIT(1) +#define HSIO_S6G_IB_STATUS0_IB_SIG_DET BIT(0) + +#define HSIO_S6G_IB_STATUS1_IB_HP_GAIN_STAT(x) (((x) << 18) & GENMASK(23, 18)) +#define HSIO_S6G_IB_STATUS1_IB_HP_GAIN_STAT_M GENMASK(23, 18) +#define HSIO_S6G_IB_STATUS1_IB_HP_GAIN_STAT_X(x) (((x) & GENMASK(23, 18)) >> 18) +#define HSIO_S6G_IB_STATUS1_IB_MID_GAIN_STAT(x) (((x) << 12) & GENMASK(17, 12)) +#define HSIO_S6G_IB_STATUS1_IB_MID_GAIN_STAT_M GENMASK(17, 12) +#define HSIO_S6G_IB_STATUS1_IB_MID_GAIN_STAT_X(x) (((x) & GENMASK(17, 12)) >> 12) +#define HSIO_S6G_IB_STATUS1_IB_LP_GAIN_STAT(x) (((x) << 6) & GENMASK(11, 6)) +#define HSIO_S6G_IB_STATUS1_IB_LP_GAIN_STAT_M GENMASK(11, 6) +#define HSIO_S6G_IB_STATUS1_IB_LP_GAIN_STAT_X(x) (((x) & GENMASK(11, 6)) >> 6) +#define HSIO_S6G_IB_STATUS1_IB_OFFSET_STAT(x) ((x) & GENMASK(5, 0)) +#define HSIO_S6G_IB_STATUS1_IB_OFFSET_STAT_M GENMASK(5, 0) + +#define HSIO_S6G_ACJTAG_STATUS_ACJTAG_CAPT_DATA_N BIT(2) +#define HSIO_S6G_ACJTAG_STATUS_ACJTAG_CAPT_DATA_P BIT(1) +#define HSIO_S6G_ACJTAG_STATUS_IB_DIRECT BIT(0) + +#define HSIO_S6G_PLL_STATUS_PLL_CAL_NOT_DONE BIT(10) +#define HSIO_S6G_PLL_STATUS_PLL_CAL_ERR BIT(9) +#define HSIO_S6G_PLL_STATUS_PLL_OUT_OF_RANGE_ERR BIT(8) +#define HSIO_S6G_PLL_STATUS_PLL_RB_DATA(x) ((x) & GENMASK(7, 0)) +#define HSIO_S6G_PLL_STATUS_PLL_RB_DATA_M GENMASK(7, 0) + +#define HSIO_S6G_REVID_SERDES_REV(x) (((x) << 26) & GENMASK(31, 26)) +#define HSIO_S6G_REVID_SERDES_REV_M GENMASK(31, 26) +#define HSIO_S6G_REVID_SERDES_REV_X(x) (((x) & GENMASK(31, 26)) >> 26) +#define HSIO_S6G_REVID_RCPLL_REV(x) (((x) << 21) & 
GENMASK(25, 21)) +#define HSIO_S6G_REVID_RCPLL_REV_M GENMASK(25, 21) +#define HSIO_S6G_REVID_RCPLL_REV_X(x) (((x) & GENMASK(25, 21)) >> 21) +#define HSIO_S6G_REVID_SER_REV(x) (((x) << 16) & GENMASK(20, 16)) +#define HSIO_S6G_REVID_SER_REV_M GENMASK(20, 16) +#define HSIO_S6G_REVID_SER_REV_X(x) (((x) & GENMASK(20, 16)) >> 16) +#define HSIO_S6G_REVID_DES_REV(x) (((x) << 10) & GENMASK(15, 10)) +#define HSIO_S6G_REVID_DES_REV_M GENMASK(15, 10) +#define HSIO_S6G_REVID_DES_REV_X(x) (((x) & GENMASK(15, 10)) >> 10) +#define HSIO_S6G_REVID_OB_REV(x) (((x) << 5) & GENMASK(9, 5)) +#define HSIO_S6G_REVID_OB_REV_M GENMASK(9, 5) +#define HSIO_S6G_REVID_OB_REV_X(x) (((x) & GENMASK(9, 5)) >> 5) +#define HSIO_S6G_REVID_IB_REV(x) ((x) & GENMASK(4, 0)) +#define HSIO_S6G_REVID_IB_REV_M GENMASK(4, 0) + +#define HSIO_MCB_S6G_ADDR_CFG_SERDES6G_WR_ONE_SHOT BIT(31) +#define HSIO_MCB_S6G_ADDR_CFG_SERDES6G_RD_ONE_SHOT BIT(30) +#define HSIO_MCB_S6G_ADDR_CFG_SERDES6G_ADDR(x) ((x) & GENMASK(24, 0)) +#define HSIO_MCB_S6G_ADDR_CFG_SERDES6G_ADDR_M GENMASK(24, 0) + +#define HSIO_HW_CFG_DEV2G5_10_MODE BIT(6) +#define HSIO_HW_CFG_DEV1G_9_MODE BIT(5) +#define HSIO_HW_CFG_DEV1G_6_MODE BIT(4) +#define HSIO_HW_CFG_DEV1G_5_MODE BIT(3) +#define HSIO_HW_CFG_DEV1G_4_MODE BIT(2) +#define HSIO_HW_CFG_PCIE_ENA BIT(1) +#define HSIO_HW_CFG_QSGMII_ENA BIT(0) + +#define HSIO_HW_QSGMII_CFG_SHYST_DIS BIT(3) +#define HSIO_HW_QSGMII_CFG_E_DET_ENA BIT(2) +#define HSIO_HW_QSGMII_CFG_USE_I1_ENA BIT(1) +#define HSIO_HW_QSGMII_CFG_FLIP_LANES BIT(0) + +#define HSIO_HW_QSGMII_STAT_DELAY_VAR_X200PS(x) (((x) << 1) & GENMASK(6, 1)) +#define HSIO_HW_QSGMII_STAT_DELAY_VAR_X200PS_M GENMASK(6, 1) +#define HSIO_HW_QSGMII_STAT_DELAY_VAR_X200PS_X(x) (((x) & GENMASK(6, 1)) >> 1) +#define HSIO_HW_QSGMII_STAT_SYNC BIT(0) + +#define HSIO_CLK_CFG_CLKDIV_PHY(x) (((x) << 1) & GENMASK(8, 1)) +#define HSIO_CLK_CFG_CLKDIV_PHY_M GENMASK(8, 1) +#define HSIO_CLK_CFG_CLKDIV_PHY_X(x) (((x) & GENMASK(8, 1)) >> 1) +#define HSIO_CLK_CFG_CLKDIV_PHY_DIS BIT(0) + +#define HSIO_TEMP_SENSOR_CTRL_FORCE_TEMP_RD BIT(5) +#define HSIO_TEMP_SENSOR_CTRL_FORCE_RUN BIT(4) +#define HSIO_TEMP_SENSOR_CTRL_FORCE_NO_RST BIT(3) +#define HSIO_TEMP_SENSOR_CTRL_FORCE_POWER_UP BIT(2) +#define HSIO_TEMP_SENSOR_CTRL_FORCE_CLK BIT(1) +#define HSIO_TEMP_SENSOR_CTRL_SAMPLE_ENA BIT(0) + +#define HSIO_TEMP_SENSOR_CFG_RUN_WID(x) (((x) << 8) & GENMASK(15, 8)) +#define HSIO_TEMP_SENSOR_CFG_RUN_WID_M GENMASK(15, 8) +#define HSIO_TEMP_SENSOR_CFG_RUN_WID_X(x) (((x) & GENMASK(15, 8)) >> 8) +#define HSIO_TEMP_SENSOR_CFG_SAMPLE_PER(x) ((x) & GENMASK(7, 0)) +#define HSIO_TEMP_SENSOR_CFG_SAMPLE_PER_M GENMASK(7, 0) + +#define HSIO_TEMP_SENSOR_STAT_TEMP_VALID BIT(8) +#define HSIO_TEMP_SENSOR_STAT_TEMP(x) ((x) & GENMASK(7, 0)) +#define HSIO_TEMP_SENSOR_STAT_TEMP_M GENMASK(7, 0) + +#endif diff --git a/include/sound/hda_codec.h b/include/sound/hda_codec.h new file mode 100644 index 000000000000..0d98bb9068b1 --- /dev/null +++ b/include/sound/hda_codec.h @@ -0,0 +1,534 @@ +/* + * Universal Interface for Intel High Definition Audio Codec + * + * Copyright (c) 2004 Takashi Iwai <tiwai@suse.de> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. 
+ * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ + +#ifndef __SOUND_HDA_CODEC_H +#define __SOUND_HDA_CODEC_H + +#include <linux/kref.h> +#include <linux/mod_devicetable.h> +#include <sound/info.h> +#include <sound/control.h> +#include <sound/pcm.h> +#include <sound/hwdep.h> +#include <sound/hdaudio.h> +#include <sound/hda_verbs.h> +#include <sound/hda_regmap.h> + +/* + * Structures + */ + +struct hda_bus; +struct hda_beep; +struct hda_codec; +struct hda_pcm; +struct hda_pcm_stream; + +/* + * codec bus + * + * each controller needs to create a hda_bus to assign the accessor. + * A hda_bus contains several codecs in the list codec_list. + */ +struct hda_bus { + struct hdac_bus core; + + struct snd_card *card; + + struct pci_dev *pci; + const char *modelname; + + struct mutex prepare_mutex; + + /* assigned PCMs */ + DECLARE_BITMAP(pcm_dev_bits, SNDRV_PCM_DEVICES); + + /* misc op flags */ + unsigned int needs_damn_long_delay :1; + unsigned int allow_bus_reset:1; /* allow bus reset at fatal error */ + /* status for codec/controller */ + unsigned int shutdown :1; /* being unloaded */ + unsigned int response_reset:1; /* controller was reset */ + unsigned int in_reset:1; /* during reset operation */ + unsigned int no_response_fallback:1; /* don't fallback at RIRB error */ + + int primary_dig_out_type; /* primary digital out PCM type */ + unsigned int mixer_assigned; /* codec addr for mixer name */ +}; + +/* from hdac_bus to hda_bus */ +#define to_hda_bus(bus) container_of(bus, struct hda_bus, core) + +/* + * codec preset + * + * Known codecs have the patch to build and set up the controls/PCMs + * better than the generic parser.
+ */ +typedef int (*hda_codec_patch_t)(struct hda_codec *); + +#define HDA_CODEC_ID_SKIP_PROBE 0x00000001 +#define HDA_CODEC_ID_GENERIC_HDMI 0x00000101 +#define HDA_CODEC_ID_GENERIC 0x00000201 + +#define HDA_CODEC_REV_ENTRY(_vid, _rev, _name, _patch) \ + { .vendor_id = (_vid), .rev_id = (_rev), .name = (_name), \ + .api_version = HDA_DEV_LEGACY, \ + .driver_data = (unsigned long)(_patch) } +#define HDA_CODEC_ENTRY(_vid, _name, _patch) \ + HDA_CODEC_REV_ENTRY(_vid, 0, _name, _patch) + +struct hda_codec_driver { + struct hdac_driver core; + const struct hda_device_id *id; +}; + +int __hda_codec_driver_register(struct hda_codec_driver *drv, const char *name, + struct module *owner); +#define hda_codec_driver_register(drv) \ + __hda_codec_driver_register(drv, KBUILD_MODNAME, THIS_MODULE) +void hda_codec_driver_unregister(struct hda_codec_driver *drv); +#define module_hda_codec_driver(drv) \ + module_driver(drv, hda_codec_driver_register, \ + hda_codec_driver_unregister) + +/* ops set by the preset patch */ +struct hda_codec_ops { + int (*build_controls)(struct hda_codec *codec); + int (*build_pcms)(struct hda_codec *codec); + int (*init)(struct hda_codec *codec); + void (*free)(struct hda_codec *codec); + void (*unsol_event)(struct hda_codec *codec, unsigned int res); + void (*set_power_state)(struct hda_codec *codec, hda_nid_t fg, + unsigned int power_state); +#ifdef CONFIG_PM + int (*suspend)(struct hda_codec *codec); + int (*resume)(struct hda_codec *codec); + int (*check_power_status)(struct hda_codec *codec, hda_nid_t nid); +#endif + void (*reboot_notify)(struct hda_codec *codec); + void (*stream_pm)(struct hda_codec *codec, hda_nid_t nid, bool on); +}; + +/* PCM callbacks */ +struct hda_pcm_ops { + int (*open)(struct hda_pcm_stream *info, struct hda_codec *codec, + struct snd_pcm_substream *substream); + int (*close)(struct hda_pcm_stream *info, struct hda_codec *codec, + struct snd_pcm_substream *substream); + int (*prepare)(struct hda_pcm_stream *info, struct hda_codec *codec, + unsigned int stream_tag, unsigned int format, + struct snd_pcm_substream *substream); + int (*cleanup)(struct hda_pcm_stream *info, struct hda_codec *codec, + struct snd_pcm_substream *substream); + unsigned int (*get_delay)(struct hda_pcm_stream *info, + struct hda_codec *codec, + struct snd_pcm_substream *substream); +}; + +/* PCM information for each substream */ +struct hda_pcm_stream { + unsigned int substreams; /* number of substreams, 0 = not exist*/ + unsigned int channels_min; /* min. number of channels */ + unsigned int channels_max; /* max. number of channels */ + hda_nid_t nid; /* default NID to query rates/formats/bps, or set up */ + u32 rates; /* supported rates */ + u64 formats; /* supported formats (SNDRV_PCM_FMTBIT_) */ + unsigned int maxbps; /* supported max. 
bit per sample */ + const struct snd_pcm_chmap_elem *chmap; /* chmap to override */ + struct hda_pcm_ops ops; +}; + +/* PCM types */ +enum { + HDA_PCM_TYPE_AUDIO, + HDA_PCM_TYPE_SPDIF, + HDA_PCM_TYPE_HDMI, + HDA_PCM_TYPE_MODEM, + HDA_PCM_NTYPES +}; + +#define SNDRV_PCM_INVALID_DEVICE (-1) +/* for PCM creation */ +struct hda_pcm { + char *name; + struct hda_pcm_stream stream[2]; + unsigned int pcm_type; /* HDA_PCM_TYPE_XXX */ + int device; /* device number to assign */ + struct snd_pcm *pcm; /* assigned PCM instance */ + bool own_chmap; /* codec driver provides own channel maps */ + /* private: */ + struct hda_codec *codec; + struct kref kref; + struct list_head list; +}; + +/* codec information */ +struct hda_codec { + struct hdac_device core; + struct hda_bus *bus; + struct snd_card *card; + unsigned int addr; /* codec addr*/ + u32 probe_id; /* overridden id for probing */ + + /* detected preset */ + const struct hda_device_id *preset; + const char *modelname; /* model name for preset */ + + /* set by patch */ + struct hda_codec_ops patch_ops; + + /* PCM to create, set by patch_ops.build_pcms callback */ + struct list_head pcm_list_head; + + /* codec specific info */ + void *spec; + + /* beep device */ + struct hda_beep *beep; + unsigned int beep_mode; + + /* widget capabilities cache */ + u32 *wcaps; + + struct snd_array mixers; /* list of assigned mixer elements */ + struct snd_array nids; /* list of mapped mixer elements */ + + struct list_head conn_list; /* linked-list of connection-list */ + + struct mutex spdif_mutex; + struct mutex control_mutex; + struct snd_array spdif_out; + unsigned int spdif_in_enable; /* SPDIF input enable? */ + const hda_nid_t *slave_dig_outs; /* optional digital out slave widgets */ + struct snd_array init_pins; /* initial (BIOS) pin configurations */ + struct snd_array driver_pins; /* pin configs set by codec parser */ + struct snd_array cvt_setups; /* audio convert setups */ + + struct mutex user_mutex; +#ifdef CONFIG_SND_HDA_RECONFIG + struct snd_array init_verbs; /* additional init verbs */ + struct snd_array hints; /* additional hints */ + struct snd_array user_pins; /* default pin configs to override */ +#endif + +#ifdef CONFIG_SND_HDA_HWDEP + struct snd_hwdep *hwdep; /* assigned hwdep device */ +#endif + + /* misc flags */ + unsigned int in_freeing:1; /* being released */ + unsigned int registered:1; /* codec was registered */ + unsigned int spdif_status_reset :1; /* needs to toggle SPDIF for each + * status change + * (e.g. Realtek codecs) + */ + unsigned int pin_amp_workaround:1; /* pin out-amp takes index + * (e.g. Conexant codecs) + */ + unsigned int single_adc_amp:1; /* adc in-amp takes no index + * (e.g. 
CX20549 codec) + */ + unsigned int no_sticky_stream:1; /* no sticky-PCM stream assignment */ + unsigned int pins_shutup:1; /* pins are shut up */ + unsigned int no_trigger_sense:1; /* don't trigger at pin-sensing */ + unsigned int no_jack_detect:1; /* Machine has no jack-detection */ + unsigned int inv_eapd:1; /* broken h/w: inverted EAPD control */ + unsigned int inv_jack_detect:1; /* broken h/w: inverted detection bit */ + unsigned int pcm_format_first:1; /* PCM format must be set first */ + unsigned int cached_write:1; /* write only to caches */ + unsigned int dp_mst:1; /* support DP1.2 Multi-stream transport */ + unsigned int dump_coef:1; /* dump processing coefs in codec proc file */ + unsigned int power_save_node:1; /* advanced PM for each widget */ + unsigned int auto_runtime_pm:1; /* enable automatic codec runtime pm */ + unsigned int force_pin_prefix:1; /* Add location prefix */ + unsigned int link_down_at_suspend:1; /* link down at runtime suspend */ +#ifdef CONFIG_PM + unsigned long power_on_acct; + unsigned long power_off_acct; + unsigned long power_jiffies; +#endif + + /* filter the requested power state per nid */ + unsigned int (*power_filter)(struct hda_codec *codec, hda_nid_t nid, + unsigned int power_state); + + /* codec-specific additional proc output */ + void (*proc_widget_hook)(struct snd_info_buffer *buffer, + struct hda_codec *codec, hda_nid_t nid); + + /* jack detection */ + struct snd_array jacktbl; + unsigned long jackpoll_interval; /* In jiffies. Zero means no poll, rely on unsol events */ + struct delayed_work jackpoll_work; + + /* jack detection */ + struct snd_array jacks; + + int depop_delay; /* depop delay in ms, -1 for default delay time */ + + /* fix-up list */ + int fixup_id; + const struct hda_fixup *fixup_list; + const char *fixup_name; + + /* additional init verbs */ + struct snd_array verbs; +}; + +#define dev_to_hda_codec(_dev) container_of(_dev, struct hda_codec, core.dev) +#define hda_codec_dev(_dev) (&(_dev)->core.dev) + +#define list_for_each_codec(c, bus) \ + list_for_each_entry(c, &(bus)->core.codec_list, core.list) +#define list_for_each_codec_safe(c, n, bus) \ + list_for_each_entry_safe(c, n, &(bus)->core.codec_list, core.list) + +/* snd_hda_codec_read/write optional flags */ +#define HDA_RW_NO_RESPONSE_FALLBACK (1 << 0) + +/* + * constructors + */ +int snd_hda_codec_new(struct hda_bus *bus, struct snd_card *card, + unsigned int codec_addr, struct hda_codec **codecp); +int snd_hda_codec_device_new(struct hda_bus *bus, struct snd_card *card, + unsigned int codec_addr, struct hda_codec *codec); +int snd_hda_codec_configure(struct hda_codec *codec); +int snd_hda_codec_update_widgets(struct hda_codec *codec); + +/* + * low level functions + */ +static inline unsigned int +snd_hda_codec_read(struct hda_codec *codec, hda_nid_t nid, + int flags, + unsigned int verb, unsigned int parm) +{ + return snd_hdac_codec_read(&codec->core, nid, flags, verb, parm); +} + +static inline int +snd_hda_codec_write(struct hda_codec *codec, hda_nid_t nid, int flags, + unsigned int verb, unsigned int parm) +{ + return snd_hdac_codec_write(&codec->core, nid, flags, verb, parm); +} + +#define snd_hda_param_read(codec, nid, param) \ + snd_hdac_read_parm(&(codec)->core, nid, param) +#define snd_hda_get_sub_nodes(codec, nid, start_nid) \ + snd_hdac_get_sub_nodes(&(codec)->core, nid, start_nid) +int snd_hda_get_connections(struct hda_codec *codec, hda_nid_t nid, + hda_nid_t *conn_list, int max_conns); +static inline int +snd_hda_get_num_conns(struct hda_codec *codec, 
hda_nid_t nid) +{ + return snd_hda_get_connections(codec, nid, NULL, 0); +} + +#define snd_hda_get_raw_connections(codec, nid, list, max_conns) \ + snd_hdac_get_connections(&(codec)->core, nid, list, max_conns) +#define snd_hda_get_num_raw_conns(codec, nid) \ + snd_hdac_get_connections(&(codec)->core, nid, NULL, 0); + +int snd_hda_get_conn_list(struct hda_codec *codec, hda_nid_t nid, + const hda_nid_t **listp); +int snd_hda_override_conn_list(struct hda_codec *codec, hda_nid_t nid, int nums, + const hda_nid_t *list); +int snd_hda_get_conn_index(struct hda_codec *codec, hda_nid_t mux, + hda_nid_t nid, int recursive); +unsigned int snd_hda_get_num_devices(struct hda_codec *codec, hda_nid_t nid); +int snd_hda_get_devices(struct hda_codec *codec, hda_nid_t nid, + u8 *dev_list, int max_devices); +int snd_hda_get_dev_select(struct hda_codec *codec, hda_nid_t nid); +int snd_hda_set_dev_select(struct hda_codec *codec, hda_nid_t nid, int dev_id); + +struct hda_verb { + hda_nid_t nid; + u32 verb; + u32 param; +}; + +void snd_hda_sequence_write(struct hda_codec *codec, + const struct hda_verb *seq); + +/* unsolicited event */ +static inline void +snd_hda_queue_unsol_event(struct hda_bus *bus, u32 res, u32 res_ex) +{ + snd_hdac_bus_queue_event(&bus->core, res, res_ex); +} + +/* cached write */ +static inline int +snd_hda_codec_write_cache(struct hda_codec *codec, hda_nid_t nid, + int flags, unsigned int verb, unsigned int parm) +{ + return snd_hdac_regmap_write(&codec->core, nid, verb, parm); +} + +/* the struct for codec->pin_configs */ +struct hda_pincfg { + hda_nid_t nid; + unsigned char ctrl; /* original pin control value */ + unsigned char target; /* target pin control value */ + unsigned int cfg; /* default configuration */ +}; + +unsigned int snd_hda_codec_get_pincfg(struct hda_codec *codec, hda_nid_t nid); +int snd_hda_codec_set_pincfg(struct hda_codec *codec, hda_nid_t nid, + unsigned int cfg); +int snd_hda_add_pincfg(struct hda_codec *codec, struct snd_array *list, + hda_nid_t nid, unsigned int cfg); /* for hwdep */ +void snd_hda_shutup_pins(struct hda_codec *codec); + +/* SPDIF controls */ +struct hda_spdif_out { + hda_nid_t nid; /* Converter nid values relate to */ + unsigned int status; /* IEC958 status bits */ + unsigned short ctls; /* SPDIF control bits */ +}; +struct hda_spdif_out *snd_hda_spdif_out_of_nid(struct hda_codec *codec, + hda_nid_t nid); +void snd_hda_spdif_ctls_unassign(struct hda_codec *codec, int idx); +void snd_hda_spdif_ctls_assign(struct hda_codec *codec, int idx, hda_nid_t nid); + +/* + * Mixer + */ +int snd_hda_codec_build_controls(struct hda_codec *codec); + +/* + * PCM + */ +int snd_hda_codec_parse_pcms(struct hda_codec *codec); +int snd_hda_codec_build_pcms(struct hda_codec *codec); + +__printf(2, 3) +struct hda_pcm *snd_hda_codec_pcm_new(struct hda_codec *codec, + const char *fmt, ...); + +static inline void snd_hda_codec_pcm_get(struct hda_pcm *pcm) +{ + kref_get(&pcm->kref); +} +void snd_hda_codec_pcm_put(struct hda_pcm *pcm); + +int snd_hda_codec_prepare(struct hda_codec *codec, + struct hda_pcm_stream *hinfo, + unsigned int stream, + unsigned int format, + struct snd_pcm_substream *substream); +void snd_hda_codec_cleanup(struct hda_codec *codec, + struct hda_pcm_stream *hinfo, + struct snd_pcm_substream *substream); + +void snd_hda_codec_setup_stream(struct hda_codec *codec, hda_nid_t nid, + u32 stream_tag, + int channel_id, int format); +void __snd_hda_codec_cleanup_stream(struct hda_codec *codec, hda_nid_t nid, + int do_now); +#define 
snd_hda_codec_cleanup_stream(codec, nid) \ + __snd_hda_codec_cleanup_stream(codec, nid, 0) + +#define snd_hda_query_supported_pcm(codec, nid, ratesp, fmtsp, bpsp) \ + snd_hdac_query_supported_pcm(&(codec)->core, nid, ratesp, fmtsp, bpsp) +#define snd_hda_is_supported_format(codec, nid, fmt) \ + snd_hdac_is_supported_format(&(codec)->core, nid, fmt) + +extern const struct snd_pcm_chmap_elem snd_pcm_2_1_chmaps[]; + +int snd_hda_attach_pcm_stream(struct hda_bus *_bus, struct hda_codec *codec, + struct hda_pcm *cpcm); + +/* + * Misc + */ +void snd_hda_get_codec_name(struct hda_codec *codec, char *name, int namelen); +void snd_hda_codec_set_power_to_all(struct hda_codec *codec, hda_nid_t fg, + unsigned int power_state); + +int snd_hda_lock_devices(struct hda_bus *bus); +void snd_hda_unlock_devices(struct hda_bus *bus); +void snd_hda_bus_reset(struct hda_bus *bus); +void snd_hda_bus_reset_codecs(struct hda_bus *bus); + +int snd_hda_codec_set_name(struct hda_codec *codec, const char *name); + +/* + * power management + */ +extern const struct dev_pm_ops hda_codec_driver_pm; + +static inline +int hda_call_check_power_status(struct hda_codec *codec, hda_nid_t nid) +{ +#ifdef CONFIG_PM + if (codec->patch_ops.check_power_status) + return codec->patch_ops.check_power_status(codec, nid); +#endif + return 0; +} + +/* + * power saving + */ +#define snd_hda_power_up(codec) snd_hdac_power_up(&(codec)->core) +#define snd_hda_power_up_pm(codec) snd_hdac_power_up_pm(&(codec)->core) +#define snd_hda_power_down(codec) snd_hdac_power_down(&(codec)->core) +#define snd_hda_power_down_pm(codec) snd_hdac_power_down_pm(&(codec)->core) +#ifdef CONFIG_PM +void snd_hda_set_power_save(struct hda_bus *bus, int delay); +void snd_hda_update_power_acct(struct hda_codec *codec); +#else +static inline void snd_hda_set_power_save(struct hda_bus *bus, int delay) {} +#endif + +#ifdef CONFIG_SND_HDA_PATCH_LOADER +/* + * patch firmware + */ +int snd_hda_load_patch(struct hda_bus *bus, size_t size, const void *buf); +#endif + +#ifdef CONFIG_SND_HDA_DSP_LOADER +int snd_hda_codec_load_dsp_prepare(struct hda_codec *codec, unsigned int format, + unsigned int size, + struct snd_dma_buffer *bufp); +void snd_hda_codec_load_dsp_trigger(struct hda_codec *codec, bool start); +void snd_hda_codec_load_dsp_cleanup(struct hda_codec *codec, + struct snd_dma_buffer *dmab); +#else +static inline int +snd_hda_codec_load_dsp_prepare(struct hda_codec *codec, unsigned int format, + unsigned int size, + struct snd_dma_buffer *bufp) +{ + return -ENOSYS; +} +static inline void +snd_hda_codec_load_dsp_trigger(struct hda_codec *codec, bool start) {} +static inline void +snd_hda_codec_load_dsp_cleanup(struct hda_codec *codec, + struct snd_dma_buffer *dmab) {} +#endif + +#endif /* __SOUND_HDA_CODEC_H */ diff --git a/include/sound/memalloc.h b/include/sound/memalloc.h index 67561b997915..af3fa577fa06 100644 --- a/include/sound/memalloc.h +++ b/include/sound/memalloc.h @@ -47,10 +47,13 @@ struct snd_dma_device { #define SNDRV_DMA_TYPE_UNKNOWN 0 /* not defined */ #define SNDRV_DMA_TYPE_CONTINUOUS 1 /* continuous no-DMA memory */ #define SNDRV_DMA_TYPE_DEV 2 /* generic device continuous */ +#define SNDRV_DMA_TYPE_DEV_UC 5 /* continuous non-cahced */ #ifdef CONFIG_SND_DMA_SGBUF #define SNDRV_DMA_TYPE_DEV_SG 3 /* generic device SG-buffer */ +#define SNDRV_DMA_TYPE_DEV_UC_SG 6 /* SG non-cached */ #else #define SNDRV_DMA_TYPE_DEV_SG SNDRV_DMA_TYPE_DEV /* no SG-buf support */ +#define SNDRV_DMA_TYPE_DEV_UC_SG SNDRV_DMA_TYPE_DEV_UC #endif #ifdef CONFIG_GENERIC_ALLOCATOR 
#define SNDRV_DMA_TYPE_DEV_IRAM 4 /* generic device iram-buffer */ diff --git a/include/sound/rawmidi.h b/include/sound/rawmidi.h index 6665cb29e1a2..3b5a061132b6 100644 --- a/include/sound/rawmidi.h +++ b/include/sound/rawmidi.h @@ -171,6 +171,7 @@ int __snd_rawmidi_transmit_peek(struct snd_rawmidi_substream *substream, unsigned char *buffer, int count); int __snd_rawmidi_transmit_ack(struct snd_rawmidi_substream *substream, int count); +int snd_rawmidi_proceed(struct snd_rawmidi_substream *substream); /* main midi functions */ diff --git a/include/sound/simple_card_utils.h b/include/sound/simple_card_utils.h index 8bc5e2d8b13c..fb0318f9b10f 100644 --- a/include/sound/simple_card_utils.h +++ b/include/sound/simple_card_utils.h @@ -51,29 +51,35 @@ int asoc_simple_card_parse_card_name(struct snd_soc_card *card, #define asoc_simple_card_parse_clk_cpu(dev, node, dai_link, simple_dai) \ asoc_simple_card_parse_clk(dev, node, dai_link->cpu_of_node, simple_dai, \ - dai_link->cpu_dai_name) + dai_link->cpu_dai_name, NULL) #define asoc_simple_card_parse_clk_codec(dev, node, dai_link, simple_dai) \ asoc_simple_card_parse_clk(dev, node, dai_link->codec_of_node, simple_dai,\ - dai_link->codec_dai_name) + dai_link->codec_dai_name, dai_link->codecs) int asoc_simple_card_parse_clk(struct device *dev, struct device_node *node, struct device_node *dai_of_node, struct asoc_simple_dai *simple_dai, - const char *name); + const char *dai_name, + struct snd_soc_dai_link_component *dlc); int asoc_simple_card_clk_enable(struct asoc_simple_dai *dai); void asoc_simple_card_clk_disable(struct asoc_simple_dai *dai); #define asoc_simple_card_parse_cpu(node, dai_link, \ list_name, cells_name, is_single_link) \ - asoc_simple_card_parse_dai(node, &dai_link->cpu_of_node, \ + asoc_simple_card_parse_dai(node, NULL, \ + &dai_link->cpu_of_node, \ &dai_link->cpu_dai_name, list_name, cells_name, is_single_link) #define asoc_simple_card_parse_codec(node, dai_link, list_name, cells_name) \ - asoc_simple_card_parse_dai(node, &dai_link->codec_of_node, \ - &dai_link->codec_dai_name, list_name, cells_name, NULL) + asoc_simple_card_parse_dai(node, dai_link->codecs, \ + &dai_link->codec_of_node, \ + &dai_link->codec_dai_name, \ + list_name, cells_name, NULL) #define asoc_simple_card_parse_platform(node, dai_link, list_name, cells_name) \ - asoc_simple_card_parse_dai(node, &dai_link->platform_of_node, \ + asoc_simple_card_parse_dai(node, dai_link->platform, \ + &dai_link->platform_of_node, \ NULL, list_name, cells_name, NULL) int asoc_simple_card_parse_dai(struct device_node *node, + struct snd_soc_dai_link_component *dlc, struct device_node **endpoint_np, const char **dai_name, const char *list_name, @@ -81,12 +87,15 @@ int asoc_simple_card_parse_dai(struct device_node *node, int *is_single_links); #define asoc_simple_card_parse_graph_cpu(ep, dai_link) \ - asoc_simple_card_parse_graph_dai(ep, &dai_link->cpu_of_node, \ + asoc_simple_card_parse_graph_dai(ep, NULL, \ + &dai_link->cpu_of_node, \ &dai_link->cpu_dai_name) #define asoc_simple_card_parse_graph_codec(ep, dai_link) \ - asoc_simple_card_parse_graph_dai(ep, &dai_link->codec_of_node, \ + asoc_simple_card_parse_graph_dai(ep, dai_link->codecs, \ + &dai_link->codec_of_node, \ &dai_link->codec_dai_name) int asoc_simple_card_parse_graph_dai(struct device_node *ep, + struct snd_soc_dai_link_component *dlc, struct device_node **endpoint_np, const char **dai_name); diff --git a/include/sound/soc-acpi-intel-match.h b/include/sound/soc-acpi-intel-match.h index bb1d24b703fb..f48f59e5b7b0 
100644 --- a/include/sound/soc-acpi-intel-match.h +++ b/include/sound/soc-acpi-intel-match.h @@ -25,4 +25,10 @@ extern struct snd_soc_acpi_mach snd_soc_acpi_intel_bxt_machines[]; extern struct snd_soc_acpi_mach snd_soc_acpi_intel_glk_machines[]; extern struct snd_soc_acpi_mach snd_soc_acpi_intel_cnl_machines[]; +/* + * generic table used for HDA codec-based platforms, possibly with + * additional ACPI-enumerated codecs + */ +extern struct snd_soc_acpi_mach snd_soc_acpi_intel_hda_machines[]; + #endif diff --git a/include/sound/soc-dapm.h b/include/sound/soc-dapm.h index fdaaafdc7a00..bd8163f151cb 100644 --- a/include/sound/soc-dapm.h +++ b/include/sound/soc-dapm.h @@ -406,12 +406,6 @@ int snd_soc_dapm_new_dai_widgets(struct snd_soc_dapm_context *dapm, struct snd_soc_dai *dai); int snd_soc_dapm_link_dai_widgets(struct snd_soc_card *card); void snd_soc_dapm_connect_dai_link_widgets(struct snd_soc_card *card); -int snd_soc_dapm_new_pcm(struct snd_soc_card *card, - struct snd_soc_pcm_runtime *rtd, - const struct snd_soc_pcm_stream *params, - unsigned int num_params, - struct snd_soc_dapm_widget *source, - struct snd_soc_dapm_widget *sink); /* dapm path setup */ int snd_soc_dapm_new_widgets(struct snd_soc_card *card); @@ -590,9 +584,6 @@ struct snd_soc_dapm_widget { void *priv; /* widget specific data */ struct regulator *regulator; /* attached regulator */ struct pinctrl *pinctrl; /* attached pinctrl */ - const struct snd_soc_pcm_stream *params; /* params for dai links */ - unsigned int num_params; /* number of params for dai links */ - unsigned int params_select; /* currently selected param for dai link */ /* dapm control */ int reg; /* negative reg = no direct dapm */ diff --git a/include/sound/soc-dpcm.h b/include/sound/soc-dpcm.h index 9bb92f187af8..4be3a2b7c106 100644 --- a/include/sound/soc-dpcm.h +++ b/include/sound/soc-dpcm.h @@ -103,6 +103,16 @@ struct snd_soc_dpcm_runtime { int trigger_pending; /* trigger cmd + 1 if pending, 0 if not */ }; +#define for_each_dpcm_fe(be, stream, dpcm) \ + list_for_each_entry(dpcm, &(be)->dpcm[stream].fe_clients, list_fe) + +#define for_each_dpcm_be(fe, stream, dpcm) \ + list_for_each_entry(dpcm, &(fe)->dpcm[stream].be_clients, list_be) +#define for_each_dpcm_be_safe(fe, stream, dpcm, _dpcm) \ + list_for_each_entry_safe(dpcm, _dpcm, &(fe)->dpcm[stream].be_clients, list_be) +#define for_each_dpcm_be_rollback(fe, stream, dpcm) \ + list_for_each_entry_continue_reverse(dpcm, &(fe)->dpcm[stream].be_clients, list_be) + /* can this BE stop and free */ int snd_soc_dpcm_can_be_free_stop(struct snd_soc_pcm_runtime *fe, struct snd_soc_pcm_runtime *be, int stream); diff --git a/include/sound/soc.h b/include/sound/soc.h index 41cec42fb456..f1dab1f4b194 100644 --- a/include/sound/soc.h +++ b/include/sound/soc.h @@ -372,6 +372,11 @@ #define SND_SOC_COMP_ORDER_LATE 1 #define SND_SOC_COMP_ORDER_LAST 2 +#define for_each_comp_order(order) \ + for (order = SND_SOC_COMP_ORDER_FIRST; \ + order <= SND_SOC_COMP_ORDER_LAST; \ + order++) + /* * Bias levels * @@ -859,6 +864,11 @@ struct snd_soc_component { #endif }; +#define for_each_component_dais(component, dai)\ + list_for_each_entry(dai, &(component)->dai_list, list) +#define for_each_component_dais_safe(component, dai, _dai)\ + list_for_each_entry_safe(dai, _dai, &(component)->dai_list, list) + struct snd_soc_rtdcom_list { struct snd_soc_component *component; struct list_head list; /* rtd::component_list */ @@ -915,6 +925,8 @@ struct snd_soc_dai_link { */ const char *platform_name; struct device_node *platform_of_node; + 
struct snd_soc_dai_link_component *platform; + int id; /* optional ID for machine driver link identification */ const struct snd_soc_pcm_stream *params; @@ -976,6 +988,10 @@ struct snd_soc_dai_link { struct list_head list; /* DAI link list of the soc card */ struct snd_soc_dobj dobj; /* For topology */ }; +#define for_each_link_codecs(link, i, codec) \ + for ((i) = 0; \ + ((i) < link->num_codecs) && ((codec) = &link->codecs[i]); \ + (i)++) struct snd_soc_codec_conf { /* @@ -1054,7 +1070,6 @@ struct snd_soc_card { struct snd_soc_dai_link *dai_link; /* predefined links only */ int num_links; /* predefined links only */ struct list_head dai_link_list; /* all links */ - int num_dai_links; struct list_head rtd_list; int num_rtd; @@ -1092,6 +1107,7 @@ struct snd_soc_card { /* lists of probed devices belonging to this card */ struct list_head component_dev_list; + struct list_head list; struct list_head widgets; struct list_head paths; @@ -1114,6 +1130,23 @@ struct snd_soc_card { void *drvdata; }; +#define for_each_card_prelinks(card, i, link) \ + for ((i) = 0; \ + ((i) < (card)->num_links) && ((link) = &(card)->dai_link[i]); \ + (i)++) + +#define for_each_card_links(card, link) \ + list_for_each_entry(dai_link, &(card)->dai_link_list, list) +#define for_each_card_links_safe(card, link, _link) \ + list_for_each_entry_safe(link, _link, &(card)->dai_link_list, list) + +#define for_each_card_rtds(card, rtd) \ + list_for_each_entry(rtd, &(card)->rtd_list, list) +#define for_each_card_rtds_safe(card, rtd, _rtd) \ + list_for_each_entry_safe(rtd, _rtd, &(card)->rtd_list, list) + +#define for_each_card_components(card, component) \ + list_for_each_entry(component, &(card)->component_dev_list, card_list) /* SoC machine DAI configuration, glues a codec and cpu DAI together */ struct snd_soc_pcm_runtime { @@ -1124,6 +1157,8 @@ struct snd_soc_pcm_runtime { enum snd_soc_pcm_subclass pcm_subclass; struct snd_pcm_ops ops; + unsigned int params_select; /* currently selected param for dai link */ + /* Dynamic PCM BE runtime data */ struct snd_soc_dpcm_runtime dpcm[2]; int fe_compr; @@ -1152,6 +1187,13 @@ struct snd_soc_pcm_runtime { unsigned int dev_registered:1; unsigned int pop_wait:1; }; +#define for_each_rtd_codec_dai(rtd, i, dai)\ + for ((i) = 0; \ + ((i) < rtd->num_codecs) && ((dai) = rtd->codec_dais[i]); \ + (i)++) +#define for_each_rtd_codec_dai_rollback(rtd, i, dai) \ + for (; ((i--) >= 0) && ((dai) = rtd->codec_dais[i]);) + /* mixer control */ struct soc_mixer_control { @@ -1359,6 +1401,7 @@ static inline void snd_soc_initialize_card_lists(struct snd_soc_card *card) INIT_LIST_HEAD(&card->dapm_list); INIT_LIST_HEAD(&card->aux_comp_list); INIT_LIST_HEAD(&card->component_dev_list); + INIT_LIST_HEAD(&card->list); } static inline bool snd_soc_volsw_is_stereo(struct soc_mixer_control *mc) diff --git a/include/target/iscsi/iscsi_target_core.h b/include/target/iscsi/iscsi_target_core.h index f2e6abea8490..24c398f4a68f 100644 --- a/include/target/iscsi/iscsi_target_core.h +++ b/include/target/iscsi/iscsi_target_core.h @@ -25,6 +25,7 @@ struct sock; #define ISCSIT_TCP_BACKLOG 256 #define ISCSI_RX_THREAD_NAME "iscsi_trx" #define ISCSI_TX_THREAD_NAME "iscsi_ttx" +#define ISCSI_IQN_LEN 224 /* struct iscsi_node_attrib sanity values */ #define NA_DATAOUT_TIMEOUT 3 @@ -270,9 +271,9 @@ struct iscsi_conn_ops { }; struct iscsi_sess_ops { - char InitiatorName[224]; + char InitiatorName[ISCSI_IQN_LEN]; char InitiatorAlias[256]; - char TargetName[224]; + char TargetName[ISCSI_IQN_LEN]; char TargetAlias[256]; char 
TargetAddress[256]; u16 TargetPortalGroupTag; /* [0..65535] */ @@ -855,7 +856,6 @@ struct iscsi_wwn_stat_grps { }; struct iscsi_tiqn { -#define ISCSI_IQN_LEN 224 unsigned char tiqn[ISCSI_IQN_LEN]; enum tiqn_state_table tiqn_state; int tiqn_access_count; diff --git a/include/target/iscsi/iscsi_target_stat.h b/include/target/iscsi/iscsi_target_stat.h index 4d75a2c426ca..ff6a47209313 100644 --- a/include/target/iscsi/iscsi_target_stat.h +++ b/include/target/iscsi/iscsi_target_stat.h @@ -33,7 +33,7 @@ struct iscsi_sess_err_stats { u32 cxn_timeout_errors; u32 pdu_format_errors; u32 last_sess_failure_type; - char last_sess_fail_rem_name[224]; + char last_sess_fail_rem_name[ISCSI_IQN_LEN]; } ____cacheline_aligned; /* iSCSI login failure types (sub oids) */ @@ -56,7 +56,7 @@ struct iscsi_login_stats { u32 last_fail_type; int last_intr_fail_ip_family; struct sockaddr_storage last_intr_fail_sockaddr; - char last_intr_fail_name[224]; + char last_intr_fail_name[ISCSI_IQN_LEN]; } ____cacheline_aligned; /* iSCSI logout stats */ diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h index 7a4ee7852ca4..e3bdb0550a59 100644 --- a/include/target/target_core_base.h +++ b/include/target/target_core_base.h @@ -138,7 +138,6 @@ enum se_cmd_flags_table { SCF_ALUA_NON_OPTIMIZED = 0x00008000, SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC = 0x00020000, SCF_COMPARE_AND_WRITE = 0x00080000, - SCF_COMPARE_AND_WRITE_POST = 0x00100000, SCF_PASSTHROUGH_PROT_SG_TO_MEM_NOALLOC = 0x00200000, SCF_ACK_KREF = 0x00400000, SCF_USE_CPUID = 0x00800000, diff --git a/include/trace/events/btrfs.h b/include/trace/events/btrfs.h index b401c4e36394..8568946f491d 100644 --- a/include/trace/events/btrfs.h +++ b/include/trace/events/btrfs.h @@ -316,7 +316,7 @@ DECLARE_EVENT_CLASS(btrfs__file_extent_item_regular, ), TP_fast_assign_btrfs(bi->root->fs_info, - __entry->root_obj = bi->root->objectid; + __entry->root_obj = bi->root->root_key.objectid; __entry->ino = btrfs_ino(bi); __entry->isize = bi->vfs_inode.i_size; __entry->disk_isize = bi->disk_i_size; @@ -367,7 +367,7 @@ DECLARE_EVENT_CLASS( TP_fast_assign_btrfs( bi->root->fs_info, - __entry->root_obj = bi->root->objectid; + __entry->root_obj = bi->root->root_key.objectid; __entry->ino = btrfs_ino(bi); __entry->isize = bi->vfs_inode.i_size; __entry->disk_isize = bi->disk_i_size; @@ -1477,7 +1477,8 @@ DECLARE_EVENT_CLASS(btrfs__qgroup_rsv_data, ), TP_fast_assign_btrfs(btrfs_sb(inode->i_sb), - __entry->rootid = BTRFS_I(inode)->root->objectid; + __entry->rootid = + BTRFS_I(inode)->root->root_key.objectid; __entry->ino = btrfs_ino(BTRFS_I(inode)); __entry->start = start; __entry->len = len; @@ -1575,6 +1576,27 @@ DEFINE_EVENT(btrfs_qgroup_extent, btrfs_qgroup_trace_extent, TP_ARGS(fs_info, rec) ); +TRACE_EVENT(qgroup_num_dirty_extents, + + TP_PROTO(const struct btrfs_fs_info *fs_info, u64 transid, + u64 num_dirty_extents), + + TP_ARGS(fs_info, transid, num_dirty_extents), + + TP_STRUCT__entry_btrfs( + __field( u64, transid ) + __field( u64, num_dirty_extents ) + ), + + TP_fast_assign_btrfs(fs_info, + __entry->transid = transid; + __entry->num_dirty_extents = num_dirty_extents; + ), + + TP_printk_btrfs("transid=%llu num_dirty_extents=%llu", + __entry->transid, __entry->num_dirty_extents) +); + TRACE_EVENT(btrfs_qgroup_account_extent, TP_PROTO(const struct btrfs_fs_info *fs_info, u64 transid, u64 bytenr, @@ -1675,7 +1697,7 @@ TRACE_EVENT(qgroup_meta_reserve, ), TP_fast_assign_btrfs(root->fs_info, - __entry->refroot = root->objectid; + __entry->refroot = root->root_key.objectid; 
__entry->diff = diff; ), @@ -1697,7 +1719,7 @@ TRACE_EVENT(qgroup_meta_convert, ), TP_fast_assign_btrfs(root->fs_info, - __entry->refroot = root->objectid; + __entry->refroot = root->root_key.objectid; __entry->diff = diff; ), @@ -1721,7 +1743,7 @@ TRACE_EVENT(qgroup_meta_free_all_pertrans, ), TP_fast_assign_btrfs(root->fs_info, - __entry->refroot = root->objectid; + __entry->refroot = root->root_key.objectid; spin_lock(&root->qgroup_meta_rsv_lock); __entry->diff = -(s64)root->qgroup_meta_rsv_pertrans; spin_unlock(&root->qgroup_meta_rsv_lock); @@ -1802,7 +1824,7 @@ TRACE_EVENT(btrfs_inode_mod_outstanding_extents, ), TP_fast_assign_btrfs(root->fs_info, - __entry->root_objectid = root->objectid; + __entry->root_objectid = root->root_key.objectid; __entry->ino = ino; __entry->mod = mod; ), diff --git a/include/trace/events/ext4.h b/include/trace/events/ext4.h index 0e31eb136c57..698e0d8a5ca4 100644 --- a/include/trace/events/ext4.h +++ b/include/trace/events/ext4.h @@ -17,6 +17,7 @@ struct mpage_da_data; struct ext4_map_blocks; struct extent_status; struct ext4_fsmap; +struct partial_cluster; #define EXT4_I(inode) (container_of(inode, struct ext4_inode_info, vfs_inode)) @@ -2035,21 +2036,23 @@ TRACE_EVENT(ext4_ext_show_extent, ); TRACE_EVENT(ext4_remove_blocks, - TP_PROTO(struct inode *inode, struct ext4_extent *ex, - ext4_lblk_t from, ext4_fsblk_t to, - long long partial_cluster), + TP_PROTO(struct inode *inode, struct ext4_extent *ex, + ext4_lblk_t from, ext4_fsblk_t to, + struct partial_cluster *pc), - TP_ARGS(inode, ex, from, to, partial_cluster), + TP_ARGS(inode, ex, from, to, pc), TP_STRUCT__entry( __field( dev_t, dev ) __field( ino_t, ino ) __field( ext4_lblk_t, from ) __field( ext4_lblk_t, to ) - __field( long long, partial ) __field( ext4_fsblk_t, ee_pblk ) __field( ext4_lblk_t, ee_lblk ) __field( unsigned short, ee_len ) + __field( ext4_fsblk_t, pc_pclu ) + __field( ext4_lblk_t, pc_lblk ) + __field( int, pc_state) ), TP_fast_assign( @@ -2057,14 +2060,16 @@ TRACE_EVENT(ext4_remove_blocks, __entry->ino = inode->i_ino; __entry->from = from; __entry->to = to; - __entry->partial = partial_cluster; __entry->ee_pblk = ext4_ext_pblock(ex); __entry->ee_lblk = le32_to_cpu(ex->ee_block); __entry->ee_len = ext4_ext_get_actual_len(ex); + __entry->pc_pclu = pc->pclu; + __entry->pc_lblk = pc->lblk; + __entry->pc_state = pc->state; ), TP_printk("dev %d,%d ino %lu extent [%u(%llu), %u]" - "from %u to %u partial_cluster %lld", + "from %u to %u partial [pclu %lld lblk %u state %d]", MAJOR(__entry->dev), MINOR(__entry->dev), (unsigned long) __entry->ino, (unsigned) __entry->ee_lblk, @@ -2072,45 +2077,53 @@ TRACE_EVENT(ext4_remove_blocks, (unsigned short) __entry->ee_len, (unsigned) __entry->from, (unsigned) __entry->to, - (long long) __entry->partial) + (long long) __entry->pc_pclu, + (unsigned int) __entry->pc_lblk, + (int) __entry->pc_state) ); TRACE_EVENT(ext4_ext_rm_leaf, TP_PROTO(struct inode *inode, ext4_lblk_t start, struct ext4_extent *ex, - long long partial_cluster), + struct partial_cluster *pc), - TP_ARGS(inode, start, ex, partial_cluster), + TP_ARGS(inode, start, ex, pc), TP_STRUCT__entry( __field( dev_t, dev ) __field( ino_t, ino ) - __field( long long, partial ) __field( ext4_lblk_t, start ) __field( ext4_lblk_t, ee_lblk ) __field( ext4_fsblk_t, ee_pblk ) __field( short, ee_len ) + __field( ext4_fsblk_t, pc_pclu ) + __field( ext4_lblk_t, pc_lblk ) + __field( int, pc_state) ), TP_fast_assign( __entry->dev = inode->i_sb->s_dev; __entry->ino = inode->i_ino; - __entry->partial = 
partial_cluster; __entry->start = start; __entry->ee_lblk = le32_to_cpu(ex->ee_block); __entry->ee_pblk = ext4_ext_pblock(ex); __entry->ee_len = ext4_ext_get_actual_len(ex); + __entry->pc_pclu = pc->pclu; + __entry->pc_lblk = pc->lblk; + __entry->pc_state = pc->state; ), TP_printk("dev %d,%d ino %lu start_lblk %u last_extent [%u(%llu), %u]" - "partial_cluster %lld", + "partial [pclu %lld lblk %u state %d]", MAJOR(__entry->dev), MINOR(__entry->dev), (unsigned long) __entry->ino, (unsigned) __entry->start, (unsigned) __entry->ee_lblk, (unsigned long long) __entry->ee_pblk, (unsigned short) __entry->ee_len, - (long long) __entry->partial) + (long long) __entry->pc_pclu, + (unsigned int) __entry->pc_lblk, + (int) __entry->pc_state) ); TRACE_EVENT(ext4_ext_rm_idx, @@ -2168,9 +2181,9 @@ TRACE_EVENT(ext4_ext_remove_space, TRACE_EVENT(ext4_ext_remove_space_done, TP_PROTO(struct inode *inode, ext4_lblk_t start, ext4_lblk_t end, - int depth, long long partial, __le16 eh_entries), + int depth, struct partial_cluster *pc, __le16 eh_entries), - TP_ARGS(inode, start, end, depth, partial, eh_entries), + TP_ARGS(inode, start, end, depth, pc, eh_entries), TP_STRUCT__entry( __field( dev_t, dev ) @@ -2178,7 +2191,9 @@ TRACE_EVENT(ext4_ext_remove_space_done, __field( ext4_lblk_t, start ) __field( ext4_lblk_t, end ) __field( int, depth ) - __field( long long, partial ) + __field( ext4_fsblk_t, pc_pclu ) + __field( ext4_lblk_t, pc_lblk ) + __field( int, pc_state ) __field( unsigned short, eh_entries ) ), @@ -2188,18 +2203,23 @@ TRACE_EVENT(ext4_ext_remove_space_done, __entry->start = start; __entry->end = end; __entry->depth = depth; - __entry->partial = partial; + __entry->pc_pclu = pc->pclu; + __entry->pc_lblk = pc->lblk; + __entry->pc_state = pc->state; __entry->eh_entries = le16_to_cpu(eh_entries); ), - TP_printk("dev %d,%d ino %lu since %u end %u depth %d partial %lld " + TP_printk("dev %d,%d ino %lu since %u end %u depth %d " + "partial [pclu %lld lblk %u state %d] " "remaining_entries %u", MAJOR(__entry->dev), MINOR(__entry->dev), (unsigned long) __entry->ino, (unsigned) __entry->start, (unsigned) __entry->end, __entry->depth, - (long long) __entry->partial, + (long long) __entry->pc_pclu, + (unsigned int) __entry->pc_lblk, + (int) __entry->pc_state, (unsigned short) __entry->eh_entries) ); @@ -2270,7 +2290,7 @@ TRACE_EVENT(ext4_es_remove_extent, __entry->lblk, __entry->len) ); -TRACE_EVENT(ext4_es_find_delayed_extent_range_enter, +TRACE_EVENT(ext4_es_find_extent_range_enter, TP_PROTO(struct inode *inode, ext4_lblk_t lblk), TP_ARGS(inode, lblk), @@ -2292,7 +2312,7 @@ TRACE_EVENT(ext4_es_find_delayed_extent_range_enter, (unsigned long) __entry->ino, __entry->lblk) ); -TRACE_EVENT(ext4_es_find_delayed_extent_range_exit, +TRACE_EVENT(ext4_es_find_extent_range_exit, TP_PROTO(struct inode *inode, struct extent_status *es), TP_ARGS(inode, es), @@ -2512,6 +2532,41 @@ TRACE_EVENT(ext4_es_shrink, __entry->scan_time, __entry->nr_skipped, __entry->retried) ); +TRACE_EVENT(ext4_es_insert_delayed_block, + TP_PROTO(struct inode *inode, struct extent_status *es, + bool allocated), + + TP_ARGS(inode, es, allocated), + + TP_STRUCT__entry( + __field( dev_t, dev ) + __field( ino_t, ino ) + __field( ext4_lblk_t, lblk ) + __field( ext4_lblk_t, len ) + __field( ext4_fsblk_t, pblk ) + __field( char, status ) + __field( bool, allocated ) + ), + + TP_fast_assign( + __entry->dev = inode->i_sb->s_dev; + __entry->ino = inode->i_ino; + __entry->lblk = es->es_lblk; + __entry->len = es->es_len; + __entry->pblk = ext4_es_pblock(es); + 
__entry->status = ext4_es_status(es); + __entry->allocated = allocated; + ), + + TP_printk("dev %d,%d ino %lu es [%u/%u) mapped %llu status %s " + "allocated %d", + MAJOR(__entry->dev), MINOR(__entry->dev), + (unsigned long) __entry->ino, + __entry->lblk, __entry->len, + __entry->pblk, show_extent_status(__entry->status), + __entry->allocated) +); + /* fsmap traces */ DECLARE_EVENT_CLASS(ext4_fsmap_class, TP_PROTO(struct super_block *sb, u32 keydev, u32 agno, u64 bno, u64 len, diff --git a/include/trace/events/f2fs.h b/include/trace/events/f2fs.h index 795698925d20..3ec73f17ee2a 100644 --- a/include/trace/events/f2fs.h +++ b/include/trace/events/f2fs.h @@ -82,7 +82,6 @@ TRACE_DEFINE_ENUM(CP_TRIMMED); { REQ_OP_WRITE, "WRITE" }, \ { REQ_OP_FLUSH, "FLUSH" }, \ { REQ_OP_DISCARD, "DISCARD" }, \ - { REQ_OP_ZONE_REPORT, "ZONE_REPORT" }, \ { REQ_OP_SECURE_ERASE, "SECURE_ERASE" }, \ { REQ_OP_ZONE_RESET, "ZONE_RESET" }, \ { REQ_OP_WRITE_SAME, "WRITE_SAME" }, \ diff --git a/include/trace/events/hwmon.h b/include/trace/events/hwmon.h new file mode 100644 index 000000000000..d7a1d0ffb679 --- /dev/null +++ b/include/trace/events/hwmon.h @@ -0,0 +1,71 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM hwmon + +#if !defined(_TRACE_HWMON_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_HWMON_H + +#include <linux/tracepoint.h> + +DECLARE_EVENT_CLASS(hwmon_attr_class, + + TP_PROTO(int index, const char *attr_name, long val), + + TP_ARGS(index, attr_name, val), + + TP_STRUCT__entry( + __field(int, index) + __string(attr_name, attr_name) + __field(long, val) + ), + + TP_fast_assign( + __entry->index = index; + __assign_str(attr_name, attr_name); + __entry->val = val; + ), + + TP_printk("index=%d, attr_name=%s, val=%ld", + __entry->index, __get_str(attr_name), __entry->val) +); + +DEFINE_EVENT(hwmon_attr_class, hwmon_attr_show, + + TP_PROTO(int index, const char *attr_name, long val), + + TP_ARGS(index, attr_name, val) +); + +DEFINE_EVENT(hwmon_attr_class, hwmon_attr_store, + + TP_PROTO(int index, const char *attr_name, long val), + + TP_ARGS(index, attr_name, val) +); + +TRACE_EVENT(hwmon_attr_show_string, + + TP_PROTO(int index, const char *attr_name, const char *s), + + TP_ARGS(index, attr_name, s), + + TP_STRUCT__entry( + __field(int, index) + __string(attr_name, attr_name) + __string(label, s) + ), + + TP_fast_assign( + __entry->index = index; + __assign_str(attr_name, attr_name); + __assign_str(label, s); + ), + + TP_printk("index=%d, attr_name=%s, val=%s", + __entry->index, __get_str(attr_name), __get_str(label)) +); + +#endif /* _TRACE_HWMON_H */ + +/* This part must be outside protection */ +#include <trace/define_trace.h> diff --git a/include/trace/events/kyber.h b/include/trace/events/kyber.h new file mode 100644 index 000000000000..a9834c37ac40 --- /dev/null +++ b/include/trace/events/kyber.h @@ -0,0 +1,96 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM kyber + +#if !defined(_TRACE_KYBER_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_KYBER_H + +#include <linux/blkdev.h> +#include <linux/tracepoint.h> + +#define DOMAIN_LEN 16 +#define LATENCY_TYPE_LEN 8 + +TRACE_EVENT(kyber_latency, + + TP_PROTO(struct request_queue *q, const char *domain, const char *type, + unsigned int percentile, unsigned int numerator, + unsigned int denominator, unsigned int samples), + + TP_ARGS(q, domain, type, percentile, numerator, denominator, samples), + + TP_STRUCT__entry( + __field( dev_t, dev ) + __array( char, domain, DOMAIN_LEN ) + 
__array( char, type, LATENCY_TYPE_LEN ) + __field( u8, percentile ) + __field( u8, numerator ) + __field( u8, denominator ) + __field( unsigned int, samples ) + ), + + TP_fast_assign( + __entry->dev = disk_devt(dev_to_disk(kobj_to_dev(q->kobj.parent))); + strlcpy(__entry->domain, domain, DOMAIN_LEN); + strlcpy(__entry->type, type, DOMAIN_LEN); + __entry->percentile = percentile; + __entry->numerator = numerator; + __entry->denominator = denominator; + __entry->samples = samples; + ), + + TP_printk("%d,%d %s %s p%u %u/%u samples=%u", + MAJOR(__entry->dev), MINOR(__entry->dev), __entry->domain, + __entry->type, __entry->percentile, __entry->numerator, + __entry->denominator, __entry->samples) +); + +TRACE_EVENT(kyber_adjust, + + TP_PROTO(struct request_queue *q, const char *domain, + unsigned int depth), + + TP_ARGS(q, domain, depth), + + TP_STRUCT__entry( + __field( dev_t, dev ) + __array( char, domain, DOMAIN_LEN ) + __field( unsigned int, depth ) + ), + + TP_fast_assign( + __entry->dev = disk_devt(dev_to_disk(kobj_to_dev(q->kobj.parent))); + strlcpy(__entry->domain, domain, DOMAIN_LEN); + __entry->depth = depth; + ), + + TP_printk("%d,%d %s %u", + MAJOR(__entry->dev), MINOR(__entry->dev), __entry->domain, + __entry->depth) +); + +TRACE_EVENT(kyber_throttled, + + TP_PROTO(struct request_queue *q, const char *domain), + + TP_ARGS(q, domain), + + TP_STRUCT__entry( + __field( dev_t, dev ) + __array( char, domain, DOMAIN_LEN ) + ), + + TP_fast_assign( + __entry->dev = disk_devt(dev_to_disk(kobj_to_dev(q->kobj.parent))); + strlcpy(__entry->domain, domain, DOMAIN_LEN); + ), + + TP_printk("%d,%d %s", MAJOR(__entry->dev), MINOR(__entry->dev), + __entry->domain) +); + +#define _TRACE_KYBER_H +#endif /* _TRACE_KYBER_H */ + +/* This part must be outside protection */ +#include <trace/define_trace.h> diff --git a/include/trace/events/migrate.h b/include/trace/events/migrate.h index 711372845945..705b33d1e395 100644 --- a/include/trace/events/migrate.h +++ b/include/trace/events/migrate.h @@ -70,33 +70,6 @@ TRACE_EVENT(mm_migrate_pages, __print_symbolic(__entry->mode, MIGRATE_MODE), __print_symbolic(__entry->reason, MIGRATE_REASON)) ); - -TRACE_EVENT(mm_numa_migrate_ratelimit, - - TP_PROTO(struct task_struct *p, int dst_nid, unsigned long nr_pages), - - TP_ARGS(p, dst_nid, nr_pages), - - TP_STRUCT__entry( - __array( char, comm, TASK_COMM_LEN) - __field( pid_t, pid) - __field( int, dst_nid) - __field( unsigned long, nr_pages) - ), - - TP_fast_assign( - memcpy(__entry->comm, p->comm, TASK_COMM_LEN); - __entry->pid = p->pid; - __entry->dst_nid = dst_nid; - __entry->nr_pages = nr_pages; - ), - - TP_printk("comm=%s pid=%d dst_nid=%d nr_pages=%lu", - __entry->comm, - __entry->pid, - __entry->dst_nid, - __entry->nr_pages) -); #endif /* _TRACE_MIGRATE_H */ /* This part must be outside protection */ diff --git a/include/trace/events/mmflags.h b/include/trace/events/mmflags.h index a81cffb76d89..a1675d43777e 100644 --- a/include/trace/events/mmflags.h +++ b/include/trace/events/mmflags.h @@ -88,6 +88,7 @@ {1UL << PG_dirty, "dirty" }, \ {1UL << PG_lru, "lru" }, \ {1UL << PG_active, "active" }, \ + {1UL << PG_workingset, "workingset" }, \ {1UL << PG_slab, "slab" }, \ {1UL << PG_owner_priv_1, "owner_priv_1" }, \ {1UL << PG_arch_1, "arch_1" }, \ diff --git a/include/trace/events/rcu.h b/include/trace/events/rcu.h index a8d07feff6a0..f0c4d10e614b 100644 --- a/include/trace/events/rcu.h +++ b/include/trace/events/rcu.h @@ -393,9 +393,8 @@ TRACE_EVENT(rcu_quiescent_state_report, * Tracepoint for quiescent states 
detected by force_quiescent_state(). * These trace events include the type of RCU, the grace-period number * that was blocked by the CPU, the CPU itself, and the type of quiescent - * state, which can be "dti" for dyntick-idle mode, "kick" when kicking - * a CPU that has been in dyntick-idle mode for too long, or "rqc" if the - * CPU got a quiescent state via its rcu_qs_ctr. + * state, which can be "dti" for dyntick-idle mode or "kick" when kicking + * a CPU that has been in dyntick-idle mode for too long. */ TRACE_EVENT(rcu_fqs, @@ -705,20 +704,20 @@ TRACE_EVENT(rcu_torture_read, ); /* - * Tracepoint for _rcu_barrier() execution. The string "s" describes - * the _rcu_barrier phase: - * "Begin": _rcu_barrier() started. - * "EarlyExit": _rcu_barrier() piggybacked, thus early exit. - * "Inc1": _rcu_barrier() piggyback check counter incremented. - * "OfflineNoCB": _rcu_barrier() found callback on never-online CPU - * "OnlineNoCB": _rcu_barrier() found online no-CBs CPU. - * "OnlineQ": _rcu_barrier() found online CPU with callbacks. - * "OnlineNQ": _rcu_barrier() found online CPU, no callbacks. + * Tracepoint for rcu_barrier() execution. The string "s" describes + * the rcu_barrier phase: + * "Begin": rcu_barrier() started. + * "EarlyExit": rcu_barrier() piggybacked, thus early exit. + * "Inc1": rcu_barrier() piggyback check counter incremented. + * "OfflineNoCB": rcu_barrier() found callback on never-online CPU + * "OnlineNoCB": rcu_barrier() found online no-CBs CPU. + * "OnlineQ": rcu_barrier() found online CPU with callbacks. + * "OnlineNQ": rcu_barrier() found online CPU, no callbacks. * "IRQ": An rcu_barrier_callback() callback posted on remote CPU. * "IRQNQ": An rcu_barrier_callback() callback found no callbacks. * "CB": An rcu_barrier_callback() invoked a callback, not the last. * "LastCB": An rcu_barrier_callback() invoked the last callback. - * "Inc2": _rcu_barrier() piggyback check counter incremented. + * "Inc2": rcu_barrier() piggyback check counter incremented. * The "cpu" argument is the CPU or -1 if meaningless, the "cnt" argument * is the count of remaining callbacks, and "done" is the piggybacking count. 
*/ diff --git a/include/trace/events/rpcrdma.h b/include/trace/events/rpcrdma.h index 53df203b8057..b093058f78aa 100644 --- a/include/trace/events/rpcrdma.h +++ b/include/trace/events/rpcrdma.h @@ -263,7 +263,7 @@ DECLARE_EVENT_CLASS(xprtrdma_mr, ); #define DEFINE_MR_EVENT(name) \ - DEFINE_EVENT(xprtrdma_mr, name, \ + DEFINE_EVENT(xprtrdma_mr, xprtrdma_mr_##name, \ TP_PROTO( \ const struct rpcrdma_mr *mr \ ), \ @@ -306,7 +306,7 @@ DECLARE_EVENT_CLASS(xprtrdma_cb_event, ** Connection events **/ -TRACE_EVENT(xprtrdma_conn_upcall, +TRACE_EVENT(xprtrdma_cm_event, TP_PROTO( const struct rpcrdma_xprt *r_xprt, struct rdma_cm_event *event @@ -377,7 +377,7 @@ DEFINE_RXPRT_EVENT(xprtrdma_reinsert); DEFINE_RXPRT_EVENT(xprtrdma_reconnect); DEFINE_RXPRT_EVENT(xprtrdma_inject_dsc); -TRACE_EVENT(xprtrdma_qp_error, +TRACE_EVENT(xprtrdma_qp_event, TP_PROTO( const struct rpcrdma_xprt *r_xprt, const struct ib_event *event @@ -509,7 +509,7 @@ TRACE_EVENT(xprtrdma_post_send, TP_STRUCT__entry( __field(const void *, req) __field(int, num_sge) - __field(bool, signaled) + __field(int, signaled) __field(int, status) ), @@ -651,11 +651,11 @@ DEFINE_FRWR_DONE_EVENT(xprtrdma_wc_fastreg); DEFINE_FRWR_DONE_EVENT(xprtrdma_wc_li); DEFINE_FRWR_DONE_EVENT(xprtrdma_wc_li_wake); -DEFINE_MR_EVENT(xprtrdma_localinv); -DEFINE_MR_EVENT(xprtrdma_dma_map); -DEFINE_MR_EVENT(xprtrdma_dma_unmap); -DEFINE_MR_EVENT(xprtrdma_remoteinv); -DEFINE_MR_EVENT(xprtrdma_recover_mr); +DEFINE_MR_EVENT(localinv); +DEFINE_MR_EVENT(map); +DEFINE_MR_EVENT(unmap); +DEFINE_MR_EVENT(remoteinv); +DEFINE_MR_EVENT(recycle); /** ** Reply events diff --git a/include/trace/events/rxrpc.h b/include/trace/events/rxrpc.h index 196587b8f204..573d5b901fb1 100644 --- a/include/trace/events/rxrpc.h +++ b/include/trace/events/rxrpc.h @@ -56,7 +56,6 @@ enum rxrpc_peer_trace { rxrpc_peer_new, rxrpc_peer_processing, rxrpc_peer_put, - rxrpc_peer_queued_error, }; enum rxrpc_conn_trace { @@ -257,8 +256,7 @@ enum rxrpc_tx_point { EM(rxrpc_peer_got, "GOT") \ EM(rxrpc_peer_new, "NEW") \ EM(rxrpc_peer_processing, "PRO") \ - EM(rxrpc_peer_put, "PUT") \ - E_(rxrpc_peer_queued_error, "QER") + E_(rxrpc_peer_put, "PUT") #define rxrpc_conn_traces \ EM(rxrpc_conn_got, "GOT") \ @@ -933,6 +931,7 @@ TRACE_EVENT(rxrpc_tx_packet, TP_fast_assign( __entry->call = call_id; memcpy(&__entry->whdr, whdr, sizeof(__entry->whdr)); + __entry->where = where; ), TP_printk("c=%08x %08x:%08x:%08x:%04x %08x %08x %02x %02x %s %s", diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h index 0be866c91f62..f07b270d4fc4 100644 --- a/include/trace/events/sched.h +++ b/include/trace/events/sched.h @@ -159,9 +159,14 @@ TRACE_EVENT(sched_switch, (__entry->prev_state & (TASK_REPORT_MAX - 1)) ? __print_flags(__entry->prev_state & (TASK_REPORT_MAX - 1), "|", - { 0x01, "S" }, { 0x02, "D" }, { 0x04, "T" }, - { 0x08, "t" }, { 0x10, "X" }, { 0x20, "Z" }, - { 0x40, "P" }, { 0x80, "I" }) : + { TASK_INTERRUPTIBLE, "S" }, + { TASK_UNINTERRUPTIBLE, "D" }, + { __TASK_STOPPED, "T" }, + { __TASK_TRACED, "t" }, + { EXIT_DEAD, "X" }, + { EXIT_ZOMBIE, "Z" }, + { TASK_PARKED, "P" }, + { TASK_DEAD, "I" }) : "R", __entry->prev_state & TASK_REPORT_MAX ? 
"+" : "", diff --git a/include/trace/events/signal.h b/include/trace/events/signal.h index 86582923d51c..1db7e4b07c01 100644 --- a/include/trace/events/signal.h +++ b/include/trace/events/signal.h @@ -11,8 +11,7 @@ #define TP_STORE_SIGINFO(__entry, info) \ do { \ - if (info == SEND_SIG_NOINFO || \ - info == SEND_SIG_FORCED) { \ + if (info == SEND_SIG_NOINFO) { \ __entry->errno = 0; \ __entry->code = SI_USER; \ } else if (info == SEND_SIG_PRIV) { \ @@ -50,7 +49,7 @@ enum { */ TRACE_EVENT(signal_generate, - TP_PROTO(int sig, struct siginfo *info, struct task_struct *task, + TP_PROTO(int sig, struct kernel_siginfo *info, struct task_struct *task, int group, int result), TP_ARGS(sig, info, task, group, result), @@ -96,7 +95,7 @@ TRACE_EVENT(signal_generate, */ TRACE_EVENT(signal_deliver, - TP_PROTO(int sig, struct siginfo *info, struct k_sigaction *ka), + TP_PROTO(int sig, struct kernel_siginfo *info, struct k_sigaction *ka), TP_ARGS(sig, info, ka), diff --git a/include/trace/events/sunrpc.h b/include/trace/events/sunrpc.h index bbb08a3ef5cc..28e384186c35 100644 --- a/include/trace/events/sunrpc.h +++ b/include/trace/events/sunrpc.h @@ -470,14 +470,14 @@ TRACE_EVENT(xprt_ping, __get_str(addr), __get_str(port), __entry->status) ); -TRACE_EVENT(xs_tcp_data_ready, - TP_PROTO(struct rpc_xprt *xprt, int err, unsigned int total), +TRACE_EVENT(xs_stream_read_data, + TP_PROTO(struct rpc_xprt *xprt, ssize_t err, size_t total), TP_ARGS(xprt, err, total), TP_STRUCT__entry( - __field(int, err) - __field(unsigned int, total) + __field(ssize_t, err) + __field(size_t, total) __string(addr, xprt ? xprt->address_strings[RPC_DISPLAY_ADDR] : "(null)") __string(port, xprt ? xprt->address_strings[RPC_DISPLAY_PORT] : @@ -493,21 +493,11 @@ TRACE_EVENT(xs_tcp_data_ready, xprt->address_strings[RPC_DISPLAY_PORT] : "(null)"); ), - TP_printk("peer=[%s]:%s err=%d total=%u", __get_str(addr), + TP_printk("peer=[%s]:%s err=%zd total=%zu", __get_str(addr), __get_str(port), __entry->err, __entry->total) ); -#define rpc_show_sock_xprt_flags(flags) \ - __print_flags(flags, "|", \ - { TCP_RCV_LAST_FRAG, "TCP_RCV_LAST_FRAG" }, \ - { TCP_RCV_COPY_FRAGHDR, "TCP_RCV_COPY_FRAGHDR" }, \ - { TCP_RCV_COPY_XID, "TCP_RCV_COPY_XID" }, \ - { TCP_RCV_COPY_DATA, "TCP_RCV_COPY_DATA" }, \ - { TCP_RCV_READ_CALLDIR, "TCP_RCV_READ_CALLDIR" }, \ - { TCP_RCV_COPY_CALLDIR, "TCP_RCV_COPY_CALLDIR" }, \ - { TCP_RPC_REPLY, "TCP_RPC_REPLY" }) - -TRACE_EVENT(xs_tcp_data_recv, +TRACE_EVENT(xs_stream_read_request, TP_PROTO(struct sock_xprt *xs), TP_ARGS(xs), @@ -516,25 +506,22 @@ TRACE_EVENT(xs_tcp_data_recv, __string(addr, xs->xprt.address_strings[RPC_DISPLAY_ADDR]) __string(port, xs->xprt.address_strings[RPC_DISPLAY_PORT]) __field(u32, xid) - __field(unsigned long, flags) __field(unsigned long, copied) __field(unsigned int, reclen) - __field(unsigned long, offset) + __field(unsigned int, offset) ), TP_fast_assign( __assign_str(addr, xs->xprt.address_strings[RPC_DISPLAY_ADDR]); __assign_str(port, xs->xprt.address_strings[RPC_DISPLAY_PORT]); - __entry->xid = be32_to_cpu(xs->tcp_xid); - __entry->flags = xs->tcp_flags; - __entry->copied = xs->tcp_copied; - __entry->reclen = xs->tcp_reclen; - __entry->offset = xs->tcp_offset; + __entry->xid = be32_to_cpu(xs->recv.xid); + __entry->copied = xs->recv.copied; + __entry->reclen = xs->recv.len; + __entry->offset = xs->recv.offset; ), - TP_printk("peer=[%s]:%s xid=0x%08x flags=%s copied=%lu reclen=%u offset=%lu", + TP_printk("peer=[%s]:%s xid=0x%08x copied=%lu reclen=%u offset=%u", __get_str(addr), __get_str(port), 
__entry->xid, - rpc_show_sock_xprt_flags(__entry->flags), __entry->copied, __entry->reclen, __entry->offset) ); diff --git a/include/trace/events/tcp.h b/include/trace/events/tcp.h index ac55b328d61b..2bc9960a31aa 100644 --- a/include/trace/events/tcp.h +++ b/include/trace/events/tcp.h @@ -56,6 +56,7 @@ DECLARE_EVENT_CLASS(tcp_event_sk_skb, TP_STRUCT__entry( __field(const void *, skbaddr) __field(const void *, skaddr) + __field(int, state) __field(__u16, sport) __field(__u16, dport) __array(__u8, saddr, 4) @@ -70,6 +71,7 @@ DECLARE_EVENT_CLASS(tcp_event_sk_skb, __entry->skbaddr = skb; __entry->skaddr = sk; + __entry->state = sk->sk_state; __entry->sport = ntohs(inet->inet_sport); __entry->dport = ntohs(inet->inet_dport); @@ -84,9 +86,10 @@ DECLARE_EVENT_CLASS(tcp_event_sk_skb, sk->sk_v6_rcv_saddr, sk->sk_v6_daddr); ), - TP_printk("sport=%hu dport=%hu saddr=%pI4 daddr=%pI4 saddrv6=%pI6c daddrv6=%pI6c", + TP_printk("sport=%hu dport=%hu saddr=%pI4 daddr=%pI4 saddrv6=%pI6c daddrv6=%pI6c state=%s\n", __entry->sport, __entry->dport, __entry->saddr, __entry->daddr, - __entry->saddr_v6, __entry->daddr_v6) + __entry->saddr_v6, __entry->daddr_v6, + show_tcp_state_name(__entry->state)) ); DEFINE_EVENT(tcp_event_sk_skb, tcp_retransmit_skb, diff --git a/include/uapi/asm-generic/hugetlb_encode.h b/include/uapi/asm-generic/hugetlb_encode.h index e4732d3c2998..b0f8e87235bd 100644 --- a/include/uapi/asm-generic/hugetlb_encode.h +++ b/include/uapi/asm-generic/hugetlb_encode.h @@ -26,7 +26,9 @@ #define HUGETLB_FLAG_ENCODE_2MB (21 << HUGETLB_FLAG_ENCODE_SHIFT) #define HUGETLB_FLAG_ENCODE_8MB (23 << HUGETLB_FLAG_ENCODE_SHIFT) #define HUGETLB_FLAG_ENCODE_16MB (24 << HUGETLB_FLAG_ENCODE_SHIFT) +#define HUGETLB_FLAG_ENCODE_32MB (25 << HUGETLB_FLAG_ENCODE_SHIFT) #define HUGETLB_FLAG_ENCODE_256MB (28 << HUGETLB_FLAG_ENCODE_SHIFT) +#define HUGETLB_FLAG_ENCODE_512MB (29 << HUGETLB_FLAG_ENCODE_SHIFT) #define HUGETLB_FLAG_ENCODE_1GB (30 << HUGETLB_FLAG_ENCODE_SHIFT) #define HUGETLB_FLAG_ENCODE_2GB (31 << HUGETLB_FLAG_ENCODE_SHIFT) #define HUGETLB_FLAG_ENCODE_16GB (34 << HUGETLB_FLAG_ENCODE_SHIFT) diff --git a/include/uapi/asm-generic/siginfo.h b/include/uapi/asm-generic/siginfo.h index 80e2a7227205..cb3d6c267181 100644 --- a/include/uapi/asm-generic/siginfo.h +++ b/include/uapi/asm-generic/siginfo.h @@ -10,18 +10,7 @@ typedef union sigval { void __user *sival_ptr; } sigval_t; -/* - * This is the size (including padding) of the part of the - * struct siginfo that is before the union. - */ -#ifndef __ARCH_SI_PREAMBLE_SIZE -#define __ARCH_SI_PREAMBLE_SIZE (3 * sizeof(int)) -#endif - #define SI_MAX_SIZE 128 -#ifndef SI_PAD_SIZE -#define SI_PAD_SIZE ((SI_MAX_SIZE - __ARCH_SI_PREAMBLE_SIZE) / sizeof(int)) -#endif /* * The default "si_band" type is "long", as specified by POSIX. 
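The hunk above removes the old preamble-size arithmetic from siginfo.h, and the hunk that follows replaces it with a padded union. A worked check of the sizing (an illustration, not part of this patch, assuming the generic 3-int preamble that the removed macro defaulted to): previously SI_PAD_SIZE = (SI_MAX_SIZE - 3 * sizeof(int)) / sizeof(int) = (128 - 12) / 4 = 29 ints of padding after the preamble, whereas the reworked layout simply pads the whole siginfo to SI_MAX_SIZE, so the overall size stays 128 bytes either way:

#include <linux/build_bug.h>
#include <linux/signal.h>

static inline void siginfo_size_check(void)
{
	/* both the old and the reworked layout are expected to give 128 bytes */
	BUILD_BUG_ON(sizeof(siginfo_t) != SI_MAX_SIZE);
}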
@@ -40,96 +29,108 @@ typedef union sigval { #define __ARCH_SI_ATTRIBUTES #endif -typedef struct siginfo { - int si_signo; -#ifndef __ARCH_HAS_SWAPPED_SIGINFO - int si_errno; - int si_code; -#else - int si_code; - int si_errno; -#endif - - union { - int _pad[SI_PAD_SIZE]; - - /* kill() */ - struct { - __kernel_pid_t _pid; /* sender's pid */ - __kernel_uid32_t _uid; /* sender's uid */ - } _kill; - - /* POSIX.1b timers */ - struct { - __kernel_timer_t _tid; /* timer id */ - int _overrun; /* overrun count */ - sigval_t _sigval; /* same as below */ - int _sys_private; /* not to be passed to user */ - } _timer; - - /* POSIX.1b signals */ - struct { - __kernel_pid_t _pid; /* sender's pid */ - __kernel_uid32_t _uid; /* sender's uid */ - sigval_t _sigval; - } _rt; - - /* SIGCHLD */ - struct { - __kernel_pid_t _pid; /* which child */ - __kernel_uid32_t _uid; /* sender's uid */ - int _status; /* exit code */ - __ARCH_SI_CLOCK_T _utime; - __ARCH_SI_CLOCK_T _stime; - } _sigchld; - - /* SIGILL, SIGFPE, SIGSEGV, SIGBUS, SIGTRAP, SIGEMT */ - struct { - void __user *_addr; /* faulting insn/memory ref. */ +union __sifields { + /* kill() */ + struct { + __kernel_pid_t _pid; /* sender's pid */ + __kernel_uid32_t _uid; /* sender's uid */ + } _kill; + + /* POSIX.1b timers */ + struct { + __kernel_timer_t _tid; /* timer id */ + int _overrun; /* overrun count */ + sigval_t _sigval; /* same as below */ + int _sys_private; /* not to be passed to user */ + } _timer; + + /* POSIX.1b signals */ + struct { + __kernel_pid_t _pid; /* sender's pid */ + __kernel_uid32_t _uid; /* sender's uid */ + sigval_t _sigval; + } _rt; + + /* SIGCHLD */ + struct { + __kernel_pid_t _pid; /* which child */ + __kernel_uid32_t _uid; /* sender's uid */ + int _status; /* exit code */ + __ARCH_SI_CLOCK_T _utime; + __ARCH_SI_CLOCK_T _stime; + } _sigchld; + + /* SIGILL, SIGFPE, SIGSEGV, SIGBUS, SIGTRAP, SIGEMT */ + struct { + void __user *_addr; /* faulting insn/memory ref. */ #ifdef __ARCH_SI_TRAPNO - int _trapno; /* TRAP # which caused the signal */ + int _trapno; /* TRAP # which caused the signal */ #endif #ifdef __ia64__ - int _imm; /* immediate value for "break" */ - unsigned int _flags; /* see ia64 si_flags */ - unsigned long _isr; /* isr */ + int _imm; /* immediate value for "break" */ + unsigned int _flags; /* see ia64 si_flags */ + unsigned long _isr; /* isr */ #endif #define __ADDR_BND_PKEY_PAD (__alignof__(void *) < sizeof(short) ? 
\ sizeof(short) : __alignof__(void *)) - union { - /* - * used when si_code=BUS_MCEERR_AR or - * used when si_code=BUS_MCEERR_AO - */ - short _addr_lsb; /* LSB of the reported address */ - /* used when si_code=SEGV_BNDERR */ - struct { - char _dummy_bnd[__ADDR_BND_PKEY_PAD]; - void __user *_lower; - void __user *_upper; - } _addr_bnd; - /* used when si_code=SEGV_PKUERR */ - struct { - char _dummy_pkey[__ADDR_BND_PKEY_PAD]; - __u32 _pkey; - } _addr_pkey; - }; - } _sigfault; - - /* SIGPOLL */ - struct { - __ARCH_SI_BAND_T _band; /* POLL_IN, POLL_OUT, POLL_MSG */ - int _fd; - } _sigpoll; + union { + /* + * used when si_code=BUS_MCEERR_AR or + * used when si_code=BUS_MCEERR_AO + */ + short _addr_lsb; /* LSB of the reported address */ + /* used when si_code=SEGV_BNDERR */ + struct { + char _dummy_bnd[__ADDR_BND_PKEY_PAD]; + void __user *_lower; + void __user *_upper; + } _addr_bnd; + /* used when si_code=SEGV_PKUERR */ + struct { + char _dummy_pkey[__ADDR_BND_PKEY_PAD]; + __u32 _pkey; + } _addr_pkey; + }; + } _sigfault; + + /* SIGPOLL */ + struct { + __ARCH_SI_BAND_T _band; /* POLL_IN, POLL_OUT, POLL_MSG */ + int _fd; + } _sigpoll; + + /* SIGSYS */ + struct { + void __user *_call_addr; /* calling user insn */ + int _syscall; /* triggering system call number */ + unsigned int _arch; /* AUDIT_ARCH_* of syscall */ + } _sigsys; +}; - /* SIGSYS */ - struct { - void __user *_call_addr; /* calling user insn */ - int _syscall; /* triggering system call number */ - unsigned int _arch; /* AUDIT_ARCH_* of syscall */ - } _sigsys; - } _sifields; +#ifndef __ARCH_HAS_SWAPPED_SIGINFO +#define __SIGINFO \ +struct { \ + int si_signo; \ + int si_errno; \ + int si_code; \ + union __sifields _sifields; \ +} +#else +#define __SIGINFO \ +struct { \ + int si_signo; \ + int si_code; \ + int si_errno; \ + union __sifields _sifields; \ +} +#endif /* __ARCH_HAS_SWAPPED_SIGINFO */ + +typedef struct siginfo { + union { + __SIGINFO; + int _si_pad[SI_MAX_SIZE/sizeof(int)]; + }; } __ARCH_SI_ATTRIBUTES siginfo_t; /* @@ -286,6 +287,12 @@ typedef struct siginfo { #define NSIGSYS 1 /* + * SIGEMT si_codes + */ +#define EMT_TAGOVF 1 /* tag overflow */ +#define NSIGEMT 1 + +/* * sigevent definitions * * It seems likely that SIGEV_THREAD will have to be handled from diff --git a/include/uapi/asm-generic/unistd.h b/include/uapi/asm-generic/unistd.h index df4bedb9b01c..538546edbfbd 100644 --- a/include/uapi/asm-generic/unistd.h +++ b/include/uapi/asm-generic/unistd.h @@ -242,10 +242,12 @@ __SYSCALL(__NR_tee, sys_tee) /* fs/stat.c */ #define __NR_readlinkat 78 __SYSCALL(__NR_readlinkat, sys_readlinkat) +#if defined(__ARCH_WANT_NEW_STAT) || defined(__ARCH_WANT_STAT64) #define __NR3264_fstatat 79 __SC_3264(__NR3264_fstatat, sys_fstatat64, sys_newfstatat) #define __NR3264_fstat 80 __SC_3264(__NR3264_fstat, sys_fstat64, sys_newfstat) +#endif /* fs/sync.c */ #define __NR_sync 81 diff --git a/include/uapi/linux/android/binder.h b/include/uapi/linux/android/binder.h index bfaec6903b8b..b9ba520f7e4b 100644 --- a/include/uapi/linux/android/binder.h +++ b/include/uapi/linux/android/binder.h @@ -200,6 +200,15 @@ struct binder_node_debug_info { __u32 has_weak_ref; }; +struct binder_node_info_for_ref { + __u32 handle; + __u32 strong_count; + __u32 weak_count; + __u32 reserved1; + __u32 reserved2; + __u32 reserved3; +}; + #define BINDER_WRITE_READ _IOWR('b', 1, struct binder_write_read) #define BINDER_SET_IDLE_TIMEOUT _IOW('b', 3, __s64) #define BINDER_SET_MAX_THREADS _IOW('b', 5, __u32) @@ -208,6 +217,7 @@ struct binder_node_debug_info { #define 
BINDER_THREAD_EXIT _IOW('b', 8, __s32) #define BINDER_VERSION _IOWR('b', 9, struct binder_version) #define BINDER_GET_NODE_DEBUG_INFO _IOWR('b', 11, struct binder_node_debug_info) +#define BINDER_GET_NODE_INFO_FOR_REF _IOWR('b', 12, struct binder_node_info_for_ref) /* * NOTE: Two special error codes you should check for when calling diff --git a/include/uapi/linux/blkzoned.h b/include/uapi/linux/blkzoned.h index ff5a5db8906a..8f08ff9bdea0 100644 --- a/include/uapi/linux/blkzoned.h +++ b/include/uapi/linux/blkzoned.h @@ -137,8 +137,11 @@ struct blk_zone_range { * sector specified in the report request structure. * @BLKRESETZONE: Reset the write pointer of the zones in the specified * sector range. The sector range must be zone aligned. + * @BLKGETZONESZ: Get the device zone size in number of 512 B sectors. */ #define BLKREPORTZONE _IOWR(0x12, 130, struct blk_zone_report) #define BLKRESETZONE _IOW(0x12, 131, struct blk_zone_range) +#define BLKGETZONESZ _IOW(0x12, 132, __u32) +#define BLKGETNRZONES _IOW(0x12, 133, __u32) #endif /* _UAPI_BLKZONED_H */ diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index 66917a4eba27..852dc17ab47a 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -103,6 +103,7 @@ enum bpf_cmd { BPF_BTF_LOAD, BPF_BTF_GET_FD_BY_ID, BPF_TASK_FD_QUERY, + BPF_MAP_LOOKUP_AND_DELETE_ELEM, }; enum bpf_map_type { @@ -127,6 +128,9 @@ enum bpf_map_type { BPF_MAP_TYPE_SOCKHASH, BPF_MAP_TYPE_CGROUP_STORAGE, BPF_MAP_TYPE_REUSEPORT_SOCKARRAY, + BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE, + BPF_MAP_TYPE_QUEUE, + BPF_MAP_TYPE_STACK, }; enum bpf_prog_type { @@ -152,6 +156,7 @@ enum bpf_prog_type { BPF_PROG_TYPE_LWT_SEG6LOCAL, BPF_PROG_TYPE_LIRC_MODE2, BPF_PROG_TYPE_SK_REUSEPORT, + BPF_PROG_TYPE_FLOW_DISSECTOR, }; enum bpf_attach_type { @@ -172,6 +177,7 @@ enum bpf_attach_type { BPF_CGROUP_UDP4_SENDMSG, BPF_CGROUP_UDP6_SENDMSG, BPF_LIRC_MODE2, + BPF_FLOW_DISSECTOR, __MAX_BPF_ATTACH_TYPE }; @@ -459,6 +465,28 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * + * int bpf_map_push_elem(struct bpf_map *map, const void *value, u64 flags) + * Description + * Push an element *value* in *map*. *flags* is one of: + * + * **BPF_EXIST** + * If the queue/stack is full, the oldest element is removed to + * make room for this. + * Return + * 0 on success, or a negative error in case of failure. + * + * int bpf_map_pop_elem(struct bpf_map *map, void *value) + * Description + * Pop an element from *map*. + * Return + * 0 on success, or a negative error in case of failure. + * + * int bpf_map_peek_elem(struct bpf_map *map, void *value) + * Description + * Get an element from *map* without removing it. + * Return + * 0 on success, or a negative error in case of failure. + * * int bpf_probe_read(void *dst, u32 size, const void *src) * Description * For tracing programs, safely attempt to read *size* bytes from @@ -1430,7 +1458,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_skb_adjust_room(struct sk_buff *skb, u32 len_diff, u32 mode, u64 flags) + * int bpf_skb_adjust_room(struct sk_buff *skb, s32 len_diff, u32 mode, u64 flags) * Description * Grow or shrink the room for data in the packet associated to * *skb* by *len_diff*, and according to the selected *mode*. @@ -2141,6 +2169,94 @@ union bpf_attr { * request in the skb. * Return * 0 on success, or a negative error in case of failure. 
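/*
 * Illustrative sketch, not part of the patch: creating one of the new
 * BPF_MAP_TYPE_QUEUE maps from user space and draining it with the new
 * BPF_MAP_LOOKUP_AND_DELETE_ELEM command documented above.  Error
 * handling is trimmed and the attr usage follows the documentation as
 * read; treat it as a hedged example, not a reference implementation.
 */
#include <linux/bpf.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static int sys_bpf(int cmd, union bpf_attr *attr, unsigned int size)
{
	return syscall(__NR_bpf, cmd, attr, size);
}

static int queue_map_demo(void)
{
	union bpf_attr attr;
	__u64 value;
	int map_fd;

	/* Queue/stack maps take no key: only a value size and a capacity. */
	memset(&attr, 0, sizeof(attr));
	attr.map_type = BPF_MAP_TYPE_QUEUE;
	attr.key_size = 0;
	attr.value_size = sizeof(value);
	attr.max_entries = 16;
	map_fd = sys_bpf(BPF_MAP_CREATE, &attr, sizeof(attr));
	if (map_fd < 0)
		return -1;

	/* Pop the oldest element: the kernel fills *value and removes it. */
	memset(&attr, 0, sizeof(attr));
	attr.map_fd = map_fd;
	attr.key = 0;			/* queues and stacks have no key */
	attr.value = (__u64)(unsigned long)&value;
	return sys_bpf(BPF_MAP_LOOKUP_AND_DELETE_ELEM, &attr, sizeof(attr));
}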
+ * + * struct bpf_sock *bpf_sk_lookup_tcp(void *ctx, struct bpf_sock_tuple *tuple, u32 tuple_size, u32 netns, u64 flags) + * Description + * Look for TCP socket matching *tuple*, optionally in a child + * network namespace *netns*. The return value must be checked, + * and if non-NULL, released via **bpf_sk_release**\ (). + * + * The *ctx* should point to the context of the program, such as + * the skb or socket (depending on the hook in use). This is used + * to determine the base network namespace for the lookup. + * + * *tuple_size* must be one of: + * + * **sizeof**\ (*tuple*\ **->ipv4**) + * Look for an IPv4 socket. + * **sizeof**\ (*tuple*\ **->ipv6**) + * Look for an IPv6 socket. + * + * If the *netns* is zero, then the socket lookup table in the + * netns associated with the *ctx* will be used. For the TC hooks, + * this in the netns of the device in the skb. For socket hooks, + * this in the netns of the socket. If *netns* is non-zero, then + * it specifies the ID of the netns relative to the netns + * associated with the *ctx*. + * + * All values for *flags* are reserved for future usage, and must + * be left at zero. + * + * This helper is available only if the kernel was compiled with + * **CONFIG_NET** configuration option. + * Return + * Pointer to *struct bpf_sock*, or NULL in case of failure. + * + * struct bpf_sock *bpf_sk_lookup_udp(void *ctx, struct bpf_sock_tuple *tuple, u32 tuple_size, u32 netns, u64 flags) + * Description + * Look for UDP socket matching *tuple*, optionally in a child + * network namespace *netns*. The return value must be checked, + * and if non-NULL, released via **bpf_sk_release**\ (). + * + * The *ctx* should point to the context of the program, such as + * the skb or socket (depending on the hook in use). This is used + * to determine the base network namespace for the lookup. + * + * *tuple_size* must be one of: + * + * **sizeof**\ (*tuple*\ **->ipv4**) + * Look for an IPv4 socket. + * **sizeof**\ (*tuple*\ **->ipv6**) + * Look for an IPv6 socket. + * + * If the *netns* is zero, then the socket lookup table in the + * netns associated with the *ctx* will be used. For the TC hooks, + * this in the netns of the device in the skb. For socket hooks, + * this in the netns of the socket. If *netns* is non-zero, then + * it specifies the ID of the netns relative to the netns + * associated with the *ctx*. + * + * All values for *flags* are reserved for future usage, and must + * be left at zero. + * + * This helper is available only if the kernel was compiled with + * **CONFIG_NET** configuration option. + * Return + * Pointer to *struct bpf_sock*, or NULL in case of failure. + * + * int bpf_sk_release(struct bpf_sock *sk) + * Description + * Release the reference held by *sock*. *sock* must be a non-NULL + * pointer that was returned from bpf_sk_lookup_xxx\ (). + * Return + * 0 on success, or a negative error in case of failure. + * + * int bpf_msg_push_data(struct sk_buff *skb, u32 start, u32 len, u64 flags) + * Description + * For socket policies, insert *len* bytes into msg at offset + * *start*. + * + * If a program of type **BPF_PROG_TYPE_SK_MSG** is run on a + * *msg* it may want to insert metadata or options into the msg. + * This can later be read and used by any of the lower layer BPF + * hooks. + * + * This helper may fail if under memory pressure (a malloc + * fails) in these cases BPF programs will get an appropriate + * error and BPF programs will need to handle them. 
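/*
 * Illustrative sketch, not part of the patch: the check-then-release
 * pattern that the bpf_sk_lookup_tcp()/bpf_sk_release() documentation
 * above requires.  The helper prototypes are declared by hand for
 * brevity; real programs normally pull them in from bpf_helpers.h, and
 * the tuple is assumed to have been filled in from the packet already.
 */
#include <linux/bpf.h>

static struct bpf_sock *(*bpf_sk_lookup_tcp)(void *ctx,
		struct bpf_sock_tuple *tuple, __u32 tuple_size,
		__u32 netns, __u64 flags) = (void *) BPF_FUNC_sk_lookup_tcp;
static int (*bpf_sk_release)(struct bpf_sock *sk) =
		(void *) BPF_FUNC_sk_release;

static int tcp_sk_exists(void *ctx, struct bpf_sock_tuple *tuple)
{
	struct bpf_sock *sk;
	int found;

	/* netns == 0: look in the namespace associated with @ctx. */
	sk = bpf_sk_lookup_tcp(ctx, tuple, sizeof(tuple->ipv4), 0, 0);
	found = sk != NULL;
	if (sk)
		bpf_sk_release(sk); /* every non-NULL result must be released */
	return found;
}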
+ * + * Return + * 0 on success, or a negative error in case of failure. */ #define __BPF_FUNC_MAPPER(FN) \ FN(unspec), \ @@ -2226,7 +2342,14 @@ union bpf_attr { FN(get_current_cgroup_id), \ FN(get_local_storage), \ FN(sk_select_reuseport), \ - FN(skb_ancestor_cgroup_id), + FN(skb_ancestor_cgroup_id), \ + FN(sk_lookup_tcp), \ + FN(sk_lookup_udp), \ + FN(sk_release), \ + FN(map_push_elem), \ + FN(map_pop_elem), \ + FN(map_peek_elem), \ + FN(msg_push_data), /* integer value in 'imm' field of BPF_CALL instruction selects which helper * function eBPF program intends to call @@ -2333,6 +2456,7 @@ struct __sk_buff { /* ... here. */ __u32 data_meta; + struct bpf_flow_keys *flow_keys; }; struct bpf_tunnel_key { @@ -2395,6 +2519,23 @@ struct bpf_sock { */ }; +struct bpf_sock_tuple { + union { + struct { + __be32 saddr; + __be32 daddr; + __be16 sport; + __be16 dport; + } ipv4; + struct { + __be32 saddr[4]; + __be32 daddr[4]; + __be16 sport; + __be16 dport; + } ipv6; + }; +}; + #define XDP_PACKET_HEADROOM 256 /* User return codes for XDP prog type. @@ -2778,4 +2919,27 @@ enum bpf_task_fd_type { BPF_FD_TYPE_URETPROBE, /* filename + offset */ }; +struct bpf_flow_keys { + __u16 nhoff; + __u16 thoff; + __u16 addr_proto; /* ETH_P_* of valid addrs */ + __u8 is_frag; + __u8 is_first_frag; + __u8 is_encap; + __u8 ip_proto; + __be16 n_proto; + __be16 sport; + __be16 dport; + union { + struct { + __be32 ipv4_src; + __be32 ipv4_dst; + }; + struct { + __u32 ipv6_src[4]; /* in6_addr; network order */ + __u32 ipv6_dst[4]; /* in6_addr; network order */ + }; + }; +}; + #endif /* _UAPI__LINUX_BPF_H__ */ diff --git a/include/uapi/linux/cryptouser.h b/include/uapi/linux/cryptouser.h index 19bf0ca6d635..6dafbc3e4414 100644 --- a/include/uapi/linux/cryptouser.h +++ b/include/uapi/linux/cryptouser.h @@ -29,6 +29,7 @@ enum { CRYPTO_MSG_UPDATEALG, CRYPTO_MSG_GETALG, CRYPTO_MSG_DELRNG, + CRYPTO_MSG_GETSTAT, __CRYPTO_MSG_MAX }; #define CRYPTO_MSG_MAX (__CRYPTO_MSG_MAX - 1) @@ -50,6 +51,16 @@ enum crypto_attr_type_t { CRYPTOCFGA_REPORT_AKCIPHER, /* struct crypto_report_akcipher */ CRYPTOCFGA_REPORT_KPP, /* struct crypto_report_kpp */ CRYPTOCFGA_REPORT_ACOMP, /* struct crypto_report_acomp */ + CRYPTOCFGA_STAT_LARVAL, /* struct crypto_stat */ + CRYPTOCFGA_STAT_HASH, /* struct crypto_stat */ + CRYPTOCFGA_STAT_BLKCIPHER, /* struct crypto_stat */ + CRYPTOCFGA_STAT_AEAD, /* struct crypto_stat */ + CRYPTOCFGA_STAT_COMPRESS, /* struct crypto_stat */ + CRYPTOCFGA_STAT_RNG, /* struct crypto_stat */ + CRYPTOCFGA_STAT_CIPHER, /* struct crypto_stat */ + CRYPTOCFGA_STAT_AKCIPHER, /* struct crypto_stat */ + CRYPTOCFGA_STAT_KPP, /* struct crypto_stat */ + CRYPTOCFGA_STAT_ACOMP, /* struct crypto_stat */ __CRYPTOCFGA_MAX #define CRYPTOCFGA_MAX (__CRYPTOCFGA_MAX - 1) @@ -65,6 +76,47 @@ struct crypto_user_alg { __u32 cru_flags; }; +struct crypto_stat { + char type[CRYPTO_MAX_NAME]; + union { + __u32 stat_encrypt_cnt; + __u32 stat_compress_cnt; + __u32 stat_generate_cnt; + __u32 stat_hash_cnt; + __u32 stat_setsecret_cnt; + }; + union { + __u64 stat_encrypt_tlen; + __u64 stat_compress_tlen; + __u64 stat_generate_tlen; + __u64 stat_hash_tlen; + }; + union { + __u32 stat_akcipher_err_cnt; + __u32 stat_cipher_err_cnt; + __u32 stat_compress_err_cnt; + __u32 stat_aead_err_cnt; + __u32 stat_hash_err_cnt; + __u32 stat_rng_err_cnt; + __u32 stat_kpp_err_cnt; + }; + union { + __u32 stat_decrypt_cnt; + __u32 stat_decompress_cnt; + __u32 stat_seed_cnt; + __u32 stat_generate_public_key_cnt; + }; + union { + __u64 stat_decrypt_tlen; + __u64 stat_decompress_tlen; 
+ }; + union { + __u32 stat_verify_cnt; + __u32 stat_compute_shared_secret_cnt; + }; + __u32 stat_sign_cnt; +}; + struct crypto_report_larval { char type[CRYPTO_MAX_NAME]; }; diff --git a/include/uapi/linux/dns_resolver.h b/include/uapi/linux/dns_resolver.h new file mode 100644 index 000000000000..129745f9c794 --- /dev/null +++ b/include/uapi/linux/dns_resolver.h @@ -0,0 +1,116 @@ +/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */ +/* DNS resolver interface definitions. + * + * Copyright (C) 2018 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public Licence + * as published by the Free Software Foundation; either version + * 2 of the Licence, or (at your option) any later version. + */ + +#ifndef _UAPI_LINUX_DNS_RESOLVER_H +#define _UAPI_LINUX_DNS_RESOLVER_H + +#include <linux/types.h> + +/* + * Type of payload. + */ +enum dns_payload_content_type { + DNS_PAYLOAD_IS_SERVER_LIST = 0, /* List of servers, requested by srv=1 */ +}; + +/* + * Type of address that might be found in an address record. + */ +enum dns_payload_address_type { + DNS_ADDRESS_IS_IPV4 = 0, /* 4-byte AF_INET address */ + DNS_ADDRESS_IS_IPV6 = 1, /* 16-byte AF_INET6 address */ +}; + +/* + * Type of protocol used to access a server. + */ +enum dns_payload_protocol_type { + DNS_SERVER_PROTOCOL_UNSPECIFIED = 0, + DNS_SERVER_PROTOCOL_UDP = 1, /* Use UDP to talk to the server */ + DNS_SERVER_PROTOCOL_TCP = 2, /* Use TCP to talk to the server */ +}; + +/* + * Source of record included in DNS resolver payload. + */ +enum dns_record_source { + DNS_RECORD_UNAVAILABLE = 0, /* No source available (empty record) */ + DNS_RECORD_FROM_CONFIG = 1, /* From local configuration data */ + DNS_RECORD_FROM_DNS_A = 2, /* From DNS A or AAAA record */ + DNS_RECORD_FROM_DNS_AFSDB = 3, /* From DNS AFSDB record */ + DNS_RECORD_FROM_DNS_SRV = 4, /* From DNS SRV record */ + DNS_RECORD_FROM_NSS = 5, /* From NSS */ + NR__dns_record_source +}; + +/* + * Status of record included in DNS resolver payload. + */ +enum dns_lookup_status { + DNS_LOOKUP_NOT_DONE = 0, /* No lookup has been made */ + DNS_LOOKUP_GOOD = 1, /* Good records obtained */ + DNS_LOOKUP_GOOD_WITH_BAD = 2, /* Good records, some decoding errors */ + DNS_LOOKUP_BAD = 3, /* Couldn't decode results */ + DNS_LOOKUP_GOT_NOT_FOUND = 4, /* Got a "Not Found" result */ + DNS_LOOKUP_GOT_LOCAL_FAILURE = 5, /* Local failure during lookup */ + DNS_LOOKUP_GOT_TEMP_FAILURE = 6, /* Temporary failure during lookup */ + DNS_LOOKUP_GOT_NS_FAILURE = 7, /* Name server failure */ + NR__dns_lookup_status +}; + +/* + * Header at the beginning of binary format payload. + */ +struct dns_payload_header { + __u8 zero; /* Zero byte: marks this as not being text */ + __u8 content; /* enum dns_payload_content_type */ + __u8 version; /* Encoding version */ +} __packed; + +/* + * Header at the beginning of a V1 server list. This is followed directly by + * the server records. Each server records begins with a struct of type + * dns_server_list_v1_server. + */ +struct dns_server_list_v1_header { + struct dns_payload_header hdr; + __u8 source; /* enum dns_record_source */ + __u8 status; /* enum dns_lookup_status */ + __u8 nr_servers; /* Number of server records following this */ +} __packed; + +/* + * Header at the beginning of each V1 server record. 
This is followed by the + * characters of the name with no NUL-terminator, followed by the address + * records for that server. Each address record begins with a struct of type + * struct dns_server_list_v1_address. + */ +struct dns_server_list_v1_server { + __u16 name_len; /* Length of name (LE) */ + __u16 priority; /* Priority (as SRV record) (LE) */ + __u16 weight; /* Weight (as SRV record) (LE) */ + __u16 port; /* UDP/TCP port number (LE) */ + __u8 source; /* enum dns_record_source */ + __u8 status; /* enum dns_lookup_status */ + __u8 protocol; /* enum dns_payload_protocol_type */ + __u8 nr_addrs; +} __packed; + +/* + * Header at the beginning of each V1 address record. This is followed by the + * bytes of the address, 4 for IPV4 and 16 for IPV6. + */ +struct dns_server_list_v1_address { + __u8 address_type; /* enum dns_payload_address_type */ +} __packed; + +#endif /* _UAPI_LINUX_DNS_RESOLVER_H */ diff --git a/include/uapi/linux/ethtool.h b/include/uapi/linux/ethtool.h index dc69391d2bba..c8f8e2455bf3 100644 --- a/include/uapi/linux/ethtool.h +++ b/include/uapi/linux/ethtool.h @@ -91,10 +91,6 @@ * %ETHTOOL_GSET to get the current values before making specific * changes and then applying them with %ETHTOOL_SSET. * - * Drivers that implement set_settings() should validate all fields - * other than @cmd that are not described as read-only or deprecated, - * and must ignore all fields described as read-only. - * * Deprecated fields should be ignored by both users and drivers. */ struct ethtool_cmd { @@ -1800,14 +1796,9 @@ enum ethtool_reset_flags { * rejected. * * Deprecated %ethtool_cmd fields transceiver, maxtxpkt and maxrxpkt - * are not available in %ethtool_link_settings. Until all drivers are - * converted to ignore them or to the new %ethtool_link_settings API, - * for both queries and changes, users should always try - * %ETHTOOL_GLINKSETTINGS first, and if it fails with -ENOTSUPP stick - * only to %ETHTOOL_GSET and %ETHTOOL_SSET consistently. If it - * succeeds, then users should stick to %ETHTOOL_GLINKSETTINGS and - * %ETHTOOL_SLINKSETTINGS (which would support drivers implementing - * either %ethtool_cmd or %ethtool_link_settings). + * are not available in %ethtool_link_settings. These fields will be + * always set to zero in %ETHTOOL_GSET reply and %ETHTOOL_SSET will + * fail if any of them is set to non-zero value. * * Users should assume that all fields not marked read-only are * writable and subject to validation by the driver. They should use diff --git a/include/uapi/linux/firewire-cdev.h b/include/uapi/linux/firewire-cdev.h index 1db453e4b550..1acd2b179aef 100644 --- a/include/uapi/linux/firewire-cdev.h +++ b/include/uapi/linux/firewire-cdev.h @@ -47,11 +47,11 @@ #define FW_CDEV_EVENT_ISO_INTERRUPT_MULTICHANNEL 0x09 /** - * struct fw_cdev_event_common - Common part of all fw_cdev_event_ types + * struct fw_cdev_event_common - Common part of all fw_cdev_event_* types * @closure: For arbitrary use by userspace - * @type: Discriminates the fw_cdev_event_ types + * @type: Discriminates the fw_cdev_event_* types * - * This struct may be used to access generic members of all fw_cdev_event_ + * This struct may be used to access generic members of all fw_cdev_event_* * types regardless of the specific type. 
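/*
 * Illustrative sketch, not part of the patch: walking a V1 server-list
 * payload laid out with the dns_resolver.h structures introduced above.
 * The buffer is assumed to have already been read out of the
 * dns_resolver keyring, bounds checking is reduced to the bare minimum,
 * and the multi-byte fields are little-endian as noted in the header.
 */
#ifndef __packed
#define __packed __attribute__((packed))	/* the uapi header expects this */
#endif
#include <linux/dns_resolver.h>
#include <stddef.h>
#include <stdio.h>
#include <string.h>

static void walk_dns_payload(const unsigned char *p, size_t len)
{
	struct dns_server_list_v1_header hdr;
	unsigned int i, j;

	if (len < sizeof(hdr))
		return;
	memcpy(&hdr, p, sizeof(hdr));
	if (hdr.hdr.zero != 0 ||
	    hdr.hdr.content != DNS_PAYLOAD_IS_SERVER_LIST ||
	    hdr.hdr.version != 1)
		return;
	p += sizeof(hdr);

	for (i = 0; i < hdr.nr_servers; i++) {
		struct dns_server_list_v1_server srv;

		memcpy(&srv, p, sizeof(srv));
		p += sizeof(srv);
		printf("server %.*s, %u address(es)\n",
		       (int)srv.name_len, p, srv.nr_addrs);
		p += srv.name_len;	/* name bytes, no NUL terminator */

		for (j = 0; j < srv.nr_addrs; j++) {
			struct dns_server_list_v1_address a;

			memcpy(&a, p, sizeof(a));
			p += sizeof(a);
			/* 4 bytes for IPv4, 16 for IPv6 */
			p += (a.address_type == DNS_ADDRESS_IS_IPV6) ? 16 : 4;
		}
	}
}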
* * Data passed in the @closure field for a request will be returned in the @@ -123,7 +123,13 @@ struct fw_cdev_event_response { /** * struct fw_cdev_event_request - Old version of &fw_cdev_event_request2 + * @closure: See &fw_cdev_event_common; set by %FW_CDEV_IOC_ALLOCATE ioctl * @type: See &fw_cdev_event_common; always %FW_CDEV_EVENT_REQUEST + * @tcode: Transaction code of the incoming request + * @offset: The offset into the 48-bit per-node address space + * @handle: Reference to the kernel-side pending request + * @length: Data length, i.e. the request's payload size in bytes + * @data: Incoming data, if any * * This event is sent instead of &fw_cdev_event_request2 if the kernel or * the client implements ABI version <= 3. &fw_cdev_event_request lacks @@ -353,7 +359,7 @@ struct fw_cdev_event_phy_packet { }; /** - * union fw_cdev_event - Convenience union of fw_cdev_event_ types + * union fw_cdev_event - Convenience union of fw_cdev_event_* types * @common: Valid for all types * @bus_reset: Valid if @common.type == %FW_CDEV_EVENT_BUS_RESET * @response: Valid if @common.type == %FW_CDEV_EVENT_RESPONSE @@ -735,7 +741,7 @@ struct fw_cdev_set_iso_channels { * @header: Header and payload in case of a transmit context. * * &struct fw_cdev_iso_packet is used to describe isochronous packet queues. - * Use the FW_CDEV_ISO_ macros to fill in @control. + * Use the FW_CDEV_ISO_* macros to fill in @control. * The @header array is empty in case of receive contexts. * * Context type %FW_CDEV_ISO_CONTEXT_TRANSMIT: @@ -842,7 +848,7 @@ struct fw_cdev_queue_iso { * the %FW_CDEV_ISO_SYNC bit set * @tags: Tag filter bit mask. Only valid for isochronous reception. * Determines the tag values for which packets will be accepted. - * Use FW_CDEV_ISO_CONTEXT_MATCH_ macros to set @tags. + * Use FW_CDEV_ISO_CONTEXT_MATCH_* macros to set @tags. * @handle: Isochronous context handle within which to transmit or receive */ struct fw_cdev_start_iso { @@ -1009,8 +1015,8 @@ struct fw_cdev_send_stream_packet { * on the same card as this device. After transmission, an * %FW_CDEV_EVENT_PHY_PACKET_SENT event is generated. * - * The payload @data[] shall be specified in host byte order. Usually, - * @data[1] needs to be the bitwise inverse of @data[0]. VersaPHY packets + * The payload @data\[\] shall be specified in host byte order. Usually, + * @data\[1\] needs to be the bitwise inverse of @data\[0\]. VersaPHY packets * are an exception to this rule. * * The ioctl is only permitted on device files which represent a local node. diff --git a/include/uapi/linux/fs.h b/include/uapi/linux/fs.h index 73e01918f996..a441ea1bfe6d 100644 --- a/include/uapi/linux/fs.h +++ b/include/uapi/linux/fs.h @@ -279,8 +279,8 @@ struct fsxattr { #define FS_ENCRYPTION_MODE_AES_256_CTS 4 #define FS_ENCRYPTION_MODE_AES_128_CBC 5 #define FS_ENCRYPTION_MODE_AES_128_CTS 6 -#define FS_ENCRYPTION_MODE_SPECK128_256_XTS 7 -#define FS_ENCRYPTION_MODE_SPECK128_256_CTS 8 +#define FS_ENCRYPTION_MODE_SPECK128_256_XTS 7 /* Removed, do not use. */ +#define FS_ENCRYPTION_MODE_SPECK128_256_CTS 8 /* Removed, do not use. 
*/ struct fscrypt_policy { __u8 version; diff --git a/include/uapi/linux/gen_stats.h b/include/uapi/linux/gen_stats.h index 24a861c0d29d..065408e16a80 100644 --- a/include/uapi/linux/gen_stats.h +++ b/include/uapi/linux/gen_stats.h @@ -12,6 +12,7 @@ enum { TCA_STATS_APP, TCA_STATS_RATE_EST64, TCA_STATS_PAD, + TCA_STATS_BASIC_HW, __TCA_STATS_MAX, }; #define TCA_STATS_MAX (__TCA_STATS_MAX - 1) diff --git a/include/uapi/linux/gpio.h b/include/uapi/linux/gpio.h index 1bf6e6df084b..4ebfe0ac6c5b 100644 --- a/include/uapi/linux/gpio.h +++ b/include/uapi/linux/gpio.h @@ -65,7 +65,7 @@ struct gpioline_info { /** * struct gpiohandle_request - Information about a GPIO handle request - * @lineoffsets: an array desired lines, specified by offset index for the + * @lineoffsets: an array of desired lines, specified by offset index for the * associated GPIO device * @flags: desired flags for the desired GPIO lines, such as * GPIOHANDLE_REQUEST_OUTPUT, GPIOHANDLE_REQUEST_ACTIVE_LOW etc, OR:ed diff --git a/include/uapi/linux/if_addr.h b/include/uapi/linux/if_addr.h index ebaf5701c9db..dfcf3ce0097f 100644 --- a/include/uapi/linux/if_addr.h +++ b/include/uapi/linux/if_addr.h @@ -34,6 +34,7 @@ enum { IFA_MULTICAST, IFA_FLAGS, IFA_RT_PRIORITY, /* u32, priority/metric for prefix route */ + IFA_TARGET_NETNSID, __IFA_MAX, }; diff --git a/include/uapi/linux/if_arp.h b/include/uapi/linux/if_arp.h index 4605527ca41b..c3cc5a9e5eaf 100644 --- a/include/uapi/linux/if_arp.h +++ b/include/uapi/linux/if_arp.h @@ -114,18 +114,18 @@ /* ARP ioctl request. */ struct arpreq { - struct sockaddr arp_pa; /* protocol address */ - struct sockaddr arp_ha; /* hardware address */ - int arp_flags; /* flags */ - struct sockaddr arp_netmask; /* netmask (only for proxy arps) */ - char arp_dev[16]; + struct sockaddr arp_pa; /* protocol address */ + struct sockaddr arp_ha; /* hardware address */ + int arp_flags; /* flags */ + struct sockaddr arp_netmask; /* netmask (only for proxy arps) */ + char arp_dev[IFNAMSIZ]; }; struct arpreq_old { - struct sockaddr arp_pa; /* protocol address */ - struct sockaddr arp_ha; /* hardware address */ - int arp_flags; /* flags */ - struct sockaddr arp_netmask; /* netmask (only for proxy arps) */ + struct sockaddr arp_pa; /* protocol address */ + struct sockaddr arp_ha; /* hardware address */ + int arp_flags; /* flags */ + struct sockaddr arp_netmask; /* netmask (only for proxy arps) */ }; /* ARP Flag values. */ diff --git a/include/uapi/linux/if_fddi.h b/include/uapi/linux/if_fddi.h index 75eed8b62823..7239aa9c0766 100644 --- a/include/uapi/linux/if_fddi.h +++ b/include/uapi/linux/if_fddi.h @@ -6,9 +6,10 @@ * * Global definitions for the ANSI FDDI interface. * - * Version: @(#)if_fddi.h 1.0.2 Sep 29 2004 + * Version: @(#)if_fddi.h 1.0.3 Oct 6 2018 * - * Author: Lawrence V. Stefani, <stefani@lkg.dec.com> + * Author: Lawrence V. Stefani, <stefani@yahoo.com> + * Maintainer: Maciej W. Rozycki, <macro@linux-mips.org> * * if_fddi.h is based on previous if_ether.h and if_tr.h work by * Fred N. 
van Kempen, <waltje@uWalt.NL.Mugnet.ORG> @@ -45,7 +46,21 @@ #define FDDI_K_OUI_LEN 3 /* Octets in OUI in 802.2 SNAP header */ -/* Define FDDI Frame Control (FC) Byte values */ +/* Define FDDI Frame Control (FC) Byte masks */ +#define FDDI_FC_K_CLASS_MASK 0x80 /* class bit */ +#define FDDI_FC_K_CLASS_SYNC 0x80 +#define FDDI_FC_K_CLASS_ASYNC 0x00 +#define FDDI_FC_K_ALEN_MASK 0x40 /* address length bit */ +#define FDDI_FC_K_ALEN_48 0x40 +#define FDDI_FC_K_ALEN_16 0x00 +#define FDDI_FC_K_FORMAT_MASK 0x30 /* format bits */ +#define FDDI_FC_K_FORMAT_FUTURE 0x30 +#define FDDI_FC_K_FORMAT_IMPLEMENTOR 0x20 +#define FDDI_FC_K_FORMAT_LLC 0x10 +#define FDDI_FC_K_FORMAT_MANAGEMENT 0x00 +#define FDDI_FC_K_CONTROL_MASK 0x0f /* control bits */ + +/* Define FDDI Frame Control (FC) Byte specific values */ #define FDDI_FC_K_VOID 0x00 #define FDDI_FC_K_NON_RESTRICTED_TOKEN 0x80 #define FDDI_FC_K_RESTRICTED_TOKEN 0xC0 diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h index 43391e2d1153..1debfa42cba1 100644 --- a/include/uapi/linux/if_link.h +++ b/include/uapi/linux/if_link.h @@ -161,6 +161,7 @@ enum { IFLA_EVENT, IFLA_NEW_NETNSID, IFLA_IF_NETNSID, + IFLA_TARGET_NETNSID = IFLA_IF_NETNSID, /* new alias */ IFLA_CARRIER_UP_COUNT, IFLA_CARRIER_DOWN_COUNT, IFLA_NEW_IFINDEX, @@ -286,6 +287,7 @@ enum { IFLA_BR_MCAST_STATS_ENABLED, IFLA_BR_MCAST_IGMP_VERSION, IFLA_BR_MCAST_MLD_VERSION, + IFLA_BR_VLAN_STATS_PER_PORT, __IFLA_BR_MAX, }; @@ -554,6 +556,7 @@ enum { IFLA_GENEVE_UDP_ZERO_CSUM6_TX, IFLA_GENEVE_UDP_ZERO_CSUM6_RX, IFLA_GENEVE_LABEL, + IFLA_GENEVE_TTL_INHERIT, __IFLA_GENEVE_MAX }; #define IFLA_GENEVE_MAX (__IFLA_GENEVE_MAX - 1) diff --git a/include/uapi/linux/if_packet.h b/include/uapi/linux/if_packet.h index 67b61d91d89b..467b654bd4c7 100644 --- a/include/uapi/linux/if_packet.h +++ b/include/uapi/linux/if_packet.h @@ -57,6 +57,7 @@ struct sockaddr_ll { #define PACKET_QDISC_BYPASS 20 #define PACKET_ROLLOVER_STATS 21 #define PACKET_FANOUT_DATA 22 +#define PACKET_IGNORE_OUTGOING 23 #define PACKET_FANOUT_HASH 0 #define PACKET_FANOUT_LB 1 diff --git a/include/uapi/linux/in6.h b/include/uapi/linux/in6.h index ed291e55f024..71d82fe15b03 100644 --- a/include/uapi/linux/in6.h +++ b/include/uapi/linux/in6.h @@ -177,6 +177,7 @@ struct in6_flowlabel_req { #define IPV6_V6ONLY 26 #define IPV6_JOIN_ANYCAST 27 #define IPV6_LEAVE_ANYCAST 28 +#define IPV6_MULTICAST_ALL 29 /* IPV6_MTU_DISCOVER values */ #define IPV6_PMTUDISC_DONT 0 diff --git a/include/uapi/linux/input-event-codes.h b/include/uapi/linux/input-event-codes.h index 53fbae27b280..6d180cc60a5d 100644 --- a/include/uapi/linux/input-event-codes.h +++ b/include/uapi/linux/input-event-codes.h @@ -708,6 +708,15 @@ #define REL_DIAL 0x07 #define REL_WHEEL 0x08 #define REL_MISC 0x09 +/* + * 0x0a is reserved and should not be used in input drivers. + * It was used by HID as REL_MISC+1 and userspace needs to detect if + * the next REL_* event is correct or is just REL_MISC + n. + * We define here REL_RESERVED so userspace can rely on it and detect + * the situation described above. + */ +#define REL_RESERVED 0x0a +#define REL_WHEEL_HI_RES 0x0b #define REL_MAX 0x0f #define REL_CNT (REL_MAX+1) @@ -744,6 +753,15 @@ #define ABS_MISC 0x28 +/* + * 0x2e is reserved and should not be used in input drivers. + * It was used by HID as ABS_MISC+6 and userspace needs to detect if + * the next ABS_* event is correct or is just ABS_MISC + n. + * We define here ABS_RESERVED so userspace can rely on it and detect + * the situation described above. 
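/*
 * Illustrative sketch, not part of the patch: how an evdev reader can use
 * the new REL_WHEEL_HI_RES code while treating the REL_RESERVED slot as
 * the placeholder described above.  The fd is assumed to be an
 * already-opened evdev device node.
 */
#include <linux/input.h>
#include <stdio.h>
#include <unistd.h>

static void read_wheel_events(int fd)
{
	struct input_event ev;

	while (read(fd, &ev, sizeof(ev)) == (ssize_t)sizeof(ev)) {
		if (ev.type != EV_REL)
			continue;
		if (ev.code == REL_WHEEL_HI_RES)
			printf("hi-res wheel movement: %d\n", ev.value);
		else if (ev.code == REL_WHEEL)
			printf("legacy wheel: %d notch(es)\n", ev.value);
		else if (ev.code == REL_RESERVED)
			; /* reserved slot, not a real axis: ignore it */
	}
}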
+ */ +#define ABS_RESERVED 0x2e + #define ABS_MT_SLOT 0x2f /* MT slot being modified */ #define ABS_MT_TOUCH_MAJOR 0x30 /* Major axis of touching ellipse */ #define ABS_MT_TOUCH_MINOR 0x31 /* Minor axis (omit if circular) */ diff --git a/include/uapi/linux/keyctl.h b/include/uapi/linux/keyctl.h index 7b8c9e19bad1..0f3cb13db8e9 100644 --- a/include/uapi/linux/keyctl.h +++ b/include/uapi/linux/keyctl.h @@ -65,7 +65,12 @@ /* keyctl structures */ struct keyctl_dh_params { - __s32 private; + union { +#ifndef __cplusplus + __s32 private; +#endif + __s32 priv; + }; __s32 prime; __s32 base; }; diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index 251be353f950..2b7a652c9fa4 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h @@ -420,13 +420,19 @@ struct kvm_run { struct kvm_coalesced_mmio_zone { __u64 addr; __u32 size; - __u32 pad; + union { + __u32 pad; + __u32 pio; + }; }; struct kvm_coalesced_mmio { __u64 phys_addr; __u32 len; - __u32 pad; + union { + __u32 pad; + __u32 pio; + }; __u8 data[8]; }; @@ -719,6 +725,7 @@ struct kvm_ppc_one_seg_page_size { #define KVM_PPC_PAGE_SIZES_REAL 0x00000001 #define KVM_PPC_1T_SEGMENTS 0x00000002 +#define KVM_PPC_NO_HASH 0x00000004 struct kvm_ppc_smmu_info { __u64 flags; @@ -751,6 +758,15 @@ struct kvm_ppc_resize_hpt { #define KVM_S390_SIE_PAGE_OFFSET 1 /* + * On arm64, machine type can be used to request the physical + * address size for the VM. Bits[7-0] are reserved for the guest + * PA size shift (i.e, log2(PA_Size)). For backward compatibility, + * value 0 implies the default IPA size, 40bits. + */ +#define KVM_VM_TYPE_ARM_IPA_SIZE_MASK 0xffULL +#define KVM_VM_TYPE_ARM_IPA_SIZE(x) \ + ((x) & KVM_VM_TYPE_ARM_IPA_SIZE_MASK) +/* * ioctls for /dev/kvm fds: */ #define KVM_GET_API_VERSION _IO(KVMIO, 0x00) @@ -953,6 +969,12 @@ struct kvm_ppc_resize_hpt { #define KVM_CAP_NESTED_STATE 157 #define KVM_CAP_ARM_INJECT_SERROR_ESR 158 #define KVM_CAP_MSR_PLATFORM_INFO 159 +#define KVM_CAP_PPC_NESTED_HV 160 +#define KVM_CAP_HYPERV_SEND_IPI 161 +#define KVM_CAP_COALESCED_PIO 162 +#define KVM_CAP_HYPERV_ENLIGHTENED_VMCS 163 +#define KVM_CAP_EXCEPTION_PAYLOAD 164 +#define KVM_CAP_ARM_VM_IPA_SIZE 165 #ifdef KVM_CAP_IRQ_ROUTING diff --git a/include/uapi/linux/magic.h b/include/uapi/linux/magic.h index 1a6fee974116..96c24478d8ce 100644 --- a/include/uapi/linux/magic.h +++ b/include/uapi/linux/magic.h @@ -29,6 +29,7 @@ #define HPFS_SUPER_MAGIC 0xf995e849 #define ISOFS_SUPER_MAGIC 0x9660 #define JFFS2_SUPER_MAGIC 0x72b6 +#define XFS_SUPER_MAGIC 0x58465342 /* "XFSB" */ #define PSTOREFS_MAGIC 0x6165676C #define EFIVARFS_MAGIC 0xde5e81e4 #define HOSTFS_SUPER_MAGIC 0x00c0ffee diff --git a/include/uapi/linux/memfd.h b/include/uapi/linux/memfd.h index 015a4c0bbb47..7a8a26751c23 100644 --- a/include/uapi/linux/memfd.h +++ b/include/uapi/linux/memfd.h @@ -25,7 +25,9 @@ #define MFD_HUGE_2MB HUGETLB_FLAG_ENCODE_2MB #define MFD_HUGE_8MB HUGETLB_FLAG_ENCODE_8MB #define MFD_HUGE_16MB HUGETLB_FLAG_ENCODE_16MB +#define MFD_HUGE_32MB HUGETLB_FLAG_ENCODE_32MB #define MFD_HUGE_256MB HUGETLB_FLAG_ENCODE_256MB +#define MFD_HUGE_512MB HUGETLB_FLAG_ENCODE_512MB #define MFD_HUGE_1GB HUGETLB_FLAG_ENCODE_1GB #define MFD_HUGE_2GB HUGETLB_FLAG_ENCODE_2GB #define MFD_HUGE_16GB HUGETLB_FLAG_ENCODE_16GB diff --git a/include/uapi/linux/mman.h b/include/uapi/linux/mman.h index bfd5938fede6..d0f515d53299 100644 --- a/include/uapi/linux/mman.h +++ b/include/uapi/linux/mman.h @@ -28,7 +28,9 @@ #define MAP_HUGE_2MB HUGETLB_FLAG_ENCODE_2MB #define MAP_HUGE_8MB 
HUGETLB_FLAG_ENCODE_8MB #define MAP_HUGE_16MB HUGETLB_FLAG_ENCODE_16MB +#define MAP_HUGE_32MB HUGETLB_FLAG_ENCODE_32MB #define MAP_HUGE_256MB HUGETLB_FLAG_ENCODE_256MB +#define MAP_HUGE_512MB HUGETLB_FLAG_ENCODE_512MB #define MAP_HUGE_1GB HUGETLB_FLAG_ENCODE_1GB #define MAP_HUGE_2GB HUGETLB_FLAG_ENCODE_2GB #define MAP_HUGE_16GB HUGETLB_FLAG_ENCODE_16GB diff --git a/include/uapi/linux/ncsi.h b/include/uapi/linux/ncsi.h index 4c292ecbb748..0a26a5576645 100644 --- a/include/uapi/linux/ncsi.h +++ b/include/uapi/linux/ncsi.h @@ -23,6 +23,9 @@ * optionally the preferred NCSI_ATTR_CHANNEL_ID. * @NCSI_CMD_CLEAR_INTERFACE: clear any preferred package/channel combination. * Requires NCSI_ATTR_IFINDEX. + * @NCSI_CMD_SEND_CMD: send NC-SI command to network card. + * Requires NCSI_ATTR_IFINDEX, NCSI_ATTR_PACKAGE_ID + * and NCSI_ATTR_CHANNEL_ID. * @NCSI_CMD_MAX: highest command number */ enum ncsi_nl_commands { @@ -30,6 +33,7 @@ enum ncsi_nl_commands { NCSI_CMD_PKG_INFO, NCSI_CMD_SET_INTERFACE, NCSI_CMD_CLEAR_INTERFACE, + NCSI_CMD_SEND_CMD, __NCSI_CMD_AFTER_LAST, NCSI_CMD_MAX = __NCSI_CMD_AFTER_LAST - 1 @@ -43,6 +47,7 @@ enum ncsi_nl_commands { * @NCSI_ATTR_PACKAGE_LIST: nested array of NCSI_PKG_ATTR attributes * @NCSI_ATTR_PACKAGE_ID: package ID * @NCSI_ATTR_CHANNEL_ID: channel ID + * @NCSI_ATTR_DATA: command payload * @NCSI_ATTR_MAX: highest attribute number */ enum ncsi_nl_attrs { @@ -51,6 +56,7 @@ enum ncsi_nl_attrs { NCSI_ATTR_PACKAGE_LIST, NCSI_ATTR_PACKAGE_ID, NCSI_ATTR_CHANNEL_ID, + NCSI_ATTR_DATA, __NCSI_ATTR_AFTER_LAST, NCSI_ATTR_MAX = __NCSI_ATTR_AFTER_LAST - 1 diff --git a/include/uapi/linux/ndctl.h b/include/uapi/linux/ndctl.h index 7e27070b9440..f57c9e434d2d 100644 --- a/include/uapi/linux/ndctl.h +++ b/include/uapi/linux/ndctl.h @@ -128,37 +128,31 @@ enum { static inline const char *nvdimm_bus_cmd_name(unsigned cmd) { - static const char * const names[] = { - [ND_CMD_ARS_CAP] = "ars_cap", - [ND_CMD_ARS_START] = "ars_start", - [ND_CMD_ARS_STATUS] = "ars_status", - [ND_CMD_CLEAR_ERROR] = "clear_error", - [ND_CMD_CALL] = "cmd_call", - }; - - if (cmd < ARRAY_SIZE(names) && names[cmd]) - return names[cmd]; - return "unknown"; + switch (cmd) { + case ND_CMD_ARS_CAP: return "ars_cap"; + case ND_CMD_ARS_START: return "ars_start"; + case ND_CMD_ARS_STATUS: return "ars_status"; + case ND_CMD_CLEAR_ERROR: return "clear_error"; + case ND_CMD_CALL: return "cmd_call"; + default: return "unknown"; + } } static inline const char *nvdimm_cmd_name(unsigned cmd) { - static const char * const names[] = { - [ND_CMD_SMART] = "smart", - [ND_CMD_SMART_THRESHOLD] = "smart_thresh", - [ND_CMD_DIMM_FLAGS] = "flags", - [ND_CMD_GET_CONFIG_SIZE] = "get_size", - [ND_CMD_GET_CONFIG_DATA] = "get_data", - [ND_CMD_SET_CONFIG_DATA] = "set_data", - [ND_CMD_VENDOR_EFFECT_LOG_SIZE] = "effect_size", - [ND_CMD_VENDOR_EFFECT_LOG] = "effect_log", - [ND_CMD_VENDOR] = "vendor", - [ND_CMD_CALL] = "cmd_call", - }; - - if (cmd < ARRAY_SIZE(names) && names[cmd]) - return names[cmd]; - return "unknown"; + switch (cmd) { + case ND_CMD_SMART: return "smart"; + case ND_CMD_SMART_THRESHOLD: return "smart_thresh"; + case ND_CMD_DIMM_FLAGS: return "flags"; + case ND_CMD_GET_CONFIG_SIZE: return "get_size"; + case ND_CMD_GET_CONFIG_DATA: return "get_data"; + case ND_CMD_SET_CONFIG_DATA: return "set_data"; + case ND_CMD_VENDOR_EFFECT_LOG_SIZE: return "effect_size"; + case ND_CMD_VENDOR_EFFECT_LOG: return "effect_log"; + case ND_CMD_VENDOR: return "vendor"; + case ND_CMD_CALL: return "cmd_call"; + default: return "unknown"; + } } #define ND_IOCTL 
'N' @@ -208,10 +202,6 @@ enum nd_driver_flags { ND_DRIVER_DAX_PMEM = 1 << ND_DEVICE_DAX_PMEM, }; -enum { - ND_MIN_NAMESPACE_SIZE = PAGE_SIZE, -}; - enum ars_masks { ARS_STATUS_MASK = 0x0000FFFF, ARS_EXT_STATUS_SHIFT = 16, diff --git a/include/uapi/linux/neighbour.h b/include/uapi/linux/neighbour.h index 904db6148476..998155444e0d 100644 --- a/include/uapi/linux/neighbour.h +++ b/include/uapi/linux/neighbour.h @@ -43,6 +43,7 @@ enum { #define NTF_PROXY 0x08 /* == ATF_PUBL */ #define NTF_EXT_LEARNED 0x10 #define NTF_OFFLOADED 0x20 +#define NTF_STICKY 0x40 #define NTF_ROUTER 0x80 /* diff --git a/include/uapi/linux/netfilter/nf_tables.h b/include/uapi/linux/netfilter/nf_tables.h index e23290ffdc77..579974b0bf0d 100644 --- a/include/uapi/linux/netfilter/nf_tables.h +++ b/include/uapi/linux/netfilter/nf_tables.h @@ -826,12 +826,14 @@ enum nft_meta_keys { * @NFT_RT_NEXTHOP4: routing nexthop for IPv4 * @NFT_RT_NEXTHOP6: routing nexthop for IPv6 * @NFT_RT_TCPMSS: fetch current path tcp mss + * @NFT_RT_XFRM: boolean, skb->dst->xfrm != NULL */ enum nft_rt_keys { NFT_RT_CLASSID, NFT_RT_NEXTHOP4, NFT_RT_NEXTHOP6, NFT_RT_TCPMSS, + NFT_RT_XFRM, __NFT_RT_MAX }; #define NFT_RT_MAX (__NFT_RT_MAX - 1) @@ -1175,6 +1177,21 @@ enum nft_quota_attributes { #define NFTA_QUOTA_MAX (__NFTA_QUOTA_MAX - 1) /** + * enum nft_secmark_attributes - nf_tables secmark object netlink attributes + * + * @NFTA_SECMARK_CTX: security context (NLA_STRING) + */ +enum nft_secmark_attributes { + NFTA_SECMARK_UNSPEC, + NFTA_SECMARK_CTX, + __NFTA_SECMARK_MAX, +}; +#define NFTA_SECMARK_MAX (__NFTA_SECMARK_MAX - 1) + +/* Max security context length */ +#define NFT_SECMARK_CTX_MAXLEN 256 + +/** * enum nft_reject_types - nf_tables reject expression reject types * * @NFT_REJECT_ICMP_UNREACH: reject using ICMP unreachable @@ -1430,7 +1447,8 @@ enum nft_ct_timeout_timeout_attributes { #define NFT_OBJECT_CONNLIMIT 5 #define NFT_OBJECT_TUNNEL 6 #define NFT_OBJECT_CT_TIMEOUT 7 -#define __NFT_OBJECT_MAX 8 +#define NFT_OBJECT_SECMARK 8 +#define __NFT_OBJECT_MAX 9 #define NFT_OBJECT_MAX (__NFT_OBJECT_MAX - 1) /** @@ -1493,9 +1511,16 @@ enum nft_flowtable_hook_attributes { }; #define NFTA_FLOWTABLE_HOOK_MAX (__NFTA_FLOWTABLE_HOOK_MAX - 1) +/** + * enum nft_osf_attributes - nftables osf expression netlink attributes + * + * @NFTA_OSF_DREG: destination register (NLA_U32: nft_registers) + * @NFTA_OSF_TTL: Value of the TTL osf option (NLA_U8) + */ enum nft_osf_attributes { NFTA_OSF_UNSPEC, NFTA_OSF_DREG, + NFTA_OSF_TTL, __NFTA_OSF_MAX, }; #define NFTA_OSF_MAX (__NFTA_OSF_MAX - 1) @@ -1512,6 +1537,35 @@ enum nft_devices_attributes { }; #define NFTA_DEVICE_MAX (__NFTA_DEVICE_MAX - 1) +/* + * enum nft_xfrm_attributes - nf_tables xfrm expr netlink attributes + * + * @NFTA_XFRM_DREG: destination register (NLA_U32) + * @NFTA_XFRM_KEY: enum nft_xfrm_keys (NLA_U32) + * @NFTA_XFRM_DIR: direction (NLA_U8) + * @NFTA_XFRM_SPNUM: index in secpath array (NLA_U32) + */ +enum nft_xfrm_attributes { + NFTA_XFRM_UNSPEC, + NFTA_XFRM_DREG, + NFTA_XFRM_KEY, + NFTA_XFRM_DIR, + NFTA_XFRM_SPNUM, + __NFTA_XFRM_MAX +}; +#define NFTA_XFRM_MAX (__NFTA_XFRM_MAX - 1) + +enum nft_xfrm_keys { + NFT_XFRM_KEY_UNSPEC, + NFT_XFRM_KEY_DADDR_IP4, + NFT_XFRM_KEY_DADDR_IP6, + NFT_XFRM_KEY_SADDR_IP4, + NFT_XFRM_KEY_SADDR_IP6, + NFT_XFRM_KEY_REQID, + NFT_XFRM_KEY_SPI, + __NFT_XFRM_KEY_MAX, +}; +#define NFT_XFRM_KEY_MAX (__NFT_XFRM_KEY_MAX - 1) /** * enum nft_trace_attributes - nf_tables trace netlink attributes diff --git a/include/uapi/linux/netfilter/xt_cgroup.h 
b/include/uapi/linux/netfilter/xt_cgroup.h index e96dfa1b34f7..b74e370d6133 100644 --- a/include/uapi/linux/netfilter/xt_cgroup.h +++ b/include/uapi/linux/netfilter/xt_cgroup.h @@ -22,4 +22,20 @@ struct xt_cgroup_info_v1 { void *priv __attribute__((aligned(8))); }; +#define XT_CGROUP_PATH_MAX 512 + +struct xt_cgroup_info_v2 { + __u8 has_path; + __u8 has_classid; + __u8 invert_path; + __u8 invert_classid; + union { + char path[XT_CGROUP_PATH_MAX]; + __u32 classid; + }; + + /* kernel internal data */ + void *priv __attribute__((aligned(8))); +}; + #endif /* _UAPI_XT_CGROUP_H */ diff --git a/include/uapi/linux/netlink.h b/include/uapi/linux/netlink.h index 776bc92e9118..486ed1f0c0bc 100644 --- a/include/uapi/linux/netlink.h +++ b/include/uapi/linux/netlink.h @@ -155,6 +155,7 @@ enum nlmsgerr_attrs { #define NETLINK_LIST_MEMBERSHIPS 9 #define NETLINK_CAP_ACK 10 #define NETLINK_EXT_ACK 11 +#define NETLINK_DUMP_STRICT_CHK 12 struct nl_pktinfo { __u32 group; diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h index 7acc16f34942..6d610bae30a9 100644 --- a/include/uapi/linux/nl80211.h +++ b/include/uapi/linux/nl80211.h @@ -1033,6 +1033,9 @@ * %NL80211_ATTR_CHANNEL_WIDTH,%NL80211_ATTR_NSS attributes with its * address(specified in %NL80211_ATTR_MAC). * + * @NL80211_CMD_GET_FTM_RESPONDER_STATS: Retrieve FTM responder statistics, in + * the %NL80211_ATTR_FTM_RESPONDER_STATS attribute. + * * @NL80211_CMD_MAX: highest used command number * @__NL80211_CMD_AFTER_LAST: internal use */ @@ -1245,6 +1248,8 @@ enum nl80211_commands { NL80211_CMD_CONTROL_PORT_FRAME, + NL80211_CMD_GET_FTM_RESPONDER_STATS, + /* add new commands above here */ /* used to define NL80211_CMD_MAX below */ @@ -2241,6 +2246,14 @@ enum nl80211_commands { * association request when used with NL80211_CMD_NEW_STATION). Can be set * only if %NL80211_STA_FLAG_WME is set. * + * @NL80211_ATTR_FTM_RESPONDER: nested attribute which user-space can include + * in %NL80211_CMD_START_AP or %NL80211_CMD_SET_BEACON for fine timing + * measurement (FTM) responder functionality and containing parameters as + * possible, see &enum nl80211_ftm_responder_attr + * + * @NL80211_ATTR_FTM_RESPONDER_STATS: Nested attribute with FTM responder + * statistics, see &enum nl80211_ftm_responder_stats. + * * @NUM_NL80211_ATTR: total number of nl80211_attrs available * @NL80211_ATTR_MAX: highest attribute number currently defined * @__NL80211_ATTR_AFTER_LAST: internal use @@ -2682,6 +2695,10 @@ enum nl80211_attrs { NL80211_ATTR_HE_CAPABILITY, + NL80211_ATTR_FTM_RESPONDER, + + NL80211_ATTR_FTM_RESPONDER_STATS, + /* add attributes here, update the policy in nl80211.c */ __NL80211_ATTR_AFTER_LAST, @@ -3050,8 +3067,13 @@ enum nl80211_sta_bss_param { * received from the station (u64, usec) * @NL80211_STA_INFO_PAD: attribute used for padding for 64-bit alignment * @NL80211_STA_INFO_ACK_SIGNAL: signal strength of the last ACK frame(u8, dBm) - * @NL80211_STA_INFO_DATA_ACK_SIGNAL_AVG: avg signal strength of (data) - * ACK frame (s8, dBm) + * @NL80211_STA_INFO_ACK_SIGNAL_AVG: avg signal strength of ACK frames (s8, dBm) + * @NL80211_STA_INFO_RX_MPDUS: total number of received packets (MPDUs) + * (u32, from this station) + * @NL80211_STA_INFO_FCS_ERROR_COUNT: total number of packets (MPDUs) received + * with an FCS error (u32, from this station). This count may not include + * some packets with an FCS error due to TA corruption. Hence this counter + * might not be fully accurate. 
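/*
 * Illustrative sketch, not part of the patch: opting a netlink socket in
 * to the new NETLINK_DUMP_STRICT_CHK option added above, so that
 * malformed dump requests are rejected rather than silently accepted.
 * SOL_NETLINK is defined here as a fallback in case the libc headers do
 * not provide it.
 */
#include <linux/netlink.h>
#include <sys/socket.h>

#ifndef SOL_NETLINK
#define SOL_NETLINK 270
#endif

static int enable_strict_dump_checks(int nlfd)
{
	int one = 1;

	return setsockopt(nlfd, SOL_NETLINK, NETLINK_DUMP_STRICT_CHK,
			  &one, sizeof(one));
}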
* @__NL80211_STA_INFO_AFTER_LAST: internal * @NL80211_STA_INFO_MAX: highest possible station info attribute */ @@ -3091,13 +3113,19 @@ enum nl80211_sta_info { NL80211_STA_INFO_RX_DURATION, NL80211_STA_INFO_PAD, NL80211_STA_INFO_ACK_SIGNAL, - NL80211_STA_INFO_DATA_ACK_SIGNAL_AVG, + NL80211_STA_INFO_ACK_SIGNAL_AVG, + NL80211_STA_INFO_RX_MPDUS, + NL80211_STA_INFO_FCS_ERROR_COUNT, /* keep last */ __NL80211_STA_INFO_AFTER_LAST, NL80211_STA_INFO_MAX = __NL80211_STA_INFO_AFTER_LAST - 1 }; +/* we renamed this - stay compatible */ +#define NL80211_STA_INFO_DATA_ACK_SIGNAL_AVG NL80211_STA_INFO_ACK_SIGNAL_AVG + + /** * enum nl80211_tid_stats - per TID statistics attributes * @__NL80211_TID_STATS_INVALID: attribute number 0 is reserved @@ -4338,7 +4366,7 @@ enum nl80211_txrate_gi { * enum nl80211_band - Frequency band * @NL80211_BAND_2GHZ: 2.4 GHz ISM band * @NL80211_BAND_5GHZ: around 5 GHz band (4.9 - 5.7 GHz) - * @NL80211_BAND_60GHZ: around 60 GHz band (58.32 - 64.80 GHz) + * @NL80211_BAND_60GHZ: around 60 GHz band (58.32 - 69.12 GHz) * @NUM_NL80211_BANDS: number of bands, avoid using this in userspace * since newer kernel versions may support more bands */ @@ -5213,9 +5241,8 @@ enum nl80211_feature_flags { * "radar detected" event. * @NL80211_EXT_FEATURE_CONTROL_PORT_OVER_NL80211: Driver supports sending and * receiving control port frames over nl80211 instead of the netdevice. - * @NL80211_EXT_FEATURE_DATA_ACK_SIGNAL_SUPPORT: This Driver support data ack - * rssi if firmware support, this flag is to intimate about ack rssi - * support to nl80211. + * @NL80211_EXT_FEATURE_ACK_SIGNAL_SUPPORT: This driver/device supports + * (average) ACK signal strength reporting. * @NL80211_EXT_FEATURE_TXQS: Driver supports FQ-CoDel-enabled intermediate * TXQs. * @NL80211_EXT_FEATURE_SCAN_RANDOM_SN: Driver/device supports randomizing the @@ -5223,6 +5250,13 @@ enum nl80211_feature_flags { * @NL80211_EXT_FEATURE_SCAN_MIN_PREQ_CONTENT: Driver/device can omit all data * except for supported rates from the probe request content if requested * by the %NL80211_SCAN_FLAG_MIN_PREQ_CONTENT flag. + * @NL80211_EXT_FEATURE_ENABLE_FTM_RESPONDER: Driver supports enabling fine + * timing measurement responder role. + * + * @NL80211_EXT_FEATURE_CAN_REPLACE_PTK0: Driver/device confirm that they are + * able to rekey an in-use key correctly. Userspace must not rekey PTK keys + * if this flag is not set. Ignoring this can leak clear text packets and/or + * freeze the connection. * * @NUM_NL80211_EXT_FEATURES: number of extended features. * @MAX_NL80211_EXT_FEATURES: highest extended feature index. 
@@ -5255,10 +5289,14 @@ enum nl80211_ext_feature_index { NL80211_EXT_FEATURE_HIGH_ACCURACY_SCAN, NL80211_EXT_FEATURE_DFS_OFFLOAD, NL80211_EXT_FEATURE_CONTROL_PORT_OVER_NL80211, - NL80211_EXT_FEATURE_DATA_ACK_SIGNAL_SUPPORT, + NL80211_EXT_FEATURE_ACK_SIGNAL_SUPPORT, + /* we renamed this - stay compatible */ + NL80211_EXT_FEATURE_DATA_ACK_SIGNAL_SUPPORT = NL80211_EXT_FEATURE_ACK_SIGNAL_SUPPORT, NL80211_EXT_FEATURE_TXQS, NL80211_EXT_FEATURE_SCAN_RANDOM_SN, NL80211_EXT_FEATURE_SCAN_MIN_PREQ_CONTENT, + NL80211_EXT_FEATURE_CAN_REPLACE_PTK0, + NL80211_EXT_FEATURE_ENABLE_FTM_RESPONDER, /* add new features before the definition below */ NUM_NL80211_EXT_FEATURES, @@ -5798,4 +5836,74 @@ enum nl80211_external_auth_action { NL80211_EXTERNAL_AUTH_ABORT, }; +/** + * enum nl80211_ftm_responder_attributes - fine timing measurement + * responder attributes + * @__NL80211_FTM_RESP_ATTR_INVALID: Invalid + * @NL80211_FTM_RESP_ATTR_ENABLED: FTM responder is enabled + * @NL80211_FTM_RESP_ATTR_LCI: The content of Measurement Report Element + * (9.4.2.22 in 802.11-2016) with type 8 - LCI (9.4.2.22.10) + * @NL80211_FTM_RESP_ATTR_CIVIC: The content of Measurement Report Element + * (9.4.2.22 in 802.11-2016) with type 11 - Civic (Section 9.4.2.22.13) + * @__NL80211_FTM_RESP_ATTR_LAST: Internal + * @NL80211_FTM_RESP_ATTR_MAX: highest FTM responder attribute. + */ +enum nl80211_ftm_responder_attributes { + __NL80211_FTM_RESP_ATTR_INVALID, + + NL80211_FTM_RESP_ATTR_ENABLED, + NL80211_FTM_RESP_ATTR_LCI, + NL80211_FTM_RESP_ATTR_CIVICLOC, + + /* keep last */ + __NL80211_FTM_RESP_ATTR_LAST, + NL80211_FTM_RESP_ATTR_MAX = __NL80211_FTM_RESP_ATTR_LAST - 1, +}; + +/* + * enum nl80211_ftm_responder_stats - FTM responder statistics + * + * These attribute types are used with %NL80211_ATTR_FTM_RESPONDER_STATS + * when getting FTM responder statistics. 
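/*
 * Illustrative sketch, not part of the patch: walking the nested
 * NL80211_ATTR_FTM_RESPONDER_STATS attribute with the raw netlink
 * attribute macros.  Real code would typically let libnl do this; the
 * hand-rolled loop only shows how the attributes listed below are laid
 * out inside the nest.
 */
#include <linux/netlink.h>
#include <linux/nl80211.h>
#include <stdio.h>

static void dump_ftm_stats(const struct nlattr *nest)
{
	const unsigned char *pos = (const unsigned char *)nest + NLA_HDRLEN;
	int rem = nest->nla_len - NLA_HDRLEN;

	while (rem >= (int)sizeof(struct nlattr)) {
		const struct nlattr *nla = (const struct nlattr *)pos;
		int type = nla->nla_type & NLA_TYPE_MASK;

		if (nla->nla_len < NLA_HDRLEN)
			break;
		if (type == NL80211_FTM_STATS_SUCCESS_NUM)
			printf("fully answered sessions: %u\n",
			       *(const __u32 *)(pos + NLA_HDRLEN));
		else if (type == NL80211_FTM_STATS_FAILED_NUM)
			printf("failed sessions: %u\n",
			       *(const __u32 *)(pos + NLA_HDRLEN));
		pos += NLA_ALIGN(nla->nla_len);
		rem -= NLA_ALIGN(nla->nla_len);
	}
}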
+ * + * @__NL80211_FTM_STATS_INVALID: attribute number 0 is reserved + * @NL80211_FTM_STATS_SUCCESS_NUM: number of FTM sessions in which all frames + * were ssfully answered (u32) + * @NL80211_FTM_STATS_PARTIAL_NUM: number of FTM sessions in which part of the + * frames were successfully answered (u32) + * @NL80211_FTM_STATS_FAILED_NUM: number of failed FTM sessions (u32) + * @NL80211_FTM_STATS_ASAP_NUM: number of ASAP sessions (u32) + * @NL80211_FTM_STATS_NON_ASAP_NUM: number of non-ASAP sessions (u32) + * @NL80211_FTM_STATS_TOTAL_DURATION_MSEC: total sessions durations - gives an + * indication of how much time the responder was busy (u64, msec) + * @NL80211_FTM_STATS_UNKNOWN_TRIGGERS_NUM: number of unknown FTM triggers - + * triggers from initiators that didn't finish successfully the negotiation + * phase with the responder (u32) + * @NL80211_FTM_STATS_RESCHEDULE_REQUESTS_NUM: number of FTM reschedule requests + * - initiator asks for a new scheduling although it already has scheduled + * FTM slot (u32) + * @NL80211_FTM_STATS_OUT_OF_WINDOW_TRIGGERS_NUM: number of FTM triggers out of + * scheduled window (u32) + * @NL80211_FTM_STATS_PAD: used for padding, ignore + * @__NL80211_TXQ_ATTR_AFTER_LAST: Internal + * @NL80211_FTM_STATS_MAX: highest possible FTM responder stats attribute + */ +enum nl80211_ftm_responder_stats { + __NL80211_FTM_STATS_INVALID, + NL80211_FTM_STATS_SUCCESS_NUM, + NL80211_FTM_STATS_PARTIAL_NUM, + NL80211_FTM_STATS_FAILED_NUM, + NL80211_FTM_STATS_ASAP_NUM, + NL80211_FTM_STATS_NON_ASAP_NUM, + NL80211_FTM_STATS_TOTAL_DURATION_MSEC, + NL80211_FTM_STATS_UNKNOWN_TRIGGERS_NUM, + NL80211_FTM_STATS_RESCHEDULE_REQUESTS_NUM, + NL80211_FTM_STATS_OUT_OF_WINDOW_TRIGGERS_NUM, + NL80211_FTM_STATS_PAD, + + /* keep last */ + __NL80211_FTM_STATS_AFTER_LAST, + NL80211_FTM_STATS_MAX = __NL80211_FTM_STATS_AFTER_LAST - 1 +}; + #endif /* __LINUX_NL80211_H */ diff --git a/include/uapi/linux/pci_regs.h b/include/uapi/linux/pci_regs.h index ee556ccc93f4..e1e9888c85e6 100644 --- a/include/uapi/linux/pci_regs.h +++ b/include/uapi/linux/pci_regs.h @@ -52,6 +52,7 @@ #define PCI_COMMAND_INTX_DISABLE 0x400 /* INTx Emulation Disable */ #define PCI_STATUS 0x06 /* 16 bits */ +#define PCI_STATUS_IMM_READY 0x01 /* Immediate Readiness */ #define PCI_STATUS_INTERRUPT 0x08 /* Interrupt status */ #define PCI_STATUS_CAP_LIST 0x10 /* Support Capability List */ #define PCI_STATUS_66MHZ 0x20 /* Support 66 MHz PCI 2.1 bus */ diff --git a/include/uapi/linux/pkt_cls.h b/include/uapi/linux/pkt_cls.h index be382fb0592d..401d0c1e612d 100644 --- a/include/uapi/linux/pkt_cls.h +++ b/include/uapi/linux/pkt_cls.h @@ -483,6 +483,8 @@ enum { TCA_FLOWER_KEY_ENC_OPTS, TCA_FLOWER_KEY_ENC_OPTS_MASK, + TCA_FLOWER_IN_HW_COUNT, + __TCA_FLOWER_MAX, }; diff --git a/include/uapi/linux/pkt_sched.h b/include/uapi/linux/pkt_sched.h index 8975fd1a1421..89ee47c2f17d 100644 --- a/include/uapi/linux/pkt_sched.h +++ b/include/uapi/linux/pkt_sched.h @@ -395,9 +395,9 @@ enum { struct tc_htb_xstats { __u32 lends; __u32 borrows; - __u32 giants; /* too big packets (rate will not be accurate) */ - __u32 tokens; - __u32 ctokens; + __u32 giants; /* unused since 'Make HTB scheduler work with TSO.' 
*/ + __s32 tokens; + __s32 ctokens; }; /* HFSC section */ @@ -1084,4 +1084,50 @@ enum { CAKE_ATM_MAX }; + +/* TAPRIO */ +enum { + TC_TAPRIO_CMD_SET_GATES = 0x00, + TC_TAPRIO_CMD_SET_AND_HOLD = 0x01, + TC_TAPRIO_CMD_SET_AND_RELEASE = 0x02, +}; + +enum { + TCA_TAPRIO_SCHED_ENTRY_UNSPEC, + TCA_TAPRIO_SCHED_ENTRY_INDEX, /* u32 */ + TCA_TAPRIO_SCHED_ENTRY_CMD, /* u8 */ + TCA_TAPRIO_SCHED_ENTRY_GATE_MASK, /* u32 */ + TCA_TAPRIO_SCHED_ENTRY_INTERVAL, /* u32 */ + __TCA_TAPRIO_SCHED_ENTRY_MAX, +}; +#define TCA_TAPRIO_SCHED_ENTRY_MAX (__TCA_TAPRIO_SCHED_ENTRY_MAX - 1) + +/* The format for schedule entry list is: + * [TCA_TAPRIO_SCHED_ENTRY_LIST] + * [TCA_TAPRIO_SCHED_ENTRY] + * [TCA_TAPRIO_SCHED_ENTRY_CMD] + * [TCA_TAPRIO_SCHED_ENTRY_GATES] + * [TCA_TAPRIO_SCHED_ENTRY_INTERVAL] + */ +enum { + TCA_TAPRIO_SCHED_UNSPEC, + TCA_TAPRIO_SCHED_ENTRY, + __TCA_TAPRIO_SCHED_MAX, +}; + +#define TCA_TAPRIO_SCHED_MAX (__TCA_TAPRIO_SCHED_MAX - 1) + +enum { + TCA_TAPRIO_ATTR_UNSPEC, + TCA_TAPRIO_ATTR_PRIOMAP, /* struct tc_mqprio_qopt */ + TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST, /* nested of entry */ + TCA_TAPRIO_ATTR_SCHED_BASE_TIME, /* s64 */ + TCA_TAPRIO_ATTR_SCHED_SINGLE_ENTRY, /* single entry */ + TCA_TAPRIO_ATTR_SCHED_CLOCKID, /* s32 */ + TCA_TAPRIO_PAD, + __TCA_TAPRIO_ATTR_MAX, +}; + +#define TCA_TAPRIO_ATTR_MAX (__TCA_TAPRIO_ATTR_MAX - 1) + #endif diff --git a/include/uapi/linux/sctp.h b/include/uapi/linux/sctp.h index b479db5c71d9..34dd3d497f2c 100644 --- a/include/uapi/linux/sctp.h +++ b/include/uapi/linux/sctp.h @@ -301,6 +301,7 @@ enum sctp_sinfo_flags { SCTP_SACK_IMMEDIATELY = (1 << 3), /* SACK should be sent without delay. */ /* 2 bits here have been used by SCTP_PR_SCTP_MASK */ SCTP_SENDALL = (1 << 6), + SCTP_PR_SCTP_ALL = (1 << 7), SCTP_NOTIFICATION = MSG_NOTIFICATION, /* Next message is not user msg but notification. */ SCTP_EOF = MSG_FIN, /* Initiate graceful shutdown process. 
*/ }; diff --git a/include/uapi/linux/shm.h b/include/uapi/linux/shm.h index dde1344f047c..6507ad0afc81 100644 --- a/include/uapi/linux/shm.h +++ b/include/uapi/linux/shm.h @@ -65,7 +65,9 @@ struct shmid_ds { #define SHM_HUGE_2MB HUGETLB_FLAG_ENCODE_2MB #define SHM_HUGE_8MB HUGETLB_FLAG_ENCODE_8MB #define SHM_HUGE_16MB HUGETLB_FLAG_ENCODE_16MB +#define SHM_HUGE_32MB HUGETLB_FLAG_ENCODE_32MB #define SHM_HUGE_256MB HUGETLB_FLAG_ENCODE_256MB +#define SHM_HUGE_512MB HUGETLB_FLAG_ENCODE_512MB #define SHM_HUGE_1GB HUGETLB_FLAG_ENCODE_1GB #define SHM_HUGE_2GB HUGETLB_FLAG_ENCODE_2GB #define SHM_HUGE_16GB HUGETLB_FLAG_ENCODE_16GB diff --git a/include/uapi/linux/smc_diag.h b/include/uapi/linux/smc_diag.h index ac9e8c96d9bd..8cb3a6fef553 100644 --- a/include/uapi/linux/smc_diag.h +++ b/include/uapi/linux/smc_diag.h @@ -18,14 +18,17 @@ struct smc_diag_req { * on the internal clcsock, and more SMC-related socket data */ struct smc_diag_msg { - __u8 diag_family; - __u8 diag_state; - __u8 diag_mode; - __u8 diag_shutdown; + __u8 diag_family; + __u8 diag_state; + union { + __u8 diag_mode; + __u8 diag_fallback; /* the old name of the field */ + }; + __u8 diag_shutdown; struct inet_diag_sockid id; - __u32 diag_uid; - __u64 diag_inode; + __u32 diag_uid; + __aligned_u64 diag_inode; }; /* Mode of a connection */ @@ -99,11 +102,11 @@ struct smc_diag_fallback { }; struct smcd_diag_dmbinfo { /* SMC-D Socket internals */ - __u32 linkid; /* Link identifier */ - __u64 peer_gid; /* Peer GID */ - __u64 my_gid; /* My GID */ - __u64 token; /* Token of DMB */ - __u64 peer_token; /* Token of remote DMBE */ + __u32 linkid; /* Link identifier */ + __aligned_u64 peer_gid; /* Peer GID */ + __aligned_u64 my_gid; /* My GID */ + __aligned_u64 token; /* Token of DMB */ + __aligned_u64 peer_token; /* Token of remote DMBE */ }; #endif /* _UAPI_SMC_DIAG_H_ */ diff --git a/include/uapi/linux/taskstats.h b/include/uapi/linux/taskstats.h index b7aa7bb2349f..5e8ca16a9079 100644 --- a/include/uapi/linux/taskstats.h +++ b/include/uapi/linux/taskstats.h @@ -34,7 +34,7 @@ */ -#define TASKSTATS_VERSION 8 +#define TASKSTATS_VERSION 9 #define TS_COMM_LEN 32 /* should be >= TASK_COMM_LEN * in linux/sched.h */ @@ -164,6 +164,10 @@ struct taskstats { /* Delay waiting for memory reclaim */ __u64 freepages_count; __u64 freepages_delay_total; + + /* Delay waiting for thrashing page */ + __u64 thrashing_count; + __u64 thrashing_delay_total; }; diff --git a/include/uapi/linux/udp.h b/include/uapi/linux/udp.h index 09d00f8c442b..09502de447f5 100644 --- a/include/uapi/linux/udp.h +++ b/include/uapi/linux/udp.h @@ -40,5 +40,6 @@ struct udphdr { #define UDP_ENCAP_L2TPINUDP 3 /* rfc2661 */ #define UDP_ENCAP_GTP0 4 /* GSM TS 09.60 */ #define UDP_ENCAP_GTP1U 5 /* 3GPP TS 29.060 */ +#define UDP_ENCAP_RXRPC 6 #endif /* _UAPI_LINUX_UDP_H */ diff --git a/include/uapi/linux/usb/tmc.h b/include/uapi/linux/usb/tmc.h index 729af2f861a4..fdd4d88a7b95 100644 --- a/include/uapi/linux/usb/tmc.h +++ b/include/uapi/linux/usb/tmc.h @@ -4,6 +4,7 @@ * Copyright (C) 2008 Novell, Inc. * Copyright (C) 2008 Greg Kroah-Hartman <gregkh@suse.de> * Copyright (C) 2015 Dave Penkler <dpenkler@gmail.com> + * Copyright (C) 2018 IVI Foundation, Inc. 
* * This file holds USB constants defined by the USB Device Class * and USB488 Subclass Definitions for Test and Measurement devices @@ -40,11 +41,38 @@ #define USBTMC488_REQUEST_GOTO_LOCAL 161 #define USBTMC488_REQUEST_LOCAL_LOCKOUT 162 +struct usbtmc_request { + __u8 bRequestType; + __u8 bRequest; + __u16 wValue; + __u16 wIndex; + __u16 wLength; +} __attribute__ ((packed)); + +struct usbtmc_ctrlrequest { + struct usbtmc_request req; + void __user *data; /* pointer to user space */ +} __attribute__ ((packed)); + struct usbtmc_termchar { __u8 term_char; __u8 term_char_enabled; } __attribute__ ((packed)); +/* + * usbtmc_message->flags: + */ +#define USBTMC_FLAG_ASYNC 0x0001 +#define USBTMC_FLAG_APPEND 0x0002 +#define USBTMC_FLAG_IGNORE_TRAILER 0x0004 + +struct usbtmc_message { + __u32 transfer_size; /* size of bytes to transfer */ + __u32 transferred; /* size of received/written bytes */ + __u32 flags; /* bit 0: 0 = synchronous; 1 = asynchronous */ + void __user *message; /* pointer to header and data in user space */ +} __attribute__ ((packed)); + /* Request values for USBTMC driver's ioctl entry point */ #define USBTMC_IOC_NR 91 #define USBTMC_IOCTL_INDICATOR_PULSE _IO(USBTMC_IOC_NR, 1) @@ -53,10 +81,15 @@ struct usbtmc_termchar { #define USBTMC_IOCTL_ABORT_BULK_IN _IO(USBTMC_IOC_NR, 4) #define USBTMC_IOCTL_CLEAR_OUT_HALT _IO(USBTMC_IOC_NR, 6) #define USBTMC_IOCTL_CLEAR_IN_HALT _IO(USBTMC_IOC_NR, 7) +#define USBTMC_IOCTL_CTRL_REQUEST _IOWR(USBTMC_IOC_NR, 8, struct usbtmc_ctrlrequest) #define USBTMC_IOCTL_GET_TIMEOUT _IOR(USBTMC_IOC_NR, 9, __u32) #define USBTMC_IOCTL_SET_TIMEOUT _IOW(USBTMC_IOC_NR, 10, __u32) #define USBTMC_IOCTL_EOM_ENABLE _IOW(USBTMC_IOC_NR, 11, __u8) #define USBTMC_IOCTL_CONFIG_TERMCHAR _IOW(USBTMC_IOC_NR, 12, struct usbtmc_termchar) +#define USBTMC_IOCTL_WRITE _IOWR(USBTMC_IOC_NR, 13, struct usbtmc_message) +#define USBTMC_IOCTL_READ _IOWR(USBTMC_IOC_NR, 14, struct usbtmc_message) +#define USBTMC_IOCTL_WRITE_RESULT _IOWR(USBTMC_IOC_NR, 15, __u32) +#define USBTMC_IOCTL_API_VERSION _IOR(USBTMC_IOC_NR, 16, __u32) #define USBTMC488_IOCTL_GET_CAPS _IOR(USBTMC_IOC_NR, 17, unsigned char) #define USBTMC488_IOCTL_READ_STB _IOR(USBTMC_IOC_NR, 18, unsigned char) @@ -64,6 +97,14 @@ struct usbtmc_termchar { #define USBTMC488_IOCTL_GOTO_LOCAL _IO(USBTMC_IOC_NR, 20) #define USBTMC488_IOCTL_LOCAL_LOCKOUT _IO(USBTMC_IOC_NR, 21) #define USBTMC488_IOCTL_TRIGGER _IO(USBTMC_IOC_NR, 22) +#define USBTMC488_IOCTL_WAIT_SRQ _IOW(USBTMC_IOC_NR, 23, __u32) + +#define USBTMC_IOCTL_MSG_IN_ATTR _IOR(USBTMC_IOC_NR, 24, __u8) +#define USBTMC_IOCTL_AUTO_ABORT _IOW(USBTMC_IOC_NR, 25, __u8) + +/* Cancel and cleanup asynchronous calls */ +#define USBTMC_IOCTL_CANCEL_IO _IO(USBTMC_IOC_NR, 35) +#define USBTMC_IOCTL_CLEANUP_IO _IO(USBTMC_IOC_NR, 36) /* Driver encoded usb488 capabilities */ #define USBTMC488_CAPABILITY_TRIGGER 1 diff --git a/include/uapi/linux/usb/video.h b/include/uapi/linux/usb/video.h index ff6cc6cb4227..d854cb19c42c 100644 --- a/include/uapi/linux/usb/video.h +++ b/include/uapi/linux/usb/video.h @@ -192,14 +192,14 @@ struct uvc_descriptor_header { /* 3.7.2. 
Video Control Interface Header Descriptor */ struct uvc_header_descriptor { - __u8 bLength; - __u8 bDescriptorType; - __u8 bDescriptorSubType; - __u16 bcdUVC; - __u16 wTotalLength; - __u32 dwClockFrequency; - __u8 bInCollection; - __u8 baInterfaceNr[]; + __u8 bLength; + __u8 bDescriptorType; + __u8 bDescriptorSubType; + __le16 bcdUVC; + __le16 wTotalLength; + __le32 dwClockFrequency; + __u8 bInCollection; + __u8 baInterfaceNr[]; } __attribute__((__packed__)); #define UVC_DT_HEADER_SIZE(n) (12+(n)) @@ -209,57 +209,57 @@ struct uvc_header_descriptor { #define DECLARE_UVC_HEADER_DESCRIPTOR(n) \ struct UVC_HEADER_DESCRIPTOR(n) { \ - __u8 bLength; \ - __u8 bDescriptorType; \ - __u8 bDescriptorSubType; \ - __u16 bcdUVC; \ - __u16 wTotalLength; \ - __u32 dwClockFrequency; \ - __u8 bInCollection; \ - __u8 baInterfaceNr[n]; \ + __u8 bLength; \ + __u8 bDescriptorType; \ + __u8 bDescriptorSubType; \ + __le16 bcdUVC; \ + __le16 wTotalLength; \ + __le32 dwClockFrequency; \ + __u8 bInCollection; \ + __u8 baInterfaceNr[n]; \ } __attribute__ ((packed)) /* 3.7.2.1. Input Terminal Descriptor */ struct uvc_input_terminal_descriptor { - __u8 bLength; - __u8 bDescriptorType; - __u8 bDescriptorSubType; - __u8 bTerminalID; - __u16 wTerminalType; - __u8 bAssocTerminal; - __u8 iTerminal; + __u8 bLength; + __u8 bDescriptorType; + __u8 bDescriptorSubType; + __u8 bTerminalID; + __le16 wTerminalType; + __u8 bAssocTerminal; + __u8 iTerminal; } __attribute__((__packed__)); #define UVC_DT_INPUT_TERMINAL_SIZE 8 /* 3.7.2.2. Output Terminal Descriptor */ struct uvc_output_terminal_descriptor { - __u8 bLength; - __u8 bDescriptorType; - __u8 bDescriptorSubType; - __u8 bTerminalID; - __u16 wTerminalType; - __u8 bAssocTerminal; - __u8 bSourceID; - __u8 iTerminal; + __u8 bLength; + __u8 bDescriptorType; + __u8 bDescriptorSubType; + __u8 bTerminalID; + __le16 wTerminalType; + __u8 bAssocTerminal; + __u8 bSourceID; + __u8 iTerminal; } __attribute__((__packed__)); #define UVC_DT_OUTPUT_TERMINAL_SIZE 9 /* 3.7.2.3. Camera Terminal Descriptor */ struct uvc_camera_terminal_descriptor { - __u8 bLength; - __u8 bDescriptorType; - __u8 bDescriptorSubType; - __u8 bTerminalID; - __u16 wTerminalType; - __u8 bAssocTerminal; - __u8 iTerminal; - __u16 wObjectiveFocalLengthMin; - __u16 wObjectiveFocalLengthMax; - __u16 wOcularFocalLength; - __u8 bControlSize; - __u8 bmControls[3]; + __u8 bLength; + __u8 bDescriptorType; + __u8 bDescriptorSubType; + __u8 bTerminalID; + __le16 wTerminalType; + __u8 bAssocTerminal; + __u8 iTerminal; + __le16 wObjectiveFocalLengthMin; + __le16 wObjectiveFocalLengthMax; + __le16 wOcularFocalLength; + __u8 bControlSize; + __u8 bmControls[3]; } __attribute__((__packed__)); #define UVC_DT_CAMERA_TERMINAL_SIZE(n) (15+(n)) @@ -293,15 +293,15 @@ struct UVC_SELECTOR_UNIT_DESCRIPTOR(n) { \ /* 3.7.2.5. Processing Unit Descriptor */ struct uvc_processing_unit_descriptor { - __u8 bLength; - __u8 bDescriptorType; - __u8 bDescriptorSubType; - __u8 bUnitID; - __u8 bSourceID; - __u16 wMaxMultiplier; - __u8 bControlSize; - __u8 bmControls[2]; - __u8 iProcessing; + __u8 bLength; + __u8 bDescriptorType; + __u8 bDescriptorSubType; + __u8 bUnitID; + __u8 bSourceID; + __le16 wMaxMultiplier; + __u8 bControlSize; + __u8 bmControls[2]; + __u8 iProcessing; } __attribute__((__packed__)); #define UVC_DT_PROCESSING_UNIT_SIZE(n) (9+(n)) @@ -343,29 +343,29 @@ struct UVC_EXTENSION_UNIT_DESCRIPTOR(p, n) { \ /* 3.8.2.2. 
Video Control Interrupt Endpoint Descriptor */ struct uvc_control_endpoint_descriptor { - __u8 bLength; - __u8 bDescriptorType; - __u8 bDescriptorSubType; - __u16 wMaxTransferSize; + __u8 bLength; + __u8 bDescriptorType; + __u8 bDescriptorSubType; + __le16 wMaxTransferSize; } __attribute__((__packed__)); #define UVC_DT_CONTROL_ENDPOINT_SIZE 5 /* 3.9.2.1. Input Header Descriptor */ struct uvc_input_header_descriptor { - __u8 bLength; - __u8 bDescriptorType; - __u8 bDescriptorSubType; - __u8 bNumFormats; - __u16 wTotalLength; - __u8 bEndpointAddress; - __u8 bmInfo; - __u8 bTerminalLink; - __u8 bStillCaptureMethod; - __u8 bTriggerSupport; - __u8 bTriggerUsage; - __u8 bControlSize; - __u8 bmaControls[]; + __u8 bLength; + __u8 bDescriptorType; + __u8 bDescriptorSubType; + __u8 bNumFormats; + __le16 wTotalLength; + __u8 bEndpointAddress; + __u8 bmInfo; + __u8 bTerminalLink; + __u8 bStillCaptureMethod; + __u8 bTriggerSupport; + __u8 bTriggerUsage; + __u8 bControlSize; + __u8 bmaControls[]; } __attribute__((__packed__)); #define UVC_DT_INPUT_HEADER_SIZE(n, p) (13+(n*p)) @@ -375,32 +375,32 @@ struct uvc_input_header_descriptor { #define DECLARE_UVC_INPUT_HEADER_DESCRIPTOR(n, p) \ struct UVC_INPUT_HEADER_DESCRIPTOR(n, p) { \ - __u8 bLength; \ - __u8 bDescriptorType; \ - __u8 bDescriptorSubType; \ - __u8 bNumFormats; \ - __u16 wTotalLength; \ - __u8 bEndpointAddress; \ - __u8 bmInfo; \ - __u8 bTerminalLink; \ - __u8 bStillCaptureMethod; \ - __u8 bTriggerSupport; \ - __u8 bTriggerUsage; \ - __u8 bControlSize; \ - __u8 bmaControls[p][n]; \ + __u8 bLength; \ + __u8 bDescriptorType; \ + __u8 bDescriptorSubType; \ + __u8 bNumFormats; \ + __le16 wTotalLength; \ + __u8 bEndpointAddress; \ + __u8 bmInfo; \ + __u8 bTerminalLink; \ + __u8 bStillCaptureMethod; \ + __u8 bTriggerSupport; \ + __u8 bTriggerUsage; \ + __u8 bControlSize; \ + __u8 bmaControls[p][n]; \ } __attribute__ ((packed)) /* 3.9.2.2. Output Header Descriptor */ struct uvc_output_header_descriptor { - __u8 bLength; - __u8 bDescriptorType; - __u8 bDescriptorSubType; - __u8 bNumFormats; - __u16 wTotalLength; - __u8 bEndpointAddress; - __u8 bTerminalLink; - __u8 bControlSize; - __u8 bmaControls[]; + __u8 bLength; + __u8 bDescriptorType; + __u8 bDescriptorSubType; + __u8 bNumFormats; + __le16 wTotalLength; + __u8 bEndpointAddress; + __u8 bTerminalLink; + __u8 bControlSize; + __u8 bmaControls[]; } __attribute__((__packed__)); #define UVC_DT_OUTPUT_HEADER_SIZE(n, p) (9+(n*p)) @@ -410,15 +410,15 @@ struct uvc_output_header_descriptor { #define DECLARE_UVC_OUTPUT_HEADER_DESCRIPTOR(n, p) \ struct UVC_OUTPUT_HEADER_DESCRIPTOR(n, p) { \ - __u8 bLength; \ - __u8 bDescriptorType; \ - __u8 bDescriptorSubType; \ - __u8 bNumFormats; \ - __u16 wTotalLength; \ - __u8 bEndpointAddress; \ - __u8 bTerminalLink; \ - __u8 bControlSize; \ - __u8 bmaControls[p][n]; \ + __u8 bLength; \ + __u8 bDescriptorType; \ + __u8 bDescriptorSubType; \ + __u8 bNumFormats; \ + __le16 wTotalLength; \ + __u8 bEndpointAddress; \ + __u8 bTerminalLink; \ + __u8 bControlSize; \ + __u8 bmaControls[p][n]; \ } __attribute__ ((packed)) /* 3.9.2.6. Color matching descriptor */ @@ -473,19 +473,19 @@ struct uvc_format_uncompressed { /* Uncompressed Payload - 3.1.2. 
Uncompressed Video Frame Descriptor */ struct uvc_frame_uncompressed { - __u8 bLength; - __u8 bDescriptorType; - __u8 bDescriptorSubType; - __u8 bFrameIndex; - __u8 bmCapabilities; - __u16 wWidth; - __u16 wHeight; - __u32 dwMinBitRate; - __u32 dwMaxBitRate; - __u32 dwMaxVideoFrameBufferSize; - __u32 dwDefaultFrameInterval; - __u8 bFrameIntervalType; - __u32 dwFrameInterval[]; + __u8 bLength; + __u8 bDescriptorType; + __u8 bDescriptorSubType; + __u8 bFrameIndex; + __u8 bmCapabilities; + __le16 wWidth; + __le16 wHeight; + __le32 dwMinBitRate; + __le32 dwMaxBitRate; + __le32 dwMaxVideoFrameBufferSize; + __le32 dwDefaultFrameInterval; + __u8 bFrameIntervalType; + __le32 dwFrameInterval[]; } __attribute__((__packed__)); #define UVC_DT_FRAME_UNCOMPRESSED_SIZE(n) (26+4*(n)) @@ -495,19 +495,19 @@ struct uvc_frame_uncompressed { #define DECLARE_UVC_FRAME_UNCOMPRESSED(n) \ struct UVC_FRAME_UNCOMPRESSED(n) { \ - __u8 bLength; \ - __u8 bDescriptorType; \ - __u8 bDescriptorSubType; \ - __u8 bFrameIndex; \ - __u8 bmCapabilities; \ - __u16 wWidth; \ - __u16 wHeight; \ - __u32 dwMinBitRate; \ - __u32 dwMaxBitRate; \ - __u32 dwMaxVideoFrameBufferSize; \ - __u32 dwDefaultFrameInterval; \ - __u8 bFrameIntervalType; \ - __u32 dwFrameInterval[n]; \ + __u8 bLength; \ + __u8 bDescriptorType; \ + __u8 bDescriptorSubType; \ + __u8 bFrameIndex; \ + __u8 bmCapabilities; \ + __le16 wWidth; \ + __le16 wHeight; \ + __le32 dwMinBitRate; \ + __le32 dwMaxBitRate; \ + __le32 dwMaxVideoFrameBufferSize; \ + __le32 dwDefaultFrameInterval; \ + __u8 bFrameIntervalType; \ + __le32 dwFrameInterval[n]; \ } __attribute__ ((packed)) /* MJPEG Payload - 3.1.1. MJPEG Video Format Descriptor */ @@ -529,19 +529,19 @@ struct uvc_format_mjpeg { /* MJPEG Payload - 3.1.2. MJPEG Video Frame Descriptor */ struct uvc_frame_mjpeg { - __u8 bLength; - __u8 bDescriptorType; - __u8 bDescriptorSubType; - __u8 bFrameIndex; - __u8 bmCapabilities; - __u16 wWidth; - __u16 wHeight; - __u32 dwMinBitRate; - __u32 dwMaxBitRate; - __u32 dwMaxVideoFrameBufferSize; - __u32 dwDefaultFrameInterval; - __u8 bFrameIntervalType; - __u32 dwFrameInterval[]; + __u8 bLength; + __u8 bDescriptorType; + __u8 bDescriptorSubType; + __u8 bFrameIndex; + __u8 bmCapabilities; + __le16 wWidth; + __le16 wHeight; + __le32 dwMinBitRate; + __le32 dwMaxBitRate; + __le32 dwMaxVideoFrameBufferSize; + __le32 dwDefaultFrameInterval; + __u8 bFrameIntervalType; + __le32 dwFrameInterval[]; } __attribute__((__packed__)); #define UVC_DT_FRAME_MJPEG_SIZE(n) (26+4*(n)) @@ -551,19 +551,19 @@ struct uvc_frame_mjpeg { #define DECLARE_UVC_FRAME_MJPEG(n) \ struct UVC_FRAME_MJPEG(n) { \ - __u8 bLength; \ - __u8 bDescriptorType; \ - __u8 bDescriptorSubType; \ - __u8 bFrameIndex; \ - __u8 bmCapabilities; \ - __u16 wWidth; \ - __u16 wHeight; \ - __u32 dwMinBitRate; \ - __u32 dwMaxBitRate; \ - __u32 dwMaxVideoFrameBufferSize; \ - __u32 dwDefaultFrameInterval; \ - __u8 bFrameIntervalType; \ - __u32 dwFrameInterval[n]; \ + __u8 bLength; \ + __u8 bDescriptorType; \ + __u8 bDescriptorSubType; \ + __u8 bFrameIndex; \ + __u8 bmCapabilities; \ + __le16 wWidth; \ + __le16 wHeight; \ + __le32 dwMinBitRate; \ + __le32 dwMaxBitRate; \ + __le32 dwMaxVideoFrameBufferSize; \ + __le32 dwDefaultFrameInterval; \ + __u8 bFrameIntervalType; \ + __le32 dwFrameInterval[n]; \ } __attribute__ ((packed)) #endif /* __LINUX_USB_VIDEO_H */ diff --git a/include/uapi/linux/vfio.h b/include/uapi/linux/vfio.h index 1aa7b82e8169..f378b9802d8b 100644 --- a/include/uapi/linux/vfio.h +++ b/include/uapi/linux/vfio.h @@ -200,6 +200,7 @@ 
struct vfio_device_info { #define VFIO_DEVICE_FLAGS_PLATFORM (1 << 2) /* vfio-platform device */ #define VFIO_DEVICE_FLAGS_AMBA (1 << 3) /* vfio-amba device */ #define VFIO_DEVICE_FLAGS_CCW (1 << 4) /* vfio-ccw device */ +#define VFIO_DEVICE_FLAGS_AP (1 << 5) /* vfio-ap device */ __u32 num_regions; /* Max region index + 1 */ __u32 num_irqs; /* Max IRQ index + 1 */ }; @@ -215,6 +216,7 @@ struct vfio_device_info { #define VFIO_DEVICE_API_PLATFORM_STRING "vfio-platform" #define VFIO_DEVICE_API_AMBA_STRING "vfio-amba" #define VFIO_DEVICE_API_CCW_STRING "vfio-ccw" +#define VFIO_DEVICE_API_AP_STRING "vfio-ap" /** * VFIO_DEVICE_GET_REGION_INFO - _IOWR(VFIO_TYPE, VFIO_BASE + 8, diff --git a/include/uapi/rdma/ib_user_verbs.h b/include/uapi/rdma/ib_user_verbs.h index 25a16760de2a..1254b51a551a 100644 --- a/include/uapi/rdma/ib_user_verbs.h +++ b/include/uapi/rdma/ib_user_verbs.h @@ -763,10 +763,28 @@ struct ib_uverbs_sge { __u32 lkey; }; +enum ib_uverbs_wr_opcode { + IB_UVERBS_WR_RDMA_WRITE = 0, + IB_UVERBS_WR_RDMA_WRITE_WITH_IMM = 1, + IB_UVERBS_WR_SEND = 2, + IB_UVERBS_WR_SEND_WITH_IMM = 3, + IB_UVERBS_WR_RDMA_READ = 4, + IB_UVERBS_WR_ATOMIC_CMP_AND_SWP = 5, + IB_UVERBS_WR_ATOMIC_FETCH_AND_ADD = 6, + IB_UVERBS_WR_LOCAL_INV = 7, + IB_UVERBS_WR_BIND_MW = 8, + IB_UVERBS_WR_SEND_WITH_INV = 9, + IB_UVERBS_WR_TSO = 10, + IB_UVERBS_WR_RDMA_READ_WITH_INV = 11, + IB_UVERBS_WR_MASKED_ATOMIC_CMP_AND_SWP = 12, + IB_UVERBS_WR_MASKED_ATOMIC_FETCH_AND_ADD = 13, + /* Review enum ib_wr_opcode before modifying this */ +}; + struct ib_uverbs_send_wr { __aligned_u64 wr_id; __u32 num_sge; - __u32 opcode; + __u32 opcode; /* see enum ib_uverbs_wr_opcode */ __u32 send_flags; union { __be32 imm_data; diff --git a/include/uapi/rdma/mlx5-abi.h b/include/uapi/rdma/mlx5-abi.h index addbb9c4529e..8fa9f90e2bb1 100644 --- a/include/uapi/rdma/mlx5-abi.h +++ b/include/uapi/rdma/mlx5-abi.h @@ -45,6 +45,9 @@ enum { MLX5_QP_FLAG_BFREG_INDEX = 1 << 3, MLX5_QP_FLAG_TYPE_DCT = 1 << 4, MLX5_QP_FLAG_TYPE_DCI = 1 << 5, + MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC = 1 << 6, + MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC = 1 << 7, + MLX5_QP_FLAG_ALLOW_SCATTER_CQE = 1 << 8, }; enum { @@ -349,9 +352,22 @@ struct mlx5_ib_create_qp_rss { __u32 flags; }; +enum mlx5_ib_create_qp_resp_mask { + MLX5_IB_CREATE_QP_RESP_MASK_TIRN = 1UL << 0, + MLX5_IB_CREATE_QP_RESP_MASK_TISN = 1UL << 1, + MLX5_IB_CREATE_QP_RESP_MASK_RQN = 1UL << 2, + MLX5_IB_CREATE_QP_RESP_MASK_SQN = 1UL << 3, +}; + struct mlx5_ib_create_qp_resp { __u32 bfreg_index; __u32 reserved; + __u32 comp_mask; + __u32 tirn; + __u32 tisn; + __u32 rqn; + __u32 sqn; + __u32 reserved1; }; struct mlx5_ib_alloc_mw { diff --git a/include/uapi/rdma/mlx5_user_ioctl_cmds.h b/include/uapi/rdma/mlx5_user_ioctl_cmds.h index 9c51801b9e64..408e220034de 100644 --- a/include/uapi/rdma/mlx5_user_ioctl_cmds.h +++ b/include/uapi/rdma/mlx5_user_ioctl_cmds.h @@ -125,6 +125,7 @@ enum mlx5_ib_flow_matcher_create_attrs { MLX5_IB_ATTR_FLOW_MATCHER_MATCH_MASK, MLX5_IB_ATTR_FLOW_MATCHER_FLOW_TYPE, MLX5_IB_ATTR_FLOW_MATCHER_MATCH_CRITERIA, + MLX5_IB_ATTR_FLOW_MATCHER_FLOW_FLAGS, }; enum mlx5_ib_flow_matcher_destroy_attrs { @@ -155,6 +156,8 @@ enum mlx5_ib_create_flow_attrs { MLX5_IB_ATTR_CREATE_FLOW_DEST_QP, MLX5_IB_ATTR_CREATE_FLOW_DEST_DEVX, MLX5_IB_ATTR_CREATE_FLOW_MATCHER, + MLX5_IB_ATTR_CREATE_FLOW_ARR_FLOW_ACTIONS, + MLX5_IB_ATTR_CREATE_FLOW_TAG, }; enum mlx5_ib_destoy_flow_attrs { @@ -166,4 +169,22 @@ enum mlx5_ib_flow_methods { MLX5_IB_METHOD_DESTROY_FLOW, }; +enum mlx5_ib_flow_action_methods { + 
MLX5_IB_METHOD_FLOW_ACTION_CREATE_MODIFY_HEADER = (1U << UVERBS_ID_NS_SHIFT), + MLX5_IB_METHOD_FLOW_ACTION_CREATE_PACKET_REFORMAT, +}; + +enum mlx5_ib_create_flow_action_create_modify_header_attrs { + MLX5_IB_ATTR_CREATE_MODIFY_HEADER_HANDLE = (1U << UVERBS_ID_NS_SHIFT), + MLX5_IB_ATTR_CREATE_MODIFY_HEADER_ACTIONS_PRM, + MLX5_IB_ATTR_CREATE_MODIFY_HEADER_FT_TYPE, +}; + +enum mlx5_ib_create_flow_action_create_packet_reformat_attrs { + MLX5_IB_ATTR_CREATE_PACKET_REFORMAT_HANDLE = (1U << UVERBS_ID_NS_SHIFT), + MLX5_IB_ATTR_CREATE_PACKET_REFORMAT_TYPE, + MLX5_IB_ATTR_CREATE_PACKET_REFORMAT_FT_TYPE, + MLX5_IB_ATTR_CREATE_PACKET_REFORMAT_DATA_BUF, +}; + #endif diff --git a/include/uapi/rdma/mlx5_user_ioctl_verbs.h b/include/uapi/rdma/mlx5_user_ioctl_verbs.h index 8a2fb33f3ed4..4ef62c0e8452 100644 --- a/include/uapi/rdma/mlx5_user_ioctl_verbs.h +++ b/include/uapi/rdma/mlx5_user_ioctl_verbs.h @@ -39,5 +39,17 @@ enum mlx5_ib_uapi_flow_action_flags { MLX5_IB_UAPI_FLOW_ACTION_FLAGS_REQUIRE_METADATA = 1 << 0, }; +enum mlx5_ib_uapi_flow_table_type { + MLX5_IB_UAPI_FLOW_TABLE_TYPE_NIC_RX = 0x0, + MLX5_IB_UAPI_FLOW_TABLE_TYPE_NIC_TX = 0x1, +}; + +enum mlx5_ib_uapi_flow_action_packet_reformat_type { + MLX5_IB_UAPI_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TUNNEL_TO_L2 = 0x0, + MLX5_IB_UAPI_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L2_TUNNEL = 0x1, + MLX5_IB_UAPI_FLOW_ACTION_PACKET_REFORMAT_TYPE_L3_TUNNEL_TO_L2 = 0x2, + MLX5_IB_UAPI_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L3_TUNNEL = 0x3, +}; + #endif diff --git a/include/uapi/rdma/rdma_netlink.h b/include/uapi/rdma/rdma_netlink.h index edba6351ac13..f9c41bf59efc 100644 --- a/include/uapi/rdma/rdma_netlink.h +++ b/include/uapi/rdma/rdma_netlink.h @@ -227,8 +227,9 @@ enum rdma_nldev_command { RDMA_NLDEV_CMD_UNSPEC, RDMA_NLDEV_CMD_GET, /* can dump */ + RDMA_NLDEV_CMD_SET, - /* 2 - 4 are free to use */ + /* 3 - 4 are free to use */ RDMA_NLDEV_CMD_PORT_GET = 5, /* can dump */ diff --git a/include/uapi/rdma/rdma_user_ioctl_cmds.h b/include/uapi/rdma/rdma_user_ioctl_cmds.h index 24800c6c1f32..06c34d99be85 100644 --- a/include/uapi/rdma/rdma_user_ioctl_cmds.h +++ b/include/uapi/rdma/rdma_user_ioctl_cmds.h @@ -53,7 +53,7 @@ enum { struct ib_uverbs_attr { __u16 attr_id; /* command specific type attribute */ - __u16 len; /* only for pointers */ + __u16 len; /* only for pointers and IDRs array */ __u16 flags; /* combination of UVERBS_ATTR_F_XXXX */ union { struct { @@ -63,7 +63,10 @@ struct ib_uverbs_attr { __u16 reserved; } attr_data; union { - /* Used by PTR_IN/OUT, ENUM_IN and IDR */ + /* + * ptr to command, inline data, idr/fd or + * ptr to __u32 array of IDRs + */ __aligned_u64 data; /* Used by FD_IN and FD_OUT */ __s64 data_s64; diff --git a/include/uapi/scsi/scsi_bsg_ufs.h b/include/uapi/scsi/scsi_bsg_ufs.h new file mode 100644 index 000000000000..17c7abd0803a --- /dev/null +++ b/include/uapi/scsi/scsi_bsg_ufs.h @@ -0,0 +1,106 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * UFS Transport SGIO v4 BSG Message Support + * + * Copyright (C) 2011-2013 Samsung India Software Operations + * Copyright (C) 2018 Western Digital Corporation + */ +#ifndef SCSI_BSG_UFS_H +#define SCSI_BSG_UFS_H + +#include <linux/types.h> +/* + * This file intended to be included by both kernel and user space + */ + +#define UFS_CDB_SIZE 16 +#define UPIU_TRANSACTION_UIC_CMD 0x1F +/* uic commands are 4DW long, per UFSHCI V2.1 paragraph 5.6.1 */ +#define UIC_CMD_SIZE (sizeof(__u32) * 4) + +/** + * struct utp_upiu_header - UPIU header structure + * @dword_0: UPIU header DW-0 + * @dword_1: UPIU 
header DW-1 + * @dword_2: UPIU header DW-2 + */ +struct utp_upiu_header { + __be32 dword_0; + __be32 dword_1; + __be32 dword_2; +}; + +/** + * struct utp_upiu_query - upiu request buffer structure for + * query request. + * @opcode: command to perform B-0 + * @idn: a value that indicates the particular type of data B-1 + * @index: Index to further identify data B-2 + * @selector: Index to further identify data B-3 + * @reserved_osf: spec reserved field B-4,5 + * @length: number of descriptor bytes to read/write B-6,7 + * @value: Attribute value to be written DW-5 + * @reserved: spec reserved DW-6,7 + */ +struct utp_upiu_query { + __u8 opcode; + __u8 idn; + __u8 index; + __u8 selector; + __be16 reserved_osf; + __be16 length; + __be32 value; + __be32 reserved[2]; +}; + +/** + * struct utp_upiu_cmd - Command UPIU structure + * @data_transfer_len: Data Transfer Length DW-3 + * @cdb: Command Descriptor Block CDB DW-4 to DW-7 + */ +struct utp_upiu_cmd { + __be32 exp_data_transfer_len; + __u8 cdb[UFS_CDB_SIZE]; +}; + +/** + * struct utp_upiu_req - general upiu request structure + * @header:UPIU header structure DW-0 to DW-2 + * @sc: fields structure for scsi command DW-3 to DW-7 + * @qr: fields structure for query request DW-3 to DW-7 + */ +struct utp_upiu_req { + struct utp_upiu_header header; + union { + struct utp_upiu_cmd sc; + struct utp_upiu_query qr; + struct utp_upiu_query tr; + /* use utp_upiu_query to host the 4 dwords of uic command */ + struct utp_upiu_query uc; + }; +}; + +/* request (CDB) structure of the sg_io_v4 */ +struct ufs_bsg_request { + __u32 msgcode; + struct utp_upiu_req upiu_req; +}; + +/* response (request sense data) structure of the sg_io_v4 */ +struct ufs_bsg_reply { + /* + * The completion result. Result exists in two forms: + * if negative, it is an -Exxx system errno value. There will + * be no further reply information supplied. + * else, it's the 4-byte scsi error result, with driver, host, + * msg and status fields. The per-msgcode reply structure + * will contain valid data. + */ + __u32 result; + + /* If there was reply_payload, how much was received? 
*/ + __u32 reply_payload_rcv_len; + + struct utp_upiu_req upiu_rsp; +}; +#endif /* UFS_BSG_H */ diff --git a/include/uapi/sound/asound.h b/include/uapi/sound/asound.h index ed0a120d4f08..404d4b9ffe76 100644 --- a/include/uapi/sound/asound.h +++ b/include/uapi/sound/asound.h @@ -752,7 +752,7 @@ struct snd_timer_info { #define SNDRV_TIMER_PSFLG_EARLY_EVENT (1<<2) /* write early event to the poll queue */ struct snd_timer_params { - unsigned int flags; /* flags - SNDRV_MIXER_PSFLG_* */ + unsigned int flags; /* flags - SNDRV_TIMER_PSFLG_* */ unsigned int ticks; /* requested resolution in ticks */ unsigned int queue_size; /* total size of queue (32-1024) */ unsigned int reserved0; /* reserved, was: failure locations */ diff --git a/include/xen/events.h b/include/xen/events.h index c3e6bc643a7b..a48897199975 100644 --- a/include/xen/events.h +++ b/include/xen/events.h @@ -89,11 +89,13 @@ unsigned irq_from_evtchn(unsigned int evtchn); int irq_from_virq(unsigned int cpu, unsigned int virq); unsigned int evtchn_from_irq(unsigned irq); +#ifdef CONFIG_XEN_PVHVM /* Xen HVM evtchn vector callback */ void xen_hvm_callback_vector(void); #ifdef CONFIG_TRACING #define trace_xen_hvm_callback_vector xen_hvm_callback_vector #endif +#endif int xen_set_callback_via(uint64_t via); void xen_evtchn_do_upcall(struct pt_regs *regs); void xen_hvm_evtchn_do_upcall(void); diff --git a/include/xen/interface/memory.h b/include/xen/interface/memory.h index 4c5751c26f87..447004861f00 100644 --- a/include/xen/interface/memory.h +++ b/include/xen/interface/memory.h @@ -245,12 +245,6 @@ DEFINE_GUEST_HANDLE_STRUCT(xen_memory_map); /* - * Prevent the balloon driver from changing the memory reservation - * during a driver critical region. - */ -extern spinlock_t xen_reservation_lock; - -/* * Unmaps the page appearing at a particular GPFN from the specified guest's * pseudophysical address space. * arg == addr of xen_remove_from_physmap_t. 
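As a usage illustration for the new include/uapi/scsi/scsi_bsg_ufs.h UAPI added a few hunks above: struct ufs_bsg_request and struct ufs_bsg_reply are meant to travel in the request/response buffers of an sg_io_v4 transaction on the UFS BSG node. The sketch below is hypothetical; it assumes the caller has already opened that node and filled in a valid msgcode and UPIU (the transaction codes themselves are not part of this header), and the guard/protocol values follow the generic BSG convention rather than anything defined here.

/*
 * Hypothetical user-space sketch, not part of this patch: wrap a
 * ufs_bsg_request/ufs_bsg_reply pair in an sg_io_v4 and submit it
 * with SG_IO.  bsg_fd is an already-open UFS BSG device node.
 */
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/types.h>
#include <linux/bsg.h>
#include <scsi/sg.h>                 /* SG_IO */
#include <scsi/scsi_bsg_ufs.h>       /* this new header, once installed */

static int ufs_bsg_io(int bsg_fd, struct ufs_bsg_request *req,
		      struct ufs_bsg_reply *rsp)
{
	struct sg_io_v4 io = {
		.guard            = 'Q',
		.protocol         = BSG_PROTOCOL_SCSI,
		.subprotocol      = BSG_SUB_PROTOCOL_SCSI_TRANSPORT,
		.request_len      = sizeof(*req),
		.request          = (uintptr_t)req,
		.max_response_len = sizeof(*rsp),
		.response         = (uintptr_t)rsp,
	};

	if (ioctl(bsg_fd, SG_IO, &io) < 0)
		return -1;

	/* Per the comment above: negative errno or a 4-byte SCSI result. */
	return (int)rsp->result;
}

On success, upiu_rsp and reply_payload_rcv_len in the reply carry the response UPIU and the amount of reply payload received, as described in the structure comments.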
diff --git a/include/xen/xen-ops.h b/include/xen/xen-ops.h index fd18c974a619..18803ff76e27 100644 --- a/include/xen/xen-ops.h +++ b/include/xen/xen-ops.h @@ -5,6 +5,7 @@ #include <linux/percpu.h> #include <linux/notifier.h> #include <linux/efi.h> +#include <xen/features.h> #include <asm/xen/interface.h> #include <xen/interface/vcpu.h> @@ -47,6 +48,10 @@ int xen_create_contiguous_region(phys_addr_t pstart, unsigned int order, dma_addr_t *dma_handle); void xen_destroy_contiguous_region(phys_addr_t pstart, unsigned int order); + +int xen_remap_pfn(struct vm_area_struct *vma, unsigned long addr, + xen_pfn_t *pfn, int nr, int *err_ptr, pgprot_t prot, + unsigned int domid, bool no_translate, struct page **pages); #else static inline int xen_create_contiguous_region(phys_addr_t pstart, unsigned int order, @@ -58,10 +63,50 @@ static inline int xen_create_contiguous_region(phys_addr_t pstart, static inline void xen_destroy_contiguous_region(phys_addr_t pstart, unsigned int order) { } + +static inline int xen_remap_pfn(struct vm_area_struct *vma, unsigned long addr, + xen_pfn_t *pfn, int nr, int *err_ptr, + pgprot_t prot, unsigned int domid, + bool no_translate, struct page **pages) +{ + BUG(); + return 0; +} #endif struct vm_area_struct; +#ifdef CONFIG_XEN_AUTO_XLATE +int xen_xlate_remap_gfn_array(struct vm_area_struct *vma, + unsigned long addr, + xen_pfn_t *gfn, int nr, + int *err_ptr, pgprot_t prot, + unsigned int domid, + struct page **pages); +int xen_xlate_unmap_gfn_range(struct vm_area_struct *vma, + int nr, struct page **pages); +#else +/* + * These two functions are called from arch/x86/xen/mmu.c and so stubs + * are needed for a configuration not specifying CONFIG_XEN_AUTO_XLATE. + */ +static inline int xen_xlate_remap_gfn_array(struct vm_area_struct *vma, + unsigned long addr, + xen_pfn_t *gfn, int nr, + int *err_ptr, pgprot_t prot, + unsigned int domid, + struct page **pages) +{ + return -EOPNOTSUPP; +} + +static inline int xen_xlate_unmap_gfn_range(struct vm_area_struct *vma, + int nr, struct page **pages) +{ + return -EOPNOTSUPP; +} +#endif + /* * xen_remap_domain_gfn_array() - map an array of foreign frames by gfn * @vma: VMA to map the pages into @@ -79,12 +124,25 @@ struct vm_area_struct; * Returns the number of successfully mapped frames, or a -ve error * code. */ -int xen_remap_domain_gfn_array(struct vm_area_struct *vma, - unsigned long addr, - xen_pfn_t *gfn, int nr, - int *err_ptr, pgprot_t prot, - unsigned domid, - struct page **pages); +static inline int xen_remap_domain_gfn_array(struct vm_area_struct *vma, + unsigned long addr, + xen_pfn_t *gfn, int nr, + int *err_ptr, pgprot_t prot, + unsigned int domid, + struct page **pages) +{ + if (xen_feature(XENFEAT_auto_translated_physmap)) + return xen_xlate_remap_gfn_array(vma, addr, gfn, nr, err_ptr, + prot, domid, pages); + + /* We BUG_ON because it's a programmer error to pass a NULL err_ptr, + * and the consequences later is quite hard to detect what the actual + * cause of "wrong memory was mapped in". + */ + BUG_ON(err_ptr == NULL); + return xen_remap_pfn(vma, addr, gfn, nr, err_ptr, prot, domid, + false, pages); +} /* * xen_remap_domain_mfn_array() - map an array of foreign frames by mfn @@ -103,10 +161,18 @@ int xen_remap_domain_gfn_array(struct vm_area_struct *vma, * Returns the number of successfully mapped frames, or a -ve error * code. 
*/ -int xen_remap_domain_mfn_array(struct vm_area_struct *vma, - unsigned long addr, xen_pfn_t *mfn, int nr, - int *err_ptr, pgprot_t prot, - unsigned int domid, struct page **pages); +static inline int xen_remap_domain_mfn_array(struct vm_area_struct *vma, + unsigned long addr, xen_pfn_t *mfn, + int nr, int *err_ptr, + pgprot_t prot, unsigned int domid, + struct page **pages) +{ + if (xen_feature(XENFEAT_auto_translated_physmap)) + return -EOPNOTSUPP; + + return xen_remap_pfn(vma, addr, mfn, nr, err_ptr, prot, domid, + true, pages); +} /* xen_remap_domain_gfn_range() - map a range of foreign frames * @vma: VMA to map the pages into @@ -120,44 +186,21 @@ int xen_remap_domain_mfn_array(struct vm_area_struct *vma, * Returns the number of successfully mapped frames, or a -ve error * code. */ -int xen_remap_domain_gfn_range(struct vm_area_struct *vma, - unsigned long addr, - xen_pfn_t gfn, int nr, - pgprot_t prot, unsigned domid, - struct page **pages); -int xen_unmap_domain_gfn_range(struct vm_area_struct *vma, - int numpgs, struct page **pages); - -#ifdef CONFIG_XEN_AUTO_XLATE -int xen_xlate_remap_gfn_array(struct vm_area_struct *vma, - unsigned long addr, - xen_pfn_t *gfn, int nr, - int *err_ptr, pgprot_t prot, - unsigned domid, - struct page **pages); -int xen_xlate_unmap_gfn_range(struct vm_area_struct *vma, - int nr, struct page **pages); -#else -/* - * These two functions are called from arch/x86/xen/mmu.c and so stubs - * are needed for a configuration not specifying CONFIG_XEN_AUTO_XLATE. - */ -static inline int xen_xlate_remap_gfn_array(struct vm_area_struct *vma, - unsigned long addr, - xen_pfn_t *gfn, int nr, - int *err_ptr, pgprot_t prot, - unsigned int domid, - struct page **pages) +static inline int xen_remap_domain_gfn_range(struct vm_area_struct *vma, + unsigned long addr, + xen_pfn_t gfn, int nr, + pgprot_t prot, unsigned int domid, + struct page **pages) { - return -EOPNOTSUPP; -} + if (xen_feature(XENFEAT_auto_translated_physmap)) + return -EOPNOTSUPP; -static inline int xen_xlate_unmap_gfn_range(struct vm_area_struct *vma, - int nr, struct page **pages) -{ - return -EOPNOTSUPP; + return xen_remap_pfn(vma, addr, &gfn, nr, NULL, prot, domid, false, + pages); } -#endif + +int xen_unmap_domain_gfn_range(struct vm_area_struct *vma, + int numpgs, struct page **pages); int xen_xlate_map_ballooned_pages(xen_pfn_t **pfns, void **vaddr, unsigned long nr_grant_frames); diff --git a/include/xen/xen.h b/include/xen/xen.h index 1e1d9bd0bd37..d7a2678da77f 100644 --- a/include/xen/xen.h +++ b/include/xen/xen.h @@ -39,4 +39,8 @@ extern uint32_t xen_start_flags; #define xen_initial_domain() (0) #endif /* CONFIG_XEN_DOM0 */ +struct bio_vec; +bool xen_biovec_phys_mergeable(const struct bio_vec *vec1, + const struct bio_vec *vec2); + #endif /* _XEN_XEN_H */ |
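As a usage illustration for the USBTMC additions in include/uapi/linux/usb/tmc.h earlier in this diff (struct usbtmc_message, USBTMC_FLAG_ASYNC and the new ioctl numbers): the hypothetical sketch below queries the driver's API version, queues an asynchronous write, and then asks for the write result. The device node name and the command string are assumptions; only the structures, flag bits and ioctl definitions come from the header.

/*
 * Hypothetical user-space sketch, not part of this patch.  Exact
 * completion semantics of the asynchronous path live in the driver;
 * this only shows how the new structures and ioctls fit together.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/types.h>
#include <linux/usb/tmc.h>

int main(void)
{
	__u32 api = 0, written = 0;
	char cmd[] = "*IDN?\n";                 /* assumed SCPI query */
	struct usbtmc_message msg = {
		.transfer_size = sizeof(cmd) - 1,
		.flags         = USBTMC_FLAG_ASYNC, /* queue and return */
		.message       = cmd,
	};

	int fd = open("/dev/usbtmc0", O_RDWR);  /* assumed node name */
	if (fd < 0)
		return 1;

	if (ioctl(fd, USBTMC_IOCTL_API_VERSION, &api) == 0)
		printf("usbtmc ioctl API version %u\n", api);

	if (ioctl(fd, USBTMC_IOCTL_WRITE, &msg) < 0)
		perror("USBTMC_IOCTL_WRITE");

	/* Later, ask how much of the queued data has been written. */
	if (ioctl(fd, USBTMC_IOCTL_WRITE_RESULT, &written) == 0)
		printf("%u of %u bytes written\n", written, msg.transfer_size);

	close(fd);
	return 0;
}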
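The include/uapi/linux/usb/video.h hunks above change only the types of multi-byte descriptor fields from __u16/__u32 to __le16/__le32; the layout is untouched, but sparse can now flag code that forgets the byte-order conversion. Kernel code that builds such descriptors (a UVC gadget, for instance) is expected to use the explicit helpers, as in this minimal kernel-side sketch with placeholder values:

/*
 * Kernel-side sketch with made-up values, not taken from this patch:
 * the __le16 fields are filled through cpu_to_le16() so the descriptor
 * is little-endian regardless of host byte order.
 */
#include <asm/byteorder.h>
#include <linux/usb/ch9.h>
#include <linux/usb/video.h>

static const struct uvc_camera_terminal_descriptor camera_terminal = {
	.bLength		= UVC_DT_CAMERA_TERMINAL_SIZE(3),
	.bDescriptorType	= USB_DT_CS_INTERFACE,
	.bDescriptorSubType	= UVC_VC_INPUT_TERMINAL,
	.bTerminalID		= 1,
	.wTerminalType		= cpu_to_le16(UVC_ITT_CAMERA),
	.wObjectiveFocalLengthMin = cpu_to_le16(0),
	.wObjectiveFocalLengthMax = cpu_to_le16(0),
	.wOcularFocalLength	= cpu_to_le16(0),
	.bControlSize		= 3,
};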
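The new VFIO_DEVICE_FLAGS_AP bit and VFIO_DEVICE_API_AP_STRING let userspace recognize an s390 vfio-ap device from VFIO_DEVICE_GET_INFO. A short sketch, with the usual container/group setup that produces device_fd omitted:

/*
 * Sketch only: device_fd is assumed to come from the normal VFIO
 * sequence (open container, add group, VFIO_GROUP_GET_DEVICE_FD).
 */
#include <sys/ioctl.h>
#include <linux/vfio.h>

static int is_vfio_ap_device(int device_fd)
{
	struct vfio_device_info info = { .argsz = sizeof(info) };

	if (ioctl(device_fd, VFIO_DEVICE_GET_INFO, &info) < 0)
		return 0;

	/* Corresponds to the "vfio-ap" API string added above. */
	return !!(info.flags & VFIO_DEVICE_FLAGS_AP);
}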