Diffstat (limited to 'include')
665 files changed, 14526 insertions, 5603 deletions
diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h index 0c23fd0548d1..a92bea7184a8 100644 --- a/include/acpi/acpi_bus.h +++ b/include/acpi/acpi_bus.h @@ -80,7 +80,7 @@ bool acpi_dev_present(const char *hid, const char *uid, s64 hrv); #ifdef CONFIG_ACPI -#include <linux/proc_fs.h> +struct proc_dir_entry; #define ACPI_BUS_FILE_ROOT "acpi" extern struct proc_dir_entry *acpi_root_dir; diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h index 8e8be989c2a6..49b519f36b69 100644 --- a/include/acpi/acpixf.h +++ b/include/acpi/acpixf.h @@ -12,7 +12,7 @@ /* Current ACPICA subsystem version in YYYYMMDD format */ -#define ACPI_CA_VERSION 0x20200110 +#define ACPI_CA_VERSION 0x20200326 #include <acpi/acconfig.h> #include <acpi/actypes.h> @@ -752,7 +752,7 @@ ACPI_HW_DEPENDENT_RETURN_UINT32(u32 acpi_dispatch_gpe(acpi_handle gpe_device, u3 ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status acpi_disable_all_gpes(void)) ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status acpi_enable_all_runtime_gpes(void)) ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status acpi_enable_all_wakeup_gpes(void)) -ACPI_HW_DEPENDENT_RETURN_UINT32(u32 acpi_any_gpe_status_set(void)) +ACPI_HW_DEPENDENT_RETURN_UINT32(u32 acpi_any_gpe_status_set(u32 gpe_skip_number)) ACPI_HW_DEPENDENT_RETURN_UINT32(u32 acpi_any_fixed_event_status_set(void)) ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status diff --git a/include/acpi/actbl1.h b/include/acpi/actbl1.h index 02d06b79e1cd..43549547ed3e 100644 --- a/include/acpi/actbl1.h +++ b/include/acpi/actbl1.h @@ -862,7 +862,7 @@ enum acpi_erst_instructions { /* Command status return values */ enum acpi_erst_command_status { - ACPI_ERST_SUCESS = 0, + ACPI_ERST_SUCCESS = 0, ACPI_ERST_NO_SPACE = 1, ACPI_ERST_NOT_AVAILABLE = 2, ACPI_ERST_FAILURE = 3, diff --git a/include/acpi/actbl2.h b/include/acpi/actbl2.h index b818ba60e19d..ec66779cb193 100644 --- a/include/acpi/actbl2.h +++ b/include/acpi/actbl2.h @@ -43,6 +43,7 @@ #define ACPI_SIG_SBST "SBST" /* Smart Battery Specification Table */ #define ACPI_SIG_SDEI "SDEI" /* Software Delegated Exception Interface Table */ #define ACPI_SIG_SDEV "SDEV" /* Secure Devices table */ +#define ACPI_SIG_NHLT "NHLT" /* Non-HDAudio Link Table */ /* * All tables must be byte-packed to match the ACPI specification, since @@ -274,7 +275,8 @@ struct acpi_ivrs_header { /* Values for subtable Type above */ enum acpi_ivrs_type { - ACPI_IVRS_TYPE_HARDWARE = 0x10, + ACPI_IVRS_TYPE_HARDWARE1 = 0x10, + ACPI_IVRS_TYPE_HARDWARE2 = 0x11, ACPI_IVRS_TYPE_MEMORY1 = 0x20, ACPI_IVRS_TYPE_MEMORY2 = 0x21, ACPI_IVRS_TYPE_MEMORY3 = 0x22 @@ -301,13 +303,26 @@ enum acpi_ivrs_type { /* 0x10: I/O Virtualization Hardware Definition Block (IVHD) */ -struct acpi_ivrs_hardware { +struct acpi_ivrs_hardware_10 { struct acpi_ivrs_header header; u16 capability_offset; /* Offset for IOMMU control fields */ u64 base_address; /* IOMMU control registers */ u16 pci_segment_group; u16 info; /* MSI number and unit ID */ - u32 reserved; + u32 feature_reporting; +}; + +/* 0x11: I/O Virtualization Hardware Definition Block (IVHD) */ + +struct acpi_ivrs_hardware_11 { + struct acpi_ivrs_header header; + u16 capability_offset; /* Offset for IOMMU control fields */ + u64 base_address; /* IOMMU control registers */ + u16 pci_segment_group; + u16 info; /* MSI number and unit ID */ + u32 attributes; + u64 efr_register_image; + u64 reserved; }; /* Masks for Info field above */ diff --git a/include/acpi/actbl3.h b/include/acpi/actbl3.h index 2bf3baf819bb..b0b163b9efc6 100644 --- a/include/acpi/actbl3.h +++ b/include/acpi/actbl3.h @@ 
-39,7 +39,7 @@ #define ACPI_SIG_WDDT "WDDT" /* Watchdog Timer Description Table */ #define ACPI_SIG_WDRT "WDRT" /* Watchdog Resource Table */ #define ACPI_SIG_WPBT "WPBT" /* Windows Platform Binary Table */ -#define ACPI_SIG_WSMT "WSMT" /* Windows SMM Security Migrations Table */ +#define ACPI_SIG_WSMT "WSMT" /* Windows SMM Security Mitigations Table */ #define ACPI_SIG_XENV "XENV" /* Xen Environment table */ #define ACPI_SIG_XXXX "XXXX" /* Intermediate AML header for ASL/ASL+ converter */ @@ -673,10 +673,10 @@ struct acpi_table_wpbt { /******************************************************************************* * - * WSMT - Windows SMM Security Migrations Table + * WSMT - Windows SMM Security Mitigations Table * Version 1 * - * Conforms to "Windows SMM Security Migrations Table", + * Conforms to "Windows SMM Security Mitigations Table", * Version 1.0, April 18, 2016 * ******************************************************************************/ diff --git a/include/acpi/actypes.h b/include/acpi/actypes.h index a2583c2bc054..4defed58ea33 100644 --- a/include/acpi/actypes.h +++ b/include/acpi/actypes.h @@ -532,11 +532,12 @@ typedef u64 acpi_integer; strnlen (a, ACPI_NAMESEG_SIZE) == ACPI_NAMESEG_SIZE) /* - * Algorithm to obtain access bit width. + * Algorithm to obtain access bit or byte width. * Can be used with access_width of struct acpi_generic_address and access_size of * struct acpi_resource_generic_register. */ #define ACPI_ACCESS_BIT_WIDTH(size) (1 << ((size) + 2)) +#define ACPI_ACCESS_BYTE_WIDTH(size) (1 << ((size) - 1)) /******************************************************************************* * diff --git a/include/acpi/acuuid.h b/include/acpi/acuuid.h index 9dd4689a39cf..9e1367b19069 100644 --- a/include/acpi/acuuid.h +++ b/include/acpi/acuuid.h @@ -57,4 +57,4 @@ #define UUID_THERMAL_EXTENSIONS "14d399cd-7a27-4b18-8fb4-7cb7b9f4e500" #define UUID_DEVICE_PROPERTIES "daffd814-6eba-4d8c-8a91-bc9bbf4aa301" -#endif /* __AUUID_H__ */ +#endif /* __ACUUID_H__ */ diff --git a/include/acpi/button.h b/include/acpi/button.h index 340da7784cc8..af2fce5d2ee3 100644 --- a/include/acpi/button.h +++ b/include/acpi/button.h @@ -2,6 +2,10 @@ #ifndef ACPI_BUTTON_H #define ACPI_BUTTON_H +#define ACPI_BUTTON_HID_POWER "PNP0C0C" +#define ACPI_BUTTON_HID_LID "PNP0C0D" +#define ACPI_BUTTON_HID_SLEEP "PNP0C0E" + #if IS_ENABLED(CONFIG_ACPI_BUTTON) extern int acpi_lid_open(void); #else diff --git a/include/acpi/processor.h b/include/acpi/processor.h index 47805172e73d..683e124ad517 100644 --- a/include/acpi/processor.h +++ b/include/acpi/processor.h @@ -297,6 +297,14 @@ static inline void acpi_processor_ffh_cstate_enter(struct acpi_processor_cx } #endif +static inline int call_on_cpu(int cpu, long (*fn)(void *), void *arg, + bool direct) +{ + if (direct || (is_percpu_thread() && cpu == smp_processor_id())) + return fn(arg); + return work_on_cpu(cpu, fn, arg); +} + /* in processor_perflib.c */ #ifdef CONFIG_CPU_FREQ diff --git a/include/asm-generic/Kbuild b/include/asm-generic/Kbuild index cd17d50697cc..36341dfded70 100644 --- a/include/asm-generic/Kbuild +++ b/include/asm-generic/Kbuild @@ -4,6 +4,58 @@ # (This file is not included when SRCARCH=um since UML borrows several # asm headers from the host architecutre.) 
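For reference only (not part of the patch): the ACPI_ACCESS_BYTE_WIDTH() macro added next to ACPI_ACCESS_BIT_WIDTH() in the actypes.h hunk above maps the GAS access_size encodings 1..4 to 1/2/4/8 bytes, mirroring the existing 8/16/32/64-bit mapping. A tiny standalone sketch that just prints that mapping, with the macros copied verbatim from the hunk:

#include <stdio.h>

/* Copies of the macros from the actypes.h hunk above. */
#define ACPI_ACCESS_BIT_WIDTH(size)  (1 << ((size) + 2))
#define ACPI_ACCESS_BYTE_WIDTH(size) (1 << ((size) - 1))

int main(void)
{
	int size;

	/* GAS access_size encodings: 1 = byte, 2 = word, 3 = dword, 4 = qword */
	for (size = 1; size <= 4; size++)
		printf("access_size %d -> %2d bits / %d bytes\n", size,
		       ACPI_ACCESS_BIT_WIDTH(size),
		       ACPI_ACCESS_BYTE_WIDTH(size));
	return 0;
}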
+mandatory-y += atomic.h +mandatory-y += barrier.h +mandatory-y += bitops.h +mandatory-y += bug.h +mandatory-y += bugs.h +mandatory-y += cacheflush.h +mandatory-y += checksum.h +mandatory-y += compat.h +mandatory-y += current.h +mandatory-y += delay.h +mandatory-y += device.h +mandatory-y += div64.h mandatory-y += dma-contiguous.h +mandatory-y += dma-mapping.h +mandatory-y += dma.h +mandatory-y += emergency-restart.h +mandatory-y += exec.h +mandatory-y += fb.h +mandatory-y += ftrace.h +mandatory-y += futex.h +mandatory-y += hardirq.h +mandatory-y += hw_irq.h +mandatory-y += io.h +mandatory-y += irq.h +mandatory-y += irq_regs.h +mandatory-y += irq_work.h +mandatory-y += kdebug.h +mandatory-y += kmap_types.h +mandatory-y += kprobes.h +mandatory-y += linkage.h +mandatory-y += local.h +mandatory-y += mm-arch-hooks.h +mandatory-y += mmiowb.h +mandatory-y += mmu.h +mandatory-y += mmu_context.h +mandatory-y += module.h mandatory-y += msi.h +mandatory-y += pci.h +mandatory-y += percpu.h +mandatory-y += pgalloc.h +mandatory-y += preempt.h +mandatory-y += sections.h +mandatory-y += serial.h +mandatory-y += shmparam.h mandatory-y += simd.h +mandatory-y += switch_to.h +mandatory-y += timex.h +mandatory-y += tlbflush.h +mandatory-y += topology.h +mandatory-y += trace_clock.h +mandatory-y += uaccess.h +mandatory-y += unaligned.h +mandatory-y += vga.h +mandatory-y += word-at-a-time.h +mandatory-y += xor.h diff --git a/include/asm-generic/bitops.h b/include/asm-generic/bitops.h index bfc96bf6606e..df9b5bc3d282 100644 --- a/include/asm-generic/bitops.h +++ b/include/asm-generic/bitops.h @@ -4,8 +4,9 @@ /* * For the benefit of those who are trying to port Linux to another - * architecture, here are some C-language equivalents. You should - * recode these in the native assembly language, if at all possible. + * architecture, here are some C-language equivalents. They should + * generate reasonable code, so take a look at what your compiler spits + * out before rolling your own buggy implementation in assembly language. 
* * C language equivalents written by Theodore Ts'o, 9/26/92 */ diff --git a/include/asm-generic/futex.h b/include/asm-generic/futex.h index 02970b11f71f..f4c3470480c7 100644 --- a/include/asm-generic/futex.h +++ b/include/asm-generic/futex.h @@ -34,7 +34,6 @@ arch_futex_atomic_op_inuser(int op, u32 oparg, int *oval, u32 __user *uaddr) u32 tmp; preempt_disable(); - pagefault_disable(); ret = -EFAULT; if (unlikely(get_user(oldval, uaddr) != 0)) @@ -67,7 +66,6 @@ arch_futex_atomic_op_inuser(int op, u32 oparg, int *oval, u32 __user *uaddr) ret = -EFAULT; out_pagefault_enable: - pagefault_enable(); preempt_enable(); if (ret == 0) diff --git a/include/asm-generic/gpio.h b/include/asm-generic/gpio.h index 19eadac415c4..aea9aee1f3e9 100644 --- a/include/asm-generic/gpio.h +++ b/include/asm-generic/gpio.h @@ -2,10 +2,8 @@ #ifndef _ASM_GENERIC_GPIO_H #define _ASM_GENERIC_GPIO_H -#include <linux/kernel.h> #include <linux/types.h> #include <linux/errno.h> -#include <linux/of.h> #ifdef CONFIG_GPIOLIB @@ -140,6 +138,8 @@ static inline void gpio_unexport(unsigned gpio) #else /* !CONFIG_GPIOLIB */ +#include <linux/kernel.h> + static inline bool gpio_is_valid(int number) { /* only non-negative numbers are valid */ diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h index e2e2bef07dd2..329b8c8ca703 100644 --- a/include/asm-generic/pgtable.h +++ b/include/asm-generic/pgtable.h @@ -10,6 +10,7 @@ #include <linux/mm_types.h> #include <linux/bug.h> #include <linux/errno.h> +#include <asm-generic/pgtable_uffd.h> #if 5 - defined(__PAGETABLE_P4D_FOLDED) - defined(__PAGETABLE_PUD_FOLDED) - \ defined(__PAGETABLE_PMD_FOLDED) != CONFIG_PGTABLE_LEVELS diff --git a/include/asm-generic/pgtable_uffd.h b/include/asm-generic/pgtable_uffd.h new file mode 100644 index 000000000000..828966d4c281 --- /dev/null +++ b/include/asm-generic/pgtable_uffd.h @@ -0,0 +1,66 @@ +#ifndef _ASM_GENERIC_PGTABLE_UFFD_H +#define _ASM_GENERIC_PGTABLE_UFFD_H + +#ifndef CONFIG_HAVE_ARCH_USERFAULTFD_WP +static __always_inline int pte_uffd_wp(pte_t pte) +{ + return 0; +} + +static __always_inline int pmd_uffd_wp(pmd_t pmd) +{ + return 0; +} + +static __always_inline pte_t pte_mkuffd_wp(pte_t pte) +{ + return pte; +} + +static __always_inline pmd_t pmd_mkuffd_wp(pmd_t pmd) +{ + return pmd; +} + +static __always_inline pte_t pte_clear_uffd_wp(pte_t pte) +{ + return pte; +} + +static __always_inline pmd_t pmd_clear_uffd_wp(pmd_t pmd) +{ + return pmd; +} + +static __always_inline pte_t pte_swp_mkuffd_wp(pte_t pte) +{ + return pte; +} + +static __always_inline int pte_swp_uffd_wp(pte_t pte) +{ + return 0; +} + +static __always_inline pte_t pte_swp_clear_uffd_wp(pte_t pte) +{ + return pte; +} + +static inline pmd_t pmd_swp_mkuffd_wp(pmd_t pmd) +{ + return pmd; +} + +static inline int pmd_swp_uffd_wp(pmd_t pmd) +{ + return 0; +} + +static inline pmd_t pmd_swp_clear_uffd_wp(pmd_t pmd) +{ + return pmd; +} +#endif /* CONFIG_HAVE_ARCH_USERFAULTFD_WP */ + +#endif /* _ASM_GENERIC_PGTABLE_UFFD_H */ diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h index f391f6b500b4..3f1649a8cf55 100644 --- a/include/asm-generic/tlb.h +++ b/include/asm-generic/tlb.h @@ -13,6 +13,7 @@ #include <linux/mmu_notifier.h> #include <linux/swap.h> +#include <linux/hugetlb_inline.h> #include <asm/pgalloc.h> #include <asm/tlbflush.h> #include <asm/cacheflush.h> @@ -398,7 +399,7 @@ tlb_update_vma_flags(struct mmu_gather *tlb, struct vm_area_struct *vma) * We rely on tlb_end_vma() to issue a flush, such that when we reset * these values the batch is 
empty. */ - tlb->vma_huge = !!(vma->vm_flags & VM_HUGETLB); + tlb->vma_huge = is_vm_hugetlb_page(vma); tlb->vma_exec = !!(vma->vm_flags & VM_EXEC); } diff --git a/include/asm-generic/vdso/vsyscall.h b/include/asm-generic/vdso/vsyscall.h index cec543d9e87b..c835607f78ae 100644 --- a/include/asm-generic/vdso/vsyscall.h +++ b/include/asm-generic/vdso/vsyscall.h @@ -11,20 +11,6 @@ static __always_inline struct vdso_data *__arch_get_k_vdso_data(void) } #endif /* __arch_get_k_vdso_data */ -#ifndef __arch_update_vdso_data -static __always_inline bool __arch_update_vdso_data(void) -{ - return true; -} -#endif /* __arch_update_vdso_data */ - -#ifndef __arch_get_clock_mode -static __always_inline int __arch_get_clock_mode(struct timekeeper *tk) -{ - return 0; -} -#endif /* __arch_get_clock_mode */ - #ifndef __arch_update_vsyscall static __always_inline void __arch_update_vsyscall(struct vdso_data *vdata, struct timekeeper *tk) diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h index e00f41aa8ec4..71e387a5fe90 100644 --- a/include/asm-generic/vmlinux.lds.h +++ b/include/asm-generic/vmlinux.lds.h @@ -535,6 +535,7 @@ \ RO_EXCEPTION_TABLE \ NOTES \ + BTF \ \ . = ALIGN((align)); \ __end_rodata = .; @@ -622,6 +623,20 @@ } /* + * .BTF + */ +#ifdef CONFIG_DEBUG_INFO_BTF +#define BTF \ + .BTF : AT(ADDR(.BTF) - LOAD_OFFSET) { \ + __start_BTF = .; \ + *(.BTF) \ + __stop_BTF = .; \ + } +#else +#define BTF +#endif + +/* * Init task */ #define INIT_TASK_DATA_SECTION(align) \ @@ -894,10 +909,17 @@ * section definitions so that such archs put those in earlier section * definitions. */ +#ifdef RUNTIME_DISCARD_EXIT +#define EXIT_DISCARDS +#else +#define EXIT_DISCARDS \ + EXIT_TEXT \ + EXIT_DATA +#endif + #define DISCARDS \ /DISCARD/ : { \ - EXIT_TEXT \ - EXIT_DATA \ + EXIT_DISCARDS \ EXIT_CALL \ *(.discard) \ *(.discard.*) \ diff --git a/include/clocksource/timer-ti-dm.h b/include/clocksource/timer-ti-dm.h index 7d9598dc578d..25f05235866e 100644 --- a/include/clocksource/timer-ti-dm.h +++ b/include/clocksource/timer-ti-dm.h @@ -105,17 +105,17 @@ struct omap_dm_timer { void __iomem *pend; /* write pending */ void __iomem *func_base; /* function register base */ + atomic_t enabled; unsigned long rate; unsigned reserved:1; unsigned posted:1; struct timer_regs context; - int (*get_context_loss_count)(struct device *); - int ctx_loss_count; int revision; u32 capability; u32 errata; struct platform_device *pdev; struct list_head node; + struct notifier_block nb; }; int omap_dm_timer_reserve_systimer(int id); diff --git a/include/crypto/aead.h b/include/crypto/aead.h index 1b3ebe8593c0..62c68550aab6 100644 --- a/include/crypto/aead.h +++ b/include/crypto/aead.h @@ -43,27 +43,33 @@ * * Memory Structure: * - * To support the needs of the most prominent user of AEAD ciphers, namely - * IPSEC, the AEAD ciphers have a special memory layout the caller must adhere - * to. - * - * The scatter list pointing to the input data must contain: - * - * * for RFC4106 ciphers, the concatenation of - * associated authentication data || IV || plaintext or ciphertext. Note, the - * same IV (buffer) is also set with the aead_request_set_crypt call. Note, - * the API call of aead_request_set_ad must provide the length of the AAD and - * the IV. The API call of aead_request_set_crypt only points to the size of - * the input plaintext or ciphertext. - * - * * for "normal" AEAD ciphers, the concatenation of - * associated authentication data || plaintext or ciphertext. 
- * - * It is important to note that if multiple scatter gather list entries form - * the input data mentioned above, the first entry must not point to a NULL - * buffer. If there is any potential where the AAD buffer can be NULL, the - * calling code must contain a precaution to ensure that this does not result - * in the first scatter gather list entry pointing to a NULL buffer. + * The source scatterlist must contain the concatenation of + * associated data || plaintext or ciphertext. + * + * The destination scatterlist has the same layout, except that the plaintext + * (resp. ciphertext) will grow (resp. shrink) by the authentication tag size + * during encryption (resp. decryption). + * + * In-place encryption/decryption is enabled by using the same scatterlist + * pointer for both the source and destination. + * + * Even in the out-of-place case, space must be reserved in the destination for + * the associated data, even though it won't be written to. This makes the + * in-place and out-of-place cases more consistent. It is permissible for the + * "destination" associated data to alias the "source" associated data. + * + * As with the other scatterlist crypto APIs, zero-length scatterlist elements + * are not allowed in the used part of the scatterlist. Thus, if there is no + * associated data, the first element must point to the plaintext/ciphertext. + * + * To meet the needs of IPsec, a special quirk applies to rfc4106, rfc4309, + * rfc4543, and rfc7539esp ciphers. For these ciphers, the final 'ivsize' bytes + * of the associated data buffer must contain a second copy of the IV. This is + * in addition to the copy passed to aead_request_set_crypt(). These two IV + * copies must not differ; different implementations of the same algorithm may + * behave differently in that case. Note that the algorithm might not actually + * treat the IV as associated data; nevertheless the length passed to + * aead_request_set_ad() must include it. 
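A minimal sketch (not part of this patch) of a caller laying out the buffers exactly as described above: AAD || plaintext in one buffer, encrypted in place so the authentication tag lands right after the ciphertext. The "gcm(aes)" algorithm, the 16-byte tag and the CRYPTO_ALG_ASYNC mask (to request a synchronous implementation) are illustrative assumptions, not something this patch mandates.

#include <crypto/aead.h>
#include <linux/err.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

static int example_gcm_encrypt(const u8 *key, unsigned int keylen,
			       u8 *buf, unsigned int assoclen,
			       unsigned int ptextlen, u8 iv[12])
{
	struct crypto_aead *tfm;
	struct aead_request *req;
	struct scatterlist sg;
	int err;

	/* Ask for a synchronous gcm(aes) implementation. */
	tfm = crypto_alloc_aead("gcm(aes)", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_aead_setkey(tfm, key, keylen);
	if (err)
		goto out_free_tfm;
	err = crypto_aead_setauthsize(tfm, 16);
	if (err)
		goto out_free_tfm;

	req = aead_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out_free_tfm;
	}
	aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);

	/* buf holds AAD || plaintext and has room for the 16-byte tag. */
	sg_init_one(&sg, buf, assoclen + ptextlen + 16);
	aead_request_set_ad(req, assoclen);
	aead_request_set_crypt(req, &sg, &sg, ptextlen, iv); /* in place */

	err = crypto_aead_encrypt(req);

	aead_request_free(req);
out_free_tfm:
	crypto_free_aead(tfm);
	return err;
}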
*/ struct crypto_aead; diff --git a/include/crypto/curve25519.h b/include/crypto/curve25519.h index 4e6dc840b159..9ecb3c1f0f15 100644 --- a/include/crypto/curve25519.h +++ b/include/crypto/curve25519.h @@ -33,7 +33,8 @@ bool __must_check curve25519(u8 mypublic[CURVE25519_KEY_SIZE], const u8 secret[CURVE25519_KEY_SIZE], const u8 basepoint[CURVE25519_KEY_SIZE]) { - if (IS_ENABLED(CONFIG_CRYPTO_ARCH_HAVE_LIB_CURVE25519)) + if (IS_ENABLED(CONFIG_CRYPTO_ARCH_HAVE_LIB_CURVE25519) && + (!IS_ENABLED(CONFIG_CRYPTO_CURVE25519_X86) || IS_ENABLED(CONFIG_AS_ADX))) curve25519_arch(mypublic, secret, basepoint); else curve25519_generic(mypublic, secret, basepoint); @@ -49,7 +50,8 @@ __must_check curve25519_generate_public(u8 pub[CURVE25519_KEY_SIZE], CURVE25519_KEY_SIZE))) return false; - if (IS_ENABLED(CONFIG_CRYPTO_ARCH_HAVE_LIB_CURVE25519)) + if (IS_ENABLED(CONFIG_CRYPTO_ARCH_HAVE_LIB_CURVE25519) && + (!IS_ENABLED(CONFIG_CRYPTO_CURVE25519_X86) || IS_ENABLED(CONFIG_AS_ADX))) curve25519_base_arch(pub, secret); else curve25519_generic(pub, secret, curve25519_base_point); diff --git a/include/crypto/if_alg.h b/include/crypto/if_alg.h index 24cfa96f98ea..56527c85d122 100644 --- a/include/crypto/if_alg.h +++ b/include/crypto/if_alg.h @@ -66,7 +66,7 @@ struct af_alg_sgl { struct af_alg_tsgl { struct list_head list; unsigned int cur; /* Last processed SG entry */ - struct scatterlist sg[0]; /* Array of SGs forming the SGL */ + struct scatterlist sg[]; /* Array of SGs forming the SGL */ }; #define MAX_SGL_ENTS ((4096 - sizeof(struct af_alg_tsgl)) / \ diff --git a/include/drm/bridge/dw_hdmi.h b/include/drm/bridge/dw_hdmi.h index 9d4d5cc47969..0b34a12c4a1c 100644 --- a/include/drm/bridge/dw_hdmi.h +++ b/include/drm/bridge/dw_hdmi.h @@ -129,6 +129,7 @@ struct dw_hdmi_plat_data { unsigned long input_bus_format; unsigned long input_bus_encoding; bool use_drm_infoframe; + bool ycbcr_420_allowed; /* Vendor PHY support */ const struct dw_hdmi_phy_ops *phy_ops; diff --git a/include/drm/bridge/mhl.h b/include/drm/bridge/mhl.h index 1cc77bf38324..d96626a0e3fa 100644 --- a/include/drm/bridge/mhl.h +++ b/include/drm/bridge/mhl.h @@ -327,13 +327,13 @@ struct mhl_burst_bits_per_pixel_fmt { struct { u8 stream_id; u8 pixel_format; - } __packed desc[0]; + } __packed desc[]; } __packed; struct mhl_burst_emsc_support { struct mhl3_burst_header hdr; u8 num_entries; - __be16 burst_id[0]; + __be16 burst_id[]; } __packed; struct mhl_burst_audio_descr { diff --git a/include/drm/drm_atomic.h b/include/drm/drm_atomic.h index 951dfb15c27b..7b6cb4774e7d 100644 --- a/include/drm/drm_atomic.h +++ b/include/drm/drm_atomic.h @@ -670,6 +670,9 @@ __drm_atomic_get_current_plane_state(struct drm_atomic_state *state, } int __must_check +drm_atomic_add_encoder_bridges(struct drm_atomic_state *state, + struct drm_encoder *encoder); +int __must_check drm_atomic_add_affected_connectors(struct drm_atomic_state *state, struct drm_crtc *crtc); int __must_check @@ -992,4 +995,77 @@ drm_atomic_crtc_effectively_active(const struct drm_crtc_state *state) return state->active || state->self_refresh_active; } +/** + * struct drm_bus_cfg - bus configuration + * + * This structure stores the configuration of a physical bus between two + * components in an output pipeline, usually between two bridges, an encoder + * and a bridge, or a bridge and a connector. + * + * The bus configuration is stored in &drm_bridge_state separately for the + * input and output buses, as seen from the point of view of each bridge. 
The + * bus configuration of a bridge output is usually identical to the + * configuration of the next bridge's input, but may differ if the signals are + * modified between the two bridges, for instance by an inverter on the board. + * The input and output configurations of a bridge may differ if the bridge + * modifies the signals internally, for instance by performing format + * conversion, or modifying signals polarities. + */ +struct drm_bus_cfg { + /** + * @format: format used on this bus (one of the MEDIA_BUS_FMT_* format) + * + * This field should not be directly modified by drivers + * (drm_atomic_bridge_chain_select_bus_fmts() takes care of the bus + * format negotiation). + */ + u32 format; + + /** + * @flags: DRM_BUS_* flags used on this bus + */ + u32 flags; +}; + +/** + * struct drm_bridge_state - Atomic bridge state object + */ +struct drm_bridge_state { + /** + * @base: inherit from &drm_private_state + */ + struct drm_private_state base; + + /** + * @bridge: the bridge this state refers to + */ + struct drm_bridge *bridge; + + /** + * @input_bus_cfg: input bus configuration + */ + struct drm_bus_cfg input_bus_cfg; + + /** + * @output_bus_cfg: input bus configuration + */ + struct drm_bus_cfg output_bus_cfg; +}; + +static inline struct drm_bridge_state * +drm_priv_to_bridge_state(struct drm_private_state *priv) +{ + return container_of(priv, struct drm_bridge_state, base); +} + +struct drm_bridge_state * +drm_atomic_get_bridge_state(struct drm_atomic_state *state, + struct drm_bridge *bridge); +struct drm_bridge_state * +drm_atomic_get_old_bridge_state(struct drm_atomic_state *state, + struct drm_bridge *bridge); +struct drm_bridge_state * +drm_atomic_get_new_bridge_state(struct drm_atomic_state *state, + struct drm_bridge *bridge); + #endif /* DRM_ATOMIC_H_ */ diff --git a/include/drm/drm_atomic_helper.h b/include/drm/drm_atomic_helper.h index 9db3cac48f4f..b268180c97eb 100644 --- a/include/drm/drm_atomic_helper.h +++ b/include/drm/drm_atomic_helper.h @@ -224,4 +224,12 @@ drm_atomic_plane_disabling(struct drm_plane_state *old_plane_state, return old_plane_state->crtc && !new_plane_state->crtc; } +u32 * +drm_atomic_helper_bridge_propagate_bus_fmt(struct drm_bridge *bridge, + struct drm_bridge_state *bridge_state, + struct drm_crtc_state *crtc_state, + struct drm_connector_state *conn_state, + u32 output_fmt, + unsigned int *num_input_fmts); + #endif /* DRM_ATOMIC_HELPER_H_ */ diff --git a/include/drm/drm_atomic_state_helper.h b/include/drm/drm_atomic_state_helper.h index 8171dea4cc22..3f8f1d627f7c 100644 --- a/include/drm/drm_atomic_state_helper.h +++ b/include/drm/drm_atomic_state_helper.h @@ -26,6 +26,8 @@ #include <linux/types.h> +struct drm_bridge; +struct drm_bridge_state; struct drm_crtc; struct drm_crtc_state; struct drm_plane; @@ -80,3 +82,14 @@ void drm_atomic_helper_connector_destroy_state(struct drm_connector *connector, struct drm_connector_state *state); void __drm_atomic_helper_private_obj_duplicate_state(struct drm_private_obj *obj, struct drm_private_state *state); + +void __drm_atomic_helper_bridge_duplicate_state(struct drm_bridge *bridge, + struct drm_bridge_state *state); +struct drm_bridge_state * +drm_atomic_helper_bridge_duplicate_state(struct drm_bridge *bridge); +void drm_atomic_helper_bridge_destroy_state(struct drm_bridge *bridge, + struct drm_bridge_state *state); +void __drm_atomic_helper_bridge_reset(struct drm_bridge *bridge, + struct drm_bridge_state *state); +struct drm_bridge_state * +drm_atomic_helper_bridge_reset(struct drm_bridge 
*bridge); diff --git a/include/drm/drm_bridge.h b/include/drm/drm_bridge.h index 694e153a7531..ea2aa5ebae34 100644 --- a/include/drm/drm_bridge.h +++ b/include/drm/drm_bridge.h @@ -23,15 +23,32 @@ #ifndef __DRM_BRIDGE_H__ #define __DRM_BRIDGE_H__ -#include <linux/list.h> #include <linux/ctype.h> +#include <linux/list.h> +#include <linux/mutex.h> + +#include <drm/drm_atomic.h> #include <drm/drm_encoder.h> #include <drm/drm_mode_object.h> #include <drm/drm_modes.h> struct drm_bridge; struct drm_bridge_timings; +struct drm_connector; struct drm_panel; +struct edid; +struct i2c_adapter; + +/** + * enum drm_bridge_attach_flags - Flags for &drm_bridge_funcs.attach + */ +enum drm_bridge_attach_flags { + /** + * @DRM_BRIDGE_ATTACH_NO_CONNECTOR: When this flag is set the bridge + * shall not create a drm_connector. + */ + DRM_BRIDGE_ATTACH_NO_CONNECTOR = BIT(0), +}; /** * struct drm_bridge_funcs - drm_bridge control functions @@ -41,7 +58,8 @@ struct drm_bridge_funcs { * @attach: * * This callback is invoked whenever our bridge is being attached to a - * &drm_encoder. + * &drm_encoder. The flags argument tunes the behaviour of the attach + * operation (see DRM_BRIDGE_ATTACH_*). * * The @attach callback is optional. * @@ -49,7 +67,8 @@ struct drm_bridge_funcs { * * Zero on success, error code on failure. */ - int (*attach)(struct drm_bridge *bridge); + int (*attach)(struct drm_bridge *bridge, + enum drm_bridge_attach_flags flags); /** * @detach: @@ -109,7 +128,9 @@ struct drm_bridge_funcs { * this function passes all other callbacks must succeed for this * configuration. * - * The @mode_fixup callback is optional. + * The mode_fixup callback is optional. &drm_bridge_funcs.mode_fixup() + * is not called when &drm_bridge_funcs.atomic_check() is implemented, + * so only one of them should be provided. * * NOTE: * @@ -263,7 +284,7 @@ struct drm_bridge_funcs { * The @atomic_pre_enable callback is optional. */ void (*atomic_pre_enable)(struct drm_bridge *bridge, - struct drm_atomic_state *old_state); + struct drm_bridge_state *old_bridge_state); /** * @atomic_enable: @@ -288,7 +309,7 @@ struct drm_bridge_funcs { * The @atomic_enable callback is optional. */ void (*atomic_enable)(struct drm_bridge *bridge, - struct drm_atomic_state *old_state); + struct drm_bridge_state *old_bridge_state); /** * @atomic_disable: * @@ -311,7 +332,7 @@ struct drm_bridge_funcs { * The @atomic_disable callback is optional. */ void (*atomic_disable)(struct drm_bridge *bridge, - struct drm_atomic_state *old_state); + struct drm_bridge_state *old_bridge_state); /** * @atomic_post_disable: @@ -337,7 +358,275 @@ struct drm_bridge_funcs { * The @atomic_post_disable callback is optional. */ void (*atomic_post_disable)(struct drm_bridge *bridge, - struct drm_atomic_state *old_state); + struct drm_bridge_state *old_bridge_state); + + /** + * @atomic_duplicate_state: + * + * Duplicate the current bridge state object (which is guaranteed to be + * non-NULL). + * + * The atomic_duplicate_state hook is mandatory if the bridge + * implements any of the atomic hooks, and should be left unassigned + * otherwise. For bridges that don't subclass &drm_bridge_state, the + * drm_atomic_helper_bridge_duplicate_state() helper function shall be + * used to implement this hook. + * + * RETURNS: + * A valid drm_bridge_state object or NULL if the allocation fails. 
+ */ + struct drm_bridge_state *(*atomic_duplicate_state)(struct drm_bridge *bridge); + + /** + * @atomic_destroy_state: + * + * Destroy a bridge state object previously allocated by + * &drm_bridge_funcs.atomic_duplicate_state(). + * + * The atomic_destroy_state hook is mandatory if the bridge implements + * any of the atomic hooks, and should be left unassigned otherwise. + * For bridges that don't subclass &drm_bridge_state, the + * drm_atomic_helper_bridge_destroy_state() helper function shall be + * used to implement this hook. + */ + void (*atomic_destroy_state)(struct drm_bridge *bridge, + struct drm_bridge_state *state); + + /** + * @atomic_get_output_bus_fmts: + * + * Return the supported bus formats on the output end of a bridge. + * The returned array must be allocated with kmalloc() and will be + * freed by the caller. If the allocation fails, NULL should be + * returned. num_output_fmts must be set to the returned array size. + * Formats listed in the returned array should be listed in decreasing + * preference order (the core will try all formats until it finds one + * that works). + * + * This method is only called on the last element of the bridge chain + * as part of the bus format negotiation process that happens in + * &drm_atomic_bridge_chain_select_bus_fmts(). + * This method is optional. When not implemented, the core will + * fall back to &drm_connector.display_info.bus_formats[0] if + * &drm_connector.display_info.num_bus_formats > 0, + * or to MEDIA_BUS_FMT_FIXED otherwise. + */ + u32 *(*atomic_get_output_bus_fmts)(struct drm_bridge *bridge, + struct drm_bridge_state *bridge_state, + struct drm_crtc_state *crtc_state, + struct drm_connector_state *conn_state, + unsigned int *num_output_fmts); + + /** + * @atomic_get_input_bus_fmts: + * + * Return the supported bus formats on the input end of a bridge for + * a specific output bus format. + * + * The returned array must be allocated with kmalloc() and will be + * freed by the caller. If the allocation fails, NULL should be + * returned. num_output_fmts must be set to the returned array size. + * Formats listed in the returned array should be listed in decreasing + * preference order (the core will try all formats until it finds one + * that works). When the format is not supported NULL should be + * returned and num_output_fmts should be set to 0. + * + * This method is called on all elements of the bridge chain as part of + * the bus format negotiation process that happens in + * drm_atomic_bridge_chain_select_bus_fmts(). + * This method is optional. When not implemented, the core will bypass + * bus format negotiation on this element of the bridge without + * failing, and the previous element in the chain will be passed + * MEDIA_BUS_FMT_FIXED as its output bus format. + * + * Bridge drivers that need to support being linked to bridges that are + * not supporting bus format negotiation should handle the + * output_fmt == MEDIA_BUS_FMT_FIXED case appropriately, by selecting a + * sensible default value or extracting this information from somewhere + * else (FW property, &drm_display_mode, &drm_display_info, ...) + * + * Note: Even if input format selection on the first bridge has no + * impact on the negotiation process (bus format negotiation stops once + * we reach the first element of the chain), drivers are expected to + * return accurate input formats as the input format may be used to + * configure the CRTC output appropriately. 
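As a sketch of the input-format negotiation hook documented above (not part of the patch; the foo_ prefix and the RGB888-only input are hypothetical), a bridge whose input bus only carries RGB888 could implement it roughly as follows. The array is kmalloc()ed and ordered by preference, as the kerneldoc above requires, and the MEDIA_BUS_FMT_FIXED case covers chains where the downstream element does not take part in negotiation.

#include <drm/drm_bridge.h>
#include <linux/media-bus-format.h>
#include <linux/slab.h>

static u32 *
foo_bridge_get_input_bus_fmts(struct drm_bridge *bridge,
			      struct drm_bridge_state *bridge_state,
			      struct drm_crtc_state *crtc_state,
			      struct drm_connector_state *conn_state,
			      u32 output_fmt,
			      unsigned int *num_input_fmts)
{
	u32 *input_fmts;

	*num_input_fmts = 0;

	/* Accept MEDIA_BUS_FMT_FIXED from bridges that don't negotiate. */
	if (output_fmt != MEDIA_BUS_FMT_RGB888_1X24 &&
	    output_fmt != MEDIA_BUS_FMT_FIXED)
		return NULL;

	input_fmts = kmalloc(sizeof(*input_fmts), GFP_KERNEL);
	if (!input_fmts)
		return NULL;

	input_fmts[0] = MEDIA_BUS_FMT_RGB888_1X24;
	*num_input_fmts = 1;
	return input_fmts;
}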
+ */ + u32 *(*atomic_get_input_bus_fmts)(struct drm_bridge *bridge, + struct drm_bridge_state *bridge_state, + struct drm_crtc_state *crtc_state, + struct drm_connector_state *conn_state, + u32 output_fmt, + unsigned int *num_input_fmts); + + /** + * @atomic_check: + * + * This method is responsible for checking bridge state correctness. + * It can also check the state of the surrounding components in chain + * to make sure the whole pipeline can work properly. + * + * &drm_bridge_funcs.atomic_check() hooks are called in reverse + * order (from the last to the first bridge). + * + * This method is optional. &drm_bridge_funcs.mode_fixup() is not + * called when &drm_bridge_funcs.atomic_check() is implemented, so only + * one of them should be provided. + * + * If drivers need to tweak &drm_bridge_state.input_bus_cfg.flags or + * &drm_bridge_state.output_bus_cfg.flags it should should happen in + * this function. By default the &drm_bridge_state.output_bus_cfg.flags + * field is set to the next bridge + * &drm_bridge_state.input_bus_cfg.flags value or + * &drm_connector.display_info.bus_flags if the bridge is the last + * element in the chain. + * + * RETURNS: + * zero if the check passed, a negative error code otherwise. + */ + int (*atomic_check)(struct drm_bridge *bridge, + struct drm_bridge_state *bridge_state, + struct drm_crtc_state *crtc_state, + struct drm_connector_state *conn_state); + + /** + * @atomic_reset: + * + * Reset the bridge to a predefined state (or retrieve its current + * state) and return a &drm_bridge_state object matching this state. + * This function is called at attach time. + * + * The atomic_reset hook is mandatory if the bridge implements any of + * the atomic hooks, and should be left unassigned otherwise. For + * bridges that don't subclass &drm_bridge_state, the + * drm_atomic_helper_bridge_reset() helper function shall be used to + * implement this hook. + * + * Note that the atomic_reset() semantics is not exactly matching the + * reset() semantics found on other components (connector, plane, ...). + * + * 1. The reset operation happens when the bridge is attached, not when + * drm_mode_config_reset() is called + * 2. It's meant to be used exclusively on bridges that have been + * converted to the ATOMIC API + * + * RETURNS: + * A valid drm_bridge_state object in case of success, an ERR_PTR() + * giving the reason of the failure otherwise. + */ + struct drm_bridge_state *(*atomic_reset)(struct drm_bridge *bridge); + + /** + * @detect: + * + * Check if anything is attached to the bridge output. + * + * This callback is optional, if not implemented the bridge will be + * considered as always having a component attached to its output. + * Bridges that implement this callback shall set the + * DRM_BRIDGE_OP_DETECT flag in their &drm_bridge->ops. + * + * RETURNS: + * + * drm_connector_status indicating the bridge output status. + */ + enum drm_connector_status (*detect)(struct drm_bridge *bridge); + + /** + * @get_modes: + * + * Fill all modes currently valid for the sink into the &drm_connector + * with drm_mode_probed_add(). + * + * The @get_modes callback is mostly intended to support non-probeable + * displays such as many fixed panels. Bridges that support reading + * EDID shall leave @get_modes unimplemented and implement the + * &drm_bridge_funcs->get_edid callback instead. + * + * This callback is optional. Bridges that implement it shall set the + * DRM_BRIDGE_OP_MODES flag in their &drm_bridge->ops. 
+ * + * The connector parameter shall be used for the sole purpose of + * filling modes, and shall not be stored internally by bridge drivers + * for future usage. + * + * RETURNS: + * + * The number of modes added by calling drm_mode_probed_add(). + */ + int (*get_modes)(struct drm_bridge *bridge, + struct drm_connector *connector); + + /** + * @get_edid: + * + * Read and parse the EDID data of the connected display. + * + * The @get_edid callback is the preferred way of reporting mode + * information for a display connected to the bridge output. Bridges + * that support reading EDID shall implement this callback and leave + * the @get_modes callback unimplemented. + * + * The caller of this operation shall first verify the output + * connection status and refrain from reading EDID from a disconnected + * output. + * + * This callback is optional. Bridges that implement it shall set the + * DRM_BRIDGE_OP_EDID flag in their &drm_bridge->ops. + * + * The connector parameter shall be used for the sole purpose of EDID + * retrieval and parsing, and shall not be stored internally by bridge + * drivers for future usage. + * + * RETURNS: + * + * An edid structure newly allocated with kmalloc() (or similar) on + * success, or NULL otherwise. The caller is responsible for freeing + * the returned edid structure with kfree(). + */ + struct edid *(*get_edid)(struct drm_bridge *bridge, + struct drm_connector *connector); + + /** + * @hpd_notify: + * + * Notify the bridge of hot plug detection. + * + * This callback is optional, it may be implemented by bridges that + * need to be notified of display connection or disconnection for + * internal reasons. One use case is to reset the internal state of CEC + * controllers for HDMI bridges. + */ + void (*hpd_notify)(struct drm_bridge *bridge, + enum drm_connector_status status); + + /** + * @hpd_enable: + * + * Enable hot plug detection. From now on the bridge shall call + * drm_bridge_hpd_notify() each time a change is detected in the output + * connection status, until hot plug detection gets disabled with + * @hpd_disable. + * + * This callback is optional and shall only be implemented by bridges + * that support hot-plug notification without polling. Bridges that + * implement it shall also implement the @hpd_disable callback and set + * the DRM_BRIDGE_OP_HPD flag in their &drm_bridge->ops. + */ + void (*hpd_enable)(struct drm_bridge *bridge); + + /** + * @hpd_disable: + * + * Disable hot plug detection. Once this function returns the bridge + * shall not call drm_bridge_hpd_notify() when a change in the output + * connection status occurs. + * + * This callback is optional and shall only be implemented by bridges + * that support hot-plug notification without polling. Bridges that + * implement it shall also implement the @hpd_enable callback and set + * the DRM_BRIDGE_OP_HPD flag in their &drm_bridge->ops. + */ + void (*hpd_disable)(struct drm_bridge *bridge); }; /** @@ -377,9 +666,44 @@ struct drm_bridge_timings { }; /** + * enum drm_bridge_ops - Bitmask of operations supported by the bridge + */ +enum drm_bridge_ops { + /** + * @DRM_BRIDGE_OP_DETECT: The bridge can detect displays connected to + * its output. Bridges that set this flag shall implement the + * &drm_bridge_funcs->detect callback. + */ + DRM_BRIDGE_OP_DETECT = BIT(0), + /** + * @DRM_BRIDGE_OP_EDID: The bridge can retrieve the EDID of the display + * connected to its output. Bridges that set this flag shall implement + * the &drm_bridge_funcs->get_edid callback. 
+ */ + DRM_BRIDGE_OP_EDID = BIT(1), + /** + * @DRM_BRIDGE_OP_HPD: The bridge can detect hot-plug and hot-unplug + * without requiring polling. Bridges that set this flag shall + * implement the &drm_bridge_funcs->hpd_enable and + * &drm_bridge_funcs->hpd_disable callbacks if they support enabling + * and disabling hot-plug detection dynamically. + */ + DRM_BRIDGE_OP_HPD = BIT(2), + /** + * @DRM_BRIDGE_OP_MODES: The bridge can retrieve the modes supported + * by the display at its output. This does not include reading EDID + * which is separately covered by @DRM_BRIDGE_OP_EDID. Bridges that set + * this flag shall implement the &drm_bridge_funcs->get_modes callback. + */ + DRM_BRIDGE_OP_MODES = BIT(3), +}; + +/** * struct drm_bridge - central DRM bridge control structure */ struct drm_bridge { + /** @base: inherit from &drm_private_object */ + struct drm_private_obj base; /** @dev: DRM device this bridge belongs to */ struct drm_device *dev; /** @encoder: encoder to which this bridge is connected */ @@ -402,13 +726,52 @@ struct drm_bridge { const struct drm_bridge_funcs *funcs; /** @driver_private: pointer to the bridge driver's internal context */ void *driver_private; + /** @ops: bitmask of operations supported by the bridge */ + enum drm_bridge_ops ops; + /** + * @type: Type of the connection at the bridge output + * (DRM_MODE_CONNECTOR_*). For bridges at the end of this chain this + * identifies the type of connected display. + */ + int type; + /** + * @interlace_allowed: Indicate that the bridge can handle interlaced + * modes. + */ + bool interlace_allowed; + /** + * @ddc: Associated I2C adapter for DDC access, if any. + */ + struct i2c_adapter *ddc; + /** private: */ + /** + * @hpd_mutex: Protects the @hpd_cb and @hpd_data fields. + */ + struct mutex hpd_mutex; + /** + * @hpd_cb: Hot plug detection callback, registered with + * drm_bridge_hpd_enable(). + */ + void (*hpd_cb)(void *data, enum drm_connector_status status); + /** + * @hpd_data: Private data passed to the Hot plug detection callback + * @hpd_cb. 
+ */ + void *hpd_data; }; +static inline struct drm_bridge * +drm_priv_to_bridge(struct drm_private_obj *priv) +{ + return container_of(priv, struct drm_bridge, base); +} + void drm_bridge_add(struct drm_bridge *bridge); void drm_bridge_remove(struct drm_bridge *bridge); struct drm_bridge *of_drm_find_bridge(struct device_node *np); int drm_bridge_attach(struct drm_encoder *encoder, struct drm_bridge *bridge, - struct drm_bridge *previous); + struct drm_bridge *previous, + enum drm_bridge_attach_flags flags); /** * drm_bridge_get_next_bridge() - Get the next bridge in the chain @@ -482,6 +845,9 @@ void drm_bridge_chain_mode_set(struct drm_bridge *bridge, void drm_bridge_chain_pre_enable(struct drm_bridge *bridge); void drm_bridge_chain_enable(struct drm_bridge *bridge); +int drm_atomic_bridge_chain_check(struct drm_bridge *bridge, + struct drm_crtc_state *crtc_state, + struct drm_connector_state *conn_state); void drm_atomic_bridge_chain_disable(struct drm_bridge *bridge, struct drm_atomic_state *state); void drm_atomic_bridge_chain_post_disable(struct drm_bridge *bridge, @@ -491,6 +857,27 @@ void drm_atomic_bridge_chain_pre_enable(struct drm_bridge *bridge, void drm_atomic_bridge_chain_enable(struct drm_bridge *bridge, struct drm_atomic_state *state); +u32 * +drm_atomic_helper_bridge_propagate_bus_fmt(struct drm_bridge *bridge, + struct drm_bridge_state *bridge_state, + struct drm_crtc_state *crtc_state, + struct drm_connector_state *conn_state, + u32 output_fmt, + unsigned int *num_input_fmts); + +enum drm_connector_status drm_bridge_detect(struct drm_bridge *bridge); +int drm_bridge_get_modes(struct drm_bridge *bridge, + struct drm_connector *connector); +struct edid *drm_bridge_get_edid(struct drm_bridge *bridge, + struct drm_connector *connector); +void drm_bridge_hpd_enable(struct drm_bridge *bridge, + void (*cb)(void *data, + enum drm_connector_status status), + void *data); +void drm_bridge_hpd_disable(struct drm_bridge *bridge); +void drm_bridge_hpd_notify(struct drm_bridge *bridge, + enum drm_connector_status status); + #ifdef CONFIG_DRM_PANEL_BRIDGE struct drm_bridge *drm_panel_bridge_add(struct drm_panel *panel); struct drm_bridge *drm_panel_bridge_add_typed(struct drm_panel *panel, diff --git a/include/drm/drm_bridge_connector.h b/include/drm/drm_bridge_connector.h new file mode 100644 index 000000000000..33f6c3bbdb4a --- /dev/null +++ b/include/drm/drm_bridge_connector.h @@ -0,0 +1,18 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * Copyright (C) 2019 Laurent Pinchart <laurent.pinchart@ideasonboard.com> + */ + +#ifndef __DRM_BRIDGE_CONNECTOR_H__ +#define __DRM_BRIDGE_CONNECTOR_H__ + +struct drm_connector; +struct drm_device; +struct drm_encoder; + +void drm_bridge_connector_enable_hpd(struct drm_connector *connector); +void drm_bridge_connector_disable_hpd(struct drm_connector *connector); +struct drm_connector *drm_bridge_connector_init(struct drm_device *drm, + struct drm_encoder *encoder); + +#endif /* __DRM_BRIDGE_CONNECTOR_H__ */ diff --git a/include/drm/drm_client.h b/include/drm/drm_client.h index 5cf2c5dd8b1e..3ed5dee899fd 100644 --- a/include/drm/drm_client.h +++ b/include/drm/drm_client.h @@ -44,6 +44,11 @@ struct drm_client_funcs { * returns zero gets the privilege to restore and no more clients are * called. This callback is not called after @unregister has been called. * + * Note that the core does not guarantee exclusion against concurrent + * drm_open(). 
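A minimal sketch (not taken from the patch; the example_ names are hypothetical) of how a display driver can combine the new attach flag with the drm_bridge_connector helper declared in the new drm_bridge_connector.h above: attach the bridge chain with DRM_BRIDGE_ATTACH_NO_CONNECTOR so no bridge creates a connector, then let drm_bridge_connector_init() build one from the bridges' ops.

#include <drm/drm_bridge.h>
#include <drm/drm_bridge_connector.h>
#include <drm/drm_connector.h>
#include <linux/err.h>

static int example_attach_output(struct drm_device *drm,
				 struct drm_encoder *encoder,
				 struct drm_bridge *bridge)
{
	struct drm_connector *connector;
	int ret;

	/* No bridge in the chain creates a drm_connector of its own. */
	ret = drm_bridge_attach(encoder, bridge, NULL,
				DRM_BRIDGE_ATTACH_NO_CONNECTOR);
	if (ret)
		return ret;

	/* The helper builds the connector from the bridge chain instead. */
	connector = drm_bridge_connector_init(drm, encoder);
	if (IS_ERR(connector))
		return PTR_ERR(connector);

	return drm_connector_attach_encoder(connector, encoder);
}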
Clients need to ensure this themselves, for example by + * using drm_master_internal_acquire() and + * drm_master_internal_release(). + * * This callback is optional. */ int (*restore)(struct drm_client_dev *client); @@ -156,7 +161,7 @@ int drm_client_modeset_create(struct drm_client_dev *client); void drm_client_modeset_free(struct drm_client_dev *client); int drm_client_modeset_probe(struct drm_client_dev *client, unsigned int width, unsigned int height); bool drm_client_rotation(struct drm_mode_set *modeset, unsigned int *rotation); -int drm_client_modeset_commit_force(struct drm_client_dev *client); +int drm_client_modeset_commit_locked(struct drm_client_dev *client); int drm_client_modeset_commit(struct drm_client_dev *client); int drm_client_modeset_dpms(struct drm_client_dev *client, int mode); diff --git a/include/drm/drm_connector.h b/include/drm/drm_connector.h index 221910948b37..19ae6bb5c85b 100644 --- a/include/drm/drm_connector.h +++ b/include/drm/drm_connector.h @@ -254,6 +254,23 @@ enum drm_panel_orientation { DRM_MODE_PANEL_ORIENTATION_RIGHT_UP, }; +/** + * struct drm_monitor_range_info - Panel's Monitor range in EDID for + * &drm_display_info + * + * This struct is used to store a frequency range supported by panel + * as parsed from EDID's detailed monitor range descriptor block. + * + * @min_vfreq: This is the min supported refresh rate in Hz from + * EDID's detailed monitor range. + * @max_vfreq: This is the max supported refresh rate in Hz from + * EDID's detailed monitor range + */ +struct drm_monitor_range_info { + u8 min_vfreq; + u8 max_vfreq; +}; + /* * This is a consolidated colorimetry list supported by HDMI and * DP protocol standard. The respective connectors will register @@ -435,6 +452,14 @@ struct drm_display_info { bool dvi_dual; /** + * @is_hdmi: True if the sink is an HDMI device. + * + * This field shall be used instead of calling + * drm_detect_hdmi_monitor() when possible. + */ + bool is_hdmi; + + /** * @has_hdmi_infoframe: Does the sink support the HDMI infoframe? */ bool has_hdmi_infoframe; @@ -465,6 +490,11 @@ struct drm_display_info { * @non_desktop: Non desktop display (HMD). */ bool non_desktop; + + /** + * @monitor_range: Frequency range supported by monitor range descriptor + */ + struct drm_monitor_range_info monitor_range; }; int drm_display_info_set_bus_formats(struct drm_display_info *info, @@ -1357,6 +1387,12 @@ struct drm_connector { * rev1.1 4.2.2.6 */ bool edid_corrupt; + /** + * @real_edid_checksum: real edid checksum for corrupted edid block. 
+ * Required in Displayport 1.4 compliance testing + * rev1.1 4.2.2.6 + */ + u8 real_edid_checksum; /** @debugfs_entry: debugfs directory for this connector */ struct dentry *debugfs_entry; @@ -1512,6 +1548,7 @@ drm_connector_is_unregistered(struct drm_connector *connector) DRM_CONNECTOR_UNREGISTERED; } +const char *drm_get_connector_type_name(unsigned int connector_type); const char *drm_get_connector_status_name(enum drm_connector_status status); const char *drm_get_subpixel_order_name(enum subpixel_order order); const char *drm_get_dpms_name(int val); @@ -1552,8 +1589,13 @@ void drm_connector_set_link_status_property(struct drm_connector *connector, uint64_t link_status); void drm_connector_set_vrr_capable_property( struct drm_connector *connector, bool capable); -int drm_connector_init_panel_orientation_property( - struct drm_connector *connector, int width, int height); +int drm_connector_set_panel_orientation( + struct drm_connector *connector, + enum drm_panel_orientation panel_orientation); +int drm_connector_set_panel_orientation_with_quirk( + struct drm_connector *connector, + enum drm_panel_orientation panel_orientation, + int width, int height); int drm_connector_attach_max_bpc_property(struct drm_connector *connector, int min, int max); diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h index 5e9b15a0e8c5..59b51a09cae6 100644 --- a/include/drm/drm_crtc.h +++ b/include/drm/drm_crtc.h @@ -174,12 +174,25 @@ struct drm_crtc_state { * @no_vblank: * * Reflects the ability of a CRTC to send VBLANK events. This state - * usually depends on the pipeline configuration, and the main usuage - * is CRTCs feeding a writeback connector operating in oneshot mode. - * In this case the VBLANK event is only generated when a job is queued - * to the writeback connector, and we want the core to fake VBLANK - * events when this part of the pipeline hasn't changed but others had - * or when the CRTC and connectors are being disabled. + * usually depends on the pipeline configuration. If set to true, DRM + * atomic helpers will send out a fake VBLANK event during display + * updates after all hardware changes have been committed. This is + * implemented in drm_atomic_helper_fake_vblank(). + * + * One usage is for drivers and/or hardware without support for VBLANK + * interrupts. Such drivers typically do not initialize vblanking + * (i.e., call drm_vblank_init() with the number of CRTCs). For CRTCs + * without initialized vblanking, this field is set to true in + * drm_atomic_helper_check_modeset(), and a fake VBLANK event will be + * send out on each update of the display pipeline by + * drm_atomic_helper_fake_vblank(). + * + * Another usage is CRTCs feeding a writeback connector operating in + * oneshot mode. In this case the fake VBLANK event is only generated + * when a job is queued to the writeback connector, and we want the + * core to fake VBLANK events when this part of the pipeline hasn't + * changed but others had or when the CRTC and connectors are being + * disabled. * * __drm_atomic_helper_crtc_duplicate_state() will not reset the value * from the current state, the CRTC driver is then responsible for @@ -335,7 +348,14 @@ struct drm_crtc_state { * - Events for disabled CRTCs are not allowed, and drivers can ignore * that case. 
* - * This can be handled by the drm_crtc_send_vblank_event() function, + * For very simple hardware without VBLANK interrupt, enabling + * &struct drm_crtc_state.no_vblank makes DRM's atomic commit helpers + * send a fake VBLANK event at the end of the display update after all + * hardware changes have been applied. See + * drm_atomic_helper_fake_vblank(). + * + * For more complex hardware this + * can be handled by the drm_crtc_send_vblank_event() function, * which the driver should call on the provided event upon completion of * the atomic commit. Note that if the driver supports vblank signalling * and timestamping the vblank counters and timestamps must agree with @@ -867,6 +887,47 @@ struct drm_crtc_funcs { * new drivers as the replacement of &drm_driver.disable_vblank hook. */ void (*disable_vblank)(struct drm_crtc *crtc); + + /** + * @get_vblank_timestamp: + * + * Called by drm_get_last_vbltimestamp(). Should return a precise + * timestamp when the most recent vblank interval ended or will end. + * + * Specifically, the timestamp in @vblank_time should correspond as + * closely as possible to the time when the first video scanline of + * the video frame after the end of vblank will start scanning out, + * the time immediately after end of the vblank interval. If the + * @crtc is currently inside vblank, this will be a time in the future. + * If the @crtc is currently scanning out a frame, this will be the + * past start time of the current scanout. This is meant to adhere + * to the OpenML OML_sync_control extension specification. + * + * Parameters: + * + * crtc: + * CRTC for which timestamp should be returned. + * max_error: + * Maximum allowable timestamp error in nanoseconds. + * Implementation should strive to provide timestamp + * with an error of at most max_error nanoseconds. + * Returns true upper bound on error for timestamp. + * vblank_time: + * Target location for returned vblank timestamp. + * in_vblank_irq: + * True when called from drm_crtc_handle_vblank(). Some drivers + * need to apply some workarounds for gpu-specific vblank irq quirks + * if flag is set. + * + * Returns: + * + * True on success, false on failure, which means the core should + * fallback to a simple timestamp taken in drm_crtc_handle_vblank(). + */ + bool (*get_vblank_timestamp)(struct drm_crtc *crtc, + int *max_error, + ktime_t *vblank_time, + bool in_vblank_irq); }; /** @@ -974,11 +1035,12 @@ struct drm_crtc { * Programmed mode in hw, after adjustments for encoders, crtc, panel * scaling etc. Should only be used by legacy drivers, for high * precision vblank timestamps in - * drm_calc_vbltimestamp_from_scanoutpos(). + * drm_crtc_vblank_helper_get_vblank_timestamp(). * * Note that atomic drivers should not use this, but instead use * &drm_crtc_state.adjusted_mode. And for high-precision timestamps - * drm_calc_vbltimestamp_from_scanoutpos() used &drm_vblank_crtc.hwmode, + * drm_crtc_vblank_helper_get_vblank_timestamp() used + * &drm_vblank_crtc.hwmode, * which is filled out by calling drm_calc_timestamping_constants(). */ struct drm_display_mode hwmode; diff --git a/include/drm/drm_device.h b/include/drm/drm_device.h index 1acfc3bbd3fb..bb60a949f416 100644 --- a/include/drm/drm_device.h +++ b/include/drm/drm_device.h @@ -144,7 +144,7 @@ struct drm_device { * Usage counter for outstanding files open, * protected by drm_global_mutex */ - int open_count; + atomic_t open_count; /** @filelist_mutex: Protects @filelist. 
*/ struct mutex filelist_mutex; diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h index bc04467f7c3a..c6119e4c169a 100644 --- a/include/drm/drm_dp_helper.h +++ b/include/drm/drm_dp_helper.h @@ -392,6 +392,8 @@ # define DP_DS_12BPC 2 # define DP_DS_16BPC 3 +#define DP_MAX_DOWNSTREAM_PORTS 0x10 + /* DP Forward error Correction Registers */ #define DP_FEC_CAPABILITY 0x090 /* 1.4 */ # define DP_FEC_CAPABLE (1 << 0) @@ -1457,6 +1459,9 @@ static inline ssize_t drm_dp_dpcd_writeb(struct drm_dp_aux *aux, int drm_dp_dpcd_read_link_status(struct drm_dp_aux *aux, u8 status[DP_LINK_STATUS_SIZE]); +bool drm_dp_send_real_edid_checksum(struct drm_dp_aux *aux, + u8 real_edid_checksum); + int drm_dp_downstream_max_clock(const u8 dpcd[DP_RECEIVER_CAP_SIZE], const u8 port_cap[4]); int drm_dp_downstream_max_bpc(const u8 dpcd[DP_RECEIVER_CAP_SIZE], @@ -1493,13 +1498,16 @@ struct drm_dp_desc { int drm_dp_read_desc(struct drm_dp_aux *aux, struct drm_dp_desc *desc, bool is_branch); +u32 drm_dp_get_edid_quirks(const struct edid *edid); /** * enum drm_dp_quirk - Display Port sink/branch device specific quirks * * Display Port sink and branch devices in the wild have a variety of bugs, try * to collect them here. The quirks are shared, but it's up to the drivers to - * implement workarounds for them. + * implement workarounds for them. Note that because some devices have + * unreliable OUIDs, the EDID of sinks should also be checked for quirks using + * drm_dp_get_edid_quirks(). */ enum drm_dp_quirk { /** @@ -1530,19 +1538,31 @@ enum drm_dp_quirk { * The DSC caps can be read from the physical aux instead. */ DP_DPCD_QUIRK_DSC_WITHOUT_VIRTUAL_DPCD, + /** + * @DP_QUIRK_FORCE_DPCD_BACKLIGHT: + * + * The device is telling the truth when it says that it uses DPCD + * backlight controls, even if the system's firmware disagrees. This + * quirk should be checked against both the ident and panel EDID. + * When present, the driver should honor the DPCD backlight + * capabilities advertised. + */ + DP_QUIRK_FORCE_DPCD_BACKLIGHT, }; /** * drm_dp_has_quirk() - does the DP device have a specific quirk * @desc: Device decriptor filled by drm_dp_read_desc() + * @edid_quirks: Optional quirk bitmask filled by drm_dp_get_edid_quirks() * @quirk: Quirk to query for * * Return true if DP device identified by @desc has @quirk. */ static inline bool -drm_dp_has_quirk(const struct drm_dp_desc *desc, enum drm_dp_quirk quirk) +drm_dp_has_quirk(const struct drm_dp_desc *desc, u32 edid_quirks, + enum drm_dp_quirk quirk) { - return desc->quirks & BIT(quirk); + return (desc->quirks | edid_quirks) & BIT(quirk); } #ifdef CONFIG_DRM_DP_CEC diff --git a/include/drm/drm_dp_mst_helper.h b/include/drm/drm_dp_mst_helper.h index bcb39da9adb4..3cde42b333c3 100644 --- a/include/drm/drm_dp_mst_helper.h +++ b/include/drm/drm_dp_mst_helper.h @@ -81,7 +81,7 @@ struct drm_dp_vcpi { * &drm_dp_mst_topology_mgr.base.lock. * @num_sdp_stream_sinks: Number of stream sinks. Protected by * &drm_dp_mst_topology_mgr.base.lock. - * @available_pbn: Available bandwidth for this port. Protected by + * @full_pbn: Max possible bandwidth for this port. Protected by * &drm_dp_mst_topology_mgr.base.lock. 
* @next: link to next port on this branch device * @aux: i2c aux transport to talk to device connected to this port, protected @@ -126,7 +126,7 @@ struct drm_dp_mst_port { u8 dpcd_rev; u8 num_sdp_streams; u8 num_sdp_stream_sinks; - uint16_t available_pbn; + uint16_t full_pbn; struct list_head next; /** * @mstb: the branch device connected to this port, if there is one. @@ -479,7 +479,6 @@ struct drm_dp_mst_topology_mgr; struct drm_dp_mst_topology_cbs { /* create a connector for a port */ struct drm_connector *(*add_connector)(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port, const char *path); - void (*register_connector)(struct drm_connector *connector); void (*destroy_connector)(struct drm_dp_mst_topology_mgr *mgr, struct drm_connector *connector); }; @@ -591,6 +590,11 @@ struct drm_dp_mst_topology_mgr { bool payload_id_table_cleared : 1; /** + * @is_waiting_for_dwn_reply: whether we're waiting for a down reply. + */ + bool is_waiting_for_dwn_reply : 1; + + /** * @mst_primary: Pointer to the primary/first branch device. */ struct drm_dp_mst_branch *mst_primary; @@ -620,11 +624,6 @@ struct drm_dp_mst_topology_mgr { struct mutex qlock; /** - * @is_waiting_for_dwn_reply: indicate whether is waiting for down reply - */ - bool is_waiting_for_dwn_reply; - - /** * @tx_msg_downq: List of pending down replies. */ struct list_head tx_msg_downq; @@ -635,11 +634,13 @@ struct drm_dp_mst_topology_mgr { struct mutex payload_lock; /** * @proposed_vcpis: Array of pointers for the new VCPI allocation. The - * VCPI structure itself is &drm_dp_mst_port.vcpi. + * VCPI structure itself is &drm_dp_mst_port.vcpi, and the size of + * this array is determined by @max_payloads. */ struct drm_dp_vcpi **proposed_vcpis; /** - * @payloads: Array of payloads. + * @payloads: Array of payloads. The size of this array is determined + * by @max_payloads. */ struct drm_dp_payload *payloads; /** diff --git a/include/drm/drm_drv.h b/include/drm/drm_drv.h index cf13470810a5..97109df5beac 100644 --- a/include/drm/drm_drv.h +++ b/include/drm/drm_drv.h @@ -269,159 +269,6 @@ struct drm_driver { void (*release) (struct drm_device *); /** - * @get_vblank_counter: - * - * Driver callback for fetching a raw hardware vblank counter for the - * CRTC specified with the pipe argument. If a device doesn't have a - * hardware counter, the driver can simply leave the hook as NULL. - * The DRM core will account for missed vblank events while interrupts - * where disabled based on system timestamps. - * - * Wraparound handling and loss of events due to modesetting is dealt - * with in the DRM core code, as long as drivers call - * drm_crtc_vblank_off() and drm_crtc_vblank_on() when disabling or - * enabling a CRTC. - * - * This is deprecated and should not be used by new drivers. - * Use &drm_crtc_funcs.get_vblank_counter instead. - * - * Returns: - * - * Raw vblank counter value. - */ - u32 (*get_vblank_counter) (struct drm_device *dev, unsigned int pipe); - - /** - * @enable_vblank: - * - * Enable vblank interrupts for the CRTC specified with the pipe - * argument. - * - * This is deprecated and should not be used by new drivers. - * Use &drm_crtc_funcs.enable_vblank instead. - * - * Returns: - * - * Zero on success, appropriate errno if the given @crtc's vblank - * interrupt cannot be enabled. - */ - int (*enable_vblank) (struct drm_device *dev, unsigned int pipe); - - /** - * @disable_vblank: - * - * Disable vblank interrupts for the CRTC specified with the pipe - * argument. 
- * - * This is deprecated and should not be used by new drivers. - * Use &drm_crtc_funcs.disable_vblank instead. - */ - void (*disable_vblank) (struct drm_device *dev, unsigned int pipe); - - /** - * @get_scanout_position: - * - * Called by vblank timestamping code. - * - * Returns the current display scanout position from a crtc, and an - * optional accurate ktime_get() timestamp of when position was - * measured. Note that this is a helper callback which is only used if a - * driver uses drm_calc_vbltimestamp_from_scanoutpos() for the - * @get_vblank_timestamp callback. - * - * Parameters: - * - * dev: - * DRM device. - * pipe: - * Id of the crtc to query. - * in_vblank_irq: - * True when called from drm_crtc_handle_vblank(). Some drivers - * need to apply some workarounds for gpu-specific vblank irq quirks - * if flag is set. - * vpos: - * Target location for current vertical scanout position. - * hpos: - * Target location for current horizontal scanout position. - * stime: - * Target location for timestamp taken immediately before - * scanout position query. Can be NULL to skip timestamp. - * etime: - * Target location for timestamp taken immediately after - * scanout position query. Can be NULL to skip timestamp. - * mode: - * Current display timings. - * - * Returns vpos as a positive number while in active scanout area. - * Returns vpos as a negative number inside vblank, counting the number - * of scanlines to go until end of vblank, e.g., -1 means "one scanline - * until start of active scanout / end of vblank." - * - * Returns: - * - * True on success, false if a reliable scanout position counter could - * not be read out. - * - * FIXME: - * - * Since this is a helper to implement @get_vblank_timestamp, we should - * move it to &struct drm_crtc_helper_funcs, like all the other - * helper-internal hooks. - */ - bool (*get_scanout_position) (struct drm_device *dev, unsigned int pipe, - bool in_vblank_irq, int *vpos, int *hpos, - ktime_t *stime, ktime_t *etime, - const struct drm_display_mode *mode); - - /** - * @get_vblank_timestamp: - * - * Called by drm_get_last_vbltimestamp(). Should return a precise - * timestamp when the most recent VBLANK interval ended or will end. - * - * Specifically, the timestamp in @vblank_time should correspond as - * closely as possible to the time when the first video scanline of - * the video frame after the end of VBLANK will start scanning out, - * the time immediately after end of the VBLANK interval. If the - * @crtc is currently inside VBLANK, this will be a time in the future. - * If the @crtc is currently scanning out a frame, this will be the - * past start time of the current scanout. This is meant to adhere - * to the OpenML OML_sync_control extension specification. - * - * Paramters: - * - * dev: - * dev DRM device handle. - * pipe: - * crtc for which timestamp should be returned. - * max_error: - * Maximum allowable timestamp error in nanoseconds. - * Implementation should strive to provide timestamp - * with an error of at most max_error nanoseconds. - * Returns true upper bound on error for timestamp. - * vblank_time: - * Target location for returned vblank timestamp. - * in_vblank_irq: - * True when called from drm_crtc_handle_vblank(). Some drivers - * need to apply some workarounds for gpu-specific vblank irq quirks - * if flag is set. - * - * Returns: - * - * True on success, false on failure, which means the core should - * fallback to a simple timestamp taken in drm_crtc_handle_vblank(). 
- * - * FIXME: - * - * We should move this hook to &struct drm_crtc_funcs like all the other - * vblank hooks. - */ - bool (*get_vblank_timestamp) (struct drm_device *dev, unsigned int pipe, - int *max_error, - ktime_t *vblank_time, - bool in_vblank_irq); - - /** * @irq_handler: * * Interrupt handler called when using drm_irq_install(). Not used by @@ -458,20 +305,6 @@ struct drm_driver { void (*irq_uninstall) (struct drm_device *dev); /** - * @master_create: - * - * Called whenever a new master is created. Only used by vmwgfx. - */ - int (*master_create)(struct drm_device *dev, struct drm_master *master); - - /** - * @master_destroy: - * - * Called whenever a master is destroyed. Only used by vmwgfx. - */ - void (*master_destroy)(struct drm_device *dev, struct drm_master *master); - - /** * @master_set: * * Called whenever the minor master is set. Only used by vmwgfx. @@ -775,6 +608,9 @@ struct drm_driver { int (*dma_ioctl) (struct drm_device *dev, void *data, struct drm_file *file_priv); int (*dma_quiescent) (struct drm_device *); int (*context_dtor) (struct drm_device *dev, int context); + u32 (*get_vblank_counter)(struct drm_device *dev, unsigned int pipe); + int (*enable_vblank)(struct drm_device *dev, unsigned int pipe); + void (*disable_vblank)(struct drm_device *dev, unsigned int pipe); int dev_priv_size; }; @@ -824,6 +660,25 @@ static inline bool drm_dev_is_unplugged(struct drm_device *dev) } /** + * drm_core_check_all_features - check driver feature flags mask + * @dev: DRM device to check + * @features: feature flag(s) mask + * + * This checks @dev for driver features, see &drm_driver.driver_features, + * &drm_device.driver_features, and the various &enum drm_driver_feature flags. + * + * Returns true if all features in the @features mask are supported, false + * otherwise. + */ +static inline bool drm_core_check_all_features(const struct drm_device *dev, + u32 features) +{ + u32 supported = dev->driver->driver_features & dev->driver_features; + + return features && (supported & features) == features; +} + +/** * drm_core_check_feature - check driver feature flags * @dev: DRM device to check * @feature: feature flag @@ -833,9 +688,10 @@ static inline bool drm_dev_is_unplugged(struct drm_device *dev) * * Returns true if the @feature is supported, false otherwise. */ -static inline bool drm_core_check_feature(const struct drm_device *dev, u32 feature) +static inline bool drm_core_check_feature(const struct drm_device *dev, + enum drm_driver_feature feature) { - return dev->driver->driver_features & dev->driver_features & feature; + return drm_core_check_all_features(dev, feature); } /** diff --git a/include/drm/drm_edid.h b/include/drm/drm_edid.h index f0b03d401c27..34b15e3d070c 100644 --- a/include/drm/drm_edid.h +++ b/include/drm/drm_edid.h @@ -91,6 +91,11 @@ struct detailed_data_string { u8 str[13]; } __attribute__((packed)); +#define DRM_EDID_DEFAULT_GTF_SUPPORT_FLAG 0x00 +#define DRM_EDID_RANGE_LIMITS_ONLY_FLAG 0x01 +#define DRM_EDID_SECONDARY_GTF_SUPPORT_FLAG 0x02 +#define DRM_EDID_CVT_SUPPORT_FLAG 0x04 + struct detailed_data_monitor_range { u8 min_vfreq; u8 max_vfreq; diff --git a/include/drm/drm_encoder.h b/include/drm/drm_encoder.h index 5623994b6e9e..4370e039c015 100644 --- a/include/drm/drm_encoder.h +++ b/include/drm/drm_encoder.h @@ -174,7 +174,8 @@ struct drm_encoder { struct drm_crtc *crtc; /** - * @bridge_chain: Bridges attached to this encoder. + * @bridge_chain: Bridges attached to this encoder. Drivers shall not + * access this field directly. 
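A brief sketch of the new drm_core_check_all_features() helper in use, guarding a path that needs both modesetting and atomic support; the surrounding function is hypothetical, while the feature flags are existing &enum drm_driver_feature values:

static int foo_check_atomic_support(struct drm_device *dev)
{
	/*
	 * True only if every requested flag is set in the driver's
	 * feature mask and not masked off for this particular device.
	 */
	if (!drm_core_check_all_features(dev, DRIVER_MODESET | DRIVER_ATOMIC))
		return -EOPNOTSUPP;

	return 0;
}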
*/ struct list_head bridge_chain; diff --git a/include/drm/drm_fb_helper.h b/include/drm/drm_fb_helper.h index 1c6633da0f91..208dbf87afa3 100644 --- a/include/drm/drm_fb_helper.h +++ b/include/drm/drm_fb_helper.h @@ -213,8 +213,7 @@ drm_fb_helper_from_client(struct drm_client_dev *client) #ifdef CONFIG_DRM_FBDEV_EMULATION void drm_fb_helper_prepare(struct drm_device *dev, struct drm_fb_helper *helper, const struct drm_fb_helper_funcs *funcs); -int drm_fb_helper_init(struct drm_device *dev, - struct drm_fb_helper *helper, int max_conn); +int drm_fb_helper_init(struct drm_device *dev, struct drm_fb_helper *helper); void drm_fb_helper_fini(struct drm_fb_helper *helper); int drm_fb_helper_blank(int blank, struct fb_info *info); int drm_fb_helper_pan_display(struct fb_var_screeninfo *var, @@ -279,8 +278,7 @@ static inline void drm_fb_helper_prepare(struct drm_device *dev, } static inline int drm_fb_helper_init(struct drm_device *dev, - struct drm_fb_helper *helper, - int max_conn) + struct drm_fb_helper *helper) { /* So drivers can use it to free the struct */ helper->dev = dev; @@ -453,27 +451,6 @@ drm_fbdev_generic_setup(struct drm_device *dev, unsigned int preferred_bpp) #endif -/* TODO: There's a todo entry to remove these three */ -static inline int -drm_fb_helper_single_add_all_connectors(struct drm_fb_helper *fb_helper) -{ - return 0; -} - -static inline int -drm_fb_helper_add_one_connector(struct drm_fb_helper *fb_helper, - struct drm_connector *connector) -{ - return 0; -} - -static inline int -drm_fb_helper_remove_one_connector(struct drm_fb_helper *fb_helper, - struct drm_connector *connector) -{ - return 0; -} - /** * drm_fb_helper_remove_conflicting_framebuffers - remove firmware-configured framebuffers * @a: memory range, users of which are to be removed diff --git a/include/drm/drm_file.h b/include/drm/drm_file.h index 8b099b347817..5aaf1c4593a9 100644 --- a/include/drm/drm_file.h +++ b/include/drm/drm_file.h @@ -374,6 +374,7 @@ int drm_open(struct inode *inode, struct file *filp); ssize_t drm_read(struct file *filp, char __user *buffer, size_t count, loff_t *offset); int drm_release(struct inode *inode, struct file *filp); +int drm_release_noglobal(struct inode *inode, struct file *filp); __poll_t drm_poll(struct file *filp, struct poll_table_struct *wait); int drm_event_reserve_init_locked(struct drm_device *dev, struct drm_file *file_priv, @@ -390,4 +391,13 @@ void drm_send_event(struct drm_device *dev, struct drm_pending_event *e); struct file *mock_drm_getfile(struct drm_minor *minor, unsigned int flags); +#ifdef CONFIG_MMU +struct drm_vma_offset_manager; +unsigned long drm_get_unmapped_area(struct file *file, + unsigned long uaddr, unsigned long len, + unsigned long pgoff, unsigned long flags, + struct drm_vma_offset_manager *mgr); +#endif /* CONFIG_MMU */ + + #endif /* _DRM_FILE_H_ */ diff --git a/include/drm/drm_gem_shmem_helper.h b/include/drm/drm_gem_shmem_helper.h index e34a7b7f848a..294b2931c4cc 100644 --- a/include/drm/drm_gem_shmem_helper.h +++ b/include/drm/drm_gem_shmem_helper.h @@ -96,6 +96,11 @@ struct drm_gem_shmem_object { * The address are un-mapped when the count reaches zero. */ unsigned int vmap_use_count; + + /** + * @map_cached: map object cached (instead of using writecombine). 
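One plausible way a TTM-based driver might wire up the newly exported drm_get_unmapped_area(), sketched under the assumption of a hypothetical foo_device that owns a &struct drm_vma_offset_manager; the wrapper would then be plugged into the driver's &struct file_operations as .get_unmapped_area:

#ifdef CONFIG_MMU
static unsigned long foo_get_unmapped_area(struct file *file,
					   unsigned long uaddr,
					   unsigned long len,
					   unsigned long pgoff,
					   unsigned long flags)
{
	/* foo_device_from_file() and vma_manager are hypothetical. */
	struct foo_device *fdev = foo_device_from_file(file);

	return drm_get_unmapped_area(file, uaddr, len, pgoff, flags,
				     &fdev->vma_manager);
}
#endif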
+ */ + bool map_cached; }; #define to_drm_gem_shmem_obj(obj) \ diff --git a/include/drm/drm_gem_vram_helper.h b/include/drm/drm_gem_vram_helper.h index 573e9fd109bf..0f6e47213d8d 100644 --- a/include/drm/drm_gem_vram_helper.h +++ b/include/drm/drm_gem_vram_helper.h @@ -6,6 +6,7 @@ #include <drm/drm_file.h> #include <drm/drm_gem.h> #include <drm/drm_ioctl.h> +#include <drm/drm_modes.h> #include <drm/ttm/ttm_bo_api.h> #include <drm/ttm/ttm_bo_driver.h> #include <drm/ttm/ttm_placement.h> @@ -205,4 +206,12 @@ struct drm_vram_mm *drm_vram_helper_alloc_mm( struct drm_device *dev, uint64_t vram_base, size_t vram_size); void drm_vram_helper_release_mm(struct drm_device *dev); +/* + * Mode-config helpers + */ + +enum drm_mode_status +drm_vram_helper_mode_valid(struct drm_device *dev, + const struct drm_display_mode *mode); + #endif diff --git a/include/drm/drm_hdcp.h b/include/drm/drm_hdcp.h index 06a11202a097..c6bab4986a65 100644 --- a/include/drm/drm_hdcp.h +++ b/include/drm/drm_hdcp.h @@ -276,7 +276,7 @@ void drm_hdcp_cpu_to_be24(u8 seq_num[HDCP_2_2_SEQ_NUM_LEN], u32 val) #define DRM_HDCP_2_VRL_LENGTH_SIZE 3 #define DRM_HDCP_2_DCP_SIG_SIZE 384 #define DRM_HDCP_2_NO_OF_DEV_PLUS_RESERVED_SZ 4 -#define DRM_HDCP_2_KSV_COUNT_2_LSBITS(byte) (((byte) & 0xC) >> 6) +#define DRM_HDCP_2_KSV_COUNT_2_LSBITS(byte) (((byte) & 0xC0) >> 6) struct hdcp_srm_header { u8 srm_id; @@ -288,8 +288,8 @@ struct hdcp_srm_header { struct drm_device; struct drm_connector; -bool drm_hdcp_check_ksvs_revoked(struct drm_device *dev, - u8 *ksvs, u32 ksv_count); +int drm_hdcp_check_ksvs_revoked(struct drm_device *dev, + u8 *ksvs, u32 ksv_count); int drm_connector_attach_content_protection_property( struct drm_connector *connector, bool hdcp_content_type); void drm_hdcp_update_content_protection(struct drm_connector *connector, diff --git a/include/drm/drm_legacy.h b/include/drm/drm_legacy.h index 5745710453c8..dcef3598f49e 100644 --- a/include/drm/drm_legacy.h +++ b/include/drm/drm_legacy.h @@ -194,17 +194,11 @@ void drm_legacy_idlelock_release(struct drm_lock_data *lock); #ifdef CONFIG_PCI -void __drm_legacy_pci_free(struct drm_device *dev, drm_dma_handle_t * dmah); int drm_legacy_pci_init(struct drm_driver *driver, struct pci_driver *pdriver); void drm_legacy_pci_exit(struct drm_driver *driver, struct pci_driver *pdriver); #else -static inline void __drm_legacy_pci_free(struct drm_device *dev, - drm_dma_handle_t *dmah) -{ -} - static inline int drm_legacy_pci_init(struct drm_driver *driver, struct pci_driver *pdriver) { diff --git a/include/drm/drm_mipi_dbi.h b/include/drm/drm_mipi_dbi.h index 67c66f5ee591..33f325f5af2b 100644 --- a/include/drm/drm_mipi_dbi.h +++ b/include/drm/drm_mipi_dbi.h @@ -110,6 +110,18 @@ struct mipi_dbi_dev { unsigned int rotation; /** + * @left_offset: Horizontal offset of the display relative to the + * controller's driver array + */ + unsigned int left_offset; + + /** + * @top_offset: Vertical offset of the display relative to the + * controller's driver array + */ + unsigned int top_offset; + + /** * @backlight: backlight device (optional) */ struct backlight_device *backlight; diff --git a/include/drm/drm_mm.h b/include/drm/drm_mm.h index d7939c054259..ee8b0e80ca90 100644 --- a/include/drm/drm_mm.h +++ b/include/drm/drm_mm.h @@ -272,7 +272,7 @@ static inline bool drm_mm_node_allocated(const struct drm_mm_node *node) */ static inline bool drm_mm_initialized(const struct drm_mm *mm) { - return mm->hole_stack.next; + return READ_ONCE(mm->hole_stack.next); } /** diff --git a/include/drm/drm_modes.h 
b/include/drm/drm_modes.h index e946e20c61d8..99134d4f35eb 100644 --- a/include/drm/drm_modes.h +++ b/include/drm/drm_modes.h @@ -371,20 +371,13 @@ struct drm_display_mode { int crtc_vtotal; /** - * @private: + * @private_flags: * - * Pointer for driver private data. This can only be used for mode + * Driver private flags. private_flags can only be used for mode * objects passed to drivers in modeset operations. It shouldn't be used * by atomic drivers since they can store any additional data by * subclassing state structures. */ - int *private; - - /** - * @private_flags: - * - * Similar to @private, but just an integer. - */ int private_flags; /** diff --git a/include/drm/drm_modeset_helper_vtables.h b/include/drm/drm_modeset_helper_vtables.h index 5a87f1bd7a3f..7c20b1c8b6a7 100644 --- a/include/drm/drm_modeset_helper_vtables.h +++ b/include/drm/drm_modeset_helper_vtables.h @@ -450,6 +450,53 @@ struct drm_crtc_helper_funcs { */ void (*atomic_disable)(struct drm_crtc *crtc, struct drm_crtc_state *old_crtc_state); + + /** + * @get_scanout_position: + * + * Called by vblank timestamping code. + * + * Returns the current display scanout position from a CRTC and an + * optional accurate ktime_get() timestamp of when the position was + * measured. Note that this is a helper callback which is only used + * if a driver uses drm_crtc_vblank_helper_get_vblank_timestamp() + * for the @drm_crtc_funcs.get_vblank_timestamp callback. + * + * Parameters: + * + * crtc: + * The CRTC. + * in_vblank_irq: + * True when called from drm_crtc_handle_vblank(). Some drivers + * need to apply some workarounds for gpu-specific vblank irq + * quirks if the flag is set. + * vpos: + * Target location for current vertical scanout position. + * hpos: + * Target location for current horizontal scanout position. + * stime: + * Target location for timestamp taken immediately before + * scanout position query. Can be NULL to skip timestamp. + * etime: + * Target location for timestamp taken immediately after + * scanout position query. Can be NULL to skip timestamp. + * mode: + * Current display timings. + * + * Returns vpos as a positive number while in active scanout area. + * Returns vpos as a negative number inside vblank, counting the number + * of scanlines to go until end of vblank, e.g., -1 means "one scanline + * until start of active scanout / end of vblank." + * + * Returns: + * + * True on success, false if a reliable scanout position counter could + * not be read out. + */ + bool (*get_scanout_position)(struct drm_crtc *crtc, + bool in_vblank_irq, int *vpos, int *hpos, + ktime_t *stime, ktime_t *etime, + const struct drm_display_mode *mode); }; /** @@ -646,22 +693,6 @@ struct drm_encoder_helper_funcs { struct drm_connector_state *conn_state); /** - * @get_crtc: - * - * This callback is used by the legacy CRTC helpers to work around - * deficiencies in its own book-keeping. - * - * Do not use, use atomic helpers instead, which get the book keeping - * right. - * - * FIXME: - * - * Currently only nouveau is using this, and as soon as nouveau is - * atomic we can ditch this hook. 
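Taken together with the &drm_crtc_funcs.get_vblank_timestamp hook added earlier in this series, a driver conversion to the per-CRTC vblank hooks might look roughly like the sketch below; the foo_* names and the scanline register read are hypothetical stand-ins:

static bool foo_get_scanout_position(struct drm_crtc *crtc,
				     bool in_vblank_irq,
				     int *vpos, int *hpos,
				     ktime_t *stime, ktime_t *etime,
				     const struct drm_display_mode *mode)
{
	if (stime)
		*stime = ktime_get();

	/* Hypothetical register read; negative while inside vblank. */
	*vpos = foo_read_scanline(crtc);
	*hpos = 0;

	if (etime)
		*etime = ktime_get();

	return true;
}

static const struct drm_crtc_helper_funcs foo_crtc_helper_funcs = {
	.get_scanout_position = foo_get_scanout_position,
	/* ... enable/disable and check hooks ... */
};

static const struct drm_crtc_funcs foo_crtc_funcs = {
	/* The generic helper converts the scanout position into the
	 * high-precision timestamp described above. */
	.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
	/* ... page_flip, enable_vblank, disable_vblank, etc. ... */
};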
- */ - struct drm_crtc *(*get_crtc)(struct drm_encoder *encoder); - - /** * @detect: * * This callback can be used by drivers who want to do detection on the diff --git a/include/drm/drm_panel.h b/include/drm/drm_panel.h index 121f7aabccd1..6193cb555acc 100644 --- a/include/drm/drm_panel.h +++ b/include/drm/drm_panel.h @@ -198,7 +198,8 @@ static inline struct drm_panel *of_drm_find_panel(const struct device_node *np) } #endif -#if IS_REACHABLE(CONFIG_BACKLIGHT_CLASS_DEVICE) +#if IS_ENABLED(CONFIG_DRM_PANEL) && (IS_BUILTIN(CONFIG_BACKLIGHT_CLASS_DEVICE) || \ + (IS_MODULE(CONFIG_DRM) && IS_MODULE(CONFIG_BACKLIGHT_CLASS_DEVICE))) int drm_panel_of_backlight(struct drm_panel *panel); #else static inline int drm_panel_of_backlight(struct drm_panel *panel) diff --git a/include/drm/drm_pci.h b/include/drm/drm_pci.h index 9031e217b506..3941b0255ecf 100644 --- a/include/drm/drm_pci.h +++ b/include/drm/drm_pci.h @@ -45,10 +45,6 @@ struct drm_dma_handle *drm_pci_alloc(struct drm_device *dev, size_t size, size_t align); void drm_pci_free(struct drm_device *dev, struct drm_dma_handle * dmah); -int drm_get_pci_dev(struct pci_dev *pdev, - const struct pci_device_id *ent, - struct drm_driver *driver); - #else static inline struct drm_dma_handle *drm_pci_alloc(struct drm_device *dev, @@ -62,13 +58,6 @@ static inline void drm_pci_free(struct drm_device *dev, { } -static inline int drm_get_pci_dev(struct pci_dev *pdev, - const struct pci_device_id *ent, - struct drm_driver *driver) -{ - return -ENOSYS; -} - #endif #endif /* _DRM_PCI_H_ */ diff --git a/include/drm/drm_print.h b/include/drm/drm_print.h index 8f99d389792d..ca7cee8e728a 100644 --- a/include/drm/drm_print.h +++ b/include/drm/drm_print.h @@ -382,42 +382,6 @@ void drm_dev_dbg(const struct device *dev, enum drm_debug_category category, drm_dev_dbg(dev, DRM_UT_DRIVER, fmt, ##__VA_ARGS__) #define DRM_DEV_DEBUG_KMS(dev, fmt, ...) \ drm_dev_dbg(dev, DRM_UT_KMS, fmt, ##__VA_ARGS__) -#define DRM_DEV_DEBUG_PRIME(dev, fmt, ...) \ - drm_dev_dbg(dev, DRM_UT_PRIME, fmt, ##__VA_ARGS__) -#define DRM_DEV_DEBUG_ATOMIC(dev, fmt, ...) \ - drm_dev_dbg(dev, DRM_UT_ATOMIC, fmt, ##__VA_ARGS__) -#define DRM_DEV_DEBUG_VBL(dev, fmt, ...) \ - drm_dev_dbg(dev, DRM_UT_VBL, fmt, ##__VA_ARGS__) -#define DRM_DEV_DEBUG_DP(dev, fmt, ...) \ - drm_dev_dbg(dev, DRM_UT_DP, fmt, ## __VA_ARGS__) - -#define _DRM_DEV_DEFINE_DEBUG_RATELIMITED(dev, category, fmt, ...) \ -({ \ - static DEFINE_RATELIMIT_STATE(_rs, \ - DEFAULT_RATELIMIT_INTERVAL, \ - DEFAULT_RATELIMIT_BURST); \ - if (__ratelimit(&_rs)) \ - drm_dev_dbg(dev, category, fmt, ##__VA_ARGS__); \ -}) - -/** - * Rate limited debug output. Like DRM_DEBUG() but won't flood the log. - * - * @dev: device pointer - * @fmt: printf() like format string. - */ -#define DRM_DEV_DEBUG_RATELIMITED(dev, fmt, ...) \ - _DEV_DRM_DEFINE_DEBUG_RATELIMITED(dev, DRM_UT_CORE, \ - fmt, ##__VA_ARGS__) -#define DRM_DEV_DEBUG_DRIVER_RATELIMITED(dev, fmt, ...) \ - _DRM_DEV_DEFINE_DEBUG_RATELIMITED(dev, DRM_UT_DRIVER, \ - fmt, ##__VA_ARGS__) -#define DRM_DEV_DEBUG_KMS_RATELIMITED(dev, fmt, ...) \ - _DRM_DEV_DEFINE_DEBUG_RATELIMITED(dev, DRM_UT_KMS, \ - fmt, ##__VA_ARGS__) -#define DRM_DEV_DEBUG_PRIME_RATELIMITED(dev, fmt, ...) \ - _DRM_DEV_DEFINE_DEBUG_RATELIMITED(dev, DRM_UT_PRIME, \ - fmt, ##__VA_ARGS__) /* * struct drm_device based logging @@ -541,16 +505,42 @@ void __drm_err(const char *format, ...); __drm_dbg(DRM_UT_DP, fmt, ## __VA_ARGS__) -#define DRM_DEBUG_RATELIMITED(fmt, ...) 
\ - DRM_DEV_DEBUG_RATELIMITED(NULL, fmt, ##__VA_ARGS__) +#define DRM_DEBUG_KMS_RATELIMITED(fmt, ...) \ +({ \ + static DEFINE_RATELIMIT_STATE(_rs, \ + DEFAULT_RATELIMIT_INTERVAL, \ + DEFAULT_RATELIMIT_BURST); \ + if (__ratelimit(&_rs)) \ + drm_dev_dbg(NULL, DRM_UT_KMS, fmt, ##__VA_ARGS__); \ +}) + +/* + * struct drm_device based WARNs + * + * drm_WARN*() acts like WARN*(), but with the key difference of + * using device-specific information so that we know which device the + * warning originates from. + * + * Prefer drm_device based drm_WARN* over regular WARN* + */ -#define DRM_DEBUG_DRIVER_RATELIMITED(fmt, ...) \ - DRM_DEV_DEBUG_DRIVER_RATELIMITED(NULL, fmt, ##__VA_ARGS__) +/* Helper for struct drm_device based WARNs */ +#define drm_WARN(drm, condition, format, arg...) \ + WARN(condition, "%s %s: " format, \ + dev_driver_string((drm)->dev), \ + dev_name((drm)->dev), ## arg) -#define DRM_DEBUG_KMS_RATELIMITED(fmt, ...) \ - DRM_DEV_DEBUG_KMS_RATELIMITED(NULL, fmt, ##__VA_ARGS__) +#define drm_WARN_ONCE(drm, condition, format, arg...) \ + WARN_ONCE(condition, "%s %s: " format, \ + dev_driver_string((drm)->dev), \ + dev_name((drm)->dev), ## arg) + +#define drm_WARN_ON(drm, x) \ + drm_WARN((drm), (x), "%s", \ + "drm_WARN_ON(" __stringify(x) ")") -#define DRM_DEBUG_PRIME_RATELIMITED(fmt, ...) \ - DRM_DEV_DEBUG_PRIME_RATELIMITED(NULL, fmt, ##__VA_ARGS__) +#define drm_WARN_ON_ONCE(drm, x) \ + drm_WARN_ONCE((drm), (x), "%s", \ + "drm_WARN_ON_ONCE(" __stringify(x) ")") #endif /* DRM_PRINT_H_ */ diff --git a/include/drm/drm_simple_kms_helper.h b/include/drm/drm_simple_kms_helper.h index 15afee9cf049..a026375464ff 100644 --- a/include/drm/drm_simple_kms_helper.h +++ b/include/drm/drm_simple_kms_helper.h @@ -100,8 +100,11 @@ struct drm_simple_display_pipe_funcs { * This is the function drivers should submit the * &drm_pending_vblank_event from. Using either * drm_crtc_arm_vblank_event(), when the driver supports vblank - * interrupt handling, or drm_crtc_send_vblank_event() directly in case - * the hardware lacks vblank support entirely. + * interrupt handling, or drm_crtc_send_vblank_event() for the more + * complex case. In case the hardware lacks vblank support entirely, + * drivers can set &struct drm_crtc_state.no_vblank in + * &struct drm_simple_display_pipe_funcs.check and let DRM's + * atomic helper fake a vblank event. */ void (*update)(struct drm_simple_display_pipe *pipe, struct drm_plane_state *old_plane_state); @@ -178,4 +181,8 @@ int drm_simple_display_pipe_init(struct drm_device *dev, const uint64_t *format_modifiers, struct drm_connector *connector); +int drm_simple_encoder_init(struct drm_device *dev, + struct drm_encoder *encoder, + int encoder_type); + #endif /* __LINUX_DRM_SIMPLE_KMS_HELPER_H */ diff --git a/include/drm/drm_vblank.h b/include/drm/drm_vblank.h index c16c44052b3d..dd9f5b9e56e4 100644 --- a/include/drm/drm_vblank.h +++ b/include/drm/drm_vblank.h @@ -174,13 +174,13 @@ struct drm_vblank_crtc { unsigned int pipe; /** * @framedur_ns: Frame/Field duration in ns, used by - * drm_calc_vbltimestamp_from_scanoutpos() and computed by + * drm_crtc_vblank_helper_get_vblank_timestamp() and computed by * drm_calc_timestamping_constants(). */ int framedur_ns; /** * @linedur_ns: Line duration in ns, used by - * drm_calc_vbltimestamp_from_scanoutpos() and computed by + * drm_crtc_vblank_helper_get_vblank_timestamp() and computed by * drm_calc_timestamping_constants(). */ int linedur_ns; @@ -190,8 +190,8 @@ struct drm_vblank_crtc { * * Cache of the current hardware display mode.
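A quick illustration of the device-qualified warnings introduced above, assuming "drm" is a pointer to the driver's &struct drm_device and the remaining names are hypothetical:

	/* Prefixes the splat with "<driver> <device>:" so warnings stay
	 * attributable on multi-GPU systems. */
	drm_WARN_ON(drm, n_entries > ARRAY_SIZE(foo->gamma_lut));
	drm_WARN(drm, ret < 0, "page flip completion failed: %d\n", ret);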
Only valid when @enabled * is set. This is used by helpers like - * drm_calc_vbltimestamp_from_scanoutpos(). We can't just access the - * hardware mode by e.g. looking at &drm_crtc_state.adjusted_mode, + * drm_crtc_vblank_helper_get_vblank_timestamp(). We can't just access + * the hardware mode by e.g. looking at &drm_crtc_state.adjusted_mode, * because that one is really hard to get from interrupt context. */ struct drm_display_mode hwmode; @@ -206,6 +206,7 @@ struct drm_vblank_crtc { }; int drm_vblank_init(struct drm_device *dev, unsigned int num_crtcs); +bool drm_dev_has_vblank(const struct drm_device *dev); u64 drm_crtc_vblank_count(struct drm_crtc *crtc); u64 drm_crtc_vblank_count_and_time(struct drm_crtc *crtc, ktime_t *vblanktime); @@ -229,13 +230,32 @@ u64 drm_crtc_accurate_vblank_count(struct drm_crtc *crtc); void drm_vblank_restore(struct drm_device *dev, unsigned int pipe); void drm_crtc_vblank_restore(struct drm_crtc *crtc); -bool drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, - unsigned int pipe, int *max_error, - ktime_t *vblank_time, - bool in_vblank_irq); void drm_calc_timestamping_constants(struct drm_crtc *crtc, const struct drm_display_mode *mode); wait_queue_head_t *drm_crtc_vblank_waitqueue(struct drm_crtc *crtc); void drm_crtc_set_max_vblank_count(struct drm_crtc *crtc, u32 max_vblank_count); + +/* + * Helpers for struct drm_crtc_funcs + */ + +typedef bool (*drm_vblank_get_scanout_position_func)(struct drm_crtc *crtc, + bool in_vblank_irq, + int *vpos, int *hpos, + ktime_t *stime, + ktime_t *etime, + const struct drm_display_mode *mode); + +bool +drm_crtc_vblank_helper_get_vblank_timestamp_internal(struct drm_crtc *crtc, + int *max_error, + ktime_t *vblank_time, + bool in_vblank_irq, + drm_vblank_get_scanout_position_func get_scanout_position); +bool drm_crtc_vblank_helper_get_vblank_timestamp(struct drm_crtc *crtc, + int *max_error, + ktime_t *vblank_time, + bool in_vblank_irq); + #endif diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h index 589be851f8a1..26b04ff62676 100644 --- a/include/drm/gpu_scheduler.h +++ b/include/drm/gpu_scheduler.h @@ -262,7 +262,7 @@ struct drm_sched_backend_ops { * @job_list_lock: lock to protect the ring_mirror_list. * @hang_limit: once the hangs by a job crosses this limit then it is marked * guilty and it will be considered for scheduling further. - * @score: score to help loadbalancer pick a idle sched + * @num_jobs: the number of jobs in queue in the scheduler * @ready: marks if the underlying HW is ready to work * @free_guilty: A hit to time out handler to free the guilty job. 
* @@ -283,8 +283,8 @@ struct drm_gpu_scheduler { struct list_head ring_mirror_list; spinlock_t job_list_lock; int hang_limit; - atomic_t score; - bool ready; + atomic_t num_jobs; + bool ready; bool free_guilty; }; @@ -297,6 +297,10 @@ void drm_sched_fini(struct drm_gpu_scheduler *sched); int drm_sched_job_init(struct drm_sched_job *job, struct drm_sched_entity *entity, void *owner); +void drm_sched_entity_modify_sched(struct drm_sched_entity *entity, + struct drm_gpu_scheduler **sched_list, + unsigned int num_sched_list); + void drm_sched_job_cleanup(struct drm_sched_job *job); void drm_sched_wakeup(struct drm_gpu_scheduler *sched); void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad); @@ -337,5 +341,8 @@ void drm_sched_fence_finished(struct drm_sched_fence *fence); unsigned long drm_sched_suspend_timeout(struct drm_gpu_scheduler *sched); void drm_sched_resume_timeout(struct drm_gpu_scheduler *sched, unsigned long remaining); +struct drm_gpu_scheduler * +drm_sched_pick_best(struct drm_gpu_scheduler **sched_list, + unsigned int num_sched_list); #endif diff --git a/include/drm/i915_mei_hdcp_interface.h b/include/drm/i915_mei_hdcp_interface.h index 4d48de8890ca..702f613243bb 100644 --- a/include/drm/i915_mei_hdcp_interface.h +++ b/include/drm/i915_mei_hdcp_interface.h @@ -12,7 +12,6 @@ #include <linux/mutex.h> #include <linux/device.h> #include <drm/drm_hdcp.h> -#include <drm/i915_drm.h> /** * enum hdcp_port_type - HDCP port implementation type defined by ME FW diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h index 66ca49db9633..0a9d042e075a 100644 --- a/include/drm/ttm/ttm_bo_api.h +++ b/include/drm/ttm/ttm_bo_api.h @@ -135,18 +135,14 @@ struct ttm_tt; * @num_pages: Actual number of pages. * @acc_size: Accounted size for this object. * @kref: Reference count of this buffer object. When this refcount reaches - * zero, the object is put on the delayed delete list. - * @list_kref: List reference count of this buffer object. This member is - * used to avoid destruction while the buffer object is still on a list. - * Lru lists may keep one refcount, the delayed delete list, and kref != 0 - * keeps one refcount. When this refcount reaches zero, - * the object is destroyed. + * zero, the object is destroyed or put on the delayed delete list. * @mem: structure describing current placement. * @persistent_swap_storage: Usually the swap storage is deleted for buffers * pinned in physical memory. If this behaviour is not desired, this member * holds a pointer to a persistent shmem object. * @ttm: TTM structure holding system pages. * @evicted: Whether the object was evicted without user-space knowing. + * @deleted: True if the object is only a zombie and already deleted. * @lru: List head for the lru list. * @ddestroy: List head for the delayed destroy list. * @swap: List head for swap LRU list. @@ -183,9 +179,7 @@ struct ttm_buffer_object { /** * Members not needing protection. */ - struct kref kref; - struct kref list_kref; /** * Members protected by the bo::resv::reserved lock. @@ -195,6 +189,7 @@ struct ttm_buffer_object { struct file *persistent_swap_storage; struct ttm_tt *ttm; bool evicted; + bool deleted; /** * Members protected by the bdev::lru_lock. 
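A hedged sketch of how the scheduler additions above might be used when userspace changes a context's priority: the entity is re-pointed at a different scheduler list via drm_sched_entity_modify_sched(), while drm_sched_pick_best() appears intended to pick the least-loaded scheduler from such a list. The foo_* context/device structures and the per-priority arrays are hypothetical, and the list passed in is presumably expected to outlive the entity since only the pointer is stored:

static void foo_ctx_set_priority(struct foo_context *ctx,
				 enum drm_sched_priority prio)
{
	struct foo_device *fdev = ctx->fdev;

	/* Switch the entity over to the schedulers for the new priority. */
	drm_sched_entity_modify_sched(&ctx->entity,
				      fdev->sched_list[prio],
				      fdev->num_scheds[prio]);
}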
@@ -732,7 +727,8 @@ vm_fault_t ttm_bo_vm_reserve(struct ttm_buffer_object *bo, vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf, pgprot_t prot, - pgoff_t num_prefault); + pgoff_t num_prefault, + pgoff_t fault_page_size); vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf); diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h index cac7a8a0825a..c9e0fd09f4b2 100644 --- a/include/drm/ttm/ttm_bo_driver.h +++ b/include/drm/ttm/ttm_bo_driver.h @@ -210,8 +210,6 @@ struct ttm_mem_type_manager { * struct ttm_bo_driver * * @create_ttm_backend_entry: Callback to create a struct ttm_backend. - * @invalidate_caches: Callback to invalidate read caches when a buffer object - * has been evicted. * @init_mem_type: Callback to initialize a struct ttm_mem_type_manager * structure. * @evict_flags: Callback to obtain placement flags when a buffer is evicted. @@ -256,19 +254,6 @@ struct ttm_bo_driver { */ void (*ttm_tt_unpopulate)(struct ttm_tt *ttm); - /** - * struct ttm_bo_driver member invalidate_caches - * - * @bdev: the buffer object device. - * @flags: new placement of the rebound buffer object. - * - * A previosly evicted buffer has been rebound in a - * potentially new location. Tell the driver that it might - * consider invalidating read (texture) caches on the next command - * submission as a consequence. - */ - - int (*invalidate_caches)(struct ttm_bo_device *bdev, uint32_t flags); int (*init_mem_type)(struct ttm_bo_device *bdev, uint32_t type, struct ttm_mem_type_manager *man); diff --git a/include/dt-bindings/arm/coresight-cti-dt.h b/include/dt-bindings/arm/coresight-cti-dt.h new file mode 100644 index 000000000000..61e7bdf8ea6e --- /dev/null +++ b/include/dt-bindings/arm/coresight-cti-dt.h @@ -0,0 +1,37 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * This header provides constants for the defined trigger signal + * types on CoreSight CTI. 
+ */ + +#ifndef _DT_BINDINGS_ARM_CORESIGHT_CTI_DT_H +#define _DT_BINDINGS_ARM_CORESIGHT_CTI_DT_H + +#define GEN_IO 0 +#define GEN_INTREQ 1 +#define GEN_INTACK 2 +#define GEN_HALTREQ 3 +#define GEN_RESTARTREQ 4 +#define PE_EDBGREQ 5 +#define PE_DBGRESTART 6 +#define PE_CTIIRQ 7 +#define PE_PMUIRQ 8 +#define PE_DBGTRIGGER 9 +#define ETM_EXTOUT 10 +#define ETM_EXTIN 11 +#define SNK_FULL 12 +#define SNK_ACQCOMP 13 +#define SNK_FLUSHCOMP 14 +#define SNK_FLUSHIN 15 +#define SNK_TRIGIN 16 +#define STM_ASYNCOUT 17 +#define STM_TOUT_SPTE 18 +#define STM_TOUT_SW 19 +#define STM_TOUT_HETE 20 +#define STM_HWEVENT 21 +#define ELA_TSTART 22 +#define ELA_TSTOP 23 +#define ELA_DBGREQ 24 +#define CTI_TRIG_MAX 25 + +#endif /*_DT_BINDINGS_ARM_CORESIGHT_CTI_DT_H */ diff --git a/include/dt-bindings/bus/ti-sysc.h b/include/dt-bindings/bus/ti-sysc.h index babd08a1d226..76b07826ed05 100644 --- a/include/dt-bindings/bus/ti-sysc.h +++ b/include/dt-bindings/bus/ti-sysc.h @@ -18,6 +18,10 @@ #define SYSC_DRA7_MCAN_ENAWAKEUP (1 << 4) +/* PRUSS sysc found on AM33xx/AM43xx/AM57xx */ +#define SYSC_PRUSS_SUB_MWAIT (1 << 5) +#define SYSC_PRUSS_STANDBY_INIT (1 << 4) + /* SYSCONFIG STANDBYMODE/MIDLEMODE/SIDLEMODE supported by hardware */ #define SYSC_IDLE_FORCE 0 #define SYSC_IDLE_NO 1 diff --git a/include/dt-bindings/clock/dm814.h b/include/dt-bindings/clock/dm814.h index f0f04e0a249e..33b8826d936b 100644 --- a/include/dt-bindings/clock/dm814.h +++ b/include/dt-bindings/clock/dm814.h @@ -34,4 +34,9 @@ #define DM814_MMC2_CLKCTRL DM814_CLKCTRL_INDEX(0x220) #define DM814_MMC3_CLKCTRL DM814_CLKCTRL_INDEX(0x224) +/* alwon_ethernet clocks */ +#define DM814_ETHERNET_CLKCTRL_OFFSET 0x1d4 +#define DM814_ETHERNET_CLKCTRL_INDEX(offset) ((offset) - DM814_ETHERNET_CLKCTRL_OFFSET) +#define DM814_ETHERNET_CPGMAC0_CLKCTRL DM814_ETHERNET_CLKCTRL_INDEX(0x1d4) + #endif diff --git a/include/dt-bindings/clock/g12a-clkc.h b/include/dt-bindings/clock/g12a-clkc.h index 0837c1a7ae49..b0d65d73db96 100644 --- a/include/dt-bindings/clock/g12a-clkc.h +++ b/include/dt-bindings/clock/g12a-clkc.h @@ -143,5 +143,7 @@ #define CLKID_CPU1_CLK 253 #define CLKID_CPU2_CLK 254 #define CLKID_CPU3_CLK 255 +#define CLKID_SPICC0_SCLK 258 +#define CLKID_SPICC1_SCLK 261 #endif /* __G12A_CLKC_H */ diff --git a/include/dt-bindings/clock/gxbb-clkc.h b/include/dt-bindings/clock/gxbb-clkc.h index db0763e96173..4073eb7a9da1 100644 --- a/include/dt-bindings/clock/gxbb-clkc.h +++ b/include/dt-bindings/clock/gxbb-clkc.h @@ -146,5 +146,6 @@ #define CLKID_CTS_VDAC 201 #define CLKID_HDMI_TX 202 #define CLKID_HDMI 205 +#define CLKID_ACODEC 206 #endif /* __GXBB_CLKC_H */ diff --git a/include/dt-bindings/clock/imx7d-clock.h b/include/dt-bindings/clock/imx7d-clock.h index e6a670e1a3f8..1d4c0dfe0202 100644 --- a/include/dt-bindings/clock/imx7d-clock.h +++ b/include/dt-bindings/clock/imx7d-clock.h @@ -451,5 +451,6 @@ #define IMX7D_SNVS_CLK 442 #define IMX7D_CAAM_CLK 443 #define IMX7D_KPP_ROOT_CLK 444 -#define IMX7D_CLK_END 445 +#define IMX7D_PXP_CLK 445 +#define IMX7D_CLK_END 446 #endif /* __DT_BINDINGS_CLOCK_IMX7D_H */ diff --git a/include/dt-bindings/clock/imx8mm-clock.h b/include/dt-bindings/clock/imx8mm-clock.h index edeece2289f0..e63a5530aed7 100644 --- a/include/dt-bindings/clock/imx8mm-clock.h +++ b/include/dt-bindings/clock/imx8mm-clock.h @@ -265,6 +265,15 @@ #define IMX8MM_SYS_PLL2_333M_CG 244 #define IMX8MM_SYS_PLL2_500M_CG 245 -#define IMX8MM_CLK_END 246 +#define IMX8MM_CLK_M4_CORE 246 +#define IMX8MM_CLK_VPU_CORE 247 +#define IMX8MM_CLK_GPU3D_CORE 248 +#define 
IMX8MM_CLK_GPU2D_CORE 249 + +#define IMX8MM_CLK_CLKO2 250 + +#define IMX8MM_CLK_A53_CORE 251 + +#define IMX8MM_CLK_END 252 #endif diff --git a/include/dt-bindings/clock/imx8mn-clock.h b/include/dt-bindings/clock/imx8mn-clock.h index 0f2b8423ce1d..621ea0e87c67 100644 --- a/include/dt-bindings/clock/imx8mn-clock.h +++ b/include/dt-bindings/clock/imx8mn-clock.h @@ -122,8 +122,8 @@ #define IMX8MN_CLK_I2C1 105 #define IMX8MN_CLK_I2C2 106 #define IMX8MN_CLK_I2C3 107 -#define IMX8MN_CLK_I2C4 118 -#define IMX8MN_CLK_UART1 119 +#define IMX8MN_CLK_I2C4 108 +#define IMX8MN_CLK_UART1 109 #define IMX8MN_CLK_UART2 110 #define IMX8MN_CLK_UART3 111 #define IMX8MN_CLK_UART4 112 @@ -228,6 +228,12 @@ #define IMX8MN_SYS_PLL2_333M_CG 209 #define IMX8MN_SYS_PLL2_500M_CG 210 -#define IMX8MN_CLK_END 211 +#define IMX8MN_CLK_SNVS_ROOT 211 +#define IMX8MN_CLK_GPU_CORE 212 +#define IMX8MN_CLK_GPU_SHADER 213 + +#define IMX8MN_CLK_A53_CORE 214 + +#define IMX8MN_CLK_END 215 #endif diff --git a/include/dt-bindings/clock/imx8mp-clock.h b/include/dt-bindings/clock/imx8mp-clock.h index 2fab63186bca..47ab082238b4 100644 --- a/include/dt-bindings/clock/imx8mp-clock.h +++ b/include/dt-bindings/clock/imx8mp-clock.h @@ -173,7 +173,7 @@ #define IMX8MP_CLK_IPP_DO_CLKO1 164 #define IMX8MP_CLK_IPP_DO_CLKO2 165 #define IMX8MP_CLK_HDMI_FDCC_TST 166 -#define IMX8MP_CLK_HDMI_27M 167 +#define IMX8MP_CLK_HDMI_24M 167 #define IMX8MP_CLK_HDMI_REF_266M 168 #define IMX8MP_CLK_USDHC3 169 #define IMX8MP_CLK_MEDIA_CAM1_PIX 170 @@ -294,7 +294,8 @@ #define IMX8MP_CLK_DRAM_ALT_ROOT 285 #define IMX8MP_CLK_DRAM_CORE 286 #define IMX8MP_CLK_ARM 287 +#define IMX8MP_CLK_A53_CORE 288 -#define IMX8MP_CLK_END 288 +#define IMX8MP_CLK_END 289 #endif diff --git a/include/dt-bindings/clock/imx8mq-clock.h b/include/dt-bindings/clock/imx8mq-clock.h index 3bab9b21c8d7..9b8045d75b8b 100644 --- a/include/dt-bindings/clock/imx8mq-clock.h +++ b/include/dt-bindings/clock/imx8mq-clock.h @@ -424,6 +424,13 @@ #define IMX8MQ_SYS2_PLL_500M_CG 283 #define IMX8MQ_SYS2_PLL_1000M_CG 284 -#define IMX8MQ_CLK_END 285 +#define IMX8MQ_CLK_GPU_CORE 285 +#define IMX8MQ_CLK_GPU_SHADER 286 +#define IMX8MQ_CLK_M4_CORE 287 +#define IMX8MQ_CLK_VPU_CORE 288 + +#define IMX8MQ_CLK_A53_CORE 289 + +#define IMX8MQ_CLK_END 290 #endif /* __DT_BINDINGS_CLOCK_IMX8MQ_H */ diff --git a/include/dt-bindings/clock/marvell,mmp2.h b/include/dt-bindings/clock/marvell,mmp2.h index 4b1a7724f20d..06bb7fe4c62f 100644 --- a/include/dt-bindings/clock/marvell,mmp2.h +++ b/include/dt-bindings/clock/marvell,mmp2.h @@ -26,6 +26,9 @@ #define MMP2_CLK_VCTCXO_4 25 #define MMP2_CLK_UART_PLL 26 #define MMP2_CLK_USB_PLL 27 +#define MMP3_CLK_PLL1_P 28 +#define MMP3_CLK_PLL2_P 29 +#define MMP3_CLK_PLL3 30 /* apb periphrals */ #define MMP2_CLK_TWSI0 60 @@ -50,6 +53,10 @@ #define MMP2_CLK_SSP2 79 #define MMP2_CLK_SSP3 80 #define MMP2_CLK_TIMER 81 +#define MMP2_CLK_THERMAL0 82 +#define MMP3_CLK_THERMAL1 83 +#define MMP3_CLK_THERMAL2 84 +#define MMP3_CLK_THERMAL3 85 /* axi periphrals */ #define MMP2_CLK_SDH0 101 @@ -74,6 +81,12 @@ #define MMP2_CLK_DISP0_LCDC 120 #define MMP2_CLK_USBHSIC0 121 #define MMP2_CLK_USBHSIC1 122 +#define MMP2_CLK_GPU_BUS 123 +#define MMP3_CLK_GPU_BUS MMP2_CLK_GPU_BUS +#define MMP2_CLK_GPU_3D 124 +#define MMP3_CLK_GPU_3D MMP2_CLK_GPU_3D +#define MMP3_CLK_GPU_2D 125 +#define MMP3_CLK_SDH4 126 #define MMP2_NR_CLKS 200 #endif diff --git a/include/dt-bindings/clock/qcom,gcc-sc7180.h b/include/dt-bindings/clock/qcom,gcc-sc7180.h index e8029b2e92d7..1258fd05db68 100644 --- 
a/include/dt-bindings/clock/qcom,gcc-sc7180.h +++ b/include/dt-bindings/clock/qcom,gcc-sc7180.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0-only */ /* - * Copyright (c) 2019, The Linux Foundation. All rights reserved. + * Copyright (c) 2019-2020, The Linux Foundation. All rights reserved. */ #ifndef _DT_BINDINGS_CLK_QCOM_GCC_SC7180_H @@ -132,6 +132,11 @@ #define GCC_VIDEO_GPLL0_DIV_CLK_SRC 122 #define GCC_VIDEO_THROTTLE_AXI_CLK 123 #define GCC_VIDEO_XO_CLK 124 +#define GCC_MSS_CFG_AHB_CLK 125 +#define GCC_MSS_MFAB_AXIS_CLK 126 +#define GCC_MSS_NAV_AXI_CLK 127 +#define GCC_MSS_Q6_MEMNOC_AXI_CLK 128 +#define GCC_MSS_SNOC_AXI_CLK 129 /* GCC resets */ #define GCC_QUSB2PHY_PRIM_BCR 0 diff --git a/include/dt-bindings/clock/qcom,gcc-sm8150.h b/include/dt-bindings/clock/qcom,gcc-sm8150.h index 90d60ef94c64..3e1a91876610 100644 --- a/include/dt-bindings/clock/qcom,gcc-sm8150.h +++ b/include/dt-bindings/clock/qcom,gcc-sm8150.h @@ -240,4 +240,8 @@ #define GCC_USB30_SEC_BCR 27 #define GCC_USB_PHY_CFG_AHB2PHY_BCR 28 +/* GCC GDSCRs */ +#define USB30_PRIM_GDSC 4 +#define USB30_SEC_GDSC 5 + #endif diff --git a/include/dt-bindings/clock/qcom,gcc-sm8250.h b/include/dt-bindings/clock/qcom,gcc-sm8250.h new file mode 100644 index 000000000000..7b7abe327e37 --- /dev/null +++ b/include/dt-bindings/clock/qcom,gcc-sm8250.h @@ -0,0 +1,271 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2020, The Linux Foundation. All rights reserved. + */ + +#ifndef _DT_BINDINGS_CLK_QCOM_GCC_SM8250_H +#define _DT_BINDINGS_CLK_QCOM_GCC_SM8250_H + +/* GCC clocks */ +#define GPLL0 0 +#define GPLL0_OUT_EVEN 1 +#define GPLL4 2 +#define GPLL9 3 +#define GCC_AGGRE_NOC_PCIE_TBU_CLK 4 +#define GCC_AGGRE_UFS_CARD_AXI_CLK 5 +#define GCC_AGGRE_UFS_PHY_AXI_CLK 6 +#define GCC_AGGRE_USB3_PRIM_AXI_CLK 7 +#define GCC_AGGRE_USB3_SEC_AXI_CLK 8 +#define GCC_BOOT_ROM_AHB_CLK 9 +#define GCC_CAMERA_AHB_CLK 10 +#define GCC_CAMERA_HF_AXI_CLK 11 +#define GCC_CAMERA_SF_AXI_CLK 12 +#define GCC_CAMERA_XO_CLK 13 +#define GCC_CFG_NOC_USB3_PRIM_AXI_CLK 14 +#define GCC_CFG_NOC_USB3_SEC_AXI_CLK 15 +#define GCC_CPUSS_AHB_CLK 16 +#define GCC_CPUSS_AHB_CLK_SRC 17 +#define GCC_CPUSS_AHB_POSTDIV_CLK_SRC 18 +#define GCC_CPUSS_DVM_BUS_CLK 19 +#define GCC_CPUSS_RBCPR_CLK 20 +#define GCC_DDRSS_GPU_AXI_CLK 21 +#define GCC_DDRSS_PCIE_SF_TBU_CLK 22 +#define GCC_DISP_AHB_CLK 23 +#define GCC_DISP_HF_AXI_CLK 24 +#define GCC_DISP_SF_AXI_CLK 25 +#define GCC_DISP_XO_CLK 26 +#define GCC_GP1_CLK 27 +#define GCC_GP1_CLK_SRC 28 +#define GCC_GP2_CLK 29 +#define GCC_GP2_CLK_SRC 30 +#define GCC_GP3_CLK 31 +#define GCC_GP3_CLK_SRC 32 +#define GCC_GPU_CFG_AHB_CLK 33 +#define GCC_GPU_GPLL0_CLK_SRC 34 +#define GCC_GPU_GPLL0_DIV_CLK_SRC 35 +#define GCC_GPU_IREF_EN 36 +#define GCC_GPU_MEMNOC_GFX_CLK 37 +#define GCC_GPU_SNOC_DVM_GFX_CLK 38 +#define GCC_NPU_AXI_CLK 39 +#define GCC_NPU_BWMON_AXI_CLK 40 +#define GCC_NPU_BWMON_CFG_AHB_CLK 41 +#define GCC_NPU_CFG_AHB_CLK 42 +#define GCC_NPU_DMA_CLK 43 +#define GCC_NPU_GPLL0_CLK_SRC 44 +#define GCC_NPU_GPLL0_DIV_CLK_SRC 45 +#define GCC_PCIE0_PHY_REFGEN_CLK 46 +#define GCC_PCIE1_PHY_REFGEN_CLK 47 +#define GCC_PCIE2_PHY_REFGEN_CLK 48 +#define GCC_PCIE_0_AUX_CLK 49 +#define GCC_PCIE_0_AUX_CLK_SRC 50 +#define GCC_PCIE_0_CFG_AHB_CLK 51 +#define GCC_PCIE_0_MSTR_AXI_CLK 52 +#define GCC_PCIE_0_PIPE_CLK 53 +#define GCC_PCIE_0_SLV_AXI_CLK 54 +#define GCC_PCIE_0_SLV_Q2A_AXI_CLK 55 +#define GCC_PCIE_1_AUX_CLK 56 +#define GCC_PCIE_1_AUX_CLK_SRC 57 +#define GCC_PCIE_1_CFG_AHB_CLK 58 +#define GCC_PCIE_1_MSTR_AXI_CLK 59 +#define 
GCC_PCIE_1_PIPE_CLK 60 +#define GCC_PCIE_1_SLV_AXI_CLK 61 +#define GCC_PCIE_1_SLV_Q2A_AXI_CLK 62 +#define GCC_PCIE_2_AUX_CLK 63 +#define GCC_PCIE_2_AUX_CLK_SRC 64 +#define GCC_PCIE_2_CFG_AHB_CLK 65 +#define GCC_PCIE_2_MSTR_AXI_CLK 66 +#define GCC_PCIE_2_PIPE_CLK 67 +#define GCC_PCIE_2_SLV_AXI_CLK 68 +#define GCC_PCIE_2_SLV_Q2A_AXI_CLK 69 +#define GCC_PCIE_MDM_CLKREF_EN 70 +#define GCC_PCIE_PHY_AUX_CLK 71 +#define GCC_PCIE_PHY_REFGEN_CLK_SRC 72 +#define GCC_PCIE_WIFI_CLKREF_EN 73 +#define GCC_PCIE_WIGIG_CLKREF_EN 74 +#define GCC_PDM2_CLK 75 +#define GCC_PDM2_CLK_SRC 76 +#define GCC_PDM_AHB_CLK 77 +#define GCC_PDM_XO4_CLK 78 +#define GCC_PRNG_AHB_CLK 79 +#define GCC_QMIP_CAMERA_NRT_AHB_CLK 80 +#define GCC_QMIP_CAMERA_RT_AHB_CLK 81 +#define GCC_QMIP_DISP_AHB_CLK 82 +#define GCC_QMIP_VIDEO_CVP_AHB_CLK 83 +#define GCC_QMIP_VIDEO_VCODEC_AHB_CLK 84 +#define GCC_QUPV3_WRAP0_CORE_2X_CLK 85 +#define GCC_QUPV3_WRAP0_CORE_CLK 86 +#define GCC_QUPV3_WRAP0_S0_CLK 87 +#define GCC_QUPV3_WRAP0_S0_CLK_SRC 88 +#define GCC_QUPV3_WRAP0_S1_CLK 89 +#define GCC_QUPV3_WRAP0_S1_CLK_SRC 90 +#define GCC_QUPV3_WRAP0_S2_CLK 91 +#define GCC_QUPV3_WRAP0_S2_CLK_SRC 92 +#define GCC_QUPV3_WRAP0_S3_CLK 93 +#define GCC_QUPV3_WRAP0_S3_CLK_SRC 94 +#define GCC_QUPV3_WRAP0_S4_CLK 95 +#define GCC_QUPV3_WRAP0_S4_CLK_SRC 96 +#define GCC_QUPV3_WRAP0_S5_CLK 97 +#define GCC_QUPV3_WRAP0_S5_CLK_SRC 98 +#define GCC_QUPV3_WRAP0_S6_CLK 99 +#define GCC_QUPV3_WRAP0_S6_CLK_SRC 100 +#define GCC_QUPV3_WRAP0_S7_CLK 101 +#define GCC_QUPV3_WRAP0_S7_CLK_SRC 102 +#define GCC_QUPV3_WRAP1_CORE_2X_CLK 103 +#define GCC_QUPV3_WRAP1_CORE_CLK 104 +#define GCC_QUPV3_WRAP1_S0_CLK 105 +#define GCC_QUPV3_WRAP1_S0_CLK_SRC 106 +#define GCC_QUPV3_WRAP1_S1_CLK 107 +#define GCC_QUPV3_WRAP1_S1_CLK_SRC 108 +#define GCC_QUPV3_WRAP1_S2_CLK 109 +#define GCC_QUPV3_WRAP1_S2_CLK_SRC 110 +#define GCC_QUPV3_WRAP1_S3_CLK 111 +#define GCC_QUPV3_WRAP1_S3_CLK_SRC 112 +#define GCC_QUPV3_WRAP1_S4_CLK 113 +#define GCC_QUPV3_WRAP1_S4_CLK_SRC 114 +#define GCC_QUPV3_WRAP1_S5_CLK 115 +#define GCC_QUPV3_WRAP1_S5_CLK_SRC 116 +#define GCC_QUPV3_WRAP2_CORE_2X_CLK 117 +#define GCC_QUPV3_WRAP2_CORE_CLK 118 +#define GCC_QUPV3_WRAP2_S0_CLK 119 +#define GCC_QUPV3_WRAP2_S0_CLK_SRC 120 +#define GCC_QUPV3_WRAP2_S1_CLK 121 +#define GCC_QUPV3_WRAP2_S1_CLK_SRC 122 +#define GCC_QUPV3_WRAP2_S2_CLK 123 +#define GCC_QUPV3_WRAP2_S2_CLK_SRC 124 +#define GCC_QUPV3_WRAP2_S3_CLK 125 +#define GCC_QUPV3_WRAP2_S3_CLK_SRC 126 +#define GCC_QUPV3_WRAP2_S4_CLK 127 +#define GCC_QUPV3_WRAP2_S4_CLK_SRC 128 +#define GCC_QUPV3_WRAP2_S5_CLK 129 +#define GCC_QUPV3_WRAP2_S5_CLK_SRC 130 +#define GCC_QUPV3_WRAP_0_M_AHB_CLK 131 +#define GCC_QUPV3_WRAP_0_S_AHB_CLK 132 +#define GCC_QUPV3_WRAP_1_M_AHB_CLK 133 +#define GCC_QUPV3_WRAP_1_S_AHB_CLK 134 +#define GCC_QUPV3_WRAP_2_M_AHB_CLK 135 +#define GCC_QUPV3_WRAP_2_S_AHB_CLK 136 +#define GCC_SDCC2_AHB_CLK 137 +#define GCC_SDCC2_APPS_CLK 138 +#define GCC_SDCC2_APPS_CLK_SRC 139 +#define GCC_SDCC4_AHB_CLK 140 +#define GCC_SDCC4_APPS_CLK 141 +#define GCC_SDCC4_APPS_CLK_SRC 142 +#define GCC_SYS_NOC_CPUSS_AHB_CLK 143 +#define GCC_TSIF_AHB_CLK 144 +#define GCC_TSIF_INACTIVITY_TIMERS_CLK 145 +#define GCC_TSIF_REF_CLK 146 +#define GCC_TSIF_REF_CLK_SRC 147 +#define GCC_UFS_1X_CLKREF_EN 148 +#define GCC_UFS_CARD_AHB_CLK 149 +#define GCC_UFS_CARD_AXI_CLK 150 +#define GCC_UFS_CARD_AXI_CLK_SRC 151 +#define GCC_UFS_CARD_ICE_CORE_CLK 152 +#define GCC_UFS_CARD_ICE_CORE_CLK_SRC 153 +#define GCC_UFS_CARD_PHY_AUX_CLK 154 +#define GCC_UFS_CARD_PHY_AUX_CLK_SRC 155 +#define GCC_UFS_CARD_RX_SYMBOL_0_CLK 156 
+#define GCC_UFS_CARD_RX_SYMBOL_1_CLK 157 +#define GCC_UFS_CARD_TX_SYMBOL_0_CLK 158 +#define GCC_UFS_CARD_UNIPRO_CORE_CLK 159 +#define GCC_UFS_CARD_UNIPRO_CORE_CLK_SRC 160 +#define GCC_UFS_PHY_AHB_CLK 161 +#define GCC_UFS_PHY_AXI_CLK 162 +#define GCC_UFS_PHY_AXI_CLK_SRC 163 +#define GCC_UFS_PHY_ICE_CORE_CLK 164 +#define GCC_UFS_PHY_ICE_CORE_CLK_SRC 165 +#define GCC_UFS_PHY_PHY_AUX_CLK 166 +#define GCC_UFS_PHY_PHY_AUX_CLK_SRC 167 +#define GCC_UFS_PHY_RX_SYMBOL_0_CLK 168 +#define GCC_UFS_PHY_RX_SYMBOL_1_CLK 169 +#define GCC_UFS_PHY_TX_SYMBOL_0_CLK 170 +#define GCC_UFS_PHY_UNIPRO_CORE_CLK 171 +#define GCC_UFS_PHY_UNIPRO_CORE_CLK_SRC 172 +#define GCC_USB30_PRIM_MASTER_CLK 173 +#define GCC_USB30_PRIM_MASTER_CLK_SRC 174 +#define GCC_USB30_PRIM_MOCK_UTMI_CLK 175 +#define GCC_USB30_PRIM_MOCK_UTMI_CLK_SRC 176 +#define GCC_USB30_PRIM_MOCK_UTMI_POSTDIV_CLK_SRC 177 +#define GCC_USB30_PRIM_SLEEP_CLK 178 +#define GCC_USB30_SEC_MASTER_CLK 179 +#define GCC_USB30_SEC_MASTER_CLK_SRC 180 +#define GCC_USB30_SEC_MOCK_UTMI_CLK 181 +#define GCC_USB30_SEC_MOCK_UTMI_CLK_SRC 182 +#define GCC_USB30_SEC_MOCK_UTMI_POSTDIV_CLK_SRC 183 +#define GCC_USB30_SEC_SLEEP_CLK 184 +#define GCC_USB3_PRIM_PHY_AUX_CLK 185 +#define GCC_USB3_PRIM_PHY_AUX_CLK_SRC 186 +#define GCC_USB3_PRIM_PHY_COM_AUX_CLK 187 +#define GCC_USB3_PRIM_PHY_PIPE_CLK 188 +#define GCC_USB3_PRIM_PHY_PIPE_CLK_SRC 189 +#define GCC_USB3_SEC_CLKREF_EN 190 +#define GCC_USB3_SEC_PHY_AUX_CLK 191 +#define GCC_USB3_SEC_PHY_AUX_CLK_SRC 192 +#define GCC_USB3_SEC_PHY_COM_AUX_CLK 193 +#define GCC_USB3_SEC_PHY_PIPE_CLK 194 +#define GCC_USB3_SEC_PHY_PIPE_CLK_SRC 195 +#define GCC_VIDEO_AHB_CLK 196 +#define GCC_VIDEO_AXI0_CLK 197 +#define GCC_VIDEO_AXI1_CLK 198 +#define GCC_VIDEO_XO_CLK 199 + +/* GCC resets */ +#define GCC_GPU_BCR 0 +#define GCC_MMSS_BCR 1 +#define GCC_NPU_BWMON_BCR 2 +#define GCC_NPU_BCR 3 +#define GCC_PCIE_0_BCR 4 +#define GCC_PCIE_0_LINK_DOWN_BCR 5 +#define GCC_PCIE_0_NOCSR_COM_PHY_BCR 6 +#define GCC_PCIE_0_PHY_BCR 7 +#define GCC_PCIE_0_PHY_NOCSR_COM_PHY_BCR 8 +#define GCC_PCIE_1_BCR 9 +#define GCC_PCIE_1_LINK_DOWN_BCR 10 +#define GCC_PCIE_1_NOCSR_COM_PHY_BCR 11 +#define GCC_PCIE_1_PHY_BCR 12 +#define GCC_PCIE_1_PHY_NOCSR_COM_PHY_BCR 13 +#define GCC_PCIE_2_BCR 14 +#define GCC_PCIE_2_LINK_DOWN_BCR 15 +#define GCC_PCIE_2_NOCSR_COM_PHY_BCR 16 +#define GCC_PCIE_2_PHY_BCR 17 +#define GCC_PCIE_2_PHY_NOCSR_COM_PHY_BCR 18 +#define GCC_PCIE_PHY_BCR 19 +#define GCC_PCIE_PHY_CFG_AHB_BCR 20 +#define GCC_PCIE_PHY_COM_BCR 21 +#define GCC_PDM_BCR 22 +#define GCC_PRNG_BCR 23 +#define GCC_QUPV3_WRAPPER_0_BCR 24 +#define GCC_QUPV3_WRAPPER_1_BCR 25 +#define GCC_QUPV3_WRAPPER_2_BCR 26 +#define GCC_QUSB2PHY_PRIM_BCR 27 +#define GCC_QUSB2PHY_SEC_BCR 28 +#define GCC_SDCC2_BCR 29 +#define GCC_SDCC4_BCR 30 +#define GCC_TSIF_BCR 31 +#define GCC_UFS_CARD_BCR 32 +#define GCC_UFS_PHY_BCR 33 +#define GCC_USB30_PRIM_BCR 34 +#define GCC_USB30_SEC_BCR 35 +#define GCC_USB3_DP_PHY_PRIM_BCR 36 +#define GCC_USB3_DP_PHY_SEC_BCR 37 +#define GCC_USB3_PHY_PRIM_BCR 38 +#define GCC_USB3_PHY_SEC_BCR 39 +#define GCC_USB3PHY_PHY_PRIM_BCR 40 +#define GCC_USB3PHY_PHY_SEC_BCR 41 +#define GCC_USB_PHY_CFG_AHB2PHY_BCR 42 +#define GCC_VIDEO_AXI0_CLK_ARES 43 +#define GCC_VIDEO_AXI1_CLK_ARES 44 + +/* GCC power domains */ +#define PCIE_0_GDSC 0 +#define PCIE_1_GDSC 1 +#define PCIE_2_GDSC 2 +#define UFS_CARD_GDSC 3 +#define UFS_PHY_GDSC 4 +#define USB30_PRIM_GDSC 5 +#define USB30_SEC_GDSC 6 +#define HLOS1_VOTE_MMNOC_MMU_TBU_HF0_GDSC 7 +#define HLOS1_VOTE_MMNOC_MMU_TBU_HF1_GDSC 8 +#define 
HLOS1_VOTE_MMNOC_MMU_TBU_SF0_GDSC 9 +#define HLOS1_VOTE_MMNOC_MMU_TBU_SF1_GDSC 10 + +#endif diff --git a/include/dt-bindings/clock/qcom,gpucc-sc7180.h b/include/dt-bindings/clock/qcom,gpucc-sc7180.h index 0e4643b08b49..65e706d7d9c6 100644 --- a/include/dt-bindings/clock/qcom,gpucc-sc7180.h +++ b/include/dt-bindings/clock/qcom,gpucc-sc7180.h @@ -15,7 +15,8 @@ #define GPU_CC_CXO_CLK 6 #define GPU_CC_GMU_CLK_SRC 7 -/* CAM_CC GDSCRs */ +/* GPU_CC GDSCRs */ #define CX_GDSC 0 +#define GX_GDSC 1 #endif diff --git a/include/dt-bindings/clock/qcom,mss-sc7180.h b/include/dt-bindings/clock/qcom,mss-sc7180.h new file mode 100644 index 000000000000..f15a9ded2961 --- /dev/null +++ b/include/dt-bindings/clock/qcom,mss-sc7180.h @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2020, The Linux Foundation. All rights reserved. + */ + +#ifndef _DT_BINDINGS_CLK_QCOM_MSS_SC7180_H +#define _DT_BINDINGS_CLK_QCOM_MSS_SC7180_H + +#define MSS_AXI_CRYPTO_CLK 0 +#define MSS_AXI_NAV_CLK 1 + +#endif diff --git a/include/dt-bindings/clock/qcom,rpmcc.h b/include/dt-bindings/clock/qcom,rpmcc.h index 8e3095720552..ae74c43c485d 100644 --- a/include/dt-bindings/clock/qcom,rpmcc.h +++ b/include/dt-bindings/clock/qcom,rpmcc.h @@ -37,6 +37,10 @@ #define RPM_XO_A0 27 #define RPM_XO_A1 28 #define RPM_XO_A2 29 +#define RPM_NSS_FABRIC_0_CLK 30 +#define RPM_NSS_FABRIC_0_A_CLK 31 +#define RPM_NSS_FABRIC_1_CLK 32 +#define RPM_NSS_FABRIC_1_A_CLK 33 /* SMD RPM clocks */ #define RPM_SMD_XO_CLK_SRC 0 diff --git a/include/dt-bindings/clock/qcom,rpmh.h b/include/dt-bindings/clock/qcom,rpmh.h index edcab3f7b7d3..2e6c54e65455 100644 --- a/include/dt-bindings/clock/qcom,rpmh.h +++ b/include/dt-bindings/clock/qcom,rpmh.h @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright (c) 2018, The Linux Foundation. All rights reserved. */ +/* Copyright (c) 2018, 2020, The Linux Foundation. All rights reserved. */ #ifndef _DT_BINDINGS_CLK_MSM_RPMH_H @@ -19,5 +19,7 @@ #define RPMH_RF_CLK3 10 #define RPMH_RF_CLK3_A 11 #define RPMH_IPA_CLK 12 +#define RPMH_LN_BB_CLK1 13 +#define RPMH_LN_BB_CLK1_A 14 #endif diff --git a/include/dt-bindings/clock/sprd,sc9863a-clk.h b/include/dt-bindings/clock/sprd,sc9863a-clk.h new file mode 100644 index 000000000000..901ba59676c2 --- /dev/null +++ b/include/dt-bindings/clock/sprd,sc9863a-clk.h @@ -0,0 +1,334 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Unisoc SC9863A platform clocks + * + * Copyright (C) 2019, Unisoc Communications Inc. 
+ */ + +#ifndef _DT_BINDINGS_CLK_SC9863A_H_ +#define _DT_BINDINGS_CLK_SC9863A_H_ + +#define CLK_MPLL0_GATE 0 +#define CLK_DPLL0_GATE 1 +#define CLK_LPLL_GATE 2 +#define CLK_GPLL_GATE 3 +#define CLK_DPLL1_GATE 4 +#define CLK_MPLL1_GATE 5 +#define CLK_MPLL2_GATE 6 +#define CLK_ISPPLL_GATE 7 +#define CLK_PMU_APB_NUM (CLK_ISPPLL_GATE + 1) + +#define CLK_AUDIO_GATE 0 +#define CLK_RPLL 1 +#define CLK_RPLL_390M 2 +#define CLK_RPLL_260M 3 +#define CLK_RPLL_195M 4 +#define CLK_RPLL_26M 5 +#define CLK_ANLG_PHY_G5_NUM (CLK_RPLL_26M + 1) + +#define CLK_TWPLL 0 +#define CLK_TWPLL_768M 1 +#define CLK_TWPLL_384M 2 +#define CLK_TWPLL_192M 3 +#define CLK_TWPLL_96M 4 +#define CLK_TWPLL_48M 5 +#define CLK_TWPLL_24M 6 +#define CLK_TWPLL_12M 7 +#define CLK_TWPLL_512M 8 +#define CLK_TWPLL_256M 9 +#define CLK_TWPLL_128M 10 +#define CLK_TWPLL_64M 11 +#define CLK_TWPLL_307M2 12 +#define CLK_TWPLL_219M4 13 +#define CLK_TWPLL_170M6 14 +#define CLK_TWPLL_153M6 15 +#define CLK_TWPLL_76M8 16 +#define CLK_TWPLL_51M2 17 +#define CLK_TWPLL_38M4 18 +#define CLK_TWPLL_19M2 19 +#define CLK_LPLL 20 +#define CLK_LPLL_409M6 21 +#define CLK_LPLL_245M76 22 +#define CLK_GPLL 23 +#define CLK_ISPPLL 24 +#define CLK_ISPPLL_468M 25 +#define CLK_ANLG_PHY_G1_NUM (CLK_ISPPLL_468M + 1) + +#define CLK_DPLL0 0 +#define CLK_DPLL1 1 +#define CLK_DPLL0_933M 2 +#define CLK_DPLL0_622M3 3 +#define CLK_DPLL0_400M 4 +#define CLK_DPLL0_266M7 5 +#define CLK_DPLL0_123M1 6 +#define CLK_DPLL0_50M 7 +#define CLK_ANLG_PHY_G7_NUM (CLK_DPLL0_50M + 1) + +#define CLK_MPLL0 0 +#define CLK_MPLL1 1 +#define CLK_MPLL2 2 +#define CLK_MPLL2_675M 3 +#define CLK_ANLG_PHY_G4_NUM (CLK_MPLL2_675M + 1) + +#define CLK_AP_APB 0 +#define CLK_AP_CE 1 +#define CLK_NANDC_ECC 2 +#define CLK_NANDC_26M 3 +#define CLK_EMMC_32K 4 +#define CLK_SDIO0_32K 5 +#define CLK_SDIO1_32K 6 +#define CLK_SDIO2_32K 7 +#define CLK_OTG_UTMI 8 +#define CLK_AP_UART0 9 +#define CLK_AP_UART1 10 +#define CLK_AP_UART2 11 +#define CLK_AP_UART3 12 +#define CLK_AP_UART4 13 +#define CLK_AP_I2C0 14 +#define CLK_AP_I2C1 15 +#define CLK_AP_I2C2 16 +#define CLK_AP_I2C3 17 +#define CLK_AP_I2C4 18 +#define CLK_AP_I2C5 19 +#define CLK_AP_I2C6 20 +#define CLK_AP_SPI0 21 +#define CLK_AP_SPI1 22 +#define CLK_AP_SPI2 23 +#define CLK_AP_SPI3 24 +#define CLK_AP_IIS0 25 +#define CLK_AP_IIS1 26 +#define CLK_AP_IIS2 27 +#define CLK_SIM0 28 +#define CLK_SIM0_32K 29 +#define CLK_AP_CLK_NUM (CLK_SIM0_32K + 1) + +#define CLK_13M 0 +#define CLK_6M5 1 +#define CLK_4M3 2 +#define CLK_2M 3 +#define CLK_250K 4 +#define CLK_RCO_25M 5 +#define CLK_RCO_4M 6 +#define CLK_RCO_2M 7 +#define CLK_EMC 8 +#define CLK_AON_APB 9 +#define CLK_ADI 10 +#define CLK_AUX0 11 +#define CLK_AUX1 12 +#define CLK_AUX2 13 +#define CLK_PROBE 14 +#define CLK_PWM0 15 +#define CLK_PWM1 16 +#define CLK_PWM2 17 +#define CLK_AON_THM 18 +#define CLK_AUDIF 19 +#define CLK_CPU_DAP 20 +#define CLK_CPU_TS 21 +#define CLK_DJTAG_TCK 22 +#define CLK_EMC_REF 23 +#define CLK_CSSYS 24 +#define CLK_AON_PMU 25 +#define CLK_PMU_26M 26 +#define CLK_AON_TMR 27 +#define CLK_POWER_CPU 28 +#define CLK_AP_AXI 29 +#define CLK_SDIO0_2X 30 +#define CLK_SDIO1_2X 31 +#define CLK_SDIO2_2X 32 +#define CLK_EMMC_2X 33 +#define CLK_DPU 34 +#define CLK_DPU_DPI 35 +#define CLK_OTG_REF 36 +#define CLK_SDPHY_APB 37 +#define CLK_ALG_IO_APB 38 +#define CLK_GPU_CORE 39 +#define CLK_GPU_SOC 40 +#define CLK_MM_EMC 41 +#define CLK_MM_AHB 42 +#define CLK_BPC 43 +#define CLK_DCAM_IF 44 +#define CLK_ISP 45 +#define CLK_JPG 46 +#define CLK_CPP 47 +#define CLK_SENSOR0 48 +#define CLK_SENSOR1 49 +#define 
CLK_SENSOR2 50 +#define CLK_MM_VEMC 51 +#define CLK_MM_VAHB 52 +#define CLK_VSP 53 +#define CLK_CORE0 54 +#define CLK_CORE1 55 +#define CLK_CORE2 56 +#define CLK_CORE3 57 +#define CLK_CORE4 58 +#define CLK_CORE5 59 +#define CLK_CORE6 60 +#define CLK_CORE7 61 +#define CLK_SCU 62 +#define CLK_ACE 63 +#define CLK_AXI_PERIPH 64 +#define CLK_AXI_ACP 65 +#define CLK_ATB 66 +#define CLK_DEBUG_APB 67 +#define CLK_GIC 68 +#define CLK_PERIPH 69 +#define CLK_AON_CLK_NUM (CLK_VSP + 1) + +#define CLK_OTG_EB 0 +#define CLK_DMA_EB 1 +#define CLK_CE_EB 2 +#define CLK_NANDC_EB 3 +#define CLK_SDIO0_EB 4 +#define CLK_SDIO1_EB 5 +#define CLK_SDIO2_EB 6 +#define CLK_EMMC_EB 7 +#define CLK_EMMC_32K_EB 8 +#define CLK_SDIO0_32K_EB 9 +#define CLK_SDIO1_32K_EB 10 +#define CLK_SDIO2_32K_EB 11 +#define CLK_NANDC_26M_EB 12 +#define CLK_DMA_EB2 13 +#define CLK_CE_EB2 14 +#define CLK_AP_AHB_GATE_NUM (CLK_CE_EB2 + 1) + +#define CLK_GPIO_EB 0 +#define CLK_PWM0_EB 1 +#define CLK_PWM1_EB 2 +#define CLK_PWM2_EB 3 +#define CLK_PWM3_EB 4 +#define CLK_KPD_EB 5 +#define CLK_AON_SYST_EB 6 +#define CLK_AP_SYST_EB 7 +#define CLK_AON_TMR_EB 8 +#define CLK_EFUSE_EB 9 +#define CLK_EIC_EB 10 +#define CLK_INTC_EB 11 +#define CLK_ADI_EB 12 +#define CLK_AUDIF_EB 13 +#define CLK_AUD_EB 14 +#define CLK_VBC_EB 15 +#define CLK_PIN_EB 16 +#define CLK_AP_WDG_EB 17 +#define CLK_MM_EB 18 +#define CLK_AON_APB_CKG_EB 19 +#define CLK_CA53_TS0_EB 20 +#define CLK_CA53_TS1_EB 21 +#define CLK_CS53_DAP_EB 22 +#define CLK_PMU_EB 23 +#define CLK_THM_EB 24 +#define CLK_AUX0_EB 25 +#define CLK_AUX1_EB 26 +#define CLK_AUX2_EB 27 +#define CLK_PROBE_EB 28 +#define CLK_EMC_REF_EB 29 +#define CLK_CA53_WDG_EB 30 +#define CLK_AP_TMR1_EB 31 +#define CLK_AP_TMR2_EB 32 +#define CLK_DISP_EMC_EB 33 +#define CLK_ZIP_EMC_EB 34 +#define CLK_GSP_EMC_EB 35 +#define CLK_MM_VSP_EB 36 +#define CLK_MDAR_EB 37 +#define CLK_RTC4M0_CAL_EB 38 +#define CLK_RTC4M1_CAL_EB 39 +#define CLK_DJTAG_EB 40 +#define CLK_MBOX_EB 41 +#define CLK_AON_DMA_EB 42 +#define CLK_AON_APB_DEF_EB 43 +#define CLK_CA5_TS0_EB 44 +#define CLK_DBG_EB 45 +#define CLK_DBG_EMC_EB 46 +#define CLK_CROSS_TRIG_EB 47 +#define CLK_SERDES_DPHY_EB 48 +#define CLK_ARCH_RTC_EB 49 +#define CLK_KPD_RTC_EB 50 +#define CLK_AON_SYST_RTC_EB 51 +#define CLK_AP_SYST_RTC_EB 52 +#define CLK_AON_TMR_RTC_EB 53 +#define CLK_AP_TMR0_RTC_EB 54 +#define CLK_EIC_RTC_EB 55 +#define CLK_EIC_RTCDV5_EB 56 +#define CLK_AP_WDG_RTC_EB 57 +#define CLK_CA53_WDG_RTC_EB 58 +#define CLK_THM_RTC_EB 59 +#define CLK_ATHMA_RTC_EB 60 +#define CLK_GTHMA_RTC_EB 61 +#define CLK_ATHMA_RTC_A_EB 62 +#define CLK_GTHMA_RTC_A_EB 63 +#define CLK_AP_TMR1_RTC_EB 64 +#define CLK_AP_TMR2_RTC_EB 65 +#define CLK_DXCO_LC_RTC_EB 66 +#define CLK_BB_CAL_RTC_EB 67 +#define CLK_GNU_EB 68 +#define CLK_DISP_EB 69 +#define CLK_MM_EMC_EB 70 +#define CLK_POWER_CPU_EB 71 +#define CLK_HW_I2C_EB 72 +#define CLK_MM_VSP_EMC_EB 73 +#define CLK_VSP_EB 74 +#define CLK_CSSYS_EB 75 +#define CLK_DMC_EB 76 +#define CLK_ROSC_EB 77 +#define CLK_S_D_CFG_EB 78 +#define CLK_S_D_REF_EB 79 +#define CLK_B_DMA_EB 80 +#define CLK_ANLG_EB 81 +#define CLK_ANLG_APB_EB 82 +#define CLK_BSMTMR_EB 83 +#define CLK_AP_AXI_EB 84 +#define CLK_AP_INTC0_EB 85 +#define CLK_AP_INTC1_EB 86 +#define CLK_AP_INTC2_EB 87 +#define CLK_AP_INTC3_EB 88 +#define CLK_AP_INTC4_EB 89 +#define CLK_AP_INTC5_EB 90 +#define CLK_SCC_EB 91 +#define CLK_DPHY_CFG_EB 92 +#define CLK_DPHY_REF_EB 93 +#define CLK_CPHY_CFG_EB 94 +#define CLK_OTG_REF_EB 95 +#define CLK_SERDES_EB 96 +#define CLK_AON_AP_EMC_EB 97 +#define CLK_AON_APB_GATE_NUM 
(CLK_AON_AP_EMC_EB + 1) + +#define CLK_MAHB_CKG_EB 0 +#define CLK_MDCAM_EB 1 +#define CLK_MISP_EB 2 +#define CLK_MAHBCSI_EB 3 +#define CLK_MCSI_S_EB 4 +#define CLK_MCSI_T_EB 5 +#define CLK_DCAM_AXI_EB 6 +#define CLK_ISP_AXI_EB 7 +#define CLK_MCSI_EB 8 +#define CLK_MCSI_S_CKG_EB 9 +#define CLK_MCSI_T_CKG_EB 10 +#define CLK_SENSOR0_EB 11 +#define CLK_SENSOR1_EB 12 +#define CLK_SENSOR2_EB 13 +#define CLK_MCPHY_CFG_EB 14 +#define CLK_MM_GATE_NUM (CLK_MCPHY_CFG_EB + 1) + +#define CLK_SIM0_EB 0 +#define CLK_IIS0_EB 1 +#define CLK_IIS1_EB 2 +#define CLK_IIS2_EB 3 +#define CLK_SPI0_EB 4 +#define CLK_SPI1_EB 5 +#define CLK_SPI2_EB 6 +#define CLK_I2C0_EB 7 +#define CLK_I2C1_EB 8 +#define CLK_I2C2_EB 9 +#define CLK_I2C3_EB 10 +#define CLK_I2C4_EB 11 +#define CLK_UART0_EB 12 +#define CLK_UART1_EB 13 +#define CLK_UART2_EB 14 +#define CLK_UART3_EB 15 +#define CLK_UART4_EB 16 +#define CLK_SIM0_32K_EB 17 +#define CLK_SPI3_EB 18 +#define CLK_I2C5_EB 19 +#define CLK_I2C6_EB 20 +#define CLK_AP_APB_GATE_NUM (CLK_I2C6_EB + 1) + +#endif /* _DT_BINDINGS_CLK_SC9863A_H_ */ diff --git a/include/dt-bindings/clock/sun50i-a64-ccu.h b/include/dt-bindings/clock/sun50i-a64-ccu.h index e512a1c9b0fc..318eb15c414c 100644 --- a/include/dt-bindings/clock/sun50i-a64-ccu.h +++ b/include/dt-bindings/clock/sun50i-a64-ccu.h @@ -131,7 +131,7 @@ #define CLK_AVS 109 #define CLK_HDMI 110 #define CLK_HDMI_DDC 111 - +#define CLK_MBUS 112 #define CLK_DSI_DPHY 113 #define CLK_GPU 114 diff --git a/include/dt-bindings/clock/tegra114-car.h b/include/dt-bindings/clock/tegra114-car.h index bb5c2c999c05..df59aaf5bf34 100644 --- a/include/dt-bindings/clock/tegra114-car.h +++ b/include/dt-bindings/clock/tegra114-car.h @@ -228,6 +228,8 @@ #define TEGRA114_CLK_CLK_M 201 #define TEGRA114_CLK_CLK_M_DIV2 202 #define TEGRA114_CLK_CLK_M_DIV4 203 +#define TEGRA114_CLK_OSC_DIV2 202 +#define TEGRA114_CLK_OSC_DIV4 203 #define TEGRA114_CLK_PLL_REF 204 #define TEGRA114_CLK_PLL_C 205 #define TEGRA114_CLK_PLL_C_OUT1 206 @@ -274,7 +276,7 @@ #define TEGRA114_CLK_CLK_OUT_2 246 #define TEGRA114_CLK_CLK_OUT_3 247 #define TEGRA114_CLK_BLINK 248 -/* 249 */ +#define TEGRA114_CLK_OSC 249 /* 250 */ /* 251 */ #define TEGRA114_CLK_XUSB_HOST_SRC 252 diff --git a/include/dt-bindings/clock/tegra124-car-common.h b/include/dt-bindings/clock/tegra124-car-common.h index 0c4f5be0a742..2a9acd592bff 100644 --- a/include/dt-bindings/clock/tegra124-car-common.h +++ b/include/dt-bindings/clock/tegra124-car-common.h @@ -227,6 +227,8 @@ #define TEGRA124_CLK_CLK_M 201 #define TEGRA124_CLK_CLK_M_DIV2 202 #define TEGRA124_CLK_CLK_M_DIV4 203 +#define TEGRA124_CLK_OSC_DIV2 202 +#define TEGRA124_CLK_OSC_DIV4 203 #define TEGRA124_CLK_PLL_REF 204 #define TEGRA124_CLK_PLL_C 205 #define TEGRA124_CLK_PLL_C_OUT1 206 @@ -273,7 +275,7 @@ #define TEGRA124_CLK_CLK_OUT_2 246 #define TEGRA124_CLK_CLK_OUT_3 247 #define TEGRA124_CLK_BLINK 248 -/* 249 */ +#define TEGRA124_CLK_OSC 249 /* 250 */ /* 251 */ #define TEGRA124_CLK_XUSB_HOST_SRC 252 diff --git a/include/dt-bindings/clock/tegra210-car.h b/include/dt-bindings/clock/tegra210-car.h index 44f60623f99b..7a8f10b9a66d 100644 --- a/include/dt-bindings/clock/tegra210-car.h +++ b/include/dt-bindings/clock/tegra210-car.h @@ -262,6 +262,8 @@ #define TEGRA210_CLK_CLK_M 233 #define TEGRA210_CLK_CLK_M_DIV2 234 #define TEGRA210_CLK_CLK_M_DIV4 235 +#define TEGRA210_CLK_OSC_DIV2 234 +#define TEGRA210_CLK_OSC_DIV4 235 #define TEGRA210_CLK_PLL_REF 236 #define TEGRA210_CLK_PLL_C 237 #define TEGRA210_CLK_PLL_C_OUT1 238 @@ -355,7 +357,7 @@ #define 
TEGRA210_CLK_PLL_A_OUT_ADSP 323 #define TEGRA210_CLK_PLL_A_OUT0_OUT_ADSP 324 /* 325 */ -/* 326 */ +#define TEGRA210_CLK_OSC 326 /* 327 */ /* 328 */ /* 329 */ diff --git a/include/dt-bindings/clock/tegra30-car.h b/include/dt-bindings/clock/tegra30-car.h index 3c90f1535551..7b542c10fc27 100644 --- a/include/dt-bindings/clock/tegra30-car.h +++ b/include/dt-bindings/clock/tegra30-car.h @@ -196,6 +196,8 @@ #define TEGRA30_CLK_CLK_M 171 #define TEGRA30_CLK_CLK_M_DIV2 172 #define TEGRA30_CLK_CLK_M_DIV4 173 +#define TEGRA30_CLK_OSC_DIV2 172 +#define TEGRA30_CLK_OSC_DIV4 173 #define TEGRA30_CLK_PLL_REF 174 #define TEGRA30_CLK_PLL_C 175 #define TEGRA30_CLK_PLL_C_OUT1 176 @@ -243,7 +245,7 @@ #define TEGRA30_CLK_HCLK 217 #define TEGRA30_CLK_PCLK 218 /* 219 */ -/* 220 */ +#define TEGRA30_CLK_OSC 220 /* 221 */ /* 222 */ /* 223 */ diff --git a/include/dt-bindings/display/sdtv-standards.h b/include/dt-bindings/display/sdtv-standards.h new file mode 100644 index 000000000000..fbc1a3db2ea7 --- /dev/null +++ b/include/dt-bindings/display/sdtv-standards.h @@ -0,0 +1,76 @@ +/* SPDX-License-Identifier: GPL-2.0-only or X11 */ +/* + * Copyright 2019 Pengutronix, Marco Felsch <kernel@pengutronix.de> + */ + +#ifndef _DT_BINDINGS_DISPLAY_SDTV_STDS_H +#define _DT_BINDINGS_DISPLAY_SDTV_STDS_H + +/* + * Attention: Keep the SDTV_STD_* bit definitions in sync with + * include/uapi/linux/videodev2.h V4L2_STD_* bit definitions. + */ +/* One bit for each standard */ +#define SDTV_STD_PAL_B 0x00000001 +#define SDTV_STD_PAL_B1 0x00000002 +#define SDTV_STD_PAL_G 0x00000004 +#define SDTV_STD_PAL_H 0x00000008 +#define SDTV_STD_PAL_I 0x00000010 +#define SDTV_STD_PAL_D 0x00000020 +#define SDTV_STD_PAL_D1 0x00000040 +#define SDTV_STD_PAL_K 0x00000080 + +#define SDTV_STD_PAL (SDTV_STD_PAL_B | \ + SDTV_STD_PAL_B1 | \ + SDTV_STD_PAL_G | \ + SDTV_STD_PAL_H | \ + SDTV_STD_PAL_I | \ + SDTV_STD_PAL_D | \ + SDTV_STD_PAL_D1 | \ + SDTV_STD_PAL_K) + +#define SDTV_STD_PAL_M 0x00000100 +#define SDTV_STD_PAL_N 0x00000200 +#define SDTV_STD_PAL_Nc 0x00000400 +#define SDTV_STD_PAL_60 0x00000800 + +#define SDTV_STD_NTSC_M 0x00001000 /* BTSC */ +#define SDTV_STD_NTSC_M_JP 0x00002000 /* EIA-J */ +#define SDTV_STD_NTSC_443 0x00004000 +#define SDTV_STD_NTSC_M_KR 0x00008000 /* FM A2 */ + +#define SDTV_STD_NTSC (SDTV_STD_NTSC_M | \ + SDTV_STD_NTSC_M_JP | \ + SDTV_STD_NTSC_M_KR) + +#define SDTV_STD_SECAM_B 0x00010000 +#define SDTV_STD_SECAM_D 0x00020000 +#define SDTV_STD_SECAM_G 0x00040000 +#define SDTV_STD_SECAM_H 0x00080000 +#define SDTV_STD_SECAM_K 0x00100000 +#define SDTV_STD_SECAM_K1 0x00200000 +#define SDTV_STD_SECAM_L 0x00400000 +#define SDTV_STD_SECAM_LC 0x00800000 + +#define SDTV_STD_SECAM (SDTV_STD_SECAM_B | \ + SDTV_STD_SECAM_D | \ + SDTV_STD_SECAM_G | \ + SDTV_STD_SECAM_H | \ + SDTV_STD_SECAM_K | \ + SDTV_STD_SECAM_K1 | \ + SDTV_STD_SECAM_L | \ + SDTV_STD_SECAM_LC) + +/* Standards for Countries with 60Hz Line frequency */ +#define SDTV_STD_525_60 (SDTV_STD_PAL_M | \ + SDTV_STD_PAL_60 | \ + SDTV_STD_NTSC | \ + SDTV_STD_NTSC_443) + +/* Standards for Countries with 50Hz Line frequency */ +#define SDTV_STD_625_50 (SDTV_STD_PAL | \ + SDTV_STD_PAL_N | \ + SDTV_STD_PAL_Nc | \ + SDTV_STD_SECAM) + +#endif /* _DT_BINDINGS_DISPLAY_SDTV_STDS_H */ diff --git a/include/dt-bindings/interconnect/qcom,osm-l3.h b/include/dt-bindings/interconnect/qcom,osm-l3.h new file mode 100644 index 000000000000..54858ff7674d --- /dev/null +++ b/include/dt-bindings/interconnect/qcom,osm-l3.h @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2019 
The Linux Foundation. All rights reserved. + */ + +#ifndef __DT_BINDINGS_INTERCONNECT_QCOM_OSM_L3_H +#define __DT_BINDINGS_INTERCONNECT_QCOM_OSM_L3_H + +#define MASTER_OSM_L3_APPS 0 +#define SLAVE_OSM_L3 1 + +#endif diff --git a/include/dt-bindings/interconnect/qcom,sc7180.h b/include/dt-bindings/interconnect/qcom,sc7180.h new file mode 100644 index 000000000000..f9970f6032eb --- /dev/null +++ b/include/dt-bindings/interconnect/qcom,sc7180.h @@ -0,0 +1,161 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Qualcomm SC7180 interconnect IDs + * + * Copyright (c) 2020, The Linux Foundation. All rights reserved. + */ + +#ifndef __DT_BINDINGS_INTERCONNECT_QCOM_SC7180_H +#define __DT_BINDINGS_INTERCONNECT_QCOM_SC7180_H + +#define MASTER_A1NOC_CFG 0 +#define MASTER_QSPI 1 +#define MASTER_QUP_0 2 +#define MASTER_SDCC_2 3 +#define MASTER_EMMC 4 +#define MASTER_UFS_MEM 5 +#define SLAVE_A1NOC_SNOC 6 +#define SLAVE_SERVICE_A1NOC 7 + +#define MASTER_A2NOC_CFG 0 +#define MASTER_QDSS_BAM 1 +#define MASTER_QUP_1 2 +#define MASTER_USB3 3 +#define MASTER_CRYPTO 4 +#define MASTER_IPA 5 +#define MASTER_QDSS_ETR 6 +#define SLAVE_A2NOC_SNOC 7 +#define SLAVE_SERVICE_A2NOC 8 + +#define MASTER_CAMNOC_HF0_UNCOMP 0 +#define MASTER_CAMNOC_HF1_UNCOMP 1 +#define MASTER_CAMNOC_SF_UNCOMP 2 +#define SLAVE_CAMNOC_UNCOMP 3 + +#define MASTER_NPU 0 +#define MASTER_NPU_PROC 1 +#define SLAVE_CDSP_GEM_NOC 2 + +#define MASTER_SNOC_CNOC 0 +#define MASTER_QDSS_DAP 1 +#define SLAVE_A1NOC_CFG 2 +#define SLAVE_A2NOC_CFG 3 +#define SLAVE_AHB2PHY_SOUTH 4 +#define SLAVE_AHB2PHY_CENTER 5 +#define SLAVE_AOP 6 +#define SLAVE_AOSS 7 +#define SLAVE_BOOT_ROM 8 +#define SLAVE_CAMERA_CFG 9 +#define SLAVE_CAMERA_NRT_THROTTLE_CFG 10 +#define SLAVE_CAMERA_RT_THROTTLE_CFG 11 +#define SLAVE_CLK_CTL 12 +#define SLAVE_RBCPR_CX_CFG 13 +#define SLAVE_RBCPR_MX_CFG 14 +#define SLAVE_CRYPTO_0_CFG 15 +#define SLAVE_DCC_CFG 16 +#define SLAVE_CNOC_DDRSS 17 +#define SLAVE_DISPLAY_CFG 18 +#define SLAVE_DISPLAY_RT_THROTTLE_CFG 19 +#define SLAVE_DISPLAY_THROTTLE_CFG 20 +#define SLAVE_EMMC_CFG 21 +#define SLAVE_GLM 22 +#define SLAVE_GFX3D_CFG 23 +#define SLAVE_IMEM_CFG 24 +#define SLAVE_IPA_CFG 25 +#define SLAVE_CNOC_MNOC_CFG 26 +#define SLAVE_CNOC_MSS 27 +#define SLAVE_NPU_CFG 28 +#define SLAVE_NPU_DMA_BWMON_CFG 29 +#define SLAVE_NPU_PROC_BWMON_CFG 30 +#define SLAVE_PDM 31 +#define SLAVE_PIMEM_CFG 32 +#define SLAVE_PRNG 33 +#define SLAVE_QDSS_CFG 34 +#define SLAVE_QM_CFG 35 +#define SLAVE_QM_MPU_CFG 36 +#define SLAVE_QSPI_0 37 +#define SLAVE_QUP_0 38 +#define SLAVE_QUP_1 39 +#define SLAVE_SDCC_2 40 +#define SLAVE_SECURITY 41 +#define SLAVE_SNOC_CFG 42 +#define SLAVE_TCSR 43 +#define SLAVE_TLMM_WEST 44 +#define SLAVE_TLMM_NORTH 45 +#define SLAVE_TLMM_SOUTH 46 +#define SLAVE_UFS_MEM_CFG 47 +#define SLAVE_USB3 48 +#define SLAVE_VENUS_CFG 49 +#define SLAVE_VENUS_THROTTLE_CFG 50 +#define SLAVE_VSENSE_CTRL_CFG 51 +#define SLAVE_SERVICE_CNOC 52 + +#define MASTER_CNOC_DC_NOC 0 +#define SLAVE_GEM_NOC_CFG 1 +#define SLAVE_LLCC_CFG 2 + +#define MASTER_APPSS_PROC 0 +#define MASTER_SYS_TCU 1 +#define MASTER_GEM_NOC_CFG 2 +#define MASTER_COMPUTE_NOC 3 +#define MASTER_MNOC_HF_MEM_NOC 4 +#define MASTER_MNOC_SF_MEM_NOC 5 +#define MASTER_SNOC_GC_MEM_NOC 6 +#define MASTER_SNOC_SF_MEM_NOC 7 +#define MASTER_GFX3D 8 +#define SLAVE_MSS_PROC_MS_MPU_CFG 9 +#define SLAVE_GEM_NOC_SNOC 10 +#define SLAVE_LLCC 11 +#define SLAVE_SERVICE_GEM_NOC 12 + +#define MASTER_IPA_CORE 0 +#define SLAVE_IPA_CORE 1 + +#define MASTER_LLCC 0 +#define SLAVE_EBI1 1 + +#define MASTER_CNOC_MNOC_CFG 0 +#define 
MASTER_CAMNOC_HF0 1 +#define MASTER_CAMNOC_HF1 2 +#define MASTER_CAMNOC_SF 3 +#define MASTER_MDP0 4 +#define MASTER_ROTATOR 5 +#define MASTER_VIDEO_P0 6 +#define MASTER_VIDEO_PROC 7 +#define SLAVE_MNOC_HF_MEM_NOC 8 +#define SLAVE_MNOC_SF_MEM_NOC 9 +#define SLAVE_SERVICE_MNOC 10 + +#define MASTER_NPU_SYS 0 +#define MASTER_NPU_NOC_CFG 1 +#define SLAVE_NPU_CAL_DP0 2 +#define SLAVE_NPU_CP 3 +#define SLAVE_NPU_INT_DMA_BWMON_CFG 4 +#define SLAVE_NPU_DPM 5 +#define SLAVE_ISENSE_CFG 6 +#define SLAVE_NPU_LLM_CFG 7 +#define SLAVE_NPU_TCM 8 +#define SLAVE_NPU_COMPUTE_NOC 9 +#define SLAVE_SERVICE_NPU_NOC 10 + +#define MASTER_QUP_CORE_0 0 +#define MASTER_QUP_CORE_1 1 +#define SLAVE_QUP_CORE_0 2 +#define SLAVE_QUP_CORE_1 3 + +#define MASTER_SNOC_CFG 0 +#define MASTER_A1NOC_SNOC 1 +#define MASTER_A2NOC_SNOC 2 +#define MASTER_GEM_NOC_SNOC 3 +#define MASTER_PIMEM 4 +#define SLAVE_APPSS 5 +#define SLAVE_SNOC_CNOC 6 +#define SLAVE_SNOC_GEM_NOC_GC 7 +#define SLAVE_SNOC_GEM_NOC_SF 8 +#define SLAVE_IMEM 9 +#define SLAVE_PIMEM 10 +#define SLAVE_SERVICE_SNOC 11 +#define SLAVE_QDSS_STM 12 +#define SLAVE_TCU 13 + +#endif diff --git a/include/dt-bindings/interconnect/qcom,sdm845.h b/include/dt-bindings/interconnect/qcom,sdm845.h index 7b2393be7361..290be38f40e6 100644 --- a/include/dt-bindings/interconnect/qcom,sdm845.h +++ b/include/dt-bindings/interconnect/qcom,sdm845.h @@ -10,134 +10,139 @@ #define __DT_BINDINGS_INTERCONNECT_QCOM_SDM845_H #define MASTER_A1NOC_CFG 0 -#define MASTER_BLSP_1 1 -#define MASTER_TSIF 2 -#define MASTER_SDCC_2 3 -#define MASTER_SDCC_4 4 -#define MASTER_UFS_CARD 5 -#define MASTER_UFS_MEM 6 -#define MASTER_PCIE_0 7 -#define MASTER_A2NOC_CFG 8 -#define MASTER_QDSS_BAM 9 -#define MASTER_BLSP_2 10 -#define MASTER_CNOC_A2NOC 11 -#define MASTER_CRYPTO 12 -#define MASTER_IPA 13 -#define MASTER_PCIE_1 14 -#define MASTER_QDSS_ETR 15 -#define MASTER_USB3_0 16 -#define MASTER_USB3_1 17 -#define MASTER_CAMNOC_HF0_UNCOMP 18 -#define MASTER_CAMNOC_HF1_UNCOMP 19 -#define MASTER_CAMNOC_SF_UNCOMP 20 -#define MASTER_SPDM 21 -#define MASTER_TIC 22 -#define MASTER_SNOC_CNOC 23 -#define MASTER_QDSS_DAP 24 -#define MASTER_CNOC_DC_NOC 25 -#define MASTER_APPSS_PROC 26 -#define MASTER_GNOC_CFG 27 -#define MASTER_LLCC 28 -#define MASTER_TCU_0 29 -#define MASTER_MEM_NOC_CFG 30 -#define MASTER_GNOC_MEM_NOC 31 -#define MASTER_MNOC_HF_MEM_NOC 32 -#define MASTER_MNOC_SF_MEM_NOC 33 -#define MASTER_SNOC_GC_MEM_NOC 34 -#define MASTER_SNOC_SF_MEM_NOC 35 -#define MASTER_GFX3D 36 -#define MASTER_CNOC_MNOC_CFG 37 -#define MASTER_CAMNOC_HF0 38 -#define MASTER_CAMNOC_HF1 39 -#define MASTER_CAMNOC_SF 40 -#define MASTER_MDP0 41 -#define MASTER_MDP1 42 -#define MASTER_ROTATOR 43 -#define MASTER_VIDEO_P0 44 -#define MASTER_VIDEO_P1 45 -#define MASTER_VIDEO_PROC 46 -#define MASTER_SNOC_CFG 47 -#define MASTER_A1NOC_SNOC 48 -#define MASTER_A2NOC_SNOC 49 -#define MASTER_GNOC_SNOC 50 -#define MASTER_MEM_NOC_SNOC 51 -#define MASTER_ANOC_PCIE_SNOC 52 -#define MASTER_PIMEM 53 -#define MASTER_GIC 54 -#define SLAVE_A1NOC_SNOC 55 -#define SLAVE_SERVICE_A1NOC 56 -#define SLAVE_ANOC_PCIE_A1NOC_SNOC 57 -#define SLAVE_A2NOC_SNOC 58 -#define SLAVE_ANOC_PCIE_SNOC 59 -#define SLAVE_SERVICE_A2NOC 60 -#define SLAVE_CAMNOC_UNCOMP 61 -#define SLAVE_A1NOC_CFG 62 -#define SLAVE_A2NOC_CFG 63 -#define SLAVE_AOP 64 -#define SLAVE_AOSS 65 -#define SLAVE_CAMERA_CFG 66 -#define SLAVE_CLK_CTL 67 -#define SLAVE_CDSP_CFG 68 -#define SLAVE_RBCPR_CX_CFG 69 -#define SLAVE_CRYPTO_0_CFG 70 -#define SLAVE_DCC_CFG 71 -#define SLAVE_CNOC_DDRSS 72 -#define SLAVE_DISPLAY_CFG 
73 -#define SLAVE_GLM 74 -#define SLAVE_GFX3D_CFG 75 -#define SLAVE_IMEM_CFG 76 -#define SLAVE_IPA_CFG 77 -#define SLAVE_CNOC_MNOC_CFG 78 -#define SLAVE_PCIE_0_CFG 79 -#define SLAVE_PCIE_1_CFG 80 -#define SLAVE_PDM 81 -#define SLAVE_SOUTH_PHY_CFG 82 -#define SLAVE_PIMEM_CFG 83 -#define SLAVE_PRNG 84 -#define SLAVE_QDSS_CFG 85 -#define SLAVE_BLSP_2 86 -#define SLAVE_BLSP_1 87 -#define SLAVE_SDCC_2 88 -#define SLAVE_SDCC_4 89 -#define SLAVE_SNOC_CFG 90 -#define SLAVE_SPDM_WRAPPER 91 -#define SLAVE_SPSS_CFG 92 -#define SLAVE_TCSR 93 -#define SLAVE_TLMM_NORTH 94 -#define SLAVE_TLMM_SOUTH 95 -#define SLAVE_TSIF 96 -#define SLAVE_UFS_CARD_CFG 97 -#define SLAVE_UFS_MEM_CFG 98 -#define SLAVE_USB3_0 99 -#define SLAVE_USB3_1 100 -#define SLAVE_VENUS_CFG 101 -#define SLAVE_VSENSE_CTRL_CFG 102 -#define SLAVE_CNOC_A2NOC 103 -#define SLAVE_SERVICE_CNOC 104 -#define SLAVE_LLCC_CFG 105 -#define SLAVE_MEM_NOC_CFG 106 -#define SLAVE_GNOC_SNOC 107 -#define SLAVE_GNOC_MEM_NOC 108 -#define SLAVE_SERVICE_GNOC 109 -#define SLAVE_EBI1 110 -#define SLAVE_MSS_PROC_MS_MPU_CFG 111 -#define SLAVE_MEM_NOC_GNOC 112 -#define SLAVE_LLCC 113 -#define SLAVE_MEM_NOC_SNOC 114 -#define SLAVE_SERVICE_MEM_NOC 115 -#define SLAVE_MNOC_SF_MEM_NOC 116 -#define SLAVE_MNOC_HF_MEM_NOC 117 -#define SLAVE_SERVICE_MNOC 118 -#define SLAVE_APPSS 119 -#define SLAVE_SNOC_CNOC 120 -#define SLAVE_SNOC_MEM_NOC_GC 121 -#define SLAVE_SNOC_MEM_NOC_SF 122 -#define SLAVE_IMEM 123 -#define SLAVE_PCIE_0 124 -#define SLAVE_PCIE_1 125 -#define SLAVE_PIMEM 126 -#define SLAVE_SERVICE_SNOC 127 -#define SLAVE_QDSS_STM 128 -#define SLAVE_TCU 129 +#define MASTER_TSIF 1 +#define MASTER_SDCC_2 2 +#define MASTER_SDCC_4 3 +#define MASTER_UFS_CARD 4 +#define MASTER_UFS_MEM 5 +#define MASTER_PCIE_0 6 +#define SLAVE_A1NOC_SNOC 7 +#define SLAVE_SERVICE_A1NOC 8 +#define SLAVE_ANOC_PCIE_A1NOC_SNOC 9 + +#define MASTER_A2NOC_CFG 0 +#define MASTER_QDSS_BAM 1 +#define MASTER_CNOC_A2NOC 2 +#define MASTER_CRYPTO 3 +#define MASTER_IPA 4 +#define MASTER_PCIE_1 5 +#define MASTER_QDSS_ETR 6 +#define MASTER_USB3_0 7 +#define MASTER_USB3_1 8 +#define SLAVE_A2NOC_SNOC 9 +#define SLAVE_ANOC_PCIE_SNOC 10 +#define SLAVE_SERVICE_A2NOC 11 + +#define MASTER_SPDM 0 +#define MASTER_TIC 1 +#define MASTER_SNOC_CNOC 2 +#define MASTER_QDSS_DAP 3 +#define SLAVE_A1NOC_CFG 4 +#define SLAVE_A2NOC_CFG 5 +#define SLAVE_AOP 6 +#define SLAVE_AOSS 7 +#define SLAVE_CAMERA_CFG 8 +#define SLAVE_CLK_CTL 9 +#define SLAVE_CDSP_CFG 10 +#define SLAVE_RBCPR_CX_CFG 11 +#define SLAVE_CRYPTO_0_CFG 12 +#define SLAVE_DCC_CFG 13 +#define SLAVE_CNOC_DDRSS 14 +#define SLAVE_DISPLAY_CFG 15 +#define SLAVE_GLM 16 +#define SLAVE_GFX3D_CFG 17 +#define SLAVE_IMEM_CFG 18 +#define SLAVE_IPA_CFG 19 +#define SLAVE_CNOC_MNOC_CFG 20 +#define SLAVE_PCIE_0_CFG 21 +#define SLAVE_PCIE_1_CFG 22 +#define SLAVE_PDM 23 +#define SLAVE_SOUTH_PHY_CFG 24 +#define SLAVE_PIMEM_CFG 25 +#define SLAVE_PRNG 26 +#define SLAVE_QDSS_CFG 27 +#define SLAVE_BLSP_2 28 +#define SLAVE_BLSP_1 29 +#define SLAVE_SDCC_2 30 +#define SLAVE_SDCC_4 31 +#define SLAVE_SNOC_CFG 32 +#define SLAVE_SPDM_WRAPPER 33 +#define SLAVE_SPSS_CFG 34 +#define SLAVE_TCSR 35 +#define SLAVE_TLMM_NORTH 36 +#define SLAVE_TLMM_SOUTH 37 +#define SLAVE_TSIF 38 +#define SLAVE_UFS_CARD_CFG 39 +#define SLAVE_UFS_MEM_CFG 40 +#define SLAVE_USB3_0 41 +#define SLAVE_USB3_1 42 +#define SLAVE_VENUS_CFG 43 +#define SLAVE_VSENSE_CTRL_CFG 44 +#define SLAVE_CNOC_A2NOC 45 +#define SLAVE_SERVICE_CNOC 46 + +#define MASTER_CNOC_DC_NOC 0 +#define SLAVE_LLCC_CFG 1 +#define SLAVE_MEM_NOC_CFG 2 + +#define 
MASTER_APPSS_PROC 0 +#define MASTER_GNOC_CFG 1 +#define SLAVE_GNOC_SNOC 2 +#define SLAVE_GNOC_MEM_NOC 3 +#define SLAVE_SERVICE_GNOC 4 + +#define MASTER_TCU_0 0 +#define MASTER_MEM_NOC_CFG 1 +#define MASTER_GNOC_MEM_NOC 2 +#define MASTER_MNOC_HF_MEM_NOC 3 +#define MASTER_MNOC_SF_MEM_NOC 4 +#define MASTER_SNOC_GC_MEM_NOC 5 +#define MASTER_SNOC_SF_MEM_NOC 6 +#define MASTER_GFX3D 7 +#define SLAVE_MSS_PROC_MS_MPU_CFG 8 +#define SLAVE_MEM_NOC_GNOC 9 +#define SLAVE_LLCC 10 +#define SLAVE_MEM_NOC_SNOC 11 +#define SLAVE_SERVICE_MEM_NOC 12 +#define MASTER_LLCC 13 +#define SLAVE_EBI1 14 + +#define MASTER_CNOC_MNOC_CFG 0 +#define MASTER_CAMNOC_HF0 1 +#define MASTER_CAMNOC_HF1 2 +#define MASTER_CAMNOC_SF 3 +#define MASTER_MDP0 4 +#define MASTER_MDP1 5 +#define MASTER_ROTATOR 6 +#define MASTER_VIDEO_P0 7 +#define MASTER_VIDEO_P1 8 +#define MASTER_VIDEO_PROC 9 +#define SLAVE_MNOC_SF_MEM_NOC 10 +#define SLAVE_MNOC_HF_MEM_NOC 11 +#define SLAVE_SERVICE_MNOC 12 +#define MASTER_CAMNOC_HF0_UNCOMP 13 +#define MASTER_CAMNOC_HF1_UNCOMP 14 +#define MASTER_CAMNOC_SF_UNCOMP 15 +#define SLAVE_CAMNOC_UNCOMP 16 + +#define MASTER_SNOC_CFG 0 +#define MASTER_A1NOC_SNOC 1 +#define MASTER_A2NOC_SNOC 2 +#define MASTER_GNOC_SNOC 3 +#define MASTER_MEM_NOC_SNOC 4 +#define MASTER_ANOC_PCIE_SNOC 5 +#define MASTER_PIMEM 6 +#define MASTER_GIC 7 +#define SLAVE_APPSS 8 +#define SLAVE_SNOC_CNOC 9 +#define SLAVE_SNOC_MEM_NOC_GC 10 +#define SLAVE_SNOC_MEM_NOC_SF 11 +#define SLAVE_IMEM 12 +#define SLAVE_PCIE_0 13 +#define SLAVE_PCIE_1 14 +#define SLAVE_PIMEM 15 +#define SLAVE_SERVICE_SNOC 16 +#define SLAVE_QDSS_STM 17 +#define SLAVE_TCU 18 #endif diff --git a/include/dt-bindings/leds/common.h b/include/dt-bindings/leds/common.h index 9e1256a7c1bf..0ce7dfc00dcb 100644 --- a/include/dt-bindings/leds/common.h +++ b/include/dt-bindings/leds/common.h @@ -6,6 +6,7 @@ * Author: Jacek Anaszewski <j.anaszewski@samsung.com> * * Copyright (C) 2019 Jacek Anaszewski <jacek.anaszewski@gmail.com> + * Copyright (C) 2020 Pavel Machek <pavel@ucw.cz> */ #ifndef __DT_BINDINGS_LEDS_H @@ -32,16 +33,38 @@ #define LED_COLOR_ID_MAX 8 /* Standard LED functions */ +/* Keyboard LEDs, usually it would be input4::capslock etc. */ +/* Obsolete equivalent: "shift-key-light" */ +#define LED_FUNCTION_CAPSLOCK "capslock" +#define LED_FUNCTION_SCROLLLOCK "scrolllock" +#define LED_FUNCTION_NUMLOCK "numlock" +/* Obsolete equivalents: "tpacpi::thinklight" (IBM/Lenovo Thinkpads), + "lp5523:kb{1,2,3,4,5,6}" (Nokia N900) */ +#define LED_FUNCTION_KBD_BACKLIGHT "kbd_backlight" + +/* System LEDs, usually found on system body. + platform::mute (etc) is sometimes seen, :mute would be better */ +#define LED_FUNCTION_POWER "power" +#define LED_FUNCTION_DISK "disk" + +/* Obsolete: "platform:*:charging" (allwinner sun50i) */ +#define LED_FUNCTION_CHARGING "charging" +/* Used RGB notification LEDs common on phones. + Obsolete equivalents: "status-led:{red,green,blue}" (Motorola Droid 4), + "lp5523:{r,g,b}" (Nokia N900) */ +#define LED_FUNCTION_STATUS "status" + +#define LED_FUNCTION_MICMUTE "micmute" +#define LED_FUNCTION_MUTE "mute" + +/* Miscelleaus functions. Use functions above if you can. 
*/ #define LED_FUNCTION_ACTIVITY "activity" #define LED_FUNCTION_ALARM "alarm" #define LED_FUNCTION_BACKLIGHT "backlight" #define LED_FUNCTION_BLUETOOTH "bluetooth" #define LED_FUNCTION_BOOT "boot" #define LED_FUNCTION_CPU "cpu" -#define LED_FUNCTION_CAPSLOCK "capslock" -#define LED_FUNCTION_CHARGING "charging" #define LED_FUNCTION_DEBUG "debug" -#define LED_FUNCTION_DISK "disk" #define LED_FUNCTION_DISK_ACTIVITY "disk-activity" #define LED_FUNCTION_DISK_ERR "disk-err" #define LED_FUNCTION_DISK_READ "disk-read" @@ -50,21 +73,14 @@ #define LED_FUNCTION_FLASH "flash" #define LED_FUNCTION_HEARTBEAT "heartbeat" #define LED_FUNCTION_INDICATOR "indicator" -#define LED_FUNCTION_KBD_BACKLIGHT "kbd_backlight" #define LED_FUNCTION_LAN "lan" #define LED_FUNCTION_MAIL "mail" #define LED_FUNCTION_MTD "mtd" -#define LED_FUNCTION_MICMUTE "micmute" -#define LED_FUNCTION_MUTE "mute" -#define LED_FUNCTION_NUMLOCK "numlock" #define LED_FUNCTION_PANIC "panic" #define LED_FUNCTION_PROGRAMMING "programming" -#define LED_FUNCTION_POWER "power" #define LED_FUNCTION_RX "rx" #define LED_FUNCTION_SD "sd" -#define LED_FUNCTION_SCROLLLOCK "scrolllock" #define LED_FUNCTION_STANDBY "standby" -#define LED_FUNCTION_STATUS "status" #define LED_FUNCTION_TORCH "torch" #define LED_FUNCTION_TX "tx" #define LED_FUNCTION_USB "usb" diff --git a/include/dt-bindings/media/tvp5150.h b/include/dt-bindings/media/tvp5150.h index 01eedf4985b8..dda00c038530 100644 --- a/include/dt-bindings/media/tvp5150.h +++ b/include/dt-bindings/media/tvp5150.h @@ -14,8 +14,6 @@ #define TVP5150_COMPOSITE1 1 #define TVP5150_SVIDEO 2 -#define TVP5150_INPUT_NUM 3 - /* TVP5150 HW outputs */ #define TVP5150_NORMAL 0 #define TVP5150_BLACK_SCREEN 1 diff --git a/include/dt-bindings/power/meson-a1-power.h b/include/dt-bindings/power/meson-a1-power.h new file mode 100644 index 000000000000..6cf50bfb8ccf --- /dev/null +++ b/include/dt-bindings/power/meson-a1-power.h @@ -0,0 +1,32 @@ +/* SPDX-License-Identifier: (GPL-2.0+ or MIT) */ +/* + * Copyright (c) 2019 Amlogic, Inc. + * Author: Jianxin Pan <jianxin.pan@amlogic.com> + */ + +#ifndef _DT_BINDINGS_MESON_A1_POWER_H +#define _DT_BINDINGS_MESON_A1_POWER_H + +#define PWRC_DSPA_ID 8 +#define PWRC_DSPB_ID 9 +#define PWRC_UART_ID 10 +#define PWRC_DMC_ID 11 +#define PWRC_I2C_ID 12 +#define PWRC_PSRAM_ID 13 +#define PWRC_ACODEC_ID 14 +#define PWRC_AUDIO_ID 15 +#define PWRC_OTP_ID 16 +#define PWRC_DMA_ID 17 +#define PWRC_SD_EMMC_ID 18 +#define PWRC_RAMA_ID 19 +#define PWRC_RAMB_ID 20 +#define PWRC_IR_ID 21 +#define PWRC_SPICC_ID 22 +#define PWRC_SPIFC_ID 23 +#define PWRC_USB_ID 24 +#define PWRC_NIC_ID 25 +#define PWRC_PDMIN_ID 26 +#define PWRC_RSA_ID 27 +#define PWRC_MAX_ID 28 + +#endif diff --git a/include/dt-bindings/soc/tegra-pmc.h b/include/dt-bindings/soc/tegra-pmc.h new file mode 100644 index 000000000000..a99a457471ee --- /dev/null +++ b/include/dt-bindings/soc/tegra-pmc.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved. 
+ */ + +#ifndef _DT_BINDINGS_SOC_TEGRA_PMC_H +#define _DT_BINDINGS_SOC_TEGRA_PMC_H + +#define TEGRA_PMC_CLK_OUT_1 0 +#define TEGRA_PMC_CLK_OUT_2 1 +#define TEGRA_PMC_CLK_OUT_3 2 +#define TEGRA_PMC_CLK_BLINK 3 + +#define TEGRA_PMC_CLK_MAX 4 + +#endif /* _DT_BINDINGS_SOC_TEGRA_PMC_H */ diff --git a/include/dt-bindings/sound/meson-aiu.h b/include/dt-bindings/sound/meson-aiu.h new file mode 100644 index 000000000000..1051b8af298b --- /dev/null +++ b/include/dt-bindings/sound/meson-aiu.h @@ -0,0 +1,18 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __DT_MESON_AIU_H +#define __DT_MESON_AIU_H + +#define AIU_CPU 0 +#define AIU_HDMI 1 +#define AIU_ACODEC 2 + +#define CPU_I2S_FIFO 0 +#define CPU_SPDIF_FIFO 1 +#define CPU_I2S_ENCODER 2 +#define CPU_SPDIF_ENCODER 3 + +#define CTRL_I2S 0 +#define CTRL_PCM 1 +#define CTRL_OUT 2 + +#endif /* __DT_MESON_AIU_H */ diff --git a/include/dt-bindings/sound/meson-g12a-toacodec.h b/include/dt-bindings/sound/meson-g12a-toacodec.h new file mode 100644 index 000000000000..69d7a75592a2 --- /dev/null +++ b/include/dt-bindings/sound/meson-g12a-toacodec.h @@ -0,0 +1,10 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __DT_MESON_G12A_TOACODEC_H +#define __DT_MESON_G12A_TOACODEC_H + +#define TOACODEC_IN_A 0 +#define TOACODEC_IN_B 1 +#define TOACODEC_IN_C 2 +#define TOACODEC_OUT 3 + +#endif /* __DT_MESON_G12A_TOACODEC_H */ diff --git a/include/keys/big_key-type.h b/include/keys/big_key-type.h index f6a7ba4dccd4..3fee04f81439 100644 --- a/include/keys/big_key-type.h +++ b/include/keys/big_key-type.h @@ -17,6 +17,6 @@ extern void big_key_free_preparse(struct key_preparsed_payload *prep); extern void big_key_revoke(struct key *key); extern void big_key_destroy(struct key *key); extern void big_key_describe(const struct key *big_key, struct seq_file *m); -extern long big_key_read(const struct key *key, char __user *buffer, size_t buflen); +extern long big_key_read(const struct key *key, char *buffer, size_t buflen); #endif /* _KEYS_BIG_KEY_TYPE_H */ diff --git a/include/keys/user-type.h b/include/keys/user-type.h index d5e73266a81a..be61fcddc02a 100644 --- a/include/keys/user-type.h +++ b/include/keys/user-type.h @@ -41,8 +41,7 @@ extern int user_update(struct key *key, struct key_preparsed_payload *prep); extern void user_revoke(struct key *key); extern void user_destroy(struct key *key); extern void user_describe(const struct key *user, struct seq_file *m); -extern long user_read(const struct key *key, - char __user *buffer, size_t buflen); +extern long user_read(const struct key *key, char *buffer, size_t buflen); static inline const struct user_key_payload *user_key_payload_rcu(const struct key *key) { diff --git a/include/kunit/test.h b/include/kunit/test.h index 2dfb550c6723..9b0c46a6ca1f 100644 --- a/include/kunit/test.h +++ b/include/kunit/test.h @@ -81,6 +81,17 @@ struct kunit_resource { struct kunit; +/* Size of log associated with test. */ +#define KUNIT_LOG_SIZE 512 + +/* + * TAP specifies subtest stream indentation of 4 spaces, 8 spaces for a + * sub-subtest. See the "Subtests" section in + * https://node-tap.org/tap-protocol/ + */ +#define KUNIT_SUBTEST_INDENT " " +#define KUNIT_SUBSUBTEST_INDENT " " + /** * struct kunit_case - represents an individual test case. * @@ -123,8 +134,14 @@ struct kunit_case { /* private: internal use only. */ bool success; + char *log; }; +static inline char *kunit_status_to_string(bool status) +{ + return status ? 
"ok" : "not ok"; +} + /** * KUNIT_CASE - A helper for creating a &struct kunit_case * @@ -157,6 +174,10 @@ struct kunit_suite { int (*init)(struct kunit *test); void (*exit)(struct kunit *test); struct kunit_case *test_cases; + + /* private - internal use only */ + struct dentry *debugfs; + char *log; }; /** @@ -175,6 +196,7 @@ struct kunit { /* private: internal use only. */ const char *name; /* Read only after initialization! */ + char *log; /* Points at case log after initialization */ struct kunit_try_catch try_catch; /* * success starts as true, and may only be set to false during a @@ -193,10 +215,19 @@ struct kunit { struct list_head resources; /* Protected by lock. */ }; -void kunit_init_test(struct kunit *test, const char *name); +void kunit_init_test(struct kunit *test, const char *name, char *log); int kunit_run_tests(struct kunit_suite *suite); +size_t kunit_suite_num_test_cases(struct kunit_suite *suite); + +unsigned int kunit_test_case_num(struct kunit_suite *suite, + struct kunit_case *test_case); + +int __kunit_test_suites_init(struct kunit_suite **suites); + +void __kunit_test_suites_exit(struct kunit_suite **suites); + /** * kunit_test_suites() - used to register one or more &struct kunit_suite * with KUnit. @@ -226,20 +257,22 @@ int kunit_run_tests(struct kunit_suite *suite); static struct kunit_suite *suites[] = { __VA_ARGS__, NULL}; \ static int kunit_test_suites_init(void) \ { \ - unsigned int i; \ - for (i = 0; suites[i] != NULL; i++) \ - kunit_run_tests(suites[i]); \ - return 0; \ + return __kunit_test_suites_init(suites); \ } \ late_initcall(kunit_test_suites_init); \ static void __exit kunit_test_suites_exit(void) \ { \ - return; \ + return __kunit_test_suites_exit(suites); \ } \ module_exit(kunit_test_suites_exit) #define kunit_test_suite(suite) kunit_test_suites(&suite) +#define kunit_suite_for_each_test_case(suite, test_case) \ + for (test_case = suite->test_cases; test_case->run_case; test_case++) + +bool kunit_suite_has_succeeded(struct kunit_suite *suite); + /* * Like kunit_alloc_resource() below, but returns the struct kunit_resource * object that contains the allocation. This is mostly for testing purposes. @@ -356,8 +389,22 @@ static inline void *kunit_kzalloc(struct kunit *test, size_t size, gfp_t gfp) void kunit_cleanup(struct kunit *test); -#define kunit_printk(lvl, test, fmt, ...) \ - printk(lvl "\t# %s: " fmt, (test)->name, ##__VA_ARGS__) +void kunit_log_append(char *log, const char *fmt, ...); + +/* + * printk and log to per-test or per-suite log buffer. Logging only done + * if CONFIG_KUNIT_DEBUGFS is 'y'; if it is 'n', no log is allocated/used. + */ +#define kunit_log(lvl, test_or_suite, fmt, ...) \ + do { \ + printk(lvl fmt, ##__VA_ARGS__); \ + kunit_log_append((test_or_suite)->log, fmt "\n", \ + ##__VA_ARGS__); \ + } while (0) + +#define kunit_printk(lvl, test, fmt, ...) \ + kunit_log(lvl, test, KUNIT_SUBTEST_INDENT "# %s: " fmt, \ + (test)->name, ##__VA_ARGS__) /** * kunit_info() - Prints an INFO level message associated with @test. diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h index 9d53f545a3d5..69f4164d6477 100644 --- a/include/kvm/arm_vgic.h +++ b/include/kvm/arm_vgic.h @@ -70,6 +70,7 @@ struct vgic_global { /* Hardware has GICv4? 
*/ bool has_gicv4; + bool has_gicv4_1; /* GIC system register CPU interface */ struct static_key_false gicv3_cpuif; @@ -230,6 +231,9 @@ struct vgic_dist { /* distributor enabled */ bool enabled; + /* Wants SGIs without active state */ + bool nassgireq; + struct vgic_irq *spis; struct vgic_io_device dist_iodev; diff --git a/include/linux/acpi.h b/include/linux/acpi.h index 0f24d701fbdc..9f70b7807d53 100644 --- a/include/linux/acpi.h +++ b/include/linux/acpi.h @@ -488,6 +488,11 @@ void __init acpi_nvs_nosave_s3(void); void __init acpi_sleep_no_blacklist(void); #endif /* CONFIG_PM_SLEEP */ +int acpi_register_wakeup_handler( + int wake_irq, bool (*wakeup)(void *context), void *context); +void acpi_unregister_wakeup_handler( + bool (*wakeup)(void *context), void *context); + struct acpi_osc_context { char *uuid_str; /* UUID string */ int rev; @@ -530,8 +535,9 @@ extern bool osc_pc_lpi_support_confirmed; #define OSC_PCI_CLOCK_PM_SUPPORT 0x00000004 #define OSC_PCI_SEGMENT_GROUPS_SUPPORT 0x00000008 #define OSC_PCI_MSI_SUPPORT 0x00000010 +#define OSC_PCI_EDR_SUPPORT 0x00000080 #define OSC_PCI_HPX_TYPE_3_SUPPORT 0x00000100 -#define OSC_PCI_SUPPORT_MASKS 0x0000011f +#define OSC_PCI_SUPPORT_MASKS 0x0000019f /* PCI Host Bridge _OSC: Capabilities DWORD 3: Control Field */ #define OSC_PCI_EXPRESS_NATIVE_HP_CONTROL 0x00000001 @@ -540,7 +546,8 @@ extern bool osc_pc_lpi_support_confirmed; #define OSC_PCI_EXPRESS_AER_CONTROL 0x00000008 #define OSC_PCI_EXPRESS_CAPABILITY_CONTROL 0x00000010 #define OSC_PCI_EXPRESS_LTR_CONTROL 0x00000020 -#define OSC_PCI_CONTROL_MASKS 0x0000003f +#define OSC_PCI_EXPRESS_DPC_CONTROL 0x00000080 +#define OSC_PCI_CONTROL_MASKS 0x000000bf #define ACPI_GSB_ACCESS_ATTRIB_QUICK 0x00000002 #define ACPI_GSB_ACCESS_ATTRIB_SEND_RCV 0x00000004 diff --git a/include/linux/aer.h b/include/linux/aer.h index fa19e01f418a..97f64ba1b34a 100644 --- a/include/linux/aer.h +++ b/include/linux/aer.h @@ -44,8 +44,7 @@ struct aer_capability_regs { /* PCIe port driver needs this function to enable AER */ int pci_enable_pcie_error_reporting(struct pci_dev *dev); int pci_disable_pcie_error_reporting(struct pci_dev *dev); -int pci_cleanup_aer_uncorrect_error_status(struct pci_dev *dev); -int pci_cleanup_aer_error_status_regs(struct pci_dev *dev); +int pci_aer_clear_nonfatal_status(struct pci_dev *dev); void pci_save_aer_state(struct pci_dev *dev); void pci_restore_aer_state(struct pci_dev *dev); #else @@ -57,11 +56,7 @@ static inline int pci_disable_pcie_error_reporting(struct pci_dev *dev) { return -EINVAL; } -static inline int pci_cleanup_aer_uncorrect_error_status(struct pci_dev *dev) -{ - return -EINVAL; -} -static inline int pci_cleanup_aer_error_status_regs(struct pci_dev *dev) +static inline int pci_aer_clear_nonfatal_status(struct pci_dev *dev) { return -EINVAL; } diff --git a/include/linux/arch_topology.h b/include/linux/arch_topology.h index 3015ecbb90b1..0566cb3314ef 100644 --- a/include/linux/arch_topology.h +++ b/include/linux/arch_topology.h @@ -16,9 +16,7 @@ bool topology_parse_cpu_capacity(struct device_node *cpu_node, int cpu); DECLARE_PER_CPU(unsigned long, cpu_scale); -struct sched_domain; -static inline -unsigned long topology_get_cpu_scale(int cpu) +static inline unsigned long topology_get_cpu_scale(int cpu) { return per_cpu(cpu_scale, cpu); } @@ -27,12 +25,23 @@ void topology_set_cpu_scale(unsigned int cpu, unsigned long capacity); DECLARE_PER_CPU(unsigned long, freq_scale); -static inline -unsigned long topology_get_freq_scale(int cpu) +static inline unsigned long 
topology_get_freq_scale(int cpu) { return per_cpu(freq_scale, cpu); } +bool arch_freq_counters_available(struct cpumask *cpus); + +DECLARE_PER_CPU(unsigned long, thermal_pressure); + +static inline unsigned long topology_get_thermal_pressure(int cpu) +{ + return per_cpu(thermal_pressure, cpu); +} + +void arch_set_thermal_pressure(struct cpumask *cpus, + unsigned long th_pressure); + struct cpu_topology { int thread_id; int core_id; diff --git a/include/linux/atmel-isc-media.h b/include/linux/atmel-isc-media.h new file mode 100644 index 000000000000..79a320fb724e --- /dev/null +++ b/include/linux/atmel-isc-media.h @@ -0,0 +1,58 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2019 Microchip Technology Inc. and its subsidiaries + * + * Author: Eugen Hristev <eugen.hristev@microchip.com> + */ + +#ifndef __LINUX_ATMEL_ISC_MEDIA_H__ +#define __LINUX_ATMEL_ISC_MEDIA_H__ + +/* + * There are 8 controls available: + * 4 gain controls, sliders, for each of the BAYER components: R, B, GR, GB. + * These gains are multipliers for each component, in format unsigned 0:4:9 with + * a default value of 512 (1.0 multiplier). + * 4 offset controls, sliders, for each of the BAYER components: R, B, GR, GB. + * These offsets are added/substracted from each component, in format signed + * 1:12:0 with a default value of 0 (+/- 0) + * + * To expose this to userspace, added 8 custom controls, in an auto cluster. + * + * To summarize the functionality: + * The auto cluster switch is the auto white balance control, and it works + * like this: + * AWB == 1: autowhitebalance is on, the do_white_balance button is inactive, + * the gains/offsets are inactive, but volatile and readable. + * Thus, the results of the whitebalance algorithm are available to userspace to + * read at any time. + * AWB == 0: autowhitebalance is off, cluster is in manual mode, user can + * configure the gain/offsets directly. + * More than that, if the do_white_balance button is + * pressed, the driver will perform one-time-adjustment, (preferably with color + * checker card) and the userspace can read again the new values. + * + * With this feature, the userspace can save the coefficients and reinstall them + * for example after reboot or reprobing the driver. + */ + +enum atmel_isc_ctrl_id { + /* Red component gain control */ + ISC_CID_R_GAIN = (V4L2_CID_USER_ATMEL_ISC_BASE + 0), + /* Blue component gain control */ + ISC_CID_B_GAIN, + /* Green Red component gain control */ + ISC_CID_GR_GAIN, + /* Green Blue gain control */ + ISC_CID_GB_GAIN, + /* Red component offset control */ + ISC_CID_R_OFFSET, + /* Blue component offset control */ + ISC_CID_B_OFFSET, + /* Green Red component offset control */ + ISC_CID_GR_OFFSET, + /* Green Blue component offset control */ + ISC_CID_GB_OFFSET, +}; + +#endif diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h index b40fc633f3be..a345d9fed3d8 100644 --- a/include/linux/binfmts.h +++ b/include/linux/binfmts.h @@ -44,7 +44,13 @@ struct linux_binprm { * exec has happened. Used to sanitize execution environment * and to set AT_SECURE auxv for glibc. */ - secureexec:1; + secureexec:1, + /* + * Set by flush_old_exec, when exec_mmap has been called. + * This is past the point of no return, when the + * exec_update_mutex has been taken. 
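
A hedged sketch of how an exec error path might consume the new called_exec_mmap flag; the function name, the assumed location of exec_update_mutex in current->signal, and the force_sigsegv() call are illustrative assumptions, not code from this patch:

	/* Hypothetical error-path helper: once exec_mmap() has run, the old mm is
	 * gone and exec_update_mutex is held, so a failed exec can no longer just
	 * return an error to its caller. */
	static void example_exec_abort(struct linux_binprm *bprm)
	{
		if (!bprm->called_exec_mmap)
			return;		/* still before the point of no return */

		mutex_unlock(&current->signal->exec_update_mutex);	/* assumed lock location */
		force_sigsegv(SIGSEGV);	/* past the point of no return: kill the task */
	}
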
+ */ + called_exec_mmap:1; #ifdef __alpha__ unsigned int taso:1; #endif diff --git a/include/linux/bio.h b/include/linux/bio.h index 853d92ceee64..c1c0f9ea4e63 100644 --- a/include/linux/bio.h +++ b/include/linux/bio.h @@ -441,14 +441,6 @@ void __bio_add_page(struct bio *bio, struct page *page, unsigned int len, unsigned int off); int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter); void bio_release_pages(struct bio *bio, bool mark_dirty); -struct rq_map_data; -extern struct bio *bio_map_user_iov(struct request_queue *, - struct iov_iter *, gfp_t); -extern void bio_unmap_user(struct bio *); -extern struct bio *bio_map_kern(struct request_queue *, void *, unsigned int, - gfp_t); -extern struct bio *bio_copy_kern(struct request_queue *, void *, unsigned int, - gfp_t, int); extern void bio_set_pages_dirty(struct bio *bio); extern void bio_check_pages_dirty(struct bio *bio); @@ -463,14 +455,9 @@ extern void bio_copy_data_iter(struct bio *dst, struct bvec_iter *dst_iter, extern void bio_copy_data(struct bio *dst, struct bio *src); extern void bio_list_copy_data(struct bio *dst, struct bio *src); extern void bio_free_pages(struct bio *bio); - -extern struct bio *bio_copy_user_iov(struct request_queue *, - struct rq_map_data *, - struct iov_iter *, - gfp_t); -extern int bio_uncopy_user(struct bio *); void zero_fill_bio_iter(struct bio *bio, struct bvec_iter iter); void bio_truncate(struct bio *bio, unsigned new_size); +void guard_bio_eod(struct bio *bio); static inline void zero_fill_bio(struct bio *bio) { diff --git a/include/linux/bitfield.h b/include/linux/bitfield.h index 4bbb5f1c8b5b..48ea093ff04c 100644 --- a/include/linux/bitfield.h +++ b/include/linux/bitfield.h @@ -56,6 +56,19 @@ }) /** + * FIELD_MAX() - produce the maximum value representable by a field + * @_mask: shifted mask defining the field's length and position + * + * FIELD_MAX() returns the maximum value that can be held in the field + * specified by @_mask. 
+ */ +#define FIELD_MAX(_mask) \ + ({ \ + __BF_FIELD_CHECK(_mask, 0ULL, 0ULL, "FIELD_MAX: "); \ + (typeof(_mask))((_mask) >> __bf_shf(_mask)); \ + }) + +/** * FIELD_FIT() - check if value fits in the field * @_mask: shifted mask defining the field's length and position * @_val: value to test against the field @@ -110,6 +123,7 @@ static __always_inline u64 field_mask(u64 field) { return field / field_multiplier(field); } +#define field_max(field) ((typeof(field))field_mask(field)) #define ____MAKE_OP(type,base,to,from) \ static __always_inline __##type type##_encode_bits(base v, base field) \ { \ diff --git a/include/linux/bitmap.h b/include/linux/bitmap.h index e52ceb1a73d3..99058eb81042 100644 --- a/include/linux/bitmap.h +++ b/include/linux/bitmap.h @@ -50,7 +50,13 @@ * bitmap_set(dst, pos, nbits) Set specified bit area * bitmap_clear(dst, pos, nbits) Clear specified bit area * bitmap_find_next_zero_area(buf, len, pos, n, mask) Find bit free area - * bitmap_find_next_zero_area_off(buf, len, pos, n, mask) as above + * bitmap_find_next_zero_area_off(buf, len, pos, n, mask, mask_off) as above + * bitmap_next_clear_region(map, &start, &end, nbits) Find next clear region + * bitmap_next_set_region(map, &start, &end, nbits) Find next set region + * bitmap_for_each_clear_region(map, rs, re, start, end) + * Iterate over all clear regions + * bitmap_for_each_set_region(map, rs, re, start, end) + * Iterate over all set regions * bitmap_shift_right(dst, src, n, nbits) *dst = *src >> n * bitmap_shift_left(dst, src, n, nbits) *dst = *src << n * bitmap_cut(dst, src, first, n, nbits) Cut n bits from first, copy rest diff --git a/include/linux/bitops.h b/include/linux/bitops.h index 47f54b459c26..9acf654f0b19 100644 --- a/include/linux/bitops.h +++ b/include/linux/bitops.h @@ -162,7 +162,7 @@ static inline __u8 ror8(__u8 word, unsigned int shift) * * This is safe to use for 16- and 8-bit types as well. */ -static inline __s32 sign_extend32(__u32 value, int index) +static __always_inline __s32 sign_extend32(__u32 value, int index) { __u8 shift = 31 - index; return (__s32)(value << shift) >> shift; @@ -173,7 +173,7 @@ static inline __s32 sign_extend32(__u32 value, int index) * @value: value to sign extend * @index: 0 based bit index (0<=index<64) to sign bit */ -static inline __s64 sign_extend64(__u64 value, int index) +static __always_inline __s64 sign_extend64(__u64 value, int index) { __u8 shift = 63 - index; return (__s64)(value << shift) >> shift; diff --git a/include/linux/bits.h b/include/linux/bits.h index 669d69441a62..4671fbf28842 100644 --- a/include/linux/bits.h +++ b/include/linux/bits.h @@ -3,9 +3,9 @@ #define __LINUX_BITS_H #include <linux/const.h> +#include <vdso/bits.h> #include <asm/bitsperlong.h> -#define BIT(nr) (UL(1) << (nr)) #define BIT_ULL(nr) (ULL(1) << (nr)) #define BIT_MASK(nr) (UL(1) << ((nr) % BITS_PER_LONG)) #define BIT_WORD(nr) ((nr) / BITS_PER_LONG) @@ -18,12 +18,30 @@ * position @h. For example * GENMASK_ULL(39, 21) gives us the 64bit vector 0x000000ffffe00000. */ -#define GENMASK(h, l) \ +#if !defined(__ASSEMBLY__) && \ + (!defined(CONFIG_CC_IS_GCC) || CONFIG_GCC_VERSION >= 49000) +#include <linux/build_bug.h> +#define GENMASK_INPUT_CHECK(h, l) \ + (BUILD_BUG_ON_ZERO(__builtin_choose_expr( \ + __builtin_constant_p((l) > (h)), (l) > (h), 0))) +#else +/* + * BUILD_BUG_ON_ZERO is not available in h files included from asm files, + * disable the input check if that is the case. 
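
For orientation, a minimal sketch of how the FIELD_MAX() helper and the new GENMASK() argument check above behave; EXAMPLE_FREQ_MASK and example_set_max_freq() are made-up names used only for illustration, not part of this patch:

	#include <linux/bitfield.h>
	#include <linux/bits.h>

	#define EXAMPLE_FREQ_MASK	GENMASK(7, 4)	/* hypothetical 4-bit field at bits 7..4 */

	static u32 example_set_max_freq(u32 reg)
	{
		/* FIELD_MAX(GENMASK(7, 4)) evaluates to 0xf, the largest value the field holds */
		return (reg & ~EXAMPLE_FREQ_MASK) |
		       FIELD_PREP(EXAMPLE_FREQ_MASK, FIELD_MAX(EXAMPLE_FREQ_MASK));
	}

	/*
	 * With GENMASK_INPUT_CHECK(), a swapped call such as GENMASK(4, 7) now fails
	 * at build time via BUILD_BUG_ON_ZERO() (on toolchains where the check is
	 * enabled) instead of silently producing a wrong mask.
	 */
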
+ */ +#define GENMASK_INPUT_CHECK(h, l) 0 +#endif + +#define __GENMASK(h, l) \ (((~UL(0)) - (UL(1) << (l)) + 1) & \ (~UL(0) >> (BITS_PER_LONG - 1 - (h)))) +#define GENMASK(h, l) \ + (GENMASK_INPUT_CHECK(h, l) + __GENMASK(h, l)) -#define GENMASK_ULL(h, l) \ +#define __GENMASK_ULL(h, l) \ (((~ULL(0)) - (ULL(1) << (l)) + 1) & \ (~ULL(0) >> (BITS_PER_LONG_LONG - 1 - (h)))) +#define GENMASK_ULL(h, l) \ + (GENMASK_INPUT_CHECK(h, l) + __GENMASK_ULL(h, l)) #endif /* __LINUX_BITS_H */ diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h index 11cfd6470b1a..f389d7c724bd 100644 --- a/include/linux/blk-mq.h +++ b/include/linux/blk-mq.h @@ -162,7 +162,10 @@ struct blk_mq_hw_ctx { struct dentry *sched_debugfs_dir; #endif - /** @hctx_list: List of all hardware queues. */ + /** + * @hctx_list: if this hctx is not in use, this is an entry in + * q->unused_hctx_list. + */ struct list_head hctx_list; /** @@ -409,6 +412,8 @@ enum { << BLK_MQ_F_ALLOC_POLICY_START_BIT) struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *); +struct request_queue *blk_mq_init_queue_data(struct blk_mq_tag_set *set, + void *queuedata); struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set, struct request_queue *q, bool elevator_init); diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 053ea4b51988..32868fbedc9e 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -524,7 +524,7 @@ struct request_queue { unsigned int sg_reserved_size; int node; #ifdef CONFIG_BLK_DEV_IO_TRACE - struct blk_trace *blk_trace; + struct blk_trace __rcu *blk_trace; struct mutex blk_trace_mutex; #endif /* @@ -952,6 +952,10 @@ static inline unsigned int blk_rq_stats_sectors(const struct request *rq) } #ifdef CONFIG_BLK_DEV_ZONED + +/* Helper to convert BLK_ZONE_ZONE_XXX to its string format XXX */ +const char *blk_zone_cond_str(enum blk_zone_cond zone_cond); + static inline unsigned int blk_rq_zone_no(struct request *rq) { return blk_queue_zone_no(rq->q, blk_rq_pos(rq)); @@ -1063,7 +1067,6 @@ extern void blk_abort_request(struct request *); * Access functions for manipulating queue properties */ extern void blk_cleanup_queue(struct request_queue *); -extern void blk_queue_make_request(struct request_queue *, make_request_fn *); extern void blk_queue_bounce_limit(struct request_queue *, u64); extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int); extern void blk_queue_chunk_sectors(struct request_queue *, unsigned int); @@ -1140,8 +1143,7 @@ extern void blk_dump_rq_flags(struct request *, char *); extern long nr_blockdev_pages(void); bool __must_check blk_get_queue(struct request_queue *); -struct request_queue *blk_alloc_queue(gfp_t); -struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id); +struct request_queue *blk_alloc_queue(make_request_fn make_request, int node_id); extern void blk_put_queue(struct request_queue *); extern void blk_set_queue_dying(struct request_queue *); @@ -1484,17 +1486,7 @@ static inline unsigned int block_size(struct block_device *bdev) return bdev->bd_block_size; } -typedef struct {struct page *v;} Sector; - -unsigned char *read_dev_sector(struct block_device *, sector_t, Sector *); - -static inline void put_dev_sector(Sector p) -{ - put_page(p.v); -} - int kblockd_schedule_work(struct work_struct *work); -int kblockd_schedule_work_on(int cpu, struct work_struct *work); int kblockd_mod_delayed_work_on(int cpu, struct delayed_work *dwork, unsigned long delay); #define MODULE_ALIAS_BLOCKDEV(major,minor) \ @@ -1707,6 +1699,7 
@@ struct block_device_operations { void (*swap_slot_free_notify) (struct block_device *, unsigned long); int (*report_zones)(struct gendisk *, sector_t sector, unsigned int nr_zones, report_zones_cb cb, void *data); + char *(*devnode)(struct gendisk *disk, umode_t *mode); struct module *owner; const struct pr_ops *pr_ops; }; diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h index 7bb2d8de9f30..3b6ff5902edc 100644 --- a/include/linux/blktrace_api.h +++ b/include/linux/blktrace_api.h @@ -51,9 +51,13 @@ void __trace_note_message(struct blk_trace *, struct blkcg *blkcg, const char *f **/ #define blk_add_cgroup_trace_msg(q, cg, fmt, ...) \ do { \ - struct blk_trace *bt = (q)->blk_trace; \ + struct blk_trace *bt; \ + \ + rcu_read_lock(); \ + bt = rcu_dereference((q)->blk_trace); \ if (unlikely(bt)) \ __trace_note_message(bt, cg, fmt, ##__VA_ARGS__);\ + rcu_read_unlock(); \ } while (0) #define blk_add_trace_msg(q, fmt, ...) \ blk_add_cgroup_trace_msg(q, NULL, fmt, ##__VA_ARGS__) @@ -61,10 +65,14 @@ void __trace_note_message(struct blk_trace *, struct blkcg *blkcg, const char *f static inline bool blk_trace_note_message_enabled(struct request_queue *q) { - struct blk_trace *bt = q->blk_trace; - if (likely(!bt)) - return false; - return bt->act_mask & BLK_TC_NOTIFY; + struct blk_trace *bt; + bool ret; + + rcu_read_lock(); + bt = rcu_dereference(q->blk_trace); + ret = bt && (bt->act_mask & BLK_TC_NOTIFY); + rcu_read_unlock(); + return ret; } extern void blk_add_driver_data(struct request_queue *q, struct request *rq, diff --git a/include/linux/bootconfig.h b/include/linux/bootconfig.h index 7e18c939663e..9903088891fa 100644 --- a/include/linux/bootconfig.h +++ b/include/linux/bootconfig.h @@ -10,6 +10,9 @@ #include <linux/kernel.h> #include <linux/types.h> +#define BOOTCONFIG_MAGIC "#BOOTCONFIG\n" +#define BOOTCONFIG_MAGIC_LEN 12 + /* XBC tree node */ struct xbc_node { u16 next; @@ -213,7 +216,8 @@ static inline int __init xbc_node_compose_key(struct xbc_node *node, } /* XBC node initializer */ -int __init xbc_init(char *buf); +int __init xbc_init(char *buf, const char **emsg, int *epos); + /* XBC cleanup data structures */ void __init xbc_destroy_all(void); diff --git a/include/linux/bpf-cgroup.h b/include/linux/bpf-cgroup.h index a11d5b7dbbf3..c11b413d5b1a 100644 --- a/include/linux/bpf-cgroup.h +++ b/include/linux/bpf-cgroup.h @@ -36,7 +36,7 @@ struct bpf_cgroup_storage_map; struct bpf_storage_buffer { struct rcu_head rcu; - char data[0]; + char data[]; }; struct bpf_cgroup_storage { @@ -51,9 +51,18 @@ struct bpf_cgroup_storage { struct rcu_head rcu; }; +struct bpf_cgroup_link { + struct bpf_link link; + struct cgroup *cgroup; + enum bpf_attach_type type; +}; + +extern const struct bpf_link_ops bpf_cgroup_link_lops; + struct bpf_prog_list { struct list_head node; struct bpf_prog *prog; + struct bpf_cgroup_link *link; struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE]; }; @@ -84,20 +93,27 @@ struct cgroup_bpf { int cgroup_bpf_inherit(struct cgroup *cgrp); void cgroup_bpf_offline(struct cgroup *cgrp); -int __cgroup_bpf_attach(struct cgroup *cgrp, struct bpf_prog *prog, - struct bpf_prog *replace_prog, +int __cgroup_bpf_attach(struct cgroup *cgrp, + struct bpf_prog *prog, struct bpf_prog *replace_prog, + struct bpf_cgroup_link *link, enum bpf_attach_type type, u32 flags); int __cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog, + struct bpf_cgroup_link *link, enum bpf_attach_type type); +int __cgroup_bpf_replace(struct cgroup *cgrp, struct 
bpf_cgroup_link *link, + struct bpf_prog *new_prog); int __cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr, union bpf_attr __user *uattr); /* Wrapper for __cgroup_bpf_*() protected by cgroup_mutex */ -int cgroup_bpf_attach(struct cgroup *cgrp, struct bpf_prog *prog, - struct bpf_prog *replace_prog, enum bpf_attach_type type, +int cgroup_bpf_attach(struct cgroup *cgrp, + struct bpf_prog *prog, struct bpf_prog *replace_prog, + struct bpf_cgroup_link *link, enum bpf_attach_type type, u32 flags); int cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog, - enum bpf_attach_type type, u32 flags); + enum bpf_attach_type type); +int cgroup_bpf_replace(struct bpf_link *link, struct bpf_prog *old_prog, + struct bpf_prog *new_prog); int cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr, union bpf_attr __user *uattr); @@ -332,11 +348,13 @@ int cgroup_bpf_prog_attach(const union bpf_attr *attr, enum bpf_prog_type ptype, struct bpf_prog *prog); int cgroup_bpf_prog_detach(const union bpf_attr *attr, enum bpf_prog_type ptype); +int cgroup_bpf_link_attach(const union bpf_attr *attr, struct bpf_prog *prog); int cgroup_bpf_prog_query(const union bpf_attr *attr, union bpf_attr __user *uattr); #else struct bpf_prog; +struct bpf_link; struct cgroup_bpf {}; static inline int cgroup_bpf_inherit(struct cgroup *cgrp) { return 0; } static inline void cgroup_bpf_offline(struct cgroup *cgrp) {} @@ -354,6 +372,19 @@ static inline int cgroup_bpf_prog_detach(const union bpf_attr *attr, return -EINVAL; } +static inline int cgroup_bpf_link_attach(const union bpf_attr *attr, + struct bpf_prog *prog) +{ + return -EINVAL; +} + +static inline int cgroup_bpf_replace(struct bpf_link *link, + struct bpf_prog *old_prog, + struct bpf_prog *new_prog) +{ + return -EINVAL; +} + static inline int cgroup_bpf_prog_query(const union bpf_attr *attr, union bpf_attr __user *uattr) { diff --git a/include/linux/bpf.h b/include/linux/bpf.h index 49b1a70e12c8..fd2b2322412d 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -18,6 +18,7 @@ #include <linux/refcount.h> #include <linux/mutex.h> #include <linux/module.h> +#include <linux/kallsyms.h> struct bpf_verifier_env; struct bpf_verifier_log; @@ -160,6 +161,7 @@ static inline void copy_map_value(struct bpf_map *map, void *dst, void *src) } void copy_map_value_locked(struct bpf_map *map, void *dst, void *src, bool lock_src); +int bpf_obj_name_cpy(char *dst, const char *src, unsigned int size); struct bpf_offload_dev; struct bpf_offloaded_map; @@ -232,6 +234,7 @@ enum bpf_arg_type { ARG_CONST_SIZE_OR_ZERO, /* number of bytes accessed from memory or 0 */ ARG_PTR_TO_CTX, /* pointer to context */ + ARG_PTR_TO_CTX_OR_NULL, /* pointer to context or NULL */ ARG_ANYTHING, /* any (initialized) argument is ok */ ARG_PTR_TO_SPIN_LOCK, /* pointer to bpf_spin_lock */ ARG_PTR_TO_SOCK_COMMON, /* pointer to sock_common */ @@ -433,6 +436,16 @@ struct btf_func_model { */ #define BPF_TRAMP_F_SKIP_FRAME BIT(2) +/* Each call __bpf_prog_enter + call bpf_func + call __bpf_prog_exit is ~50 + * bytes on x86. Pick a number to fit into BPF_IMAGE_SIZE / 2 + */ +#define BPF_MAX_TRAMP_PROGS 40 + +struct bpf_tramp_progs { + struct bpf_prog *progs[BPF_MAX_TRAMP_PROGS]; + int nr_progs; +}; + /* Different use cases for BPF trampoline: * 1. 
replace nop at the function entry (kprobe equivalent) * flags = BPF_TRAMP_F_RESTORE_REGS @@ -455,16 +468,25 @@ struct btf_func_model { */ int arch_prepare_bpf_trampoline(void *image, void *image_end, const struct btf_func_model *m, u32 flags, - struct bpf_prog **fentry_progs, int fentry_cnt, - struct bpf_prog **fexit_progs, int fexit_cnt, + struct bpf_tramp_progs *tprogs, void *orig_call); /* these two functions are called from generated trampoline */ u64 notrace __bpf_prog_enter(void); void notrace __bpf_prog_exit(struct bpf_prog *prog, u64 start); +struct bpf_ksym { + unsigned long start; + unsigned long end; + char name[KSYM_NAME_LEN]; + struct list_head lnode; + struct latch_tree_node tnode; + bool prog; +}; + enum bpf_tramp_prog_type { BPF_TRAMP_FENTRY, BPF_TRAMP_FEXIT, + BPF_TRAMP_MODIFY_RETURN, BPF_TRAMP_MAX, BPF_TRAMP_REPLACE, /* more than MAX */ }; @@ -493,6 +515,7 @@ struct bpf_trampoline { /* Executable image of trampoline */ void *image; u64 selector; + struct bpf_ksym ksym; }; #define BPF_DISPATCHER_MAX 48 /* Fits in 2048B */ @@ -510,9 +533,10 @@ struct bpf_dispatcher { int num_progs; void *image; u32 image_off; + struct bpf_ksym ksym; }; -static __always_inline unsigned int bpf_dispatcher_nopfunc( +static __always_inline unsigned int bpf_dispatcher_nop_func( const void *ctx, const struct bpf_insn *insnsi, unsigned int (*bpf_func)(const void *, @@ -525,17 +549,21 @@ struct bpf_trampoline *bpf_trampoline_lookup(u64 key); int bpf_trampoline_link_prog(struct bpf_prog *prog); int bpf_trampoline_unlink_prog(struct bpf_prog *prog); void bpf_trampoline_put(struct bpf_trampoline *tr); -#define BPF_DISPATCHER_INIT(name) { \ - .mutex = __MUTEX_INITIALIZER(name.mutex), \ - .func = &name##func, \ - .progs = {}, \ - .num_progs = 0, \ - .image = NULL, \ - .image_off = 0 \ +#define BPF_DISPATCHER_INIT(_name) { \ + .mutex = __MUTEX_INITIALIZER(_name.mutex), \ + .func = &_name##_func, \ + .progs = {}, \ + .num_progs = 0, \ + .image = NULL, \ + .image_off = 0, \ + .ksym = { \ + .name = #_name, \ + .lnode = LIST_HEAD_INIT(_name.ksym.lnode), \ + }, \ } #define DEFINE_BPF_DISPATCHER(name) \ - noinline unsigned int name##func( \ + noinline unsigned int bpf_dispatcher_##name##_func( \ const void *ctx, \ const struct bpf_insn *insnsi, \ unsigned int (*bpf_func)(const void *, \ @@ -543,26 +571,26 @@ void bpf_trampoline_put(struct bpf_trampoline *tr); { \ return bpf_func(ctx, insnsi); \ } \ - EXPORT_SYMBOL(name##func); \ - struct bpf_dispatcher name = BPF_DISPATCHER_INIT(name); + EXPORT_SYMBOL(bpf_dispatcher_##name##_func); \ + struct bpf_dispatcher bpf_dispatcher_##name = \ + BPF_DISPATCHER_INIT(bpf_dispatcher_##name); #define DECLARE_BPF_DISPATCHER(name) \ - unsigned int name##func( \ + unsigned int bpf_dispatcher_##name##_func( \ const void *ctx, \ const struct bpf_insn *insnsi, \ unsigned int (*bpf_func)(const void *, \ const struct bpf_insn *)); \ - extern struct bpf_dispatcher name; -#define BPF_DISPATCHER_FUNC(name) name##func -#define BPF_DISPATCHER_PTR(name) (&name) + extern struct bpf_dispatcher bpf_dispatcher_##name; +#define BPF_DISPATCHER_FUNC(name) bpf_dispatcher_##name##_func +#define BPF_DISPATCHER_PTR(name) (&bpf_dispatcher_##name) void bpf_dispatcher_change_prog(struct bpf_dispatcher *d, struct bpf_prog *from, struct bpf_prog *to); -struct bpf_image { - struct latch_tree_node tnode; - unsigned char data[]; -}; -#define BPF_IMAGE_SIZE (PAGE_SIZE - sizeof(struct bpf_image)) -bool is_bpf_image_address(unsigned long address); -void *bpf_image_alloc(void); +/* Called only from JIT-enabled 
code, so there's no need for stubs. */ +void *bpf_jit_alloc_exec_page(void); +void bpf_image_ksym_add(void *data, struct bpf_ksym *ksym); +void bpf_image_ksym_del(struct bpf_ksym *ksym); +void bpf_ksym_add(struct bpf_ksym *ksym); +void bpf_ksym_del(struct bpf_ksym *ksym); #else static inline struct bpf_trampoline *bpf_trampoline_lookup(u64 key) { @@ -579,7 +607,7 @@ static inline int bpf_trampoline_unlink_prog(struct bpf_prog *prog) static inline void bpf_trampoline_put(struct bpf_trampoline *tr) {} #define DEFINE_BPF_DISPATCHER(name) #define DECLARE_BPF_DISPATCHER(name) -#define BPF_DISPATCHER_FUNC(name) bpf_dispatcher_nopfunc +#define BPF_DISPATCHER_FUNC(name) bpf_dispatcher_nop_func #define BPF_DISPATCHER_PTR(name) NULL static inline void bpf_dispatcher_change_prog(struct bpf_dispatcher *d, struct bpf_prog *from, @@ -640,8 +668,7 @@ struct bpf_prog_aux { void *jit_data; /* JIT specific data. arch dependent */ struct bpf_jit_poke_descriptor *poke_tab; u32 size_poke_tab; - struct latch_tree_node ksym_tnode; - struct list_head ksym_lnode; + struct bpf_ksym ksym; const struct bpf_prog_ops *ops; struct bpf_map **used_maps; struct bpf_prog *prog; @@ -859,7 +886,7 @@ struct bpf_prog_array_item { struct bpf_prog_array { struct rcu_head rcu; - struct bpf_prog_array_item items[0]; + struct bpf_prog_array_item items[]; }; struct bpf_prog_array *bpf_prog_array_alloc(u32 prog_cnt, gfp_t flags); @@ -885,7 +912,7 @@ int bpf_prog_array_copy(struct bpf_prog_array *old_array, struct bpf_prog *_prog; \ struct bpf_prog_array *_array; \ u32 _ret = 1; \ - preempt_disable(); \ + migrate_disable(); \ rcu_read_lock(); \ _array = rcu_dereference(array); \ if (unlikely(check_non_null && !_array))\ @@ -898,7 +925,7 @@ int bpf_prog_array_copy(struct bpf_prog_array *old_array, } \ _out: \ rcu_read_unlock(); \ - preempt_enable(); \ + migrate_enable(); \ _ret; \ }) @@ -932,7 +959,7 @@ _out: \ u32 ret; \ u32 _ret = 1; \ u32 _cn = 0; \ - preempt_disable(); \ + migrate_disable(); \ rcu_read_lock(); \ _array = rcu_dereference(array); \ _item = &_array->items[0]; \ @@ -944,7 +971,7 @@ _out: \ _item++; \ } \ rcu_read_unlock(); \ - preempt_enable(); \ + migrate_enable(); \ if (_ret) \ _ret = (_cn ? NET_XMIT_CN : NET_XMIT_SUCCESS); \ else \ @@ -961,6 +988,36 @@ _out: \ #ifdef CONFIG_BPF_SYSCALL DECLARE_PER_CPU(int, bpf_prog_active); +/* + * Block execution of BPF programs attached to instrumentation (perf, + * kprobes, tracepoints) to prevent deadlocks on map operations as any of + * these events can happen inside a region which holds a map bucket lock + * and can deadlock on it. + * + * Use the preemption safe inc/dec variants on RT because migrate disable + * is preemptible on RT and preemption in the middle of the RMW operation + * might lead to inconsistent state. Use the raw variants for non RT + * kernels as migrate_disable() maps to preempt_disable() so the slightly + * more expensive save operation can be avoided. 
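/*
 * Illustrative sketch (not part of this patch): how a map update path might
 * bracket its lock-protected section with the bpf_disable_instrumentation()
 * and bpf_enable_instrumentation() helpers defined just below.  The lock and
 * the "real work" are hypothetical stand-ins.
 */
#include <linux/bpf.h>
#include <linux/spinlock.h>

static DEFINE_RAW_SPINLOCK(my_bucket_lock);	/* hypothetical bucket lock */

static long my_map_update(struct bpf_map *map, void *key, void *value)
{
	unsigned long flags;
	long ret;

	/* Keep perf/kprobe/tracepoint BPF programs from nesting on this CPU */
	bpf_disable_instrumentation();
	raw_spin_lock_irqsave(&my_bucket_lock, flags);
	ret = 0;		/* a real map would modify its bucket here */
	raw_spin_unlock_irqrestore(&my_bucket_lock, flags);
	bpf_enable_instrumentation();

	return ret;
}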
+ */ +static inline void bpf_disable_instrumentation(void) +{ + migrate_disable(); + if (IS_ENABLED(CONFIG_PREEMPT_RT)) + this_cpu_inc(bpf_prog_active); + else + __this_cpu_inc(bpf_prog_active); +} + +static inline void bpf_enable_instrumentation(void) +{ + if (IS_ENABLED(CONFIG_PREEMPT_RT)) + this_cpu_dec(bpf_prog_active); + else + __this_cpu_dec(bpf_prog_active); + migrate_enable(); +} + extern const struct file_operations bpf_map_fops; extern const struct file_operations bpf_prog_fops; @@ -993,6 +1050,7 @@ void __bpf_free_used_maps(struct bpf_prog_aux *aux, void bpf_prog_free_id(struct bpf_prog *prog, bool do_idr_lock); void bpf_map_free_id(struct bpf_map *map, bool do_idr_lock); +struct bpf_map *bpf_map_get(u32 ufd); struct bpf_map *bpf_map_get_with_uref(u32 ufd); struct bpf_map *__bpf_map_get(struct fd f); void bpf_map_inc(struct bpf_map *map); @@ -1025,6 +1083,29 @@ extern int sysctl_unprivileged_bpf_disabled; int bpf_map_new_fd(struct bpf_map *map, int flags); int bpf_prog_new_fd(struct bpf_prog *prog); +struct bpf_link { + atomic64_t refcnt; + const struct bpf_link_ops *ops; + struct bpf_prog *prog; + struct work_struct work; +}; + +struct bpf_link_ops { + void (*release)(struct bpf_link *link); + void (*dealloc)(struct bpf_link *link); + +}; + +void bpf_link_init(struct bpf_link *link, const struct bpf_link_ops *ops, + struct bpf_prog *prog); +void bpf_link_cleanup(struct bpf_link *link, struct file *link_file, + int link_fd); +void bpf_link_inc(struct bpf_link *link); +void bpf_link_put(struct bpf_link *link); +int bpf_link_new_fd(struct bpf_link *link); +struct file *bpf_link_new_file(struct bpf_link *link, int *reserved_fd); +struct bpf_link *bpf_link_get_from_fd(u32 ufd); + int bpf_obj_pin_user(u32 ufd, const char __user *pathname); int bpf_obj_get_user(const char __user *pathname, int flags); @@ -1102,6 +1183,9 @@ int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr, union bpf_attr __user *uattr); int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr, union bpf_attr __user *uattr); +int bpf_prog_test_run_tracing(struct bpf_prog *prog, + const union bpf_attr *kattr, + union bpf_attr __user *uattr); int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog, const union bpf_attr *kattr, union bpf_attr __user *uattr); @@ -1259,6 +1343,13 @@ static inline int bpf_prog_test_run_skb(struct bpf_prog *prog, return -ENOTSUPP; } +static inline int bpf_prog_test_run_tracing(struct bpf_prog *prog, + const union bpf_attr *kattr, + union bpf_attr __user *uattr) +{ + return -ENOTSUPP; +} + static inline int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog, const union bpf_attr *kattr, union bpf_attr __user *uattr) @@ -1355,6 +1446,8 @@ static inline void bpf_map_offload_map_free(struct bpf_map *map) #if defined(CONFIG_BPF_STREAM_PARSER) int sock_map_prog_update(struct bpf_map *map, struct bpf_prog *prog, u32 which); int sock_map_get_from_fd(const union bpf_attr *attr, struct bpf_prog *prog); +void sock_map_unhash(struct sock *sk); +void sock_map_close(struct sock *sk, long timeout); #else static inline int sock_map_prog_update(struct bpf_map *map, struct bpf_prog *prog, u32 which) @@ -1367,7 +1460,7 @@ static inline int sock_map_get_from_fd(const union bpf_attr *attr, { return -EINVAL; } -#endif +#endif /* CONFIG_BPF_STREAM_PARSER */ #if defined(CONFIG_INET) && defined(CONFIG_BPF_SYSCALL) void bpf_sk_reuseport_detach(struct sock *sk); @@ -1417,6 +1510,7 @@ extern const struct bpf_func_proto bpf_get_stack_proto; extern const struct 
bpf_func_proto bpf_sock_map_update_proto; extern const struct bpf_func_proto bpf_sock_hash_update_proto; extern const struct bpf_func_proto bpf_get_current_cgroup_id_proto; +extern const struct bpf_func_proto bpf_get_current_ancestor_cgroup_id_proto; extern const struct bpf_func_proto bpf_msg_redirect_hash_proto; extern const struct bpf_func_proto bpf_msg_redirect_map_proto; extern const struct bpf_func_proto bpf_sk_redirect_hash_proto; @@ -1428,6 +1522,10 @@ extern const struct bpf_func_proto bpf_strtol_proto; extern const struct bpf_func_proto bpf_strtoul_proto; extern const struct bpf_func_proto bpf_tcp_sock_proto; extern const struct bpf_func_proto bpf_jiffies64_proto; +extern const struct bpf_func_proto bpf_get_ns_current_pid_tgid_proto; + +const struct bpf_func_proto *bpf_tracing_func_proto( + enum bpf_func_id func_id, const struct bpf_prog *prog); /* Shared helpers among cBPF and eBPF. */ void bpf_user_rnd_init_once(void); diff --git a/include/linux/bpf_lsm.h b/include/linux/bpf_lsm.h new file mode 100644 index 000000000000..af74712af585 --- /dev/null +++ b/include/linux/bpf_lsm.h @@ -0,0 +1,33 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +/* + * Copyright (C) 2020 Google LLC. + */ + +#ifndef _LINUX_BPF_LSM_H +#define _LINUX_BPF_LSM_H + +#include <linux/bpf.h> +#include <linux/lsm_hooks.h> + +#ifdef CONFIG_BPF_LSM + +#define LSM_HOOK(RET, DEFAULT, NAME, ...) \ + RET bpf_lsm_##NAME(__VA_ARGS__); +#include <linux/lsm_hook_defs.h> +#undef LSM_HOOK + +int bpf_lsm_verify_prog(struct bpf_verifier_log *vlog, + const struct bpf_prog *prog); + +#else /* !CONFIG_BPF_LSM */ + +static inline int bpf_lsm_verify_prog(struct bpf_verifier_log *vlog, + const struct bpf_prog *prog) +{ + return -EOPNOTSUPP; +} + +#endif /* CONFIG_BPF_LSM */ + +#endif /* _LINUX_BPF_LSM_H */ diff --git a/include/linux/bpf_types.h b/include/linux/bpf_types.h index c81d4ece79a4..ba0c2d56f8a3 100644 --- a/include/linux/bpf_types.h +++ b/include/linux/bpf_types.h @@ -70,6 +70,10 @@ BPF_PROG_TYPE(BPF_PROG_TYPE_STRUCT_OPS, bpf_struct_ops, void *, void *) BPF_PROG_TYPE(BPF_PROG_TYPE_EXT, bpf_extension, void *, void *) +#ifdef CONFIG_BPF_LSM +BPF_PROG_TYPE(BPF_PROG_TYPE_LSM, lsm, + void *, void *) +#endif /* CONFIG_BPF_LSM */ #endif BPF_MAP_TYPE(BPF_MAP_TYPE_ARRAY, array_map_ops) diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h index 5406e6e96585..6abd5a778fcd 100644 --- a/include/linux/bpf_verifier.h +++ b/include/linux/bpf_verifier.h @@ -123,6 +123,10 @@ struct bpf_reg_state { s64 smax_value; /* maximum possible (s64)value */ u64 umin_value; /* minimum possible (u64)value */ u64 umax_value; /* maximum possible (u64)value */ + s32 s32_min_value; /* minimum possible (s32)value */ + s32 s32_max_value; /* maximum possible (s32)value */ + u32 u32_min_value; /* minimum possible (u32)value */ + u32 u32_max_value; /* maximum possible (u32)value */ /* parentage chain for liveness checking */ struct bpf_reg_state *parent; /* Inside the callee two registers can be both PTR_TO_STACK like diff --git a/include/linux/brcmphy.h b/include/linux/brcmphy.h index b475e7f20d28..6462c5447872 100644 --- a/include/linux/brcmphy.h +++ b/include/linux/brcmphy.h @@ -79,6 +79,7 @@ #define MII_BCM54XX_ECR 0x10 /* BCM54xx extended control register */ #define MII_BCM54XX_ECR_IM 0x1000 /* Interrupt mask */ #define MII_BCM54XX_ECR_IF 0x0800 /* Interrupt force */ +#define MII_BCM54XX_ECR_FIFOE 0x0001 /* FIFO elasticity */ #define MII_BCM54XX_ESR 0x11 /* BCM54xx extended status register */ #define MII_BCM54XX_ESR_IS 0x1000 /* Interrupt 
status */ @@ -119,6 +120,7 @@ #define MII_BCM54XX_AUXCTL_SHDWSEL_AUXCTL 0x00 #define MII_BCM54XX_AUXCTL_ACTL_TX_6DB 0x0400 #define MII_BCM54XX_AUXCTL_ACTL_SMDSP_ENA 0x0800 +#define MII_BCM54XX_AUXCTL_ACTL_EXT_PKT_LEN 0x4000 #define MII_BCM54XX_AUXCTL_SHDWSEL_MISC 0x07 #define MII_BCM54XX_AUXCTL_SHDWSEL_MISC_WIRESPEED_EN 0x0010 diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h index 7b73ef7f902d..e0b020eaf32e 100644 --- a/include/linux/buffer_head.h +++ b/include/linux/buffer_head.h @@ -22,9 +22,6 @@ enum bh_state_bits { BH_Dirty, /* Is dirty */ BH_Lock, /* Is locked */ BH_Req, /* Has been submitted for I/O */ - BH_Uptodate_Lock,/* Used by the first bh in a page, to serialise - * IO completion of other buffers in the page - */ BH_Mapped, /* Has a disk mapping */ BH_New, /* Disk mapping was newly created by get_block */ @@ -76,6 +73,9 @@ struct buffer_head { struct address_space *b_assoc_map; /* mapping this buffer is associated with */ atomic_t b_count; /* users using this buffer_head */ + spinlock_t b_uptodate_lock; /* Used by the first bh in a page, to + * serialise IO completion of other + * buffers in the page */ }; /* diff --git a/include/linux/ceph/messenger.h b/include/linux/ceph/messenger.h index c4458dc6a757..76371aaae2d1 100644 --- a/include/linux/ceph/messenger.h +++ b/include/linux/ceph/messenger.h @@ -175,9 +175,10 @@ struct ceph_msg_data { #endif /* CONFIG_BLOCK */ struct ceph_bvec_iter bvec_pos; struct { - struct page **pages; /* NOT OWNER. */ + struct page **pages; size_t length; /* total # bytes */ unsigned int alignment; /* first page */ + bool own_pages; }; struct ceph_pagelist *pagelist; }; @@ -356,8 +357,8 @@ extern void ceph_con_keepalive(struct ceph_connection *con); extern bool ceph_con_keepalive_expired(struct ceph_connection *con, unsigned long interval); -extern void ceph_msg_data_add_pages(struct ceph_msg *msg, struct page **pages, - size_t length, size_t alignment); +void ceph_msg_data_add_pages(struct ceph_msg *msg, struct page **pages, + size_t length, size_t alignment, bool own_pages); extern void ceph_msg_data_add_pagelist(struct ceph_msg *msg, struct ceph_pagelist *pagelist); #ifdef CONFIG_BLOCK diff --git a/include/linux/ceph/osdmap.h b/include/linux/ceph/osdmap.h index e081b56f1c1d..5e601975745f 100644 --- a/include/linux/ceph/osdmap.h +++ b/include/linux/ceph/osdmap.h @@ -37,6 +37,9 @@ int ceph_spg_compare(const struct ceph_spg *lhs, const struct ceph_spg *rhs); #define CEPH_POOL_FLAG_HASHPSPOOL (1ULL << 0) /* hash pg seed and pool id together */ #define CEPH_POOL_FLAG_FULL (1ULL << 1) /* pool is full */ +#define CEPH_POOL_FLAG_FULL_QUOTA (1ULL << 10) /* pool ran out of quota, + will set FULL too */ +#define CEPH_POOL_FLAG_NEARFULL (1ULL << 11) /* pool is nearfull */ struct ceph_pg_pool_info { struct rb_node node; @@ -304,5 +307,6 @@ extern struct ceph_pg_pool_info *ceph_pg_pool_by_id(struct ceph_osdmap *map, extern const char *ceph_pg_pool_name_by_id(struct ceph_osdmap *map, u64 id); extern int ceph_pg_poolid_by_name(struct ceph_osdmap *map, const char *name); +u64 ceph_pg_pool_flags(struct ceph_osdmap *map, u64 id); #endif diff --git a/include/linux/ceph/rados.h b/include/linux/ceph/rados.h index 59bdfd470100..88ed3c5c04c5 100644 --- a/include/linux/ceph/rados.h +++ b/include/linux/ceph/rados.h @@ -143,8 +143,10 @@ extern const char *ceph_osd_state_name(int s); /* * osd map flag bits */ -#define CEPH_OSDMAP_NEARFULL (1<<0) /* sync writes (near ENOSPC) */ -#define CEPH_OSDMAP_FULL (1<<1) /* no data writes (ENOSPC) */ +#define 
CEPH_OSDMAP_NEARFULL (1<<0) /* sync writes (near ENOSPC), + not set since ~luminous */ +#define CEPH_OSDMAP_FULL (1<<1) /* no data writes (ENOSPC), + not set since ~luminous */ #define CEPH_OSDMAP_PAUSERD (1<<2) /* pause all reads */ #define CEPH_OSDMAP_PAUSEWR (1<<3) /* pause all writes */ #define CEPH_OSDMAP_PAUSEREC (1<<4) /* pause recovery */ diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h index 63097cb243cb..52661155f85f 100644 --- a/include/linux/cgroup-defs.h +++ b/include/linux/cgroup-defs.h @@ -94,6 +94,11 @@ enum { * Enable legacy local memory.events. */ CGRP_ROOT_MEMORY_LOCAL_EVENTS = (1 << 5), + + /* + * Enable recursive subtree protection + */ + CGRP_ROOT_MEMORY_RECURSIVE_PROT = (1 << 6), }; /* cftype->flags */ @@ -628,8 +633,9 @@ struct cgroup_subsys { void (*cancel_attach)(struct cgroup_taskset *tset); void (*attach)(struct cgroup_taskset *tset); void (*post_attach)(void); - int (*can_fork)(struct task_struct *task); - void (*cancel_fork)(struct task_struct *task); + int (*can_fork)(struct task_struct *task, + struct css_set *cset); + void (*cancel_fork)(struct task_struct *task, struct css_set *cset); void (*fork)(struct task_struct *task); void (*exit)(struct task_struct *task); void (*release)(struct task_struct *task); diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h index d7ddebd0cdec..4598e4da6b1b 100644 --- a/include/linux/cgroup.h +++ b/include/linux/cgroup.h @@ -27,6 +27,8 @@ #include <linux/cgroup-defs.h> +struct kernel_clone_args; + #ifdef CONFIG_CGROUPS /* @@ -58,10 +60,8 @@ struct css_task_iter { struct list_head *tcset_head; struct list_head *task_pos; - struct list_head *tasks_head; - struct list_head *mg_tasks_head; - struct list_head *dying_tasks_head; + struct list_head *cur_tasks_head; struct css_set *cur_cset; struct css_set *cur_dcset; struct task_struct *cur_task; @@ -121,9 +121,12 @@ int proc_cgroup_show(struct seq_file *m, struct pid_namespace *ns, struct pid *pid, struct task_struct *tsk); void cgroup_fork(struct task_struct *p); -extern int cgroup_can_fork(struct task_struct *p); -extern void cgroup_cancel_fork(struct task_struct *p); -extern void cgroup_post_fork(struct task_struct *p); +extern int cgroup_can_fork(struct task_struct *p, + struct kernel_clone_args *kargs); +extern void cgroup_cancel_fork(struct task_struct *p, + struct kernel_clone_args *kargs); +extern void cgroup_post_fork(struct task_struct *p, + struct kernel_clone_args *kargs); void cgroup_exit(struct task_struct *p); void cgroup_release(struct task_struct *p); void cgroup_free(struct task_struct *p); @@ -707,9 +710,12 @@ static inline int cgroupstats_build(struct cgroupstats *stats, struct dentry *dentry) { return -EINVAL; } static inline void cgroup_fork(struct task_struct *p) {} -static inline int cgroup_can_fork(struct task_struct *p) { return 0; } -static inline void cgroup_cancel_fork(struct task_struct *p) {} -static inline void cgroup_post_fork(struct task_struct *p) {} +static inline int cgroup_can_fork(struct task_struct *p, + struct kernel_clone_args *kargs) { return 0; } +static inline void cgroup_cancel_fork(struct task_struct *p, + struct kernel_clone_args *kargs) {} +static inline void cgroup_post_fork(struct task_struct *p, + struct kernel_clone_args *kargs) {} static inline void cgroup_exit(struct task_struct *p) {} static inline void cgroup_release(struct task_struct *p) {} static inline void cgroup_free(struct task_struct *p) {} diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h index 
952ac035bab9..bd1ee9039558 100644 --- a/include/linux/clk-provider.h +++ b/include/linux/clk-provider.h @@ -522,9 +522,9 @@ struct clk *clk_register_gate(struct device *dev, const char *name, * @clk_gate_flags: gate-specific flags for this clock * @lock: shared register lock for this clock */ -#define clk_hw_register_gate_parent_hw(dev, name, parent_name, flags, reg, \ +#define clk_hw_register_gate_parent_hw(dev, name, parent_hw, flags, reg, \ bit_idx, clk_gate_flags, lock) \ - __clk_hw_register_gate((dev), NULL, (name), (parent_name), NULL, \ + __clk_hw_register_gate((dev), NULL, (name), NULL, (parent_hw), \ NULL, (flags), (reg), (bit_idx), \ (clk_gate_flags), (lock)) /** @@ -539,10 +539,10 @@ struct clk *clk_register_gate(struct device *dev, const char *name, * @clk_gate_flags: gate-specific flags for this clock * @lock: shared register lock for this clock */ -#define clk_hw_register_gate_parent_data(dev, name, parent_name, flags, reg, \ +#define clk_hw_register_gate_parent_data(dev, name, parent_data, flags, reg, \ bit_idx, clk_gate_flags, lock) \ - __clk_hw_register_gate((dev), NULL, (name), (parent_name), NULL, \ - NULL, (flags), (reg), (bit_idx), \ + __clk_hw_register_gate((dev), NULL, (name), NULL, NULL, (parent_data), \ + (flags), (reg), (bit_idx), \ (clk_gate_flags), (lock)) void clk_unregister_gate(struct clk *clk); void clk_hw_unregister_gate(struct clk_hw *hw); diff --git a/include/linux/clk/at91_pmc.h b/include/linux/clk/at91_pmc.h index 390437887b46..49a53a137610 100644 --- a/include/linux/clk/at91_pmc.h +++ b/include/linux/clk/at91_pmc.h @@ -12,6 +12,9 @@ #ifndef AT91_PMC_H #define AT91_PMC_H +#define AT91_PMC_V1 (1) /* PMC version 1 */ +#define AT91_PMC_V2 (2) /* PMC version 2 [SAM9X60] */ + #define AT91_PMC_SCER 0x00 /* System Clock Enable Register */ #define AT91_PMC_SCDR 0x04 /* System Clock Disable Register */ @@ -30,16 +33,34 @@ #define AT91_PMC_HCK0 (1 << 16) /* AHB Clock (USB host) [AT91SAM9261 only] */ #define AT91_PMC_HCK1 (1 << 17) /* AHB Clock (LCD) [AT91SAM9261 only] */ +#define AT91_PMC_PLL_CTRL0 0x0C /* PLL Control Register 0 [for SAM9X60] */ +#define AT91_PMC_PLL_CTRL0_ENPLL (1 << 28) /* Enable PLL */ +#define AT91_PMC_PLL_CTRL0_ENPLLCK (1 << 29) /* Enable PLL clock for PMC */ +#define AT91_PMC_PLL_CTRL0_ENLOCK (1 << 31) /* Enable PLL lock */ + +#define AT91_PMC_PLL_CTRL1 0x10 /* PLL Control Register 1 [for SAM9X60] */ + #define AT91_PMC_PCER 0x10 /* Peripheral Clock Enable Register */ #define AT91_PMC_PCDR 0x14 /* Peripheral Clock Disable Register */ #define AT91_PMC_PCSR 0x18 /* Peripheral Clock Status Register */ +#define AT91_PMC_PLL_ACR 0x18 /* PLL Analog Control Register [for SAM9X60] */ +#define AT91_PMC_PLL_ACR_DEFAULT_UPLL 0x12020010UL /* Default PLL ACR value for UPLL */ +#define AT91_PMC_PLL_ACR_DEFAULT_PLLA 0x00020010UL /* Default PLL ACR value for PLLA */ +#define AT91_PMC_PLL_ACR_UTMIVR (1 << 12) /* UPLL Voltage regulator Control */ +#define AT91_PMC_PLL_ACR_UTMIBG (1 << 13) /* UPLL Bandgap Control */ + #define AT91_CKGR_UCKR 0x1C /* UTMI Clock Register [some SAM9] */ #define AT91_PMC_UPLLEN (1 << 16) /* UTMI PLL Enable */ #define AT91_PMC_UPLLCOUNT (0xf << 20) /* UTMI PLL Start-up Time */ #define AT91_PMC_BIASEN (1 << 24) /* UTMI BIAS Enable */ #define AT91_PMC_BIASCOUNT (0xf << 28) /* UTMI BIAS Start-up Time */ +#define AT91_PMC_PLL_UPDT 0x1C /* PMC PLL update register [for SAM9X60] */ +#define AT91_PMC_PLL_UPDT_UPDATE (1 << 8) /* Update PLL settings */ +#define AT91_PMC_PLL_UPDT_ID (1 << 0) /* PLL ID */ +#define AT91_PMC_PLL_UPDT_STUPTIM 
(0xff << 16) /* Startup time */ + #define AT91_CKGR_MOR 0x20 /* Main Oscillator Register [not on SAM9RL] */ #define AT91_PMC_MOSCEN (1 << 0) /* Main Oscillator Enable */ #define AT91_PMC_OSCBYPASS (1 << 1) /* Oscillator Bypass */ @@ -180,6 +201,8 @@ #define AT91_PMC_WPVS (0x1 << 0) /* Write Protect Violation Status */ #define AT91_PMC_WPVSRC (0xffff << 8) /* Write Protect Violation Source */ +#define AT91_PMC_PLL_ISR0 0xEC /* PLL Interrupt Status Register 0 [SAM9X60 only] */ + #define AT91_PMC_PCER1 0x100 /* Peripheral Clock Enable Register 1 [SAMA5 only]*/ #define AT91_PMC_PCDR1 0x104 /* Peripheral Clock Enable Register 1 */ #define AT91_PMC_PCSR1 0x108 /* Peripheral Clock Enable Register 1 */ diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h index b21db536fd52..86d143db6523 100644 --- a/include/linux/clocksource.h +++ b/include/linux/clocksource.h @@ -23,18 +23,31 @@ struct clocksource; struct module; -#ifdef CONFIG_ARCH_CLOCKSOURCE_DATA +#if defined(CONFIG_ARCH_CLOCKSOURCE_DATA) || \ + defined(CONFIG_GENERIC_GETTIMEOFDAY) #include <asm/clocksource.h> #endif +#include <vdso/clocksource.h> + /** * struct clocksource - hardware abstraction for a free running counter * Provides mostly state-free accessors to the underlying hardware. * This is the structure used for system time. * - * @name: ptr to clocksource name - * @list: list head for registration - * @rating: rating value for selection (higher is better) + * @read: Returns a cycle value, passes clocksource as argument + * @mask: Bitmask for two's complement + * subtraction of non 64 bit counters + * @mult: Cycle to nanosecond multiplier + * @shift: Cycle to nanosecond divisor (power of two) + * @max_idle_ns: Maximum idle time permitted by the clocksource (nsecs) + * @maxadj: Maximum adjustment value to mult (~11%) + * @archdata: Optional arch-specific data + * @max_cycles: Maximum safe cycle value which won't overflow on + * multiplication + * @name: Pointer to clocksource name + * @list: List head for registration (internal) + * @rating: Rating value for selection (higher is better) * To avoid rating inflation the following * list should give you a guide as to how * to assign your clocksource a rating @@ -49,27 +62,23 @@ struct module; * 400-499: Perfect * The ideal clocksource. A must-use where * available. 
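/*
 * Illustrative sketch (assumption, not from this patch): a driver-side
 * clocksource built from the fields documented above, with a rating picked
 * from the scale just listed.  The register base, the 24 MHz rate and all
 * "my_*" names are hypothetical.
 */
#include <linux/clocksource.h>
#include <linux/init.h>
#include <linux/io.h>

static void __iomem *my_timer_base;		/* free-running counter register */

static u64 my_timer_read(struct clocksource *cs)
{
	return readl_relaxed(my_timer_base);
}

static struct clocksource my_timer_cs = {
	.name	= "my-timer",
	.rating	= 300,				/* "Desired" on the scale above */
	.read	= my_timer_read,
	.mask	= CLOCKSOURCE_MASK(32),
	.flags	= CLOCK_SOURCE_IS_CONTINUOUS,
};

static int __init my_timer_cs_init(void)
{
	/* mult/shift/max_idle_ns are computed by the core from the rate */
	return clocksource_register_hz(&my_timer_cs, 24000000);
}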
- * @read: returns a cycle value, passes clocksource as argument - * @enable: optional function to enable the clocksource - * @disable: optional function to disable the clocksource - * @mask: bitmask for two's complement - * subtraction of non 64 bit counters - * @mult: cycle to nanosecond multiplier - * @shift: cycle to nanosecond divisor (power of two) - * @max_idle_ns: max idle time permitted by the clocksource (nsecs) - * @maxadj: maximum adjustment value to mult (~11%) - * @max_cycles: maximum safe cycle value which won't overflow on multiplication - * @flags: flags describing special properties - * @archdata: arch-specific data - * @suspend: suspend function for the clocksource, if necessary - * @resume: resume function for the clocksource, if necessary + * @flags: Flags describing special properties + * @enable: Optional function to enable the clocksource + * @disable: Optional function to disable the clocksource + * @suspend: Optional suspend function for the clocksource + * @resume: Optional resume function for the clocksource * @mark_unstable: Optional function to inform the clocksource driver that * the watchdog marked the clocksource unstable - * @owner: module reference, must be set by clocksource in modules + * @tick_stable: Optional function called periodically from the watchdog + * code to provide stable syncrhonization points + * @wd_list: List head to enqueue into the watchdog list (internal) + * @cs_last: Last clocksource value for clocksource watchdog + * @wd_last: Last watchdog value corresponding to @cs_last + * @owner: Module reference, must be set by clocksource in modules * * Note: This struct is not used in hotpathes of the timekeeping code * because the timekeeper caches the hot path fields in its own data - * structure, so no line cache alignment is required, + * structure, so no cache line alignment is required, * * The pointer to the clocksource itself is handed to the read * callback. If you need extra information there you can wrap struct @@ -78,35 +87,37 @@ struct module; * structure. 
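/*
 * Sketch of the wrapping pattern described in the paragraph above
 * (illustrative only): embed struct clocksource in a driver structure and
 * recover the driver data in the read callback with container_of().  All
 * "my_*" names are hypothetical.
 */
#include <linux/clocksource.h>
#include <linux/kernel.h>
#include <linux/io.h>

struct my_clksrc {
	void __iomem		*reg;	/* counter register */
	struct clocksource	cs;
};

static u64 my_clksrc_read(struct clocksource *cs)
{
	struct my_clksrc *priv = container_of(cs, struct my_clksrc, cs);

	return readl_relaxed(priv->reg);
}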
*/ struct clocksource { - u64 (*read)(struct clocksource *cs); - u64 mask; - u32 mult; - u32 shift; - u64 max_idle_ns; - u32 maxadj; + u64 (*read)(struct clocksource *cs); + u64 mask; + u32 mult; + u32 shift; + u64 max_idle_ns; + u32 maxadj; #ifdef CONFIG_ARCH_CLOCKSOURCE_DATA struct arch_clocksource_data archdata; #endif - u64 max_cycles; - const char *name; - struct list_head list; - int rating; - int (*enable)(struct clocksource *cs); - void (*disable)(struct clocksource *cs); - unsigned long flags; - void (*suspend)(struct clocksource *cs); - void (*resume)(struct clocksource *cs); - void (*mark_unstable)(struct clocksource *cs); - void (*tick_stable)(struct clocksource *cs); + u64 max_cycles; + const char *name; + struct list_head list; + int rating; + enum vdso_clock_mode vdso_clock_mode; + unsigned long flags; + + int (*enable)(struct clocksource *cs); + void (*disable)(struct clocksource *cs); + void (*suspend)(struct clocksource *cs); + void (*resume)(struct clocksource *cs); + void (*mark_unstable)(struct clocksource *cs); + void (*tick_stable)(struct clocksource *cs); /* private: */ #ifdef CONFIG_CLOCKSOURCE_WATCHDOG /* Watchdog related data, used by the framework */ - struct list_head wd_list; - u64 cs_last; - u64 wd_last; + struct list_head wd_list; + u64 cs_last; + u64 wd_last; #endif - struct module *owner; + struct module *owner; }; /* diff --git a/include/linux/compat.h b/include/linux/compat.h index df2475be134a..0480ba4db592 100644 --- a/include/linux/compat.h +++ b/include/linux/compat.h @@ -454,12 +454,13 @@ extern void __user *compat_alloc_user_space(unsigned long len); int compat_restore_altstack(const compat_stack_t __user *uss); int __compat_save_altstack(compat_stack_t __user *, unsigned long); -#define compat_save_altstack_ex(uss, sp) do { \ +#define unsafe_compat_save_altstack(uss, sp, label) do { \ compat_stack_t __user *__uss = uss; \ struct task_struct *t = current; \ - put_user_ex(ptr_to_compat((void __user *)t->sas_ss_sp), &__uss->ss_sp); \ - put_user_ex(t->sas_ss_flags, &__uss->ss_flags); \ - put_user_ex(t->sas_ss_size, &__uss->ss_size); \ + unsafe_put_user(ptr_to_compat((void __user *)t->sas_ss_sp), \ + &__uss->ss_sp, label); \ + unsafe_put_user(t->sas_ss_flags, &__uss->ss_flags, label); \ + unsafe_put_user(t->sas_ss_size, &__uss->ss_size, label); \ if (t->sas_ss_flags & SS_AUTODISARM) \ sas_ss_reset(t); \ } while (0); diff --git a/include/linux/compiler.h b/include/linux/compiler.h index 5e88e7e33abe..034b0a644efc 100644 --- a/include/linux/compiler.h +++ b/include/linux/compiler.h @@ -347,7 +347,7 @@ static inline void *offset_to_ptr(const int *off) * compiler has support to do so. */ #define compiletime_assert(condition, msg) \ - _compiletime_assert(condition, msg, __compiletime_assert_, __LINE__) + _compiletime_assert(condition, msg, __compiletime_assert_, __COUNTER__) #define compiletime_assert_atomic_type(t) \ compiletime_assert(__native_word(t), \ diff --git a/include/linux/compiler_types.h b/include/linux/compiler_types.h index 72393a8c1a6c..e970f97a7fcb 100644 --- a/include/linux/compiler_types.h +++ b/include/linux/compiler_types.h @@ -129,22 +129,13 @@ struct ftrace_likely_data { #define __compiler_offsetof(a, b) __builtin_offsetof(a, b) /* - * Force always-inline if the user requests it so via the .config. * Prefer gnu_inline, so that extern inline functions do not emit an * externally visible function. This makes extern inline behave as per gnu89 * semantics rather than c99. 
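/*
 * Illustrative aside (not from the patch): the gnu89 "extern inline"
 * behaviour that gnu_inline selects.  With the attribute, an extern inline
 * definition in a header is only used for inlining and emits no out-of-line
 * symbol in that translation unit, so any number of files may include it
 * without duplicate definitions at link time; if the compiler declines to
 * inline, the call resolves to an external definition provided elsewhere.
 * "my_add" is a hypothetical name.
 */
/* my_header.h */
extern inline __attribute__((__gnu_inline__)) int my_add(int a, int b)
{
	return a + b;	/* inlined at call sites; no "my_add" symbol emitted */
}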
This prevents multiple symbol definition errors * of extern inline functions at link time. * A lot of inline functions can cause havoc with function tracing. - * Do not use __always_inline here, since currently it expands to inline again - * (which would break users of __always_inline). */ -#if !defined(CONFIG_OPTIMIZE_INLINING) -#define inline inline __attribute__((__always_inline__)) __gnu_inline \ - __inline_maybe_unused notrace -#else -#define inline inline __gnu_inline \ - __inline_maybe_unused notrace -#endif +#define inline inline __gnu_inline __inline_maybe_unused notrace /* * gcc provides both __inline__ and __inline as alternate spellings of diff --git a/include/linux/completion.h b/include/linux/completion.h index 519e94915d18..bf8e77001f18 100644 --- a/include/linux/completion.h +++ b/include/linux/completion.h @@ -9,7 +9,7 @@ * See kernel/sched/completion.c for details. */ -#include <linux/wait.h> +#include <linux/swait.h> /* * struct completion - structure used to maintain state for a "completion" @@ -25,7 +25,7 @@ */ struct completion { unsigned int done; - wait_queue_head_t wait; + struct swait_queue_head wait; }; #define init_completion_map(x, m) __init_completion(x) @@ -34,7 +34,7 @@ static inline void complete_acquire(struct completion *x) {} static inline void complete_release(struct completion *x) {} #define COMPLETION_INITIALIZER(work) \ - { 0, __WAIT_QUEUE_HEAD_INITIALIZER((work).wait) } + { 0, __SWAIT_QUEUE_HEAD_INITIALIZER((work).wait) } #define COMPLETION_INITIALIZER_ONSTACK_MAP(work, map) \ (*({ init_completion_map(&(work), &(map)); &(work); })) @@ -85,7 +85,7 @@ static inline void complete_release(struct completion *x) {} static inline void __init_completion(struct completion *x) { x->done = 0; - init_waitqueue_head(&x->wait); + init_swait_queue_head(&x->wait); } /** diff --git a/include/linux/console.h b/include/linux/console.h index f33016b3a401..7a140f4e5d0c 100644 --- a/include/linux/console.h +++ b/include/linux/console.h @@ -148,6 +148,7 @@ struct console { struct tty_driver *(*device)(struct console *, int *); void (*unblank)(void); int (*setup)(struct console *, char *); + int (*exit)(struct console *); int (*match)(struct console *, char *name, int idx, char *options); short flags; short index; diff --git a/include/linux/const.h b/include/linux/const.h index 7b55a55f5911..81b8aae5a855 100644 --- a/include/linux/const.h +++ b/include/linux/const.h @@ -1,9 +1,6 @@ #ifndef _LINUX_CONST_H #define _LINUX_CONST_H -#include <uapi/linux/const.h> - -#define UL(x) (_UL(x)) -#define ULL(x) (_ULL(x)) +#include <vdso/const.h> #endif /* _LINUX_CONST_H */ diff --git a/include/linux/coresight.h b/include/linux/coresight.h index 44e552de419c..193cc9dbf448 100644 --- a/include/linux/coresight.h +++ b/include/linux/coresight.h @@ -41,6 +41,7 @@ enum coresight_dev_type { CORESIGHT_DEV_TYPE_LINKSINK, CORESIGHT_DEV_TYPE_SOURCE, CORESIGHT_DEV_TYPE_HELPER, + CORESIGHT_DEV_TYPE_ECT, }; enum coresight_dev_subtype_sink { @@ -68,6 +69,12 @@ enum coresight_dev_subtype_helper { CORESIGHT_DEV_SUBTYPE_HELPER_CATU, }; +/* Embedded Cross Trigger (ECT) sub-types */ +enum coresight_dev_subtype_ect { + CORESIGHT_DEV_SUBTYPE_ECT_NONE, + CORESIGHT_DEV_SUBTYPE_ECT_CTI, +}; + /** * union coresight_dev_subtype - further characterisation of a type * @sink_subtype: type of sink this component is, as defined @@ -78,6 +85,8 @@ enum coresight_dev_subtype_helper { * by @coresight_dev_subtype_source. * @helper_subtype: type of helper this component is, as defined * by @coresight_dev_subtype_helper. 
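/*
 * Sketch (assumption, not from this patch): how an embedded cross trigger
 * (CTI) driver might describe itself using the new ECT device type and the
 * ect_subtype member added in this hunk.  struct coresight_desc and
 * coresight_register() are existing CoreSight APIs; the "my_cti_*" names
 * are hypothetical.
 */
#include <linux/coresight.h>

static struct coresight_desc my_cti_desc = {
	.type	 = CORESIGHT_DEV_TYPE_ECT,
	.subtype = { .ect_subtype = CORESIGHT_DEV_SUBTYPE_ECT_CTI },
	/* .ops, .pdata, .dev and .groups are filled in by the real driver */
};

static struct coresight_device *my_cti_register(void)
{
	return coresight_register(&my_cti_desc);
}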
+ * @ect_subtype: type of cross trigger this component is, as + * defined by @coresight_dev_subtype_ect */ union coresight_dev_subtype { /* We have some devices which acts as LINK and SINK */ @@ -87,6 +96,7 @@ union coresight_dev_subtype { }; enum coresight_dev_subtype_source source_subtype; enum coresight_dev_subtype_helper helper_subtype; + enum coresight_dev_subtype_ect ect_subtype; }; /** @@ -153,6 +163,8 @@ struct coresight_connection { * activated but not yet enabled. Enabling for a _sink_ * appens when a source has been selected for that it. * @ea: Device attribute for sink representation under PMU directory. + * @ect_dev: Associated cross trigger device. Not part of the trace data + * path or connections. */ struct coresight_device { struct coresight_platform_data *pdata; @@ -166,6 +178,8 @@ struct coresight_device { /* sink specific fields */ bool activated; /* true only if a sink is part of a path */ struct dev_ext_attribute *ea; + /* cross trigger handling */ + struct coresight_device *ect_dev; }; /* @@ -196,6 +210,7 @@ static struct coresight_dev_list (var) = { \ #define sink_ops(csdev) csdev->ops->sink_ops #define link_ops(csdev) csdev->ops->link_ops #define helper_ops(csdev) csdev->ops->helper_ops +#define ect_ops(csdev) csdev->ops->ect_ops /** * struct coresight_ops_sink - basic operations for a sink @@ -262,11 +277,23 @@ struct coresight_ops_helper { int (*disable)(struct coresight_device *csdev, void *data); }; +/** + * struct coresight_ops_ect - Ops for an embedded cross trigger device + * + * @enable : Enable the device + * @disable : Disable the device + */ +struct coresight_ops_ect { + int (*enable)(struct coresight_device *csdev); + int (*disable)(struct coresight_device *csdev); +}; + struct coresight_ops { const struct coresight_ops_sink *sink_ops; const struct coresight_ops_link *link_ops; const struct coresight_ops_source *source_ops; const struct coresight_ops_helper *helper_ops; + const struct coresight_ops_ect *ect_ops; }; #ifdef CONFIG_CORESIGHT diff --git a/include/linux/cpu.h b/include/linux/cpu.h index 1ca2baf817ed..beaed2dc269e 100644 --- a/include/linux/cpu.h +++ b/include/linux/cpu.h @@ -88,10 +88,13 @@ extern ssize_t arch_cpu_release(const char *, size_t); #ifdef CONFIG_SMP extern bool cpuhp_tasks_frozen; -int cpu_up(unsigned int cpu); +int add_cpu(unsigned int cpu); +int cpu_device_up(struct device *dev); void notify_cpu_starting(unsigned int cpu); extern void cpu_maps_update_begin(void); extern void cpu_maps_update_done(void); +int bringup_hibernate_cpu(unsigned int sleep_cpu); +void bringup_nonboot_cpus(unsigned int setup_max_cpus); #else /* CONFIG_SMP */ #define cpuhp_tasks_frozen 0 @@ -117,7 +120,9 @@ extern void lockdep_assert_cpus_held(void); extern void cpu_hotplug_disable(void); extern void cpu_hotplug_enable(void); void clear_tasks_mm_cpumask(int cpu); -int cpu_down(unsigned int cpu); +int remove_cpu(unsigned int cpu); +int cpu_device_down(struct device *dev); +extern void smp_shutdown_nonboot_cpus(unsigned int primary_cpu); #else /* CONFIG_HOTPLUG_CPU */ @@ -129,6 +134,7 @@ static inline int cpus_read_trylock(void) { return true; } static inline void lockdep_assert_cpus_held(void) { } static inline void cpu_hotplug_disable(void) { } static inline void cpu_hotplug_enable(void) { } +static inline void smp_shutdown_nonboot_cpus(unsigned int primary_cpu) { } #endif /* !CONFIG_HOTPLUG_CPU */ /* Wrappers which go away once all code is converted */ @@ -138,12 +144,18 @@ static inline void get_online_cpus(void) { cpus_read_lock(); } static inline 
void put_online_cpus(void) { cpus_read_unlock(); } #ifdef CONFIG_PM_SLEEP_SMP -extern int freeze_secondary_cpus(int primary); +int __freeze_secondary_cpus(int primary, bool suspend); +static inline int freeze_secondary_cpus(int primary) +{ + return __freeze_secondary_cpus(primary, true); +} + static inline int disable_nonboot_cpus(void) { - return freeze_secondary_cpus(0); + return __freeze_secondary_cpus(0, false); } -extern void enable_nonboot_cpus(void); + +void enable_nonboot_cpus(void); static inline int suspend_disable_secondary_cpus(void) { diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h index 0fb561d1b524..f7240251a949 100644 --- a/include/linux/cpufreq.h +++ b/include/linux/cpufreq.h @@ -205,6 +205,7 @@ static inline bool policy_is_shared(struct cpufreq_policy *policy) unsigned int cpufreq_get(unsigned int cpu); unsigned int cpufreq_quick_get(unsigned int cpu); unsigned int cpufreq_quick_get_max(unsigned int cpu); +unsigned int cpufreq_get_hw_max_freq(unsigned int cpu); void disable_cpufreq(void); u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy); @@ -232,6 +233,10 @@ static inline unsigned int cpufreq_quick_get_max(unsigned int cpu) { return 0; } +static inline unsigned int cpufreq_get_hw_max_freq(unsigned int cpu) +{ + return 0; +} static inline void disable_cpufreq(void) { } #endif diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h index d37c17e68268..77d70b633531 100644 --- a/include/linux/cpuhotplug.h +++ b/include/linux/cpuhotplug.h @@ -102,6 +102,7 @@ enum cpuhp_state { CPUHP_AP_IRQ_ARMADA_XP_STARTING, CPUHP_AP_IRQ_BCM2836_STARTING, CPUHP_AP_IRQ_MIPS_GIC_STARTING, + CPUHP_AP_IRQ_SIFIVE_PLIC_STARTING, CPUHP_AP_ARM_MVEBU_COHERENCY, CPUHP_AP_MICROCODE_LOADER, CPUHP_AP_PERF_X86_AMD_UNCORE_STARTING, diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h index d5cc88514aee..f0d895d6ac39 100644 --- a/include/linux/cpumask.h +++ b/include/linux/cpumask.h @@ -194,6 +194,11 @@ static inline unsigned int cpumask_local_spread(unsigned int i, int node) return 0; } +static inline int cpumask_any_and_distribute(const struct cpumask *src1p, + const struct cpumask *src2p) { + return cpumask_next_and(-1, src1p, src2p); +} + #define for_each_cpu(cpu, mask) \ for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask) #define for_each_cpu_not(cpu, mask) \ @@ -245,6 +250,8 @@ static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp) int cpumask_next_and(int n, const struct cpumask *, const struct cpumask *); int cpumask_any_but(const struct cpumask *mask, unsigned int cpu); unsigned int cpumask_local_spread(unsigned int i, int node); +int cpumask_any_and_distribute(const struct cpumask *src1p, + const struct cpumask *src2p); /** * for_each_cpu - iterate over every cpu in a mask diff --git a/include/linux/dccp.h b/include/linux/dccp.h index 6b64b6cc2175..07e547c02fd8 100644 --- a/include/linux/dccp.h +++ b/include/linux/dccp.h @@ -198,7 +198,7 @@ enum dccp_role { struct dccp_service_list { __u32 dccpsl_nr; - __be32 dccpsl_list[0]; + __be32 dccpsl_list[]; }; #define DCCP_SERVICE_INVALID_VALUE htonl((__u32)-1) diff --git a/include/linux/debugfs.h b/include/linux/debugfs.h index 3d013de64f70..a274d95fa66e 100644 --- a/include/linux/debugfs.h +++ b/include/linux/debugfs.h @@ -35,6 +35,7 @@ struct debugfs_regset32 { const struct debugfs_reg32 *regs; int nregs; void __iomem *base; + struct device *dev; /* Optional device for Runtime PM */ }; extern struct dentry *arch_debugfs_dir; @@ -67,10 +68,10 @@ struct dentry 
*debugfs_create_file_unsafe(const char *name, umode_t mode, struct dentry *parent, void *data, const struct file_operations *fops); -struct dentry *debugfs_create_file_size(const char *name, umode_t mode, - struct dentry *parent, void *data, - const struct file_operations *fops, - loff_t file_size); +void debugfs_create_file_size(const char *name, umode_t mode, + struct dentry *parent, void *data, + const struct file_operations *fops, + loff_t file_size); struct dentry *debugfs_create_dir(const char *name, struct dentry *parent); @@ -127,9 +128,9 @@ struct dentry *debugfs_create_blob(const char *name, umode_t mode, struct dentry *parent, struct debugfs_blob_wrapper *blob); -struct dentry *debugfs_create_regset32(const char *name, umode_t mode, - struct dentry *parent, - struct debugfs_regset32 *regset); +void debugfs_create_regset32(const char *name, umode_t mode, + struct dentry *parent, + struct debugfs_regset32 *regset); void debugfs_print_regs32(struct seq_file *s, const struct debugfs_reg32 *regs, int nregs, void __iomem *base, char *prefix); @@ -181,13 +182,11 @@ static inline struct dentry *debugfs_create_file_unsafe(const char *name, return ERR_PTR(-ENODEV); } -static inline struct dentry *debugfs_create_file_size(const char *name, umode_t mode, - struct dentry *parent, void *data, - const struct file_operations *fops, - loff_t file_size) -{ - return ERR_PTR(-ENODEV); -} +static inline void debugfs_create_file_size(const char *name, umode_t mode, + struct dentry *parent, void *data, + const struct file_operations *fops, + loff_t file_size) +{ } static inline struct dentry *debugfs_create_dir(const char *name, struct dentry *parent) @@ -304,11 +303,10 @@ static inline struct dentry *debugfs_create_blob(const char *name, umode_t mode, return ERR_PTR(-ENODEV); } -static inline struct dentry *debugfs_create_regset32(const char *name, - umode_t mode, struct dentry *parent, - struct debugfs_regset32 *regset) +static inline void debugfs_create_regset32(const char *name, umode_t mode, + struct dentry *parent, + struct debugfs_regset32 *regset) { - return ERR_PTR(-ENODEV); } static inline void debugfs_print_regs32(struct seq_file *s, const struct debugfs_reg32 *regs, diff --git a/include/linux/devfreq.h b/include/linux/devfreq.h index c6f82d4bec9f..57e871a559a9 100644 --- a/include/linux/devfreq.h +++ b/include/linux/devfreq.h @@ -158,7 +158,7 @@ struct devfreq_stats { * functions except for the context of callbacks defined in struct * devfreq_governor, the governor should protect its access with the * struct mutex lock in struct devfreq. A governor may use this mutex - * to protect its own private data in void *data as well. + * to protect its own private data in ``void *data`` as well. 
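/*
 * Sketch (assumption, not from this patch): handing governor-private data
 * ("void *data", as described above) to devm_devfreq_add_device() together
 * with a minimal profile.  The target callback and all "my_*" names are
 * hypothetical; a real driver would also provide get_dev_status() for the
 * simple_ondemand governor.
 */
#include <linux/devfreq.h>
#include <linux/err.h>

static int my_df_target(struct device *dev, unsigned long *freq, u32 flags)
{
	return 0;	/* a real driver would program the new frequency here */
}

static struct devfreq_dev_profile my_df_profile = {
	.polling_ms	= 50,
	.target		= my_df_target,
};

static struct devfreq_simple_ondemand_data my_df_gov_data = {
	.upthreshold		= 85,	/* jump up once load exceeds 85% */
	.downdifferential	= 10,
};

static int my_df_probe(struct device *dev)
{
	struct devfreq *df;

	df = devm_devfreq_add_device(dev, &my_df_profile,
				     DEVFREQ_GOV_SIMPLE_ONDEMAND,
				     &my_df_gov_data);
	return PTR_ERR_OR_ZERO(df);
}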
*/ struct devfreq { struct list_head node; @@ -201,24 +201,23 @@ struct devfreq_freqs { }; #if defined(CONFIG_PM_DEVFREQ) -extern struct devfreq *devfreq_add_device(struct device *dev, - struct devfreq_dev_profile *profile, - const char *governor_name, - void *data); -extern int devfreq_remove_device(struct devfreq *devfreq); -extern struct devfreq *devm_devfreq_add_device(struct device *dev, - struct devfreq_dev_profile *profile, - const char *governor_name, - void *data); -extern void devm_devfreq_remove_device(struct device *dev, - struct devfreq *devfreq); +struct devfreq *devfreq_add_device(struct device *dev, + struct devfreq_dev_profile *profile, + const char *governor_name, + void *data); +int devfreq_remove_device(struct devfreq *devfreq); +struct devfreq *devm_devfreq_add_device(struct device *dev, + struct devfreq_dev_profile *profile, + const char *governor_name, + void *data); +void devm_devfreq_remove_device(struct device *dev, struct devfreq *devfreq); /* Supposed to be called by PM callbacks */ -extern int devfreq_suspend_device(struct devfreq *devfreq); -extern int devfreq_resume_device(struct devfreq *devfreq); +int devfreq_suspend_device(struct devfreq *devfreq); +int devfreq_resume_device(struct devfreq *devfreq); -extern void devfreq_suspend(void); -extern void devfreq_resume(void); +void devfreq_suspend(void); +void devfreq_resume(void); /** * update_devfreq() - Reevaluate the device and configure frequency @@ -226,39 +225,38 @@ extern void devfreq_resume(void); * * Note: devfreq->lock must be held */ -extern int update_devfreq(struct devfreq *devfreq); +int update_devfreq(struct devfreq *devfreq); /* Helper functions for devfreq user device driver with OPP. */ -extern struct dev_pm_opp *devfreq_recommended_opp(struct device *dev, - unsigned long *freq, u32 flags); -extern int devfreq_register_opp_notifier(struct device *dev, - struct devfreq *devfreq); -extern int devfreq_unregister_opp_notifier(struct device *dev, - struct devfreq *devfreq); -extern int devm_devfreq_register_opp_notifier(struct device *dev, - struct devfreq *devfreq); -extern void devm_devfreq_unregister_opp_notifier(struct device *dev, - struct devfreq *devfreq); -extern int devfreq_register_notifier(struct devfreq *devfreq, - struct notifier_block *nb, - unsigned int list); -extern int devfreq_unregister_notifier(struct devfreq *devfreq, - struct notifier_block *nb, - unsigned int list); -extern int devm_devfreq_register_notifier(struct device *dev, +struct dev_pm_opp *devfreq_recommended_opp(struct device *dev, + unsigned long *freq, u32 flags); +int devfreq_register_opp_notifier(struct device *dev, + struct devfreq *devfreq); +int devfreq_unregister_opp_notifier(struct device *dev, + struct devfreq *devfreq); +int devm_devfreq_register_opp_notifier(struct device *dev, + struct devfreq *devfreq); +void devm_devfreq_unregister_opp_notifier(struct device *dev, + struct devfreq *devfreq); +int devfreq_register_notifier(struct devfreq *devfreq, + struct notifier_block *nb, + unsigned int list); +int devfreq_unregister_notifier(struct devfreq *devfreq, + struct notifier_block *nb, + unsigned int list); +int devm_devfreq_register_notifier(struct device *dev, struct devfreq *devfreq, struct notifier_block *nb, unsigned int list); -extern void devm_devfreq_unregister_notifier(struct device *dev, +void devm_devfreq_unregister_notifier(struct device *dev, struct devfreq *devfreq, struct notifier_block *nb, unsigned int list); -extern struct devfreq *devfreq_get_devfreq_by_phandle(struct device *dev, - int 
index); +struct devfreq *devfreq_get_devfreq_by_phandle(struct device *dev, int index); #if IS_ENABLED(CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND) /** - * struct devfreq_simple_ondemand_data - void *data fed to struct devfreq + * struct devfreq_simple_ondemand_data - ``void *data`` fed to struct devfreq * and devfreq_add_device * @upthreshold: If the load is over this value, the frequency jumps. * Specify 0 to use the default. Valid value = 0 to 100. @@ -278,7 +276,7 @@ struct devfreq_simple_ondemand_data { #if IS_ENABLED(CONFIG_DEVFREQ_GOV_PASSIVE) /** - * struct devfreq_passive_data - void *data fed to struct devfreq + * struct devfreq_passive_data - ``void *data`` fed to struct devfreq * and devfreq_add_device * @parent: the devfreq instance of parent device. * @get_target_freq: Optional callback, Returns desired operating frequency @@ -311,9 +309,9 @@ struct devfreq_passive_data { #else /* !CONFIG_PM_DEVFREQ */ static inline struct devfreq *devfreq_add_device(struct device *dev, - struct devfreq_dev_profile *profile, - const char *governor_name, - void *data) + struct devfreq_dev_profile *profile, + const char *governor_name, + void *data) { return ERR_PTR(-ENOSYS); } @@ -350,31 +348,31 @@ static inline void devfreq_suspend(void) {} static inline void devfreq_resume(void) {} static inline struct dev_pm_opp *devfreq_recommended_opp(struct device *dev, - unsigned long *freq, u32 flags) + unsigned long *freq, u32 flags) { return ERR_PTR(-EINVAL); } static inline int devfreq_register_opp_notifier(struct device *dev, - struct devfreq *devfreq) + struct devfreq *devfreq) { return -EINVAL; } static inline int devfreq_unregister_opp_notifier(struct device *dev, - struct devfreq *devfreq) + struct devfreq *devfreq) { return -EINVAL; } static inline int devm_devfreq_register_opp_notifier(struct device *dev, - struct devfreq *devfreq) + struct devfreq *devfreq) { return -EINVAL; } static inline void devm_devfreq_unregister_opp_notifier(struct device *dev, - struct devfreq *devfreq) + struct devfreq *devfreq) { } @@ -393,22 +391,22 @@ static inline int devfreq_unregister_notifier(struct devfreq *devfreq, } static inline int devm_devfreq_register_notifier(struct device *dev, - struct devfreq *devfreq, - struct notifier_block *nb, - unsigned int list) + struct devfreq *devfreq, + struct notifier_block *nb, + unsigned int list) { return 0; } static inline void devm_devfreq_unregister_notifier(struct device *dev, - struct devfreq *devfreq, - struct notifier_block *nb, - unsigned int list) + struct devfreq *devfreq, + struct notifier_block *nb, + unsigned int list) { } static inline struct devfreq *devfreq_get_devfreq_by_phandle(struct device *dev, - int index) + int index) { return ERR_PTR(-ENODEV); } diff --git a/include/linux/device.h b/include/linux/device.h index 0cd7c647c16c..1311f276f533 100644 --- a/include/linux/device.h +++ b/include/linux/device.h @@ -798,6 +798,17 @@ static inline struct device_node *dev_of_node(struct device *dev) return dev->of_node; } +static inline bool dev_has_sync_state(struct device *dev) +{ + if (!dev) + return false; + if (dev->driver && dev->driver->sync_state) + return true; + if (dev->bus && dev->bus->sync_state) + return true; + return false; +} + /* * High level routines for use by the bus drivers */ @@ -817,6 +828,7 @@ extern struct device *device_find_child_by_name(struct device *parent, extern int device_rename(struct device *dev, const char *new_name); extern int device_move(struct device *dev, struct device *new_parent, enum dpm_order dpm_order); +extern int 
device_change_owner(struct device *dev, kuid_t kuid, kgid_t kgid); extern const char *device_get_devnode(struct device *dev, umode_t *mode, kuid_t *uid, kgid_t *gid, const char **tmp); diff --git a/include/linux/device/driver.h b/include/linux/device/driver.h index 1188260f9a02..ee7ba5b5417e 100644 --- a/include/linux/device/driver.h +++ b/include/linux/device/driver.h @@ -236,9 +236,9 @@ driver_find_device_by_acpi_dev(struct device_driver *drv, const void *adev) } #endif +extern int driver_deferred_probe_timeout; void driver_deferred_probe_add(struct device *dev); int driver_deferred_probe_check_state(struct device *dev); -int driver_deferred_probe_check_state_continue(struct device *dev); void driver_init(void); /** diff --git a/include/linux/dio.h b/include/linux/dio.h index 1470d1d943b4..5abd07361eb5 100644 --- a/include/linux/dio.h +++ b/include/linux/dio.h @@ -247,11 +247,6 @@ extern int dio_create_sysfs_dev_files(struct dio_dev *); /* New-style probing */ extern int dio_register_driver(struct dio_driver *); extern void dio_unregister_driver(struct dio_driver *); -extern const struct dio_device_id *dio_match_device(const struct dio_device_id *ids, const struct dio_dev *z); -static inline struct dio_driver *dio_dev_driver(const struct dio_dev *d) -{ - return d->driver; -} #define dio_resource_start(d) ((d)->resource.start) #define dio_resource_end(d) ((d)->resource.end) diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h index abf5459a5b9d..1ade486fc2bb 100644 --- a/include/linux/dma-buf.h +++ b/include/linux/dma-buf.h @@ -43,18 +43,6 @@ struct dma_buf_ops { bool cache_sgt_mapping; /** - * @dynamic_mapping: - * - * If true the framework makes sure that the map/unmap_dma_buf - * callbacks are always called with the dma_resv object locked. - * - * If false the framework makes sure that the map/unmap_dma_buf - * callbacks are always called without the dma_resv object locked. - * Mutual exclusive with @cache_sgt_mapping. - */ - bool dynamic_mapping; - - /** * @attach: * * This is called from dma_buf_attach() to make sure that a given @@ -94,13 +82,42 @@ struct dma_buf_ops { void (*detach)(struct dma_buf *, struct dma_buf_attachment *); /** + * @pin: + * + * This is called by dma_buf_pin and lets the exporter know that the + * DMA-buf can't be moved any more. + * + * This is called with the dmabuf->resv object locked and is mutual + * exclusive with @cache_sgt_mapping. + * + * This callback is optional and should only be used in limited use + * cases like scanout and not for temporary pin operations. + * + * Returns: + * + * 0 on success, negative error code on failure. + */ + int (*pin)(struct dma_buf_attachment *attach); + + /** + * @unpin: + * + * This is called by dma_buf_unpin and lets the exporter know that the + * DMA-buf can be moved again. + * + * This is called with the dmabuf->resv object locked and is mutual + * exclusive with @cache_sgt_mapping. + * + * This callback is optional. + */ + void (*unpin)(struct dma_buf_attachment *attach); + + /** * @map_dma_buf: * * This is called by dma_buf_map_attachment() and is used to map a * shared &dma_buf into device address space, and it is mandatory. It - * can only be called if @attach has been called successfully. This - * essentially pins the DMA buffer into place, and it cannot be moved - * any more + * can only be called if @attach has been called successfully. * * This call may sleep, e.g. 
when the backing storage first needs to be * allocated, or moved to a location suitable for all currently attached @@ -141,9 +158,8 @@ struct dma_buf_ops { * * This is called by dma_buf_unmap_attachment() and should unmap and * release the &sg_table allocated in @map_dma_buf, and it is mandatory. - * It should also unpin the backing storage if this is the last mapping - * of the DMA buffer, it the exporter supports backing storage - * migration. + * For static dma_buf handling this might also unpins the backing + * storage if this is the last mapping of the DMA buffer. */ void (*unmap_dma_buf)(struct dma_buf_attachment *, struct sg_table *, @@ -312,6 +328,34 @@ struct dma_buf { }; /** + * struct dma_buf_attach_ops - importer operations for an attachment + * @move_notify: [optional] notification that the DMA-buf is moving + * + * Attachment operations implemented by the importer. + */ +struct dma_buf_attach_ops { + /** + * @move_notify + * + * If this callback is provided the framework can avoid pinning the + * backing store while mappings exists. + * + * This callback is called with the lock of the reservation object + * associated with the dma_buf held and the mapping function must be + * called with this lock held as well. This makes sure that no mapping + * is created concurrently with an ongoing move operation. + * + * Mappings stay valid and are not directly affected by this callback. + * But the DMA-buf can now be in a different physical location, so all + * mappings should be destroyed and re-created as soon as possible. + * + * New mappings can be created after this callback returns, and will + * point to the new location of the DMA-buf. + */ + void (*move_notify)(struct dma_buf_attachment *attach); +}; + +/** * struct dma_buf_attachment - holds device-buffer attachment data * @dmabuf: buffer for this attachment. * @dev: device attached to the buffer. @@ -319,8 +363,9 @@ struct dma_buf { * @sgt: cached mapping. * @dir: direction of cached mapping. * @priv: exporter specific attachment data. - * @dynamic_mapping: true if dma_buf_map/unmap_attachment() is called with the - * dma_resv lock held. + * @importer_ops: importer operations for this attachment, if provided + * dma_buf_map/unmap_attachment() must be called with the dma_resv lock held. + * @importer_priv: importer specific attachment data. * * This structure holds the attachment information between the dma_buf buffer * and its user device(s). 
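/*
 * Sketch (assumption, not from this patch): a dynamic importer supplying
 * the new dma_buf_attach_ops so it receives move_notify() callbacks instead
 * of having the exporter pin the buffer.  dma_buf_dynamic_attach() is
 * declared further below in this header; the "my_*" names are hypothetical.
 */
#include <linux/dma-buf.h>
#include <linux/err.h>

struct my_importer {
	struct dma_buf_attachment *attach;
	bool mappings_stale;
};

static void my_move_notify(struct dma_buf_attachment *attach)
{
	struct my_importer *imp = attach->importer_priv;

	/* Called with attach->dmabuf->resv held: mark cached mappings stale */
	imp->mappings_stale = true;
}

static const struct dma_buf_attach_ops my_attach_ops = {
	.move_notify = my_move_notify,
};

static int my_importer_attach(struct my_importer *imp, struct dma_buf *buf,
			      struct device *dev)
{
	imp->attach = dma_buf_dynamic_attach(buf, dev, &my_attach_ops, imp);
	return PTR_ERR_OR_ZERO(imp->attach);
}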
The list contains one attachment struct per device @@ -337,7 +382,8 @@ struct dma_buf_attachment { struct list_head node; struct sg_table *sgt; enum dma_data_direction dir; - bool dynamic_mapping; + const struct dma_buf_attach_ops *importer_ops; + void *importer_priv; void *priv; }; @@ -399,7 +445,7 @@ static inline void get_dma_buf(struct dma_buf *dmabuf) */ static inline bool dma_buf_is_dynamic(struct dma_buf *dmabuf) { - return dmabuf->ops->dynamic_mapping; + return !!dmabuf->ops->pin; } /** @@ -413,16 +459,19 @@ static inline bool dma_buf_is_dynamic(struct dma_buf *dmabuf) static inline bool dma_buf_attachment_is_dynamic(struct dma_buf_attachment *attach) { - return attach->dynamic_mapping; + return !!attach->importer_ops; } struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf, struct device *dev); struct dma_buf_attachment * dma_buf_dynamic_attach(struct dma_buf *dmabuf, struct device *dev, - bool dynamic_mapping); + const struct dma_buf_attach_ops *importer_ops, + void *importer_priv); void dma_buf_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attach); +int dma_buf_pin(struct dma_buf_attachment *attach); +void dma_buf_unpin(struct dma_buf_attachment *attach); struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info); diff --git a/include/linux/dma-noncoherent.h b/include/linux/dma-noncoherent.h index ca9b5770caee..b59f1b6be3e9 100644 --- a/include/linux/dma-noncoherent.h +++ b/include/linux/dma-noncoherent.h @@ -108,7 +108,7 @@ static inline void arch_dma_prep_coherent(struct page *page, size_t size) } #endif /* CONFIG_ARCH_HAS_DMA_PREP_COHERENT */ -void *uncached_kernel_address(void *addr); -void *cached_kernel_address(void *addr); +void *arch_dma_set_uncached(void *addr, size_t size); +void arch_dma_clear_uncached(void *addr, size_t size); #endif /* _LINUX_DMA_NONCOHERENT_H */ diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h index 64461fc64e1b..21065c04c4ac 100644 --- a/include/linux/dmaengine.h +++ b/include/linux/dmaengine.h @@ -300,6 +300,8 @@ struct dma_router { * @chan_id: channel ID for sysfs * @dev: class device for sysfs * @name: backlink name for sysfs + * @dbg_client_name: slave name for debugfs in format: + * dev_name(requester's dev):channel name, for example: "2b00000.mcasp:tx" * @device_node: used to add this to the device chan list * @local: per-cpu pointer to a struct dma_chan_percpu * @client_count: how many clients are using this channel @@ -318,6 +320,9 @@ struct dma_chan { int chan_id; struct dma_chan_dev *dev; const char *name; +#ifdef CONFIG_DEBUG_FS + char *dbg_client_name; +#endif struct list_head device_node; struct dma_chan_percpu __percpu *local; @@ -618,10 +623,11 @@ static inline void dmaengine_unmap_put(struct dmaengine_unmap_data *unmap) static inline void dma_descriptor_unmap(struct dma_async_tx_descriptor *tx) { - if (tx->unmap) { - dmaengine_unmap_put(tx->unmap); - tx->unmap = NULL; - } + if (!tx->unmap) + return; + + dmaengine_unmap_put(tx->unmap); + tx->unmap = NULL; } #ifndef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH @@ -805,7 +811,9 @@ struct dma_filter { * called and there are no further references to this structure. This * must be implemented to free resources however many existing drivers * do not and are therefore not safe to unbind while in use. - * + * @dbg_summary_show: optional routine to show contents in debugfs; default code + * will be used when this is omitted, but custom code can show extra, + * controller specific information. 
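/*
 * Sketch (assumption, not from this patch): a controller driver providing
 * the optional dbg_summary_show() callback documented above to print
 * per-channel detail beyond what the default debugfs code shows.  All
 * "my_*" names are hypothetical.
 */
#include <linux/dmaengine.h>
#include <linux/seq_file.h>

#ifdef CONFIG_DEBUG_FS
static void my_dma_dbg_summary_show(struct seq_file *s,
				    struct dma_device *dma_dev)
{
	struct dma_chan *chan;

	list_for_each_entry(chan, &dma_dev->channels, device_node) {
		if (!chan->client_count)
			continue;
		seq_printf(s, "%s: busy, client %s\n", dma_chan_name(chan),
			   chan->dbg_client_name ?: "?");
	}
}
#endif

static int my_dma_register(struct dma_device *dma_dev)
{
#ifdef CONFIG_DEBUG_FS
	dma_dev->dbg_summary_show = my_dma_dbg_summary_show;
#endif
	return dma_async_device_register(dma_dev);
}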
*/ struct dma_device { struct kref ref; @@ -891,6 +899,11 @@ struct dma_device { struct dma_tx_state *txstate); void (*device_issue_pending)(struct dma_chan *chan); void (*device_release)(struct dma_device *dev); + /* debugfs support */ +#ifdef CONFIG_DEBUG_FS + void (*dbg_summary_show)(struct seq_file *s, struct dma_device *dev); + struct dentry *dbg_dev_root; +#endif }; static inline int dmaengine_slave_config(struct dma_chan *chan, @@ -1155,14 +1168,7 @@ static inline dma_cookie_t dmaengine_submit(struct dma_async_tx_descriptor *desc static inline bool dmaengine_check_align(enum dmaengine_alignment align, size_t off1, size_t off2, size_t len) { - size_t mask; - - if (!align) - return true; - mask = (1 << align) - 1; - if (mask & (off1 | off2 | len)) - return false; - return true; + return !(((1 << align) - 1) & (off1 | off2 | len)); } static inline bool is_dma_copy_aligned(struct dma_device *dev, size_t off1, @@ -1236,9 +1242,9 @@ static inline int dma_maxpq(struct dma_device *dma, enum dma_ctrl_flags flags) { if (dma_dev_has_pq_continue(dma) || !dmaf_continue(flags)) return dma_dev_to_maxpq(dma); - else if (dmaf_p_disabled_continue(flags)) + if (dmaf_p_disabled_continue(flags)) return dma_dev_to_maxpq(dma) - 1; - else if (dmaf_continue(flags)) + if (dmaf_continue(flags)) return dma_dev_to_maxpq(dma) - 3; BUG(); } @@ -1249,7 +1255,7 @@ static inline size_t dmaengine_get_icg(bool inc, bool sgl, size_t icg, if (inc) { if (dir_icg) return dir_icg; - else if (sgl) + if (sgl) return icg; } @@ -1415,11 +1421,12 @@ static inline enum dma_status dma_async_is_complete(dma_cookie_t cookie, static inline void dma_set_tx_state(struct dma_tx_state *st, dma_cookie_t last, dma_cookie_t used, u32 residue) { - if (st) { - st->last = last; - st->used = used; - st->residue = residue; - } + if (!st) + return; + + st->last = last; + st->used = used; + st->residue = residue; } #ifdef CONFIG_DMA_ENGINE @@ -1496,12 +1503,11 @@ static inline int dmaengine_desc_set_reuse(struct dma_async_tx_descriptor *tx) if (ret) return ret; - if (caps.descriptor_reuse) { - tx->flags |= DMA_CTRL_REUSE; - return 0; - } else { + if (!caps.descriptor_reuse) return -EPERM; - } + + tx->flags |= DMA_CTRL_REUSE; + return 0; } static inline void dmaengine_desc_clear_reuse(struct dma_async_tx_descriptor *tx) @@ -1517,10 +1523,10 @@ static inline bool dmaengine_desc_test_reuse(struct dma_async_tx_descriptor *tx) static inline int dmaengine_desc_free(struct dma_async_tx_descriptor *desc) { /* this is supported for reusable desc, so check that */ - if (dmaengine_desc_test_reuse(desc)) - return desc->desc_free(desc); - else + if (!dmaengine_desc_test_reuse(desc)) return -EPERM; + + return desc->desc_free(desc); } /* --- DMA device --- */ @@ -1566,9 +1572,7 @@ dmaengine_get_direction_text(enum dma_transfer_direction dir) case DMA_DEV_TO_DEV: return "DEV_TO_DEV"; default: - break; + return "invalid"; } - - return "invalid"; } #endif /* DMAENGINE_H */ diff --git a/include/linux/dmar.h b/include/linux/dmar.h index f64ca27dc210..d7bf029df737 100644 --- a/include/linux/dmar.h +++ b/include/linux/dmar.h @@ -69,19 +69,23 @@ struct dmar_pci_notify_info { extern struct rw_semaphore dmar_global_lock; extern struct list_head dmar_drhd_units; -#define for_each_drhd_unit(drhd) \ - list_for_each_entry_rcu(drhd, &dmar_drhd_units, list) +#define for_each_drhd_unit(drhd) \ + list_for_each_entry_rcu(drhd, &dmar_drhd_units, list, \ + dmar_rcu_check()) #define for_each_active_drhd_unit(drhd) \ - list_for_each_entry_rcu(drhd, &dmar_drhd_units, list) \ + 
list_for_each_entry_rcu(drhd, &dmar_drhd_units, list, \ + dmar_rcu_check()) \ if (drhd->ignored) {} else #define for_each_active_iommu(i, drhd) \ - list_for_each_entry_rcu(drhd, &dmar_drhd_units, list) \ + list_for_each_entry_rcu(drhd, &dmar_drhd_units, list, \ + dmar_rcu_check()) \ if (i=drhd->iommu, drhd->ignored) {} else #define for_each_iommu(i, drhd) \ - list_for_each_entry_rcu(drhd, &dmar_drhd_units, list) \ + list_for_each_entry_rcu(drhd, &dmar_drhd_units, list, \ + dmar_rcu_check()) \ if (i=drhd->iommu, 0) {} else static inline bool dmar_rcu_check(void) diff --git a/include/linux/dsa/8021q.h b/include/linux/dsa/8021q.h index 0aa803c451a3..c620d9139c28 100644 --- a/include/linux/dsa/8021q.h +++ b/include/linux/dsa/8021q.h @@ -28,8 +28,6 @@ int dsa_8021q_rx_switch_id(u16 vid); int dsa_8021q_rx_source_port(u16 vid); -struct sk_buff *dsa_8021q_remove_header(struct sk_buff *skb); - #else int dsa_port_setup_8021q_tagging(struct dsa_switch *ds, int index, @@ -64,11 +62,6 @@ int dsa_8021q_rx_source_port(u16 vid) return 0; } -struct sk_buff *dsa_8021q_remove_header(struct sk_buff *skb) -{ - return NULL; -} - #endif /* IS_ENABLED(CONFIG_NET_DSA_TAG_8021Q) */ #endif /* _NET_DSA_8021Q_H */ diff --git a/include/linux/dw_apb_timer.h b/include/linux/dw_apb_timer.h index 14f072edbca5..82ebf9223948 100644 --- a/include/linux/dw_apb_timer.h +++ b/include/linux/dw_apb_timer.h @@ -25,7 +25,6 @@ struct dw_apb_timer { struct dw_apb_clock_event_device { struct clock_event_device ced; struct dw_apb_timer timer; - struct irqaction irqaction; void (*eoi)(struct dw_apb_timer *); }; diff --git a/include/linux/edac.h b/include/linux/edac.h index cc31b9742684..0f20b986b0ab 100644 --- a/include/linux/edac.h +++ b/include/linux/edac.h @@ -383,6 +383,9 @@ struct dimm_info { unsigned int csrow, cschannel; /* Points to the old API data */ u16 smbios_handle; /* Handle for SMBIOS type 17 */ + + u32 ce_count; + u32 ue_count; }; /** @@ -442,6 +445,7 @@ struct errcount_attribute_data { * struct edac_raw_error_desc - Raw error report structure * @grain: minimum granularity for an error report, in bytes * @error_count: number of errors of the same type + * @type: severity of the error (CE/UE/Fatal) * @top_layer: top layer of the error (layer[0]) * @mid_layer: middle layer of the error (layer[1]) * @low_layer: low layer of the error (layer[2]) @@ -453,8 +457,6 @@ struct errcount_attribute_data { * @location: location of the error * @label: label of the affected DIMM(s) * @other_detail: other driver-specific detail about the error - * @enable_per_layer_report: if false, the error affects all layers - * (typically, a memory controller error) */ struct edac_raw_error_desc { char location[LOCATION_SIZE]; @@ -462,6 +464,7 @@ struct edac_raw_error_desc { long grain; u16 error_count; + enum hw_event_mc_err_type type; int top_layer; int mid_layer; int low_layer; @@ -470,7 +473,6 @@ struct edac_raw_error_desc { unsigned long syndrome; const char *msg; const char *other_detail; - bool enable_per_layer_report; }; /* MEMORY controller information structure @@ -560,7 +562,6 @@ struct mem_ctl_info { */ u32 ce_noinfo_count, ue_noinfo_count; u32 ue_mc, ce_mc; - u32 *ce_per_layer[EDAC_MAX_LAYERS], *ue_per_layer[EDAC_MAX_LAYERS]; struct completion complete; diff --git a/include/linux/efi.h b/include/linux/efi.h index 7efd7072cca5..251f1f783cdf 100644 --- a/include/linux/efi.h +++ b/include/linux/efi.h @@ -56,19 +56,6 @@ typedef void *efi_handle_t; #define __efiapi #endif -#define efi_get_handle_at(array, idx) \ - (efi_is_native() ? 
(array)[idx] \ - : (efi_handle_t)(unsigned long)((u32 *)(array))[idx]) - -#define efi_get_handle_num(size) \ - ((size) / (efi_is_native() ? sizeof(efi_handle_t) : sizeof(u32))) - -#define for_each_efi_handle(handle, array, size, i) \ - for (i = 0; \ - i < efi_get_handle_num(size) && \ - ((handle = efi_get_handle_at((array), i)) || true); \ - i++) - /* * The UEFI spec and EDK2 reference implementation both define EFI_GUID as * struct { u32 a; u16; b; u16 c; u8 d[8]; }; and so the implied alignment @@ -157,15 +144,6 @@ typedef struct { u32 imagesize; } efi_capsule_header_t; -struct efi_boot_memmap { - efi_memory_desc_t **map; - unsigned long *map_size; - unsigned long *desc_size; - u32 *desc_ver; - unsigned long *key_ptr; - unsigned long *buff_size; -}; - /* * EFI capsule flags */ @@ -187,14 +165,6 @@ struct capsule_info { int __efi_capsule_setup_info(struct capsule_info *cap_info); -/* - * Allocation types for calls to boottime->allocate_pages. - */ -#define EFI_ALLOCATE_ANY_PAGES 0 -#define EFI_ALLOCATE_MAX_ADDRESS 1 -#define EFI_ALLOCATE_ADDRESS 2 -#define EFI_MAX_ALLOCATE_TYPE 3 - typedef int (*efi_freemem_callback_t) (u64 start, u64 end, void *arg); /* @@ -224,291 +194,7 @@ typedef struct { u8 sets_to_zero; } efi_time_cap_t; -typedef struct { - efi_table_hdr_t hdr; - u32 raise_tpl; - u32 restore_tpl; - u32 allocate_pages; - u32 free_pages; - u32 get_memory_map; - u32 allocate_pool; - u32 free_pool; - u32 create_event; - u32 set_timer; - u32 wait_for_event; - u32 signal_event; - u32 close_event; - u32 check_event; - u32 install_protocol_interface; - u32 reinstall_protocol_interface; - u32 uninstall_protocol_interface; - u32 handle_protocol; - u32 __reserved; - u32 register_protocol_notify; - u32 locate_handle; - u32 locate_device_path; - u32 install_configuration_table; - u32 load_image; - u32 start_image; - u32 exit; - u32 unload_image; - u32 exit_boot_services; - u32 get_next_monotonic_count; - u32 stall; - u32 set_watchdog_timer; - u32 connect_controller; - u32 disconnect_controller; - u32 open_protocol; - u32 close_protocol; - u32 open_protocol_information; - u32 protocols_per_handle; - u32 locate_handle_buffer; - u32 locate_protocol; - u32 install_multiple_protocol_interfaces; - u32 uninstall_multiple_protocol_interfaces; - u32 calculate_crc32; - u32 copy_mem; - u32 set_mem; - u32 create_event_ex; -} __packed efi_boot_services_32_t; - -/* - * EFI Boot Services table - */ -typedef union { - struct { - efi_table_hdr_t hdr; - void *raise_tpl; - void *restore_tpl; - efi_status_t (__efiapi *allocate_pages)(int, int, unsigned long, - efi_physical_addr_t *); - efi_status_t (__efiapi *free_pages)(efi_physical_addr_t, - unsigned long); - efi_status_t (__efiapi *get_memory_map)(unsigned long *, void *, - unsigned long *, - unsigned long *, u32 *); - efi_status_t (__efiapi *allocate_pool)(int, unsigned long, - void **); - efi_status_t (__efiapi *free_pool)(void *); - void *create_event; - void *set_timer; - void *wait_for_event; - void *signal_event; - void *close_event; - void *check_event; - void *install_protocol_interface; - void *reinstall_protocol_interface; - void *uninstall_protocol_interface; - efi_status_t (__efiapi *handle_protocol)(efi_handle_t, - efi_guid_t *, void **); - void *__reserved; - void *register_protocol_notify; - efi_status_t (__efiapi *locate_handle)(int, efi_guid_t *, - void *, unsigned long *, - efi_handle_t *); - void *locate_device_path; - efi_status_t (__efiapi *install_configuration_table)(efi_guid_t *, - void *); - void *load_image; - void *start_image; - void 
*exit; - void *unload_image; - efi_status_t (__efiapi *exit_boot_services)(efi_handle_t, - unsigned long); - void *get_next_monotonic_count; - void *stall; - void *set_watchdog_timer; - void *connect_controller; - efi_status_t (__efiapi *disconnect_controller)(efi_handle_t, - efi_handle_t, - efi_handle_t); - void *open_protocol; - void *close_protocol; - void *open_protocol_information; - void *protocols_per_handle; - void *locate_handle_buffer; - efi_status_t (__efiapi *locate_protocol)(efi_guid_t *, void *, - void **); - void *install_multiple_protocol_interfaces; - void *uninstall_multiple_protocol_interfaces; - void *calculate_crc32; - void *copy_mem; - void *set_mem; - void *create_event_ex; - }; - efi_boot_services_32_t mixed_mode; -} efi_boot_services_t; - -typedef enum { - EfiPciIoWidthUint8, - EfiPciIoWidthUint16, - EfiPciIoWidthUint32, - EfiPciIoWidthUint64, - EfiPciIoWidthFifoUint8, - EfiPciIoWidthFifoUint16, - EfiPciIoWidthFifoUint32, - EfiPciIoWidthFifoUint64, - EfiPciIoWidthFillUint8, - EfiPciIoWidthFillUint16, - EfiPciIoWidthFillUint32, - EfiPciIoWidthFillUint64, - EfiPciIoWidthMaximum -} EFI_PCI_IO_PROTOCOL_WIDTH; - -typedef enum { - EfiPciIoAttributeOperationGet, - EfiPciIoAttributeOperationSet, - EfiPciIoAttributeOperationEnable, - EfiPciIoAttributeOperationDisable, - EfiPciIoAttributeOperationSupported, - EfiPciIoAttributeOperationMaximum -} EFI_PCI_IO_PROTOCOL_ATTRIBUTE_OPERATION; - -typedef struct { - u32 read; - u32 write; -} efi_pci_io_protocol_access_32_t; - -typedef union efi_pci_io_protocol efi_pci_io_protocol_t; - -typedef -efi_status_t (__efiapi *efi_pci_io_protocol_cfg_t)(efi_pci_io_protocol_t *, - EFI_PCI_IO_PROTOCOL_WIDTH, - u32 offset, - unsigned long count, - void *buffer); - -typedef struct { - void *read; - void *write; -} efi_pci_io_protocol_access_t; - -typedef struct { - efi_pci_io_protocol_cfg_t read; - efi_pci_io_protocol_cfg_t write; -} efi_pci_io_protocol_config_access_t; - -union efi_pci_io_protocol { - struct { - void *poll_mem; - void *poll_io; - efi_pci_io_protocol_access_t mem; - efi_pci_io_protocol_access_t io; - efi_pci_io_protocol_config_access_t pci; - void *copy_mem; - void *map; - void *unmap; - void *allocate_buffer; - void *free_buffer; - void *flush; - efi_status_t (__efiapi *get_location)(efi_pci_io_protocol_t *, - unsigned long *segment_nr, - unsigned long *bus_nr, - unsigned long *device_nr, - unsigned long *func_nr); - void *attributes; - void *get_bar_attributes; - void *set_bar_attributes; - uint64_t romsize; - void *romimage; - }; - struct { - u32 poll_mem; - u32 poll_io; - efi_pci_io_protocol_access_32_t mem; - efi_pci_io_protocol_access_32_t io; - efi_pci_io_protocol_access_32_t pci; - u32 copy_mem; - u32 map; - u32 unmap; - u32 allocate_buffer; - u32 free_buffer; - u32 flush; - u32 get_location; - u32 attributes; - u32 get_bar_attributes; - u32 set_bar_attributes; - u64 romsize; - u32 romimage; - } mixed_mode; -}; - -#define EFI_PCI_IO_ATTRIBUTE_ISA_MOTHERBOARD_IO 0x0001 -#define EFI_PCI_IO_ATTRIBUTE_ISA_IO 0x0002 -#define EFI_PCI_IO_ATTRIBUTE_VGA_PALETTE_IO 0x0004 -#define EFI_PCI_IO_ATTRIBUTE_VGA_MEMORY 0x0008 -#define EFI_PCI_IO_ATTRIBUTE_VGA_IO 0x0010 -#define EFI_PCI_IO_ATTRIBUTE_IDE_PRIMARY_IO 0x0020 -#define EFI_PCI_IO_ATTRIBUTE_IDE_SECONDARY_IO 0x0040 -#define EFI_PCI_IO_ATTRIBUTE_MEMORY_WRITE_COMBINE 0x0080 -#define EFI_PCI_IO_ATTRIBUTE_IO 0x0100 -#define EFI_PCI_IO_ATTRIBUTE_MEMORY 0x0200 -#define EFI_PCI_IO_ATTRIBUTE_BUS_MASTER 0x0400 -#define EFI_PCI_IO_ATTRIBUTE_MEMORY_CACHED 0x0800 -#define 
EFI_PCI_IO_ATTRIBUTE_MEMORY_DISABLE 0x1000 -#define EFI_PCI_IO_ATTRIBUTE_EMBEDDED_DEVICE 0x2000 -#define EFI_PCI_IO_ATTRIBUTE_EMBEDDED_ROM 0x4000 -#define EFI_PCI_IO_ATTRIBUTE_DUAL_ADDRESS_CYCLE 0x8000 -#define EFI_PCI_IO_ATTRIBUTE_ISA_IO_16 0x10000 -#define EFI_PCI_IO_ATTRIBUTE_VGA_PALETTE_IO_16 0x20000 -#define EFI_PCI_IO_ATTRIBUTE_VGA_IO_16 0x40000 - -struct efi_dev_path; - -typedef union apple_properties_protocol apple_properties_protocol_t; - -union apple_properties_protocol { - struct { - unsigned long version; - efi_status_t (__efiapi *get)(apple_properties_protocol_t *, - struct efi_dev_path *, - efi_char16_t *, void *, u32 *); - efi_status_t (__efiapi *set)(apple_properties_protocol_t *, - struct efi_dev_path *, - efi_char16_t *, void *, u32); - efi_status_t (__efiapi *del)(apple_properties_protocol_t *, - struct efi_dev_path *, - efi_char16_t *); - efi_status_t (__efiapi *get_all)(apple_properties_protocol_t *, - void *buffer, u32 *); - }; - struct { - u32 version; - u32 get; - u32 set; - u32 del; - u32 get_all; - } mixed_mode; -}; - -typedef u32 efi_tcg2_event_log_format; - -typedef union efi_tcg2_protocol efi_tcg2_protocol_t; - -union efi_tcg2_protocol { - struct { - void *get_capability; - efi_status_t (__efiapi *get_event_log)(efi_handle_t, - efi_tcg2_event_log_format, - efi_physical_addr_t *, - efi_physical_addr_t *, - efi_bool_t *); - void *hash_log_extend_event; - void *submit_command; - void *get_active_pcr_banks; - void *set_active_pcr_banks; - void *get_result_of_set_active_pcr_banks; - }; - struct { - u32 get_capability; - u32 get_event_log; - u32 hash_log_extend_event; - u32 submit_command; - u32 get_active_pcr_banks; - u32 set_active_pcr_banks; - u32 get_result_of_set_active_pcr_banks; - } mixed_mode; -}; +typedef union efi_boot_services efi_boot_services_t; /* * Types and defines for EFI ResetSystem @@ -646,6 +332,9 @@ void efi_native_runtime_setup(void); #define EFI_CONSOLE_OUT_DEVICE_GUID EFI_GUID(0xd3b36f2c, 0xd551, 0x11d4, 0x9a, 0x46, 0x00, 0x90, 0x27, 0x3f, 0xc1, 0x4d) #define APPLE_PROPERTIES_PROTOCOL_GUID EFI_GUID(0x91bd12fe, 0xf6c3, 0x44fb, 0xa5, 0xb7, 0x51, 0x22, 0xab, 0x30, 0x3a, 0xe0) #define EFI_TCG2_PROTOCOL_GUID EFI_GUID(0x607f766c, 0x7455, 0x42be, 0x93, 0x0b, 0xe4, 0xd7, 0x6d, 0xb2, 0x72, 0x0f) +#define EFI_LOAD_FILE_PROTOCOL_GUID EFI_GUID(0x56ec3091, 0x954c, 0x11d2, 0x8e, 0x3f, 0x00, 0xa0, 0xc9, 0x69, 0x72, 0x3b) +#define EFI_LOAD_FILE2_PROTOCOL_GUID EFI_GUID(0x4006c0c1, 0xfcb3, 0x403e, 0x99, 0x6d, 0x4a, 0x6c, 0x87, 0x24, 0xe0, 0x6d) +#define EFI_RT_PROPERTIES_TABLE_GUID EFI_GUID(0xeb66918a, 0x7eef, 0x402a, 0x84, 0x2e, 0x93, 0x1d, 0x21, 0xc3, 0x8a, 0xe9) #define EFI_IMAGE_SECURITY_DATABASE_GUID EFI_GUID(0xd719b2cb, 0x3d3a, 0x4596, 0xa3, 0xbc, 0xda, 0xd0, 0x0e, 0x67, 0x65, 0x6f) #define EFI_SHIM_LOCK_GUID EFI_GUID(0x605dab50, 0xe046, 0x4300, 0xab, 0xb6, 0x3d, 0xd8, 0x10, 0xdd, 0x8b, 0x23) @@ -665,6 +354,7 @@ void efi_native_runtime_setup(void); #define LINUX_EFI_TPM_EVENT_LOG_GUID EFI_GUID(0xb7799cb0, 0xeca2, 0x4943, 0x96, 0x67, 0x1f, 0xae, 0x07, 0xb7, 0x47, 0xfa) #define LINUX_EFI_TPM_FINAL_LOG_GUID EFI_GUID(0x1e2ed096, 0x30e2, 0x4254, 0xbd, 0x89, 0x86, 0x3b, 0xbe, 0xf8, 0x23, 0x25) #define LINUX_EFI_MEMRESERVE_TABLE_GUID EFI_GUID(0x888eb0c6, 0x8ede, 0x4ff5, 0xa8, 0xf0, 0x9a, 0xee, 0x5c, 0xb9, 0x77, 0xc2) +#define LINUX_EFI_INITRD_MEDIA_GUID EFI_GUID(0x5568e427, 0x68fc, 0x4f3d, 0xac, 0x74, 0xca, 0x55, 0x52, 0x31, 0xcc, 0x68) /* OEM GUIDs */ #define DELLEMC_EFI_RCI2_TABLE_GUID EFI_GUID(0x2d9f28a2, 0xa886, 0x456a, 0x97, 0xa8, 0xf1, 0x1e, 0xf2, 0x4f, 
0xf4, 0x55) @@ -788,74 +478,6 @@ struct efi_mem_range { u64 attribute; }; -struct efi_fdt_params { - u64 system_table; - u64 mmap; - u32 mmap_size; - u32 desc_size; - u32 desc_ver; -}; - -typedef struct { - u32 revision; - efi_handle_t parent_handle; - efi_system_table_t *system_table; - efi_handle_t device_handle; - void *file_path; - void *reserved; - u32 load_options_size; - void *load_options; - void *image_base; - __aligned_u64 image_size; - unsigned int image_code_type; - unsigned int image_data_type; - efi_status_t ( __efiapi *unload)(efi_handle_t image_handle); -} efi_loaded_image_t; - -typedef struct { - u64 size; - u64 file_size; - u64 phys_size; - efi_time_t create_time; - efi_time_t last_access_time; - efi_time_t modification_time; - __aligned_u64 attribute; - efi_char16_t filename[1]; -} efi_file_info_t; - -typedef struct efi_file_handle efi_file_handle_t; - -struct efi_file_handle { - u64 revision; - efi_status_t (__efiapi *open)(efi_file_handle_t *, - efi_file_handle_t **, - efi_char16_t *, u64, u64); - efi_status_t (__efiapi *close)(efi_file_handle_t *); - void *delete; - efi_status_t (__efiapi *read)(efi_file_handle_t *, - unsigned long *, void *); - void *write; - void *get_position; - void *set_position; - efi_status_t (__efiapi *get_info)(efi_file_handle_t *, - efi_guid_t *, unsigned long *, - void *); - void *set_info; - void *flush; -}; - -typedef struct efi_file_io_interface efi_file_io_interface_t; - -struct efi_file_io_interface { - u64 revision; - int (__efiapi *open_volume)(efi_file_io_interface_t *, - efi_file_handle_t **); -}; - -#define EFI_FILE_MODE_READ 0x0000000000000001 -#define EFI_FILE_MODE_WRITE 0x0000000000000002 -#define EFI_FILE_MODE_CREATE 0x8000000000000000 - typedef struct { u32 version; u32 length; @@ -865,6 +487,14 @@ typedef struct { #define EFI_PROPERTIES_TABLE_VERSION 0x00010000 #define EFI_PROPERTIES_RUNTIME_MEMORY_PROTECTION_NON_EXECUTABLE_PE_DATA 0x1 +typedef struct { + u16 version; + u16 length; + u32 runtime_services_supported; +} efi_rt_properties_table_t; + +#define EFI_RT_PROPERTIES_TABLE_VERSION 0x1 + #define EFI_INVALID_TABLE_ADDR (~0UL) typedef struct { @@ -896,48 +526,63 @@ typedef struct { efi_time_t time_of_revocation; } efi_cert_x509_sha256_t; +extern unsigned long __ro_after_init efi_rng_seed; /* RNG Seed table */ + /* * All runtime access to EFI goes through this structure: */ extern struct efi { - efi_system_table_t *systab; /* EFI system table */ - unsigned int runtime_version; /* Runtime services version */ - unsigned long mps; /* MPS table */ - unsigned long acpi; /* ACPI table (IA64 ext 0.71) */ - unsigned long acpi20; /* ACPI table (ACPI 2.0) */ - unsigned long smbios; /* SMBIOS table (32 bit entry point) */ - unsigned long smbios3; /* SMBIOS table (64 bit entry point) */ - unsigned long boot_info; /* boot info table */ - unsigned long hcdp; /* HCDP table */ - unsigned long uga; /* UGA table */ - unsigned long fw_vendor; /* fw_vendor */ - unsigned long runtime; /* runtime table */ - unsigned long config_table; /* config tables */ - unsigned long esrt; /* ESRT table */ - unsigned long properties_table; /* properties table */ - unsigned long mem_attr_table; /* memory attributes table */ - unsigned long rng_seed; /* UEFI firmware random seed */ - unsigned long tpm_log; /* TPM2 Event Log table */ - unsigned long tpm_final_log; /* TPM2 Final Events Log table */ - unsigned long mem_reserve; /* Linux EFI memreserve table */ - efi_get_time_t *get_time; - efi_set_time_t *set_time; - efi_get_wakeup_time_t *get_wakeup_time; - 
efi_set_wakeup_time_t *set_wakeup_time; - efi_get_variable_t *get_variable; - efi_get_next_variable_t *get_next_variable; - efi_set_variable_t *set_variable; - efi_set_variable_t *set_variable_nonblocking; - efi_query_variable_info_t *query_variable_info; - efi_query_variable_info_t *query_variable_info_nonblocking; - efi_update_capsule_t *update_capsule; - efi_query_capsule_caps_t *query_capsule_caps; - efi_get_next_high_mono_count_t *get_next_high_mono_count; - efi_reset_system_t *reset_system; - struct efi_memory_map memmap; - unsigned long flags; + const efi_runtime_services_t *runtime; /* EFI runtime services table */ + unsigned int runtime_version; /* Runtime services version */ + unsigned int runtime_supported_mask; + + unsigned long acpi; /* ACPI table (IA64 ext 0.71) */ + unsigned long acpi20; /* ACPI table (ACPI 2.0) */ + unsigned long smbios; /* SMBIOS table (32 bit entry point) */ + unsigned long smbios3; /* SMBIOS table (64 bit entry point) */ + unsigned long esrt; /* ESRT table */ + unsigned long tpm_log; /* TPM2 Event Log table */ + unsigned long tpm_final_log; /* TPM2 Final Events Log table */ + + efi_get_time_t *get_time; + efi_set_time_t *set_time; + efi_get_wakeup_time_t *get_wakeup_time; + efi_set_wakeup_time_t *set_wakeup_time; + efi_get_variable_t *get_variable; + efi_get_next_variable_t *get_next_variable; + efi_set_variable_t *set_variable; + efi_set_variable_t *set_variable_nonblocking; + efi_query_variable_info_t *query_variable_info; + efi_query_variable_info_t *query_variable_info_nonblocking; + efi_update_capsule_t *update_capsule; + efi_query_capsule_caps_t *query_capsule_caps; + efi_get_next_high_mono_count_t *get_next_high_mono_count; + efi_reset_system_t *reset_system; + + struct efi_memory_map memmap; + unsigned long flags; } efi; +#define EFI_RT_SUPPORTED_GET_TIME 0x0001 +#define EFI_RT_SUPPORTED_SET_TIME 0x0002 +#define EFI_RT_SUPPORTED_GET_WAKEUP_TIME 0x0004 +#define EFI_RT_SUPPORTED_SET_WAKEUP_TIME 0x0008 +#define EFI_RT_SUPPORTED_GET_VARIABLE 0x0010 +#define EFI_RT_SUPPORTED_GET_NEXT_VARIABLE_NAME 0x0020 +#define EFI_RT_SUPPORTED_SET_VARIABLE 0x0040 +#define EFI_RT_SUPPORTED_SET_VIRTUAL_ADDRESS_MAP 0x0080 +#define EFI_RT_SUPPORTED_CONVERT_POINTER 0x0100 +#define EFI_RT_SUPPORTED_GET_NEXT_HIGH_MONOTONIC_COUNT 0x0200 +#define EFI_RT_SUPPORTED_RESET_SYSTEM 0x0400 +#define EFI_RT_SUPPORTED_UPDATE_CAPSULE 0x0800 +#define EFI_RT_SUPPORTED_QUERY_CAPSULE_CAPABILITIES 0x1000 +#define EFI_RT_SUPPORTED_QUERY_VARIABLE_INFO 0x2000 + +#define EFI_RT_SUPPORTED_ALL 0x3fff + +#define EFI_RT_SUPPORTED_TIME_SERVICES 0x000f +#define EFI_RT_SUPPORTED_VARIABLE_SERVICES 0x0070 + extern struct mm_struct efi_mm; static inline int @@ -987,14 +632,18 @@ extern int __init efi_memmap_split_count(efi_memory_desc_t *md, extern void __init efi_memmap_insert(struct efi_memory_map *old_memmap, void *buf, struct efi_mem_range *mem); -extern int efi_config_init(efi_config_table_type_t *arch_tables); #ifdef CONFIG_EFI_ESRT extern void __init efi_esrt_init(void); #else static inline void efi_esrt_init(void) { } #endif -extern int efi_config_parse_tables(void *config_tables, int count, int sz, - efi_config_table_type_t *arch_tables); +extern int efi_config_parse_tables(const efi_config_table_t *config_tables, + int count, + const efi_config_table_type_t *arch_tables); +extern int efi_systab_check_header(const efi_table_hdr_t *systab_hdr, + int min_major_version); +extern void efi_systab_report_header(const efi_table_hdr_t *systab_hdr, + unsigned long fw_vendor); extern u64 efi_get_iobase 
(void); extern int efi_mem_type(unsigned long phys_addr); extern u64 efi_mem_attributes (unsigned long phys_addr); @@ -1006,7 +655,7 @@ extern void efi_mem_reserve(phys_addr_t addr, u64 size); extern int efi_mem_reserve_persistent(phys_addr_t addr, u64 size); extern void efi_initialize_iomem_resources(struct resource *code_resource, struct resource *data_resource, struct resource *bss_resource); -extern int efi_get_fdt_params(struct efi_fdt_params *params); +extern u64 efi_get_fdt_params(struct efi_memory_map_data *data); extern struct kobject *efi_kobj; extern int efi_reboot_quirk_mode; @@ -1018,6 +667,8 @@ extern void __init efi_fake_memmap(void); static inline void efi_fake_memmap(void) { } #endif +extern unsigned long efi_mem_attr_table; + /* * efi_memattr_perm_setter - arch specific callback function passed into * efi_memattr_apply_permissions() that updates the @@ -1124,6 +775,7 @@ extern int __init efi_setup_pcdp_console(char *); #define EFI_NX_PE_DATA 9 /* Can runtime data regions be mapped non-executable? */ #define EFI_MEM_ATTR 10 /* Did firmware publish an EFI_MEMORY_ATTRIBUTES table? */ #define EFI_MEM_NO_SOFT_RESERVE 11 /* Is the kernel configured to ignore soft reservations? */ +#define EFI_PRESERVE_BS_REGIONS 12 /* Are EFI boot-services memory segments available? */ #ifdef CONFIG_EFI /* @@ -1142,6 +794,11 @@ static inline bool __pure efi_soft_reserve_enabled(void) return IS_ENABLED(CONFIG_EFI_SOFT_RESERVE) && __efi_soft_reserve_enabled(); } + +static inline bool efi_rt_services_supported(unsigned int mask) +{ + return (efi.runtime_supported_mask & mask) == mask; +} #else static inline bool efi_enabled(int feature) { @@ -1160,6 +817,11 @@ static inline bool efi_soft_reserve_enabled(void) { return false; } + +static inline bool efi_rt_services_supported(unsigned int mask) +{ + return false; +} #endif extern int efi_status_to_err(efi_status_t status); @@ -1189,13 +851,6 @@ extern int efi_status_to_err(efi_status_t status); #define EFI_VARIABLE_GUID_LEN UUID_STRING_LEN /* - * The type of search to perform when calling boottime->locate_handle - */ -#define EFI_LOCATE_ALL_HANDLES 0 -#define EFI_LOCATE_BY_REGISTER_NOTIFY 1 -#define EFI_LOCATE_BY_PROTOCOL 2 - -/* * EFI Device Path information */ #define EFI_DEV_HW 0x01 @@ -1234,30 +889,40 @@ extern int efi_status_to_err(efi_status_t status); #define EFI_DEV_END_ENTIRE 0xFF struct efi_generic_dev_path { - u8 type; - u8 sub_type; - u16 length; -} __attribute ((packed)); + u8 type; + u8 sub_type; + u16 length; +} __packed; + +struct efi_acpi_dev_path { + struct efi_generic_dev_path header; + u32 hid; + u32 uid; +} __packed; + +struct efi_pci_dev_path { + struct efi_generic_dev_path header; + u8 fn; + u8 dev; +} __packed; + +struct efi_vendor_dev_path { + struct efi_generic_dev_path header; + efi_guid_t vendorguid; + u8 vendordata[]; +} __packed; struct efi_dev_path { - u8 type; /* can be replaced with unnamed */ - u8 sub_type; /* struct efi_generic_dev_path; */ - u16 length; /* once we've moved to -std=c11 */ union { - struct { - u32 hid; - u32 uid; - } acpi; - struct { - u8 fn; - u8 dev; - } pci; + struct efi_generic_dev_path header; + struct efi_acpi_dev_path acpi; + struct efi_pci_dev_path pci; + struct efi_vendor_dev_path vendor; }; -} __attribute ((packed)); +} __packed; -#if IS_ENABLED(CONFIG_EFI_DEV_PATH_PARSER) -struct device *efi_get_device_by_path(struct efi_dev_path **node, size_t *len); -#endif +struct device *efi_get_device_by_path(const struct efi_dev_path **node, + size_t *len); static inline void 
memrange_efi_to_native(u64 *addr, u64 *npages) { @@ -1312,80 +977,6 @@ struct efivar_entry { bool deleting; }; -union efi_simple_text_output_protocol { - struct { - void *reset; - efi_status_t (__efiapi *output_string)(efi_simple_text_output_protocol_t *, - efi_char16_t *); - void *test_string; - }; - struct { - u32 reset; - u32 output_string; - u32 test_string; - } mixed_mode; -}; - -#define PIXEL_RGB_RESERVED_8BIT_PER_COLOR 0 -#define PIXEL_BGR_RESERVED_8BIT_PER_COLOR 1 -#define PIXEL_BIT_MASK 2 -#define PIXEL_BLT_ONLY 3 -#define PIXEL_FORMAT_MAX 4 - -typedef struct { - u32 red_mask; - u32 green_mask; - u32 blue_mask; - u32 reserved_mask; -} efi_pixel_bitmask_t; - -typedef struct { - u32 version; - u32 horizontal_resolution; - u32 vertical_resolution; - int pixel_format; - efi_pixel_bitmask_t pixel_information; - u32 pixels_per_scan_line; -} efi_graphics_output_mode_info_t; - -typedef union efi_graphics_output_protocol_mode efi_graphics_output_protocol_mode_t; - -union efi_graphics_output_protocol_mode { - struct { - u32 max_mode; - u32 mode; - efi_graphics_output_mode_info_t *info; - unsigned long size_of_info; - efi_physical_addr_t frame_buffer_base; - unsigned long frame_buffer_size; - }; - struct { - u32 max_mode; - u32 mode; - u32 info; - u32 size_of_info; - u64 frame_buffer_base; - u32 frame_buffer_size; - } mixed_mode; -}; - -typedef union efi_graphics_output_protocol efi_graphics_output_protocol_t; - -union efi_graphics_output_protocol { - struct { - void *query_mode; - void *set_mode; - void *blt; - efi_graphics_output_protocol_mode_t *mode; - }; - struct { - u32 query_mode; - u32 set_mode; - u32 blt; - u32 mode; - } mixed_mode; -}; - extern struct list_head efivar_sysfs_list; static inline void @@ -1483,52 +1074,6 @@ static inline int efi_runtime_map_copy(void *buf, size_t bufsz) #endif -/* prototypes shared between arch specific and generic stub code */ - -void efi_printk(char *str); - -void efi_free(unsigned long size, unsigned long addr); - -char *efi_convert_cmdline(efi_loaded_image_t *image, int *cmd_line_len); - -efi_status_t efi_get_memory_map(struct efi_boot_memmap *map); - -efi_status_t efi_low_alloc_above(unsigned long size, unsigned long align, - unsigned long *addr, unsigned long min); - -static inline -efi_status_t efi_low_alloc(unsigned long size, unsigned long align, - unsigned long *addr) -{ - /* - * Don't allocate at 0x0. It will confuse code that - * checks pointers against NULL. Skip the first 8 - * bytes so we start at a nice even number. 
- */ - return efi_low_alloc_above(size, align, addr, 0x8); -} - -efi_status_t efi_high_alloc(unsigned long size, unsigned long align, - unsigned long *addr, unsigned long max); - -efi_status_t efi_relocate_kernel(unsigned long *image_addr, - unsigned long image_size, - unsigned long alloc_size, - unsigned long preferred_addr, - unsigned long alignment, - unsigned long min_addr); - -efi_status_t handle_cmdline_files(efi_loaded_image_t *image, - char *cmd_line, char *option_string, - unsigned long max_addr, - unsigned long *load_addr, - unsigned long *load_size); - -efi_status_t efi_parse_options(char const *cmdline); - -efi_status_t efi_setup_gop(struct screen_info *si, efi_guid_t *proto, - unsigned long size); - #ifdef CONFIG_EFI extern bool efi_runtime_disabled(void); #else @@ -1553,6 +1098,12 @@ static inline void efi_enable_reset_attack_mitigation(void) { } #endif +#ifdef CONFIG_EFI_EMBEDDED_FIRMWARE +void efi_check_for_embedded_firmwares(void); +#else +static inline void efi_check_for_embedded_firmwares(void) { } +#endif + efi_status_t efi_random_get_seed(void); void efi_retrieve_tpm2_eventlog(void); @@ -1606,15 +1157,6 @@ void efi_retrieve_tpm2_eventlog(void); arch_efi_call_virt_teardown(); \ }) -typedef efi_status_t (*efi_exit_boot_map_processing)( - struct efi_boot_memmap *map, - void *priv); - -efi_status_t efi_exit_boot_services(void *handle, - struct efi_boot_memmap *map, - void *priv, - efi_exit_boot_map_processing priv_func); - #define EFI_RANDOM_SEED_SIZE 64U struct linux_efi_random_seed { @@ -1701,6 +1243,6 @@ struct linux_efi_memreserve { #define EFI_MEMRESERVE_COUNT(size) (((size) - sizeof(struct linux_efi_memreserve)) \ / sizeof(((struct linux_efi_memreserve *)0)->entry[0])) -void efi_pci_disable_bridge_busmaster(void); +void __init efi_arch_mem_reserve(phys_addr_t addr, u64 size); #endif /* _LINUX_EFI_H */ diff --git a/include/linux/efi_embedded_fw.h b/include/linux/efi_embedded_fw.h new file mode 100644 index 000000000000..57eac5241303 --- /dev/null +++ b/include/linux/efi_embedded_fw.h @@ -0,0 +1,43 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_EFI_EMBEDDED_FW_H +#define _LINUX_EFI_EMBEDDED_FW_H + +#include <linux/list.h> +#include <linux/mod_devicetable.h> + +#define EFI_EMBEDDED_FW_PREFIX_LEN 8 + +/* + * This struct and efi_embedded_fw_list are private to the efi-embedded fw + * implementation they are in this header for use by lib/test_firmware.c only! + */ +struct efi_embedded_fw { + struct list_head list; + const char *name; + const u8 *data; + size_t length; +}; + +extern struct list_head efi_embedded_fw_list; + +/** + * struct efi_embedded_fw_desc - This struct is used by the EFI embedded-fw + * code to search for embedded firmwares. 
+ * + * @name: Name to register the firmware with if found + * @prefix: First 8 bytes of the firmware + * @length: Length of the firmware in bytes including prefix + * @sha256: SHA256 of the firmware + */ +struct efi_embedded_fw_desc { + const char *name; + u8 prefix[EFI_EMBEDDED_FW_PREFIX_LEN]; + u32 length; + u8 sha256[32]; +}; + +extern const struct dmi_system_id touchscreen_dmi_table[]; + +int efi_get_embedded_fw(const char *name, const u8 **dat, size_t *sz); + +#endif diff --git a/include/linux/elfnote.h b/include/linux/elfnote.h index f236f5b931b2..594d4e78654f 100644 --- a/include/linux/elfnote.h +++ b/include/linux/elfnote.h @@ -59,7 +59,7 @@ ELFNOTE_END #else /* !__ASSEMBLER__ */ -#include <linux/elf.h> +#include <uapi/linux/elf.h> /* * Use an anonymous structure which matches the shape of * Elf{32,64}_Nhdr, but includes the name and desc data. The size and diff --git a/include/linux/err.h b/include/linux/err.h index 87be24350e91..a139c64aef2a 100644 --- a/include/linux/err.h +++ b/include/linux/err.h @@ -62,9 +62,6 @@ static inline int __must_check PTR_ERR_OR_ZERO(__force const void *ptr) return 0; } -/* Deprecated */ -#define PTR_RET(p) PTR_ERR_OR_ZERO(p) - #endif #endif /* _LINUX_ERR_H */ diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h index 95991e4300bf..c1d379bf6ee1 100644 --- a/include/linux/ethtool.h +++ b/include/linux/ethtool.h @@ -177,8 +177,57 @@ void ethtool_convert_legacy_u32_to_link_mode(unsigned long *dst, bool ethtool_convert_link_mode_to_legacy_u32(u32 *legacy_u32, const unsigned long *src); +#define ETHTOOL_COALESCE_RX_USECS BIT(0) +#define ETHTOOL_COALESCE_RX_MAX_FRAMES BIT(1) +#define ETHTOOL_COALESCE_RX_USECS_IRQ BIT(2) +#define ETHTOOL_COALESCE_RX_MAX_FRAMES_IRQ BIT(3) +#define ETHTOOL_COALESCE_TX_USECS BIT(4) +#define ETHTOOL_COALESCE_TX_MAX_FRAMES BIT(5) +#define ETHTOOL_COALESCE_TX_USECS_IRQ BIT(6) +#define ETHTOOL_COALESCE_TX_MAX_FRAMES_IRQ BIT(7) +#define ETHTOOL_COALESCE_STATS_BLOCK_USECS BIT(8) +#define ETHTOOL_COALESCE_USE_ADAPTIVE_RX BIT(9) +#define ETHTOOL_COALESCE_USE_ADAPTIVE_TX BIT(10) +#define ETHTOOL_COALESCE_PKT_RATE_LOW BIT(11) +#define ETHTOOL_COALESCE_RX_USECS_LOW BIT(12) +#define ETHTOOL_COALESCE_RX_MAX_FRAMES_LOW BIT(13) +#define ETHTOOL_COALESCE_TX_USECS_LOW BIT(14) +#define ETHTOOL_COALESCE_TX_MAX_FRAMES_LOW BIT(15) +#define ETHTOOL_COALESCE_PKT_RATE_HIGH BIT(16) +#define ETHTOOL_COALESCE_RX_USECS_HIGH BIT(17) +#define ETHTOOL_COALESCE_RX_MAX_FRAMES_HIGH BIT(18) +#define ETHTOOL_COALESCE_TX_USECS_HIGH BIT(19) +#define ETHTOOL_COALESCE_TX_MAX_FRAMES_HIGH BIT(20) +#define ETHTOOL_COALESCE_RATE_SAMPLE_INTERVAL BIT(21) + +#define ETHTOOL_COALESCE_USECS \ + (ETHTOOL_COALESCE_RX_USECS | ETHTOOL_COALESCE_TX_USECS) +#define ETHTOOL_COALESCE_MAX_FRAMES \ + (ETHTOOL_COALESCE_RX_MAX_FRAMES | ETHTOOL_COALESCE_TX_MAX_FRAMES) +#define ETHTOOL_COALESCE_USECS_IRQ \ + (ETHTOOL_COALESCE_RX_USECS_IRQ | ETHTOOL_COALESCE_TX_USECS_IRQ) +#define ETHTOOL_COALESCE_MAX_FRAMES_IRQ \ + (ETHTOOL_COALESCE_RX_MAX_FRAMES_IRQ | \ + ETHTOOL_COALESCE_TX_MAX_FRAMES_IRQ) +#define ETHTOOL_COALESCE_USE_ADAPTIVE \ + (ETHTOOL_COALESCE_USE_ADAPTIVE_RX | ETHTOOL_COALESCE_USE_ADAPTIVE_TX) +#define ETHTOOL_COALESCE_USECS_LOW_HIGH \ + (ETHTOOL_COALESCE_RX_USECS_LOW | ETHTOOL_COALESCE_TX_USECS_LOW | \ + ETHTOOL_COALESCE_RX_USECS_HIGH | ETHTOOL_COALESCE_TX_USECS_HIGH) +#define ETHTOOL_COALESCE_MAX_FRAMES_LOW_HIGH \ + (ETHTOOL_COALESCE_RX_MAX_FRAMES_LOW | \ + ETHTOOL_COALESCE_TX_MAX_FRAMES_LOW | \ + ETHTOOL_COALESCE_RX_MAX_FRAMES_HIGH | \ + 
ETHTOOL_COALESCE_TX_MAX_FRAMES_HIGH) +#define ETHTOOL_COALESCE_PKT_RATE_RX_USECS \ + (ETHTOOL_COALESCE_USE_ADAPTIVE_RX | \ + ETHTOOL_COALESCE_RX_USECS_LOW | ETHTOOL_COALESCE_RX_USECS_HIGH | \ + ETHTOOL_COALESCE_PKT_RATE_LOW | ETHTOOL_COALESCE_PKT_RATE_HIGH | \ + ETHTOOL_COALESCE_RATE_SAMPLE_INTERVAL) + /** * struct ethtool_ops - optional netdev operations + * @supported_coalesce_params: supported types of interrupt coalescing. * @get_drvinfo: Report driver/device information. Should only set the * @driver, @version, @fw_version and @bus_info fields. If not * implemented, the @driver and @bus_info fields will be filled in @@ -207,8 +256,9 @@ bool ethtool_convert_link_mode_to_legacy_u32(u32 *legacy_u32, * or zero. * @get_coalesce: Get interrupt coalescing parameters. Returns a negative * error code or zero. - * @set_coalesce: Set interrupt coalescing parameters. Returns a negative - * error code or zero. + * @set_coalesce: Set interrupt coalescing parameters. Supported coalescing + * types should be set in @supported_coalesce_params. + * Returns a negative error code or zero. * @get_ringparam: Report ring sizes * @set_ringparam: Set ring sizes. Returns a negative error code or zero. * @get_pauseparam: Report pause parameters @@ -292,7 +342,8 @@ bool ethtool_convert_link_mode_to_legacy_u32(u32 *legacy_u32, * @set_per_queue_coalesce: Set interrupt coalescing parameters per queue. * It must check that the given queue number is valid. If neither a RX nor * a TX queue has this number, return -EINVAL. If only a RX queue or a TX - * queue has this number, ignore the inapplicable fields. + * queue has this number, ignore the inapplicable fields. Supported + * coalescing types should be set in @supported_coalesce_params. * Returns a negative error code or zero. * @get_link_ksettings: Get various device settings including Ethernet link * settings. The %cmd and %link_mode_masks_nwords fields should be @@ -323,6 +374,7 @@ bool ethtool_convert_link_mode_to_legacy_u32(u32 *legacy_u32, * of the generic netdev features interface. */ struct ethtool_ops { + u32 supported_coalesce_params; void (*get_drvinfo)(struct net_device *, struct ethtool_drvinfo *); int (*get_regs_len)(struct net_device *); void (*get_regs)(struct net_device *, struct ethtool_regs *, void *); @@ -406,6 +458,8 @@ struct ethtool_ops { struct ethtool_stats *, u64 *); }; +int ethtool_check_ops(const struct ethtool_ops *ops); + struct ethtool_rx_flow_rule { struct flow_rule *rule; unsigned long priv[0]; @@ -420,4 +474,10 @@ struct ethtool_rx_flow_rule * ethtool_rx_flow_rule_create(const struct ethtool_rx_flow_spec_input *input); void ethtool_rx_flow_rule_destroy(struct ethtool_rx_flow_rule *rule); +bool ethtool_virtdev_validate_cmd(const struct ethtool_link_ksettings *cmd); +int ethtool_virtdev_set_link_ksettings(struct net_device *dev, + const struct ethtool_link_ksettings *cmd, + u32 *dev_speed, u8 *dev_duplex); + + #endif /* _LINUX_ETHTOOL_H */ diff --git a/include/linux/extcon-provider.h b/include/linux/extcon-provider.h index 1c143d200caa..fa70945f4e6b 100644 --- a/include/linux/extcon-provider.h +++ b/include/linux/extcon-provider.h @@ -17,30 +17,30 @@ struct extcon_dev; #if IS_ENABLED(CONFIG_EXTCON) /* Following APIs register/unregister the extcon device. 
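Tying the new ethtool coalescing bits above together: a driver advertises what it actually honours in @supported_coalesce_params, and the ethtool core can then refuse ETHTOOL_SCOALESCE requests that touch anything else. A minimal sketch, with hypothetical foo_* identifiers:

#include <linux/ethtool.h>

static int foo_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec);
static int foo_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec);

static const struct ethtool_ops foo_ethtool_ops = {
        /* only these fields may be changed via "ethtool -C" */
        .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
                                     ETHTOOL_COALESCE_MAX_FRAMES |
                                     ETHTOOL_COALESCE_USE_ADAPTIVE_RX,
        .get_coalesce   = foo_get_coalesce,
        .set_coalesce   = foo_set_coalesce,
};

With the mask declared, foo_set_coalesce() should no longer need to open-code rejection of the struct ethtool_coalesce fields it ignores.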
*/ -extern int extcon_dev_register(struct extcon_dev *edev); -extern void extcon_dev_unregister(struct extcon_dev *edev); -extern int devm_extcon_dev_register(struct device *dev, +int extcon_dev_register(struct extcon_dev *edev); +void extcon_dev_unregister(struct extcon_dev *edev); +int devm_extcon_dev_register(struct device *dev, struct extcon_dev *edev); -extern void devm_extcon_dev_unregister(struct device *dev, +void devm_extcon_dev_unregister(struct device *dev, struct extcon_dev *edev); /* Following APIs allocate/free the memory of the extcon device. */ -extern struct extcon_dev *extcon_dev_allocate(const unsigned int *cable); -extern void extcon_dev_free(struct extcon_dev *edev); -extern struct extcon_dev *devm_extcon_dev_allocate(struct device *dev, +struct extcon_dev *extcon_dev_allocate(const unsigned int *cable); +void extcon_dev_free(struct extcon_dev *edev); +struct extcon_dev *devm_extcon_dev_allocate(struct device *dev, const unsigned int *cable); -extern void devm_extcon_dev_free(struct device *dev, struct extcon_dev *edev); +void devm_extcon_dev_free(struct device *dev, struct extcon_dev *edev); /* Synchronize the state and property value for each external connector. */ -extern int extcon_sync(struct extcon_dev *edev, unsigned int id); +int extcon_sync(struct extcon_dev *edev, unsigned int id); /* * Following APIs set the connected state of each external connector. * The 'id' argument indicates the defined external connector. */ -extern int extcon_set_state(struct extcon_dev *edev, unsigned int id, +int extcon_set_state(struct extcon_dev *edev, unsigned int id, bool state); -extern int extcon_set_state_sync(struct extcon_dev *edev, unsigned int id, +int extcon_set_state_sync(struct extcon_dev *edev, unsigned int id, bool state); /* @@ -52,13 +52,13 @@ extern int extcon_set_state_sync(struct extcon_dev *edev, unsigned int id, * for each external connector. They are used to set the capability of the * property of each external connector based on the id and property. 
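For context, a provider built on these calls might report a USB cable roughly like this (illustrative only; foo_report_usb() is hypothetical, error handling is omitted, and the EXTCON_PROP_USB_VBUS capability is assumed to have been declared with extcon_set_property_capability() at probe time):

#include <linux/extcon-provider.h>

static int foo_report_usb(struct extcon_dev *edev, bool attached)
{
        /* VBUS is a boolean property: 1 = high, 0 = low */
        union extcon_property_value vbus = { .intval = attached ? 1 : 0 };

        extcon_set_state(edev, EXTCON_USB, attached);
        extcon_set_property(edev, EXTCON_USB, EXTCON_PROP_USB_VBUS, vbus);

        /* notify consumers of the new state and property in one go */
        return extcon_sync(edev, EXTCON_USB);
}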
*/ -extern int extcon_set_property(struct extcon_dev *edev, unsigned int id, +int extcon_set_property(struct extcon_dev *edev, unsigned int id, unsigned int prop, union extcon_property_value prop_val); -extern int extcon_set_property_sync(struct extcon_dev *edev, unsigned int id, +int extcon_set_property_sync(struct extcon_dev *edev, unsigned int id, unsigned int prop, union extcon_property_value prop_val); -extern int extcon_set_property_capability(struct extcon_dev *edev, +int extcon_set_property_capability(struct extcon_dev *edev, unsigned int id, unsigned int prop); #else /* CONFIG_EXTCON */ diff --git a/include/linux/extcon.h b/include/linux/extcon.h index 1b1d77ec2114..fd183fb9c20f 100644 --- a/include/linux/extcon.h +++ b/include/linux/extcon.h @@ -286,6 +286,11 @@ static inline struct extcon_dev *extcon_get_edev_by_phandle(struct device *dev, { return ERR_PTR(-ENODEV); } + +static inline const char *extcon_get_edev_name(struct extcon_dev *edev) +{ + return NULL; +} #endif /* CONFIG_EXTCON */ /* diff --git a/include/linux/f2fs_fs.h b/include/linux/f2fs_fs.h index ac3f4888b3df..3c383ddd92dd 100644 --- a/include/linux/f2fs_fs.h +++ b/include/linux/f2fs_fs.h @@ -125,6 +125,7 @@ struct f2fs_super_block { /* * For checkpoint */ +#define CP_RESIZEFS_FLAG 0x00004000 #define CP_DISABLED_QUICK_FLAG 0x00002000 #define CP_DISABLED_FLAG 0x00001000 #define CP_QUOTA_NEED_FSCK_FLAG 0x00000800 diff --git a/include/linux/fanotify.h b/include/linux/fanotify.h index b79fa9bb7359..3049a6c06d9e 100644 --- a/include/linux/fanotify.h +++ b/include/linux/fanotify.h @@ -47,7 +47,8 @@ * Directory entry modification events - reported only to directory * where entry is modified and not to a watching parent. */ -#define FANOTIFY_DIRENT_EVENTS (FAN_MOVE | FAN_CREATE | FAN_DELETE) +#define FANOTIFY_DIRENT_EVENTS (FAN_MOVE | FAN_CREATE | FAN_DELETE | \ + FAN_DIR_MODIFY) /* Events that can only be reported with data type FSNOTIFY_EVENT_INODE */ #define FANOTIFY_INODE_EVENTS (FANOTIFY_DIRENT_EVENTS | \ diff --git a/include/linux/file.h b/include/linux/file.h index c6c7b24ea9f7..142d102f285e 100644 --- a/include/linux/file.h +++ b/include/linux/file.h @@ -85,6 +85,7 @@ extern int f_dupfd(unsigned int from, struct file *file, unsigned flags); extern int replace_fd(unsigned fd, struct file *file, unsigned flags); extern void set_close_on_exec(unsigned int fd, int flag); extern bool get_close_on_exec(unsigned int fd); +extern int __get_unused_fd_flags(unsigned flags, unsigned long nofile); extern int get_unused_fd_flags(unsigned flags); extern void put_unused_fd(unsigned int fd); diff --git a/include/linux/filter.h b/include/linux/filter.h index f349e2c0884c..9b5aa5c483cc 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@ -561,7 +561,7 @@ DECLARE_STATIC_KEY_FALSE(bpf_stats_enabled_key); #define __BPF_PROG_RUN(prog, ctx, dfunc) ({ \ u32 ret; \ - cant_sleep(); \ + cant_migrate(); \ if (static_branch_unlikely(&bpf_stats_enabled_key)) { \ struct bpf_prog_stats *stats; \ u64 start = sched_clock(); \ @@ -576,8 +576,30 @@ DECLARE_STATIC_KEY_FALSE(bpf_stats_enabled_key); } \ ret; }) -#define BPF_PROG_RUN(prog, ctx) __BPF_PROG_RUN(prog, ctx, \ - bpf_dispatcher_nopfunc) +#define BPF_PROG_RUN(prog, ctx) \ + __BPF_PROG_RUN(prog, ctx, bpf_dispatcher_nop_func) + +/* + * Use in preemptible and therefore migratable context to make sure that + * the execution of the BPF program runs on one CPU. 
+ * + * This uses migrate_disable/enable() explicitly to document that the + * invocation of a BPF program does not require reentrancy protection + * against a BPF program which is invoked from a preempting task. + * + * For non RT enabled kernels migrate_disable/enable() maps to + * preempt_disable/enable(), i.e. it disables also preemption. + */ +static inline u32 bpf_prog_run_pin_on_cpu(const struct bpf_prog *prog, + const void *ctx) +{ + u32 ret; + + migrate_disable(); + ret = __BPF_PROG_RUN(prog, ctx, bpf_dispatcher_nop_func); + migrate_enable(); + return ret; +} #define BPF_SKB_CB_LEN QDISC_CB_PRIV_LEN @@ -655,6 +677,7 @@ static inline u8 *bpf_skb_cb(struct sk_buff *skb) return qdisc_skb_cb(skb)->data; } +/* Must be invoked with migration disabled */ static inline u32 __bpf_prog_run_save_cb(const struct bpf_prog *prog, struct sk_buff *skb) { @@ -680,9 +703,9 @@ static inline u32 bpf_prog_run_save_cb(const struct bpf_prog *prog, { u32 res; - preempt_disable(); + migrate_disable(); res = __bpf_prog_run_save_cb(prog, skb); - preempt_enable(); + migrate_enable(); return res; } @@ -695,13 +718,11 @@ static inline u32 bpf_prog_run_clear_cb(const struct bpf_prog *prog, if (unlikely(prog->cb_access)) memset(cb_data, 0, BPF_SKB_CB_LEN); - preempt_disable(); - res = BPF_PROG_RUN(prog, skb); - preempt_enable(); + res = bpf_prog_run_pin_on_cpu(prog, skb); return res; } -DECLARE_BPF_DISPATCHER(bpf_dispatcher_xdp) +DECLARE_BPF_DISPATCHER(xdp) static __always_inline u32 bpf_prog_run_xdp(const struct bpf_prog *prog, struct xdp_buff *xdp) @@ -712,8 +733,7 @@ static __always_inline u32 bpf_prog_run_xdp(const struct bpf_prog *prog, * already takes rcu_read_lock() when fetching the program, so * it's not necessary here anymore. */ - return __BPF_PROG_RUN(prog, xdp, - BPF_DISPATCHER_FUNC(bpf_dispatcher_xdp)); + return __BPF_PROG_RUN(prog, xdp, BPF_DISPATCHER_FUNC(xdp)); } void bpf_prog_change_xdp(struct bpf_prog *prev_prog, struct bpf_prog *prog); @@ -1063,7 +1083,6 @@ bpf_address_lookup(unsigned long addr, unsigned long *size, void bpf_prog_kallsyms_add(struct bpf_prog *fp); void bpf_prog_kallsyms_del(struct bpf_prog *fp); -void bpf_get_prog_name(const struct bpf_prog *prog, char *sym); #else /* CONFIG_BPF_JIT */ @@ -1132,11 +1151,6 @@ static inline void bpf_prog_kallsyms_del(struct bpf_prog *fp) { } -static inline void bpf_get_prog_name(const struct bpf_prog *prog, char *sym) -{ - sym[0] = '\0'; -} - #endif /* CONFIG_BPF_JIT */ void bpf_prog_kallsyms_del_all(struct bpf_prog *fp); diff --git a/include/linux/firmware.h b/include/linux/firmware.h index 2dd566c91d44..4bbd0afd91b7 100644 --- a/include/linux/firmware.h +++ b/include/linux/firmware.h @@ -44,6 +44,8 @@ int request_firmware(const struct firmware **fw, const char *name, struct device *device); int firmware_request_nowarn(const struct firmware **fw, const char *name, struct device *device); +int firmware_request_platform(const struct firmware **fw, const char *name, + struct device *device); int request_firmware_nowait( struct module *module, bool uevent, const char *name, struct device *device, gfp_t gfp, void *context, @@ -69,6 +71,13 @@ static inline int firmware_request_nowarn(const struct firmware **fw, return -EINVAL; } +static inline int firmware_request_platform(const struct firmware **fw, + const char *name, + struct device *device) +{ + return -EINVAL; +} + static inline int request_firmware_nowait( struct module *module, bool uevent, const char *name, struct device *device, gfp_t gfp, void *context, diff --git 
a/include/linux/firmware/imx/ipc.h b/include/linux/firmware/imx/ipc.h index 6312c8cb084a..891057434858 100644 --- a/include/linux/firmware/imx/ipc.h +++ b/include/linux/firmware/imx/ipc.h @@ -25,7 +25,6 @@ enum imx_sc_rpc_svc { IMX_SC_RPC_SVC_PAD = 6, IMX_SC_RPC_SVC_MISC = 7, IMX_SC_RPC_SVC_IRQ = 8, - IMX_SC_RPC_SVC_ABORT = 9 }; struct imx_sc_rpc_msg { diff --git a/include/linux/firmware/meson/meson_sm.h b/include/linux/firmware/meson/meson_sm.h index 6669e2a1d5fd..95b0da2326a9 100644 --- a/include/linux/firmware/meson/meson_sm.h +++ b/include/linux/firmware/meson/meson_sm.h @@ -12,6 +12,8 @@ enum { SM_EFUSE_WRITE, SM_EFUSE_USER_MAX, SM_GET_CHIP_ID, + SM_A1_PWRC_SET, + SM_A1_PWRC_GET, }; struct meson_sm_firmware; diff --git a/include/linux/firmware/xlnx-zynqmp.h b/include/linux/firmware/xlnx-zynqmp.h index 2cd12ebd6826..8efa5ac22d7e 100644 --- a/include/linux/firmware/xlnx-zynqmp.h +++ b/include/linux/firmware/xlnx-zynqmp.h @@ -82,6 +82,7 @@ enum pm_api_id { PM_CLOCK_GETRATE, PM_CLOCK_SETPARENT, PM_CLOCK_GETPARENT, + PM_SECURE_AES = 47, PM_FEATURE_CHECK = 63, PM_API_MAX, }; @@ -100,7 +101,8 @@ enum pm_ret_status { }; enum pm_ioctl_id { - IOCTL_SET_SD_TAPDELAY = 7, + IOCTL_SD_DLL_RESET = 6, + IOCTL_SET_SD_TAPDELAY, IOCTL_SET_PLL_FRAC_MODE, IOCTL_GET_PLL_FRAC_MODE, IOCTL_SET_PLL_FRAC_DATA, @@ -271,6 +273,12 @@ enum tap_delay_type { PM_TAPDELAY_OUTPUT, }; +enum dll_reset_type { + PM_DLL_RESET_ASSERT, + PM_DLL_RESET_RELEASE, + PM_DLL_RESET_PULSE, +}; + /** * struct zynqmp_pm_query_data - PM query data * @qid: query ID @@ -315,12 +323,13 @@ struct zynqmp_eemi_ops { const u32 capabilities, const u32 qos, const enum zynqmp_pm_request_ack ack); + int (*aes)(const u64 address, u32 *out); }; int zynqmp_pm_invoke_fn(u32 pm_api_id, u32 arg0, u32 arg1, u32 arg2, u32 arg3, u32 *ret_payload); -#if IS_REACHABLE(CONFIG_ARCH_ZYNQMP) +#if IS_REACHABLE(CONFIG_ZYNQMP_FIRMWARE) const struct zynqmp_eemi_ops *zynqmp_pm_get_eemi_ops(void); #else static inline struct zynqmp_eemi_ops *zynqmp_pm_get_eemi_ops(void) diff --git a/include/linux/fs.h b/include/linux/fs.h index 3cd4fe6b845e..4f6f59b4f22a 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -526,6 +526,11 @@ static inline void i_mmap_lock_write(struct address_space *mapping) down_write(&mapping->i_mmap_rwsem); } +static inline int i_mmap_trylock_write(struct address_space *mapping) +{ + return down_write_trylock(&mapping->i_mmap_rwsem); +} + static inline void i_mmap_unlock_write(struct address_space *mapping) { up_write(&mapping->i_mmap_rwsem); @@ -698,6 +703,7 @@ struct inode { struct rcu_head i_rcu; }; atomic64_t i_version; + atomic64_t i_sequence; /* see futex */ atomic_t i_count; atomic_t i_dio_count; atomic_t i_writecount; @@ -2699,7 +2705,6 @@ static inline void unregister_chrdev(unsigned int major, const char *name) #ifdef CONFIG_BLOCK #define BLKDEV_MAJOR_MAX 512 -extern const char *__bdevname(dev_t, char *buffer); extern const char *bdevname(struct block_device *bdev, char *buffer); extern struct block_device *lookup_bdev(const char *); extern void blkdev_show(struct seq_file *,off_t); @@ -2982,6 +2987,7 @@ extern int do_pipe_flags(int *, int); id(UNKNOWN, unknown) \ id(FIRMWARE, firmware) \ id(FIRMWARE_PREALLOC_BUFFER, firmware) \ + id(FIRMWARE_EFI_EMBEDDED, firmware) \ id(MODULE, kernel-module) \ id(KEXEC_IMAGE, kexec-image) \ id(KEXEC_INITRAMFS, kexec-initramfs) \ @@ -3012,6 +3018,8 @@ extern int kernel_read_file(struct file *, void **, loff_t *, loff_t, enum kernel_read_file_id); extern int kernel_read_file_from_path(const char *, void 
**, loff_t *, loff_t, enum kernel_read_file_id); +extern int kernel_read_file_from_path_initns(const char *, void **, loff_t *, loff_t, + enum kernel_read_file_id); extern int kernel_read_file_from_fd(int, void **, loff_t *, loff_t, enum kernel_read_file_id); extern ssize_t kernel_read(struct file *, void *, size_t, loff_t *); @@ -3391,7 +3399,7 @@ static inline bool io_is_direct(struct file *filp) return (filp->f_flags & O_DIRECT) || IS_DAX(filp->f_mapping->host); } -static inline bool vma_is_dax(struct vm_area_struct *vma) +static inline bool vma_is_dax(const struct vm_area_struct *vma) { return vma->vm_file && IS_DAX(vma->vm_file->f_mapping->host); } diff --git a/include/linux/fscrypt.h b/include/linux/fscrypt.h index 556f4adf5dc5..e3c2d2a15525 100644 --- a/include/linux/fscrypt.h +++ b/include/linux/fscrypt.h @@ -139,6 +139,7 @@ extern void fscrypt_free_bounce_page(struct page *bounce_page); extern int fscrypt_ioctl_set_policy(struct file *, const void __user *); extern int fscrypt_ioctl_get_policy(struct file *, void __user *); extern int fscrypt_ioctl_get_policy_ex(struct file *, void __user *); +extern int fscrypt_ioctl_get_nonce(struct file *filp, void __user *arg); extern int fscrypt_has_permitted_context(struct inode *, struct inode *); extern int fscrypt_inherit_context(struct inode *, struct inode *, void *, bool); @@ -300,6 +301,11 @@ static inline int fscrypt_ioctl_get_policy_ex(struct file *filp, return -EOPNOTSUPP; } +static inline int fscrypt_ioctl_get_nonce(struct file *filp, void __user *arg) +{ + return -EOPNOTSUPP; +} + static inline int fscrypt_has_permitted_context(struct inode *parent, struct inode *child) { diff --git a/include/linux/fsl/mc.h b/include/linux/fsl/mc.h index 54d9436600c7..2b5f8366dbe1 100644 --- a/include/linux/fsl/mc.h +++ b/include/linux/fsl/mc.h @@ -381,6 +381,22 @@ int __must_check __fsl_mc_driver_register(struct fsl_mc_driver *fsl_mc_driver, void fsl_mc_driver_unregister(struct fsl_mc_driver *driver); +/** + * struct fsl_mc_version + * @major: Major version number: incremented on API compatibility changes + * @minor: Minor version number: incremented on API additions (that are + * backward compatible); reset when major version is incremented + * @revision: Internal revision number: incremented on implementation changes + * and/or bug fixes that have no impact on API + */ +struct fsl_mc_version { + u32 major; + u32 minor; + u32 revision; +}; + +struct fsl_mc_version *fsl_mc_get_version(void); + int __must_check fsl_mc_portal_allocate(struct fsl_mc_device *mc_dev, u16 mc_io_flags, struct fsl_mc_io **new_mc_io); diff --git a/include/linux/fsl/ptp_qoriq.h b/include/linux/fsl/ptp_qoriq.h index b0b743563f43..75884563059f 100644 --- a/include/linux/fsl/ptp_qoriq.h +++ b/include/linux/fsl/ptp_qoriq.h @@ -149,8 +149,6 @@ struct ptp_qoriq { bool extts_fifo_support; int irq; int phc_index; - u64 alarm_interval; /* for periodic alarm */ - u64 alarm_value; u32 tclk_period; /* nanoseconds */ u32 tmr_prsc; u32 tmr_add; diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h index a2d5d175d3c1..5ab28f6c7d26 100644 --- a/include/linux/fsnotify.h +++ b/include/linux/fsnotify.h @@ -18,39 +18,63 @@ #include <linux/bug.h> /* - * Notify this @dir inode about a change in the directory entry @dentry. + * Notify this @dir inode about a change in a child directory entry. + * The directory entry may have turned positive or negative or its inode may + * have changed (i.e. renamed over). 
* * Unlike fsnotify_parent(), the event will be reported regardless of the * FS_EVENT_ON_CHILD mask on the parent inode. */ -static inline int fsnotify_dirent(struct inode *dir, struct dentry *dentry, - __u32 mask) +static inline void fsnotify_name(struct inode *dir, __u32 mask, + struct inode *child, + const struct qstr *name, u32 cookie) { - return fsnotify(dir, mask, d_inode(dentry), FSNOTIFY_EVENT_INODE, - &dentry->d_name, 0); + fsnotify(dir, mask, child, FSNOTIFY_EVENT_INODE, name, cookie); + /* + * Send another flavor of the event without child inode data and + * without the specific event type (e.g. FS_CREATE|FS_IS_DIR). + * The name is relative to the dir inode the event is reported to. + */ + fsnotify(dir, FS_DIR_MODIFY, dir, FSNOTIFY_EVENT_INODE, name, 0); } -/* Notify this dentry's parent about a child's events. */ -static inline int fsnotify_parent(const struct path *path, - struct dentry *dentry, __u32 mask) +static inline void fsnotify_dirent(struct inode *dir, struct dentry *dentry, + __u32 mask) { - if (!dentry) - dentry = path->dentry; - - return __fsnotify_parent(path, dentry, mask); + fsnotify_name(dir, mask, d_inode(dentry), &dentry->d_name, 0); } /* - * Simple wrapper to consolidate calls fsnotify_parent()/fsnotify() when - * an event is on a path. + * Simple wrappers to consolidate calls fsnotify_parent()/fsnotify() when + * an event is on a file/dentry. */ -static inline int fsnotify_path(struct inode *inode, const struct path *path, - __u32 mask) +static inline void fsnotify_dentry(struct dentry *dentry, __u32 mask) +{ + struct inode *inode = d_inode(dentry); + + if (S_ISDIR(inode->i_mode)) + mask |= FS_ISDIR; + + fsnotify_parent(dentry, mask, inode, FSNOTIFY_EVENT_INODE); + fsnotify(inode, mask, inode, FSNOTIFY_EVENT_INODE, NULL, 0); +} + +static inline int fsnotify_file(struct file *file, __u32 mask) { - int ret = fsnotify_parent(path, NULL, mask); + const struct path *path = &file->f_path; + struct inode *inode = file_inode(file); + int ret; + + if (file->f_mode & FMODE_NONOTIFY) + return 0; + + if (S_ISDIR(inode->i_mode)) + mask |= FS_ISDIR; + ret = fsnotify_parent(path->dentry, mask, path, FSNOTIFY_EVENT_PATH); if (ret) return ret; + return fsnotify(inode, mask, path, FSNOTIFY_EVENT_PATH, NULL, 0); } @@ -58,19 +82,16 @@ static inline int fsnotify_path(struct inode *inode, const struct path *path, static inline int fsnotify_perm(struct file *file, int mask) { int ret; - const struct path *path = &file->f_path; - struct inode *inode = file_inode(file); __u32 fsnotify_mask = 0; - if (file->f_mode & FMODE_NONOTIFY) - return 0; if (!(mask & (MAY_READ | MAY_OPEN))) return 0; + if (mask & MAY_OPEN) { fsnotify_mask = FS_OPEN_PERM; if (file->f_flags & __FMODE_EXEC) { - ret = fsnotify_path(inode, path, FS_OPEN_EXEC_PERM); + ret = fsnotify_file(file, FS_OPEN_EXEC_PERM); if (ret) return ret; @@ -79,10 +100,7 @@ static inline int fsnotify_perm(struct file *file, int mask) fsnotify_mask = FS_ACCESS_PERM; } - if (S_ISDIR(inode->i_mode)) - fsnotify_mask |= FS_ISDIR; - - return fsnotify_path(inode, path, fsnotify_mask); + return fsnotify_file(file, fsnotify_mask); } /* @@ -122,10 +140,8 @@ static inline void fsnotify_move(struct inode *old_dir, struct inode *new_dir, mask |= FS_ISDIR; } - fsnotify(old_dir, old_dir_mask, source, FSNOTIFY_EVENT_INODE, old_name, - fs_cookie); - fsnotify(new_dir, new_dir_mask, source, FSNOTIFY_EVENT_INODE, new_name, - fs_cookie); + fsnotify_name(old_dir, old_dir_mask, source, old_name, fs_cookie); + fsnotify_name(new_dir, new_dir_mask, source, 
new_name, fs_cookie); if (target) fsnotify_link_count(target); @@ -180,12 +196,13 @@ static inline void fsnotify_create(struct inode *inode, struct dentry *dentry) * Note: We have to pass also the linked inode ptr as some filesystems leave * new_dentry->d_inode NULL and instantiate inode pointer later */ -static inline void fsnotify_link(struct inode *dir, struct inode *inode, struct dentry *new_dentry) +static inline void fsnotify_link(struct inode *dir, struct inode *inode, + struct dentry *new_dentry) { fsnotify_link_count(inode); audit_inode_child(dir, new_dentry, AUDIT_TYPE_CHILD_CREATE); - fsnotify(dir, FS_CREATE, inode, FSNOTIFY_EVENT_INODE, &new_dentry->d_name, 0); + fsnotify_name(dir, FS_CREATE, inode, &new_dentry->d_name, 0); } /* @@ -229,15 +246,7 @@ static inline void fsnotify_rmdir(struct inode *dir, struct dentry *dentry) */ static inline void fsnotify_access(struct file *file) { - const struct path *path = &file->f_path; - struct inode *inode = file_inode(file); - __u32 mask = FS_ACCESS; - - if (S_ISDIR(inode->i_mode)) - mask |= FS_ISDIR; - - if (!(file->f_mode & FMODE_NONOTIFY)) - fsnotify_path(inode, path, mask); + fsnotify_file(file, FS_ACCESS); } /* @@ -245,15 +254,7 @@ static inline void fsnotify_access(struct file *file) */ static inline void fsnotify_modify(struct file *file) { - const struct path *path = &file->f_path; - struct inode *inode = file_inode(file); - __u32 mask = FS_MODIFY; - - if (S_ISDIR(inode->i_mode)) - mask |= FS_ISDIR; - - if (!(file->f_mode & FMODE_NONOTIFY)) - fsnotify_path(inode, path, mask); + fsnotify_file(file, FS_MODIFY); } /* @@ -261,16 +262,12 @@ static inline void fsnotify_modify(struct file *file) */ static inline void fsnotify_open(struct file *file) { - const struct path *path = &file->f_path; - struct inode *inode = file_inode(file); __u32 mask = FS_OPEN; - if (S_ISDIR(inode->i_mode)) - mask |= FS_ISDIR; if (file->f_flags & __FMODE_EXEC) mask |= FS_OPEN_EXEC; - fsnotify_path(inode, path, mask); + fsnotify_file(file, mask); } /* @@ -278,16 +275,10 @@ static inline void fsnotify_open(struct file *file) */ static inline void fsnotify_close(struct file *file) { - const struct path *path = &file->f_path; - struct inode *inode = file_inode(file); - fmode_t mode = file->f_mode; - __u32 mask = (mode & FMODE_WRITE) ? FS_CLOSE_WRITE : FS_CLOSE_NOWRITE; + __u32 mask = (file->f_mode & FMODE_WRITE) ? 
FS_CLOSE_WRITE : + FS_CLOSE_NOWRITE; - if (S_ISDIR(inode->i_mode)) - mask |= FS_ISDIR; - - if (!(file->f_mode & FMODE_NONOTIFY)) - fsnotify_path(inode, path, mask); + fsnotify_file(file, mask); } /* @@ -295,14 +286,7 @@ static inline void fsnotify_close(struct file *file) */ static inline void fsnotify_xattr(struct dentry *dentry) { - struct inode *inode = dentry->d_inode; - __u32 mask = FS_ATTRIB; - - if (S_ISDIR(inode->i_mode)) - mask |= FS_ISDIR; - - fsnotify_parent(NULL, dentry, mask); - fsnotify(inode, mask, inode, FSNOTIFY_EVENT_INODE, NULL, 0); + fsnotify_dentry(dentry, FS_ATTRIB); } /* @@ -311,7 +295,6 @@ static inline void fsnotify_xattr(struct dentry *dentry) */ static inline void fsnotify_change(struct dentry *dentry, unsigned int ia_valid) { - struct inode *inode = dentry->d_inode; __u32 mask = 0; if (ia_valid & ATTR_UID) @@ -332,13 +315,8 @@ static inline void fsnotify_change(struct dentry *dentry, unsigned int ia_valid) if (ia_valid & ATTR_MODE) mask |= FS_ATTRIB; - if (mask) { - if (S_ISDIR(inode->i_mode)) - mask |= FS_ISDIR; - - fsnotify_parent(NULL, dentry, mask); - fsnotify(inode, mask, inode, FSNOTIFY_EVENT_INODE, NULL, 0); - } + if (mask) + fsnotify_dentry(dentry, mask); } #endif /* _LINUX_FS_NOTIFY_H */ diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h index 1915bdba2fad..f0c506405b54 100644 --- a/include/linux/fsnotify_backend.h +++ b/include/linux/fsnotify_backend.h @@ -47,18 +47,18 @@ #define FS_OPEN_PERM 0x00010000 /* open event in an permission hook */ #define FS_ACCESS_PERM 0x00020000 /* access event in a permissions hook */ #define FS_OPEN_EXEC_PERM 0x00040000 /* open/exec event in a permission hook */ +#define FS_DIR_MODIFY 0x00080000 /* Directory entry was modified */ #define FS_EXCL_UNLINK 0x04000000 /* do not send events if object is unlinked */ -#define FS_ISDIR 0x40000000 /* event occurred against dir */ -#define FS_IN_ONESHOT 0x80000000 /* only send event once */ - -#define FS_DN_RENAME 0x10000000 /* file renamed */ -#define FS_DN_MULTISHOT 0x20000000 /* dnotify multishot */ - /* This inode cares about things that happen to its children. Always set for * dnotify and inotify. */ #define FS_EVENT_ON_CHILD 0x08000000 +#define FS_DN_RENAME 0x10000000 /* file renamed */ +#define FS_DN_MULTISHOT 0x20000000 /* dnotify multishot */ +#define FS_ISDIR 0x40000000 /* event occurred against dir */ +#define FS_IN_ONESHOT 0x80000000 /* only send event once */ + #define FS_MOVE (FS_MOVED_FROM | FS_MOVED_TO) /* @@ -67,7 +67,8 @@ * The watching parent may get an FS_ATTRIB|FS_EVENT_ON_CHILD event * when a directory entry inside a child subdir changes. */ -#define ALL_FSNOTIFY_DIRENT_EVENTS (FS_CREATE | FS_DELETE | FS_MOVE) +#define ALL_FSNOTIFY_DIRENT_EVENTS (FS_CREATE | FS_DELETE | FS_MOVE | \ + FS_DIR_MODIFY) #define ALL_FSNOTIFY_PERM_EVENTS (FS_OPEN_PERM | FS_ACCESS_PERM | \ FS_OPEN_EXEC_PERM) @@ -133,8 +134,7 @@ struct fsnotify_ops { */ struct fsnotify_event { struct list_head list; - /* inode may ONLY be dereferenced during handle_event(). 
*/ - struct inode *inode; /* either the inode the event happened to or its parent */ + unsigned long objectid; /* identifier for queue merges */ }; /* @@ -213,10 +213,36 @@ struct fsnotify_group { }; }; -/* when calling fsnotify tell it if the data is a path or inode */ -#define FSNOTIFY_EVENT_NONE 0 -#define FSNOTIFY_EVENT_PATH 1 -#define FSNOTIFY_EVENT_INODE 2 +/* When calling fsnotify tell it if the data is a path or inode */ +enum fsnotify_data_type { + FSNOTIFY_EVENT_NONE, + FSNOTIFY_EVENT_PATH, + FSNOTIFY_EVENT_INODE, +}; + +static inline const struct inode *fsnotify_data_inode(const void *data, + int data_type) +{ + switch (data_type) { + case FSNOTIFY_EVENT_INODE: + return data; + case FSNOTIFY_EVENT_PATH: + return d_inode(((const struct path *)data)->dentry); + default: + return NULL; + } +} + +static inline const struct path *fsnotify_data_path(const void *data, + int data_type) +{ + switch (data_type) { + case FSNOTIFY_EVENT_PATH: + return data; + default: + return NULL; + } +} enum fsnotify_obj_type { FSNOTIFY_OBJ_TYPE_INODE, @@ -351,9 +377,10 @@ struct fsnotify_mark { /* called from the vfs helpers */ /* main fsnotify call to send events */ -extern int fsnotify(struct inode *to_tell, __u32 mask, const void *data, int data_is, - const struct qstr *name, u32 cookie); -extern int __fsnotify_parent(const struct path *path, struct dentry *dentry, __u32 mask); +extern int fsnotify(struct inode *to_tell, __u32 mask, const void *data, + int data_type, const struct qstr *name, u32 cookie); +extern int fsnotify_parent(struct dentry *dentry, __u32 mask, const void *data, + int data_type); extern void __fsnotify_inode_delete(struct inode *inode); extern void __fsnotify_vfsmount_delete(struct vfsmount *mnt); extern void fsnotify_sb_delete(struct super_block *sb); @@ -500,21 +527,22 @@ extern void fsnotify_finish_user_wait(struct fsnotify_iter_info *iter_info); extern bool fsnotify_prepare_user_wait(struct fsnotify_iter_info *iter_info); static inline void fsnotify_init_event(struct fsnotify_event *event, - struct inode *inode) + unsigned long objectid) { INIT_LIST_HEAD(&event->list); - event->inode = inode; + event->objectid = objectid; } #else -static inline int fsnotify(struct inode *to_tell, __u32 mask, const void *data, int data_is, - const struct qstr *name, u32 cookie) +static inline int fsnotify(struct inode *to_tell, __u32 mask, const void *data, + int data_type, const struct qstr *name, u32 cookie) { return 0; } -static inline int __fsnotify_parent(const struct path *path, struct dentry *dentry, __u32 mask) +static inline int fsnotify_parent(struct dentry *dentry, __u32 mask, + const void *data, int data_type) { return 0; } diff --git a/include/linux/futex.h b/include/linux/futex.h index 5cc3fed27d4c..b70df27d7e85 100644 --- a/include/linux/futex.h +++ b/include/linux/futex.h @@ -31,23 +31,26 @@ struct task_struct; union futex_key { struct { + u64 i_seq; unsigned long pgoff; - struct inode *inode; - int offset; + unsigned int offset; } shared; struct { + union { + struct mm_struct *mm; + u64 __tmp; + }; unsigned long address; - struct mm_struct *mm; - int offset; + unsigned int offset; } private; struct { + u64 ptr; unsigned long word; - void *ptr; - int offset; + unsigned int offset; } both; }; -#define FUTEX_KEY_INIT (union futex_key) { .both = { .ptr = NULL } } +#define FUTEX_KEY_INIT (union futex_key) { .both = { .ptr = 0ULL } } #ifdef CONFIG_FUTEX enum { diff --git a/include/linux/fwnode.h b/include/linux/fwnode.h index 8feeb94b8acc..e0abafbb17f8 100644 --- 
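The typed accessors above replace open-coded casts on the opaque @data pointer; a hedged sketch of how a backend's event handler might use them (the handler name and the elided actions are assumptions):

        #include <linux/fsnotify_backend.h>
        #include <linux/path.h>

        static void example_handle_event(__u32 mask, const void *data, int data_type)
        {
                const struct path *path = fsnotify_data_path(data, data_type);
                const struct inode *inode = fsnotify_data_inode(data, data_type);

                if (path) {
                        /* fanotify-style backends want a struct path to open an fd */
                        /* ... */
                } else if (inode) {
                        /* inotify-style backends only need the inode */
                        /* ... */
                }
        }
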
a/include/linux/fwnode.h +++ b/include/linux/fwnode.h @@ -170,4 +170,6 @@ struct fwnode_operations { } while (false) #define get_dev_from_fwnode(fwnode) get_device((fwnode)->dev) +extern u32 fw_devlink_get_flags(void); + #endif diff --git a/include/linux/genhd.h b/include/linux/genhd.h index 6fbe58538ad6..9b3fffdf4011 100644 --- a/include/linux/genhd.h +++ b/include/linux/genhd.h @@ -27,39 +27,8 @@ #define part_to_dev(part) (&((part)->__dev)) extern struct device_type part_type; -extern struct kobject *block_depr; extern struct class block_class; -enum { -/* These three have identical behaviour; use the second one if DOS FDISK gets - confused about extended/logical partitions starting past cylinder 1023. */ - DOS_EXTENDED_PARTITION = 5, - LINUX_EXTENDED_PARTITION = 0x85, - WIN98_EXTENDED_PARTITION = 0x0f, - - SUN_WHOLE_DISK = DOS_EXTENDED_PARTITION, - - LINUX_SWAP_PARTITION = 0x82, - LINUX_DATA_PARTITION = 0x83, - LINUX_LVM_PARTITION = 0x8e, - LINUX_RAID_PARTITION = 0xfd, /* autodetect RAID partition */ - - SOLARIS_X86_PARTITION = LINUX_SWAP_PARTITION, - NEW_SOLARIS_X86_PARTITION = 0xbf, - - DM6_AUX1PARTITION = 0x51, /* no DDO: use xlated geom */ - DM6_AUX3PARTITION = 0x53, /* no DDO: use xlated geom */ - DM6_PARTITION = 0x54, /* has DDO: use xlated geom & offset */ - EZD_PARTITION = 0x55, /* EZ-DRIVE */ - - FREEBSD_PARTITION = 0xa5, /* FreeBSD Partition ID */ - OPENBSD_PARTITION = 0xa6, /* OpenBSD Partition ID */ - NETBSD_PARTITION = 0xa9, /* NetBSD Partition ID */ - BSDI_PARTITION = 0xb7, /* BSDI Partition ID */ - MINIX_PARTITION = 0x81, /* Minix Partition ID */ - UNIXWARE_PARTITION = 0x63, /* Same as GNU_HURD and SCO Unix */ -}; - #define DISK_MAX_PARTS 256 #define DISK_NAME_LEN 32 @@ -70,26 +39,12 @@ enum { #include <linux/fs.h> #include <linux/workqueue.h> -struct partition { - unsigned char boot_ind; /* 0x80 - active */ - unsigned char head; /* starting head */ - unsigned char sector; /* starting sector */ - unsigned char cyl; /* starting cylinder */ - unsigned char sys_ind; /* What partition type */ - unsigned char end_head; /* end head */ - unsigned char end_sector; /* end sector */ - unsigned char end_cyl; /* end cylinder */ - __le32 start_sect; /* starting sector counting from 0 */ - __le32 nr_sects; /* nr of sectors in partition */ -} __attribute__((packed)); - struct disk_stats { u64 nsecs[NR_STAT_GROUPS]; unsigned long sectors[NR_STAT_GROUPS]; unsigned long ios[NR_STAT_GROUPS]; unsigned long merges[NR_STAT_GROUPS]; unsigned long io_ticks; - unsigned long time_in_queue; local_t in_flight[2]; }; @@ -133,17 +88,64 @@ struct hd_struct { struct rcu_work rcu_work; }; -#define GENHD_FL_REMOVABLE 1 -/* 2 is unused */ -#define GENHD_FL_MEDIA_CHANGE_NOTIFY 4 -#define GENHD_FL_CD 8 -#define GENHD_FL_UP 16 -#define GENHD_FL_SUPPRESS_PARTITION_INFO 32 -#define GENHD_FL_EXT_DEVT 64 /* allow extended devt */ -#define GENHD_FL_NATIVE_CAPACITY 128 -#define GENHD_FL_BLOCK_EVENTS_ON_EXCL_WRITE 256 -#define GENHD_FL_NO_PART_SCAN 512 -#define GENHD_FL_HIDDEN 1024 +/** + * DOC: genhd capability flags + * + * ``GENHD_FL_REMOVABLE`` (0x0001): indicates that the block device + * gives access to removable media. + * When set, the device remains present even when media is not + * inserted. + * Must not be set for devices which are removed entirely when the + * media is removed. + * + * ``GENHD_FL_CD`` (0x0008): the block device is a CD-ROM-style + * device. + * Affects responses to the ``CDROM_GET_CAPABILITY`` ioctl. 
+ * + * ``GENHD_FL_UP`` (0x0010): indicates that the block device is "up", + * with a similar meaning to network interfaces. + * + * ``GENHD_FL_SUPPRESS_PARTITION_INFO`` (0x0020): don't include + * partition information in ``/proc/partitions`` or in the output of + * printk_all_partitions(). + * Used for the null block device and some MMC devices. + * + * ``GENHD_FL_EXT_DEVT`` (0x0040): the driver supports extended + * dynamic ``dev_t``, i.e. it wants extended device numbers + * (``BLOCK_EXT_MAJOR``). + * This affects the maximum number of partitions. + * + * ``GENHD_FL_NATIVE_CAPACITY`` (0x0080): based on information in the + * partition table, the device's capacity has been extended to its + * native capacity; i.e. the device has hidden capacity used by one + * of the partitions (this is a flag used so that native capacity is + * only ever unlocked once). + * + * ``GENHD_FL_BLOCK_EVENTS_ON_EXCL_WRITE`` (0x0100): event polling is + * blocked whenever a writer holds an exclusive lock. + * + * ``GENHD_FL_NO_PART_SCAN`` (0x0200): partition scanning is disabled. + * Used for loop devices in their default settings and some MMC + * devices. + * + * ``GENHD_FL_HIDDEN`` (0x0400): the block device is hidden; it + * doesn't produce events, doesn't appear in sysfs, and doesn't have + * an associated ``bdev``. + * Implies ``GENHD_FL_SUPPRESS_PARTITION_INFO`` and + * ``GENHD_FL_NO_PART_SCAN``. + * Used for multipath devices. + */ +#define GENHD_FL_REMOVABLE 0x0001 +/* 2 is unused (used to be GENHD_FL_DRIVERFS) */ +/* 4 is unused (used to be GENHD_FL_MEDIA_CHANGE_NOTIFY) */ +#define GENHD_FL_CD 0x0008 +#define GENHD_FL_UP 0x0010 +#define GENHD_FL_SUPPRESS_PARTITION_INFO 0x0020 +#define GENHD_FL_EXT_DEVT 0x0040 +#define GENHD_FL_NATIVE_CAPACITY 0x0080 +#define GENHD_FL_BLOCK_EVENTS_ON_EXCL_WRITE 0x0100 +#define GENHD_FL_NO_PART_SCAN 0x0200 +#define GENHD_FL_HIDDEN 0x0400 enum { DISK_EVENT_MEDIA_CHANGE = 1 << 0, /* media changed */ @@ -189,7 +191,6 @@ struct gendisk { * disks that can't be partitioned. */ char disk_name[DISK_NAME_LEN]; /* name of major driver */ - char *(*devnode)(struct gendisk *gd, umode_t *mode); unsigned short events; /* supported events */ unsigned short event_flags; /* flags related to event processing */ @@ -245,18 +246,6 @@ static inline bool disk_part_scan_enabled(struct gendisk *disk) !(disk->flags & GENHD_FL_NO_PART_SCAN); } -static inline bool disk_has_partitions(struct gendisk *disk) -{ - bool ret = false; - - rcu_read_lock(); - if (rcu_dereference(disk->part_tbl)->len > 1) - ret = true; - rcu_read_unlock(); - - return ret; -} - static inline dev_t disk_devt(struct gendisk *disk) { return MKDEV(disk->major, disk->first_minor); @@ -295,143 +284,7 @@ extern void disk_part_iter_init(struct disk_part_iter *piter, struct gendisk *disk, unsigned int flags); extern struct hd_struct *disk_part_iter_next(struct disk_part_iter *piter); extern void disk_part_iter_exit(struct disk_part_iter *piter); - -extern struct hd_struct *disk_map_sector_rcu(struct gendisk *disk, - sector_t sector); - -/* - * Macros to operate on percpu disk statistics: - * - * {disk|part|all}_stat_{add|sub|inc|dec}() modify the stat counters - * and should be called between disk_stat_lock() and - * disk_stat_unlock(). - * - * part_stat_read() can be called at any time. - * - * part_stat_{add|set_all}() and {init|free}_part_stats are for - * internal use only. 
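The DOC block above now ties each capability flag to its hex value; a driver-side sketch of how these flags are typically combined when setting up a gendisk (the helper below is invented for illustration, loosely modelled on the removable-media and loop-device cases the DOC text mentions):

        #include <linux/genhd.h>

        static void example_setup_disk(struct gendisk *disk, bool removable)
        {
                if (removable)
                        disk->flags |= GENHD_FL_REMOVABLE;      /* 0x0001 */

                /* Request extended dev_t numbers and skip partition scanning,
                 * as a loop-like virtual driver might. */
                disk->flags |= GENHD_FL_EXT_DEVT | GENHD_FL_NO_PART_SCAN;
        }
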
- */ -#ifdef CONFIG_SMP -#define part_stat_lock() ({ rcu_read_lock(); get_cpu(); }) -#define part_stat_unlock() do { put_cpu(); rcu_read_unlock(); } while (0) - -#define part_stat_get_cpu(part, field, cpu) \ - (per_cpu_ptr((part)->dkstats, (cpu))->field) - -#define part_stat_get(part, field) \ - part_stat_get_cpu(part, field, smp_processor_id()) - -#define part_stat_read(part, field) \ -({ \ - typeof((part)->dkstats->field) res = 0; \ - unsigned int _cpu; \ - for_each_possible_cpu(_cpu) \ - res += per_cpu_ptr((part)->dkstats, _cpu)->field; \ - res; \ -}) - -static inline void part_stat_set_all(struct hd_struct *part, int value) -{ - int i; - - for_each_possible_cpu(i) - memset(per_cpu_ptr(part->dkstats, i), value, - sizeof(struct disk_stats)); -} - -static inline int init_part_stats(struct hd_struct *part) -{ - part->dkstats = alloc_percpu(struct disk_stats); - if (!part->dkstats) - return 0; - return 1; -} - -static inline void free_part_stats(struct hd_struct *part) -{ - free_percpu(part->dkstats); -} - -#else /* !CONFIG_SMP */ -#define part_stat_lock() ({ rcu_read_lock(); 0; }) -#define part_stat_unlock() rcu_read_unlock() - -#define part_stat_get(part, field) ((part)->dkstats.field) -#define part_stat_get_cpu(part, field, cpu) part_stat_get(part, field) -#define part_stat_read(part, field) part_stat_get(part, field) - -static inline void part_stat_set_all(struct hd_struct *part, int value) -{ - memset(&part->dkstats, value, sizeof(struct disk_stats)); -} - -static inline int init_part_stats(struct hd_struct *part) -{ - return 1; -} - -static inline void free_part_stats(struct hd_struct *part) -{ -} - -#endif /* CONFIG_SMP */ - -#define part_stat_read_msecs(part, which) \ - div_u64(part_stat_read(part, nsecs[which]), NSEC_PER_MSEC) - -#define part_stat_read_accum(part, field) \ - (part_stat_read(part, field[STAT_READ]) + \ - part_stat_read(part, field[STAT_WRITE]) + \ - part_stat_read(part, field[STAT_DISCARD])) - -#define __part_stat_add(part, field, addnd) \ - (part_stat_get(part, field) += (addnd)) - -#define part_stat_add(part, field, addnd) do { \ - __part_stat_add((part), field, addnd); \ - if ((part)->partno) \ - __part_stat_add(&part_to_disk((part))->part0, \ - field, addnd); \ -} while (0) - -#define part_stat_dec(gendiskp, field) \ - part_stat_add(gendiskp, field, -1) -#define part_stat_inc(gendiskp, field) \ - part_stat_add(gendiskp, field, 1) -#define part_stat_sub(gendiskp, field, subnd) \ - part_stat_add(gendiskp, field, -subnd) - -#define part_stat_local_dec(gendiskp, field) \ - local_dec(&(part_stat_get(gendiskp, field))) -#define part_stat_local_inc(gendiskp, field) \ - local_inc(&(part_stat_get(gendiskp, field))) -#define part_stat_local_read(gendiskp, field) \ - local_read(&(part_stat_get(gendiskp, field))) -#define part_stat_local_read_cpu(gendiskp, field, cpu) \ - local_read(&(part_stat_get_cpu(gendiskp, field, cpu))) - -unsigned int part_in_flight(struct request_queue *q, struct hd_struct *part); -void part_in_flight_rw(struct request_queue *q, struct hd_struct *part, - unsigned int inflight[2]); -void part_dec_in_flight(struct request_queue *q, struct hd_struct *part, - int rw); -void part_inc_in_flight(struct request_queue *q, struct hd_struct *part, - int rw); - -static inline struct partition_meta_info *alloc_part_info(struct gendisk *disk) -{ - if (disk) - return kzalloc_node(sizeof(struct partition_meta_info), - GFP_KERNEL, disk->node_id); - return kzalloc(sizeof(struct partition_meta_info), GFP_KERNEL); -} - -static inline void free_part_info(struct 
hd_struct *part) -{ - kfree(part->info); -} - -void update_io_ticks(struct hd_struct *part, unsigned long now); +extern bool disk_has_partitions(struct gendisk *disk); /* block/genhd.c */ extern void device_add_disk(struct device *parent, struct gendisk *disk, @@ -461,6 +314,8 @@ static inline int get_disk_ro(struct gendisk *disk) extern void disk_block_events(struct gendisk *disk); extern void disk_unblock_events(struct gendisk *disk); extern void disk_flush_events(struct gendisk *disk, unsigned int mask); +extern void set_capacity_revalidate_and_notify(struct gendisk *disk, + sector_t size, bool revalidate); extern unsigned int disk_clear_events(struct gendisk *disk, unsigned int mask); /* drivers/char/random.c */ @@ -480,170 +335,11 @@ static inline void set_capacity(struct gendisk *disk, sector_t size) disk->part0.nr_sects = size; } -#ifdef CONFIG_SOLARIS_X86_PARTITION - -#define SOLARIS_X86_NUMSLICE 16 -#define SOLARIS_X86_VTOC_SANE (0x600DDEEEUL) - -struct solaris_x86_slice { - __le16 s_tag; /* ID tag of partition */ - __le16 s_flag; /* permission flags */ - __le32 s_start; /* start sector no of partition */ - __le32 s_size; /* # of blocks in partition */ -}; - -struct solaris_x86_vtoc { - unsigned int v_bootinfo[3]; /* info needed by mboot (unsupported) */ - __le32 v_sanity; /* to verify vtoc sanity */ - __le32 v_version; /* layout version */ - char v_volume[8]; /* volume name */ - __le16 v_sectorsz; /* sector size in bytes */ - __le16 v_nparts; /* number of partitions */ - unsigned int v_reserved[10]; /* free space */ - struct solaris_x86_slice - v_slice[SOLARIS_X86_NUMSLICE]; /* slice headers */ - unsigned int timestamp[SOLARIS_X86_NUMSLICE]; /* timestamp (unsupported) */ - char v_asciilabel[128]; /* for compatibility */ -}; - -#endif /* CONFIG_SOLARIS_X86_PARTITION */ - -#ifdef CONFIG_BSD_DISKLABEL -/* - * BSD disklabel support by Yossi Gottlieb <yogo@math.tau.ac.il> - * updated by Marc Espie <Marc.Espie@openbsd.org> - */ - -/* check against BSD src/sys/sys/disklabel.h for consistency */ - -#define BSD_DISKMAGIC (0x82564557UL) /* The disk magic number */ -#define BSD_MAXPARTITIONS 16 -#define OPENBSD_MAXPARTITIONS 16 -#define BSD_FS_UNUSED 0 /* disklabel unused partition entry ID */ -struct bsd_disklabel { - __le32 d_magic; /* the magic number */ - __s16 d_type; /* drive type */ - __s16 d_subtype; /* controller/d_type specific */ - char d_typename[16]; /* type name, e.g. "eagle" */ - char d_packname[16]; /* pack identifier */ - __u32 d_secsize; /* # of bytes per sector */ - __u32 d_nsectors; /* # of data sectors per track */ - __u32 d_ntracks; /* # of tracks per cylinder */ - __u32 d_ncylinders; /* # of data cylinders per unit */ - __u32 d_secpercyl; /* # of data sectors per cylinder */ - __u32 d_secperunit; /* # of data sectors per unit */ - __u16 d_sparespertrack; /* # of spare sectors per track */ - __u16 d_sparespercyl; /* # of spare sectors per cylinder */ - __u32 d_acylinders; /* # of alt. 
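The newly exported set_capacity_revalidate_and_notify() above gives virtual block drivers a one-call resize path; a hedged sketch of how a driver's resize handler might use it (the surrounding callback is assumed, not part of this diff):

        #include <linux/genhd.h>

        /* Assumed driver resize callback; only the final call is from this patch. */
        static void example_resize(struct gendisk *disk, sector_t new_sectors)
        {
                /*
                 * Updates the capacity, optionally revalidates the disk, and is
                 * expected to notify userspace of the size change.
                 */
                set_capacity_revalidate_and_notify(disk, new_sectors, true);
        }
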
cylinders per unit */ - __u16 d_rpm; /* rotational speed */ - __u16 d_interleave; /* hardware sector interleave */ - __u16 d_trackskew; /* sector 0 skew, per track */ - __u16 d_cylskew; /* sector 0 skew, per cylinder */ - __u32 d_headswitch; /* head switch time, usec */ - __u32 d_trkseek; /* track-to-track seek, usec */ - __u32 d_flags; /* generic flags */ -#define NDDATA 5 - __u32 d_drivedata[NDDATA]; /* drive-type specific information */ -#define NSPARE 5 - __u32 d_spare[NSPARE]; /* reserved for future use */ - __le32 d_magic2; /* the magic number (again) */ - __le16 d_checksum; /* xor of data incl. partitions */ - - /* filesystem and partition information: */ - __le16 d_npartitions; /* number of partitions in following */ - __le32 d_bbsize; /* size of boot area at sn0, bytes */ - __le32 d_sbsize; /* max size of fs superblock, bytes */ - struct bsd_partition { /* the partition table */ - __le32 p_size; /* number of sectors in partition */ - __le32 p_offset; /* starting sector */ - __le32 p_fsize; /* filesystem basic fragment size */ - __u8 p_fstype; /* filesystem type, see below */ - __u8 p_frag; /* filesystem fragments per block */ - __le16 p_cpg; /* filesystem cylinders per group */ - } d_partitions[BSD_MAXPARTITIONS]; /* actually may be more */ -}; - -#endif /* CONFIG_BSD_DISKLABEL */ - -#ifdef CONFIG_UNIXWARE_DISKLABEL -/* - * Unixware slices support by Andrzej Krzysztofowicz <ankry@mif.pg.gda.pl> - * and Krzysztof G. Baranowski <kgb@knm.org.pl> - */ - -#define UNIXWARE_DISKMAGIC (0xCA5E600DUL) /* The disk magic number */ -#define UNIXWARE_DISKMAGIC2 (0x600DDEEEUL) /* The slice table magic nr */ -#define UNIXWARE_NUMSLICE 16 -#define UNIXWARE_FS_UNUSED 0 /* Unused slice entry ID */ - -struct unixware_slice { - __le16 s_label; /* label */ - __le16 s_flags; /* permission flags */ - __le32 start_sect; /* starting sector */ - __le32 nr_sects; /* number of sectors in slice */ -}; - -struct unixware_disklabel { - __le32 d_type; /* drive type */ - __le32 d_magic; /* the magic number */ - __le32 d_version; /* version number */ - char d_serial[12]; /* serial number of the device */ - __le32 d_ncylinders; /* # of data cylinders per device */ - __le32 d_ntracks; /* # of tracks per cylinder */ - __le32 d_nsectors; /* # of data sectors per track */ - __le32 d_secsize; /* # of bytes per sector */ - __le32 d_part_start; /* # of first sector of this partition */ - __le32 d_unknown1[12]; /* ? */ - __le32 d_alt_tbl; /* byte offset of alternate table */ - __le32 d_alt_len; /* byte length of alternate table */ - __le32 d_phys_cyl; /* # of physical cylinders per device */ - __le32 d_phys_trk; /* # of physical tracks per cylinder */ - __le32 d_phys_sec; /* # of physical sectors per track */ - __le32 d_phys_bytes; /* # of physical bytes per sector */ - __le32 d_unknown2; /* ? */ - __le32 d_unknown3; /* ? */ - __le32 d_pad[8]; /* pad */ - - struct unixware_vtoc { - __le32 v_magic; /* the magic number */ - __le32 v_version; /* version number */ - char v_name[8]; /* volume name */ - __le16 v_nslices; /* # of slices */ - __le16 v_unknown1; /* ? 
*/ - __le32 v_reserved[10]; /* reserved */ - struct unixware_slice - v_slice[UNIXWARE_NUMSLICE]; /* slice headers */ - } vtoc; - -}; /* 408 */ - -#endif /* CONFIG_UNIXWARE_DISKLABEL */ - -#ifdef CONFIG_MINIX_SUBPARTITION -# define MINIX_NR_SUBPARTITIONS 4 -#endif /* CONFIG_MINIX_SUBPARTITION */ - -#define ADDPART_FLAG_NONE 0 -#define ADDPART_FLAG_RAID 1 -#define ADDPART_FLAG_WHOLEDISK 2 - -extern int blk_alloc_devt(struct hd_struct *part, dev_t *devt); -extern void blk_free_devt(dev_t devt); -extern void blk_invalidate_devt(dev_t devt); extern dev_t blk_lookup_devt(const char *name, int partno); -extern char *disk_name (struct gendisk *hd, int partno, char *buf); int bdev_disk_changed(struct block_device *bdev, bool invalidate); int blk_add_partitions(struct gendisk *disk, struct block_device *bdev); int blk_drop_partitions(struct gendisk *disk, struct block_device *bdev); -extern int disk_expand_part_tbl(struct gendisk *disk, int target); -extern struct hd_struct * __must_check add_partition(struct gendisk *disk, - int partno, sector_t start, - sector_t len, int flags, - struct partition_meta_info - *info); -extern void __delete_partition(struct percpu_ref *); -extern void delete_partition(struct gendisk *, int); extern void printk_all_partitions(void); extern struct gendisk *__alloc_disk_node(int minors, int node_id); @@ -657,20 +353,6 @@ extern void blk_register_region(dev_t devt, unsigned long range, void *data); extern void blk_unregister_region(dev_t devt, unsigned long range); -extern ssize_t part_size_show(struct device *dev, - struct device_attribute *attr, char *buf); -extern ssize_t part_stat_show(struct device *dev, - struct device_attribute *attr, char *buf); -extern ssize_t part_inflight_show(struct device *dev, - struct device_attribute *attr, char *buf); -#ifdef CONFIG_FAIL_MAKE_REQUEST -extern ssize_t part_fail_show(struct device *dev, - struct device_attribute *attr, char *buf); -extern ssize_t part_fail_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t count); -#endif /* CONFIG_FAIL_MAKE_REQUEST */ - #define alloc_disk_node(minors, node_id) \ ({ \ static struct lock_class_key __key; \ @@ -689,100 +371,6 @@ extern ssize_t part_fail_store(struct device *dev, #define alloc_disk(minors) alloc_disk_node(minors, NUMA_NO_NODE) -static inline int hd_ref_init(struct hd_struct *part) -{ - if (percpu_ref_init(&part->ref, __delete_partition, 0, - GFP_KERNEL)) - return -ENOMEM; - return 0; -} - -static inline void hd_struct_get(struct hd_struct *part) -{ - percpu_ref_get(&part->ref); -} - -static inline int hd_struct_try_get(struct hd_struct *part) -{ - return percpu_ref_tryget_live(&part->ref); -} - -static inline void hd_struct_put(struct hd_struct *part) -{ - percpu_ref_put(&part->ref); -} - -static inline void hd_struct_kill(struct hd_struct *part) -{ - percpu_ref_kill(&part->ref); -} - -static inline void hd_free_part(struct hd_struct *part) -{ - free_part_stats(part); - free_part_info(part); - percpu_ref_exit(&part->ref); -} - -/* - * Any access of part->nr_sects which is not protected by partition - * bd_mutex or gendisk bdev bd_mutex, should be done using this - * accessor function. - * - * Code written along the lines of i_size_read() and i_size_write(). - * CONFIG_PREEMPTION case optimizes the case of UP kernel with preemption - * on. 
- */ -static inline sector_t part_nr_sects_read(struct hd_struct *part) -{ -#if BITS_PER_LONG==32 && defined(CONFIG_SMP) - sector_t nr_sects; - unsigned seq; - do { - seq = read_seqcount_begin(&part->nr_sects_seq); - nr_sects = part->nr_sects; - } while (read_seqcount_retry(&part->nr_sects_seq, seq)); - return nr_sects; -#elif BITS_PER_LONG==32 && defined(CONFIG_PREEMPTION) - sector_t nr_sects; - - preempt_disable(); - nr_sects = part->nr_sects; - preempt_enable(); - return nr_sects; -#else - return part->nr_sects; -#endif -} - -/* - * Should be called with mutex lock held (typically bd_mutex) of partition - * to provide mutual exlusion among writers otherwise seqcount might be - * left in wrong state leaving the readers spinning infinitely. - */ -static inline void part_nr_sects_write(struct hd_struct *part, sector_t size) -{ -#if BITS_PER_LONG==32 && defined(CONFIG_SMP) - write_seqcount_begin(&part->nr_sects_seq); - part->nr_sects = size; - write_seqcount_end(&part->nr_sects_seq); -#elif BITS_PER_LONG==32 && defined(CONFIG_PREEMPTION) - preempt_disable(); - part->nr_sects = size; - preempt_enable(); -#else - part->nr_sects = size; -#endif -} - -#if defined(CONFIG_BLK_DEV_INTEGRITY) -extern void blk_integrity_add(struct gendisk *); -extern void blk_integrity_del(struct gendisk *); -#else /* CONFIG_BLK_DEV_INTEGRITY */ -static inline void blk_integrity_add(struct gendisk *disk) { } -static inline void blk_integrity_del(struct gendisk *disk) { } -#endif /* CONFIG_BLK_DEV_INTEGRITY */ - #else /* CONFIG_BLOCK */ static inline void printk_all_partitions(void) { } diff --git a/include/linux/gfp.h b/include/linux/gfp.h index e5b817cb86e7..4aba4c86c626 100644 --- a/include/linux/gfp.h +++ b/include/linux/gfp.h @@ -124,6 +124,8 @@ struct vm_area_struct; * * Reclaim modifiers * ~~~~~~~~~~~~~~~~~ + * Please note that all the following flags are only applicable to sleepable + * allocations (e.g. %GFP_NOWAIT and %GFP_ATOMIC will ignore them). * * %__GFP_IO can start physical IO. 
* @@ -485,6 +487,12 @@ static inline void arch_free_page(struct page *page, int order) { } #ifndef HAVE_ARCH_ALLOC_PAGE static inline void arch_alloc_page(struct page *page, int order) { } #endif +#ifndef HAVE_ARCH_MAKE_PAGE_ACCESSIBLE +static inline int arch_make_page_accessible(struct page *page) +{ + return 0; +} +#endif struct page * __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, int preferred_nid, diff --git a/include/linux/gpio.h b/include/linux/gpio.h index 2157717c2136..008ad3ee56b7 100644 --- a/include/linux/gpio.h +++ b/include/linux/gpio.h @@ -102,11 +102,9 @@ void devm_gpio_free(struct device *dev, unsigned int gpio); #include <linux/kernel.h> #include <linux/types.h> #include <linux/bug.h> -#include <linux/pinctrl/pinctrl.h> struct device; struct gpio_chip; -struct pinctrl_dev; static inline bool gpio_is_valid(int number) { diff --git a/include/linux/gpio/consumer.h b/include/linux/gpio/consumer.h index bf2d017dd7b7..901aab89d025 100644 --- a/include/linux/gpio/consumer.h +++ b/include/linux/gpio/consumer.h @@ -2,9 +2,10 @@ #ifndef __LINUX_GPIO_CONSUMER_H #define __LINUX_GPIO_CONSUMER_H +#include <linux/bits.h> #include <linux/bug.h> +#include <linux/compiler_types.h> #include <linux/err.h> -#include <linux/kernel.h> struct device; @@ -156,6 +157,7 @@ int gpiod_set_raw_array_value_cansleep(unsigned int array_size, struct gpio_array *array_info, unsigned long *value_bitmap); +int gpiod_set_config(struct gpio_desc *desc, unsigned long config); int gpiod_set_debounce(struct gpio_desc *desc, unsigned debounce); int gpiod_set_transitory(struct gpio_desc *desc, bool transitory); void gpiod_toggle_active_low(struct gpio_desc *desc); @@ -189,6 +191,8 @@ struct gpio_desc *devm_fwnode_gpiod_get_index(struct device *dev, #else /* CONFIG_GPIOLIB */ +#include <linux/kernel.h> + static inline int gpiod_count(struct device *dev, const char *con_id) { return 0; @@ -470,6 +474,13 @@ static inline int gpiod_set_raw_array_value_cansleep(unsigned int array_size, return 0; } +static inline int gpiod_set_config(struct gpio_desc *desc, unsigned long config) +{ + /* GPIO can never have been requested */ + WARN_ON(desc); + return -ENOSYS; +} + static inline int gpiod_set_debounce(struct gpio_desc *desc, unsigned debounce) { /* GPIO can never have been requested */ diff --git a/include/linux/gpio/driver.h b/include/linux/gpio/driver.h index 6ef05bccc0a6..b8fc92c177eb 100644 --- a/include/linux/gpio/driver.h +++ b/include/linux/gpio/driver.h @@ -87,7 +87,7 @@ struct gpio_irq_chip { * @need_valid_mask to make these GPIO lines unavailable for * translation. */ - int (*child_to_parent_hwirq)(struct gpio_chip *chip, + int (*child_to_parent_hwirq)(struct gpio_chip *gc, unsigned int child_hwirq, unsigned int child_type, unsigned int *parent_hwirq, @@ -102,7 +102,7 @@ struct gpio_irq_chip { * variant named &gpiochip_populate_parent_fwspec_fourcell is also * available. */ - void *(*populate_parent_alloc_arg)(struct gpio_chip *chip, + void *(*populate_parent_alloc_arg)(struct gpio_chip *gc, unsigned int parent_hwirq, unsigned int parent_type); @@ -114,7 +114,7 @@ struct gpio_irq_chip { * callback. If this is not specified, then a default callback will be * provided that returns the line offset. */ - unsigned int (*child_offset_to_irq)(struct gpio_chip *chip, + unsigned int (*child_offset_to_irq)(struct gpio_chip *gc, unsigned int pin); /** @@ -209,7 +209,7 @@ struct gpio_irq_chip { * a particular driver wants to clear IRQ related registers * in order to avoid undesired events. 
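The new gpiod_set_config() consumer call above takes a single packed configuration word; assuming the pinconf-generic packing helpers (which are not part of this diff), a consumer-side sketch might look like:

        #include <linux/gpio/consumer.h>
        #include <linux/pinctrl/pinconf-generic.h>

        static int example_make_open_drain(struct gpio_desc *desc)
        {
                unsigned long config;

                /* Pack a pinconf-generic parameter/argument pair into one word */
                config = pinconf_to_config_packed(PIN_CONFIG_DRIVE_OPEN_DRAIN, 0);

                /* Forwarded to the owning gpio_chip's ->set_config() callback */
                return gpiod_set_config(desc, config);
        }
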
*/ - int (*init_hw)(struct gpio_chip *chip); + int (*init_hw)(struct gpio_chip *gc); /** * @init_valid_mask: optional routine to initialize @valid_mask, to be @@ -220,7 +220,7 @@ struct gpio_irq_chip { * then directly set some bits to "0" if they cannot be used for * interrupts. */ - void (*init_valid_mask)(struct gpio_chip *chip, + void (*init_valid_mask)(struct gpio_chip *gc, unsigned long *valid_mask, unsigned int ngpios); @@ -348,40 +348,40 @@ struct gpio_chip { struct device *parent; struct module *owner; - int (*request)(struct gpio_chip *chip, + int (*request)(struct gpio_chip *gc, unsigned offset); - void (*free)(struct gpio_chip *chip, + void (*free)(struct gpio_chip *gc, unsigned offset); - int (*get_direction)(struct gpio_chip *chip, + int (*get_direction)(struct gpio_chip *gc, unsigned offset); - int (*direction_input)(struct gpio_chip *chip, + int (*direction_input)(struct gpio_chip *gc, unsigned offset); - int (*direction_output)(struct gpio_chip *chip, + int (*direction_output)(struct gpio_chip *gc, unsigned offset, int value); - int (*get)(struct gpio_chip *chip, + int (*get)(struct gpio_chip *gc, unsigned offset); - int (*get_multiple)(struct gpio_chip *chip, + int (*get_multiple)(struct gpio_chip *gc, unsigned long *mask, unsigned long *bits); - void (*set)(struct gpio_chip *chip, + void (*set)(struct gpio_chip *gc, unsigned offset, int value); - void (*set_multiple)(struct gpio_chip *chip, + void (*set_multiple)(struct gpio_chip *gc, unsigned long *mask, unsigned long *bits); - int (*set_config)(struct gpio_chip *chip, + int (*set_config)(struct gpio_chip *gc, unsigned offset, unsigned long config); - int (*to_irq)(struct gpio_chip *chip, + int (*to_irq)(struct gpio_chip *gc, unsigned offset); void (*dbg_show)(struct seq_file *s, - struct gpio_chip *chip); + struct gpio_chip *gc); - int (*init_valid_mask)(struct gpio_chip *chip, + int (*init_valid_mask)(struct gpio_chip *gc, unsigned long *valid_mask, unsigned int ngpios); - int (*add_pin_ranges)(struct gpio_chip *chip); + int (*add_pin_ranges)(struct gpio_chip *gc); int base; u16 ngpio; @@ -458,11 +458,11 @@ struct gpio_chip { #endif /* CONFIG_OF_GPIO */ }; -extern const char *gpiochip_is_requested(struct gpio_chip *chip, +extern const char *gpiochip_is_requested(struct gpio_chip *gc, unsigned offset); /* add/remove chips */ -extern int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data, +extern int gpiochip_add_data_with_key(struct gpio_chip *gc, void *data, struct lock_class_key *lock_key, struct lock_class_key *request_key); @@ -490,43 +490,43 @@ extern int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data, * Otherwise it returns zero as a success code. 
*/ #ifdef CONFIG_LOCKDEP -#define gpiochip_add_data(chip, data) ({ \ +#define gpiochip_add_data(gc, data) ({ \ static struct lock_class_key lock_key; \ static struct lock_class_key request_key; \ - gpiochip_add_data_with_key(chip, data, &lock_key, \ + gpiochip_add_data_with_key(gc, data, &lock_key, \ &request_key); \ }) #else -#define gpiochip_add_data(chip, data) gpiochip_add_data_with_key(chip, data, NULL, NULL) +#define gpiochip_add_data(gc, data) gpiochip_add_data_with_key(gc, data, NULL, NULL) #endif /* CONFIG_LOCKDEP */ -static inline int gpiochip_add(struct gpio_chip *chip) +static inline int gpiochip_add(struct gpio_chip *gc) { - return gpiochip_add_data(chip, NULL); + return gpiochip_add_data(gc, NULL); } -extern void gpiochip_remove(struct gpio_chip *chip); -extern int devm_gpiochip_add_data(struct device *dev, struct gpio_chip *chip, +extern void gpiochip_remove(struct gpio_chip *gc); +extern int devm_gpiochip_add_data(struct device *dev, struct gpio_chip *gc, void *data); extern struct gpio_chip *gpiochip_find(void *data, - int (*match)(struct gpio_chip *chip, void *data)); + int (*match)(struct gpio_chip *gc, void *data)); -bool gpiochip_line_is_irq(struct gpio_chip *chip, unsigned int offset); -int gpiochip_reqres_irq(struct gpio_chip *chip, unsigned int offset); -void gpiochip_relres_irq(struct gpio_chip *chip, unsigned int offset); -void gpiochip_disable_irq(struct gpio_chip *chip, unsigned int offset); -void gpiochip_enable_irq(struct gpio_chip *chip, unsigned int offset); +bool gpiochip_line_is_irq(struct gpio_chip *gc, unsigned int offset); +int gpiochip_reqres_irq(struct gpio_chip *gc, unsigned int offset); +void gpiochip_relres_irq(struct gpio_chip *gc, unsigned int offset); +void gpiochip_disable_irq(struct gpio_chip *gc, unsigned int offset); +void gpiochip_enable_irq(struct gpio_chip *gc, unsigned int offset); /* Line status inquiry for drivers */ -bool gpiochip_line_is_open_drain(struct gpio_chip *chip, unsigned int offset); -bool gpiochip_line_is_open_source(struct gpio_chip *chip, unsigned int offset); +bool gpiochip_line_is_open_drain(struct gpio_chip *gc, unsigned int offset); +bool gpiochip_line_is_open_source(struct gpio_chip *gc, unsigned int offset); /* Sleep persistence inquiry for drivers */ -bool gpiochip_line_is_persistent(struct gpio_chip *chip, unsigned int offset); -bool gpiochip_line_is_valid(const struct gpio_chip *chip, unsigned int offset); +bool gpiochip_line_is_persistent(struct gpio_chip *gc, unsigned int offset); +bool gpiochip_line_is_valid(const struct gpio_chip *gc, unsigned int offset); /* get driver data */ -void *gpiochip_get_data(struct gpio_chip *chip); +void *gpiochip_get_data(struct gpio_chip *gc); struct bgpio_pdata { const char *label; @@ -536,23 +536,23 @@ struct bgpio_pdata { #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY -void *gpiochip_populate_parent_fwspec_twocell(struct gpio_chip *chip, +void *gpiochip_populate_parent_fwspec_twocell(struct gpio_chip *gc, unsigned int parent_hwirq, unsigned int parent_type); -void *gpiochip_populate_parent_fwspec_fourcell(struct gpio_chip *chip, +void *gpiochip_populate_parent_fwspec_fourcell(struct gpio_chip *gc, unsigned int parent_hwirq, unsigned int parent_type); #else -static inline void *gpiochip_populate_parent_fwspec_twocell(struct gpio_chip *chip, +static inline void *gpiochip_populate_parent_fwspec_twocell(struct gpio_chip *gc, unsigned int parent_hwirq, unsigned int parent_type) { return NULL; } -static inline void *gpiochip_populate_parent_fwspec_fourcell(struct gpio_chip *chip, +static 
inline void *gpiochip_populate_parent_fwspec_fourcell(struct gpio_chip *gc, unsigned int parent_hwirq, unsigned int parent_type) { @@ -572,6 +572,7 @@ int bgpio_init(struct gpio_chip *gc, struct device *dev, #define BGPIOF_BIG_ENDIAN_BYTE_ORDER BIT(3) #define BGPIOF_READ_OUTPUT_REG_SET BIT(4) /* reg_set stores output value */ #define BGPIOF_NO_OUTPUT BIT(5) /* only input */ +#define BGPIOF_NO_SET_ON_INPUT BIT(6) int gpiochip_irq_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hwirq); @@ -582,11 +583,11 @@ int gpiochip_irq_domain_activate(struct irq_domain *domain, void gpiochip_irq_domain_deactivate(struct irq_domain *domain, struct irq_data *data); -void gpiochip_set_nested_irqchip(struct gpio_chip *gpiochip, +void gpiochip_set_nested_irqchip(struct gpio_chip *gc, struct irq_chip *irqchip, unsigned int parent_irq); -int gpiochip_irqchip_add_key(struct gpio_chip *gpiochip, +int gpiochip_irqchip_add_key(struct gpio_chip *gc, struct irq_chip *irqchip, unsigned int first_irq, irq_flow_handler_t handler, @@ -595,7 +596,7 @@ int gpiochip_irqchip_add_key(struct gpio_chip *gpiochip, struct lock_class_key *lock_key, struct lock_class_key *request_key); -bool gpiochip_irqchip_irq_valid(const struct gpio_chip *gpiochip, +bool gpiochip_irqchip_irq_valid(const struct gpio_chip *gc, unsigned int offset); #ifdef CONFIG_LOCKDEP @@ -606,7 +607,7 @@ bool gpiochip_irqchip_irq_valid(const struct gpio_chip *gpiochip, * boilerplate static inlines provides such a key for each * unique instance. */ -static inline int gpiochip_irqchip_add(struct gpio_chip *gpiochip, +static inline int gpiochip_irqchip_add(struct gpio_chip *gc, struct irq_chip *irqchip, unsigned int first_irq, irq_flow_handler_t handler, @@ -615,12 +616,12 @@ static inline int gpiochip_irqchip_add(struct gpio_chip *gpiochip, static struct lock_class_key lock_key; static struct lock_class_key request_key; - return gpiochip_irqchip_add_key(gpiochip, irqchip, first_irq, + return gpiochip_irqchip_add_key(gc, irqchip, first_irq, handler, type, false, &lock_key, &request_key); } -static inline int gpiochip_irqchip_add_nested(struct gpio_chip *gpiochip, +static inline int gpiochip_irqchip_add_nested(struct gpio_chip *gc, struct irq_chip *irqchip, unsigned int first_irq, irq_flow_handler_t handler, @@ -630,35 +631,35 @@ static inline int gpiochip_irqchip_add_nested(struct gpio_chip *gpiochip, static struct lock_class_key lock_key; static struct lock_class_key request_key; - return gpiochip_irqchip_add_key(gpiochip, irqchip, first_irq, + return gpiochip_irqchip_add_key(gc, irqchip, first_irq, handler, type, true, &lock_key, &request_key); } #else /* ! 
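BGPIOF_NO_SET_ON_INPUT above is a new flag for the generic MMIO GPIO helper; as the name suggests, it asks the helper not to write the set register while a line is still configured as an input. A hedged sketch of a bgpio_init() call requesting it (the register layout and the probe helper name are assumptions):

        #include <linux/gpio/driver.h>

        /* Hypothetical MMIO block: data, set, clear and direction-out registers. */
        static int example_probe_mmio_gpio(struct device *dev, struct gpio_chip *gc,
                                           void __iomem *base)
        {
                return bgpio_init(gc, dev, 4,
                                  base + 0x00,  /* dat */
                                  base + 0x04,  /* set */
                                  base + 0x08,  /* clr */
                                  base + 0x0c,  /* dirout */
                                  NULL,         /* dirin */
                                  BGPIOF_NO_SET_ON_INPUT);
        }
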
CONFIG_LOCKDEP */ -static inline int gpiochip_irqchip_add(struct gpio_chip *gpiochip, +static inline int gpiochip_irqchip_add(struct gpio_chip *gc, struct irq_chip *irqchip, unsigned int first_irq, irq_flow_handler_t handler, unsigned int type) { - return gpiochip_irqchip_add_key(gpiochip, irqchip, first_irq, + return gpiochip_irqchip_add_key(gc, irqchip, first_irq, handler, type, false, NULL, NULL); } -static inline int gpiochip_irqchip_add_nested(struct gpio_chip *gpiochip, +static inline int gpiochip_irqchip_add_nested(struct gpio_chip *gc, struct irq_chip *irqchip, unsigned int first_irq, irq_flow_handler_t handler, unsigned int type) { - return gpiochip_irqchip_add_key(gpiochip, irqchip, first_irq, + return gpiochip_irqchip_add_key(gc, irqchip, first_irq, handler, type, true, NULL, NULL); } #endif /* CONFIG_LOCKDEP */ -int gpiochip_generic_request(struct gpio_chip *chip, unsigned offset); -void gpiochip_generic_free(struct gpio_chip *chip, unsigned offset); -int gpiochip_generic_config(struct gpio_chip *chip, unsigned offset, +int gpiochip_generic_request(struct gpio_chip *gc, unsigned offset); +void gpiochip_generic_free(struct gpio_chip *gc, unsigned offset); +int gpiochip_generic_config(struct gpio_chip *gc, unsigned offset, unsigned long config); /** @@ -675,25 +676,25 @@ struct gpio_pin_range { #ifdef CONFIG_PINCTRL -int gpiochip_add_pin_range(struct gpio_chip *chip, const char *pinctl_name, +int gpiochip_add_pin_range(struct gpio_chip *gc, const char *pinctl_name, unsigned int gpio_offset, unsigned int pin_offset, unsigned int npins); -int gpiochip_add_pingroup_range(struct gpio_chip *chip, +int gpiochip_add_pingroup_range(struct gpio_chip *gc, struct pinctrl_dev *pctldev, unsigned int gpio_offset, const char *pin_group); -void gpiochip_remove_pin_ranges(struct gpio_chip *chip); +void gpiochip_remove_pin_ranges(struct gpio_chip *gc); #else /* ! 
CONFIG_PINCTRL */ static inline int -gpiochip_add_pin_range(struct gpio_chip *chip, const char *pinctl_name, +gpiochip_add_pin_range(struct gpio_chip *gc, const char *pinctl_name, unsigned int gpio_offset, unsigned int pin_offset, unsigned int npins) { return 0; } static inline int -gpiochip_add_pingroup_range(struct gpio_chip *chip, +gpiochip_add_pingroup_range(struct gpio_chip *gc, struct pinctrl_dev *pctldev, unsigned int gpio_offset, const char *pin_group) { @@ -701,27 +702,27 @@ gpiochip_add_pingroup_range(struct gpio_chip *chip, } static inline void -gpiochip_remove_pin_ranges(struct gpio_chip *chip) +gpiochip_remove_pin_ranges(struct gpio_chip *gc) { } #endif /* CONFIG_PINCTRL */ -struct gpio_desc *gpiochip_request_own_desc(struct gpio_chip *chip, +struct gpio_desc *gpiochip_request_own_desc(struct gpio_chip *gc, unsigned int hwnum, const char *label, enum gpio_lookup_flags lflags, enum gpiod_flags dflags); void gpiochip_free_own_desc(struct gpio_desc *desc); -void devprop_gpiochip_set_names(struct gpio_chip *chip, +void devprop_gpiochip_set_names(struct gpio_chip *gc, const struct fwnode_handle *fwnode); #ifdef CONFIG_GPIOLIB /* lock/unlock as IRQ */ -int gpiochip_lock_as_irq(struct gpio_chip *chip, unsigned int offset); -void gpiochip_unlock_as_irq(struct gpio_chip *chip, unsigned int offset); +int gpiochip_lock_as_irq(struct gpio_chip *gc, unsigned int offset); +void gpiochip_unlock_as_irq(struct gpio_chip *gc, unsigned int offset); struct gpio_chip *gpiod_to_chip(const struct gpio_desc *desc); @@ -735,14 +736,14 @@ static inline struct gpio_chip *gpiod_to_chip(const struct gpio_desc *desc) return ERR_PTR(-ENODEV); } -static inline int gpiochip_lock_as_irq(struct gpio_chip *chip, +static inline int gpiochip_lock_as_irq(struct gpio_chip *gc, unsigned int offset) { WARN_ON(1); return -EINVAL; } -static inline void gpiochip_unlock_as_irq(struct gpio_chip *chip, +static inline void gpiochip_unlock_as_irq(struct gpio_chip *gc, unsigned int offset) { WARN_ON(1); diff --git a/include/linux/hardirq.h b/include/linux/hardirq.h index da0af631ded5..7c8b82f69288 100644 --- a/include/linux/hardirq.h +++ b/include/linux/hardirq.h @@ -37,7 +37,7 @@ extern void rcu_nmi_exit(void); do { \ account_irq_enter_time(current); \ preempt_count_add(HARDIRQ_OFFSET); \ - trace_hardirq_enter(); \ + lockdep_hardirq_enter(); \ } while (0) /* @@ -50,7 +50,7 @@ extern void irq_enter(void); */ #define __irq_exit() \ do { \ - trace_hardirq_exit(); \ + lockdep_hardirq_exit(); \ account_irq_exit_time(current); \ preempt_count_sub(HARDIRQ_OFFSET); \ } while (0) @@ -74,12 +74,12 @@ extern void irq_exit(void); BUG_ON(in_nmi()); \ preempt_count_add(NMI_OFFSET + HARDIRQ_OFFSET); \ rcu_nmi_enter(); \ - trace_hardirq_enter(); \ + lockdep_hardirq_enter(); \ } while (0) #define nmi_exit() \ do { \ - trace_hardirq_exit(); \ + lockdep_hardirq_exit(); \ rcu_nmi_exit(); \ BUG_ON(!in_nmi()); \ preempt_count_sub(NMI_OFFSET + HARDIRQ_OFFSET); \ diff --git a/include/linux/hashtable.h b/include/linux/hashtable.h index 417d2c4bc60d..78b6ea5fa8ba 100644 --- a/include/linux/hashtable.h +++ b/include/linux/hashtable.h @@ -145,7 +145,7 @@ static inline void hash_del_rcu(struct hlist_node *node) * hash entry * @name: hashtable to iterate * @bkt: integer to use as bucket loop cursor - * @tmp: a &struct used for temporary storage + * @tmp: a &struct hlist_node used for temporary storage * @obj: the type * to use as a loop cursor for each entry * @member: the name of the hlist_node within the struct */ @@ -197,7 +197,7 @@ static inline void 
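The hashtable.h kernel-doc fix above pins down that @tmp is a struct hlist_node cursor; for reference, a self-contained sketch of the safe-iteration pattern the comment describes (types and names invented for illustration):

        #include <linux/hashtable.h>
        #include <linux/slab.h>

        struct example_entry {
                int key;
                struct hlist_node node;
        };

        static DEFINE_HASHTABLE(example_table, 4);      /* 16 buckets */

        static void example_flush_table(void)
        {
                struct example_entry *entry;
                struct hlist_node *tmp;         /* the @tmp cursor from the fixed kernel-doc */
                int bkt;

                hash_for_each_safe(example_table, bkt, tmp, entry, node) {
                        hash_del(&entry->node);
                        kfree(entry);
                }
        }
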
hash_del_rcu(struct hlist_node *node) * same bucket safe against removals * @name: hashtable to iterate * @obj: the type * to use as a loop cursor for each entry - * @tmp: a &struct used for temporary storage + * @tmp: a &struct hlist_node used for temporary storage * @member: the name of the hlist_node within the struct * @key: the key of the objects to iterate over */ diff --git a/include/linux/hdmi.h b/include/linux/hdmi.h index 9918a6c910c5..9613d796cfb1 100644 --- a/include/linux/hdmi.h +++ b/include/linux/hdmi.h @@ -207,7 +207,7 @@ struct hdmi_drm_infoframe { u16 max_fall; }; -int hdmi_avi_infoframe_init(struct hdmi_avi_infoframe *frame); +void hdmi_avi_infoframe_init(struct hdmi_avi_infoframe *frame); ssize_t hdmi_avi_infoframe_pack(struct hdmi_avi_infoframe *frame, void *buffer, size_t size); ssize_t hdmi_avi_infoframe_pack_only(const struct hdmi_avi_infoframe *frame, diff --git a/include/linux/hid.h b/include/linux/hid.h index cd41f209043f..875f71132b14 100644 --- a/include/linux/hid.h +++ b/include/linux/hid.h @@ -492,7 +492,7 @@ struct hid_report_enum { }; #define HID_MIN_BUFFER_SIZE 64 /* make sure there is at least a packet size of space */ -#define HID_MAX_BUFFER_SIZE 4096 /* 4kb */ +#define HID_MAX_BUFFER_SIZE 8192 /* 8kb */ #define HID_CONTROL_FIFO_SIZE 256 /* to init devices with >100 reports */ #define HID_OUTPUT_FIFO_SIZE 64 diff --git a/include/linux/hmm.h b/include/linux/hmm.h index ddf9f7144c43..7475051100c7 100644 --- a/include/linux/hmm.h +++ b/include/linux/hmm.h @@ -3,58 +3,8 @@ * Copyright 2013 Red Hat Inc. * * Authors: Jérôme Glisse <jglisse@redhat.com> - */ -/* - * Heterogeneous Memory Management (HMM) - * - * See Documentation/vm/hmm.rst for reasons and overview of what HMM is and it - * is for. Here we focus on the HMM API description, with some explanation of - * the underlying implementation. - * - * Short description: HMM provides a set of helpers to share a virtual address - * space between CPU and a device, so that the device can access any valid - * address of the process (while still obeying memory protection). HMM also - * provides helpers to migrate process memory to device memory, and back. Each - * set of functionality (address space mirroring, and migration to and from - * device memory) can be used independently of the other. - * - * - * HMM address space mirroring API: - * - * Use HMM address space mirroring if you want to mirror a range of the CPU - * page tables of a process into a device page table. Here, "mirror" means "keep - * synchronized". Prerequisites: the device must provide the ability to write- - * protect its page tables (at PAGE_SIZE granularity), and must be able to - * recover from the resulting potential page faults. - * - * HMM guarantees that at any point in time, a given virtual address points to - * either the same memory in both CPU and device page tables (that is: CPU and - * device page tables each point to the same pages), or that one page table (CPU - * or device) points to no entry, while the other still points to the old page - * for the address. The latter case happens when the CPU page table update - * happens first, and then the update is mirrored over to the device page table. - * This does not cause any issue, because the CPU page table cannot start - * pointing to a new page until the device page table is invalidated. - * - * HMM uses mmu_notifiers to monitor the CPU page tables, and forwards any - * updates to each device driver that has registered a mirror. 
It also provides - * some API calls to help with taking a snapshot of the CPU page table, and to - * synchronize with any updates that might happen concurrently. * - * - * HMM migration to and from device memory: - * - * HMM provides a set of helpers to hotplug device memory as ZONE_DEVICE, with - * a new MEMORY_DEVICE_PRIVATE type. This provides a struct page for each page - * of the device memory, and allows the device driver to manage its memory - * using those struct pages. Having struct pages for device memory makes - * migration easier. Because that memory is not addressable by the CPU it must - * never be pinned to the device; in other words, any CPU page fault can always - * cause the device memory to be migrated (copied/moved) back to regular memory. - * - * A new migrate helper (migrate_vma()) has been added (see mm/migrate.c) that - * allows use of a device DMA engine to perform the copy operation between - * regular system memory and device memory. + * See Documentation/vm/hmm.rst for reasons and overview of what HMM is. */ #ifndef LINUX_HMM_H #define LINUX_HMM_H @@ -74,7 +24,6 @@ * Flags: * HMM_PFN_VALID: pfn is valid. It has, at least, read permission. * HMM_PFN_WRITE: CPU page table has write permission set - * HMM_PFN_DEVICE_PRIVATE: private device memory (ZONE_DEVICE) * * The driver provides a flags array for mapping page protections to device * PTE bits. If the driver valid bit for an entry is bit 3, @@ -86,7 +35,6 @@ enum hmm_pfn_flag_e { HMM_PFN_VALID = 0, HMM_PFN_WRITE, - HMM_PFN_DEVICE_PRIVATE, HMM_PFN_FLAG_MAX }; @@ -122,9 +70,6 @@ enum hmm_pfn_value_e { * * @notifier: a mmu_interval_notifier that includes the start/end * @notifier_seq: result of mmu_interval_read_begin() - * @hmm: the core HMM structure this range is active against - * @vma: the vm area struct for the range - * @list: all range lock are on a list * @start: range virtual start address (inclusive) * @end: range virtual end address (exclusive) * @pfns: array of pfns (big enough for the range) @@ -132,8 +77,8 @@ enum hmm_pfn_value_e { * @values: pfn value for some special case (none, special, error, ...) * @default_flags: default flags for the range (write, read, ... 
see hmm doc) * @pfn_flags_mask: allows to mask pfn flags so that only default_flags matter - * @pfn_shifts: pfn shift value (should be <= PAGE_SHIFT) - * @valid: pfns array did not change since it has been fill by an HMM function + * @pfn_shift: pfn shift value (should be <= PAGE_SHIFT) + * @dev_private_owner: owner of device private pages */ struct hmm_range { struct mmu_interval_notifier *notifier; @@ -146,6 +91,7 @@ struct hmm_range { uint64_t default_flags; uint64_t pfn_flags_mask; uint8_t pfn_shift; + void *dev_private_owner; }; /* @@ -172,70 +118,9 @@ static inline struct page *hmm_device_entry_to_page(const struct hmm_range *rang } /* - * hmm_device_entry_to_pfn() - return pfn value store in a device entry - * @range: range use to decode device entry value - * @entry: device entry to extract pfn from - * Return: pfn value if device entry is valid, -1UL otherwise - */ -static inline unsigned long -hmm_device_entry_to_pfn(const struct hmm_range *range, uint64_t pfn) -{ - if (pfn == range->values[HMM_PFN_NONE]) - return -1UL; - if (pfn == range->values[HMM_PFN_ERROR]) - return -1UL; - if (pfn == range->values[HMM_PFN_SPECIAL]) - return -1UL; - if (!(pfn & range->flags[HMM_PFN_VALID])) - return -1UL; - return (pfn >> range->pfn_shift); -} - -/* - * hmm_device_entry_from_page() - create a valid device entry for a page - * @range: range use to encode HMM pfn value - * @page: page for which to create the device entry - * Return: valid device entry for the page - */ -static inline uint64_t hmm_device_entry_from_page(const struct hmm_range *range, - struct page *page) -{ - return (page_to_pfn(page) << range->pfn_shift) | - range->flags[HMM_PFN_VALID]; -} - -/* - * hmm_device_entry_from_pfn() - create a valid device entry value from pfn - * @range: range use to encode HMM pfn value - * @pfn: pfn value for which to create the device entry - * Return: valid device entry for the pfn - */ -static inline uint64_t hmm_device_entry_from_pfn(const struct hmm_range *range, - unsigned long pfn) -{ - return (pfn << range->pfn_shift) | - range->flags[HMM_PFN_VALID]; -} - -/* - * Retry fault if non-blocking, drop mmap_sem and return -EAGAIN in that case. - */ -#define HMM_FAULT_ALLOW_RETRY (1 << 0) - -/* Don't fault in missing PTEs, just snapshot the current state. */ -#define HMM_FAULT_SNAPSHOT (1 << 1) - -#ifdef CONFIG_HMM_MIRROR -/* * Please see Documentation/vm/hmm.rst for how to use the range API. 
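With the flags argument dropped, hmm_range_fault() now takes only the range; a hedged sketch of the invalidation-retry loop described in Documentation/vm/hmm.rst (the locking of the driver's own page table and the device update are elided as comments, and the function name is invented):

        #include <linux/hmm.h>
        #include <linux/mmu_notifier.h>
        #include <linux/sched/mm.h>

        static long example_fault_range(struct hmm_range *range, struct mm_struct *mm)
        {
                long ret;

        retry:
                range->notifier_seq = mmu_interval_read_begin(range->notifier);

                down_read(&mm->mmap_sem);
                ret = hmm_range_fault(range);           /* flags argument is gone */
                up_read(&mm->mmap_sem);

                if (ret == -EBUSY)
                        goto retry;
                if (ret < 0)
                        return ret;

                /* Take the driver's page-table lock, then: */
                if (mmu_interval_read_retry(range->notifier, range->notifier_seq))
                        goto retry;             /* range was invalidated, start over */

                /* ... program range->pfns into the device, drop the lock ... */
                return 0;
        }
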
*/ -long hmm_range_fault(struct hmm_range *range, unsigned int flags); -#else -static inline long hmm_range_fault(struct hmm_range *range, unsigned int flags) -{ - return -EOPNOTSUPP; -} -#endif +long hmm_range_fault(struct hmm_range *range); /* * HMM_RANGE_DEFAULT_TIMEOUT - default timeout (ms) when waiting for a range diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h index 5aca3d1bdb32..cfbb0a87c5f0 100644 --- a/include/linux/huge_mm.h +++ b/include/linux/huge_mm.h @@ -46,9 +46,46 @@ extern bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr, pmd_t *old_pmd, pmd_t *new_pmd); extern int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, unsigned long addr, pgprot_t newprot, - int prot_numa); -vm_fault_t vmf_insert_pfn_pmd(struct vm_fault *vmf, pfn_t pfn, bool write); -vm_fault_t vmf_insert_pfn_pud(struct vm_fault *vmf, pfn_t pfn, bool write); + unsigned long cp_flags); +vm_fault_t vmf_insert_pfn_pmd_prot(struct vm_fault *vmf, pfn_t pfn, + pgprot_t pgprot, bool write); + +/** + * vmf_insert_pfn_pmd - insert a pmd size pfn + * @vmf: Structure describing the fault + * @pfn: pfn to insert + * @pgprot: page protection to use + * @write: whether it's a write fault + * + * Insert a pmd size pfn. See vmf_insert_pfn() for additional info. + * + * Return: vm_fault_t value. + */ +static inline vm_fault_t vmf_insert_pfn_pmd(struct vm_fault *vmf, pfn_t pfn, + bool write) +{ + return vmf_insert_pfn_pmd_prot(vmf, pfn, vmf->vma->vm_page_prot, write); +} +vm_fault_t vmf_insert_pfn_pud_prot(struct vm_fault *vmf, pfn_t pfn, + pgprot_t pgprot, bool write); + +/** + * vmf_insert_pfn_pud - insert a pud size pfn + * @vmf: Structure describing the fault + * @pfn: pfn to insert + * @pgprot: page protection to use + * @write: whether it's a write fault + * + * Insert a pud size pfn. See vmf_insert_pfn() for additional info. + * + * Return: vm_fault_t value. + */ +static inline vm_fault_t vmf_insert_pfn_pud(struct vm_fault *vmf, pfn_t pfn, + bool write) +{ + return vmf_insert_pfn_pud_prot(vmf, pfn, vmf->vma->vm_page_prot, write); +} + enum transparent_hugepage_flag { TRANSPARENT_HUGEPAGE_FLAG, TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, @@ -87,8 +124,6 @@ extern struct kobj_attribute shmem_enabled_attr; #define HPAGE_PUD_SIZE ((1UL) << HPAGE_PUD_SHIFT) #define HPAGE_PUD_MASK (~(HPAGE_PUD_SIZE - 1)) -extern bool is_vma_temporary_stack(struct vm_area_struct *vma); - extern unsigned long transparent_hugepage_flags; /* @@ -100,7 +135,7 @@ static inline bool __transparent_hugepage_enabled(struct vm_area_struct *vma) if (vma->vm_flags & VM_NOHUGEPAGE) return false; - if (is_vma_temporary_stack(vma)) + if (vma_is_temporary_stack(vma)) return false; if (test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags)) @@ -289,7 +324,11 @@ static inline struct list_head *page_deferred_list(struct page *page) #define HPAGE_PUD_MASK ({ BUILD_BUG(); 0; }) #define HPAGE_PUD_SIZE ({ BUILD_BUG(); 0; }) -#define hpage_nr_pages(x) 1 +static inline int hpage_nr_pages(struct page *page) +{ + VM_BUG_ON_PAGE(PageTail(page), page); + return 1; +} static inline bool __transparent_hugepage_enabled(struct vm_area_struct *vma) { diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h index 1e897e4168ac..5ea05879a0a9 100644 --- a/include/linux/hugetlb.h +++ b/include/linux/hugetlb.h @@ -46,7 +46,52 @@ struct resv_map { long adds_in_progress; struct list_head region_cache; long region_cache_count; +#ifdef CONFIG_CGROUP_HUGETLB + /* + * On private mappings, the counter to uncharge reservations is stored + * here. 
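The _prot variants above let a caller supply an explicit pgprot_t, while the existing names become thin wrappers that take it from the VMA; a hedged sketch of a DAX-style huge_fault handler using the unchanged wrapper (how the pfn is resolved is driver specific and elided):

        #include <linux/mm.h>
        #include <linux/huge_mm.h>
        #include <linux/pfn_t.h>

        /* Illustrative ->huge_fault() body; the pfn lookup is assumed. */
        static vm_fault_t example_huge_fault(struct vm_fault *vmf, pfn_t pfn)
        {
                bool write = vmf->flags & FAULT_FLAG_WRITE;

                /*
                 * After this patch this expands to
                 * vmf_insert_pfn_pmd_prot(vmf, pfn, vmf->vma->vm_page_prot, write).
                 */
                return vmf_insert_pfn_pmd(vmf, pfn, write);
        }
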
If these fields are 0, then either the mapping is shared, or + * cgroup accounting is disabled for this resv_map. + */ + struct page_counter *reservation_counter; + unsigned long pages_per_hpage; + struct cgroup_subsys_state *css; +#endif +}; + +/* + * Region tracking -- allows tracking of reservations and instantiated pages + * across the pages in a mapping. + * + * The region data structures are embedded into a resv_map and protected + * by a resv_map's lock. The set of regions within the resv_map represent + * reservations for huge pages, or huge pages that have already been + * instantiated within the map. The from and to elements are huge page + * indicies into the associated mapping. from indicates the starting index + * of the region. to represents the first index past the end of the region. + * + * For example, a file region structure with from == 0 and to == 4 represents + * four huge pages in a mapping. It is important to note that the to element + * represents the first element past the end of the region. This is used in + * arithmetic as 4(to) - 0(from) = 4 huge pages in the region. + * + * Interval notation of the form [from, to) will be used to indicate that + * the endpoint from is inclusive and to is exclusive. + */ +struct file_region { + struct list_head link; + long from; + long to; +#ifdef CONFIG_CGROUP_HUGETLB + /* + * On shared mappings, each reserved region appears as a struct + * file_region in resv_map. These fields hold the info needed to + * uncharge each reservation. + */ + struct page_counter *reservation_counter; + struct cgroup_subsys_state *css; +#endif }; + extern struct resv_map *resv_map_alloc(void); void resv_map_release(struct kref *ref); @@ -109,6 +154,8 @@ u32 hugetlb_fault_mutex_hash(struct address_space *mapping, pgoff_t idx); pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud); +struct address_space *hugetlb_page_mapping_lock_write(struct page *hpage); + extern int sysctl_hugetlb_shm_group; extern struct list_head huge_boot_pages; @@ -151,6 +198,12 @@ static inline unsigned long hugetlb_total_pages(void) return 0; } +static inline struct address_space *hugetlb_page_mapping_lock_write( + struct page *hpage) +{ + return NULL; +} + static inline int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep) { @@ -390,7 +443,10 @@ static inline bool is_file_hugepages(struct file *file) return is_file_shm_hugepages(file); } - +static inline struct hstate *hstate_inode(struct inode *i) +{ + return HUGETLBFS_SB(i->i_sb)->hstate; +} #else /* !CONFIG_HUGETLBFS */ #define is_file_hugepages(file) false @@ -402,6 +458,10 @@ hugetlb_file_setup(const char *name, size_t size, vm_flags_t acctflag, return ERR_PTR(-ENOSYS); } +static inline struct hstate *hstate_inode(struct inode *i) +{ + return NULL; +} #endif /* !CONFIG_HUGETLBFS */ #ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA @@ -432,8 +492,8 @@ struct hstate { unsigned int surplus_huge_pages_node[MAX_NUMNODES]; #ifdef CONFIG_CGROUP_HUGETLB /* cgroup control files */ - struct cftype cgroup_files_dfl[5]; - struct cftype cgroup_files_legacy[5]; + struct cftype cgroup_files_dfl[7]; + struct cftype cgroup_files_legacy[9]; #endif char name[HSTATE_NAME_LEN]; }; @@ -472,11 +532,6 @@ extern unsigned int default_hstate_idx; #define default_hstate (hstates[default_hstate_idx]) -static inline struct hstate *hstate_inode(struct inode *i) -{ - return HUGETLBFS_SB(i->i_sb)->hstate; -} - static inline struct hstate *hstate_file(struct file *f) { return hstate_inode(file_inode(f)); @@ -729,11 
+784,6 @@ static inline struct hstate *hstate_vma(struct vm_area_struct *vma) return NULL; } -static inline struct hstate *hstate_inode(struct inode *i) -{ - return NULL; -} - static inline struct hstate *page_hstate(struct page *page) { return NULL; diff --git a/include/linux/hugetlb_cgroup.h b/include/linux/hugetlb_cgroup.h index 063962f6dfc6..2ad6e92f124a 100644 --- a/include/linux/hugetlb_cgroup.h +++ b/include/linux/hugetlb_cgroup.h @@ -18,34 +18,96 @@ #include <linux/mmdebug.h> struct hugetlb_cgroup; +struct resv_map; +struct file_region; + /* * Minimum page order trackable by hugetlb cgroup. - * At least 3 pages are necessary for all the tracking information. + * At least 4 pages are necessary for all the tracking information. + * The second tail page (hpage[2]) is the fault usage cgroup. + * The third tail page (hpage[3]) is the reservation usage cgroup. */ #define HUGETLB_CGROUP_MIN_ORDER 2 #ifdef CONFIG_CGROUP_HUGETLB +enum hugetlb_memory_event { + HUGETLB_MAX, + HUGETLB_NR_MEMORY_EVENTS, +}; -static inline struct hugetlb_cgroup *hugetlb_cgroup_from_page(struct page *page) +struct hugetlb_cgroup { + struct cgroup_subsys_state css; + + /* + * the counter to account for hugepages from hugetlb. + */ + struct page_counter hugepage[HUGE_MAX_HSTATE]; + + /* + * the counter to account for hugepage reservations from hugetlb. + */ + struct page_counter rsvd_hugepage[HUGE_MAX_HSTATE]; + + atomic_long_t events[HUGE_MAX_HSTATE][HUGETLB_NR_MEMORY_EVENTS]; + atomic_long_t events_local[HUGE_MAX_HSTATE][HUGETLB_NR_MEMORY_EVENTS]; + + /* Handle for "hugetlb.events" */ + struct cgroup_file events_file[HUGE_MAX_HSTATE]; + + /* Handle for "hugetlb.events.local" */ + struct cgroup_file events_local_file[HUGE_MAX_HSTATE]; +}; + +static inline struct hugetlb_cgroup * +__hugetlb_cgroup_from_page(struct page *page, bool rsvd) { VM_BUG_ON_PAGE(!PageHuge(page), page); if (compound_order(page) < HUGETLB_CGROUP_MIN_ORDER) return NULL; - return (struct hugetlb_cgroup *)page[2].private; + if (rsvd) + return (struct hugetlb_cgroup *)page[3].private; + else + return (struct hugetlb_cgroup *)page[2].private; +} + +static inline struct hugetlb_cgroup *hugetlb_cgroup_from_page(struct page *page) +{ + return __hugetlb_cgroup_from_page(page, false); +} + +static inline struct hugetlb_cgroup * +hugetlb_cgroup_from_page_rsvd(struct page *page) +{ + return __hugetlb_cgroup_from_page(page, true); } -static inline -int set_hugetlb_cgroup(struct page *page, struct hugetlb_cgroup *h_cg) +static inline int __set_hugetlb_cgroup(struct page *page, + struct hugetlb_cgroup *h_cg, bool rsvd) { VM_BUG_ON_PAGE(!PageHuge(page), page); if (compound_order(page) < HUGETLB_CGROUP_MIN_ORDER) return -1; - page[2].private = (unsigned long)h_cg; + if (rsvd) + page[3].private = (unsigned long)h_cg; + else + page[2].private = (unsigned long)h_cg; return 0; } +static inline int set_hugetlb_cgroup(struct page *page, + struct hugetlb_cgroup *h_cg) +{ + return __set_hugetlb_cgroup(page, h_cg, false); +} + +static inline int set_hugetlb_cgroup_rsvd(struct page *page, + struct hugetlb_cgroup *h_cg) +{ + return __set_hugetlb_cgroup(page, h_cg, true); +} + static inline bool hugetlb_cgroup_disabled(void) { return !cgroup_subsys_enabled(hugetlb_cgrp_subsys); @@ -53,25 +115,67 @@ static inline bool hugetlb_cgroup_disabled(void) extern int hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages, struct hugetlb_cgroup **ptr); +extern int hugetlb_cgroup_charge_cgroup_rsvd(int idx, unsigned long nr_pages, + struct hugetlb_cgroup **ptr); extern void 
hugetlb_cgroup_commit_charge(int idx, unsigned long nr_pages, struct hugetlb_cgroup *h_cg, struct page *page); +extern void hugetlb_cgroup_commit_charge_rsvd(int idx, unsigned long nr_pages, + struct hugetlb_cgroup *h_cg, + struct page *page); extern void hugetlb_cgroup_uncharge_page(int idx, unsigned long nr_pages, struct page *page); +extern void hugetlb_cgroup_uncharge_page_rsvd(int idx, unsigned long nr_pages, + struct page *page); + extern void hugetlb_cgroup_uncharge_cgroup(int idx, unsigned long nr_pages, struct hugetlb_cgroup *h_cg); +extern void hugetlb_cgroup_uncharge_cgroup_rsvd(int idx, unsigned long nr_pages, + struct hugetlb_cgroup *h_cg); +extern void hugetlb_cgroup_uncharge_counter(struct resv_map *resv, + unsigned long start, + unsigned long end); + +extern void hugetlb_cgroup_uncharge_file_region(struct resv_map *resv, + struct file_region *rg, + unsigned long nr_pages); + extern void hugetlb_cgroup_file_init(void) __init; extern void hugetlb_cgroup_migrate(struct page *oldhpage, struct page *newhpage); #else +static inline void hugetlb_cgroup_uncharge_file_region(struct resv_map *resv, + struct file_region *rg, + unsigned long nr_pages) +{ +} + static inline struct hugetlb_cgroup *hugetlb_cgroup_from_page(struct page *page) { return NULL; } -static inline -int set_hugetlb_cgroup(struct page *page, struct hugetlb_cgroup *h_cg) +static inline struct hugetlb_cgroup * +hugetlb_cgroup_from_page_resv(struct page *page) +{ + return NULL; +} + +static inline struct hugetlb_cgroup * +hugetlb_cgroup_from_page_rsvd(struct page *page) +{ + return NULL; +} + +static inline int set_hugetlb_cgroup(struct page *page, + struct hugetlb_cgroup *h_cg) +{ + return 0; +} + +static inline int set_hugetlb_cgroup_rsvd(struct page *page, + struct hugetlb_cgroup *h_cg) { return 0; } @@ -81,28 +185,57 @@ static inline bool hugetlb_cgroup_disabled(void) return true; } -static inline int -hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages, - struct hugetlb_cgroup **ptr) +static inline int hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages, + struct hugetlb_cgroup **ptr) { return 0; } -static inline void -hugetlb_cgroup_commit_charge(int idx, unsigned long nr_pages, - struct hugetlb_cgroup *h_cg, - struct page *page) +static inline int hugetlb_cgroup_charge_cgroup_rsvd(int idx, + unsigned long nr_pages, + struct hugetlb_cgroup **ptr) +{ + return 0; +} + +static inline void hugetlb_cgroup_commit_charge(int idx, unsigned long nr_pages, + struct hugetlb_cgroup *h_cg, + struct page *page) { } static inline void -hugetlb_cgroup_uncharge_page(int idx, unsigned long nr_pages, struct page *page) +hugetlb_cgroup_commit_charge_rsvd(int idx, unsigned long nr_pages, + struct hugetlb_cgroup *h_cg, + struct page *page) +{ +} + +static inline void hugetlb_cgroup_uncharge_page(int idx, unsigned long nr_pages, + struct page *page) +{ +} + +static inline void hugetlb_cgroup_uncharge_page_rsvd(int idx, + unsigned long nr_pages, + struct page *page) +{ +} +static inline void hugetlb_cgroup_uncharge_cgroup(int idx, + unsigned long nr_pages, + struct hugetlb_cgroup *h_cg) { } static inline void -hugetlb_cgroup_uncharge_cgroup(int idx, unsigned long nr_pages, - struct hugetlb_cgroup *h_cg) +hugetlb_cgroup_uncharge_cgroup_rsvd(int idx, unsigned long nr_pages, + struct hugetlb_cgroup *h_cg) +{ +} + +static inline void hugetlb_cgroup_uncharge_counter(struct resv_map *resv, + unsigned long start, + unsigned long end) { } diff --git a/include/linux/i2c-smbus.h b/include/linux/i2c-smbus.h index 
585ad6fc3847..8c5459034f92 100644 --- a/include/linux/i2c-smbus.h +++ b/include/linux/i2c-smbus.h @@ -15,24 +15,19 @@ /** * i2c_smbus_alert_setup - platform data for the smbus_alert i2c client - * @alert_edge_triggered: whether the alert interrupt is edge (1) or level (0) - * triggered * @irq: IRQ number, if the smbus_alert driver should take care of interrupt * handling * * If irq is not specified, the smbus_alert driver doesn't take care of * interrupt handling. In that case it is up to the I2C bus driver to either * handle the interrupts or to poll for alerts. - * - * If irq is specified then it it crucial that alert_edge_triggered is - * properly set. */ struct i2c_smbus_alert_setup { int irq; }; -struct i2c_client *i2c_setup_smbus_alert(struct i2c_adapter *adapter, - struct i2c_smbus_alert_setup *setup); +struct i2c_client *i2c_new_smbus_alert_device(struct i2c_adapter *adapter, + struct i2c_smbus_alert_setup *setup); int i2c_handle_smbus_alert(struct i2c_client *ara); #if IS_ENABLED(CONFIG_I2C_SMBUS) && IS_ENABLED(CONFIG_OF) diff --git a/include/linux/i2c.h b/include/linux/i2c.h index f834687989f7..456fc17ecb1c 100644 --- a/include/linux/i2c.h +++ b/include/linux/i2c.h @@ -39,6 +39,14 @@ enum i2c_slave_event; typedef int (*i2c_slave_cb_t)(struct i2c_client *client, enum i2c_slave_event event, u8 *val); +/* I2C Frequency Modes */ +#define I2C_MAX_STANDARD_MODE_FREQ 100000 +#define I2C_MAX_FAST_MODE_FREQ 400000 +#define I2C_MAX_FAST_MODE_PLUS_FREQ 1000000 +#define I2C_MAX_TURBO_MODE_FREQ 1400000 +#define I2C_MAX_HIGH_SPEED_MODE_FREQ 3400000 +#define I2C_MAX_ULTRA_FAST_MODE_FREQ 5000000 + struct module; struct property_entry; @@ -506,7 +514,7 @@ i2c_register_board_info(int busnum, struct i2c_board_info const *info, * @smbus_xfer_atomic: same as @smbus_xfer. Yet, only using atomic context * so e.g. PMICs can be accessed very late before shutdown. Optional. * @functionality: Return the flags that this algorithm/adapter pair supports - * from the I2C_FUNC_* flags. + * from the ``I2C_FUNC_*`` flags. * @reg_slave: Register given client to I2C slave mode of this adapter * @unreg_slave: Unregister given client from I2C slave mode of this adapter * @@ -515,7 +523,7 @@ i2c_register_board_info(int busnum, struct i2c_board_info const *info, * be addressed using the same bus algorithms - i.e. bit-banging or the PCF8584 * to name two of the most common. * - * The return codes from the @master_xfer{_atomic} fields should indicate the + * The return codes from the ``master_xfer{_atomic}`` fields should indicate the * type of error code that occurred during the transfer, as documented in the * Kernel Documentation file Documentation/i2c/fault-codes.rst. 
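(Aside on the i2c.h hunk above.) The I2C_MAX_*_FREQ constants added to <linux/i2c.h> in this hunk give adapter drivers one shared set of ceilings for the standard speed classes instead of per-driver magic numbers. A minimal sketch, assuming a kernel tree where this header change is present, of classifying a requested bus clock against the common classes (turbo and ultra-fast are omitted for brevity); the helper name is illustrative only:

#include <linux/i2c.h>

/* Illustrative helper: map a requested bus clock to the ceiling of the
 * speed class it falls into, or 0 if it exceeds every class handled here. */
static u32 my_i2c_speed_class(u32 bus_freq_hz)
{
	if (bus_freq_hz <= I2C_MAX_STANDARD_MODE_FREQ)
		return I2C_MAX_STANDARD_MODE_FREQ;
	if (bus_freq_hz <= I2C_MAX_FAST_MODE_FREQ)
		return I2C_MAX_FAST_MODE_FREQ;
	if (bus_freq_hz <= I2C_MAX_FAST_MODE_PLUS_FREQ)
		return I2C_MAX_FAST_MODE_PLUS_FREQ;
	if (bus_freq_hz <= I2C_MAX_HIGH_SPEED_MODE_FREQ)
		return I2C_MAX_HIGH_SPEED_MODE_FREQ;
	return 0;
}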
*/ diff --git a/include/linux/icmpv6.h b/include/linux/icmpv6.h index 93338fd54af8..33d379602314 100644 --- a/include/linux/icmpv6.h +++ b/include/linux/icmpv6.h @@ -22,19 +22,23 @@ extern int inet6_unregister_icmp_sender(ip6_icmp_send_t *fn); int ip6_err_gen_icmpv6_unreach(struct sk_buff *skb, int nhs, int type, unsigned int data_len); +#if IS_ENABLED(CONFIG_NF_NAT) +void icmpv6_ndo_send(struct sk_buff *skb_in, u8 type, u8 code, __u32 info); +#else +#define icmpv6_ndo_send icmpv6_send +#endif + #else static inline void icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info) { - } -#endif -#if IS_ENABLED(CONFIG_NF_NAT) -void icmpv6_ndo_send(struct sk_buff *skb_in, u8 type, u8 code, __u32 info); -#else -#define icmpv6_ndo_send icmpv6_send +static inline void icmpv6_ndo_send(struct sk_buff *skb, + u8 type, u8 code, __u32 info) +{ +} #endif extern int icmpv6_init(void); diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h index 7d3f2ced92d1..16268ef1cbcc 100644 --- a/include/linux/ieee80211.h +++ b/include/linux/ieee80211.h @@ -620,6 +620,15 @@ static inline bool ieee80211_is_qos_nullfunc(__le16 fc) } /** + * ieee80211_is_any_nullfunc - check if frame is regular or QoS nullfunc frame + * @fc: frame control bytes in little-endian byteorder + */ +static inline bool ieee80211_is_any_nullfunc(__le16 fc) +{ + return (ieee80211_is_nullfunc(fc) || ieee80211_is_qos_nullfunc(fc)); +} + +/** * ieee80211_is_bufferable_mmpdu - check if frame is bufferable MMPDU * @fc: frame control field in little-endian byteorder */ @@ -2047,13 +2056,13 @@ ieee80211_he_ppe_size(u8 ppe_thres_hdr, const u8 *phy_cap_info) #define IEEE80211_HE_OPERATION_ER_SU_DISABLE 0x00010000 #define IEEE80211_HE_OPERATION_6GHZ_OP_INFO 0x00020000 #define IEEE80211_HE_OPERATION_BSS_COLOR_MASK 0x3f000000 -#define IEEE80211_HE_OPERATION_BSS_COLOR_OFFSET 24 +#define IEEE80211_HE_OPERATION_BSS_COLOR_OFFSET 24 #define IEEE80211_HE_OPERATION_PARTIAL_BSS_COLOR 0x40000000 #define IEEE80211_HE_OPERATION_BSS_COLOR_DISABLED 0x80000000 /* * ieee80211_he_oper_size - calculate 802.11ax HE Operations IE size - * @he_oper_ie: byte data of the He Operations IE, stating from the the byte + * @he_oper_ie: byte data of the He Operations IE, stating from the byte * after the ext ID byte. It is assumed that he_oper_ie has at least * sizeof(struct ieee80211_he_operation) bytes, the caller must have * validated this. @@ -2091,7 +2100,7 @@ ieee80211_he_oper_size(const u8 *he_oper_ie) /* * ieee80211_he_spr_size - calculate 802.11ax HE Spatial Reuse IE size - * @he_spr_ie: byte data of the He Spatial Reuse IE, stating from the the byte + * @he_spr_ie: byte data of the He Spatial Reuse IE, stating from the byte * after the ext ID byte. 
It is assumed that he_spr_ie has at least * sizeof(struct ieee80211_he_spr) bytes, the caller must have validated * this @@ -2102,14 +2111,14 @@ ieee80211_he_spr_size(const u8 *he_spr_ie) { struct ieee80211_he_spr *he_spr = (void *)he_spr_ie; u8 spr_len = sizeof(struct ieee80211_he_spr); - u32 he_spr_params; + u8 he_spr_params; /* Make sure the input is not NULL */ if (!he_spr_ie) return 0; /* Calc required length */ - he_spr_params = le32_to_cpu(he_spr->he_sr_control); + he_spr_params = he_spr->he_sr_control; if (he_spr_params & IEEE80211_HE_SPR_NON_SRG_OFFSET_PRESENT) spr_len++; if (he_spr_params & IEEE80211_HE_SPR_SRG_INFORMATION_PRESENT) @@ -2523,6 +2532,7 @@ enum ieee80211_eid { WLAN_EID_FILS_INDICATION = 240, WLAN_EID_DILS = 241, WLAN_EID_FRAGMENT = 242, + WLAN_EID_RSNX = 244, WLAN_EID_EXTENSION = 255 }; @@ -2734,7 +2744,7 @@ enum ieee80211_tdls_actioncode { */ #define WLAN_EXT_CAPA3_MULTI_BSSID_SUPPORT BIT(6) -/* TDLS capabilities in the the 4th byte of @WLAN_EID_EXT_CAPABILITY */ +/* TDLS capabilities in the 4th byte of @WLAN_EID_EXT_CAPABILITY */ #define WLAN_EXT_CAPA4_TDLS_BUFFER_STA BIT(4) #define WLAN_EXT_CAPA4_TDLS_PEER_PSM BIT(5) #define WLAN_EXT_CAPA4_TDLS_CHAN_SWITCH BIT(6) @@ -3034,6 +3044,7 @@ struct ieee80211_multiple_bssid_configuration { #define WLAN_AKM_SUITE_FILS_SHA384 SUITE(0x000FAC, 15) #define WLAN_AKM_SUITE_FT_FILS_SHA256 SUITE(0x000FAC, 16) #define WLAN_AKM_SUITE_FT_FILS_SHA384 SUITE(0x000FAC, 17) +#define WLAN_AKM_SUITE_OWE SUITE(0x000FAC, 18) #define WLAN_MAX_KEY_LEN 32 @@ -3412,4 +3423,11 @@ static inline bool for_each_element_completed(const struct element *element, return (const u8 *)element == (const u8 *)data + datalen; } +/** + * RSNX Capabilities: + * bits 0-3: Field length (n-1) + */ +#define WLAN_RSNX_CAPA_PROTECTED_TWT BIT(4) +#define WLAN_RSNX_CAPA_SAE_H2E BIT(5) + #endif /* LINUX_IEEE80211_H */ diff --git a/include/linux/iio/iio.h b/include/linux/iio/iio.h index 862ce0019eba..eed58ed2f368 100644 --- a/include/linux/iio/iio.h +++ b/include/linux/iio/iio.h @@ -568,6 +568,8 @@ struct iio_dev { #if defined(CONFIG_DEBUG_FS) struct dentry *debugfs_dentry; unsigned cached_reg_addr; + char read_buf[20]; + unsigned int read_buf_len; #endif }; diff --git a/include/linux/iio/imu/adis.h b/include/linux/iio/imu/adis.h index d2fcf45b4cef..dd8219138c2e 100644 --- a/include/linux/iio/imu/adis.h +++ b/include/linux/iio/imu/adis.h @@ -41,9 +41,16 @@ struct adis_timeout { * @glob_cmd_reg: Register address of the GLOB_CMD register * @msc_ctrl_reg: Register address of the MSC_CTRL register * @diag_stat_reg: Register address of the DIAG_STAT register + * @prod_id_reg: Register address of the PROD_ID register + * @prod_id: Product ID code that should be expected when reading @prod_id_reg + * @self_test_mask: Bitmask of supported self-test operations + * @self_test_reg: Register address to request self test command + * @self_test_no_autoclear: True if device's self-test needs clear of ctrl reg * @status_error_msgs: Array of error messgaes - * @status_error_mask: + * @status_error_mask: Bitmask of errors supported by the device * @timeouts: Chip specific delays + * @enable_irq: Hook for ADIS devices that have a special IRQ enable/disable + * @has_paging: True if ADIS device has paged registers */ struct adis_data { unsigned int read_delay; @@ -53,8 +60,12 @@ struct adis_data { unsigned int glob_cmd_reg; unsigned int msc_ctrl_reg; unsigned int diag_stat_reg; + unsigned int prod_id_reg; + + unsigned int prod_id; unsigned int self_test_mask; + unsigned int self_test_reg; 
bool self_test_no_autoclear; const struct adis_timeout *timeouts; @@ -66,6 +77,20 @@ struct adis_data { bool has_paging; }; +/** + * struct adis - ADIS device instance data + * @spi: Reference to SPI device which owns this ADIS IIO device + * @trig: IIO trigger object data + * @data: ADIS chip variant specific data + * @burst: ADIS burst transfer information + * @state_lock: Lock used by the device to protect state + * @msg: SPI message object + * @xfer: SPI transfer objects to be used for a @msg + * @current_page: Some ADIS devices have registers, this selects current page + * @buffer: Data buffer for information read from the device + * @tx: DMA safe TX buffer for SPI transfers + * @rx: DMA safe RX buffer for SPI transfers + */ struct adis { struct spi_device *spi; struct iio_trigger *trig; @@ -73,6 +98,17 @@ struct adis { const struct adis_data *data; struct adis_burst *burst; + /** + * The state_lock is meant to be used during operations that require + * a sequence of SPI R/W in order to protect the SPI transfer + * information (fields 'xfer', 'msg' & 'current_page') between + * potential concurrent accesses. + * This lock is used by all "adis_{functions}" that have to read/write + * registers. These functions also have unlocked variants + * (see "__adis_{functions}"), which don't hold this lock. + * This allows users of the ADIS library to group SPI R/W into + * the drivers, but they also must manage this lock themselves. + */ struct mutex state_lock; struct spi_message msg; struct spi_transfer *xfer; @@ -297,6 +333,7 @@ static inline int adis_read_reg_32(struct adis *adis, unsigned int reg, int adis_enable_irq(struct adis *adis, bool enable); int __adis_check_status(struct adis *adis); +int __adis_initial_startup(struct adis *adis); static inline int adis_check_status(struct adis *adis) { @@ -309,7 +346,17 @@ static inline int adis_check_status(struct adis *adis) return ret; } -int adis_initial_startup(struct adis *adis); +/* locked version of __adis_initial_startup() */ +static inline int adis_initial_startup(struct adis *adis) +{ + int ret; + + mutex_lock(&adis->state_lock); + ret = __adis_initial_startup(adis); + mutex_unlock(&adis->state_lock); + + return ret; +} int adis_single_conversion(struct iio_dev *indio_dev, const struct iio_chan_spec *chan, unsigned int error_mask, diff --git a/include/linux/ima.h b/include/linux/ima.h index 1659217e9b60..aefe758f4466 100644 --- a/include/linux/ima.h +++ b/include/linux/ima.h @@ -30,8 +30,7 @@ extern void ima_kexec_cmdline(const void *buf, int size); extern void ima_add_kexec_buffer(struct kimage *image); #endif -#if (defined(CONFIG_X86) && defined(CONFIG_EFI)) || defined(CONFIG_S390) \ - || defined(CONFIG_PPC_SECURE_BOOT) +#ifdef CONFIG_IMA_SECURE_AND_OR_TRUSTED_BOOT extern bool arch_ima_get_secureboot(void); extern const char * const *arch_get_ima_policy(void); #else diff --git a/include/linux/inet_diag.h b/include/linux/inet_diag.h index 39faaaf843e1..ce9ed1c0602f 100644 --- a/include/linux/inet_diag.h +++ b/include/linux/inet_diag.h @@ -2,24 +2,17 @@ #ifndef _INET_DIAG_H_ #define _INET_DIAG_H_ 1 +#include <net/netlink.h> #include <uapi/linux/inet_diag.h> -struct net; -struct sock; struct inet_hashinfo; -struct nlattr; -struct nlmsghdr; -struct sk_buff; -struct netlink_callback; struct inet_diag_handler { void (*dump)(struct sk_buff *skb, struct netlink_callback *cb, - const struct inet_diag_req_v2 *r, - struct nlattr *bc); + const struct inet_diag_req_v2 *r); - int (*dump_one)(struct sk_buff *in_skb, - const struct nlmsghdr *nlh, + 
int (*dump_one)(struct netlink_callback *cb, const struct inet_diag_req_v2 *req); void (*idiag_get_info)(struct sock *sk, @@ -40,18 +33,25 @@ struct inet_diag_handler { __u16 idiag_info_size; }; +struct bpf_sk_storage_diag; +struct inet_diag_dump_data { + struct nlattr *req_nlas[__INET_DIAG_REQ_MAX]; +#define inet_diag_nla_bc req_nlas[INET_DIAG_REQ_BYTECODE] +#define inet_diag_nla_bpf_stgs req_nlas[INET_DIAG_REQ_SK_BPF_STORAGES] + + struct bpf_sk_storage_diag *bpf_stg_diag; +}; + struct inet_connection_sock; int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk, - struct sk_buff *skb, const struct inet_diag_req_v2 *req, - struct user_namespace *user_ns, - u32 pid, u32 seq, u16 nlmsg_flags, - const struct nlmsghdr *unlh, bool net_admin); + struct sk_buff *skb, struct netlink_callback *cb, + const struct inet_diag_req_v2 *req, + u16 nlmsg_flags, bool net_admin); void inet_diag_dump_icsk(struct inet_hashinfo *h, struct sk_buff *skb, struct netlink_callback *cb, - const struct inet_diag_req_v2 *r, - struct nlattr *bc); + const struct inet_diag_req_v2 *r); int inet_diag_dump_one_icsk(struct inet_hashinfo *hashinfo, - struct sk_buff *in_skb, const struct nlmsghdr *nlh, + struct netlink_callback *cb, const struct inet_diag_req_v2 *req); struct sock *inet_diag_find_one_icsk(struct net *net, @@ -62,6 +62,17 @@ int inet_diag_bc_sk(const struct nlattr *_bc, struct sock *sk); void inet_diag_msg_common_fill(struct inet_diag_msg *r, struct sock *sk); +static inline size_t inet_diag_msg_attrs_size(void) +{ + return nla_total_size(1) /* INET_DIAG_SHUTDOWN */ + + nla_total_size(1) /* INET_DIAG_TOS */ +#if IS_ENABLED(CONFIG_IPV6) + + nla_total_size(1) /* INET_DIAG_TCLASS */ + + nla_total_size(1) /* INET_DIAG_SKV6ONLY */ +#endif + + nla_total_size(4) /* INET_DIAG_MARK */ + + nla_total_size(4); /* INET_DIAG_CLASS_ID */ +} int inet_diag_msg_attrs_fill(struct sock *sk, struct sk_buff *skb, struct inet_diag_msg *r, int ext, struct user_namespace *user_ns, bool net_admin); diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h index 4a16b39ae353..980234ae0312 100644 --- a/include/linux/intel-iommu.h +++ b/include/linux/intel-iommu.h @@ -123,6 +123,8 @@ #define dmar_readq(a) readq(a) #define dmar_writeq(a,v) writeq(v,a) +#define dmar_readl(a) readl(a) +#define dmar_writel(a, v) writel(v, a) #define DMAR_VER_MAJOR(v) (((v) & 0xf0) >> 4) #define DMAR_VER_MINOR(v) ((v) & 0x0f) diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h index c5fe60ec6b84..80f637c3a6f3 100644 --- a/include/linux/interrupt.h +++ b/include/linux/interrupt.h @@ -248,6 +248,8 @@ extern void enable_percpu_nmi(unsigned int irq, unsigned int type); extern int prepare_percpu_nmi(unsigned int irq); extern void teardown_percpu_nmi(unsigned int irq); +extern int irq_inject_interrupt(unsigned int irq); + /* The following three functions are for the core kernel use only. */ extern void suspend_device_irqs(void); extern void resume_device_irqs(void); diff --git a/include/linux/io-mapping.h b/include/linux/io-mapping.h index 837058bc1c9f..b336622612f3 100644 --- a/include/linux/io-mapping.h +++ b/include/linux/io-mapping.h @@ -16,7 +16,7 @@ * The io_mapping mechanism provides an abstraction for mapping * individual pages from an io device to the CPU in an efficient fashion. 
* - * See Documentation/io-mapping.txt + * See Documentation/driver-api/io-mapping.rst */ struct io_mapping { diff --git a/include/linux/ioc3.h b/include/linux/ioc3.h deleted file mode 100644 index 38b286e9a46c..000000000000 --- a/include/linux/ioc3.h +++ /dev/null @@ -1,93 +0,0 @@ -/* - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - * - * Copyright (c) 2005 Stanislaw Skowronek <skylark@linux-mips.org> - */ - -#ifndef _LINUX_IOC3_H -#define _LINUX_IOC3_H - -#include <asm/sn/ioc3.h> - -#define IOC3_MAX_SUBMODULES 32 - -#define IOC3_CLASS_NONE 0 -#define IOC3_CLASS_BASE_IP27 1 -#define IOC3_CLASS_BASE_IP30 2 -#define IOC3_CLASS_MENET_123 3 -#define IOC3_CLASS_MENET_4 4 -#define IOC3_CLASS_CADDUO 5 -#define IOC3_CLASS_SERIAL 6 - -/* One of these per IOC3 */ -struct ioc3_driver_data { - struct list_head list; - int id; /* IOC3 sequence number */ - /* PCI mapping */ - unsigned long pma; /* physical address */ - struct ioc3 __iomem *vma; /* pointer to registers */ - struct pci_dev *pdev; /* PCI device */ - /* IRQ stuff */ - int dual_irq; /* set if separate IRQs are used */ - int irq_io, irq_eth; /* IRQ numbers */ - /* GPIO magic */ - spinlock_t gpio_lock; - unsigned int gpdr_shadow; - /* NIC identifiers */ - char nic_part[32]; - char nic_serial[16]; - char nic_mac[6]; - /* submodule set */ - int class; - void *data[IOC3_MAX_SUBMODULES]; /* for submodule use */ - int active[IOC3_MAX_SUBMODULES]; /* set if probe succeeds */ - /* is_ir_lock must be held while - * modifying sio_ie values, so - * we can be sure that sio_ie is - * not changing when we read it - * along with sio_ir. - */ - spinlock_t ir_lock; /* SIO_IE[SC] mod lock */ -}; - -/* One per submodule */ -struct ioc3_submodule { - char *name; /* descriptive submodule name */ - struct module *owner; /* owning kernel module */ - int ethernet; /* set for ethernet drivers */ - int (*probe) (struct ioc3_submodule *, struct ioc3_driver_data *); - int (*remove) (struct ioc3_submodule *, struct ioc3_driver_data *); - int id; /* assigned by IOC3, index for the "data" array */ - /* IRQ stuff */ - unsigned int irq_mask; /* IOC3 IRQ mask, leave clear for Ethernet */ - int reset_mask; /* non-zero if you want the ioc3.c module to reset interrupts */ - int (*intr) (struct ioc3_submodule *, struct ioc3_driver_data *, unsigned int); - /* private submodule data */ - void *data; /* assigned by submodule */ -}; - -/********************************** - * Functions needed by submodules * - **********************************/ - -#define IOC3_W_IES 0 -#define IOC3_W_IEC 1 - -/* registers a submodule for all existing and future IOC3 chips */ -extern int ioc3_register_submodule(struct ioc3_submodule *); -/* unregisters a submodule */ -extern void ioc3_unregister_submodule(struct ioc3_submodule *); -/* enables IRQs indicated by irq_mask for a specified IOC3 chip */ -extern void ioc3_enable(struct ioc3_submodule *, struct ioc3_driver_data *, unsigned int); -/* ackowledges specified IRQs */ -extern void ioc3_ack(struct ioc3_submodule *, struct ioc3_driver_data *, unsigned int); -/* disables IRQs indicated by irq_mask for a specified IOC3 chip */ -extern void ioc3_disable(struct ioc3_submodule *, struct ioc3_driver_data *, unsigned int); -/* atomically sets GPCR bits */ -extern void ioc3_gpcr_set(struct ioc3_driver_data *, unsigned int); -/* general ireg writer */ -extern void ioc3_write_ireg(struct ioc3_driver_data *idd, uint32_t value, int 
reg); - -#endif diff --git a/include/linux/iocontext.h b/include/linux/iocontext.h index dba15ca8e60b..1dcd9198beb7 100644 --- a/include/linux/iocontext.h +++ b/include/linux/iocontext.h @@ -8,6 +8,7 @@ enum { ICQ_EXITED = 1 << 2, + ICQ_DESTROYED = 1 << 3, }; /* diff --git a/include/linux/iopoll.h b/include/linux/iopoll.h index 35e15dfd4155..cb20c733b15a 100644 --- a/include/linux/iopoll.h +++ b/include/linux/iopoll.h @@ -14,36 +14,41 @@ #include <linux/io.h> /** - * readx_poll_timeout - Periodically poll an address until a condition is met or a timeout occurs - * @op: accessor function (takes @addr as its only argument) - * @addr: Address to poll + * read_poll_timeout - Periodically poll an address until a condition is + * met or a timeout occurs + * @op: accessor function (takes @args as its arguments) * @val: Variable to read the value into * @cond: Break condition (usually involving @val) * @sleep_us: Maximum time to sleep between reads in us (0 * tight-loops). Should be less than ~20ms since usleep_range * is used (see Documentation/timers/timers-howto.rst). * @timeout_us: Timeout in us, 0 means never timeout + * @sleep_before_read: if it is true, sleep @sleep_us before read. + * @args: arguments for @op poll * * Returns 0 on success and -ETIMEDOUT upon a timeout. In either - * case, the last read value at @addr is stored in @val. Must not + * case, the last read value at @args is stored in @val. Must not * be called from atomic context if sleep_us or timeout_us are used. * * When available, you'll probably want to use one of the specialized * macros defined below rather than this macro directly. */ -#define readx_poll_timeout(op, addr, val, cond, sleep_us, timeout_us) \ +#define read_poll_timeout(op, val, cond, sleep_us, timeout_us, \ + sleep_before_read, args...) \ ({ \ u64 __timeout_us = (timeout_us); \ unsigned long __sleep_us = (sleep_us); \ ktime_t __timeout = ktime_add_us(ktime_get(), __timeout_us); \ might_sleep_if((__sleep_us) != 0); \ + if (sleep_before_read && __sleep_us) \ + usleep_range((__sleep_us >> 2) + 1, __sleep_us); \ for (;;) { \ - (val) = op(addr); \ + (val) = op(args); \ if (cond) \ break; \ if (__timeout_us && \ ktime_compare(ktime_get(), __timeout) > 0) { \ - (val) = op(addr); \ + (val) = op(args); \ break; \ } \ if (__sleep_us) \ @@ -53,6 +58,27 @@ }) /** + * readx_poll_timeout - Periodically poll an address until a condition is met or a timeout occurs + * @op: accessor function (takes @addr as its only argument) + * @addr: Address to poll + * @val: Variable to read the value into + * @cond: Break condition (usually involving @val) + * @sleep_us: Maximum time to sleep between reads in us (0 + * tight-loops). Should be less than ~20ms since usleep_range + * is used (see Documentation/timers/timers-howto.rst). + * @timeout_us: Timeout in us, 0 means never timeout + * + * Returns 0 on success and -ETIMEDOUT upon a timeout. In either + * case, the last read value at @addr is stored in @val. Must not + * be called from atomic context if sleep_us or timeout_us are used. + * + * When available, you'll probably want to use one of the specialized + * macros defined below rather than this macro directly. 
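(Aside on the iopoll.h hunk above.) read_poll_timeout(), defined earlier in this hunk, generalizes the old helper: the accessor is no longer limited to a single address argument, and an optional sleep can be taken before the first read. A small sketch of polling a device status word through a multi-argument accessor; struct my_dev, MY_DEV_STATUS and MY_DEV_STATUS_READY are hypothetical stand-ins, and the call must run in sleepable context because sleep_us is non-zero:

#include <linux/bits.h>
#include <linux/iopoll.h>

struct my_dev {				/* illustrative device context */
	void __iomem *regs;
};

#define MY_DEV_STATUS		0x04	/* illustrative register offset */
#define MY_DEV_STATUS_READY	BIT(0)	/* illustrative ready bit */

/* Accessor returning the value to test; it may now take any arguments. */
static u32 my_dev_read_status(struct my_dev *dev)
{
	return readl(dev->regs + MY_DEV_STATUS);
}

static int my_dev_wait_ready(struct my_dev *dev)
{
	u32 val;

	/* Poll roughly every 50us, time out after 10ms, no sleep before the
	 * first read; "dev" is forwarded to the accessor via the variadic tail. */
	return read_poll_timeout(my_dev_read_status, val,
				 val & MY_DEV_STATUS_READY,
				 50, 10000, false, dev);
}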
+ */ +#define readx_poll_timeout(op, addr, val, cond, sleep_us, timeout_us) \ + read_poll_timeout(op, val, cond, sleep_us, timeout_us, false, addr) + +/** * readx_poll_timeout_atomic - Periodically poll an address until a condition is met or a timeout occurs * @op: accessor function (takes @addr as its only argument) * @addr: Address to poll diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h index ea7c7906591e..2cb445a8fc9e 100644 --- a/include/linux/ipv6.h +++ b/include/linux/ipv6.h @@ -74,6 +74,7 @@ struct ipv6_devconf { __u32 addr_gen_mode; __s32 disable_policy; __s32 ndisc_tclass; + __s32 rpl_seg_enabled; struct ctl_table_header *sysctl_header; }; diff --git a/include/linux/irq.h b/include/linux/irq.h index 3ed5a055b5f4..9315fbb87db3 100644 --- a/include/linux/irq.h +++ b/include/linux/irq.h @@ -211,6 +211,8 @@ struct irq_data { * IRQD_CAN_RESERVE - Can use reservation mode * IRQD_MSI_NOMASK_QUIRK - Non-maskable MSI quirk for affinity change * required + * IRQD_HANDLE_ENFORCE_IRQCTX - Enforce that handle_irq_*() is only invoked + * from actual interrupt context. */ enum { IRQD_TRIGGER_MASK = 0xf, @@ -234,6 +236,7 @@ enum { IRQD_DEFAULT_TRIGGER_SET = (1 << 25), IRQD_CAN_RESERVE = (1 << 26), IRQD_MSI_NOMASK_QUIRK = (1 << 27), + IRQD_HANDLE_ENFORCE_IRQCTX = (1 << 28), }; #define __irqd_to_state(d) ACCESS_PRIVATE((d)->common, state_use_accessors) @@ -303,6 +306,16 @@ static inline bool irqd_is_single_target(struct irq_data *d) return __irqd_to_state(d) & IRQD_SINGLE_TARGET; } +static inline void irqd_set_handle_enforce_irqctx(struct irq_data *d) +{ + __irqd_to_state(d) |= IRQD_HANDLE_ENFORCE_IRQCTX; +} + +static inline bool irqd_is_handle_enforce_irqctx(struct irq_data *d) +{ + return __irqd_to_state(d) & IRQD_HANDLE_ENFORCE_IRQCTX; +} + static inline bool irqd_is_wakeup_set(struct irq_data *d) { return __irqd_to_state(d) & IRQD_WAKEUP_STATE; diff --git a/include/linux/irq_work.h b/include/linux/irq_work.h index 02da997ad12c..3b752e80c017 100644 --- a/include/linux/irq_work.h +++ b/include/linux/irq_work.h @@ -18,6 +18,8 @@ /* Doesn't want IPI, wait for tick: */ #define IRQ_WORK_LAZY BIT(2) +/* Run hard IRQ context, even on RT */ +#define IRQ_WORK_HARD_IRQ BIT(3) #define IRQ_WORK_CLAIMED (IRQ_WORK_PENDING | IRQ_WORK_BUSY) diff --git a/include/linux/irqchip/arm-gic-common.h b/include/linux/irqchip/arm-gic-common.h index b9850f5f1906..fa8c0455c352 100644 --- a/include/linux/irqchip/arm-gic-common.h +++ b/include/linux/irqchip/arm-gic-common.h @@ -32,6 +32,8 @@ struct gic_kvm_info { struct resource vctrl; /* vlpi support */ bool has_v4; + /* rvpeid support */ + bool has_v4_1; }; const struct gic_kvm_info *gic_get_kvm_info(void); diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h index 83439bfb6c5b..765d9b769b69 100644 --- a/include/linux/irqchip/arm-gic-v3.h +++ b/include/linux/irqchip/arm-gic-v3.h @@ -57,6 +57,7 @@ #define GICD_SPENDSGIR 0x0F20 #define GICD_CTLR_RWP (1U << 31) +#define GICD_CTLR_nASSGIreq (1U << 8) #define GICD_CTLR_DS (1U << 6) #define GICD_CTLR_ARE_NS (1U << 4) #define GICD_CTLR_ENABLE_G1A (1U << 1) @@ -90,6 +91,7 @@ #define GICD_TYPER_ESPIS(typer) \ (((typer) & GICD_TYPER_ESPI) ? 
GICD_TYPER_SPIS((typer) >> 27) : 0) +#define GICD_TYPER2_nASSGIcap (1U << 8) #define GICD_TYPER2_VIL (1U << 7) #define GICD_TYPER2_VID GENMASK(4, 0) @@ -320,6 +322,9 @@ #define GICR_VPENDBASER_NonShareable \ GIC_BASER_SHAREABILITY(GICR_VPENDBASER, NonShareable) +#define GICR_VPENDBASER_InnerShareable \ + GIC_BASER_SHAREABILITY(GICR_VPENDBASER, InnerShareable) + #define GICR_VPENDBASER_nCnB GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, nCnB) #define GICR_VPENDBASER_nC GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, nC) #define GICR_VPENDBASER_RaWt GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, RaWt) @@ -343,6 +348,15 @@ #define GICR_VPENDBASER_4_1_VGRP1EN (1ULL << 58) #define GICR_VPENDBASER_4_1_VPEID GENMASK_ULL(15, 0) +#define GICR_VSGIR 0x0080 + +#define GICR_VSGIR_VPEID GENMASK(15, 0) + +#define GICR_VSGIPENDR 0x0088 + +#define GICR_VSGIPENDR_BUSY (1U << 31) +#define GICR_VSGIPENDR_PENDING GENMASK(15, 0) + /* * ITS registers, offsets from ITS_base */ @@ -366,6 +380,11 @@ #define GITS_TRANSLATER 0x10040 +#define GITS_SGIR 0x20020 + +#define GITS_SGIR_VPEID GENMASK_ULL(47, 32) +#define GITS_SGIR_VINTID GENMASK_ULL(3, 0) + #define GITS_CTLR_ENABLE (1U << 0) #define GITS_CTLR_ImDe (1U << 1) #define GITS_CTLR_ITS_NUMBER_SHIFT 4 @@ -500,8 +519,9 @@ #define GITS_CMD_VMAPTI GITS_CMD_GICv4(GITS_CMD_MAPTI) #define GITS_CMD_VMOVI GITS_CMD_GICv4(GITS_CMD_MOVI) #define GITS_CMD_VSYNC GITS_CMD_GICv4(GITS_CMD_SYNC) -/* VMOVP and INVDB are the odd ones, as they dont have a physical counterpart */ +/* VMOVP, VSGI and INVDB are the odd ones, as they dont have a physical counterpart */ #define GITS_CMD_VMOVP GITS_CMD_GICv4(2) +#define GITS_CMD_VSGI GITS_CMD_GICv4(3) #define GITS_CMD_INVDB GITS_CMD_GICv4(0xe) /* @@ -650,6 +670,7 @@ struct rdists { struct { + raw_spinlock_t rd_lock; void __iomem *rd_base; struct page *pend_page; phys_addr_t phys_base; diff --git a/include/linux/irqchip/arm-gic-v4.h b/include/linux/irqchip/arm-gic-v4.h index d9c34968467a..6976b8331b60 100644 --- a/include/linux/irqchip/arm-gic-v4.h +++ b/include/linux/irqchip/arm-gic-v4.h @@ -49,11 +49,23 @@ struct its_vpe { }; /* GICv4.1 implementations */ struct { + struct fwnode_handle *fwnode; + struct irq_domain *sgi_domain; + struct { + u8 priority; + bool enabled; + bool group; + } sgi_config[16]; atomic_t vmapp_count; }; }; /* + * Ensures mutual exclusion between affinity setting of the + * vPE and vLPI operations using vpe->col_idx. + */ + raw_spinlock_t vpe_lock; + /* * This collection ID is used to indirect the target * redistributor for this VPE. The ID itself isn't involved in * programming of the ITS. 
@@ -93,6 +105,7 @@ enum its_vcpu_info_cmd_type { SCHEDULE_VPE, DESCHEDULE_VPE, INVALL_VPE, + PROP_UPDATE_VSGI, }; struct its_cmd_info { @@ -105,19 +118,27 @@ struct its_cmd_info { bool g0en; bool g1en; }; + struct { + u8 priority; + bool group; + }; }; }; int its_alloc_vcpu_irqs(struct its_vm *vm); void its_free_vcpu_irqs(struct its_vm *vm); -int its_schedule_vpe(struct its_vpe *vpe, bool on); +int its_make_vpe_resident(struct its_vpe *vpe, bool g0en, bool g1en); +int its_make_vpe_non_resident(struct its_vpe *vpe, bool db); int its_invall_vpe(struct its_vpe *vpe); int its_map_vlpi(int irq, struct its_vlpi_map *map); int its_get_vlpi(int irq, struct its_vlpi_map *map); int its_unmap_vlpi(int irq); int its_prop_update_vlpi(int irq, u8 config, bool inv); +int its_prop_update_vsgi(int irq, u8 priority, bool group); struct irq_domain_ops; -int its_init_v4(struct irq_domain *domain, const struct irq_domain_ops *ops); +int its_init_v4(struct irq_domain *domain, + const struct irq_domain_ops *vpe_ops, + const struct irq_domain_ops *sgi_ops); #endif diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h index d6e2ab538ef2..8f2820c5e69e 100644 --- a/include/linux/irqdesc.h +++ b/include/linux/irqdesc.h @@ -24,7 +24,7 @@ struct pt_regs; * @handle_irq: highlevel irq-events handler * @preflow_handler: handler called before the flow handler (currently used by sparc) * @action: the irq action chain - * @status: status information + * @status_use_accessors: status information * @core_internal_state__do_not_mess_with_it: core internal status information * @depth: disable-depth, for nested irq_disable() calls * @wake_depth: enable depth, for multiple irq_set_irq_wake() callers diff --git a/include/linux/irqflags.h b/include/linux/irqflags.h index 21619c92c377..61a9ced3aa50 100644 --- a/include/linux/irqflags.h +++ b/include/linux/irqflags.h @@ -15,15 +15,15 @@ #include <linux/typecheck.h> #include <asm/irqflags.h> -/* Currently trace_softirqs_on/off is used only by lockdep */ +/* Currently lockdep_softirqs_on/off is used only by lockdep */ #ifdef CONFIG_PROVE_LOCKING - extern void trace_softirqs_on(unsigned long ip); - extern void trace_softirqs_off(unsigned long ip); + extern void lockdep_softirqs_on(unsigned long ip); + extern void lockdep_softirqs_off(unsigned long ip); extern void lockdep_hardirqs_on(unsigned long ip); extern void lockdep_hardirqs_off(unsigned long ip); #else - static inline void trace_softirqs_on(unsigned long ip) { } - static inline void trace_softirqs_off(unsigned long ip) { } + static inline void lockdep_softirqs_on(unsigned long ip) { } + static inline void lockdep_softirqs_off(unsigned long ip) { } static inline void lockdep_hardirqs_on(unsigned long ip) { } static inline void lockdep_hardirqs_off(unsigned long ip) { } #endif @@ -31,15 +31,20 @@ #ifdef CONFIG_TRACE_IRQFLAGS extern void trace_hardirqs_on(void); extern void trace_hardirqs_off(void); -# define trace_hardirq_context(p) ((p)->hardirq_context) -# define trace_softirq_context(p) ((p)->softirq_context) -# define trace_hardirqs_enabled(p) ((p)->hardirqs_enabled) -# define trace_softirqs_enabled(p) ((p)->softirqs_enabled) -# define trace_hardirq_enter() \ +# define lockdep_hardirq_context(p) ((p)->hardirq_context) +# define lockdep_softirq_context(p) ((p)->softirq_context) +# define lockdep_hardirqs_enabled(p) ((p)->hardirqs_enabled) +# define lockdep_softirqs_enabled(p) ((p)->softirqs_enabled) +# define lockdep_hardirq_enter() \ do { \ - current->hardirq_context++; \ + if (!current->hardirq_context++) \ + 
current->hardirq_threaded = 0; \ } while (0) -# define trace_hardirq_exit() \ +# define lockdep_hardirq_threaded() \ +do { \ + current->hardirq_threaded = 1; \ +} while (0) +# define lockdep_hardirq_exit() \ do { \ current->hardirq_context--; \ } while (0) @@ -51,17 +56,63 @@ do { \ do { \ current->softirq_context--; \ } while (0) + +# define lockdep_hrtimer_enter(__hrtimer) \ +({ \ + bool __expires_hardirq = true; \ + \ + if (!__hrtimer->is_hard) { \ + current->irq_config = 1; \ + __expires_hardirq = false; \ + } \ + __expires_hardirq; \ +}) + +# define lockdep_hrtimer_exit(__expires_hardirq) \ + do { \ + if (!__expires_hardirq) \ + current->irq_config = 0; \ + } while (0) + +# define lockdep_posixtimer_enter() \ + do { \ + current->irq_config = 1; \ + } while (0) + +# define lockdep_posixtimer_exit() \ + do { \ + current->irq_config = 0; \ + } while (0) + +# define lockdep_irq_work_enter(__work) \ + do { \ + if (!(atomic_read(&__work->flags) & IRQ_WORK_HARD_IRQ))\ + current->irq_config = 1; \ + } while (0) +# define lockdep_irq_work_exit(__work) \ + do { \ + if (!(atomic_read(&__work->flags) & IRQ_WORK_HARD_IRQ))\ + current->irq_config = 0; \ + } while (0) + #else # define trace_hardirqs_on() do { } while (0) # define trace_hardirqs_off() do { } while (0) -# define trace_hardirq_context(p) 0 -# define trace_softirq_context(p) 0 -# define trace_hardirqs_enabled(p) 0 -# define trace_softirqs_enabled(p) 0 -# define trace_hardirq_enter() do { } while (0) -# define trace_hardirq_exit() do { } while (0) +# define lockdep_hardirq_context(p) 0 +# define lockdep_softirq_context(p) 0 +# define lockdep_hardirqs_enabled(p) 0 +# define lockdep_softirqs_enabled(p) 0 +# define lockdep_hardirq_enter() do { } while (0) +# define lockdep_hardirq_threaded() do { } while (0) +# define lockdep_hardirq_exit() do { } while (0) # define lockdep_softirq_enter() do { } while (0) # define lockdep_softirq_exit() do { } while (0) +# define lockdep_hrtimer_enter(__hrtimer) false +# define lockdep_hrtimer_exit(__context) do { } while (0) +# define lockdep_posixtimer_enter() do { } while (0) +# define lockdep_posixtimer_exit() do { } while (0) +# define lockdep_irq_work_enter(__work) do { } while (0) +# define lockdep_irq_work_exit(__work) do { } while (0) #endif #if defined(CONFIG_IRQSOFF_TRACER) || \ diff --git a/include/linux/jiffies.h b/include/linux/jiffies.h index e3279ef24d28..fed6ba96c527 100644 --- a/include/linux/jiffies.h +++ b/include/linux/jiffies.h @@ -8,6 +8,7 @@ #include <linux/types.h> #include <linux/time.h> #include <linux/timex.h> +#include <vdso/jiffies.h> #include <asm/param.h> /* for HZ */ #include <generated/timeconst.h> @@ -59,9 +60,6 @@ extern int register_refined_jiffies(long clock_tick_rate); -/* TICK_NSEC is the time between ticks in nsec assuming SHIFTED_HZ */ -#define TICK_NSEC ((NSEC_PER_SEC+HZ/2)/HZ) - /* TICK_USEC is the time between ticks in usec assuming SHIFTED_HZ */ #define TICK_USEC ((USEC_PER_SEC + HZ/2) / HZ) diff --git a/include/linux/kasan.h b/include/linux/kasan.h index 5cde9e7c2664..31314ca7c635 100644 --- a/include/linux/kasan.h +++ b/include/linux/kasan.h @@ -190,7 +190,7 @@ void kasan_init_tags(void); void *kasan_reset_tag(const void *addr); -void kasan_report(unsigned long addr, size_t size, +bool kasan_report(unsigned long addr, size_t size, bool is_write, unsigned long ip); #else /* CONFIG_KASAN_SW_TAGS */ diff --git a/include/linux/kernel.h b/include/linux/kernel.h index 0d9db2a14f44..9b7a8d74a9d6 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h @@ 
-257,6 +257,13 @@ extern void __cant_sleep(const char *file, int line, int preempt_offset); #define might_sleep_if(cond) do { if (cond) might_sleep(); } while (0) +#ifndef CONFIG_PREEMPT_RT +# define cant_migrate() cant_sleep() +#else + /* Placeholder for now */ +# define cant_migrate() do { } while (0) +#endif + /** * abs - return absolute value of an argument * @x: the value. If it is unsigned type, it is converted to signed type first. diff --git a/include/linux/kernfs.h b/include/linux/kernfs.h index dded2e5a9f42..89f6a4214a70 100644 --- a/include/linux/kernfs.h +++ b/include/linux/kernfs.h @@ -37,8 +37,10 @@ enum kernfs_node_type { KERNFS_LINK = 0x0004, }; -#define KERNFS_TYPE_MASK 0x000f -#define KERNFS_FLAG_MASK ~KERNFS_TYPE_MASK +#define KERNFS_TYPE_MASK 0x000f +#define KERNFS_FLAG_MASK ~KERNFS_TYPE_MASK +#define KERNFS_MAX_USER_XATTRS 128 +#define KERNFS_USER_XATTR_SIZE_LIMIT (128 << 10) enum kernfs_node_flag { KERNFS_ACTIVATED = 0x0010, @@ -78,6 +80,11 @@ enum kernfs_root_flag { * fhandle to access nodes of the fs. */ KERNFS_ROOT_SUPPORT_EXPORTOP = 0x0004, + + /* + * Support user xattrs to be written to nodes rooted at this root. + */ + KERNFS_ROOT_SUPPORT_USER_XATTR = 0x0008, }; /* type-specific structures for kernfs_node union members */ diff --git a/include/linux/key-type.h b/include/linux/key-type.h index 4ded94bcf274..2ab2d6d6aeab 100644 --- a/include/linux/key-type.h +++ b/include/linux/key-type.h @@ -127,7 +127,7 @@ struct key_type { * much is copied into the buffer * - shouldn't do the copy if the buffer is NULL */ - long (*read)(const struct key *key, char __user *buffer, size_t buflen); + long (*read)(const struct key *key, char *buffer, size_t buflen); /* handle request_key() for this type instead of invoking * /sbin/request-key (optional) diff --git a/include/linux/kfifo.h b/include/linux/kfifo.h index fc4b0b10210f..86249476b57f 100644 --- a/include/linux/kfifo.h +++ b/include/linux/kfifo.h @@ -247,6 +247,37 @@ __kfifo_int_must_check_helper(int val) }) /** + * kfifo_is_empty_spinlocked - returns true if the fifo is empty using + * a spinlock for locking + * @fifo: address of the fifo to be used + * @lock: spinlock to be used for locking + */ +#define kfifo_is_empty_spinlocked(fifo, lock) \ +({ \ + unsigned long __flags; \ + bool __ret; \ + spin_lock_irqsave(lock, __flags); \ + __ret = kfifo_is_empty(fifo); \ + spin_unlock_irqrestore(lock, __flags); \ + __ret; \ +}) + +/** + * kfifo_is_empty_spinlocked_noirqsave - returns true if the fifo is empty + * using a spinlock for locking, doesn't disable interrupts + * @fifo: address of the fifo to be used + * @lock: spinlock to be used for locking + */ +#define kfifo_is_empty_spinlocked_noirqsave(fifo, lock) \ +({ \ + bool __ret; \ + spin_lock(lock); \ + __ret = kfifo_is_empty(fifo); \ + spin_unlock(lock); \ + __ret; \ +}) + +/** * kfifo_is_full - returns true if the fifo is full * @fifo: address of the fifo to be used */ @@ -517,6 +548,26 @@ __kfifo_uint_must_check_helper( \ __ret; \ }) +/** + * kfifo_in_spinlocked_noirqsave - put data into fifo using a spinlock for + * locking, don't disable interrupts + * @fifo: address of the fifo to be used + * @buf: the data to be added + * @n: number of elements to be added + * @lock: pointer to the spinlock to use for locking + * + * This is a variant of kfifo_in_spinlocked() but uses spin_lock/unlock() + * for locking and doesn't disable interrupts. 
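(Aside on the kfifo.h hunk above.) The *_noirqsave flavours introduced in this hunk are for callers that already run with interrupts disabled, for example a hard IRQ handler feeding a fifo, where the irqsave/irqrestore done by kfifo_in_spinlocked() would be redundant. A sketch under that assumption; the fifo, lock and event value are illustrative, and the consumer on the process-context side would keep using the irqsave variants:

#include <linux/interrupt.h>
#include <linux/kfifo.h>
#include <linux/spinlock.h>

static DEFINE_KFIFO(my_events, u32, 64);	/* illustrative 64-entry fifo */
static DEFINE_SPINLOCK(my_events_lock);

/* Hard IRQ handler: local interrupts are already off here, so only the
 * spinlock is taken. The return value (elements stored) is ignored in
 * this sketch. */
static irqreturn_t my_irq_handler(int irq, void *data)
{
	u32 ev = 0x1;				/* illustrative event code */

	kfifo_in_spinlocked_noirqsave(&my_events, &ev, 1, &my_events_lock);
	return IRQ_HANDLED;
}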
+ */ +#define kfifo_in_spinlocked_noirqsave(fifo, buf, n, lock) \ +({ \ + unsigned int __ret; \ + spin_lock(lock); \ + __ret = kfifo_in(fifo, buf, n); \ + spin_unlock(lock); \ + __ret; \ +}) + /* alias for kfifo_in_spinlocked, will be removed in a future release */ #define kfifo_in_locked(fifo, buf, n, lock) \ kfifo_in_spinlocked(fifo, buf, n, lock) @@ -569,6 +620,28 @@ __kfifo_uint_must_check_helper( \ }) \ ) +/** + * kfifo_out_spinlocked_noirqsave - get data from the fifo using a spinlock + * for locking, don't disable interrupts + * @fifo: address of the fifo to be used + * @buf: pointer to the storage buffer + * @n: max. number of elements to get + * @lock: pointer to the spinlock to use for locking + * + * This is a variant of kfifo_out_spinlocked() which uses spin_lock/unlock() + * for locking and doesn't disable interrupts. + */ +#define kfifo_out_spinlocked_noirqsave(fifo, buf, n, lock) \ +__kfifo_uint_must_check_helper( \ +({ \ + unsigned int __ret; \ + spin_lock(lock); \ + __ret = kfifo_out(fifo, buf, n); \ + spin_unlock(lock); \ + __ret; \ +}) \ +) + /* alias for kfifo_out_spinlocked, will be removed in a future release */ #define kfifo_out_locked(fifo, buf, n, lock) \ kfifo_out_spinlocked(fifo, buf, n, lock) diff --git a/include/linux/kthread.h b/include/linux/kthread.h index 0f9da966934e..8bbcaad7ef0f 100644 --- a/include/linux/kthread.h +++ b/include/linux/kthread.h @@ -165,7 +165,8 @@ extern void __kthread_init_worker(struct kthread_worker *worker, do { \ kthread_init_work(&(dwork)->work, (fn)); \ timer_setup(&(dwork)->timer, \ - kthread_delayed_work_timer_fn, 0); \ + kthread_delayed_work_timer_fn, \ + TIMER_IRQSAFE); \ } while (0) int kthread_worker_fn(void *worker_ptr); diff --git a/include/linux/ktime.h b/include/linux/ktime.h index d1fb05135665..42d2e6ac35f2 100644 --- a/include/linux/ktime.h +++ b/include/linux/ktime.h @@ -216,14 +216,7 @@ static inline __must_check bool ktime_to_timespec64_cond(const ktime_t kt, } } -/* - * The resolution of the clocks. The resolution value is returned in - * the clock_getres() system call to give application programmers an - * idea of the (in)accuracy of timers. Timer values are rounded up to - * this resolution values. - */ -#define LOW_RES_NSEC TICK_NSEC -#define KTIME_LOW_RES (LOW_RES_NSEC) +#include <vdso/ktime.h> static inline ktime_t ns_to_ktime(u64 ns) { diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index e89eb67356cb..6d58beb65454 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -360,6 +360,10 @@ static inline unsigned long *kvm_second_dirty_bitmap(struct kvm_memory_slot *mem return memslot->dirty_bitmap + len / sizeof(*memslot->dirty_bitmap); } +#ifndef KVM_DIRTY_LOG_MANUAL_CAPS +#define KVM_DIRTY_LOG_MANUAL_CAPS KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE +#endif + struct kvm_s390_adapter_int { u64 ind_addr; u64 summary_addr; @@ -431,11 +435,11 @@ static inline int kvm_arch_vcpu_memslots_id(struct kvm_vcpu *vcpu) */ struct kvm_memslots { u64 generation; - struct kvm_memory_slot memslots[KVM_MEM_SLOTS_NUM]; /* The mapping table from slot id to the index in memslots[]. 
*/ short id_to_index[KVM_MEM_SLOTS_NUM]; atomic_t lru_slot; int used_slots; + struct kvm_memory_slot memslots[]; }; struct kvm { @@ -493,7 +497,7 @@ struct kvm { #endif long tlbs_dirty; struct list_head devices; - bool manual_dirty_log_protect; + u64 manual_dirty_log_protect; struct dentry *debugfs_dentry; struct kvm_stat_data **debugfs_stat_data; struct srcu_struct srcu; @@ -527,6 +531,11 @@ struct kvm { #define vcpu_err(vcpu, fmt, ...) \ kvm_err("vcpu%i " fmt, (vcpu)->vcpu_id, ## __VA_ARGS__) +static inline bool kvm_dirty_log_manual_protect_and_init_set(struct kvm *kvm) +{ + return !!(kvm->manual_dirty_log_protect & KVM_DIRTY_LOG_INITIALLY_SET); +} + static inline struct kvm_io_bus *kvm_get_bus(struct kvm *kvm, enum kvm_bus idx) { return srcu_dereference_check(kvm->buses[idx], &kvm->srcu, @@ -572,10 +581,11 @@ static inline int kvm_vcpu_get_idx(struct kvm_vcpu *vcpu) return vcpu->vcpu_idx; } -#define kvm_for_each_memslot(memslot, slots) \ - for (memslot = &slots->memslots[0]; \ - memslot < slots->memslots + KVM_MEM_SLOTS_NUM && memslot->npages;\ - memslot++) +#define kvm_for_each_memslot(memslot, slots) \ + for (memslot = &slots->memslots[0]; \ + memslot < slots->memslots + slots->used_slots; memslot++) \ + if (WARN_ON_ONCE(!memslot->npages)) { \ + } else void kvm_vcpu_destroy(struct kvm_vcpu *vcpu); @@ -635,12 +645,15 @@ static inline struct kvm_memslots *kvm_vcpu_memslots(struct kvm_vcpu *vcpu) return __kvm_memslots(vcpu->kvm, as_id); } -static inline struct kvm_memory_slot * -id_to_memslot(struct kvm_memslots *slots, int id) +static inline +struct kvm_memory_slot *id_to_memslot(struct kvm_memslots *slots, int id) { int index = slots->id_to_index[id]; struct kvm_memory_slot *slot; + if (index < 0) + return NULL; + slot = &slots->memslots[index]; WARN_ON(slot->id != id); @@ -669,10 +682,7 @@ int kvm_set_memory_region(struct kvm *kvm, const struct kvm_userspace_memory_region *mem); int __kvm_set_memory_region(struct kvm *kvm, const struct kvm_userspace_memory_region *mem); -void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free, - struct kvm_memory_slot *dont); -int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot, - unsigned long npages); +void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot); void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen); int kvm_arch_prepare_memory_region(struct kvm *kvm, struct kvm_memory_slot *memslot, @@ -680,11 +690,9 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm, enum kvm_mr_change change); void kvm_arch_commit_memory_region(struct kvm *kvm, const struct kvm_userspace_memory_region *mem, - const struct kvm_memory_slot *old, + struct kvm_memory_slot *old, const struct kvm_memory_slot *new, enum kvm_mr_change change); -bool kvm_largepages_enabled(void); -void kvm_disable_largepages(void); /* flush all memory translations */ void kvm_arch_flush_shadow_all(struct kvm *kvm); /* flush memory translations pointing to 'slot' */ @@ -704,7 +712,6 @@ void kvm_release_page_clean(struct page *page); void kvm_release_page_dirty(struct page *page); void kvm_set_page_accessed(struct page *page); -kvm_pfn_t gfn_to_pfn_atomic(struct kvm *kvm, gfn_t gfn); kvm_pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn); kvm_pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault, bool *writable); @@ -819,23 +826,20 @@ vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf); int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext); -int kvm_get_dirty_log(struct kvm *kvm, - struct 
kvm_dirty_log *log, int *is_dirty); - -int kvm_get_dirty_log_protect(struct kvm *kvm, - struct kvm_dirty_log *log, bool *flush); -int kvm_clear_dirty_log_protect(struct kvm *kvm, - struct kvm_clear_dirty_log *log, bool *flush); - void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm, struct kvm_memory_slot *slot, gfn_t gfn_offset, unsigned long mask); - -int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, - struct kvm_dirty_log *log); -int kvm_vm_ioctl_clear_dirty_log(struct kvm *kvm, - struct kvm_clear_dirty_log *log); +void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot); + +#ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT +void kvm_arch_flush_remote_tlbs_memslot(struct kvm *kvm, + struct kvm_memory_slot *memslot); +#else /* !CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT */ +int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log); +int kvm_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log, + int *is_dirty, struct kvm_memory_slot **memslot); +#endif int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level, bool line_status); @@ -882,13 +886,15 @@ void kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu); int kvm_arch_hardware_enable(void); void kvm_arch_hardware_disable(void); -int kvm_arch_hardware_setup(void); +int kvm_arch_hardware_setup(void *opaque); void kvm_arch_hardware_unsetup(void); -int kvm_arch_check_processor_compat(void); +int kvm_arch_check_processor_compat(void *opaque); int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu); bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu); int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu); bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu); +int kvm_arch_post_init_vm(struct kvm *kvm); +void kvm_arch_pre_destroy_vm(struct kvm *kvm); #ifndef __KVM_HAVE_ARCH_VM_ALLOC /* @@ -1016,6 +1022,8 @@ bool kvm_arch_irqfd_allowed(struct kvm *kvm, struct kvm_irqfd *args); * used in non-modular code in arch/powerpc/kvm/book3s_hv_rm_mmu.c. * gfn_to_memslot() itself isn't here as an inline because that would * bloat other code too much. + * + * IMPORTANT: Slots are sorted from highest GFN to lowest GFN! */ static inline struct kvm_memory_slot * search_memslots(struct kvm_memslots *slots, gfn_t gfn) @@ -1024,6 +1032,9 @@ search_memslots(struct kvm_memslots *slots, gfn_t gfn) int slot = atomic_read(&slots->lru_slot); struct kvm_memory_slot *memslots = slots->memslots; + if (unlikely(!slots->used_slots)) + return NULL; + if (gfn >= memslots[slot].base_gfn && gfn < memslots[slot].base_gfn + memslots[slot].npages) return &memslots[slot]; @@ -1342,7 +1353,7 @@ static inline void kvm_vcpu_set_dy_eligible(struct kvm_vcpu *vcpu, bool val) #endif /* CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT */ struct kvm_vcpu *kvm_get_running_vcpu(void); -struct kvm_vcpu __percpu **kvm_get_running_vcpus(void); +struct kvm_vcpu * __percpu *kvm_get_running_vcpus(void); #ifdef CONFIG_HAVE_KVM_IRQ_BYPASS bool kvm_arch_has_irq_bypass(void); diff --git a/include/linux/leds.h b/include/linux/leds.h index 75353e5f9d13..2451962d1ec5 100644 --- a/include/linux/leds.h +++ b/include/linux/leds.h @@ -25,6 +25,7 @@ struct device_node; * LED Core */ +/* This is obsolete/useless. We now support variable maximum brightness. 
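(Aside on the leds.h hunk above.) The comment being added here is a hint to stop treating enum led_brightness as the set of valid levels; the per-device max_brightness in struct led_classdev defines the real range. A sketch, assuming the usual led_classdev registration API from this header, of a driver exposing a 1024-level LED; the name and the empty set hook are illustrative only:

#include <linux/device.h>
#include <linux/leds.h>

/* Illustrative hook: push "value", which may be anything from 0 up to
 * cdev->max_brightness, to the hardware. */
static void my_led_set(struct led_classdev *cdev, enum led_brightness value)
{
	/* hardware write omitted in this sketch */
}

static struct led_classdev my_led = {
	.name		= "my:white:status",	/* illustrative name */
	.max_brightness	= 1023,			/* driver-defined range, not LED_FULL */
	.brightness_set	= my_led_set,
};

/* typically from probe():  devm_led_classdev_register(dev, &my_led); */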
*/ enum led_brightness { LED_OFF = 0, LED_ON = 1, diff --git a/include/linux/leds_pwm.h b/include/linux/leds_pwm.h deleted file mode 100644 index 93d101d28943..000000000000 --- a/include/linux/leds_pwm.h +++ /dev/null @@ -1,22 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * PWM LED driver data - see drivers/leds/leds-pwm.c - */ -#ifndef __LINUX_LEDS_PWM_H -#define __LINUX_LEDS_PWM_H - -struct led_pwm { - const char *name; - const char *default_trigger; - unsigned pwm_id __deprecated; - u8 active_low; - unsigned max_brightness; - unsigned pwm_period_ns; -}; - -struct led_pwm_platform_data { - int num_leds; - struct led_pwm *leds; -}; - -#endif diff --git a/include/linux/libata.h b/include/linux/libata.h index 2ca9b7056a82..cffa4714bfa8 100644 --- a/include/linux/libata.h +++ b/include/linux/libata.h @@ -57,8 +57,6 @@ #define VPRINTK(fmt, args...) #endif /* ATA_DEBUG */ -#define BPRINTK(fmt, args...) if (ap->flags & ATA_FLAG_DEBUGMSG) printk(KERN_ERR "%s: " fmt, __func__, ## args) - #define ata_print_version_once(dev, version) \ ({ \ static bool __print_once; \ @@ -176,6 +174,7 @@ enum { ATA_DEV_NONE = 11, /* no device */ /* struct ata_link flags */ + /* NOTE: struct ata_force_param currently stores lflags in u16 */ ATA_LFLAG_NO_HRST = (1 << 1), /* avoid hardreset */ ATA_LFLAG_NO_SRST = (1 << 2), /* avoid softreset */ ATA_LFLAG_ASSUME_ATA = (1 << 3), /* assume ATA class */ @@ -531,12 +530,14 @@ typedef int (*ata_reset_fn_t)(struct ata_link *link, unsigned int *classes, unsigned long deadline); typedef void (*ata_postreset_fn_t)(struct ata_link *link, unsigned int *classes); -extern struct device_attribute dev_attr_link_power_management_policy; extern struct device_attribute dev_attr_unload_heads; +#ifdef CONFIG_SATA_HOST +extern struct device_attribute dev_attr_link_power_management_policy; extern struct device_attribute dev_attr_ncq_prio_enable; extern struct device_attribute dev_attr_em_message_type; extern struct device_attribute dev_attr_em_message; extern struct device_attribute dev_attr_sw_activity; +#endif enum sw_activity { OFF, @@ -1020,10 +1021,6 @@ struct ata_timing { /* * Core layer - drivers/ata/libata-core.c */ -extern const unsigned long sata_deb_timing_normal[]; -extern const unsigned long sata_deb_timing_hotplug[]; -extern const unsigned long sata_deb_timing_long[]; - extern struct ata_port_operations ata_dummy_port_ops; extern const struct ata_port_info ata_dummy_port_info; @@ -1061,33 +1058,14 @@ static inline int is_multi_taskfile(struct ata_taskfile *tf) (tf->command == ATA_CMD_WRITE_MULTI_FUA_EXT); } -static inline const unsigned long * -sata_ehc_deb_timing(struct ata_eh_context *ehc) -{ - if (ehc->i.flags & ATA_EHI_HOTPLUGGED) - return sata_deb_timing_hotplug; - else - return sata_deb_timing_normal; -} - static inline int ata_port_is_dummy(struct ata_port *ap) { return ap->ops == &ata_dummy_port_ops; } -extern int sata_set_spd(struct ata_link *link); extern int ata_std_prereset(struct ata_link *link, unsigned long deadline); extern int ata_wait_after_reset(struct ata_link *link, unsigned long deadline, int (*check_ready)(struct ata_link *link)); -extern int sata_link_debounce(struct ata_link *link, - const unsigned long *params, unsigned long deadline); -extern int sata_link_resume(struct ata_link *link, const unsigned long *params, - unsigned long deadline); -extern int sata_link_scr_lpm(struct ata_link *link, enum ata_lpm_policy policy, - bool spm_wakeup); -extern int sata_link_hardreset(struct ata_link *link, - const unsigned long *timing, unsigned long 
deadline, - bool *online, int (*check_ready)(struct ata_link *)); extern int sata_std_hardreset(struct ata_link *link, unsigned int *class, unsigned long deadline); extern void ata_std_postreset(struct ata_link *link, unsigned int *classes); @@ -1095,7 +1073,6 @@ extern void ata_std_postreset(struct ata_link *link, unsigned int *classes); extern struct ata_host *ata_host_alloc(struct device *dev, int max_ports); extern struct ata_host *ata_host_alloc_pinfo(struct device *dev, const struct ata_port_info * const * ppi, int n_ports); -extern int ata_slave_link_init(struct ata_port *ap); extern void ata_host_get(struct ata_host *host); extern void ata_host_put(struct ata_host *host); extern int ata_host_start(struct ata_host *host); @@ -1117,22 +1094,6 @@ extern int ata_scsi_ioctl(struct scsi_device *dev, unsigned int cmd, extern int ata_scsi_queuecmd(struct Scsi_Host *h, struct scsi_cmnd *cmd); extern int ata_sas_scsi_ioctl(struct ata_port *ap, struct scsi_device *dev, unsigned int cmd, void __user *arg); -extern void ata_sas_port_destroy(struct ata_port *); -extern struct ata_port *ata_sas_port_alloc(struct ata_host *, - struct ata_port_info *, struct Scsi_Host *); -extern void ata_sas_async_probe(struct ata_port *ap); -extern int ata_sas_sync_probe(struct ata_port *ap); -extern int ata_sas_port_init(struct ata_port *); -extern int ata_sas_port_start(struct ata_port *ap); -extern int ata_sas_tport_add(struct device *parent, struct ata_port *ap); -extern void ata_sas_tport_delete(struct ata_port *ap); -extern void ata_sas_port_stop(struct ata_port *ap); -extern int ata_sas_slave_configure(struct scsi_device *, struct ata_port *); -extern int ata_sas_queuecmd(struct scsi_cmnd *cmd, struct ata_port *ap); -extern int sata_scr_valid(struct ata_link *link); -extern int sata_scr_read(struct ata_link *link, int reg, u32 *val); -extern int sata_scr_write(struct ata_link *link, int reg, u32 val); -extern int sata_scr_write_flush(struct ata_link *link, int reg, u32 val); extern bool ata_link_online(struct ata_link *link); extern bool ata_link_offline(struct ata_link *link); #ifdef CONFIG_PM @@ -1153,9 +1114,6 @@ extern void ata_msleep(struct ata_port *ap, unsigned int msecs); extern u32 ata_wait_register(struct ata_port *ap, void __iomem *reg, u32 mask, u32 val, unsigned long interval, unsigned long timeout); extern int atapi_cmd_type(u8 opcode); -extern void ata_tf_to_fis(const struct ata_taskfile *tf, - u8 pmp, int is_cmd, u8 *fis); -extern void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf); extern unsigned long ata_pack_xfermask(unsigned long pio_mask, unsigned long mwdma_mask, unsigned long udma_mask); extern void ata_unpack_xfermask(unsigned long xfer_mask, @@ -1179,7 +1137,6 @@ extern void ata_id_c_string(const u16 *id, unsigned char *s, extern unsigned int ata_do_dev_read_id(struct ata_device *dev, struct ata_taskfile *tf, u16 *id); extern void ata_qc_complete(struct ata_queued_cmd *qc); -extern int ata_qc_complete_multiple(struct ata_port *ap, u64 qc_active); extern u64 ata_qc_get_active(struct ata_port *ap); extern void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd); extern int ata_std_bios_param(struct scsi_device *sdev, @@ -1196,7 +1153,96 @@ extern struct ata_device *ata_dev_pair(struct ata_device *adev); extern int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev); extern void ata_scsi_port_error_handler(struct Scsi_Host *host, struct ata_port *ap); extern void ata_scsi_cmd_error_handler(struct Scsi_Host *host, struct ata_port *ap, struct 
list_head *eh_q); + +/* + * SATA specific code - drivers/ata/libata-sata.c + */ +#ifdef CONFIG_SATA_HOST +extern const unsigned long sata_deb_timing_normal[]; +extern const unsigned long sata_deb_timing_hotplug[]; +extern const unsigned long sata_deb_timing_long[]; + +static inline const unsigned long * +sata_ehc_deb_timing(struct ata_eh_context *ehc) +{ + if (ehc->i.flags & ATA_EHI_HOTPLUGGED) + return sata_deb_timing_hotplug; + else + return sata_deb_timing_normal; +} + +extern int sata_scr_valid(struct ata_link *link); +extern int sata_scr_read(struct ata_link *link, int reg, u32 *val); +extern int sata_scr_write(struct ata_link *link, int reg, u32 val); +extern int sata_scr_write_flush(struct ata_link *link, int reg, u32 val); +extern int sata_set_spd(struct ata_link *link); +extern int sata_link_hardreset(struct ata_link *link, + const unsigned long *timing, unsigned long deadline, + bool *online, int (*check_ready)(struct ata_link *)); +extern int sata_link_resume(struct ata_link *link, const unsigned long *params, + unsigned long deadline); +extern void ata_eh_analyze_ncq_error(struct ata_link *link); +#else +static inline const unsigned long * +sata_ehc_deb_timing(struct ata_eh_context *ehc) +{ + return NULL; +} +static inline int sata_scr_valid(struct ata_link *link) { return 0; } +static inline int sata_scr_read(struct ata_link *link, int reg, u32 *val) +{ + return -EOPNOTSUPP; +} +static inline int sata_scr_write(struct ata_link *link, int reg, u32 val) +{ + return -EOPNOTSUPP; +} +static inline int sata_scr_write_flush(struct ata_link *link, int reg, u32 val) +{ + return -EOPNOTSUPP; +} +static inline int sata_set_spd(struct ata_link *link) { return -EOPNOTSUPP; } +static inline int sata_link_hardreset(struct ata_link *link, + const unsigned long *timing, + unsigned long deadline, + bool *online, + int (*check_ready)(struct ata_link *)) +{ + if (online) + *online = false; + return -EOPNOTSUPP; +} +static inline int sata_link_resume(struct ata_link *link, + const unsigned long *params, + unsigned long deadline) +{ + return -EOPNOTSUPP; +} +static inline void ata_eh_analyze_ncq_error(struct ata_link *link) { } +#endif +extern int sata_link_debounce(struct ata_link *link, + const unsigned long *params, unsigned long deadline); +extern int sata_link_scr_lpm(struct ata_link *link, enum ata_lpm_policy policy, + bool spm_wakeup); +extern int ata_slave_link_init(struct ata_port *ap); +extern void ata_sas_port_destroy(struct ata_port *); +extern struct ata_port *ata_sas_port_alloc(struct ata_host *, + struct ata_port_info *, struct Scsi_Host *); +extern void ata_sas_async_probe(struct ata_port *ap); +extern int ata_sas_sync_probe(struct ata_port *ap); +extern int ata_sas_port_init(struct ata_port *); +extern int ata_sas_port_start(struct ata_port *ap); +extern int ata_sas_tport_add(struct device *parent, struct ata_port *ap); +extern void ata_sas_tport_delete(struct ata_port *ap); +extern void ata_sas_port_stop(struct ata_port *ap); +extern int ata_sas_slave_configure(struct scsi_device *, struct ata_port *); +extern int ata_sas_queuecmd(struct scsi_cmnd *cmd, struct ata_port *ap); +extern void ata_tf_to_fis(const struct ata_taskfile *tf, + u8 pmp, int is_cmd, u8 *fis); +extern void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf); +extern int ata_qc_complete_multiple(struct ata_port *ap, u64 qc_active); extern bool sata_lpm_ignore_phy_events(struct ata_link *link); +extern int sata_async_notification(struct ata_port *ap); extern int ata_cable_40wire(struct ata_port *ap); extern 
int ata_cable_80wire(struct ata_port *ap); @@ -1206,12 +1252,6 @@ extern int ata_cable_unknown(struct ata_port *ap); /* Timing helpers */ extern unsigned int ata_pio_need_iordy(const struct ata_device *); -extern const struct ata_timing *ata_timing_find_mode(u8 xfer_mode); -extern int ata_timing_compute(struct ata_device *, unsigned short, - struct ata_timing *, int, int); -extern void ata_timing_merge(const struct ata_timing *, - const struct ata_timing *, struct ata_timing *, - unsigned int); extern u8 ata_timing_cycle2mode(unsigned int xfer_shift, int cycle); /* PCI */ @@ -1295,14 +1335,12 @@ extern void ata_port_wait_eh(struct ata_port *ap); extern int ata_link_abort(struct ata_link *link); extern int ata_port_abort(struct ata_port *ap); extern int ata_port_freeze(struct ata_port *ap); -extern int sata_async_notification(struct ata_port *ap); extern void ata_eh_freeze_port(struct ata_port *ap); extern void ata_eh_thaw_port(struct ata_port *ap); extern void ata_eh_qc_complete(struct ata_queued_cmd *qc); extern void ata_eh_qc_retry(struct ata_queued_cmd *qc); -extern void ata_eh_analyze_ncq_error(struct ata_link *link); extern void ata_do_eh(struct ata_port *ap, ata_prereset_fn_t prereset, ata_reset_fn_t softreset, ata_reset_fn_t hardreset, @@ -1343,7 +1381,7 @@ extern struct device_attribute *ata_common_sdev_attrs[]; * edge driver's module reference, otherwise the driver can be unloaded * even if the scsi_device is being accessed. */ -#define ATA_BASE_SHT(drv_name) \ +#define __ATA_BASE_SHT(drv_name) \ .module = THIS_MODULE, \ .name = drv_name, \ .ioctl = ata_scsi_ioctl, \ @@ -1357,12 +1395,20 @@ extern struct device_attribute *ata_common_sdev_attrs[]; .slave_configure = ata_scsi_slave_config, \ .slave_destroy = ata_scsi_slave_destroy, \ .bios_param = ata_std_bios_param, \ - .unlock_native_capacity = ata_scsi_unlock_native_capacity, \ + .unlock_native_capacity = ata_scsi_unlock_native_capacity + +#define ATA_BASE_SHT(drv_name) \ + __ATA_BASE_SHT(drv_name), \ .sdev_attrs = ata_common_sdev_attrs +#ifdef CONFIG_SATA_HOST +extern struct device_attribute *ata_ncq_sdev_attrs[]; + #define ATA_NCQ_SHT(drv_name) \ - ATA_BASE_SHT(drv_name), \ + __ATA_BASE_SHT(drv_name), \ + .sdev_attrs = ata_ncq_sdev_attrs, \ .change_queue_depth = ata_scsi_change_queue_depth +#endif /* * PMP helpers @@ -1635,6 +1681,8 @@ extern struct ata_device *ata_dev_next(struct ata_device *dev, */ static inline int ata_ncq_enabled(struct ata_device *dev) { + if (!IS_ENABLED(CONFIG_SATA_HOST)) + return 0; return (dev->flags & (ATA_DFLAG_PIO | ATA_DFLAG_NCQ_OFF | ATA_DFLAG_NCQ)) == ATA_DFLAG_NCQ; } @@ -1804,6 +1852,16 @@ static inline int ata_dma_enabled(struct ata_device *adev) } /************************************************************************** + * PATA timings - drivers/ata/libata-pata-timings.c + */ +extern const struct ata_timing *ata_timing_find_mode(u8 xfer_mode); +extern int ata_timing_compute(struct ata_device *, unsigned short, + struct ata_timing *, int, int); +extern void ata_timing_merge(const struct ata_timing *, + const struct ata_timing *, struct ata_timing *, + unsigned int); + +/************************************************************************** * PMP - drivers/ata/libata-pmp.c */ #ifdef CONFIG_SATA_PMP diff --git a/include/linux/limits.h b/include/linux/limits.h index 76afcd24ff8c..b568b9c30bbf 100644 --- a/include/linux/limits.h +++ b/include/linux/limits.h @@ -4,19 +4,8 @@ #include <uapi/linux/limits.h> #include <linux/types.h> +#include <vdso/limits.h> -#define USHRT_MAX ((unsigned 
short)~0U) -#define SHRT_MAX ((short)(USHRT_MAX >> 1)) -#define SHRT_MIN ((short)(-SHRT_MAX - 1)) -#define INT_MAX ((int)(~0U >> 1)) -#define INT_MIN (-INT_MAX - 1) -#define UINT_MAX (~0U) -#define LONG_MAX ((long)(~0UL >> 1)) -#define LONG_MIN (-LONG_MAX - 1) -#define ULONG_MAX (~0UL) -#define LLONG_MAX ((long long)(~0ULL >> 1)) -#define LLONG_MIN (-LLONG_MAX - 1) -#define ULLONG_MAX (~0ULL) #define SIZE_MAX (~(size_t)0) #define PHYS_ADDR_MAX (~(phys_addr_t)0) @@ -27,6 +16,7 @@ #define S16_MAX ((s16)(U16_MAX >> 1)) #define S16_MIN ((s16)(-S16_MAX - 1)) #define U32_MAX ((u32)~0U) +#define U32_MIN ((u32)0) #define S32_MAX ((s32)(U32_MAX >> 1)) #define S32_MIN ((s32)(-S32_MAX - 1)) #define U64_MAX ((u64)~0ULL) diff --git a/include/linux/linkmode.h b/include/linux/linkmode.h index fe740031339d..c664c27a29a0 100644 --- a/include/linux/linkmode.h +++ b/include/linux/linkmode.h @@ -71,7 +71,7 @@ static inline void linkmode_change_bit(int nr, volatile unsigned long *addr) __change_bit(nr, addr); } -static inline int linkmode_test_bit(int nr, volatile unsigned long *addr) +static inline int linkmode_test_bit(int nr, const volatile unsigned long *addr) { return test_bit(nr, addr); } @@ -88,4 +88,10 @@ static inline int linkmode_subset(const unsigned long *src1, return bitmap_subset(src1, src2, __ETHTOOL_LINK_MODE_MASK_NBITS); } +void linkmode_resolve_pause(const unsigned long *local_adv, + const unsigned long *partner_adv, + bool *tx_pause, bool *rx_pause); + +void linkmode_set_pause(unsigned long *advertisement, bool tx, bool rx); + #endif /* __LINKMODE_H */ diff --git a/include/linux/list.h b/include/linux/list.h index 884216db3246..aff44d34f4e4 100644 --- a/include/linux/list.h +++ b/include/linux/list.h @@ -989,7 +989,7 @@ static inline void hlist_move_list(struct hlist_head *old, /** * hlist_for_each_entry_safe - iterate over list of given type safe against removal of list entry * @pos: the type * to use as a loop cursor. - * @n: another &struct hlist_node to use as temporary storage + * @n: a &struct hlist_node to use as temporary storage * @head: the head for your list. * @member: the name of the hlist_node within the struct. */ diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h index 664f52c6dd4c..206774ac6946 100644 --- a/include/linux/lockdep.h +++ b/include/linux/lockdep.h @@ -21,6 +21,22 @@ extern int lock_stat; #include <linux/types.h> +enum lockdep_wait_type { + LD_WAIT_INV = 0, /* not checked, catch all */ + + LD_WAIT_FREE, /* wait free, rcu etc.. */ + LD_WAIT_SPIN, /* spin loops, raw_spinlock_t etc.. */ + +#ifdef CONFIG_PROVE_RAW_LOCK_NESTING + LD_WAIT_CONFIG, /* CONFIG_PREEMPT_LOCK, spinlock_t etc.. */ +#else + LD_WAIT_CONFIG = LD_WAIT_SPIN, +#endif + LD_WAIT_SLEEP, /* sleeping locks, mutex_t etc.. 
*/ + + LD_WAIT_MAX, /* must be last */ +}; + #ifdef CONFIG_LOCKDEP #include <linux/linkage.h> @@ -111,6 +127,9 @@ struct lock_class { int name_version; const char *name; + short wait_type_inner; + short wait_type_outer; + #ifdef CONFIG_LOCK_STAT unsigned long contention_point[LOCKSTAT_POINTS]; unsigned long contending_point[LOCKSTAT_POINTS]; @@ -158,6 +177,8 @@ struct lockdep_map { struct lock_class_key *key; struct lock_class *class_cache[NR_LOCKDEP_CACHING_CLASSES]; const char *name; + short wait_type_outer; /* can be taken in this context */ + short wait_type_inner; /* presents this context */ #ifdef CONFIG_LOCK_STAT int cpu; unsigned long ip; @@ -299,8 +320,21 @@ extern void lockdep_unregister_key(struct lock_class_key *key); * to lockdep: */ -extern void lockdep_init_map(struct lockdep_map *lock, const char *name, - struct lock_class_key *key, int subclass); +extern void lockdep_init_map_waits(struct lockdep_map *lock, const char *name, + struct lock_class_key *key, int subclass, short inner, short outer); + +static inline void +lockdep_init_map_wait(struct lockdep_map *lock, const char *name, + struct lock_class_key *key, int subclass, short inner) +{ + lockdep_init_map_waits(lock, name, key, subclass, inner, LD_WAIT_INV); +} + +static inline void lockdep_init_map(struct lockdep_map *lock, const char *name, + struct lock_class_key *key, int subclass) +{ + lockdep_init_map_wait(lock, name, key, subclass, LD_WAIT_INV); +} /* * Reinitialize a lock key - for cases where there is special locking or @@ -308,18 +342,29 @@ extern void lockdep_init_map(struct lockdep_map *lock, const char *name, * of dependencies wrong: they are either too broad (they need a class-split) * or they are too narrow (they suffer from a false class-split): */ -#define lockdep_set_class(lock, key) \ - lockdep_init_map(&(lock)->dep_map, #key, key, 0) -#define lockdep_set_class_and_name(lock, key, name) \ - lockdep_init_map(&(lock)->dep_map, name, key, 0) -#define lockdep_set_class_and_subclass(lock, key, sub) \ - lockdep_init_map(&(lock)->dep_map, #key, key, sub) -#define lockdep_set_subclass(lock, sub) \ - lockdep_init_map(&(lock)->dep_map, #lock, \ - (lock)->dep_map.key, sub) +#define lockdep_set_class(lock, key) \ + lockdep_init_map_waits(&(lock)->dep_map, #key, key, 0, \ + (lock)->dep_map.wait_type_inner, \ + (lock)->dep_map.wait_type_outer) + +#define lockdep_set_class_and_name(lock, key, name) \ + lockdep_init_map_waits(&(lock)->dep_map, name, key, 0, \ + (lock)->dep_map.wait_type_inner, \ + (lock)->dep_map.wait_type_outer) + +#define lockdep_set_class_and_subclass(lock, key, sub) \ + lockdep_init_map_waits(&(lock)->dep_map, #key, key, sub,\ + (lock)->dep_map.wait_type_inner, \ + (lock)->dep_map.wait_type_outer) + +#define lockdep_set_subclass(lock, sub) \ + lockdep_init_map_waits(&(lock)->dep_map, #lock, (lock)->dep_map.key, sub,\ + (lock)->dep_map.wait_type_inner, \ + (lock)->dep_map.wait_type_outer) #define lockdep_set_novalidate_class(lock) \ lockdep_set_class_and_name(lock, &__lockdep_no_validate__, #lock) + /* * Compare locking classes */ @@ -432,6 +477,10 @@ static inline void lockdep_set_selftest_task(struct task_struct *task) # define lock_set_class(l, n, k, s, i) do { } while (0) # define lock_set_subclass(l, s, i) do { } while (0) # define lockdep_init() do { } while (0) +# define lockdep_init_map_waits(lock, name, key, sub, inner, outer) \ + do { (void)(name); (void)(key); } while (0) +# define lockdep_init_map_wait(lock, name, key, sub, inner) \ + do { (void)(name); (void)(key); } while (0) # 
define lockdep_init_map(lock, name, key, sub) \ do { (void)(name); (void)(key); } while (0) # define lockdep_set_class(lock, key) do { (void)(key); } while (0) @@ -662,6 +711,21 @@ do { \ # define lockdep_assert_in_irq() do { } while (0) #endif +#ifdef CONFIG_PROVE_RAW_LOCK_NESTING + +# define lockdep_assert_RT_in_threaded_ctx() do { \ + WARN_ONCE(debug_locks && !current->lockdep_recursion && \ + current->hardirq_context && \ + !(current->hardirq_threaded || current->irq_config), \ + "Not in threaded context on PREEMPT_RT as expected\n"); \ +} while (0) + +#else + +# define lockdep_assert_RT_in_threaded_ctx() do { } while (0) + +#endif + #ifdef CONFIG_LOCKDEP void lockdep_rcu_suspicious(const char *file, const int line, const char *s); #else diff --git a/include/linux/lsm_hook_defs.h b/include/linux/lsm_hook_defs.h new file mode 100644 index 000000000000..9cd4455528e5 --- /dev/null +++ b/include/linux/lsm_hook_defs.h @@ -0,0 +1,381 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +/* + * Linux Security Module Hook declarations. + * + * Copyright (C) 2001 WireX Communications, Inc <chris@wirex.com> + * Copyright (C) 2001 Greg Kroah-Hartman <greg@kroah.com> + * Copyright (C) 2001 Networks Associates Technology, Inc <ssmalley@nai.com> + * Copyright (C) 2001 James Morris <jmorris@intercode.com.au> + * Copyright (C) 2001 Silicon Graphics, Inc. (Trust Technology Group) + * Copyright (C) 2015 Intel Corporation. + * Copyright (C) 2015 Casey Schaufler <casey@schaufler-ca.com> + * Copyright (C) 2016 Mellanox Techonologies + * Copyright (C) 2020 Google LLC. + */ + +/* + * The macro LSM_HOOK is used to define the data structures required by the + * the LSM framework using the pattern: + * + * LSM_HOOK(<return_type>, <default_value>, <hook_name>, args...) + * + * struct security_hook_heads { + * #define LSM_HOOK(RET, DEFAULT, NAME, ...) 
struct hlist_head NAME; + * #include <linux/lsm_hook_defs.h> + * #undef LSM_HOOK + * }; + */ +LSM_HOOK(int, 0, binder_set_context_mgr, struct task_struct *mgr) +LSM_HOOK(int, 0, binder_transaction, struct task_struct *from, + struct task_struct *to) +LSM_HOOK(int, 0, binder_transfer_binder, struct task_struct *from, + struct task_struct *to) +LSM_HOOK(int, 0, binder_transfer_file, struct task_struct *from, + struct task_struct *to, struct file *file) +LSM_HOOK(int, 0, ptrace_access_check, struct task_struct *child, + unsigned int mode) +LSM_HOOK(int, 0, ptrace_traceme, struct task_struct *parent) +LSM_HOOK(int, 0, capget, struct task_struct *target, kernel_cap_t *effective, + kernel_cap_t *inheritable, kernel_cap_t *permitted) +LSM_HOOK(int, 0, capset, struct cred *new, const struct cred *old, + const kernel_cap_t *effective, const kernel_cap_t *inheritable, + const kernel_cap_t *permitted) +LSM_HOOK(int, 0, capable, const struct cred *cred, struct user_namespace *ns, + int cap, unsigned int opts) +LSM_HOOK(int, 0, quotactl, int cmds, int type, int id, struct super_block *sb) +LSM_HOOK(int, 0, quota_on, struct dentry *dentry) +LSM_HOOK(int, 0, syslog, int type) +LSM_HOOK(int, 0, settime, const struct timespec64 *ts, + const struct timezone *tz) +LSM_HOOK(int, 0, vm_enough_memory, struct mm_struct *mm, long pages) +LSM_HOOK(int, 0, bprm_set_creds, struct linux_binprm *bprm) +LSM_HOOK(int, 0, bprm_check_security, struct linux_binprm *bprm) +LSM_HOOK(void, LSM_RET_VOID, bprm_committing_creds, struct linux_binprm *bprm) +LSM_HOOK(void, LSM_RET_VOID, bprm_committed_creds, struct linux_binprm *bprm) +LSM_HOOK(int, 0, fs_context_dup, struct fs_context *fc, + struct fs_context *src_sc) +LSM_HOOK(int, 0, fs_context_parse_param, struct fs_context *fc, + struct fs_parameter *param) +LSM_HOOK(int, 0, sb_alloc_security, struct super_block *sb) +LSM_HOOK(void, LSM_RET_VOID, sb_free_security, struct super_block *sb) +LSM_HOOK(void, LSM_RET_VOID, sb_free_mnt_opts, void *mnt_opts) +LSM_HOOK(int, 0, sb_eat_lsm_opts, char *orig, void **mnt_opts) +LSM_HOOK(int, 0, sb_remount, struct super_block *sb, void *mnt_opts) +LSM_HOOK(int, 0, sb_kern_mount, struct super_block *sb) +LSM_HOOK(int, 0, sb_show_options, struct seq_file *m, struct super_block *sb) +LSM_HOOK(int, 0, sb_statfs, struct dentry *dentry) +LSM_HOOK(int, 0, sb_mount, const char *dev_name, const struct path *path, + const char *type, unsigned long flags, void *data) +LSM_HOOK(int, 0, sb_umount, struct vfsmount *mnt, int flags) +LSM_HOOK(int, 0, sb_pivotroot, const struct path *old_path, + const struct path *new_path) +LSM_HOOK(int, 0, sb_set_mnt_opts, struct super_block *sb, void *mnt_opts, + unsigned long kern_flags, unsigned long *set_kern_flags) +LSM_HOOK(int, 0, sb_clone_mnt_opts, const struct super_block *oldsb, + struct super_block *newsb, unsigned long kern_flags, + unsigned long *set_kern_flags) +LSM_HOOK(int, 0, sb_add_mnt_opt, const char *option, const char *val, + int len, void **mnt_opts) +LSM_HOOK(int, 0, move_mount, const struct path *from_path, + const struct path *to_path) +LSM_HOOK(int, 0, dentry_init_security, struct dentry *dentry, + int mode, const struct qstr *name, void **ctx, u32 *ctxlen) +LSM_HOOK(int, 0, dentry_create_files_as, struct dentry *dentry, int mode, + struct qstr *name, const struct cred *old, struct cred *new) + +#ifdef CONFIG_SECURITY_PATH +LSM_HOOK(int, 0, path_unlink, const struct path *dir, struct dentry *dentry) +LSM_HOOK(int, 0, path_mkdir, const struct path *dir, struct dentry *dentry, + umode_t mode) 
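
[Editor's note: illustrative aside, not part of the patch.] The header comment above describes the LSM_HOOK x-macro pattern: a consumer defines LSM_HOOK(<return_type>, <default_value>, <hook_name>, args...) before including linux/lsm_hook_defs.h and gets one generated entry per hook. A minimal sketch of two such expansions follows; the first is quoted directly from the comment above, the second mirrors how the hand-written union security_list_options (removed from lsm_hooks.h further down in this patch) is presumably regenerated. The surrounding details are assumptions for illustration, not code from the patch itself.

/*
 * Hedged sketch of LSM_HOOK consumers. lsm_hook_defs.h deliberately has no
 * include guard, so it can be included repeatedly with different LSM_HOOK
 * definitions.
 */
#include <linux/list.h>			/* struct hlist_head */

/* 1) One hlist_head per hook, as in struct security_hook_heads: */
struct security_hook_heads {
	#define LSM_HOOK(RET, DEFAULT, NAME, ...) struct hlist_head NAME;
	#include <linux/lsm_hook_defs.h>
	#undef LSM_HOOK
};

/* 2) One function-pointer member per hook, replacing the old
 *    hand-written union security_list_options (assumed expansion): */
union security_list_options {
	#define LSM_HOOK(RET, DEFAULT, NAME, ...) RET (*NAME)(__VA_ARGS__);
	#include <linux/lsm_hook_defs.h>
	#undef LSM_HOOK
};

The DEFAULT field carries the value a hook returns when no security module implements it (0, -EOPNOTSUPP, LSM_RET_VOID, and so on in the entries below); keeping it in the same table lets the framework generate default-return handling from the one list as well. [End of editor's note; the patch continues below.]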
+LSM_HOOK(int, 0, path_rmdir, const struct path *dir, struct dentry *dentry) +LSM_HOOK(int, 0, path_mknod, const struct path *dir, struct dentry *dentry, + umode_t mode, unsigned int dev) +LSM_HOOK(int, 0, path_truncate, const struct path *path) +LSM_HOOK(int, 0, path_symlink, const struct path *dir, struct dentry *dentry, + const char *old_name) +LSM_HOOK(int, 0, path_link, struct dentry *old_dentry, + const struct path *new_dir, struct dentry *new_dentry) +LSM_HOOK(int, 0, path_rename, const struct path *old_dir, + struct dentry *old_dentry, const struct path *new_dir, + struct dentry *new_dentry) +LSM_HOOK(int, 0, path_chmod, const struct path *path, umode_t mode) +LSM_HOOK(int, 0, path_chown, const struct path *path, kuid_t uid, kgid_t gid) +LSM_HOOK(int, 0, path_chroot, const struct path *path) +#endif /* CONFIG_SECURITY_PATH */ + +/* Needed for inode based security check */ +LSM_HOOK(int, 0, path_notify, const struct path *path, u64 mask, + unsigned int obj_type) +LSM_HOOK(int, 0, inode_alloc_security, struct inode *inode) +LSM_HOOK(void, LSM_RET_VOID, inode_free_security, struct inode *inode) +LSM_HOOK(int, 0, inode_init_security, struct inode *inode, + struct inode *dir, const struct qstr *qstr, const char **name, + void **value, size_t *len) +LSM_HOOK(int, 0, inode_create, struct inode *dir, struct dentry *dentry, + umode_t mode) +LSM_HOOK(int, 0, inode_link, struct dentry *old_dentry, struct inode *dir, + struct dentry *new_dentry) +LSM_HOOK(int, 0, inode_unlink, struct inode *dir, struct dentry *dentry) +LSM_HOOK(int, 0, inode_symlink, struct inode *dir, struct dentry *dentry, + const char *old_name) +LSM_HOOK(int, 0, inode_mkdir, struct inode *dir, struct dentry *dentry, + umode_t mode) +LSM_HOOK(int, 0, inode_rmdir, struct inode *dir, struct dentry *dentry) +LSM_HOOK(int, 0, inode_mknod, struct inode *dir, struct dentry *dentry, + umode_t mode, dev_t dev) +LSM_HOOK(int, 0, inode_rename, struct inode *old_dir, struct dentry *old_dentry, + struct inode *new_dir, struct dentry *new_dentry) +LSM_HOOK(int, 0, inode_readlink, struct dentry *dentry) +LSM_HOOK(int, 0, inode_follow_link, struct dentry *dentry, struct inode *inode, + bool rcu) +LSM_HOOK(int, 0, inode_permission, struct inode *inode, int mask) +LSM_HOOK(int, 0, inode_setattr, struct dentry *dentry, struct iattr *attr) +LSM_HOOK(int, 0, inode_getattr, const struct path *path) +LSM_HOOK(int, 0, inode_setxattr, struct dentry *dentry, const char *name, + const void *value, size_t size, int flags) +LSM_HOOK(void, LSM_RET_VOID, inode_post_setxattr, struct dentry *dentry, + const char *name, const void *value, size_t size, int flags) +LSM_HOOK(int, 0, inode_getxattr, struct dentry *dentry, const char *name) +LSM_HOOK(int, 0, inode_listxattr, struct dentry *dentry) +LSM_HOOK(int, 0, inode_removexattr, struct dentry *dentry, const char *name) +LSM_HOOK(int, 0, inode_need_killpriv, struct dentry *dentry) +LSM_HOOK(int, 0, inode_killpriv, struct dentry *dentry) +LSM_HOOK(int, -EOPNOTSUPP, inode_getsecurity, struct inode *inode, + const char *name, void **buffer, bool alloc) +LSM_HOOK(int, -EOPNOTSUPP, inode_setsecurity, struct inode *inode, + const char *name, const void *value, size_t size, int flags) +LSM_HOOK(int, 0, inode_listsecurity, struct inode *inode, char *buffer, + size_t buffer_size) +LSM_HOOK(void, LSM_RET_VOID, inode_getsecid, struct inode *inode, u32 *secid) +LSM_HOOK(int, 0, inode_copy_up, struct dentry *src, struct cred **new) +LSM_HOOK(int, 0, inode_copy_up_xattr, const char *name) +LSM_HOOK(int, 0, 
kernfs_init_security, struct kernfs_node *kn_dir, + struct kernfs_node *kn) +LSM_HOOK(int, 0, file_permission, struct file *file, int mask) +LSM_HOOK(int, 0, file_alloc_security, struct file *file) +LSM_HOOK(void, LSM_RET_VOID, file_free_security, struct file *file) +LSM_HOOK(int, 0, file_ioctl, struct file *file, unsigned int cmd, + unsigned long arg) +LSM_HOOK(int, 0, mmap_addr, unsigned long addr) +LSM_HOOK(int, 0, mmap_file, struct file *file, unsigned long reqprot, + unsigned long prot, unsigned long flags) +LSM_HOOK(int, 0, file_mprotect, struct vm_area_struct *vma, + unsigned long reqprot, unsigned long prot) +LSM_HOOK(int, 0, file_lock, struct file *file, unsigned int cmd) +LSM_HOOK(int, 0, file_fcntl, struct file *file, unsigned int cmd, + unsigned long arg) +LSM_HOOK(void, LSM_RET_VOID, file_set_fowner, struct file *file) +LSM_HOOK(int, 0, file_send_sigiotask, struct task_struct *tsk, + struct fown_struct *fown, int sig) +LSM_HOOK(int, 0, file_receive, struct file *file) +LSM_HOOK(int, 0, file_open, struct file *file) +LSM_HOOK(int, 0, task_alloc, struct task_struct *task, + unsigned long clone_flags) +LSM_HOOK(void, LSM_RET_VOID, task_free, struct task_struct *task) +LSM_HOOK(int, 0, cred_alloc_blank, struct cred *cred, gfp_t gfp) +LSM_HOOK(void, LSM_RET_VOID, cred_free, struct cred *cred) +LSM_HOOK(int, 0, cred_prepare, struct cred *new, const struct cred *old, + gfp_t gfp) +LSM_HOOK(void, LSM_RET_VOID, cred_transfer, struct cred *new, + const struct cred *old) +LSM_HOOK(void, LSM_RET_VOID, cred_getsecid, const struct cred *c, u32 *secid) +LSM_HOOK(int, 0, kernel_act_as, struct cred *new, u32 secid) +LSM_HOOK(int, 0, kernel_create_files_as, struct cred *new, struct inode *inode) +LSM_HOOK(int, 0, kernel_module_request, char *kmod_name) +LSM_HOOK(int, 0, kernel_load_data, enum kernel_load_data_id id) +LSM_HOOK(int, 0, kernel_read_file, struct file *file, + enum kernel_read_file_id id) +LSM_HOOK(int, 0, kernel_post_read_file, struct file *file, char *buf, + loff_t size, enum kernel_read_file_id id) +LSM_HOOK(int, 0, task_fix_setuid, struct cred *new, const struct cred *old, + int flags) +LSM_HOOK(int, 0, task_setpgid, struct task_struct *p, pid_t pgid) +LSM_HOOK(int, 0, task_getpgid, struct task_struct *p) +LSM_HOOK(int, 0, task_getsid, struct task_struct *p) +LSM_HOOK(void, LSM_RET_VOID, task_getsecid, struct task_struct *p, u32 *secid) +LSM_HOOK(int, 0, task_setnice, struct task_struct *p, int nice) +LSM_HOOK(int, 0, task_setioprio, struct task_struct *p, int ioprio) +LSM_HOOK(int, 0, task_getioprio, struct task_struct *p) +LSM_HOOK(int, 0, task_prlimit, const struct cred *cred, + const struct cred *tcred, unsigned int flags) +LSM_HOOK(int, 0, task_setrlimit, struct task_struct *p, unsigned int resource, + struct rlimit *new_rlim) +LSM_HOOK(int, 0, task_setscheduler, struct task_struct *p) +LSM_HOOK(int, 0, task_getscheduler, struct task_struct *p) +LSM_HOOK(int, 0, task_movememory, struct task_struct *p) +LSM_HOOK(int, 0, task_kill, struct task_struct *p, struct kernel_siginfo *info, + int sig, const struct cred *cred) +LSM_HOOK(int, -ENOSYS, task_prctl, int option, unsigned long arg2, + unsigned long arg3, unsigned long arg4, unsigned long arg5) +LSM_HOOK(void, LSM_RET_VOID, task_to_inode, struct task_struct *p, + struct inode *inode) +LSM_HOOK(int, 0, ipc_permission, struct kern_ipc_perm *ipcp, short flag) +LSM_HOOK(void, LSM_RET_VOID, ipc_getsecid, struct kern_ipc_perm *ipcp, + u32 *secid) +LSM_HOOK(int, 0, msg_msg_alloc_security, struct msg_msg *msg) +LSM_HOOK(void, 
LSM_RET_VOID, msg_msg_free_security, struct msg_msg *msg) +LSM_HOOK(int, 0, msg_queue_alloc_security, struct kern_ipc_perm *perm) +LSM_HOOK(void, LSM_RET_VOID, msg_queue_free_security, + struct kern_ipc_perm *perm) +LSM_HOOK(int, 0, msg_queue_associate, struct kern_ipc_perm *perm, int msqflg) +LSM_HOOK(int, 0, msg_queue_msgctl, struct kern_ipc_perm *perm, int cmd) +LSM_HOOK(int, 0, msg_queue_msgsnd, struct kern_ipc_perm *perm, + struct msg_msg *msg, int msqflg) +LSM_HOOK(int, 0, msg_queue_msgrcv, struct kern_ipc_perm *perm, + struct msg_msg *msg, struct task_struct *target, long type, int mode) +LSM_HOOK(int, 0, shm_alloc_security, struct kern_ipc_perm *perm) +LSM_HOOK(void, LSM_RET_VOID, shm_free_security, struct kern_ipc_perm *perm) +LSM_HOOK(int, 0, shm_associate, struct kern_ipc_perm *perm, int shmflg) +LSM_HOOK(int, 0, shm_shmctl, struct kern_ipc_perm *perm, int cmd) +LSM_HOOK(int, 0, shm_shmat, struct kern_ipc_perm *perm, char __user *shmaddr, + int shmflg) +LSM_HOOK(int, 0, sem_alloc_security, struct kern_ipc_perm *perm) +LSM_HOOK(void, LSM_RET_VOID, sem_free_security, struct kern_ipc_perm *perm) +LSM_HOOK(int, 0, sem_associate, struct kern_ipc_perm *perm, int semflg) +LSM_HOOK(int, 0, sem_semctl, struct kern_ipc_perm *perm, int cmd) +LSM_HOOK(int, 0, sem_semop, struct kern_ipc_perm *perm, struct sembuf *sops, + unsigned nsops, int alter) +LSM_HOOK(int, 0, netlink_send, struct sock *sk, struct sk_buff *skb) +LSM_HOOK(void, LSM_RET_VOID, d_instantiate, struct dentry *dentry, + struct inode *inode) +LSM_HOOK(int, -EINVAL, getprocattr, struct task_struct *p, char *name, + char **value) +LSM_HOOK(int, -EINVAL, setprocattr, const char *name, void *value, size_t size) +LSM_HOOK(int, 0, ismaclabel, const char *name) +LSM_HOOK(int, 0, secid_to_secctx, u32 secid, char **secdata, + u32 *seclen) +LSM_HOOK(int, 0, secctx_to_secid, const char *secdata, u32 seclen, u32 *secid) +LSM_HOOK(void, LSM_RET_VOID, release_secctx, char *secdata, u32 seclen) +LSM_HOOK(void, LSM_RET_VOID, inode_invalidate_secctx, struct inode *inode) +LSM_HOOK(int, 0, inode_notifysecctx, struct inode *inode, void *ctx, u32 ctxlen) +LSM_HOOK(int, 0, inode_setsecctx, struct dentry *dentry, void *ctx, u32 ctxlen) +LSM_HOOK(int, 0, inode_getsecctx, struct inode *inode, void **ctx, + u32 *ctxlen) + +#ifdef CONFIG_SECURITY_NETWORK +LSM_HOOK(int, 0, unix_stream_connect, struct sock *sock, struct sock *other, + struct sock *newsk) +LSM_HOOK(int, 0, unix_may_send, struct socket *sock, struct socket *other) +LSM_HOOK(int, 0, socket_create, int family, int type, int protocol, int kern) +LSM_HOOK(int, 0, socket_post_create, struct socket *sock, int family, int type, + int protocol, int kern) +LSM_HOOK(int, 0, socket_socketpair, struct socket *socka, struct socket *sockb) +LSM_HOOK(int, 0, socket_bind, struct socket *sock, struct sockaddr *address, + int addrlen) +LSM_HOOK(int, 0, socket_connect, struct socket *sock, struct sockaddr *address, + int addrlen) +LSM_HOOK(int, 0, socket_listen, struct socket *sock, int backlog) +LSM_HOOK(int, 0, socket_accept, struct socket *sock, struct socket *newsock) +LSM_HOOK(int, 0, socket_sendmsg, struct socket *sock, struct msghdr *msg, + int size) +LSM_HOOK(int, 0, socket_recvmsg, struct socket *sock, struct msghdr *msg, + int size, int flags) +LSM_HOOK(int, 0, socket_getsockname, struct socket *sock) +LSM_HOOK(int, 0, socket_getpeername, struct socket *sock) +LSM_HOOK(int, 0, socket_getsockopt, struct socket *sock, int level, int optname) +LSM_HOOK(int, 0, socket_setsockopt, struct socket *sock, 
int level, int optname) +LSM_HOOK(int, 0, socket_shutdown, struct socket *sock, int how) +LSM_HOOK(int, 0, socket_sock_rcv_skb, struct sock *sk, struct sk_buff *skb) +LSM_HOOK(int, 0, socket_getpeersec_stream, struct socket *sock, + char __user *optval, int __user *optlen, unsigned len) +LSM_HOOK(int, 0, socket_getpeersec_dgram, struct socket *sock, + struct sk_buff *skb, u32 *secid) +LSM_HOOK(int, 0, sk_alloc_security, struct sock *sk, int family, gfp_t priority) +LSM_HOOK(void, LSM_RET_VOID, sk_free_security, struct sock *sk) +LSM_HOOK(void, LSM_RET_VOID, sk_clone_security, const struct sock *sk, + struct sock *newsk) +LSM_HOOK(void, LSM_RET_VOID, sk_getsecid, struct sock *sk, u32 *secid) +LSM_HOOK(void, LSM_RET_VOID, sock_graft, struct sock *sk, struct socket *parent) +LSM_HOOK(int, 0, inet_conn_request, struct sock *sk, struct sk_buff *skb, + struct request_sock *req) +LSM_HOOK(void, LSM_RET_VOID, inet_csk_clone, struct sock *newsk, + const struct request_sock *req) +LSM_HOOK(void, LSM_RET_VOID, inet_conn_established, struct sock *sk, + struct sk_buff *skb) +LSM_HOOK(int, 0, secmark_relabel_packet, u32 secid) +LSM_HOOK(void, LSM_RET_VOID, secmark_refcount_inc, void) +LSM_HOOK(void, LSM_RET_VOID, secmark_refcount_dec, void) +LSM_HOOK(void, LSM_RET_VOID, req_classify_flow, const struct request_sock *req, + struct flowi *fl) +LSM_HOOK(int, 0, tun_dev_alloc_security, void **security) +LSM_HOOK(void, LSM_RET_VOID, tun_dev_free_security, void *security) +LSM_HOOK(int, 0, tun_dev_create, void) +LSM_HOOK(int, 0, tun_dev_attach_queue, void *security) +LSM_HOOK(int, 0, tun_dev_attach, struct sock *sk, void *security) +LSM_HOOK(int, 0, tun_dev_open, void *security) +LSM_HOOK(int, 0, sctp_assoc_request, struct sctp_endpoint *ep, + struct sk_buff *skb) +LSM_HOOK(int, 0, sctp_bind_connect, struct sock *sk, int optname, + struct sockaddr *address, int addrlen) +LSM_HOOK(void, LSM_RET_VOID, sctp_sk_clone, struct sctp_endpoint *ep, + struct sock *sk, struct sock *newsk) +#endif /* CONFIG_SECURITY_NETWORK */ + +#ifdef CONFIG_SECURITY_INFINIBAND +LSM_HOOK(int, 0, ib_pkey_access, void *sec, u64 subnet_prefix, u16 pkey) +LSM_HOOK(int, 0, ib_endport_manage_subnet, void *sec, const char *dev_name, + u8 port_num) +LSM_HOOK(int, 0, ib_alloc_security, void **sec) +LSM_HOOK(void, LSM_RET_VOID, ib_free_security, void *sec) +#endif /* CONFIG_SECURITY_INFINIBAND */ + +#ifdef CONFIG_SECURITY_NETWORK_XFRM +LSM_HOOK(int, 0, xfrm_policy_alloc_security, struct xfrm_sec_ctx **ctxp, + struct xfrm_user_sec_ctx *sec_ctx, gfp_t gfp) +LSM_HOOK(int, 0, xfrm_policy_clone_security, struct xfrm_sec_ctx *old_ctx, + struct xfrm_sec_ctx **new_ctx) +LSM_HOOK(void, LSM_RET_VOID, xfrm_policy_free_security, + struct xfrm_sec_ctx *ctx) +LSM_HOOK(int, 0, xfrm_policy_delete_security, struct xfrm_sec_ctx *ctx) +LSM_HOOK(int, 0, xfrm_state_alloc, struct xfrm_state *x, + struct xfrm_user_sec_ctx *sec_ctx) +LSM_HOOK(int, 0, xfrm_state_alloc_acquire, struct xfrm_state *x, + struct xfrm_sec_ctx *polsec, u32 secid) +LSM_HOOK(void, LSM_RET_VOID, xfrm_state_free_security, struct xfrm_state *x) +LSM_HOOK(int, 0, xfrm_state_delete_security, struct xfrm_state *x) +LSM_HOOK(int, 0, xfrm_policy_lookup, struct xfrm_sec_ctx *ctx, u32 fl_secid, + u8 dir) +LSM_HOOK(int, 1, xfrm_state_pol_flow_match, struct xfrm_state *x, + struct xfrm_policy *xp, const struct flowi *fl) +LSM_HOOK(int, 0, xfrm_decode_session, struct sk_buff *skb, u32 *secid, + int ckall) +#endif /* CONFIG_SECURITY_NETWORK_XFRM */ + +/* key management security hooks */ +#ifdef CONFIG_KEYS 
+LSM_HOOK(int, 0, key_alloc, struct key *key, const struct cred *cred, + unsigned long flags) +LSM_HOOK(void, LSM_RET_VOID, key_free, struct key *key) +LSM_HOOK(int, 0, key_permission, key_ref_t key_ref, const struct cred *cred, + unsigned perm) +LSM_HOOK(int, 0, key_getsecurity, struct key *key, char **_buffer) +#endif /* CONFIG_KEYS */ + +#ifdef CONFIG_AUDIT +LSM_HOOK(int, 0, audit_rule_init, u32 field, u32 op, char *rulestr, + void **lsmrule) +LSM_HOOK(int, 0, audit_rule_known, struct audit_krule *krule) +LSM_HOOK(int, 0, audit_rule_match, u32 secid, u32 field, u32 op, void *lsmrule) +LSM_HOOK(void, LSM_RET_VOID, audit_rule_free, void *lsmrule) +#endif /* CONFIG_AUDIT */ + +#ifdef CONFIG_BPF_SYSCALL +LSM_HOOK(int, 0, bpf, int cmd, union bpf_attr *attr, unsigned int size) +LSM_HOOK(int, 0, bpf_map, struct bpf_map *map, fmode_t fmode) +LSM_HOOK(int, 0, bpf_prog, struct bpf_prog *prog) +LSM_HOOK(int, 0, bpf_map_alloc_security, struct bpf_map *map) +LSM_HOOK(void, LSM_RET_VOID, bpf_map_free_security, struct bpf_map *map) +LSM_HOOK(int, 0, bpf_prog_alloc_security, struct bpf_prog_aux *aux) +LSM_HOOK(void, LSM_RET_VOID, bpf_prog_free_security, struct bpf_prog_aux *aux) +#endif /* CONFIG_BPF_SYSCALL */ + +LSM_HOOK(int, 0, locked_down, enum lockdown_reason what) + +#ifdef CONFIG_PERF_EVENTS +LSM_HOOK(int, 0, perf_event_open, struct perf_event_attr *attr, int type) +LSM_HOOK(int, 0, perf_event_alloc, struct perf_event *event) +LSM_HOOK(void, LSM_RET_VOID, perf_event_free, struct perf_event *event) +LSM_HOOK(int, 0, perf_event_read, struct perf_event *event) +LSM_HOOK(int, 0, perf_event_write, struct perf_event *event) +#endif /* CONFIG_PERF_EVENTS */ diff --git a/include/linux/lsm_hooks.h b/include/linux/lsm_hooks.h index 20d8cf194fb7..988ca0df7824 100644 --- a/include/linux/lsm_hooks.h +++ b/include/linux/lsm_hooks.h @@ -103,6 +103,10 @@ * @sb_free_security: * Deallocate and clear the sb->s_security field. * @sb contains the super_block structure to be modified. + * @sb_free_mnt_opts: + * Free memory associated with @mnt_ops. + * @sb_eat_lsm_opts: + * Eat (scan @orig options) and save them in @mnt_opts. * @sb_statfs: * Check permission before obtaining filesystem statistics for the @mnt * mountpoint. @@ -136,6 +140,10 @@ * @sb superblock being remounted * @data contains the filesystem-specific data. * Return 0 if permission is granted. + * @sb_kern_mount: + * Mount this @sb if allowed by permissions. + * @sb_show_options: + * Show (print on @m) mount options for this @sb. * @sb_umount: * Check permission before the @mnt file system is unmounted. * @mnt contains the mounted file system. @@ -155,6 +163,8 @@ * Copy all security options from a given superblock to another * @oldsb old superblock which contain information to clone * @newsb new superblock which needs filled in + * @sb_add_mnt_opt: + * Add one mount @option to @mnt_opts. * @sb_parse_opts_str: * Parse a string of security data filling in the opts structure * @options string containing all mount options known by the LSM @@ -451,6 +461,12 @@ * security module does not know about attribute or a negative error code * to abort the copy up. Note that the caller is responsible for reading * and writing the xattrs as this hook is merely a filter. + * @d_instantiate: + * Fill in @inode security information for a @dentry if allowed. + * @getprocattr: + * Read attribute @name for process @p and store it into @value if allowed. + * @setprocattr: + * Write (set) attribute @name to @value, size @size if allowed. 
* * Security hooks for kernfs node operations * @@ -1113,6 +1129,7 @@ * In case of failure, @secid will be set to zero. * * Security hooks for individual messages held in System V IPC message queues + * * @msg_msg_alloc_security: * Allocate and attach a security structure to the msg->security field. * The security field is initialized to NULL when the structure is first @@ -1302,6 +1319,10 @@ * @cap contains the capability <include/linux/capability.h>. * @opts contains options for the capable check <include/linux/security.h> * Return 0 if the capability is granted for @tsk. + * @quotactl: + * Check whether the quotactl syscall is allowed for this @sb. + * @quota_on: + * Check whether QUOTAON is allowed for this @dentry. * @syslog: * Check permission before accessing the kernel message ring or changing * logging to the console. @@ -1449,632 +1470,35 @@ * @bpf_prog_free_security: * Clean up the security information stored inside bpf prog. * - * @locked_down + * @locked_down: * Determine whether a kernel feature that potentially enables arbitrary * code execution in kernel space should be permitted. * * @what: kernel feature being accessed + * + * Security hooks for perf events + * + * @perf_event_open: + * Check whether the @type of perf_event_open syscall is allowed. + * @perf_event_alloc: + * Allocate and save perf_event security info. + * @perf_event_free: + * Release (free) perf_event security info. + * @perf_event_read: + * Read perf_event security info if allowed. + * @perf_event_write: + * Write perf_event security info if allowed. */ union security_list_options { - int (*binder_set_context_mgr)(struct task_struct *mgr); - int (*binder_transaction)(struct task_struct *from, - struct task_struct *to); - int (*binder_transfer_binder)(struct task_struct *from, - struct task_struct *to); - int (*binder_transfer_file)(struct task_struct *from, - struct task_struct *to, - struct file *file); - - int (*ptrace_access_check)(struct task_struct *child, - unsigned int mode); - int (*ptrace_traceme)(struct task_struct *parent); - int (*capget)(struct task_struct *target, kernel_cap_t *effective, - kernel_cap_t *inheritable, kernel_cap_t *permitted); - int (*capset)(struct cred *new, const struct cred *old, - const kernel_cap_t *effective, - const kernel_cap_t *inheritable, - const kernel_cap_t *permitted); - int (*capable)(const struct cred *cred, - struct user_namespace *ns, - int cap, - unsigned int opts); - int (*quotactl)(int cmds, int type, int id, struct super_block *sb); - int (*quota_on)(struct dentry *dentry); - int (*syslog)(int type); - int (*settime)(const struct timespec64 *ts, const struct timezone *tz); - int (*vm_enough_memory)(struct mm_struct *mm, long pages); - - int (*bprm_set_creds)(struct linux_binprm *bprm); - int (*bprm_check_security)(struct linux_binprm *bprm); - void (*bprm_committing_creds)(struct linux_binprm *bprm); - void (*bprm_committed_creds)(struct linux_binprm *bprm); - - int (*fs_context_dup)(struct fs_context *fc, struct fs_context *src_sc); - int (*fs_context_parse_param)(struct fs_context *fc, struct fs_parameter *param); - - int (*sb_alloc_security)(struct super_block *sb); - void (*sb_free_security)(struct super_block *sb); - void (*sb_free_mnt_opts)(void *mnt_opts); - int (*sb_eat_lsm_opts)(char *orig, void **mnt_opts); - int (*sb_remount)(struct super_block *sb, void *mnt_opts); - int (*sb_kern_mount)(struct super_block *sb); - int (*sb_show_options)(struct seq_file *m, struct super_block *sb); - int (*sb_statfs)(struct dentry *dentry); - int 
(*sb_mount)(const char *dev_name, const struct path *path, - const char *type, unsigned long flags, void *data); - int (*sb_umount)(struct vfsmount *mnt, int flags); - int (*sb_pivotroot)(const struct path *old_path, const struct path *new_path); - int (*sb_set_mnt_opts)(struct super_block *sb, - void *mnt_opts, - unsigned long kern_flags, - unsigned long *set_kern_flags); - int (*sb_clone_mnt_opts)(const struct super_block *oldsb, - struct super_block *newsb, - unsigned long kern_flags, - unsigned long *set_kern_flags); - int (*sb_add_mnt_opt)(const char *option, const char *val, int len, - void **mnt_opts); - int (*move_mount)(const struct path *from_path, const struct path *to_path); - int (*dentry_init_security)(struct dentry *dentry, int mode, - const struct qstr *name, void **ctx, - u32 *ctxlen); - int (*dentry_create_files_as)(struct dentry *dentry, int mode, - struct qstr *name, - const struct cred *old, - struct cred *new); - - -#ifdef CONFIG_SECURITY_PATH - int (*path_unlink)(const struct path *dir, struct dentry *dentry); - int (*path_mkdir)(const struct path *dir, struct dentry *dentry, - umode_t mode); - int (*path_rmdir)(const struct path *dir, struct dentry *dentry); - int (*path_mknod)(const struct path *dir, struct dentry *dentry, - umode_t mode, unsigned int dev); - int (*path_truncate)(const struct path *path); - int (*path_symlink)(const struct path *dir, struct dentry *dentry, - const char *old_name); - int (*path_link)(struct dentry *old_dentry, const struct path *new_dir, - struct dentry *new_dentry); - int (*path_rename)(const struct path *old_dir, struct dentry *old_dentry, - const struct path *new_dir, - struct dentry *new_dentry); - int (*path_chmod)(const struct path *path, umode_t mode); - int (*path_chown)(const struct path *path, kuid_t uid, kgid_t gid); - int (*path_chroot)(const struct path *path); -#endif - /* Needed for inode based security check */ - int (*path_notify)(const struct path *path, u64 mask, - unsigned int obj_type); - int (*inode_alloc_security)(struct inode *inode); - void (*inode_free_security)(struct inode *inode); - int (*inode_init_security)(struct inode *inode, struct inode *dir, - const struct qstr *qstr, - const char **name, void **value, - size_t *len); - int (*inode_create)(struct inode *dir, struct dentry *dentry, - umode_t mode); - int (*inode_link)(struct dentry *old_dentry, struct inode *dir, - struct dentry *new_dentry); - int (*inode_unlink)(struct inode *dir, struct dentry *dentry); - int (*inode_symlink)(struct inode *dir, struct dentry *dentry, - const char *old_name); - int (*inode_mkdir)(struct inode *dir, struct dentry *dentry, - umode_t mode); - int (*inode_rmdir)(struct inode *dir, struct dentry *dentry); - int (*inode_mknod)(struct inode *dir, struct dentry *dentry, - umode_t mode, dev_t dev); - int (*inode_rename)(struct inode *old_dir, struct dentry *old_dentry, - struct inode *new_dir, - struct dentry *new_dentry); - int (*inode_readlink)(struct dentry *dentry); - int (*inode_follow_link)(struct dentry *dentry, struct inode *inode, - bool rcu); - int (*inode_permission)(struct inode *inode, int mask); - int (*inode_setattr)(struct dentry *dentry, struct iattr *attr); - int (*inode_getattr)(const struct path *path); - int (*inode_setxattr)(struct dentry *dentry, const char *name, - const void *value, size_t size, int flags); - void (*inode_post_setxattr)(struct dentry *dentry, const char *name, - const void *value, size_t size, - int flags); - int (*inode_getxattr)(struct dentry *dentry, const char *name); - int 
(*inode_listxattr)(struct dentry *dentry); - int (*inode_removexattr)(struct dentry *dentry, const char *name); - int (*inode_need_killpriv)(struct dentry *dentry); - int (*inode_killpriv)(struct dentry *dentry); - int (*inode_getsecurity)(struct inode *inode, const char *name, - void **buffer, bool alloc); - int (*inode_setsecurity)(struct inode *inode, const char *name, - const void *value, size_t size, - int flags); - int (*inode_listsecurity)(struct inode *inode, char *buffer, - size_t buffer_size); - void (*inode_getsecid)(struct inode *inode, u32 *secid); - int (*inode_copy_up)(struct dentry *src, struct cred **new); - int (*inode_copy_up_xattr)(const char *name); - - int (*kernfs_init_security)(struct kernfs_node *kn_dir, - struct kernfs_node *kn); - - int (*file_permission)(struct file *file, int mask); - int (*file_alloc_security)(struct file *file); - void (*file_free_security)(struct file *file); - int (*file_ioctl)(struct file *file, unsigned int cmd, - unsigned long arg); - int (*mmap_addr)(unsigned long addr); - int (*mmap_file)(struct file *file, unsigned long reqprot, - unsigned long prot, unsigned long flags); - int (*file_mprotect)(struct vm_area_struct *vma, unsigned long reqprot, - unsigned long prot); - int (*file_lock)(struct file *file, unsigned int cmd); - int (*file_fcntl)(struct file *file, unsigned int cmd, - unsigned long arg); - void (*file_set_fowner)(struct file *file); - int (*file_send_sigiotask)(struct task_struct *tsk, - struct fown_struct *fown, int sig); - int (*file_receive)(struct file *file); - int (*file_open)(struct file *file); - - int (*task_alloc)(struct task_struct *task, unsigned long clone_flags); - void (*task_free)(struct task_struct *task); - int (*cred_alloc_blank)(struct cred *cred, gfp_t gfp); - void (*cred_free)(struct cred *cred); - int (*cred_prepare)(struct cred *new, const struct cred *old, - gfp_t gfp); - void (*cred_transfer)(struct cred *new, const struct cred *old); - void (*cred_getsecid)(const struct cred *c, u32 *secid); - int (*kernel_act_as)(struct cred *new, u32 secid); - int (*kernel_create_files_as)(struct cred *new, struct inode *inode); - int (*kernel_module_request)(char *kmod_name); - int (*kernel_load_data)(enum kernel_load_data_id id); - int (*kernel_read_file)(struct file *file, enum kernel_read_file_id id); - int (*kernel_post_read_file)(struct file *file, char *buf, loff_t size, - enum kernel_read_file_id id); - int (*task_fix_setuid)(struct cred *new, const struct cred *old, - int flags); - int (*task_setpgid)(struct task_struct *p, pid_t pgid); - int (*task_getpgid)(struct task_struct *p); - int (*task_getsid)(struct task_struct *p); - void (*task_getsecid)(struct task_struct *p, u32 *secid); - int (*task_setnice)(struct task_struct *p, int nice); - int (*task_setioprio)(struct task_struct *p, int ioprio); - int (*task_getioprio)(struct task_struct *p); - int (*task_prlimit)(const struct cred *cred, const struct cred *tcred, - unsigned int flags); - int (*task_setrlimit)(struct task_struct *p, unsigned int resource, - struct rlimit *new_rlim); - int (*task_setscheduler)(struct task_struct *p); - int (*task_getscheduler)(struct task_struct *p); - int (*task_movememory)(struct task_struct *p); - int (*task_kill)(struct task_struct *p, struct kernel_siginfo *info, - int sig, const struct cred *cred); - int (*task_prctl)(int option, unsigned long arg2, unsigned long arg3, - unsigned long arg4, unsigned long arg5); - void (*task_to_inode)(struct task_struct *p, struct inode *inode); - - int 
(*ipc_permission)(struct kern_ipc_perm *ipcp, short flag); - void (*ipc_getsecid)(struct kern_ipc_perm *ipcp, u32 *secid); - - int (*msg_msg_alloc_security)(struct msg_msg *msg); - void (*msg_msg_free_security)(struct msg_msg *msg); - - int (*msg_queue_alloc_security)(struct kern_ipc_perm *perm); - void (*msg_queue_free_security)(struct kern_ipc_perm *perm); - int (*msg_queue_associate)(struct kern_ipc_perm *perm, int msqflg); - int (*msg_queue_msgctl)(struct kern_ipc_perm *perm, int cmd); - int (*msg_queue_msgsnd)(struct kern_ipc_perm *perm, struct msg_msg *msg, - int msqflg); - int (*msg_queue_msgrcv)(struct kern_ipc_perm *perm, struct msg_msg *msg, - struct task_struct *target, long type, - int mode); - - int (*shm_alloc_security)(struct kern_ipc_perm *perm); - void (*shm_free_security)(struct kern_ipc_perm *perm); - int (*shm_associate)(struct kern_ipc_perm *perm, int shmflg); - int (*shm_shmctl)(struct kern_ipc_perm *perm, int cmd); - int (*shm_shmat)(struct kern_ipc_perm *perm, char __user *shmaddr, - int shmflg); - - int (*sem_alloc_security)(struct kern_ipc_perm *perm); - void (*sem_free_security)(struct kern_ipc_perm *perm); - int (*sem_associate)(struct kern_ipc_perm *perm, int semflg); - int (*sem_semctl)(struct kern_ipc_perm *perm, int cmd); - int (*sem_semop)(struct kern_ipc_perm *perm, struct sembuf *sops, - unsigned nsops, int alter); - - int (*netlink_send)(struct sock *sk, struct sk_buff *skb); - - void (*d_instantiate)(struct dentry *dentry, struct inode *inode); - - int (*getprocattr)(struct task_struct *p, char *name, char **value); - int (*setprocattr)(const char *name, void *value, size_t size); - int (*ismaclabel)(const char *name); - int (*secid_to_secctx)(u32 secid, char **secdata, u32 *seclen); - int (*secctx_to_secid)(const char *secdata, u32 seclen, u32 *secid); - void (*release_secctx)(char *secdata, u32 seclen); - - void (*inode_invalidate_secctx)(struct inode *inode); - int (*inode_notifysecctx)(struct inode *inode, void *ctx, u32 ctxlen); - int (*inode_setsecctx)(struct dentry *dentry, void *ctx, u32 ctxlen); - int (*inode_getsecctx)(struct inode *inode, void **ctx, u32 *ctxlen); - -#ifdef CONFIG_SECURITY_NETWORK - int (*unix_stream_connect)(struct sock *sock, struct sock *other, - struct sock *newsk); - int (*unix_may_send)(struct socket *sock, struct socket *other); - - int (*socket_create)(int family, int type, int protocol, int kern); - int (*socket_post_create)(struct socket *sock, int family, int type, - int protocol, int kern); - int (*socket_socketpair)(struct socket *socka, struct socket *sockb); - int (*socket_bind)(struct socket *sock, struct sockaddr *address, - int addrlen); - int (*socket_connect)(struct socket *sock, struct sockaddr *address, - int addrlen); - int (*socket_listen)(struct socket *sock, int backlog); - int (*socket_accept)(struct socket *sock, struct socket *newsock); - int (*socket_sendmsg)(struct socket *sock, struct msghdr *msg, - int size); - int (*socket_recvmsg)(struct socket *sock, struct msghdr *msg, - int size, int flags); - int (*socket_getsockname)(struct socket *sock); - int (*socket_getpeername)(struct socket *sock); - int (*socket_getsockopt)(struct socket *sock, int level, int optname); - int (*socket_setsockopt)(struct socket *sock, int level, int optname); - int (*socket_shutdown)(struct socket *sock, int how); - int (*socket_sock_rcv_skb)(struct sock *sk, struct sk_buff *skb); - int (*socket_getpeersec_stream)(struct socket *sock, - char __user *optval, - int __user *optlen, unsigned len); - int 
(*socket_getpeersec_dgram)(struct socket *sock, - struct sk_buff *skb, u32 *secid); - int (*sk_alloc_security)(struct sock *sk, int family, gfp_t priority); - void (*sk_free_security)(struct sock *sk); - void (*sk_clone_security)(const struct sock *sk, struct sock *newsk); - void (*sk_getsecid)(struct sock *sk, u32 *secid); - void (*sock_graft)(struct sock *sk, struct socket *parent); - int (*inet_conn_request)(struct sock *sk, struct sk_buff *skb, - struct request_sock *req); - void (*inet_csk_clone)(struct sock *newsk, - const struct request_sock *req); - void (*inet_conn_established)(struct sock *sk, struct sk_buff *skb); - int (*secmark_relabel_packet)(u32 secid); - void (*secmark_refcount_inc)(void); - void (*secmark_refcount_dec)(void); - void (*req_classify_flow)(const struct request_sock *req, - struct flowi *fl); - int (*tun_dev_alloc_security)(void **security); - void (*tun_dev_free_security)(void *security); - int (*tun_dev_create)(void); - int (*tun_dev_attach_queue)(void *security); - int (*tun_dev_attach)(struct sock *sk, void *security); - int (*tun_dev_open)(void *security); - int (*sctp_assoc_request)(struct sctp_endpoint *ep, - struct sk_buff *skb); - int (*sctp_bind_connect)(struct sock *sk, int optname, - struct sockaddr *address, int addrlen); - void (*sctp_sk_clone)(struct sctp_endpoint *ep, struct sock *sk, - struct sock *newsk); -#endif /* CONFIG_SECURITY_NETWORK */ - -#ifdef CONFIG_SECURITY_INFINIBAND - int (*ib_pkey_access)(void *sec, u64 subnet_prefix, u16 pkey); - int (*ib_endport_manage_subnet)(void *sec, const char *dev_name, - u8 port_num); - int (*ib_alloc_security)(void **sec); - void (*ib_free_security)(void *sec); -#endif /* CONFIG_SECURITY_INFINIBAND */ - -#ifdef CONFIG_SECURITY_NETWORK_XFRM - int (*xfrm_policy_alloc_security)(struct xfrm_sec_ctx **ctxp, - struct xfrm_user_sec_ctx *sec_ctx, - gfp_t gfp); - int (*xfrm_policy_clone_security)(struct xfrm_sec_ctx *old_ctx, - struct xfrm_sec_ctx **new_ctx); - void (*xfrm_policy_free_security)(struct xfrm_sec_ctx *ctx); - int (*xfrm_policy_delete_security)(struct xfrm_sec_ctx *ctx); - int (*xfrm_state_alloc)(struct xfrm_state *x, - struct xfrm_user_sec_ctx *sec_ctx); - int (*xfrm_state_alloc_acquire)(struct xfrm_state *x, - struct xfrm_sec_ctx *polsec, - u32 secid); - void (*xfrm_state_free_security)(struct xfrm_state *x); - int (*xfrm_state_delete_security)(struct xfrm_state *x); - int (*xfrm_policy_lookup)(struct xfrm_sec_ctx *ctx, u32 fl_secid, - u8 dir); - int (*xfrm_state_pol_flow_match)(struct xfrm_state *x, - struct xfrm_policy *xp, - const struct flowi *fl); - int (*xfrm_decode_session)(struct sk_buff *skb, u32 *secid, int ckall); -#endif /* CONFIG_SECURITY_NETWORK_XFRM */ - - /* key management security hooks */ -#ifdef CONFIG_KEYS - int (*key_alloc)(struct key *key, const struct cred *cred, - unsigned long flags); - void (*key_free)(struct key *key); - int (*key_permission)(key_ref_t key_ref, const struct cred *cred, - unsigned perm); - int (*key_getsecurity)(struct key *key, char **_buffer); -#endif /* CONFIG_KEYS */ - -#ifdef CONFIG_AUDIT - int (*audit_rule_init)(u32 field, u32 op, char *rulestr, - void **lsmrule); - int (*audit_rule_known)(struct audit_krule *krule); - int (*audit_rule_match)(u32 secid, u32 field, u32 op, void *lsmrule); - void (*audit_rule_free)(void *lsmrule); -#endif /* CONFIG_AUDIT */ - -#ifdef CONFIG_BPF_SYSCALL - int (*bpf)(int cmd, union bpf_attr *attr, - unsigned int size); - int (*bpf_map)(struct bpf_map *map, fmode_t fmode); - int (*bpf_prog)(struct bpf_prog *prog); - int 
(*bpf_map_alloc_security)(struct bpf_map *map); - void (*bpf_map_free_security)(struct bpf_map *map); - int (*bpf_prog_alloc_security)(struct bpf_prog_aux *aux); - void (*bpf_prog_free_security)(struct bpf_prog_aux *aux); -#endif /* CONFIG_BPF_SYSCALL */ - int (*locked_down)(enum lockdown_reason what); -#ifdef CONFIG_PERF_EVENTS - int (*perf_event_open)(struct perf_event_attr *attr, int type); - int (*perf_event_alloc)(struct perf_event *event); - void (*perf_event_free)(struct perf_event *event); - int (*perf_event_read)(struct perf_event *event); - int (*perf_event_write)(struct perf_event *event); - -#endif + #define LSM_HOOK(RET, DEFAULT, NAME, ...) RET (*NAME)(__VA_ARGS__); + #include "lsm_hook_defs.h" + #undef LSM_HOOK }; struct security_hook_heads { - struct hlist_head binder_set_context_mgr; - struct hlist_head binder_transaction; - struct hlist_head binder_transfer_binder; - struct hlist_head binder_transfer_file; - struct hlist_head ptrace_access_check; - struct hlist_head ptrace_traceme; - struct hlist_head capget; - struct hlist_head capset; - struct hlist_head capable; - struct hlist_head quotactl; - struct hlist_head quota_on; - struct hlist_head syslog; - struct hlist_head settime; - struct hlist_head vm_enough_memory; - struct hlist_head bprm_set_creds; - struct hlist_head bprm_check_security; - struct hlist_head bprm_committing_creds; - struct hlist_head bprm_committed_creds; - struct hlist_head fs_context_dup; - struct hlist_head fs_context_parse_param; - struct hlist_head sb_alloc_security; - struct hlist_head sb_free_security; - struct hlist_head sb_free_mnt_opts; - struct hlist_head sb_eat_lsm_opts; - struct hlist_head sb_remount; - struct hlist_head sb_kern_mount; - struct hlist_head sb_show_options; - struct hlist_head sb_statfs; - struct hlist_head sb_mount; - struct hlist_head sb_umount; - struct hlist_head sb_pivotroot; - struct hlist_head sb_set_mnt_opts; - struct hlist_head sb_clone_mnt_opts; - struct hlist_head sb_add_mnt_opt; - struct hlist_head move_mount; - struct hlist_head dentry_init_security; - struct hlist_head dentry_create_files_as; -#ifdef CONFIG_SECURITY_PATH - struct hlist_head path_unlink; - struct hlist_head path_mkdir; - struct hlist_head path_rmdir; - struct hlist_head path_mknod; - struct hlist_head path_truncate; - struct hlist_head path_symlink; - struct hlist_head path_link; - struct hlist_head path_rename; - struct hlist_head path_chmod; - struct hlist_head path_chown; - struct hlist_head path_chroot; -#endif - /* Needed for inode based modules as well */ - struct hlist_head path_notify; - struct hlist_head inode_alloc_security; - struct hlist_head inode_free_security; - struct hlist_head inode_init_security; - struct hlist_head inode_create; - struct hlist_head inode_link; - struct hlist_head inode_unlink; - struct hlist_head inode_symlink; - struct hlist_head inode_mkdir; - struct hlist_head inode_rmdir; - struct hlist_head inode_mknod; - struct hlist_head inode_rename; - struct hlist_head inode_readlink; - struct hlist_head inode_follow_link; - struct hlist_head inode_permission; - struct hlist_head inode_setattr; - struct hlist_head inode_getattr; - struct hlist_head inode_setxattr; - struct hlist_head inode_post_setxattr; - struct hlist_head inode_getxattr; - struct hlist_head inode_listxattr; - struct hlist_head inode_removexattr; - struct hlist_head inode_need_killpriv; - struct hlist_head inode_killpriv; - struct hlist_head inode_getsecurity; - struct hlist_head inode_setsecurity; - struct hlist_head inode_listsecurity; - struct 
hlist_head inode_getsecid; - struct hlist_head inode_copy_up; - struct hlist_head inode_copy_up_xattr; - struct hlist_head kernfs_init_security; - struct hlist_head file_permission; - struct hlist_head file_alloc_security; - struct hlist_head file_free_security; - struct hlist_head file_ioctl; - struct hlist_head mmap_addr; - struct hlist_head mmap_file; - struct hlist_head file_mprotect; - struct hlist_head file_lock; - struct hlist_head file_fcntl; - struct hlist_head file_set_fowner; - struct hlist_head file_send_sigiotask; - struct hlist_head file_receive; - struct hlist_head file_open; - struct hlist_head task_alloc; - struct hlist_head task_free; - struct hlist_head cred_alloc_blank; - struct hlist_head cred_free; - struct hlist_head cred_prepare; - struct hlist_head cred_transfer; - struct hlist_head cred_getsecid; - struct hlist_head kernel_act_as; - struct hlist_head kernel_create_files_as; - struct hlist_head kernel_load_data; - struct hlist_head kernel_read_file; - struct hlist_head kernel_post_read_file; - struct hlist_head kernel_module_request; - struct hlist_head task_fix_setuid; - struct hlist_head task_setpgid; - struct hlist_head task_getpgid; - struct hlist_head task_getsid; - struct hlist_head task_getsecid; - struct hlist_head task_setnice; - struct hlist_head task_setioprio; - struct hlist_head task_getioprio; - struct hlist_head task_prlimit; - struct hlist_head task_setrlimit; - struct hlist_head task_setscheduler; - struct hlist_head task_getscheduler; - struct hlist_head task_movememory; - struct hlist_head task_kill; - struct hlist_head task_prctl; - struct hlist_head task_to_inode; - struct hlist_head ipc_permission; - struct hlist_head ipc_getsecid; - struct hlist_head msg_msg_alloc_security; - struct hlist_head msg_msg_free_security; - struct hlist_head msg_queue_alloc_security; - struct hlist_head msg_queue_free_security; - struct hlist_head msg_queue_associate; - struct hlist_head msg_queue_msgctl; - struct hlist_head msg_queue_msgsnd; - struct hlist_head msg_queue_msgrcv; - struct hlist_head shm_alloc_security; - struct hlist_head shm_free_security; - struct hlist_head shm_associate; - struct hlist_head shm_shmctl; - struct hlist_head shm_shmat; - struct hlist_head sem_alloc_security; - struct hlist_head sem_free_security; - struct hlist_head sem_associate; - struct hlist_head sem_semctl; - struct hlist_head sem_semop; - struct hlist_head netlink_send; - struct hlist_head d_instantiate; - struct hlist_head getprocattr; - struct hlist_head setprocattr; - struct hlist_head ismaclabel; - struct hlist_head secid_to_secctx; - struct hlist_head secctx_to_secid; - struct hlist_head release_secctx; - struct hlist_head inode_invalidate_secctx; - struct hlist_head inode_notifysecctx; - struct hlist_head inode_setsecctx; - struct hlist_head inode_getsecctx; -#ifdef CONFIG_SECURITY_NETWORK - struct hlist_head unix_stream_connect; - struct hlist_head unix_may_send; - struct hlist_head socket_create; - struct hlist_head socket_post_create; - struct hlist_head socket_socketpair; - struct hlist_head socket_bind; - struct hlist_head socket_connect; - struct hlist_head socket_listen; - struct hlist_head socket_accept; - struct hlist_head socket_sendmsg; - struct hlist_head socket_recvmsg; - struct hlist_head socket_getsockname; - struct hlist_head socket_getpeername; - struct hlist_head socket_getsockopt; - struct hlist_head socket_setsockopt; - struct hlist_head socket_shutdown; - struct hlist_head socket_sock_rcv_skb; - struct hlist_head socket_getpeersec_stream; - struct 
hlist_head socket_getpeersec_dgram; - struct hlist_head sk_alloc_security; - struct hlist_head sk_free_security; - struct hlist_head sk_clone_security; - struct hlist_head sk_getsecid; - struct hlist_head sock_graft; - struct hlist_head inet_conn_request; - struct hlist_head inet_csk_clone; - struct hlist_head inet_conn_established; - struct hlist_head secmark_relabel_packet; - struct hlist_head secmark_refcount_inc; - struct hlist_head secmark_refcount_dec; - struct hlist_head req_classify_flow; - struct hlist_head tun_dev_alloc_security; - struct hlist_head tun_dev_free_security; - struct hlist_head tun_dev_create; - struct hlist_head tun_dev_attach_queue; - struct hlist_head tun_dev_attach; - struct hlist_head tun_dev_open; - struct hlist_head sctp_assoc_request; - struct hlist_head sctp_bind_connect; - struct hlist_head sctp_sk_clone; -#endif /* CONFIG_SECURITY_NETWORK */ -#ifdef CONFIG_SECURITY_INFINIBAND - struct hlist_head ib_pkey_access; - struct hlist_head ib_endport_manage_subnet; - struct hlist_head ib_alloc_security; - struct hlist_head ib_free_security; -#endif /* CONFIG_SECURITY_INFINIBAND */ -#ifdef CONFIG_SECURITY_NETWORK_XFRM - struct hlist_head xfrm_policy_alloc_security; - struct hlist_head xfrm_policy_clone_security; - struct hlist_head xfrm_policy_free_security; - struct hlist_head xfrm_policy_delete_security; - struct hlist_head xfrm_state_alloc; - struct hlist_head xfrm_state_alloc_acquire; - struct hlist_head xfrm_state_free_security; - struct hlist_head xfrm_state_delete_security; - struct hlist_head xfrm_policy_lookup; - struct hlist_head xfrm_state_pol_flow_match; - struct hlist_head xfrm_decode_session; -#endif /* CONFIG_SECURITY_NETWORK_XFRM */ -#ifdef CONFIG_KEYS - struct hlist_head key_alloc; - struct hlist_head key_free; - struct hlist_head key_permission; - struct hlist_head key_getsecurity; -#endif /* CONFIG_KEYS */ -#ifdef CONFIG_AUDIT - struct hlist_head audit_rule_init; - struct hlist_head audit_rule_known; - struct hlist_head audit_rule_match; - struct hlist_head audit_rule_free; -#endif /* CONFIG_AUDIT */ -#ifdef CONFIG_BPF_SYSCALL - struct hlist_head bpf; - struct hlist_head bpf_map; - struct hlist_head bpf_prog; - struct hlist_head bpf_map_alloc_security; - struct hlist_head bpf_map_free_security; - struct hlist_head bpf_prog_alloc_security; - struct hlist_head bpf_prog_free_security; -#endif /* CONFIG_BPF_SYSCALL */ - struct hlist_head locked_down; -#ifdef CONFIG_PERF_EVENTS - struct hlist_head perf_event_open; - struct hlist_head perf_event_alloc; - struct hlist_head perf_event_free; - struct hlist_head perf_event_read; - struct hlist_head perf_event_write; -#endif + #define LSM_HOOK(RET, DEFAULT, NAME, ...) struct hlist_head NAME; + #include "lsm_hook_defs.h" + #undef LSM_HOOK } __randomize_layout; /* @@ -2101,6 +1525,12 @@ struct lsm_blob_sizes { }; /* + * LSM_RET_VOID is used as the default value in LSM_HOOK definitions for void + * LSM hooks (in include/linux/lsm_hook_defs.h). + */ +#define LSM_RET_VOID ((void) 0) + +/* * Initializing a security_hook_list structure takes * up a lot of space in a source file. 
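The two lsm_hooks.h hunks above collapse the hand-maintained union security_list_options and struct security_hook_heads into a single X-macro list re-included from lsm_hook_defs.h under different LSM_HOOK definitions. Below is a stand-alone, user-space sketch of the same pattern; the hook names and the single-file DEMO_HOOKS list are illustrative only (the kernel keeps the list in a separate header and #includes it), and void * stands in for struct hlist_head.

/* Single-file illustration of the X-macro pattern used above. */
#include <stdio.h>

/* The one list of hooks, analogous to lsm_hook_defs.h (hypothetical names). */
#define DEMO_HOOKS(HOOK)				\
	HOOK(int, 0, file_open, const char *path)	\
	HOOK(int, 0, task_kill, int pid, int sig)

/* Expansion 1: one function pointer per hook (cf. union security_list_options). */
#define AS_FUNC_PTR(RET, DEFAULT, NAME, ...) RET (*NAME)(__VA_ARGS__);
struct demo_hook_options {
	DEMO_HOOKS(AS_FUNC_PTR)
};
#undef AS_FUNC_PTR

/* Expansion 2: one list head per hook (cf. struct security_hook_heads). */
#define AS_LIST_HEAD(RET, DEFAULT, NAME, ...) void *NAME;
struct demo_hook_heads {
	DEMO_HOOKS(AS_LIST_HEAD)
};
#undef AS_LIST_HEAD

int main(void)
{
	printf("options: %zu bytes, heads: %zu bytes\n",
	       sizeof(struct demo_hook_options),
	       sizeof(struct demo_hook_heads));
	return 0;
}

The point of the pattern is that adding a hook becomes a one-line change that keeps both structures in sync, while also recording a per-hook default return value (cf. the LSM_RET_VOID definition above).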
This macro takes * care of the common case and reduces the amount of diff --git a/include/linux/math64.h b/include/linux/math64.h index 65bef21cdddb..11a267413e8e 100644 --- a/include/linux/math64.h +++ b/include/linux/math64.h @@ -3,6 +3,7 @@ #define _LINUX_MATH64_H #include <linux/types.h> +#include <vdso/math64.h> #include <asm/div64.h> #if BITS_PER_LONG == 64 @@ -142,25 +143,6 @@ static inline s64 div_s64(s64 dividend, s32 divisor) u32 iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder); -static __always_inline u32 -__iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder) -{ - u32 ret = 0; - - while (dividend >= divisor) { - /* The following asm() prevents the compiler from - optimising this loop into a modulo operation. */ - asm("" : "+rm"(dividend)); - - dividend -= divisor; - ret++; - } - - *remainder = dividend; - - return ret; -} - #ifndef mul_u32_u32 /* * Many a GCC version messes this up and generates a 64x64 mult :-( diff --git a/include/linux/mdio-xpcs.h b/include/linux/mdio-xpcs.h new file mode 100644 index 000000000000..9a841aa5982d --- /dev/null +++ b/include/linux/mdio-xpcs.h @@ -0,0 +1,41 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2020 Synopsys, Inc. and/or its affiliates. + * Synopsys DesignWare XPCS helpers + */ + +#ifndef __LINUX_MDIO_XPCS_H +#define __LINUX_MDIO_XPCS_H + +#include <linux/phy.h> +#include <linux/phylink.h> + +struct mdio_xpcs_args { + __ETHTOOL_DECLARE_LINK_MODE_MASK(supported); + struct mii_bus *bus; + int addr; +}; + +struct mdio_xpcs_ops { + int (*validate)(struct mdio_xpcs_args *xpcs, + unsigned long *supported, + struct phylink_link_state *state); + int (*config)(struct mdio_xpcs_args *xpcs, + const struct phylink_link_state *state); + int (*get_state)(struct mdio_xpcs_args *xpcs, + struct phylink_link_state *state); + int (*link_up)(struct mdio_xpcs_args *xpcs, int speed, + phy_interface_t interface); + int (*probe)(struct mdio_xpcs_args *xpcs, phy_interface_t interface); +}; + +#if IS_ENABLED(CONFIG_MDIO_XPCS) +struct mdio_xpcs_ops *mdio_xpcs_get_ops(void); +#else +static inline struct mdio_xpcs_ops *mdio_xpcs_get_ops(void) +{ + return NULL; +} +#endif + +#endif /* __LINUX_MDIO_XPCS_H */ diff --git a/include/linux/mdio.h b/include/linux/mdio.h index a7604248777b..917e4bb2ed71 100644 --- a/include/linux/mdio.h +++ b/include/linux/mdio.h @@ -316,11 +316,15 @@ static inline void mii_10gbt_stat_mod_linkmode_lpa_t(unsigned long *advertising, int __mdiobus_read(struct mii_bus *bus, int addr, u32 regnum); int __mdiobus_write(struct mii_bus *bus, int addr, u32 regnum, u16 val); +int __mdiobus_modify_changed(struct mii_bus *bus, int addr, u32 regnum, + u16 mask, u16 set); int mdiobus_read(struct mii_bus *bus, int addr, u32 regnum); int mdiobus_read_nested(struct mii_bus *bus, int addr, u32 regnum); int mdiobus_write(struct mii_bus *bus, int addr, u32 regnum, u16 val); int mdiobus_write_nested(struct mii_bus *bus, int addr, u32 regnum, u16 val); +int mdiobus_modify(struct mii_bus *bus, int addr, u32 regnum, u16 mask, + u16 set); int mdiobus_register_device(struct mdio_device *mdiodev); int mdiobus_unregister_device(struct mdio_device *mdiodev); diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index a7a0a1a5c8d5..1b4150ff64be 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -695,6 +695,7 @@ static inline unsigned long lruvec_page_state_local(struct lruvec *lruvec, void __mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx, int val); void 
__mod_lruvec_slab_state(void *p, enum node_stat_item idx, int val); +void mod_memcg_obj_state(void *p, int idx, int val); static inline void mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx, int val) @@ -1123,6 +1124,10 @@ static inline void __mod_lruvec_slab_state(void *p, enum node_stat_item idx, __mod_node_page_state(page_pgdat(page), idx, val); } +static inline void mod_memcg_obj_state(void *p, int idx, int val) +{ +} + static inline unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order, gfp_t gfp_mask, @@ -1362,12 +1367,11 @@ struct kmem_cache *memcg_kmem_get_cache(struct kmem_cache *cachep); void memcg_kmem_put_cache(struct kmem_cache *cachep); #ifdef CONFIG_MEMCG_KMEM -int __memcg_kmem_charge(struct page *page, gfp_t gfp, int order); -void __memcg_kmem_uncharge(struct page *page, int order); -int __memcg_kmem_charge_memcg(struct page *page, gfp_t gfp, int order, - struct mem_cgroup *memcg); -void __memcg_kmem_uncharge_memcg(struct mem_cgroup *memcg, - unsigned int nr_pages); +int __memcg_kmem_charge(struct mem_cgroup *memcg, gfp_t gfp, + unsigned int nr_pages); +void __memcg_kmem_uncharge(struct mem_cgroup *memcg, unsigned int nr_pages); +int __memcg_kmem_charge_page(struct page *page, gfp_t gfp, int order); +void __memcg_kmem_uncharge_page(struct page *page, int order); extern struct static_key_false memcg_kmem_enabled_key; extern struct workqueue_struct *memcg_kmem_cache_wq; @@ -1389,32 +1393,33 @@ static inline bool memcg_kmem_enabled(void) return static_branch_unlikely(&memcg_kmem_enabled_key); } -static inline int memcg_kmem_charge(struct page *page, gfp_t gfp, int order) +static inline int memcg_kmem_charge_page(struct page *page, gfp_t gfp, + int order) { if (memcg_kmem_enabled()) - return __memcg_kmem_charge(page, gfp, order); + return __memcg_kmem_charge_page(page, gfp, order); return 0; } -static inline void memcg_kmem_uncharge(struct page *page, int order) +static inline void memcg_kmem_uncharge_page(struct page *page, int order) { if (memcg_kmem_enabled()) - __memcg_kmem_uncharge(page, order); + __memcg_kmem_uncharge_page(page, order); } -static inline int memcg_kmem_charge_memcg(struct page *page, gfp_t gfp, - int order, struct mem_cgroup *memcg) +static inline int memcg_kmem_charge(struct mem_cgroup *memcg, gfp_t gfp, + unsigned int nr_pages) { if (memcg_kmem_enabled()) - return __memcg_kmem_charge_memcg(page, gfp, order, memcg); + return __memcg_kmem_charge(memcg, gfp, nr_pages); return 0; } -static inline void memcg_kmem_uncharge_memcg(struct page *page, int order, - struct mem_cgroup *memcg) +static inline void memcg_kmem_uncharge(struct mem_cgroup *memcg, + unsigned int nr_pages) { if (memcg_kmem_enabled()) - __memcg_kmem_uncharge_memcg(memcg, 1 << order); + __memcg_kmem_uncharge(memcg, nr_pages); } /* @@ -1427,23 +1432,27 @@ static inline int memcg_cache_id(struct mem_cgroup *memcg) return memcg ? 
memcg->kmemcg_id : -1; } +struct mem_cgroup *mem_cgroup_from_obj(void *p); + #else -static inline int memcg_kmem_charge(struct page *page, gfp_t gfp, int order) +static inline int memcg_kmem_charge_page(struct page *page, gfp_t gfp, + int order) { return 0; } -static inline void memcg_kmem_uncharge(struct page *page, int order) +static inline void memcg_kmem_uncharge_page(struct page *page, int order) { } -static inline int __memcg_kmem_charge(struct page *page, gfp_t gfp, int order) +static inline int __memcg_kmem_charge_page(struct page *page, gfp_t gfp, + int order) { return 0; } -static inline void __memcg_kmem_uncharge(struct page *page, int order) +static inline void __memcg_kmem_uncharge_page(struct page *page, int order) { } @@ -1468,6 +1477,11 @@ static inline void memcg_put_cache_ids(void) { } +static inline struct mem_cgroup *mem_cgroup_from_obj(void *p) +{ + return NULL; +} + #endif /* CONFIG_MEMCG_KMEM */ #endif /* _LINUX_MEMCONTROL_H */ diff --git a/include/linux/memory.h b/include/linux/memory.h index 0b8d791b6669..439a89e758d8 100644 --- a/include/linux/memory.h +++ b/include/linux/memory.h @@ -26,7 +26,6 @@ struct memory_block { unsigned long start_section_nr; unsigned long state; /* serialized by the dev->lock */ - int section_count; /* serialized by mem_sysfs_mutex */ int online_type; /* for passing data to online routine */ int phys_device; /* to which fru does this belong? */ struct device dev; diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h index f4d59155f3d4..ef55115320fb 100644 --- a/include/linux/memory_hotplug.h +++ b/include/linux/memory_hotplug.h @@ -47,9 +47,13 @@ enum { /* Types for control the zone type of onlined and offlined memory */ enum { - MMOP_OFFLINE = -1, - MMOP_ONLINE_KEEP, + /* Offline the memory. */ + MMOP_OFFLINE = 0, + /* Online the memory. Zone depends, see default_zone_for_pfn(). */ + MMOP_ONLINE, + /* Online the memory to ZONE_NORMAL. */ MMOP_ONLINE_KERNEL, + /* Online the memory to ZONE_MOVABLE. */ MMOP_ONLINE_MOVABLE, }; @@ -113,7 +117,10 @@ extern int arch_add_memory(int nid, u64 start, u64 size, struct mhp_restrictions *restrictions); extern u64 max_mem_size; -extern bool memhp_auto_online; +extern int memhp_online_type_from_str(const char *str); + +/* Default online_type (MMOP_*) when new memory blocks are added. */ +extern int memhp_default_online_type; /* If movable_node boot option specified */ extern bool movable_node_enabled; static inline bool movable_node_is_enabled(void) diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h index 5228c62af416..8165278c348a 100644 --- a/include/linux/mempolicy.h +++ b/include/linux/mempolicy.h @@ -173,34 +173,7 @@ extern int mpol_parse_str(char *str, struct mempolicy **mpol); extern void mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol); /* Check if a vma is migratable */ -static inline bool vma_migratable(struct vm_area_struct *vma) -{ - if (vma->vm_flags & (VM_IO | VM_PFNMAP)) - return false; - - /* - * DAX device mappings require predictable access latency, so avoid - * incurring periodic faults. - */ - if (vma_is_dax(vma)) - return false; - -#ifndef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION - if (vma->vm_flags & VM_HUGETLB) - return false; -#endif - - /* - * Migration allocates pages in the highest zone. If we cannot - * do so then migration (at least from node to node) is not - * possible. 
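The memcontrol.h hunk above renames the page-based kmem accounting helpers to memcg_kmem_charge_page()/memcg_kmem_uncharge_page() and makes memcg_kmem_charge()/memcg_kmem_uncharge() take a memcg plus a page count. A hedged sketch of a caller of the page-based pair, roughly mirroring the error-backout the page allocator performs for __GFP_ACCOUNT allocations; the function names here are hypothetical and not part of the patch.

#include <linux/gfp.h>
#include <linux/memcontrol.h>

static struct page *demo_alloc_accounted(unsigned int order)
{
	struct page *page = alloc_pages(GFP_KERNEL, order);

	if (!page)
		return NULL;

	/* Charge the current task's memcg; back out if the charge fails. */
	if (memcg_kmem_charge_page(page, GFP_KERNEL, order)) {
		__free_pages(page, order);
		return NULL;
	}
	return page;
}

static void demo_free_accounted(struct page *page, unsigned int order)
{
	memcg_kmem_uncharge_page(page, order);
	__free_pages(page, order);
}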
- */ - if (vma->vm_file && - gfp_zone(mapping_gfp_mask(vma->vm_file->f_mapping)) - < policy_zone) - return false; - return true; -} +extern bool vma_migratable(struct vm_area_struct *vma); extern int mpol_misplaced(struct page *, struct vm_area_struct *, unsigned long); extern void mpol_put_task_policy(struct task_struct *); diff --git a/include/linux/memremap.h b/include/linux/memremap.h index 6fefb09af7c3..8b37c4c9222c 100644 --- a/include/linux/memremap.h +++ b/include/linux/memremap.h @@ -98,11 +98,12 @@ struct dev_pagemap_ops { * @ref: reference count that pins the devm_memremap_pages() mapping * @internal_ref: internal reference if @ref is not provided by the caller * @done: completion for @internal_ref - * @dev: host device of the mapping for debug - * @data: private data pointer for page_free() * @type: memory type: see MEMORY_* in memory_hotplug.h * @flags: PGMAP_* flags to specify defailed behavior * @ops: method table + * @owner: an opaque pointer identifying the entity that manages this + * instance. Used by various helpers to make sure that no + * foreign ZONE_DEVICE memory is accessed. */ struct dev_pagemap { struct vmem_altmap altmap; @@ -113,6 +114,7 @@ struct dev_pagemap { enum memory_type type; unsigned int flags; const struct dev_pagemap_ops *ops; + void *owner; }; static inline struct vmem_altmap *pgmap_altmap(struct dev_pagemap *pgmap) diff --git a/include/linux/mfd/iqs62x.h b/include/linux/mfd/iqs62x.h new file mode 100644 index 000000000000..043d3b6de9ec --- /dev/null +++ b/include/linux/mfd/iqs62x.h @@ -0,0 +1,139 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * Azoteq IQS620A/621/622/624/625 Multi-Function Sensors + * + * Copyright (C) 2019 Jeff LaBundy <jeff@labundy.com> + */ + +#ifndef __LINUX_MFD_IQS62X_H +#define __LINUX_MFD_IQS62X_H + +#define IQS620_PROD_NUM 0x41 +#define IQS621_PROD_NUM 0x46 +#define IQS622_PROD_NUM 0x42 +#define IQS624_PROD_NUM 0x43 +#define IQS625_PROD_NUM 0x4E + +#define IQS621_ALS_FLAGS 0x16 +#define IQS622_ALS_FLAGS 0x14 + +#define IQS624_HALL_UI 0x70 +#define IQS624_HALL_UI_WHL_EVENT BIT(4) +#define IQS624_HALL_UI_INT_EVENT BIT(3) +#define IQS624_HALL_UI_AUTO_CAL BIT(2) + +#define IQS624_INTERVAL_DIV 0x7D + +#define IQS620_GLBL_EVENT_MASK 0xD7 +#define IQS620_GLBL_EVENT_MASK_PMU BIT(6) + +#define IQS62X_NUM_KEYS 16 +#define IQS62X_NUM_EVENTS (IQS62X_NUM_KEYS + 5) + +#define IQS62X_EVENT_SIZE 10 + +enum iqs62x_ui_sel { + IQS62X_UI_PROX, + IQS62X_UI_SAR1, +}; + +enum iqs62x_event_reg { + IQS62X_EVENT_NONE, + IQS62X_EVENT_SYS, + IQS62X_EVENT_PROX, + IQS62X_EVENT_HYST, + IQS62X_EVENT_HALL, + IQS62X_EVENT_ALS, + IQS62X_EVENT_IR, + IQS62X_EVENT_WHEEL, + IQS62X_EVENT_INTER, + IQS62X_EVENT_UI_LO, + IQS62X_EVENT_UI_HI, +}; + +enum iqs62x_event_flag { + /* keys */ + IQS62X_EVENT_PROX_CH0_T, + IQS62X_EVENT_PROX_CH0_P, + IQS62X_EVENT_PROX_CH1_T, + IQS62X_EVENT_PROX_CH1_P, + IQS62X_EVENT_PROX_CH2_T, + IQS62X_EVENT_PROX_CH2_P, + IQS62X_EVENT_HYST_POS_T, + IQS62X_EVENT_HYST_POS_P, + IQS62X_EVENT_HYST_NEG_T, + IQS62X_EVENT_HYST_NEG_P, + IQS62X_EVENT_SAR1_ACT, + IQS62X_EVENT_SAR1_QRD, + IQS62X_EVENT_SAR1_MOVE, + IQS62X_EVENT_SAR1_HALT, + IQS62X_EVENT_WHEEL_UP, + IQS62X_EVENT_WHEEL_DN, + + /* switches */ + IQS62X_EVENT_HALL_N_T, + IQS62X_EVENT_HALL_N_P, + IQS62X_EVENT_HALL_S_T, + IQS62X_EVENT_HALL_S_P, + + /* everything else */ + IQS62X_EVENT_SYS_RESET, +}; + +struct iqs62x_event_data { + u16 ui_data; + u8 als_flags; + u8 ir_flags; + u8 interval; +}; + +struct iqs62x_event_desc { + enum iqs62x_event_reg reg; + u8 mask; + u8 val; +}; + +struct 
iqs62x_dev_desc { + const char *dev_name; + const struct mfd_cell *sub_devs; + int num_sub_devs; + + u8 prod_num; + u8 sw_num; + const u8 *cal_regs; + int num_cal_regs; + + u8 prox_mask; + u8 sar_mask; + u8 hall_mask; + u8 hyst_mask; + u8 temp_mask; + u8 als_mask; + u8 ir_mask; + + u8 prox_settings; + u8 als_flags; + u8 hall_flags; + u8 hyst_shift; + + u8 interval; + u8 interval_div; + + u8 clk_div; + const char *fw_name; + const enum iqs62x_event_reg (*event_regs)[IQS62X_EVENT_SIZE]; +}; + +struct iqs62x_core { + const struct iqs62x_dev_desc *dev_desc; + struct i2c_client *client; + struct regmap *regmap; + struct blocking_notifier_head nh; + struct list_head fw_blk_head; + struct completion fw_done; + enum iqs62x_ui_sel ui_sel; +}; + +extern const struct iqs62x_event_desc iqs62x_events[IQS62X_NUM_EVENTS]; + +#endif /* __LINUX_MFD_IQS62X_H */ diff --git a/include/linux/mfd/rk808.h b/include/linux/mfd/rk808.h index a59bf323f713..e07f6e61cd38 100644 --- a/include/linux/mfd/rk808.h +++ b/include/linux/mfd/rk808.h @@ -620,7 +620,5 @@ struct rk808 { long variant; const struct regmap_config *regmap_cfg; const struct regmap_irq_chip *regmap_irq_chip; - void (*pm_pwroff_fn)(void); - void (*pm_pwroff_prep_fn)(void); }; #endif /* __LINUX_REGULATOR_RK808_H */ diff --git a/include/linux/mfd/rn5t618.h b/include/linux/mfd/rn5t618.h index d62ef48060b5..fba0df13d9a8 100644 --- a/include/linux/mfd/rn5t618.h +++ b/include/linux/mfd/rn5t618.h @@ -139,6 +139,17 @@ #define RN5T618_INTPOL 0x9c #define RN5T618_INTEN 0x9d #define RN5T618_INTMON 0x9e + +#define RN5T618_RTC_SECONDS 0xA0 +#define RN5T618_RTC_MDAY 0xA4 +#define RN5T618_RTC_MONTH 0xA5 +#define RN5T618_RTC_YEAR 0xA6 +#define RN5T618_RTC_ADJUST 0xA7 +#define RN5T618_RTC_ALARM_Y_SEC 0xA8 +#define RN5T618_RTC_DAL_MONTH 0xAC +#define RN5T618_RTC_CTRL1 0xAE +#define RN5T618_RTC_CTRL2 0xAF + #define RN5T618_PREVINDAC 0xb0 #define RN5T618_BATDAC 0xb1 #define RN5T618_CHGCTL1 0xb3 @@ -242,9 +253,24 @@ enum { RC5T619, }; +/* RN5T618 IRQ definitions */ +enum { + RN5T618_IRQ_SYS = 0, + RN5T618_IRQ_DCDC, + RN5T618_IRQ_RTC, + RN5T618_IRQ_ADC, + RN5T618_IRQ_GPIO, + RN5T618_IRQ_CHG, + RN5T618_NR_IRQS, +}; + struct rn5t618 { struct regmap *regmap; + struct device *dev; long variant; + + int irq; + struct regmap_irq_chip_data *irq_data; }; #endif /* __LINUX_MFD_RN5T618_H */ diff --git a/include/linux/mfd/sc27xx-pmic.h b/include/linux/mfd/sc27xx-pmic.h new file mode 100644 index 000000000000..57e45c0b3ae2 --- /dev/null +++ b/include/linux/mfd/sc27xx-pmic.h @@ -0,0 +1,7 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __LINUX_MFD_SC27XX_PMIC_H +#define __LINUX_MFD_SC27XX_PMIC_H + +extern enum usb_charger_type sprd_pmic_detect_charger_type(struct device *dev); + +#endif /* __LINUX_MFD_SC27XX_PMIC_H */ diff --git a/include/linux/mfd/wm8994/pdata.h b/include/linux/mfd/wm8994/pdata.h index 81e7dcbd94df..6e2962ef5b81 100644 --- a/include/linux/mfd/wm8994/pdata.h +++ b/include/linux/mfd/wm8994/pdata.h @@ -33,7 +33,7 @@ struct wm8994_ldo_pdata { * DRC configurations are specified with a label and a set of register * values to write (the enable bits will be ignored). At runtime an * enumerated control will be presented for each DRC block allowing - * the user to choose the configration to use. + * the user to choose the configuration to use. 
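The new iqs62x.h header above exposes a blocking_notifier_head (nh) in struct iqs62x_core alongside the event flag and event data definitions. Below is a hypothetical sketch of a sub-device consuming events through that notifier; that the notifier action carries IQS62X_EVENT_* bits and that the context pointer is a struct iqs62x_event_data are assumptions about the core driver, not stated in the header itself.

#include <linux/bits.h>
#include <linux/kernel.h>
#include <linux/mfd/iqs62x.h>
#include <linux/notifier.h>

static int demo_iqs62x_notifier(struct notifier_block *nb,
				unsigned long event_flags, void *context)
{
	struct iqs62x_event_data *event_data = context;	/* assumed */

	if (event_flags & BIT(IQS62X_EVENT_SYS_RESET))
		pr_info("IQS62x reset event, UI data = 0x%04x\n",
			event_data->ui_data);

	return NOTIFY_OK;
}

static struct notifier_block demo_iqs62x_nb = {
	.notifier_call = demo_iqs62x_notifier,
};

/* From a child driver's probe, given the parent core as drvdata:
 *	blocking_notifier_chain_register(&iqs62x->nh, &demo_iqs62x_nb);
 */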
* * Configurations may be generated by hand or by using the DRC control * panel provided by the WISCE - see http://www.wolfsonmicro.com/wisce/ diff --git a/include/linux/mhi.h b/include/linux/mhi.h new file mode 100644 index 000000000000..ad1996001965 --- /dev/null +++ b/include/linux/mhi.h @@ -0,0 +1,700 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved. + * + */ +#ifndef _MHI_H_ +#define _MHI_H_ + +#include <linux/device.h> +#include <linux/dma-direction.h> +#include <linux/mutex.h> +#include <linux/rwlock_types.h> +#include <linux/skbuff.h> +#include <linux/slab.h> +#include <linux/spinlock_types.h> +#include <linux/wait.h> +#include <linux/workqueue.h> + +struct mhi_chan; +struct mhi_event; +struct mhi_ctxt; +struct mhi_cmd; +struct mhi_buf_info; + +/** + * enum mhi_callback - MHI callback + * @MHI_CB_IDLE: MHI entered idle state + * @MHI_CB_PENDING_DATA: New data available for client to process + * @MHI_CB_LPM_ENTER: MHI host entered low power mode + * @MHI_CB_LPM_EXIT: MHI host about to exit low power mode + * @MHI_CB_EE_RDDM: MHI device entered RDDM exec env + * @MHI_CB_EE_MISSION_MODE: MHI device entered Mission Mode exec env + * @MHI_CB_SYS_ERROR: MHI device entered error state (may recover) + * @MHI_CB_FATAL_ERROR: MHI device entered fatal error state + * @MHI_CB_BW_REQ: Received a bandwidth switch request from device + */ +enum mhi_callback { + MHI_CB_IDLE, + MHI_CB_PENDING_DATA, + MHI_CB_LPM_ENTER, + MHI_CB_LPM_EXIT, + MHI_CB_EE_RDDM, + MHI_CB_EE_MISSION_MODE, + MHI_CB_SYS_ERROR, + MHI_CB_FATAL_ERROR, + MHI_CB_BW_REQ, +}; + +/** + * enum mhi_flags - Transfer flags + * @MHI_EOB: End of buffer for bulk transfer + * @MHI_EOT: End of transfer + * @MHI_CHAIN: Linked transfer + */ +enum mhi_flags { + MHI_EOB, + MHI_EOT, + MHI_CHAIN, +}; + +/** + * enum mhi_device_type - Device types + * @MHI_DEVICE_XFER: Handles data transfer + * @MHI_DEVICE_CONTROLLER: Control device + */ +enum mhi_device_type { + MHI_DEVICE_XFER, + MHI_DEVICE_CONTROLLER, +}; + +/** + * enum mhi_ch_type - Channel types + * @MHI_CH_TYPE_INVALID: Invalid channel type + * @MHI_CH_TYPE_OUTBOUND: Outbound channel to the device + * @MHI_CH_TYPE_INBOUND: Inbound channel from the device + * @MHI_CH_TYPE_INBOUND_COALESCED: Coalesced channel for the device to combine + * multiple packets and send them as a single + * large packet to reduce CPU consumption + */ +enum mhi_ch_type { + MHI_CH_TYPE_INVALID = 0, + MHI_CH_TYPE_OUTBOUND = DMA_TO_DEVICE, + MHI_CH_TYPE_INBOUND = DMA_FROM_DEVICE, + MHI_CH_TYPE_INBOUND_COALESCED = 3, +}; + +/** + * struct image_info - Firmware and RDDM table table + * @mhi_buf - Buffer for firmware and RDDM table + * @entries - # of entries in table + */ +struct image_info { + struct mhi_buf *mhi_buf; + struct bhi_vec_entry *bhi_vec; + u32 entries; +}; + +/** + * struct mhi_link_info - BW requirement + * target_link_speed - Link speed as defined by TLS bits in LinkControl reg + * target_link_width - Link width as defined by NLW bits in LinkStatus reg + */ +struct mhi_link_info { + unsigned int target_link_speed; + unsigned int target_link_width; +}; + +/** + * enum mhi_ee_type - Execution environment types + * @MHI_EE_PBL: Primary Bootloader + * @MHI_EE_SBL: Secondary Bootloader + * @MHI_EE_AMSS: Modem, aka the primary runtime EE + * @MHI_EE_RDDM: Ram dump download mode + * @MHI_EE_WFW: WLAN firmware mode + * @MHI_EE_PTHRU: Passthrough + * @MHI_EE_EDL: Embedded downloader + */ +enum mhi_ee_type { + MHI_EE_PBL, + MHI_EE_SBL, + 
MHI_EE_AMSS, + MHI_EE_RDDM, + MHI_EE_WFW, + MHI_EE_PTHRU, + MHI_EE_EDL, + MHI_EE_MAX_SUPPORTED = MHI_EE_EDL, + MHI_EE_DISABLE_TRANSITION, /* local EE, not related to mhi spec */ + MHI_EE_NOT_SUPPORTED, + MHI_EE_MAX, +}; + +/** + * enum mhi_state - MHI states + * @MHI_STATE_RESET: Reset state + * @MHI_STATE_READY: Ready state + * @MHI_STATE_M0: M0 state + * @MHI_STATE_M1: M1 state + * @MHI_STATE_M2: M2 state + * @MHI_STATE_M3: M3 state + * @MHI_STATE_M3_FAST: M3 Fast state + * @MHI_STATE_BHI: BHI state + * @MHI_STATE_SYS_ERR: System Error state + */ +enum mhi_state { + MHI_STATE_RESET = 0x0, + MHI_STATE_READY = 0x1, + MHI_STATE_M0 = 0x2, + MHI_STATE_M1 = 0x3, + MHI_STATE_M2 = 0x4, + MHI_STATE_M3 = 0x5, + MHI_STATE_M3_FAST = 0x6, + MHI_STATE_BHI = 0x7, + MHI_STATE_SYS_ERR = 0xFF, + MHI_STATE_MAX, +}; + +/** + * enum mhi_ch_ee_mask - Execution environment mask for channel + * @MHI_CH_EE_PBL: Allow channel to be used in PBL EE + * @MHI_CH_EE_SBL: Allow channel to be used in SBL EE + * @MHI_CH_EE_AMSS: Allow channel to be used in AMSS EE + * @MHI_CH_EE_RDDM: Allow channel to be used in RDDM EE + * @MHI_CH_EE_PTHRU: Allow channel to be used in PTHRU EE + * @MHI_CH_EE_WFW: Allow channel to be used in WFW EE + * @MHI_CH_EE_EDL: Allow channel to be used in EDL EE + */ +enum mhi_ch_ee_mask { + MHI_CH_EE_PBL = BIT(MHI_EE_PBL), + MHI_CH_EE_SBL = BIT(MHI_EE_SBL), + MHI_CH_EE_AMSS = BIT(MHI_EE_AMSS), + MHI_CH_EE_RDDM = BIT(MHI_EE_RDDM), + MHI_CH_EE_PTHRU = BIT(MHI_EE_PTHRU), + MHI_CH_EE_WFW = BIT(MHI_EE_WFW), + MHI_CH_EE_EDL = BIT(MHI_EE_EDL), +}; + +/** + * enum mhi_er_data_type - Event ring data types + * @MHI_ER_DATA: Only client data over this ring + * @MHI_ER_CTRL: MHI control data and client data + */ +enum mhi_er_data_type { + MHI_ER_DATA, + MHI_ER_CTRL, +}; + +/** + * enum mhi_db_brst_mode - Doorbell mode + * @MHI_DB_BRST_DISABLE: Burst mode disable + * @MHI_DB_BRST_ENABLE: Burst mode enable + */ +enum mhi_db_brst_mode { + MHI_DB_BRST_DISABLE = 0x2, + MHI_DB_BRST_ENABLE = 0x3, +}; + +/** + * struct mhi_channel_config - Channel configuration structure for controller + * @name: The name of this channel + * @num: The number assigned to this channel + * @num_elements: The number of elements that can be queued to this channel + * @local_elements: The local ring length of the channel + * @event_ring: The event rung index that services this channel + * @dir: Direction that data may flow on this channel + * @type: Channel type + * @ee_mask: Execution Environment mask for this channel + * @pollcfg: Polling configuration for burst mode. 0 is default. 
milliseconds + for UL channels, multiple of 8 ring elements for DL channels + * @doorbell: Doorbell mode + * @lpm_notify: The channel master requires low power mode notifications + * @offload_channel: The client manages the channel completely + * @doorbell_mode_switch: Channel switches to doorbell mode on M0 transition + * @auto_queue: Framework will automatically queue buffers for DL traffic + * @auto_start: Automatically start (open) this channel + * @wake-capable: Channel capable of waking up the system + */ +struct mhi_channel_config { + char *name; + u32 num; + u32 num_elements; + u32 local_elements; + u32 event_ring; + enum dma_data_direction dir; + enum mhi_ch_type type; + u32 ee_mask; + u32 pollcfg; + enum mhi_db_brst_mode doorbell; + bool lpm_notify; + bool offload_channel; + bool doorbell_mode_switch; + bool auto_queue; + bool auto_start; + bool wake_capable; +}; + +/** + * struct mhi_event_config - Event ring configuration structure for controller + * @num_elements: The number of elements that can be queued to this ring + * @irq_moderation_ms: Delay irq for additional events to be aggregated + * @irq: IRQ associated with this ring + * @channel: Dedicated channel number. U32_MAX indicates a non-dedicated ring + * @priority: Priority of this ring. Use 1 for now + * @mode: Doorbell mode + * @data_type: Type of data this ring will process + * @hardware_event: This ring is associated with hardware channels + * @client_managed: This ring is client managed + * @offload_channel: This ring is associated with an offloaded channel + */ +struct mhi_event_config { + u32 num_elements; + u32 irq_moderation_ms; + u32 irq; + u32 channel; + u32 priority; + enum mhi_db_brst_mode mode; + enum mhi_er_data_type data_type; + bool hardware_event; + bool client_managed; + bool offload_channel; +}; + +/** + * struct mhi_controller_config - Root MHI controller configuration + * @max_channels: Maximum number of channels supported + * @timeout_ms: Timeout value for operations. 0 means use default + * @buf_len: Size of automatically allocated buffers. 
0 means use default + * @num_channels: Number of channels defined in @ch_cfg + * @ch_cfg: Array of defined channels + * @num_events: Number of event rings defined in @event_cfg + * @event_cfg: Array of defined event rings + * @use_bounce_buf: Use a bounce buffer pool due to limited DDR access + * @m2_no_db: Host is not allowed to ring DB in M2 state + */ +struct mhi_controller_config { + u32 max_channels; + u32 timeout_ms; + u32 buf_len; + u32 num_channels; + struct mhi_channel_config *ch_cfg; + u32 num_events; + struct mhi_event_config *event_cfg; + bool use_bounce_buf; + bool m2_no_db; +}; + +/** + * struct mhi_controller - Master MHI controller structure + * @cntrl_dev: Pointer to the struct device of physical bus acting as the MHI + * controller (required) + * @mhi_dev: MHI device instance for the controller + * @regs: Base address of MHI MMIO register space (required) + * @bhi: Points to base of MHI BHI register space + * @bhie: Points to base of MHI BHIe register space + * @wake_db: MHI WAKE doorbell register address + * @iova_start: IOMMU starting address for data (required) + * @iova_stop: IOMMU stop address for data (required) + * @fw_image: Firmware image name for normal booting (required) + * @edl_image: Firmware image name for emergency download mode (optional) + * @rddm_size: RAM dump size that host should allocate for debugging purpose + * @sbl_size: SBL image size downloaded through BHIe (optional) + * @seg_len: BHIe vector size (optional) + * @fbc_image: Points to firmware image buffer + * @rddm_image: Points to RAM dump buffer + * @mhi_chan: Points to the channel configuration table + * @lpm_chans: List of channels that require LPM notifications + * @irq: base irq # to request (required) + * @max_chan: Maximum number of channels the controller supports + * @total_ev_rings: Total # of event rings allocated + * @hw_ev_rings: Number of hardware event rings + * @sw_ev_rings: Number of software event rings + * @nr_irqs_req: Number of IRQs required to operate (optional) + * @nr_irqs: Number of IRQ allocated by bus master (required) + * @family_number: MHI controller family number + * @device_number: MHI controller device number + * @major_version: MHI controller major revision number + * @minor_version: MHI controller minor revision number + * @mhi_event: MHI event ring configurations table + * @mhi_cmd: MHI command ring configurations table + * @mhi_ctxt: MHI device context, shared memory between host and device + * @pm_mutex: Mutex for suspend/resume operation + * @pm_lock: Lock for protecting MHI power management state + * @timeout_ms: Timeout in ms for state transitions + * @pm_state: MHI power management state + * @db_access: DB access states + * @ee: MHI device execution environment + * @dev_state: MHI device state + * @dev_wake: Device wakeup count + * @pending_pkts: Pending packets for the controller + * @transition_list: List of MHI state transitions + * @transition_lock: Lock for protecting MHI state transition list + * @wlock: Lock for protecting device wakeup + * @mhi_link_info: Device bandwidth info + * @st_worker: State transition worker + * @fw_worker: Firmware download worker + * @syserr_worker: System error worker + * @state_event: State change event + * @status_cb: CB function to notify power states of the device (required) + * @link_status: CB function to query link status of the device (required) + * @wake_get: CB function to assert device wake (optional) + * @wake_put: CB function to de-assert device wake (optional) + * @wake_toggle: CB function to assert 
and de-assert device wake (optional) + * @runtime_get: CB function to controller runtime resume (required) + * @runtimet_put: CB function to decrement pm usage (required) + * @map_single: CB function to create TRE buffer + * @unmap_single: CB function to destroy TRE buffer + * @buffer_len: Bounce buffer length + * @bounce_buf: Use of bounce buffer + * @fbc_download: MHI host needs to do complete image transfer (optional) + * @pre_init: MHI host needs to do pre-initialization before power up + * @wake_set: Device wakeup set flag + * + * Fields marked as (required) need to be populated by the controller driver + * before calling mhi_register_controller(). For the fields marked as (optional) + * they can be populated depending on the usecase. + * + * The following fields are present for the purpose of implementing any device + * specific quirks or customizations for specific MHI revisions used in device + * by the controller drivers. The MHI stack will just populate these fields + * during mhi_register_controller(): + * family_number + * device_number + * major_version + * minor_version + */ +struct mhi_controller { + struct device *cntrl_dev; + struct mhi_device *mhi_dev; + void __iomem *regs; + void __iomem *bhi; + void __iomem *bhie; + void __iomem *wake_db; + + dma_addr_t iova_start; + dma_addr_t iova_stop; + const char *fw_image; + const char *edl_image; + size_t rddm_size; + size_t sbl_size; + size_t seg_len; + struct image_info *fbc_image; + struct image_info *rddm_image; + struct mhi_chan *mhi_chan; + struct list_head lpm_chans; + int *irq; + u32 max_chan; + u32 total_ev_rings; + u32 hw_ev_rings; + u32 sw_ev_rings; + u32 nr_irqs_req; + u32 nr_irqs; + u32 family_number; + u32 device_number; + u32 major_version; + u32 minor_version; + + struct mhi_event *mhi_event; + struct mhi_cmd *mhi_cmd; + struct mhi_ctxt *mhi_ctxt; + + struct mutex pm_mutex; + rwlock_t pm_lock; + u32 timeout_ms; + u32 pm_state; + u32 db_access; + enum mhi_ee_type ee; + enum mhi_state dev_state; + atomic_t dev_wake; + atomic_t pending_pkts; + struct list_head transition_list; + spinlock_t transition_lock; + spinlock_t wlock; + struct mhi_link_info mhi_link_info; + struct work_struct st_worker; + struct work_struct fw_worker; + struct work_struct syserr_worker; + wait_queue_head_t state_event; + + void (*status_cb)(struct mhi_controller *mhi_cntrl, + enum mhi_callback cb); + int (*link_status)(struct mhi_controller *mhi_cntrl); + void (*wake_get)(struct mhi_controller *mhi_cntrl, bool override); + void (*wake_put)(struct mhi_controller *mhi_cntrl, bool override); + void (*wake_toggle)(struct mhi_controller *mhi_cntrl); + int (*runtime_get)(struct mhi_controller *mhi_cntrl); + void (*runtime_put)(struct mhi_controller *mhi_cntrl); + int (*map_single)(struct mhi_controller *mhi_cntrl, + struct mhi_buf_info *buf); + void (*unmap_single)(struct mhi_controller *mhi_cntrl, + struct mhi_buf_info *buf); + + size_t buffer_len; + bool bounce_buf; + bool fbc_download; + bool pre_init; + bool wake_set; +}; + +/** + * struct mhi_device - Structure representing a MHI device which binds + * to channels + * @id: Pointer to MHI device ID struct + * @chan_name: Name of the channel to which the device binds + * @mhi_cntrl: Controller the device belongs to + * @ul_chan: UL channel for the device + * @dl_chan: DL channel for the device + * @dev: Driver model device node for the MHI device + * @dev_type: MHI device type + * @ul_chan_id: MHI channel id for UL transfer + * @dl_chan_id: MHI channel id for DL transfer + * @dev_wake: Device 
wakeup counter + */ +struct mhi_device { + const struct mhi_device_id *id; + const char *chan_name; + struct mhi_controller *mhi_cntrl; + struct mhi_chan *ul_chan; + struct mhi_chan *dl_chan; + struct device dev; + enum mhi_device_type dev_type; + int ul_chan_id; + int dl_chan_id; + u32 dev_wake; +}; + +/** + * struct mhi_result - Completed buffer information + * @buf_addr: Address of data buffer + * @bytes_xferd: # of bytes transferred + * @dir: Channel direction + * @transaction_status: Status of last transaction + */ +struct mhi_result { + void *buf_addr; + size_t bytes_xferd; + enum dma_data_direction dir; + int transaction_status; +}; + +/** + * struct mhi_buf - MHI Buffer description + * @buf: Virtual address of the buffer + * @name: Buffer label. For offload channel, configurations name must be: + * ECA - Event context array data + * CCA - Channel context array data + * @dma_addr: IOMMU address of the buffer + * @len: # of bytes + */ +struct mhi_buf { + void *buf; + const char *name; + dma_addr_t dma_addr; + size_t len; +}; + +/** + * struct mhi_driver - Structure representing a MHI client driver + * @probe: CB function for client driver probe function + * @remove: CB function for client driver remove function + * @ul_xfer_cb: CB function for UL data transfer + * @dl_xfer_cb: CB function for DL data transfer + * @status_cb: CB functions for asynchronous status + * @driver: Device driver model driver + */ +struct mhi_driver { + const struct mhi_device_id *id_table; + int (*probe)(struct mhi_device *mhi_dev, + const struct mhi_device_id *id); + void (*remove)(struct mhi_device *mhi_dev); + void (*ul_xfer_cb)(struct mhi_device *mhi_dev, + struct mhi_result *result); + void (*dl_xfer_cb)(struct mhi_device *mhi_dev, + struct mhi_result *result); + void (*status_cb)(struct mhi_device *mhi_dev, enum mhi_callback mhi_cb); + struct device_driver driver; +}; + +#define to_mhi_driver(drv) container_of(drv, struct mhi_driver, driver) +#define to_mhi_device(dev) container_of(dev, struct mhi_device, dev) + +/** + * mhi_register_controller - Register MHI controller + * @mhi_cntrl: MHI controller to register + * @config: Configuration to use for the controller + */ +int mhi_register_controller(struct mhi_controller *mhi_cntrl, + struct mhi_controller_config *config); + +/** + * mhi_unregister_controller - Unregister MHI controller + * @mhi_cntrl: MHI controller to unregister + */ +void mhi_unregister_controller(struct mhi_controller *mhi_cntrl); + +/* + * module_mhi_driver() - Helper macro for drivers that don't do + * anything special other than using default mhi_driver_register() and + * mhi_driver_unregister(). This eliminates a lot of boilerplate. + * Each module may only use this macro once. 
+ */ +#define module_mhi_driver(mhi_drv) \ + module_driver(mhi_drv, mhi_driver_register, \ + mhi_driver_unregister) + +/* + * Macro to avoid include chaining to get THIS_MODULE + */ +#define mhi_driver_register(mhi_drv) \ + __mhi_driver_register(mhi_drv, THIS_MODULE) + +/** + * __mhi_driver_register - Register driver with MHI framework + * @mhi_drv: Driver associated with the device + * @owner: The module owner + */ +int __mhi_driver_register(struct mhi_driver *mhi_drv, struct module *owner); + +/** + * mhi_driver_unregister - Unregister a driver for mhi_devices + * @mhi_drv: Driver associated with the device + */ +void mhi_driver_unregister(struct mhi_driver *mhi_drv); + +/** + * mhi_set_mhi_state - Set MHI device state + * @mhi_cntrl: MHI controller + * @state: State to set + */ +void mhi_set_mhi_state(struct mhi_controller *mhi_cntrl, + enum mhi_state state); + +/** + * mhi_prepare_for_power_up - Do pre-initialization before power up. + * This is optional, call this before power up if + * the controller does not want bus framework to + * automatically free any allocated memory during + * shutdown process. + * @mhi_cntrl: MHI controller + */ +int mhi_prepare_for_power_up(struct mhi_controller *mhi_cntrl); + +/** + * mhi_async_power_up - Start MHI power up sequence + * @mhi_cntrl: MHI controller + */ +int mhi_async_power_up(struct mhi_controller *mhi_cntrl); + +/** + * mhi_sync_power_up - Start MHI power up sequence and wait till the device + * device enters valid EE state + * @mhi_cntrl: MHI controller + */ +int mhi_sync_power_up(struct mhi_controller *mhi_cntrl); + +/** + * mhi_power_down - Start MHI power down sequence + * @mhi_cntrl: MHI controller + * @graceful: Link is still accessible, so do a graceful shutdown process + */ +void mhi_power_down(struct mhi_controller *mhi_cntrl, bool graceful); + +/** + * mhi_unprepare_after_power_down - Free any allocated memory after power down + * @mhi_cntrl: MHI controller + */ +void mhi_unprepare_after_power_down(struct mhi_controller *mhi_cntrl); + +/** + * mhi_download_rddm_img - Download ramdump image from device for + * debugging purpose. + * @mhi_cntrl: MHI controller + * @in_panic: Download rddm image during kernel panic + */ +int mhi_download_rddm_img(struct mhi_controller *mhi_cntrl, bool in_panic); + +/** + * mhi_force_rddm_mode - Force device into rddm mode + * @mhi_cntrl: MHI controller + */ +int mhi_force_rddm_mode(struct mhi_controller *mhi_cntrl); + +/** + * mhi_get_mhi_state - Get MHI state of the device + * @mhi_cntrl: MHI controller + */ +enum mhi_state mhi_get_mhi_state(struct mhi_controller *mhi_cntrl); + +/** + * mhi_device_get - Disable device low power mode + * @mhi_dev: Device associated with the channel + */ +void mhi_device_get(struct mhi_device *mhi_dev); + +/** + * mhi_device_get_sync - Disable device low power mode. 
Synchronously + * take the controller out of suspended state + * @mhi_dev: Device associated with the channel + */ +int mhi_device_get_sync(struct mhi_device *mhi_dev); + +/** + * mhi_device_put - Re-enable device low power mode + * @mhi_dev: Device associated with the channel + */ +void mhi_device_put(struct mhi_device *mhi_dev); + +/** + * mhi_prepare_for_transfer - Setup channel for data transfer + * @mhi_dev: Device associated with the channels + */ +int mhi_prepare_for_transfer(struct mhi_device *mhi_dev); + +/** + * mhi_unprepare_from_transfer - Unprepare the channels + * @mhi_dev: Device associated with the channels + */ +void mhi_unprepare_from_transfer(struct mhi_device *mhi_dev); + +/** + * mhi_poll - Poll for any available data in DL direction + * @mhi_dev: Device associated with the channels + * @budget: # of events to process + */ +int mhi_poll(struct mhi_device *mhi_dev, u32 budget); + +/** + * mhi_queue_dma - Send or receive DMA mapped buffers from client device + * over MHI channel + * @mhi_dev: Device associated with the channels + * @dir: DMA direction for the channel + * @mhi_buf: Buffer for holding the DMA mapped data + * @len: Buffer length + * @mflags: MHI transfer flags used for the transfer + */ +int mhi_queue_dma(struct mhi_device *mhi_dev, enum dma_data_direction dir, + struct mhi_buf *mhi_buf, size_t len, enum mhi_flags mflags); + +/** + * mhi_queue_buf - Send or receive raw buffers from client device over MHI + * channel + * @mhi_dev: Device associated with the channels + * @dir: DMA direction for the channel + * @buf: Buffer for holding the data + * @len: Buffer length + * @mflags: MHI transfer flags used for the transfer + */ +int mhi_queue_buf(struct mhi_device *mhi_dev, enum dma_data_direction dir, + void *buf, size_t len, enum mhi_flags mflags); + +/** + * mhi_queue_skb - Send or receive SKBs from client device over MHI channel + * @mhi_dev: Device associated with the channels + * @dir: DMA direction for the channel + * @skb: Buffer for holding SKBs + * @len: Buffer length + * @mflags: MHI transfer flags used for the transfer + */ +int mhi_queue_skb(struct mhi_device *mhi_dev, enum dma_data_direction dir, + struct sk_buff *skb, size_t len, enum mhi_flags mflags); + +#endif /* _MHI_H_ */ diff --git a/include/linux/migrate.h b/include/linux/migrate.h index 72120061b7d4..3e546cbf03dd 100644 --- a/include/linux/migrate.h +++ b/include/linux/migrate.h @@ -196,6 +196,14 @@ struct migrate_vma { unsigned long npages; unsigned long start; unsigned long end; + + /* + * Set to the owner value also stored in page->pgmap->owner for + * migrating out of device private memory. If set only device + * private pages with this owner are migrated. If not set + * device private pages are not migrated at all. + */ + void *src_owner; }; int migrate_vma_setup(struct migrate_vma *args); diff --git a/include/linux/mii.h b/include/linux/mii.h index 18c6208f56fc..219b93cad1dd 100644 --- a/include/linux/mii.h +++ b/include/linux/mii.h @@ -355,24 +355,6 @@ static inline u32 mii_adv_to_ethtool_adv_x(u32 adv) } /** - * mii_lpa_to_ethtool_lpa_x - * @adv: value of the MII_LPA register - * - * A small helper function that translates MII_LPA - * bits, when in 1000Base-X mode, to ethtool - * LP advertisement settings. 
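mhi.h above defines the client-facing API: a struct mhi_driver registered with module_mhi_driver(), channel setup via mhi_prepare_for_transfer(), and completion reporting through struct mhi_result. A minimal, hedged client skeleton follows; the channel name is hypothetical and the .chan field of struct mhi_device_id comes from the matching mod_devicetable.h change rather than this hunk, so treat its layout as an assumption.

#include <linux/kernel.h>
#include <linux/mhi.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>

static int demo_mhi_probe(struct mhi_device *mhi_dev,
			  const struct mhi_device_id *id)
{
	/* Move the UL/DL channels into a transfer-capable state. */
	return mhi_prepare_for_transfer(mhi_dev);
}

static void demo_mhi_remove(struct mhi_device *mhi_dev)
{
	mhi_unprepare_from_transfer(mhi_dev);
}

/* Completion callback for buffers queued in the DL direction. */
static void demo_mhi_dl_xfer_cb(struct mhi_device *mhi_dev,
				struct mhi_result *result)
{
	if (!result->transaction_status)
		pr_info("demo_mhi: received %zu bytes\n", result->bytes_xferd);
}

static void demo_mhi_ul_xfer_cb(struct mhi_device *mhi_dev,
				struct mhi_result *result)
{
}

static const struct mhi_device_id demo_mhi_id_table[] = {
	{ .chan = "DEMO" },	/* hypothetical channel name */
	{}
};

static struct mhi_driver demo_mhi_driver = {
	.id_table = demo_mhi_id_table,
	.probe = demo_mhi_probe,
	.remove = demo_mhi_remove,
	.ul_xfer_cb = demo_mhi_ul_xfer_cb,
	.dl_xfer_cb = demo_mhi_dl_xfer_cb,
	.driver = {
		.name = "demo_mhi_client",
	},
};
module_mhi_driver(demo_mhi_driver);

MODULE_LICENSE("GPL v2");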
- */ -static inline u32 mii_lpa_to_ethtool_lpa_x(u32 lpa) -{ - u32 result = 0; - - if (lpa & LPA_LPACK) - result |= ADVERTISED_Autoneg; - - return result | mii_adv_to_ethtool_adv_x(lpa); -} - -/** * mii_lpa_mod_linkmode_adv_sgmii * @lp_advertising: pointer to destination link mode. * @lpa: value of the MII_LPA register @@ -536,6 +518,45 @@ static inline u32 linkmode_adv_to_lcl_adv_t(unsigned long *advertising) } /** + * mii_lpa_mod_linkmode_x - decode the link partner's config_reg to linkmodes + * @linkmodes: link modes array + * @lpa: config_reg word from link partner + * @fd_bit: link mode for 1000XFULL bit + */ +static inline void mii_lpa_mod_linkmode_x(unsigned long *linkmodes, u16 lpa, + int fd_bit) +{ + linkmode_mod_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, linkmodes, + lpa & LPA_LPACK); + linkmode_mod_bit(ETHTOOL_LINK_MODE_Pause_BIT, linkmodes, + lpa & LPA_1000XPAUSE); + linkmode_mod_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, linkmodes, + lpa & LPA_1000XPAUSE_ASYM); + linkmode_mod_bit(fd_bit, linkmodes, + lpa & LPA_1000XFULL); +} + +/** + * linkmode_adv_to_mii_adv_x - encode a linkmode to config_reg + * @linkmodes: linkmodes + * @fd_bit: full duplex bit + */ +static inline u16 linkmode_adv_to_mii_adv_x(const unsigned long *linkmodes, + int fd_bit) +{ + u16 adv = 0; + + if (linkmode_test_bit(fd_bit, linkmodes)) + adv |= ADVERTISE_1000XFULL; + if (linkmode_test_bit(ETHTOOL_LINK_MODE_Pause_BIT, linkmodes)) + adv |= ADVERTISE_1000XPAUSE; + if (linkmode_test_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, linkmodes)) + adv |= ADVERTISE_1000XPSE_ASYM; + + return adv; +} + +/** * mii_advertise_flowctrl - get flow control advertisement flags * @cap: Flow control capabilities (FLOW_CTRL_RX, FLOW_CTRL_TX or both) */ diff --git a/include/linux/min_heap.h b/include/linux/min_heap.h new file mode 100644 index 000000000000..44077837385f --- /dev/null +++ b/include/linux/min_heap.h @@ -0,0 +1,134 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_MIN_HEAP_H +#define _LINUX_MIN_HEAP_H + +#include <linux/bug.h> +#include <linux/string.h> +#include <linux/types.h> + +/** + * struct min_heap - Data structure to hold a min-heap. + * @data: Start of array holding the heap elements. + * @nr: Number of elements currently in the heap. + * @size: Maximum number of elements that can be held in current storage. + */ +struct min_heap { + void *data; + int nr; + int size; +}; + +/** + * struct min_heap_callbacks - Data/functions to customise the min_heap. + * @elem_size: The nr of each element in bytes. + * @less: Partial order function for this heap. + * @swp: Swap elements function. + */ +struct min_heap_callbacks { + int elem_size; + bool (*less)(const void *lhs, const void *rhs); + void (*swp)(void *lhs, void *rhs); +}; + +/* Sift the element at pos down the heap. */ +static __always_inline +void min_heapify(struct min_heap *heap, int pos, + const struct min_heap_callbacks *func) +{ + void *left, *right, *parent, *smallest; + void *data = heap->data; + + for (;;) { + if (pos * 2 + 1 >= heap->nr) + break; + + left = data + ((pos * 2 + 1) * func->elem_size); + parent = data + (pos * func->elem_size); + smallest = parent; + if (func->less(left, smallest)) + smallest = left; + + if (pos * 2 + 2 < heap->nr) { + right = data + ((pos * 2 + 2) * func->elem_size); + if (func->less(right, smallest)) + smallest = right; + } + if (smallest == parent) + break; + func->swp(smallest, parent); + if (smallest == left) + pos = (pos * 2) + 1; + else + pos = (pos * 2) + 2; + } +} + +/* Floyd's approach to heapification that is O(nr). 
*/ +static __always_inline +void min_heapify_all(struct min_heap *heap, + const struct min_heap_callbacks *func) +{ + int i; + + for (i = heap->nr / 2; i >= 0; i--) + min_heapify(heap, i, func); +} + +/* Remove minimum element from the heap, O(log2(nr)). */ +static __always_inline +void min_heap_pop(struct min_heap *heap, + const struct min_heap_callbacks *func) +{ + void *data = heap->data; + + if (WARN_ONCE(heap->nr <= 0, "Popping an empty heap")) + return; + + /* Place last element at the root (position 0) and then sift down. */ + heap->nr--; + memcpy(data, data + (heap->nr * func->elem_size), func->elem_size); + min_heapify(heap, 0, func); +} + +/* + * Remove the minimum element and then push the given element. The + * implementation performs 1 sift (O(log2(nr))) and is therefore more + * efficient than a pop followed by a push that does 2. + */ +static __always_inline +void min_heap_pop_push(struct min_heap *heap, + const void *element, + const struct min_heap_callbacks *func) +{ + memcpy(heap->data, element, func->elem_size); + min_heapify(heap, 0, func); +} + +/* Push an element on to the heap, O(log2(nr)). */ +static __always_inline +void min_heap_push(struct min_heap *heap, const void *element, + const struct min_heap_callbacks *func) +{ + void *data = heap->data; + void *child, *parent; + int pos; + + if (WARN_ONCE(heap->nr >= heap->size, "Pushing on a full heap")) + return; + + /* Place at the end of data. */ + pos = heap->nr; + memcpy(data + (pos * func->elem_size), element, func->elem_size); + heap->nr++; + + /* Sift child at pos up. */ + for (; pos > 0; pos = (pos - 1) / 2) { + child = data + (pos * func->elem_size); + parent = data + ((pos - 1) / 2) * func->elem_size; + if (func->less(parent, child)) + break; + func->swp(parent, child); + } +} + +#endif /* _LINUX_MIN_HEAP_H */ diff --git a/include/linux/miscdevice.h b/include/linux/miscdevice.h index becde6981a95..c7a93002a3c1 100644 --- a/include/linux/miscdevice.h +++ b/include/linux/miscdevice.h @@ -25,20 +25,31 @@ #define TEMP_MINOR 131 /* Temperature Sensor */ #define APM_MINOR_DEV 134 #define RTC_MINOR 135 -#define EFI_RTC_MINOR 136 /* EFI Time services */ +/*#define EFI_RTC_MINOR 136 was EFI Time services */ #define VHCI_MINOR 137 #define SUN_OPENPROM_MINOR 139 #define DMAPI_MINOR 140 /* unused */ #define NVRAM_MINOR 144 +#define SBUS_FLASH_MINOR 152 #define SGI_MMTIMER 153 +#define PMU_MINOR 154 #define STORE_QUEUE_MINOR 155 /* unused */ +#define LCD_MINOR 156 +#define AC_MINOR 157 +#define BUTTON_MINOR 158 /* Major 10, Minor 158, /dev/nwbutton */ +#define NWFLASH_MINOR 160 /* MAJOR is 10 - miscdevice */ +#define ENVCTRL_MINOR 162 #define I2O_MINOR 166 +#define UCTRL_MINOR 174 #define AGPGART_MINOR 175 +#define TOSH_MINOR_DEV 181 #define HWRNG_MINOR 183 #define MICROCODE_MINOR 184 +#define KEYPAD_MINOR 185 #define IRNET_MINOR 187 #define D7S_MINOR 193 #define VFIO_MINOR 196 +#define PXA3XX_GCU_MINOR 197 #define TUN_MINOR 200 #define CUSE_MINOR 203 #define MWAVE_MINOR 219 /* ACP/Mwave Modem */ @@ -49,6 +60,7 @@ #define MISC_MCELOG_MINOR 227 #define HPET_MINOR 228 #define FUSE_MINOR 229 +#define SNAPSHOT_MINOR 231 #define KVM_MINOR 232 #define BTRFS_MINOR 234 #define AUTOFS_MINOR 235 diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h index 0e62c3db45e5..2b90097a6cf9 100644 --- a/include/linux/mlx5/device.h +++ b/include/linux/mlx5/device.h @@ -1211,6 +1211,12 @@ enum mlx5_qcam_feature_groups { #define MLX5_CAP_FLOWTABLE_RDMA_RX_MAX(mdev, cap) \ MLX5_CAP_FLOWTABLE_MAX(mdev, 
flow_table_properties_nic_receive_rdma.cap) +#define MLX5_CAP_FLOWTABLE_RDMA_TX(mdev, cap) \ + MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_transmit_rdma.cap) + +#define MLX5_CAP_FLOWTABLE_RDMA_TX_MAX(mdev, cap) \ + MLX5_CAP_FLOWTABLE_MAX(mdev, flow_table_properties_nic_transmit_rdma.cap) + #define MLX5_CAP_ESW_FLOWTABLE(mdev, cap) \ MLX5_GET(flow_table_eswitch_cap, \ mdev->caps.hca_cur[MLX5_CAP_ESWITCH_FLOW_TABLE], cap) diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index 277a51d3ec40..6f8f79ef829b 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -213,23 +213,6 @@ enum mlx5_port_status { MLX5_PORT_DOWN = 2, }; -struct mlx5_bfreg_info { - u32 *sys_pages; - int num_low_latency_bfregs; - unsigned int *count; - - /* - * protect bfreg allocation data structs - */ - struct mutex lock; - u32 ver; - bool lib_uar_4k; - u32 num_sys_pages; - u32 num_static_sys_pages; - u32 total_num_bfregs; - u32 num_dyn_bfregs; -}; - struct mlx5_cmd_first { __be32 data[4]; }; @@ -518,9 +501,11 @@ struct mlx5_rate_limit { }; struct mlx5_rl_entry { - struct mlx5_rate_limit rl; - u16 index; - u16 refcount; + u8 rl_raw[MLX5_ST_SZ_BYTES(set_pp_rate_limit_context)]; + u16 index; + u64 refcount; + u16 uid; + u8 dedicated : 1; }; struct mlx5_rl_table { @@ -573,10 +558,6 @@ struct mlx5_priv { /* end: alloc staff */ struct dentry *dbg_root; - /* protect mkey key part */ - spinlock_t mkey_lock; - u8 mkey_key; - struct list_head dev_list; struct list_head ctx_list; spinlock_t ctx_lock; @@ -722,6 +703,7 @@ struct mlx5_core_dev { struct mlx5_clock clock; struct mlx5_ib_clock_info *clock_info; struct mlx5_fw_tracer *tracer; + struct mlx5_rsc_dump *rsc_dump; u32 vsc_addr; struct mlx5_hv_vhca *hv_vhca; }; @@ -945,12 +927,6 @@ struct mlx5_cmd_mailbox *mlx5_alloc_cmd_mailbox_chain(struct mlx5_core_dev *dev, gfp_t flags, int npages); void mlx5_free_cmd_mailbox_chain(struct mlx5_core_dev *dev, struct mlx5_cmd_mailbox *head); -int mlx5_core_create_mkey_cb(struct mlx5_core_dev *dev, - struct mlx5_core_mkey *mkey, - struct mlx5_async_ctx *async_ctx, u32 *in, - int inlen, u32 *out, int outlen, - mlx5_async_cbk_t callback, - struct mlx5_async_work *context); int mlx5_core_create_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mkey *mkey, u32 *in, int inlen); @@ -1007,6 +983,9 @@ int mlx5_rl_add_rate(struct mlx5_core_dev *dev, u16 *index, struct mlx5_rate_limit *rl); void mlx5_rl_remove_rate(struct mlx5_core_dev *dev, struct mlx5_rate_limit *rl); bool mlx5_rl_is_in_range(struct mlx5_core_dev *dev, u32 rate); +int mlx5_rl_add_rate_raw(struct mlx5_core_dev *dev, void *rl_in, u16 uid, + bool dedicated_entry, u16 *index); +void mlx5_rl_remove_rate_raw(struct mlx5_core_dev *dev, u16 index); bool mlx5_rl_are_equal(struct mlx5_rate_limit *rl_0, struct mlx5_rate_limit *rl_1); int mlx5_alloc_bfreg(struct mlx5_core_dev *mdev, struct mlx5_sq_bfreg *bfreg, diff --git a/include/linux/mlx5/eswitch.h b/include/linux/mlx5/eswitch.h index 98e667b176ef..c16827eeba9c 100644 --- a/include/linux/mlx5/eswitch.h +++ b/include/linux/mlx5/eswitch.h @@ -70,8 +70,30 @@ u16 mlx5_eswitch_get_total_vports(const struct mlx5_core_dev *dev); enum devlink_eswitch_encap_mode mlx5_eswitch_get_encap_mode(const struct mlx5_core_dev *dev); +bool mlx5_eswitch_reg_c1_loopback_enabled(const struct mlx5_eswitch *esw); bool mlx5_eswitch_vport_match_metadata_enabled(const struct mlx5_eswitch *esw); -u32 mlx5_eswitch_get_vport_metadata_for_match(const struct mlx5_eswitch *esw, + +/* Reg C0 usage: + * Reg C0 = < 
ESW_VHCA_ID_BITS(8) | ESW_VPORT BITS(8) | ESW_CHAIN_TAG(16) > + * + * Highest 8 bits of the reg c0 is the vhca_id, next 8 bits is vport_num, + * the rest (lowest 16 bits) is left for tc chain tag restoration. + * VHCA_ID + VPORT comprise the SOURCE_PORT matching. + */ +#define ESW_VHCA_ID_BITS 8 +#define ESW_VPORT_BITS 8 +#define ESW_SOURCE_PORT_METADATA_BITS (ESW_VHCA_ID_BITS + ESW_VPORT_BITS) +#define ESW_SOURCE_PORT_METADATA_OFFSET (32 - ESW_SOURCE_PORT_METADATA_BITS) +#define ESW_CHAIN_TAG_METADATA_BITS (32 - ESW_SOURCE_PORT_METADATA_BITS) +#define ESW_CHAIN_TAG_METADATA_MASK GENMASK(ESW_CHAIN_TAG_METADATA_BITS - 1,\ + 0) + +static inline u32 mlx5_eswitch_get_vport_metadata_mask(void) +{ + return GENMASK(31, 32 - ESW_SOURCE_PORT_METADATA_BITS); +} + +u32 mlx5_eswitch_get_vport_metadata_for_match(struct mlx5_eswitch *esw, u16 vport_num); u8 mlx5_eswitch_mode(struct mlx5_eswitch *esw); #else /* CONFIG_MLX5_ESWITCH */ @@ -88,17 +110,29 @@ mlx5_eswitch_get_encap_mode(const struct mlx5_core_dev *dev) } static inline bool +mlx5_eswitch_reg_c1_loopback_enabled(const struct mlx5_eswitch *esw) +{ + return false; +}; + +static inline bool mlx5_eswitch_vport_match_metadata_enabled(const struct mlx5_eswitch *esw) { return false; }; static inline u32 -mlx5_eswitch_get_vport_metadata_for_match(const struct mlx5_eswitch *esw, +mlx5_eswitch_get_vport_metadata_for_match(struct mlx5_eswitch *esw, int vport_num) { return 0; }; + +static inline u32 +mlx5_eswitch_get_vport_metadata_mask(void) +{ + return 0; +} #endif /* CONFIG_MLX5_ESWITCH */ #endif diff --git a/include/linux/mlx5/fs.h b/include/linux/mlx5/fs.h index 4cae16016b2b..e2d13e074067 100644 --- a/include/linux/mlx5/fs.h +++ b/include/linux/mlx5/fs.h @@ -77,6 +77,7 @@ enum mlx5_flow_namespace_type { MLX5_FLOW_NAMESPACE_EGRESS, MLX5_FLOW_NAMESPACE_RDMA_RX, MLX5_FLOW_NAMESPACE_RDMA_RX_KERNEL, + MLX5_FLOW_NAMESPACE_RDMA_TX, }; enum { @@ -84,6 +85,7 @@ enum { FDB_TC_OFFLOAD, FDB_FT_OFFLOAD, FDB_SLOW_PATH, + FDB_PER_VPORT, }; struct mlx5_pkt_reformat; diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index bfdf41537cf1..69b27c7dfc3e 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h @@ -414,8 +414,10 @@ struct mlx5_ifc_flow_table_prop_layout_bits { u8 reserved_at_16[0x1]; u8 table_miss_action_domain[0x1]; u8 termination_table[0x1]; - u8 reserved_at_19[0x7]; - u8 reserved_at_20[0x2]; + u8 reformat_and_fwd_to_table[0x1]; + u8 reserved_at_1a[0x6]; + u8 termination_table_raw_traffic[0x1]; + u8 reserved_at_21[0x1]; u8 log_max_ft_size[0x6]; u8 log_max_modify_header_context[0x8]; u8 max_modify_header_actions[0x8]; @@ -707,7 +709,7 @@ struct mlx5_ifc_flow_table_nic_cap_bits { struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_transmit; - u8 reserved_at_a00[0x200]; + struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_transmit_rdma; struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_transmit_sniffer; @@ -741,7 +743,7 @@ struct mlx5_ifc_flow_table_eswitch_cap_bits { u8 flow_source[0x1]; u8 reserved_at_18[0x2]; u8 multi_fdb_encap[0x1]; - u8 reserved_at_1b[0x1]; + u8 egress_acl_forward_to_vport[0x1]; u8 fdb_multi_path_to_table[0x1]; u8 reserved_at_1d[0x3]; @@ -813,7 +815,9 @@ struct mlx5_ifc_qos_cap_bits { u8 reserved_at_4[0x1]; u8 packet_pacing_burst_bound[0x1]; u8 packet_pacing_typical_size[0x1]; - u8 reserved_at_7[0x19]; + u8 reserved_at_7[0x4]; + u8 packet_pacing_uid[0x1]; + u8 reserved_at_c[0x14]; u8 reserved_at_20[0x20]; @@ -875,7 +879,11 @@ struct 
mlx5_ifc_per_protocol_networking_offload_caps_bits { u8 swp_csum[0x1]; u8 swp_lso[0x1]; u8 cqe_checksum_full[0x1]; - u8 reserved_at_24[0x5]; + u8 tunnel_stateless_geneve_tx[0x1]; + u8 tunnel_stateless_mpls_over_udp[0x1]; + u8 tunnel_stateless_mpls_over_gre[0x1]; + u8 tunnel_stateless_vxlan_gpe[0x1]; + u8 tunnel_stateless_ipv4_over_vxlan[0x1]; u8 tunnel_stateless_ip_over_ip[0x1]; u8 reserved_at_2a[0x6]; u8 max_vxlan_udp_ports[0x8]; @@ -8265,9 +8273,20 @@ struct mlx5_ifc_set_pp_rate_limit_out_bits { u8 reserved_at_40[0x40]; }; +struct mlx5_ifc_set_pp_rate_limit_context_bits { + u8 rate_limit[0x20]; + + u8 burst_upper_bound[0x20]; + + u8 reserved_at_40[0x10]; + u8 typical_packet_size[0x10]; + + u8 reserved_at_60[0x120]; +}; + struct mlx5_ifc_set_pp_rate_limit_in_bits { u8 opcode[0x10]; - u8 reserved_at_10[0x10]; + u8 uid[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; @@ -8277,14 +8296,7 @@ struct mlx5_ifc_set_pp_rate_limit_in_bits { u8 reserved_at_60[0x20]; - u8 rate_limit[0x20]; - - u8 burst_upper_bound[0x20]; - - u8 reserved_at_c0[0x10]; - u8 typical_packet_size[0x10]; - - u8 reserved_at_e0[0x120]; + struct mlx5_ifc_set_pp_rate_limit_context_bits ctx; }; struct mlx5_ifc_access_register_out_bits { @@ -8420,7 +8432,8 @@ struct mlx5_ifc_ptys_reg_bits { u8 proto_mask[0x3]; u8 an_status[0x4]; - u8 reserved_at_24[0x1c]; + u8 reserved_at_24[0xc]; + u8 data_rate_oper[0x10]; u8 ext_eth_proto_capability[0x20]; @@ -10486,7 +10499,8 @@ enum { }; enum { - MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_TYPE_DEK = 0x1, + MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_TYPE_TLS = 0x1, + MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_TYPE_IPSEC = 0x2, }; struct mlx5_ifc_tls_static_params_bits { diff --git a/include/linux/mlx5/mlx5_ifc_fpga.h b/include/linux/mlx5/mlx5_ifc_fpga.h index 37e065a80a43..07d77323f78a 100644 --- a/include/linux/mlx5/mlx5_ifc_fpga.h +++ b/include/linux/mlx5/mlx5_ifc_fpga.h @@ -608,7 +608,7 @@ struct mlx5_ifc_tls_cmd_bits { struct mlx5_ifc_tls_resp_bits { u8 syndrome[0x20]; u8 stream_id[0x20]; - u8 reserverd[0x40]; + u8 reserved[0x40]; }; #define MLX5_TLS_COMMAND_SIZE (0x100) diff --git a/include/linux/mm.h b/include/linux/mm.h index 52269e56c514..e2f938c5a9d8 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -27,6 +27,7 @@ #include <linux/memremap.h> #include <linux/overflow.h> #include <linux/sizes.h> +#include <linux/sched.h> struct mempolicy; struct anon_vma; @@ -356,10 +357,12 @@ extern unsigned int kobjsize(const void *objp); /* * Special vmas that are non-mergable, non-mlock()able. - * Note: mm/huge_memory.c VM_NO_THP depends on this definition. 
*/ #define VM_SPECIAL (VM_IO | VM_DONTEXPAND | VM_PFNMAP | VM_MIXEDMAP) +/* This mask prevents VMA from being scanned with khugepaged */ +#define VM_NO_KHUGEPAGED (VM_SPECIAL | VM_HUGETLB) + /* This mask defines which mm->def_flags a process can inherit its parent */ #define VM_INIT_DEF_MASK VM_NOHUGEPAGE @@ -378,15 +381,75 @@ extern unsigned int kobjsize(const void *objp); */ extern pgprot_t protection_map[16]; -#define FAULT_FLAG_WRITE 0x01 /* Fault was a write access */ -#define FAULT_FLAG_MKWRITE 0x02 /* Fault was mkwrite of existing pte */ -#define FAULT_FLAG_ALLOW_RETRY 0x04 /* Retry fault if blocking */ -#define FAULT_FLAG_RETRY_NOWAIT 0x08 /* Don't drop mmap_sem and wait when retrying */ -#define FAULT_FLAG_KILLABLE 0x10 /* The fault task is in SIGKILL killable region */ -#define FAULT_FLAG_TRIED 0x20 /* Second try */ -#define FAULT_FLAG_USER 0x40 /* The fault originated in userspace */ -#define FAULT_FLAG_REMOTE 0x80 /* faulting for non current tsk/mm */ -#define FAULT_FLAG_INSTRUCTION 0x100 /* The fault was during an instruction fetch */ +/** + * Fault flag definitions. + * + * @FAULT_FLAG_WRITE: Fault was a write fault. + * @FAULT_FLAG_MKWRITE: Fault was mkwrite of existing PTE. + * @FAULT_FLAG_ALLOW_RETRY: Allow to retry the fault if blocked. + * @FAULT_FLAG_RETRY_NOWAIT: Don't drop mmap_sem and wait when retrying. + * @FAULT_FLAG_KILLABLE: The fault task is in SIGKILL killable region. + * @FAULT_FLAG_TRIED: The fault has been tried once. + * @FAULT_FLAG_USER: The fault originated in userspace. + * @FAULT_FLAG_REMOTE: The fault is not for current task/mm. + * @FAULT_FLAG_INSTRUCTION: The fault was during an instruction fetch. + * @FAULT_FLAG_INTERRUPTIBLE: The fault can be interrupted by non-fatal signals. + * + * About @FAULT_FLAG_ALLOW_RETRY and @FAULT_FLAG_TRIED: we can specify + * whether we would allow page faults to retry by specifying these two + * fault flags correctly. Currently there can be three legal combinations: + * + * (a) ALLOW_RETRY and !TRIED: this means the page fault allows retry, and + * this is the first try + * + * (b) ALLOW_RETRY and TRIED: this means the page fault allows retry, and + * we've already tried at least once + * + * (c) !ALLOW_RETRY and !TRIED: this means the page fault does not allow retry + * + * The unlisted combination (!ALLOW_RETRY && TRIED) is illegal and should never + * be used. Note that page faults can be allowed to retry for multiple times, + * in which case we'll have an initial fault with flags (a) then later on + * continuous faults with flags (b). We should always try to detect pending + * signals before a retry to make sure the continuous page faults can still be + * interrupted if necessary. + */ +#define FAULT_FLAG_WRITE 0x01 +#define FAULT_FLAG_MKWRITE 0x02 +#define FAULT_FLAG_ALLOW_RETRY 0x04 +#define FAULT_FLAG_RETRY_NOWAIT 0x08 +#define FAULT_FLAG_KILLABLE 0x10 +#define FAULT_FLAG_TRIED 0x20 +#define FAULT_FLAG_USER 0x40 +#define FAULT_FLAG_REMOTE 0x80 +#define FAULT_FLAG_INSTRUCTION 0x100 +#define FAULT_FLAG_INTERRUPTIBLE 0x200 + +/* + * The default fault flags that should be used by most of the + * arch-specific page fault handlers. 
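 *
 * Typical use in an arch fault handler looks roughly like this: start
 * from FAULT_FLAG_DEFAULT and OR in the access-specific flags, e.g.
 *
 *	unsigned int flags = FAULT_FLAG_DEFAULT;
 *
 *	if (user_mode(regs))
 *		flags |= FAULT_FLAG_USER;
 *	if (is_write_fault)
 *		flags |= FAULT_FLAG_WRITE;
 *
 * where user_mode() and is_write_fault stand in for the architecture's
 * own checks of the faulting context.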
+ */ +#define FAULT_FLAG_DEFAULT (FAULT_FLAG_ALLOW_RETRY | \ + FAULT_FLAG_KILLABLE | \ + FAULT_FLAG_INTERRUPTIBLE) + +/** + * fault_flag_allow_retry_first - check ALLOW_RETRY the first time + * + * This is mostly used for places where we want to try to avoid taking + * the mmap_sem for too long a time when waiting for another condition + * to change, in which case we can try to be polite to release the + * mmap_sem in the first round to avoid potential starvation of other + * processes that would also want the mmap_sem. + * + * Return: true if the page fault allows retry and this is the first + * attempt of the fault handling; false otherwise. + */ +static inline bool fault_flag_allow_retry_first(unsigned int flags) +{ + return (flags & FAULT_FLAG_ALLOW_RETRY) && + (!(flags & FAULT_FLAG_TRIED)); +} #define FAULT_FLAG_TRACE \ { FAULT_FLAG_WRITE, "WRITE" }, \ @@ -397,7 +460,8 @@ extern pgprot_t protection_map[16]; { FAULT_FLAG_TRIED, "TRIED" }, \ { FAULT_FLAG_USER, "USER" }, \ { FAULT_FLAG_REMOTE, "REMOTE" }, \ - { FAULT_FLAG_INSTRUCTION, "INSTRUCTION" } + { FAULT_FLAG_INSTRUCTION, "INSTRUCTION" }, \ + { FAULT_FLAG_INTERRUPTIBLE, "INTERRUPTIBLE" } /* * vm_fault is filled by the the pagefault handler and passed to the vma's @@ -541,6 +605,36 @@ static inline bool vma_is_anonymous(struct vm_area_struct *vma) return !vma->vm_ops; } +static inline bool vma_is_temporary_stack(struct vm_area_struct *vma) +{ + int maybe_stack = vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP); + + if (!maybe_stack) + return false; + + if ((vma->vm_flags & VM_STACK_INCOMPLETE_SETUP) == + VM_STACK_INCOMPLETE_SETUP) + return true; + + return false; +} + +static inline bool vma_is_foreign(struct vm_area_struct *vma) +{ + if (!current->mm) + return true; + + if (current->mm != vma->vm_mm) + return true; + + return false; +} + +static inline bool vma_is_accessible(struct vm_area_struct *vma) +{ + return vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC); +} + #ifdef CONFIG_SHMEM /* * The vma_is_shmem is not inline because it is used only by slow @@ -770,6 +864,24 @@ static inline unsigned int compound_order(struct page *page) return page[1].compound_order; } +static inline bool hpage_pincount_available(struct page *page) +{ + /* + * Can the page->hpage_pinned_refcount field be used? That field is in + * the 3rd page of the compound page, so the smallest (2-page) compound + * pages cannot support it. + */ + page = compound_head(page); + return PageCompound(page) && compound_order(page) > 1; +} + +static inline int compound_pincount(struct page *page) +{ + VM_BUG_ON_PAGE(!hpage_pincount_available(page), page); + page = compound_head(page); + return atomic_read(compound_pincount_ptr(page)); +} + static inline void set_compound_order(struct page *page, unsigned int order) { page[1].compound_order = order; @@ -1001,6 +1113,8 @@ static inline void get_page(struct page *page) page_ref_inc(page); } +bool __must_check try_grab_page(struct page *page, unsigned int flags); + static inline __must_check bool try_get_page(struct page *page) { page = compound_head(page); @@ -1029,29 +1143,87 @@ static inline void put_page(struct page *page) __put_page(page); } -/** - * unpin_user_page() - release a gup-pinned page - * @page: pointer to page to be released +/* + * GUP_PIN_COUNTING_BIAS, and the associated functions that use it, overload + * the page's refcount so that two separate items are tracked: the original page + * reference count, and also a new count of how many pin_user_pages() calls were + * made against the page. 
("gup-pinned" is another term for the latter). + * + * With this scheme, pin_user_pages() becomes special: such pages are marked as + * distinct from normal pages. As such, the unpin_user_page() call (and its + * variants) must be used in order to release gup-pinned pages. * - * Pages that were pinned via pin_user_pages*() must be released via either - * unpin_user_page(), or one of the unpin_user_pages*() routines. This is so - * that eventually such pages can be separately tracked and uniquely handled. In - * particular, interactions with RDMA and filesystems need special handling. + * Choice of value: * - * unpin_user_page() and put_page() are not interchangeable, despite this early - * implementation that makes them look the same. unpin_user_page() calls must - * be perfectly matched up with pin*() calls. + * By making GUP_PIN_COUNTING_BIAS a power of two, debugging of page reference + * counts with respect to pin_user_pages() and unpin_user_page() becomes + * simpler, due to the fact that adding an even power of two to the page + * refcount has the effect of using only the upper N bits, for the code that + * counts up using the bias value. This means that the lower bits are left for + * the exclusive use of the original code that increments and decrements by one + * (or at least, by much smaller values than the bias value). + * + * Of course, once the lower bits overflow into the upper bits (and this is + * OK, because subtraction recovers the original values), then visual inspection + * no longer suffices to directly view the separate counts. However, for normal + * applications that don't have huge page reference counts, this won't be an + * issue. + * + * Locking: the lockless algorithm described in page_cache_get_speculative() + * and page_cache_gup_pin_speculative() provides safe operation for + * get_user_pages and page_mkclean and other calls that race to set up page + * table entries. */ -static inline void unpin_user_page(struct page *page) -{ - put_page(page); -} +#define GUP_PIN_COUNTING_BIAS (1U << 10) +void unpin_user_page(struct page *page); void unpin_user_pages_dirty_lock(struct page **pages, unsigned long npages, bool make_dirty); - void unpin_user_pages(struct page **pages, unsigned long npages); +/** + * page_maybe_dma_pinned() - report if a page is pinned for DMA. + * + * This function checks if a page has been pinned via a call to + * pin_user_pages*(). + * + * For non-huge pages, the return value is partially fuzzy: false is not fuzzy, + * because it means "definitely not pinned for DMA", but true means "probably + * pinned for DMA, but possibly a false positive due to having at least + * GUP_PIN_COUNTING_BIAS worth of normal page references". + * + * False positives are OK, because: a) it's unlikely for a page to get that many + * refcounts, and b) all the callers of this routine are expected to be able to + * deal gracefully with a false positive. + * + * For huge pages, the result will be exactly correct. That's because we have + * more tracking data available: the 3rd struct page in the compound page is + * used to track the pincount (instead using of the GUP_PIN_COUNTING_BIAS + * scheme). + * + * For more information, please see Documentation/vm/pin_user_pages.rst. + * + * @page: pointer to page to be queried. + * @Return: True, if it is likely that the page has been "dma-pinned". + * False, if the page is definitely not dma-pinned. 
+ */ +static inline bool page_maybe_dma_pinned(struct page *page) +{ + if (hpage_pincount_available(page)) + return compound_pincount(page) > 0; + + /* + * page_ref_count() is signed. If that refcount overflows, then + * page_ref_count() returns a negative value, and callers will avoid + * further incrementing the refcount. + * + * Here, for that overflow case, use the signed bit to count a little + * bit higher via unsigned math, and thus still get an accurate result. + */ + return ((unsigned int)page_ref_count(compound_head(page))) >= + GUP_PIN_COUNTING_BIAS; +} + #if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP) #define SECTION_IN_PAGE_FLAGS #endif @@ -1599,9 +1771,26 @@ extern unsigned long move_page_tables(struct vm_area_struct *vma, unsigned long old_addr, struct vm_area_struct *new_vma, unsigned long new_addr, unsigned long len, bool need_rmap_locks); + +/* + * Flags used by change_protection(). For now we make it a bitmap so + * that we can pass in multiple flags just like parameters. However + * for now all the callers are only use one of the flags at the same + * time. + */ +/* Whether we should allow dirty bit accounting */ +#define MM_CP_DIRTY_ACCT (1UL << 0) +/* Whether this protection change is for NUMA hints */ +#define MM_CP_PROT_NUMA (1UL << 1) +/* Whether this change is for write protecting */ +#define MM_CP_UFFD_WP (1UL << 2) /* do wp */ +#define MM_CP_UFFD_WP_RESOLVE (1UL << 3) /* Resolve wp */ +#define MM_CP_UFFD_WP_ALL (MM_CP_UFFD_WP | \ + MM_CP_UFFD_WP_RESOLVE) + extern unsigned long change_protection(struct vm_area_struct *vma, unsigned long start, unsigned long end, pgprot_t newprot, - int dirty_accountable, int prot_numa); + unsigned long cp_flags); extern int mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev, unsigned long start, unsigned long end, unsigned long newflags); @@ -2364,26 +2553,7 @@ struct vm_unmapped_area_info { unsigned long align_offset; }; -extern unsigned long unmapped_area(struct vm_unmapped_area_info *info); -extern unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info); - -/* - * Search for an unmapped address range. - * - * We are looking for a range that: - * - does not intersect with any VMA; - * - is contained within the [low_limit, high_limit) interval; - * - is at least the desired size. - * - satisfies (begin_addr & align_mask) == (align_offset & align_mask) - */ -static inline unsigned long -vm_unmapped_area(struct vm_unmapped_area_info *info) -{ - if (info->flags & VM_UNMAPPED_AREA_TOPDOWN) - return unmapped_area_topdown(info); - else - return unmapped_area(info); -} +extern unsigned long vm_unmapped_area(struct vm_unmapped_area_info *info); /* truncate.c */ extern void truncate_inode_pages(struct address_space *, loff_t); @@ -2715,6 +2885,10 @@ static inline bool debug_pagealloc_enabled_static(void) #if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_ARCH_HAS_SET_DIRECT_MAP) extern void __kernel_map_pages(struct page *page, int numpages, int enable); +/* + * When called in DEBUG_PAGEALLOC context, the call should most likely be + * guarded by debug_pagealloc_enabled() or debug_pagealloc_enabled_static() + */ static inline void kernel_map_pages(struct page *page, int numpages, int enable) { @@ -2863,6 +3037,23 @@ extern long copy_huge_page_from_user(struct page *dst_page, const void __user *usr_src, unsigned int pages_per_huge_page, bool allow_pagefault); + +/** + * vma_is_special_huge - Are transhuge page-table entries considered special? 
+ * @vma: Pointer to the struct vm_area_struct to consider + * + * Whether transhuge page-table entries are considered "special" following + * the definition in vm_normal_page(). + * + * Return: true if transhuge page-table entries should be considered special, + * false otherwise. + */ +static inline bool vma_is_special_huge(const struct vm_area_struct *vma) +{ + return vma_is_dax(vma) || (vma->vm_file && + (vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP))); +} + #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */ #ifdef CONFIG_DEBUG_PAGEALLOC diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h index 6f2fef7b0784..219bef41d87c 100644 --- a/include/linux/mm_inline.h +++ b/include/linux/mm_inline.h @@ -6,19 +6,20 @@ #include <linux/swap.h> /** - * page_is_file_cache - should the page be on a file LRU or anon LRU? + * page_is_file_lru - should the page be on a file LRU or anon LRU? * @page: the page to test * - * Returns 1 if @page is page cache page backed by a regular filesystem, - * or 0 if @page is anonymous, tmpfs or otherwise ram or swap backed. - * Used by functions that manipulate the LRU lists, to sort a page - * onto the right LRU list. + * Returns 1 if @page is a regular filesystem backed page cache page or a lazily + * freed anonymous page (e.g. via MADV_FREE). Returns 0 if @page is a normal + * anonymous page, a tmpfs page or otherwise ram or swap backed page. Used by + * functions that manipulate the LRU lists, to sort a page onto the right LRU + * list. * * We would like to get this info without a page flag, but the state * needs to survive until the page is last deleted from the LRU, which * could be as far down as __page_cache_release. */ -static inline int page_is_file_cache(struct page *page) +static inline int page_is_file_lru(struct page *page) { return !PageSwapBacked(page); } @@ -75,7 +76,7 @@ static __always_inline void del_page_from_lru_list(struct page *page, */ static inline enum lru_list page_lru_base_type(struct page *page) { - if (page_is_file_cache(page)) + if (page_is_file_lru(page)) return LRU_INACTIVE_FILE; return LRU_INACTIVE_ANON; } diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index c28911c3afa8..4aba6c0c2ba8 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -137,7 +137,7 @@ struct page { }; struct { /* Second tail page of compound page */ unsigned long _compound_pad_1; /* compound_head */ - unsigned long _compound_pad_2; + atomic_t hpage_pinned_refcount; /* For both global and memcg */ struct list_head deferred_list; }; @@ -226,6 +226,11 @@ static inline atomic_t *compound_mapcount_ptr(struct page *page) return &page[1].compound_mapcount; } +static inline atomic_t *compound_pincount_ptr(struct page *page) +{ + return &page[2].hpage_pinned_refcount; +} + /* * Used for sizing the vmemmap region on some architectures */ @@ -284,8 +289,8 @@ struct vm_userfaultfd_ctx {}; #endif /* CONFIG_USERFAULTFD */ /* - * This struct defines a memory VMM memory area. There is one of these - * per VM-area/task. A VM area is any part of the process virtual memory + * This struct describes a virtual memory area. There is one of these + * per VM-area/task. A VM area is any part of the process virtual memory * space that has a special rule for the page-fault handlers (ie a shared * library, the executable area etc). 
*/ diff --git a/include/linux/mmc/core.h b/include/linux/mmc/core.h index b7ba8810a3b5..29aa50711626 100644 --- a/include/linux/mmc/core.h +++ b/include/linux/mmc/core.h @@ -107,9 +107,6 @@ struct mmc_command { */ unsigned int busy_timeout; /* busy detect timeout in ms */ - /* Set this flag only for blocking sanitize request */ - bool sanitize_busy; - struct mmc_data *data; /* data segment associated with cmd */ struct mmc_request *mrq; /* associated request */ }; diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h index ba703384bea0..c318fb5b6a94 100644 --- a/include/linux/mmc/host.h +++ b/include/linux/mmc/host.h @@ -322,6 +322,8 @@ struct mmc_host { #define MMC_CAP_3_3V_DDR (1 << 11) /* Host supports eMMC DDR 3.3V */ #define MMC_CAP_1_8V_DDR (1 << 12) /* Host supports eMMC DDR 1.8V */ #define MMC_CAP_1_2V_DDR (1 << 13) /* Host supports eMMC DDR 1.2V */ +#define MMC_CAP_DDR (MMC_CAP_3_3V_DDR | MMC_CAP_1_8V_DDR | \ + MMC_CAP_1_2V_DDR) #define MMC_CAP_POWER_OFF_CARD (1 << 14) /* Can power off after boot */ #define MMC_CAP_BUS_WIDTH_TEST (1 << 15) /* CMD14/CMD19 bus width ok */ #define MMC_CAP_UHS_SDR12 (1 << 16) /* Host supports UHS SDR12 mode */ @@ -333,6 +335,7 @@ struct mmc_host { MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR104 | \ MMC_CAP_UHS_DDR50) #define MMC_CAP_SYNC_RUNTIME_PM (1 << 21) /* Synced runtime PM suspends. */ +#define MMC_CAP_NEED_RSP_BUSY (1 << 22) /* Commands with R1B can't use R1. */ #define MMC_CAP_DRIVER_TYPE_A (1 << 23) /* Host supports Driver Type A */ #define MMC_CAP_DRIVER_TYPE_C (1 << 24) /* Host supports Driver Type C */ #define MMC_CAP_DRIVER_TYPE_D (1 << 25) /* Host supports Driver Type D */ @@ -462,7 +465,10 @@ struct mmc_host { bool cqe_enabled; bool cqe_on; - unsigned long private[0] ____cacheline_aligned; + /* Host Software Queue support */ + bool hsq_enabled; + + unsigned long private[] ____cacheline_aligned; }; struct device_node; diff --git a/include/linux/mmc/mmc.h b/include/linux/mmc/mmc.h index 897a87c4c827..4b85ef05a906 100644 --- a/include/linux/mmc/mmc.h +++ b/include/linux/mmc/mmc.h @@ -161,6 +161,16 @@ static inline bool mmc_op_multi(u32 opcode) #define R1_STATE_PRG 7 #define R1_STATE_DIS 8 +static inline bool mmc_ready_for_data(u32 status) +{ + /* + * Some cards mishandle the status bits, so make sure to check both the + * busy indication and the card state. + */ + return status & R1_READY_FOR_DATA && + R1_CURRENT_STATE(status) == R1_STATE_TRAN; +} + /* * MMC/SD in SPI mode reports R1 status always, and R2 for SEND_STATUS * R1 is the low order byte; R2 is the next highest byte, when present. 
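The new mmc_ready_for_data() helper above folds the busy-bit and
current-state checks into a single predicate. A minimal polling sketch
(hypothetical helper name, not from this patch):

	int err;
	u32 status;

	do {
		err = read_card_status(card, &status);	/* CMD13 / SEND_STATUS */
		if (err)
			return err;
	} while (!mmc_ready_for_data(status));

read_card_status() stands in for whatever the caller uses to issue
SEND_STATUS; the loop only exits once the card reports
R1_READY_FOR_DATA while in the TRAN state.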
diff --git a/include/linux/mmc/sdio_func.h b/include/linux/mmc/sdio_func.h index 5a177f7a83c3..fa2aaab5e57a 100644 --- a/include/linux/mmc/sdio_func.h +++ b/include/linux/mmc/sdio_func.h @@ -25,7 +25,7 @@ struct sdio_func_tuple { struct sdio_func_tuple *next; unsigned char code; unsigned char size; - unsigned char data[0]; + unsigned char data[]; }; /* diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index 462f6873905a..e9892bf9eba9 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -100,41 +100,6 @@ struct free_area { unsigned long nr_free; }; -/* Used for pages not on another list */ -static inline void add_to_free_area(struct page *page, struct free_area *area, - int migratetype) -{ - list_add(&page->lru, &area->free_list[migratetype]); - area->nr_free++; -} - -/* Used for pages not on another list */ -static inline void add_to_free_area_tail(struct page *page, struct free_area *area, - int migratetype) -{ - list_add_tail(&page->lru, &area->free_list[migratetype]); - area->nr_free++; -} - -#ifdef CONFIG_SHUFFLE_PAGE_ALLOCATOR -/* Used to preserve page allocation order entropy */ -void add_to_free_area_random(struct page *page, struct free_area *area, - int migratetype); -#else -static inline void add_to_free_area_random(struct page *page, - struct free_area *area, int migratetype) -{ - add_to_free_area(page, area, migratetype); -} -#endif - -/* Used for pages which are on another list */ -static inline void move_to_free_area(struct page *page, struct free_area *area, - int migratetype) -{ - list_move(&page->lru, &area->free_list[migratetype]); -} - static inline struct page *get_page_from_free_area(struct free_area *area, int migratetype) { @@ -142,15 +107,6 @@ static inline struct page *get_page_from_free_area(struct free_area *area, struct page, lru); } -static inline void del_page_from_free_area(struct page *page, - struct free_area *area) -{ - list_del(&page->lru); - __ClearPageBuddy(page); - set_page_private(page, 0); - area->nr_free--; -} - static inline bool free_area_empty(struct free_area *area, int migratetype) { return list_empty(&area->free_list[migratetype]); @@ -243,6 +199,8 @@ enum node_stat_item { NR_DIRTIED, /* page dirtyings since bootup */ NR_WRITTEN, /* page writings since bootup */ NR_KERNEL_MISC_RECLAIMABLE, /* reclaimable non-slab kernel pages */ + NR_FOLL_PIN_ACQUIRED, /* via: pin_user_page(), gup flag: FOLL_PIN */ + NR_FOLL_PIN_RELEASED, /* pages returned via unpin_user_page() */ NR_VM_NODE_STAT_ITEMS }; @@ -706,7 +664,6 @@ struct deferred_split { * Memory statistics and page replacement data structures are maintained on a * per-zone basis. 
*/ -struct bootmem_data; typedef struct pglist_data { struct zone node_zones[MAX_NR_ZONES]; struct zonelist node_zonelists[MAX_ZONELISTS]; @@ -1185,7 +1142,9 @@ static inline unsigned long section_nr_to_pfn(unsigned long sec) #define SUBSECTION_ALIGN_DOWN(pfn) ((pfn) & PAGE_SUBSECTION_MASK) struct mem_section_usage { +#ifdef CONFIG_SPARSEMEM_VMEMMAP DECLARE_BITMAP(subsection_map, SUBSECTIONS_PER_SECTION); +#endif /* See declaration of similar field in struct zone */ unsigned long pageblock_flags[0]; }; @@ -1372,7 +1331,7 @@ static inline int pfn_valid(unsigned long pfn) } #endif -static inline int pfn_present(unsigned long pfn) +static inline int pfn_in_present_section(unsigned long pfn) { if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS) return 0; @@ -1409,7 +1368,7 @@ void sparse_init(void); #else #define sparse_init() do {} while (0) #define sparse_index_init(_sec, _nid) do {} while (0) -#define pfn_present pfn_valid +#define pfn_in_present_section pfn_valid #define subsection_map_init(_pfn, _nr_pages) do {} while (0) #endif /* CONFIG_SPARSEMEM */ diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h index e3596db077dc..4c2ddd0941a7 100644 --- a/include/linux/mod_devicetable.h +++ b/include/linux/mod_devicetable.h @@ -667,9 +667,7 @@ struct x86_cpu_id { kernel_ulong_t driver_data; }; -#define X86_FEATURE_MATCH(x) \ - { X86_VENDOR_ANY, X86_FAMILY_ANY, X86_MODEL_ANY, x } - +/* Wild cards for x86_cpu_id::vendor, family, model and feature */ #define X86_VENDOR_ANY 0xffff #define X86_FAMILY_ANY 0 #define X86_MODEL_ANY 0 @@ -821,4 +819,17 @@ struct wmi_device_id { const void *context; }; +#define MHI_DEVICE_MODALIAS_FMT "mhi:%s" +#define MHI_NAME_SIZE 32 + +/** + * struct mhi_device_id - MHI device identification + * @chan: MHI channel name + * @driver_data: driver data; + */ +struct mhi_device_id { + const char chan[MHI_NAME_SIZE]; + kernel_ulong_t driver_data; +}; + #endif /* LINUX_MOD_DEVICETABLE_H */ diff --git a/include/linux/most.h b/include/linux/most.h new file mode 100644 index 000000000000..232e01b7f5d2 --- /dev/null +++ b/include/linux/most.h @@ -0,0 +1,337 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * most.h - API for component and adapter drivers + * + * Copyright (C) 2013-2015, Microchip Technology Germany II GmbH & Co. KG + */ + +#ifndef __MOST_CORE_H__ +#define __MOST_CORE_H__ + +#include <linux/types.h> +#include <linux/device.h> + +struct module; +struct interface_private; + +/** + * Interface type + */ +enum most_interface_type { + ITYPE_LOOPBACK = 1, + ITYPE_I2C, + ITYPE_I2S, + ITYPE_TSI, + ITYPE_HBI, + ITYPE_MEDIALB_DIM, + ITYPE_MEDIALB_DIM2, + ITYPE_USB, + ITYPE_PCIE +}; + +/** + * Channel direction. + */ +enum most_channel_direction { + MOST_CH_RX = 1 << 0, + MOST_CH_TX = 1 << 1, +}; + +/** + * Channel data type. + */ +enum most_channel_data_type { + MOST_CH_CONTROL = 1 << 0, + MOST_CH_ASYNC = 1 << 1, + MOST_CH_ISOC = 1 << 2, + MOST_CH_SYNC = 1 << 5, +}; + +enum most_status_flags { + /* MBO was processed successfully (data was send or received )*/ + MBO_SUCCESS = 0, + /* The MBO contains wrong or missing information. */ + MBO_E_INVAL, + /* MBO was completed as HDM Channel will be closed */ + MBO_E_CLOSE, +}; + +/** + * struct most_channel_capability - Channel capability + * @direction: Supported channel directions. + * The value is bitwise OR-combination of the values from the + * enumeration most_channel_direction. Zero is allowed value and means + * "channel may not be used". + * @data_type: Supported channel data types. 
+ * The value is bitwise OR-combination of the values from the + * enumeration most_channel_data_type. Zero is allowed value and means + * "channel may not be used". + * @num_buffers_packet: Maximum number of buffers supported by this channel + * for packet data types (Async,Control,QoS) + * @buffer_size_packet: Maximum buffer size supported by this channel + * for packet data types (Async,Control,QoS) + * @num_buffers_streaming: Maximum number of buffers supported by this channel + * for streaming data types (Sync,AV Packetized) + * @buffer_size_streaming: Maximum buffer size supported by this channel + * for streaming data types (Sync,AV Packetized) + * @name_suffix: Optional suffix providean by an HDM that is attached to the + * regular channel name. + * + * Describes the capabilities of a MOST channel like supported Data Types + * and directions. This information is provided by an HDM for the MostCore. + * + * The Core creates read only sysfs attribute files in + * /sys/devices/most/mdev#/<channel>/ with the + * following attributes: + * -available_directions + * -available_datatypes + * -number_of_packet_buffers + * -number_of_stream_buffers + * -size_of_packet_buffer + * -size_of_stream_buffer + * where content of each file is a string with all supported properties of this + * very channel attribute. + */ +struct most_channel_capability { + u16 direction; + u16 data_type; + u16 num_buffers_packet; + u16 buffer_size_packet; + u16 num_buffers_streaming; + u16 buffer_size_streaming; + const char *name_suffix; +}; + +/** + * struct most_channel_config - stores channel configuration + * @direction: direction of the channel + * @data_type: data type travelling over this channel + * @num_buffers: number of buffers + * @buffer_size: size of a buffer for AIM. + * Buffer size may be cutted down by HDM in a configure callback + * to match to a given interface and channel type. + * @extra_len: additional buffer space for internal HDM purposes like padding. + * May be set by HDM in a configure callback if needed. + * @subbuffer_size: size of a subbuffer + * @packets_per_xact: number of MOST frames that are packet inside one USB + * packet. This is USB specific + * + * Describes the configuration for a MOST channel. This information is + * provided from the MostCore to a HDM (like the Medusa PCIe Interface) as a + * parameter of the "configure" function call. + */ +struct most_channel_config { + enum most_channel_direction direction; + enum most_channel_data_type data_type; + u16 num_buffers; + u16 buffer_size; + u16 extra_len; + u16 subbuffer_size; + u16 packets_per_xact; + u16 dbr_size; +}; + +/* + * struct mbo - MOST Buffer Object. + * @context: context for core completion handler + * @priv: private data for HDM + * + * public: documented fields that are used for the communications + * between MostCore and HDMs + * + * @list: list head for use by the mbo's current owner + * @ifp: (in) associated interface instance + * @num_buffers_ptr: amount of pool buffers + * @hdm_channel_id: (in) HDM channel instance + * @virt_address: (in) kernel virtual address of the buffer + * @bus_address: (in) bus address of the buffer + * @buffer_length: (in) buffer payload length + * @processed_length: (out) processed length + * @status: (out) transfer status + * @complete: (in) completion routine + * + * The core allocates and initializes the MBO. + * + * The HDM receives MBO for transfer from the core with the call to enqueue(). 
+ * The HDM copies the data to- or from the buffer depending on configured + * channel direction, set "processed_length" and "status" and completes + * the transfer procedure by calling the completion routine. + * + * Finally, the MBO is being deallocated or recycled for further + * transfers of the same or a different HDM. + * + * Directions of usage: + * The core driver should never access any MBO fields (even if marked + * as "public") while the MBO is owned by an HDM. The ownership starts with + * the call of enqueue() and ends with the call of its complete() routine. + * + * II. + * Every HDM attached to the core driver _must_ ensure that it returns any MBO + * it owns (due to a previous call to enqueue() by the core driver) before it + * de-registers an interface or gets unloaded from the kernel. If this direction + * is violated memory leaks will occur, since the core driver does _not_ track + * MBOs it is currently not in control of. + * + */ +struct mbo { + void *context; + void *priv; + struct list_head list; + struct most_interface *ifp; + int *num_buffers_ptr; + u16 hdm_channel_id; + void *virt_address; + dma_addr_t bus_address; + u16 buffer_length; + u16 processed_length; + enum most_status_flags status; + void (*complete)(struct mbo *mbo); +}; + +/** + * Interface instance description. + * + * Describes an interface of a MOST device the core driver is bound to. + * This structure is allocated and initialized in the HDM. MostCore may not + * modify this structure. + * + * @dev: the actual device + * @mod: module + * @interface Interface type. \sa most_interface_type. + * @description PRELIMINARY. + * Unique description of the device instance from point of view of the + * interface in free text form (ASCII). + * It may be a hexadecimal presentation of the memory address for the MediaLB + * IP or USB device ID with USB properties for USB interface, etc. + * @num_channels Number of channels and size of the channel_vector. + * @channel_vector Properties of the channels. + * Array index represents channel ID by the driver. + * @configure Callback to change data type for the channel of the + * interface instance. May be zero if the instance of the interface is not + * configurable. Parameter channel_config describes direction and data + * type for the channel, configured by the higher level. The content of + * @enqueue Delivers MBO to the HDM for processing. + * After HDM completes Rx- or Tx- operation the processed MBO shall + * be returned back to the MostCore using completion routine. + * The reason to get the MBO delivered from the MostCore after the channel + * is poisoned is the re-opening of the channel by the application. + * In this case the HDM shall hold MBOs and service the channel as usual. + * The HDM must be able to hold at least one MBO for each channel. + * The callback returns a negative value on error, otherwise 0. + * @poison_channel Informs HDM about closing the channel. The HDM shall + * cancel all transfers and synchronously or asynchronously return + * all enqueued for this channel MBOs using the completion routine. + * The callback returns a negative value on error, otherwise 0. + * @request_netinfo: triggers retrieving of network info from the HDM by + * means of "Message exchange over MDP/MEP" + * The call of the function request_netinfo with the parameter on_netinfo as + * NULL prohibits use of the previously obtained function pointer. + * @priv Private field used by mostcore to store context information. 
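 *
 * Illustrative note: a hardware-dependent module (HDM) typically fills
 * in @dev, @mod, @interface, @description, @num_channels,
 * @channel_vector and the @configure, @enqueue, @poison_channel and
 * @request_netinfo callbacks before handing the structure to
 * most_register_interface().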
+ */ +struct most_interface { + struct device *dev; + struct device *driver_dev; + struct module *mod; + enum most_interface_type interface; + const char *description; + unsigned int num_channels; + struct most_channel_capability *channel_vector; + void *(*dma_alloc)(struct mbo *mbo, u32 size); + void (*dma_free)(struct mbo *mbo, u32 size); + int (*configure)(struct most_interface *iface, int channel_idx, + struct most_channel_config *channel_config); + int (*enqueue)(struct most_interface *iface, int channel_idx, + struct mbo *mbo); + int (*poison_channel)(struct most_interface *iface, int channel_idx); + void (*request_netinfo)(struct most_interface *iface, int channel_idx, + void (*on_netinfo)(struct most_interface *iface, + unsigned char link_stat, + unsigned char *mac_addr)); + void *priv; + struct interface_private *p; +}; + +/** + * struct most_component - identifies a loadable component for the mostcore + * @list: list_head + * @name: component name + * @probe_channel: function for core to notify driver about channel connection + * @disconnect_channel: callback function to disconnect a certain channel + * @rx_completion: completion handler for received packets + * @tx_completion: completion handler for transmitted packets + */ +struct most_component { + struct list_head list; + const char *name; + struct module *mod; + int (*probe_channel)(struct most_interface *iface, int channel_idx, + struct most_channel_config *cfg, char *name, + char *param); + int (*disconnect_channel)(struct most_interface *iface, + int channel_idx); + int (*rx_completion)(struct mbo *mbo); + int (*tx_completion)(struct most_interface *iface, int channel_idx); + int (*cfg_complete)(void); +}; + +/** + * most_register_interface - Registers instance of the interface. + * @iface: Pointer to the interface instance description. + * + * Returns a pointer to the kobject of the generated instance. + * + * Note: HDM has to ensure that any reference held on the kobj is + * released before deregistering the interface. + */ +int most_register_interface(struct most_interface *iface); + +/** + * Deregisters instance of the interface. + * @intf_instance Pointer to the interface instance description. + */ +void most_deregister_interface(struct most_interface *iface); +void most_submit_mbo(struct mbo *mbo); + +/** + * most_stop_enqueue - prevents core from enqueing MBOs + * @iface: pointer to interface + * @channel_idx: channel index + */ +void most_stop_enqueue(struct most_interface *iface, int channel_idx); + +/** + * most_resume_enqueue - allow core to enqueue MBOs again + * @iface: pointer to interface + * @channel_idx: channel index + * + * This clears the enqueue halt flag and enqueues all MBOs currently + * in wait fifo. 
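 *
 * most_stop_enqueue() and most_resume_enqueue() are typically used as a
 * pair by an HDM: stop before an interval in which the hardware cannot
 * accept new buffers, resume once it can, at which point the MBOs held
 * in the wait fifo are handed to the channel again.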
+ */ +void most_resume_enqueue(struct most_interface *iface, int channel_idx); +int most_register_component(struct most_component *comp); +int most_deregister_component(struct most_component *comp); +struct mbo *most_get_mbo(struct most_interface *iface, int channel_idx, + struct most_component *comp); +void most_put_mbo(struct mbo *mbo); +int channel_has_mbo(struct most_interface *iface, int channel_idx, + struct most_component *comp); +int most_start_channel(struct most_interface *iface, int channel_idx, + struct most_component *comp); +int most_stop_channel(struct most_interface *iface, int channel_idx, + struct most_component *comp); +int __init configfs_init(void); +int most_register_configfs_subsys(struct most_component *comp); +void most_deregister_configfs_subsys(struct most_component *comp); +int most_add_link(char *mdev, char *mdev_ch, char *comp_name, char *link_name, + char *comp_param); +int most_remove_link(char *mdev, char *mdev_ch, char *comp_name); +int most_set_cfg_buffer_size(char *mdev, char *mdev_ch, u16 val); +int most_set_cfg_subbuffer_size(char *mdev, char *mdev_ch, u16 val); +int most_set_cfg_dbr_size(char *mdev, char *mdev_ch, u16 val); +int most_set_cfg_num_buffers(char *mdev, char *mdev_ch, u16 val); +int most_set_cfg_datatype(char *mdev, char *mdev_ch, char *buf); +int most_set_cfg_direction(char *mdev, char *mdev_ch, char *buf); +int most_set_cfg_packets_xact(char *mdev, char *mdev_ch, u16 val); +int most_cfg_complete(char *comp_name); +void most_interface_register_notify(const char *mdev_name); +#endif /* MOST_CORE_H_ */ diff --git a/include/linux/msdos_partition.h b/include/linux/msdos_partition.h new file mode 100644 index 000000000000..2cb82db2a43c --- /dev/null +++ b/include/linux/msdos_partition.h @@ -0,0 +1,50 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_MSDOS_PARTITION_H +#define _LINUX_MSDOS_PARTITION_H + +#define MSDOS_LABEL_MAGIC 0xAA55 + +struct msdos_partition { + u8 boot_ind; /* 0x80 - active */ + u8 head; /* starting head */ + u8 sector; /* starting sector */ + u8 cyl; /* starting cylinder */ + u8 sys_ind; /* What partition type */ + u8 end_head; /* end head */ + u8 end_sector; /* end sector */ + u8 end_cyl; /* end cylinder */ + __le32 start_sect; /* starting sector counting from 0 */ + __le32 nr_sects; /* nr of sectors in partition */ +} __packed; + +enum msdos_sys_ind { + /* + * These three have identical behaviour; use the second one if DOS FDISK + * gets confused about extended/logical partitions starting past + * cylinder 1023. 
+ */ + DOS_EXTENDED_PARTITION = 5, + LINUX_EXTENDED_PARTITION = 0x85, + WIN98_EXTENDED_PARTITION = 0x0f, + + LINUX_DATA_PARTITION = 0x83, + LINUX_LVM_PARTITION = 0x8e, + LINUX_RAID_PARTITION = 0xfd, /* autodetect RAID partition */ + + SOLARIS_X86_PARTITION = 0x82, /* also Linux swap partitions */ + NEW_SOLARIS_X86_PARTITION = 0xbf, + + DM6_AUX1PARTITION = 0x51, /* no DDO: use xlated geom */ + DM6_AUX3PARTITION = 0x53, /* no DDO: use xlated geom */ + DM6_PARTITION = 0x54, /* has DDO: use xlated geom & offset */ + EZD_PARTITION = 0x55, /* EZ-DRIVE */ + + FREEBSD_PARTITION = 0xa5, /* FreeBSD Partition ID */ + OPENBSD_PARTITION = 0xa6, /* OpenBSD Partition ID */ + NETBSD_PARTITION = 0xa9, /* NetBSD Partition ID */ + BSDI_PARTITION = 0xb7, /* BSDI Partition ID */ + MINIX_PARTITION = 0x81, /* Minix Partition ID */ + UNIXWARE_PARTITION = 0x63, /* Same as GNU_HURD and SCO Unix */ +}; + +#endif /* LINUX_MSDOS_PARTITION_H */ diff --git a/include/linux/mtd/mtd.h b/include/linux/mtd/mtd.h index 249e8d9bfbcd..2d1f4a61f4ac 100644 --- a/include/linux/mtd/mtd.h +++ b/include/linux/mtd/mtd.h @@ -8,6 +8,7 @@ #include <linux/types.h> #include <linux/uio.h> +#include <linux/list.h> #include <linux/notifier.h> #include <linux/device.h> #include <linux/of.h> @@ -194,10 +195,43 @@ struct mtd_debug_info { const char *partid; }; +/** + * struct mtd_part - MTD partition specific fields + * + * @node: list node used to add an MTD partition to the parent partition list + * @offset: offset of the partition relatively to the parent offset + * @flags: original flags (before the mtdpart logic decided to tweak them based + * on flash constraints, like eraseblock/pagesize alignment) + * + * This struct is embedded in mtd_info and contains partition-specific + * properties/fields. + */ +struct mtd_part { + struct list_head node; + u64 offset; + u32 flags; +}; + +/** + * struct mtd_master - MTD master specific fields + * + * @partitions_lock: lock protecting accesses to the partition list. Protects + * not only the master partition list, but also all + * sub-partitions. + * @suspended: et to 1 when the device is suspended, 0 otherwise + * + * This struct is embedded in mtd_info and contains master-specific + * properties/fields. The master is the root MTD device from the MTD partition + * point of view. + */ +struct mtd_master { + struct mutex partitions_lock; + unsigned int suspended : 1; +}; + struct mtd_info { u_char type; uint32_t flags; - uint32_t orig_flags; /* Flags as before running mtd checks */ uint64_t size; // Total size of the MTD /* "Major" erase size for the device. Naïve users may take this @@ -339,8 +373,52 @@ struct mtd_info { int usecount; struct mtd_debug_info dbg; struct nvmem_device *nvmem; + + /* + * Parent device from the MTD partition point of view. + * + * MTD masters do not have any parent, MTD partitions do. The parent + * MTD device can itself be a partition. 
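 *
 * The chain is walked with mtd_get_master() (see below), which always
 * ends at the master device, while mtd_get_master_ofs() accumulates the
 * partition offset of each level so that an offset relative to a
 * (sub-)partition can be translated into a master-relative offset.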
+ */ + struct mtd_info *parent; + + /* List of partitions attached to this MTD device */ + struct list_head partitions; + + union { + struct mtd_part part; + struct mtd_master master; + }; }; +static inline struct mtd_info *mtd_get_master(struct mtd_info *mtd) +{ + while (mtd->parent) + mtd = mtd->parent; + + return mtd; +} + +static inline u64 mtd_get_master_ofs(struct mtd_info *mtd, u64 ofs) +{ + while (mtd->parent) { + ofs += mtd->part.offset; + mtd = mtd->parent; + } + + return ofs; +} + +static inline bool mtd_is_partition(const struct mtd_info *mtd) +{ + return mtd->parent; +} + +static inline bool mtd_has_partitions(const struct mtd_info *mtd) +{ + return !list_empty(&mtd->partitions); +} + int mtd_ooblayout_ecc(struct mtd_info *mtd, int section, struct mtd_oob_region *oobecc); int mtd_ooblayout_find_eccregion(struct mtd_info *mtd, int eccbyte, @@ -392,13 +470,16 @@ static inline u32 mtd_oobavail(struct mtd_info *mtd, struct mtd_oob_ops *ops) static inline int mtd_max_bad_blocks(struct mtd_info *mtd, loff_t ofs, size_t len) { - if (!mtd->_max_bad_blocks) + struct mtd_info *master = mtd_get_master(mtd); + + if (!master->_max_bad_blocks) return -ENOTSUPP; if (mtd->size < (len + ofs) || ofs < 0) return -EINVAL; - return mtd->_max_bad_blocks(mtd, ofs, len); + return master->_max_bad_blocks(master, mtd_get_master_ofs(mtd, ofs), + len); } int mtd_wunit_to_pairing_info(struct mtd_info *mtd, int wunit, @@ -439,8 +520,10 @@ int mtd_writev(struct mtd_info *mtd, const struct kvec *vecs, static inline void mtd_sync(struct mtd_info *mtd) { - if (mtd->_sync) - mtd->_sync(mtd); + struct mtd_info *master = mtd_get_master(mtd); + + if (master->_sync) + master->_sync(master); } int mtd_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len); @@ -452,13 +535,31 @@ int mtd_block_markbad(struct mtd_info *mtd, loff_t ofs); static inline int mtd_suspend(struct mtd_info *mtd) { - return mtd->_suspend ? mtd->_suspend(mtd) : 0; + struct mtd_info *master = mtd_get_master(mtd); + int ret; + + if (master->master.suspended) + return 0; + + ret = master->_suspend ? 
master->_suspend(master) : 0; + if (ret) + return ret; + + master->master.suspended = 1; + return 0; } static inline void mtd_resume(struct mtd_info *mtd) { - if (mtd->_resume) - mtd->_resume(mtd); + struct mtd_info *master = mtd_get_master(mtd); + + if (!master->master.suspended) + return; + + if (master->_resume) + master->_resume(master); + + master->master.suspended = 0; } static inline uint32_t mtd_div_by_eb(uint64_t sz, struct mtd_info *mtd) @@ -538,7 +639,9 @@ static inline loff_t mtd_wunit_to_offset(struct mtd_info *mtd, loff_t base, static inline int mtd_has_oob(const struct mtd_info *mtd) { - return mtd->_read_oob && mtd->_write_oob; + struct mtd_info *master = mtd_get_master((struct mtd_info *)mtd); + + return master->_read_oob && master->_write_oob; } static inline int mtd_type_is_nand(const struct mtd_info *mtd) @@ -548,7 +651,9 @@ static inline int mtd_type_is_nand(const struct mtd_info *mtd) static inline int mtd_can_have_bb(const struct mtd_info *mtd) { - return !!mtd->_block_isbad; + struct mtd_info *master = mtd_get_master((struct mtd_info *)mtd); + + return !!master->_block_isbad; } /* Kernel-side ioctl definitions */ diff --git a/include/linux/mtd/partitions.h b/include/linux/mtd/partitions.h index 11cb0c50cd84..e545c050d3e8 100644 --- a/include/linux/mtd/partitions.h +++ b/include/linux/mtd/partitions.h @@ -105,7 +105,6 @@ extern void deregister_mtd_parser(struct mtd_part_parser *parser); module_driver(__mtd_part_parser, register_mtd_parser, \ deregister_mtd_parser) -int mtd_is_partition(const struct mtd_info *mtd); int mtd_add_partition(struct mtd_info *master, const char *name, long long offset, long long length); int mtd_del_partition(struct mtd_info *master, int partno); diff --git a/include/linux/mtd/rawnand.h b/include/linux/mtd/rawnand.h index 4ab9bccfcde0..1e76196f9829 100644 --- a/include/linux/mtd/rawnand.h +++ b/include/linux/mtd/rawnand.h @@ -1064,6 +1064,8 @@ struct nand_legacy { * @lock: lock protecting the suspended field. Also used to * serialize accesses to the NAND device. * @suspended: set to 1 when the device is suspended, 0 when it's not. + * @suspend: [REPLACEABLE] specific NAND device suspend operation + * @resume: [REPLACEABLE] specific NAND device resume operation * @bbt: [INTERN] bad block table pointer * @bbt_td: [REPLACEABLE] bad block table descriptor for flash * lookup. 
@@ -1077,6 +1079,8 @@ struct nand_legacy { * @manufacturer: [INTERN] Contains manufacturer information * @manufacturer.desc: [INTERN] Contains manufacturer's description * @manufacturer.priv: [INTERN] Contains manufacturer private information + * @lock_area: [REPLACEABLE] specific NAND chip lock operation + * @unlock_area: [REPLACEABLE] specific NAND chip unlock operation */ struct nand_chip { @@ -1117,6 +1121,8 @@ struct nand_chip { struct mutex lock; unsigned int suspended : 1; + int (*suspend)(struct nand_chip *chip); + void (*resume)(struct nand_chip *chip); uint8_t *oob_poi; struct nand_controller *controller; @@ -1136,6 +1142,9 @@ struct nand_chip { const struct nand_manufacturer *desc; void *priv; } manufacturer; + + int (*lock_area)(struct nand_chip *chip, loff_t ofs, uint64_t len); + int (*unlock_area)(struct nand_chip *chip, loff_t ofs, uint64_t len); }; extern const struct mtd_ooblayout_ops nand_ooblayout_sp_ops; @@ -1215,7 +1224,7 @@ static inline struct device_node *nand_get_flash_node(struct nand_chip *chip) * struct nand_flash_dev - NAND Flash Device ID Structure * @name: a human-readable name of the NAND chip * @dev_id: the device ID (the second byte of the full chip ID array) - * @mfr_id: manufecturer ID part of the full chip ID array (refers the same + * @mfr_id: manufacturer ID part of the full chip ID array (refers the same * memory address as ``id[0]``) * @dev_id: device ID part of the full chip ID array (refers the same memory * address as ``id[1]``) diff --git a/include/linux/mtd/spi-nor.h b/include/linux/mtd/spi-nor.h index 5abd91cc6dfa..1e2af0ec1f03 100644 --- a/include/linux/mtd/spi-nor.h +++ b/include/linux/mtd/spi-nor.h @@ -12,23 +12,6 @@ #include <linux/spi/spi-mem.h> /* - * Manufacturer IDs - * - * The first byte returned from the flash after sending opcode SPINOR_OP_RDID. - * Sometimes these are the same as CFI IDs, but sometimes they aren't. - */ -#define SNOR_MFR_ATMEL CFI_MFR_ATMEL -#define SNOR_MFR_GIGADEVICE 0xc8 -#define SNOR_MFR_INTEL CFI_MFR_INTEL -#define SNOR_MFR_ST CFI_MFR_ST /* ST Micro */ -#define SNOR_MFR_MICRON CFI_MFR_MICRON /* Micron */ -#define SNOR_MFR_ISSI CFI_MFR_PMC -#define SNOR_MFR_MACRONIX CFI_MFR_MACRONIX -#define SNOR_MFR_SPANSION CFI_MFR_AMD -#define SNOR_MFR_SST CFI_MFR_SST -#define SNOR_MFR_WINBOND 0xef /* Also used by some Spansion */ - -/* * Note on opcode nomenclature: some opcodes have a format like * SPINOR_OP_FUNCTION{4,}_x_y_z. The numbers x, y, and z stand for the number * of I/O lines used for the opcode, address, and data (respectively). 
The @@ -128,7 +111,9 @@ #define SR_BP0 BIT(2) /* Block protect 0 */ #define SR_BP1 BIT(3) /* Block protect 1 */ #define SR_BP2 BIT(4) /* Block protect 2 */ +#define SR_BP3 BIT(5) /* Block protect 3 */ #define SR_TB_BIT5 BIT(5) /* Top/Bottom protect */ +#define SR_BP3_BIT6 BIT(6) /* Block protect 3 */ #define SR_TB_BIT6 BIT(6) /* Top/Bottom protect */ #define SR_SRWD BIT(7) /* SR write protect */ /* Spansion/Cypress specific status bits */ @@ -137,6 +122,8 @@ #define SR1_QUAD_EN_BIT6 BIT(6) +#define SR_BP_SHIFT 2 + /* Enhanced Volatile Configuration Register bits */ #define EVCR_QUAD_EN_MICRON BIT(7) /* Micron Quad I/O */ @@ -225,110 +212,6 @@ static inline u8 spi_nor_get_protocol_width(enum spi_nor_protocol proto) return spi_nor_get_protocol_data_nbits(proto); } -enum spi_nor_option_flags { - SNOR_F_USE_FSR = BIT(0), - SNOR_F_HAS_SR_TB = BIT(1), - SNOR_F_NO_OP_CHIP_ERASE = BIT(2), - SNOR_F_READY_XSR_RDY = BIT(3), - SNOR_F_USE_CLSR = BIT(4), - SNOR_F_BROKEN_RESET = BIT(5), - SNOR_F_4B_OPCODES = BIT(6), - SNOR_F_HAS_4BAIT = BIT(7), - SNOR_F_HAS_LOCK = BIT(8), - SNOR_F_HAS_16BIT_SR = BIT(9), - SNOR_F_NO_READ_CR = BIT(10), - SNOR_F_HAS_SR_TB_BIT6 = BIT(11), - -}; - -/** - * struct spi_nor_erase_type - Structure to describe a SPI NOR erase type - * @size: the size of the sector/block erased by the erase type. - * JEDEC JESD216B imposes erase sizes to be a power of 2. - * @size_shift: @size is a power of 2, the shift is stored in - * @size_shift. - * @size_mask: the size mask based on @size_shift. - * @opcode: the SPI command op code to erase the sector/block. - * @idx: Erase Type index as sorted in the Basic Flash Parameter - * Table. It will be used to synchronize the supported - * Erase Types with the ones identified in the SFDP - * optional tables. - */ -struct spi_nor_erase_type { - u32 size; - u32 size_shift; - u32 size_mask; - u8 opcode; - u8 idx; -}; - -/** - * struct spi_nor_erase_command - Used for non-uniform erases - * The structure is used to describe a list of erase commands to be executed - * once we validate that the erase can be performed. The elements in the list - * are run-length encoded. - * @list: for inclusion into the list of erase commands. - * @count: how many times the same erase command should be - * consecutively used. - * @size: the size of the sector/block erased by the command. - * @opcode: the SPI command op code to erase the sector/block. - */ -struct spi_nor_erase_command { - struct list_head list; - u32 count; - u32 size; - u8 opcode; -}; - -/** - * struct spi_nor_erase_region - Structure to describe a SPI NOR erase region - * @offset: the offset in the data array of erase region start. - * LSB bits are used as a bitmask encoding flags to - * determine if this region is overlaid, if this region is - * the last in the SPI NOR flash memory and to indicate - * all the supported erase commands inside this region. - * The erase types are sorted in ascending order with the - * smallest Erase Type size being at BIT(0). - * @size: the size of the region in bytes. - */ -struct spi_nor_erase_region { - u64 offset; - u64 size; -}; - -#define SNOR_ERASE_TYPE_MAX 4 -#define SNOR_ERASE_TYPE_MASK GENMASK_ULL(SNOR_ERASE_TYPE_MAX - 1, 0) - -#define SNOR_LAST_REGION BIT(4) -#define SNOR_OVERLAID_REGION BIT(5) - -#define SNOR_ERASE_FLAGS_MAX 6 -#define SNOR_ERASE_FLAGS_MASK GENMASK_ULL(SNOR_ERASE_FLAGS_MAX - 1, 0) - -/** - * struct spi_nor_erase_map - Structure to describe the SPI NOR erase map - * @regions: array of erase regions. 
The regions are consecutive in - * address space. Walking through the regions is done - * incrementally. - * @uniform_region: a pre-allocated erase region for SPI NOR with a uniform - * sector size (legacy implementation). - * @erase_type: an array of erase types shared by all the regions. - * The erase types are sorted in ascending order, with the - * smallest Erase Type size being the first member in the - * erase_type array. - * @uniform_erase_type: bitmask encoding erase types that can erase the - * entire memory. This member is completed at init by - * uniform and non-uniform SPI NOR flash memories if they - * support at least one erase type that can erase the - * entire memory. - */ -struct spi_nor_erase_map { - struct spi_nor_erase_region *regions; - struct spi_nor_erase_region uniform_region; - struct spi_nor_erase_type erase_type[SNOR_ERASE_TYPE_MAX]; - u8 uniform_erase_type; -}; - /** * struct spi_nor_hwcaps - Structure for describing the hardware capabilies * supported by the SPI controller (bus master). @@ -404,61 +287,7 @@ struct spi_nor_hwcaps { #define SNOR_HWCAPS_ALL (SNOR_HWCAPS_READ_MASK | \ SNOR_HWCAPS_PP_MASK) -struct spi_nor_read_command { - u8 num_mode_clocks; - u8 num_wait_states; - u8 opcode; - enum spi_nor_protocol proto; -}; - -struct spi_nor_pp_command { - u8 opcode; - enum spi_nor_protocol proto; -}; - -enum spi_nor_read_command_index { - SNOR_CMD_READ, - SNOR_CMD_READ_FAST, - SNOR_CMD_READ_1_1_1_DTR, - - /* Dual SPI */ - SNOR_CMD_READ_1_1_2, - SNOR_CMD_READ_1_2_2, - SNOR_CMD_READ_2_2_2, - SNOR_CMD_READ_1_2_2_DTR, - - /* Quad SPI */ - SNOR_CMD_READ_1_1_4, - SNOR_CMD_READ_1_4_4, - SNOR_CMD_READ_4_4_4, - SNOR_CMD_READ_1_4_4_DTR, - - /* Octal SPI */ - SNOR_CMD_READ_1_1_8, - SNOR_CMD_READ_1_8_8, - SNOR_CMD_READ_8_8_8, - SNOR_CMD_READ_1_8_8_DTR, - - SNOR_CMD_READ_MAX -}; - -enum spi_nor_pp_command_index { - SNOR_CMD_PP, - - /* Quad SPI */ - SNOR_CMD_PP_1_1_4, - SNOR_CMD_PP_1_4_4, - SNOR_CMD_PP_4_4_4, - - /* Octal SPI */ - SNOR_CMD_PP_1_1_8, - SNOR_CMD_PP_1_8_8, - SNOR_CMD_PP_8_8_8, - - SNOR_CMD_PP_MAX -}; - -/* Forward declaration that will be used in 'struct spi_nor_flash_parameter' */ +/* Forward declaration that is used in 'struct spi_nor_controller_ops' */ struct spi_nor; /** @@ -489,68 +318,13 @@ struct spi_nor_controller_ops { int (*erase)(struct spi_nor *nor, loff_t offs); }; -/** - * struct spi_nor_locking_ops - SPI NOR locking methods - * @lock: lock a region of the SPI NOR. - * @unlock: unlock a region of the SPI NOR. - * @is_locked: check if a region of the SPI NOR is completely locked - */ -struct spi_nor_locking_ops { - int (*lock)(struct spi_nor *nor, loff_t ofs, uint64_t len); - int (*unlock)(struct spi_nor *nor, loff_t ofs, uint64_t len); - int (*is_locked)(struct spi_nor *nor, loff_t ofs, uint64_t len); -}; - -/** - * struct spi_nor_flash_parameter - SPI NOR flash parameters and settings. - * Includes legacy flash parameters and settings that can be overwritten - * by the spi_nor_fixups hooks, or dynamically when parsing the JESD216 - * Serial Flash Discoverable Parameters (SFDP) tables. - * - * @size: the flash memory density in bytes. - * @page_size: the page size of the SPI NOR flash memory. - * @hwcaps: describes the read and page program hardware - * capabilities. - * @reads: read capabilities ordered by priority: the higher index - * in the array, the higher priority. - * @page_programs: page program capabilities ordered by priority: the - * higher index in the array, the higher priority. 
- * @erase_map: the erase map parsed from the SFDP Sector Map Parameter - * Table. - * @quad_enable: enables SPI NOR quad mode. - * @set_4byte: puts the SPI NOR in 4 byte addressing mode. - * @convert_addr: converts an absolute address into something the flash - * will understand. Particularly useful when pagesize is - * not a power-of-2. - * @setup: configures the SPI NOR memory. Useful for SPI NOR - * flashes that have peculiarities to the SPI NOR standard - * e.g. different opcodes, specific address calculation, - * page size, etc. - * @locking_ops: SPI NOR locking methods. - */ -struct spi_nor_flash_parameter { - u64 size; - u32 page_size; - - struct spi_nor_hwcaps hwcaps; - struct spi_nor_read_command reads[SNOR_CMD_READ_MAX]; - struct spi_nor_pp_command page_programs[SNOR_CMD_PP_MAX]; - - struct spi_nor_erase_map erase_map; - - int (*quad_enable)(struct spi_nor *nor); - int (*set_4byte)(struct spi_nor *nor, bool enable); - u32 (*convert_addr)(struct spi_nor *nor, u32 addr); - int (*setup)(struct spi_nor *nor, const struct spi_nor_hwcaps *hwcaps); - - const struct spi_nor_locking_ops *locking_ops; -}; - -/** - * struct flash_info - Forward declaration of a structure used internally by - * spi_nor_scan() +/* + * Forward declarations that are used internally by the core and manufacturer + * drivers. */ struct flash_info; +struct spi_nor_manufacturer; +struct spi_nor_flash_parameter; /** * struct spi_nor - Structure for defining a the SPI NOR layer @@ -562,6 +336,7 @@ struct flash_info; * layer is not DMA-able * @bouncebuf_size: size of the bounce buffer * @info: spi-nor part JDEC MFR id and other info + * @manufacturer: spi-nor manufacturer * @page_size: the page size of the SPI NOR * @addr_width: number of address bytes * @erase_opcode: the opcode for erasing a sector @@ -578,6 +353,7 @@ struct flash_info; * The structure includes legacy flash parameters and * settings that can be overwritten by the spi_nor_fixups * hooks, or dynamically when parsing the SFDP tables. + * @dirmap: pointers to struct spi_mem_dirmap_desc for reads/writes. 
* @priv: the private data */ struct spi_nor { @@ -588,6 +364,7 @@ struct spi_nor { u8 *bouncebuf; size_t bouncebuf_size; const struct flash_info *info; + const struct spi_nor_manufacturer *manufacturer; u32 page_size; u8 addr_width; u8 erase_opcode; @@ -602,40 +379,16 @@ struct spi_nor { const struct spi_nor_controller_ops *controller_ops; - struct spi_nor_flash_parameter params; + struct spi_nor_flash_parameter *params; + + struct { + struct spi_mem_dirmap_desc *rdesc; + struct spi_mem_dirmap_desc *wdesc; + } dirmap; void *priv; }; -static u64 __maybe_unused -spi_nor_region_is_last(const struct spi_nor_erase_region *region) -{ - return region->offset & SNOR_LAST_REGION; -} - -static u64 __maybe_unused -spi_nor_region_end(const struct spi_nor_erase_region *region) -{ - return (region->offset & ~SNOR_ERASE_FLAGS_MASK) + region->size; -} - -static void __maybe_unused -spi_nor_region_mark_end(struct spi_nor_erase_region *region) -{ - region->offset |= SNOR_LAST_REGION; -} - -static void __maybe_unused -spi_nor_region_mark_overlay(struct spi_nor_erase_region *region) -{ - region->offset |= SNOR_OVERLAID_REGION; -} - -static bool __maybe_unused spi_nor_has_uniform_erase(const struct spi_nor *nor) -{ - return !!nor->params.erase_map.uniform_erase_type; -} - static inline void spi_nor_set_flash_node(struct spi_nor *nor, struct device_node *np) { diff --git a/include/linux/mtd/spinand.h b/include/linux/mtd/spinand.h index 4ea558bd3c46..1077c45721ff 100644 --- a/include/linux/mtd/spinand.h +++ b/include/linux/mtd/spinand.h @@ -32,9 +32,9 @@ SPI_MEM_OP_NO_DUMMY, \ SPI_MEM_OP_NO_DATA) -#define SPINAND_READID_OP(ndummy, buf, len) \ +#define SPINAND_READID_OP(naddr, ndummy, buf, len) \ SPI_MEM_OP(SPI_MEM_OP_CMD(0x9f, 1), \ - SPI_MEM_OP_NO_ADDR, \ + SPI_MEM_OP_ADDR(naddr, 0, 1), \ SPI_MEM_OP_DUMMY(ndummy, 1), \ SPI_MEM_OP_DATA_IN(len, buf, 1)) @@ -176,37 +176,46 @@ struct spinand_device; * @data: buffer containing the id bytes. Currently 4 bytes large, but can * be extended if required * @len: ID length - * - * struct_spinand_id->data contains all bytes returned after a READ_ID command, - * including dummy bytes if the chip does not emit ID bytes right after the - * READ_ID command. The responsibility to extract real ID bytes is left to - * struct_manufacurer_ops->detect(). */ struct spinand_id { u8 data[SPINAND_MAX_ID_LEN]; int len; }; +enum spinand_readid_method { + SPINAND_READID_METHOD_OPCODE, + SPINAND_READID_METHOD_OPCODE_ADDR, + SPINAND_READID_METHOD_OPCODE_DUMMY, +}; + +/** + * struct spinand_devid - SPI NAND device id structure + * @id: device id of current chip + * @len: number of bytes in device id + * @method: method to read chip id + * There are 3 possible variants: + * SPINAND_READID_METHOD_OPCODE: chip id is returned immediately + * after read_id opcode. + * SPINAND_READID_METHOD_OPCODE_ADDR: chip id is returned after + * read_id opcode + 1-byte address. + * SPINAND_READID_METHOD_OPCODE_DUMMY: chip id is returned after + * read_id opcode + 1 dummy byte. + */ +struct spinand_devid { + const u8 *id; + const u8 len; + const enum spinand_readid_method method; +}; + /** * struct manufacurer_ops - SPI NAND manufacturer specific operations - * @detect: detect a SPI NAND device. Every time a SPI NAND device is probed - * the core calls the struct_manufacurer_ops->detect() hook of each - * registered manufacturer until one of them return 1. 
Note that - * the first thing to check in this hook is that the manufacturer ID - * in struct_spinand_device->id matches the manufacturer whose - * ->detect() hook has been called. Should return 1 if there's a - * match, 0 if the manufacturer ID does not match and a negative - * error code otherwise. When true is returned, the core assumes - * that properties of the NAND chip (spinand->base.memorg and - * spinand->base.eccreq) have been filled * @init: initialize a SPI NAND device * @cleanup: cleanup a SPI NAND device * * Each SPI NAND manufacturer driver should implement this interface so that - * NAND chips coming from this vendor can be detected and initialized properly. + * NAND chips coming from this vendor can be initialized properly. */ struct spinand_manufacturer_ops { - int (*detect)(struct spinand_device *spinand); int (*init)(struct spinand_device *spinand); void (*cleanup)(struct spinand_device *spinand); }; @@ -215,11 +224,16 @@ struct spinand_manufacturer_ops { * struct spinand_manufacturer - SPI NAND manufacturer instance * @id: manufacturer ID * @name: manufacturer name + * @devid_len: number of bytes in device ID + * @chips: supported SPI NANDs under current manufacturer + * @nchips: number of SPI NANDs available in chips array * @ops: manufacturer operations */ struct spinand_manufacturer { u8 id; char *name; + const struct spinand_info *chips; + const size_t nchips; const struct spinand_manufacturer_ops *ops; }; @@ -270,6 +284,7 @@ struct spinand_ecc_info { }; #define SPINAND_HAS_QE_BIT BIT(0) +#define SPINAND_HAS_CR_FEAT_BIT BIT(1) /** * struct spinand_info - Structure used to describe SPI NAND chips @@ -291,7 +306,7 @@ struct spinand_ecc_info { */ struct spinand_info { const char *model; - u16 devid; + struct spinand_devid devid; u32 flags; struct nand_memory_organization memorg; struct nand_ecc_req eccreq; @@ -305,6 +320,13 @@ struct spinand_info { unsigned int target); }; +#define SPINAND_ID(__method, ...) 
\ + { \ + .id = (const u8[]){ __VA_ARGS__ }, \ + .len = sizeof((u8[]){ __VA_ARGS__ }), \ + .method = __method, \ + } + #define SPINAND_INFO_OP_VARIANTS(__read, __write, __update) \ { \ .read_cache = __read, \ @@ -451,9 +473,10 @@ static inline void spinand_set_of_node(struct spinand_device *spinand, nanddev_set_of_node(&spinand->base, np); } -int spinand_match_and_init(struct spinand_device *dev, +int spinand_match_and_init(struct spinand_device *spinand, const struct spinand_info *table, - unsigned int table_size, u16 devid); + unsigned int table_size, + enum spinand_readid_method rdid_method); int spinand_upd_cfg(struct spinand_device *spinand, u8 mask, u8 val); int spinand_select_target(struct spinand_device *spinand, unsigned int target); diff --git a/include/linux/mutex.h b/include/linux/mutex.h index aca8f36dfac9..ae197cc00cc8 100644 --- a/include/linux/mutex.h +++ b/include/linux/mutex.h @@ -109,8 +109,11 @@ do { \ } while (0) #ifdef CONFIG_DEBUG_LOCK_ALLOC -# define __DEP_MAP_MUTEX_INITIALIZER(lockname) \ - , .dep_map = { .name = #lockname } +# define __DEP_MAP_MUTEX_INITIALIZER(lockname) \ + , .dep_map = { \ + .name = #lockname, \ + .wait_type_inner = LD_WAIT_SLEEP, \ + } #else # define __DEP_MAP_MUTEX_INITIALIZER(lockname) #endif diff --git a/include/linux/namei.h b/include/linux/namei.h index 0dd980d7318f..a4bb992623c4 100644 --- a/include/linux/namei.h +++ b/include/linux/namei.h @@ -15,7 +15,7 @@ enum { MAX_NESTED_LINKS = 8 }; /* * Type of the last component on LOOKUP_PARENT */ -enum {LAST_NORM, LAST_ROOT, LAST_DOT, LAST_DOTDOT, LAST_BIND}; +enum {LAST_NORM, LAST_ROOT, LAST_DOT, LAST_DOTDOT}; /* pathwalk mode */ #define LOOKUP_FOLLOW 0x0001 /* follow links at the end */ @@ -23,6 +23,7 @@ enum {LAST_NORM, LAST_ROOT, LAST_DOT, LAST_DOTDOT, LAST_BIND}; #define LOOKUP_AUTOMOUNT 0x0004 /* force terminal automount */ #define LOOKUP_EMPTY 0x4000 /* accept empty path [user_... 
only] */ #define LOOKUP_DOWN 0x8000 /* follow mounts in the starting point */ +#define LOOKUP_MOUNTPOINT 0x0080 /* follow mounts in the end */ #define LOOKUP_REVAL 0x0020 /* tell ->d_revalidate() to trust no cache */ #define LOOKUP_RCU 0x0040 /* RCU pathwalk mode; semi-internal */ @@ -64,7 +65,6 @@ extern struct dentry *kern_path_create(int, const char *, struct path *, unsigne extern struct dentry *user_path_create(int, const char __user *, struct path *, unsigned int); extern void done_path_create(struct path *, struct dentry *); extern struct dentry *kern_path_locked(const char *, struct path *); -extern int kern_path_mountpoint(int, const char *, struct path *, unsigned int); extern struct dentry *try_lookup_one_len(const char *, struct dentry *, int); extern struct dentry *lookup_one_len(const char *, struct dentry *, int); diff --git a/include/linux/netdev_features.h b/include/linux/netdev_features.h index 34d050bb1ae6..9d53c5ad272c 100644 --- a/include/linux/netdev_features.h +++ b/include/linux/netdev_features.h @@ -83,6 +83,8 @@ enum { NETIF_F_HW_TLS_RECORD_BIT, /* Offload TLS record */ NETIF_F_GRO_FRAGLIST_BIT, /* Fraglist GRO */ + NETIF_F_HW_MACSEC_BIT, /* Offload MACsec operations */ + /* * Add your fresh new feature above and remember to update * netdev_features_strings[] in net/core/ethtool.c and maybe @@ -154,6 +156,7 @@ enum { #define NETIF_F_HW_TLS_RX __NETIF_F(HW_TLS_RX) #define NETIF_F_GRO_FRAGLIST __NETIF_F(GRO_FRAGLIST) #define NETIF_F_GSO_FRAGLIST __NETIF_F(GSO_FRAGLIST) +#define NETIF_F_HW_MACSEC __NETIF_F(HW_MACSEC) /* Finds the next feature with the highest number of the range of start till 0. */ diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 6c3f7032e8d9..130a668049ab 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -53,6 +53,8 @@ struct netpoll_info; struct device; struct phy_device; struct dsa_port; +struct macsec_context; +struct macsec_ops; struct sfp_bus; /* 802.11 specific */ @@ -664,7 +666,7 @@ static inline void netdev_queue_numa_node_write(struct netdev_queue *q, int node struct rps_map { unsigned int len; struct rcu_head rcu; - u16 cpus[0]; + u16 cpus[]; }; #define RPS_MAP_SIZE(_num) (sizeof(struct rps_map) + ((_num) * sizeof(u16))) @@ -686,7 +688,7 @@ struct rps_dev_flow { struct rps_dev_flow_table { unsigned int mask; struct rcu_head rcu; - struct rps_dev_flow flows[0]; + struct rps_dev_flow flows[]; }; #define RPS_DEV_FLOW_TABLE_SIZE(_num) (sizeof(struct rps_dev_flow_table) + \ ((_num) * sizeof(struct rps_dev_flow))) @@ -704,7 +706,7 @@ struct rps_dev_flow_table { struct rps_sock_flow_table { u32 mask; - u32 ents[0] ____cacheline_aligned_in_smp; + u32 ents[] ____cacheline_aligned_in_smp; }; #define RPS_SOCK_FLOW_TABLE_SIZE(_num) (offsetof(struct rps_sock_flow_table, ents[_num])) @@ -767,7 +769,7 @@ struct xps_map { unsigned int len; unsigned int alloc_len; struct rcu_head rcu; - u16 queues[0]; + u16 queues[]; }; #define XPS_MAP_SIZE(_num) (sizeof(struct xps_map) + ((_num) * sizeof(u16))) #define XPS_MIN_MAP_ALLOC ((L1_CACHE_ALIGN(offsetof(struct xps_map, queues[1])) \ @@ -778,7 +780,7 @@ struct xps_map { */ struct xps_dev_maps { struct rcu_head rcu; - struct xps_map __rcu *attr_map[0]; /* Either CPUs map or RXQs map */ + struct xps_map __rcu *attr_map[]; /* Either CPUs map or RXQs map */ }; #define XPS_CPU_DEV_MAPS_SIZE(_tcs) (sizeof(struct xps_dev_maps) + \ @@ -853,6 +855,7 @@ enum tc_setup_type { TC_SETUP_FT, TC_SETUP_QDISC_ETS, TC_SETUP_QDISC_TBF, + TC_SETUP_QDISC_FIFO, }; /* These structures hold 
the attributes of bpf state that are being passed @@ -1818,6 +1821,8 @@ enum netdev_priv_flags { * that follow this device when it is moved * to another network namespace. * + * @macsec_ops: MACsec offloading ops + * * FIXME: cleanup struct net_device such that network protocol info * moves out. */ @@ -2112,6 +2117,11 @@ struct net_device { unsigned wol_enabled:1; struct list_head net_notifier_list; + +#if IS_ENABLED(CONFIG_MACSEC) + /* MACsec management functions */ + const struct macsec_ops *macsec_ops; +#endif }; #define to_net_dev(d) container_of(d, struct net_device, dev) @@ -3767,7 +3777,7 @@ struct sk_buff *dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev, typedef int (*bpf_op_t)(struct net_device *dev, struct netdev_bpf *bpf); int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack, - int fd, u32 flags); + int fd, int expected_fd, u32 flags); u32 __dev_xdp_query(struct net_device *dev, bpf_op_t xdp_op, enum bpf_netdev_command cmd); int xdp_umem_query(struct net_device *dev, u16 queue_id); diff --git a/include/linux/netfilter/ipset/ip_set.h b/include/linux/netfilter/ipset/ip_set.h index 908d38dbcb91..ab192720e2d6 100644 --- a/include/linux/netfilter/ipset/ip_set.h +++ b/include/linux/netfilter/ipset/ip_set.h @@ -98,7 +98,7 @@ struct ip_set_counter { struct ip_set_comment_rcu { struct rcu_head rcu; - char str[0]; + char str[]; }; struct ip_set_comment { @@ -121,6 +121,7 @@ struct ip_set_ext { u32 timeout; u8 packets_op; u8 bytes_op; + bool target; }; struct ip_set; @@ -187,6 +188,14 @@ struct ip_set_type_variant { /* Return true if "b" set is the same as "a" * according to the create set parameters */ bool (*same_set)(const struct ip_set *a, const struct ip_set *b); + /* Region-locking is used */ + bool region_lock; +}; + +struct ip_set_region { + spinlock_t lock; /* Region lock */ + size_t ext_size; /* Size of the dynamic extensions */ + u32 elements; /* Number of elements vs timeout */ }; /* The core set type structure */ @@ -501,7 +510,7 @@ ip_set_init_skbinfo(struct ip_set_skbinfo *skbinfo, } #define IP_SET_INIT_KEXT(skb, opt, set) \ - { .bytes = (skb)->len, .packets = 1, \ + { .bytes = (skb)->len, .packets = 1, .target = true,\ .timeout = ip_set_adt_opt_timeout(opt, set) } #define IP_SET_INIT_UEXT(set) \ diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h index 1b261c51b3a3..5da88451853b 100644 --- a/include/linux/netfilter/x_tables.h +++ b/include/linux/netfilter/x_tables.h @@ -264,7 +264,7 @@ struct xt_table_info { unsigned int stacksize; void ***jumpstack; - unsigned char entries[0] __aligned(8); + unsigned char entries[] __aligned(8); }; int xt_register_target(struct xt_target *target); @@ -464,7 +464,7 @@ struct compat_xt_entry_match { } kernel; u_int16_t match_size; } u; - unsigned char data[0]; + unsigned char data[]; }; struct compat_xt_entry_target { @@ -480,7 +480,7 @@ struct compat_xt_entry_target { } kernel; u_int16_t target_size; } u; - unsigned char data[0]; + unsigned char data[]; }; /* FIXME: this works only on 32 bit tasks @@ -494,7 +494,7 @@ struct compat_xt_counters { struct compat_xt_counters_info { char name[XT_TABLE_MAXNAMELEN]; compat_uint_t num_counters; - struct compat_xt_counters counters[0]; + struct compat_xt_counters counters[]; }; struct _compat_xt_align { diff --git a/include/linux/netfilter_arp/arp_tables.h b/include/linux/netfilter_arp/arp_tables.h index e98028f00e47..7d3537c40ec9 100644 --- a/include/linux/netfilter_arp/arp_tables.h +++ 
b/include/linux/netfilter_arp/arp_tables.h @@ -67,7 +67,7 @@ struct compat_arpt_entry { __u16 next_offset; compat_uint_t comefrom; struct compat_xt_counters counters; - unsigned char elems[0]; + unsigned char elems[]; }; static inline struct xt_entry_target * diff --git a/include/linux/netfilter_bridge/ebtables.h b/include/linux/netfilter_bridge/ebtables.h index 162f59d0d17a..2f5c4e6ecd8a 100644 --- a/include/linux/netfilter_bridge/ebtables.h +++ b/include/linux/netfilter_bridge/ebtables.h @@ -85,7 +85,7 @@ struct ebt_table_info { /* room to maintain the stack used for jumping from and into udc */ struct ebt_chainstack **chainstack; char *entries; - struct ebt_counter counters[0] ____cacheline_aligned; + struct ebt_counter counters[] ____cacheline_aligned; }; struct ebt_table { diff --git a/include/linux/netfilter_ipv4/ip_tables.h b/include/linux/netfilter_ipv4/ip_tables.h index e9e1ed74cdf1..b394bd4f68a3 100644 --- a/include/linux/netfilter_ipv4/ip_tables.h +++ b/include/linux/netfilter_ipv4/ip_tables.h @@ -76,7 +76,7 @@ struct compat_ipt_entry { __u16 next_offset; compat_uint_t comefrom; struct compat_xt_counters counters; - unsigned char elems[0]; + unsigned char elems[]; }; /* Helper functions */ diff --git a/include/linux/netfilter_ipv6/ip6_tables.h b/include/linux/netfilter_ipv6/ip6_tables.h index 78ab959c4575..8225f7821a29 100644 --- a/include/linux/netfilter_ipv6/ip6_tables.h +++ b/include/linux/netfilter_ipv6/ip6_tables.h @@ -43,7 +43,7 @@ struct compat_ip6t_entry { __u16 next_offset; compat_uint_t comefrom; struct compat_xt_counters counters; - unsigned char elems[0]; + unsigned char elems[]; }; static inline struct xt_entry_target * diff --git a/include/linux/netlink.h b/include/linux/netlink.h index 205fa7b1f07a..e3e49f0e5c13 100644 --- a/include/linux/netlink.h +++ b/include/linux/netlink.h @@ -115,6 +115,19 @@ static inline void nl_set_extack_cookie_u64(struct netlink_ext_ack *extack, { u64 __cookie = cookie; + if (!extack) + return; + memcpy(extack->cookie, &__cookie, sizeof(__cookie)); + extack->cookie_len = sizeof(__cookie); +} + +static inline void nl_set_extack_cookie_u32(struct netlink_ext_ack *extack, + u32 cookie) +{ + u32 __cookie = cookie; + + if (!extack) + return; memcpy(extack->cookie, &__cookie, sizeof(__cookie)); extack->cookie_len = sizeof(__cookie); } @@ -188,10 +201,10 @@ struct netlink_callback { struct module *module; struct netlink_ext_ack *extack; u16 family; - u16 min_dump_alloc; - bool strict_check; u16 answer_flags; + u32 min_dump_alloc; unsigned int prev_seq, seq; + bool strict_check; union { u8 ctx[48]; diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h index 5d5b91e54f73..73eda45f1cfd 100644 --- a/include/linux/nfs_fs.h +++ b/include/linux/nfs_fs.h @@ -354,6 +354,7 @@ static inline unsigned long nfs_save_change_attribute(struct inode *dir) extern int nfs_sync_mapping(struct address_space *mapping); extern void nfs_zap_mapping(struct inode *inode, struct address_space *mapping); extern void nfs_zap_caches(struct inode *); +extern void nfs_set_inode_stale(struct inode *inode); extern void nfs_invalidate_atime(struct inode *); extern struct inode *nfs_fhget(struct super_block *, struct nfs_fh *, struct nfs_fattr *, struct nfs4_label *); diff --git a/include/linux/nfs_page.h b/include/linux/nfs_page.h index 0bbd587fac6a..c32c15216da3 100644 --- a/include/linux/nfs_page.h +++ b/include/linux/nfs_page.h @@ -139,9 +139,14 @@ extern size_t nfs_generic_pg_test(struct nfs_pageio_descriptor *desc, extern int nfs_wait_on_request(struct nfs_page *); 
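Several hunks in this section replace zero-length trailing arrays (member[0]) with C99 flexible array members (member[]), e.g. in struct rps_map and the netfilter entry structures; the allocation-size arithmetic such as RPS_MAP_SIZE() is unchanged. A minimal, self-contained sketch of the pattern (demo_map and DEMO_MAP_SIZE are made-up names, not kernel structures):

/*
 * Sketch of the [0] -> [] flexible-array-member conversion seen in these
 * hunks: the trailing array contributes no size of its own, and callers
 * allocate sizeof(struct) plus room for the entries they need.
 */
#include <stdio.h>
#include <stdlib.h>

struct demo_map {
    unsigned int len;
    unsigned short cpus[];     /* was "cpus[0]" before the conversion */
};

/* Bytes needed for n entries, mirroring the RPS_MAP_SIZE() style. */
#define DEMO_MAP_SIZE(n) (sizeof(struct demo_map) + (n) * sizeof(unsigned short))

int main(void)
{
    unsigned int i, n = 4;
    struct demo_map *map = malloc(DEMO_MAP_SIZE(n));

    if (!map)
        return 1;

    map->len = n;
    for (i = 0; i < n; i++)
        map->cpus[i] = (unsigned short)i;

    printf("%zu bytes for %u entries\n", DEMO_MAP_SIZE(n), map->len);
    free(map);
    return 0;
}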
extern void nfs_unlock_request(struct nfs_page *req); extern void nfs_unlock_and_release_request(struct nfs_page *); +extern struct nfs_page *nfs_page_group_lock_head(struct nfs_page *req); +extern int nfs_page_group_lock_subrequests(struct nfs_page *head); +extern void nfs_join_page_group(struct nfs_page *head, struct inode *inode); extern int nfs_page_group_lock(struct nfs_page *); extern void nfs_page_group_unlock(struct nfs_page *); extern bool nfs_page_group_sync_on_bit(struct nfs_page *, unsigned int); +extern int nfs_page_set_headlock(struct nfs_page *req); +extern void nfs_page_clear_headlock(struct nfs_page *req); extern bool nfs_async_iocounter_wait(struct rpc_task *, struct nfs_lock_context *); /* diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h index 94c77ed55ce1..440230488025 100644 --- a/include/linux/nfs_xdr.h +++ b/include/linux/nfs_xdr.h @@ -75,6 +75,7 @@ struct nfs_fattr { struct nfs4_string *owner_name; struct nfs4_string *group_name; struct nfs4_threshold *mdsthreshold; /* pNFS threshold hints */ + struct nfs4_label *label; }; #define NFS_ATTR_FATTR_TYPE (1U << 0) @@ -1265,16 +1266,25 @@ struct nfstime4 { struct pnfs_commit_bucket { struct list_head written; struct list_head committing; - struct pnfs_layout_segment *wlseg; - struct pnfs_layout_segment *clseg; + struct pnfs_layout_segment *lseg; struct nfs_writeverf direct_verf; }; +struct pnfs_commit_array { + struct list_head cinfo_list; + struct list_head lseg_list; + struct pnfs_layout_segment *lseg; + struct rcu_head rcu; + refcount_t refcount; + unsigned int nbuckets; + struct pnfs_commit_bucket buckets[]; +}; + struct pnfs_ds_commit_info { - int nwritten; - int ncommitting; - int nbuckets; - struct pnfs_commit_bucket *buckets; + struct list_head commits; + unsigned int nwritten; + unsigned int ncommitting; + const struct pnfs_commit_ops *ops; }; struct nfs41_state_protection { @@ -1385,22 +1395,11 @@ struct nfs41_free_stateid_res { unsigned int status; }; -static inline void -nfs_free_pnfs_ds_cinfo(struct pnfs_ds_commit_info *cinfo) -{ - kfree(cinfo->buckets); -} - #else struct pnfs_ds_commit_info { }; -static inline void -nfs_free_pnfs_ds_cinfo(struct pnfs_ds_commit_info *cinfo) -{ -} - #endif /* CONFIG_NFS_V4_1 */ #ifdef CONFIG_NFS_V4_2 diff --git a/include/linux/nvmem-consumer.h b/include/linux/nvmem-consumer.h index d3776be48c53..1b311d27c9b8 100644 --- a/include/linux/nvmem-consumer.h +++ b/include/linux/nvmem-consumer.h @@ -63,6 +63,7 @@ void *nvmem_cell_read(struct nvmem_cell *cell, size_t *len); int nvmem_cell_write(struct nvmem_cell *cell, void *buf, size_t len); int nvmem_cell_read_u16(struct device *dev, const char *cell_id, u16 *val); int nvmem_cell_read_u32(struct device *dev, const char *cell_id, u32 *val); +int nvmem_cell_read_u64(struct device *dev, const char *cell_id, u64 *val); /* direct nvmem device read/write interface */ struct nvmem_device *nvmem_device_get(struct device *dev, const char *name); @@ -138,6 +139,12 @@ static inline int nvmem_cell_read_u32(struct device *dev, return -EOPNOTSUPP; } +static inline int nvmem_cell_read_u64(struct device *dev, + const char *cell_id, u64 *val) +{ + return -EOPNOTSUPP; +} + static inline struct nvmem_device *nvmem_device_get(struct device *dev, const char *name) { diff --git a/include/linux/of_address.h b/include/linux/of_address.h index eac7ab109df4..763022ed3456 100644 --- a/include/linux/of_address.h +++ b/include/linux/of_address.h @@ -10,21 +10,27 @@ struct of_pci_range_parser { struct device_node *node; const __be32 *range; const 
__be32 *end; - int np; + int na; + int ns; int pna; bool dma; }; +#define of_range_parser of_pci_range_parser struct of_pci_range { - u32 pci_space; - u64 pci_addr; + union { + u64 pci_addr; + u64 bus_addr; + }; u64 cpu_addr; u64 size; u32 flags; }; +#define of_range of_pci_range #define for_each_of_pci_range(parser, range) \ for (; of_pci_range_parser_one(parser, range);) +#define for_each_of_range for_each_of_pci_range /* Translate a DMA address from device space to CPU space */ extern u64 of_translate_dma_address(struct device_node *dev, @@ -143,4 +149,3 @@ static inline int of_pci_range_to_resource(struct of_pci_range *range, #endif /* CONFIG_OF_ADDRESS && CONFIG_PCI */ #endif /* __OF_ADDRESS_H */ - diff --git a/include/linux/of_clk.h b/include/linux/of_clk.h index c86fcad23fc2..31b73a0da9db 100644 --- a/include/linux/of_clk.h +++ b/include/linux/of_clk.h @@ -11,17 +11,17 @@ struct of_device_id; #if defined(CONFIG_COMMON_CLK) && defined(CONFIG_OF) -unsigned int of_clk_get_parent_count(struct device_node *np); -const char *of_clk_get_parent_name(struct device_node *np, int index); +unsigned int of_clk_get_parent_count(const struct device_node *np); +const char *of_clk_get_parent_name(const struct device_node *np, int index); void of_clk_init(const struct of_device_id *matches); #else /* !CONFIG_COMMON_CLK || !CONFIG_OF */ -static inline unsigned int of_clk_get_parent_count(struct device_node *np) +static inline unsigned int of_clk_get_parent_count(const struct device_node *np) { return 0; } -static inline const char *of_clk_get_parent_name(struct device_node *np, +static inline const char *of_clk_get_parent_name(const struct device_node *np, int index) { return NULL; diff --git a/include/linux/of_gpio.h b/include/linux/of_gpio.h index 16967390a3fe..f821095218b0 100644 --- a/include/linux/of_gpio.h +++ b/include/linux/of_gpio.h @@ -11,9 +11,8 @@ #define __LINUX_OF_GPIO_H #include <linux/compiler.h> -#include <linux/kernel.h> -#include <linux/errno.h> -#include <linux/gpio.h> +#include <linux/gpio/driver.h> +#include <linux/gpio.h> /* FIXME: Shouldn't be here */ #include <linux/of.h> struct device_node; @@ -34,6 +33,8 @@ enum of_gpio_flags { #ifdef CONFIG_OF_GPIO +#include <linux/kernel.h> + /* * OF GPIO chip for memory mapped banks */ @@ -63,6 +64,8 @@ extern void of_mm_gpiochip_remove(struct of_mm_gpio_chip *mm_gc); #else /* CONFIG_OF_GPIO */ +#include <linux/errno.h> + /* Drivers may not strictly depend on the GPIO support, so let them link. */ static inline int of_get_named_gpio_flags(struct device_node *np, const char *list_name, int index, enum of_gpio_flags *flags) diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h index 1bf83c8fcaa7..222f6f7b2bb3 100644 --- a/include/linux/page-flags.h +++ b/include/linux/page-flags.h @@ -63,6 +63,11 @@ * page_waitqueue(page) is a wait queue of all tasks waiting for the page * to become unlocked. * + * PG_swapbacked is set when a page uses swap as a backing storage. This are + * usually PageAnon or shmem pages but please note that even anonymous pages + * might lose their PG_swapbacked flag when they simply can be dropped (e.g. as + * a result of MADV_FREE). + * * PG_uptodate tells whether the page's contents is valid. When a read * completes, the page becomes uptodate, unless a disk I/O error happened. * @@ -163,6 +168,9 @@ enum pageflags { /* non-lru isolated movable page */ PG_isolated = PG_reclaim, + + /* Only valid for buddy pages. 
Used to track pages that are reported */ + PG_reported = PG_uptodate, }; #ifndef __GENERATING_BOUNDS_H @@ -311,7 +319,7 @@ static inline int TestClearPage##uname(struct page *page) { return 0; } __PAGEFLAG(Locked, locked, PF_NO_TAIL) PAGEFLAG(Waiters, waiters, PF_ONLY_HEAD) __CLEARPAGEFLAG(Waiters, waiters, PF_ONLY_HEAD) -PAGEFLAG(Error, error, PF_NO_COMPOUND) TESTCLEARFLAG(Error, error, PF_NO_COMPOUND) +PAGEFLAG(Error, error, PF_NO_TAIL) TESTCLEARFLAG(Error, error, PF_NO_TAIL) PAGEFLAG(Referenced, referenced, PF_HEAD) TESTCLEARFLAG(Referenced, referenced, PF_HEAD) __SETPAGEFLAG(Referenced, referenced, PF_HEAD) @@ -432,6 +440,14 @@ PAGEFLAG(Idle, idle, PF_ANY) #endif /* + * PageReported() is used to track reported free pages within the Buddy + * allocator. We can use the non-atomic version of the test and set + * operations as both should be shielded with the zone lock to prevent + * any possible races on the setting or clearing of the bit. + */ +__PAGEFLAG(Reported, reported, PF_NO_COMPOUND) + +/* * On an anonymous page mapped into a user virtual memory area, * page->mapping points to its anon_vma, not to a struct address_space; * with the PAGE_MAPPING_ANON bit set to distinguish it. See rmap.h. diff --git a/include/linux/page_ref.h b/include/linux/page_ref.h index 14d14beb1f7f..d27701199a4d 100644 --- a/include/linux/page_ref.h +++ b/include/linux/page_ref.h @@ -102,6 +102,15 @@ static inline void page_ref_sub(struct page *page, int nr) __page_ref_mod(page, -nr); } +static inline int page_ref_sub_return(struct page *page, int nr) +{ + int ret = atomic_sub_return(nr, &page->_refcount); + + if (page_ref_tracepoint_active(__tracepoint_page_ref_mod_and_return)) + __page_ref_mod_and_return(page, -nr, ret); + return ret; +} + static inline void page_ref_inc(struct page *page) { atomic_inc(&page->_refcount); diff --git a/include/linux/page_reporting.h b/include/linux/page_reporting.h new file mode 100644 index 000000000000..3b99e0ec24f2 --- /dev/null +++ b/include/linux/page_reporting.h @@ -0,0 +1,26 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_PAGE_REPORTING_H +#define _LINUX_PAGE_REPORTING_H + +#include <linux/mmzone.h> +#include <linux/scatterlist.h> + +/* This value should always be a power of 2, see page_reporting_cycle() */ +#define PAGE_REPORTING_CAPACITY 32 + +struct page_reporting_dev_info { + /* function that alters pages to make them "reported" */ + int (*report)(struct page_reporting_dev_info *prdev, + struct scatterlist *sg, unsigned int nents); + + /* work struct for processing reports */ + struct delayed_work work; + + /* Current state of page reporting */ + atomic_t state; +}; + +/* Tear-down and bring-up for page reporting devices */ +void page_reporting_unregister(struct page_reporting_dev_info *prdev); +int page_reporting_register(struct page_reporting_dev_info *prdev); +#endif /*_LINUX_PAGE_REPORTING_H */ diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h index ccb14b6a16b5..a8f7bd8ea1c6 100644 --- a/include/linux/pagemap.h +++ b/include/linux/pagemap.h @@ -33,8 +33,8 @@ enum mapping_flags { /** * mapping_set_error - record a writeback error in the address_space - * @mapping - the mapping in which an error should be set - * @error - the error to set in the mapping + * @mapping: the mapping in which an error should be set + * @error: the error to set in the mapping * * When writeback fails in some way, we must record that error so that * userspace can be informed when fsync and the like are called. 
We endeavor @@ -70,11 +70,9 @@ static inline void mapping_clear_unevictable(struct address_space *mapping) clear_bit(AS_UNEVICTABLE, &mapping->flags); } -static inline int mapping_unevictable(struct address_space *mapping) +static inline bool mapping_unevictable(struct address_space *mapping) { - if (mapping) - return test_bit(AS_UNEVICTABLE, &mapping->flags); - return !!mapping; + return mapping && test_bit(AS_UNEVICTABLE, &mapping->flags); } static inline void mapping_set_exiting(struct address_space *mapping) @@ -305,9 +303,9 @@ static inline struct page *find_lock_page(struct address_space *mapping, * atomic allocation! */ static inline struct page *find_or_create_page(struct address_space *mapping, - pgoff_t offset, gfp_t gfp_mask) + pgoff_t index, gfp_t gfp_mask) { - return pagecache_get_page(mapping, offset, + return pagecache_get_page(mapping, index, FGP_LOCK|FGP_ACCESSED|FGP_CREAT, gfp_mask); } @@ -333,14 +331,17 @@ static inline struct page *grab_cache_page_nowait(struct address_space *mapping, mapping_gfp_mask(mapping)); } -static inline struct page *find_subpage(struct page *page, pgoff_t offset) +/* + * Given the page we found in the page cache, return the page corresponding + * to this index in the file + */ +static inline struct page *find_subpage(struct page *head, pgoff_t index) { - if (PageHuge(page)) - return page; - - VM_BUG_ON_PAGE(PageTail(page), page); + /* HugeTLBfs wants the head page regardless */ + if (PageHuge(head)) + return head; - return page + (offset & (compound_nr(page) - 1)); + return head + (index & (hpage_nr_pages(head) - 1)); } struct page *find_get_entry(struct address_space *mapping, pgoff_t offset); diff --git a/include/linux/part_stat.h b/include/linux/part_stat.h new file mode 100644 index 000000000000..ece607607a86 --- /dev/null +++ b/include/linux/part_stat.h @@ -0,0 +1,115 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_PART_STAT_H +#define _LINUX_PART_STAT_H + +#include <linux/genhd.h> + +/* + * Macros to operate on percpu disk statistics: + * + * {disk|part|all}_stat_{add|sub|inc|dec}() modify the stat counters + * and should be called between disk_stat_lock() and + * disk_stat_unlock(). + * + * part_stat_read() can be called at any time. + * + * part_stat_{add|set_all}() and {init|free}_part_stats are for + * internal use only. 
+ */ +#ifdef CONFIG_SMP +#define part_stat_lock() ({ rcu_read_lock(); get_cpu(); }) +#define part_stat_unlock() do { put_cpu(); rcu_read_unlock(); } while (0) + +#define part_stat_get_cpu(part, field, cpu) \ + (per_cpu_ptr((part)->dkstats, (cpu))->field) + +#define part_stat_get(part, field) \ + part_stat_get_cpu(part, field, smp_processor_id()) + +#define part_stat_read(part, field) \ +({ \ + typeof((part)->dkstats->field) res = 0; \ + unsigned int _cpu; \ + for_each_possible_cpu(_cpu) \ + res += per_cpu_ptr((part)->dkstats, _cpu)->field; \ + res; \ +}) + +static inline void part_stat_set_all(struct hd_struct *part, int value) +{ + int i; + + for_each_possible_cpu(i) + memset(per_cpu_ptr(part->dkstats, i), value, + sizeof(struct disk_stats)); +} + +static inline int init_part_stats(struct hd_struct *part) +{ + part->dkstats = alloc_percpu(struct disk_stats); + if (!part->dkstats) + return 0; + return 1; +} + +static inline void free_part_stats(struct hd_struct *part) +{ + free_percpu(part->dkstats); +} + +#else /* !CONFIG_SMP */ +#define part_stat_lock() ({ rcu_read_lock(); 0; }) +#define part_stat_unlock() rcu_read_unlock() + +#define part_stat_get(part, field) ((part)->dkstats.field) +#define part_stat_get_cpu(part, field, cpu) part_stat_get(part, field) +#define part_stat_read(part, field) part_stat_get(part, field) + +static inline void part_stat_set_all(struct hd_struct *part, int value) +{ + memset(&part->dkstats, value, sizeof(struct disk_stats)); +} + +static inline int init_part_stats(struct hd_struct *part) +{ + return 1; +} + +static inline void free_part_stats(struct hd_struct *part) +{ +} + +#endif /* CONFIG_SMP */ + +#define part_stat_read_accum(part, field) \ + (part_stat_read(part, field[STAT_READ]) + \ + part_stat_read(part, field[STAT_WRITE]) + \ + part_stat_read(part, field[STAT_DISCARD])) + +#define __part_stat_add(part, field, addnd) \ + (part_stat_get(part, field) += (addnd)) + +#define part_stat_add(part, field, addnd) do { \ + __part_stat_add((part), field, addnd); \ + if ((part)->partno) \ + __part_stat_add(&part_to_disk((part))->part0, \ + field, addnd); \ +} while (0) + +#define part_stat_dec(gendiskp, field) \ + part_stat_add(gendiskp, field, -1) +#define part_stat_inc(gendiskp, field) \ + part_stat_add(gendiskp, field, 1) +#define part_stat_sub(gendiskp, field, subnd) \ + part_stat_add(gendiskp, field, -subnd) + +#define part_stat_local_dec(gendiskp, field) \ + local_dec(&(part_stat_get(gendiskp, field))) +#define part_stat_local_inc(gendiskp, field) \ + local_inc(&(part_stat_get(gendiskp, field))) +#define part_stat_local_read(gendiskp, field) \ + local_read(&(part_stat_get(gendiskp, field))) +#define part_stat_local_read_cpu(gendiskp, field, cpu) \ + local_read(&(part_stat_get_cpu(gendiskp, field, cpu))) + +#endif /* _LINUX_PART_STAT_H */ diff --git a/include/linux/pci-acpi.h b/include/linux/pci-acpi.h index 62b7fdcc661c..2d155bfb8fbf 100644 --- a/include/linux/pci-acpi.h +++ b/include/linux/pci-acpi.h @@ -112,6 +112,14 @@ extern const guid_t pci_acpi_dsm_guid; #define RESET_DELAY_DSM 0x08 #define FUNCTION_DELAY_DSM 0x09 +#ifdef CONFIG_PCIE_EDR +void pci_acpi_add_edr_notifier(struct pci_dev *pdev); +void pci_acpi_remove_edr_notifier(struct pci_dev *pdev); +#else +static inline void pci_acpi_add_edr_notifier(struct pci_dev *pdev) { } +static inline void pci_acpi_remove_edr_notifier(struct pci_dev *pdev) { } +#endif /* CONFIG_PCIE_EDR */ + #else /* CONFIG_ACPI */ static inline void acpi_pci_add_bus(struct pci_bus *bus) { } static inline void 
acpi_pci_remove_bus(struct pci_bus *bus) { } diff --git a/include/linux/pci-epc.h b/include/linux/pci-epc.h index 56f1846b9d39..e0ed9d01f6e5 100644 --- a/include/linux/pci-epc.h +++ b/include/linux/pci-epc.h @@ -53,7 +53,8 @@ struct pci_epc_ops { phys_addr_t addr); int (*set_msi)(struct pci_epc *epc, u8 func_no, u8 interrupts); int (*get_msi)(struct pci_epc *epc, u8 func_no); - int (*set_msix)(struct pci_epc *epc, u8 func_no, u16 interrupts); + int (*set_msix)(struct pci_epc *epc, u8 func_no, u16 interrupts, + enum pci_barno, u32 offset); int (*get_msix)(struct pci_epc *epc, u8 func_no); int (*raise_irq)(struct pci_epc *epc, u8 func_no, enum pci_epc_irq_type type, u16 interrupt_num); @@ -71,6 +72,7 @@ struct pci_epc_ops { * @bitmap: bitmap to manage the PCI address space * @pages: number of bits representing the address region * @page_size: size of each page + * @lock: mutex to protect bitmap */ struct pci_epc_mem { phys_addr_t phys_base; @@ -78,6 +80,8 @@ struct pci_epc_mem { unsigned long *bitmap; size_t page_size; int pages; + /* mutex to protect against concurrent access for memory allocation*/ + struct mutex lock; }; /** @@ -88,7 +92,9 @@ struct pci_epc_mem { * @mem: address space of the endpoint controller * @max_functions: max number of functions that can be configured in this EPC * @group: configfs group representing the PCI EPC device - * @lock: spinlock to protect pci_epc ops + * @lock: mutex to protect pci_epc ops + * @function_num_map: bitmap to manage physical function number + * @notifier: used to notify EPF of any EPC events (like linkup) */ struct pci_epc { struct device dev; @@ -97,8 +103,10 @@ struct pci_epc { struct pci_epc_mem *mem; u8 max_functions; struct config_group *group; - /* spinlock to protect against concurrent access of EP controller */ - spinlock_t lock; + /* mutex to protect against concurrent access of EP controller */ + struct mutex lock; + unsigned long function_num_map; + struct atomic_notifier_head notifier; }; /** @@ -113,6 +121,7 @@ struct pci_epc { */ struct pci_epc_features { unsigned int linkup_notifier : 1; + unsigned int core_init_notifier : 1; unsigned int msi_capable : 1; unsigned int msix_capable : 1; u8 reserved_bar; @@ -141,6 +150,12 @@ static inline void *epc_get_drvdata(struct pci_epc *epc) return dev_get_drvdata(&epc->dev); } +static inline int +pci_epc_register_notifier(struct pci_epc *epc, struct notifier_block *nb) +{ + return atomic_notifier_chain_register(&epc->notifier, nb); +} + struct pci_epc * __devm_pci_epc_create(struct device *dev, const struct pci_epc_ops *ops, struct module *owner); @@ -151,6 +166,7 @@ void devm_pci_epc_destroy(struct device *dev, struct pci_epc *epc); void pci_epc_destroy(struct pci_epc *epc); int pci_epc_add_epf(struct pci_epc *epc, struct pci_epf *epf); void pci_epc_linkup(struct pci_epc *epc); +void pci_epc_init_notify(struct pci_epc *epc); void pci_epc_remove_epf(struct pci_epc *epc, struct pci_epf *epf); int pci_epc_write_header(struct pci_epc *epc, u8 func_no, struct pci_epf_header *hdr); @@ -165,7 +181,8 @@ void pci_epc_unmap_addr(struct pci_epc *epc, u8 func_no, phys_addr_t phys_addr); int pci_epc_set_msi(struct pci_epc *epc, u8 func_no, u8 interrupts); int pci_epc_get_msi(struct pci_epc *epc, u8 func_no); -int pci_epc_set_msix(struct pci_epc *epc, u8 func_no, u16 interrupts); +int pci_epc_set_msix(struct pci_epc *epc, u8 func_no, u16 interrupts, + enum pci_barno, u32 offset); int pci_epc_get_msix(struct pci_epc *epc, u8 func_no); int pci_epc_raise_irq(struct pci_epc *epc, u8 func_no, enum 
pci_epc_irq_type type, u16 interrupt_num); diff --git a/include/linux/pci-epf.h b/include/linux/pci-epf.h index 2d6f07556682..6644ff3b0702 100644 --- a/include/linux/pci-epf.h +++ b/include/linux/pci-epf.h @@ -15,6 +15,11 @@ struct pci_epf; +enum pci_notify_event { + CORE_INIT, + LINK_UP, +}; + enum pci_barno { BAR_0, BAR_1, @@ -55,13 +60,10 @@ struct pci_epf_header { * @bind: ops to perform when a EPC device has been bound to EPF device * @unbind: ops to perform when a binding has been lost between a EPC device * and EPF device - * @linkup: ops to perform when the EPC device has established a connection with - * a host system */ struct pci_epf_ops { int (*bind)(struct pci_epf *epf); void (*unbind)(struct pci_epf *epf); - void (*linkup)(struct pci_epf *epf); }; /** @@ -92,10 +94,12 @@ struct pci_epf_driver { /** * struct pci_epf_bar - represents the BAR of EPF device * @phys_addr: physical address that should be mapped to the BAR + * @addr: virtual address corresponding to the @phys_addr * @size: the size of the address space present in BAR */ struct pci_epf_bar { dma_addr_t phys_addr; + void *addr; size_t size; enum pci_barno barno; int flags; @@ -112,6 +116,8 @@ struct pci_epf_bar { * @epc: the EPC device to which this EPF device is bound * @driver: the EPF driver to which this EPF device is bound * @list: to add pci_epf as a list of PCI endpoint functions to pci_epc + * @nb: notifier block to notify EPF of any EPC events (like linkup) + * @lock: mutex to protect pci_epf_ops */ struct pci_epf { struct device dev; @@ -125,6 +131,22 @@ struct pci_epf { struct pci_epc *epc; struct pci_epf_driver *driver; struct list_head list; + struct notifier_block nb; + /* mutex to protect against concurrent access of pci_epf_ops */ + struct mutex lock; +}; + +/** + * struct pci_epf_msix_tbl - represents the MSIX table entry structure + * @msg_addr: Writes to this address will trigger MSIX interrupt in host + * @msg_data: Data that should be written to @msg_addr to trigger MSIX interrupt + * @vector_ctrl: Identifies if the function is prohibited from sending a message + * using this MSIX table entry + */ +struct pci_epf_msix_tbl { + u64 msg_addr; + u32 msg_data; + u32 vector_ctrl; }; #define to_pci_epf(epf_dev) container_of((epf_dev), struct pci_epf, dev) @@ -154,5 +176,4 @@ void *pci_epf_alloc_space(struct pci_epf *epf, size_t size, enum pci_barno bar, void pci_epf_free_space(struct pci_epf *epf, void *addr, enum pci_barno bar); int pci_epf_bind(struct pci_epf *epf); void pci_epf_unbind(struct pci_epf *epf); -void pci_epf_linkup(struct pci_epf *epf); #endif /* __LINUX_PCI_EPF_H */ diff --git a/include/linux/pci.h b/include/linux/pci.h index 3840a541a9de..83ce1cdf5676 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -42,6 +42,13 @@ #include <linux/pci_ids.h> +#define PCI_STATUS_ERROR_BITS (PCI_STATUS_DETECTED_PARITY | \ + PCI_STATUS_SIG_SYSTEM_ERROR | \ + PCI_STATUS_REC_MASTER_ABORT | \ + PCI_STATUS_REC_TARGET_ABORT | \ + PCI_STATUS_SIG_TARGET_ABORT | \ + PCI_STATUS_PARITY) + /* * The PCI interface treats multi-function devices as independent * devices. 
The slot/function address of each device is encoded @@ -236,7 +243,7 @@ enum pcie_link_width { PCIE_LNK_WIDTH_UNKNOWN = 0xff, }; -/* Based on the PCI Hotplug Spec, but some values are made up by us */ +/* See matching string table in pci_speed_string() */ enum pci_bus_speed { PCI_SPEED_33MHz = 0x00, PCI_SPEED_66MHz = 0x01, @@ -444,6 +451,11 @@ struct pci_dev { const struct attribute_group **msi_irq_groups; #endif struct pci_vpd *vpd; +#ifdef CONFIG_PCIE_DPC + u16 dpc_cap; + unsigned int dpc_rp_extensions:1; + u8 dpc_rp_log_size; +#endif #ifdef CONFIG_PCI_ATS union { struct pci_sriov *sriov; /* PF: SR-IOV info */ @@ -510,7 +522,9 @@ struct pci_host_bridge { unsigned int native_shpc_hotplug:1; /* OS may use SHPC hotplug */ unsigned int native_pme:1; /* OS may use PCIe PME */ unsigned int native_ltr:1; /* OS may use PCIe LTR */ + unsigned int native_dpc:1; /* OS may use PCIe DPC */ unsigned int preserve_config:1; /* Preserve FW resource setup */ + unsigned int size_windows:1; /* Enable root bus sizing */ /* Resource alignment requirements */ resource_size_t (*align_resource)(struct pci_dev *dev, @@ -1045,6 +1059,8 @@ int pci_find_ht_capability(struct pci_dev *dev, int ht_cap); int pci_find_next_ht_capability(struct pci_dev *dev, int pos, int ht_cap); struct pci_bus *pci_find_next_bus(const struct pci_bus *from); +u64 pci_get_dsn(struct pci_dev *dev); + struct pci_dev *pci_get_device(unsigned int vendor, unsigned int device, struct pci_dev *from); struct pci_dev *pci_get_subsys(unsigned int vendor, unsigned int device, @@ -1203,6 +1219,7 @@ int pci_select_bars(struct pci_dev *dev, unsigned long flags); bool pci_device_is_present(struct pci_dev *pdev); void pci_ignore_hotplug(struct pci_dev *dev); struct pci_dev *pci_real_dma_dev(struct pci_dev *dev); +int pci_status_get_and_clear_errors(struct pci_dev *pdev); int __printf(6, 7) pci_request_irq(struct pci_dev *dev, unsigned int nr, irq_handler_t handler, irq_handler_t thread_fn, void *dev_id, @@ -1214,7 +1231,6 @@ int pci_enable_rom(struct pci_dev *pdev); void pci_disable_rom(struct pci_dev *pdev); void __iomem __must_check *pci_map_rom(struct pci_dev *pdev, size_t *size); void pci_unmap_rom(struct pci_dev *pdev, void __iomem *rom); -void __iomem __must_check *pci_platform_rom(struct pci_dev *pdev, size_t *size); /* Power management related routines */ int pci_save_state(struct pci_dev *dev); @@ -1699,6 +1715,9 @@ static inline int pci_find_next_capability(struct pci_dev *dev, u8 post, static inline int pci_find_ext_capability(struct pci_dev *dev, int cap) { return 0; } +static inline u64 pci_get_dsn(struct pci_dev *dev) +{ return 0; } + /* Power management related routines */ static inline int pci_save_state(struct pci_dev *dev) { return 0; } static inline void pci_restore_state(struct pci_dev *dev) { } @@ -2171,6 +2190,7 @@ int pci_enable_atomic_ops_to_root(struct pci_dev *dev, u32 cap_mask); #define PCI_VPD_INFO_FLD_HDR_SIZE 3 #define PCI_VPD_RO_KEYWORD_PARTNO "PN" +#define PCI_VPD_RO_KEYWORD_SERIALNO "SN" #define PCI_VPD_RO_KEYWORD_MFR_ID "MN" #define PCI_VPD_RO_KEYWORD_VENDOR0 "V0" #define PCI_VPD_RO_KEYWORD_CHKSUM "RV" diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index 352c0d708720..1dfc4e1dcb94 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h @@ -148,6 +148,8 @@ /* Vendors and devices. Sort key: vendor first, device next. 
*/ +#define PCI_VENDOR_ID_LOONGSON 0x0014 + #define PCI_VENDOR_ID_TTTECH 0x0357 #define PCI_DEVICE_ID_TTTECH_MC322 0x000a @@ -2583,6 +2585,8 @@ #define PCI_VENDOR_ID_AMAZON 0x1d0f +#define PCI_VENDOR_ID_ZHAOXIN 0x1d17 + #define PCI_VENDOR_ID_HYGON 0x1d94 #define PCI_VENDOR_ID_HXT 0x1dbf diff --git a/include/linux/pe.h b/include/linux/pe.h index c86bd3a2f70f..8ad71d763a77 100644 --- a/include/linux/pe.h +++ b/include/linux/pe.h @@ -10,6 +10,27 @@ #include <linux/types.h> +/* + * Linux EFI stub v1.0 adds the following functionality: + * - Loading initrd from the LINUX_EFI_INITRD_MEDIA_GUID device path, + * - Loading/starting the kernel from firmware that targets a different + * machine type, via the entrypoint exposed in the .compat PE/COFF section. + * + * The recommended way of loading and starting v1.0 or later kernels is to use + * the LoadImage() and StartImage() EFI boot services, and expose the initrd + * via the LINUX_EFI_INITRD_MEDIA_GUID device path. + * + * Versions older than v1.0 support initrd loading via the image load options + * (using initrd=, limited to the volume from which the kernel itself was + * loaded), or via arch specific means (bootparams, DT, etc). + * + * On x86, LoadImage() and StartImage() can be omitted if the EFI handover + * protocol is implemented, which can be inferred from the version, + * handover_offset and xloadflags fields in the bootparams structure. + */ +#define LINUX_EFISTUB_MAJOR_VERSION 0x1 +#define LINUX_EFISTUB_MINOR_VERSION 0x0 + #define MZ_MAGIC 0x5a4d /* "MZ" */ #define PE_MAGIC 0x00004550 /* "PE\0\0" */ diff --git a/include/linux/percpu-rwsem.h b/include/linux/percpu-rwsem.h index ad2ca2a89d5b..5e033fe1ff4e 100644 --- a/include/linux/percpu-rwsem.h +++ b/include/linux/percpu-rwsem.h @@ -3,41 +3,52 @@ #define _LINUX_PERCPU_RWSEM_H #include <linux/atomic.h> -#include <linux/rwsem.h> #include <linux/percpu.h> #include <linux/rcuwait.h> +#include <linux/wait.h> #include <linux/rcu_sync.h> #include <linux/lockdep.h> struct percpu_rw_semaphore { struct rcu_sync rss; unsigned int __percpu *read_count; - struct rw_semaphore rw_sem; /* slowpath */ - struct rcuwait writer; /* blocked writer */ - int readers_block; + struct rcuwait writer; + wait_queue_head_t waiters; + atomic_t block; +#ifdef CONFIG_DEBUG_LOCK_ALLOC + struct lockdep_map dep_map; +#endif }; +#ifdef CONFIG_DEBUG_LOCK_ALLOC +#define __PERCPU_RWSEM_DEP_MAP_INIT(lockname) .dep_map = { .name = #lockname }, +#else +#define __PERCPU_RWSEM_DEP_MAP_INIT(lockname) +#endif + #define __DEFINE_PERCPU_RWSEM(name, is_static) \ static DEFINE_PER_CPU(unsigned int, __percpu_rwsem_rc_##name); \ is_static struct percpu_rw_semaphore name = { \ .rss = __RCU_SYNC_INITIALIZER(name.rss), \ .read_count = &__percpu_rwsem_rc_##name, \ - .rw_sem = __RWSEM_INITIALIZER(name.rw_sem), \ .writer = __RCUWAIT_INITIALIZER(name.writer), \ + .waiters = __WAIT_QUEUE_HEAD_INITIALIZER(name.waiters), \ + .block = ATOMIC_INIT(0), \ + __PERCPU_RWSEM_DEP_MAP_INIT(name) \ } + #define DEFINE_PERCPU_RWSEM(name) \ __DEFINE_PERCPU_RWSEM(name, /* not static */) #define DEFINE_STATIC_PERCPU_RWSEM(name) \ __DEFINE_PERCPU_RWSEM(name, static) -extern int __percpu_down_read(struct percpu_rw_semaphore *, int); -extern void __percpu_up_read(struct percpu_rw_semaphore *); +extern bool __percpu_down_read(struct percpu_rw_semaphore *, bool); static inline void percpu_down_read(struct percpu_rw_semaphore *sem) { might_sleep(); - rwsem_acquire_read(&sem->rw_sem.dep_map, 0, 0, _RET_IP_); + rwsem_acquire_read(&sem->dep_map, 0, 0, _RET_IP_); 
preempt_disable(); /* @@ -48,8 +59,9 @@ static inline void percpu_down_read(struct percpu_rw_semaphore *sem) * and that once the synchronize_rcu() is done, the writer will see * anything we did within this RCU-sched read-size critical section. */ - __this_cpu_inc(*sem->read_count); - if (unlikely(!rcu_sync_is_idle(&sem->rss))) + if (likely(rcu_sync_is_idle(&sem->rss))) + __this_cpu_inc(*sem->read_count); + else __percpu_down_read(sem, false); /* Unconditional memory barrier */ /* * The preempt_enable() prevents the compiler from @@ -58,16 +70,17 @@ static inline void percpu_down_read(struct percpu_rw_semaphore *sem) preempt_enable(); } -static inline int percpu_down_read_trylock(struct percpu_rw_semaphore *sem) +static inline bool percpu_down_read_trylock(struct percpu_rw_semaphore *sem) { - int ret = 1; + bool ret = true; preempt_disable(); /* * Same as in percpu_down_read(). */ - __this_cpu_inc(*sem->read_count); - if (unlikely(!rcu_sync_is_idle(&sem->rss))) + if (likely(rcu_sync_is_idle(&sem->rss))) + __this_cpu_inc(*sem->read_count); + else ret = __percpu_down_read(sem, true); /* Unconditional memory barrier */ preempt_enable(); /* @@ -76,24 +89,36 @@ static inline int percpu_down_read_trylock(struct percpu_rw_semaphore *sem) */ if (ret) - rwsem_acquire_read(&sem->rw_sem.dep_map, 0, 1, _RET_IP_); + rwsem_acquire_read(&sem->dep_map, 0, 1, _RET_IP_); return ret; } static inline void percpu_up_read(struct percpu_rw_semaphore *sem) { + rwsem_release(&sem->dep_map, _RET_IP_); + preempt_disable(); /* * Same as in percpu_down_read(). */ - if (likely(rcu_sync_is_idle(&sem->rss))) + if (likely(rcu_sync_is_idle(&sem->rss))) { __this_cpu_dec(*sem->read_count); - else - __percpu_up_read(sem); /* Unconditional memory barrier */ + } else { + /* + * slowpath; reader will only ever wake a single blocked + * writer. + */ + smp_mb(); /* B matches C */ + /* + * In other words, if they see our decrement (presumably to + * aggregate zero, as that is the only time it matters) they + * will also see our critical section. 
+ */ + __this_cpu_dec(*sem->read_count); + rcuwait_wake_up(&sem->writer); + } preempt_enable(); - - rwsem_release(&sem->rw_sem.dep_map, _RET_IP_); } extern void percpu_down_write(struct percpu_rw_semaphore *); @@ -110,29 +135,19 @@ extern void percpu_free_rwsem(struct percpu_rw_semaphore *); __percpu_init_rwsem(sem, #sem, &rwsem_key); \ }) -#define percpu_rwsem_is_held(sem) lockdep_is_held(&(sem)->rw_sem) - -#define percpu_rwsem_assert_held(sem) \ - lockdep_assert_held(&(sem)->rw_sem) +#define percpu_rwsem_is_held(sem) lockdep_is_held(sem) +#define percpu_rwsem_assert_held(sem) lockdep_assert_held(sem) static inline void percpu_rwsem_release(struct percpu_rw_semaphore *sem, bool read, unsigned long ip) { - lock_release(&sem->rw_sem.dep_map, ip); -#ifdef CONFIG_RWSEM_SPIN_ON_OWNER - if (!read) - atomic_long_set(&sem->rw_sem.owner, RWSEM_OWNER_UNKNOWN); -#endif + lock_release(&sem->dep_map, ip); } static inline void percpu_rwsem_acquire(struct percpu_rw_semaphore *sem, bool read, unsigned long ip) { - lock_acquire(&sem->rw_sem.dep_map, 0, 1, read, 1, NULL, ip); -#ifdef CONFIG_RWSEM_SPIN_ON_OWNER - if (!read) - atomic_long_set(&sem->rw_sem.owner, (long)current); -#endif + lock_acquire(&sem->dep_map, 0, 1, read, 1, NULL, ip); } #endif diff --git a/include/linux/percpu_counter.h b/include/linux/percpu_counter.h index 4f052496cdfd..0a4f54dd4737 100644 --- a/include/linux/percpu_counter.h +++ b/include/linux/percpu_counter.h @@ -78,9 +78,9 @@ static inline s64 percpu_counter_read(struct percpu_counter *fbc) */ static inline s64 percpu_counter_read_positive(struct percpu_counter *fbc) { - s64 ret = fbc->count; + /* Prevent reloads of fbc->count */ + s64 ret = READ_ONCE(fbc->count); - barrier(); /* Prevent reloads of fbc->count */ if (ret >= 0) return ret; return 0; diff --git a/include/linux/perf/arm_pmu.h b/include/linux/perf/arm_pmu.h index 71f525a35ac2..5b616dde9a4c 100644 --- a/include/linux/perf/arm_pmu.h +++ b/include/linux/perf/arm_pmu.h @@ -80,6 +80,7 @@ struct arm_pmu { struct pmu pmu; cpumask_t supported_cpus; char *name; + int pmuver; irqreturn_t (*handle_irq)(struct arm_pmu *pmu); void (*enable)(struct perf_event *event); void (*disable)(struct perf_event *event); diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index 547773f5894e..9c3e7619c929 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -93,14 +93,26 @@ struct perf_raw_record { /* * branch stack layout: * nr: number of taken branches stored in entries[] + * hw_idx: The low level index of raw branch records + * for the most recent branch. + * -1ULL means invalid/unknown. * * Note that nr can vary from sample to sample * branches (to, from) are stored from most recent * to least recent, i.e., entries[0] contains the most * recent branch. + * The entries[] is an abstraction of raw branch records, + * which may not be stored in age order in HW, e.g. Intel LBR. + * The hw_idx is to expose the low level index of raw + * branch record for the most recent branch aka entries[0]. + * The hw_idx index is between -1 (unknown) and max depth, + * which can be retrieved in /sys/devices/cpu/caps/branches. + * For the architectures whose raw branch records are + * already stored in age order, the hw_idx should be 0. */ struct perf_branch_stack { __u64 nr; + __u64 hw_idx; struct perf_branch_entry entries[0]; }; @@ -850,6 +862,13 @@ struct perf_cpu_context { int sched_cb_usage; int online; + /* + * Per-CPU storage for iterators used in visit_groups_merge. 
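/*
 * Usage sketch for the percpu rw-semaphore reworked in the percpu-rwsem.h
 * hunks above: the caller-visible API is unchanged by the reimplementation,
 * only the internals drop the embedded rw_semaphore. The semaphore and
 * function names are illustrative.
 */
DEFINE_STATIC_PERCPU_RWSEM(example_rwsem);

static void example_reader(void)
{
        percpu_down_read(&example_rwsem);
        /* read-side critical section; writers are excluded here */
        percpu_up_read(&example_rwsem);
}

static void example_writer(void)
{
        percpu_down_write(&example_rwsem);
        /* write-side critical section; all readers have drained */
        percpu_up_write(&example_rwsem);
}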
The default + * storage is of size 2 to hold the CPU and any CPU event iterators. + */ + int heap_size; + struct perf_event **heap; + struct perf_event *heap_default[2]; }; struct perf_output_handle { @@ -1001,6 +1020,7 @@ struct perf_sample_data { u64 stack_user_size; u64 phys_addr; + u64 cgroup; } ____cacheline_aligned; /* default value for data source */ diff --git a/include/linux/phy.h b/include/linux/phy.h index c570e162e05e..2432ca463ddc 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h @@ -23,6 +23,8 @@ #include <linux/workqueue.h> #include <linux/mod_devicetable.h> #include <linux/u64_stats_sync.h> +#include <linux/irqreturn.h> +#include <linux/iopoll.h> #include <linux/atomic.h> @@ -94,6 +96,7 @@ typedef enum { PHY_INTERFACE_MODE_RTBI, PHY_INTERFACE_MODE_SMII, PHY_INTERFACE_MODE_XGMII, + PHY_INTERFACE_MODE_XLGMII, PHY_INTERFACE_MODE_MOCA, PHY_INTERFACE_MODE_QSGMII, PHY_INTERFACE_MODE_TRGMII, @@ -165,6 +168,8 @@ static inline const char *phy_modes(phy_interface_t interface) return "smii"; case PHY_INTERFACE_MODE_XGMII: return "xgmii"; + case PHY_INTERFACE_MODE_XLGMII: + return "xlgmii"; case PHY_INTERFACE_MODE_MOCA: return "moca"; case PHY_INTERFACE_MODE_QSGMII: @@ -289,6 +294,7 @@ static inline struct mii_bus *devm_mdiobus_alloc(struct device *dev) return devm_mdiobus_alloc_size(dev, 0); } +struct mii_bus *mdio_find_bus(const char *mdio_name); void devm_mdiobus_free(struct device *dev, struct mii_bus *bus); struct phy_device *mdiobus_scan(struct mii_bus *bus, int addr); @@ -357,8 +363,10 @@ struct macsec_ops; * is_gigabit_capable: Set to true if PHY supports 1000Mbps * has_fixups: Set to true if this phy has fixups/quirks. * suspended: Set to true if this phy has been suspended successfully. + * suspended_by_mdio_bus: Set to true if this phy was suspended by MDIO bus. * sysfs_links: Internal boolean tracking sysfs symbolic links setup/removal. * loopback_enabled: Set true if this phy has been loopbacked successfully. + * downshifted_rate: Set true if link speed has been downshifted. * state: state of the PHY for management purposes * dev_flags: Device-specific flags used by the PHY driver. * irq: IRQ number of the PHY's interrupt (-1 if none) @@ -396,8 +404,10 @@ struct phy_device { unsigned is_gigabit_capable:1; unsigned has_fixups:1; unsigned suspended:1; + unsigned suspended_by_mdio_bus:1; unsigned sysfs_links:1; unsigned loopback_enabled:1; + unsigned downshifted_rate:1; unsigned autoneg:1; /* The most recently read link state */ @@ -557,11 +567,12 @@ struct phy_driver { /* * Checks if the PHY generated an interrupt. * For multi-PHY devices with shared PHY interrupt pin + * Set interrupt bits have to be cleared. 
*/ int (*did_interrupt)(struct phy_device *phydev); /* Override default interrupt handling */ - int (*handle_interrupt)(struct phy_device *phydev); + irqreturn_t (*handle_interrupt)(struct phy_device *phydev); /* Clears up any memory if needed */ void (*remove)(struct phy_device *phydev); @@ -690,6 +701,7 @@ static inline bool phy_is_started(struct phy_device *phydev) void phy_resolve_aneg_pause(struct phy_device *phydev); void phy_resolve_aneg_linkmode(struct phy_device *phydev); +void phy_check_downshift(struct phy_device *phydev); /** * phy_read - Convenience function for reading a given PHY register @@ -705,6 +717,19 @@ static inline int phy_read(struct phy_device *phydev, u32 regnum) return mdiobus_read(phydev->mdio.bus, phydev->mdio.addr, regnum); } +#define phy_read_poll_timeout(phydev, regnum, val, cond, sleep_us, \ + timeout_us, sleep_before_read) \ +({ \ + int __ret = read_poll_timeout(phy_read, val, (cond) || val < 0, \ + sleep_us, timeout_us, sleep_before_read, phydev, regnum); \ + if (val < 0) \ + __ret = val; \ + if (__ret) \ + phydev_err(phydev, "%s failed: %d\n", __func__, __ret); \ + __ret; \ +}) + + /** * __phy_read - convenience function for reading a given PHY register * @phydev: the phy_device struct @@ -747,6 +772,25 @@ static inline int __phy_write(struct phy_device *phydev, u32 regnum, u16 val) } /** + * __phy_modify_changed() - Convenience function for modifying a PHY register + * @phydev: a pointer to a &struct phy_device + * @regnum: register number + * @mask: bit mask of bits to clear + * @set: bit mask of bits to set + * + * Unlocked helper function which allows a PHY register to be modified as + * new register value = (old register value & ~mask) | set + * + * Returns negative errno, 0 if there was no change, and 1 in case of change + */ +static inline int __phy_modify_changed(struct phy_device *phydev, u32 regnum, + u16 mask, u16 set) +{ + return __mdiobus_modify_changed(phydev->mdio.bus, phydev->mdio.addr, + regnum, mask, set); +} + +/** * phy_read_mmd - Convenience function for reading a register * from an MMD on a given PHY. * @phydev: The phy_device struct @@ -757,6 +801,19 @@ static inline int __phy_write(struct phy_device *phydev, u32 regnum, u16 val) */ int phy_read_mmd(struct phy_device *phydev, int devad, u32 regnum); +#define phy_read_mmd_poll_timeout(phydev, devaddr, regnum, val, cond, \ + sleep_us, timeout_us, sleep_before_read) \ +({ \ + int __ret = read_poll_timeout(phy_read_mmd, val, (cond) || val < 0, \ + sleep_us, timeout_us, sleep_before_read, \ + phydev, devaddr, regnum); \ + if (val < 0) \ + __ret = val; \ + if (__ret) \ + phydev_err(phydev, "%s failed: %d\n", __func__, __ret); \ + __ret; \ +}) + /** * __phy_read_mmd - Convenience function for reading a register * from an MMD on a given PHY. 
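/*
 * Sketch of the new phy_read_poll_timeout() helper above in a PHY driver
 * context: poll BMSR until autonegotiation completes, sleeping 1 ms
 * between reads and giving up after 100 ms. The function name is made up;
 * MII_BMSR and BMSR_ANEGCOMPLETE come from <linux/mii.h>.
 */
static int example_wait_aneg_done(struct phy_device *phydev)
{
        int val;

        return phy_read_poll_timeout(phydev, MII_BMSR, val,
                                     val & BMSR_ANEGCOMPLETE,
                                     1000, 100000, false);
}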
@@ -1257,6 +1314,9 @@ void phy_set_sym_pause(struct phy_device *phydev, bool rx, bool tx, void phy_set_asym_pause(struct phy_device *phydev, bool rx, bool tx); bool phy_validate_pause(struct phy_device *phydev, struct ethtool_pauseparam *pp); +void phy_get_pause(struct phy_device *phydev, bool *tx_pause, bool *rx_pause); +void phy_resolve_pause(unsigned long *local_adv, unsigned long *partner_adv, + bool *tx_pause, bool *rx_pause); int phy_register_fixup(const char *bus_id, u32 phy_uid, u32 phy_uid_mask, int (*run)(struct phy_device *)); diff --git a/include/linux/phy/tegra/xusb.h b/include/linux/phy/tegra/xusb.h index 1235865e7e2c..71d956935405 100644 --- a/include/linux/phy/tegra/xusb.h +++ b/include/linux/phy/tegra/xusb.h @@ -21,4 +21,6 @@ int tegra_xusb_padctl_usb3_set_lfps_detect(struct tegra_xusb_padctl *padctl, int tegra_xusb_padctl_set_vbus_override(struct tegra_xusb_padctl *padctl, bool val); int tegra_phy_xusb_utmi_port_reset(struct phy *phy); +int tegra_xusb_padctl_get_usb3_companion(struct tegra_xusb_padctl *padctl, + unsigned int port); #endif /* PHY_TEGRA_XUSB_H */ diff --git a/include/linux/phylink.h b/include/linux/phylink.h index 523209e70947..3f8d37ec5503 100644 --- a/include/linux/phylink.h +++ b/include/linux/phylink.h @@ -12,12 +12,10 @@ struct net_device; enum { MLO_PAUSE_NONE, - MLO_PAUSE_ASYM = BIT(0), - MLO_PAUSE_SYM = BIT(1), - MLO_PAUSE_RX = BIT(2), - MLO_PAUSE_TX = BIT(3), + MLO_PAUSE_RX = BIT(0), + MLO_PAUSE_TX = BIT(1), MLO_PAUSE_TXRX_MASK = MLO_PAUSE_TX | MLO_PAUSE_RX, - MLO_PAUSE_AN = BIT(4), + MLO_PAUSE_AN = BIT(2), MLO_AN_PHY = 0, /* Conventional PHY */ MLO_AN_FIXED, /* Fixed-link mode */ @@ -93,9 +91,10 @@ struct phylink_mac_ops { void (*mac_an_restart)(struct phylink_config *config); void (*mac_link_down)(struct phylink_config *config, unsigned int mode, phy_interface_t interface); - void (*mac_link_up)(struct phylink_config *config, unsigned int mode, - phy_interface_t interface, - struct phy_device *phy); + void (*mac_link_up)(struct phylink_config *config, + struct phy_device *phy, unsigned int mode, + phy_interface_t interface, int speed, int duplex, + bool tx_pause, bool rx_pause); }; #if 0 /* For kernel-doc purposes only. */ @@ -154,15 +153,33 @@ void mac_pcs_get_state(struct phylink_config *config, * guaranteed to be correct, and so any mac_config() implementation must * never reference these fields. * + * (this requires a rewrite - please refer to mac_link_up() for situations + * where the PCS and MAC are not tightly integrated.) + * + * In all negotiation modes, as defined by @mode, @state->pause indicates the + * pause settings which should be applied as follows. If %MLO_PAUSE_AN is not + * set, %MLO_PAUSE_TX and %MLO_PAUSE_RX indicate whether the MAC should send + * pause frames and/or act on received pause frames respectively. Otherwise, + * the results of in-band negotiation/status from the MAC PCS should be used + * to control the MAC pause mode settings. + * * The action performed depends on the currently selected mode: * * %MLO_AN_FIXED, %MLO_AN_PHY: - * Configure the specified @state->speed, @state->duplex and - * @state->pause (%MLO_PAUSE_TX / %MLO_PAUSE_RX) modes over a link - * specified by @state->interface. @state->advertising may be used, - * but is not required. Other members of @state must be ignored. + * Configure for non-inband negotiation mode, where the link settings + * are completely communicated via mac_link_up(). The physical link + * protocol from the MAC is specified by @state->interface. 
+ * + * @state->advertising may be used, but is not required. + * + * Older drivers (prior to the mac_link_up() change) may use @state->speed, + * @state->duplex and @state->pause to configure the MAC, but this is + * deprecated; such drivers should be converted to use mac_link_up(). * - * Valid state members: interface, speed, duplex, pause, advertising. + * Other members of @state must be ignored. + * + * Valid state members: interface, advertising. + * Deprecated state members: speed, duplex, pause. * * %MLO_AN_INBAND: * place the link in an inband negotiation mode (such as 802.3z @@ -172,11 +189,14 @@ void mac_pcs_get_state(struct phylink_config *config, * mac_pcs_get_state() callback. Changes in link state must be made * by calling phylink_mac_change(). * + * Interface mode specific details are mentioned below. + * * If in 802.3z mode, the link speed is fixed, dependent on the - * @state->interface. Duplex is negotiated, and pause is advertised - * according to @state->an_enabled, @state->pause and - * @state->advertising flags. Beware of MACs which only support full - * duplex at gigabit and higher speeds. + * @state->interface. Duplex and pause modes are negotiated via + * the in-band configuration word. Advertised pause modes are set + * according to the @state->an_enabled and @state->advertising + * flags. Beware of MACs which only support full duplex at gigabit + * and higher speeds. * * If in Cisco SGMII mode, the link speed and duplex mode are passed * in the serial bitstream 16-bit configuration word, and the MAC @@ -220,24 +240,127 @@ void mac_link_down(struct phylink_config *config, unsigned int mode, /** * mac_link_up() - allow the link to come up * @config: a pointer to a &struct phylink_config. + * @phy: any attached phy * @mode: link autonegotiation mode * @interface: link &typedef phy_interface_t mode - * @phy: any attached phy + * @speed: link speed + * @duplex: link duplex + * @tx_pause: link transmit pause enablement status + * @rx_pause: link receive pause enablement status * - * If @mode is not an in-band negotiation mode (as defined by - * phylink_autoneg_inband()), allow the link to come up. If @phy - * is non-%NULL, configure Energy Efficient Ethernet by calling + * Configure the MAC for an established link. + * + * @speed, @duplex, @tx_pause and @rx_pause indicate the finalised link + * settings, and should be used to configure the MAC block appropriately + * where these settings are not automatically conveyed from the PCS block, + * or if in-band negotiation (as defined by phylink_autoneg_inband(@mode)) + * is disabled. + * + * Note that when 802.3z in-band negotiation is in use, it is possible + * that the user wishes to override the pause settings, and this should + * be allowed when considering the implementation of this method. + * + * If in-band negotiation mode is disabled, allow the link to come up. If + * @phy is non-%NULL, configure Energy Efficient Ethernet by calling * phy_init_eee() and perform appropriate MAC configuration for EEE. * Interface type selection must be done in mac_config(). */ -void mac_link_up(struct phylink_config *config, unsigned int mode, - phy_interface_t interface, - struct phy_device *phy); +void mac_link_up(struct phylink_config *config, struct phy_device *phy, + unsigned int mode, phy_interface_t interface, + int speed, int duplex, bool tx_pause, bool rx_pause); +#endif + +/** + * struct phylink_pcs_ops - MAC PCS operations structure. + * @pcs_get_state: read the current MAC PCS link state from the hardware. 
+ * @pcs_config: configure the MAC PCS for the selected mode and state. + * @pcs_an_restart: restart 802.3z BaseX autonegotiation. + * @pcs_link_up: program the PCS for the resolved link configuration + * (where necessary). + */ +struct phylink_pcs_ops { + void (*pcs_get_state)(struct phylink_config *config, + struct phylink_link_state *state); + int (*pcs_config)(struct phylink_config *config, unsigned int mode, + phy_interface_t interface, + const unsigned long *advertising); + void (*pcs_an_restart)(struct phylink_config *config); + void (*pcs_link_up)(struct phylink_config *config, unsigned int mode, + phy_interface_t interface, int speed, int duplex); +}; + +#if 0 /* For kernel-doc purposes only. */ +/** + * pcs_get_state() - Read the current inband link state from the hardware + * @config: a pointer to a &struct phylink_config. + * @state: a pointer to a &struct phylink_link_state. + * + * Read the current inband link state from the MAC PCS, reporting the + * current speed in @state->speed, duplex mode in @state->duplex, pause + * mode in @state->pause using the %MLO_PAUSE_RX and %MLO_PAUSE_TX bits, + * negotiation completion state in @state->an_complete, and link up state + * in @state->link. If possible, @state->lp_advertising should also be + * populated. + * + * When present, this overrides mac_pcs_get_state() in &struct + * phylink_mac_ops. + */ +void pcs_get_state(struct phylink_config *config, + struct phylink_link_state *state); + +/** + * pcs_config() - Configure the PCS mode and advertisement + * @config: a pointer to a &struct phylink_config. + * @mode: one of %MLO_AN_FIXED, %MLO_AN_PHY, %MLO_AN_INBAND. + * @interface: interface mode to be used + * @advertising: adertisement ethtool link mode mask + * + * Configure the PCS for the operating mode, the interface mode, and set + * the advertisement mask. + * + * When operating in %MLO_AN_INBAND, inband should always be enabled, + * otherwise inband should be disabled. + * + * For SGMII, there is no advertisement from the MAC side, the PCS should + * be programmed to acknowledge the inband word from the PHY. + * + * For 1000BASE-X, the advertisement should be programmed into the PCS. + * + * For most 10GBASE-R, there is no advertisement. + */ +int (*pcs_config)(struct phylink_config *config, unsigned int mode, + phy_interface_t interface, const unsigned long *advertising); + +/** + * pcs_an_restart() - restart 802.3z BaseX autonegotiation + * @config: a pointer to a &struct phylink_config. + * + * When PCS ops are present, this overrides mac_an_restart() in &struct + * phylink_mac_ops. + */ +void (*pcs_an_restart)(struct phylink_config *config); + +/** + * pcs_link_up() - program the PCS for the resolved link configuration + * @config: a pointer to a &struct phylink_config. + * @mode: link autonegotiation mode + * @interface: link &typedef phy_interface_t mode + * @speed: link speed + * @duplex: link duplex + * + * This call will be made just before mac_link_up() to inform the PCS of + * the resolved link parameters. For example, a PCS operating in SGMII + * mode without in-band AN needs to be manually configured for the link + * and duplex setting. Otherwise, this should be a no-op. 
+ */ +void (*pcs_link_up)(struct phylink_config *config, unsigned int mode, + phy_interface_t interface, int speed, int duplex); #endif struct phylink *phylink_create(struct phylink_config *, struct fwnode_handle *, phy_interface_t iface, - const struct phylink_mac_ops *ops); + const struct phylink_mac_ops *mac_ops); +void phylink_add_pcs(struct phylink *, const struct phylink_pcs_ops *ops); void phylink_destroy(struct phylink *); int phylink_connect_phy(struct phylink *, struct phy_device *); @@ -282,4 +405,13 @@ int phylink_mii_ioctl(struct phylink *, struct ifreq *, int); void phylink_set_port_modes(unsigned long *bits); void phylink_helper_basex_speed(struct phylink_link_state *state); +void phylink_mii_c22_pcs_get_state(struct mdio_device *pcs, + struct phylink_link_state *state); +int phylink_mii_c22_pcs_set_advertisement(struct mdio_device *pcs, + phy_interface_t interface, + const unsigned long *advertising); +void phylink_mii_c22_pcs_an_restart(struct mdio_device *pcs); + +void phylink_mii_c45_pcs_get_state(struct mdio_device *pcs, + struct phylink_link_state *state); #endif diff --git a/include/linux/pid.h b/include/linux/pid.h index 998ae7d24450..01a0d4e28506 100644 --- a/include/linux/pid.h +++ b/include/linux/pid.h @@ -62,6 +62,7 @@ struct pid unsigned int level; /* lists of tasks that use this pid */ struct hlist_head tasks[PIDTYPE_MAX]; + struct hlist_head inodes; /* wait queue for pidfd notifications */ wait_queue_head_t wait_pidfd; struct rcu_head rcu; diff --git a/include/linux/pid_namespace.h b/include/linux/pid_namespace.h index 2ed6af88794b..4956e362e55e 100644 --- a/include/linux/pid_namespace.h +++ b/include/linux/pid_namespace.h @@ -33,7 +33,6 @@ struct pid_namespace { unsigned int level; struct pid_namespace *parent; #ifdef CONFIG_PROC_FS - struct vfsmount *proc_mnt; struct dentry *proc_self; struct dentry *proc_thread_self; #endif @@ -42,7 +41,6 @@ struct pid_namespace { #endif struct user_namespace *user_ns; struct ucounts *ucounts; - struct work_struct proc_work; kgid_t pid_gid; int hide_pid; int reboot; /* group exit code if this pidns was rebooted */ diff --git a/include/linux/pinctrl/pinctrl.h b/include/linux/pinctrl/pinctrl.h index 7ce23450a1cb..2aef59df93d7 100644 --- a/include/linux/pinctrl/pinctrl.h +++ b/include/linux/pinctrl/pinctrl.h @@ -186,7 +186,7 @@ extern int pinctrl_get_group_pins(struct pinctrl_dev *pctldev, const char *pin_group, const unsigned **pins, unsigned *num_pins); -#ifdef CONFIG_OF +#if IS_ENABLED(CONFIG_OF) && IS_ENABLED(CONFIG_PINCTRL) extern struct pinctrl_dev *of_pinctrl_get(struct device_node *np); #else static inline diff --git a/include/linux/platform_data/dmtimer-omap.h b/include/linux/platform_data/dmtimer-omap.h index bdaaf537604a..95d852aef130 100644 --- a/include/linux/platform_data/dmtimer-omap.h +++ b/include/linux/platform_data/dmtimer-omap.h @@ -30,12 +30,12 @@ struct omap_dm_timer_ops { int (*stop)(struct omap_dm_timer *timer); int (*set_source)(struct omap_dm_timer *timer, int source); - int (*set_load)(struct omap_dm_timer *timer, int autoreload, - unsigned int value); + int (*set_load)(struct omap_dm_timer *timer, unsigned int value); int (*set_match)(struct omap_dm_timer *timer, int enable, unsigned int match); int (*set_pwm)(struct omap_dm_timer *timer, int def_on, - int toggle, int trigger); + int toggle, int trigger, int autoreload); + int (*get_pwm_status)(struct omap_dm_timer *timer); int (*set_prescaler)(struct omap_dm_timer *timer, int prescaler); unsigned int (*read_counter)(struct omap_dm_timer *timer); 
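/*
 * Sketch of a MAC driver's mac_link_up() method against the new phylink
 * prototype above: the resolved speed, duplex and pause settings are now
 * passed in directly instead of being read from a phylink_link_state.
 * The driver-specific register programming is only indicated by comments.
 */
static void example_mac_link_up(struct phylink_config *config,
                                struct phy_device *phy, unsigned int mode,
                                phy_interface_t interface, int speed,
                                int duplex, bool tx_pause, bool rx_pause)
{
        /*
         * Program the MAC for the finalised link here, e.g. write @speed
         * and @duplex into the MAC configuration register, enable or
         * disable pause frame transmission/reception according to
         * @tx_pause and @rx_pause, then enable the TX/RX datapath.
         */
}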
diff --git a/include/linux/platform_data/leds-kirkwood-ns2.h b/include/linux/platform_data/leds-kirkwood-ns2.h deleted file mode 100644 index eb8a6860e816..000000000000 --- a/include/linux/platform_data/leds-kirkwood-ns2.h +++ /dev/null @@ -1,38 +0,0 @@ -/* - * Platform data structure for Network Space v2 LED driver - * - * This file is licensed under the terms of the GNU General Public - * License version 2. This program is licensed "as is" without any - * warranty of any kind, whether express or implied. - */ - -#ifndef __LEDS_KIRKWOOD_NS2_H -#define __LEDS_KIRKWOOD_NS2_H - -enum ns2_led_modes { - NS_V2_LED_OFF, - NS_V2_LED_ON, - NS_V2_LED_SATA, -}; - -struct ns2_led_modval { - enum ns2_led_modes mode; - int cmd_level; - int slow_level; -}; - -struct ns2_led { - const char *name; - const char *default_trigger; - unsigned cmd; - unsigned slow; - int num_modes; - struct ns2_led_modval *modval; -}; - -struct ns2_led_platform_data { - int num_leds; - struct ns2_led *leds; -}; - -#endif /* __LEDS_KIRKWOOD_NS2_H */ diff --git a/include/linux/platform_data/mmc-esdhc-imx.h b/include/linux/platform_data/mmc-esdhc-imx.h index 6c006078c8a1..0434f68eda86 100644 --- a/include/linux/platform_data/mmc-esdhc-imx.h +++ b/include/linux/platform_data/mmc-esdhc-imx.h @@ -37,5 +37,6 @@ struct esdhc_platform_data { unsigned int delay_line; unsigned int tuning_step; /* The delay cell steps in tuning procedure */ unsigned int tuning_start_tap; /* The start delay cell point in tuning procedure */ + unsigned int strobe_dll_delay_target; /* The delay cell for strobe pad (read clock) */ }; #endif /* __ASM_ARCH_IMX_ESDHC_H */ diff --git a/include/linux/platform_data/pm33xx.h b/include/linux/platform_data/pm33xx.h index dd5971937a64..644af1d89cfa 100644 --- a/include/linux/platform_data/pm33xx.h +++ b/include/linux/platform_data/pm33xx.h @@ -46,9 +46,13 @@ struct am33xx_pm_sram_addr { }; struct am33xx_pm_platform_data { - int (*init)(void); + int (*init)(int (*idle)(u32 wfi_flags)); + int (*deinit)(void); int (*soc_suspend)(unsigned int state, int (*fn)(unsigned long), unsigned long args); + int (*cpu_suspend)(int (*fn)(unsigned long), unsigned long args); + void (*begin_suspend)(void); + void (*finish_suspend)(void); struct am33xx_pm_sram_addr *(*get_sram_addrs)(void); void __iomem *(*get_rtc_base_addr)(void); void (*save_context)(void); diff --git a/include/linux/platform_data/remoteproc-omap.h b/include/linux/platform_data/remoteproc-omap.h deleted file mode 100644 index 7e3a16097672..000000000000 --- a/include/linux/platform_data/remoteproc-omap.h +++ /dev/null @@ -1,51 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * Remote Processor - omap-specific bits - * - * Copyright (C) 2011 Texas Instruments, Inc. - * Copyright (C) 2011 Google, Inc. 
- */ - -#ifndef _PLAT_REMOTEPROC_H -#define _PLAT_REMOTEPROC_H - -struct rproc_ops; -struct platform_device; - -/* - * struct omap_rproc_pdata - omap remoteproc's platform data - * @name: the remoteproc's name - * @oh_name: omap hwmod device - * @oh_name_opt: optional, secondary omap hwmod device - * @firmware: name of firmware file to load - * @mbox_name: name of omap mailbox device to use with this rproc - * @ops: start/stop rproc handlers - * @device_enable: omap-specific handler for enabling a device - * @device_shutdown: omap-specific handler for shutting down a device - * @set_bootaddr: omap-specific handler for setting the rproc boot address - */ -struct omap_rproc_pdata { - const char *name; - const char *oh_name; - const char *oh_name_opt; - const char *firmware; - const char *mbox_name; - const struct rproc_ops *ops; - int (*device_enable)(struct platform_device *pdev); - int (*device_shutdown)(struct platform_device *pdev); - void (*set_bootaddr)(u32); -}; - -#if defined(CONFIG_OMAP_REMOTEPROC) || defined(CONFIG_OMAP_REMOTEPROC_MODULE) - -void __init omap_rproc_reserve_cma(void); - -#else - -static inline void __init omap_rproc_reserve_cma(void) -{ -} - -#endif - -#endif /* _PLAT_REMOTEPROC_H */ diff --git a/include/linux/platform_data/simplefb.h b/include/linux/platform_data/simplefb.h index 4f733a411d18..ca8337695c2a 100644 --- a/include/linux/platform_data/simplefb.h +++ b/include/linux/platform_data/simplefb.h @@ -10,7 +10,7 @@ #include <drm/drm_fourcc.h> #include <linux/fb.h> -#include <linux/kernel.h> +#include <linux/types.h> /* format array, use it to initialize a "struct simplefb_format" array */ #define SIMPLEFB_FORMATS \ diff --git a/include/linux/platform_data/spi-omap2-mcspi.h b/include/linux/platform_data/spi-omap2-mcspi.h index 0bf9fddb8306..3b400b1919a9 100644 --- a/include/linux/platform_data/spi-omap2-mcspi.h +++ b/include/linux/platform_data/spi-omap2-mcspi.h @@ -11,6 +11,7 @@ struct omap2_mcspi_platform_config { unsigned short num_cs; unsigned int regs_offset; unsigned int pin_dir:1; + size_t max_xfer_len; }; struct omap2_mcspi_device_config { diff --git a/include/linux/platform_data/ti-sysc.h b/include/linux/platform_data/ti-sysc.h index 2cbde6542849..c59999ce044e 100644 --- a/include/linux/platform_data/ti-sysc.h +++ b/include/linux/platform_data/ti-sysc.h @@ -17,6 +17,7 @@ enum ti_sysc_module_type { TI_SYSC_OMAP4_MCASP, TI_SYSC_OMAP4_USB_HOST_FS, TI_SYSC_DRA7_MCAN, + TI_SYSC_PRUSS, }; struct ti_sysc_cookie { @@ -49,6 +50,9 @@ struct sysc_regbits { s8 emufree_shift; }; +#define SYSC_MODULE_QUIRK_PRUSS BIT(24) +#define SYSC_MODULE_QUIRK_DSS_RESET BIT(23) +#define SYSC_MODULE_QUIRK_RTC_UNLOCK BIT(22) #define SYSC_QUIRK_CLKDM_NOAUTO BIT(21) #define SYSC_QUIRK_FORCE_MSTANDBY BIT(20) #define SYSC_MODULE_QUIRK_AESS BIT(19) @@ -141,6 +145,7 @@ struct clk; struct ti_sysc_platform_data { struct of_dev_auxdata *auxdata; + bool (*soc_type_gp)(void); int (*init_clockdomain)(struct device *dev, struct clk *fck, struct clk *ick, struct ti_sysc_cookie *cookie); void (*clkdm_deny_idle)(struct device *dev, diff --git a/include/linux/platform_device.h b/include/linux/platform_device.h index 276a03c24691..bdc35753ef7c 100644 --- a/include/linux/platform_device.h +++ b/include/linux/platform_device.h @@ -24,7 +24,7 @@ struct platform_device { int id; bool id_auto; struct device dev; - u64 dma_mask; + u64 platform_dma_mask; u32 num_resources; struct resource *resource; @@ -55,6 +55,9 @@ extern struct device * platform_find_device_by_driver(struct device *start, const struct 
device_driver *drv); extern void __iomem * +devm_platform_get_and_ioremap_resource(struct platform_device *pdev, + unsigned int index, struct resource **res); +extern void __iomem * devm_platform_ioremap_resource(struct platform_device *pdev, unsigned int index); extern void __iomem * @@ -89,7 +92,7 @@ struct platform_device_info { size_t size_data; u64 dma_mask; - struct property_entry *properties; + const struct property_entry *properties; }; extern struct platform_device *platform_device_register_full( const struct platform_device_info *pdevinfo); diff --git a/include/linux/pm_qos.h b/include/linux/pm_qos.h index 19eafca5680e..4a69d4af3ff8 100644 --- a/include/linux/pm_qos.h +++ b/include/linux/pm_qos.h @@ -1,22 +1,20 @@ /* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _LINUX_PM_QOS_H -#define _LINUX_PM_QOS_H -/* interface for the pm_qos_power infrastructure of the linux kernel. +/* + * Definitions related to Power Management Quality of Service (PM QoS). * - * Mark Gross <mgross@linux.intel.com> + * Copyright (C) 2020 Intel Corporation + * + * Authors: + * Mark Gross <mgross@linux.intel.com> + * Rafael J. Wysocki <rafael.j.wysocki@intel.com> */ + +#ifndef _LINUX_PM_QOS_H +#define _LINUX_PM_QOS_H + #include <linux/plist.h> #include <linux/notifier.h> #include <linux/device.h> -#include <linux/workqueue.h> - -enum { - PM_QOS_RESERVED = 0, - PM_QOS_CPU_DMA_LATENCY, - - /* insert new class ID */ - PM_QOS_NUM_CLASSES, -}; enum pm_qos_flags_status { PM_QOS_FLAGS_UNDEFINED = -1, @@ -29,7 +27,7 @@ enum pm_qos_flags_status { #define PM_QOS_LATENCY_ANY S32_MAX #define PM_QOS_LATENCY_ANY_NS ((s64)PM_QOS_LATENCY_ANY * NSEC_PER_USEC) -#define PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE (2000 * USEC_PER_SEC) +#define PM_QOS_CPU_LATENCY_DEFAULT_VALUE (2000 * USEC_PER_SEC) #define PM_QOS_RESUME_LATENCY_DEFAULT_VALUE PM_QOS_LATENCY_ANY #define PM_QOS_RESUME_LATENCY_NO_CONSTRAINT PM_QOS_LATENCY_ANY #define PM_QOS_RESUME_LATENCY_NO_CONSTRAINT_NS PM_QOS_LATENCY_ANY_NS @@ -40,22 +38,10 @@ enum pm_qos_flags_status { #define PM_QOS_FLAG_NO_POWER_OFF (1 << 0) -struct pm_qos_request { - struct plist_node node; - int pm_qos_class; - struct delayed_work work; /* for pm_qos_update_request_timeout */ -}; - -struct pm_qos_flags_request { - struct list_head node; - s32 flags; /* Do not change to 64 bit */ -}; - enum pm_qos_type { PM_QOS_UNITIALIZED, PM_QOS_MAX, /* return the largest value */ PM_QOS_MIN, /* return the smallest value */ - PM_QOS_SUM /* return the sum */ }; /* @@ -72,6 +58,16 @@ struct pm_qos_constraints { struct blocking_notifier_head *notifiers; }; +struct pm_qos_request { + struct plist_node node; + struct pm_qos_constraints *qos; +}; + +struct pm_qos_flags_request { + struct list_head node; + s32 flags; /* Do not change to 64 bit */ +}; + struct pm_qos_flags { struct list_head list; s32 effective_flags; /* Do not change to 64 bit */ @@ -140,24 +136,31 @@ static inline int dev_pm_qos_request_active(struct dev_pm_qos_request *req) return req->dev != NULL; } +s32 pm_qos_read_value(struct pm_qos_constraints *c); int pm_qos_update_target(struct pm_qos_constraints *c, struct plist_node *node, enum pm_qos_req_action action, int value); bool pm_qos_update_flags(struct pm_qos_flags *pqf, struct pm_qos_flags_request *req, enum pm_qos_req_action action, s32 val); -void pm_qos_add_request(struct pm_qos_request *req, int pm_qos_class, - s32 value); -void pm_qos_update_request(struct pm_qos_request *req, - s32 new_value); -void pm_qos_update_request_timeout(struct pm_qos_request *req, - s32 new_value, unsigned long 
timeout_us); -void pm_qos_remove_request(struct pm_qos_request *req); - -int pm_qos_request(int pm_qos_class); -int pm_qos_add_notifier(int pm_qos_class, struct notifier_block *notifier); -int pm_qos_remove_notifier(int pm_qos_class, struct notifier_block *notifier); -int pm_qos_request_active(struct pm_qos_request *req); -s32 pm_qos_read_value(struct pm_qos_constraints *c); + +#ifdef CONFIG_CPU_IDLE +s32 cpu_latency_qos_limit(void); +bool cpu_latency_qos_request_active(struct pm_qos_request *req); +void cpu_latency_qos_add_request(struct pm_qos_request *req, s32 value); +void cpu_latency_qos_update_request(struct pm_qos_request *req, s32 new_value); +void cpu_latency_qos_remove_request(struct pm_qos_request *req); +#else +static inline s32 cpu_latency_qos_limit(void) { return INT_MAX; } +static inline bool cpu_latency_qos_request_active(struct pm_qos_request *req) +{ + return false; +} +static inline void cpu_latency_qos_add_request(struct pm_qos_request *req, + s32 value) {} +static inline void cpu_latency_qos_update_request(struct pm_qos_request *req, + s32 new_value) {} +static inline void cpu_latency_qos_remove_request(struct pm_qos_request *req) {} +#endif #ifdef CONFIG_PM enum pm_qos_flags_status __dev_pm_qos_flags(struct device *dev, s32 mask); diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h index 22af69d237a6..3bdcbce8141a 100644 --- a/include/linux/pm_runtime.h +++ b/include/linux/pm_runtime.h @@ -38,7 +38,7 @@ extern int pm_runtime_force_resume(struct device *dev); extern int __pm_runtime_idle(struct device *dev, int rpmflags); extern int __pm_runtime_suspend(struct device *dev, int rpmflags); extern int __pm_runtime_resume(struct device *dev, int rpmflags); -extern int pm_runtime_get_if_in_use(struct device *dev); +extern int pm_runtime_get_if_active(struct device *dev, bool ign_usage_count); extern int pm_schedule_suspend(struct device *dev, unsigned int delay); extern int __pm_runtime_set_status(struct device *dev, unsigned int status); extern int pm_runtime_barrier(struct device *dev); @@ -60,6 +60,11 @@ extern void pm_runtime_put_suppliers(struct device *dev); extern void pm_runtime_new_link(struct device *dev); extern void pm_runtime_drop_link(struct device *dev); +static inline int pm_runtime_get_if_in_use(struct device *dev) +{ + return pm_runtime_get_if_active(dev, false); +} + static inline void pm_suspend_ignore_children(struct device *dev, bool enable) { dev->power.ignore_children = enable; @@ -143,6 +148,11 @@ static inline int pm_runtime_get_if_in_use(struct device *dev) { return -EINVAL; } +static inline int pm_runtime_get_if_active(struct device *dev, + bool ign_usage_count) +{ + return -EINVAL; +} static inline int __pm_runtime_set_status(struct device *dev, unsigned int status) { return 0; } static inline int pm_runtime_barrier(struct device *dev) { return 0; } diff --git a/include/linux/pnp.h b/include/linux/pnp.h index 3b12fd28af78..b18dca67253d 100644 --- a/include/linux/pnp.h +++ b/include/linux/pnp.h @@ -379,7 +379,7 @@ struct pnp_id { }; struct pnp_driver { - char *name; + const char *name; const struct pnp_device_id *id_table; unsigned int flags; int (*probe) (struct pnp_dev *dev, const struct pnp_device_id *dev_id); diff --git a/include/linux/posix-timers.h b/include/linux/posix-timers.h index 3d10c84a97a9..e3f0f8585da4 100644 --- a/include/linux/posix-timers.h +++ b/include/linux/posix-timers.h @@ -69,7 +69,7 @@ static inline int clockid_to_fd(const clockid_t clk) struct cpu_timer { struct timerqueue_node node; struct 
timerqueue_head *head; - struct task_struct *task; + struct pid *pid; struct list_head elist; int firing; }; diff --git a/include/linux/power/charger-manager.h b/include/linux/power/charger-manager.h index ad19e68e1fc3..ae94dcebd936 100644 --- a/include/linux/power/charger-manager.h +++ b/include/linux/power/charger-manager.h @@ -248,7 +248,7 @@ struct charger_manager { u64 charging_end_time; }; -#ifdef CONFIG_CHARGER_MANAGER +#if IS_ENABLED(CONFIG_CHARGER_MANAGER) extern void cm_notify_event(struct power_supply *psy, enum cm_event_types type, char *msg); #else diff --git a/include/linux/preempt.h b/include/linux/preempt.h index bbb68dba37cc..bc3f1aecaa19 100644 --- a/include/linux/preempt.h +++ b/include/linux/preempt.h @@ -322,4 +322,34 @@ static inline void preempt_notifier_init(struct preempt_notifier *notifier, #endif +/** + * migrate_disable - Prevent migration of the current task + * + * Maps to preempt_disable() which also disables preemption. Use + * migrate_disable() to annotate that the intent is to prevent migration, + * but not necessarily preemption. + * + * Can be invoked nested like preempt_disable() and needs the corresponding + * number of migrate_enable() invocations. + */ +static __always_inline void migrate_disable(void) +{ + preempt_disable(); +} + +/** + * migrate_enable - Allow migration of the current task + * + * Counterpart to migrate_disable(). + * + * As migrate_disable() can be invoked nested, only the outermost invocation + * reenables migration. + * + * Currently mapped to preempt_enable(). + */ +static __always_inline void migrate_enable(void) +{ + preempt_enable(); +} + #endif /* __LINUX_PREEMPT_H */ diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h index 3dfa92633af3..45c05fd9c99d 100644 --- a/include/linux/proc_fs.h +++ b/include/linux/proc_fs.h @@ -5,6 +5,7 @@ #ifndef _LINUX_PROC_FS_H #define _LINUX_PROC_FS_H +#include <linux/compiler.h> #include <linux/types.h> #include <linux/fs.h> @@ -12,7 +13,21 @@ struct proc_dir_entry; struct seq_file; struct seq_operations; +enum { + /* + * All /proc entries using this ->proc_ops instance are never removed. + * + * If in doubt, ignore this flag. 
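/*
 * Sketch of the CPU latency QoS interface declared above: a driver that
 * used to open-code a PM_QOS_CPU_DMA_LATENCY request now uses the
 * dedicated cpu_latency_qos_*() helpers. The request object and the
 * 20 us bound are illustrative.
 */
static struct pm_qos_request example_latency_req;

static void example_latency_constrain(void)
{
        cpu_latency_qos_add_request(&example_latency_req, 20);
}

static void example_latency_relax(void)
{
        if (cpu_latency_qos_request_active(&example_latency_req))
                cpu_latency_qos_remove_request(&example_latency_req);
}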
+ */ +#ifdef MODULE + PROC_ENTRY_PERMANENT = 0U, +#else + PROC_ENTRY_PERMANENT = 1U << 0, +#endif +}; + struct proc_ops { + unsigned int proc_flags; int (*proc_open)(struct inode *, struct file *); ssize_t (*proc_read)(struct file *, char __user *, size_t, loff_t *); ssize_t (*proc_write)(struct file *, const char __user *, size_t, loff_t *); @@ -25,14 +40,14 @@ struct proc_ops { #endif int (*proc_mmap)(struct file *, struct vm_area_struct *); unsigned long (*proc_get_unmapped_area)(struct file *, unsigned long, unsigned long, unsigned long, unsigned long); -}; +} __randomize_layout; #ifdef CONFIG_PROC_FS typedef int (*proc_write_t)(struct file *, char *, size_t); extern void proc_root_init(void); -extern void proc_flush_task(struct task_struct *); +extern void proc_flush_pid(struct pid *); extern struct proc_dir_entry *proc_symlink(const char *, struct proc_dir_entry *, const char *); @@ -105,7 +120,7 @@ static inline void proc_root_init(void) { } -static inline void proc_flush_task(struct task_struct *task) +static inline void proc_flush_pid(struct pid *pid) { } diff --git a/include/linux/proc_ns.h b/include/linux/proc_ns.h index 4626b1ac3b6c..6abe85c34681 100644 --- a/include/linux/proc_ns.h +++ b/include/linux/proc_ns.h @@ -50,16 +50,11 @@ enum { #ifdef CONFIG_PROC_FS -extern int pid_ns_prepare_proc(struct pid_namespace *ns); -extern void pid_ns_release_proc(struct pid_namespace *ns); extern int proc_alloc_inum(unsigned int *pino); extern void proc_free_inum(unsigned int inum); #else /* CONFIG_PROC_FS */ -static inline int pid_ns_prepare_proc(struct pid_namespace *ns) { return 0; } -static inline void pid_ns_release_proc(struct pid_namespace *ns) {} - static inline int proc_alloc_inum(unsigned int *inum) { *inum = 1; @@ -85,6 +80,8 @@ typedef struct ns_common *ns_get_path_helper_t(void *); extern int ns_get_path_cb(struct path *path, ns_get_path_helper_t ns_get_cb, void *private_data); +extern bool ns_match(const struct ns_common *ns, dev_t dev, ino_t ino); + extern int ns_get_name(char *buf, size_t size, struct task_struct *task, const struct proc_ns_operations *ns_ops); extern void nsfs_init(void); diff --git a/include/linux/psi.h b/include/linux/psi.h index 7b3de7321219..7361023f3fdd 100644 --- a/include/linux/psi.h +++ b/include/linux/psi.h @@ -17,6 +17,8 @@ extern struct psi_group psi_system; void psi_init(void); void psi_task_change(struct task_struct *task, int clear, int set); +void psi_task_switch(struct task_struct *prev, struct task_struct *next, + bool sleep); void psi_memstall_tick(struct task_struct *task, int cpu); void psi_memstall_enter(unsigned long *flags); diff --git a/include/linux/psi_types.h b/include/linux/psi_types.h index 07aaf9b82241..4b7258495a04 100644 --- a/include/linux/psi_types.h +++ b/include/linux/psi_types.h @@ -14,13 +14,21 @@ enum psi_task_count { NR_IOWAIT, NR_MEMSTALL, NR_RUNNING, - NR_PSI_TASK_COUNTS = 3, + /* + * This can't have values other than 0 or 1 and could be + * implemented as a bit flag. But for now we still have room + * in the first cacheline of psi_group_cpu, and this way we + * don't have to special case any state tracking for it. 
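/*
 * Sketch of a never-removed /proc entry using the new ->proc_flags field
 * above. The entry's show routine and ops structure are hypothetical;
 * the seq_file helpers come from <linux/seq_file.h>.
 */
static int example_proc_show(struct seq_file *m, void *v)
{
        seq_puts(m, "example\n");
        return 0;
}

static int example_proc_open(struct inode *inode, struct file *file)
{
        return single_open(file, example_proc_show, NULL);
}

static const struct proc_ops example_proc_ops = {
        .proc_flags     = PROC_ENTRY_PERMANENT,
        .proc_open      = example_proc_open,
        .proc_read      = seq_read,
        .proc_lseek     = seq_lseek,
        .proc_release   = single_release,
};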
+ */ + NR_ONCPU, + NR_PSI_TASK_COUNTS = 4, }; /* Task state bitmasks */ #define TSK_IOWAIT (1 << NR_IOWAIT) #define TSK_MEMSTALL (1 << NR_MEMSTALL) #define TSK_RUNNING (1 << NR_RUNNING) +#define TSK_ONCPU (1 << NR_ONCPU) /* Resources that workloads could be stalled on */ enum psi_res { diff --git a/include/linux/ptp_clock_kernel.h b/include/linux/ptp_clock_kernel.h index c64a1ef87240..121a7eda4593 100644 --- a/include/linux/ptp_clock_kernel.h +++ b/include/linux/ptp_clock_kernel.h @@ -223,6 +223,12 @@ extern s32 scaled_ppm_to_ppb(long ppm); /** * ptp_find_pin() - obtain the pin index of a given auxiliary function * + * The caller must hold ptp_clock::pincfg_mux. Drivers do not have + * access to that mutex as ptp_clock is an opaque type. However, the + * core code acquires the mutex before invoking the driver's + * ptp_clock_info::enable() callback, and so drivers may call this + * function from that context. + * * @ptp: The clock obtained from ptp_clock_register(). * @func: One of the ptp_pin_function enumerated values. * @chan: The particular functional channel to find. @@ -234,6 +240,19 @@ int ptp_find_pin(struct ptp_clock *ptp, enum ptp_pin_function func, unsigned int chan); /** + * ptp_find_pin_unlocked() - wrapper for ptp_find_pin() + * + * This function acquires the ptp_clock::pincfg_mux mutex before + * invoking ptp_find_pin(). Instead of using this function, drivers + * should most likely call ptp_find_pin() directly from their + * ptp_clock_info::enable() method. + * + */ + +int ptp_find_pin_unlocked(struct ptp_clock *ptp, + enum ptp_pin_function func, unsigned int chan); + +/** * ptp_schedule_worker() - schedule ptp auxiliary work * * @ptp: The clock obtained from ptp_clock_register(). diff --git a/include/linux/pwm_backlight.h b/include/linux/pwm_backlight.h index 8ea265a022fd..06086cb93b6f 100644 --- a/include/linux/pwm_backlight.h +++ b/include/linux/pwm_backlight.h @@ -16,8 +16,6 @@ struct platform_pwm_backlight_data { unsigned int *levels; unsigned int post_pwm_on_delay; unsigned int pwm_off_delay; - /* TODO remove once all users are switched to gpiod_* API */ - int enable_gpio; int (*init)(struct device *dev); int (*notify)(struct device *dev, int brightness); void (*notify_after)(struct device *dev, int brightness); diff --git a/include/linux/qed/qed_chain.h b/include/linux/qed/qed_chain.h index 2dd0a9ed5b36..733fad7dfbed 100644 --- a/include/linux/qed/qed_chain.h +++ b/include/linux/qed/qed_chain.h @@ -97,6 +97,11 @@ struct qed_chain_u32 { u32 cons_idx; }; +struct addr_tbl_entry { + void *virt_addr; + dma_addr_t dma_map; +}; + struct qed_chain { /* fastpath portion of the chain - required for commands such * as produce / consume. @@ -107,10 +112,11 @@ struct qed_chain { /* Fastpath portions of the PBL [if exists] */ struct { - /* Table for keeping the virtual addresses of the chain pages, - * respectively to the physical addresses in the pbl table. + /* Table for keeping the virtual and physical addresses of the + * chain pages, respectively to the physical addresses + * in the pbl table. 
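/*
 * Sketch of the locking rule documented above for ptp_find_pin(): it may
 * be called from the driver's .enable() callback, where the core already
 * holds pincfg_mux; elsewhere ptp_find_pin_unlocked() takes the mutex
 * itself. The example_phc structure and callback are made up.
 */
struct example_phc {
        struct ptp_clock *clock;
        struct ptp_clock_info info;
};

static int example_ptp_enable(struct ptp_clock_info *info,
                              struct ptp_clock_request *rq, int on)
{
        struct example_phc *phc = container_of(info, struct example_phc, info);
        int pin;

        if (rq->type != PTP_CLK_REQ_EXTTS)
                return -EOPNOTSUPP;

        pin = ptp_find_pin(phc->clock, PTP_PF_EXTTS, rq->extts.index);
        if (pin < 0)
                return -EINVAL;

        /* program the hardware to timestamp events on @pin here */
        return 0;
}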
*/ - void **pp_virt_addr_tbl; + struct addr_tbl_entry *pp_addr_tbl; union { struct qed_chain_pbl_u16 u16; @@ -287,7 +293,7 @@ qed_chain_advance_page(struct qed_chain *p_chain, *(u32 *)page_to_inc = 0; page_index = *(u32 *)page_to_inc; } - *p_next_elem = p_chain->pbl.pp_virt_addr_tbl[page_index]; + *p_next_elem = p_chain->pbl.pp_addr_tbl[page_index].virt_addr; } } @@ -537,7 +543,7 @@ static inline void qed_chain_init_params(struct qed_chain *p_chain, p_chain->pbl_sp.p_phys_table = 0; p_chain->pbl_sp.p_virt_table = NULL; - p_chain->pbl.pp_virt_addr_tbl = NULL; + p_chain->pbl.pp_addr_tbl = NULL; } /** @@ -575,11 +581,11 @@ static inline void qed_chain_init_mem(struct qed_chain *p_chain, static inline void qed_chain_init_pbl_mem(struct qed_chain *p_chain, void *p_virt_pbl, dma_addr_t p_phys_pbl, - void **pp_virt_addr_tbl) + struct addr_tbl_entry *pp_addr_tbl) { p_chain->pbl_sp.p_phys_table = p_phys_pbl; p_chain->pbl_sp.p_virt_table = p_virt_pbl; - p_chain->pbl.pp_virt_addr_tbl = pp_virt_addr_tbl; + p_chain->pbl.pp_addr_tbl = pp_addr_tbl; } /** @@ -644,7 +650,7 @@ static inline void *qed_chain_get_last_elem(struct qed_chain *p_chain) break; case QED_CHAIN_MODE_PBL: last_page_idx = p_chain->page_cnt - 1; - p_virt_addr = p_chain->pbl.pp_virt_addr_tbl[last_page_idx]; + p_virt_addr = p_chain->pbl.pp_addr_tbl[last_page_idx].virt_addr; break; } /* p_virt_addr points at this stage to the last page of the chain */ @@ -716,7 +722,7 @@ static inline void qed_chain_pbl_zero_mem(struct qed_chain *p_chain) page_cnt = qed_chain_get_page_cnt(p_chain); for (i = 0; i < page_cnt; i++) - memset(p_chain->pbl.pp_virt_addr_tbl[i], 0, + memset(p_chain->pbl.pp_addr_tbl[i].virt_addr, 0, QED_CHAIN_PAGE_SIZE); } diff --git a/include/linux/raid/detect.h b/include/linux/raid/detect.h new file mode 100644 index 000000000000..37dd3f40cd31 --- /dev/null +++ b/include/linux/raid/detect.h @@ -0,0 +1,3 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +void md_autodetect_dev(dev_t dev); diff --git a/include/linux/random.h b/include/linux/random.h index d319f9a1e429..45e1f8fa742b 100644 --- a/include/linux/random.h +++ b/include/linux/random.h @@ -7,6 +7,8 @@ #ifndef _LINUX_RANDOM_H #define _LINUX_RANDOM_H +#include <linux/bug.h> +#include <linux/kernel.h> #include <linux/list.h> #include <linux/once.h> @@ -185,6 +187,26 @@ static inline bool __must_check arch_get_random_seed_int(unsigned int *v) } #endif +/* + * Called from the boot CPU during startup; not valid to call once + * secondary CPUs are up and preemption is possible. + */ +#ifndef arch_get_random_seed_long_early +static inline bool __init arch_get_random_seed_long_early(unsigned long *v) +{ + WARN_ON(system_state != SYSTEM_BOOTING); + return arch_get_random_seed_long(v); +} +#endif + +#ifndef arch_get_random_long_early +static inline bool __init arch_get_random_long_early(unsigned long *v) +{ + WARN_ON(system_state != SYSTEM_BOOTING); + return arch_get_random_long(v); +} +#endif + /* Pseudo random number generator from numerical recipes. */ static inline u32 next_pseudo_random32(u32 seed) { diff --git a/include/linux/rculist.h b/include/linux/rculist.h index 9f313e4999fe..8214cdc715f2 100644 --- a/include/linux/rculist.h +++ b/include/linux/rculist.h @@ -60,9 +60,9 @@ static inline void INIT_LIST_HEAD_RCU(struct list_head *list) #define __list_check_rcu(dummy, cond, extra...) 
\ ({ \ check_arg_count_one(extra); \ - RCU_LOCKDEP_WARN(!cond && !rcu_read_lock_any_held(), \ + RCU_LOCKDEP_WARN(!(cond) && !rcu_read_lock_any_held(), \ "RCU-list traversed in non-reader section!"); \ - }) + }) #else #define __list_check_rcu(dummy, cond, extra...) \ ({ check_arg_count_one(extra); }) diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h index b2b2dc990da9..045c28b71f4f 100644 --- a/include/linux/rcutiny.h +++ b/include/linux/rcutiny.h @@ -83,6 +83,7 @@ void rcu_scheduler_starting(void); static inline void rcu_scheduler_starting(void) { } #endif /* #else #ifndef CONFIG_SRCU */ static inline void rcu_end_inkernel_boot(void) { } +static inline bool rcu_inkernel_boot_has_ended(void) { return true; } static inline bool rcu_is_watching(void) { return true; } static inline void rcu_momentary_dyntick_idle(void) { } static inline void kfree_rcu_scheduler_running(void) { } diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h index 2f787b9029d1..45f3f66bb04d 100644 --- a/include/linux/rcutree.h +++ b/include/linux/rcutree.h @@ -54,6 +54,7 @@ void exit_rcu(void); void rcu_scheduler_starting(void); extern int rcu_scheduler_active __read_mostly; void rcu_end_inkernel_boot(void); +bool rcu_inkernel_boot_has_ended(void); bool rcu_is_watching(void); #ifndef CONFIG_PREEMPTION void rcu_all_qs(void); diff --git a/include/linux/rcuwait.h b/include/linux/rcuwait.h index 75c97e4bbc57..2ffe1ee6d482 100644 --- a/include/linux/rcuwait.h +++ b/include/linux/rcuwait.h @@ -3,6 +3,7 @@ #define _LINUX_RCUWAIT_H_ #include <linux/rcupdate.h> +#include <linux/sched/signal.h> /* * rcuwait provides a way of blocking and waking up a single @@ -30,23 +31,30 @@ extern void rcuwait_wake_up(struct rcuwait *w); * The caller is responsible for locking around rcuwait_wait_event(), * such that writes to @task are properly serialized. */ -#define rcuwait_wait_event(w, condition) \ +#define rcuwait_wait_event(w, condition, state) \ ({ \ + int __ret = 0; \ rcu_assign_pointer((w)->task, current); \ for (;;) { \ /* \ * Implicit barrier (A) pairs with (B) in \ * rcuwait_wake_up(). \ */ \ - set_current_state(TASK_UNINTERRUPTIBLE); \ + set_current_state(state); \ if (condition) \ break; \ \ + if (signal_pending_state(state, current)) { \ + __ret = -EINTR; \ + break; \ + } \ + \ schedule(); \ } \ \ WRITE_ONCE((w)->task, NULL); \ __set_current_state(TASK_RUNNING); \ + __ret; \ }) #endif /* _LINUX_RCUWAIT_H_ */ diff --git a/include/linux/regmap.h b/include/linux/regmap.h index f0a092a1a96d..40b07168fd8e 100644 --- a/include/linux/regmap.h +++ b/include/linux/regmap.h @@ -461,8 +461,8 @@ struct regmap_config { * @range_max: Address of the highest register in virtual range. * * @selector_reg: Register with selector field. - * @selector_mask: Bit shift for selector value. - * @selector_shift: Bit mask for selector value. + * @selector_mask: Bit mask for selector value. + * @selector_shift: Bit shift for selector value. * * @window_start: Address of first (lowest) register in data window. * @window_len: Number of registers in data window. diff --git a/include/linux/regulator/driver.h b/include/linux/regulator/driver.h index 9a911bb5fb61..29d920516e0b 100644 --- a/include/linux/regulator/driver.h +++ b/include/linux/regulator/driver.h @@ -277,9 +277,9 @@ enum regulator_type { * @curr_table: Current limit mapping table (if table based mapping) * * @vsel_range_reg: Register for range selector when using pickable ranges - * and regulator_regmap_X_voltage_X_pickable functions. 
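/*
 * Sketch of the extended rcuwait_wait_event() above: callers now pass the
 * task state and must handle -EINTR for interruptible sleeps. The wait
 * object, condition flag and function name are illustrative; as noted
 * above, the caller still serializes writers around the wait.
 */
static int example_wait_for_done(struct rcuwait *w, bool *done)
{
        return rcuwait_wait_event(w, READ_ONCE(*done), TASK_INTERRUPTIBLE);
}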
+ * and ``regulator_map_*_voltage_*_pickable`` functions. * @vsel_range_mask: Mask for register bitfield used for range selector - * @vsel_reg: Register for selector when using regulator_regmap_X_voltage_ + * @vsel_reg: Register for selector when using ``regulator_map_*_voltage_*`` * @vsel_mask: Mask for register bitfield used for selector * @vsel_step: Specify the resolution of selector stepping when setting * voltage. If 0, then no stepping is done (requested selector is diff --git a/include/linux/remoteproc.h b/include/linux/remoteproc.h index 16ad66683ad0..9c07d7958c53 100644 --- a/include/linux/remoteproc.h +++ b/include/linux/remoteproc.h @@ -329,7 +329,7 @@ struct rproc; struct rproc_mem_entry { void *va; dma_addr_t dma; - int len; + size_t len; u32 da; void *priv; char name[32]; @@ -369,12 +369,14 @@ enum rsc_handling_status { * expects to find it * @sanity_check: sanity check the fw image * @get_boot_addr: get boot address to entry point specified in firmware + * @panic: optional callback to react to system panic, core will delay + * panic at least the returned number of milliseconds */ struct rproc_ops { int (*start)(struct rproc *rproc); int (*stop)(struct rproc *rproc); void (*kick)(struct rproc *rproc, int vqid); - void * (*da_to_va)(struct rproc *rproc, u64 da, int len); + void * (*da_to_va)(struct rproc *rproc, u64 da, size_t len); int (*parse_fw)(struct rproc *rproc, const struct firmware *fw); int (*handle_rsc)(struct rproc *rproc, u32 rsc_type, void *rsc, int offset, int avail); @@ -382,7 +384,8 @@ struct rproc_ops { struct rproc *rproc, const struct firmware *fw); int (*load)(struct rproc *rproc, const struct firmware *fw); int (*sanity_check)(struct rproc *rproc, const struct firmware *fw); - u32 (*get_boot_addr)(struct rproc *rproc, const struct firmware *fw); + u64 (*get_boot_addr)(struct rproc *rproc, const struct firmware *fw); + unsigned long (*panic)(struct rproc *rproc); }; /** @@ -498,7 +501,7 @@ struct rproc { int num_traces; struct list_head carveouts; struct list_head mappings; - u32 bootaddr; + u64 bootaddr; struct list_head rvdevs; struct list_head subdevs; struct idr notifyids; @@ -514,6 +517,7 @@ struct rproc { bool auto_boot; struct list_head dump_segments; int nb_vdev; + u8 elf_class; }; /** @@ -599,13 +603,13 @@ void rproc_add_carveout(struct rproc *rproc, struct rproc_mem_entry *mem); struct rproc_mem_entry * rproc_mem_entry_init(struct device *dev, - void *va, dma_addr_t dma, int len, u32 da, + void *va, dma_addr_t dma, size_t len, u32 da, int (*alloc)(struct rproc *, struct rproc_mem_entry *), int (*release)(struct rproc *, struct rproc_mem_entry *), const char *name, ...); struct rproc_mem_entry * -rproc_of_resm_mem_entry_init(struct device *dev, u32 of_resm_idx, int len, +rproc_of_resm_mem_entry_init(struct device *dev, u32 of_resm_idx, size_t len, u32 da, const char *name, ...); int rproc_boot(struct rproc *rproc); diff --git a/include/linux/remoteproc/qcom_q6v5_ipa_notify.h b/include/linux/remoteproc/qcom_q6v5_ipa_notify.h new file mode 100644 index 000000000000..0820edc0ab7d --- /dev/null +++ b/include/linux/remoteproc/qcom_q6v5_ipa_notify.h @@ -0,0 +1,82 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +/* Copyright (C) 2019 Linaro Ltd. 
*/ + +#ifndef __QCOM_Q6V5_IPA_NOTIFY_H__ +#define __QCOM_Q6V5_IPA_NOTIFY_H__ + +#if IS_ENABLED(CONFIG_QCOM_Q6V5_IPA_NOTIFY) + +#include <linux/remoteproc.h> + +enum qcom_rproc_event { + MODEM_STARTING = 0, /* Modem is about to be started */ + MODEM_RUNNING = 1, /* Startup complete; modem is operational */ + MODEM_STOPPING = 2, /* Modem is about to shut down */ + MODEM_CRASHED = 3, /* Modem has crashed (implies stopping) */ + MODEM_OFFLINE = 4, /* Modem is now offline */ + MODEM_REMOVING = 5, /* Modem is about to be removed */ +}; + +typedef void (*qcom_ipa_notify_t)(void *data, enum qcom_rproc_event event); + +struct qcom_rproc_ipa_notify { + struct rproc_subdev subdev; + + qcom_ipa_notify_t notify; + void *data; +}; + +/** + * qcom_add_ipa_notify_subdev() - Register IPA notification subdevice + * @rproc: rproc handle + * @ipa_notify: IPA notification subdevice handle + * + * Register the @ipa_notify subdevice with the @rproc so modem events + * can be sent to IPA when they occur. + * + * This is defined in "qcom_q6v5_ipa_notify.c". + */ +void qcom_add_ipa_notify_subdev(struct rproc *rproc, + struct qcom_rproc_ipa_notify *ipa_notify); + +/** + * qcom_remove_ipa_notify_subdev() - Remove IPA SSR subdevice + * @rproc: rproc handle + * @ipa_notify: IPA notification subdevice handle + * + * This is defined in "qcom_q6v5_ipa_notify.c". + */ +void qcom_remove_ipa_notify_subdev(struct rproc *rproc, + struct qcom_rproc_ipa_notify *ipa_notify); + +/** + * qcom_register_ipa_notify() - Register IPA notification function + * @rproc: Remote processor handle + * @notify: Non-null IPA notification callback function pointer + * @data: Data supplied to IPA notification callback function + * + * @Return: 0 if successful, or a negative error code otherwise + * + * This is defined in "qcom_q6v5_mss.c". + */ +int qcom_register_ipa_notify(struct rproc *rproc, qcom_ipa_notify_t notify, + void *data); +/** + * qcom_deregister_ipa_notify() - Deregister IPA notification function + * @rproc: Remote processor handle + * + * This is defined in "qcom_q6v5_mss.c". 
+ */ +void qcom_deregister_ipa_notify(struct rproc *rproc); + +#else /* !IS_ENABLED(CONFIG_QCOM_Q6V5_IPA_NOTIFY) */ + +struct qcom_rproc_ipa_notify { /* empty */ }; + +#define qcom_add_ipa_notify_subdev(rproc, ipa_notify) /* no-op */ +#define qcom_remove_ipa_notify_subdev(rproc, ipa_notify) /* no-op */ + +#endif /* !IS_ENABLED(CONFIG_QCOM_Q6V5_IPA_NOTIFY) */ + +#endif /* !__QCOM_Q6V5_IPA_NOTIFY_H__ */ diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h index beb9a9da1699..70ebef866cc8 100644 --- a/include/linux/rhashtable.h +++ b/include/linux/rhashtable.h @@ -972,9 +972,9 @@ static inline int rhashtable_lookup_insert_key( /** * rhashtable_lookup_get_insert_key - lookup and insert object into hash table * @ht: hash table + * @key: key * @obj: pointer to hash head inside object * @params: hash table parameters - * @data: pointer to element data already in hashes * * Just like rhashtable_lookup_insert_key(), but this function returns the * object if it exists, NULL if it does not and the insertion was successful, diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h index df0124eabece..c76b2f3b3ac4 100644 --- a/include/linux/ring_buffer.h +++ b/include/linux/ring_buffer.h @@ -135,10 +135,10 @@ void ring_buffer_read_finish(struct ring_buffer_iter *iter); struct ring_buffer_event * ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts); -struct ring_buffer_event * -ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts); +void ring_buffer_iter_advance(struct ring_buffer_iter *iter); void ring_buffer_iter_reset(struct ring_buffer_iter *iter); int ring_buffer_iter_empty(struct ring_buffer_iter *iter); +bool ring_buffer_iter_dropped(struct ring_buffer_iter *iter); unsigned long ring_buffer_size(struct trace_buffer *buffer, int cpu); diff --git a/include/linux/rtc.h b/include/linux/rtc.h index 23990bd29040..bba3db3f7efa 100644 --- a/include/linux/rtc.h +++ b/include/linux/rtc.h @@ -34,18 +34,6 @@ static inline time64_t rtc_tm_sub(struct rtc_time *lhs, struct rtc_time *rhs) return rtc_tm_to_time64(lhs) - rtc_tm_to_time64(rhs); } -static inline void rtc_time_to_tm(unsigned long time, struct rtc_time *tm) -{ - rtc_time64_to_tm(time, tm); -} - -static inline int rtc_tm_to_time(struct rtc_time *tm, unsigned long *time) -{ - *time = rtc_tm_to_time64(tm); - - return 0; -} - #include <linux/device.h> #include <linux/seq_file.h> #include <linux/cdev.h> diff --git a/include/linux/rwlock_types.h b/include/linux/rwlock_types.h index 857a72ceb794..3bd03e18061c 100644 --- a/include/linux/rwlock_types.h +++ b/include/linux/rwlock_types.h @@ -22,7 +22,11 @@ typedef struct { #define RWLOCK_MAGIC 0xdeaf1eed #ifdef CONFIG_DEBUG_LOCK_ALLOC -# define RW_DEP_MAP_INIT(lockname) .dep_map = { .name = #lockname } +# define RW_DEP_MAP_INIT(lockname) \ + .dep_map = { \ + .name = #lockname, \ + .wait_type_inner = LD_WAIT_CONFIG, \ + } #else # define RW_DEP_MAP_INIT(lockname) #endif diff --git a/include/linux/rwsem.h b/include/linux/rwsem.h index 00d6054687dd..7e5b2a4eb560 100644 --- a/include/linux/rwsem.h +++ b/include/linux/rwsem.h @@ -53,12 +53,6 @@ struct rw_semaphore { #endif }; -/* - * Setting all bits of the owner field except bit 0 will indicate - * that the rwsem is writer-owned with an unknown owner. 
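The qcom_q6v5_ipa_notify.h interface introduced above lets the IPA driver observe modem lifecycle events through the MSS remoteproc driver. A minimal sketch of a consumer, assuming the caller already holds the modem's struct rproc handle; the example_* names and the surrounding driver plumbing are illustrative, not from this patch:

    #include <linux/remoteproc.h>
    #include <linux/remoteproc/qcom_q6v5_ipa_notify.h>

    static void example_ipa_notify(void *data, enum qcom_rproc_event event)
    {
            switch (event) {
            case MODEM_RUNNING:
                    /* Firmware is up: safe to bring IPA endpoints online */
                    break;
            case MODEM_STOPPING:
            case MODEM_CRASHED:
                    /* Quiesce IPA before the modem goes away */
                    break;
            default:
                    break;
            }
    }

    static int example_ipa_bind(struct rproc *mss_rproc, void *priv)
    {
            /* Ask the MSS driver to call example_ipa_notify() on state changes */
            return qcom_register_ipa_notify(mss_rproc, example_ipa_notify, priv);
    }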
- */ -#define RWSEM_OWNER_UNKNOWN (-2L) - /* In all implementations count != 0 means locked */ static inline int rwsem_is_locked(struct rw_semaphore *sem) { @@ -71,7 +65,11 @@ static inline int rwsem_is_locked(struct rw_semaphore *sem) /* Common initializer macros and functions */ #ifdef CONFIG_DEBUG_LOCK_ALLOC -# define __RWSEM_DEP_MAP_INIT(lockname) , .dep_map = { .name = #lockname } +# define __RWSEM_DEP_MAP_INIT(lockname) \ + , .dep_map = { \ + .name = #lockname, \ + .wait_type_inner = LD_WAIT_SLEEP, \ + } #else # define __RWSEM_DEP_MAP_INIT(lockname) #endif diff --git a/include/linux/sched.h b/include/linux/sched.h index 04278493bf15..4418f5cb8324 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -356,28 +356,30 @@ struct util_est { } __attribute__((__aligned__(sizeof(u64)))); /* - * The load_avg/util_avg accumulates an infinite geometric series - * (see __update_load_avg() in kernel/sched/fair.c). + * The load/runnable/util_avg accumulates an infinite geometric series + * (see __update_load_avg_cfs_rq() in kernel/sched/pelt.c). * * [load_avg definition] * * load_avg = runnable% * scale_load_down(load) * - * where runnable% is the time ratio that a sched_entity is runnable. - * For cfs_rq, it is the aggregated load_avg of all runnable and - * blocked sched_entities. + * [runnable_avg definition] + * + * runnable_avg = runnable% * SCHED_CAPACITY_SCALE * * [util_avg definition] * * util_avg = running% * SCHED_CAPACITY_SCALE * - * where running% is the time ratio that a sched_entity is running on - * a CPU. For cfs_rq, it is the aggregated util_avg of all runnable - * and blocked sched_entities. + * where runnable% is the time ratio that a sched_entity is runnable and + * running% the time ratio that a sched_entity is running. + * + * For cfs_rq, they are the aggregated values of all runnable and blocked + * sched_entities. * - * load_avg and util_avg don't direcly factor frequency scaling and CPU - * capacity scaling. The scaling is done through the rq_clock_pelt that - * is used for computing those signals (see update_rq_clock_pelt()) + * The load/runnable/util_avg doesn't direcly factor frequency scaling and CPU + * capacity scaling. The scaling is done through the rq_clock_pelt that is used + * for computing those signals (see update_rq_clock_pelt()) * * N.B., the above ratios (runnable% and running%) themselves are in the * range of [0, 1]. To do fixed point arithmetics, we therefore scale them @@ -401,11 +403,11 @@ struct util_est { struct sched_avg { u64 last_update_time; u64 load_sum; - u64 runnable_load_sum; + u64 runnable_sum; u32 util_sum; u32 period_contrib; unsigned long load_avg; - unsigned long runnable_load_avg; + unsigned long runnable_avg; unsigned long util_avg; struct util_est util_est; } ____cacheline_aligned; @@ -449,7 +451,6 @@ struct sched_statistics { struct sched_entity { /* For load-balancing: */ struct load_weight load; - unsigned long runnable_weight; struct rb_node run_node; struct list_head group_node; unsigned int on_rq; @@ -470,6 +471,8 @@ struct sched_entity { struct cfs_rq *cfs_rq; /* rq "owned" by this entity/group: */ struct cfs_rq *my_q; + /* cached value of my_q->h_nr_running */ + unsigned long runnable_weight; #endif #ifdef CONFIG_SMP @@ -782,9 +785,12 @@ struct task_struct { unsigned frozen:1; #endif #ifdef CONFIG_BLK_CGROUP - /* to be used once the psi infrastructure lands upstream. 
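To make the reworked PELT definitions above concrete (the numbers are illustrative and ignore the geometric-series decay): with SCHED_CAPACITY_SCALE = 1024, consider a nice-0 task (scaled-down weight 1024) that is runnable 50% of the time but actually running for only 25%, the rest spent waiting on a busy CPU. It converges to roughly

        runnable_avg ~= 0.50 * 1024 = 512
        util_avg     ~= 0.25 * 1024 = 256
        load_avg     ~= 0.50 * 1024 = 512

so runnable_avg reflects queue pressure while util_avg reflects actual CPU demand.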
*/ unsigned use_memdelay:1; #endif +#ifdef CONFIG_PSI + /* Stalled due to lack of memory */ + unsigned in_memstall:1; +#endif unsigned long atomic_flags; /* Flags requiring atomic access. */ @@ -939,8 +945,8 @@ struct task_struct { struct seccomp seccomp; /* Thread group tracking: */ - u32 parent_exec_id; - u32 self_exec_id; + u64 parent_exec_id; + u64 self_exec_id; /* Protection against (de-)allocation: mm, files, fs, tty, keyrings, mems_allowed, mempolicy: */ spinlock_t alloc_lock; @@ -970,6 +976,7 @@ struct task_struct { #ifdef CONFIG_TRACE_IRQFLAGS unsigned int irq_events; + unsigned int hardirq_threaded; unsigned long hardirq_enable_ip; unsigned long hardirq_disable_ip; unsigned int hardirq_enable_event; @@ -982,6 +989,7 @@ struct task_struct { unsigned int softirq_enable_event; int softirqs_enabled; int softirq_context; + int irq_config; #endif #ifdef CONFIG_LOCKDEP @@ -1477,7 +1485,6 @@ extern struct pid *cad_pid; #define PF_KTHREAD 0x00200000 /* I am a kernel thread */ #define PF_RANDOMIZE 0x00400000 /* Randomize virtual address space */ #define PF_SWAPWRITE 0x00800000 /* Allowed to write to swap */ -#define PF_MEMSTALL 0x01000000 /* Stalled due to lack of memory */ #define PF_UMH 0x02000000 /* I'm an Usermodehelper process */ #define PF_NO_SETAFFINITY 0x04000000 /* Userland is not allowed to meddle with cpus_mask */ #define PF_MCE_EARLY 0x08000000 /* Early kill for mce process policy */ diff --git a/include/linux/sched/signal.h b/include/linux/sched/signal.h index 88050259c466..3e5b090c16d4 100644 --- a/include/linux/sched/signal.h +++ b/include/linux/sched/signal.h @@ -10,6 +10,8 @@ #include <linux/cred.h> #include <linux/refcount.h> #include <linux/posix-timers.h> +#include <linux/mm_types.h> +#include <asm/ptrace.h> /* * Types defining task->signal and task->sighand and APIs using them: @@ -224,7 +226,14 @@ struct signal_struct { struct mutex cred_guard_mutex; /* guard against foreign influences on * credential calculations - * (notably. ptrace) */ + * (notably. ptrace) + * Deprecated do not use in new code. + * Use exec_update_mutex instead. + */ + struct mutex exec_update_mutex; /* Held while task_struct is being + * updated during exec, and may have + * inconsistent permissions. + */ } __randomize_layout; /* @@ -370,6 +379,20 @@ static inline int signal_pending_state(long state, struct task_struct *p) } /* + * This should only be used in fault handlers to decide whether we + * should stop the current fault routine to handle the signals + * instead, especially with the case where we've got interrupted with + * a VM_FAULT_RETRY. + */ +static inline bool fault_signal_pending(vm_fault_t fault_flags, + struct pt_regs *regs) +{ + return unlikely((fault_flags & VM_FAULT_RETRY) && + (fatal_signal_pending(current) || + (user_mode(regs) && signal_pending(current)))); +} + +/* * Reevaluate whether the task has signals pending delivery. * Wake the task if so. * This is required every time the blocked sigset_t changes. diff --git a/include/linux/sched/task.h b/include/linux/sched/task.h index f1879884238e..38359071236a 100644 --- a/include/linux/sched/task.h +++ b/include/linux/sched/task.h @@ -13,6 +13,7 @@ struct task_struct; struct rusage; union thread_union; +struct css_set; /* All the bits taken by the old clone syscall. 
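fault_signal_pending(), added to <linux/sched/signal.h> above, centralizes a check that architectures previously open-coded after handle_mm_fault(). A minimal sketch of the intended call site; the handler skeleton and all names other than the two kernel functions are illustrative:

    #include <linux/mm.h>
    #include <linux/sched/signal.h>

    static void example_do_page_fault(struct pt_regs *regs,
                                      struct vm_area_struct *vma,
                                      unsigned long address, unsigned int flags)
    {
            vm_fault_t fault = handle_mm_fault(vma, address, flags);

            /*
             * A signal arrived while the fault was waiting (VM_FAULT_RETRY):
             * return early so it can be handled. User-mode faults need no
             * further fixup here; kernel-mode faults would take the
             * exception-table path instead (omitted).
             */
            if (fault_signal_pending(fault, regs))
                    return;

            /* ... normal retry / error handling continues here ... */
    }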
*/ #define CLONE_LEGACY_FLAGS 0xffffffffULL @@ -29,6 +30,9 @@ struct kernel_clone_args { pid_t *set_tid; /* Number of elements in *set_tid */ size_t set_tid_size; + int cgroup; + struct cgroup *cgrp; + struct css_set *cset; }; /* diff --git a/include/linux/sched/topology.h b/include/linux/sched/topology.h index f341163fedc9..af9319e4cfb9 100644 --- a/include/linux/sched/topology.h +++ b/include/linux/sched/topology.h @@ -225,6 +225,14 @@ unsigned long arch_scale_cpu_capacity(int cpu) } #endif +#ifndef arch_scale_thermal_pressure +static __always_inline +unsigned long arch_scale_thermal_pressure(int cpu) +{ + return 0; +} +#endif + static inline int task_node(const struct task_struct *p) { return cpu_to_node(task_cpu(p)); diff --git a/include/linux/seccomp.h b/include/linux/seccomp.h index 03583b6d1416..4192369b8418 100644 --- a/include/linux/seccomp.h +++ b/include/linux/seccomp.h @@ -7,7 +7,8 @@ #define SECCOMP_FILTER_FLAG_MASK (SECCOMP_FILTER_FLAG_TSYNC | \ SECCOMP_FILTER_FLAG_LOG | \ SECCOMP_FILTER_FLAG_SPEC_ALLOW | \ - SECCOMP_FILTER_FLAG_NEW_LISTENER) + SECCOMP_FILTER_FLAG_NEW_LISTENER | \ + SECCOMP_FILTER_FLAG_TSYNC_ESRCH) #ifdef CONFIG_SECCOMP diff --git a/include/linux/security.h b/include/linux/security.h index 64b19f050343..a8d9310472df 100644 --- a/include/linux/security.h +++ b/include/linux/security.h @@ -30,7 +30,6 @@ #include <linux/err.h> #include <linux/string.h> #include <linux/mm.h> -#include <linux/fs.h> struct linux_binprm; struct cred; diff --git a/include/linux/selection.h b/include/linux/selection.h index e2c1f96bf059..5b890ef5b59f 100644 --- a/include/linux/selection.h +++ b/include/linux/selection.h @@ -11,8 +11,8 @@ #include <linux/tiocl.h> #include <linux/vt_buffer.h> -extern struct vc_data *sel_cons; struct tty_struct; +struct vc_data; extern void clear_selection(void); extern int set_selection_user(const struct tiocl_selection __user *sel, @@ -24,6 +24,8 @@ extern int sel_loadlut(char __user *p); extern int mouse_reporting(void); extern void mouse_report(struct tty_struct * tty, int butt, int mrx, int mry); +bool vc_is_sel(struct vc_data *vc); + extern int console_blanked; extern const unsigned char color_table[]; diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h index 770c2bf3aa43..1672cf6f7614 100644 --- a/include/linux/seq_file.h +++ b/include/linux/seq_file.h @@ -21,7 +21,6 @@ struct seq_file { size_t pad_until; loff_t index; loff_t read_pos; - u64 version; struct mutex lock; const struct seq_operations *op; int poll_event; diff --git a/include/linux/serial_8250.h b/include/linux/serial_8250.h index 6a8e8c48c882..6545f8cfc8fa 100644 --- a/include/linux/serial_8250.h +++ b/include/linux/serial_8250.h @@ -81,6 +81,7 @@ struct uart_8250_em485 { struct hrtimer stop_tx_timer; /* "rs485 stop tx" timer */ struct hrtimer *active_timer; /* pointer to active timer */ struct uart_8250_port *port; /* for hrtimer callbacks */ + unsigned int tx_stopped:1; /* tx is currently stopped */ }; /* @@ -132,6 +133,8 @@ struct uart_8250_port { void (*dl_write)(struct uart_8250_port *, int); struct uart_8250_em485 *em485; + void (*rs485_start_tx)(struct uart_8250_port *); + void (*rs485_stop_tx)(struct uart_8250_port *); /* Serial port overrun backoff */ struct delayed_work overrun_backoff; @@ -176,6 +179,7 @@ void serial8250_set_defaults(struct uart_8250_port *up); void serial8250_console_write(struct uart_8250_port *up, const char *s, unsigned int count); int serial8250_console_setup(struct uart_port *port, char *options, bool probe); +int 
serial8250_console_exit(struct uart_port *port); extern void serial8250_set_isa_configurator(void (*v) (int port, struct uart_port *up, diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h index 52404ef1694e..92f5eba86052 100644 --- a/include/linux/serial_core.h +++ b/include/linux/serial_core.h @@ -243,6 +243,7 @@ struct uart_port { unsigned long sysrq; /* sysrq timeout */ unsigned int sysrq_ch; /* char for sysrq */ unsigned char has_sysrq; + unsigned char sysrq_seq; /* index in sysrq_toggle_seq */ unsigned char hub6; /* this should be in the 8250 driver */ unsigned char suspended; @@ -461,8 +462,7 @@ extern void uart_insert_char(struct uart_port *port, unsigned int status, extern int uart_handle_sysrq_char(struct uart_port *port, unsigned int ch); extern int uart_prepare_sysrq_char(struct uart_port *port, unsigned int ch); -extern void uart_unlock_and_check_sysrq(struct uart_port *port, - unsigned long irqflags); +extern void uart_unlock_and_check_sysrq(struct uart_port *port, unsigned long flags); extern int uart_handle_break(struct uart_port *port); /* diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h index d56fefef8905..7a35a6901221 100644 --- a/include/linux/shmem_fs.h +++ b/include/linux/shmem_fs.h @@ -78,6 +78,7 @@ extern void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end); extern int shmem_unuse(unsigned int type, bool frontswap, unsigned long *fs_pages_to_unuse); +extern bool shmem_huge_enabled(struct vm_area_struct *vma); extern unsigned long shmem_swap_usage(struct vm_area_struct *vma); extern unsigned long shmem_partial_swap_usage(struct address_space *mapping, pgoff_t start, pgoff_t end); @@ -114,15 +115,6 @@ static inline bool shmem_file(struct file *file) extern bool shmem_charge(struct inode *inode, long pages); extern void shmem_uncharge(struct inode *inode, long pages); -#ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE -extern bool shmem_huge_enabled(struct vm_area_struct *vma); -#else -static inline bool shmem_huge_enabled(struct vm_area_struct *vma) -{ - return false; -} -#endif - #ifdef CONFIG_SHMEM extern int shmem_mcopy_atomic_pte(struct mm_struct *dst_mm, pmd_t *dst_pmd, struct vm_area_struct *dst_vma, diff --git a/include/linux/signal.h b/include/linux/signal.h index 1a5f88316b08..05bacd2ab135 100644 --- a/include/linux/signal.h +++ b/include/linux/signal.h @@ -444,12 +444,12 @@ void signals_init(void); int restore_altstack(const stack_t __user *); int __save_altstack(stack_t __user *, unsigned long); -#define save_altstack_ex(uss, sp) do { \ +#define unsafe_save_altstack(uss, sp, label) do { \ stack_t __user *__uss = uss; \ struct task_struct *t = current; \ - put_user_ex((void __user *)t->sas_ss_sp, &__uss->ss_sp); \ - put_user_ex(t->sas_ss_flags, &__uss->ss_flags); \ - put_user_ex(t->sas_ss_size, &__uss->ss_size); \ + unsafe_put_user((void __user *)t->sas_ss_sp, &__uss->ss_sp, label); \ + unsafe_put_user(t->sas_ss_flags, &__uss->ss_flags, label); \ + unsafe_put_user(t->sas_ss_size, &__uss->ss_size, label); \ if (t->sas_ss_flags & SS_AUTODISARM) \ sas_ss_reset(t); \ } while (0); diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 5b50278c4bc8..3a2ac7072dbb 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -47,8 +47,8 @@ * A. IP checksum related features * * Drivers advertise checksum offload capabilities in the features of a device. 
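unsafe_save_altstack(), which replaces save_altstack_ex() in <linux/signal.h> above, is built on unsafe_put_user() and therefore must run inside a user_access_begin()/user_access_end() window with a local fault label. A sketch of that pattern in a signal-frame setup path; the frame layout and function are hypothetical, only the access pattern is the point:

    #include <linux/signal.h>
    #include <linux/uaccess.h>

    /* Hypothetical user-space frame layout, for illustration only */
    struct example_sigframe {
            int sig;
            stack_t uc_stack;
    };

    static int example_setup_frame(struct example_sigframe __user *frame,
                                   int sig, unsigned long usp)
    {
            if (!user_access_begin(frame, sizeof(*frame)))
                    return -EFAULT;
            unsafe_put_user(sig, &frame->sig, Efault);
            /* Replaces the old save_altstack_ex(); jumps to Efault on a fault */
            unsafe_save_altstack(&frame->uc_stack, usp, Efault);
            user_access_end();
            return 0;

    Efault:
            user_access_end();
            return -EFAULT;
    }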
- * From the stack's point of view these are capabilities offered by the driver, - * a driver typically only advertises features that it is capable of offloading + * From the stack's point of view these are capabilities offered by the driver. + * A driver typically only advertises features that it is capable of offloading * to its device. * * The checksum related features are: @@ -63,7 +63,7 @@ * TCP or UDP packets over IPv4. These are specifically * unencapsulated packets of the form IPv4|TCP or * IPv4|UDP where the Protocol field in the IPv4 header - * is TCP or UDP. The IPv4 header may contain IP options + * is TCP or UDP. The IPv4 header may contain IP options. * This feature cannot be set in features for a device * with NETIF_F_HW_CSUM also set. This feature is being * DEPRECATED (see below). @@ -79,13 +79,13 @@ * DEPRECATED (see below). * * NETIF_F_RXCSUM - Driver (device) performs receive checksum offload. - * This flag is used only used to disable the RX checksum + * This flag is only used to disable the RX checksum * feature for a device. The stack will accept receive * checksum indication in packets received on a device * regardless of whether NETIF_F_RXCSUM is set. * * B. Checksumming of received packets by device. Indication of checksum - * verification is in set skb->ip_summed. Possible values are: + * verification is set in skb->ip_summed. Possible values are: * * CHECKSUM_NONE: * @@ -115,16 +115,16 @@ * the packet minus one that have been verified as CHECKSUM_UNNECESSARY. * For instance if a device receives an IPv6->UDP->GRE->IPv4->TCP packet * and a device is able to verify the checksums for UDP (possibly zero), - * GRE (checksum flag is set), and TCP-- skb->csum_level would be set to + * GRE (checksum flag is set) and TCP, skb->csum_level would be set to * two. If the device were only able to verify the UDP checksum and not - * GRE, either because it doesn't support GRE checksum of because GRE + * GRE, either because it doesn't support GRE checksum or because GRE * checksum is bad, skb->csum_level would be set to zero (TCP checksum is * not considered in this case). * * CHECKSUM_COMPLETE: * * This is the most generic way. The device supplied checksum of the _whole_ - * packet as seen by netif_rx() and fills out in skb->csum. Meaning, the + * packet as seen by netif_rx() and fills in skb->csum. This means the * hardware doesn't need to parse L3/L4 headers to implement this. * * Notes: @@ -153,8 +153,8 @@ * from skb->csum_start up to the end, and to record/write the checksum at * offset skb->csum_start + skb->csum_offset. A driver may verify that the * csum_start and csum_offset values are valid values given the length and - * offset of the packet, however they should not attempt to validate that the - * checksum refers to a legitimate transport layer checksum-- it is the + * offset of the packet, but it should not attempt to validate that the + * checksum refers to a legitimate transport layer checksum -- it is the * purview of the stack to validate that csum_start and csum_offset are set * correctly. * @@ -178,18 +178,18 @@ * * CHECKSUM_UNNECESSARY: * - * This has the same meaning on as CHECKSUM_NONE for checksum offload on + * This has the same meaning as CHECKSUM_NONE for checksum offload on * output. * * CHECKSUM_COMPLETE: * Not used in checksum output. If a driver observes a packet with this value - * set in skbuff, if should treat as CHECKSUM_NONE being set. + * set in skbuff, it should treat the packet as if CHECKSUM_NONE were set. * * D. 
Non-IP checksum (CRC) offloads * * NETIF_F_SCTP_CRC - This feature indicates that a device is capable of * offloading the SCTP CRC in a packet. To perform this offload the stack - * will set set csum_start and csum_offset accordingly, set ip_summed to + * will set csum_start and csum_offset accordingly, set ip_summed to * CHECKSUM_PARTIAL and set csum_not_inet to 1, to provide an indication in * the skbuff that the CHECKSUM_PARTIAL refers to CRC32c. * A driver that supports both IP checksum offload and SCTP CRC32c offload @@ -200,10 +200,10 @@ * NETIF_F_FCOE_CRC - This feature indicates that a device is capable of * offloading the FCOE CRC in a packet. To perform this offload the stack * will set ip_summed to CHECKSUM_PARTIAL and set csum_start and csum_offset - * accordingly. Note the there is no indication in the skbuff that the - * CHECKSUM_PARTIAL refers to an FCOE checksum, a driver that supports + * accordingly. Note that there is no indication in the skbuff that the + * CHECKSUM_PARTIAL refers to an FCOE checksum, so a driver that supports * both IP checksum offload and FCOE CRC offload must verify which offload - * is configured for a packet presumably by inspecting packet headers. + * is configured for a packet, presumably by inspecting packet headers. * * E. Checksumming on output with GSO. * @@ -211,9 +211,9 @@ * is implied by the SKB_GSO_* flags in gso_type. Most obviously, if the * gso_type is SKB_GSO_TCPV4 or SKB_GSO_TCPV6, TCP checksum offload as * part of the GSO operation is implied. If a checksum is being offloaded - * with GSO then ip_summed is CHECKSUM_PARTIAL, csum_start and csum_offset - * are set to refer to the outermost checksum being offload (two offloaded - * checksums are possible with UDP encapsulation). + * with GSO then ip_summed is CHECKSUM_PARTIAL, and both csum_start and + * csum_offset are set to refer to the outermost checksum being offloaded + * (two offloaded checksums are possible with UDP encapsulation). */ /* Don't change this without changing skb_csum_unnecessary! */ @@ -645,8 +645,8 @@ typedef unsigned char *sk_buff_data_t; * @offload_l3_fwd_mark: Packet was L3-forwarded in hardware * @tc_skip_classify: do not classify packet. 
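As a concrete reading of the CHECKSUM_PARTIAL contract described above: on transmit the driver (or its software fallback) must checksum from skb->csum_start to the end of the packet and store the result at csum_start + csum_offset. A minimal sketch, essentially what the core skb_checksum_help() does, with linearization checks and the UDP zero-checksum special case omitted:

    #include <linux/skbuff.h>
    #include <net/checksum.h>

    static void example_tx_csum_fallback(struct sk_buff *skb)
    {
            unsigned int start = skb_checksum_start_offset(skb);
            __wsum csum;

            if (skb->ip_summed != CHECKSUM_PARTIAL)
                    return;

            /* Checksum the payload from csum_start to the end of the packet */
            csum = skb_checksum(skb, start, skb->len - start, 0);

            /* Fold it and store it where the stack asked us to */
            *(__sum16 *)(skb->data + start + skb->csum_offset) = csum_fold(csum);

            skb->ip_summed = CHECKSUM_NONE;
    }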
set by IFB device * @tc_at_ingress: used within tc_classify to distinguish in/egress - * @tc_redirected: packet was redirected by a tc action - * @tc_from_ingress: if tc_redirected, tc_at_ingress at time of redirect + * @redirected: packet was redirected by packet classifier + * @from_ingress: packet was redirected from the ingress path * @peeked: this packet has been seen already, so stats have been * done for it, don't do them again * @nf_trace: netfilter packet trace flag @@ -848,8 +848,10 @@ struct sk_buff { #ifdef CONFIG_NET_CLS_ACT __u8 tc_skip_classify:1; __u8 tc_at_ingress:1; - __u8 tc_redirected:1; - __u8 tc_from_ingress:1; +#endif +#ifdef CONFIG_NET_REDIRECT + __u8 redirected:1; + __u8 from_ingress:1; #endif #ifdef CONFIG_TLS_DEVICE __u8 decrypted:1; @@ -3514,23 +3516,15 @@ int __skb_wait_for_more_packets(struct sock *sk, struct sk_buff_head *queue, struct sk_buff *__skb_try_recv_from_queue(struct sock *sk, struct sk_buff_head *queue, unsigned int flags, - void (*destructor)(struct sock *sk, - struct sk_buff *skb), int *off, int *err, struct sk_buff **last); struct sk_buff *__skb_try_recv_datagram(struct sock *sk, struct sk_buff_head *queue, - unsigned int flags, - void (*destructor)(struct sock *sk, - struct sk_buff *skb), - int *off, int *err, + unsigned int flags, int *off, int *err, struct sk_buff **last); struct sk_buff *__skb_recv_datagram(struct sock *sk, struct sk_buff_head *sk_queue, - unsigned int flags, - void (*destructor)(struct sock *sk, - struct sk_buff *skb), - int *off, int *err); + unsigned int flags, int *off, int *err); struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags, int noblock, int *err); __poll_t datagram_poll(struct file *file, struct socket *sock, @@ -4395,8 +4389,8 @@ struct skb_gso_cb { __wsum csum; __u16 csum_start; }; -#define SKB_SGO_CB_OFFSET 32 -#define SKB_GSO_CB(skb) ((struct skb_gso_cb *)((skb)->cb + SKB_SGO_CB_OFFSET)) +#define SKB_GSO_CB_OFFSET 32 +#define SKB_GSO_CB(skb) ((struct skb_gso_cb *)((skb)->cb + SKB_GSO_CB_OFFSET)) static inline int skb_tnl_header_len(const struct sk_buff *inner_skb) { @@ -4579,5 +4573,31 @@ static inline __wsum lco_csum(struct sk_buff *skb) return csum_partial(l4_hdr, csum_start - l4_hdr, partial); } +static inline bool skb_is_redirected(const struct sk_buff *skb) +{ +#ifdef CONFIG_NET_REDIRECT + return skb->redirected; +#else + return false; +#endif +} + +static inline void skb_set_redirected(struct sk_buff *skb, bool from_ingress) +{ +#ifdef CONFIG_NET_REDIRECT + skb->redirected = 1; + skb->from_ingress = from_ingress; + if (skb->from_ingress) + skb->tstamp = 0; +#endif +} + +static inline void skb_reset_redirect(struct sk_buff *skb) +{ +#ifdef CONFIG_NET_REDIRECT + skb->redirected = 0; +#endif +} + #endif /* __KERNEL__ */ #endif /* _LINUX_SKBUFF_H */ diff --git a/include/linux/skmsg.h b/include/linux/skmsg.h index 14d61bba0b79..8a709f63c5e5 100644 --- a/include/linux/skmsg.h +++ b/include/linux/skmsg.h @@ -323,14 +323,6 @@ static inline void sk_psock_free_link(struct sk_psock_link *link) } struct sk_psock_link *sk_psock_link_pop(struct sk_psock *psock); -#if defined(CONFIG_BPF_STREAM_PARSER) -void sk_psock_unlink(struct sock *sk, struct sk_psock_link *link); -#else -static inline void sk_psock_unlink(struct sock *sk, - struct sk_psock_link *link) -{ -} -#endif void __sk_psock_purge_ingress_msg(struct sk_psock *psock); @@ -347,33 +339,37 @@ static inline void sk_psock_update_proto(struct sock *sk, struct sk_psock *psock, struct proto *ops) { - psock->saved_unhash = sk->sk_prot->unhash; - 
psock->saved_close = sk->sk_prot->close; - psock->saved_write_space = sk->sk_write_space; + /* Initialize saved callbacks and original proto only once, since this + * function may be called multiple times for a psock, e.g. when + * psock->progs.msg_parser is updated. + * + * Since we've not installed the new proto, psock is not yet in use and + * we can initialize it without synchronization. + */ + if (!psock->sk_proto) { + struct proto *orig = READ_ONCE(sk->sk_prot); - psock->sk_proto = sk->sk_prot; - sk->sk_prot = ops; + psock->saved_unhash = orig->unhash; + psock->saved_close = orig->close; + psock->saved_write_space = sk->sk_write_space; + + psock->sk_proto = orig; + } + + /* Pairs with lockless read in sk_clone_lock() */ + WRITE_ONCE(sk->sk_prot, ops); } static inline void sk_psock_restore_proto(struct sock *sk, struct sk_psock *psock) { sk->sk_prot->unhash = psock->saved_unhash; - - if (psock->sk_proto) { - struct inet_connection_sock *icsk = inet_csk(sk); - bool has_ulp = !!icsk->icsk_ulp_data; - - if (has_ulp) { - tcp_update_ulp(sk, psock->sk_proto, - psock->saved_write_space); - } else { - sk->sk_prot = psock->sk_proto; - sk->sk_write_space = psock->saved_write_space; - } - psock->sk_proto = NULL; + if (inet_csk_has_ulp(sk)) { + tcp_update_ulp(sk, psock->sk_proto, psock->saved_write_space); } else { sk->sk_write_space = psock->saved_write_space; + /* Pairs with lockless read in sk_clone_lock() */ + WRITE_ONCE(sk->sk_prot, psock->sk_proto); } } @@ -395,26 +391,6 @@ static inline bool sk_psock_test_state(const struct sk_psock *psock, return test_bit(bit, &psock->state); } -static inline struct sk_psock *sk_psock_get_checked(struct sock *sk) -{ - struct sk_psock *psock; - - rcu_read_lock(); - psock = sk_psock(sk); - if (psock) { - if (sk->sk_prot->recvmsg != tcp_bpf_recvmsg) { - psock = ERR_PTR(-EBUSY); - goto out; - } - - if (!refcount_inc_not_zero(&psock->refcnt)) - psock = ERR_PTR(-EBUSY); - } -out: - rcu_read_unlock(); - return psock; -} - static inline struct sk_psock *sk_psock_get(struct sock *sk) { struct sk_psock *psock; diff --git a/include/linux/soc/qcom/apr.h b/include/linux/soc/qcom/apr.h index c5d52e2cb275..7f0bc3cf4d61 100644 --- a/include/linux/soc/qcom/apr.h +++ b/include/linux/soc/qcom/apr.h @@ -85,6 +85,7 @@ struct apr_device { uint16_t domain_id; uint32_t version; char name[APR_NAME_SIZE]; + const char *service_path; spinlock_t lock; struct list_head node; }; diff --git a/include/linux/soc/qcom/pdr.h b/include/linux/soc/qcom/pdr.h new file mode 100644 index 000000000000..83a8ea612e69 --- /dev/null +++ b/include/linux/soc/qcom/pdr.h @@ -0,0 +1,29 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __QCOM_PDR_HELPER__ +#define __QCOM_PDR_HELPER__ + +#include <linux/soc/qcom/qmi.h> + +#define SERVREG_NAME_LENGTH 64 + +struct pdr_service; +struct pdr_handle; + +enum servreg_service_state { + SERVREG_LOCATOR_ERR = 0x1, + SERVREG_SERVICE_STATE_DOWN = 0x0FFFFFFF, + SERVREG_SERVICE_STATE_UP = 0x1FFFFFFF, + SERVREG_SERVICE_STATE_EARLY_DOWN = 0x2FFFFFFF, + SERVREG_SERVICE_STATE_UNINIT = 0x7FFFFFFF, +}; + +struct pdr_handle *pdr_handle_alloc(void (*status)(int state, + char *service_path, + void *priv), void *priv); +struct pdr_service *pdr_add_lookup(struct pdr_handle *pdr, + const char *service_name, + const char *service_path); +int pdr_restart_pd(struct pdr_handle *pdr, struct pdr_service *pds); +void pdr_handle_release(struct pdr_handle *pdr); + +#endif diff --git a/include/linux/soc/qcom/qmi.h b/include/linux/soc/qcom/qmi.h index 5efa2b67fa55..e712f94b89fc 100644 --- 
a/include/linux/soc/qcom/qmi.h +++ b/include/linux/soc/qcom/qmi.h @@ -88,6 +88,7 @@ struct qmi_elem_info { #define QMI_ERR_CLIENT_IDS_EXHAUSTED_V01 5 #define QMI_ERR_INVALID_ID_V01 41 #define QMI_ERR_ENCODING_V01 58 +#define QMI_ERR_DISABLED_V01 69 #define QMI_ERR_INCOMPATIBLE_STATE_V01 90 #define QMI_ERR_NOT_SUPPORTED_V01 94 diff --git a/include/linux/soc/qcom/smd-rpm.h b/include/linux/soc/qcom/smd-rpm.h index 9e4fdd861a51..da304ce8c8f7 100644 --- a/include/linux/soc/qcom/smd-rpm.h +++ b/include/linux/soc/qcom/smd-rpm.h @@ -10,6 +10,7 @@ struct qcom_smd_rpm; /* * Constants used for addressing resources in the RPM. */ +#define QCOM_SMD_RPM_BBYB 0x62796262 #define QCOM_SMD_RPM_BOBB 0x62626f62 #define QCOM_SMD_RPM_BOOST 0x61747362 #define QCOM_SMD_RPM_BUS_CLK 0x316b6c63 diff --git a/include/linux/socket.h b/include/linux/socket.h index 2d2313403101..54338fac45cb 100644 --- a/include/linux/socket.h +++ b/include/linux/socket.h @@ -391,6 +391,10 @@ extern int recvmsg_copy_msghdr(struct msghdr *msg, struct user_msghdr __user *umsg, unsigned flags, struct sockaddr __user **uaddr, struct iovec **iov); +extern int __copy_msghdr_from_user(struct msghdr *kmsg, + struct user_msghdr __user *umsg, + struct sockaddr __user **save_addr, + struct iovec __user **uiov, size_t *nsegs); /* helpers which do the actual work for syscalls */ extern int __sys_recvfrom(int fd, void __user *ubuf, size_t size, @@ -401,7 +405,8 @@ extern int __sys_sendto(int fd, void __user *buff, size_t len, int addr_len); extern int __sys_accept4_file(struct file *file, unsigned file_flags, struct sockaddr __user *upeer_sockaddr, - int __user *upeer_addrlen, int flags); + int __user *upeer_addrlen, int flags, + unsigned long nofile); extern int __sys_accept4(int fd, struct sockaddr __user *upeer_sockaddr, int __user *upeer_addrlen, int flags); extern int __sys_socket(int family, int type, int protocol); diff --git a/include/linux/soundwire/sdw.h b/include/linux/soundwire/sdw.h index b451bb622335..00f5826092e3 100644 --- a/include/linux/soundwire/sdw.h +++ b/include/linux/soundwire/sdw.h @@ -80,6 +80,21 @@ enum sdw_slave_status { }; /** + * enum sdw_clk_stop_type: clock stop operations + * + * @SDW_CLK_PRE_PREPARE: pre clock stop prepare + * @SDW_CLK_POST_PREPARE: post clock stop prepare + * @SDW_CLK_PRE_DEPREPARE: pre clock stop de-prepare + * @SDW_CLK_POST_DEPREPARE: post clock stop de-prepare + */ +enum sdw_clk_stop_type { + SDW_CLK_PRE_PREPARE = 0, + SDW_CLK_POST_PREPARE, + SDW_CLK_PRE_DEPREPARE, + SDW_CLK_POST_DEPREPARE, +}; + +/** * enum sdw_command_response - Command response as defined by SDW spec * @SDW_CMD_OK: cmd was successful * @SDW_CMD_IGNORED: cmd was ignored @@ -284,6 +299,7 @@ struct sdw_dpn_audio_mode { * @max_async_buffer: Number of samples that this port can buffer in * asynchronous modes * @block_pack_mode: Type of block port mode supported + * @read_only_wordlength: Read Only wordlength field in DPN_BlockCtrl1 register * @port_encoding: Payload Channel Sample encoding schemes supported * @audio_modes: Audio modes supported */ @@ -307,6 +323,7 @@ struct sdw_dpn_prop { u32 modes; u32 max_async_buffer; bool block_pack_mode; + bool read_only_wordlength; u32 port_encoding; struct sdw_dpn_audio_mode *audio_modes; }; @@ -424,6 +441,29 @@ struct sdw_slave_id { __u8 sdw_version:4; }; +/* + * Helper macros to extract the MIPI-defined IDs + * + * Spec definition + * Register Bit Contents + * DevId_0 [7:4] 47:44 sdw_version + * DevId_0 [3:0] 43:40 unique_id + * DevId_1 39:32 mfg_id [15:8] + * DevId_2 31:24 mfg_id [7:0] + * 
DevId_3 23:16 part_id [15:8] + * DevId_4 15:08 part_id [7:0] + * DevId_5 07:00 class_id + * + * The MIPI DisCo for SoundWire defines in addition the link_id as bits 51:48 + */ + +#define SDW_DISCO_LINK_ID(adr) (((adr) >> 48) & GENMASK(3, 0)) +#define SDW_VERSION(adr) (((adr) >> 44) & GENMASK(3, 0)) +#define SDW_UNIQUE_ID(adr) (((adr) >> 40) & GENMASK(3, 0)) +#define SDW_MFG_ID(adr) (((adr) >> 24) & GENMASK(15, 0)) +#define SDW_PART_ID(adr) (((adr) >> 8) & GENMASK(15, 0)) +#define SDW_CLASS_ID(adr) ((adr) & GENMASK(7, 0)) + /** * struct sdw_slave_intr_status - Slave interrupt status * @control_port: control port status @@ -533,6 +573,11 @@ struct sdw_slave_ops { int (*port_prep)(struct sdw_slave *slave, struct sdw_prepare_ch *prepare_ch, enum sdw_port_prep_ops pre_ops); + int (*get_clk_stop_mode)(struct sdw_slave *slave); + int (*clk_stop)(struct sdw_slave *slave, + enum sdw_clk_stop_mode mode, + enum sdw_clk_stop_type type); + }; /** @@ -575,6 +620,7 @@ struct sdw_slave { #endif struct list_head node; struct completion *port_ready; + enum sdw_clk_stop_mode curr_clk_stop_mode; u16 dev_num; u16 dev_num_sticky; bool probed; @@ -892,6 +938,9 @@ int sdw_prepare_stream(struct sdw_stream_runtime *stream); int sdw_enable_stream(struct sdw_stream_runtime *stream); int sdw_disable_stream(struct sdw_stream_runtime *stream); int sdw_deprepare_stream(struct sdw_stream_runtime *stream); +int sdw_bus_prep_clk_stop(struct sdw_bus *bus); +int sdw_bus_clk_stop(struct sdw_bus *bus); +int sdw_bus_exit_clk_stop(struct sdw_bus *bus); /* messaging and data APIs */ diff --git a/include/linux/spi/corgi_lcd.h b/include/linux/spi/corgi_lcd.h index edf4beccdadb..0b857616919c 100644 --- a/include/linux/spi/corgi_lcd.h +++ b/include/linux/spi/corgi_lcd.h @@ -11,9 +11,6 @@ struct corgi_lcd_platform_data { int default_intensity; int limit_mask; - int gpio_backlight_on; /* -1 if n/a */ - int gpio_backlight_cont; /* -1 if n/a */ - void (*notify)(int intensity); void (*kick_battery)(void); }; diff --git a/include/linux/spi/ifx_modem.h b/include/linux/spi/ifx_modem.h index 694268c78d5d..6d19b09139d0 100644 --- a/include/linux/spi/ifx_modem.h +++ b/include/linux/spi/ifx_modem.h @@ -3,12 +3,7 @@ #define LINUX_IFX_MODEM_H struct ifx_modem_platform_data { - unsigned short rst_out; /* modem reset out */ - unsigned short pwr_on; /* power on */ - unsigned short rst_pmu; /* reset modem */ unsigned short tx_pwr; /* modem power threshold */ - unsigned short srdy; /* SRDY */ - unsigned short mrdy; /* MRDY */ unsigned char modem_type; /* Modem type */ unsigned long max_hz; /* max SPI frequency */ unsigned short use_dma:1; /* spi protocol driver supplies diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h index 6d16ba01ff5a..38286de779e3 100644 --- a/include/linux/spi/spi.h +++ b/include/linux/spi/spi.h @@ -135,6 +135,8 @@ extern int spi_delay_exec(struct spi_delay *_delay, struct spi_transfer *xfer); * @modalias: Name of the driver to use with this device, or an alias * for that name. This appears in the sysfs "modalias" attribute * for driver coldplugging, and in uevents used for hotplugging + * @driver_override: If the name of a driver is written to this attribute, then + * the device will bind to the named driver and only the named driver. * @cs_gpio: LEGACY: gpio number of the chipselect line (optional, -ENOENT when * not using a GPIO line) use cs_gpiod in new drivers by opting in on * the spi_master. 
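The SDW_* helper macros added to <linux/soundwire/sdw.h> above unpack the MIPI-defined fields from a 64-bit firmware address such as an ACPI _ADR value. A small sketch of decoding one into a struct sdw_slave_id (the function name is illustrative):

    #include <linux/soundwire/sdw.h>

    static void example_decode_adr(u64 adr, struct sdw_slave_id *id)
    {
            id->sdw_version = SDW_VERSION(adr);    /* bits 47:44 */
            id->unique_id   = SDW_UNIQUE_ID(adr);  /* bits 43:40 */
            id->mfg_id      = SDW_MFG_ID(adr);     /* bits 39:24 */
            id->part_id     = SDW_PART_ID(adr);    /* bits 23:8  */
            id->class_id    = SDW_CLASS_ID(adr);   /* bits  7:0  */
            /* SDW_DISCO_LINK_ID(adr) (bits 51:48) selects the link */
    }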
@@ -443,6 +445,7 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv) * @spi_transfer->ptp_sts_word_post were transmitted. * If the driver does not set this, the SPI core takes the snapshot as * close to the driver hand-over as possible. + * @irq_flags: Interrupt enable state during PTP system timestamping * * Each SPI controller can communicate with one or more @spi_device * children. These make a small bus, sharing MOSI, MISO and SCK signals @@ -481,6 +484,9 @@ struct spi_controller { /* spi_device.mode flags understood by this controller driver */ u32 mode_bits; + /* spi_device.mode flags override flags for this controller */ + u32 buswidth_override_bits; + /* bitmask of supported bits_per_word for transfers */ u32 bits_per_word_mask; #define SPI_BPW_MASK(bits) BIT((bits) - 1) @@ -930,8 +936,7 @@ struct spi_transfer { struct ptp_system_timestamp *ptp_sts; - bool timestamped_pre; - bool timestamped_post; + bool timestamped; struct list_head transfer_list; }; diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h index 031ce8617df8..d3770b3f9d9a 100644 --- a/include/linux/spinlock.h +++ b/include/linux/spinlock.h @@ -93,12 +93,13 @@ #ifdef CONFIG_DEBUG_SPINLOCK extern void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name, - struct lock_class_key *key); -# define raw_spin_lock_init(lock) \ -do { \ - static struct lock_class_key __key; \ - \ - __raw_spin_lock_init((lock), #lock, &__key); \ + struct lock_class_key *key, short inner); + +# define raw_spin_lock_init(lock) \ +do { \ + static struct lock_class_key __key; \ + \ + __raw_spin_lock_init((lock), #lock, &__key, LD_WAIT_SPIN); \ } while (0) #else @@ -327,12 +328,26 @@ static __always_inline raw_spinlock_t *spinlock_check(spinlock_t *lock) return &lock->rlock; } -#define spin_lock_init(_lock) \ -do { \ - spinlock_check(_lock); \ - raw_spin_lock_init(&(_lock)->rlock); \ +#ifdef CONFIG_DEBUG_SPINLOCK + +# define spin_lock_init(lock) \ +do { \ + static struct lock_class_key __key; \ + \ + __raw_spin_lock_init(spinlock_check(lock), \ + #lock, &__key, LD_WAIT_CONFIG); \ +} while (0) + +#else + +# define spin_lock_init(_lock) \ +do { \ + spinlock_check(_lock); \ + *(_lock) = __SPIN_LOCK_UNLOCKED(_lock); \ } while (0) +#endif + static __always_inline void spin_lock(spinlock_t *lock) { raw_spin_lock(&lock->rlock); diff --git a/include/linux/spinlock_types.h b/include/linux/spinlock_types.h index 24b4e6f2c1a2..6102e6bff3ae 100644 --- a/include/linux/spinlock_types.h +++ b/include/linux/spinlock_types.h @@ -33,8 +33,18 @@ typedef struct raw_spinlock { #define SPINLOCK_OWNER_INIT ((void *)-1L) #ifdef CONFIG_DEBUG_LOCK_ALLOC -# define SPIN_DEP_MAP_INIT(lockname) .dep_map = { .name = #lockname } +# define RAW_SPIN_DEP_MAP_INIT(lockname) \ + .dep_map = { \ + .name = #lockname, \ + .wait_type_inner = LD_WAIT_SPIN, \ + } +# define SPIN_DEP_MAP_INIT(lockname) \ + .dep_map = { \ + .name = #lockname, \ + .wait_type_inner = LD_WAIT_CONFIG, \ + } #else +# define RAW_SPIN_DEP_MAP_INIT(lockname) # define SPIN_DEP_MAP_INIT(lockname) #endif @@ -51,7 +61,7 @@ typedef struct raw_spinlock { { \ .raw_lock = __ARCH_SPIN_LOCK_UNLOCKED, \ SPIN_DEBUG_INIT(lockname) \ - SPIN_DEP_MAP_INIT(lockname) } + RAW_SPIN_DEP_MAP_INIT(lockname) } #define __RAW_SPIN_LOCK_UNLOCKED(lockname) \ (raw_spinlock_t) __RAW_SPIN_LOCK_INITIALIZER(lockname) @@ -72,11 +82,17 @@ typedef struct spinlock { }; } spinlock_t; +#define ___SPIN_LOCK_INITIALIZER(lockname) \ + { \ + .raw_lock = __ARCH_SPIN_LOCK_UNLOCKED, \ + SPIN_DEBUG_INIT(lockname) \ + 
SPIN_DEP_MAP_INIT(lockname) } + #define __SPIN_LOCK_INITIALIZER(lockname) \ - { { .rlock = __RAW_SPIN_LOCK_INITIALIZER(lockname) } } + { { .rlock = ___SPIN_LOCK_INITIALIZER(lockname) } } #define __SPIN_LOCK_UNLOCKED(lockname) \ - (spinlock_t ) __SPIN_LOCK_INITIALIZER(lockname) + (spinlock_t) __SPIN_LOCK_INITIALIZER(lockname) #define DEFINE_SPINLOCK(x) spinlock_t x = __SPIN_LOCK_UNLOCKED(x) diff --git a/include/linux/splice.h b/include/linux/splice.h index 74b4911ac16d..ebbbfea48aa0 100644 --- a/include/linux/splice.h +++ b/include/linux/splice.h @@ -78,6 +78,9 @@ extern ssize_t add_to_pipe(struct pipe_inode_info *, struct pipe_buffer *); extern ssize_t splice_direct_to_actor(struct file *, struct splice_desc *, splice_direct_actor *); +extern long do_splice(struct file *in, loff_t __user *off_in, + struct file *out, loff_t __user *off_out, + size_t len, unsigned int flags); /* * for dynamic pipe sizing diff --git a/include/linux/stackdepot.h b/include/linux/stackdepot.h index 3efa97d482cb..24d49c732341 100644 --- a/include/linux/stackdepot.h +++ b/include/linux/stackdepot.h @@ -19,4 +19,6 @@ depot_stack_handle_t stack_depot_save(unsigned long *entries, unsigned int stack_depot_fetch(depot_stack_handle_t handle, unsigned long **entries); +unsigned int filter_irq_stacks(unsigned long *entries, unsigned int nr_entries); + #endif diff --git a/include/linux/stackprotector.h b/include/linux/stackprotector.h index 6b792d080eee..4c678c4fec58 100644 --- a/include/linux/stackprotector.h +++ b/include/linux/stackprotector.h @@ -6,7 +6,7 @@ #include <linux/sched.h> #include <linux/random.h> -#ifdef CONFIG_STACKPROTECTOR +#if defined(CONFIG_STACKPROTECTOR) || defined(CONFIG_ARM64_PTR_AUTH) # include <asm/stackprotector.h> #else static inline void boot_init_stack_canary(void) diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h index 19190c609282..fbafb353e9be 100644 --- a/include/linux/stmmac.h +++ b/include/linux/stmmac.h @@ -80,6 +80,7 @@ struct stmmac_mdio_bus_data { unsigned int phy_mask; + unsigned int has_xpcs; int *irqs; int probed_phy_irq; bool needs_reset; diff --git a/include/linux/sunrpc/cache.h b/include/linux/sunrpc/cache.h index 0f64de7caa39..10891b70fc7b 100644 --- a/include/linux/sunrpc/cache.h +++ b/include/linux/sunrpc/cache.h @@ -179,6 +179,9 @@ sunrpc_cache_update(struct cache_detail *detail, extern int sunrpc_cache_pipe_upcall(struct cache_detail *detail, struct cache_head *h); +extern int +sunrpc_cache_pipe_upcall_timeout(struct cache_detail *detail, + struct cache_head *h); extern void cache_clean_deferred(void *owner); @@ -206,11 +209,11 @@ static inline void cache_put(struct cache_head *h, struct cache_detail *cd) static inline bool cache_is_expired(struct cache_detail *detail, struct cache_head *h) { + if (h->expiry_time < seconds_since_boot()) + return true; if (!test_bit(CACHE_VALID, &h->flags)) return false; - - return (h->expiry_time < seconds_since_boot()) || - (detail->flush_time >= h->last_refresh); + return detail->flush_time >= h->last_refresh; } extern int cache_check(struct cache_detail *detail, diff --git a/include/linux/sunrpc/rpc_rdma.h b/include/linux/sunrpc/rpc_rdma.h index 92d182fd8e3b..320c672d84de 100644 --- a/include/linux/sunrpc/rpc_rdma.h +++ b/include/linux/sunrpc/rpc_rdma.h @@ -58,7 +58,8 @@ enum { enum { rpcrdma_fixed_maxsz = 4, rpcrdma_segment_maxsz = 4, - rpcrdma_readchunk_maxsz = 2 + rpcrdma_segment_maxsz, + rpcrdma_readseg_maxsz = 1 + rpcrdma_segment_maxsz, + rpcrdma_readchunk_maxsz = 1 + rpcrdma_readseg_maxsz, }; /* diff --git 
a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h index a6ef35184ef1..df696efdd675 100644 --- a/include/linux/sunrpc/sched.h +++ b/include/linux/sunrpc/sched.h @@ -132,6 +132,7 @@ struct rpc_task_setup { #define RPC_TASK_TIMEOUT 0x1000 /* fail with ETIMEDOUT on timeout */ #define RPC_TASK_NOCONNECT 0x2000 /* return ENOTCONN if not connected */ #define RPC_TASK_NO_RETRANS_TIMEOUT 0x4000 /* wait forever for a reply */ +#define RPC_TASK_CRED_NOREF 0x8000 /* No refcount on the credential */ #define RPC_IS_ASYNC(t) ((t)->tk_flags & RPC_TASK_ASYNC) #define RPC_IS_SWAPPER(t) ((t)->tk_flags & RPC_TASK_SWAPPER) diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h index 1afe38eb33f7..fd390894a584 100644 --- a/include/linux/sunrpc/svc.h +++ b/include/linux/sunrpc/svc.h @@ -380,7 +380,7 @@ struct svc_deferred_req { struct cache_deferred_req handle; size_t xprt_hlen; int argslen; - __be32 args[0]; + __be32 args[]; }; struct svc_process_info { @@ -517,6 +517,9 @@ void svc_wake_up(struct svc_serv *); void svc_reserve(struct svc_rqst *rqstp, int space); struct svc_pool * svc_pool_for_cpu(struct svc_serv *serv, int cpu); char * svc_print_addr(struct svc_rqst *, char *, size_t); +int svc_encode_read_payload(struct svc_rqst *rqstp, + unsigned int offset, + unsigned int length); unsigned int svc_fill_write_vector(struct svc_rqst *rqstp, struct page **pages, struct kvec *first, size_t total); diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h index 40f65888dd38..78fe2ac6dc6c 100644 --- a/include/linux/sunrpc/svc_rdma.h +++ b/include/linux/sunrpc/svc_rdma.h @@ -52,6 +52,7 @@ /* Default and maximum inline threshold sizes */ enum { + RPCRDMA_PULLUP_THRESH = RPCRDMA_V1_DEF_INLINE_SIZE >> 1, RPCRDMA_DEF_INLINE_THRESH = 4096, RPCRDMA_MAX_INLINE_THRESH = 65536 }; @@ -132,11 +133,16 @@ struct svc_rdma_recv_ctxt { struct ib_sge rc_recv_sge; void *rc_recv_buf; struct xdr_buf rc_arg; + struct xdr_stream rc_stream; bool rc_temp; u32 rc_byte_len; unsigned int rc_page_count; unsigned int rc_hdr_count; u32 rc_inv_rkey; + __be32 *rc_write_list; + __be32 *rc_reply_chunk; + unsigned int rc_read_payload_offset; + unsigned int rc_read_payload_length; struct page *rc_pages[RPCSVC_MAXPAGES]; }; @@ -144,6 +150,8 @@ struct svc_rdma_send_ctxt { struct list_head sc_list; struct ib_send_wr sc_send_wr; struct ib_cqe sc_cqe; + struct xdr_buf sc_hdrbuf; + struct xdr_stream sc_stream; void *sc_xprt_buf; int sc_page_count; int sc_cur_sge_no; @@ -170,9 +178,11 @@ extern int svc_rdma_recv_read_chunk(struct svcxprt_rdma *rdma, struct svc_rqst *rqstp, struct svc_rdma_recv_ctxt *head, __be32 *p); extern int svc_rdma_send_write_chunk(struct svcxprt_rdma *rdma, - __be32 *wr_ch, struct xdr_buf *xdr); + __be32 *wr_ch, struct xdr_buf *xdr, + unsigned int offset, + unsigned long length); extern int svc_rdma_send_reply_chunk(struct svcxprt_rdma *rdma, - __be32 *rp_ch, bool writelist, + const struct svc_rdma_recv_ctxt *rctxt, struct xdr_buf *xdr); /* svc_rdma_sendto.c */ @@ -182,13 +192,13 @@ extern struct svc_rdma_send_ctxt * extern void svc_rdma_send_ctxt_put(struct svcxprt_rdma *rdma, struct svc_rdma_send_ctxt *ctxt); extern int svc_rdma_send(struct svcxprt_rdma *rdma, struct ib_send_wr *wr); -extern void svc_rdma_sync_reply_hdr(struct svcxprt_rdma *rdma, - struct svc_rdma_send_ctxt *ctxt, - unsigned int len); extern int svc_rdma_map_reply_msg(struct svcxprt_rdma *rdma, - struct svc_rdma_send_ctxt *ctxt, - struct xdr_buf *xdr, __be32 *wr_lst); + struct svc_rdma_send_ctxt *sctxt, + const struct 
svc_rdma_recv_ctxt *rctxt, + struct xdr_buf *xdr); extern int svc_rdma_sendto(struct svc_rqst *); +extern int svc_rdma_read_payload(struct svc_rqst *rqstp, unsigned int offset, + unsigned int length); /* svc_rdma_transport.c */ extern int svc_rdma_create_listen(struct svc_serv *, int, struct sockaddr *); diff --git a/include/linux/sunrpc/svc_xprt.h b/include/linux/sunrpc/svc_xprt.h index ea6f46be9cb7..9e1e046de176 100644 --- a/include/linux/sunrpc/svc_xprt.h +++ b/include/linux/sunrpc/svc_xprt.h @@ -21,6 +21,8 @@ struct svc_xprt_ops { int (*xpo_has_wspace)(struct svc_xprt *); int (*xpo_recvfrom)(struct svc_rqst *); int (*xpo_sendto)(struct svc_rqst *); + int (*xpo_read_payload)(struct svc_rqst *, unsigned int, + unsigned int); void (*xpo_release_rqst)(struct svc_rqst *); void (*xpo_detach)(struct svc_xprt *); void (*xpo_free)(struct svc_xprt *); diff --git a/include/linux/sunrpc/xdr.h b/include/linux/sunrpc/xdr.h index b41f34977995..01bb41908c93 100644 --- a/include/linux/sunrpc/xdr.h +++ b/include/linux/sunrpc/xdr.h @@ -184,24 +184,9 @@ xdr_adjust_iovec(struct kvec *iov, __be32 *p) extern void xdr_shift_buf(struct xdr_buf *, size_t); extern void xdr_buf_from_iov(struct kvec *, struct xdr_buf *); extern int xdr_buf_subsegment(struct xdr_buf *, struct xdr_buf *, unsigned int, unsigned int); -extern int xdr_buf_read_mic(struct xdr_buf *, struct xdr_netobj *, unsigned int); extern int read_bytes_from_xdr_buf(struct xdr_buf *, unsigned int, void *, unsigned int); extern int write_bytes_to_xdr_buf(struct xdr_buf *, unsigned int, void *, unsigned int); -/* - * Helper structure for copying from an sk_buff. - */ -struct xdr_skb_reader { - struct sk_buff *skb; - unsigned int offset; - size_t count; - __wsum csum; -}; - -typedef size_t (*xdr_skb_read_actor)(struct xdr_skb_reader *desc, void *to, size_t len); - -extern int csum_partial_copy_to_xdr(struct xdr_buf *, struct sk_buff *); - extern int xdr_encode_word(struct xdr_buf *, unsigned int, u32); extern int xdr_decode_word(struct xdr_buf *, unsigned int, u32 *); @@ -301,6 +286,59 @@ xdr_align_size(size_t n) } /** + * xdr_pad_size - Calculate size of an object's pad + * @n: Size of an object being XDR encoded (in bytes) + * + * This implementation avoids the need for conditional + * branches or modulo division. 
+ * + * Return value: + * Size (in bytes) of the needed XDR pad + */ +static inline size_t xdr_pad_size(size_t n) +{ + return xdr_align_size(n) - n; +} + +/** + * xdr_stream_encode_item_present - Encode a "present" list item + * @xdr: pointer to xdr_stream + * + * Return values: + * On success, returns length in bytes of XDR buffer consumed + * %-EMSGSIZE on XDR buffer overflow + */ +static inline ssize_t xdr_stream_encode_item_present(struct xdr_stream *xdr) +{ + const size_t len = sizeof(__be32); + __be32 *p = xdr_reserve_space(xdr, len); + + if (unlikely(!p)) + return -EMSGSIZE; + *p = xdr_one; + return len; +} + +/** + * xdr_stream_encode_item_absent - Encode a "not present" list item + * @xdr: pointer to xdr_stream + * + * Return values: + * On success, returns length in bytes of XDR buffer consumed + * %-EMSGSIZE on XDR buffer overflow + */ +static inline int xdr_stream_encode_item_absent(struct xdr_stream *xdr) +{ + const size_t len = sizeof(__be32); + __be32 *p = xdr_reserve_space(xdr, len); + + if (unlikely(!p)) + return -EMSGSIZE; + *p = xdr_zero; + return len; +} + +/** * xdr_stream_encode_u32 - Encode a 32-bit integer * @xdr: pointer to xdr_stream * @n: integer to encode diff --git a/include/linux/suspend.h b/include/linux/suspend.h index 2b2055b035ee..4fcc6fd0cbd6 100644 --- a/include/linux/suspend.h +++ b/include/linux/suspend.h @@ -566,38 +566,4 @@ static inline void queue_up_suspend_work(void) {} #endif /* !CONFIG_PM_AUTOSLEEP */ -#ifdef CONFIG_ARCH_SAVE_PAGE_KEYS -/* - * The ARCH_SAVE_PAGE_KEYS functions can be used by an architecture - * to save/restore additional information to/from the array of page - * frame numbers in the hibernation image. For s390 this is used to - * save and restore the storage key for each page that is included - * in the hibernation image. 
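The XDR helpers added to <linux/sunrpc/xdr.h> above cover two recurring encoder chores: xdr_pad_size() gives the padding needed to keep an opaque 4-byte aligned (xdr_align_size(5) is 8, so xdr_pad_size(5) is 3), and xdr_stream_encode_item_present()/_absent() emit the boolean discriminator in front of an optional list item. A minimal sketch of an encoder using them; the function itself is illustrative, not from this patch:

    #include <linux/string.h>
    #include <linux/sunrpc/xdr.h>

    static int example_encode_opt_opaque(struct xdr_stream *xdr,
                                         const void *body, u32 len,
                                         bool have_body)
    {
            __be32 *p;

            if (!have_body) {
                    if (xdr_stream_encode_item_absent(xdr) < 0)
                            return -EMSGSIZE;
                    return 0;
            }

            if (xdr_stream_encode_item_present(xdr) < 0)
                    return -EMSGSIZE;

            /* Length word plus the body rounded up to a 4-byte boundary */
            p = xdr_reserve_space(xdr, sizeof(__be32) + xdr_align_size(len));
            if (!p)
                    return -EMSGSIZE;
            *p++ = cpu_to_be32(len);
            memcpy(p, body, len);
            memset((char *)p + len, 0, xdr_pad_size(len));
            return 0;
    }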
- */ -unsigned long page_key_additional_pages(unsigned long pages); -int page_key_alloc(unsigned long pages); -void page_key_free(void); -void page_key_read(unsigned long *pfn); -void page_key_memorize(unsigned long *pfn); -void page_key_write(void *address); - -#else /* !CONFIG_ARCH_SAVE_PAGE_KEYS */ - -static inline unsigned long page_key_additional_pages(unsigned long pages) -{ - return 0; -} - -static inline int page_key_alloc(unsigned long pages) -{ - return 0; -} - -static inline void page_key_free(void) {} -static inline void page_key_read(unsigned long *pfn) {} -static inline void page_key_memorize(unsigned long *pfn) {} -static inline void page_key_write(void *address) {} - -#endif /* !CONFIG_ARCH_SAVE_PAGE_KEYS */ - #endif /* _LINUX_SUSPEND_H */ diff --git a/include/linux/swap.h b/include/linux/swap.h index 1e99f7ac1d7e..b835d8dbea0e 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -374,7 +374,6 @@ extern int sysctl_min_slab_ratio; #define node_reclaim_mode 0 #endif -extern int page_evictable(struct page *page); extern void check_move_unevictable_pages(struct pagevec *pvec); extern int kswapd_run(int nid); diff --git a/include/linux/swapops.h b/include/linux/swapops.h index 877fd239b6ff..d9b7c9132c2f 100644 --- a/include/linux/swapops.h +++ b/include/linux/swapops.h @@ -68,6 +68,8 @@ static inline swp_entry_t pte_to_swp_entry(pte_t pte) if (pte_swp_soft_dirty(pte)) pte = pte_swp_clear_soft_dirty(pte); + if (pte_swp_uffd_wp(pte)) + pte = pte_swp_clear_uffd_wp(pte); arch_entry = __pte_to_swp_entry(pte); return swp_entry(__swp_type(arch_entry), __swp_offset(arch_entry)); } @@ -348,7 +350,8 @@ static inline void num_poisoned_pages_inc(void) } #endif -#if defined(CONFIG_MEMORY_FAILURE) || defined(CONFIG_MIGRATION) +#if defined(CONFIG_MEMORY_FAILURE) || defined(CONFIG_MIGRATION) || \ + defined(CONFIG_DEVICE_PRIVATE) static inline int non_swap_entry(swp_entry_t entry) { return swp_type(entry) >= MAX_SWAPFILES; diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h index fa7ee503fb76..80bb865b3a33 100644 --- a/include/linux/sysfs.h +++ b/include/linux/sysfs.h @@ -297,9 +297,10 @@ int sysfs_add_link_to_group(struct kobject *kobj, const char *group_name, struct kobject *target, const char *link_name); void sysfs_remove_link_from_group(struct kobject *kobj, const char *group_name, const char *link_name); -int __compat_only_sysfs_link_entry_to_kobj(struct kobject *kobj, - struct kobject *target_kobj, - const char *target_name); +int compat_only_sysfs_link_entry_to_kobj(struct kobject *kobj, + struct kobject *target_kobj, + const char *target_name, + const char *symlink_name); void sysfs_notify(struct kobject *kobj, const char *dir, const char *attr); @@ -310,6 +311,18 @@ static inline void sysfs_enable_ns(struct kernfs_node *kn) return kernfs_enable_ns(kn); } +int sysfs_file_change_owner(struct kobject *kobj, const char *name, kuid_t kuid, + kgid_t kgid); +int sysfs_change_owner(struct kobject *kobj, kuid_t kuid, kgid_t kgid); +int sysfs_link_change_owner(struct kobject *kobj, struct kobject *targ, + const char *name, kuid_t kuid, kgid_t kgid); +int sysfs_groups_change_owner(struct kobject *kobj, + const struct attribute_group **groups, + kuid_t kuid, kgid_t kgid); +int sysfs_group_change_owner(struct kobject *kobj, + const struct attribute_group *groups, kuid_t kuid, + kgid_t kgid); + #else /* CONFIG_SYSFS */ static inline int sysfs_create_dir_ns(struct kobject *kobj, const void *ns) @@ -500,10 +513,10 @@ static inline void sysfs_remove_link_from_group(struct kobject 
*kobj, { } -static inline int __compat_only_sysfs_link_entry_to_kobj( - struct kobject *kobj, - struct kobject *target_kobj, - const char *target_name) +static inline int compat_only_sysfs_link_entry_to_kobj(struct kobject *kobj, + struct kobject *target_kobj, + const char *target_name, + const char *symlink_name) { return 0; } @@ -522,6 +535,40 @@ static inline void sysfs_enable_ns(struct kernfs_node *kn) { } +static inline int sysfs_file_change_owner(struct kobject *kobj, + const char *name, kuid_t kuid, + kgid_t kgid) +{ + return 0; +} + +static inline int sysfs_link_change_owner(struct kobject *kobj, + struct kobject *targ, + const char *name, kuid_t kuid, + kgid_t kgid) +{ + return 0; +} + +static inline int sysfs_change_owner(struct kobject *kobj, kuid_t kuid, kgid_t kgid) +{ + return 0; +} + +static inline int sysfs_groups_change_owner(struct kobject *kobj, + const struct attribute_group **groups, + kuid_t kuid, kgid_t kgid) +{ + return 0; +} + +static inline int sysfs_group_change_owner(struct kobject *kobj, + const struct attribute_group *groups, + kuid_t kuid, kgid_t kgid) +{ + return 0; +} + #endif /* CONFIG_SYSFS */ static inline int __must_check sysfs_create_file(struct kobject *kobj, diff --git a/include/linux/sysrq.h b/include/linux/sysrq.h index 8c71874e8485..8e159e16850f 100644 --- a/include/linux/sysrq.h +++ b/include/linux/sysrq.h @@ -50,6 +50,7 @@ int unregister_sysrq_key(int key, struct sysrq_key_op *op); struct sysrq_key_op *__sysrq_get_key_op(int key); int sysrq_toggle_support(int enable_mask); +int sysrq_mask(void); #else @@ -71,6 +72,12 @@ static inline int unregister_sysrq_key(int key, struct sysrq_key_op *op) return -EINVAL; } +static inline int sysrq_mask(void) +{ + /* Magic SysRq disabled mask */ + return 0; +} + #endif #endif /* _LINUX_SYSRQ_H */ diff --git a/include/linux/tcp.h b/include/linux/tcp.h index 3dc964010fef..421c99c12291 100644 --- a/include/linux/tcp.h +++ b/include/linux/tcp.h @@ -86,9 +86,19 @@ struct mptcp_options_received { u64 data_seq; u32 subflow_seq; u16 data_len; - u8 mp_capable : 1, + u16 mp_capable : 1, mp_join : 1, - dss : 1; + dss : 1, + add_addr : 1, + rm_addr : 1, + family : 4, + echo : 1, + backup : 1; + u32 token; + u32 nonce; + u64 thmac; + u8 hmac[20]; + u8 join_id; u8 use_map:1, dsn64:1, data_fin:1, @@ -96,6 +106,16 @@ struct mptcp_options_received { ack64:1, mpc_map:1, __unused:2; + u8 addr_id; + u8 rm_id; + union { + struct in_addr addr; +#if IS_ENABLED(CONFIG_MPTCP_IPV6) + struct in6_addr addr6; +#endif + }; + u64 ahmac; + u16 port; }; #endif @@ -131,6 +151,8 @@ static inline void tcp_clear_options(struct tcp_options_received *rx_opt) #if IS_ENABLED(CONFIG_MPTCP) rx_opt->mptcp.mp_capable = 0; rx_opt->mptcp.mp_join = 0; + rx_opt->mptcp.add_addr = 0; + rx_opt->mptcp.rm_addr = 0; rx_opt->mptcp.dss = 0; #endif } diff --git a/include/linux/tee_drv.h b/include/linux/tee_drv.h index 7a03f68fb982..1412e9cc79ce 100644 --- a/include/linux/tee_drv.h +++ b/include/linux/tee_drv.h @@ -49,7 +49,6 @@ struct tee_shm_pool; */ struct tee_context { struct tee_device *teedev; - struct list_head list_shm; void *data; struct kref refcount; bool releasing; @@ -168,9 +167,7 @@ void tee_device_unregister(struct tee_device *teedev); /** * struct tee_shm - shared memory object - * @teedev: device used to allocate the object - * @ctx: context using the object, if NULL the context is gone - * @link link element + * @ctx: context using the object * @paddr: physical address of the shared memory * @kaddr: virtual address of the shared memory * @size: size 
of shared memory @@ -185,9 +182,7 @@ void tee_device_unregister(struct tee_device *teedev); * subsystem and from drivers that implements their own shm pool manager. */ struct tee_shm { - struct tee_device *teedev; struct tee_context *ctx; - struct list_head link; phys_addr_t paddr; void *kaddr; size_t size; @@ -319,18 +314,6 @@ void *tee_get_drvdata(struct tee_device *teedev); struct tee_shm *tee_shm_alloc(struct tee_context *ctx, size_t size, u32 flags); /** - * tee_shm_priv_alloc() - Allocate shared memory privately - * @dev: Device that allocates the shared memory - * @size: Requested size of shared memory - * - * Allocates shared memory buffer that is not associated with any client - * context. Such buffers are owned by TEE driver and used for internal calls. - * - * @returns a pointer to 'struct tee_shm' - */ -struct tee_shm *tee_shm_priv_alloc(struct tee_device *teedev, size_t size); - -/** * tee_shm_register() - Register shared memory buffer * @ctx: Context that registers the shared memory * @addr: Address is userspace of the shared buffer diff --git a/include/linux/threads.h b/include/linux/threads.h index 3086dba525e2..18d5a74bcc3d 100644 --- a/include/linux/threads.h +++ b/include/linux/threads.h @@ -29,7 +29,7 @@ /* * A maximum of 4 million PIDs should be enough for a while. - * [NOTE: PID/TIDs are limited to 2^29 ~= 500+ million, see futex.h.] + * [NOTE: PID/TIDs are limited to 2^30 ~= 1 billion, see FUTEX_TID_MASK.] */ #define PID_MAX_LIMIT (CONFIG_BASE_SMALL ? PAGE_SIZE * 8 : \ (sizeof(long) > 4 ? 4 * 1024 * 1024 : PID_MAX_DEFAULT)) diff --git a/include/linux/time.h b/include/linux/time.h index 8ef5e5cc9f57..4c325bf44ce0 100644 --- a/include/linux/time.h +++ b/include/linux/time.h @@ -111,9 +111,6 @@ static inline bool itimerspec64_valid(const struct itimerspec64 *its) */ #define time_between32(t, l, h) ((u32)(h) - (u32)(l) >= (u32)(t) - (u32)(l)) -struct timens_offset { - s64 sec; - u64 nsec; -}; +# include <vdso/time.h> #endif diff --git a/include/linux/time32.h b/include/linux/time32.h index cf9320cd2d0b..83a400b6ba99 100644 --- a/include/linux/time32.h +++ b/include/linux/time32.h @@ -12,17 +12,7 @@ #include <linux/time64.h> #include <linux/timex.h> -typedef s32 old_time32_t; - -struct old_timespec32 { - old_time32_t tv_sec; - s32 tv_nsec; -}; - -struct old_timeval32 { - old_time32_t tv_sec; - s32 tv_usec; -}; +#include <vdso/time32.h> struct old_itimerspec32 { struct old_timespec32 it_interval; diff --git a/include/linux/time64.h b/include/linux/time64.h index 19125489ae94..c9dcb3e5781f 100644 --- a/include/linux/time64.h +++ b/include/linux/time64.h @@ -3,6 +3,7 @@ #define _LINUX_TIME64_H #include <linux/math64.h> +#include <vdso/time64.h> typedef __s64 time64_t; typedef __u64 timeu64_t; @@ -19,15 +20,6 @@ struct itimerspec64 { struct timespec64 it_value; }; -/* Parameters used to convert the timespec values: */ -#define MSEC_PER_SEC 1000L -#define USEC_PER_MSEC 1000L -#define NSEC_PER_USEC 1000L -#define NSEC_PER_MSEC 1000000L -#define USEC_PER_SEC 1000000L -#define NSEC_PER_SEC 1000000000L -#define FSEC_PER_SEC 1000000000000000LL - /* Located here for timespec[64]_valid_strict */ #define TIME64_MAX ((s64)~((u64)1 << 63)) #define TIME64_MIN (-TIME64_MAX - 1) diff --git a/include/linux/timer.h b/include/linux/timer.h index 1e6650ed066d..0dc19a8c39c9 100644 --- a/include/linux/timer.h +++ b/include/linux/timer.h @@ -164,7 +164,7 @@ static inline void destroy_timer_on_stack(struct timer_list *timer) { } */ static inline int timer_pending(const struct timer_list * timer) { 
- return timer->entry.pprev != NULL; + return !hlist_unhashed_lockless(&timer->entry); } extern void add_timer_on(struct timer_list *timer, int cpu); diff --git a/include/linux/tnum.h b/include/linux/tnum.h index ea627d1ab7e3..498dbcedb451 100644 --- a/include/linux/tnum.h +++ b/include/linux/tnum.h @@ -86,4 +86,16 @@ int tnum_strn(char *str, size_t size, struct tnum a); /* Format a tnum as tristate binary expansion */ int tnum_sbin(char *str, size_t size, struct tnum a); +/* Returns the 32-bit subreg */ +struct tnum tnum_subreg(struct tnum a); +/* Returns the tnum with the lower 32-bit subreg cleared */ +struct tnum tnum_clear_subreg(struct tnum a); +/* Returns the tnum with the lower 32-bit subreg set to value */ +struct tnum tnum_const_subreg(struct tnum a, u32 value); +/* Returns true if 32-bit subreg @a is a known constant*/ +static inline bool tnum_subreg_is_const(struct tnum a) +{ + return !(tnum_subreg(a)).mask; +} + #endif /* _LINUX_TNUM_H */ diff --git a/include/linux/topology.h b/include/linux/topology.h index eb2fe6edd73c..608fa4aadf0e 100644 --- a/include/linux/topology.h +++ b/include/linux/topology.h @@ -130,20 +130,11 @@ static inline int numa_node_id(void) * Use the accessor functions set_numa_mem(), numa_mem_id() and cpu_to_mem(). */ DECLARE_PER_CPU(int, _numa_mem_); -extern int _node_numa_mem_[MAX_NUMNODES]; #ifndef set_numa_mem static inline void set_numa_mem(int node) { this_cpu_write(_numa_mem_, node); - _node_numa_mem_[numa_node_id()] = node; -} -#endif - -#ifndef node_to_mem_node -static inline int node_to_mem_node(int node) -{ - return _node_numa_mem_[node]; } #endif @@ -166,7 +157,6 @@ static inline int cpu_to_mem(int cpu) static inline void set_cpu_numa_mem(int cpu, int node) { per_cpu(_numa_mem_, cpu) = node; - _node_numa_mem_[cpu_to_node(cpu)] = node; } #endif @@ -180,13 +170,6 @@ static inline int numa_mem_id(void) } #endif -#ifndef node_to_mem_node -static inline int node_to_mem_node(int node) -{ - return node; -} -#endif - #ifndef cpu_to_mem static inline int cpu_to_mem(int cpu) { diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h index 6c7a10a6d71e..5c6943354049 100644 --- a/include/linux/trace_events.h +++ b/include/linux/trace_events.h @@ -85,6 +85,8 @@ struct trace_iterator { struct mutex mutex; struct ring_buffer_iter **buffer_iter; unsigned long iter_flags; + void *temp; /* temp holder */ + unsigned int temp_size; /* trace_seq for __print_flags() and __print_symbolic() etc. 
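The timer_pending() hunk above replaces the open-coded pprev test with hlist_unhashed_lockless(), i.e. a READ_ONCE()-based check, so the helper can be called without the timer base lock without tripping KCSAN. A minimal usage sketch; the driver structure and field names are invented for illustration:

	#include <linux/timer.h>

	struct mydrv {				/* hypothetical driver state */
		struct timer_list watchdog;
	};

	static void mydrv_kick_watchdog(struct mydrv *drv)
	{
		/* timer_pending() is only a snapshot; the timer may still fire
		 * or be re-armed concurrently on another CPU */
		if (!timer_pending(&drv->watchdog))
			mod_timer(&drv->watchdog, jiffies + HZ);
	}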
*/ struct trace_seq tmp_seq; diff --git a/include/linux/uacce.h b/include/linux/uacce.h new file mode 100644 index 000000000000..0e215e6d0534 --- /dev/null +++ b/include/linux/uacce.h @@ -0,0 +1,163 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +#ifndef _LINUX_UACCE_H +#define _LINUX_UACCE_H + +#include <linux/cdev.h> +#include <uapi/misc/uacce/uacce.h> + +#define UACCE_NAME "uacce" +#define UACCE_MAX_REGION 2 +#define UACCE_MAX_NAME_SIZE 64 + +struct uacce_queue; +struct uacce_device; + +/** + * struct uacce_qfile_region - structure of queue file region + * @type: type of the region + */ +struct uacce_qfile_region { + enum uacce_qfrt type; +}; + +/** + * struct uacce_ops - uacce device operations + * @get_available_instances: get available instances left of the device + * @get_queue: get a queue from the device + * @put_queue: free a queue to the device + * @start_queue: make the queue start work after get_queue + * @stop_queue: make the queue stop work before put_queue + * @is_q_updated: check whether the task is finished + * @mmap: mmap addresses of queue to user space + * @ioctl: ioctl for user space users of the queue + */ +struct uacce_ops { + int (*get_available_instances)(struct uacce_device *uacce); + int (*get_queue)(struct uacce_device *uacce, unsigned long arg, + struct uacce_queue *q); + void (*put_queue)(struct uacce_queue *q); + int (*start_queue)(struct uacce_queue *q); + void (*stop_queue)(struct uacce_queue *q); + int (*is_q_updated)(struct uacce_queue *q); + int (*mmap)(struct uacce_queue *q, struct vm_area_struct *vma, + struct uacce_qfile_region *qfr); + long (*ioctl)(struct uacce_queue *q, unsigned int cmd, + unsigned long arg); +}; + +/** + * struct uacce_interface - interface required for uacce_register() + * @name: the uacce device name. 
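The struct uacce_ops block above is the contract an accelerator driver implements before handing itself to the uacce core; uacce_alloc(), uacce_register() and uacce_remove(), declared further down in this header, tie it together. A hedged registration sketch, with the device name, the ops subset and the error handling invented for illustration:

	#include <linux/err.h>
	#include <linux/uacce.h>

	static int mydev_start_queue(struct uacce_queue *q)
	{
		/* enable doorbells / rings for this queue */
		return 0;
	}

	static void mydev_stop_queue(struct uacce_queue *q)
	{
		/* quiesce the hardware queue */
	}

	static const struct uacce_ops mydev_uacce_ops = {
		.start_queue	= mydev_start_queue,
		.stop_queue	= mydev_stop_queue,
		/* .get_queue, .put_queue, .mmap, ... as the device requires */
	};

	static struct uacce_device *mydev_setup_uacce(struct device *parent)
	{
		struct uacce_interface iface = {
			.name	= "mydev",
			.ops	= &mydev_uacce_ops,
		};
		struct uacce_device *uacce;
		int ret;

		uacce = uacce_alloc(parent, &iface);
		if (IS_ERR(uacce))
			return uacce;

		ret = uacce_register(uacce);
		if (ret) {
			uacce_remove(uacce);
			return ERR_PTR(ret);
		}
		return uacce;
	}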
Will show up in sysfs + * @flags: uacce device attributes + * @ops: pointer to the struct uacce_ops + */ +struct uacce_interface { + char name[UACCE_MAX_NAME_SIZE]; + unsigned int flags; + const struct uacce_ops *ops; +}; + +enum uacce_q_state { + UACCE_Q_ZOMBIE = 0, + UACCE_Q_INIT, + UACCE_Q_STARTED, +}; + +/** + * struct uacce_queue + * @uacce: pointer to uacce + * @priv: private pointer + * @wait: wait queue head + * @list: index into uacce_mm + * @uacce_mm: the corresponding mm + * @qfrs: pointer of qfr regions + * @state: queue state machine + */ +struct uacce_queue { + struct uacce_device *uacce; + void *priv; + wait_queue_head_t wait; + struct list_head list; + struct uacce_mm *uacce_mm; + struct uacce_qfile_region *qfrs[UACCE_MAX_REGION]; + enum uacce_q_state state; +}; + +/** + * struct uacce_device + * @algs: supported algorithms + * @api_ver: api version + * @ops: pointer to the struct uacce_ops + * @qf_pg_num: page numbers of the queue file regions + * @parent: pointer to the parent device + * @is_vf: whether virtual function + * @flags: uacce attributes + * @dev_id: id of the uacce device + * @cdev: cdev of the uacce + * @dev: dev of the uacce + * @priv: private pointer of the uacce + * @mm_list: list head of uacce_mm->list + * @mm_lock: lock for mm_list + * @inode: core vfs + */ +struct uacce_device { + const char *algs; + const char *api_ver; + const struct uacce_ops *ops; + unsigned long qf_pg_num[UACCE_MAX_REGION]; + struct device *parent; + bool is_vf; + u32 flags; + u32 dev_id; + struct cdev *cdev; + struct device dev; + void *priv; + struct list_head mm_list; + struct mutex mm_lock; + struct inode *inode; +}; + +/** + * struct uacce_mm - keep track of queues bound to a process + * @list: index into uacce_device + * @queues: list of queues + * @mm: the mm struct + * @lock: protects the list of queues + * @pasid: pasid of the uacce_mm + * @handle: iommu_sva handle return from iommu_sva_bind_device + */ +struct uacce_mm { + struct list_head list; + struct list_head queues; + struct mm_struct *mm; + struct mutex lock; + int pasid; + struct iommu_sva *handle; +}; + +#if IS_ENABLED(CONFIG_UACCE) + +struct uacce_device *uacce_alloc(struct device *parent, + struct uacce_interface *interface); +int uacce_register(struct uacce_device *uacce); +void uacce_remove(struct uacce_device *uacce); + +#else /* CONFIG_UACCE */ + +static inline +struct uacce_device *uacce_alloc(struct device *parent, + struct uacce_interface *interface) +{ + return ERR_PTR(-ENODEV); +} + +static inline int uacce_register(struct uacce_device *uacce) +{ + return -EINVAL; +} + +static inline void uacce_remove(struct uacce_device *uacce) {} + +#endif /* CONFIG_UACCE */ + +#endif /* _LINUX_UACCE_H */ diff --git a/include/linux/uio_driver.h b/include/linux/uio_driver.h index 01081c4726c0..54bf6b118401 100644 --- a/include/linux/uio_driver.h +++ b/include/linux/uio_driver.h @@ -24,10 +24,10 @@ struct uio_map; * struct uio_mem - description of a UIO memory region * @name: name of the memory region for identification * @addr: address of the device's memory rounded to page - * size (phys_addr is used since addr can be - * logical, virtual, or physical & phys_addr_t - * should always be large enough to handle any of - * the address types) + * size (phys_addr is used since addr can be + * logical, virtual, or physical & phys_addr_t + * should always be large enough to handle any of + * the address types) * @offs: offset of device memory within the page * @size: size of IO (multiple of page size) * @memtype: type of 
memory addr points to @@ -67,16 +67,16 @@ struct uio_port { #define MAX_UIO_PORT_REGIONS 5 struct uio_device { - struct module *owner; + struct module *owner; struct device dev; - int minor; - atomic_t event; - struct fasync_struct *async_queue; - wait_queue_head_t wait; - struct uio_info *info; + int minor; + atomic_t event; + struct fasync_struct *async_queue; + wait_queue_head_t wait; + struct uio_info *info; struct mutex info_lock; - struct kobject *map_dir; - struct kobject *portio_dir; + struct kobject *map_dir; + struct kobject *portio_dir; }; /** @@ -123,6 +123,15 @@ extern int __must_check extern void uio_unregister_device(struct uio_info *info); extern void uio_event_notify(struct uio_info *info); +extern int __must_check + __devm_uio_register_device(struct module *owner, + struct device *parent, + struct uio_info *info); + +/* use a define to avoid include chaining to get THIS_MODULE */ +#define devm_uio_register_device(parent, info) \ + __devm_uio_register_device(THIS_MODULE, parent, info) + /* defines for uio_info->irq */ #define UIO_IRQ_CUSTOM -1 #define UIO_IRQ_NONE 0 diff --git a/include/linux/unaligned/be_byteshift.h b/include/linux/unaligned/be_byteshift.h index 8bdb8fa01bd4..c43ff5918c8a 100644 --- a/include/linux/unaligned/be_byteshift.h +++ b/include/linux/unaligned/be_byteshift.h @@ -40,17 +40,17 @@ static inline void __put_unaligned_be64(u64 val, u8 *p) static inline u16 get_unaligned_be16(const void *p) { - return __get_unaligned_be16((const u8 *)p); + return __get_unaligned_be16(p); } static inline u32 get_unaligned_be32(const void *p) { - return __get_unaligned_be32((const u8 *)p); + return __get_unaligned_be32(p); } static inline u64 get_unaligned_be64(const void *p) { - return __get_unaligned_be64((const u8 *)p); + return __get_unaligned_be64(p); } static inline void put_unaligned_be16(u16 val, void *p) diff --git a/include/linux/unaligned/generic.h b/include/linux/unaligned/generic.h index 57d3114656e5..303289492859 100644 --- a/include/linux/unaligned/generic.h +++ b/include/linux/unaligned/generic.h @@ -2,6 +2,8 @@ #ifndef _LINUX_UNALIGNED_GENERIC_H #define _LINUX_UNALIGNED_GENERIC_H +#include <linux/types.h> + /* * Cause a link-time error if we try an unaligned access other than * 1,2,4 or 8 bytes long @@ -66,4 +68,48 @@ extern void __bad_unaligned_access_size(void); } \ (void)0; }) +static inline u32 __get_unaligned_be24(const u8 *p) +{ + return p[0] << 16 | p[1] << 8 | p[2]; +} + +static inline u32 get_unaligned_be24(const void *p) +{ + return __get_unaligned_be24(p); +} + +static inline u32 __get_unaligned_le24(const u8 *p) +{ + return p[0] | p[1] << 8 | p[2] << 16; +} + +static inline u32 get_unaligned_le24(const void *p) +{ + return __get_unaligned_le24(p); +} + +static inline void __put_unaligned_be24(const u32 val, u8 *p) +{ + *p++ = val >> 16; + *p++ = val >> 8; + *p++ = val; +} + +static inline void put_unaligned_be24(const u32 val, void *p) +{ + __put_unaligned_be24(val, p); +} + +static inline void __put_unaligned_le24(const u32 val, u8 *p) +{ + *p++ = val; + *p++ = val >> 8; + *p++ = val >> 16; +} + +static inline void put_unaligned_le24(const u32 val, void *p) +{ + __put_unaligned_le24(val, p); +} + #endif /* _LINUX_UNALIGNED_GENERIC_H */ diff --git a/include/linux/unaligned/le_byteshift.h b/include/linux/unaligned/le_byteshift.h index 1628b75866f0..2248dcb0df76 100644 --- a/include/linux/unaligned/le_byteshift.h +++ b/include/linux/unaligned/le_byteshift.h @@ -40,17 +40,17 @@ static inline void __put_unaligned_le64(u64 val, u8 *p) static inline 
u16 get_unaligned_le16(const void *p) { - return __get_unaligned_le16((const u8 *)p); + return __get_unaligned_le16(p); } static inline u32 get_unaligned_le32(const void *p) { - return __get_unaligned_le32((const u8 *)p); + return __get_unaligned_le32(p); } static inline u64 get_unaligned_le64(const void *p) { - return __get_unaligned_le64((const u8 *)p); + return __get_unaligned_le64(p); } static inline void put_unaligned_le16(u16 val, void *p) diff --git a/include/linux/usb.h b/include/linux/usb.h index e656e7b4b1e4..9f3c721c70dc 100644 --- a/include/linux/usb.h +++ b/include/linux/usb.h @@ -325,7 +325,7 @@ struct usb_interface_cache { /* variable-length array of alternate settings for this interface, * stored in no particular order */ - struct usb_host_interface altsetting[0]; + struct usb_host_interface altsetting[]; }; #define ref_to_usb_interface_cache(r) \ container_of(r, struct usb_interface_cache, ref) @@ -708,6 +708,7 @@ struct usb_device { unsigned lpm_disable_count; u16 hub_delay; + unsigned use_generic_driver:1; }; #define to_usb_device(d) container_of(d, struct usb_device, dev) @@ -1228,12 +1229,16 @@ struct usb_driver { * @drvwrap: Driver-model core structure wrapper. * @supports_autosuspend: if set to 0, the USB core will not allow autosuspend * for devices bound to this driver. + * @generic_subclass: if set to 1, the generic USB driver's probe, disconnect, + * resume and suspend functions will be called in addition to the driver's + * own, so this part of the setup does not need to be replicated. * * USB drivers must provide all the fields listed above except drvwrap. */ struct usb_device_driver { const char *name; + bool (*match) (struct usb_device *udev); int (*probe) (struct usb_device *udev); void (*disconnect) (struct usb_device *udev); @@ -1241,7 +1246,9 @@ struct usb_device_driver { int (*resume) (struct usb_device *udev, pm_message_t message); const struct attribute_group **dev_groups; struct usbdrv_wrap drvwrap; + const struct usb_device_id *id_table; unsigned int supports_autosuspend:1; + unsigned int generic_subclass:1; }; #define to_usb_device_driver(d) container_of(d, struct usb_device_driver, \ drvwrap.driver) @@ -1582,7 +1589,7 @@ struct urb { int error_count; /* (return) number of ISO errors */ void *context; /* (in) context for completion */ usb_complete_t complete; /* (in) completion routine */ - struct usb_iso_packet_descriptor iso_frame_desc[0]; + struct usb_iso_packet_descriptor iso_frame_desc[]; /* (in) ISO ONLY */ }; diff --git a/include/linux/usb/audio-v2.h b/include/linux/usb/audio-v2.h index ba4b3e3327ff..ead8c9a47c6a 100644 --- a/include/linux/usb/audio-v2.h +++ b/include/linux/usb/audio-v2.h @@ -153,7 +153,19 @@ struct uac2_feature_unit_descriptor { __u8 bSourceID; /* bmaControls is actually u32, * but u8 is needed for the hybrid parser */ - __u8 bmaControls[0]; /* variable length */ + __u8 bmaControls[]; /* variable length */ +} __attribute__((packed)); + +/* 4.7.2.10 Effect Unit Descriptor */ + +struct uac2_effect_unit_descriptor { + __u8 bLength; + __u8 bDescriptorType; + __u8 bDescriptorSubtype; + __u8 bUnitID; + __le16 wEffectType; + __u8 bSourceID; + __u8 bmaControls[]; /* variable length */ } __attribute__((packed)); /* 4.9.2 Class-Specific AS Interface Descriptor */ diff --git a/include/linux/usb/audio-v3.h b/include/linux/usb/audio-v3.h index 6b708434b7f9..c69a6f2e6837 100644 --- a/include/linux/usb/audio-v3.h +++ b/include/linux/usb/audio-v3.h @@ -109,7 +109,7 @@ struct uac3_feature_unit_descriptor { __u8 bSourceID; /* bmaControls is 
actually u32, * but u8 is needed for the hybrid parser */ - __u8 bmaControls[0]; /* variable length */ + __u8 bmaControls[]; /* variable length */ /* wFeatureDescrStr omitted */ } __attribute__((packed)); diff --git a/include/linux/usb/cdc_ncm.h b/include/linux/usb/cdc_ncm.h index 1646c06989df..0ce4377545f8 100644 --- a/include/linux/usb/cdc_ncm.h +++ b/include/linux/usb/cdc_ncm.h @@ -46,9 +46,12 @@ #define CDC_NCM_DATA_ALTSETTING_NCM 1 #define CDC_NCM_DATA_ALTSETTING_MBIM 2 -/* CDC NCM subclass 3.2.1 */ +/* CDC NCM subclass 3.3.1 */ #define USB_CDC_NCM_NDP16_LENGTH_MIN 0x10 +/* CDC NCM subclass 3.3.2 */ +#define USB_CDC_NCM_NDP32_LENGTH_MIN 0x20 + /* Maximum NTB length */ #define CDC_NCM_NTB_MAX_SIZE_TX 32768 /* bytes */ #define CDC_NCM_NTB_MAX_SIZE_RX 32768 /* bytes */ @@ -84,7 +87,7 @@ /* Driver flags */ #define CDC_NCM_FLAG_NDP_TO_END 0x02 /* NDP is placed at end of frame */ #define CDC_MBIM_FLAG_AVOID_ALTSETTING_TOGGLE 0x04 /* Avoid altsetting toggle during init */ -#define CDC_NCM_FLAG_RESET_NTB16 0x08 /* set NDP16 one more time after altsetting switch */ +#define CDC_NCM_FLAG_PREFER_NTB32 0x08 /* prefer NDP32 over NDP16 */ #define cdc_ncm_comm_intf_is_mbim(x) ((x)->desc.bInterfaceSubClass == USB_CDC_SUBCLASS_MBIM && \ (x)->desc.bInterfaceProtocol == USB_CDC_PROTO_NONE) @@ -113,7 +116,11 @@ struct cdc_ncm_ctx { u32 timer_interval; u32 max_ndp_size; - struct usb_cdc_ncm_ndp16 *delayed_ndp16; + u8 is_ndp16; + union { + struct usb_cdc_ncm_ndp16 *delayed_ndp16; + struct usb_cdc_ncm_ndp32 *delayed_ndp32; + }; u32 tx_timer_pending; u32 tx_curr_frame_num; @@ -150,6 +157,8 @@ void cdc_ncm_unbind(struct usbnet *dev, struct usb_interface *intf); struct sk_buff *cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign); int cdc_ncm_rx_verify_nth16(struct cdc_ncm_ctx *ctx, struct sk_buff *skb_in); int cdc_ncm_rx_verify_ndp16(struct sk_buff *skb_in, int ndpoffset); +int cdc_ncm_rx_verify_nth32(struct cdc_ncm_ctx *ctx, struct sk_buff *skb_in); +int cdc_ncm_rx_verify_ndp32(struct sk_buff *skb_in, int ndpoffset); struct sk_buff * cdc_ncm_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags); int cdc_ncm_rx_fixup(struct usbnet *dev, struct sk_buff *skb_in); diff --git a/include/linux/usb/ehci_def.h b/include/linux/usb/ehci_def.h index a15ce99dfc2d..78e006355557 100644 --- a/include/linux/usb/ehci_def.h +++ b/include/linux/usb/ehci_def.h @@ -151,7 +151,7 @@ struct ehci_regs { #define PORT_OWNER (1<<13) /* true: companion hc owns this port */ #define PORT_POWER (1<<12) /* true: has power (see PPC) */ #define PORT_USB11(x) (((x)&(3<<10)) == (1<<10)) /* USB 1.1 device */ -/* 11:10 for detecting lowspeed devices (reset vs release ownership) */ +#define PORT_LS_MASK (3<<10) /* Link status (SE0, K or J */ /* 9 reserved */ #define PORT_LPM (1<<9) /* LPM transaction */ #define PORT_RESET (1<<8) /* reset port */ diff --git a/include/linux/usb/gadget.h b/include/linux/usb/gadget.h index 124462d65eac..9411c08a5c7e 100644 --- a/include/linux/usb/gadget.h +++ b/include/linux/usb/gadget.h @@ -767,7 +767,7 @@ struct usb_gadget_strings { struct usb_gadget_string_container { struct list_head list; - u8 *stash[0]; + u8 *stash[]; }; /* put descriptor for string with that id into buf (buflen >= 256) */ diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h index 712b2a603645..e12105ed3834 100644 --- a/include/linux/usb/hcd.h +++ b/include/linux/usb/hcd.h @@ -228,7 +228,7 @@ struct usb_hcd { /* The HC driver's private data is stored at the end of * this structure. 
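Several hunks in this stretch (usb_interface_cache::altsetting, urb::iso_frame_desc, usb_gadget_string_container::stash, usb_hcd::hcd_priv) convert GCC zero-length arrays into C99 flexible array members. Allocation stays the same; a sizing-only sketch using struct_size() from <linux/overflow.h> (in-tree USB code normally goes through usb_alloc_urb(), which performs the equivalent computation internally):

	#include <linux/overflow.h>
	#include <linux/slab.h>
	#include <linux/usb.h>

	static struct urb *alloc_iso_urb(int npackets)
	{
		struct urb *urb;

		/* struct_size() adds room for the flexible iso_frame_desc[] tail;
		 * sizing demonstration only - real drivers use usb_alloc_urb() */
		urb = kzalloc(struct_size(urb, iso_frame_desc, npackets), GFP_KERNEL);
		return urb;
	}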
*/ - unsigned long hcd_priv[0] + unsigned long hcd_priv[] __attribute__ ((aligned(sizeof(s64)))); }; diff --git a/include/linux/usb/role.h b/include/linux/usb/role.h index efac3af83d6b..0164fed31b06 100644 --- a/include/linux/usb/role.h +++ b/include/linux/usb/role.h @@ -13,8 +13,9 @@ enum usb_role { USB_ROLE_DEVICE, }; -typedef int (*usb_role_switch_set_t)(struct device *dev, enum usb_role role); -typedef enum usb_role (*usb_role_switch_get_t)(struct device *dev); +typedef int (*usb_role_switch_set_t)(struct usb_role_switch *sw, + enum usb_role role); +typedef enum usb_role (*usb_role_switch_get_t)(struct usb_role_switch *sw); /** * struct usb_role_switch_desc - USB Role Switch Descriptor @@ -25,6 +26,8 @@ typedef enum usb_role (*usb_role_switch_get_t)(struct device *dev); * @set: Callback for setting the role * @get: Callback for getting the role (optional) * @allow_userspace_control: If true userspace may change the role through sysfs + * @driver_data: Private data pointer + * @name: Name for the switch (optional) * * @usb2_port and @usb3_port will point to the USB host port and @udc to the USB * device controller behind the USB connector with the role switch. If @@ -40,6 +43,8 @@ struct usb_role_switch_desc { usb_role_switch_set_t set; usb_role_switch_get_t get; bool allow_userspace_control; + void *driver_data; + const char *name; }; @@ -57,6 +62,9 @@ struct usb_role_switch * usb_role_switch_register(struct device *parent, const struct usb_role_switch_desc *desc); void usb_role_switch_unregister(struct usb_role_switch *sw); + +void usb_role_switch_set_drvdata(struct usb_role_switch *sw, void *data); +void *usb_role_switch_get_drvdata(struct usb_role_switch *sw); #else static inline int usb_role_switch_set_role(struct usb_role_switch *sw, enum usb_role role) @@ -90,6 +98,17 @@ usb_role_switch_register(struct device *parent, } static inline void usb_role_switch_unregister(struct usb_role_switch *sw) { } + +static inline void +usb_role_switch_set_drvdata(struct usb_role_switch *sw, void *data) +{ +} + +static inline void *usb_role_switch_get_drvdata(struct usb_role_switch *sw) +{ + return NULL; +} + #endif #endif /* __LINUX_USB_ROLE_H */ diff --git a/include/linux/usb/typec.h b/include/linux/usb/typec.h index c358b3fd05c9..b00a2642a9cd 100644 --- a/include/linux/usb/typec.h +++ b/include/linux/usb/typec.h @@ -198,8 +198,6 @@ struct typec_operations { * @pd_revision: USB Power Delivery Specification revision if supported * @prefer_role: Initial role preference (DRP ports). 
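The usb/role.h changes above hand the switch callbacks the struct usb_role_switch itself, and the new *_drvdata() helpers replace stashing state behind a struct device. A hedged sketch; the controller structure, the names, and the assumption that desc.driver_data is what usb_role_switch_get_drvdata() later returns are illustrative:

	#include <linux/device.h>
	#include <linux/usb/role.h>

	struct my_ctrl {			/* hypothetical controller state */
		struct device *dev;
	};

	static int my_role_set(struct usb_role_switch *sw, enum usb_role role)
	{
		struct my_ctrl *ctrl = usb_role_switch_get_drvdata(sw);

		dev_dbg(ctrl->dev, "switching to role %d\n", role);
		/* program the mux / PHY here */
		return 0;
	}

	static struct usb_role_switch *my_register_role_switch(struct my_ctrl *ctrl)
	{
		struct usb_role_switch_desc desc = {
			.set		= my_role_set,
			.driver_data	= ctrl,	/* retrievable via get_drvdata() */
		};

		return usb_role_switch_register(ctrl->dev, &desc);
	}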
* @accessory: Supported Accessory Modes - * @sw: Cable plug orientation switch - * @mux: Multiplexer switch for Alternate/Accessory Modes * @fwnode: Optional fwnode of the port * @driver_data: Private pointer for driver specific info * @ops: Port operations vector @@ -213,6 +211,7 @@ struct typec_capability { u16 pd_revision; /* 0300H = "3.0" */ int prefer_role; enum typec_accessory accessory[TYPEC_MAX_ACCESSORY]; + unsigned int orientation_aware:1; struct fwnode_handle *fwnode; void *driver_data; diff --git a/include/linux/usb/typec_altmode.h b/include/linux/usb/typec_altmode.h index 923ff3af0628..d834e236c6df 100644 --- a/include/linux/usb/typec_altmode.h +++ b/include/linux/usb/typec_altmode.h @@ -126,13 +126,6 @@ void typec_altmode_put_plug(struct typec_altmode *plug); struct typec_altmode *typec_match_altmode(struct typec_altmode **altmodes, size_t n, u16 svid, u8 mode); -struct typec_altmode * -typec_altmode_register_notifier(struct device *dev, u16 svid, u8 mode, - struct notifier_block *nb); - -void typec_altmode_unregister_notifier(struct typec_altmode *adev, - struct notifier_block *nb); - /** * typec_altmode_get_orientation - Get cable plug orientation * altmode: Handle to the alternate mode diff --git a/include/linux/usb/typec_mux.h b/include/linux/usb/typec_mux.h index be7292c0be5e..a9d9957933dc 100644 --- a/include/linux/usb/typec_mux.h +++ b/include/linux/usb/typec_mux.h @@ -3,6 +3,7 @@ #ifndef __USB_TYPEC_MUX #define __USB_TYPEC_MUX +#include <linux/property.h> #include <linux/usb/typec.h> struct device; @@ -17,11 +18,20 @@ typedef int (*typec_switch_set_fn_t)(struct typec_switch *sw, struct typec_switch_desc { struct fwnode_handle *fwnode; typec_switch_set_fn_t set; + const char *name; void *drvdata; }; -struct typec_switch *typec_switch_get(struct device *dev); +struct typec_switch *fwnode_typec_switch_get(struct fwnode_handle *fwnode); void typec_switch_put(struct typec_switch *sw); +int typec_switch_set(struct typec_switch *sw, + enum typec_orientation orientation); + +static inline struct typec_switch *typec_switch_get(struct device *dev) +{ + return fwnode_typec_switch_get(dev_fwnode(dev)); +} + struct typec_switch * typec_switch_register(struct device *parent, const struct typec_switch_desc *desc); @@ -42,12 +52,21 @@ typedef int (*typec_mux_set_fn_t)(struct typec_mux *mux, struct typec_mux_desc { struct fwnode_handle *fwnode; typec_mux_set_fn_t set; + const char *name; void *drvdata; }; -struct typec_mux * -typec_mux_get(struct device *dev, const struct typec_altmode_desc *desc); +struct typec_mux *fwnode_typec_mux_get(struct fwnode_handle *fwnode, + const struct typec_altmode_desc *desc); void typec_mux_put(struct typec_mux *mux); +int typec_mux_set(struct typec_mux *mux, struct typec_mux_state *state); + +static inline struct typec_mux * +typec_mux_get(struct device *dev, const struct typec_altmode_desc *desc) +{ + return fwnode_typec_mux_get(dev_fwnode(dev), desc); +} + struct typec_mux * typec_mux_register(struct device *parent, const struct typec_mux_desc *desc); void typec_mux_unregister(struct typec_mux *mux); diff --git a/include/linux/usb/typec_tbt.h b/include/linux/usb/typec_tbt.h new file mode 100644 index 000000000000..47c2d501ddce --- /dev/null +++ b/include/linux/usb/typec_tbt.h @@ -0,0 +1,53 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __USB_TYPEC_TBT_H +#define __USB_TYPEC_TBT_H + +#include <linux/usb/typec_altmode.h> + +#define USB_TYPEC_VENDOR_INTEL 0x8087 +/* Alias for convenience */ +#define USB_TYPEC_TBT_SID USB_TYPEC_VENDOR_INTEL + +/* 
Connector state for Thunderbolt3 */ +#define TYPEC_TBT_MODE TYPEC_STATE_MODAL + +/** + * struct typec_thunderbolt_data - Thundebolt3 Alt Mode specific data + * @device_mode: Device Discover Mode VDO + * @cable_mode: Cable Discover Mode VDO + * @enter_vdo: Enter Mode VDO + */ +struct typec_thunderbolt_data { + u32 device_mode; + u32 cable_mode; + u32 enter_vdo; +}; + +/* TBT3 Device Discover Mode VDO bits */ +#define TBT_MODE BIT(0) +#define TBT_ADAPTER(_vdo_) (((_vdo_) & BIT(16)) >> 16) +#define TBT_ADAPTER_LEGACY 0 +#define TBT_ADAPTER_TBT3 1 +#define TBT_INTEL_SPECIFIC_B0 BIT(26) +#define TBT_VENDOR_SPECIFIC_B0 BIT(30) +#define TBT_VENDOR_SPECIFIC_B1 BIT(31) + +#define TBT_SET_ADAPTER(a) (((a) & 1) << 16) + +/* TBT3 Cable Discover Mode VDO bits */ +#define TBT_CABLE_SPEED(_vdo_) (((_vdo_) & GENMASK(18, 16)) >> 16) +#define TBT_CABLE_USB3_GEN1 1 +#define TBT_CABLE_USB3_PASSIVE 2 +#define TBT_CABLE_10_AND_20GBPS 3 +#define TBT_CABLE_ROUNDED BIT(19) +#define TBT_CABLE_OPTICAL BIT(21) +#define TBT_CABLE_RETIMER BIT(22) +#define TBT_CABLE_LINK_TRAINING BIT(23) + +#define TBT_SET_CABLE_SPEED(_s_) (((_s_) & GENMASK(2, 0)) << 16) + +/* TBT3 Device Enter Mode VDO bits */ +#define TBT_ENTER_MODE_CABLE_SPEED(s) TBT_SET_CABLE_SPEED(s) +#define TBT_ENTER_MODE_ACTIVE_CABLE BIT(24) + +#endif /* __USB_TYPEC_TBT_H */ diff --git a/include/linux/usb_usual.h b/include/linux/usb_usual.h index 000a5954b2e8..4a19ac3f24d0 100644 --- a/include/linux/usb_usual.h +++ b/include/linux/usb_usual.h @@ -92,6 +92,6 @@ enum { US_DO_ALL_FLAGS }; #include <linux/usb/storage.h> extern int usb_usual_ignore_device(struct usb_interface *intf); -extern struct usb_device_id usb_storage_usb_ids[]; +extern const struct usb_device_id usb_storage_usb_ids[]; #endif /* __LINUX_USB_USUAL_H */ diff --git a/include/linux/usbdevice_fs.h b/include/linux/usbdevice_fs.h index 79aab0065ec8..14ea197ce37f 100644 --- a/include/linux/usbdevice_fs.h +++ b/include/linux/usbdevice_fs.h @@ -69,7 +69,7 @@ struct usbdevfs_urb32 { compat_int_t error_count; compat_uint_t signr; compat_caddr_t usercontext; /* unused */ - struct usbdevfs_iso_packet_desc iso_frame_desc[0]; + struct usbdevfs_iso_packet_desc iso_frame_desc[]; }; struct usbdevfs_ioctl32 { diff --git a/include/linux/userfaultfd_k.h b/include/linux/userfaultfd_k.h index ac9d71e24b81..a8e5f3ea9bb2 100644 --- a/include/linux/userfaultfd_k.h +++ b/include/linux/userfaultfd_k.h @@ -14,6 +14,8 @@ #include <linux/userfaultfd.h> /* linux/include/uapi/linux/userfaultfd.h */ #include <linux/fcntl.h> +#include <linux/mm.h> +#include <asm-generic/pgtable_uffd.h> /* * CAREFUL: Check include/uapi/asm-generic/fcntl.h when defining @@ -34,11 +36,14 @@ extern vm_fault_t handle_userfault(struct vm_fault *vmf, unsigned long reason); extern ssize_t mcopy_atomic(struct mm_struct *dst_mm, unsigned long dst_start, unsigned long src_start, unsigned long len, - bool *mmap_changing); + bool *mmap_changing, __u64 mode); extern ssize_t mfill_zeropage(struct mm_struct *dst_mm, unsigned long dst_start, unsigned long len, bool *mmap_changing); +extern int mwriteprotect_range(struct mm_struct *dst_mm, + unsigned long start, unsigned long len, + bool enable_wp, bool *mmap_changing); /* mm helpers */ static inline bool is_mergeable_vm_userfaultfd_ctx(struct vm_area_struct *vma, @@ -52,6 +57,23 @@ static inline bool userfaultfd_missing(struct vm_area_struct *vma) return vma->vm_flags & VM_UFFD_MISSING; } +static inline bool userfaultfd_wp(struct vm_area_struct *vma) +{ + return vma->vm_flags & VM_UFFD_WP; +} + +static inline 
bool userfaultfd_pte_wp(struct vm_area_struct *vma, + pte_t pte) +{ + return userfaultfd_wp(vma) && pte_uffd_wp(pte); +} + +static inline bool userfaultfd_huge_pmd_wp(struct vm_area_struct *vma, + pmd_t pmd) +{ + return userfaultfd_wp(vma) && pmd_uffd_wp(pmd); +} + static inline bool userfaultfd_armed(struct vm_area_struct *vma) { return vma->vm_flags & (VM_UFFD_MISSING | VM_UFFD_WP); @@ -96,6 +118,24 @@ static inline bool userfaultfd_missing(struct vm_area_struct *vma) return false; } +static inline bool userfaultfd_wp(struct vm_area_struct *vma) +{ + return false; +} + +static inline bool userfaultfd_pte_wp(struct vm_area_struct *vma, + pte_t pte) +{ + return false; +} + +static inline bool userfaultfd_huge_pmd_wp(struct vm_area_struct *vma, + pmd_t pmd) +{ + return false; +} + + static inline bool userfaultfd_armed(struct vm_area_struct *vma) { return false; diff --git a/include/linux/uuid.h b/include/linux/uuid.h index 0c631e2a73b6..d41b0d3e9474 100644 --- a/include/linux/uuid.h +++ b/include/linux/uuid.h @@ -43,6 +43,16 @@ static inline void guid_copy(guid_t *dst, const guid_t *src) memcpy(dst, src, sizeof(guid_t)); } +static inline void import_guid(guid_t *dst, const __u8 *src) +{ + memcpy(dst, src, sizeof(guid_t)); +} + +static inline void export_guid(__u8 *dst, const guid_t *src) +{ + memcpy(dst, src, sizeof(guid_t)); +} + static inline bool guid_is_null(const guid_t *guid) { return guid_equal(guid, &guid_null); @@ -58,12 +68,23 @@ static inline void uuid_copy(uuid_t *dst, const uuid_t *src) memcpy(dst, src, sizeof(uuid_t)); } +static inline void import_uuid(uuid_t *dst, const __u8 *src) +{ + memcpy(dst, src, sizeof(uuid_t)); +} + +static inline void export_uuid(__u8 *dst, const uuid_t *src) +{ + memcpy(dst, src, sizeof(uuid_t)); +} + static inline bool uuid_is_null(const uuid_t *uuid) { return uuid_equal(uuid, &uuid_null); } void generate_random_uuid(unsigned char uuid[16]); +void generate_random_guid(unsigned char guid[16]); extern void guid_gen(guid_t *u); extern void uuid_gen(uuid_t *u); @@ -77,7 +98,6 @@ int guid_parse(const char *uuid, guid_t *u); int uuid_parse(const char *uuid, uuid_t *u); /* backwards compatibility, don't use in new code */ -#define uuid_le_gen(u) guid_gen(u) #define uuid_le_to_bin(guid, u) guid_parse(guid, u) static inline int uuid_le_cmp(const guid_t u1, const guid_t u2) diff --git a/include/linux/vfio.h b/include/linux/vfio.h index e42a711a2800..5d92ee15d098 100644 --- a/include/linux/vfio.h +++ b/include/linux/vfio.h @@ -26,6 +26,9 @@ * operations documented below * @mmap: Perform mmap(2) on a region of the device file descriptor * @request: Request for the bus driver to release the device + * @match: Optional device name match callback (return: 0 for no-match, >0 for + * match, -errno for abort (ex. 
match with insufficient or incorrect + * additional args) */ struct vfio_device_ops { char *name; @@ -39,6 +42,7 @@ struct vfio_device_ops { unsigned long arg); int (*mmap)(void *device_data, struct vm_area_struct *vma); void (*request)(void *device_data, unsigned int count); + int (*match)(void *device_data, char *buf); }; extern struct iommu_group *vfio_iommu_group_get(struct device *dev); @@ -82,6 +86,8 @@ struct vfio_iommu_driver_ops { struct notifier_block *nb); int (*unregister_notifier)(void *iommu_data, struct notifier_block *nb); + int (*dma_rw)(void *iommu_data, dma_addr_t user_iova, + void *data, size_t count, bool write); }; extern int vfio_register_iommu_driver(const struct vfio_iommu_driver_ops *ops); @@ -94,6 +100,8 @@ extern void vfio_unregister_iommu_driver( */ extern struct vfio_group *vfio_group_get_external_user(struct file *filep); extern void vfio_group_put_external_user(struct vfio_group *group); +extern struct vfio_group *vfio_group_get_external_user_from_dev(struct device + *dev); extern bool vfio_external_group_match_file(struct vfio_group *group, struct file *filep); extern int vfio_external_user_iommu_id(struct vfio_group *group); @@ -107,6 +115,15 @@ extern int vfio_pin_pages(struct device *dev, unsigned long *user_pfn, extern int vfio_unpin_pages(struct device *dev, unsigned long *user_pfn, int npage); +extern int vfio_group_pin_pages(struct vfio_group *group, + unsigned long *user_iova_pfn, int npage, + int prot, unsigned long *phys_pfn); +extern int vfio_group_unpin_pages(struct vfio_group *group, + unsigned long *user_iova_pfn, int npage); + +extern int vfio_dma_rw(struct vfio_group *group, dma_addr_t user_iova, + void *data, size_t len, bool write); + /* each type has independent events */ enum vfio_notify_type { VFIO_IOMMU_NOTIFY = 0, diff --git a/include/linux/vm_event_item.h b/include/linux/vm_event_item.h index 47a3441cf4c4..ffef0f279747 100644 --- a/include/linux/vm_event_item.h +++ b/include/linux/vm_event_item.h @@ -73,9 +73,12 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT, #ifdef CONFIG_TRANSPARENT_HUGEPAGE THP_FAULT_ALLOC, THP_FAULT_FALLBACK, + THP_FAULT_FALLBACK_CHARGE, THP_COLLAPSE_ALLOC, THP_COLLAPSE_ALLOC_FAILED, THP_FILE_ALLOC, + THP_FILE_FALLBACK, + THP_FILE_FALLBACK_CHARGE, THP_FILE_MAPPED, THP_SPLIT_PAGE, THP_SPLIT_PAGE_FAILED, @@ -115,6 +118,8 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT, #ifndef CONFIG_TRANSPARENT_HUGEPAGE #define THP_FILE_ALLOC ({ BUILD_BUG(); 0; }) +#define THP_FILE_FALLBACK ({ BUILD_BUG(); 0; }) +#define THP_FILE_FALLBACK_CHARGE ({ BUILD_BUG(); 0; }) #define THP_FILE_MAPPED ({ BUILD_BUG(); 0; }) #endif diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h index ec3813236699..0507a162ccd0 100644 --- a/include/linux/vmalloc.h +++ b/include/linux/vmalloc.h @@ -141,8 +141,9 @@ extern int remap_vmalloc_range_partial(struct vm_area_struct *vma, extern int remap_vmalloc_range(struct vm_area_struct *vma, void *addr, unsigned long pgoff); -void vmalloc_sync_all(void); - +void vmalloc_sync_mappings(void); +void vmalloc_sync_unmappings(void); + /* * Lowlevel-APIs (not for driver use!) 
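The new vfio exports above (vfio_group_get_external_user_from_dev(), vfio_dma_rw(), the group-based pin/unpin variants) let a vendor driver access guest memory by IOVA without pinning pages first. A hedged sketch of a read path; the function name and the omission of real error and locking concerns are illustrative:

	#include <linux/err.h>
	#include <linux/vfio.h>

	static int my_read_guest(struct device *mdev_dev, dma_addr_t iova,
				 void *buf, size_t len)
	{
		struct vfio_group *group;
		int ret;

		group = vfio_group_get_external_user_from_dev(mdev_dev);
		if (IS_ERR(group))
			return PTR_ERR(group);

		ret = vfio_dma_rw(group, iova, buf, len, false);	/* false = read */

		vfio_group_put_external_user(group);
		return ret;
	}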
*/ diff --git a/include/linux/vt_kern.h b/include/linux/vt_kern.h index 8dc77e40bc03..abf5bccf906a 100644 --- a/include/linux/vt_kern.h +++ b/include/linux/vt_kern.h @@ -28,8 +28,9 @@ #define BROKEN_GRAPHICS_PROGRAMS 1 #endif -extern void kd_mksound(unsigned int hz, unsigned int ticks); -extern int kbd_rate(struct kbd_repeat *rep); +void kd_mksound(unsigned int hz, unsigned int ticks); +int kbd_rate(struct kbd_repeat *rep); + extern int fg_console, last_console, want_console; /* console.c */ @@ -131,11 +132,11 @@ void vt_event_post(unsigned int event, unsigned int old, unsigned int new); int vt_waitactive(int n); void change_console(struct vc_data *new_vc); void reset_vc(struct vc_data *vc); -extern int do_unbind_con_driver(const struct consw *csw, int first, int last, - int deflt); +int do_unbind_con_driver(const struct consw *csw, int first, int last, + int deflt); int vty_init(const struct file_operations *console_fops); -extern char vt_dont_switch; +extern bool vt_dont_switch; extern int default_utf8; extern int global_cursor_default; @@ -146,7 +147,7 @@ struct vt_spawn_console { }; extern struct vt_spawn_console vt_spawn_con; -extern int vt_move_to_console(unsigned int vt, int alloc); +int vt_move_to_console(unsigned int vt, int alloc); /* Interfaces for VC notification of character events (for accessibility etc) */ @@ -155,35 +156,34 @@ struct vt_notifier_param { unsigned int c; /* Printed char */ }; -extern int register_vt_notifier(struct notifier_block *nb); -extern int unregister_vt_notifier(struct notifier_block *nb); +int register_vt_notifier(struct notifier_block *nb); +int unregister_vt_notifier(struct notifier_block *nb); -extern void hide_boot_cursor(bool hide); +void hide_boot_cursor(bool hide); /* keyboard provided interfaces */ -extern int vt_do_diacrit(unsigned int cmd, void __user *up, int eperm); -extern int vt_do_kdskbmode(int console, unsigned int arg); -extern int vt_do_kdskbmeta(int console, unsigned int arg); -extern int vt_do_kbkeycode_ioctl(int cmd, struct kbkeycode __user *user_kbkc, - int perm); -extern int vt_do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, - int perm, int console); -extern int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, - int perm); -extern int vt_do_kdskled(int console, int cmd, unsigned long arg, int perm); -extern int vt_do_kdgkbmode(int console); -extern int vt_do_kdgkbmeta(int console); -extern void vt_reset_unicode(int console); -extern int vt_get_shift_state(void); -extern void vt_reset_keyboard(int console); -extern int vt_get_leds(int console, int flag); -extern int vt_get_kbd_mode_bit(int console, int bit); -extern void vt_set_kbd_mode_bit(int console, int bit); -extern void vt_clr_kbd_mode_bit(int console, int bit); -extern void vt_set_led_state(int console, int leds); -extern void vt_set_led_state(int console, int leds); -extern void vt_kbd_con_start(int console); -extern void vt_kbd_con_stop(int console); +int vt_do_diacrit(unsigned int cmd, void __user *up, int eperm); +int vt_do_kdskbmode(int console, unsigned int arg); +int vt_do_kdskbmeta(int console, unsigned int arg); +int vt_do_kbkeycode_ioctl(int cmd, struct kbkeycode __user *user_kbkc, + int perm); +int vt_do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm, + int console); +int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm); +int vt_do_kdskled(int console, int cmd, unsigned long arg, int perm); +int vt_do_kdgkbmode(int console); +int vt_do_kdgkbmeta(int console); +void vt_reset_unicode(int console); +int 
vt_get_shift_state(void); +void vt_reset_keyboard(int console); +int vt_get_leds(int console, int flag); +int vt_get_kbd_mode_bit(int console, int bit); +void vt_set_kbd_mode_bit(int console, int bit); +void vt_clr_kbd_mode_bit(int console, int bit); +void vt_set_led_state(int console, int leds); +void vt_set_led_state(int console, int leds); +void vt_kbd_con_start(int console); +void vt_kbd_con_stop(int console); void vc_scrolldelta_helper(struct vc_data *c, int lines, unsigned int rolled_over, void *_base, unsigned int size); diff --git a/include/linux/wait.h b/include/linux/wait.h index 3283c8d02137..feeb6be5cad6 100644 --- a/include/linux/wait.h +++ b/include/linux/wait.h @@ -20,6 +20,7 @@ int default_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int #define WQ_FLAG_EXCLUSIVE 0x01 #define WQ_FLAG_WOKEN 0x02 #define WQ_FLAG_BOOKMARK 0x04 +#define WQ_FLAG_CUSTOM 0x08 /* * A single wait-queue entry structure: diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h index 4261d1c6e87b..8b505d22fc0e 100644 --- a/include/linux/workqueue.h +++ b/include/linux/workqueue.h @@ -487,6 +487,19 @@ extern void wq_worker_comm(char *buf, size_t size, struct task_struct *task); * * We queue the work to the CPU on which it was submitted, but if the CPU dies * it can be processed by another CPU. + * + * Memory-ordering properties: If it returns %true, guarantees that all stores + * preceding the call to queue_work() in the program order will be visible from + * the CPU which will execute @work by the time such work executes, e.g., + * + * { x is initially 0 } + * + * CPU0 CPU1 + * + * WRITE_ONCE(x, 1); [ @work is being executed ] + * r0 = queue_work(wq, work); r1 = READ_ONCE(x); + * + * Forbids: r0 == true && r1 == 0 */ static inline bool queue_work(struct workqueue_struct *wq, struct work_struct *work) @@ -546,6 +559,9 @@ static inline bool schedule_work_on(int cpu, struct work_struct *work) * This puts a job in the kernel-global workqueue if it was not already * queued and leaves it in the same position on the kernel-global * workqueue otherwise. + * + * Shares the same memory-ordering properties of queue_work(), cf. the + * DocBook header of queue_work(). */ static inline bool schedule_work(struct work_struct *work) { @@ -649,7 +665,7 @@ int workqueue_online_cpu(unsigned int cpu); int workqueue_offline_cpu(unsigned int cpu); #endif -int __init workqueue_init_early(void); -int __init workqueue_init(void); +void __init workqueue_init_early(void); +void __init workqueue_init(void); #endif diff --git a/include/linux/xarray.h b/include/linux/xarray.h index f73e1775ded0..14c893433139 100644 --- a/include/linux/xarray.h +++ b/include/linux/xarray.h @@ -32,8 +32,8 @@ * The following internal entries have a special meaning: * * 0-62: Sibling entries - * 256: Zero entry - * 257: Retry entry + * 256: Retry entry + * 257: Zero entry * * Errors are also represented as internal entries, but use the negative * space (-4094 to -2). 
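The queue_work() comment above documents a memory-ordering guarantee; expressed as code rather than as a litmus table, it might look like the sketch below (the work item is assumed to have been set up with INIT_WORK() elsewhere):

	#include <linux/printk.h>
	#include <linux/workqueue.h>

	static int payload;
	static struct work_struct w;	/* INIT_WORK(&w, w_fn) at init time */

	static void w_fn(struct work_struct *work)
	{
		/* if the producer's queue_work() returned true, this prints 1 */
		pr_info("payload=%d\n", READ_ONCE(payload));
	}

	static void producer(struct workqueue_struct *wq)
	{
		WRITE_ONCE(payload, 1);
		queue_work(wq, &w);	/* the store above is visible to w_fn() */
	}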
They're never stored in the slots array; only @@ -1648,6 +1648,7 @@ static inline void *xas_next_marked(struct xa_state *xas, unsigned long max, xa_mark_t mark) { struct xa_node *node = xas->xa_node; + void *entry; unsigned int offset; if (unlikely(xas_not_node(node) || node->shift)) @@ -1659,7 +1660,10 @@ static inline void *xas_next_marked(struct xa_state *xas, unsigned long max, return NULL; if (offset == XA_CHUNK_SIZE) return xas_find_marked(xas, max, mark); - return xa_entry(xas->xa, node, offset); + entry = xa_entry(xas->xa, node, offset); + if (!entry) + return xas_find_marked(xas, max, mark); + return entry; } /* diff --git a/include/linux/xattr.h b/include/linux/xattr.h index 6dad031be3c2..4cf6e11f4a3c 100644 --- a/include/linux/xattr.h +++ b/include/linux/xattr.h @@ -102,7 +102,8 @@ struct simple_xattr *simple_xattr_alloc(const void *value, size_t size); int simple_xattr_get(struct simple_xattrs *xattrs, const char *name, void *buffer, size_t size); int simple_xattr_set(struct simple_xattrs *xattrs, const char *name, - const void *value, size_t size, int flags); + const void *value, size_t size, int flags, + ssize_t *removed_size); ssize_t simple_xattr_list(struct inode *inode, struct simple_xattrs *xattrs, char *buffer, size_t size); void simple_xattr_list_add(struct simple_xattrs *xattrs, diff --git a/include/linux/zorro.h b/include/linux/zorro.h index 63fbba0740c2..e2e4de188d84 100644 --- a/include/linux/zorro.h +++ b/include/linux/zorro.h @@ -41,13 +41,6 @@ struct zorro_dev { /* - * Zorro bus - */ - -extern struct bus_type zorro_bus_type; - - - /* * Zorro device drivers */ @@ -70,11 +63,6 @@ struct zorro_driver { /* New-style probing */ extern int zorro_register_driver(struct zorro_driver *); extern void zorro_unregister_driver(struct zorro_driver *); -extern const struct zorro_device_id *zorro_match_device(const struct zorro_device_id *ids, const struct zorro_dev *z); -static inline struct zorro_driver *zorro_dev_driver(const struct zorro_dev *z) -{ - return z->driver; -} extern unsigned int zorro_num_autocon; /* # of autoconfig devices found */ diff --git a/include/media/cec-notifier.h b/include/media/cec-notifier.h index 139e93be13b0..38956969fd12 100644 --- a/include/media/cec-notifier.h +++ b/include/media/cec-notifier.h @@ -20,31 +20,14 @@ struct cec_notifier; #if IS_REACHABLE(CONFIG_CEC_CORE) && IS_ENABLED(CONFIG_CEC_NOTIFIER) /** - * cec_notifier_get_conn - find or create a new cec_notifier for the given - * device and connector tuple. - * @dev: device that sends the events. - * @conn: the connector name from which the event occurs - * - * If a notifier for device @dev already exists, then increase the refcount - * and return that notifier. - * - * If it doesn't exist, then allocate a new notifier struct and return a - * pointer to that new struct. - * - * Return NULL if the memory could not be allocated. - */ -struct cec_notifier *cec_notifier_get_conn(struct device *dev, - const char *conn); - -/** * cec_notifier_conn_register - find or create a new cec_notifier for the given * HDMI device and connector tuple. * @hdmi_dev: HDMI device that sends the events. - * @conn_name: the connector name from which the event occurs. May be NULL + * @port_name: the connector name from which the event occurs. May be NULL * if there is always only one HDMI connector created by the HDMI device. 
* @conn_info: the connector info from which the event occurs (may be NULL) * - * If a notifier for device @dev and connector @conn_name already exists, then + * If a notifier for device @dev and connector @port_name already exists, then * increase the refcount and return that notifier. * * If it doesn't exist, then allocate a new notifier struct and return a @@ -53,7 +36,7 @@ struct cec_notifier *cec_notifier_get_conn(struct device *dev, * Return NULL if the memory could not be allocated. */ struct cec_notifier * -cec_notifier_conn_register(struct device *hdmi_dev, const char *conn_name, +cec_notifier_conn_register(struct device *hdmi_dev, const char *port_name, const struct cec_connector_info *conn_info); /** @@ -67,11 +50,11 @@ void cec_notifier_conn_unregister(struct cec_notifier *n); * cec_notifier_cec_adap_register - find or create a new cec_notifier for the * given device. * @hdmi_dev: HDMI device that sends the events. - * @conn_name: the connector name from which the event occurs. May be NULL + * @port_name: the connector name from which the event occurs. May be NULL * if there is always only one HDMI connector created by the HDMI device. * @adap: the cec adapter that registered this notifier. * - * If a notifier for device @dev and connector @conn_name already exists, then + * If a notifier for device @dev and connector @port_name already exists, then * increase the refcount and return that notifier. * * If it doesn't exist, then allocate a new notifier struct and return a @@ -80,7 +63,7 @@ void cec_notifier_conn_unregister(struct cec_notifier *n); * Return NULL if the memory could not be allocated. */ struct cec_notifier * -cec_notifier_cec_adap_register(struct device *hdmi_dev, const char *conn_name, +cec_notifier_cec_adap_register(struct device *hdmi_dev, const char *port_name, struct cec_adapter *adap); /** @@ -125,15 +108,9 @@ void cec_notifier_set_phys_addr_from_edid(struct cec_notifier *n, struct device *cec_notifier_parse_hdmi_phandle(struct device *dev); #else -static inline struct cec_notifier *cec_notifier_get_conn(struct device *dev, - const char *conn) -{ - /* A non-NULL pointer is expected on success */ - return (struct cec_notifier *)0xdeadfeed; -} static inline struct cec_notifier * -cec_notifier_conn_register(struct device *hdmi_dev, const char *conn_name, +cec_notifier_conn_register(struct device *hdmi_dev, const char *port_name, const struct cec_connector_info *conn_info) { /* A non-NULL pointer is expected on success */ @@ -145,7 +122,7 @@ static inline void cec_notifier_conn_unregister(struct cec_notifier *n) } static inline struct cec_notifier * -cec_notifier_cec_adap_register(struct device *hdmi_dev, const char *conn_name, +cec_notifier_cec_adap_register(struct device *hdmi_dev, const char *port_name, struct cec_adapter *adap) { /* A non-NULL pointer is expected on success */ diff --git a/include/media/h264-ctrls.h b/include/media/h264-ctrls.h index e877bf1d537c..1c6ff7d63bca 100644 --- a/include/media/h264-ctrls.h +++ b/include/media/h264-ctrls.h @@ -185,6 +185,8 @@ struct v4l2_ctrl_h264_slice_params { #define V4L2_H264_DPB_ENTRY_FLAG_VALID 0x01 #define V4L2_H264_DPB_ENTRY_FLAG_ACTIVE 0x02 #define V4L2_H264_DPB_ENTRY_FLAG_LONG_TERM 0x04 +#define V4L2_H264_DPB_ENTRY_FLAG_FIELD 0x08 +#define V4L2_H264_DPB_ENTRY_FLAG_BOTTOM_FIELD 0x10 struct v4l2_h264_dpb_entry { __u64 reference_ts; diff --git a/include/media/i2c/smiapp.h b/include/media/i2c/smiapp.h deleted file mode 100644 index 80f8251d87a3..000000000000 --- a/include/media/i2c/smiapp.h +++ /dev/null @@ 
-1,63 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * include/media/i2c/smiapp.h - * - * Generic driver for SMIA/SMIA++ compliant camera modules - * - * Copyright (C) 2011--2012 Nokia Corporation - * Contact: Sakari Ailus <sakari.ailus@iki.fi> - */ - -#ifndef __SMIAPP_H_ -#define __SMIAPP_H_ - -#include <media/v4l2-subdev.h> - -#define SMIAPP_NAME "smiapp" - -#define SMIAPP_DFL_I2C_ADDR (0x20 >> 1) /* Default I2C Address */ -#define SMIAPP_ALT_I2C_ADDR (0x6e >> 1) /* Alternate I2C Address */ - -#define SMIAPP_CSI_SIGNALLING_MODE_CCP2_DATA_CLOCK 0 -#define SMIAPP_CSI_SIGNALLING_MODE_CCP2_DATA_STROBE 1 -#define SMIAPP_CSI_SIGNALLING_MODE_CSI2 2 - -/* - * Sometimes due to board layout considerations the camera module can be - * mounted rotated. The typical rotation used is 180 degrees which can be - * corrected by giving a default H-FLIP and V-FLIP in the sensor readout. - * FIXME: rotation also changes the bayer pattern. - */ -enum smiapp_module_board_orient { - SMIAPP_MODULE_BOARD_ORIENT_0 = 0, - SMIAPP_MODULE_BOARD_ORIENT_180, -}; - -struct smiapp_flash_strobe_parms { - u8 mode; - u32 strobe_width_high_us; - u16 strobe_delay; - u16 stobe_start_point; - u8 trigger; -}; - -struct smiapp_hwconfig { - /* - * Change the cci address if i2c_addr_alt is set. - * Both default and alternate cci addr need to be present - */ - unsigned short i2c_addr_dfl; /* Default i2c addr */ - unsigned short i2c_addr_alt; /* Alternate i2c addr */ - - uint32_t ext_clk; /* sensor external clk */ - - unsigned int lanes; /* Number of CSI-2 lanes */ - uint32_t csi_signalling_mode; /* SMIAPP_CSI_SIGNALLING_MODE_* */ - uint64_t *op_sys_clock; - - enum smiapp_module_board_orient module_board_orient; - - struct smiapp_flash_strobe_parms *strobe_setup; -}; - -#endif /* __SMIAPP_H_ */ diff --git a/include/media/rc-core.h b/include/media/rc-core.h index 1f695d9c200a..d3f85df64bb2 100644 --- a/include/media/rc-core.h +++ b/include/media/rc-core.h @@ -192,7 +192,7 @@ struct rc_dev { struct timer_list timer_repeat; u32 last_keycode; enum rc_proto last_protocol; - u32 last_scancode; + u64 last_scancode; u8 last_toggle; u32 timeout; u32 min_timeout; @@ -284,12 +284,12 @@ int devm_rc_register_device(struct device *parent, struct rc_dev *dev); void rc_unregister_device(struct rc_dev *dev); void rc_repeat(struct rc_dev *dev); -void rc_keydown(struct rc_dev *dev, enum rc_proto protocol, u32 scancode, +void rc_keydown(struct rc_dev *dev, enum rc_proto protocol, u64 scancode, u8 toggle); void rc_keydown_notimeout(struct rc_dev *dev, enum rc_proto protocol, - u32 scancode, u8 toggle); + u64 scancode, u8 toggle); void rc_keyup(struct rc_dev *dev); -u32 rc_g_keycode_from_table(struct rc_dev *dev, u32 scancode); +u32 rc_g_keycode_from_table(struct rc_dev *dev, u64 scancode); /* * From rc-raw.c diff --git a/include/media/rc-map.h b/include/media/rc-map.h index f99575a0d29c..0ce896f10202 100644 --- a/include/media/rc-map.h +++ b/include/media/rc-map.h @@ -85,11 +85,11 @@ /** * struct rc_map_table - represents a scancode/keycode pair * - * @scancode: scan code (u32) + * @scancode: scan code (u64) * @keycode: Linux input keycode */ struct rc_map_table { - u32 scancode; + u64 scancode; u32 keycode; }; @@ -274,6 +274,7 @@ struct rc_map *rc_map_get(const char *name); #define RC_MAP_VIDEOMATE_K100 "rc-videomate-k100" #define RC_MAP_VIDEOMATE_S350 "rc-videomate-s350" #define RC_MAP_VIDEOMATE_TV_PVR "rc-videomate-tv-pvr" +#define RC_MAP_KII_PRO "rc-videostrong-kii-pro" #define RC_MAP_WETEK_HUB "rc-wetek-hub" #define RC_MAP_WETEK_PLAY2 
"rc-wetek-play2" #define RC_MAP_WINFAST "rc-winfast" diff --git a/include/media/v4l2-dev.h b/include/media/v4l2-dev.h index 48531e57cc5a..4602c15ff878 100644 --- a/include/media/v4l2-dev.h +++ b/include/media/v4l2-dev.h @@ -24,7 +24,7 @@ /** * enum vfl_devnode_type - type of V4L2 device node * - * @VFL_TYPE_GRABBER: for video input/output devices + * @VFL_TYPE_VIDEO: for video input/output devices * @VFL_TYPE_VBI: for vertical blank data (i.e. closed captions, teletext) * @VFL_TYPE_RADIO: for radio tuners * @VFL_TYPE_SUBDEV: for V4L2 subdevices @@ -33,7 +33,7 @@ * @VFL_TYPE_MAX: number of VFL types, must always be last in the enum */ enum vfl_devnode_type { - VFL_TYPE_GRABBER = 0, + VFL_TYPE_VIDEO, VFL_TYPE_VBI, VFL_TYPE_RADIO, VFL_TYPE_SUBDEV, diff --git a/include/media/v4l2-device.h b/include/media/v4l2-device.h index 95353ae476a1..7c912b7d2870 100644 --- a/include/media/v4l2-device.h +++ b/include/media/v4l2-device.h @@ -240,7 +240,7 @@ static inline bool v4l2_device_supports_requests(struct v4l2_device *v4l2_dev) * @f: operation function that will be called if @cond matches. * The operation functions are defined in groups, according to * each element at &struct v4l2_subdev_ops. - * @args...: arguments for @f. + * @args: arguments for @f. * * Ignore any errors. * @@ -265,7 +265,7 @@ static inline bool v4l2_device_supports_requests(struct v4l2_device *v4l2_dev) * @f: operation function that will be called if @cond matches. * The operation functions are defined in groups, according to * each element at &struct v4l2_subdev_ops. - * @args...: arguments for @f. + * @args: arguments for @f. * * Ignore any errors. * @@ -293,7 +293,7 @@ static inline bool v4l2_device_supports_requests(struct v4l2_device *v4l2_dev) * @f: operation function that will be called if @cond matches. * The operation functions are defined in groups, according to * each element at &struct v4l2_subdev_ops. - * @args...: arguments for @f. + * @args: arguments for @f. * * Return: * @@ -328,7 +328,7 @@ static inline bool v4l2_device_supports_requests(struct v4l2_device *v4l2_dev) * @f: operation function that will be called if @cond matches. * The operation functions are defined in groups, according to * each element at &struct v4l2_subdev_ops. - * @args...: arguments for @f. + * @args: arguments for @f. * * Return: * @@ -359,7 +359,7 @@ static inline bool v4l2_device_supports_requests(struct v4l2_device *v4l2_dev) * @f: operation function that will be called if @cond matches. * The operation functions are defined in groups, according to * each element at &struct v4l2_subdev_ops. - * @args...: arguments for @f. + * @args: arguments for @f. * * Ignore any errors. * @@ -388,7 +388,7 @@ static inline bool v4l2_device_supports_requests(struct v4l2_device *v4l2_dev) * @f: operation function that will be called if @cond matches. * The operation functions are defined in groups, according to * each element at &struct v4l2_subdev_ops. - * @args...: arguments for @f. + * @args: arguments for @f. * * Return: * @@ -419,7 +419,7 @@ static inline bool v4l2_device_supports_requests(struct v4l2_device *v4l2_dev) * @f: operation function that will be called if @cond matches. * The operation functions are defined in groups, according to * each element at &struct v4l2_subdev_ops. - * @args...: arguments for @f. + * @args: arguments for @f. * * Ignore any errors. * @@ -447,7 +447,7 @@ static inline bool v4l2_device_supports_requests(struct v4l2_device *v4l2_dev) * @f: operation function that will be called if @cond matches. 
* The operation functions are defined in groups, according to * each element at &struct v4l2_subdev_ops. - * @args...: arguments for @f. + * @args: arguments for @f. * * Return: * diff --git a/include/media/v4l2-fwnode.h b/include/media/v4l2-fwnode.h index f6a7bcd13197..dd82d6d9764e 100644 --- a/include/media/v4l2-fwnode.h +++ b/include/media/v4l2-fwnode.h @@ -113,14 +113,75 @@ struct v4l2_fwnode_endpoint { * struct v4l2_fwnode_link - a link between two endpoints * @local_node: pointer to device_node of this endpoint * @local_port: identifier of the port this endpoint belongs to + * @local_id: identifier of the id this endpoint belongs to * @remote_node: pointer to device_node of the remote endpoint * @remote_port: identifier of the port the remote endpoint belongs to + * @remote_id: identifier of the id the remote endpoint belongs to */ struct v4l2_fwnode_link { struct fwnode_handle *local_node; unsigned int local_port; + unsigned int local_id; struct fwnode_handle *remote_node; unsigned int remote_port; + unsigned int remote_id; +}; + +/** + * enum v4l2_connector_type - connector type + * @V4L2_CONN_UNKNOWN: unknown connector type, no V4L2 connector configuration + * @V4L2_CONN_COMPOSITE: analog composite connector + * @V4L2_CONN_SVIDEO: analog svideo connector + */ +enum v4l2_connector_type { + V4L2_CONN_UNKNOWN, + V4L2_CONN_COMPOSITE, + V4L2_CONN_SVIDEO, +}; + +/** + * struct v4l2_connector_link - connector link data structure + * @head: structure to be used to add the link to the + * &struct v4l2_fwnode_connector + * @fwnode_link: &struct v4l2_fwnode_link link between the connector and the + * device the connector belongs to. + */ +struct v4l2_connector_link { + struct list_head head; + struct v4l2_fwnode_link fwnode_link; +}; + +/** + * struct v4l2_fwnode_connector_analog - analog connector data structure + * @sdtv_stds: sdtv standards this connector supports, set to V4L2_STD_ALL + * if no restrictions are specified. + */ +struct v4l2_fwnode_connector_analog { + v4l2_std_id sdtv_stds; +}; + +/** + * struct v4l2_fwnode_connector - the connector data structure + * @name: the connector device name + * @label: optional connector label + * @type: connector type + * @links: list of all connector &struct v4l2_connector_link links + * @nr_of_links: total number of links + * @connector: connector configuration + * @connector.analog: analog connector configuration + * &struct v4l2_fwnode_connector_analog + */ +struct v4l2_fwnode_connector { + const char *name; + const char *label; + enum v4l2_connector_type type; + struct list_head links; + unsigned int nr_of_links; + + union { + struct v4l2_fwnode_connector_analog analog; + /* future connectors */ + } connector; }; /** @@ -234,6 +295,66 @@ int v4l2_fwnode_parse_link(struct fwnode_handle *fwnode, void v4l2_fwnode_put_link(struct v4l2_fwnode_link *link); /** + * v4l2_fwnode_connector_free() - free the V4L2 connector acquired memory + * @connector: the V4L2 connector resources of which are to be released + * + * Free all allocated memory and put all links acquired by + * v4l2_fwnode_connector_parse() and v4l2_fwnode_connector_add_link(). + * + * It is safe to call this function with NULL argument or on a V4L2 connector + * the parsing of which failed. 
+ */ +void v4l2_fwnode_connector_free(struct v4l2_fwnode_connector *connector); + +/** + * v4l2_fwnode_connector_parse() - initialize the 'struct v4l2_fwnode_connector' + * @fwnode: pointer to the subdev endpoint's fwnode handle where the connector + * is connected to or to the connector endpoint fwnode handle. + * @connector: pointer to the V4L2 fwnode connector data structure + * + * Fill the &struct v4l2_fwnode_connector with the connector type, label and + * all &enum v4l2_connector_type specific connector data. The label is optional + * so it is set to %NULL if no one was found. The function initialize the links + * to zero. Adding links to the connector is done by calling + * v4l2_fwnode_connector_add_link(). + * + * The memory allocated for the label must be freed when no longer needed. + * Freeing the memory is done by v4l2_fwnode_connector_free(). + * + * Return: + * * %0 on success or a negative error code on failure: + * * %-EINVAL if @fwnode is invalid + * * %-ENOTCONN if connector type is unknown or connector device can't be found + */ +int v4l2_fwnode_connector_parse(struct fwnode_handle *fwnode, + struct v4l2_fwnode_connector *connector); + +/** + * v4l2_fwnode_connector_add_link - add a link between a connector node and + * a v4l2-subdev node. + * @fwnode: pointer to the subdev endpoint's fwnode handle where the connector + * is connected to + * @connector: pointer to the V4L2 fwnode connector data structure + * + * Add a new &struct v4l2_connector_link link to the + * &struct v4l2_fwnode_connector connector links list. The link local_node + * points to the connector node, the remote_node to the host v4l2 (sub)dev. + * + * The taken references to remote_node and local_node must be dropped and the + * allocated memory must be freed when no longer needed. Both is done by calling + * v4l2_fwnode_connector_free(). + * + * Return: + * * %0 on success or a negative error code on failure: + * * %-EINVAL if @fwnode or @connector is invalid or @connector type is unknown + * * %-ENOMEM on link memory allocation failure + * * %-ENOTCONN if remote connector device can't be found + * * %-ENOLINK if link parsing between v4l2 (sub)dev and connector fails + */ +int v4l2_fwnode_connector_add_link(struct fwnode_handle *fwnode, + struct v4l2_fwnode_connector *connector); + +/** * typedef parse_endpoint_func - Driver's callback function to be called on * each V4L2 fwnode endpoint. * @@ -406,4 +527,26 @@ v4l2_async_register_fwnode_subdev(struct v4l2_subdev *sd, unsigned int num_ports, parse_endpoint_func parse_endpoint); +/* Helper macros to access the connector links. */ + +/** v4l2_connector_last_link - Helper macro to get the first + * &struct v4l2_fwnode_connector link + * @v4l2c: &struct v4l2_fwnode_connector owning the connector links + * + * This marco returns the first added &struct v4l2_connector_link connector + * link or @NULL if the connector has no links. + */ +#define v4l2_connector_first_link(v4l2c) \ + list_first_entry_or_null(&(v4l2c)->links, \ + struct v4l2_connector_link, head) + +/** v4l2_connector_last_link - Helper macro to get the last + * &struct v4l2_fwnode_connector link + * @v4l2c: &struct v4l2_fwnode_connector owning the connector links + * + * This marco returns the last &struct v4l2_connector_link added connector link. 
+ */ +#define v4l2_connector_last_link(v4l2c) \ + list_last_entry(&(v4l2c)->links, struct v4l2_connector_link, head) + #endif /* _V4L2_FWNODE_H */ diff --git a/include/media/v4l2-mc.h b/include/media/v4l2-mc.h index 384960249f01..5e73eb8e28f6 100644 --- a/include/media/v4l2-mc.h +++ b/include/media/v4l2-mc.h @@ -86,23 +86,30 @@ int v4l_vb2q_enable_media_source(struct vb2_queue *q); /** - * v4l2_pipeline_pm_use - Update the use count of an entity - * @entity: The entity - * @use: Use (1) or stop using (0) the entity + * v4l2_pipeline_pm_get - Increase the use count of a pipeline + * @entity: The root entity of a pipeline * - * Update the use count of all entities in the pipeline and power entities on or - * off accordingly. + * Update the use count of all entities in the pipeline and power entities on. * - * This function is intended to be called in video node open (use == - * 1) and release (use == 0). It uses struct media_entity.use_count to - * track the power status. The use of this function should be paired - * with v4l2_pipeline_link_notify(). + * This function is intended to be called in video node open. It uses + * struct media_entity.use_count to track the power status. The use + * of this function should be paired with v4l2_pipeline_link_notify(). * - * Return 0 on success or a negative error code on failure. Powering entities - * off is assumed to never fail. No failure can occur when the use parameter is - * set to 0. + * Return 0 on success or a negative error code on failure. */ -int v4l2_pipeline_pm_use(struct media_entity *entity, int use); +int v4l2_pipeline_pm_get(struct media_entity *entity); + +/** + * v4l2_pipeline_pm_put - Decrease the use count of a pipeline + * @entity: The root entity of a pipeline + * + * Update the use count of all entities in the pipeline and power entities off. + * + * This function is intended to be called in video node release. It uses + * struct media_entity.use_count to track the power status. The use + * of this function should be paired with v4l2_pipeline_link_notify(). + */ +void v4l2_pipeline_pm_put(struct media_entity *entity); /** @@ -114,7 +121,7 @@ int v4l2_pipeline_pm_use(struct media_entity *entity, int use); * React to link management on powered pipelines by updating the use count of * all entities in the source and sink sides of the link. Entities are powered * on or off accordingly. The use of this function should be paired - * with v4l2_pipeline_pm_use(). + * with v4l2_pipeline_pm_{get,put}(). * * Return 0 on success or a negative error code on failure. Powering entities * off is assumed to never fail. This function will not fail for disconnection @@ -144,11 +151,14 @@ static inline int v4l_vb2q_enable_media_source(struct vb2_queue *q) return 0; } -static inline int v4l2_pipeline_pm_use(struct media_entity *entity, int use) +static inline int v4l2_pipeline_pm_get(struct media_entity *entity) { return 0; } +static inline void v4l2_pipeline_pm_put(struct media_entity *entity) +{} + static inline int v4l2_pipeline_link_notify(struct media_link *link, u32 flags, unsigned int notification) { diff --git a/include/media/v4l2-mem2mem.h b/include/media/v4l2-mem2mem.h index 1d85e24791e4..98753f00df7e 100644 --- a/include/media/v4l2-mem2mem.h +++ b/include/media/v4l2-mem2mem.h @@ -80,6 +80,10 @@ struct v4l2_m2m_queue_ctx { * for an existing frame. This is always true unless * V4L2_BUF_CAP_SUPPORTS_M2M_HOLD_CAPTURE_BUF is set, which * indicates slicing support. 
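An aside on the v4l2-mc.h hunk above, which splits v4l2_pipeline_pm_use() into an explicit get/put pair: below is a minimal sketch of how a media-controller video node's open and release might pair the two calls. my_video_open()/my_video_release() are hypothetical names and the surrounding driver plumbing is assumed, not taken from the patch.

#include <linux/fs.h>
#include <media/v4l2-dev.h>
#include <media/v4l2-fh.h>
#include <media/v4l2-mc.h>

/* Sketch only: assumes CONFIG_MEDIA_CONTROLLER, so vdev->entity exists. */
static int my_video_open(struct file *file)
{
	struct video_device *vdev = video_devdata(file);
	int ret;

	ret = v4l2_fh_open(file);
	if (ret)
		return ret;

	ret = v4l2_pipeline_pm_get(&vdev->entity);
	if (ret)
		v4l2_fh_release(file);
	return ret;
}

static int my_video_release(struct file *file)
{
	struct video_device *vdev = video_devdata(file);

	v4l2_pipeline_pm_put(&vdev->entity);	/* powering off cannot fail */
	return v4l2_fh_release(file);
}

The put side returning void matches the kerneldoc above: powering entities off is assumed never to fail, so release paths no longer need to check an error code.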
+ * @is_draining: indicates device is in draining phase + * @last_src_buf: indicate the last source buffer for draining + * @next_buf_last: next capture queud buffer will be tagged as last + * @has_stopped: indicate the device has been stopped * @m2m_dev: opaque pointer to the internal data to handle M2M context * @cap_q_ctx: Capture (output to memory) queue context * @out_q_ctx: Output (input from memory) queue context @@ -98,6 +102,11 @@ struct v4l2_m2m_ctx { bool new_frame; + bool is_draining; + struct vb2_v4l2_buffer *last_src_buf; + bool next_buf_last; + bool has_stopped; + /* internal use only */ struct v4l2_m2m_dev *m2m_dev; @@ -216,6 +225,86 @@ v4l2_m2m_buf_done(struct vb2_v4l2_buffer *buf, enum vb2_buffer_state state) } /** + * v4l2_m2m_clear_state() - clear encoding/decoding state + * + * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx + */ +static inline void +v4l2_m2m_clear_state(struct v4l2_m2m_ctx *m2m_ctx) +{ + m2m_ctx->next_buf_last = false; + m2m_ctx->is_draining = false; + m2m_ctx->has_stopped = false; +} + +/** + * v4l2_m2m_mark_stopped() - set current encoding/decoding state as stopped + * + * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx + */ +static inline void +v4l2_m2m_mark_stopped(struct v4l2_m2m_ctx *m2m_ctx) +{ + m2m_ctx->next_buf_last = false; + m2m_ctx->is_draining = false; + m2m_ctx->has_stopped = true; +} + +/** + * v4l2_m2m_dst_buf_is_last() - return the current encoding/decoding session + * draining management state of next queued capture buffer + * + * This last capture buffer should be tagged with V4L2_BUF_FLAG_LAST to notify + * the end of the capture session. + * + * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx + */ +static inline bool +v4l2_m2m_dst_buf_is_last(struct v4l2_m2m_ctx *m2m_ctx) +{ + return m2m_ctx->is_draining && m2m_ctx->next_buf_last; +} + +/** + * v4l2_m2m_has_stopped() - return the current encoding/decoding session + * stopped state + * + * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx + */ +static inline bool +v4l2_m2m_has_stopped(struct v4l2_m2m_ctx *m2m_ctx) +{ + return m2m_ctx->has_stopped; +} + +/** + * v4l2_m2m_is_last_draining_src_buf() - return the output buffer draining + * state in the current encoding/decoding session + * + * This will identify the last output buffer queued before a session stop + * was required, leading to an actual encoding/decoding session stop state + * in the encoding/decoding process after being processed. 
+ * + * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx + * @vbuf: pointer to struct &v4l2_buffer + */ +static inline bool +v4l2_m2m_is_last_draining_src_buf(struct v4l2_m2m_ctx *m2m_ctx, + struct vb2_v4l2_buffer *vbuf) +{ + return m2m_ctx->is_draining && vbuf == m2m_ctx->last_src_buf; +} + +/** + * v4l2_m2m_last_buffer_done() - marks the buffer with LAST flag and DONE + * + * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx + * @vbuf: pointer to struct &v4l2_buffer + */ +void v4l2_m2m_last_buffer_done(struct v4l2_m2m_ctx *m2m_ctx, + struct vb2_v4l2_buffer *vbuf); + +/** * v4l2_m2m_reqbufs() - multi-queue-aware REQBUFS multiplexer * * @file: pointer to struct &file @@ -313,6 +402,46 @@ int v4l2_m2m_streamoff(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, enum v4l2_buf_type type); /** + * v4l2_m2m_update_start_streaming_state() - update the encoding/decoding + * session state when a start of streaming of a video queue is requested + * + * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx + * @q: queue + */ +void v4l2_m2m_update_start_streaming_state(struct v4l2_m2m_ctx *m2m_ctx, + struct vb2_queue *q); + +/** + * v4l2_m2m_update_stop_streaming_state() - update the encoding/decoding + * session state when a stop of streaming of a video queue is requested + * + * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx + * @q: queue + */ +void v4l2_m2m_update_stop_streaming_state(struct v4l2_m2m_ctx *m2m_ctx, + struct vb2_queue *q); + +/** + * v4l2_m2m_encoder_cmd() - execute an encoder command + * + * @file: pointer to struct &file + * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx + * @ec: pointer to the encoder command + */ +int v4l2_m2m_encoder_cmd(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, + struct v4l2_encoder_cmd *ec); + +/** + * v4l2_m2m_decoder_cmd() - execute a decoder command + * + * @file: pointer to struct &file + * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx + * @dc: pointer to the decoder command + */ +int v4l2_m2m_decoder_cmd(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, + struct v4l2_decoder_cmd *dc); + +/** * v4l2_m2m_poll() - poll replacement, for destination buffers only * * @file: pointer to struct &file @@ -704,6 +833,10 @@ int v4l2_m2m_ioctl_streamon(struct file *file, void *fh, enum v4l2_buf_type type); int v4l2_m2m_ioctl_streamoff(struct file *file, void *fh, enum v4l2_buf_type type); +int v4l2_m2m_ioctl_encoder_cmd(struct file *file, void *fh, + struct v4l2_encoder_cmd *ec); +int v4l2_m2m_ioctl_decoder_cmd(struct file *file, void *fh, + struct v4l2_decoder_cmd *dc); int v4l2_m2m_ioctl_try_encoder_cmd(struct file *file, void *fh, struct v4l2_encoder_cmd *ec); int v4l2_m2m_ioctl_try_decoder_cmd(struct file *file, void *fh, diff --git a/include/media/v4l2-subdev.h b/include/media/v4l2-subdev.h index 761aa83a3f3c..a4848de59852 100644 --- a/include/media/v4l2-subdev.h +++ b/include/media/v4l2-subdev.h @@ -1093,7 +1093,7 @@ extern const struct v4l2_subdev_ops v4l2_subdev_call_wrappers; * @f: callback function to be called. * The callback functions are defined in groups, according to * each element at &struct v4l2_subdev_ops. - * @args...: arguments for @f. + * @args: arguments for @f. 
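An aside on the v4l2-mem2mem.h additions above: the draining helpers let a stateful codec tag the final capture buffer with V4L2_BUF_FLAG_LAST once the last queued output buffer has been consumed. A rough sketch of a job-completion path using them follows; my_job_done() is a hypothetical helper, not a complete device_run()/job_finish() implementation.

#include <media/v4l2-mem2mem.h>
#include <media/videobuf2-v4l2.h>

static void my_job_done(struct v4l2_m2m_ctx *m2m_ctx)
{
	struct vb2_v4l2_buffer *src = v4l2_m2m_src_buf_remove(m2m_ctx);
	struct vb2_v4l2_buffer *dst = v4l2_m2m_dst_buf_remove(m2m_ctx);
	bool last = v4l2_m2m_is_last_draining_src_buf(m2m_ctx, src);

	v4l2_m2m_buf_done(src, VB2_BUF_STATE_DONE);

	if (last)
		/* Marks dst with V4L2_BUF_FLAG_LAST and completes it as DONE. */
		v4l2_m2m_last_buffer_done(m2m_ctx, dst);
	else
		v4l2_m2m_buf_done(dst, VB2_BUF_STATE_DONE);
}

Drivers adopting this state machine can also point their v4l2_ioctl_ops .vidioc_encoder_cmd/.vidioc_decoder_cmd handlers at the new v4l2_m2m_ioctl_encoder_cmd()/v4l2_m2m_ioctl_decoder_cmd() helpers, whose signatures match those callbacks.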
* * Example: err = v4l2_subdev_call(sd, video, s_std, norm); */ diff --git a/include/media/videobuf2-core.h b/include/media/videobuf2-core.h index a2b2208b02da..f11b96514cf7 100644 --- a/include/media/videobuf2-core.h +++ b/include/media/videobuf2-core.h @@ -509,8 +509,11 @@ struct vb2_buf_ops { * by the vb2 core. * @buf_struct_size: size of the driver-specific buffer structure; * "0" indicates the driver doesn't want to use a custom buffer - * structure type. for example, ``sizeof(struct vb2_v4l2_buffer)`` - * will be used for v4l2. + * structure type. In that case a subsystem-specific struct + * will be used (in the case of V4L2 that is + * ``sizeof(struct vb2_v4l2_buffer)``). The first field of the + * driver-specific buffer structure must be the subsystem-specific + * struct (vb2_v4l2_buffer in the case of V4L2). * @timestamp_flags: Timestamp flags; ``V4L2_BUF_FLAG_TIMESTAMP_*`` and * ``V4L2_BUF_FLAG_TSTAMP_SRC_*`` * @gfp_flags: additional gfp flags used when allocating the buffers. diff --git a/include/net/6lowpan.h b/include/net/6lowpan.h index a71378007e61..c80539be1542 100644 --- a/include/net/6lowpan.h +++ b/include/net/6lowpan.h @@ -138,7 +138,7 @@ struct lowpan_dev { struct lowpan_iphc_ctx_table ctx; /* must be last */ - u8 priv[0] __aligned(sizeof(void *)); + u8 priv[] __aligned(sizeof(void *)); }; struct lowpan_802154_neigh { diff --git a/include/net/9p/client.h b/include/net/9p/client.h index acc60d8a3b3b..dd5b5bd781a4 100644 --- a/include/net/9p/client.h +++ b/include/net/9p/client.h @@ -73,7 +73,6 @@ enum p9_req_status_t { * @wq: wait_queue for the client to block on for this request * @tc: the request fcall structure * @rc: the response fcall structure - * @aux: transport specific data (provided for trans_fd migration) * @req_list: link for higher level objects to chain requests */ struct p9_req_t { @@ -83,7 +82,6 @@ struct p9_req_t { wait_queue_head_t wq; struct p9_fcall tc; struct p9_fcall rc; - void *aux; struct list_head req_list; }; @@ -200,6 +198,8 @@ int p9_client_fsync(struct p9_fid *fid, int datasync); int p9_client_remove(struct p9_fid *fid); int p9_client_unlinkat(struct p9_fid *dfid, const char *name, int flags); int p9_client_read(struct p9_fid *fid, u64 offset, struct iov_iter *to, int *err); +int p9_client_read_once(struct p9_fid *fid, u64 offset, struct iov_iter *to, + int *err); int p9_client_write(struct p9_fid *fid, u64 offset, struct iov_iter *from, int *err); int p9_client_readdir(struct p9_fid *fid, char *data, u32 count, u64 offset); int p9dirent_read(struct p9_client *clnt, char *buf, int len, diff --git a/include/net/act_api.h b/include/net/act_api.h index 71347a90a9d1..c24d7643548e 100644 --- a/include/net/act_api.h +++ b/include/net/act_api.h @@ -41,6 +41,9 @@ struct tc_action { struct tc_cookie __rcu *act_cookie; struct tcf_chain __rcu *goto_chain; u32 tcfa_flags; + u8 hw_stats; + u8 used_hw_stats; + bool used_hw_stats_valid; }; #define tcf_index common.tcfa_index #define tcf_refcnt common.tcfa_refcnt @@ -52,6 +55,9 @@ struct tc_action { #define tcf_rate_est common.tcfa_rate_est #define tcf_lock common.tcfa_lock +#define TCA_ACT_HW_STATS_ANY (TCA_ACT_HW_STATS_IMMEDIATE | \ + TCA_ACT_HW_STATS_DELAYED) + /* Update lastuse only if needed, to avoid dirtying a cache line. * We use a temp variable to avoid fetching jiffies twice. 
*/ diff --git a/include/net/addrconf.h b/include/net/addrconf.h index a088349dd94f..e0eabe58aa8b 100644 --- a/include/net/addrconf.h +++ b/include/net/addrconf.h @@ -90,6 +90,9 @@ int ipv6_chk_addr_and_flags(struct net *net, const struct in6_addr *addr, int ipv6_chk_home_addr(struct net *net, const struct in6_addr *addr); #endif +int ipv6_chk_rpl_srh_loop(struct net *net, const struct in6_addr *segs, + unsigned char nsegs); + bool ipv6_chk_custom_prefix(const struct in6_addr *addr, const unsigned int prefix_len, struct net_device *dev); diff --git a/include/net/af_rxrpc.h b/include/net/af_rxrpc.h index 1abae3c340a5..04e97bab6f28 100644 --- a/include/net/af_rxrpc.h +++ b/include/net/af_rxrpc.h @@ -16,6 +16,12 @@ struct sock; struct socket; struct rxrpc_call; +enum rxrpc_interruptibility { + RXRPC_INTERRUPTIBLE, /* Call is interruptible */ + RXRPC_PREINTERRUPTIBLE, /* Call can be cancelled whilst waiting for a slot */ + RXRPC_UNINTERRUPTIBLE, /* Call should not be interruptible at all */ +}; + /* * Debug ID counter for tracing. */ @@ -41,7 +47,7 @@ struct rxrpc_call *rxrpc_kernel_begin_call(struct socket *, gfp_t, rxrpc_notify_rx_t, bool, - bool, + enum rxrpc_interruptibility, unsigned int); int rxrpc_kernel_send_data(struct socket *, struct rxrpc_call *, struct msghdr *, size_t, @@ -58,9 +64,7 @@ int rxrpc_kernel_charge_accept(struct socket *, rxrpc_notify_rx_t, rxrpc_user_attach_call_t, unsigned long, gfp_t, unsigned int); void rxrpc_kernel_set_tx_length(struct socket *, struct rxrpc_call *, s64); -bool rxrpc_kernel_check_life(const struct socket *, const struct rxrpc_call *, - u32 *); -void rxrpc_kernel_probe_life(struct socket *, struct rxrpc_call *); +bool rxrpc_kernel_check_life(const struct socket *, const struct rxrpc_call *); u32 rxrpc_kernel_get_epoch(struct socket *, struct rxrpc_call *); bool rxrpc_kernel_get_reply_time(struct socket *, struct rxrpc_call *, ktime_t *); diff --git a/include/net/af_unix.h b/include/net/af_unix.h index 17e10fba2152..f42fdddecd41 100644 --- a/include/net/af_unix.h +++ b/include/net/af_unix.h @@ -27,7 +27,7 @@ struct unix_address { refcount_t refcnt; int len; unsigned int hash; - struct sockaddr_un name[0]; + struct sockaddr_un name[]; }; struct unix_skb_parms { @@ -42,7 +42,7 @@ struct unix_skb_parms { } __randomize_layout; struct scm_stat { - u32 nr_fds; + atomic_t nr_fds; }; #define UNIXCB(skb) (*(struct unix_skb_parms *)&((skb)->cb)) diff --git a/include/net/bareudp.h b/include/net/bareudp.h new file mode 100644 index 000000000000..cb03f6f15956 --- /dev/null +++ b/include/net/bareudp.h @@ -0,0 +1,20 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef __NET_BAREUDP_H +#define __NET_BAREUDP_H + +#include <linux/types.h> +#include <linux/skbuff.h> + +struct bareudp_conf { + __be16 ethertype; + __be16 port; + u16 sport_min; + bool multi_proto_mode; +}; + +struct net_device *bareudp_dev_create(struct net *net, const char *name, + u8 name_assign_type, + struct bareudp_conf *info); + +#endif diff --git a/include/net/bluetooth/bluetooth.h b/include/net/bluetooth/bluetooth.h index e42bb8e03c09..1576353a2773 100644 --- a/include/net/bluetooth/bluetooth.h +++ b/include/net/bluetooth/bluetooth.h @@ -121,6 +121,23 @@ struct bt_voice { #define BT_SNDMTU 12 #define BT_RCVMTU 13 +#define BT_PHY 14 + +#define BT_PHY_BR_1M_1SLOT 0x00000001 +#define BT_PHY_BR_1M_3SLOT 0x00000002 +#define BT_PHY_BR_1M_5SLOT 0x00000004 +#define BT_PHY_EDR_2M_1SLOT 0x00000008 +#define BT_PHY_EDR_2M_3SLOT 0x00000010 +#define BT_PHY_EDR_2M_5SLOT 0x00000020 +#define BT_PHY_EDR_3M_1SLOT 
0x00000040 +#define BT_PHY_EDR_3M_3SLOT 0x00000080 +#define BT_PHY_EDR_3M_5SLOT 0x00000100 +#define BT_PHY_LE_1M_TX 0x00000200 +#define BT_PHY_LE_1M_RX 0x00000400 +#define BT_PHY_LE_2M_TX 0x00000800 +#define BT_PHY_LE_2M_RX 0x00001000 +#define BT_PHY_LE_CODED_TX 0x00002000 +#define BT_PHY_LE_CODED_RX 0x00004000 __printf(1, 2) void bt_info(const char *fmt, ...); diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h index 6293bdd7d862..5f60e135aeb6 100644 --- a/include/net/bluetooth/hci.h +++ b/include/net/bluetooth/hci.h @@ -115,7 +115,7 @@ enum { * wrongly configured local features that will require forcing * them to enable this mode. Getting RSSI information with the * inquiry responses is preferred since it allows for a better - * user expierence. + * user experience. * * This quirk must be set before hci_register_dev is called. */ @@ -142,7 +142,7 @@ enum { /* When this quirk is set, an external configuration step * is required and will be indicated with the controller - * configuation. + * configuration. * * This quirk can be set before hci_register_dev is called or * during the hdev->setup vendor callback. @@ -205,6 +205,15 @@ enum { * */ HCI_QUIRK_NON_PERSISTENT_SETUP, + + /* When this quirk is set, wide band speech is supported by + * the driver since no reliable mechanism exist to report + * this from the hardware, a driver flag is use to convey + * this support + * + * This quirk must be set before hci_register_dev is called. + */ + HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, }; /* HCI device flags */ @@ -277,6 +286,7 @@ enum { HCI_FAST_CONNECTABLE, HCI_BREDR_ENABLED, HCI_LE_SCAN_INTERRUPTED, + HCI_WIDEBAND_SPEECH_ENABLED, HCI_DUT_MODE, HCI_VENDOR_DIAG, @@ -932,10 +942,14 @@ struct hci_cp_sniff_subrate { #define HCI_OP_RESET 0x0c03 #define HCI_OP_SET_EVENT_FLT 0x0c05 -struct hci_cp_set_event_flt { - __u8 flt_type; - __u8 cond_type; - __u8 condition[0]; +#define HCI_SET_EVENT_FLT_SIZE 9 +struct hci_cp_set_event_filter { + __u8 flt_type; + __u8 cond_type; + struct { + bdaddr_t bdaddr; + __u8 auto_accept; + } __packed addr_conn_flt; } __packed; /* Filter types */ @@ -949,8 +963,9 @@ struct hci_cp_set_event_flt { #define HCI_CONN_SETUP_ALLOW_BDADDR 0x02 /* CONN_SETUP Conditions */ -#define HCI_CONN_SETUP_AUTO_OFF 0x01 -#define HCI_CONN_SETUP_AUTO_ON 0x02 +#define HCI_CONN_SETUP_AUTO_OFF 0x01 +#define HCI_CONN_SETUP_AUTO_ON 0x02 +#define HCI_CONN_SETUP_AUTO_ON_WITH_RS 0x03 #define HCI_OP_READ_STORED_LINK_KEY 0x0c0d struct hci_cp_read_stored_link_key { @@ -1086,6 +1101,19 @@ struct hci_rp_read_inq_rsp_tx_power { __s8 tx_power; } __packed; +#define HCI_OP_READ_DEF_ERR_DATA_REPORTING 0x0c5a + #define ERR_DATA_REPORTING_DISABLED 0x00 + #define ERR_DATA_REPORTING_ENABLED 0x01 +struct hci_rp_read_def_err_data_reporting { + __u8 status; + __u8 err_data_reporting; +} __packed; + +#define HCI_OP_WRITE_DEF_ERR_DATA_REPORTING 0x0c5b +struct hci_cp_write_def_err_data_reporting { + __u8 err_data_reporting; +} __packed; + #define HCI_OP_SET_EVENT_MASK_PAGE_2 0x0c63 #define HCI_OP_READ_LOCATION_DATA 0x0c64 @@ -1335,7 +1363,7 @@ struct hci_rp_read_local_amp_assoc { __u8 status; __u8 phy_handle; __le16 rem_len; - __u8 frag[0]; + __u8 frag[]; } __packed; #define HCI_OP_WRITE_REMOTE_AMP_ASSOC 0x140b @@ -1343,7 +1371,7 @@ struct hci_cp_write_remote_amp_assoc { __u8 phy_handle; __le16 len_so_far; __le16 rem_len; - __u8 frag[0]; + __u8 frag[]; } __packed; struct hci_rp_write_remote_amp_assoc { __u8 status; @@ -1613,7 +1641,7 @@ struct hci_cp_le_set_ext_scan_params { __u8 own_addr_type; __u8 
filter_policy; __u8 scanning_phys; - __u8 data[0]; + __u8 data[]; } __packed; #define LE_SCAN_PHY_1M 0x01 @@ -1641,7 +1669,7 @@ struct hci_cp_le_ext_create_conn { __u8 peer_addr_type; bdaddr_t peer_addr; __u8 phys; - __u8 data[0]; + __u8 data[]; } __packed; struct hci_cp_le_ext_conn_param { @@ -1693,7 +1721,7 @@ struct hci_rp_le_set_ext_adv_params { struct hci_cp_le_set_ext_adv_enable { __u8 enable; __u8 num_of_sets; - __u8 data[0]; + __u8 data[]; } __packed; struct hci_cp_ext_adv_set { @@ -1724,6 +1752,8 @@ struct hci_cp_le_set_ext_scan_rsp_data { #define LE_SET_ADV_DATA_NO_FRAG 0x01 +#define HCI_OP_LE_REMOVE_ADV_SET 0x203c + #define HCI_OP_LE_CLEAR_ADV_SETS 0x203d #define HCI_OP_LE_SET_ADV_SET_RAND_ADDR 0x2035 @@ -1775,14 +1805,14 @@ struct hci_cp_le_set_cig_params { __le16 m_latency; __le16 s_latency; __u8 num_cis; - struct hci_cis_params cis[0]; + struct hci_cis_params cis[]; } __packed; struct hci_rp_le_set_cig_params { __u8 status; __u8 cig_id; __u8 num_handles; - __le16 handle[0]; + __le16 handle[]; } __packed; #define HCI_OP_LE_CREATE_CIS 0x2064 @@ -1793,7 +1823,7 @@ struct hci_cis { struct hci_cp_le_create_cis { __u8 num_cis; - struct hci_cis cis[0]; + struct hci_cis cis[]; } __packed; #define HCI_OP_LE_REMOVE_CIG 0x2065 @@ -1937,7 +1967,7 @@ struct hci_comp_pkts_info { struct hci_ev_num_comp_pkts { __u8 num_hndl; - struct hci_comp_pkts_info handles[0]; + struct hci_comp_pkts_info handles[]; } __packed; #define HCI_EV_MODE_CHANGE 0x14 @@ -2170,7 +2200,7 @@ struct hci_comp_blocks_info { struct hci_ev_num_comp_blocks { __le16 num_blocks; __u8 num_hndl; - struct hci_comp_blocks_info handles[0]; + struct hci_comp_blocks_info handles[]; } __packed; #define HCI_EV_SYNC_TRAIN_COMPLETE 0x4F @@ -2226,7 +2256,7 @@ struct hci_ev_le_advertising_info { __u8 bdaddr_type; bdaddr_t bdaddr; __u8 length; - __u8 data[0]; + __u8 data[]; } __packed; #define HCI_EV_LE_CONN_UPDATE_COMPLETE 0x03 @@ -2302,7 +2332,7 @@ struct hci_ev_le_ext_adv_report { __u8 direct_addr_type; bdaddr_t direct_addr; __u8 length; - __u8 data[0]; + __u8 data[]; } __packed; #define HCI_EV_LE_ENHANCED_CONN_COMPLETE 0x0a @@ -2362,7 +2392,7 @@ struct hci_evt_le_cis_req { #define HCI_EV_STACK_INTERNAL 0xfd struct hci_ev_stack_internal { __u16 type; - __u8 data[0]; + __u8 data[]; } __packed; #define HCI_EV_SI_DEVICE 0x01 @@ -2409,7 +2439,7 @@ struct hci_sco_hdr { struct hci_iso_hdr { __le16 handle; __le16 dlen; - __u8 data[0]; + __u8 data[]; } __packed; /* ISO data packet status flags */ diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h index 89ecf0a80aa1..d4e28773d378 100644 --- a/include/net/bluetooth/hci_core.h +++ b/include/net/bluetooth/hci_core.h @@ -88,6 +88,31 @@ struct discovery_state { unsigned long scan_duration; }; +#define SUSPEND_NOTIFIER_TIMEOUT msecs_to_jiffies(2000) /* 2 seconds */ + +enum suspend_tasks { + SUSPEND_PAUSE_DISCOVERY, + SUSPEND_UNPAUSE_DISCOVERY, + + SUSPEND_PAUSE_ADVERTISING, + SUSPEND_UNPAUSE_ADVERTISING, + + SUSPEND_SCAN_DISABLE, + SUSPEND_SCAN_ENABLE, + SUSPEND_DISCONNECTING, + + SUSPEND_POWERING_DOWN, + + SUSPEND_PREPARE_NOTIFIER, + __SUSPEND_NUM_TASKS +}; + +enum suspended_state { + BT_RUNNING = 0, + BT_SUSPEND_DISCONNECT, + BT_SUSPEND_COMPLETE, +}; + struct hci_conn_hash { struct list_head list; unsigned int acl_num; @@ -260,6 +285,7 @@ struct hci_dev { __u8 stored_num_keys; __u8 io_capability; __s8 inq_tx_power; + __u8 err_data_reporting; __u16 page_scan_interval; __u16 page_scan_window; __u8 page_scan_type; @@ -389,11 +415,28 @@ struct hci_dev { void 
*smp_bredr_data; struct discovery_state discovery; + + int discovery_old_state; + bool discovery_paused; + int advertising_old_state; + bool advertising_paused; + + struct notifier_block suspend_notifier; + struct work_struct suspend_prepare; + enum suspended_state suspend_state_next; + enum suspended_state suspend_state; + bool scanning_paused; + bool suspended; + + wait_queue_head_t suspend_wait_q; + DECLARE_BITMAP(suspend_tasks, __SUSPEND_NUM_TASKS); + struct hci_conn_hash conn_hash; struct list_head mgmt_pending; struct list_head blacklist; struct list_head whitelist; + struct list_head wakeable; struct list_head uuids; struct list_head link_keys; struct list_head long_term_keys; @@ -575,6 +618,7 @@ struct hci_conn_params { struct hci_conn *conn; bool explicit_connect; + bool wakeable; }; extern struct list_head hci_dev_list; @@ -1477,6 +1521,8 @@ void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode); struct sk_buff *hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen, const void *param, u32 timeout); +u32 hci_conn_get_phy(struct hci_conn *conn); + /* ----- HCI Sockets ----- */ void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb); void hci_send_to_channel(unsigned short channel, struct sk_buff *skb, diff --git a/include/net/bluetooth/hci_sock.h b/include/net/bluetooth/hci_sock.h index 8e9138acdae1..9352bb1bf34c 100644 --- a/include/net/bluetooth/hci_sock.h +++ b/include/net/bluetooth/hci_sock.h @@ -144,19 +144,19 @@ struct hci_dev_req { struct hci_dev_list_req { __u16 dev_num; - struct hci_dev_req dev_req[0]; /* hci_dev_req structures */ + struct hci_dev_req dev_req[]; /* hci_dev_req structures */ }; struct hci_conn_list_req { __u16 dev_id; __u16 conn_num; - struct hci_conn_info conn_info[0]; + struct hci_conn_info conn_info[]; }; struct hci_conn_info_req { bdaddr_t bdaddr; __u8 type; - struct hci_conn_info conn_info[0]; + struct hci_conn_info conn_info[]; }; struct hci_auth_info_req { diff --git a/include/net/bluetooth/l2cap.h b/include/net/bluetooth/l2cap.h index 093aedebdf0c..dada14d0622c 100644 --- a/include/net/bluetooth/l2cap.h +++ b/include/net/bluetooth/l2cap.h @@ -47,6 +47,7 @@ #define L2CAP_DEFAULT_ACC_LAT 0xFFFFFFFF #define L2CAP_BREDR_MAX_PAYLOAD 1019 /* 3-DH5 packet */ #define L2CAP_LE_MIN_MTU 23 +#define L2CAP_ECRED_CONN_SCID_MAX 5 #define L2CAP_DISC_TIMEOUT msecs_to_jiffies(100) #define L2CAP_DISC_REJ_TIMEOUT msecs_to_jiffies(5000) @@ -119,6 +120,10 @@ struct l2cap_conninfo { #define L2CAP_LE_CONN_REQ 0x14 #define L2CAP_LE_CONN_RSP 0x15 #define L2CAP_LE_CREDITS 0x16 +#define L2CAP_ECRED_CONN_REQ 0x17 +#define L2CAP_ECRED_CONN_RSP 0x18 +#define L2CAP_ECRED_RECONF_REQ 0x19 +#define L2CAP_ECRED_RECONF_RSP 0x1a /* L2CAP extended feature mask */ #define L2CAP_FEAT_FLOWCTL 0x00000001 @@ -290,6 +295,8 @@ struct l2cap_conn_rsp { #define L2CAP_CR_LE_ENCRYPTION 0x0008 #define L2CAP_CR_LE_INVALID_SCID 0x0009 #define L2CAP_CR_LE_SCID_IN_USE 0X000A +#define L2CAP_CR_LE_UNACCEPT_PARAMS 0X000B +#define L2CAP_CR_LE_INVALID_PARAMS 0X000C /* connect/create channel status */ #define L2CAP_CS_NO_INFO 0x0000 @@ -299,14 +306,14 @@ struct l2cap_conn_rsp { struct l2cap_conf_req { __le16 dcid; __le16 flags; - __u8 data[0]; + __u8 data[]; } __packed; struct l2cap_conf_rsp { __le16 scid; __le16 flags; __le16 result; - __u8 data[0]; + __u8 data[]; } __packed; #define L2CAP_CONF_SUCCESS 0x0000 @@ -322,7 +329,7 @@ struct l2cap_conf_rsp { struct l2cap_conf_opt { __u8 type; __u8 len; - __u8 val[0]; + __u8 val[]; } __packed; #define L2CAP_CONF_OPT_SIZE 2 @@ -359,6 +366,7 @@ struct 
l2cap_conf_rfc { * ever be used in the BR/EDR configuration phase. */ #define L2CAP_MODE_LE_FLOWCTL 0x80 +#define L2CAP_MODE_EXT_FLOWCTL 0x81 struct l2cap_conf_efs { __u8 id; @@ -392,7 +400,7 @@ struct l2cap_info_req { struct l2cap_info_rsp { __le16 type; __le16 result; - __u8 data[0]; + __u8 data[]; } __packed; struct l2cap_create_chan_req { @@ -483,6 +491,39 @@ struct l2cap_le_credits { __le16 credits; } __packed; +#define L2CAP_ECRED_MIN_MTU 64 +#define L2CAP_ECRED_MIN_MPS 64 + +struct l2cap_ecred_conn_req { + __le16 psm; + __le16 mtu; + __le16 mps; + __le16 credits; + __le16 scid[0]; +} __packed; + +struct l2cap_ecred_conn_rsp { + __le16 mtu; + __le16 mps; + __le16 credits; + __le16 result; + __le16 dcid[0]; +}; + +struct l2cap_ecred_reconf_req { + __le16 mtu; + __le16 mps; + __le16 scid[0]; +} __packed; + +#define L2CAP_RECONF_SUCCESS 0x0000 +#define L2CAP_RECONF_INVALID_MTU 0x0001 +#define L2CAP_RECONF_INVALID_MPS 0x0002 + +struct l2cap_ecred_reconf_rsp { + __le16 result; +} __packed; + /* ----- L2CAP channels and connections ----- */ struct l2cap_seq_list { __u16 head; @@ -620,6 +661,7 @@ struct l2cap_ops { void (*suspend) (struct l2cap_chan *chan); void (*set_shutdown) (struct l2cap_chan *chan); long (*get_sndtimeo) (struct l2cap_chan *chan); + struct pid *(*get_peer_pid) (struct l2cap_chan *chan); struct sk_buff *(*alloc_skb) (struct l2cap_chan *chan, unsigned long hdr_len, unsigned long len, int nb); @@ -724,6 +766,7 @@ enum { FLAG_EFS_ENABLE, FLAG_DEFER_SETUP, FLAG_LE_CONN_REQ_SENT, + FLAG_ECRED_CONN_REQ_SENT, FLAG_PENDING_SECURITY, FLAG_HOLD_HCI_CONN, }; @@ -917,12 +960,14 @@ static inline long l2cap_chan_no_get_sndtimeo(struct l2cap_chan *chan) } extern bool disable_ertm; +extern bool enable_ecred; int l2cap_init_sockets(void); void l2cap_cleanup_sockets(void); bool l2cap_is_socket(struct socket *sock); void __l2cap_le_connect_rsp_defer(struct l2cap_chan *chan); +void __l2cap_ecred_conn_rsp_defer(struct l2cap_chan *chan); void __l2cap_connect_rsp_defer(struct l2cap_chan *chan); int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm); @@ -932,6 +977,7 @@ struct l2cap_chan *l2cap_chan_create(void); void l2cap_chan_close(struct l2cap_chan *chan, int reason); int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid, bdaddr_t *dst, u8 dst_type); +int l2cap_chan_reconfigure(struct l2cap_chan *chan, __u16 mtu); int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len); void l2cap_chan_busy(struct l2cap_chan *chan, int busy); int l2cap_chan_check_security(struct l2cap_chan *chan, bool initiator); @@ -939,6 +985,9 @@ void l2cap_chan_set_defaults(struct l2cap_chan *chan); int l2cap_ertm_init(struct l2cap_chan *chan); void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan); void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan); +typedef void (*l2cap_chan_func_t)(struct l2cap_chan *chan, void *data); +void l2cap_chan_list(struct l2cap_conn *conn, l2cap_chan_func_t func, + void *data); void l2cap_chan_del(struct l2cap_chan *chan, int err); void l2cap_send_conn_req(struct l2cap_chan *chan); void l2cap_move_start(struct l2cap_chan *chan); diff --git a/include/net/bluetooth/mgmt.h b/include/net/bluetooth/mgmt.h index a90666af05bd..f41cd87550dc 100644 --- a/include/net/bluetooth/mgmt.h +++ b/include/net/bluetooth/mgmt.h @@ -101,7 +101,8 @@ struct mgmt_rp_read_index_list { #define MGMT_SETTING_PRIVACY 0x00002000 #define MGMT_SETTING_CONFIGURATION 0x00004000 #define MGMT_SETTING_STATIC_ADDRESS 0x00008000 -#define 
MGMT_SETTING_PHY_CONFIGURATION 0x00010000 +#define MGMT_SETTING_PHY_CONFIGURATION 0x00010000 +#define MGMT_SETTING_WIDEBAND_SPEECH 0x00020000 #define MGMT_OP_READ_INFO 0x0004 #define MGMT_READ_INFO_SIZE 0 @@ -671,6 +672,8 @@ struct mgmt_cp_set_blocked_keys { } __packed; #define MGMT_OP_SET_BLOCKED_KEYS_SIZE 2 +#define MGMT_OP_SET_WIDEBAND_SPEECH 0x0047 + #define MGMT_EV_CMD_COMPLETE 0x0001 struct mgmt_ev_cmd_complete { __le16 opcode; diff --git a/include/net/bluetooth/rfcomm.h b/include/net/bluetooth/rfcomm.h index da4acefe39c8..99d26879b02a 100644 --- a/include/net/bluetooth/rfcomm.h +++ b/include/net/bluetooth/rfcomm.h @@ -34,7 +34,6 @@ #define RFCOMM_DEFAULT_MTU 127 #define RFCOMM_DEFAULT_CREDITS 7 -#define RFCOMM_MAX_L2CAP_MTU 1013 #define RFCOMM_MAX_CREDITS 40 #define RFCOMM_SKB_HEAD_RESERVE 8 @@ -356,7 +355,7 @@ struct rfcomm_dev_info { struct rfcomm_dev_list_req { u16 dev_num; - struct rfcomm_dev_info dev_info[0]; + struct rfcomm_dev_info dev_info[]; }; int rfcomm_dev_ioctl(struct sock *sk, unsigned int cmd, void __user *arg); diff --git a/include/net/bonding.h b/include/net/bonding.h index 3d56b026bb9e..dc2ce31a1f52 100644 --- a/include/net/bonding.h +++ b/include/net/bonding.h @@ -183,7 +183,7 @@ struct slave { struct bond_up_slave { unsigned int count; struct rcu_head rcu; - struct slave *arr[0]; + struct slave *arr[]; }; /* diff --git a/include/net/bpf_sk_storage.h b/include/net/bpf_sk_storage.h index 8e4f831d2e52..5036c94c0503 100644 --- a/include/net/bpf_sk_storage.h +++ b/include/net/bpf_sk_storage.h @@ -10,14 +10,41 @@ void bpf_sk_storage_free(struct sock *sk); extern const struct bpf_func_proto bpf_sk_storage_get_proto; extern const struct bpf_func_proto bpf_sk_storage_delete_proto; +struct bpf_sk_storage_diag; +struct sk_buff; +struct nlattr; +struct sock; + #ifdef CONFIG_BPF_SYSCALL int bpf_sk_storage_clone(const struct sock *sk, struct sock *newsk); +struct bpf_sk_storage_diag * +bpf_sk_storage_diag_alloc(const struct nlattr *nla_stgs); +void bpf_sk_storage_diag_free(struct bpf_sk_storage_diag *diag); +int bpf_sk_storage_diag_put(struct bpf_sk_storage_diag *diag, + struct sock *sk, struct sk_buff *skb, + int stg_array_type, + unsigned int *res_diag_size); #else static inline int bpf_sk_storage_clone(const struct sock *sk, struct sock *newsk) { return 0; } +static inline struct bpf_sk_storage_diag * +bpf_sk_storage_diag_alloc(const struct nlattr *nla) +{ + return NULL; +} +static inline void bpf_sk_storage_diag_free(struct bpf_sk_storage_diag *diag) +{ +} +static inline int bpf_sk_storage_diag_put(struct bpf_sk_storage_diag *diag, + struct sock *sk, struct sk_buff *skb, + int stg_array_type, + unsigned int *res_diag_size) +{ + return 0; +} #endif #endif /* _BPF_SK_STORAGE_H */ diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h index f22bd6c838a3..c78bd4ff9e33 100644 --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h @@ -7,7 +7,7 @@ * Copyright 2006-2010 Johannes Berg <johannes@sipsolutions.net> * Copyright 2013-2014 Intel Mobile Communications GmbH * Copyright 2015-2017 Intel Deutschland GmbH - * Copyright (C) 2018-2019 Intel Corporation + * Copyright (C) 2018-2020 Intel Corporation */ #include <linux/netdevice.h> @@ -72,12 +72,12 @@ struct wiphy; * * @IEEE80211_CHAN_DISABLED: This channel is disabled. * @IEEE80211_CHAN_NO_IR: do not initiate radiation, this includes - * sending probe requests or beaconing. + * sending probe requests or beaconing. * @IEEE80211_CHAN_RADAR: Radar detection is required on this channel. 
* @IEEE80211_CHAN_NO_HT40PLUS: extension channel above this channel - * is not permitted. + * is not permitted. * @IEEE80211_CHAN_NO_HT40MINUS: extension channel below this channel - * is not permitted. + * is not permitted. * @IEEE80211_CHAN_NO_OFDM: OFDM is not allowed on this channel. * @IEEE80211_CHAN_NO_80MHZ: If the driver supports 80 MHz on the band, * this flag indicates that an 80 MHz channel cannot use this @@ -95,6 +95,7 @@ struct wiphy; * on this channel. * @IEEE80211_CHAN_NO_10MHZ: 10 MHz bandwidth is not permitted * on this channel. + * @IEEE80211_CHAN_NO_HE: HE operation is not permitted on this channel. * */ enum ieee80211_channel_flags { @@ -111,6 +112,7 @@ enum ieee80211_channel_flags { IEEE80211_CHAN_IR_CONCURRENT = 1<<10, IEEE80211_CHAN_NO_20MHZ = 1<<11, IEEE80211_CHAN_NO_10MHZ = 1<<12, + IEEE80211_CHAN_NO_HE = 1<<13, }; #define IEEE80211_CHAN_NO_HT40 \ @@ -260,6 +262,32 @@ struct ieee80211_he_obss_pd { }; /** + * struct cfg80211_he_bss_color - AP settings for BSS coloring + * + * @color: the current color. + * @disabled: is the feature disabled. + * @partial: define the AID equation. + */ +struct cfg80211_he_bss_color { + u8 color; + bool disabled; + bool partial; +}; + +/** + * struct ieee80211_he_bss_color - AP settings for BSS coloring + * + * @color: the current color. + * @disabled: is the feature disabled. + * @partial: define the AID equation. + */ +struct ieee80211_he_bss_color { + u8 color; + bool disabled; + bool partial; +}; + +/** * struct ieee80211_sta_ht_cap - STA's HT capabilities * * This structure describes most essential parameters needed @@ -599,6 +627,41 @@ struct cfg80211_chan_def { }; /** + * struct cfg80211_tid_cfg - TID specific configuration + * @config_override: Flag to notify driver to reset TID configuration + * of the peer. + * @tids: bitmap of TIDs to modify + * @mask: bitmap of attributes indicating which parameter changed, + * similar to &nl80211_tid_config_supp. 
+ * @noack: noack configuration value for the TID + * @retry_long: retry count value + * @retry_short: retry count value + * @ampdu: Enable/Disable aggregation + * @rtscts: Enable/Disable RTS/CTS + */ +struct cfg80211_tid_cfg { + bool config_override; + u8 tids; + u32 mask; + enum nl80211_tid_config noack; + u8 retry_long, retry_short; + enum nl80211_tid_config ampdu; + enum nl80211_tid_config rtscts; +}; + +/** + * struct cfg80211_tid_config - TID configuration + * @peer: Station's MAC address + * @n_tid_conf: Number of TID specific configurations to be applied + * @tid_conf: Configuration change info + */ +struct cfg80211_tid_config { + const u8 *peer; + u32 n_tid_conf; + struct cfg80211_tid_cfg tid_conf[]; +}; + +/** * cfg80211_get_chandef_type - return old channel type from chandef * @chandef: the channel definition * @@ -861,6 +924,7 @@ struct cfg80211_crypto_settings { __be16 control_port_ethertype; bool control_port_no_encrypt; bool control_port_over_nl80211; + bool control_port_no_preauth; struct key_params *wep_keys; int wep_tx_key; const u8 *psk; @@ -990,6 +1054,8 @@ enum cfg80211_ap_settings_flags { * @twt_responder: Enable Target Wait Time * @flags: flags, as defined in enum cfg80211_ap_settings_flags * @he_obss_pd: OBSS Packet Detection settings + * @he_bss_color: BSS Color settings + * @he_oper: HE operation IE (or %NULL if HE isn't enabled) */ struct cfg80211_ap_settings { struct cfg80211_chan_def chandef; @@ -1014,10 +1080,12 @@ struct cfg80211_ap_settings { const struct ieee80211_ht_cap *ht_cap; const struct ieee80211_vht_cap *vht_cap; const struct ieee80211_he_cap_elem *he_cap; + const struct ieee80211_he_operation *he_oper; bool ht_required, vht_required; bool twt_responder; u32 flags; struct ieee80211_he_obss_pd he_obss_pd; + struct cfg80211_he_bss_color he_bss_color; }; /** @@ -1656,7 +1724,7 @@ struct mpath_info { * @basic_rates_len: number of basic rates * @ap_isolate: do not forward packets between connected stations * @ht_opmode: HT Operation mode - * (u16 = opmode, -1 = do not change) + * (u16 = opmode, -1 = do not change) * @p2p_ctwindow: P2P CT Window (-1 = no change) * @p2p_opp_ps: P2P opportunistic PS (-1 = no change) */ @@ -2032,8 +2100,8 @@ struct cfg80211_bss_select_adjust { * @ie_len: length of ie in octets * @flags: bit field of flags controlling operation * @match_sets: sets of parameters to be matched for a scan result - * entry to be considered valid and to be passed to the host - * (others are filtered out). + * entry to be considered valid and to be passed to the host + * (others are filtered out). * If ommited, all results are passed. * @n_match_sets: number of match sets * @report_results: indicates that results were reported for this request @@ -2426,7 +2494,7 @@ struct cfg80211_disassoc_request { * will be used in ht_capa. Un-supported values will be ignored. * @ht_capa_mask: The bits of ht_capa which are to be used. * @wep_keys: static WEP keys, if not NULL points to an array of - * CFG80211_MAX_WEP_KEYS WEP keys + * CFG80211_MAX_WEP_KEYS WEP keys * @wep_tx_key: key index (0..3) of the default TX static WEP key */ struct cfg80211_ibss_params { @@ -2631,6 +2699,17 @@ enum wiphy_params_flags { * @cache_id: 2-octet cache identifier advertized by a FILS AP identifying the * scope of PMKSA. This is valid only if @ssid_len is non-zero (may be * %NULL). + * @pmk_lifetime: Maximum lifetime for PMKSA in seconds + * (dot11RSNAConfigPMKLifetime) or 0 if not specified. 
+ * The configured PMKSA must not be used for PMKSA caching after + * expiration and any keys derived from this PMK become invalid on + * expiration, i.e., the current association must be dropped if the PMK + * used for it expires. + * @pmk_reauth_threshold: Threshold time for reauthentication (percentage of + * PMK lifetime, dot11RSNAConfigPMKReauthThreshold) or 0 if not specified. + * Drivers are expected to trigger a full authentication instead of using + * this PMKSA for caching when reassociating to a new BSS after this + * threshold to generate a new PMK before the current one expires. */ struct cfg80211_pmksa { const u8 *bssid; @@ -2640,6 +2719,8 @@ struct cfg80211_pmksa { const u8 *ssid; size_t ssid_len; const u8 *cache_id; + u32 pmk_lifetime; + u8 pmk_reauth_threshold; }; /** @@ -3204,6 +3285,12 @@ struct cfg80211_pmsr_result { * @ftmr_retries: number of retries for FTM request * @request_lci: request LCI information * @request_civicloc: request civic location information + * @trigger_based: use trigger based ranging for the measurement + * If neither @trigger_based nor @non_trigger_based is set, + * EDCA based ranging will be used. + * @non_trigger_based: use non trigger based ranging for the measurement + * If neither @trigger_based nor @non_trigger_based is set, + * EDCA based ranging will be used. * * See also nl80211 for the respective attribute documentation. */ @@ -3213,7 +3300,9 @@ struct cfg80211_pmsr_ftm_request_peer { u8 requested:1, asap:1, request_lci:1, - request_civicloc:1; + request_civicloc:1, + trigger_based:1, + non_trigger_based:1; u8 num_bursts_exp; u8 burst_duration; u8 ftms_per_burst; @@ -3340,6 +3429,8 @@ struct cfg80211_update_owe_info { * * @set_default_mgmt_key: set the default management frame key on an interface * + * @set_default_beacon_key: set the default Beacon frame key on an interface + * * @set_rekey_data: give the data necessary for GTK rekeying to the driver * * @start_ap: Start acting in AP mode defined by the parameters. @@ -3639,6 +3730,10 @@ struct cfg80211_update_owe_info { * * @probe_mesh_link: Probe direct Mesh peer's link quality by sending data frame * and overrule HWMP path selection algorithm. + * @set_tid_config: TID specific configuration, this can be peer or BSS specific + * This callback may sleep. + * @reset_tid_config: Reset TID specific configuration for the peer, for the + * given TIDs. This callback may sleep. 
*/ struct cfg80211_ops { int (*suspend)(struct wiphy *wiphy, struct cfg80211_wowlan *wow); @@ -3672,6 +3767,9 @@ struct cfg80211_ops { int (*set_default_mgmt_key)(struct wiphy *wiphy, struct net_device *netdev, u8 key_index); + int (*set_default_beacon_key)(struct wiphy *wiphy, + struct net_device *netdev, + u8 key_index); int (*start_ap)(struct wiphy *wiphy, struct net_device *dev, struct cfg80211_ap_settings *settings); @@ -3959,6 +4057,10 @@ struct cfg80211_ops { struct cfg80211_update_owe_info *owe_info); int (*probe_mesh_link)(struct wiphy *wiphy, struct net_device *dev, const u8 *buf, size_t len); + int (*set_tid_config)(struct wiphy *wiphy, struct net_device *dev, + struct cfg80211_tid_config *tid_conf); + int (*reset_tid_config)(struct wiphy *wiphy, struct net_device *dev, + const u8 *peer, u8 tids); }; /* @@ -4356,6 +4458,8 @@ struct wiphy_iftype_ext_capab { * forbid using the value 15 to let the responder pick) * @ftm.max_ftms_per_burst: maximum FTMs per burst supported (set to 0 if * not limited) + * @ftm.trigger_based: trigger based ranging measurement is supported + * @ftm.non_trigger_based: non trigger based ranging measurement is supported */ struct cfg80211_pmsr_capabilities { unsigned int max_peers; @@ -4371,24 +4475,49 @@ struct cfg80211_pmsr_capabilities { asap:1, non_asap:1, request_lci:1, - request_civicloc:1; + request_civicloc:1, + trigger_based:1, + non_trigger_based:1; } ftm; }; /** + * struct wiphy_iftype_akm_suites - This structure encapsulates supported akm + * suites for interface types defined in @iftypes_mask. Each type in the + * @iftypes_mask must be unique across all instances of iftype_akm_suites. + * + * @iftypes_mask: bitmask of interfaces types + * @akm_suites: points to an array of supported akm suites + * @n_akm_suites: number of supported AKM suites + */ +struct wiphy_iftype_akm_suites { + u16 iftypes_mask; + const u32 *akm_suites; + int n_akm_suites; +}; + +/** * struct wiphy - wireless hardware description * @reg_notifier: the driver's regulatory notification callback, * note that if your driver uses wiphy_apply_custom_regulatory() * the reg_notifier's request can be passed as NULL * @regd: the driver's regulatory domain, if one was requested via - * the regulatory_hint() API. This can be used by the driver + * the regulatory_hint() API. This can be used by the driver * on the reg_notifier() if it chooses to ignore future * regulatory domain changes caused by other drivers. * @signal_type: signal type reported in &struct cfg80211_bss. * @cipher_suites: supported cipher suites * @n_cipher_suites: number of supported cipher suites - * @akm_suites: supported AKM suites + * @akm_suites: supported AKM suites. These are the default AKMs supported if + * the supported AKMs not advertized for a specific interface type in + * iftype_akm_suites. * @n_akm_suites: number of supported AKM suites + * @iftype_akm_suites: array of supported akm suites info per interface type. + * Note that the bits in @iftypes_mask inside this structure cannot + * overlap (i.e. only one occurrence of each type is allowed across all + * instances of iftype_akm_suites). + * @num_iftype_akm_suites: number of interface types for which supported akm + * suites are specified separately. 
* @retry_short: Retry limit for short frames (dot11ShortRetryLimit) * @retry_long: Retry limit for long frames (dot11LongRetryLimit) * @frag_threshold: Fragmentation threshold (dot11FragmentationThreshold); @@ -4409,10 +4538,11 @@ struct cfg80211_pmsr_capabilities { * the same number of arbitrary MAC addresses. * @registered: protects ->resume and ->suspend sysfs callbacks against * unregister hardware - * @debugfsdir: debugfs directory used for this wiphy, will be renamed - * automatically on wiphy renames - * @dev: (virtual) struct device for this wiphy - * @registered: helps synchronize suspend/resume with wiphy unregister + * @debugfsdir: debugfs directory used for this wiphy (ieee80211/<wiphyname>). + * It will be renamed automatically on wiphy renames + * @dev: (virtual) struct device for this wiphy. The item in + * /sys/class/ieee80211/ points to this. You need use set_wiphy_dev() + * (see below). * @wext: wireless extension handlers * @priv: driver private data (sized according to wiphy_new() parameter) * @interface_modes: bitmask of interfaces types valid for this wiphy, @@ -4523,12 +4653,6 @@ struct cfg80211_pmsr_capabilities { * and probe responses. This value should be set if the driver * wishes to limit the number of csa counters. Default (0) means * infinite. - * @max_adj_channel_rssi_comp: max offset of between the channel on which the - * frame was sent and the channel on which the frame was heard for which - * the reported rssi is still valid. If a driver is able to compensate the - * low rssi when a frame is heard on different channel, then it should set - * this variable to the maximal offset for which it can compensate. - * This value should be set in MHz. * @bss_select_support: bitmask indicating the BSS selection criteria supported * by the driver in the .connect() callback. The bit position maps to the * attribute indices defined in &enum nl80211_bss_select_attr. @@ -4548,11 +4672,19 @@ struct cfg80211_pmsr_capabilities { * @support_mbssid must be set for this to have any effect. * * @pmsr_capa: peer measurement capabilities + * + * @tid_config_support: describes the per-TID config support that the + * device has + * @tid_config_support.vif: bitmap of attributes (configurations) + * supported by the driver for each vif + * @tid_config_support.peer: bitmap of attributes (configurations) + * supported by the driver for each peer + * @tid_config_support.max_retry: maximum supported retry count for + * long/short retry configuration */ struct wiphy { /* assign these fields before you register the wiphy */ - /* permanent MAC address(es) */ u8 perm_addr[ETH_ALEN]; u8 addr_mask[ETH_ALEN]; @@ -4595,6 +4727,9 @@ struct wiphy { int n_akm_suites; const u32 *akm_suites; + const struct wiphy_iftype_akm_suites *iftype_akm_suites; + unsigned int num_iftype_akm_suites; + u8 retry_short; u8 retry_long; u32 frag_threshold; @@ -4616,11 +4751,6 @@ struct wiphy { u32 available_antennas_tx; u32 available_antennas_rx; - /* - * Bitmap of supported protocols for probe response offloading - * see &enum nl80211_probe_resp_offload_support_attr. Only valid - * when the wiphy flag @WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD is set. - */ u32 probe_resp_offload; const u8 *extended_capabilities, *extended_capabilities_mask; @@ -4629,16 +4759,10 @@ struct wiphy { const struct wiphy_iftype_ext_capab *iftype_ext_capab; unsigned int num_iftype_ext_capab; - /* If multiple wiphys are registered and you're handed e.g. 
- * a regular netdev with assigned ieee80211_ptr, you won't - * know whether it points to a wiphy your driver has registered - * or not. Assign this to something global to your driver to - * help determine whether you own this wiphy or not. */ const void *privid; struct ieee80211_supported_band *bands[NUM_NL80211_BANDS]; - /* Lets us get back the wiphy on the callback */ void (*reg_notifier)(struct wiphy *wiphy, struct regulatory_request *request); @@ -4646,14 +4770,10 @@ struct wiphy { const struct ieee80211_regdomain __rcu *regd; - /* the item in /sys/class/ieee80211/ points to this, - * you need use set_wiphy_dev() (see below) */ struct device dev; - /* protects ->resume, ->suspend sysfs callbacks against unregister hw */ bool registered; - /* dir in debugfs: ieee80211/<wiphyname> */ struct dentry *debugfsdir; const struct ieee80211_ht_cap *ht_capa_mod_mask; @@ -4661,7 +4781,6 @@ struct wiphy { struct list_head wdev_list; - /* the network namespace this phy lives in currently */ possible_net_t _net; #ifdef CONFIG_CFG80211_WEXT @@ -4677,7 +4796,6 @@ struct wiphy { u16 max_ap_assoc_sta; u8 max_num_csa_counters; - u8 max_adj_channel_rssi_comp; u32 bss_select_support; @@ -4687,11 +4805,20 @@ struct wiphy { u32 txq_memory_limit; u32 txq_quantum; + unsigned long tx_queue_len; + u8 support_mbssid:1, support_only_he_mbssid:1; const struct cfg80211_pmsr_capabilities *pmsr_capa; + struct { + u64 peer, vif; + u8 max_retry; + } tid_config_support; + + u8 max_data_retry_count; + char priv[0] __aligned(NETDEV_ALIGN); }; @@ -5467,9 +5594,9 @@ void cfg80211_send_layer2_update(struct net_device *dev, const u8 *addr); * @wiphy: the wireless device giving the hint (used only for reporting * conflicts) * @alpha2: the ISO/IEC 3166 alpha2 the driver claims its regulatory domain - * should be in. If @rd is set this should be NULL. Note that if you - * set this to NULL you should still set rd->alpha2 to some accepted - * alpha2. + * should be in. If @rd is set this should be NULL. Note that if you + * set this to NULL you should still set rd->alpha2 to some accepted + * alpha2. 
* * Wireless drivers can use this function to hint to the wireless core * what it believes should be the current regulatory domain by diff --git a/include/net/cfg802154.h b/include/net/cfg802154.h index 6f86073a5d7d..6ed07844eb24 100644 --- a/include/net/cfg802154.h +++ b/include/net/cfg802154.h @@ -214,7 +214,7 @@ struct wpan_phy { /* the network namespace this phy lives in currently */ possible_net_t _net; - char priv[0] __aligned(NETDEV_ALIGN); + char priv[] __aligned(NETDEV_ALIGN); }; static inline struct net *wpan_phy_net(struct wpan_phy *wpan_phy) diff --git a/include/net/cls_cgroup.h b/include/net/cls_cgroup.h index 4295de3e6a4b..7e78e7d6f015 100644 --- a/include/net/cls_cgroup.h +++ b/include/net/cls_cgroup.h @@ -45,9 +45,14 @@ static inline void sock_update_classid(struct sock_cgroup_data *skcd) sock_cgroup_set_classid(skcd, classid); } +static inline u32 __task_get_classid(struct task_struct *task) +{ + return task_cls_state(task)->classid; +} + static inline u32 task_get_classid(const struct sk_buff *skb) { - u32 classid = task_cls_state(current)->classid; + u32 classid = __task_get_classid(current); /* Due to the nature of the classifier it is required to ignore all * packets originating from softirq context as accessing `current' diff --git a/include/net/compat.h b/include/net/compat.h index f277653c7e17..e341260642fe 100644 --- a/include/net/compat.h +++ b/include/net/compat.h @@ -38,6 +38,9 @@ struct compat_cmsghdr { #define compat_mmsghdr mmsghdr #endif /* defined(CONFIG_COMPAT) */ +int __get_compat_msghdr(struct msghdr *kmsg, struct compat_msghdr __user *umsg, + struct sockaddr __user **save_addr, compat_uptr_t *ptr, + compat_size_t *len); int get_compat_msghdr(struct msghdr *, struct compat_msghdr __user *, struct sockaddr __user **, struct iovec **); struct sock_fprog __user *get_compat_bpf_fprog(char __user *optval); diff --git a/include/net/devlink.h b/include/net/devlink.h index ce5cea428fdc..8ffc1b5cd89b 100644 --- a/include/net/devlink.h +++ b/include/net/devlink.h @@ -16,7 +16,9 @@ #include <linux/workqueue.h> #include <linux/refcount.h> #include <net/net_namespace.h> +#include <net/flow_offload.h> #include <uapi/linux/devlink.h> +#include <linux/xarray.h> struct devlink_ops; @@ -28,13 +30,14 @@ struct devlink { struct list_head resource_list; struct list_head param_list; struct list_head region_list; - u32 snapshot_id; struct list_head reporter_list; struct mutex reporters_lock; /* protects reporter_list */ struct devlink_dpipe_headers *dpipe_headers; struct list_head trap_list; struct list_head trap_group_list; + struct list_head trap_policer_list; const struct devlink_ops *ops; + struct xarray snapshot_ids; struct device *dev; possible_net_t _net; struct mutex lock; @@ -479,6 +482,8 @@ enum devlink_param_generic_id { #define DEVLINK_INFO_VERSION_GENERIC_FW "fw" /* Control processor FW version */ #define DEVLINK_INFO_VERSION_GENERIC_FW_MGMT "fw.mgmt" +/* FW interface specification version */ +#define DEVLINK_INFO_VERSION_GENERIC_FW_MGMT_API "fw.mgmt.api" /* Data path microcode controlling high-speed packet processing */ #define DEVLINK_INFO_VERSION_GENERIC_FW_APP "fw.app" /* UNDI software version */ @@ -489,11 +494,27 @@ enum devlink_param_generic_id { #define DEVLINK_INFO_VERSION_GENERIC_FW_PSID "fw.psid" /* RoCE FW version */ #define DEVLINK_INFO_VERSION_GENERIC_FW_ROCE "fw.roce" +/* Firmware bundle identifier */ +#define DEVLINK_INFO_VERSION_GENERIC_FW_BUNDLE_ID "fw.bundle_id" struct devlink_region; struct devlink_info_req; -typedef void 
devlink_snapshot_data_dest_t(const void *data); +/** + * struct devlink_region_ops - Region operations + * @name: region name + * @destructor: callback used to free snapshot memory when deleting + * @snapshot: callback to request an immediate snapshot. On success, + * the data variable must be updated to point to the snapshot data. + * The function will be called while the devlink instance lock is + * held. + */ +struct devlink_region_ops { + const char *name; + void (*destructor)(const void *data); + int (*snapshot)(struct devlink *devlink, struct netlink_ext_ack *extack, + u8 **data); +}; struct devlink_fmsg; struct devlink_health_reporter; @@ -526,10 +547,34 @@ struct devlink_health_reporter_ops { }; /** + * struct devlink_trap_policer - Immutable packet trap policer attributes. + * @id: Policer identifier. + * @init_rate: Initial rate in packets / sec. + * @init_burst: Initial burst size in packets. + * @max_rate: Maximum rate. + * @min_rate: Minimum rate. + * @max_burst: Maximum burst size. + * @min_burst: Minimum burst size. + * + * Describes immutable attributes of packet trap policers that drivers register + * with devlink. + */ +struct devlink_trap_policer { + u32 id; + u64 init_rate; + u64 init_burst; + u64 max_rate; + u64 min_rate; + u64 max_burst; + u64 min_burst; +}; + +/** * struct devlink_trap_group - Immutable packet trap group attributes. * @name: Trap group name. * @id: Trap group identifier. * @generic: Whether the trap group is generic or not. + * @init_policer_id: Initial policer identifier. * * Describes immutable attributes of packet trap groups that drivers register * with devlink. @@ -538,9 +583,11 @@ struct devlink_trap_group { const char *name; u16 id; bool generic; + u32 init_policer_id; }; #define DEVLINK_TRAP_METADATA_TYPE_F_IN_PORT BIT(0) +#define DEVLINK_TRAP_METADATA_TYPE_F_FA_COOKIE BIT(1) /** * struct devlink_trap - Immutable packet trap attributes. @@ -549,7 +596,7 @@ struct devlink_trap_group { * @generic: Whether the trap is generic or not. * @id: Trap identifier. * @name: Trap name. - * @group: Immutable packet trap group attributes. + * @init_group_id: Initial group identifier. * @metadata_cap: Metadata types that can be provided by the trap. 
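The new struct devlink_region_ops above folds the region name and snapshot destructor into one structure and adds an on-demand ->snapshot() callback; the matching devlink_region_create() and snapshot-id prototypes appear later in this devlink.h hunk. A rough driver-side sketch, assuming a hypothetical my_hw_dump() readout helper, a made-up region name and size, and an 8-snapshot limit:

#include <linux/err.h>
#include <linux/mm.h>
#include <net/devlink.h>

#define MY_REGION_SIZE	0x4000			/* placeholder size of the dumped area */

struct my_hw;					/* placeholder device context */
void my_hw_dump(struct my_hw *hw, u8 *buf, u32 len);	/* placeholder */

/* Called with the devlink instance lock held; the returned buffer is later
 * freed through ->destructor.
 */
static int my_region_snapshot(struct devlink *devlink,
			      struct netlink_ext_ack *extack, u8 **data)
{
	u8 *buf = kvzalloc(MY_REGION_SIZE, GFP_KERNEL);

	if (!buf)
		return -ENOMEM;
	my_hw_dump(devlink_priv(devlink), buf, MY_REGION_SIZE);
	*data = buf;
	return 0;
}

static const struct devlink_region_ops my_region_ops = {
	.name		= "fw-health",		/* placeholder region name */
	.destructor	= kvfree,
	.snapshot	= my_region_snapshot,
};

static int my_region_register(struct devlink *devlink,
			      struct devlink_region **region)
{
	*region = devlink_region_create(devlink, &my_region_ops, 8,
					MY_REGION_SIZE);
	return PTR_ERR_OR_ZERO(*region);
}

Driver-initiated snapshots would pair the new devlink_region_snapshot_id_get()/_put() calls around devlink_region_snapshot_create(), as the prototypes further down suggest.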
* * Describes immutable attributes of packet traps that drivers register with @@ -561,7 +608,7 @@ struct devlink_trap { bool generic; u16 id; const char *name; - struct devlink_trap_group group; + u16 init_group_id; u32 metadata_cap; }; @@ -596,6 +643,8 @@ enum devlink_trap_generic_id { DEVLINK_TRAP_GENERIC_ID_NON_ROUTABLE, DEVLINK_TRAP_GENERIC_ID_DECAP_ERROR, DEVLINK_TRAP_GENERIC_ID_OVERLAY_SMAC_MC, + DEVLINK_TRAP_GENERIC_ID_INGRESS_FLOW_ACTION_DROP, + DEVLINK_TRAP_GENERIC_ID_EGRESS_FLOW_ACTION_DROP, /* Add new generic trap IDs above */ __DEVLINK_TRAP_GENERIC_ID_MAX, @@ -610,6 +659,7 @@ enum devlink_trap_group_generic_id { DEVLINK_TRAP_GROUP_GENERIC_ID_L3_DROPS, DEVLINK_TRAP_GROUP_GENERIC_ID_BUFFER_DROPS, DEVLINK_TRAP_GROUP_GENERIC_ID_TUNNEL_DROPS, + DEVLINK_TRAP_GROUP_GENERIC_ID_ACL_DROPS, /* Add new generic trap group IDs above */ __DEVLINK_TRAP_GROUP_GENERIC_ID_MAX, @@ -671,6 +721,10 @@ enum devlink_trap_group_generic_id { "decap_error" #define DEVLINK_TRAP_GENERIC_NAME_OVERLAY_SMAC_MC \ "overlay_smac_is_mc" +#define DEVLINK_TRAP_GENERIC_NAME_INGRESS_FLOW_ACTION_DROP \ + "ingress_flow_action_drop" +#define DEVLINK_TRAP_GENERIC_NAME_EGRESS_FLOW_ACTION_DROP \ + "egress_flow_action_drop" #define DEVLINK_TRAP_GROUP_GENERIC_NAME_L2_DROPS \ "l2_drops" @@ -680,19 +734,22 @@ enum devlink_trap_group_generic_id { "buffer_drops" #define DEVLINK_TRAP_GROUP_GENERIC_NAME_TUNNEL_DROPS \ "tunnel_drops" +#define DEVLINK_TRAP_GROUP_GENERIC_NAME_ACL_DROPS \ + "acl_drops" -#define DEVLINK_TRAP_GENERIC(_type, _init_action, _id, _group, _metadata_cap) \ +#define DEVLINK_TRAP_GENERIC(_type, _init_action, _id, _group_id, \ + _metadata_cap) \ { \ .type = DEVLINK_TRAP_TYPE_##_type, \ .init_action = DEVLINK_TRAP_ACTION_##_init_action, \ .generic = true, \ .id = DEVLINK_TRAP_GENERIC_ID_##_id, \ .name = DEVLINK_TRAP_GENERIC_NAME_##_id, \ - .group = _group, \ + .init_group_id = _group_id, \ .metadata_cap = _metadata_cap, \ } -#define DEVLINK_TRAP_DRIVER(_type, _init_action, _id, _name, _group, \ +#define DEVLINK_TRAP_DRIVER(_type, _init_action, _id, _name, _group_id, \ _metadata_cap) \ { \ .type = DEVLINK_TRAP_TYPE_##_type, \ @@ -700,15 +757,28 @@ enum devlink_trap_group_generic_id { .generic = false, \ .id = _id, \ .name = _name, \ - .group = _group, \ + .init_group_id = _group_id, \ .metadata_cap = _metadata_cap, \ } -#define DEVLINK_TRAP_GROUP_GENERIC(_id) \ +#define DEVLINK_TRAP_GROUP_GENERIC(_id, _policer_id) \ { \ .name = DEVLINK_TRAP_GROUP_GENERIC_NAME_##_id, \ .id = DEVLINK_TRAP_GROUP_GENERIC_ID_##_id, \ .generic = true, \ + .init_policer_id = _policer_id, \ + } + +#define DEVLINK_TRAP_POLICER(_id, _rate, _burst, _max_rate, _min_rate, \ + _max_burst, _min_burst) \ + { \ + .id = _id, \ + .init_rate = _rate, \ + .init_burst = _burst, \ + .max_rate = _max_rate, \ + .min_rate = _min_rate, \ + .max_burst = _max_burst, \ + .min_burst = _min_burst, \ } struct devlink_ops { @@ -807,6 +877,47 @@ struct devlink_ops { */ int (*trap_group_init)(struct devlink *devlink, const struct devlink_trap_group *group); + /** + * @trap_group_set: Trap group parameters set function. + * + * Note: @policer can be NULL when a policer is being unbound from + * @group. + */ + int (*trap_group_set)(struct devlink *devlink, + const struct devlink_trap_group *group, + const struct devlink_trap_policer *policer); + /** + * @trap_policer_init: Trap policer initialization function. + * + * Should be used by device drivers to initialize the trap policer in + * the underlying device. 
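The reworked macros above decouple traps from their groups and add policer definitions. A short sketch of the static tables a driver might build with them, using a made-up driver-local policer id, the new ACL_DROPS group and the new flow-action-drop trap from this patch; the numeric rate and burst limits are illustrative only:

#include <net/devlink.h>

enum {
	MY_TRAP_POLICER_ID_ACL = 1,	/* placeholder driver-local policer id */
};

/* Initial rate 1000 pps and burst of 128 packets, with the allowed ranges. */
static const struct devlink_trap_policer my_trap_policers_arr[] = {
	DEVLINK_TRAP_POLICER(MY_TRAP_POLICER_ID_ACL, 1000, 128,
			     1000000, 1, 2048, 1),
};

/* Groups now name their initial policer instead of being embedded in traps. */
static const struct devlink_trap_group my_trap_groups_arr[] = {
	DEVLINK_TRAP_GROUP_GENERIC(ACL_DROPS, MY_TRAP_POLICER_ID_ACL),
};

/* Traps reference their group by id via the new init_group_id field. */
static const struct devlink_trap my_traps_arr[] = {
	DEVLINK_TRAP_GENERIC(DROP, DROP, INGRESS_FLOW_ACTION_DROP,
			     DEVLINK_TRAP_GROUP_GENERIC_ID_ACL_DROPS,
			     DEVLINK_TRAP_METADATA_TYPE_F_IN_PORT |
			     DEVLINK_TRAP_METADATA_TYPE_F_FA_COOKIE),
};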
+ */ + int (*trap_policer_init)(struct devlink *devlink, + const struct devlink_trap_policer *policer); + /** + * @trap_policer_fini: Trap policer de-initialization function. + * + * Should be used by device drivers to de-initialize the trap policer + * in the underlying device. + */ + void (*trap_policer_fini)(struct devlink *devlink, + const struct devlink_trap_policer *policer); + /** + * @trap_policer_set: Trap policer parameters set function. + */ + int (*trap_policer_set)(struct devlink *devlink, + const struct devlink_trap_policer *policer, + u64 rate, u64 burst, + struct netlink_ext_ack *extack); + /** + * @trap_policer_counter_get: Trap policer counter get function. + * + * Should be used by device drivers to report number of packets dropped + * by the policer. + */ + int (*trap_policer_counter_get)(struct devlink *devlink, + const struct devlink_trap_policer *policer, + u64 *p_drops); }; static inline void *devlink_priv(struct devlink *devlink) @@ -949,15 +1060,15 @@ void devlink_port_param_value_changed(struct devlink_port *devlink_port, u32 param_id); void devlink_param_value_str_fill(union devlink_param_value *dst_val, const char *src); -struct devlink_region *devlink_region_create(struct devlink *devlink, - const char *region_name, - u32 region_max_snapshots, - u64 region_size); +struct devlink_region * +devlink_region_create(struct devlink *devlink, + const struct devlink_region_ops *ops, + u32 region_max_snapshots, u64 region_size); void devlink_region_destroy(struct devlink_region *region); -u32 devlink_region_snapshot_id_get(struct devlink *devlink); +int devlink_region_snapshot_id_get(struct devlink *devlink, u32 *id); +void devlink_region_snapshot_id_put(struct devlink *devlink, u32 id); int devlink_region_snapshot_create(struct devlink_region *region, - u8 *data, u32 snapshot_id, - devlink_snapshot_data_dest_t *data_destructor); + u8 *data, u32 snapshot_id); int devlink_info_serial_number_put(struct devlink_info_req *req, const char *sn); int devlink_info_driver_name_put(struct devlink_info_req *req, @@ -981,12 +1092,17 @@ int devlink_fmsg_pair_nest_end(struct devlink_fmsg *fmsg); int devlink_fmsg_arr_pair_nest_start(struct devlink_fmsg *fmsg, const char *name); int devlink_fmsg_arr_pair_nest_end(struct devlink_fmsg *fmsg); +int devlink_fmsg_binary_pair_nest_start(struct devlink_fmsg *fmsg, + const char *name); +int devlink_fmsg_binary_pair_nest_end(struct devlink_fmsg *fmsg); int devlink_fmsg_bool_put(struct devlink_fmsg *fmsg, bool value); int devlink_fmsg_u8_put(struct devlink_fmsg *fmsg, u8 value); int devlink_fmsg_u32_put(struct devlink_fmsg *fmsg, u32 value); int devlink_fmsg_u64_put(struct devlink_fmsg *fmsg, u64 value); int devlink_fmsg_string_put(struct devlink_fmsg *fmsg, const char *value); +int devlink_fmsg_binary_put(struct devlink_fmsg *fmsg, const void *value, + u16 value_len); int devlink_fmsg_bool_pair_put(struct devlink_fmsg *fmsg, const char *name, bool value); @@ -1004,8 +1120,7 @@ int devlink_fmsg_binary_pair_put(struct devlink_fmsg *fmsg, const char *name, struct devlink_health_reporter * devlink_health_reporter_create(struct devlink *devlink, const struct devlink_health_reporter_ops *ops, - u64 graceful_period, bool auto_recover, - void *priv); + u64 graceful_period, void *priv); void devlink_health_reporter_destroy(struct devlink_health_reporter *reporter); @@ -1035,10 +1150,24 @@ int devlink_traps_register(struct devlink *devlink, void devlink_traps_unregister(struct devlink *devlink, const struct devlink_trap *traps, size_t traps_count); -void 
devlink_trap_report(struct devlink *devlink, - struct sk_buff *skb, void *trap_ctx, - struct devlink_port *in_devlink_port); +void devlink_trap_report(struct devlink *devlink, struct sk_buff *skb, + void *trap_ctx, struct devlink_port *in_devlink_port, + const struct flow_action_cookie *fa_cookie); void *devlink_trap_ctx_priv(void *trap_ctx); +int devlink_trap_groups_register(struct devlink *devlink, + const struct devlink_trap_group *groups, + size_t groups_count); +void devlink_trap_groups_unregister(struct devlink *devlink, + const struct devlink_trap_group *groups, + size_t groups_count); +int +devlink_trap_policers_register(struct devlink *devlink, + const struct devlink_trap_policer *policers, + size_t policers_count); +void +devlink_trap_policers_unregister(struct devlink *devlink, + const struct devlink_trap_policer *policers, + size_t policers_count); #if IS_ENABLED(CONFIG_NET_DEVLINK) diff --git a/include/net/dn_fib.h b/include/net/dn_fib.h index 6dd2213c5eb2..ccc6e9df178b 100644 --- a/include/net/dn_fib.h +++ b/include/net/dn_fib.h @@ -90,7 +90,7 @@ struct dn_fib_table { int (*flush)(struct dn_fib_table *t); int (*dump)(struct dn_fib_table *t, struct sk_buff *skb, struct netlink_callback *cb); - unsigned char data[0]; + unsigned char data[]; }; #ifdef CONFIG_DECNET_ROUTER diff --git a/include/net/drop_monitor.h b/include/net/drop_monitor.h index 2ab668461463..3f5b6ddb3179 100644 --- a/include/net/drop_monitor.h +++ b/include/net/drop_monitor.h @@ -6,20 +6,23 @@ #include <linux/ktime.h> #include <linux/netdevice.h> #include <linux/skbuff.h> +#include <net/flow_offload.h> /** * struct net_dm_hw_metadata - Hardware-supplied packet metadata. * @trap_group_name: Hardware trap group name. * @trap_name: Hardware trap name. * @input_dev: Input netdevice. + * @fa_cookie: Flow action user cookie. */ struct net_dm_hw_metadata { const char *trap_group_name; const char *trap_name; struct net_device *input_dev; + const struct flow_action_cookie *fa_cookie; }; -#if IS_ENABLED(CONFIG_NET_DROP_MONITOR) +#if IS_REACHABLE(CONFIG_NET_DROP_MONITOR) void net_dm_hw_report(struct sk_buff *skb, const struct net_dm_hw_metadata *hw_metadata); #else diff --git a/include/net/dsa.h b/include/net/dsa.h index 63495e3443ac..fb3f9222f2a1 100644 --- a/include/net/dsa.h +++ b/include/net/dsa.h @@ -130,9 +130,10 @@ struct dsa_switch_tree { struct list_head rtable; }; -/* TC matchall action types, only mirroring for now */ +/* TC matchall action types */ enum dsa_port_mall_action_type { DSA_PORT_MALL_MIRROR, + DSA_PORT_MALL_POLICER, }; /* TC mirroring entry */ @@ -141,6 +142,12 @@ struct dsa_mall_mirror_tc_entry { bool ingress; }; +/* TC port policer entry */ +struct dsa_mall_policer_tc_entry { + s64 burst; + u64 rate_bytes_per_sec; +}; + /* TC matchall entry */ struct dsa_mall_tc_entry { struct list_head list; @@ -148,6 +155,7 @@ struct dsa_mall_tc_entry { enum dsa_port_mall_action_type type; union { struct dsa_mall_mirror_tc_entry mirror; + struct dsa_mall_policer_tc_entry policer; }; }; @@ -284,6 +292,12 @@ struct dsa_switch { */ bool pcs_poll; + /* For switches that only have the MRU configurable. To ensure the + * configured MTU is not exceeded, normalization of MRU on all bridged + * interfaces is needed. 
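With trap groups and policers now registered explicitly (prototypes in the hunk above), a driver wires the three tables up in order. A sketch reusing the hypothetical my_*_arr tables from the previous note; the trailing priv argument to devlink_traps_register() is assumed from the existing API and is not visible in this hunk:

#include <linux/kernel.h>
#include <net/devlink.h>

static int my_traps_init(struct devlink *devlink)
{
	int err;

	err = devlink_trap_policers_register(devlink, my_trap_policers_arr,
					     ARRAY_SIZE(my_trap_policers_arr));
	if (err)
		return err;

	err = devlink_trap_groups_register(devlink, my_trap_groups_arr,
					   ARRAY_SIZE(my_trap_groups_arr));
	if (err)
		goto err_policers;

	err = devlink_traps_register(devlink, my_traps_arr,
				     ARRAY_SIZE(my_traps_arr),
				     devlink_priv(devlink));
	if (err)
		goto err_groups;
	return 0;

err_groups:
	devlink_trap_groups_unregister(devlink, my_trap_groups_arr,
				       ARRAY_SIZE(my_trap_groups_arr));
err_policers:
	devlink_trap_policers_unregister(devlink, my_trap_policers_arr,
					 ARRAY_SIZE(my_trap_policers_arr));
	return err;
}

devlink_trap_report() now also takes a const struct flow_action_cookie * as its last argument; drivers that cannot attribute a drop to a specific tc action simply pass NULL there.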
+ */ + bool mtu_enforcement_ingress; + size_t num_ports; }; @@ -420,7 +434,9 @@ struct dsa_switch_ops { void (*phylink_mac_link_up)(struct dsa_switch *ds, int port, unsigned int mode, phy_interface_t interface, - struct phy_device *phydev); + struct phy_device *phydev, + int speed, int duplex, + bool tx_pause, bool rx_pause); void (*phylink_fixed_state)(struct dsa_switch *ds, int port, struct phylink_link_state *state); /* @@ -538,11 +554,20 @@ struct dsa_switch_ops { /* * TC integration */ + int (*cls_flower_add)(struct dsa_switch *ds, int port, + struct flow_cls_offload *cls, bool ingress); + int (*cls_flower_del)(struct dsa_switch *ds, int port, + struct flow_cls_offload *cls, bool ingress); + int (*cls_flower_stats)(struct dsa_switch *ds, int port, + struct flow_cls_offload *cls, bool ingress); int (*port_mirror_add)(struct dsa_switch *ds, int port, struct dsa_mall_mirror_tc_entry *mirror, bool ingress); void (*port_mirror_del)(struct dsa_switch *ds, int port, struct dsa_mall_mirror_tc_entry *mirror); + int (*port_policer_add)(struct dsa_switch *ds, int port, + struct dsa_mall_policer_tc_entry *policer); + void (*port_policer_del)(struct dsa_switch *ds, int port); int (*port_setup_tc)(struct dsa_switch *ds, int port, enum tc_setup_type type, void *type_data); @@ -571,6 +596,16 @@ struct dsa_switch_ops { struct devlink_param_gset_ctx *ctx); int (*devlink_param_set)(struct dsa_switch *ds, u32 id, struct devlink_param_gset_ctx *ctx); + + /* + * MTU change functionality. Switches can also adjust their MRU through + * this method. By MTU, one understands the SDU (L2 payload) length. + * If the switch needs to account for the DSA tag on the CPU port, this + * method needs to to do so privately. + */ + int (*port_change_mtu)(struct dsa_switch *ds, int port, + int new_mtu); + int (*port_max_mtu)(struct dsa_switch *ds, int port); }; #define DSA_DEVLINK_PARAM_DRIVER(_id, _name, _type, _cmodes) \ diff --git a/include/net/dst.h b/include/net/dst.h index 3448cf865ede..07adfacd8088 100644 --- a/include/net/dst.h +++ b/include/net/dst.h @@ -35,7 +35,6 @@ struct dst_entry { int (*output)(struct net *net, struct sock *sk, struct sk_buff *skb); unsigned short flags; -#define DST_HOST 0x0001 #define DST_NOXFRM 0x0002 #define DST_NOPOLICY 0x0004 #define DST_NOCOUNT 0x0008 diff --git a/include/net/esp.h b/include/net/esp.h index 117652eb6ea3..9c5637d41d95 100644 --- a/include/net/esp.h +++ b/include/net/esp.h @@ -11,6 +11,22 @@ static inline struct ip_esp_hdr *ip_esp_hdr(const struct sk_buff *skb) return (struct ip_esp_hdr *)skb_transport_header(skb); } +static inline void esp_output_fill_trailer(u8 *tail, int tfclen, int plen, __u8 proto) +{ + /* Fill padding... 
*/ + if (tfclen) { + memset(tail, 0, tfclen); + tail += tfclen; + } + do { + int i; + for (i = 0; i < plen - 2; i++) + tail[i] = i + 1; + } while (0); + tail[plen - 2] = plen - 2; + tail[plen - 1] = proto; +} + struct esp_info { struct ip_esp_hdr *esph; __be64 seqno; diff --git a/include/net/fib_rules.h b/include/net/fib_rules.h index 54e227e6b06a..a259050f84af 100644 --- a/include/net/fib_rules.h +++ b/include/net/fib_rules.h @@ -108,6 +108,7 @@ struct fib_rule_notifier_info { [FRA_OIFNAME] = { .type = NLA_STRING, .len = IFNAMSIZ - 1 }, \ [FRA_PRIORITY] = { .type = NLA_U32 }, \ [FRA_FWMARK] = { .type = NLA_U32 }, \ + [FRA_TUN_ID] = { .type = NLA_U64 }, \ [FRA_FWMASK] = { .type = NLA_U32 }, \ [FRA_TABLE] = { .type = NLA_U32 }, \ [FRA_SUPPRESS_PREFIXLEN] = { .type = NLA_U32 }, \ diff --git a/include/net/flow_offload.h b/include/net/flow_offload.h index c6f7bd22db60..3619c6acf60f 100644 --- a/include/net/flow_offload.h +++ b/include/net/flow_offload.h @@ -3,6 +3,7 @@ #include <linux/kernel.h> #include <linux/list.h> +#include <linux/netlink.h> #include <net/flow_dissector.h> #include <linux/rhashtable.h> @@ -68,6 +69,10 @@ struct flow_match_enc_opts { struct flow_dissector_key_enc_opts *key, *mask; }; +struct flow_match_ct { + struct flow_dissector_key_ct *key, *mask; +}; + struct flow_rule; void flow_rule_match_meta(const struct flow_rule *rule, @@ -110,6 +115,8 @@ void flow_rule_match_enc_keyid(const struct flow_rule *rule, struct flow_match_enc_keyid *out); void flow_rule_match_enc_opts(const struct flow_rule *rule, struct flow_match_enc_opts *out); +void flow_rule_match_ct(const struct flow_rule *rule, + struct flow_match_ct *out); enum flow_action_id { FLOW_ACTION_ACCEPT = 0, @@ -130,11 +137,13 @@ enum flow_action_id { FLOW_ACTION_CSUM, FLOW_ACTION_MARK, FLOW_ACTION_PTYPE, + FLOW_ACTION_PRIORITY, FLOW_ACTION_WAKE, FLOW_ACTION_QUEUE, FLOW_ACTION_SAMPLE, FLOW_ACTION_POLICE, FLOW_ACTION_CT, + FLOW_ACTION_CT_METADATA, FLOW_ACTION_MPLS_PUSH, FLOW_ACTION_MPLS_POP, FLOW_ACTION_MPLS_MANGLE, @@ -154,10 +163,35 @@ enum flow_action_mangle_base { FLOW_ACT_MANGLE_HDR_TYPE_UDP, }; +enum flow_action_hw_stats_bit { + FLOW_ACTION_HW_STATS_IMMEDIATE_BIT, + FLOW_ACTION_HW_STATS_DELAYED_BIT, +}; + +enum flow_action_hw_stats { + FLOW_ACTION_HW_STATS_DISABLED = 0, + FLOW_ACTION_HW_STATS_IMMEDIATE = + BIT(FLOW_ACTION_HW_STATS_IMMEDIATE_BIT), + FLOW_ACTION_HW_STATS_DELAYED = BIT(FLOW_ACTION_HW_STATS_DELAYED_BIT), + FLOW_ACTION_HW_STATS_ANY = FLOW_ACTION_HW_STATS_IMMEDIATE | + FLOW_ACTION_HW_STATS_DELAYED, +}; + typedef void (*action_destr)(void *priv); +struct flow_action_cookie { + u32 cookie_len; + u8 cookie[]; +}; + +struct flow_action_cookie *flow_action_cookie_create(void *data, + unsigned int len, + gfp_t gfp); +void flow_action_cookie_destroy(struct flow_action_cookie *cookie); + struct flow_action_entry { enum flow_action_id id; + enum flow_action_hw_stats hw_stats; action_destr destructor; void *destructor_priv; union { @@ -168,7 +202,8 @@ struct flow_action_entry { __be16 proto; u8 prio; } vlan; - struct { /* FLOW_ACTION_PACKET_EDIT */ + struct { /* FLOW_ACTION_MANGLE */ + /* FLOW_ACTION_ADD */ enum flow_action_mangle_base htype; u32 offset; u32 mask; @@ -178,6 +213,7 @@ struct flow_action_entry { u32 csum_flags; /* FLOW_ACTION_CSUM */ u32 mark; /* FLOW_ACTION_MARK */ u16 ptype; /* FLOW_ACTION_PTYPE */ + u32 priority; /* FLOW_ACTION_PRIORITY */ struct { /* FLOW_ACTION_QUEUE */ u32 ctx; u32 index; @@ -196,7 +232,13 @@ struct flow_action_entry { struct { /* FLOW_ACTION_CT */ int action; u16 zone; + 
struct nf_flowtable *flow_table; } ct; + struct { + unsigned long cookie; + u32 mark; + u32 labels[4]; + } ct_metadata; struct { /* FLOW_ACTION_MPLS_PUSH */ u32 label; __be16 proto; @@ -214,11 +256,12 @@ struct flow_action_entry { u8 ttl; } mpls_mangle; }; + struct flow_action_cookie *cookie; /* user defined action cookie */ }; struct flow_action { unsigned int num_entries; - struct flow_action_entry entries[0]; + struct flow_action_entry entries[]; }; static inline bool flow_action_has_entries(const struct flow_action *action) @@ -238,7 +281,77 @@ static inline bool flow_offload_has_one_action(const struct flow_action *action) } #define flow_action_for_each(__i, __act, __actions) \ - for (__i = 0, __act = &(__actions)->entries[0]; __i < (__actions)->num_entries; __act = &(__actions)->entries[++__i]) + for (__i = 0, __act = &(__actions)->entries[0]; \ + __i < (__actions)->num_entries; \ + __act = &(__actions)->entries[++__i]) + +static inline bool +flow_action_mixed_hw_stats_check(const struct flow_action *action, + struct netlink_ext_ack *extack) +{ + const struct flow_action_entry *action_entry; + u8 uninitialized_var(last_hw_stats); + int i; + + if (flow_offload_has_one_action(action)) + return true; + + flow_action_for_each(i, action_entry, action) { + if (i && action_entry->hw_stats != last_hw_stats) { + NL_SET_ERR_MSG_MOD(extack, "Mixing HW stats types for actions is not supported"); + return false; + } + last_hw_stats = action_entry->hw_stats; + } + return true; +} + +static inline const struct flow_action_entry * +flow_action_first_entry_get(const struct flow_action *action) +{ + WARN_ON(!flow_action_has_entries(action)); + return &action->entries[0]; +} + +static inline bool +__flow_action_hw_stats_check(const struct flow_action *action, + struct netlink_ext_ack *extack, + bool check_allow_bit, + enum flow_action_hw_stats_bit allow_bit) +{ + const struct flow_action_entry *action_entry; + + if (!flow_action_has_entries(action)) + return true; + if (!flow_action_mixed_hw_stats_check(action, extack)) + return false; + action_entry = flow_action_first_entry_get(action); + if (!check_allow_bit && + action_entry->hw_stats != FLOW_ACTION_HW_STATS_ANY) { + NL_SET_ERR_MSG_MOD(extack, "Driver supports only default HW stats type \"any\""); + return false; + } else if (check_allow_bit && + !(action_entry->hw_stats & BIT(allow_bit))) { + NL_SET_ERR_MSG_MOD(extack, "Driver does not support selected HW stats type"); + return false; + } + return true; +} + +static inline bool +flow_action_hw_stats_check(const struct flow_action *action, + struct netlink_ext_ack *extack, + enum flow_action_hw_stats_bit allow_bit) +{ + return __flow_action_hw_stats_check(action, extack, true, allow_bit); +} + +static inline bool +flow_action_basic_hw_stats_check(const struct flow_action *action, + struct netlink_ext_ack *extack) +{ + return __flow_action_hw_stats_check(action, extack, false, 0); +} struct flow_rule { struct flow_match match; @@ -257,14 +370,24 @@ struct flow_stats { u64 pkts; u64 bytes; u64 lastused; + enum flow_action_hw_stats used_hw_stats; + bool used_hw_stats_valid; }; static inline void flow_stats_update(struct flow_stats *flow_stats, - u64 bytes, u64 pkts, u64 lastused) + u64 bytes, u64 pkts, u64 lastused, + enum flow_action_hw_stats used_hw_stats) { flow_stats->pkts += pkts; flow_stats->bytes += bytes; flow_stats->lastused = max_t(u64, flow_stats->lastused, lastused); + + /* The driver should pass value with a maximum of one bit set. + * Passing FLOW_ACTION_HW_STATS_ANY is invalid. 
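The hw_stats checking helpers introduced in the flow_offload.h hunk above let a driver validate the counter type a rule requests before programming it. A sketch of a flower replace handler for a driver that only supports the default counter behaviour; struct my_port and my_port_add_drop_rule() are placeholders:

#include <net/flow_offload.h>
#include <net/pkt_cls.h>

struct my_port;						/* placeholder */
int my_port_add_drop_rule(struct my_port *port, unsigned long cookie);	/* placeholder */

static int my_flower_replace(struct my_port *port, struct flow_cls_offload *f)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	const struct flow_action_entry *act;
	int i;

	/* Reject rules that request anything other than the default
	 * ("any") statistics type, with an extack message for the user.
	 */
	if (!flow_action_basic_hw_stats_check(&rule->action, f->common.extack))
		return -EOPNOTSUPP;

	flow_action_for_each(i, act, &rule->action) {
		switch (act->id) {
		case FLOW_ACTION_DROP:
			return my_port_add_drop_rule(port, f->cookie);
		default:
			return -EOPNOTSUPP;
		}
	}
	return -EOPNOTSUPP;
}

A driver that does implement a specific counter type would instead call flow_action_hw_stats_check() with FLOW_ACTION_HW_STATS_DELAYED_BIT or the immediate variant.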
+ */ + WARN_ON(used_hw_stats == FLOW_ACTION_HW_STATS_ANY); + flow_stats->used_hw_stats |= used_hw_stats; + flow_stats->used_hw_stats_valid = true; } enum flow_block_command { @@ -410,6 +533,7 @@ void flow_indr_block_cb_unregister(struct net_device *dev, void flow_indr_block_call(struct net_device *dev, struct flow_block_offload *bo, - enum flow_block_command command); + enum flow_block_command command, + enum tc_setup_type type); #endif /* _NET_FLOW_OFFLOAD_H */ diff --git a/include/net/inet6_hashtables.h b/include/net/inet6_hashtables.h index fe96bf247aac..81b965953036 100644 --- a/include/net/inet6_hashtables.h +++ b/include/net/inet6_hashtables.h @@ -85,9 +85,8 @@ static inline struct sock *__inet6_lookup_skb(struct inet_hashinfo *hashinfo, int iif, int sdif, bool *refcounted) { - struct sock *sk = skb_steal_sock(skb); + struct sock *sk = skb_steal_sock(skb, refcounted); - *refcounted = true; if (sk) return sk; diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h index 895546058a20..a3f076befa4f 100644 --- a/include/net/inet_connection_sock.h +++ b/include/net/inet_connection_sock.h @@ -335,4 +335,10 @@ static inline void inet_csk_inc_pingpong_cnt(struct sock *sk) if (icsk->icsk_ack.pingpong < U8_MAX) icsk->icsk_ack.pingpong++; } + +static inline bool inet_csk_has_ulp(struct sock *sk) +{ + return inet_sk(sk)->is_icsk && !!inet_csk(sk)->icsk_ulp_ops; +} + #endif /* _INET_CONNECTION_SOCK_H */ diff --git a/include/net/inet_hashtables.h b/include/net/inet_hashtables.h index d0019d3395cf..ad64ba6a057f 100644 --- a/include/net/inet_hashtables.h +++ b/include/net/inet_hashtables.h @@ -379,10 +379,9 @@ static inline struct sock *__inet_lookup_skb(struct inet_hashinfo *hashinfo, const int sdif, bool *refcounted) { - struct sock *sk = skb_steal_sock(skb); + struct sock *sk = skb_steal_sock(skb, refcounted); const struct iphdr *iph = ip_hdr(skb); - *refcounted = true; if (sk) return sk; diff --git a/include/net/inet_sock.h b/include/net/inet_sock.h index 34c4436fd18f..a7ce00af6c44 100644 --- a/include/net/inet_sock.h +++ b/include/net/inet_sock.h @@ -52,7 +52,7 @@ struct ip_options { unsigned char router_alert; unsigned char cipso; unsigned char __pad2; - unsigned char __data[0]; + unsigned char __data[]; }; struct ip_options_rcu { diff --git a/include/net/ip6_checksum.h b/include/net/ip6_checksum.h index 7bec95df4f80..27ec612cd4a4 100644 --- a/include/net/ip6_checksum.h +++ b/include/net/ip6_checksum.h @@ -76,6 +76,15 @@ static inline void __tcp_v6_send_check(struct sk_buff *skb, } } +static inline void tcp_v6_gso_csum_prep(struct sk_buff *skb) +{ + struct ipv6hdr *ipv6h = ipv6_hdr(skb); + struct tcphdr *th = tcp_hdr(skb); + + ipv6h->payload_len = 0; + th->check = ~tcp_v6_check(0, &ipv6h->saddr, &ipv6h->daddr, 0); +} + #if IS_ENABLED(CONFIG_IPV6) static inline void tcp_v6_send_check(struct sock *sk, struct sk_buff *skb) { diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h index fd60a8ac02ee..80262d2980f5 100644 --- a/include/net/ip6_fib.h +++ b/include/net/ip6_fib.h @@ -190,15 +190,14 @@ struct fib6_info { u8 should_flush:1, dst_nocount:1, dst_nopolicy:1, - dst_host:1, fib6_destroying:1, offload:1, trap:1, - unused:1; + unused:2; struct rcu_head rcu; struct nexthop *nh; - struct fib6_nh fib6_nh[0]; + struct fib6_nh fib6_nh[]; }; struct rt6_info { diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h index b69c16cbbf71..f7543c095b33 100644 --- a/include/net/ip6_route.h +++ b/include/net/ip6_route.h @@ -16,7 +16,7 @@ struct route_info { reserved_h:3; 
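The flow_stats_update() change earlier in the flow_offload.h hunk adds a used_hw_stats argument so the core can report which counter type the values came from. A sketch of the matching stats callback, assuming a delayed (periodically polled) counter implementation behind a placeholder my_port_read_counters() helper:

#include <linux/jiffies.h>
#include <net/flow_offload.h>
#include <net/pkt_cls.h>

struct my_port;						/* placeholder */
void my_port_read_counters(struct my_port *port, unsigned long cookie,
			   u64 *packets, u64 *bytes);	/* placeholder */

static int my_flower_stats(struct my_port *port, struct flow_cls_offload *f)
{
	u64 packets = 0, bytes = 0;

	my_port_read_counters(port, f->cookie, &packets, &bytes);

	/* Exactly one FLOW_ACTION_HW_STATS_* bit, never _ANY. */
	flow_stats_update(&f->stats, bytes, packets, jiffies,
			  FLOW_ACTION_HW_STATS_DELAYED);
	return 0;
}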
#endif __be32 lifetime; - __u8 prefix[0]; /* 0,8 or 16 */ + __u8 prefix[]; /* 0,8 or 16 */ }; #include <net/addrconf.h> diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h index 6a1ae49809de..59e0d4e99f94 100644 --- a/include/net/ip_fib.h +++ b/include/net/ip_fib.h @@ -153,7 +153,7 @@ struct fib_info { bool nh_updated; struct nexthop *nh; struct rcu_head rcu; - struct fib_nh fib_nh[0]; + struct fib_nh fib_nh[]; }; @@ -250,7 +250,7 @@ struct fib_table { int tb_num_default; struct rcu_head rcu; unsigned long *tb_data; - unsigned long __data[0]; + unsigned long __data[]; }; struct fib_dump_filter { @@ -470,8 +470,9 @@ int fib_nh_init(struct net *net, struct fib_nh *fib_nh, struct fib_config *cfg, int nh_weight, struct netlink_ext_ack *extack); void fib_nh_release(struct net *net, struct fib_nh *fib_nh); -int fib_nh_common_init(struct fib_nh_common *nhc, struct nlattr *fc_encap, - u16 fc_encap_type, void *cfg, gfp_t gfp_flags, +int fib_nh_common_init(struct net *net, struct fib_nh_common *nhc, + struct nlattr *fc_encap, u16 fc_encap_type, + void *cfg, gfp_t gfp_flags, struct netlink_ext_ack *extack); void fib_nh_common_release(struct fib_nh_common *nhc); diff --git a/include/net/ipv6.h b/include/net/ipv6.h index cec1a54401f2..1bf8065fe871 100644 --- a/include/net/ipv6.h +++ b/include/net/ipv6.h @@ -1027,6 +1027,12 @@ struct dst_entry *ip6_dst_lookup_flow(struct net *net, const struct sock *sk, st struct dst_entry *ip6_sk_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6, const struct in6_addr *final_dst, bool connected); +struct dst_entry *ip6_dst_lookup_tunnel(struct sk_buff *skb, + struct net_device *dev, + struct net *net, struct socket *sock, + struct in6_addr *saddr, + const struct ip_tunnel_info *info, + u8 protocol, bool use_cache); struct dst_entry *ip6_blackhole_route(struct net *net, struct dst_entry *orig_dst); diff --git a/include/net/lwtunnel.h b/include/net/lwtunnel.h index 5d6c5b1fc695..05cfd6ff6528 100644 --- a/include/net/lwtunnel.h +++ b/include/net/lwtunnel.h @@ -30,11 +30,11 @@ struct lwtunnel_state { int (*orig_output)(struct net *net, struct sock *sk, struct sk_buff *skb); int (*orig_input)(struct sk_buff *); struct rcu_head rcu; - __u8 data[0]; + __u8 data[]; }; struct lwtunnel_encap_ops { - int (*build_state)(struct nlattr *encap, + int (*build_state)(struct net *net, struct nlattr *encap, unsigned int family, const void *cfg, struct lwtunnel_state **ts, struct netlink_ext_ack *extack); @@ -113,7 +113,7 @@ int lwtunnel_valid_encap_type(u16 encap_type, struct netlink_ext_ack *extack); int lwtunnel_valid_encap_type_attr(struct nlattr *attr, int len, struct netlink_ext_ack *extack); -int lwtunnel_build_state(u16 encap_type, +int lwtunnel_build_state(struct net *net, u16 encap_type, struct nlattr *encap, unsigned int family, const void *cfg, struct lwtunnel_state **lws, @@ -209,7 +209,7 @@ static inline int lwtunnel_valid_encap_type_attr(struct nlattr *attr, int len, return 0; } -static inline int lwtunnel_build_state(u16 encap_type, +static inline int lwtunnel_build_state(struct net *net, u16 encap_type, struct nlattr *encap, unsigned int family, const void *cfg, struct lwtunnel_state **lws, diff --git a/include/net/mac80211.h b/include/net/mac80211.h index 77e6b5a83b06..b6b4de0e4b5e 100644 --- a/include/net/mac80211.h +++ b/include/net/mac80211.h @@ -316,6 +316,7 @@ struct ieee80211_vif_chanctx_switch { * functionality changed for this BSS (AP mode). * @BSS_CHANGED_TWT: TWT status changed * @BSS_CHANGED_HE_OBSS_PD: OBSS Packet Detection status changed. 
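ip6_dst_lookup_tunnel(), added in the ipv6.h hunk above, factors out the IPv6 route lookup that UDP tunnel drivers used to open-code. A sketch of an xmit-side caller, assuming the helper returns an ERR_PTR on failure and fills in the chosen source address; the surrounding driver context is made up:

#include <linux/err.h>
#include <linux/in.h>
#include <linux/netdevice.h>
#include <net/dst.h>
#include <net/ip6_route.h>
#include <net/ip_tunnels.h>
#include <net/ipv6.h>

static int my_tunnel6_xmit(struct sk_buff *skb, struct net_device *dev,
			   struct socket *udp_sock,
			   const struct ip_tunnel_info *info)
{
	struct in6_addr saddr;
	struct dst_entry *dst;

	dst = ip6_dst_lookup_tunnel(skb, dev, dev_net(dev), udp_sock, &saddr,
				    info, IPPROTO_UDP, false /* use_cache */);
	if (IS_ERR(dst))
		return PTR_ERR(dst);

	/* ... build the outer IPv6/UDP headers using &saddr; the encap
	 * output path consumes the dst reference ...
	 */
	return 0;
}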
+ * @BSS_CHANGED_HE_BSS_COLOR: BSS Color has changed * */ enum ieee80211_bss_change { @@ -348,6 +349,7 @@ enum ieee80211_bss_change { BSS_CHANGED_FTM_RESPONDER = 1<<26, BSS_CHANGED_TWT = 1<<27, BSS_CHANGED_HE_OBSS_PD = 1<<28, + BSS_CHANGED_HE_BSS_COLOR = 1<<29, /* when adding here, make sure to change ieee80211_reconfig */ }; @@ -494,7 +496,6 @@ struct ieee80211_ftm_responder_params { * This structure keeps information about a BSS (and an association * to that BSS) that can change during the lifetime of the BSS. * - * @bss_color: 6-bit value to mark inter-BSS frame, if BSS supports HE * @htc_trig_based_pkt_ext: default PE in 4us units, if BSS supports HE * @multi_sta_back_32bit: supports BA bitmap of 32-bits in Multi-STA BACK * @uora_exists: is the UORA element advertised by AP @@ -573,7 +574,7 @@ struct ieee80211_ftm_responder_params { * @ssid: The SSID of the current vif. Valid in AP and IBSS mode. * @ssid_len: Length of SSID given in @ssid. * @hidden_ssid: The SSID of the current vif is hidden. Only valid in AP-mode. - * @txpower: TX power in dBm + * @txpower: TX power in dBm. INT_MIN means not configured. * @txpower_type: TX power adjustment used to control per packet Transmit * Power Control (TPC) in lower driver for the current vif. In particular * TPC is enabled if value passed in %txpower_type is @@ -604,10 +605,10 @@ struct ieee80211_ftm_responder_params { * in order to discover all the nontransmitted BSSIDs in the set. * @he_operation: HE operation information of the AP we are connected to * @he_obss_pd: OBSS Packet Detection parameters. + * @he_bss_color: BSS coloring settings, if BSS supports HE */ struct ieee80211_bss_conf { const u8 *bssid; - u8 bss_color; u8 htc_trig_based_pkt_ext; bool multi_sta_back_32bit; bool uora_exists; @@ -667,6 +668,7 @@ struct ieee80211_bss_conf { u8 profile_periodicity; struct ieee80211_he_operation he_operation; struct ieee80211_he_obss_pd he_obss_pd; + struct cfg80211_he_bss_color he_bss_color; }; /** @@ -826,6 +828,7 @@ enum mac80211_tx_control_flags { IEEE80211_TX_CTRL_AMSDU = BIT(3), IEEE80211_TX_CTRL_FAST_XMIT = BIT(4), IEEE80211_TX_CTRL_SKIP_MPATH_LOOKUP = BIT(5), + IEEE80211_TX_CTRL_HW_80211_ENCAP = BIT(6), }; /* @@ -1984,6 +1987,7 @@ struct ieee80211_sta_txpwr { * @support_p2p_ps: indicates whether the STA supports P2P PS mechanism or not. * @max_rc_amsdu_len: Maximum A-MSDU size in bytes recommended by rate control. * @max_tid_amsdu_len: Maximum A-MSDU size in bytes for this TID + * @txpwr: the station tx power configuration * @txq: per-TID data TX queues (if driver uses the TXQ abstraction); note that * the last entry (%IEEE80211_NUM_TIDS) is used for non-data frames */ @@ -3448,6 +3452,10 @@ enum ieee80211_reconfig_type { * in AP mode, this callback will not be called when the flag * %IEEE80211_HW_AP_LINK_PS is set. Must be atomic. * + * @sta_set_txpwr: Configure the station tx power. This callback set the tx + * power for the station. + * This callback can sleep. + * * @sta_state: Notifies low level driver about state transition of a * station (which can be the AP, a client, IBSS/WDS/mesh peer etc.) * This callback is mutually exclusive with @sta_add/@sta_remove. @@ -3773,6 +3781,9 @@ enum ieee80211_reconfig_type { * * @start_pmsr: start peer measurement (e.g. FTM) (this call can sleep) * @abort_pmsr: abort peer measurement (this call can sleep) + * @set_tid_config: Apply TID specific configurations. This callback may sleep. + * @reset_tid_config: Reset TID specific configuration for the peer. + * This callback may sleep. 
*/ struct ieee80211_ops { void (*tx)(struct ieee80211_hw *hw, @@ -4077,6 +4088,13 @@ struct ieee80211_ops { struct cfg80211_pmsr_request *request); void (*abort_pmsr)(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct cfg80211_pmsr_request *request); + int (*set_tid_config)(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + struct cfg80211_tid_config *tid_conf); + int (*reset_tid_config)(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, u8 tids); }; /** @@ -4660,6 +4678,26 @@ void ieee80211_tx_status_irqsafe(struct ieee80211_hw *hw, struct sk_buff *skb); /** + * ieee80211_tx_status_8023 - transmit status callback for 802.3 frame format + * + * Call this function for all transmitted data frames after their transmit + * completion. This callback should only be called for data frames which + * are are using driver's (or hardware's) offload capability of encap/decap + * 802.11 frames. + * + * This function may not be called in IRQ context. Calls to this function + * for a single hardware must be synchronized against each other and all + * calls in the same tx status family. + * + * @hw: the hardware the frame was transmitted by + * @vif: the interface for which the frame was transmitted + * @skb: the frame that was transmitted, owned by mac80211 after this call + */ +void ieee80211_tx_status_8023(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct sk_buff *skb); + +/** * ieee80211_report_low_ack - report non-responding station * * When operating in AP-mode, call this function to report a non-responding @@ -6479,5 +6517,16 @@ u32 ieee80211_calc_rx_airtime(struct ieee80211_hw *hw, u32 ieee80211_calc_tx_airtime(struct ieee80211_hw *hw, struct ieee80211_tx_info *info, int len); +/** + * ieee80211_set_hw_80211_encap - enable hardware encapsulation offloading. + * + * This function is used to notify mac80211 that a vif can be passed raw 802.3 + * frames. The driver needs to then handle the 802.11 encapsulation inside the + * hardware or firmware. + * + * @vif: &struct ieee80211_vif pointer from the add_interface callback. 
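The 802.3 encapsulation offload additions in this mac80211.h hunk come in two parts: ieee80211_set_hw_80211_encap(), whose declaration is completed just below, opts a vif into receiving raw Ethernet frames for TX, and ieee80211_tx_status_8023() completes such frames. A rough sketch; treating a false return as "keep the normal 802.11 path" is an assumption, and my_hw_vif_add() is a placeholder:

#include <net/mac80211.h>

struct my_hw;						/* placeholder */
int my_hw_vif_add(struct my_hw *hw, struct ieee80211_vif *vif,
		  bool encap_offload);			/* placeholder */

static int my_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
	bool encap = ieee80211_set_hw_80211_encap(vif, true);

	return my_hw_vif_add(hw->priv, vif, encap);
}

/* TX completion: offloaded (802.3) frames use the new status entry point. */
static void my_tx_complete(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
			   struct sk_buff *skb, bool was_8023)
{
	if (was_8023)
		ieee80211_tx_status_8023(hw, vif, skb);
	else
		ieee80211_tx_status(hw, skb);
}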
+ * @enable: indicate if the feature should be turned on or off + */ +bool ieee80211_set_hw_80211_encap(struct ieee80211_vif *vif, bool enable); #endif /* MAC80211_H */ diff --git a/include/net/macsec.h b/include/net/macsec.h index 92e43db8b566..52874cdfe226 100644 --- a/include/net/macsec.h +++ b/include/net/macsec.h @@ -11,18 +11,48 @@ #include <uapi/linux/if_link.h> #include <uapi/linux/if_macsec.h> -typedef u64 __bitwise sci_t; +#define MACSEC_DEFAULT_PN_LEN 4 +#define MACSEC_XPN_PN_LEN 8 +#define MACSEC_SALT_LEN 12 #define MACSEC_NUM_AN 4 /* 2 bits for the association number */ +typedef u64 __bitwise sci_t; +typedef u32 __bitwise ssci_t; + +typedef union salt { + struct { + u32 ssci; + u64 pn; + } __packed; + u8 bytes[MACSEC_SALT_LEN]; +} __packed salt_t; + +typedef union pn { + struct { +#if defined(__LITTLE_ENDIAN_BITFIELD) + u32 lower; + u32 upper; +#elif defined(__BIG_ENDIAN_BITFIELD) + u32 upper; + u32 lower; +#else +#error "Please fix <asm/byteorder.h>" +#endif + }; + u64 full64; +} pn_t; + /** * struct macsec_key - SA key * @id: user-provided key identifier * @tfm: crypto struct, key storage + * @salt: salt used to generate IV in XPN cipher suites */ struct macsec_key { u8 id[MACSEC_KEYID_LEN]; struct crypto_aead *tfm; + salt_t salt; }; struct macsec_rx_sc_stats { @@ -58,18 +88,34 @@ struct macsec_tx_sc_stats { __u64 OutOctetsEncrypted; }; +struct macsec_dev_stats { + __u64 OutPktsUntagged; + __u64 InPktsUntagged; + __u64 OutPktsTooLong; + __u64 InPktsNoTag; + __u64 InPktsBadTag; + __u64 InPktsUnknownSCI; + __u64 InPktsNoSCI; + __u64 InPktsOverrun; +}; + /** * struct macsec_rx_sa - receive secure association * @active: * @next_pn: packet number expected for the next packet * @lock: protects next_pn manipulations * @key: key structure + * @ssci: short secure channel identifier * @stats: per-SA stats */ struct macsec_rx_sa { struct macsec_key key; + ssci_t ssci; spinlock_t lock; - u32 next_pn; + union { + pn_t next_pn_halves; + u64 next_pn; + }; refcount_t refcnt; bool active; struct macsec_rx_sa_stats __percpu *stats; @@ -110,12 +156,17 @@ struct macsec_rx_sc { * @next_pn: packet number to use for the next packet * @lock: protects next_pn manipulations * @key: key structure + * @ssci: short secure channel identifier * @stats: per-SA stats */ struct macsec_tx_sa { struct macsec_key key; + ssci_t ssci; spinlock_t lock; - u32 next_pn; + union { + pn_t next_pn_halves; + u64 next_pn; + }; refcount_t refcnt; bool active; struct macsec_tx_sa_stats __percpu *stats; @@ -152,6 +203,7 @@ struct macsec_tx_sc { * @key_len: length of keys used by the cipher suite * @icv_len: length of ICV used by the cipher suite * @validate_frames: validation mode + * @xpn: enable XPN for this SecY * @operational: MAC_Operational flag * @protect_frames: enable protection for this SecY * @replay_protect: enable packet number checks on receive @@ -166,6 +218,7 @@ struct macsec_secy { u16 key_len; u16 icv_len; enum macsec_validation_type validate_frames; + bool xpn; bool operational; bool protect_frames; bool replay_protect; @@ -178,7 +231,10 @@ struct macsec_secy { * struct macsec_context - MACsec context for hardware offloading */ struct macsec_context { - struct phy_device *phydev; + union { + struct net_device *netdev; + struct phy_device *phydev; + }; enum macsec_offload offload; struct macsec_secy *secy; @@ -191,6 +247,13 @@ struct macsec_context { struct macsec_tx_sa *tx_sa; }; } sa; + union { + struct macsec_tx_sc_stats *tx_sc_stats; + struct macsec_tx_sa_stats *tx_sa_stats; + struct macsec_rx_sc_stats 
*rx_sc_stats; + struct macsec_rx_sa_stats *rx_sa_stats; + struct macsec_dev_stats *dev_stats; + } stats; u8 prepare:1; }; @@ -217,6 +280,12 @@ struct macsec_ops { int (*mdo_add_txsa)(struct macsec_context *ctx); int (*mdo_upd_txsa)(struct macsec_context *ctx); int (*mdo_del_txsa)(struct macsec_context *ctx); + /* Statistics */ + int (*mdo_get_dev_stats)(struct macsec_context *ctx); + int (*mdo_get_tx_sc_stats)(struct macsec_context *ctx); + int (*mdo_get_tx_sa_stats)(struct macsec_context *ctx); + int (*mdo_get_rx_sc_stats)(struct macsec_context *ctx); + int (*mdo_get_rx_sa_stats)(struct macsec_context *ctx); }; void macsec_pn_wrapped(struct macsec_secy *secy, struct macsec_tx_sa *tx_sa); diff --git a/include/net/mip6.h b/include/net/mip6.h index f1c28971c362..67cd7e50804c 100644 --- a/include/net/mip6.h +++ b/include/net/mip6.h @@ -25,7 +25,7 @@ struct ip6_mh { __u8 ip6mh_reserved; __u16 ip6mh_cksum; /* Followed by type specific messages */ - __u8 data[0]; + __u8 data[]; } __packed; #define IP6_MH_TYPE_BRR 0 /* Binding Refresh Request */ diff --git a/include/net/mld.h b/include/net/mld.h index b0f5b3105ef0..496bddb59942 100644 --- a/include/net/mld.h +++ b/include/net/mld.h @@ -24,12 +24,12 @@ struct mld2_grec { __u8 grec_auxwords; __be16 grec_nsrcs; struct in6_addr grec_mca; - struct in6_addr grec_src[0]; + struct in6_addr grec_src[]; }; struct mld2_report { struct icmp6hdr mld2r_hdr; - struct mld2_grec mld2r_grec[0]; + struct mld2_grec mld2r_grec[]; }; #define mld2r_type mld2r_hdr.icmp6_type @@ -55,7 +55,7 @@ struct mld2_query { #endif __u8 mld2q_qqic; __be16 mld2q_nsrcs; - struct in6_addr mld2q_srcs[0]; + struct in6_addr mld2q_srcs[]; }; #define mld2q_type mld2q_hdr.icmp6_type diff --git a/include/net/mpls_iptunnel.h b/include/net/mpls_iptunnel.h index 6b4759eae158..9deb3a3735da 100644 --- a/include/net/mpls_iptunnel.h +++ b/include/net/mpls_iptunnel.h @@ -11,7 +11,7 @@ struct mpls_iptunnel_encap { u8 ttl_propagate; u8 default_ttl; u8 reserved1; - u32 label[0]; + u32 label[]; }; static inline struct mpls_iptunnel_encap *mpls_lwtunnel_encap(struct lwtunnel_state *lwtstate) diff --git a/include/net/mptcp.h b/include/net/mptcp.h index c971d25431ea..0e7c5471010b 100644 --- a/include/net/mptcp.h +++ b/include/net/mptcp.h @@ -12,6 +12,8 @@ #include <linux/tcp.h> #include <linux/types.h> +struct seq_file; + /* MPTCP sk_buff extension data */ struct mptcp_ext { u64 data_ack; @@ -33,6 +35,21 @@ struct mptcp_out_options { u16 suboptions; u64 sndr_key; u64 rcvr_key; + union { + struct in_addr addr; +#if IS_ENABLED(CONFIG_MPTCP_IPV6) + struct in6_addr addr6; +#endif + }; + u8 addr_id; + u64 ahmac; + u8 rm_id; + u8 join_id; + u8 backup; + u32 nonce; + u64 thmac; + u32 token; + u8 hmac[20]; struct mptcp_ext ext_copy; #endif }; @@ -106,6 +123,9 @@ static inline bool mptcp_skb_can_collapse(const struct sk_buff *to, skb_ext_find(from, SKB_EXT_MPTCP)); } +bool mptcp_sk_is_subflow(const struct sock *sk); + +void mptcp_seq_show(struct seq_file *seq); #else static inline void mptcp_init(void) @@ -172,6 +192,12 @@ static inline bool mptcp_skb_can_collapse(const struct sk_buff *to, return true; } +static inline bool mptcp_sk_is_subflow(const struct sock *sk) +{ + return false; +} + +static inline void mptcp_seq_show(struct seq_file *seq) { } #endif /* CONFIG_MPTCP */ #if IS_ENABLED(CONFIG_MPTCP_IPV6) diff --git a/include/net/ndisc.h b/include/net/ndisc.h index b5ebeb3b0de0..7d107113f988 100644 --- a/include/net/ndisc.h +++ b/include/net/ndisc.h @@ -41,6 +41,7 @@ enum { ND_OPT_DNSSL = 31, /* RFC6106 */ 
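The statistics callbacks added to struct macsec_ops above hand the driver a macsec_context whose stats union points at the structure to fill. A sketch of a MAC-level (netdev) offload implementing the device counters; struct my_macsec, my_macsec_read_counter() and the MY_CNT_* indices are placeholders, and skipping work in the prepare phase follows the existing two-phase calling convention:

#include <linux/netdevice.h>
#include <net/macsec.h>

struct my_macsec;					/* placeholder */
u64 my_macsec_read_counter(struct my_macsec *dev, int idx);	/* placeholder */
enum { MY_CNT_OUT_UNTAGGED, MY_CNT_IN_NO_TAG, MY_CNT_IN_BAD_TAG,
       MY_CNT_IN_UNKNOWN_SCI };				/* placeholder counter ids */

static int my_macsec_get_dev_stats(struct macsec_context *ctx)
{
	struct my_macsec *dev = netdev_priv(ctx->netdev);
	struct macsec_dev_stats *s = ctx->stats.dev_stats;

	if (ctx->prepare)
		return 0;

	s->OutPktsUntagged  = my_macsec_read_counter(dev, MY_CNT_OUT_UNTAGGED);
	s->InPktsNoTag      = my_macsec_read_counter(dev, MY_CNT_IN_NO_TAG);
	s->InPktsBadTag     = my_macsec_read_counter(dev, MY_CNT_IN_BAD_TAG);
	s->InPktsUnknownSCI = my_macsec_read_counter(dev, MY_CNT_IN_UNKNOWN_SCI);
	return 0;
}

static const struct macsec_ops my_macsec_ops = {
	/* ... configuration callbacks ... */
	.mdo_get_dev_stats = my_macsec_get_dev_stats,
};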
ND_OPT_6CO = 34, /* RFC6775 */ ND_OPT_CAPTIVE_PORTAL = 37, /* RFC7710 */ + ND_OPT_PREF64 = 38, /* RFC-ietf-6man-ra-pref64-09 */ __ND_OPT_MAX }; @@ -80,12 +81,12 @@ extern struct neigh_table nd_tbl; struct nd_msg { struct icmp6hdr icmph; struct in6_addr target; - __u8 opt[0]; + __u8 opt[]; }; struct rs_msg { struct icmp6hdr icmph; - __u8 opt[0]; + __u8 opt[]; }; struct ra_msg { @@ -98,7 +99,7 @@ struct rd_msg { struct icmp6hdr icmph; struct in6_addr target; struct in6_addr dest; - __u8 opt[0]; + __u8 opt[]; }; struct nd_opt_hdr { diff --git a/include/net/neighbour.h b/include/net/neighbour.h index 8ec77bfdc1a4..e1476775769c 100644 --- a/include/net/neighbour.h +++ b/include/net/neighbour.h @@ -174,7 +174,7 @@ struct pneigh_entry { struct net_device *dev; u8 flags; u8 protocol; - u8 key[0]; + u8 key[]; }; /* diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h index 854d39ef1ca3..ab96fb59131c 100644 --- a/include/net/net_namespace.h +++ b/include/net/net_namespace.h @@ -168,6 +168,9 @@ struct net { #ifdef CONFIG_XFRM struct netns_xfrm xfrm; #endif + + atomic64_t net_cookie; /* written once */ + #if IS_ENABLED(CONFIG_IP_VS) struct netns_ipvs *ipvs; #endif @@ -225,6 +228,8 @@ extern struct list_head net_namespace_list; struct net *get_net_ns_by_pid(pid_t pid); struct net *get_net_ns_by_fd(int fd); +u64 net_gen_cookie(struct net *net); + #ifdef CONFIG_SYSCTL void ipx_register_sysctl(void); void ipx_unregister_sysctl(void); diff --git a/include/net/netfilter/nf_conntrack_acct.h b/include/net/netfilter/nf_conntrack_acct.h index f7a060c6eb28..7f44a771530e 100644 --- a/include/net/netfilter/nf_conntrack_acct.h +++ b/include/net/netfilter/nf_conntrack_acct.h @@ -65,6 +65,17 @@ static inline void nf_ct_set_acct(struct net *net, bool enable) #endif } +void nf_ct_acct_add(struct nf_conn *ct, u32 dir, unsigned int packets, + unsigned int bytes); + +static inline void nf_ct_acct_update(struct nf_conn *ct, u32 dir, + unsigned int bytes) +{ +#if IS_ENABLED(CONFIG_NF_CONNTRACK) + nf_ct_acct_add(ct, dir, 1, bytes); +#endif +} + void nf_conntrack_acct_pernet_init(struct net *net); int nf_conntrack_acct_init(void); diff --git a/include/net/netfilter/nf_conntrack_extend.h b/include/net/netfilter/nf_conntrack_extend.h index 5ae5295aa46d..e1e588387103 100644 --- a/include/net/netfilter/nf_conntrack_extend.h +++ b/include/net/netfilter/nf_conntrack_extend.h @@ -45,7 +45,7 @@ enum nf_ct_ext_id { struct nf_ct_ext { u8 offset[NF_CT_EXT_NUM]; u8 len; - char data[0]; + char data[]; }; static inline bool __nf_ct_ext_exist(const struct nf_ct_ext *ext, u8 id) diff --git a/include/net/netfilter/nf_conntrack_timeout.h b/include/net/netfilter/nf_conntrack_timeout.h index 6dd72396f534..659b0ea25b4d 100644 --- a/include/net/netfilter/nf_conntrack_timeout.h +++ b/include/net/netfilter/nf_conntrack_timeout.h @@ -14,7 +14,7 @@ struct nf_ct_timeout { __u16 l3num; const struct nf_conntrack_l4proto *l4proto; - char data[0]; + char data[]; }; struct ctnl_timeout { diff --git a/include/net/netfilter/nf_flow_table.h b/include/net/netfilter/nf_flow_table.h index e0f709d9d547..6bf69652f57d 100644 --- a/include/net/netfilter/nf_flow_table.h +++ b/include/net/netfilter/nf_flow_table.h @@ -16,6 +16,35 @@ struct nf_flow_rule; struct flow_offload; enum flow_offload_tuple_dir; +struct nf_flow_key { + struct flow_dissector_key_meta meta; + struct flow_dissector_key_control control; + struct flow_dissector_key_control enc_control; + struct flow_dissector_key_basic basic; + union { + struct flow_dissector_key_ipv4_addrs ipv4; + 
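nf_ct_acct_add(), added in the nf_conntrack_acct.h hunk above, lets a flow-offload path credit more than one packet per call, with nf_ct_acct_update() kept as the single-packet wrapper. A small sketch of feeding hardware flow counters back into conntrack accounting:

#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_acct.h>

static void my_flow_stats_sync(struct nf_conn *ct,
			       unsigned int pkts_orig, unsigned int bytes_orig,
			       unsigned int pkts_reply, unsigned int bytes_reply)
{
	/* One call per direction; counts come from the hardware flow entry. */
	nf_ct_acct_add(ct, IP_CT_DIR_ORIGINAL, pkts_orig, bytes_orig);
	nf_ct_acct_add(ct, IP_CT_DIR_REPLY, pkts_reply, bytes_reply);
}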
struct flow_dissector_key_ipv6_addrs ipv6; + }; + struct flow_dissector_key_keyid enc_key_id; + union { + struct flow_dissector_key_ipv4_addrs enc_ipv4; + struct flow_dissector_key_ipv6_addrs enc_ipv6; + }; + struct flow_dissector_key_tcp tcp; + struct flow_dissector_key_ports tp; +} __aligned(BITS_PER_LONG / 8); /* Ensure that we can do comparisons as longs. */ + +struct nf_flow_match { + struct flow_dissector dissector; + struct nf_flow_key key; + struct nf_flow_key mask; +}; + +struct nf_flow_rule { + struct nf_flow_match match; + struct flow_rule *rule; +}; + struct nf_flowtable_type { struct list_head list; int family; @@ -33,7 +62,8 @@ struct nf_flowtable_type { }; enum nf_flowtable_flags { - NF_FLOWTABLE_HW_OFFLOAD = 0x1, + NF_FLOWTABLE_HW_OFFLOAD = 0x1, /* NFT_FLOWTABLE_HW_OFFLOAD */ + NF_FLOWTABLE_COUNTER = 0x2, /* NFT_FLOWTABLE_COUNTER */ }; struct nf_flowtable { @@ -44,6 +74,7 @@ struct nf_flowtable { struct delayed_work gc_work; unsigned int flags; struct flow_block flow_block; + struct rw_semaphore flow_block_lock; /* Guards flow_block */ possible_net_t net; }; @@ -129,10 +160,18 @@ struct nf_flow_route { struct flow_offload *flow_offload_alloc(struct nf_conn *ct); void flow_offload_free(struct flow_offload *flow); +int nf_flow_table_offload_add_cb(struct nf_flowtable *flow_table, + flow_setup_cb_t *cb, void *cb_priv); +void nf_flow_table_offload_del_cb(struct nf_flowtable *flow_table, + flow_setup_cb_t *cb, void *cb_priv); + int flow_offload_route_init(struct flow_offload *flow, const struct nf_flow_route *route); int flow_offload_add(struct nf_flowtable *flow_table, struct flow_offload *flow); +void flow_offload_refresh(struct nf_flowtable *flow_table, + struct flow_offload *flow); + struct flow_offload_tuple_rhash *flow_offload_lookup(struct nf_flowtable *flow_table, struct flow_offload_tuple *tuple); void nf_flow_table_cleanup(struct net_device *dev); diff --git a/include/net/netfilter/nf_queue.h b/include/net/netfilter/nf_queue.h index 47088083667b..e770bba00066 100644 --- a/include/net/netfilter/nf_queue.h +++ b/include/net/netfilter/nf_queue.h @@ -14,7 +14,10 @@ struct nf_queue_entry { struct sk_buff *skb; unsigned int id; unsigned int hook_index; /* index in hook_entries->hook[] */ - +#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) + struct net_device *physin; + struct net_device *physout; +#endif struct nf_hook_state state; u16 size; /* sizeof(entry) + saved route keys */ @@ -35,7 +38,7 @@ void nf_unregister_queue_handler(struct net *net); void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict); void nf_queue_entry_get_refs(struct nf_queue_entry *entry); -void nf_queue_entry_release_refs(struct nf_queue_entry *entry); +void nf_queue_entry_free(struct nf_queue_entry *entry); static inline void init_hashrandom(u32 *jhash_initval) { diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h index 4170c033d461..6eb627b3c99b 100644 --- a/include/net/netfilter/nf_tables.h +++ b/include/net/netfilter/nf_tables.h @@ -224,7 +224,7 @@ int nft_validate_register_store(const struct nft_ctx *ctx, */ struct nft_userdata { u8 len; - unsigned char data[0]; + unsigned char data[]; }; /** @@ -266,6 +266,7 @@ struct nft_set_iter { * @size: number of set elements * @field_len: length of each field in concatenation, bytes * @field_count: number of concatenated fields in element + * @expr: set must support for expressions */ struct nft_set_desc { unsigned int klen; @@ -273,6 +274,7 @@ struct nft_set_desc { unsigned int size; u8 field_len[NFT_REG32_COUNT]; u8 
field_count; + bool expr; }; /** @@ -385,21 +387,14 @@ struct nft_set_ops { * struct nft_set_type - nf_tables set type * * @ops: set ops for this type - * @list: used internally - * @owner: module reference * @features: features supported by the implementation */ struct nft_set_type { const struct nft_set_ops ops; - struct list_head list; - struct module *owner; u32 features; }; #define to_set_type(o) container_of(o, struct nft_set_type, ops) -int nft_register_set(struct nft_set_type *type); -void nft_unregister_set(struct nft_set_type *type); - /** * struct nft_set - nf_tables set instance * @@ -423,6 +418,7 @@ void nft_unregister_set(struct nft_set_type *type); * @policy: set parameterization (see enum nft_set_policies) * @udlen: user data length * @udata: user data + * @expr: stateful expression * @ops: set ops * @flags: set flags * @genmask: generation mask @@ -451,6 +447,7 @@ struct nft_set { u16 policy; u16 udlen; unsigned char *udata; + struct nft_expr *expr; /* runtime data below here */ const struct nft_set_ops *ops ____cacheline_aligned; u16 flags:14, @@ -572,7 +569,7 @@ struct nft_set_ext_tmpl { struct nft_set_ext { u8 genmask; u8 offset[NFT_SET_EXT_NUM]; - char data[0]; + char data[]; }; static inline void nft_set_ext_prepare(struct nft_set_ext_tmpl *tmpl) @@ -673,6 +670,10 @@ static inline struct nft_object **nft_set_ext_obj(const struct nft_set_ext *ext) return nft_set_ext(ext, NFT_SET_EXT_OBJREF); } +struct nft_expr *nft_set_elem_expr_alloc(const struct nft_ctx *ctx, + const struct nft_set *set, + const struct nlattr *attr); + void *nft_set_elem_init(const struct nft_set *set, const struct nft_set_ext_tmpl *tmpl, const u32 *key, const u32 *key_end, const u32 *data, @@ -849,8 +850,7 @@ static inline void *nft_expr_priv(const struct nft_expr *expr) return (void *)expr->data; } -struct nft_expr *nft_expr_init(const struct nft_ctx *ctx, - const struct nlattr *nla); +int nft_expr_clone(struct nft_expr *dst, struct nft_expr *src); void nft_expr_destroy(const struct nft_ctx *ctx, struct nft_expr *expr); int nft_expr_dump(struct sk_buff *skb, unsigned int attr, const struct nft_expr *expr); @@ -895,6 +895,18 @@ static inline struct nft_userdata *nft_userdata(const struct nft_rule *rule) return (void *)&rule->data[rule->dlen]; } +static inline void nft_set_elem_update_expr(const struct nft_set_ext *ext, + struct nft_regs *regs, + const struct nft_pktinfo *pkt) +{ + struct nft_expr *expr; + + if (nft_set_ext_exists(ext, NFT_SET_EXT_EXPR)) { + expr = nft_set_ext_expr(ext); + expr->ops->eval(expr, regs, pkt); + } +} + /* * The last pointer isn't really necessary, but the compiler isn't able to * determine that the result of nft_expr_last() is always the same since it @@ -1253,9 +1265,6 @@ void nft_trace_notify(struct nft_traceinfo *info); #define MODULE_ALIAS_NFT_EXPR(name) \ MODULE_ALIAS("nft-expr-" name) -#define MODULE_ALIAS_NFT_SET() \ - MODULE_ALIAS("nft-set") - #define MODULE_ALIAS_NFT_OBJ(type) \ MODULE_ALIAS("nft-obj-" __stringify(type)) @@ -1385,7 +1394,7 @@ struct nft_trans { int msg_type; bool put_net; struct nft_ctx ctx; - char data[0]; + char data[]; }; struct nft_trans_rule { diff --git a/include/net/netfilter/nf_tables_core.h b/include/net/netfilter/nf_tables_core.h index 29e7e1021267..78516de14d31 100644 --- a/include/net/netfilter/nf_tables_core.h +++ b/include/net/netfilter/nf_tables_core.h @@ -69,12 +69,13 @@ extern const struct nft_expr_ops nft_payload_fast_ops; extern struct static_key_false nft_counters_enabled; extern struct static_key_false nft_trace_enabled; 
-extern struct nft_set_type nft_set_rhash_type; -extern struct nft_set_type nft_set_hash_type; -extern struct nft_set_type nft_set_hash_fast_type; -extern struct nft_set_type nft_set_rbtree_type; -extern struct nft_set_type nft_set_bitmap_type; -extern struct nft_set_type nft_set_pipapo_type; +extern const struct nft_set_type nft_set_rhash_type; +extern const struct nft_set_type nft_set_hash_type; +extern const struct nft_set_type nft_set_hash_fast_type; +extern const struct nft_set_type nft_set_rbtree_type; +extern const struct nft_set_type nft_set_bitmap_type; +extern const struct nft_set_type nft_set_pipapo_type; +extern const struct nft_set_type nft_set_pipapo_avx2_type; struct nft_expr; struct nft_regs; diff --git a/include/net/netlink.h b/include/net/netlink.h index 56c365dc6dc7..67c57d6942e3 100644 --- a/include/net/netlink.h +++ b/include/net/netlink.h @@ -1466,6 +1466,21 @@ static inline int nla_put_in6_addr(struct sk_buff *skb, int attrtype, } /** + * nla_put_bitfield32 - Add a bitfield32 netlink attribute to a socket buffer + * @skb: socket buffer to add attribute to + * @attrtype: attribute type + * @value: value carrying bits + * @selector: selector of valid bits + */ +static inline int nla_put_bitfield32(struct sk_buff *skb, int attrtype, + __u32 value, __u32 selector) +{ + struct nla_bitfield32 tmp = { value, selector, }; + + return nla_put(skb, attrtype, sizeof(tmp), &tmp); +} + +/** * nla_get_u32 - return payload of u32 attribute * @nla: u32 netlink attribute */ diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h index 08b98414d94e..154b8f01499b 100644 --- a/include/net/netns/ipv4.h +++ b/include/net/netns/ipv4.h @@ -101,6 +101,7 @@ struct netns_ipv4 { int sysctl_ip_fwd_use_pmtu; int sysctl_ip_fwd_update_priority; int sysctl_ip_nonlocal_bind; + int sysctl_ip_autobind_reuse; /* Shall we try to damage output packets if routing dev changes? 
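nla_put_bitfield32() in the netlink.h hunk above wraps the value/selector pair of struct nla_bitfield32 in a single call. A trivial sketch; MY_ATTR_FLAGS is a placeholder attribute type:

#include <net/netlink.h>

#define MY_ATTR_FLAGS	1	/* placeholder attribute type */

static int my_put_flags(struct sk_buff *skb, u32 flags, u32 valid_mask)
{
	/* Only bits set in valid_mask are meaningful to the receiver. */
	if (nla_put_bitfield32(skb, MY_ATTR_FLAGS, flags, valid_mask))
		return -EMSGSIZE;
	return 0;
}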
*/ int sysctl_ip_dynaddr; int sysctl_ip_early_demux; diff --git a/include/net/netns/mib.h b/include/net/netns/mib.h index b5fdb108d602..59b2c3a3db42 100644 --- a/include/net/netns/mib.h +++ b/include/net/netns/mib.h @@ -27,6 +27,9 @@ struct netns_mib { #if IS_ENABLED(CONFIG_TLS) DEFINE_SNMP_STAT(struct linux_tls_mib, tls_statistics); #endif +#ifdef CONFIG_MPTCP + DEFINE_SNMP_STAT(struct mptcp_mib, mptcp_statistics); +#endif }; #endif diff --git a/include/net/nexthop.h b/include/net/nexthop.h index 331ebbc94fe7..c440ccc861fc 100644 --- a/include/net/nexthop.h +++ b/include/net/nexthop.h @@ -73,7 +73,7 @@ struct nh_group { u16 num_nh; bool mpath; bool has_v4; - struct nh_grp_entry nh_entries[0]; + struct nh_grp_entry nh_entries[]; }; struct nexthop { diff --git a/include/net/nfc/nci.h b/include/net/nfc/nci.h index 6ab5a83f597c..0550e0380b8d 100644 --- a/include/net/nfc/nci.h +++ b/include/net/nfc/nci.h @@ -244,13 +244,13 @@ struct dest_spec_params { struct core_conn_create_dest_spec_params { __u8 type; __u8 length; - __u8 value[0]; + __u8 value[]; } __packed; struct nci_core_conn_create_cmd { __u8 destination_type; __u8 number_destination_params; - struct core_conn_create_dest_spec_params params[0]; + struct core_conn_create_dest_spec_params params[]; } __packed; #define NCI_OP_CORE_CONN_CLOSE_CMD nci_opcode_pack(NCI_GID_CORE, 0x05) @@ -321,7 +321,7 @@ struct nci_core_init_rsp_1 { __u8 status; __le32 nfcc_features; __u8 num_supported_rf_interfaces; - __u8 supported_rf_interfaces[0]; /* variable size array */ + __u8 supported_rf_interfaces[]; /* variable size array */ /* continuted in nci_core_init_rsp_2 */ } __packed; @@ -338,7 +338,7 @@ struct nci_core_init_rsp_2 { struct nci_core_set_config_rsp { __u8 status; __u8 num_params; - __u8 params_id[0]; /* variable size array */ + __u8 params_id[]; /* variable size array */ } __packed; #define NCI_OP_CORE_CONN_CREATE_RSP nci_opcode_pack(NCI_GID_CORE, 0x04) @@ -501,18 +501,18 @@ struct nci_rf_nfcee_action_ntf { __u8 nfcee_id; __u8 trigger; __u8 supported_data_length; - __u8 supported_data[0]; + __u8 supported_data[]; } __packed; #define NCI_OP_NFCEE_DISCOVER_NTF nci_opcode_pack(NCI_GID_NFCEE_MGMT, 0x00) struct nci_nfcee_supported_protocol { __u8 num_protocol; - __u8 supported_protocol[0]; + __u8 supported_protocol[]; } __packed; struct nci_nfcee_information_tlv { __u8 num_tlv; - __u8 information_tlv[0]; + __u8 information_tlv[]; } __packed; struct nci_nfcee_discover_ntf { diff --git a/include/net/nfc/nfc.h b/include/net/nfc/nfc.h index 5d277d68fd8d..2cd3a261bcbc 100644 --- a/include/net/nfc/nfc.h +++ b/include/net/nfc/nfc.h @@ -146,7 +146,7 @@ struct nfc_evt_transaction { u32 aid_len; u8 aid[NFC_MAX_AID_LENGTH]; u8 params_len; - u8 params[0]; + u8 params[]; } __packed; struct nfc_genl_data { diff --git a/include/net/page_pool.h b/include/net/page_pool.h index cfbed00ba7ee..81d7773f96cd 100644 --- a/include/net/page_pool.h +++ b/include/net/page_pool.h @@ -151,6 +151,7 @@ struct page_pool *page_pool_create(const struct page_pool_params *params); #ifdef CONFIG_PAGE_POOL void page_pool_destroy(struct page_pool *pool); void page_pool_use_xdp_mem(struct page_pool *pool, void (*disconnect)(void *)); +void page_pool_release_page(struct page_pool *pool, struct page *page); #else static inline void page_pool_destroy(struct page_pool *pool) { @@ -160,41 +161,32 @@ static inline void page_pool_use_xdp_mem(struct page_pool *pool, void (*disconnect)(void *)) { } +static inline void page_pool_release_page(struct page_pool *pool, + struct page *page) +{ +} 
#endif -/* Never call this directly, use helpers below */ -void __page_pool_put_page(struct page_pool *pool, struct page *page, - unsigned int dma_sync_size, bool allow_direct); +void page_pool_put_page(struct page_pool *pool, struct page *page, + unsigned int dma_sync_size, bool allow_direct); -static inline void page_pool_put_page(struct page_pool *pool, - struct page *page, bool allow_direct) +/* Same as above but will try to sync the entire area pool->max_len */ +static inline void page_pool_put_full_page(struct page_pool *pool, + struct page *page, bool allow_direct) { /* When page_pool isn't compiled-in, net/core/xdp.c doesn't * allow registering MEM_TYPE_PAGE_POOL, but shield linker. */ #ifdef CONFIG_PAGE_POOL - __page_pool_put_page(pool, page, -1, allow_direct); + page_pool_put_page(pool, page, -1, allow_direct); #endif } -/* Very limited use-cases allow recycle direct */ + +/* Same as above but the caller must guarantee safe context. e.g NAPI */ static inline void page_pool_recycle_direct(struct page_pool *pool, struct page *page) { - __page_pool_put_page(pool, page, -1, true); -} - -/* Disconnects a page (from a page_pool). API users can have a need - * to disconnect a page (from a page_pool), to allow it to be used as - * a regular page (that will eventually be returned to the normal - * page-allocator via put_page). - */ -void page_pool_unmap_page(struct page_pool *pool, struct page *page); -static inline void page_pool_release_page(struct page_pool *pool, - struct page *page) -{ -#ifdef CONFIG_PAGE_POOL - page_pool_unmap_page(pool, page); -#endif + page_pool_put_full_page(pool, page, true); } static inline dma_addr_t page_pool_get_dma_addr(struct page *page) diff --git a/include/net/pie.h b/include/net/pie.h index fd5a37cb7993..3fe2361e03b4 100644 --- a/include/net/pie.h +++ b/include/net/pie.h @@ -8,7 +8,7 @@ #include <net/inet_ecn.h> #include <net/pkt_sched.h> -#define MAX_PROB U64_MAX +#define MAX_PROB (U64_MAX >> BITS_PER_BYTE) #define DTIME_INVALID U64_MAX #define QUEUE_THRESHOLD 16384 #define DQCOUNT_INVALID -1 @@ -38,16 +38,15 @@ struct pie_params { /** * struct pie_vars - contains pie variables - * @qdelay: current queue delay - * @qdelay_old: queue delay in previous qdelay calculation - * @burst_time: burst time allowance - * @dq_tstamp: timestamp at which dq rate was last calculated - * @prob: drop probability - * @accu_prob: accumulated drop probability - * @dq_count: number of bytes dequeued in a measurement cycle - * @avg_dq_rate: calculated average dq rate - * @qlen_old: queue length during previous qdelay calculation - * @accu_prob_overflows: number of times accu_prob overflows + * @qdelay: current queue delay + * @qdelay_old: queue delay in previous qdelay calculation + * @burst_time: burst time allowance + * @dq_tstamp: timestamp at which dq rate was last calculated + * @prob: drop probability + * @accu_prob: accumulated drop probability + * @dq_count: number of bytes dequeued in a measurement cycle + * @avg_dq_rate: calculated average dq rate + * @backlog_old: queue backlog during previous qdelay calculation */ struct pie_vars { psched_time_t qdelay; @@ -58,8 +57,7 @@ struct pie_vars { u64 accu_prob; u64 dq_count; u32 avg_dq_rate; - u32 qlen_old; - u8 accu_prob_overflows; + u32 backlog_old; }; /** @@ -107,7 +105,6 @@ static inline void pie_vars_init(struct pie_vars *vars) vars->accu_prob = 0; vars->dq_count = DQCOUNT_INVALID; vars->avg_dq_rate = 0; - vars->accu_prob_overflows = 0; } static inline struct pie_skb_cb *get_pie_cb(const struct sk_buff *skb) @@ 
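A minimal sketch of the renamed page_pool return path, assuming a driver RX cleanup routine that knows whether it is running inside its own NAPI poll.

static void example_rx_return_page(struct page_pool *pool, struct page *page,
				   bool in_napi)
{
	if (in_napi)
		/* Safe-context fast path: recycle straight into the cache. */
		page_pool_recycle_direct(pool, page);
	else
		/* Sync the whole pool->max_len area before recycling. */
		page_pool_put_full_page(pool, page, false);
}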
-127,12 +124,12 @@ static inline void pie_set_enqueue_time(struct sk_buff *skb) } bool pie_drop_early(struct Qdisc *sch, struct pie_params *params, - struct pie_vars *vars, u32 qlen, u32 packet_size); + struct pie_vars *vars, u32 backlog, u32 packet_size); void pie_process_dequeue(struct sk_buff *skb, struct pie_params *params, - struct pie_vars *vars, u32 qlen); + struct pie_vars *vars, u32 backlog); void pie_calculate_probability(struct pie_params *params, struct pie_vars *vars, - u32 qlen); + u32 backlog); #endif diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h index a972244ab193..04aa0649f3b0 100644 --- a/include/net/pkt_cls.h +++ b/include/net/pkt_cls.h @@ -72,6 +72,10 @@ static inline struct Qdisc *tcf_block_q(struct tcf_block *block) int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp, struct tcf_result *res, bool compat_mode); +int tcf_classify_ingress(struct sk_buff *skb, + const struct tcf_block *ingress_block, + const struct tcf_proto *tp, struct tcf_result *res, + bool compat_mode); #else static inline bool tcf_block_shared(struct tcf_block *block) @@ -133,6 +137,15 @@ static inline int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp, { return TC_ACT_UNSPEC; } + +static inline int tcf_classify_ingress(struct sk_buff *skb, + const struct tcf_block *ingress_block, + const struct tcf_proto *tp, + struct tcf_result *res, bool compat_mode) +{ + return TC_ACT_UNSPEC; +} + #endif static inline unsigned long @@ -249,7 +262,8 @@ static inline void tcf_exts_put_net(struct tcf_exts *exts) static inline void tcf_exts_stats_update(const struct tcf_exts *exts, - u64 bytes, u64 packets, u64 lastuse) + u64 bytes, u64 packets, u64 lastuse, + u8 used_hw_stats, bool used_hw_stats_valid) { #ifdef CONFIG_NET_CLS_ACT int i; @@ -260,6 +274,8 @@ tcf_exts_stats_update(const struct tcf_exts *exts, struct tc_action *a = exts->actions[i]; tcf_action_stats_update(a, bytes, packets, lastuse, true); + a->used_hw_stats = used_hw_stats; + a->used_hw_stats_valid = used_hw_stats_valid; } preempt_enable(); @@ -489,12 +505,16 @@ tcf_change_indev(struct net *net, struct nlattr *indev_tlv, struct net_device *dev; if (nla_strlcpy(indev, indev_tlv, IFNAMSIZ) >= IFNAMSIZ) { - NL_SET_ERR_MSG(extack, "Interface name too long"); + NL_SET_ERR_MSG_ATTR(extack, indev_tlv, + "Interface name too long"); return -EINVAL; } dev = __dev_get_by_name(net, indev); - if (!dev) + if (!dev) { + NL_SET_ERR_MSG_ATTR(extack, indev_tlv, + "Network device not found"); return -ENODEV; + } return dev->ifindex; } @@ -509,7 +529,7 @@ tcf_match_indev(struct sk_buff *skb, int ifindex) } int tc_setup_flow_action(struct flow_action *flow_action, - const struct tcf_exts *exts, bool rtnl_held); + const struct tcf_exts *exts); void tc_cleanup_flow_action(struct flow_action *flow_action); int tc_setup_cb_call(struct tcf_block *block, enum tc_setup_type type, @@ -727,6 +747,7 @@ struct tc_red_qopt_offload_params { u32 limit; bool is_ecn; bool is_harddrop; + bool is_nodrop; struct gnet_stats_queue *qstats; }; @@ -881,4 +902,19 @@ struct tc_tbf_qopt_offload { }; }; +enum tc_fifo_command { + TC_FIFO_REPLACE, + TC_FIFO_DESTROY, + TC_FIFO_STATS, +}; + +struct tc_fifo_qopt_offload { + enum tc_fifo_command command; + u32 handle; + u32 parent; + union { + struct tc_qopt_offload_stats stats; + }; +}; + #endif diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h index 6a70845bd9ab..9092e697059e 100644 --- a/include/net/pkt_sched.h +++ b/include/net/pkt_sched.h @@ -75,7 +75,15 @@ struct qdisc_watchdog { void 
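A minimal sketch of the extended tcf_exts_stats_update() call, assuming a classifier's hardware-stats callback; where the hw_stats type comes from is driver specific and hypothetical here.

static void example_report_hw_stats(struct tcf_exts *exts, u64 bytes,
				    u64 packets, u64 lastuse, u8 hw_stats)
{
	/* hw_stats describes which kind of hardware counters were read;
	 * the final argument marks that information as valid.
	 */
	tcf_exts_stats_update(exts, bytes, packets, lastuse, hw_stats, true);
}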
qdisc_watchdog_init_clockid(struct qdisc_watchdog *wd, struct Qdisc *qdisc, clockid_t clockid); void qdisc_watchdog_init(struct qdisc_watchdog *wd, struct Qdisc *qdisc); -void qdisc_watchdog_schedule_ns(struct qdisc_watchdog *wd, u64 expires); + +void qdisc_watchdog_schedule_range_ns(struct qdisc_watchdog *wd, u64 expires, + u64 delta_ns); + +static inline void qdisc_watchdog_schedule_ns(struct qdisc_watchdog *wd, + u64 expires) +{ + return qdisc_watchdog_schedule_range_ns(wd, expires, 0ULL); +} static inline void qdisc_watchdog_schedule(struct qdisc_watchdog *wd, psched_time_t expires) @@ -181,7 +189,7 @@ struct tc_taprio_qopt_offload { u64 cycle_time_extension; size_t num_entries; - struct tc_taprio_sched_entry entries[0]; + struct tc_taprio_sched_entry entries[]; }; /* Reference counting */ diff --git a/include/net/red.h b/include/net/red.h index 9665582c4687..fc455445f4b2 100644 --- a/include/net/red.h +++ b/include/net/red.h @@ -179,6 +179,44 @@ static inline bool red_check_params(u32 qth_min, u32 qth_max, u8 Wlog) return true; } +static inline int red_get_flags(unsigned char qopt_flags, + unsigned char historic_mask, + struct nlattr *flags_attr, + unsigned char supported_mask, + struct nla_bitfield32 *p_flags, + unsigned char *p_userbits, + struct netlink_ext_ack *extack) +{ + struct nla_bitfield32 flags; + + if (qopt_flags && flags_attr) { + NL_SET_ERR_MSG_MOD(extack, "flags should be passed either through qopt, or through a dedicated attribute"); + return -EINVAL; + } + + if (flags_attr) { + flags = nla_get_bitfield32(flags_attr); + } else { + flags.selector = historic_mask; + flags.value = qopt_flags & historic_mask; + } + + *p_flags = flags; + *p_userbits = qopt_flags & ~historic_mask; + return 0; +} + +static inline int red_validate_flags(unsigned char flags, + struct netlink_ext_ack *extack) +{ + if ((flags & TC_RED_NODROP) && !(flags & TC_RED_ECN)) { + NL_SET_ERR_MSG_MOD(extack, "nodrop mode is only meaningful with ECN"); + return -EINVAL; + } + + return 0; +} + static inline void red_set_parms(struct red_parms *p, u32 qth_min, u32 qth_max, u8 Wlog, u8 Plog, u8 Scell_log, u8 *stab, u32 max_P) diff --git a/include/net/route.h b/include/net/route.h index a9c60fc68e36..ff021cab657e 100644 --- a/include/net/route.h +++ b/include/net/route.h @@ -128,6 +128,12 @@ static inline struct rtable *__ip_route_output_key(struct net *net, struct rtable *ip_route_output_flow(struct net *, struct flowi4 *flp, const struct sock *sk); +struct rtable *ip_route_output_tunnel(struct sk_buff *skb, + struct net_device *dev, + struct net *net, __be32 *saddr, + const struct ip_tunnel_info *info, + u8 protocol, bool use_cache); + struct dst_entry *ipv4_blackhole_route(struct net *net, struct dst_entry *dst_orig); @@ -225,7 +231,7 @@ int ip_rt_ioctl(struct net *, unsigned int cmd, struct rtentry *rt); void ip_rt_get_source(u8 *src, struct sk_buff *skb, struct rtable *rt); struct rtable *rt_dst_alloc(struct net_device *dev, unsigned int flags, u16 type, - bool nopolicy, bool noxfrm, bool will_cache); + bool nopolicy, bool noxfrm); struct rtable *rt_dst_clone(struct net_device *dev, struct rtable *rt); struct in_ifaddr; diff --git a/include/net/rpl.h b/include/net/rpl.h new file mode 100644 index 000000000000..dceff60e8baf --- /dev/null +++ b/include/net/rpl.h @@ -0,0 +1,46 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * RPL implementation + * + * Author: + * (C) 2020 Alexander Aring <alex.aring@gmail.com> + */ + +#ifndef _NET_RPL_H +#define _NET_RPL_H + +#include <linux/rpl.h> + +#if 
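A minimal sketch of the new RED flag helpers, assuming a qdisc change path that receives both the legacy qopt flags and an optional dedicated flags attribute; the historic and supported masks are supplied by the (hypothetical) caller.

static int example_red_parse_flags(unsigned char qopt_flags,
				   unsigned char historic_mask,
				   struct nlattr *flags_attr,
				   unsigned char supported_mask,
				   struct netlink_ext_ack *extack)
{
	struct nla_bitfield32 flags;
	unsigned char userbits;
	int err;

	/* Reject mixing qopt flags with the attribute, then merge. */
	err = red_get_flags(qopt_flags, historic_mask, flags_attr,
			    supported_mask, &flags, &userbits, extack);
	if (err)
		return err;

	/* e.g. TC_RED_NODROP without TC_RED_ECN is rejected here. */
	return red_validate_flags(flags.value, extack);
}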
IS_ENABLED(CONFIG_IPV6_RPL_LWTUNNEL) +extern int rpl_init(void); +extern void rpl_exit(void); +#else +static inline int rpl_init(void) +{ + return 0; +} + +static inline void rpl_exit(void) {} +#endif + +/* Worst decompression memory usage ipv6 address (16) + pad 7 */ +#define IPV6_RPL_SRH_WORST_SWAP_SIZE (sizeof(struct in6_addr) + 7) + +static inline size_t ipv6_rpl_srh_alloc_size(unsigned char n) +{ + return sizeof(struct ipv6_rpl_sr_hdr) + + ((n + 1) * sizeof(struct in6_addr)); +} + +size_t ipv6_rpl_srh_size(unsigned char n, unsigned char cmpri, + unsigned char cmpre); + +void ipv6_rpl_srh_decompress(struct ipv6_rpl_sr_hdr *outhdr, + const struct ipv6_rpl_sr_hdr *inhdr, + const struct in6_addr *daddr, unsigned char n); + +void ipv6_rpl_srh_compress(struct ipv6_rpl_sr_hdr *outhdr, + const struct ipv6_rpl_sr_hdr *inhdr, + const struct in6_addr *daddr, unsigned char n); + +#endif /* _NET_RPL_H */ diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h index 151208704ed2..25d2ec4c8f00 100644 --- a/include/net/sch_generic.h +++ b/include/net/sch_generic.h @@ -675,22 +675,6 @@ void __qdisc_calculate_pkt_len(struct sk_buff *skb, const struct qdisc_size_table *stab); int skb_do_redirect(struct sk_buff *); -static inline void skb_reset_tc(struct sk_buff *skb) -{ -#ifdef CONFIG_NET_CLS_ACT - skb->tc_redirected = 0; -#endif -} - -static inline bool skb_is_tc_redirected(const struct sk_buff *skb) -{ -#ifdef CONFIG_NET_CLS_ACT - return skb->tc_redirected; -#else - return false; -#endif -} - static inline bool skb_at_tc_ingress(const struct sk_buff *skb) { #ifdef CONFIG_NET_CLS_ACT @@ -1269,6 +1253,7 @@ static inline void psched_ratecfg_getrate(struct tc_ratespec *res, */ struct mini_Qdisc { struct tcf_proto *filter_list; + struct tcf_block *block; struct gnet_stats_basic_cpu __percpu *cpu_bstats; struct gnet_stats_queue __percpu *cpu_qstats; struct rcu_head rcu; @@ -1295,6 +1280,8 @@ void mini_qdisc_pair_swap(struct mini_Qdisc_pair *miniqp, struct tcf_proto *tp_head); void mini_qdisc_pair_init(struct mini_Qdisc_pair *miniqp, struct Qdisc *qdisc, struct mini_Qdisc __rcu **p_miniq); +void mini_qdisc_pair_block_init(struct mini_Qdisc_pair *miniqp, + struct tcf_block *block); static inline int skb_tc_reinsert(struct sk_buff *skb, struct tcf_result *res) { diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h index 314a2fa21d6b..fb42c90348d3 100644 --- a/include/net/sctp/structs.h +++ b/include/net/sctp/structs.h @@ -326,7 +326,7 @@ struct sctp_cookie { * the association TCB is re-constructed from the cookie. */ __u32 raw_addr_list_len; - struct sctp_init_chunk peer_init[0]; + struct sctp_init_chunk peer_init[]; }; diff --git a/include/net/sock.h b/include/net/sock.h index 328564525526..6d84784d33fa 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -527,10 +527,43 @@ enum sk_pacing { SK_PACING_FQ = 2, }; +/* Pointer stored in sk_user_data might not be suitable for copying + * when cloning the socket. For instance, it can point to a reference + * counted object. sk_user_data bottom bit is set if pointer must not + * be copied. 
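A minimal sketch of sizing a buffer for a decompressed RPL source routing header carrying n + 1 addresses, using the helper above (kzalloc() from <linux/slab.h>).

static struct ipv6_rpl_sr_hdr *example_rpl_srh_alloc(unsigned char n, gfp_t gfp)
{
	/* Room for the header plus n + 1 struct in6_addr entries. */
	return kzalloc(ipv6_rpl_srh_alloc_size(n), gfp);
}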
+ */ +#define SK_USER_DATA_NOCOPY 1UL +#define SK_USER_DATA_PTRMASK ~(SK_USER_DATA_NOCOPY) + +/** + * sk_user_data_is_nocopy - Test if sk_user_data pointer must not be copied + * @sk: socket + */ +static inline bool sk_user_data_is_nocopy(const struct sock *sk) +{ + return ((uintptr_t)sk->sk_user_data & SK_USER_DATA_NOCOPY); +} + #define __sk_user_data(sk) ((*((void __rcu **)&(sk)->sk_user_data))) -#define rcu_dereference_sk_user_data(sk) rcu_dereference(__sk_user_data((sk))) -#define rcu_assign_sk_user_data(sk, ptr) rcu_assign_pointer(__sk_user_data((sk)), ptr) +#define rcu_dereference_sk_user_data(sk) \ +({ \ + void *__tmp = rcu_dereference(__sk_user_data((sk))); \ + (void *)((uintptr_t)__tmp & SK_USER_DATA_PTRMASK); \ +}) +#define rcu_assign_sk_user_data(sk, ptr) \ +({ \ + uintptr_t __tmp = (uintptr_t)(ptr); \ + WARN_ON_ONCE(__tmp & ~SK_USER_DATA_PTRMASK); \ + rcu_assign_pointer(__sk_user_data((sk)), __tmp); \ +}) +#define rcu_assign_sk_user_data_nocopy(sk, ptr) \ +({ \ + uintptr_t __tmp = (uintptr_t)(ptr); \ + WARN_ON_ONCE(__tmp & ~SK_USER_DATA_PTRMASK); \ + rcu_assign_pointer(__sk_user_data((sk)), \ + __tmp | SK_USER_DATA_NOCOPY); \ +}) /* * SK_CAN_REUSE and SK_NO_REUSE on a socket mean that the socket is OK @@ -1626,6 +1659,7 @@ void sock_rfree(struct sk_buff *skb); void sock_efree(struct sk_buff *skb); #ifdef CONFIG_INET void sock_edemux(struct sk_buff *skb); +void sock_pfree(struct sk_buff *skb); #else #define sock_edemux sock_efree #endif @@ -2493,16 +2527,14 @@ void sock_net_set(struct sock *sk, struct net *net) write_pnet(&sk->sk_net, net); } -static inline struct sock *skb_steal_sock(struct sk_buff *skb) +static inline bool +skb_sk_is_prefetched(struct sk_buff *skb) { - if (skb->sk) { - struct sock *sk = skb->sk; - - skb->destructor = NULL; - skb->sk = NULL; - return sk; - } - return NULL; +#ifdef CONFIG_INET + return skb->destructor == sock_pfree; +#else + return false; +#endif /* CONFIG_INET */ } /* This helper checks if a socket is a full socket, @@ -2513,6 +2545,35 @@ static inline bool sk_fullsock(const struct sock *sk) return (1 << sk->sk_state) & ~(TCPF_TIME_WAIT | TCPF_NEW_SYN_RECV); } +static inline bool +sk_is_refcounted(struct sock *sk) +{ + /* Only full sockets have sk->sk_flags. */ + return !sk_fullsock(sk) || !sock_flag(sk, SOCK_RCU_FREE); +} + +/** + * skb_steal_sock + * @skb to steal the socket from + * @refcounted is set to true if the socket is reference-counted + */ +static inline struct sock * +skb_steal_sock(struct sk_buff *skb, bool *refcounted) +{ + if (skb->sk) { + struct sock *sk = skb->sk; + + *refcounted = true; + if (skb_sk_is_prefetched(skb)) + *refcounted = sk_is_refcounted(sk); + skb->destructor = NULL; + skb->sk = NULL; + return sk; + } + *refcounted = false; + return NULL; +} + /* Checks if this SKB belongs to an HW offloaded socket * and whether any SW fallbacks are required based on dev. * Check decrypted mark in case skb_orphan() cleared socket. 
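A minimal sketch of the reworked skb_steal_sock(), which now reports whether the stolen socket is reference counted so a BPF/early-demux prefetched, RCU-freed socket is not over-put; the delivery step is elided.

static void example_deliver(struct sk_buff *skb)
{
	bool refcounted;
	struct sock *sk = skb_steal_sock(skb, &refcounted);

	if (!sk)
		return;

	/* ... hand the skb to sk ... */

	if (refcounted)
		sock_put(sk);	/* only refcounted sockets need the put */
}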
diff --git a/include/net/sock_reuseport.h b/include/net/sock_reuseport.h index 43f4a818d88f..505f1e18e9bf 100644 --- a/include/net/sock_reuseport.h +++ b/include/net/sock_reuseport.h @@ -24,7 +24,7 @@ struct sock_reuseport { unsigned int bind_inany:1; unsigned int has_conns:1; struct bpf_prog __rcu *prog; /* optional BPF sock selector */ - struct sock *socks[0]; /* array of sock pointers */ + struct sock *socks[]; /* array of sock pointers */ }; extern int reuseport_alloc(struct sock *sk, bool bind_inany); @@ -55,6 +55,4 @@ static inline bool reuseport_has_conns(struct sock *sk, bool set) return ret; } -int reuseport_get_id(struct sock_reuseport *reuse); - #endif /* _SOCK_REUSEPORT_H */ diff --git a/include/net/tc_act/tc_ct.h b/include/net/tc_act/tc_ct.h index bdc20ab3b88d..79654bcb9a29 100644 --- a/include/net/tc_act/tc_ct.h +++ b/include/net/tc_act/tc_ct.h @@ -25,6 +25,9 @@ struct tcf_ct_params { u16 ct_action; struct rcu_head rcu; + + struct tcf_ct_flow_table *ct_ft; + struct nf_flowtable *nf_ft; }; struct tcf_ct { @@ -33,8 +36,10 @@ struct tcf_ct { }; #define to_ct(a) ((struct tcf_ct *)a) -#define to_ct_params(a) ((struct tcf_ct_params *) \ - rtnl_dereference((to_ct(a)->params))) +#define to_ct_params(a) \ + ((struct tcf_ct_params *) \ + rcu_dereference_protected(to_ct(a)->params, \ + lockdep_is_held(&a->tcfa_lock))) static inline uint16_t tcf_ct_zone(const struct tc_action *a) { @@ -46,11 +51,27 @@ static inline int tcf_ct_action(const struct tc_action *a) return to_ct_params(a)->ct_action; } +static inline struct nf_flowtable *tcf_ct_ft(const struct tc_action *a) +{ + return to_ct_params(a)->nf_ft; +} + #else static inline uint16_t tcf_ct_zone(const struct tc_action *a) { return 0; } static inline int tcf_ct_action(const struct tc_action *a) { return 0; } +static inline struct nf_flowtable *tcf_ct_ft(const struct tc_action *a) +{ + return NULL; +} #endif /* CONFIG_NF_CONNTRACK */ +#if IS_ENABLED(CONFIG_NET_ACT_CT) +void tcf_ct_flow_table_restore_skb(struct sk_buff *skb, unsigned long cookie); +#else +static inline void +tcf_ct_flow_table_restore_skb(struct sk_buff *skb, unsigned long cookie) { } +#endif + static inline bool is_tcf_ct(const struct tc_action *a) { #if defined(CONFIG_NET_CLS_ACT) && IS_ENABLED(CONFIG_NF_CONNTRACK) diff --git a/include/net/tc_act/tc_police.h b/include/net/tc_act/tc_police.h index cfdc7cb82cad..f098ad4424be 100644 --- a/include/net/tc_act/tc_police.h +++ b/include/net/tc_act/tc_police.h @@ -54,7 +54,8 @@ static inline u64 tcf_police_rate_bytes_ps(const struct tc_action *act) struct tcf_police *police = to_police(act); struct tcf_police_params *params; - params = rcu_dereference_bh_rtnl(police->params); + params = rcu_dereference_protected(police->params, + lockdep_is_held(&police->tcf_lock)); return params->rate.rate_bytes_ps; } @@ -63,7 +64,8 @@ static inline s64 tcf_police_tcfp_burst(const struct tc_action *act) struct tcf_police *police = to_police(act); struct tcf_police_params *params; - params = rcu_dereference_bh_rtnl(police->params); + params = rcu_dereference_protected(police->params, + lockdep_is_held(&police->tcf_lock)); return params->tcfp_burst; } diff --git a/include/net/tc_act/tc_skbedit.h b/include/net/tc_act/tc_skbedit.h index b22a1f641f02..00bfee70609e 100644 --- a/include/net/tc_act/tc_skbedit.h +++ b/include/net/tc_act/tc_skbedit.h @@ -27,8 +27,8 @@ struct tcf_skbedit { }; #define to_skbedit(a) ((struct tcf_skbedit *)a) -/* Return true iff action is mark */ -static inline bool is_tcf_skbedit_mark(const struct tc_action *a) +/* Return 
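A minimal sketch of an offload driver using the new accessor to reach the conntrack flow table behind an act_ct action; is_tcf_ct() is the existing type check from this header.

static struct nf_flowtable *example_act_ct_flowtable(const struct tc_action *act)
{
	if (!is_tcf_ct(act))
		return NULL;

	return tcf_ct_ft(act);
}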
true iff action is the one identified by FLAG. */ +static inline bool is_tcf_skbedit_with_flag(const struct tc_action *a, u32 flag) { #ifdef CONFIG_NET_CLS_ACT u32 flags; @@ -37,12 +37,18 @@ static inline bool is_tcf_skbedit_mark(const struct tc_action *a) rcu_read_lock(); flags = rcu_dereference(to_skbedit(a)->params)->flags; rcu_read_unlock(); - return flags == SKBEDIT_F_MARK; + return flags == flag; } #endif return false; } +/* Return true iff action is mark */ +static inline bool is_tcf_skbedit_mark(const struct tc_action *a) +{ + return is_tcf_skbedit_with_flag(a, SKBEDIT_F_MARK); +} + static inline u32 tcf_skbedit_mark(const struct tc_action *a) { u32 mark; @@ -57,17 +63,7 @@ static inline u32 tcf_skbedit_mark(const struct tc_action *a) /* Return true iff action is ptype */ static inline bool is_tcf_skbedit_ptype(const struct tc_action *a) { -#ifdef CONFIG_NET_CLS_ACT - u32 flags; - - if (a->ops && a->ops->id == TCA_ID_SKBEDIT) { - rcu_read_lock(); - flags = rcu_dereference(to_skbedit(a)->params)->flags; - rcu_read_unlock(); - return flags == SKBEDIT_F_PTYPE; - } -#endif - return false; + return is_tcf_skbedit_with_flag(a, SKBEDIT_F_PTYPE); } static inline u32 tcf_skbedit_ptype(const struct tc_action *a) @@ -81,4 +77,21 @@ static inline u32 tcf_skbedit_ptype(const struct tc_action *a) return ptype; } +/* Return true iff action is priority */ +static inline bool is_tcf_skbedit_priority(const struct tc_action *a) +{ + return is_tcf_skbedit_with_flag(a, SKBEDIT_F_PRIORITY); +} + +static inline u32 tcf_skbedit_priority(const struct tc_action *a) +{ + u32 priority; + + rcu_read_lock(); + priority = rcu_dereference(to_skbedit(a)->params)->priority; + rcu_read_unlock(); + + return priority; +} + #endif /* __NET_TC_SKBEDIT_H */ diff --git a/include/net/tc_act/tc_tunnel_key.h b/include/net/tc_act/tc_tunnel_key.h index 0689d9bcdf84..e1057b255f69 100644 --- a/include/net/tc_act/tc_tunnel_key.h +++ b/include/net/tc_act/tc_tunnel_key.h @@ -28,8 +28,10 @@ static inline bool is_tcf_tunnel_set(const struct tc_action *a) { #ifdef CONFIG_NET_CLS_ACT struct tcf_tunnel_key *t = to_tunnel_key(a); - struct tcf_tunnel_key_params *params = rtnl_dereference(t->params); + struct tcf_tunnel_key_params *params; + params = rcu_dereference_protected(t->params, + lockdep_is_held(&a->tcfa_lock)); if (a->ops && a->ops->id == TCA_ID_TUNNEL_KEY) return params->tcft_action == TCA_TUNNEL_KEY_ACT_SET; #endif @@ -40,8 +42,10 @@ static inline bool is_tcf_tunnel_release(const struct tc_action *a) { #ifdef CONFIG_NET_CLS_ACT struct tcf_tunnel_key *t = to_tunnel_key(a); - struct tcf_tunnel_key_params *params = rtnl_dereference(t->params); + struct tcf_tunnel_key_params *params; + params = rcu_dereference_protected(t->params, + lockdep_is_held(&a->tcfa_lock)); if (a->ops && a->ops->id == TCA_ID_TUNNEL_KEY) return params->tcft_action == TCA_TUNNEL_KEY_ACT_RELEASE; #endif @@ -69,7 +73,7 @@ tcf_tunnel_info_copy(const struct tc_action *a) if (tun) { size_t tun_size = sizeof(*tun) + tun->options_len; struct ip_tunnel_info *tun_copy = kmemdup(tun, tun_size, - GFP_KERNEL); + GFP_ATOMIC); return tun_copy; } diff --git a/include/net/tcp.h b/include/net/tcp.h index a5ea27df3c2b..5fa9eacd965a 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -2195,14 +2195,21 @@ void tcp_update_ulp(struct sock *sk, struct proto *p, struct sk_msg; struct sk_psock; -int tcp_bpf_init(struct sock *sk); -void tcp_bpf_reinit(struct sock *sk); +#ifdef CONFIG_BPF_STREAM_PARSER +struct proto *tcp_bpf_get_proto(struct sock *sk, struct sk_psock *psock); 
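A minimal sketch of an offload driver recognising the new priority-only skbedit variant, mirroring how the mark and ptype variants are handled.

static int example_parse_skbedit(const struct tc_action *act, u32 *hw_prio)
{
	if (!is_tcf_skbedit_priority(act))
		return -EOPNOTSUPP;

	*hw_prio = tcf_skbedit_priority(act);	/* to be programmed into HW */
	return 0;
}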
+void tcp_bpf_clone(const struct sock *sk, struct sock *newsk); +#else +static inline void tcp_bpf_clone(const struct sock *sk, struct sock *newsk) +{ +} +#endif /* CONFIG_BPF_STREAM_PARSER */ + +#ifdef CONFIG_NET_SOCK_MSG int tcp_bpf_sendmsg_redir(struct sock *sk, struct sk_msg *msg, u32 bytes, int flags); -int tcp_bpf_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, - int nonblock, int flags, int *addr_len); int __tcp_bpf_recvmsg(struct sock *sk, struct sk_psock *psock, struct msghdr *msg, int len, int flags); +#endif /* CONFIG_NET_SOCK_MSG */ /* Call BPF_SOCK_OPS program that returns an int. If the return value * is < 0, then the BPF op failed (for example if the loaded BPF diff --git a/include/net/udp.h b/include/net/udp.h index e55d5f765807..a8fa6c0c6ded 100644 --- a/include/net/udp.h +++ b/include/net/udp.h @@ -503,4 +503,9 @@ static inline struct sk_buff *udp_rcv_segment(struct sock *sk, return segs; } +#ifdef CONFIG_BPF_STREAM_PARSER +struct sk_psock; +struct proto *udp_bpf_get_proto(struct sock *sk, struct sk_psock *psock); +#endif /* BPF_STREAM_PARSER */ + #endif /* _UDP_H */ diff --git a/include/rdma/ib_cache.h b/include/rdma/ib_cache.h index 870b5e6c06db..e06d13388ae7 100644 --- a/include/rdma/ib_cache.h +++ b/include/rdma/ib_cache.h @@ -39,6 +39,7 @@ int rdma_query_gid(struct ib_device *device, u8 port_num, int index, union ib_gid *gid); +void *rdma_read_gid_hw_context(const struct ib_gid_attr *attr); const struct ib_gid_attr *rdma_find_gid(struct ib_device *device, const union ib_gid *gid, enum ib_gid_type gid_type, diff --git a/include/rdma/ib_cm.h b/include/rdma/ib_cm.h index 8ec482e391aa..058cfbc2b37f 100644 --- a/include/rdma/ib_cm.h +++ b/include/rdma/ib_cm.h @@ -360,7 +360,6 @@ struct ib_cm_req_param { u32 starting_psn; const void *private_data; u8 private_data_len; - u8 peer_to_peer; u8 responder_resources; u8 initiator_depth; u8 remote_cm_response_timeout; diff --git a/include/rdma/ib_fmr_pool.h b/include/rdma/ib_fmr_pool.h index f8982e4e9702..2fd9bfb6d648 100644 --- a/include/rdma/ib_fmr_pool.h +++ b/include/rdma/ib_fmr_pool.h @@ -73,7 +73,7 @@ struct ib_pool_fmr { int remap_count; u64 io_virtual_address; int page_list_len; - u64 page_list[0]; + u64 page_list[]; }; struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd, diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h index 1f779fad3a1e..bbc5cfb57cd2 100644 --- a/include/rdma/ib_verbs.h +++ b/include/rdma/ib_verbs.h @@ -1876,7 +1876,7 @@ struct ib_flow_eth_filter { __be16 ether_type; __be16 vlan_tag; /* Must be last */ - u8 real_sz[0]; + u8 real_sz[]; }; struct ib_flow_spec_eth { @@ -1890,7 +1890,7 @@ struct ib_flow_ib_filter { __be16 dlid; __u8 sl; /* Must be last */ - u8 real_sz[0]; + u8 real_sz[]; }; struct ib_flow_spec_ib { @@ -1915,7 +1915,7 @@ struct ib_flow_ipv4_filter { u8 ttl; u8 flags; /* Must be last */ - u8 real_sz[0]; + u8 real_sz[]; }; struct ib_flow_spec_ipv4 { @@ -1933,7 +1933,7 @@ struct ib_flow_ipv6_filter { u8 traffic_class; u8 hop_limit; /* Must be last */ - u8 real_sz[0]; + u8 real_sz[]; }; struct ib_flow_spec_ipv6 { @@ -1947,7 +1947,7 @@ struct ib_flow_tcp_udp_filter { __be16 dst_port; __be16 src_port; /* Must be last */ - u8 real_sz[0]; + u8 real_sz[]; }; struct ib_flow_spec_tcp_udp { @@ -1959,7 +1959,7 @@ struct ib_flow_spec_tcp_udp { struct ib_flow_tunnel_filter { __be32 tunnel_id; - u8 real_sz[0]; + u8 real_sz[]; }; /* ib_flow_spec_tunnel describes the Vxlan tunnel @@ -1976,7 +1976,7 @@ struct ib_flow_esp_filter { __be32 spi; __be32 seq; /* Must be last */ - u8 
real_sz[0]; + u8 real_sz[]; }; struct ib_flow_spec_esp { @@ -1991,7 +1991,7 @@ struct ib_flow_gre_filter { __be16 protocol; __be32 key; /* Must be last */ - u8 real_sz[0]; + u8 real_sz[]; }; struct ib_flow_spec_gre { @@ -2004,7 +2004,7 @@ struct ib_flow_spec_gre { struct ib_flow_mpls_filter { __be32 tag; /* Must be last */ - u8 real_sz[0]; + u8 real_sz[]; }; struct ib_flow_spec_mpls { @@ -3627,35 +3627,8 @@ static inline int ib_post_srq_recv(struct ib_srq *srq, bad_recv_wr ? : &dummy); } -/** - * ib_create_qp_user - Creates a QP associated with the specified protection - * domain. - * @pd: The protection domain associated with the QP. - * @qp_init_attr: A list of initial attributes required to create the - * QP. If QP creation succeeds, then the attributes are updated to - * the actual capabilities of the created QP. - * @udata: Valid user data or NULL for kernel objects - */ -struct ib_qp *ib_create_qp_user(struct ib_pd *pd, - struct ib_qp_init_attr *qp_init_attr, - struct ib_udata *udata); - -/** - * ib_create_qp - Creates a kernel QP associated with the specified protection - * domain. - * @pd: The protection domain associated with the QP. - * @qp_init_attr: A list of initial attributes required to create the - * QP. If QP creation succeeds, then the attributes are updated to - * the actual capabilities of the created QP. - * @udata: Valid user data or NULL for kernel objects - * - * NOTE: for user qp use ib_create_qp_user with valid udata! - */ -static inline struct ib_qp *ib_create_qp(struct ib_pd *pd, - struct ib_qp_init_attr *qp_init_attr) -{ - return ib_create_qp_user(pd, qp_init_attr, NULL); -} +struct ib_qp *ib_create_qp(struct ib_pd *pd, + struct ib_qp_init_attr *qp_init_attr); /** * ib_modify_qp_with_udata - Modifies the attributes for the specified QP. 
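With ib_create_qp() now a plain exported function for kernel users, a minimal sketch of creating an RC QP; the capability numbers are illustrative only.

static struct ib_qp *example_create_rc_qp(struct ib_pd *pd, struct ib_cq *cq)
{
	struct ib_qp_init_attr attr = {
		.send_cq	= cq,
		.recv_cq	= cq,
		.qp_type	= IB_QPT_RC,
		.sq_sig_type	= IB_SIGNAL_ALL_WR,
		.cap = {
			.max_send_wr	= 16,
			.max_recv_wr	= 16,
			.max_send_sge	= 1,
			.max_recv_sge	= 1,
		},
	};

	return ib_create_qp(pd, &attr);
}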
diff --git a/include/rdma/opa_vnic.h b/include/rdma/opa_vnic.h index 0c07a70bd7f6..e90b149fe92a 100644 --- a/include/rdma/opa_vnic.h +++ b/include/rdma/opa_vnic.h @@ -75,7 +75,7 @@ struct opa_vnic_rdma_netdev { struct rdma_netdev rn; /* keep this first */ /* followed by device private data */ - char *dev_priv[0]; + char *dev_priv[]; }; static inline void *opa_vnic_priv(const struct net_device *dev) diff --git a/include/rdma/rdmavt_mr.h b/include/rdma/rdmavt_mr.h index 72a3856d4057..ce6c888f7fe7 100644 --- a/include/rdma/rdmavt_mr.h +++ b/include/rdma/rdmavt_mr.h @@ -85,7 +85,7 @@ struct rvt_mregion { u8 lkey_published; /* in global table */ struct percpu_ref refcount; struct completion comp; /* complete when refcount goes to zero */ - struct rvt_segarray *map[0]; /* the segments */ + struct rvt_segarray *map[]; /* the segments */ }; #define RVT_MAX_LKEY_TABLE_BITS 23 diff --git a/include/rdma/rdmavt_qp.h b/include/rdma/rdmavt_qp.h index 0d5c70e2d8ab..5fc10108703a 100644 --- a/include/rdma/rdmavt_qp.h +++ b/include/rdma/rdmavt_qp.h @@ -191,7 +191,7 @@ struct rvt_swqe { u32 ssn; /* send sequence number */ u32 length; /* total length of data in sg_list */ void *priv; /* driver dependent field */ - struct rvt_sge sg_list[0]; + struct rvt_sge sg_list[]; }; /** diff --git a/include/rdma/uverbs_ioctl.h b/include/rdma/uverbs_ioctl.h index 28570ac2b6a0..9f3b1e004046 100644 --- a/include/rdma/uverbs_ioctl.h +++ b/include/rdma/uverbs_ioctl.h @@ -173,7 +173,7 @@ enum uapi_radix_data { UVERBS_API_OBJ_KEY_BITS = 5, UVERBS_API_OBJ_KEY_SHIFT = UVERBS_API_METHOD_KEY_BITS + UVERBS_API_METHOD_KEY_SHIFT, - UVERBS_API_OBJ_KEY_NUM_CORE = 24, + UVERBS_API_OBJ_KEY_NUM_CORE = 20, UVERBS_API_OBJ_KEY_NUM_DRIVER = (1 << UVERBS_API_OBJ_KEY_BITS) - UVERBS_API_OBJ_KEY_NUM_CORE, UVERBS_API_OBJ_KEY_MASK = GENMASK(31, UVERBS_API_OBJ_KEY_SHIFT), diff --git a/include/scsi/iscsi_if.h b/include/scsi/iscsi_if.h index 92b11c7e0b4f..5225a23f2d0e 100644 --- a/include/scsi/iscsi_if.h +++ b/include/scsi/iscsi_if.h @@ -60,6 +60,7 @@ enum iscsi_uevent_e { ISCSI_UEVENT_LOGOUT_FLASHNODE_SID = UEVENT_BASE + 30, ISCSI_UEVENT_SET_CHAP = UEVENT_BASE + 31, ISCSI_UEVENT_GET_HOST_STATS = UEVENT_BASE + 32, + ISCSI_UEVENT_DESTROY_SESSION_ASYNC = UEVENT_BASE + 33, /* up events */ ISCSI_KEVENT_RECV_PDU = KEVENT_BASE + 1, @@ -311,7 +312,7 @@ enum iscsi_param_type { struct iscsi_param_info { uint32_t len; /* Actual length of the param value */ uint16_t param; /* iscsi param */ - uint8_t value[0]; /* length sized value follows */ + uint8_t value[]; /* length sized value follows */ } __packed; struct iscsi_iface_param_info { @@ -320,7 +321,7 @@ struct iscsi_iface_param_info { uint16_t param; /* iscsi param value */ uint8_t iface_type; /* IPv4 or IPv6 */ uint8_t param_type; /* iscsi_param_type */ - uint8_t value[0]; /* length sized value follows */ + uint8_t value[]; /* length sized value follows */ } __packed; /* @@ -697,7 +698,7 @@ enum iscsi_flashnode_param { struct iscsi_flashnode_param_info { uint32_t len; /* Actual length of the param */ uint16_t param; /* iscsi param value */ - uint8_t value[0]; /* length sized value follows */ + uint8_t value[]; /* length sized value follows */ } __packed; enum iscsi_discovery_parent_type { @@ -815,7 +816,7 @@ struct iscsi_stats { * up to ISCSI_STATS_CUSTOM_MAX */ uint32_t custom_length; - struct iscsi_stats_custom custom[0] + struct iscsi_stats_custom custom[] __attribute__ ((aligned (sizeof(uint64_t)))); }; @@ -946,7 +947,7 @@ struct iscsi_offload_host_stats { * up to ISCSI_HOST_STATS_CUSTOM_MAX */ uint32_t 
custom_length; - struct iscsi_host_stats_custom custom[0] + struct iscsi_host_stats_custom custom[] __aligned(sizeof(uint64_t)); }; diff --git a/include/scsi/scsi_bsg_iscsi.h b/include/scsi/scsi_bsg_iscsi.h index fa0c820a1663..6b8128005af8 100644 --- a/include/scsi/scsi_bsg_iscsi.h +++ b/include/scsi/scsi_bsg_iscsi.h @@ -52,7 +52,7 @@ struct iscsi_bsg_host_vendor { uint64_t vendor_id; /* start of vendor command area */ - uint32_t vendor_cmd[0]; + uint32_t vendor_cmd[]; }; /* Response: diff --git a/include/scsi/scsi_cmnd.h b/include/scsi/scsi_cmnd.h index a2849bb9cd19..80ac89e47b47 100644 --- a/include/scsi/scsi_cmnd.h +++ b/include/scsi/scsi_cmnd.h @@ -159,7 +159,6 @@ static inline struct scsi_driver *scsi_cmd_to_driver(struct scsi_cmnd *cmd) return *(struct scsi_driver **)cmd->request->rq_disk->private_data; } -extern void scsi_put_command(struct scsi_cmnd *); extern void scsi_finish_command(struct scsi_cmnd *cmd); extern void *scsi_kmap_atomic_sg(struct scatterlist *sg, int sg_count, diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h index f8312a3e5b42..c3cba2aaf934 100644 --- a/include/scsi/scsi_device.h +++ b/include/scsi/scsi_device.h @@ -110,7 +110,6 @@ struct scsi_device { atomic_t device_blocked; /* Device returned QUEUE_FULL. */ spinlock_t list_lock; - struct list_head cmd_list; /* queue of in use SCSI Command structures */ struct list_head starved_entry; unsigned short queue_depth; /* How deep of a queue we want */ unsigned short max_queue_depth; /* max queue depth */ @@ -204,6 +203,9 @@ struct scsi_device { unsigned unmap_limit_for_ws:1; /* Use the UNMAP limit for WRITE SAME */ unsigned rpm_autosuspend:1; /* Enable runtime autosuspend at device * creation time */ + + bool offline_already; /* Device offline message logged */ + atomic_t disk_events_disable_depth; /* disable depth for disk events */ DECLARE_BITMAP(supported_events, SDEV_EVT_MAXBITS); /* supported events */ @@ -231,7 +233,7 @@ struct scsi_device { struct mutex state_mutex; enum scsi_device_state sdev_state; struct task_struct *quiesced_by; - unsigned long sdev_data[0]; + unsigned long sdev_data[]; } __attribute__((aligned(sizeof(unsigned long)))); #define to_scsi_device(d) \ @@ -315,7 +317,7 @@ struct scsi_target { char scsi_level; enum scsi_target_state state; void *hostdata; /* available to low-level driver */ - unsigned long starget_data[0]; /* for the transport */ + unsigned long starget_data[]; /* for the transport */ /* starget_data must be the last element!!!! */ } __attribute__((aligned(sizeof(unsigned long)))); diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h index f577647bf5f2..822e8cda8d9b 100644 --- a/include/scsi/scsi_host.h +++ b/include/scsi/scsi_host.h @@ -62,6 +62,9 @@ struct scsi_host_template { void __user *arg); #endif + int (*init_cmd_priv)(struct Scsi_Host *shost, struct scsi_cmnd *cmd); + int (*exit_cmd_priv)(struct Scsi_Host *shost, struct scsi_cmnd *cmd); + /* * The queuecommand function is used to queue up a scsi * command block to the LLDD. When the driver finished @@ -426,9 +429,6 @@ struct scsi_host_template { /* True if the controller does not support WRITE SAME */ unsigned no_write_same:1; - /* True if the low-level driver supports blk-mq only */ - unsigned force_blk_mq:1; - /* * Countdown for host blocking with no commands outstanding. 
*/ @@ -627,8 +627,6 @@ struct Scsi_Host { /* The controller does not support WRITE SAME */ unsigned no_write_same:1; - unsigned use_cmd_list:1; - /* Host responded with short (<36 bytes) INQUIRY result */ unsigned short_inquiry:1; @@ -685,7 +683,7 @@ struct Scsi_Host { * and also because some compilers (m68k) don't automatically force * alignment to a long boundary. */ - unsigned long hostdata[0] /* Used for storage of host specific stuff */ + unsigned long hostdata[] /* Used for storage of host specific stuff */ __attribute__ ((aligned (sizeof(unsigned long)))); }; @@ -735,6 +733,8 @@ extern int scsi_host_busy(struct Scsi_Host *shost); extern void scsi_host_put(struct Scsi_Host *t); extern struct Scsi_Host *scsi_host_lookup(unsigned short); extern const char *scsi_host_state_name(enum scsi_host_state); +extern void scsi_host_complete_all_commands(struct Scsi_Host *shost, + int status); static inline int __must_check scsi_add_host(struct Scsi_Host *host, struct device *dev) @@ -759,6 +759,11 @@ static inline int scsi_host_scan_allowed(struct Scsi_Host *shost) extern void scsi_unblock_requests(struct Scsi_Host *); extern void scsi_block_requests(struct Scsi_Host *); +extern int scsi_host_block(struct Scsi_Host *shost); +extern int scsi_host_unblock(struct Scsi_Host *shost, int new_state); + +void scsi_host_busy_iter(struct Scsi_Host *, + bool (*fn)(struct scsi_cmnd *, void *, bool), void *priv); struct class_container; diff --git a/include/scsi/scsi_ioctl.h b/include/scsi/scsi_ioctl.h index 4fe69d863b5d..b465799f4d2d 100644 --- a/include/scsi/scsi_ioctl.h +++ b/include/scsi/scsi_ioctl.h @@ -27,7 +27,7 @@ struct scsi_device; typedef struct scsi_ioctl_command { unsigned int inlen; unsigned int outlen; - unsigned char data[0]; + unsigned char data[]; } Scsi_Ioctl_Command; typedef struct scsi_idlun { diff --git a/include/scsi/scsi_transport_iscsi.h b/include/scsi/scsi_transport_iscsi.h index 325ae731d9ad..bdcb6d69d154 100644 --- a/include/scsi/scsi_transport_iscsi.h +++ b/include/scsi/scsi_transport_iscsi.h @@ -188,8 +188,16 @@ extern void iscsi_ping_comp_event(uint32_t host_no, uint32_t status, uint32_t pid, uint32_t data_size, uint8_t *data); +/* iscsi class connection state */ +enum iscsi_connection_state { + ISCSI_CONN_UP = 0, + ISCSI_CONN_DOWN, + ISCSI_CONN_FAILED, +}; + struct iscsi_cls_conn { struct list_head conn_list; /* item in connlist */ + struct list_head conn_list_err; /* item in connlist_err */ void *dd_data; /* LLD private data */ struct iscsi_transport *transport; uint32_t cid; /* connection id */ @@ -197,6 +205,7 @@ struct iscsi_cls_conn { struct iscsi_endpoint *ep; struct device dev; /* sysfs transport/container device */ + enum iscsi_connection_state state; }; #define iscsi_dev_to_conn(_dev) \ @@ -225,6 +234,7 @@ struct iscsi_cls_session { struct work_struct unblock_work; struct work_struct scan_work; struct work_struct unbind_work; + struct work_struct destroy_work; /* recovery fields */ int recovery_tmo; diff --git a/include/scsi/scsicam.h b/include/scsi/scsicam.h index 57c729254569..08edd603e521 100644 --- a/include/scsi/scsicam.h +++ b/include/scsi/scsicam.h @@ -13,8 +13,7 @@ #ifndef SCSICAM_H #define SCSICAM_H -extern int scsicam_bios_param (struct block_device *bdev, sector_t capacity, int *ip); -extern int scsi_partsize(unsigned char *buf, unsigned long capacity, - unsigned int *cyls, unsigned int *hds, unsigned int *secs); -extern unsigned char *scsi_bios_ptable(struct block_device *bdev); +int scsicam_bios_param(struct block_device *bdev, sector_t capacity, int 
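Two minimal sketches for the new scsi_host additions. First, the per-command private-data hooks, assuming a LLD whose scsi_host_template sets .cmd_size = sizeof(struct example_cmd_priv) and .init_cmd_priv = example_init_cmd_priv; the private struct is hypothetical and scsi_cmd_priv() returns the area following the scsi_cmnd.

struct example_cmd_priv {
	int state;	/* hypothetical per-command driver state */
};

static int example_init_cmd_priv(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
{
	struct example_cmd_priv *priv = scsi_cmd_priv(cmd);

	memset(priv, 0, sizeof(*priv));
	return 0;
}

Second, the new scsi_host_busy_iter() helper counting in-flight commands; the callback returns true to continue the walk, and the reserved flag mirrors the underlying blk-mq tag iteration.

static bool example_count_busy(struct scsi_cmnd *cmd, void *priv, bool reserved)
{
	unsigned int *count = priv;

	(*count)++;
	return true;
}

static unsigned int example_host_busy_count(struct Scsi_Host *shost)
{
	unsigned int count = 0;

	scsi_host_busy_iter(shost, example_count_busy, &count);
	return count;
}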
*ip); +bool scsi_partsize(struct block_device *bdev, sector_t capacity, int geom[3]); +unsigned char *scsi_bios_ptable(struct block_device *bdev); #endif /* def SCSICAM_H */ diff --git a/include/scsi/sg.h b/include/scsi/sg.h index 29c7ad04d2e2..7327e12f3373 100644 --- a/include/scsi/sg.h +++ b/include/scsi/sg.h @@ -24,7 +24,7 @@ * http://sg.danny.cz/sg [alternatively check the MAINTAINERS file] * The documentation for the sg version 3 driver can be found at: * http://sg.danny.cz/sg/p/sg_v3_ho.html - * Also see: <kernel_source>/Documentation/scsi/scsi-generic.txt + * Also see: <kernel_source>/Documentation/scsi/scsi-generic.rst * * For utility and test programs see: http://sg.danny.cz/sg/sg3_utils.html */ diff --git a/include/scsi/srp.h b/include/scsi/srp.h index 9220758d5087..177d8026e96f 100644 --- a/include/scsi/srp.h +++ b/include/scsi/srp.h @@ -109,7 +109,7 @@ struct srp_direct_buf { struct srp_indirect_buf { struct srp_direct_buf table_desc; __be32 len; - struct srp_direct_buf desc_list[0]; + struct srp_direct_buf desc_list[]; } __attribute__((packed)); /* Immediate data buffer descriptor as defined in SRP2. */ @@ -244,7 +244,7 @@ struct srp_cmd { u8 reserved4; u8 add_cdb_len; u8 cdb[16]; - u8 add_data[0]; + u8 add_data[]; }; enum { @@ -274,7 +274,7 @@ struct srp_rsp { __be32 data_in_res_cnt; __be32 sense_data_len; __be32 resp_data_len; - u8 data[0]; + u8 data[]; } __attribute__((packed)); struct srp_cred_req { @@ -306,7 +306,7 @@ struct srp_aer_req { struct scsi_lun lun; __be32 sense_data_len; u32 reserved3; - u8 sense_data[0]; + u8 sense_data[]; } __attribute__((packed)); struct srp_aer_rsp { diff --git a/include/soc/fsl/dpaa2-io.h b/include/soc/fsl/dpaa2-io.h index 672cfb58046f..c9d849924f89 100644 --- a/include/soc/fsl/dpaa2-io.h +++ b/include/soc/fsl/dpaa2-io.h @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */ /* * Copyright 2014-2016 Freescale Semiconductor Inc. - * Copyright NXP + * Copyright 2017-2019 NXP * */ #ifndef __FSL_DPAA2_IO_H @@ -109,6 +109,10 @@ int dpaa2_io_service_pull_channel(struct dpaa2_io *d, u32 channelid, int dpaa2_io_service_enqueue_fq(struct dpaa2_io *d, u32 fqid, const struct dpaa2_fd *fd); +int dpaa2_io_service_enqueue_multiple_fq(struct dpaa2_io *d, u32 fqid, + const struct dpaa2_fd *fd, int number_of_frame); +int dpaa2_io_service_enqueue_multiple_desc_fq(struct dpaa2_io *d, u32 *fqid, + const struct dpaa2_fd *fd, int number_of_frame); int dpaa2_io_service_enqueue_qd(struct dpaa2_io *d, u32 qdid, u8 prio, u16 qdbin, const struct dpaa2_fd *fd); int dpaa2_io_service_release(struct dpaa2_io *d, u16 bpid, diff --git a/include/soc/fsl/qe/ucc_fast.h b/include/soc/fsl/qe/ucc_fast.h index ba0e838f962a..dc4e79468094 100644 --- a/include/soc/fsl/qe/ucc_fast.h +++ b/include/soc/fsl/qe/ucc_fast.h @@ -178,10 +178,10 @@ struct ucc_fast_info { struct ucc_fast_private { struct ucc_fast_info *uf_info; struct ucc_fast __iomem *uf_regs; /* a pointer to the UCC regs. */ - u32 __iomem *p_ucce; /* a pointer to the event register in memory. */ - u32 __iomem *p_uccm; /* a pointer to the mask register in memory. */ + __be32 __iomem *p_ucce; /* a pointer to the event register in memory. */ + __be32 __iomem *p_uccm; /* a pointer to the mask register in memory. 
*/ #ifdef CONFIG_UGETH_TX_ON_DEMAND - u16 __iomem *p_utodr; /* pointer to the transmit on demand register */ + __be16 __iomem *p_utodr;/* pointer to the transmit on demand register */ #endif int enabled_tx; /* Whether channel is enabled for Tx (ENT) */ int enabled_rx; /* Whether channel is enabled for Rx (ENR) */ diff --git a/include/soc/fsl/qe/ucc_slow.h b/include/soc/fsl/qe/ucc_slow.h index d187a6be83bc..11a216e4e919 100644 --- a/include/soc/fsl/qe/ucc_slow.h +++ b/include/soc/fsl/qe/ucc_slow.h @@ -184,7 +184,7 @@ struct ucc_slow_info { struct ucc_slow_private { struct ucc_slow_info *us_info; struct ucc_slow __iomem *us_regs; /* Ptr to memory map of UCC regs */ - struct ucc_slow_pram *us_pram; /* a pointer to the parameter RAM */ + struct ucc_slow_pram __iomem *us_pram; /* a pointer to the parameter RAM */ s32 us_pram_offset; int enabled_tx; /* Whether channel is enabled for Tx (ENT) */ int enabled_rx; /* Whether channel is enabled for Rx (ENR) */ @@ -196,13 +196,12 @@ struct ucc_slow_private { and length for first BD in a frame */ s32 tx_base_offset; /* first BD in Tx BD table offset (In MURAM) */ s32 rx_base_offset; /* first BD in Rx BD table offset (In MURAM) */ - struct qe_bd *confBd; /* next BD for confirm after Tx */ - struct qe_bd *tx_bd; /* next BD for new Tx request */ - struct qe_bd *rx_bd; /* next BD to collect after Rx */ + struct qe_bd __iomem *confBd; /* next BD for confirm after Tx */ + struct qe_bd __iomem *tx_bd; /* next BD for new Tx request */ + struct qe_bd __iomem *rx_bd; /* next BD to collect after Rx */ void *p_rx_frame; /* accumulating receive frame */ - u16 *p_ucce; /* a pointer to the event register in memory. - */ - u16 *p_uccm; /* a pointer to the mask register in memory */ + __be16 __iomem *p_ucce; /* a pointer to the event register in memory */ + __be16 __iomem *p_uccm; /* a pointer to the mask register in memory */ u16 saved_uccm; /* a saved mask for the RX Interrupt bits */ #ifdef STATISTICS u32 tx_frames; /* Transmitted frames counters */ diff --git a/include/soc/mscc/ocelot.h b/include/soc/mscc/ocelot.h index 068f96b1a83e..ebffcb36a7e3 100644 --- a/include/soc/mscc/ocelot.h +++ b/include/soc/mscc/ocelot.h @@ -11,6 +11,66 @@ #include <linux/regmap.h> #include <net/dsa.h> +/* Port Group IDs (PGID) are masks of destination ports. + * + * For L2 forwarding, the switch performs 3 lookups in the PGID table for each + * frame, and forwards the frame to the ports that are present in the logical + * AND of all 3 PGIDs. + * + * These PGID lookups are: + * - In one of PGID[0-63]: for the destination masks. There are 2 paths by + * which the switch selects a destination PGID: + * - The {DMAC, VID} is present in the MAC table. In that case, the + * destination PGID is given by the DEST_IDX field of the MAC table entry + * that matched. + * - The {DMAC, VID} is not present in the MAC table (it is unknown). The + * frame is disseminated as being either unicast, multicast or broadcast, + * and according to that, the destination PGID is chosen as being the + * value contained by ANA_FLOODING_FLD_UNICAST, + * ANA_FLOODING_FLD_MULTICAST or ANA_FLOODING_FLD_BROADCAST. + * The destination PGID can be an unicast set: the first PGIDs, 0 to + * ocelot->num_phys_ports - 1, or a multicast set: the PGIDs from + * ocelot->num_phys_ports to 63. By convention, a unicast PGID corresponds to + * a physical port and has a single bit set in the destination ports mask: + * that corresponding to the port number itself. 
In contrast, a multicast + * PGID will have potentially more than one single bit set in the destination + * ports mask. + * - In one of PGID[64-79]: for the aggregation mask. The switch classifier + * dissects each frame and generates a 4-bit Link Aggregation Code which is + * used for this second PGID table lookup. The goal of link aggregation is to + * hash multiple flows within the same LAG on to different destination ports. + * The first lookup will result in a PGID with all the LAG members present in + * the destination ports mask, and the second lookup, by Link Aggregation + * Code, will ensure that each flow gets forwarded only to a single port out + * of that mask (there are no duplicates). + * - In one of PGID[80-90]: for the source mask. The third time, the PGID table + * is indexed with the ingress port (plus 80). These PGIDs answer the + * question "is port i allowed to forward traffic to port j?" If yes, then + * BIT(j) of PGID 80+i will be found set. The third PGID lookup can be used + * to enforce the L2 forwarding matrix imposed by e.g. a Linux bridge. + */ + +/* Reserve some destination PGIDs at the end of the range: + * PGID_CPU: used for whitelisting certain MAC addresses, such as the addresses + * of the switch port net devices, towards the CPU port module. + * PGID_UC: the flooding destinations for unknown unicast traffic. + * PGID_MC: the flooding destinations for broadcast and non-IP multicast + * traffic. + * PGID_MCIPV4: the flooding destinations for IPv4 multicast traffic. + * PGID_MCIPV6: the flooding destinations for IPv6 multicast traffic. + */ +#define PGID_CPU 59 +#define PGID_UC 60 +#define PGID_MC 61 +#define PGID_MCIPV4 62 +#define PGID_MCIPV6 63 + +/* Aggregation PGIDs, one per Link Aggregation Code */ +#define PGID_AGGR 64 + +/* Source PGIDs, one per physical port */ +#define PGID_SRC 80 + #define IFH_INJ_BYPASS BIT(31) #define IFH_INJ_POP_CNT_DISABLE (3 << 28) @@ -402,10 +462,15 @@ enum ocelot_tag_prefix { struct ocelot; struct ocelot_ops { - void (*pcs_init)(struct ocelot *ocelot, int port); int (*reset)(struct ocelot *ocelot); }; +struct ocelot_acl_block { + struct list_head rules; + int count; + int pol_lpr; +}; + struct ocelot_port { struct ocelot *ocelot; @@ -447,14 +512,27 @@ struct ocelot { /* Keep track of the vlan port masks */ u32 vlan_mask[VLAN_N_VID]; + /* In tables like ANA:PORT and the ANA:PGID:PGID mask, + * the CPU is located after the physical ports (at the + * num_phys_ports index). 
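A minimal sketch of the PGID arithmetic documented above: the source mask for ingress port i lives at PGID_SRC + i, and bit j of that mask says whether forwarding to port j is allowed. How src_mask is read back from the ANA PGID table is driver specific and not shown here.

static int example_src_pgid(int ingress_port)
{
	return PGID_SRC + ingress_port;		/* PGID[80 + i] */
}

static bool example_may_forward(u32 src_mask, int dst_port)
{
	return !!(src_mask & BIT(dst_port));
}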
+ */ u8 num_phys_ports; - u8 num_cpu_ports; - u8 cpu; + + int npi; + + enum ocelot_tag_prefix inj_prefix; + enum ocelot_tag_prefix xtr_prefix; u32 *lags; struct list_head multicast; + struct ocelot_acl_block acl_block; + + const struct vcap_field *vcap_is2_keys; + const struct vcap_field *vcap_is2_actions; + const struct vcap_props *vcap; + /* Workqueue to check statistics for overflow with its lock */ struct mutex stats_lock; u64 *stats; @@ -469,8 +547,11 @@ struct ocelot { struct mutex ptp_lock; /* Protects the PTP clock */ spinlock_t ptp_clock_lock; +}; - void (*port_pcs_init)(struct ocelot_port *port); +struct ocelot_policer { + u32 rate; /* kilobit per second */ + u32 burst; /* bytes */ }; #define ocelot_read_ix(ocelot, reg, gi, ri) __ocelot_read_ix(ocelot, reg, reg##_GSZ * (gi) + reg##_RSZ * (ri)) @@ -500,9 +581,9 @@ void __ocelot_rmw_ix(struct ocelot *ocelot, u32 val, u32 mask, u32 reg, int ocelot_regfields_init(struct ocelot *ocelot, const struct reg_field *const regfields); struct regmap *ocelot_regmap_init(struct ocelot *ocelot, struct resource *res); -void ocelot_set_cpu_port(struct ocelot *ocelot, int cpu, - enum ocelot_tag_prefix injection, - enum ocelot_tag_prefix extraction); +void ocelot_configure_cpu(struct ocelot *ocelot, int npi, + enum ocelot_tag_prefix injection, + enum ocelot_tag_prefix extraction); int ocelot_init(struct ocelot *ocelot); void ocelot_deinit(struct ocelot *ocelot); void ocelot_init_port(struct ocelot *ocelot, int port); @@ -541,5 +622,16 @@ int ocelot_ptp_gettime64(struct ptp_clock_info *ptp, struct timespec64 *ts); int ocelot_port_add_txtstamp_skb(struct ocelot_port *ocelot_port, struct sk_buff *skb); void ocelot_get_txtstamp(struct ocelot *ocelot); +void ocelot_port_set_maxlen(struct ocelot *ocelot, int port, size_t sdu); +int ocelot_get_max_mtu(struct ocelot *ocelot, int port); +int ocelot_port_policer_add(struct ocelot *ocelot, int port, + struct ocelot_policer *pol); +int ocelot_port_policer_del(struct ocelot *ocelot, int port); +int ocelot_cls_flower_replace(struct ocelot *ocelot, int port, + struct flow_cls_offload *f, bool ingress); +int ocelot_cls_flower_destroy(struct ocelot *ocelot, int port, + struct flow_cls_offload *f, bool ingress); +int ocelot_cls_flower_stats(struct ocelot *ocelot, int port, + struct flow_cls_offload *f, bool ingress); #endif diff --git a/include/soc/mscc/ocelot_dev.h b/include/soc/mscc/ocelot_dev.h index 0a50d53bbd3f..7c08437061fc 100644 --- a/include/soc/mscc/ocelot_dev.h +++ b/include/soc/mscc/ocelot_dev.h @@ -74,7 +74,7 @@ #define DEV_MAC_TAGS_CFG_TAG_ID_M GENMASK(31, 16) #define DEV_MAC_TAGS_CFG_TAG_ID_X(x) (((x) & GENMASK(31, 16)) >> 16) #define DEV_MAC_TAGS_CFG_VLAN_LEN_AWR_ENA BIT(2) -#define DEV_MAC_TAGS_CFG_PB_ENA BIT(1) +#define DEV_MAC_TAGS_CFG_VLAN_DBL_AWR_ENA BIT(1) #define DEV_MAC_TAGS_CFG_VLAN_AWR_ENA BIT(0) #define DEV_MAC_ADV_CHK_CFG 0x2c diff --git a/include/soc/mscc/ocelot_vcap.h b/include/soc/mscc/ocelot_vcap.h new file mode 100644 index 000000000000..5748373ab4d3 --- /dev/null +++ b/include/soc/mscc/ocelot_vcap.h @@ -0,0 +1,205 @@ +/* SPDX-License-Identifier: (GPL-2.0 OR MIT) + * Microsemi Ocelot Switch driver + * Copyright (c) 2019 Microsemi Corporation + */ + +#ifndef _OCELOT_VCAP_H_ +#define _OCELOT_VCAP_H_ + +/* ================================================================= + * VCAP Common + * ================================================================= + */ + +enum { + /* VCAP_IS1, */ + VCAP_IS2, + /* VCAP_ES0, */ +}; + +struct vcap_props { + u16 tg_width; /* Type-group width (in 
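A minimal sketch of the new port policer API, using the units spelled out in struct ocelot_policer; the numbers are placeholders.

static int example_install_policer(struct ocelot *ocelot, int port)
{
	struct ocelot_policer pol = {
		.rate	= 10000,	/* kilobit per second */
		.burst	= 10000,	/* bytes */
	};

	return ocelot_port_policer_add(ocelot, port, &pol);
}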
bits) */ + u16 sw_count; /* Sub word count */ + u16 entry_count; /* Entry count */ + u16 entry_words; /* Number of entry words */ + u16 entry_width; /* Entry width (in bits) */ + u16 action_count; /* Action count */ + u16 action_words; /* Number of action words */ + u16 action_width; /* Action width (in bits) */ + u16 action_type_width; /* Action type width (in bits) */ + struct { + u16 width; /* Action type width (in bits) */ + u16 count; /* Action type sub word count */ + } action_table[2]; + u16 counter_words; /* Number of counter words */ + u16 counter_width; /* Counter width (in bits) */ +}; + +/* VCAP Type-Group values */ +#define VCAP_TG_NONE 0 /* Entry is invalid */ +#define VCAP_TG_FULL 1 /* Full entry */ +#define VCAP_TG_HALF 2 /* Half entry */ +#define VCAP_TG_QUARTER 3 /* Quarter entry */ + +/* ================================================================= + * VCAP IS2 + * ================================================================= + */ + +/* IS2 half key types */ +#define IS2_TYPE_ETYPE 0 +#define IS2_TYPE_LLC 1 +#define IS2_TYPE_SNAP 2 +#define IS2_TYPE_ARP 3 +#define IS2_TYPE_IP_UDP_TCP 4 +#define IS2_TYPE_IP_OTHER 5 +#define IS2_TYPE_IPV6 6 +#define IS2_TYPE_OAM 7 +#define IS2_TYPE_SMAC_SIP6 8 +#define IS2_TYPE_ANY 100 /* Pseudo type */ + +/* IS2 half key type mask for matching any IP */ +#define IS2_TYPE_MASK_IP_ANY 0xe + +enum { + IS2_ACTION_TYPE_NORMAL, + IS2_ACTION_TYPE_SMAC_SIP, + IS2_ACTION_TYPE_MAX, +}; + +/* IS2 MASK_MODE values */ +#define IS2_ACT_MASK_MODE_NONE 0 +#define IS2_ACT_MASK_MODE_FILTER 1 +#define IS2_ACT_MASK_MODE_POLICY 2 +#define IS2_ACT_MASK_MODE_REDIR 3 + +/* IS2 REW_OP values */ +#define IS2_ACT_REW_OP_NONE 0 +#define IS2_ACT_REW_OP_PTP_ONE 2 +#define IS2_ACT_REW_OP_PTP_TWO 3 +#define IS2_ACT_REW_OP_SPECIAL 8 +#define IS2_ACT_REW_OP_PTP_ORG 9 +#define IS2_ACT_REW_OP_PTP_ONE_SUB_DELAY_1 (IS2_ACT_REW_OP_PTP_ONE | (1 << 3)) +#define IS2_ACT_REW_OP_PTP_ONE_SUB_DELAY_2 (IS2_ACT_REW_OP_PTP_ONE | (2 << 3)) +#define IS2_ACT_REW_OP_PTP_ONE_ADD_DELAY (IS2_ACT_REW_OP_PTP_ONE | (1 << 5)) +#define IS2_ACT_REW_OP_PTP_ONE_ADD_SUB BIT(7) + +#define VCAP_PORT_WIDTH 4 + +/* IS2 quarter key - SMAC_SIP4 */ +#define IS2_QKO_IGR_PORT 0 +#define IS2_QKL_IGR_PORT VCAP_PORT_WIDTH +#define IS2_QKO_L2_SMAC (IS2_QKO_IGR_PORT + IS2_QKL_IGR_PORT) +#define IS2_QKL_L2_SMAC 48 +#define IS2_QKO_L3_IP4_SIP (IS2_QKO_L2_SMAC + IS2_QKL_L2_SMAC) +#define IS2_QKL_L3_IP4_SIP 32 + +enum vcap_is2_half_key_field { + /* Common */ + VCAP_IS2_TYPE, + VCAP_IS2_HK_FIRST, + VCAP_IS2_HK_PAG, + VCAP_IS2_HK_RSV1, + VCAP_IS2_HK_IGR_PORT_MASK, + VCAP_IS2_HK_RSV2, + VCAP_IS2_HK_HOST_MATCH, + VCAP_IS2_HK_L2_MC, + VCAP_IS2_HK_L2_BC, + VCAP_IS2_HK_VLAN_TAGGED, + VCAP_IS2_HK_VID, + VCAP_IS2_HK_DEI, + VCAP_IS2_HK_PCP, + /* MAC_ETYPE / MAC_LLC / MAC_SNAP / OAM common */ + VCAP_IS2_HK_L2_DMAC, + VCAP_IS2_HK_L2_SMAC, + /* MAC_ETYPE (TYPE=000) */ + VCAP_IS2_HK_MAC_ETYPE_ETYPE, + VCAP_IS2_HK_MAC_ETYPE_L2_PAYLOAD0, + VCAP_IS2_HK_MAC_ETYPE_L2_PAYLOAD1, + VCAP_IS2_HK_MAC_ETYPE_L2_PAYLOAD2, + /* MAC_LLC (TYPE=001) */ + VCAP_IS2_HK_MAC_LLC_DMAC, + VCAP_IS2_HK_MAC_LLC_SMAC, + VCAP_IS2_HK_MAC_LLC_L2_LLC, + /* MAC_SNAP (TYPE=010) */ + VCAP_IS2_HK_MAC_SNAP_SMAC, + VCAP_IS2_HK_MAC_SNAP_DMAC, + VCAP_IS2_HK_MAC_SNAP_L2_SNAP, + /* MAC_ARP (TYPE=011) */ + VCAP_IS2_HK_MAC_ARP_SMAC, + VCAP_IS2_HK_MAC_ARP_ADDR_SPACE_OK, + VCAP_IS2_HK_MAC_ARP_PROTO_SPACE_OK, + VCAP_IS2_HK_MAC_ARP_LEN_OK, + VCAP_IS2_HK_MAC_ARP_TARGET_MATCH, + VCAP_IS2_HK_MAC_ARP_SENDER_MATCH, + VCAP_IS2_HK_MAC_ARP_OPCODE_UNKNOWN, + 
VCAP_IS2_HK_MAC_ARP_OPCODE, + VCAP_IS2_HK_MAC_ARP_L3_IP4_DIP, + VCAP_IS2_HK_MAC_ARP_L3_IP4_SIP, + VCAP_IS2_HK_MAC_ARP_DIP_EQ_SIP, + /* IP4_TCP_UDP / IP4_OTHER common */ + VCAP_IS2_HK_IP4, + VCAP_IS2_HK_L3_FRAGMENT, + VCAP_IS2_HK_L3_FRAG_OFS_GT0, + VCAP_IS2_HK_L3_OPTIONS, + VCAP_IS2_HK_IP4_L3_TTL_GT0, + VCAP_IS2_HK_L3_TOS, + VCAP_IS2_HK_L3_IP4_DIP, + VCAP_IS2_HK_L3_IP4_SIP, + VCAP_IS2_HK_DIP_EQ_SIP, + /* IP4_TCP_UDP (TYPE=100) */ + VCAP_IS2_HK_TCP, + VCAP_IS2_HK_L4_SPORT, + VCAP_IS2_HK_L4_DPORT, + VCAP_IS2_HK_L4_RNG, + VCAP_IS2_HK_L4_SPORT_EQ_DPORT, + VCAP_IS2_HK_L4_SEQUENCE_EQ0, + VCAP_IS2_HK_L4_URG, + VCAP_IS2_HK_L4_ACK, + VCAP_IS2_HK_L4_PSH, + VCAP_IS2_HK_L4_RST, + VCAP_IS2_HK_L4_SYN, + VCAP_IS2_HK_L4_FIN, + VCAP_IS2_HK_L4_1588_DOM, + VCAP_IS2_HK_L4_1588_VER, + /* IP4_OTHER (TYPE=101) */ + VCAP_IS2_HK_IP4_L3_PROTO, + VCAP_IS2_HK_L3_PAYLOAD, + /* IP6_STD (TYPE=110) */ + VCAP_IS2_HK_IP6_L3_TTL_GT0, + VCAP_IS2_HK_IP6_L3_PROTO, + VCAP_IS2_HK_L3_IP6_SIP, + /* OAM (TYPE=111) */ + VCAP_IS2_HK_OAM_MEL_FLAGS, + VCAP_IS2_HK_OAM_VER, + VCAP_IS2_HK_OAM_OPCODE, + VCAP_IS2_HK_OAM_FLAGS, + VCAP_IS2_HK_OAM_MEPID, + VCAP_IS2_HK_OAM_CCM_CNTS_EQ0, + VCAP_IS2_HK_OAM_IS_Y1731, +}; + +struct vcap_field { + int offset; + int length; +}; + +enum vcap_is2_action_field { + VCAP_IS2_ACT_HIT_ME_ONCE, + VCAP_IS2_ACT_CPU_COPY_ENA, + VCAP_IS2_ACT_CPU_QU_NUM, + VCAP_IS2_ACT_MASK_MODE, + VCAP_IS2_ACT_MIRROR_ENA, + VCAP_IS2_ACT_LRN_DIS, + VCAP_IS2_ACT_POLICE_ENA, + VCAP_IS2_ACT_POLICE_IDX, + VCAP_IS2_ACT_POLICE_VCAP_ONLY, + VCAP_IS2_ACT_PORT_MASK, + VCAP_IS2_ACT_REW_OP, + VCAP_IS2_ACT_SMAC_REPLACE_ENA, + VCAP_IS2_ACT_RSV, + VCAP_IS2_ACT_ACL_ID, + VCAP_IS2_ACT_HIT_CNT, +}; + +#endif /* _OCELOT_VCAP_H_ */ diff --git a/include/soc/qcom/rpmh.h b/include/soc/qcom/rpmh.h index 619e07c75da9..f9ec353d24a5 100644 --- a/include/soc/qcom/rpmh.h +++ b/include/soc/qcom/rpmh.h @@ -20,8 +20,6 @@ int rpmh_write_async(const struct device *dev, enum rpmh_state state, int rpmh_write_batch(const struct device *dev, enum rpmh_state state, const struct tcs_cmd *cmd, u32 *n); -int rpmh_flush(const struct device *dev); - int rpmh_invalidate(const struct device *dev); #else @@ -40,9 +38,6 @@ static inline int rpmh_write_batch(const struct device *dev, const struct tcs_cmd *cmd, u32 *n) { return -ENODEV; } -static inline int rpmh_flush(const struct device *dev) -{ return -ENODEV; } - static inline int rpmh_invalidate(const struct device *dev) { return -ENODEV; } diff --git a/include/soc/tegra/bpmp-abi.h b/include/soc/tegra/bpmp-abi.h index cac6f610b3fe..8f8e73e5cd45 100644 --- a/include/soc/tegra/bpmp-abi.h +++ b/include/soc/tegra/bpmp-abi.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0-only */ /* - * Copyright (c) 2014-2018, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2014-2020, NVIDIA CORPORATION. All rights reserved. 
*/ #ifndef _ABI_BPMP_ABI_H_ @@ -2119,6 +2119,7 @@ enum { CMD_UPHY_PCIE_LANE_MARGIN_STATUS = 2, CMD_UPHY_PCIE_EP_CONTROLLER_PLL_INIT = 3, CMD_UPHY_PCIE_CONTROLLER_STATE = 4, + CMD_UPHY_PCIE_EP_CONTROLLER_PLL_OFF = 5, CMD_UPHY_MAX, }; @@ -2151,6 +2152,11 @@ struct cmd_uphy_pcie_controller_state_request { uint8_t enable; } __ABI_PACKED; +struct cmd_uphy_ep_controller_pll_off_request { + /** @brief EP controller number, valid: 0, 4, 5 */ + uint8_t ep_controller; +} __ABI_PACKED; + /** * @ingroup UPHY * @brief Request with #MRQ_UPHY @@ -2165,6 +2171,7 @@ struct cmd_uphy_pcie_controller_state_request { * |CMD_UPHY_PCIE_LANE_MARGIN_STATUS | | * |CMD_UPHY_PCIE_EP_CONTROLLER_PLL_INIT |cmd_uphy_ep_controller_pll_init_request | * |CMD_UPHY_PCIE_CONTROLLER_STATE |cmd_uphy_pcie_controller_state_request | + * |CMD_UPHY_PCIE_EP_CONTROLLER_PLL_OFF |cmd_uphy_ep_controller_pll_off_request | * */ @@ -2178,6 +2185,7 @@ struct mrq_uphy_request { struct cmd_uphy_margin_control_request uphy_set_margin_control; struct cmd_uphy_ep_controller_pll_init_request ep_ctrlr_pll_init; struct cmd_uphy_pcie_controller_state_request controller_state; + struct cmd_uphy_ep_controller_pll_off_request ep_ctrlr_pll_off; } __UNION_ANON; } __ABI_PACKED; diff --git a/include/soc/tegra/cpuidle.h b/include/soc/tegra/cpuidle.h index 029ba1f4b2cc..5665975015d8 100644 --- a/include/soc/tegra/cpuidle.h +++ b/include/soc/tegra/cpuidle.h @@ -6,7 +6,7 @@ #ifndef __SOC_TEGRA_CPUIDLE_H__ #define __SOC_TEGRA_CPUIDLE_H__ -#if defined(CONFIG_ARM) && defined(CONFIG_ARCH_TEGRA) && defined(CONFIG_CPU_IDLE) +#ifdef CONFIG_ARM_TEGRA_CPUIDLE void tegra_cpuidle_pcie_irqs_in_use(void); #else static inline void tegra_cpuidle_pcie_irqs_in_use(void) diff --git a/include/soc/tegra/irq.h b/include/soc/tegra/irq.h new file mode 100644 index 000000000000..8eb11a7109e4 --- /dev/null +++ b/include/soc/tegra/irq.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2012, NVIDIA Corporation. All rights reserved. 
+ */ + +#ifndef __SOC_TEGRA_IRQ_H +#define __SOC_TEGRA_IRQ_H + +#if defined(CONFIG_ARM) +bool tegra_pending_sgi(void); +#endif + +#endif /* __SOC_TEGRA_IRQ_H */ diff --git a/include/soc/tegra/pm.h b/include/soc/tegra/pm.h index 951fcd738d55..08477d7bfab9 100644 --- a/include/soc/tegra/pm.h +++ b/include/soc/tegra/pm.h @@ -6,6 +6,8 @@ #ifndef __SOC_TEGRA_PM_H__ #define __SOC_TEGRA_PM_H__ +#include <linux/errno.h> + enum tegra_suspend_mode { TEGRA_SUSPEND_NONE = 0, TEGRA_SUSPEND_LP2, /* CPU voltage off */ @@ -20,6 +22,12 @@ tegra_pm_validate_suspend_mode(enum tegra_suspend_mode mode); /* low-level resume entry point */ void tegra_resume(void); + +int tegra30_pm_secondary_cpu_suspend(unsigned long arg); +void tegra_pm_clear_cpu_in_lp2(void); +void tegra_pm_set_cpu_in_lp2(void); +int tegra_pm_enter_lp2(void); +int tegra_pm_park_secondary_cpu(unsigned long cpu); #else static inline enum tegra_suspend_mode tegra_pm_validate_suspend_mode(enum tegra_suspend_mode mode) @@ -30,6 +38,29 @@ tegra_pm_validate_suspend_mode(enum tegra_suspend_mode mode) static inline void tegra_resume(void) { } + +static inline int tegra30_pm_secondary_cpu_suspend(unsigned long arg) +{ + return -ENOTSUPP; +} + +static inline void tegra_pm_clear_cpu_in_lp2(void) +{ +} + +static inline void tegra_pm_set_cpu_in_lp2(void) +{ +} + +static inline int tegra_pm_enter_lp2(void) +{ + return -ENOTSUPP; +} + +static inline int tegra_pm_park_secondary_cpu(unsigned long cpu) +{ + return -ENOTSUPP; +} #endif /* CONFIG_PM_SLEEP */ #endif /* __SOC_TEGRA_PM_H__ */ diff --git a/include/soc/tegra/pmc.h b/include/soc/tegra/pmc.h index 57e58faf660b..0dd52b0a5c1b 100644 --- a/include/soc/tegra/pmc.h +++ b/include/soc/tegra/pmc.h @@ -113,8 +113,9 @@ enum tegra_io_pad { TEGRA_IO_PAD_PEX_CLK_BIAS, TEGRA_IO_PAD_PEX_CLK1, TEGRA_IO_PAD_PEX_CLK2, - TEGRA_IO_PAD_PEX_CLK2_BIAS, TEGRA_IO_PAD_PEX_CLK3, + TEGRA_IO_PAD_PEX_CLK_2_BIAS, + TEGRA_IO_PAD_PEX_CLK_2, TEGRA_IO_PAD_PEX_CNTRL, TEGRA_IO_PAD_PEX_CTL2, TEGRA_IO_PAD_PEX_L0_RST_N, diff --git a/include/sound/compress_driver.h b/include/sound/compress_driver.h index bc88d6f964da..6ce8effa0b12 100644 --- a/include/sound/compress_driver.h +++ b/include/sound/compress_driver.h @@ -23,7 +23,6 @@ struct snd_compr_ops; * struct snd_compr_runtime: runtime stream description * @state: stream state * @ops: pointer to DSP callbacks - * @dma_buffer_p: runtime dma buffer pointer * @buffer: pointer to kernel buffer, valid only when not in mmap mode or * DSP doesn't implement copy * @buffer_size: size of the above buffer @@ -34,11 +33,14 @@ struct snd_compr_ops; * @total_bytes_transferred: cumulative bytes transferred by offload DSP * @sleep: poll sleep * @private_data: driver private data pointer + * @dma_area: virtual buffer address + * @dma_addr: physical buffer address (not accessible from main CPU) + * @dma_bytes: size of DMA area + * @dma_buffer_p: runtime dma buffer pointer */ struct snd_compr_runtime { snd_pcm_state_t state; struct snd_compr_ops *ops; - struct snd_dma_buffer *dma_buffer_p; void *buffer; u64 buffer_size; u32 fragment_size; @@ -47,6 +49,11 @@ struct snd_compr_runtime { u64 total_bytes_transferred; wait_queue_head_t sleep; void *private_data; + + unsigned char *dma_area; + dma_addr_t dma_addr; + size_t dma_bytes; + struct snd_dma_buffer *dma_buffer_p; }; /** @@ -60,6 +67,7 @@ struct snd_compr_runtime { * @metadata_set: metadata set flag, true when set * @next_track: has userspace signal next track transition, true when set * @private_data: pointer to DSP private data + * @dma_buffer: allocated 
buffer if any */ struct snd_compr_stream { const char *name; @@ -71,6 +79,7 @@ struct snd_compr_stream { bool metadata_set; bool next_track; void *private_data; + struct snd_dma_buffer dma_buffer; }; /** @@ -180,21 +189,34 @@ static inline void snd_compr_drain_notify(struct snd_compr_stream *stream) /** * snd_compr_set_runtime_buffer - Set the Compress runtime buffer - * @substream: compress substream to set + * @stream: compress stream to set * @bufp: the buffer information, NULL to clear * * Copy the buffer information to runtime buffer when @bufp is non-NULL. * Otherwise it clears the current buffer information. */ -static inline void snd_compr_set_runtime_buffer( - struct snd_compr_stream *substream, - struct snd_dma_buffer *bufp) +static inline void +snd_compr_set_runtime_buffer(struct snd_compr_stream *stream, + struct snd_dma_buffer *bufp) { - struct snd_compr_runtime *runtime = substream->runtime; - - runtime->dma_buffer_p = bufp; + struct snd_compr_runtime *runtime = stream->runtime; + + if (bufp) { + runtime->dma_buffer_p = bufp; + runtime->dma_area = bufp->area; + runtime->dma_addr = bufp->addr; + runtime->dma_bytes = bufp->bytes; + } else { + runtime->dma_buffer_p = NULL; + runtime->dma_area = NULL; + runtime->dma_addr = 0; + runtime->dma_bytes = 0; + } } +int snd_compr_malloc_pages(struct snd_compr_stream *stream, size_t size); +int snd_compr_free_pages(struct snd_compr_stream *stream); + int snd_compr_stop_error(struct snd_compr_stream *stream, snd_pcm_state_t state); diff --git a/include/sound/core.h b/include/sound/core.h index ac8b692b69b4..381a010a1bd4 100644 --- a/include/sound/core.h +++ b/include/sound/core.h @@ -266,6 +266,7 @@ void snd_device_disconnect(struct snd_card *card, void *device_data); void snd_device_disconnect_all(struct snd_card *card); void snd_device_free(struct snd_card *card, void *device_data); void snd_device_free_all(struct snd_card *card); +int snd_device_get_state(struct snd_card *card, void *device_data); /* isadma.c */ diff --git a/include/sound/hdaudio.h b/include/sound/hdaudio.h index d4299e146d95..affedc2801c4 100644 --- a/include/sound/hdaudio.h +++ b/include/sound/hdaudio.h @@ -513,6 +513,7 @@ struct hdac_stream { struct snd_pcm_substream *substream; /* assigned substream, * set in PCM open */ + struct snd_compr_stream *cstream; unsigned int format_val; /* format value to be set in the * controller and the codec */ @@ -527,6 +528,7 @@ struct hdac_stream { bool locked:1; bool stripe:1; /* apply stripe control */ + u64 curr_pos; /* timestamp */ unsigned long start_wallclk; /* start + minimum wallclk */ unsigned long period_wallclk; /* wallclk for period */ diff --git a/include/sound/pcm.h b/include/sound/pcm.h index f657ff08f317..2ba5df2c9e23 100644 --- a/include/sound/pcm.h +++ b/include/sound/pcm.h @@ -644,6 +644,11 @@ void snd_pcm_stream_unlock_irqrestore(struct snd_pcm_substream *substream, #define snd_pcm_group_for_each_entry(s, substream) \ list_for_each_entry(s, &substream->group->substreams, link_list) +#define for_each_pcm_streams(stream) \ + for (stream = SNDRV_PCM_STREAM_PLAYBACK; \ + stream <= SNDRV_PCM_STREAM_LAST; \ + stream++) + /** * snd_pcm_running - Check whether the substream is in a running state * @substream: substream to check @@ -1122,7 +1127,14 @@ snd_pcm_kernel_readv(struct snd_pcm_substream *substream, return __snd_pcm_lib_xfer(substream, bufs, false, frames, true); } -int snd_pcm_limit_hw_rates(struct snd_pcm_runtime *runtime); +int snd_pcm_hw_limit_rates(struct snd_pcm_hardware *hw); + +static inline int 
+snd_pcm_limit_hw_rates(struct snd_pcm_runtime *runtime) +{ + return snd_pcm_hw_limit_rates(&runtime->hw); +} + unsigned int snd_pcm_rate_to_rate_bit(unsigned int rate); unsigned int snd_pcm_rate_bit_to_rate(unsigned int rate_bit); unsigned int snd_pcm_rate_mask_intersect(unsigned int rates_a, @@ -1415,6 +1427,15 @@ static inline u64 pcm_format_to_bits(snd_pcm_format_t pcm_format) return 1ULL << (__force int) pcm_format; } +/** + * pcm_for_each_format - helper to iterate for each format type + * @f: the iterator variable in snd_pcm_format_t type + */ +#define pcm_for_each_format(f) \ + for ((f) = SNDRV_PCM_FORMAT_FIRST; \ + (__force int)(f) <= (__force int)SNDRV_PCM_FORMAT_LAST; \ + (f) = (__force snd_pcm_format_t)((__force int)(f) + 1)) + /* printk helpers */ #define pcm_err(pcm, fmt, args...) \ dev_err((pcm)->card->dev, fmt, ##args) diff --git a/include/sound/pcm_params.h b/include/sound/pcm_params.h index 661450a2095b..36f94735d23d 100644 --- a/include/sound/pcm_params.h +++ b/include/sound/pcm_params.h @@ -133,6 +133,13 @@ static inline int snd_mask_test(const struct snd_mask *mask, unsigned int val) return mask->bits[MASK_OFS(val)] & MASK_BIT(val); } +/* Most of drivers need only this one */ +static inline int snd_mask_test_format(const struct snd_mask *mask, + snd_pcm_format_t format) +{ + return snd_mask_test(mask, (__force unsigned int)format); +} + static inline int snd_mask_single(const struct snd_mask *mask) { int i, c = 0; diff --git a/include/sound/rt5682.h b/include/sound/rt5682.h index bc2c31734df1..e1f790561ac1 100644 --- a/include/sound/rt5682.h +++ b/include/sound/rt5682.h @@ -24,6 +24,12 @@ enum rt5682_jd_src { RT5682_JD1, }; +enum rt5682_dai_clks { + RT5682_DAI_WCLK_IDX, + RT5682_DAI_BCLK_IDX, + RT5682_DAI_NUM_CLKS, +}; + struct rt5682_platform_data { int ldo1_en; /* GPIO for LDO1_EN */ @@ -32,6 +38,10 @@ struct rt5682_platform_data { enum rt5682_dmic1_clk_pin dmic1_clk_pin; enum rt5682_jd_src jd_src; unsigned int btndet_delay; + unsigned int dmic_clk_rate; + unsigned int dmic_delay; + + const char *dai_clk_names[RT5682_DAI_NUM_CLKS]; }; #endif diff --git a/include/sound/soc-acpi.h b/include/sound/soc-acpi.h index a217a87cae86..392e953d561e 100644 --- a/include/sound/soc-acpi.h +++ b/include/sound/soc-acpi.h @@ -75,18 +75,45 @@ struct snd_soc_acpi_mach_params { }; /** - * snd_soc_acpi_link_adr: ACPI-based list of _ADR, with a variable - * number of devices per link - * + * snd_soc_acpi_endpoint - endpoint descriptor + * @num: endpoint number (mandatory, unique per device) + * @aggregated: 0 (independent) or 1 (logically grouped) + * @group_position: zero-based order (only when @aggregated is 1) + * @group_id: platform-unique group identifier (only when @aggregrated is 1) + */ +struct snd_soc_acpi_endpoint { + u8 num; + u8 aggregated; + u8 group_position; + u8 group_id; +}; + +/** + * snd_soc_acpi_adr_device - descriptor for _ADR-enumerated device + * @adr: 64 bit ACPI _ADR value + * @num_endpoints: number of endpoints for this device + * @endpoints: array of endpoints + */ +struct snd_soc_acpi_adr_device { + const u64 adr; + const u8 num_endpoints; + const struct snd_soc_acpi_endpoint *endpoints; +}; + +/** + * snd_soc_acpi_link_adr - ACPI-based list of _ADR enumerated devices * @mask: one bit set indicates the link this list applies to - * @num_adr: ARRAY_SIZE of adr - * @adr: array of _ADR (represented as u64). + * @num_adr: ARRAY_SIZE of devices + * @adr_d: array of devices + * + * The number of devices per link can be more than 1, e.g. 
in SoundWire + * multi-drop configurations. */ struct snd_soc_acpi_link_adr { const u32 mask; const u32 num_adr; - const u64 *adr; + const struct snd_soc_acpi_adr_device *adr_d; }; /** diff --git a/include/sound/soc-dai.h b/include/sound/soc-dai.h index eaaeb00e9e84..78bac995db15 100644 --- a/include/sound/soc-dai.h +++ b/include/sound/soc-dai.h @@ -202,6 +202,8 @@ struct snd_soc_dai_ops { int (*set_sdw_stream)(struct snd_soc_dai *dai, void *stream, int direction); + void *(*get_sdw_stream)(struct snd_soc_dai *dai, int direction); + /* * DAI digital mute - optional. * Called by soc-core to minimise any pops. @@ -322,9 +324,7 @@ struct snd_soc_dai { struct snd_soc_dai_driver *driver; /* DAI runtime info */ - unsigned int capture_active; /* stream usage count */ - unsigned int playback_active; /* stream usage count */ - unsigned int probed:1; + unsigned int stream_active[SNDRV_PCM_STREAM_LAST + 1]; /* usage count */ unsigned int active; @@ -348,8 +348,27 @@ struct snd_soc_dai { unsigned int rx_mask; struct list_head list; + + /* bit field */ + unsigned int probed:1; + unsigned int started:1; }; +static inline struct snd_soc_pcm_stream * +snd_soc_dai_get_pcm_stream(const struct snd_soc_dai *dai, int stream) +{ + return (stream == SNDRV_PCM_STREAM_PLAYBACK) ? + &dai->driver->playback : &dai->driver->capture; +} + +static inline +struct snd_soc_dapm_widget *snd_soc_dai_get_widget( + struct snd_soc_dai *dai, int stream) +{ + return (stream == SNDRV_PCM_STREAM_PLAYBACK) ? + dai->playback_widget : dai->capture_widget; +} + static inline void *snd_soc_dai_get_dma_data(const struct snd_soc_dai *dai, const struct snd_pcm_substream *ss) { @@ -406,4 +425,23 @@ static inline int snd_soc_dai_set_sdw_stream(struct snd_soc_dai *dai, return -ENOTSUPP; } +/** + * snd_soc_dai_get_sdw_stream() - Retrieves SDW stream from DAI + * @dai: DAI + * @direction: Stream direction(Playback/Capture) + * + * This routine only retrieves that was previously configured + * with snd_soc_dai_get_sdw_stream() + * + * Returns pointer to stream or -ENOTSUPP if callback is not supported; + */ +static inline void *snd_soc_dai_get_sdw_stream(struct snd_soc_dai *dai, + int direction) +{ + if (dai->driver->ops->get_sdw_stream) + return dai->driver->ops->get_sdw_stream(dai, direction); + else + return ERR_PTR(-ENOTSUPP); +} + #endif diff --git a/include/sound/soc-dapm.h b/include/sound/soc-dapm.h index 1b6afbc1a4ed..08495f8d86dc 100644 --- a/include/sound/soc-dapm.h +++ b/include/sound/soc-dapm.h @@ -482,6 +482,7 @@ int snd_soc_dapm_dai_get_connected_widgets(struct snd_soc_dai *dai, int stream, struct snd_soc_dapm_widget_list **list, bool (*custom_stop_condition)(struct snd_soc_dapm_widget *, enum snd_soc_dapm_direction)); +void snd_soc_dapm_dai_free_widgets(struct snd_soc_dapm_widget_list **list); struct snd_soc_dapm_context *snd_soc_dapm_kcontrol_dapm( struct snd_kcontrol *kcontrol); @@ -691,6 +692,11 @@ struct snd_soc_dapm_widget_list { struct snd_soc_dapm_widget *widgets[0]; }; +#define for_each_dapm_widgets(list, i, widget) \ + for ((i) = 0; \ + (i) < list->num_widgets && (widget = list->widgets[i]); \ + (i)++) + struct snd_soc_dapm_stats { int power_checks; int path_checks; diff --git a/include/sound/soc-dpcm.h b/include/sound/soc-dpcm.h index b654ebfc8766..0f6c50b17bba 100644 --- a/include/sound/soc-dpcm.h +++ b/include/sound/soc-dpcm.h @@ -132,17 +132,8 @@ int snd_soc_dpcm_be_can_update(struct snd_soc_pcm_runtime *fe, struct snd_pcm_substream * snd_soc_dpcm_get_substream(struct snd_soc_pcm_runtime *be, int stream); -/* get the 
BE runtime state */ -enum snd_soc_dpcm_state - snd_soc_dpcm_be_get_state(struct snd_soc_pcm_runtime *be, int stream); - -/* set the BE runtime state */ -void snd_soc_dpcm_be_set_state(struct snd_soc_pcm_runtime *be, int stream, - enum snd_soc_dpcm_state state); - -/* internal use only */ -int soc_dpcm_be_digital_mute(struct snd_soc_pcm_runtime *fe, int mute); -int soc_dpcm_runtime_update(struct snd_soc_card *); +/* update audio routing between PCMs and any DAI links */ +int snd_soc_dpcm_runtime_update(struct snd_soc_card *card); #ifdef CONFIG_DEBUG_FS void soc_dpcm_debugfs_add(struct snd_soc_pcm_runtime *rtd); @@ -154,6 +145,7 @@ static inline void soc_dpcm_debugfs_add(struct snd_soc_pcm_runtime *rtd) int dpcm_path_get(struct snd_soc_pcm_runtime *fe, int stream, struct snd_soc_dapm_widget_list **list_); +void dpcm_path_put(struct snd_soc_dapm_widget_list **list); int dpcm_process_paths(struct snd_soc_pcm_runtime *fe, int stream, struct snd_soc_dapm_widget_list **list, int new); int dpcm_be_dai_startup(struct snd_soc_pcm_runtime *fe, int stream); @@ -167,10 +159,4 @@ int dpcm_be_dai_prepare(struct snd_soc_pcm_runtime *fe, int stream); int dpcm_dapm_stream_event(struct snd_soc_pcm_runtime *fe, int dir, int event); -static inline void dpcm_path_put(struct snd_soc_dapm_widget_list **list) -{ - kfree(*list); -} - - #endif diff --git a/include/sound/soc.h b/include/sound/soc.h index f0e4f36f83bf..13458e4fbb13 100644 --- a/include/sound/soc.h +++ b/include/sound/soc.h @@ -471,6 +471,9 @@ bool snd_soc_runtime_ignore_pmdown_time(struct snd_soc_pcm_runtime *rtd); void snd_soc_runtime_activate(struct snd_soc_pcm_runtime *rtd, int stream); void snd_soc_runtime_deactivate(struct snd_soc_pcm_runtime *rtd, int stream); +int snd_soc_runtime_calc_hw(struct snd_soc_pcm_runtime *rtd, + struct snd_pcm_hardware *hw, int stream); + int snd_soc_runtime_set_dai_fmt(struct snd_soc_pcm_runtime *rtd, unsigned int dai_fmt); @@ -855,6 +858,11 @@ struct snd_soc_dai_link { ((platform) = &link->platforms[i]); \ (i)++) +#define for_each_link_cpus(link, i, cpu) \ + for ((i) = 0; \ + ((i) < link->num_cpus) && ((cpu) = &link->cpus[i]); \ + (i)++) + /* * Sample 1 : Single CPU/Codec/Platform * @@ -1058,6 +1066,7 @@ struct snd_soc_card { const struct snd_soc_dapm_route *of_dapm_routes; int num_of_dapm_routes; bool fully_routed; + bool disable_route_checks; /* lists of probed devices belonging to this card */ struct list_head component_dev_list; @@ -1109,6 +1118,14 @@ struct snd_soc_card { #define for_each_card_components(card, component) \ list_for_each_entry(component, &(card)->component_dev_list, card_list) +#define for_each_card_dapms(card, dapm) \ + list_for_each_entry(dapm, &card->dapm_list, list) + +#define for_each_card_widgets(card, w)\ + list_for_each_entry(w, &card->widgets, list) +#define for_each_card_widgets_safe(card, w, _w) \ + list_for_each_entry_safe(w, _w, &card->widgets, list) + /* SoC machine DAI configuration, glues a codec and cpu DAI together */ struct snd_soc_pcm_runtime { struct device *dev; @@ -1128,10 +1145,14 @@ struct snd_soc_pcm_runtime { struct snd_compr *compr; struct snd_soc_dai *codec_dai; struct snd_soc_dai *cpu_dai; + struct snd_soc_dai **dais; struct snd_soc_dai **codec_dais; unsigned int num_codecs; + struct snd_soc_dai **cpu_dais; + unsigned int num_cpus; + struct delayed_work delayed_work; void (*close_delayed_work_func)(struct snd_soc_pcm_runtime *rtd); #ifdef CONFIG_DEBUG_FS @@ -1148,16 +1169,31 @@ struct snd_soc_pcm_runtime { int num_components; struct snd_soc_component *components[0]; 
/* CPU/Codec/Platform */ }; +/* see soc_new_pcm_runtime() */ +#define asoc_rtd_to_cpu(rtd, n) (rtd)->dais[n] +#define asoc_rtd_to_codec(rtd, n) (rtd)->dais[n + (rtd)->num_cpus] + #define for_each_rtd_components(rtd, i, component) \ for ((i) = 0; \ ((i) < rtd->num_components) && ((component) = rtd->components[i]);\ (i)++) -#define for_each_rtd_codec_dai(rtd, i, dai)\ - for ((i) = 0; \ - ((i) < rtd->num_codecs) && ((dai) = rtd->codec_dais[i]); \ +#define for_each_rtd_cpu_dais(rtd, i, dai) \ + for ((i) = 0; \ + ((i) < rtd->num_cpus) && ((dai) = rtd->cpu_dais[i]); \ + (i)++) +#define for_each_rtd_cpu_dais_rollback(rtd, i, dai) \ + for (; (--(i) >= 0) && ((dai) = rtd->cpu_dais[i]);) +#define for_each_rtd_codec_dais(rtd, i, dai) \ + for ((i) = 0; \ + ((i) < rtd->num_codecs) && ((dai) = rtd->codec_dais[i]); \ + (i)++) +#define for_each_rtd_codec_dais_rollback(rtd, i, dai) \ + for (; (--(i) >= 0) && ((dai) = rtd->codec_dais[i]);) +#define for_each_rtd_dais(rtd, i, dai) \ + for ((i) = 0; \ + ((i) < (rtd)->num_cpus + (rtd)->num_codecs) && \ + ((dai) = (rtd)->dais[i]); \ (i)++) -#define for_each_rtd_codec_dai_rollback(rtd, i, dai) \ - for (; ((--i) >= 0) && ((dai) = rtd->codec_dais[i]);) void snd_soc_close_delayed_work(struct snd_soc_pcm_runtime *rtd); diff --git a/include/sound/sof/dai-intel.h b/include/sound/sof/dai-intel.h index 5f1ef5565be6..04e48227f542 100644 --- a/include/sound/sof/dai-intel.h +++ b/include/sound/sof/dai-intel.h @@ -87,6 +87,15 @@ struct sof_ipc_dai_hda_params { uint32_t link_dma_ch; } __packed; +/* ALH Configuration Request - SOF_IPC_DAI_ALH_CONFIG */ +struct sof_ipc_dai_alh_params { + struct sof_ipc_hdr hdr; + uint32_t stream_id; + + /* reserved for future use */ + uint32_t reserved[15]; +} __packed; + /* DMIC Configuration Request - SOF_IPC_DAI_DMIC_CONFIG */ /* This struct is defined per 2ch PDM controller available in the platform. 
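The sound/soc.h hunk above replaces the dedicated cpu_dai/codec_dai bookkeeping with a single rtd->dais array, adds the asoc_rtd_to_cpu()/asoc_rtd_to_codec() accessors, and renames the per-direction iterators (for_each_rtd_codec_dai() becomes for_each_rtd_codec_dais(), with new cpu and combined variants). A minimal, hypothetical machine-driver hw_params callback is sketched below to show how a caller migrates to the new spelling; the callback name, the MCLK clock id and the 256*fs ratio are placeholders invented for illustration, not values taken from this diff.

#include <sound/soc.h>
#include <sound/pcm_params.h>

/* Placeholder clock id for an imaginary codec; not taken from the diff. */
#define EXAMPLE_CODEC_MCLK_ID	0

static int example_hw_params(struct snd_pcm_substream *substream,
			     struct snd_pcm_hw_params *params)
{
	struct snd_soc_pcm_runtime *rtd = substream->private_data;
	struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(rtd, 0);
	struct snd_soc_dai *codec_dai;
	unsigned int mclk = params_rate(params) * 256;	/* assumed 256*fs MCLK */
	int i, ret;

	dev_dbg(rtd->dev, "cpu dai %s, rate %u\n",
		cpu_dai->name, params_rate(params));

	/* New iterator name; the old for_each_rtd_codec_dai() is gone. */
	for_each_rtd_codec_dais(rtd, i, codec_dai) {
		ret = snd_soc_dai_set_sysclk(codec_dai, EXAMPLE_CODEC_MCLK_ID,
					     mclk, SND_SOC_CLOCK_IN);
		if (ret < 0)
			return ret;
	}

	return 0;
}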
@@ -179,13 +188,4 @@ struct sof_ipc_dai_dmic_params { struct sof_ipc_dai_dmic_pdm_ctrl pdm[0]; } __packed; -/* ALH Configuration Request - SOF_IPC_DAI_ALH_CONFIG */ -struct sof_ipc_dai_alh_params { - struct sof_ipc_hdr hdr; - uint32_t stream_id; - - /* reserved for future use */ - uint32_t reserved[15]; -} __packed; - #endif diff --git a/include/sound/sof/header.h b/include/sound/sof/header.h index bf3edd9c08b4..b79479575cc8 100644 --- a/include/sound/sof/header.h +++ b/include/sound/sof/header.h @@ -51,6 +51,7 @@ #define SOF_IPC_GLB_TRACE_MSG SOF_GLB_TYPE(0x9U) #define SOF_IPC_GLB_GDB_DEBUG SOF_GLB_TYPE(0xAU) #define SOF_IPC_GLB_TEST_MSG SOF_GLB_TYPE(0xBU) +#define SOF_IPC_GLB_PROBE SOF_GLB_TYPE(0xCU) /* * DSP Command Message Types @@ -102,6 +103,16 @@ #define SOF_IPC_STREAM_VORBIS_PARAMS SOF_CMD_TYPE(0x010) #define SOF_IPC_STREAM_VORBIS_FREE SOF_CMD_TYPE(0x011) +/* probe */ +#define SOF_IPC_PROBE_INIT SOF_CMD_TYPE(0x001) +#define SOF_IPC_PROBE_DEINIT SOF_CMD_TYPE(0x002) +#define SOF_IPC_PROBE_DMA_ADD SOF_CMD_TYPE(0x003) +#define SOF_IPC_PROBE_DMA_INFO SOF_CMD_TYPE(0x004) +#define SOF_IPC_PROBE_DMA_REMOVE SOF_CMD_TYPE(0x005) +#define SOF_IPC_PROBE_POINT_ADD SOF_CMD_TYPE(0x006) +#define SOF_IPC_PROBE_POINT_INFO SOF_CMD_TYPE(0x007) +#define SOF_IPC_PROBE_POINT_REMOVE SOF_CMD_TYPE(0x008) + /* trace */ #define SOF_IPC_TRACE_DMA_PARAMS SOF_CMD_TYPE(0x001) #define SOF_IPC_TRACE_DMA_POSITION SOF_CMD_TYPE(0x002) diff --git a/include/sound/sof/info.h b/include/sound/sof/info.h index 1c560144996c..438a11fcf272 100644 --- a/include/sound/sof/info.h +++ b/include/sound/sof/info.h @@ -28,9 +28,9 @@ /* extended data types that can be appended onto end of sof_ipc_fw_ready */ enum sof_ipc_ext_data { - SOF_IPC_EXT_DMA_BUFFER = 0, - SOF_IPC_EXT_WINDOW, - SOF_IPC_EXT_CC_INFO, + SOF_IPC_EXT_UNUSED = 0, + SOF_IPC_EXT_WINDOW = 1, + SOF_IPC_EXT_CC_INFO = 2, }; /* FW version - SOF_IPC_GLB_VERSION */ @@ -83,22 +83,6 @@ struct sof_ipc_ext_data_hdr { uint32_t type; /**< SOF_IPC_EXT_ */ } __packed; -struct sof_ipc_dma_buffer_elem { - struct sof_ipc_hdr hdr; - uint32_t type; /**< SOF_IPC_REGION_ */ - uint32_t id; /**< platform specific - used to map to host memory */ - struct sof_ipc_host_buffer buffer; -} __packed; - -/* extended data DMA buffers for IPC, trace and debug */ -struct sof_ipc_dma_buffer_data { - struct sof_ipc_ext_data_hdr ext_hdr; - uint32_t num_buffers; - - /* host files in buffer[n].buffer */ - struct sof_ipc_dma_buffer_elem buffer[]; -} __packed; - struct sof_ipc_window_elem { struct sof_ipc_hdr hdr; uint32_t type; /**< SOF_IPC_REGION_ */ diff --git a/include/sound/sof/topology.h b/include/sound/sof/topology.h index 8e76178fedf0..402e0250c508 100644 --- a/include/sound/sof/topology.h +++ b/include/sound/sof/topology.h @@ -53,9 +53,10 @@ struct sof_ipc_comp { uint32_t id; enum sof_comp_type type; uint32_t pipeline_id; + uint32_t core; /* reserved for future use */ - uint32_t reserved[2]; + uint32_t reserved[1]; } __packed; /* diff --git a/include/target/iscsi/iscsi_target_core.h b/include/target/iscsi/iscsi_target_core.h index a49d37140a64..591cd9e4692c 100644 --- a/include/target/iscsi/iscsi_target_core.h +++ b/include/target/iscsi/iscsi_target_core.h @@ -676,7 +676,7 @@ struct iscsi_session { atomic_t session_logout; atomic_t session_reinstatement; atomic_t session_stop_active; - atomic_t sleep_on_sess_wait_comp; + atomic_t session_close; /* connection list */ struct list_head sess_conn_list; struct list_head cr_active_list; diff --git a/include/target/target_core_backend.h 
b/include/target/target_core_backend.h index 51b6f50eabee..1b752d8ea529 100644 --- a/include/target/target_core_backend.h +++ b/include/target/target_core_backend.h @@ -116,10 +116,4 @@ static inline bool target_dev_configured(struct se_device *se_dev) return !!(se_dev->dev_flags & DF_CONFIGURED); } -/* Only use get_unaligned_be24() if reading p - 1 is allowed. */ -static inline uint32_t get_unaligned_be24(const uint8_t *const p) -{ - return get_unaligned_be32(p - 1) & 0xffffffU; -} - #endif /* TARGET_CORE_BACKEND_H */ diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h index 1728e883b7b2..6d4a694f6ea7 100644 --- a/include/target/target_core_base.h +++ b/include/target/target_core_base.h @@ -74,8 +74,6 @@ #define DA_EMULATE_MODEL_ALIAS 0 /* Emulation for WriteCache and SYNCHRONIZE_CACHE */ #define DA_EMULATE_WRITE_CACHE 0 -/* Emulation for UNIT ATTENTION Interlock Control */ -#define DA_EMULATE_UA_INTLLCK_CTRL 0 /* Emulation for TASK_ABORTED status (TAS) by default */ #define DA_EMULATE_TAS 1 /* Emulation for Thin Provisioning UNMAP using block/blk-lib.c:blkdev_issue_discard() */ @@ -433,6 +431,13 @@ enum target_prot_type { TARGET_DIF_TYPE3_PROT, }; +/* Emulation for UNIT ATTENTION Interlock Control */ +enum target_ua_intlck_ctrl { + TARGET_UA_INTLCK_CTRL_CLEAR = 0, + TARGET_UA_INTLCK_CTRL_NO_CLEAR = 1, + TARGET_UA_INTLCK_CTRL_ESTABLISH_UA = 2, +}; + enum target_core_dif_check { TARGET_DIF_CHECK_GUARD = 0x1 << 0, TARGET_DIF_CHECK_APPTAG = 0x1 << 1, @@ -663,26 +668,26 @@ struct se_dev_entry { }; struct se_dev_attrib { - int emulate_model_alias; - int emulate_dpo; - int emulate_fua_write; - int emulate_fua_read; - int emulate_write_cache; - int emulate_ua_intlck_ctrl; - int emulate_tas; - int emulate_tpu; - int emulate_tpws; - int emulate_caw; - int emulate_3pc; - int emulate_pr; + bool emulate_model_alias; + bool emulate_dpo; /* deprecated */ + bool emulate_fua_write; + bool emulate_fua_read; /* deprecated */ + bool emulate_write_cache; + enum target_ua_intlck_ctrl emulate_ua_intlck_ctrl; + bool emulate_tas; + bool emulate_tpu; + bool emulate_tpws; + bool emulate_caw; + bool emulate_3pc; + bool emulate_pr; enum target_prot_type pi_prot_type; enum target_prot_type hw_pi_prot_type; - int pi_prot_verify; - int enforce_pr_isids; - int force_pr_aptpl; - int is_nonrot; - int emulate_rest_reord; - int unmap_zeroes_data; + bool pi_prot_verify; + bool enforce_pr_isids; + bool force_pr_aptpl; + bool is_nonrot; + bool emulate_rest_reord; + bool unmap_zeroes_data; u32 hw_block_size; u32 block_size; u32 hw_max_sectors; diff --git a/include/trace/bpf_probe.h b/include/trace/bpf_probe.h index b04c29270973..1ce3be63add1 100644 --- a/include/trace/bpf_probe.h +++ b/include/trace/bpf_probe.h @@ -75,13 +75,17 @@ static inline void bpf_test_probe_##call(void) \ check_trace_callback_type_##call(__bpf_trace_##template); \ } \ typedef void (*btf_trace_##call)(void *__data, proto); \ -static struct bpf_raw_event_map __used \ - __attribute__((section("__bpf_raw_tp_map"))) \ -__bpf_trace_tp_map_##call = { \ - .tp = &__tracepoint_##call, \ - .bpf_func = (void *)(btf_trace_##call)__bpf_trace_##template, \ - .num_args = COUNT_ARGS(args), \ - .writable_size = size, \ +static union { \ + struct bpf_raw_event_map event; \ + btf_trace_##call handler; \ +} __bpf_trace_tp_map_##call __used \ +__attribute__((section("__bpf_raw_tp_map"))) = { \ + .event = { \ + .tp = &__tracepoint_##call, \ + .bpf_func = __bpf_trace_##template, \ + .num_args = COUNT_ARGS(args), \ + .writable_size = size, \ + }, \ 
}; #define FIRST(x, ...) x diff --git a/include/trace/events/afs.h b/include/trace/events/afs.h index 564ba1b5cf57..c612cabbc378 100644 --- a/include/trace/events/afs.h +++ b/include/trace/events/afs.h @@ -233,7 +233,7 @@ enum afs_cb_break_reason { EM(afs_call_trace_get, "GET ") \ EM(afs_call_trace_put, "PUT ") \ EM(afs_call_trace_wake, "WAKE ") \ - E_(afs_call_trace_work, "WORK ") + E_(afs_call_trace_work, "QUEUE") #define afs_server_traces \ EM(afs_server_trace_alloc, "ALLOC ") \ diff --git a/include/trace/events/btrfs.h b/include/trace/events/btrfs.h index 17088a112ed0..bcbc763b8814 100644 --- a/include/trace/events/btrfs.h +++ b/include/trace/events/btrfs.h @@ -81,13 +81,14 @@ TRACE_DEFINE_ENUM(COMMIT_TRANS); #define show_extent_io_tree_owner(owner) \ __print_symbolic(owner, \ - { IO_TREE_FS_INFO_FREED_EXTENTS0, "FREED_EXTENTS0" }, \ - { IO_TREE_FS_INFO_FREED_EXTENTS1, "FREED_EXTENTS1" }, \ + { IO_TREE_FS_PINNED_EXTENTS, "PINNED_EXTENTS" }, \ + { IO_TREE_FS_EXCLUDED_EXTENTS, "EXCLUDED_EXTENTS" }, \ { IO_TREE_INODE_IO, "INODE_IO" }, \ { IO_TREE_INODE_IO_FAILURE, "INODE_IO_FAILURE" }, \ { IO_TREE_RELOC_BLOCKS, "RELOC_BLOCKS" }, \ { IO_TREE_TRANS_DIRTY_PAGES, "TRANS_DIRTY_PAGES" }, \ { IO_TREE_ROOT_DIRTY_LOG_PAGES, "ROOT_DIRTY_LOG_PAGES" }, \ + { IO_TREE_INODE_FILE_EXTENT, "INODE_FILE_EXTENT" }, \ { IO_TREE_SELFTEST, "SELFTEST" }) #define BTRFS_GROUP_FLAGS \ @@ -468,7 +469,6 @@ DEFINE_EVENT( { (1 << BTRFS_ORDERED_PREALLOC), "PREALLOC" }, \ { (1 << BTRFS_ORDERED_DIRECT), "DIRECT" }, \ { (1 << BTRFS_ORDERED_IOERR), "IOERR" }, \ - { (1 << BTRFS_ORDERED_UPDATED_ISIZE), "UPDATED_ISIZE" }, \ { (1 << BTRFS_ORDERED_TRUNCATED), "TRUNCATED" }) diff --git a/include/trace/events/f2fs.h b/include/trace/events/f2fs.h index 67a97838c2a0..d97adfc327f0 100644 --- a/include/trace/events/f2fs.h +++ b/include/trace/events/f2fs.h @@ -153,7 +153,8 @@ TRACE_DEFINE_ENUM(CP_PAUSE); #define show_compress_algorithm(type) \ __print_symbolic(type, \ { COMPRESS_LZO, "LZO" }, \ - { COMPRESS_LZ4, "LZ4" }) + { COMPRESS_LZ4, "LZ4" }, \ + { COMPRESS_ZSTD, "ZSTD" }) struct f2fs_sb_info; struct f2fs_io_info; diff --git a/include/trace/events/gpu_mem.h b/include/trace/events/gpu_mem.h new file mode 100644 index 000000000000..1897822a9150 --- /dev/null +++ b/include/trace/events/gpu_mem.h @@ -0,0 +1,57 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * GPU memory trace points + * + * Copyright (C) 2020 Google, Inc. + */ + +#undef TRACE_SYSTEM +#define TRACE_SYSTEM gpu_mem + +#if !defined(_TRACE_GPU_MEM_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_GPU_MEM_H + +#include <linux/tracepoint.h> + +/* + * The gpu_memory_total event indicates that there's an update to either the + * global or process total gpu memory counters. + * + * This event should be emitted whenever the kernel device driver allocates, + * frees, imports, unimports memory in the GPU addressable space. + * + * @gpu_id: This is the gpu id. + * + * @pid: Put 0 for global total, while positive pid for process total. + * + * @size: Virtual size of the allocation in bytes. 
+ * + */ +TRACE_EVENT(gpu_mem_total, + + TP_PROTO(uint32_t gpu_id, uint32_t pid, uint64_t size), + + TP_ARGS(gpu_id, pid, size), + + TP_STRUCT__entry( + __field(uint32_t, gpu_id) + __field(uint32_t, pid) + __field(uint64_t, size) + ), + + TP_fast_assign( + __entry->gpu_id = gpu_id; + __entry->pid = pid; + __entry->size = size; + ), + + TP_printk("gpu_id=%u pid=%u size=%llu", + __entry->gpu_id, + __entry->pid, + __entry->size) +); + +#endif /* _TRACE_GPU_MEM_H */ + +/* This part must be outside protection */ +#include <trace/define_trace.h> diff --git a/include/trace/events/huge_memory.h b/include/trace/events/huge_memory.h index d82a0f4e824d..70e32ff096ec 100644 --- a/include/trace/events/huge_memory.h +++ b/include/trace/events/huge_memory.h @@ -13,6 +13,7 @@ EM( SCAN_PMD_NULL, "pmd_null") \ EM( SCAN_EXCEED_NONE_PTE, "exceed_none_pte") \ EM( SCAN_PTE_NON_PRESENT, "pte_non_present") \ + EM( SCAN_PTE_UFFD_WP, "pte_uffd_wp") \ EM( SCAN_PAGE_RO, "no_writable_page") \ EM( SCAN_LACK_REFERENCED_PAGE, "lack_referenced_page") \ EM( SCAN_PAGE_NULL, "page_null") \ diff --git a/include/trace/events/io_uring.h b/include/trace/events/io_uring.h index 27bd9e4f927b..9f0d3b7d56b0 100644 --- a/include/trace/events/io_uring.h +++ b/include/trace/events/io_uring.h @@ -357,6 +357,109 @@ TRACE_EVENT(io_uring_submit_sqe, __entry->force_nonblock, __entry->sq_thread) ); +TRACE_EVENT(io_uring_poll_arm, + + TP_PROTO(void *ctx, u8 opcode, u64 user_data, int mask, int events), + + TP_ARGS(ctx, opcode, user_data, mask, events), + + TP_STRUCT__entry ( + __field( void *, ctx ) + __field( u8, opcode ) + __field( u64, user_data ) + __field( int, mask ) + __field( int, events ) + ), + + TP_fast_assign( + __entry->ctx = ctx; + __entry->opcode = opcode; + __entry->user_data = user_data; + __entry->mask = mask; + __entry->events = events; + ), + + TP_printk("ring %p, op %d, data 0x%llx, mask 0x%x, events 0x%x", + __entry->ctx, __entry->opcode, + (unsigned long long) __entry->user_data, + __entry->mask, __entry->events) +); + +TRACE_EVENT(io_uring_poll_wake, + + TP_PROTO(void *ctx, u8 opcode, u64 user_data, int mask), + + TP_ARGS(ctx, opcode, user_data, mask), + + TP_STRUCT__entry ( + __field( void *, ctx ) + __field( u8, opcode ) + __field( u64, user_data ) + __field( int, mask ) + ), + + TP_fast_assign( + __entry->ctx = ctx; + __entry->opcode = opcode; + __entry->user_data = user_data; + __entry->mask = mask; + ), + + TP_printk("ring %p, op %d, data 0x%llx, mask 0x%x", + __entry->ctx, __entry->opcode, + (unsigned long long) __entry->user_data, + __entry->mask) +); + +TRACE_EVENT(io_uring_task_add, + + TP_PROTO(void *ctx, u8 opcode, u64 user_data, int mask), + + TP_ARGS(ctx, opcode, user_data, mask), + + TP_STRUCT__entry ( + __field( void *, ctx ) + __field( u8, opcode ) + __field( u64, user_data ) + __field( int, mask ) + ), + + TP_fast_assign( + __entry->ctx = ctx; + __entry->opcode = opcode; + __entry->user_data = user_data; + __entry->mask = mask; + ), + + TP_printk("ring %p, op %d, data 0x%llx, mask %x", + __entry->ctx, __entry->opcode, + (unsigned long long) __entry->user_data, + __entry->mask) +); + +TRACE_EVENT(io_uring_task_run, + + TP_PROTO(void *ctx, u8 opcode, u64 user_data), + + TP_ARGS(ctx, opcode, user_data), + + TP_STRUCT__entry ( + __field( void *, ctx ) + __field( u8, opcode ) + __field( u64, user_data ) + ), + + TP_fast_assign( + __entry->ctx = ctx; + __entry->opcode = opcode; + __entry->user_data = user_data; + ), + + TP_printk("ring %p, op %d, data 0x%llx", + __entry->ctx, __entry->opcode, + (unsigned 
long long) __entry->user_data) +); + #endif /* _TRACE_IO_URING_H */ /* This part must be outside protection */ diff --git a/include/trace/events/mmap.h b/include/trace/events/mmap.h new file mode 100644 index 000000000000..4661f7ba07c0 --- /dev/null +++ b/include/trace/events/mmap.h @@ -0,0 +1,48 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM mmap + +#if !defined(_TRACE_MMAP_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_MMAP_H + +#include <linux/tracepoint.h> + +TRACE_EVENT(vm_unmapped_area, + + TP_PROTO(unsigned long addr, struct vm_unmapped_area_info *info), + + TP_ARGS(addr, info), + + TP_STRUCT__entry( + __field(unsigned long, addr) + __field(unsigned long, total_vm) + __field(unsigned long, flags) + __field(unsigned long, length) + __field(unsigned long, low_limit) + __field(unsigned long, high_limit) + __field(unsigned long, align_mask) + __field(unsigned long, align_offset) + ), + + TP_fast_assign( + __entry->addr = addr; + __entry->total_vm = current->mm->total_vm; + __entry->flags = info->flags; + __entry->length = info->length; + __entry->low_limit = info->low_limit; + __entry->high_limit = info->high_limit; + __entry->align_mask = info->align_mask; + __entry->align_offset = info->align_offset; + ), + + TP_printk("addr=0x%lx err=%ld total_vm=0x%lx flags=0x%lx len=0x%lx lo=0x%lx hi=0x%lx mask=0x%lx ofs=0x%lx\n", + IS_ERR_VALUE(__entry->addr) ? 0 : __entry->addr, + IS_ERR_VALUE(__entry->addr) ? __entry->addr : 0, + __entry->total_vm, __entry->flags, __entry->length, + __entry->low_limit, __entry->high_limit, __entry->align_mask, + __entry->align_offset) +); +#endif + +/* This part must be outside protection */ +#include <trace/define_trace.h> diff --git a/include/trace/events/mmflags.h b/include/trace/events/mmflags.h index a1675d43777e..5fb752034386 100644 --- a/include/trace/events/mmflags.h +++ b/include/trace/events/mmflags.h @@ -154,6 +154,7 @@ IF_HAVE_PG_IDLE(PG_idle, "idle" ) {VM_ACCOUNT, "account" }, \ {VM_NORESERVE, "noreserve" }, \ {VM_HUGETLB, "hugetlb" }, \ + {VM_SYNC, "sync" }, \ __VM_ARCH_SPECIFIC_1 , \ {VM_WIPEONFORK, "wipeonfork" }, \ {VM_DONTDUMP, "dontdump" }, \ diff --git a/include/trace/events/power.h b/include/trace/events/power.h index 7457e238e1b7..af5018aa9517 100644 --- a/include/trace/events/power.h +++ b/include/trace/events/power.h @@ -359,75 +359,50 @@ DEFINE_EVENT(power_domain, power_domain_target, ); /* - * The pm qos events are used for pm qos update + * CPU latency QoS events used for global CPU latency QoS list updates */ -DECLARE_EVENT_CLASS(pm_qos_request, +DECLARE_EVENT_CLASS(cpu_latency_qos_request, - TP_PROTO(int pm_qos_class, s32 value), + TP_PROTO(s32 value), - TP_ARGS(pm_qos_class, value), + TP_ARGS(value), TP_STRUCT__entry( - __field( int, pm_qos_class ) __field( s32, value ) ), TP_fast_assign( - __entry->pm_qos_class = pm_qos_class; __entry->value = value; ), - TP_printk("pm_qos_class=%s value=%d", - __print_symbolic(__entry->pm_qos_class, - { PM_QOS_CPU_DMA_LATENCY, "CPU_DMA_LATENCY" }), + TP_printk("CPU_DMA_LATENCY value=%d", __entry->value) ); -DEFINE_EVENT(pm_qos_request, pm_qos_add_request, +DEFINE_EVENT(cpu_latency_qos_request, pm_qos_add_request, - TP_PROTO(int pm_qos_class, s32 value), + TP_PROTO(s32 value), - TP_ARGS(pm_qos_class, value) + TP_ARGS(value) ); -DEFINE_EVENT(pm_qos_request, pm_qos_update_request, +DEFINE_EVENT(cpu_latency_qos_request, pm_qos_update_request, - TP_PROTO(int pm_qos_class, s32 value), + TP_PROTO(s32 value), - TP_ARGS(pm_qos_class, value) + TP_ARGS(value) ); 
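The include/trace/events/power.h hunk above collapses the old pm_qos_request event class, which tagged every sample with a pm_qos_class id, into a cpu_latency_qos_request class that records only the requested s32 latency value (CPU_DMA_LATENCY is the sole remaining user). Since DEFINE_EVENT() generates a trace_<name>() helper with the class's TP_PROTO signature, call sites lose their first argument. The fragment below is an illustrative sketch of that caller-side change under these assumptions, not the actual kernel/power/qos.c code:

#include <trace/events/power.h>

/* Hypothetical update path, shown only to illustrate the new signature. */
static void example_cpu_latency_update(s32 new_value)
{
	/* before: trace_pm_qos_update_request(PM_QOS_CPU_DMA_LATENCY, new_value); */
	trace_pm_qos_update_request(new_value);
}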
-DEFINE_EVENT(pm_qos_request, pm_qos_remove_request, +DEFINE_EVENT(cpu_latency_qos_request, pm_qos_remove_request, - TP_PROTO(int pm_qos_class, s32 value), + TP_PROTO(s32 value), - TP_ARGS(pm_qos_class, value) -); - -TRACE_EVENT(pm_qos_update_request_timeout, - - TP_PROTO(int pm_qos_class, s32 value, unsigned long timeout_us), - - TP_ARGS(pm_qos_class, value, timeout_us), - - TP_STRUCT__entry( - __field( int, pm_qos_class ) - __field( s32, value ) - __field( unsigned long, timeout_us ) - ), - - TP_fast_assign( - __entry->pm_qos_class = pm_qos_class; - __entry->value = value; - __entry->timeout_us = timeout_us; - ), - - TP_printk("pm_qos_class=%s value=%d, timeout_us=%ld", - __print_symbolic(__entry->pm_qos_class, - { PM_QOS_CPU_DMA_LATENCY, "CPU_DMA_LATENCY" }), - __entry->value, __entry->timeout_us) + TP_ARGS(value) ); +/* + * General PM QoS events used for updates of PM QoS request lists + */ DECLARE_EVENT_CLASS(pm_qos_update, TP_PROTO(enum pm_qos_req_action action, int prev_value, int curr_value), diff --git a/include/trace/events/qla.h b/include/trace/events/qla.h new file mode 100644 index 000000000000..b71f680968eb --- /dev/null +++ b/include/trace/events/qla.h @@ -0,0 +1,39 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#if !defined(_TRACE_QLA_H_) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_QLA_H_ + +#include <linux/tracepoint.h> + +#undef TRACE_SYSTEM +#define TRACE_SYSTEM qla + +#define QLA_MSG_MAX 256 + +DECLARE_EVENT_CLASS(qla_log_event, + TP_PROTO(const char *buf, + struct va_format *vaf), + + TP_ARGS(buf, vaf), + + TP_STRUCT__entry( + __string(buf, buf) + __dynamic_array(char, msg, QLA_MSG_MAX) + ), + TP_fast_assign( + __assign_str(buf, buf); + vsnprintf(__get_str(msg), QLA_MSG_MAX, vaf->fmt, *vaf->va); + ), + + TP_printk("%s %s", __get_str(buf), __get_str(msg)) +); + +DEFINE_EVENT(qla_log_event, ql_dbg_log, + TP_PROTO(const char *buf, struct va_format *vaf), + TP_ARGS(buf, vaf) +); + +#endif /* _TRACE_QLA_H */ + +#define TRACE_INCLUDE_FILE qla + +#include <trace/define_trace.h> diff --git a/include/trace/events/rcu.h b/include/trace/events/rcu.h index 5e49b06e8104..f9a7811148e2 100644 --- a/include/trace/events/rcu.h +++ b/include/trace/events/rcu.h @@ -624,6 +624,34 @@ TRACE_EVENT_RCU(rcu_invoke_kfree_callback, ); /* + * Tracepoint for the invocation of a single RCU callback of the special + * kfree_bulk() form. The first argument is the RCU flavor, the second + * argument is a number of elements in array to free, the third is an + * address of the array holding nr_records entries. + */ +TRACE_EVENT_RCU(rcu_invoke_kfree_bulk_callback, + + TP_PROTO(const char *rcuname, unsigned long nr_records, void **p), + + TP_ARGS(rcuname, nr_records, p), + + TP_STRUCT__entry( + __field(const char *, rcuname) + __field(unsigned long, nr_records) + __field(void **, p) + ), + + TP_fast_assign( + __entry->rcuname = rcuname; + __entry->nr_records = nr_records; + __entry->p = p; + ), + + TP_printk("%s bulk=0x%p nr_records=%lu", + __entry->rcuname, __entry->p, __entry->nr_records) +); + +/* * Tracepoint for exiting rcu_do_batch after RCU callbacks have been * invoked. The first argument is the name of the RCU flavor, * the second argument is number of callbacks actually invoked, @@ -712,6 +740,7 @@ TRACE_EVENT_RCU(rcu_torture_read, * "Begin": rcu_barrier() started. * "EarlyExit": rcu_barrier() piggybacked, thus early exit. * "Inc1": rcu_barrier() piggyback check counter incremented. + * "OfflineNoCBQ": rcu_barrier() found offline no-CBs CPU with callbacks. 
* "OnlineQ": rcu_barrier() found online CPU with callbacks. * "OnlineNQ": rcu_barrier() found online CPU, no callbacks. * "IRQ": An rcu_barrier_callback() callback posted on remote CPU. diff --git a/include/trace/events/rpcgss.h b/include/trace/events/rpcgss.h index 9827f535f032..32d88c4fb063 100644 --- a/include/trace/events/rpcgss.h +++ b/include/trace/events/rpcgss.h @@ -126,7 +126,7 @@ DEFINE_GSSAPI_EVENT(verify_mic); DEFINE_GSSAPI_EVENT(wrap); DEFINE_GSSAPI_EVENT(unwrap); -TRACE_EVENT(rpcgss_accept_upcall, +TRACE_EVENT(rpcgss_svc_accept_upcall, TP_PROTO( __be32 xid, u32 major_status, @@ -154,6 +154,29 @@ TRACE_EVENT(rpcgss_accept_upcall, ) ); +TRACE_EVENT(rpcgss_svc_accept, + TP_PROTO( + __be32 xid, + size_t len + ), + + TP_ARGS(xid, len), + + TP_STRUCT__entry( + __field(u32, xid) + __field(size_t, len) + ), + + TP_fast_assign( + __entry->xid = be32_to_cpu(xid); + __entry->len = len; + ), + + TP_printk("xid=0x%08x len=%zu", + __entry->xid, __entry->len + ) +); + /** ** GSS auth unwrap failures @@ -268,6 +291,40 @@ TRACE_EVENT(rpcgss_need_reencode, __entry->ret ? "" : "un") ); +DECLARE_EVENT_CLASS(rpcgss_svc_seqno_class, + TP_PROTO( + __be32 xid, + u32 seqno + ), + + TP_ARGS(xid, seqno), + + TP_STRUCT__entry( + __field(u32, xid) + __field(u32, seqno) + ), + + TP_fast_assign( + __entry->xid = be32_to_cpu(xid); + __entry->seqno = seqno; + ), + + TP_printk("xid=0x%08x seqno=%u, request discarded", + __entry->xid, __entry->seqno) +); + +#define DEFINE_SVC_SEQNO_EVENT(name) \ + DEFINE_EVENT(rpcgss_svc_seqno_class, rpcgss_svc_##name, \ + TP_PROTO( \ + __be32 xid, \ + u32 seqno \ + ), \ + TP_ARGS(xid, seqno)) + +DEFINE_SVC_SEQNO_EVENT(large_seqno); +DEFINE_SVC_SEQNO_EVENT(old_seqno); + + /** ** gssd upcall related trace events **/ diff --git a/include/trace/events/rpcrdma.h b/include/trace/events/rpcrdma.h index c0e4c93324f5..051f26fedc4d 100644 --- a/include/trace/events/rpcrdma.h +++ b/include/trace/events/rpcrdma.h @@ -104,12 +104,12 @@ DECLARE_EVENT_CLASS(xprtrdma_connect_class, TP_fast_assign( __entry->r_xprt = r_xprt; __entry->rc = rc; - __entry->connect_status = r_xprt->rx_ep.rep_connected; + __entry->connect_status = r_xprt->rx_ep->re_connect_status; __assign_str(addr, rpcrdma_addrstr(r_xprt)); __assign_str(port, rpcrdma_portstr(r_xprt)); ), - TP_printk("peer=[%s]:%s r_xprt=%p: rc=%d connect status=%d", + TP_printk("peer=[%s]:%s r_xprt=%p: rc=%d connection status=%d", __get_str(addr), __get_str(port), __entry->r_xprt, __entry->rc, __entry->connect_status ) @@ -228,20 +228,20 @@ DECLARE_EVENT_CLASS(xprtrdma_frwr_done, TP_ARGS(wc, frwr), TP_STRUCT__entry( - __field(const void *, mr) + __field(u32, mr_id) __field(unsigned int, status) __field(unsigned int, vendor_err) ), TP_fast_assign( - __entry->mr = container_of(frwr, struct rpcrdma_mr, frwr); + __entry->mr_id = frwr->fr_mr->res.id; __entry->status = wc->status; __entry->vendor_err = __entry->status ? 
wc->vendor_err : 0; ), TP_printk( - "mr=%p: %s (%u/0x%x)", - __entry->mr, rdma_show_wc_status(__entry->status), + "mr.id=%u: %s (%u/0x%x)", + __entry->mr_id, rdma_show_wc_status(__entry->status), __entry->status, __entry->vendor_err ) ); @@ -274,7 +274,8 @@ DECLARE_EVENT_CLASS(xprtrdma_mr, TP_ARGS(mr), TP_STRUCT__entry( - __field(const void *, mr) + __field(u32, mr_id) + __field(int, nents) __field(u32, handle) __field(u32, length) __field(u64, offset) @@ -282,15 +283,16 @@ DECLARE_EVENT_CLASS(xprtrdma_mr, ), TP_fast_assign( - __entry->mr = mr; + __entry->mr_id = mr->frwr.fr_mr->res.id; + __entry->nents = mr->mr_nents; __entry->handle = mr->mr_handle; __entry->length = mr->mr_length; __entry->offset = mr->mr_offset; __entry->dir = mr->mr_dir; ), - TP_printk("mr=%p %u@0x%016llx:0x%08x (%s)", - __entry->mr, __entry->length, + TP_printk("mr.id=%u nents=%d %u@0x%016llx:0x%08x (%s)", + __entry->mr_id, __entry->nents, __entry->length, (unsigned long long)__entry->offset, __entry->handle, xprtrdma_show_direction(__entry->dir) ) @@ -340,68 +342,37 @@ DECLARE_EVENT_CLASS(xprtrdma_cb_event, ** Connection events **/ -TRACE_EVENT(xprtrdma_cm_event, - TP_PROTO( - const struct rpcrdma_xprt *r_xprt, - struct rdma_cm_event *event - ), - - TP_ARGS(r_xprt, event), - - TP_STRUCT__entry( - __field(const void *, r_xprt) - __field(unsigned int, event) - __field(int, status) - __string(addr, rpcrdma_addrstr(r_xprt)) - __string(port, rpcrdma_portstr(r_xprt)) - ), - - TP_fast_assign( - __entry->r_xprt = r_xprt; - __entry->event = event->event; - __entry->status = event->status; - __assign_str(addr, rpcrdma_addrstr(r_xprt)); - __assign_str(port, rpcrdma_portstr(r_xprt)); - ), - - TP_printk("peer=[%s]:%s r_xprt=%p: %s (%u/%d)", - __get_str(addr), __get_str(port), - __entry->r_xprt, rdma_show_cm_event(__entry->event), - __entry->event, __entry->status - ) -); - TRACE_EVENT(xprtrdma_inline_thresh, TP_PROTO( - const struct rpcrdma_xprt *r_xprt + const struct rpcrdma_ep *ep ), - TP_ARGS(r_xprt), + TP_ARGS(ep), TP_STRUCT__entry( - __field(const void *, r_xprt) __field(unsigned int, inline_send) __field(unsigned int, inline_recv) __field(unsigned int, max_send) __field(unsigned int, max_recv) - __string(addr, rpcrdma_addrstr(r_xprt)) - __string(port, rpcrdma_portstr(r_xprt)) + __array(unsigned char, srcaddr, sizeof(struct sockaddr_in6)) + __array(unsigned char, dstaddr, sizeof(struct sockaddr_in6)) ), TP_fast_assign( - const struct rpcrdma_ep *ep = &r_xprt->rx_ep; + const struct rdma_cm_id *id = ep->re_id; - __entry->r_xprt = r_xprt; - __entry->inline_send = ep->rep_inline_send; - __entry->inline_recv = ep->rep_inline_recv; - __entry->max_send = ep->rep_max_inline_send; - __entry->max_recv = ep->rep_max_inline_recv; - __assign_str(addr, rpcrdma_addrstr(r_xprt)); - __assign_str(port, rpcrdma_portstr(r_xprt)); + __entry->inline_send = ep->re_inline_send; + __entry->inline_recv = ep->re_inline_recv; + __entry->max_send = ep->re_max_inline_send; + __entry->max_recv = ep->re_max_inline_recv; + memcpy(__entry->srcaddr, &id->route.addr.src_addr, + sizeof(struct sockaddr_in6)); + memcpy(__entry->dstaddr, &id->route.addr.dst_addr, + sizeof(struct sockaddr_in6)); ), - TP_printk("peer=[%s]:%s r_xprt=%p neg send/recv=%u/%u, calc send/recv=%u/%u", - __get_str(addr), __get_str(port), __entry->r_xprt, + TP_printk("%pISpc -> %pISpc neg send/recv=%u/%u, calc send/recv=%u/%u", + __entry->srcaddr, __entry->dstaddr, __entry->inline_send, __entry->inline_recv, __entry->max_send, __entry->max_recv ) @@ -409,11 +380,10 @@ 
TRACE_EVENT(xprtrdma_inline_thresh, DEFINE_CONN_EVENT(connect); DEFINE_CONN_EVENT(disconnect); +DEFINE_CONN_EVENT(flush_dct); DEFINE_RXPRT_EVENT(xprtrdma_create); DEFINE_RXPRT_EVENT(xprtrdma_op_destroy); -DEFINE_RXPRT_EVENT(xprtrdma_remove); -DEFINE_RXPRT_EVENT(xprtrdma_reinsert); DEFINE_RXPRT_EVENT(xprtrdma_op_inject_dsc); DEFINE_RXPRT_EVENT(xprtrdma_op_close); DEFINE_RXPRT_EVENT(xprtrdma_op_setport); @@ -480,32 +450,33 @@ TRACE_EVENT(xprtrdma_op_set_cto, TRACE_EVENT(xprtrdma_qp_event, TP_PROTO( - const struct rpcrdma_xprt *r_xprt, + const struct rpcrdma_ep *ep, const struct ib_event *event ), - TP_ARGS(r_xprt, event), + TP_ARGS(ep, event), TP_STRUCT__entry( - __field(const void *, r_xprt) - __field(unsigned int, event) + __field(unsigned long, event) __string(name, event->device->name) - __string(addr, rpcrdma_addrstr(r_xprt)) - __string(port, rpcrdma_portstr(r_xprt)) + __array(unsigned char, srcaddr, sizeof(struct sockaddr_in6)) + __array(unsigned char, dstaddr, sizeof(struct sockaddr_in6)) ), TP_fast_assign( - __entry->r_xprt = r_xprt; + const struct rdma_cm_id *id = ep->re_id; + __entry->event = event->event; __assign_str(name, event->device->name); - __assign_str(addr, rpcrdma_addrstr(r_xprt)); - __assign_str(port, rpcrdma_portstr(r_xprt)); + memcpy(__entry->srcaddr, &id->route.addr.src_addr, + sizeof(struct sockaddr_in6)); + memcpy(__entry->dstaddr, &id->route.addr.dst_addr, + sizeof(struct sockaddr_in6)); ), - TP_printk("peer=[%s]:%s r_xprt=%p: dev %s: %s (%u)", - __get_str(addr), __get_str(port), __entry->r_xprt, - __get_str(name), rdma_show_ib_event(__entry->event), - __entry->event + TP_printk("%pISpc -> %pISpc device=%s %s (%lu)", + __entry->srcaddr, __entry->dstaddr, __get_str(name), + rdma_show_ib_event(__entry->event), __entry->event ) ); @@ -801,7 +772,7 @@ TRACE_EVENT(xprtrdma_post_recvs, __entry->r_xprt = r_xprt; __entry->count = count; __entry->status = status; - __entry->posted = r_xprt->rx_ep.rep_receive_count; + __entry->posted = r_xprt->rx_ep->re_receive_count; __assign_str(addr, rpcrdma_addrstr(r_xprt)); __assign_str(port, rpcrdma_portstr(r_xprt)); ), @@ -920,17 +891,17 @@ TRACE_EVENT(xprtrdma_frwr_alloc, TP_ARGS(mr, rc), TP_STRUCT__entry( - __field(const void *, mr) + __field(u32, mr_id) __field(int, rc) ), TP_fast_assign( - __entry->mr = mr; - __entry->rc = rc; + __entry->mr_id = mr->frwr.fr_mr->res.id; + __entry->rc = rc; ), - TP_printk("mr=%p: rc=%d", - __entry->mr, __entry->rc + TP_printk("mr.id=%u: rc=%d", + __entry->mr_id, __entry->rc ) ); @@ -943,7 +914,8 @@ TRACE_EVENT(xprtrdma_frwr_dereg, TP_ARGS(mr, rc), TP_STRUCT__entry( - __field(const void *, mr) + __field(u32, mr_id) + __field(int, nents) __field(u32, handle) __field(u32, length) __field(u64, offset) @@ -952,7 +924,8 @@ TRACE_EVENT(xprtrdma_frwr_dereg, ), TP_fast_assign( - __entry->mr = mr; + __entry->mr_id = mr->frwr.fr_mr->res.id; + __entry->nents = mr->mr_nents; __entry->handle = mr->mr_handle; __entry->length = mr->mr_length; __entry->offset = mr->mr_offset; @@ -960,8 +933,8 @@ TRACE_EVENT(xprtrdma_frwr_dereg, __entry->rc = rc; ), - TP_printk("mr=%p %u@0x%016llx:0x%08x (%s): rc=%d", - __entry->mr, __entry->length, + TP_printk("mr.id=%u nents=%d %u@0x%016llx:0x%08x (%s): rc=%d", + __entry->mr_id, __entry->nents, __entry->length, (unsigned long long)__entry->offset, __entry->handle, xprtrdma_show_direction(__entry->dir), __entry->rc @@ -977,21 +950,21 @@ TRACE_EVENT(xprtrdma_frwr_sgerr, TP_ARGS(mr, sg_nents), TP_STRUCT__entry( - __field(const void *, mr) + __field(u32, mr_id) __field(u64, addr) 
__field(u32, dir) __field(int, nents) ), TP_fast_assign( - __entry->mr = mr; + __entry->mr_id = mr->frwr.fr_mr->res.id; __entry->addr = mr->mr_sg->dma_address; __entry->dir = mr->mr_dir; __entry->nents = sg_nents; ), - TP_printk("mr=%p dma addr=0x%llx (%s) sg_nents=%d", - __entry->mr, __entry->addr, + TP_printk("mr.id=%u DMA addr=0x%llx (%s) sg_nents=%d", + __entry->mr_id, __entry->addr, xprtrdma_show_direction(__entry->dir), __entry->nents ) @@ -1006,7 +979,7 @@ TRACE_EVENT(xprtrdma_frwr_maperr, TP_ARGS(mr, num_mapped), TP_STRUCT__entry( - __field(const void *, mr) + __field(u32, mr_id) __field(u64, addr) __field(u32, dir) __field(int, num_mapped) @@ -1014,15 +987,15 @@ TRACE_EVENT(xprtrdma_frwr_maperr, ), TP_fast_assign( - __entry->mr = mr; + __entry->mr_id = mr->frwr.fr_mr->res.id; __entry->addr = mr->mr_sg->dma_address; __entry->dir = mr->mr_dir; __entry->num_mapped = num_mapped; __entry->nents = mr->mr_nents; ), - TP_printk("mr=%p dma addr=0x%llx (%s) nents=%d of %d", - __entry->mr, __entry->addr, + TP_printk("mr.id=%u DMA addr=0x%llx (%s) nents=%d of %d", + __entry->mr_id, __entry->addr, xprtrdma_show_direction(__entry->dir), __entry->num_mapped, __entry->nents ) @@ -1031,7 +1004,7 @@ TRACE_EVENT(xprtrdma_frwr_maperr, DEFINE_MR_EVENT(localinv); DEFINE_MR_EVENT(map); DEFINE_MR_EVENT(unmap); -DEFINE_MR_EVENT(remoteinv); +DEFINE_MR_EVENT(reminv); DEFINE_MR_EVENT(recycle); TRACE_EVENT(xprtrdma_dma_maperr, @@ -1469,7 +1442,7 @@ DECLARE_EVENT_CLASS(svcrdma_segment_event, ); #define DEFINE_SEGMENT_EVENT(name) \ - DEFINE_EVENT(svcrdma_segment_event, svcrdma_encode_##name,\ + DEFINE_EVENT(svcrdma_segment_event, svcrdma_##name,\ TP_PROTO( \ u32 handle, \ u32 length, \ @@ -1477,8 +1450,11 @@ DECLARE_EVENT_CLASS(svcrdma_segment_event, ), \ TP_ARGS(handle, length, offset)) -DEFINE_SEGMENT_EVENT(rseg); -DEFINE_SEGMENT_EVENT(wseg); +DEFINE_SEGMENT_EVENT(decode_wseg); +DEFINE_SEGMENT_EVENT(encode_rseg); +DEFINE_SEGMENT_EVENT(send_rseg); +DEFINE_SEGMENT_EVENT(encode_wseg); +DEFINE_SEGMENT_EVENT(send_wseg); DECLARE_EVENT_CLASS(svcrdma_chunk_event, TP_PROTO( @@ -1501,17 +1477,19 @@ DECLARE_EVENT_CLASS(svcrdma_chunk_event, ); #define DEFINE_CHUNK_EVENT(name) \ - DEFINE_EVENT(svcrdma_chunk_event, svcrdma_encode_##name,\ + DEFINE_EVENT(svcrdma_chunk_event, svcrdma_##name, \ TP_PROTO( \ u32 length \ ), \ TP_ARGS(length)) -DEFINE_CHUNK_EVENT(pzr); -DEFINE_CHUNK_EVENT(write); -DEFINE_CHUNK_EVENT(reply); +DEFINE_CHUNK_EVENT(send_pzr); +DEFINE_CHUNK_EVENT(encode_write_chunk); +DEFINE_CHUNK_EVENT(send_write_chunk); +DEFINE_CHUNK_EVENT(encode_read_chunk); +DEFINE_CHUNK_EVENT(send_reply_chunk); -TRACE_EVENT(svcrdma_encode_read, +TRACE_EVENT(svcrdma_send_read_chunk, TP_PROTO( u32 length, u32 position @@ -1634,6 +1612,24 @@ TRACE_EVENT(svcrdma_dma_map_rwctx, ) ); +TRACE_EVENT(svcrdma_send_pullup, + TP_PROTO( + unsigned int len + ), + + TP_ARGS(len), + + TP_STRUCT__entry( + __field(unsigned int, len) + ), + + TP_fast_assign( + __entry->len = len; + ), + + TP_printk("len=%u", __entry->len) +); + TRACE_EVENT(svcrdma_send_failed, TP_PROTO( const struct svc_rqst *rqst, @@ -1813,34 +1809,6 @@ TRACE_EVENT(svcrdma_post_rw, DEFINE_SENDCOMP_EVENT(read); DEFINE_SENDCOMP_EVENT(write); -TRACE_EVENT(svcrdma_cm_event, - TP_PROTO( - const struct rdma_cm_event *event, - const struct sockaddr *sap - ), - - TP_ARGS(event, sap), - - TP_STRUCT__entry( - __field(unsigned int, event) - __field(int, status) - __array(__u8, addr, INET6_ADDRSTRLEN + 10) - ), - - TP_fast_assign( - __entry->event = event->event; - __entry->status = 
event->status; - snprintf(__entry->addr, sizeof(__entry->addr) - 1, - "%pISpc", sap); - ), - - TP_printk("addr=%s event=%s (%u/%d)", - __entry->addr, - rdma_show_cm_event(__entry->event), - __entry->event, __entry->status - ) -); - TRACE_EVENT(svcrdma_qp_error, TP_PROTO( const struct ib_event *event, diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h index 420e80e56e55..ed168b0e2c53 100644 --- a/include/trace/events/sched.h +++ b/include/trace/events/sched.h @@ -487,7 +487,11 @@ TRACE_EVENT(sched_process_hang, ); #endif /* CONFIG_DETECT_HUNG_TASK */ -DECLARE_EVENT_CLASS(sched_move_task_template, +/* + * Tracks migration of tasks from one runqueue to another. Can be used to + * detect if automatic NUMA balancing is bouncing between nodes. + */ +TRACE_EVENT(sched_move_numa, TP_PROTO(struct task_struct *tsk, int src_cpu, int dst_cpu), @@ -519,23 +523,7 @@ DECLARE_EVENT_CLASS(sched_move_task_template, __entry->dst_cpu, __entry->dst_nid) ); -/* - * Tracks migration of tasks from one runqueue to another. Can be used to - * detect if automatic NUMA balancing is bouncing between nodes - */ -DEFINE_EVENT(sched_move_task_template, sched_move_numa, - TP_PROTO(struct task_struct *tsk, int src_cpu, int dst_cpu), - - TP_ARGS(tsk, src_cpu, dst_cpu) -); - -DEFINE_EVENT(sched_move_task_template, sched_stick_numa, - TP_PROTO(struct task_struct *tsk, int src_cpu, int dst_cpu), - - TP_ARGS(tsk, src_cpu, dst_cpu) -); - -TRACE_EVENT(sched_swap_numa, +DECLARE_EVENT_CLASS(sched_numa_pair_template, TP_PROTO(struct task_struct *src_tsk, int src_cpu, struct task_struct *dst_tsk, int dst_cpu), @@ -561,11 +549,11 @@ TRACE_EVENT(sched_swap_numa, __entry->src_ngid = task_numa_group_id(src_tsk); __entry->src_cpu = src_cpu; __entry->src_nid = cpu_to_node(src_cpu); - __entry->dst_pid = task_pid_nr(dst_tsk); - __entry->dst_tgid = task_tgid_nr(dst_tsk); - __entry->dst_ngid = task_numa_group_id(dst_tsk); + __entry->dst_pid = dst_tsk ? task_pid_nr(dst_tsk) : 0; + __entry->dst_tgid = dst_tsk ? task_tgid_nr(dst_tsk) : 0; + __entry->dst_ngid = dst_tsk ? task_numa_group_id(dst_tsk) : 0; __entry->dst_cpu = dst_cpu; - __entry->dst_nid = cpu_to_node(dst_cpu); + __entry->dst_nid = dst_cpu >= 0 ? cpu_to_node(dst_cpu) : -1; ), TP_printk("src_pid=%d src_tgid=%d src_ngid=%d src_cpu=%d src_nid=%d dst_pid=%d dst_tgid=%d dst_ngid=%d dst_cpu=%d dst_nid=%d", @@ -575,6 +563,23 @@ TRACE_EVENT(sched_swap_numa, __entry->dst_cpu, __entry->dst_nid) ); +DEFINE_EVENT(sched_numa_pair_template, sched_stick_numa, + + TP_PROTO(struct task_struct *src_tsk, int src_cpu, + struct task_struct *dst_tsk, int dst_cpu), + + TP_ARGS(src_tsk, src_cpu, dst_tsk, dst_cpu) +); + +DEFINE_EVENT(sched_numa_pair_template, sched_swap_numa, + + TP_PROTO(struct task_struct *src_tsk, int src_cpu, + struct task_struct *dst_tsk, int dst_cpu), + + TP_ARGS(src_tsk, src_cpu, dst_tsk, dst_cpu) +); + + /* * Tracepoint for waking a polling cpu without an IPI. 
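The sched_move_numa, sched_stick_numa and sched_swap_numa events above remain ordinary tracepoints (now sharing the NULL-tolerant sched_numa_pair_template), so the usual tracefs plumbing is enough to watch NUMA balancing decisions from user space. A minimal sketch, assuming tracefs is mounted at /sys/kernel/tracing; mount point and event paths vary by system:

/* Hedged sketch: enable the NUMA balancing tracepoints and tail trace_pipe. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static void echo(const char *path, const char *val)
{
	int fd = open(path, O_WRONLY);

	if (fd >= 0) {
		write(fd, val, strlen(val));
		close(fd);
	}
}

int main(void)
{
	char buf[4096];
	ssize_t n;
	int fd;

	echo("/sys/kernel/tracing/events/sched/sched_move_numa/enable", "1");
	echo("/sys/kernel/tracing/events/sched/sched_stick_numa/enable", "1");
	echo("/sys/kernel/tracing/events/sched/sched_swap_numa/enable", "1");

	fd = open("/sys/kernel/tracing/trace_pipe", O_RDONLY);
	if (fd < 0)
		return 1;
	while ((n = read(fd, buf, sizeof(buf))) > 0)
		fwrite(buf, 1, n, stdout);
	close(fd);
	return 0;
}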
*/ @@ -613,6 +618,10 @@ DECLARE_TRACE(pelt_dl_tp, TP_PROTO(struct rq *rq), TP_ARGS(rq)); +DECLARE_TRACE(pelt_thermal_tp, + TP_PROTO(struct rq *rq), + TP_ARGS(rq)); + DECLARE_TRACE(pelt_irq_tp, TP_PROTO(struct rq *rq), TP_ARGS(rq)); diff --git a/include/trace/events/sunrpc.h b/include/trace/events/sunrpc.h index ee993575d2fa..ffd2215950dc 100644 --- a/include/trace/events/sunrpc.h +++ b/include/trace/events/sunrpc.h @@ -14,6 +14,49 @@ #include <linux/net.h> #include <linux/tracepoint.h> +DECLARE_EVENT_CLASS(xdr_buf_class, + TP_PROTO( + const struct xdr_buf *xdr + ), + + TP_ARGS(xdr), + + TP_STRUCT__entry( + __field(const void *, head_base) + __field(size_t, head_len) + __field(const void *, tail_base) + __field(size_t, tail_len) + __field(unsigned int, page_len) + __field(unsigned int, msg_len) + ), + + TP_fast_assign( + __entry->head_base = xdr->head[0].iov_base; + __entry->head_len = xdr->head[0].iov_len; + __entry->tail_base = xdr->tail[0].iov_base; + __entry->tail_len = xdr->tail[0].iov_len; + __entry->page_len = xdr->page_len; + __entry->msg_len = xdr->len; + ), + + TP_printk("head=[%p,%zu] page=%u tail=[%p,%zu] len=%u", + __entry->head_base, __entry->head_len, __entry->page_len, + __entry->tail_base, __entry->tail_len, __entry->msg_len + ) +); + +#define DEFINE_XDRBUF_EVENT(name) \ + DEFINE_EVENT(xdr_buf_class, name, \ + TP_PROTO( \ + const struct xdr_buf *xdr \ + ), \ + TP_ARGS(xdr)) + +DEFINE_XDRBUF_EVENT(xprt_sendto); +DEFINE_XDRBUF_EVENT(xprt_recvfrom); +DEFINE_XDRBUF_EVENT(svc_recvfrom); +DEFINE_XDRBUF_EVENT(svc_sendto); + TRACE_DEFINE_ENUM(RPC_AUTH_OK); TRACE_DEFINE_ENUM(RPC_AUTH_BADCRED); TRACE_DEFINE_ENUM(RPC_AUTH_REJECTEDCRED); @@ -1292,6 +1335,39 @@ DECLARE_EVENT_CLASS(svc_deferred_event, DEFINE_SVC_DEFERRED_EVENT(drop); DEFINE_SVC_DEFERRED_EVENT(revisit); +DECLARE_EVENT_CLASS(cache_event, + TP_PROTO( + const struct cache_detail *cd, + const struct cache_head *h + ), + + TP_ARGS(cd, h), + + TP_STRUCT__entry( + __field(const struct cache_head *, h) + __string(name, cd->name) + ), + + TP_fast_assign( + __entry->h = h; + __assign_str(name, cd->name); + ), + + TP_printk("cache=%s entry=%p", __get_str(name), __entry->h) +); +#define DEFINE_CACHE_EVENT(name) \ + DEFINE_EVENT(cache_event, name, \ + TP_PROTO( \ + const struct cache_detail *cd, \ + const struct cache_head *h \ + ), \ + TP_ARGS(cd, h)) +DEFINE_CACHE_EVENT(cache_entry_expired); +DEFINE_CACHE_EVENT(cache_entry_upcall); +DEFINE_CACHE_EVENT(cache_entry_update); +DEFINE_CACHE_EVENT(cache_entry_make_negative); +DEFINE_CACHE_EVENT(cache_entry_no_listener); + #endif /* _TRACE_SUNRPC_H */ #include <trace/define_trace.h> diff --git a/include/trace/events/target.h b/include/trace/events/target.h index 914a872dd343..77408edd29d2 100644 --- a/include/trace/events/target.h +++ b/include/trace/events/target.h @@ -137,6 +137,7 @@ TRACE_EVENT(target_sequencer_start, TP_STRUCT__entry( __field( unsigned int, unpacked_lun ) + __field( unsigned long long, tag ) __field( unsigned int, opcode ) __field( unsigned int, data_length ) __field( unsigned int, task_attribute ) @@ -146,6 +147,7 @@ TRACE_EVENT(target_sequencer_start, TP_fast_assign( __entry->unpacked_lun = cmd->orig_fe_lun; + __entry->tag = cmd->tag; __entry->opcode = cmd->t_task_cdb[0]; __entry->data_length = cmd->data_length; __entry->task_attribute = cmd->sam_task_attr; @@ -153,9 +155,9 @@ TRACE_EVENT(target_sequencer_start, __assign_str(initiator, cmd->se_sess->se_node_acl->initiatorname); ), - TP_printk("%s -> LUN %03u %s data_length %6u CDB %s (TA:%s C:%02x)", + TP_printk("%s 
-> LUN %03u tag %#llx %s data_length %6u CDB %s (TA:%s C:%02x)", __get_str(initiator), __entry->unpacked_lun, - show_opcode_name(__entry->opcode), + __entry->tag, show_opcode_name(__entry->opcode), __entry->data_length, __print_hex(__entry->cdb, 16), show_task_attribute_name(__entry->task_attribute), scsi_command_size(__entry->cdb) <= 16 ? @@ -172,6 +174,7 @@ TRACE_EVENT(target_cmd_complete, TP_STRUCT__entry( __field( unsigned int, unpacked_lun ) + __field( unsigned long long, tag ) __field( unsigned int, opcode ) __field( unsigned int, data_length ) __field( unsigned int, task_attribute ) @@ -184,6 +187,7 @@ TRACE_EVENT(target_cmd_complete, TP_fast_assign( __entry->unpacked_lun = cmd->orig_fe_lun; + __entry->tag = cmd->tag; __entry->opcode = cmd->t_task_cdb[0]; __entry->data_length = cmd->data_length; __entry->task_attribute = cmd->sam_task_attr; @@ -195,8 +199,9 @@ TRACE_EVENT(target_cmd_complete, __assign_str(initiator, cmd->se_sess->se_node_acl->initiatorname); ), - TP_printk("%s <- LUN %03u status %s (sense len %d%s%s) %s data_length %6u CDB %s (TA:%s C:%02x)", + TP_printk("%s <- LUN %03u tag %#llx status %s (sense len %d%s%s) %s data_length %6u CDB %s (TA:%s C:%02x)", __get_str(initiator), __entry->unpacked_lun, + __entry->tag, show_scsi_status_name(__entry->scsi_status), __entry->sense_length, __entry->sense_length ? " / " : "", __print_hex(__entry->sense_data, __entry->sense_length), diff --git a/include/trace/events/vmscan.h b/include/trace/events/vmscan.h index a5ab2973e8dc..74bb594ccb25 100644 --- a/include/trace/events/vmscan.h +++ b/include/trace/events/vmscan.h @@ -323,7 +323,7 @@ TRACE_EVENT(mm_vmscan_writepage, TP_fast_assign( __entry->pfn = page_to_pfn(page); __entry->reclaim_flags = trace_reclaim_flags( - page_is_file_cache(page)); + page_is_file_lru(page)); ), TP_printk("page=%p pfn=%lu flags=%s", diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h index ac3879829bb5..65f69723cbdc 100644 --- a/include/uapi/drm/amdgpu_drm.h +++ b/include/uapi/drm/amdgpu_drm.h @@ -125,9 +125,10 @@ extern "C" { /* Flag that BO sharing will be explicitly synchronized */ #define AMDGPU_GEM_CREATE_EXPLICIT_SYNC (1 << 7) /* Flag that indicates allocating MQD gart on GFX9, where the mtype - * for the second page onward should be set to NC. + * for the second page onward should be set to NC. It should never + * be used by user space applications. */ -#define AMDGPU_GEM_CREATE_MQD_GFX9 (1 << 8) +#define AMDGPU_GEM_CREATE_CP_MQD_GFX9 (1 << 8) /* Flag that BO may contain sensitive data that must be wiped before * releasing the memory */ diff --git a/include/uapi/drm/drm.h b/include/uapi/drm/drm.h index 868bf7996c0f..808b48a93330 100644 --- a/include/uapi/drm/drm.h +++ b/include/uapi/drm/drm.h @@ -948,6 +948,8 @@ extern "C" { #define DRM_IOCTL_SYNCOBJ_TRANSFER DRM_IOWR(0xCC, struct drm_syncobj_transfer) #define DRM_IOCTL_SYNCOBJ_TIMELINE_SIGNAL DRM_IOWR(0xCD, struct drm_syncobj_timeline_array) +#define DRM_IOCTL_MODE_GETFB2 DRM_IOWR(0xCE, struct drm_mode_fb_cmd2) + /** * Device specific ioctls should only be in their respective headers * The device specific ioctl range is from 0x40 to 0x9f. diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h index 829c0a48577f..2813e579b480 100644 --- a/include/uapi/drm/i915_drm.h +++ b/include/uapi/drm/i915_drm.h @@ -1619,6 +1619,27 @@ struct drm_i915_gem_context_param { * By default, new contexts allow persistence. 
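For the DRM_IOCTL_MODE_GETFB2 ioctl added above, user space passes a struct drm_mode_fb_cmd2 with only fb_id filled in and gets the framebuffer geometry, pixel format and modifiers back. A minimal sketch, assuming a placeholder framebuffer id, /dev/dri/card0, and that the caller holds the required DRM privileges; the uapi header install path may differ:

/* Hedged sketch: query metadata for a known framebuffer id. */
#include <drm/drm.h>		/* exported uapi header */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
	struct drm_mode_fb_cmd2 fb;
	int fd = open("/dev/dri/card0", O_RDWR);

	if (fd < 0)
		return 1;

	memset(&fb, 0, sizeof(fb));
	fb.fb_id = 42;		/* placeholder framebuffer id */

	if (ioctl(fd, DRM_IOCTL_MODE_GETFB2, &fb) == 0)
		printf("fb %u: %ux%u format 0x%08x modifier 0x%llx\n",
		       fb.fb_id, fb.width, fb.height, fb.pixel_format,
		       (unsigned long long)fb.modifier[0]);
	close(fd);
	return 0;
}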
*/ #define I915_CONTEXT_PARAM_PERSISTENCE 0xb + +/* + * I915_CONTEXT_PARAM_RINGSIZE: + * + * Sets the size of the CS ringbuffer to use for logical ring contexts. This + * applies a limit of how many batches can be queued to HW before the caller + * is blocked due to lack of space for more commands. + * + * Only reliably possible to be set prior to first use, i.e. during + * construction. At any later point, the current execution must be flushed as + * the ring can only be changed while the context is idle. Note, the ringsize + * can be specified as a constructor property, see + * I915_CONTEXT_CREATE_EXT_SETPARAM, but can also be set later if required. + * + * Only applies to the current set of engine and lost when those engines + * are replaced by a new mapping (see I915_CONTEXT_PARAM_ENGINES). + * + * Must be between 4 - 512 KiB, in intervals of page size [4 KiB]. + * Default is 16 KiB. + */ +#define I915_CONTEXT_PARAM_RINGSIZE 0xc /* Must be kept compact -- no holes and well documented */ __u64 value; diff --git a/include/uapi/drm/lima_drm.h b/include/uapi/drm/lima_drm.h index 95a00fb867e6..1ec58d652a5a 100644 --- a/include/uapi/drm/lima_drm.h +++ b/include/uapi/drm/lima_drm.h @@ -32,12 +32,19 @@ struct drm_lima_get_param { __u64 value; /* out, parameter value */ }; +/* + * heap buffer dynamically increase backup memory size when GP task fail + * due to lack of heap memory. size field of heap buffer is an up bound of + * the backup memory which can be set to a fairly large value. + */ +#define LIMA_BO_FLAG_HEAP (1 << 0) + /** * create a buffer for used by GPU */ struct drm_lima_gem_create { __u32 size; /* in, buffer size */ - __u32 flags; /* in, currently no flags, must be zero */ + __u32 flags; /* in, buffer flags */ __u32 handle; /* out, GEM buffer handle */ __u32 pad; /* pad, must be zero */ }; diff --git a/include/uapi/drm/vmwgfx_drm.h b/include/uapi/drm/vmwgfx_drm.h index fcb741e3068f..02e917507479 100644 --- a/include/uapi/drm/vmwgfx_drm.h +++ b/include/uapi/drm/vmwgfx_drm.h @@ -86,6 +86,9 @@ extern "C" { * * DRM_VMW_PARAM_SM4_1 * SM4_1 support is enabled. + * + * DRM_VMW_PARAM_SM5 + * SM5 support is enabled. */ #define DRM_VMW_PARAM_NUM_STREAMS 0 @@ -103,6 +106,7 @@ extern "C" { #define DRM_VMW_PARAM_DX 12 #define DRM_VMW_PARAM_HW_CAPS2 13 #define DRM_VMW_PARAM_SM4_1 14 +#define DRM_VMW_PARAM_SM5 15 /** * enum drm_vmw_handle_type - handle type for ref ioctls @@ -1133,7 +1137,7 @@ struct drm_vmw_handle_close_arg { * svga3d surface flags split into 2, upper half and lower half. */ enum drm_vmw_surface_version { - drm_vmw_gb_surface_v1 + drm_vmw_gb_surface_v1, }; /** @@ -1144,6 +1148,7 @@ enum drm_vmw_surface_version { * @svga3d_flags_upper_32_bits: Upper 32 bits of svga3d flags. * @multisample_pattern: Multisampling pattern when msaa is supported. * @quality_level: Precision settings for each sample. + * @buffer_byte_stride: Buffer byte stride. * @must_be_zero: Reserved for future usage. * * Input argument to the DRM_VMW_GB_SURFACE_CREATE_EXT Ioctl. 
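Setting the new I915_CONTEXT_PARAM_RINGSIZE goes through the existing context set-param ioctl. A minimal sketch, assuming a placeholder context id and render node path; as the comment above notes, real code would usually pass the value at context creation through I915_CONTEXT_CREATE_EXT_SETPARAM instead:

/* Hedged sketch: request a 64 KiB CS ring for an i915 context. */
#include <drm/i915_drm.h>	/* exported uapi header; path may differ */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
	struct drm_i915_gem_context_param p;
	int fd = open("/dev/dri/renderD128", O_RDWR);

	if (fd < 0)
		return 1;

	memset(&p, 0, sizeof(p));
	p.ctx_id = 0;				/* placeholder context id */
	p.param = I915_CONTEXT_PARAM_RINGSIZE;	/* 0xc, added above */
	p.value = 64 * 1024;			/* 4 KiB..512 KiB, page multiple */

	if (ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &p))
		perror("SETPARAM(RINGSIZE)");
	close(fd);
	return 0;
}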
@@ -1152,10 +1157,11 @@ enum drm_vmw_surface_version { struct drm_vmw_gb_surface_create_ext_req { struct drm_vmw_gb_surface_create_req base; enum drm_vmw_surface_version version; - uint32_t svga3d_flags_upper_32_bits; - SVGA3dMSPattern multisample_pattern; - SVGA3dMSQualityLevel quality_level; - uint64_t must_be_zero; + __u32 svga3d_flags_upper_32_bits; + __u32 multisample_pattern; + __u32 quality_level; + __u32 buffer_byte_stride; + __u32 must_be_zero; }; /** diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index 22f235260a3a..2e29a671d67e 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -73,7 +73,7 @@ struct bpf_insn { /* Key of an a BPF_MAP_TYPE_LPM_TRIE entry */ struct bpf_lpm_trie_key { __u32 prefixlen; /* up to 32 for AF_INET, 128 for AF_INET6 */ - __u8 data[0]; /* Arbitrary size */ + __u8 data[]; /* Arbitrary size */ }; struct bpf_cgroup_storage_key { @@ -111,6 +111,8 @@ enum bpf_cmd { BPF_MAP_LOOKUP_AND_DELETE_BATCH, BPF_MAP_UPDATE_BATCH, BPF_MAP_DELETE_BATCH, + BPF_LINK_CREATE, + BPF_LINK_UPDATE, }; enum bpf_map_type { @@ -181,6 +183,7 @@ enum bpf_prog_type { BPF_PROG_TYPE_TRACING, BPF_PROG_TYPE_STRUCT_OPS, BPF_PROG_TYPE_EXT, + BPF_PROG_TYPE_LSM, }; enum bpf_attach_type { @@ -210,6 +213,8 @@ enum bpf_attach_type { BPF_TRACE_RAW_TP, BPF_TRACE_FENTRY, BPF_TRACE_FEXIT, + BPF_MODIFY_RETURN, + BPF_LSM_MAC, __MAX_BPF_ATTACH_TYPE }; @@ -325,44 +330,46 @@ enum bpf_attach_type { #define BPF_PSEUDO_CALL 1 /* flags for BPF_MAP_UPDATE_ELEM command */ -#define BPF_ANY 0 /* create new element or update existing */ -#define BPF_NOEXIST 1 /* create new element if it didn't exist */ -#define BPF_EXIST 2 /* update existing element */ -#define BPF_F_LOCK 4 /* spin_lock-ed map_lookup/map_update */ +enum { + BPF_ANY = 0, /* create new element or update existing */ + BPF_NOEXIST = 1, /* create new element if it didn't exist */ + BPF_EXIST = 2, /* update existing element */ + BPF_F_LOCK = 4, /* spin_lock-ed map_lookup/map_update */ +}; /* flags for BPF_MAP_CREATE command */ -#define BPF_F_NO_PREALLOC (1U << 0) +enum { + BPF_F_NO_PREALLOC = (1U << 0), /* Instead of having one common LRU list in the * BPF_MAP_TYPE_LRU_[PERCPU_]HASH map, use a percpu LRU list * which can scale and perform better. * Note, the LRU nodes (including free nodes) cannot be moved * across different LRU lists. */ -#define BPF_F_NO_COMMON_LRU (1U << 1) + BPF_F_NO_COMMON_LRU = (1U << 1), /* Specify numa node during map creation */ -#define BPF_F_NUMA_NODE (1U << 2) - -#define BPF_OBJ_NAME_LEN 16U + BPF_F_NUMA_NODE = (1U << 2), /* Flags for accessing BPF object from syscall side. */ -#define BPF_F_RDONLY (1U << 3) -#define BPF_F_WRONLY (1U << 4) + BPF_F_RDONLY = (1U << 3), + BPF_F_WRONLY = (1U << 4), /* Flag for stack_map, store build_id+offset instead of pointer */ -#define BPF_F_STACK_BUILD_ID (1U << 5) + BPF_F_STACK_BUILD_ID = (1U << 5), /* Zero-initialize hash function seed. This should only be used for testing. */ -#define BPF_F_ZERO_SEED (1U << 6) + BPF_F_ZERO_SEED = (1U << 6), /* Flags for accessing BPF object from program side. */ -#define BPF_F_RDONLY_PROG (1U << 7) -#define BPF_F_WRONLY_PROG (1U << 8) + BPF_F_RDONLY_PROG = (1U << 7), + BPF_F_WRONLY_PROG = (1U << 8), /* Clone map from listener for newly accepted socket */ -#define BPF_F_CLONE (1U << 9) + BPF_F_CLONE = (1U << 9), /* Enable memory-mapping BPF map */ -#define BPF_F_MMAPABLE (1U << 10) + BPF_F_MMAPABLE = (1U << 10), +}; /* Flags for BPF_PROG_QUERY. 
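With data[] converted to a true flexible array member, sizeof(struct bpf_lpm_trie_key) stays 4 bytes and the usual user-space pattern of embedding it at the head of a larger key is unchanged. A minimal sketch of an IPv4 LPM trie key; the wrapper struct and helper names are illustrative only:

#include <linux/bpf.h>
#include <string.h>

/* Illustrative wrapper: prefix length header followed by the address bytes.
 * The flexible data[] member contributes no size, so sizeof(struct lpm_v4_key)
 * is 8 bytes, which is the key_size an IPv4 LPM trie map expects. */
struct lpm_v4_key {
	struct bpf_lpm_trie_key hdr;
	__u8 addr[4];
};

static void fill_v4_key(struct lpm_v4_key *key, const __u8 ip[4], __u32 plen)
{
	key->hdr.prefixlen = plen;	/* up to 32 for AF_INET */
	memcpy(key->addr, ip, 4);
}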
*/ @@ -391,6 +398,8 @@ struct bpf_stack_build_id { }; }; +#define BPF_OBJ_NAME_LEN 16U + union bpf_attr { struct { /* anonymous struct used by BPF_MAP_CREATE command */ __u32 map_type; /* one of enum bpf_map_type */ @@ -534,7 +543,7 @@ union bpf_attr { __u32 prog_cnt; } query; - struct { + struct { /* anonymous struct used by BPF_RAW_TRACEPOINT_OPEN command */ __u64 name; __u32 prog_fd; } raw_tracepoint; @@ -562,6 +571,24 @@ union bpf_attr { __u64 probe_offset; /* output: probe_offset */ __u64 probe_addr; /* output: probe_addr */ } task_fd_query; + + struct { /* struct used by BPF_LINK_CREATE command */ + __u32 prog_fd; /* eBPF program to attach */ + __u32 target_fd; /* object to attach to */ + __u32 attach_type; /* attach type */ + __u32 flags; /* extra flags */ + } link_create; + + struct { /* struct used by BPF_LINK_UPDATE command */ + __u32 link_fd; /* link fd */ + /* new program fd to update link with */ + __u32 new_prog_fd; + __u32 flags; /* extra flags */ + /* expected link's program fd; is specified only if + * BPF_F_REPLACE flag is set in flags */ + __u32 old_prog_fd; + } link_update; + } __attribute__((aligned(8))); /* The description below is an attempt at providing documentation to eBPF @@ -2890,6 +2917,114 @@ union bpf_attr { * Obtain the 64bit jiffies * Return * The 64 bit jiffies + * + * int bpf_read_branch_records(struct bpf_perf_event_data *ctx, void *buf, u32 size, u64 flags) + * Description + * For an eBPF program attached to a perf event, retrieve the + * branch records (struct perf_branch_entry) associated to *ctx* + * and store it in the buffer pointed by *buf* up to size + * *size* bytes. + * Return + * On success, number of bytes written to *buf*. On error, a + * negative value. + * + * The *flags* can be set to **BPF_F_GET_BRANCH_RECORDS_SIZE** to + * instead return the number of bytes required to store all the + * branch entries. If this flag is set, *buf* may be NULL. + * + * **-EINVAL** if arguments invalid or **size** not a multiple + * of sizeof(struct perf_branch_entry). + * + * **-ENOENT** if architecture does not support branch records. + * + * int bpf_get_ns_current_pid_tgid(u64 dev, u64 ino, struct bpf_pidns_info *nsdata, u32 size) + * Description + * Returns 0 on success, values for *pid* and *tgid* as seen from the current + * *namespace* will be returned in *nsdata*. + * + * On failure, the returned value is one of the following: + * + * **-EINVAL** if dev and inum supplied don't match dev_t and inode number + * with nsfs of current task, or if dev conversion to dev_t lost high bits. + * + * **-ENOENT** if pidns does not exists for the current task. + * + * int bpf_xdp_output(void *ctx, struct bpf_map *map, u64 flags, void *data, u64 size) + * Description + * Write raw *data* blob into a special BPF perf event held by + * *map* of type **BPF_MAP_TYPE_PERF_EVENT_ARRAY**. This perf + * event must have the following attributes: **PERF_SAMPLE_RAW** + * as **sample_type**, **PERF_TYPE_SOFTWARE** as **type**, and + * **PERF_COUNT_SW_BPF_OUTPUT** as **config**. + * + * The *flags* are used to indicate the index in *map* for which + * the value must be put, masked with **BPF_F_INDEX_MASK**. + * Alternatively, *flags* can be set to **BPF_F_CURRENT_CPU** + * to indicate that the index of the current CPU core should be + * used. + * + * The value to write, of *size*, is passed through eBPF stack and + * pointed by *data*. + * + * *ctx* is a pointer to in-kernel struct xdp_buff. 
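The link_create block above is driven through the plain bpf(2) syscall. A minimal sketch that attaches an already-loaded cgroup program as a BPF link; the file descriptors are placeholders and most callers would use libbpf's wrappers instead:

#include <linux/bpf.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Create a BPF link attaching prog_fd to cgroup_fd; returns the link fd
 * on success or -1 on error. */
static int bpf_link_create_raw(int prog_fd, int cgroup_fd,
			       enum bpf_attach_type type)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.link_create.prog_fd = prog_fd;
	attr.link_create.target_fd = cgroup_fd;
	attr.link_create.attach_type = type;
	attr.link_create.flags = 0;

	return syscall(__NR_bpf, BPF_LINK_CREATE, &attr, sizeof(attr));
}

The companion BPF_LINK_UPDATE command swaps new_prog_fd into an existing link_fd, optionally verifying old_prog_fd when BPF_F_REPLACE is set in flags.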
+ * + * This helper is similar to **bpf_perf_eventoutput**\ () but + * restricted to raw_tracepoint bpf programs. + * Return + * 0 on success, or a negative error in case of failure. + * + * u64 bpf_get_netns_cookie(void *ctx) + * Description + * Retrieve the cookie (generated by the kernel) of the network + * namespace the input *ctx* is associated with. The network + * namespace cookie remains stable for its lifetime and provides + * a global identifier that can be assumed unique. If *ctx* is + * NULL, then the helper returns the cookie for the initial + * network namespace. The cookie itself is very similar to that + * of bpf_get_socket_cookie() helper, but for network namespaces + * instead of sockets. + * Return + * A 8-byte long opaque number. + * + * u64 bpf_get_current_ancestor_cgroup_id(int ancestor_level) + * Description + * Return id of cgroup v2 that is ancestor of the cgroup associated + * with the current task at the *ancestor_level*. The root cgroup + * is at *ancestor_level* zero and each step down the hierarchy + * increments the level. If *ancestor_level* == level of cgroup + * associated with the current task, then return value will be the + * same as that of **bpf_get_current_cgroup_id**\ (). + * + * The helper is useful to implement policies based on cgroups + * that are upper in hierarchy than immediate cgroup associated + * with the current task. + * + * The format of returned id and helper limitations are same as in + * **bpf_get_current_cgroup_id**\ (). + * Return + * The id is returned or 0 in case the id could not be retrieved. + * + * int bpf_sk_assign(struct sk_buff *skb, struct bpf_sock *sk, u64 flags) + * Description + * Assign the *sk* to the *skb*. When combined with appropriate + * routing configuration to receive the packet towards the socket, + * will cause *skb* to be delivered to the specified socket. + * Subsequent redirection of *skb* via **bpf_redirect**\ (), + * **bpf_clone_redirect**\ () or other methods outside of BPF may + * interfere with successful delivery to the socket. + * + * This operation is only valid from TC ingress path. + * + * The *flags* argument must be zero. + * Return + * 0 on success, or a negative errno in case of failure. + * + * * **-EINVAL** Unsupported flags specified. + * * **-ENOENT** Socket is unavailable for assignment. + * * **-ENETUNREACH** Socket is unreachable (wrong netns). + * * **-EOPNOTSUPP** Unsupported operation, for example a + * call from outside of TC ingress. + * * **-ESOCKTNOSUPPORT** Socket type not supported (reuseport). */ #define __BPF_FUNC_MAPPER(FN) \ FN(unspec), \ @@ -3010,7 +3145,13 @@ union bpf_attr { FN(probe_read_kernel_str), \ FN(tcp_send_ack), \ FN(send_signal_thread), \ - FN(jiffies64), + FN(jiffies64), \ + FN(read_branch_records), \ + FN(get_ns_current_pid_tgid), \ + FN(xdp_output), \ + FN(get_netns_cookie), \ + FN(get_current_ancestor_cgroup_id), \ + FN(sk_assign), /* integer value in 'imm' field of BPF_CALL instruction selects which helper * function eBPF program intends to call @@ -3025,69 +3166,100 @@ enum bpf_func_id { /* All flags used by eBPF helper functions, placed here. */ /* BPF_FUNC_skb_store_bytes flags. */ -#define BPF_F_RECOMPUTE_CSUM (1ULL << 0) -#define BPF_F_INVALIDATE_HASH (1ULL << 1) +enum { + BPF_F_RECOMPUTE_CSUM = (1ULL << 0), + BPF_F_INVALIDATE_HASH = (1ULL << 1), +}; /* BPF_FUNC_l3_csum_replace and BPF_FUNC_l4_csum_replace flags. * First 4 bits are for passing the header field size. 
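bpf_get_current_ancestor_cgroup_id(), documented above, is available to the same program types that can already call bpf_get_current_cgroup_id(). A minimal kernel-side sketch, assuming a libbpf-style build (bpf_helpers.h, clang -target bpf) and an illustrative tracepoint attach point:

// SPDX-License-Identifier: GPL-2.0
/* Hedged BPF program sketch. */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("tracepoint/syscalls/sys_enter_openat")
int log_ancestor_cgroup(void *ctx)
{
	/* cgroup v2 id of the current task's ancestor one level below root */
	__u64 id = bpf_get_current_ancestor_cgroup_id(1);

	bpf_printk("ancestor cgroup id %llu", id);
	return 0;
}

char LICENSE[] SEC("license") = "GPL";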
*/ -#define BPF_F_HDR_FIELD_MASK 0xfULL +enum { + BPF_F_HDR_FIELD_MASK = 0xfULL, +}; /* BPF_FUNC_l4_csum_replace flags. */ -#define BPF_F_PSEUDO_HDR (1ULL << 4) -#define BPF_F_MARK_MANGLED_0 (1ULL << 5) -#define BPF_F_MARK_ENFORCE (1ULL << 6) +enum { + BPF_F_PSEUDO_HDR = (1ULL << 4), + BPF_F_MARK_MANGLED_0 = (1ULL << 5), + BPF_F_MARK_ENFORCE = (1ULL << 6), +}; /* BPF_FUNC_clone_redirect and BPF_FUNC_redirect flags. */ -#define BPF_F_INGRESS (1ULL << 0) +enum { + BPF_F_INGRESS = (1ULL << 0), +}; /* BPF_FUNC_skb_set_tunnel_key and BPF_FUNC_skb_get_tunnel_key flags. */ -#define BPF_F_TUNINFO_IPV6 (1ULL << 0) +enum { + BPF_F_TUNINFO_IPV6 = (1ULL << 0), +}; /* flags for both BPF_FUNC_get_stackid and BPF_FUNC_get_stack. */ -#define BPF_F_SKIP_FIELD_MASK 0xffULL -#define BPF_F_USER_STACK (1ULL << 8) +enum { + BPF_F_SKIP_FIELD_MASK = 0xffULL, + BPF_F_USER_STACK = (1ULL << 8), /* flags used by BPF_FUNC_get_stackid only. */ -#define BPF_F_FAST_STACK_CMP (1ULL << 9) -#define BPF_F_REUSE_STACKID (1ULL << 10) + BPF_F_FAST_STACK_CMP = (1ULL << 9), + BPF_F_REUSE_STACKID = (1ULL << 10), /* flags used by BPF_FUNC_get_stack only. */ -#define BPF_F_USER_BUILD_ID (1ULL << 11) + BPF_F_USER_BUILD_ID = (1ULL << 11), +}; /* BPF_FUNC_skb_set_tunnel_key flags. */ -#define BPF_F_ZERO_CSUM_TX (1ULL << 1) -#define BPF_F_DONT_FRAGMENT (1ULL << 2) -#define BPF_F_SEQ_NUMBER (1ULL << 3) +enum { + BPF_F_ZERO_CSUM_TX = (1ULL << 1), + BPF_F_DONT_FRAGMENT = (1ULL << 2), + BPF_F_SEQ_NUMBER = (1ULL << 3), +}; /* BPF_FUNC_perf_event_output, BPF_FUNC_perf_event_read and * BPF_FUNC_perf_event_read_value flags. */ -#define BPF_F_INDEX_MASK 0xffffffffULL -#define BPF_F_CURRENT_CPU BPF_F_INDEX_MASK +enum { + BPF_F_INDEX_MASK = 0xffffffffULL, + BPF_F_CURRENT_CPU = BPF_F_INDEX_MASK, /* BPF_FUNC_perf_event_output for sk_buff input context. */ -#define BPF_F_CTXLEN_MASK (0xfffffULL << 32) + BPF_F_CTXLEN_MASK = (0xfffffULL << 32), +}; /* Current network namespace */ -#define BPF_F_CURRENT_NETNS (-1L) +enum { + BPF_F_CURRENT_NETNS = (-1L), +}; /* BPF_FUNC_skb_adjust_room flags. */ -#define BPF_F_ADJ_ROOM_FIXED_GSO (1ULL << 0) +enum { + BPF_F_ADJ_ROOM_FIXED_GSO = (1ULL << 0), + BPF_F_ADJ_ROOM_ENCAP_L3_IPV4 = (1ULL << 1), + BPF_F_ADJ_ROOM_ENCAP_L3_IPV6 = (1ULL << 2), + BPF_F_ADJ_ROOM_ENCAP_L4_GRE = (1ULL << 3), + BPF_F_ADJ_ROOM_ENCAP_L4_UDP = (1ULL << 4), +}; -#define BPF_ADJ_ROOM_ENCAP_L2_MASK 0xff -#define BPF_ADJ_ROOM_ENCAP_L2_SHIFT 56 +enum { + BPF_ADJ_ROOM_ENCAP_L2_MASK = 0xff, + BPF_ADJ_ROOM_ENCAP_L2_SHIFT = 56, +}; -#define BPF_F_ADJ_ROOM_ENCAP_L3_IPV4 (1ULL << 1) -#define BPF_F_ADJ_ROOM_ENCAP_L3_IPV6 (1ULL << 2) -#define BPF_F_ADJ_ROOM_ENCAP_L4_GRE (1ULL << 3) -#define BPF_F_ADJ_ROOM_ENCAP_L4_UDP (1ULL << 4) #define BPF_F_ADJ_ROOM_ENCAP_L2(len) (((__u64)len & \ BPF_ADJ_ROOM_ENCAP_L2_MASK) \ << BPF_ADJ_ROOM_ENCAP_L2_SHIFT) /* BPF_FUNC_sysctl_get_name flags. */ -#define BPF_F_SYSCTL_BASE_NAME (1ULL << 0) +enum { + BPF_F_SYSCTL_BASE_NAME = (1ULL << 0), +}; /* BPF_FUNC_sk_storage_get flags */ -#define BPF_SK_STORAGE_GET_F_CREATE (1ULL << 0) +enum { + BPF_SK_STORAGE_GET_F_CREATE = (1ULL << 0), +}; + +/* BPF_FUNC_read_branch_records flags. */ +enum { + BPF_F_GET_BRANCH_RECORDS_SIZE = (1ULL << 0), +}; /* Mode for BPF_FUNC_skb_adjust_room helper. 
*/ enum bpf_adj_room_mode { @@ -3153,6 +3325,7 @@ struct __sk_buff { __u32 wire_len; __u32 gso_segs; __bpf_md_ptr(struct bpf_sock *, sk); + __u32 gso_size; }; struct bpf_tunnel_key { @@ -3505,13 +3678,14 @@ struct bpf_sock_ops { }; /* Definitions for bpf_sock_ops_cb_flags */ -#define BPF_SOCK_OPS_RTO_CB_FLAG (1<<0) -#define BPF_SOCK_OPS_RETRANS_CB_FLAG (1<<1) -#define BPF_SOCK_OPS_STATE_CB_FLAG (1<<2) -#define BPF_SOCK_OPS_RTT_CB_FLAG (1<<3) -#define BPF_SOCK_OPS_ALL_CB_FLAGS 0xF /* Mask of all currently - * supported cb flags - */ +enum { + BPF_SOCK_OPS_RTO_CB_FLAG = (1<<0), + BPF_SOCK_OPS_RETRANS_CB_FLAG = (1<<1), + BPF_SOCK_OPS_STATE_CB_FLAG = (1<<2), + BPF_SOCK_OPS_RTT_CB_FLAG = (1<<3), +/* Mask of all currently supported cb flags */ + BPF_SOCK_OPS_ALL_CB_FLAGS = 0xF, +}; /* List of known BPF sock_ops operators. * New entries can only be added at the end @@ -3590,8 +3764,10 @@ enum { BPF_TCP_MAX_STATES /* Leave at the end! */ }; -#define TCP_BPF_IW 1001 /* Set TCP initial congestion window */ -#define TCP_BPF_SNDCWND_CLAMP 1002 /* Set sndcwnd_clamp */ +enum { + TCP_BPF_IW = 1001, /* Set TCP initial congestion window */ + TCP_BPF_SNDCWND_CLAMP = 1002, /* Set sndcwnd_clamp */ +}; struct bpf_perf_event_value { __u64 counter; @@ -3599,12 +3775,16 @@ struct bpf_perf_event_value { __u64 running; }; -#define BPF_DEVCG_ACC_MKNOD (1ULL << 0) -#define BPF_DEVCG_ACC_READ (1ULL << 1) -#define BPF_DEVCG_ACC_WRITE (1ULL << 2) +enum { + BPF_DEVCG_ACC_MKNOD = (1ULL << 0), + BPF_DEVCG_ACC_READ = (1ULL << 1), + BPF_DEVCG_ACC_WRITE = (1ULL << 2), +}; -#define BPF_DEVCG_DEV_BLOCK (1ULL << 0) -#define BPF_DEVCG_DEV_CHAR (1ULL << 1) +enum { + BPF_DEVCG_DEV_BLOCK = (1ULL << 0), + BPF_DEVCG_DEV_CHAR = (1ULL << 1), +}; struct bpf_cgroup_dev_ctx { /* access_type encoded as (BPF_DEVCG_ACC_* << 16) | BPF_DEVCG_DEV_* */ @@ -3620,8 +3800,10 @@ struct bpf_raw_tracepoint_args { /* DIRECT: Skip the FIB rules and go to FIB table associated with device * OUTPUT: Do lookup from egress perspective; default is ingress */ -#define BPF_FIB_LOOKUP_DIRECT (1U << 0) -#define BPF_FIB_LOOKUP_OUTPUT (1U << 1) +enum { + BPF_FIB_LOOKUP_DIRECT = (1U << 0), + BPF_FIB_LOOKUP_OUTPUT = (1U << 1), +}; enum { BPF_FIB_LKUP_RET_SUCCESS, /* lookup successful */ @@ -3693,9 +3875,11 @@ enum bpf_task_fd_type { BPF_FD_TYPE_URETPROBE, /* filename + offset */ }; -#define BPF_FLOW_DISSECTOR_F_PARSE_1ST_FRAG (1U << 0) -#define BPF_FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL (1U << 1) -#define BPF_FLOW_DISSECTOR_F_STOP_AT_ENCAP (1U << 2) +enum { + BPF_FLOW_DISSECTOR_F_PARSE_1ST_FRAG = (1U << 0), + BPF_FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL = (1U << 1), + BPF_FLOW_DISSECTOR_F_STOP_AT_ENCAP = (1U << 2), +}; struct bpf_flow_keys { __u16 nhoff; @@ -3761,4 +3945,8 @@ struct bpf_sockopt { __s32 retval; }; +struct bpf_pidns_info { + __u32 pid; + __u32 tgid; +}; #endif /* _UAPI__LINUX_BPF_H__ */ diff --git a/include/uapi/linux/btrfs.h b/include/uapi/linux/btrfs.h index 7a8bc8b920f5..8134924cfc17 100644 --- a/include/uapi/linux/btrfs.h +++ b/include/uapi/linux/btrfs.h @@ -36,17 +36,24 @@ struct btrfs_ioctl_vol_args { #define BTRFS_DEVICE_PATH_NAME_MAX 1024 #define BTRFS_SUBVOL_NAME_MAX 4039 -#define BTRFS_SUBVOL_CREATE_ASYNC (1ULL << 0) +/* + * Deprecated since 5.7: + * + * BTRFS_SUBVOL_CREATE_ASYNC (1ULL << 0) + */ + #define BTRFS_SUBVOL_RDONLY (1ULL << 1) #define BTRFS_SUBVOL_QGROUP_INHERIT (1ULL << 2) #define BTRFS_DEVICE_SPEC_BY_ID (1ULL << 3) +#define BTRFS_SUBVOL_SPEC_BY_ID (1ULL << 4) + #define BTRFS_VOL_ARG_V2_FLAGS_SUPPORTED \ - (BTRFS_SUBVOL_CREATE_ASYNC | \ - 
BTRFS_SUBVOL_RDONLY | \ + (BTRFS_SUBVOL_RDONLY | \ BTRFS_SUBVOL_QGROUP_INHERIT | \ - BTRFS_DEVICE_SPEC_BY_ID) + BTRFS_DEVICE_SPEC_BY_ID | \ + BTRFS_SUBVOL_SPEC_BY_ID) #define BTRFS_FSID_SIZE 16 #define BTRFS_UUID_SIZE 16 @@ -97,16 +104,29 @@ struct btrfs_ioctl_qgroup_limit_args { }; /* - * flags for subvolumes + * Arguments for specification of subvolumes or devices, supporting by-name or + * by-id and flags * - * Used by: - * struct btrfs_ioctl_vol_args_v2.flags + * The set of supported flags depends on the ioctl * * BTRFS_SUBVOL_RDONLY is also provided/consumed by the following ioctls: * - BTRFS_IOC_SUBVOL_GETFLAGS * - BTRFS_IOC_SUBVOL_SETFLAGS */ +/* Supported flags for BTRFS_IOC_RM_DEV_V2 */ +#define BTRFS_DEVICE_REMOVE_ARGS_MASK \ + (BTRFS_DEVICE_SPEC_BY_ID) + +/* Supported flags for BTRFS_IOC_SNAP_CREATE_V2 and BTRFS_IOC_SUBVOL_CREATE_V2 */ +#define BTRFS_SUBVOL_CREATE_ARGS_MASK \ + (BTRFS_SUBVOL_RDONLY | \ + BTRFS_SUBVOL_QGROUP_INHERIT) + +/* Supported flags for BTRFS_IOC_SNAP_DESTROY_V2 */ +#define BTRFS_SUBVOL_DELETE_ARGS_MASK \ + (BTRFS_SUBVOL_SPEC_BY_ID) + struct btrfs_ioctl_vol_args_v2 { __s64 fd; __u64 transid; @@ -121,6 +141,7 @@ struct btrfs_ioctl_vol_args_v2 { union { char name[BTRFS_SUBVOL_NAME_MAX + 1]; __u64 devid; + __u64 subvolid; }; }; @@ -949,5 +970,7 @@ enum btrfs_err_code { struct btrfs_ioctl_get_subvol_rootref_args) #define BTRFS_IOC_INO_LOOKUP_USER _IOWR(BTRFS_IOCTL_MAGIC, 62, \ struct btrfs_ioctl_ino_lookup_user_args) +#define BTRFS_IOC_SNAP_DESTROY_V2 _IOW(BTRFS_IOCTL_MAGIC, 63, \ + struct btrfs_ioctl_vol_args_v2) #endif /* _UAPI_LINUX_BTRFS_H */ diff --git a/include/uapi/linux/coresight-stm.h b/include/uapi/linux/coresight-stm.h index aac550a52f80..8847dbf24151 100644 --- a/include/uapi/linux/coresight-stm.h +++ b/include/uapi/linux/coresight-stm.h @@ -2,8 +2,10 @@ #ifndef __UAPI_CORESIGHT_STM_H_ #define __UAPI_CORESIGHT_STM_H_ -#define STM_FLAG_TIMESTAMPED BIT(3) -#define STM_FLAG_GUARANTEED BIT(7) +#include <linux/const.h> + +#define STM_FLAG_TIMESTAMPED _BITUL(3) +#define STM_FLAG_GUARANTEED _BITUL(7) /* * The CoreSight STM supports guaranteed and invariant timing diff --git a/include/uapi/linux/devlink.h b/include/uapi/linux/devlink.h index ae37fd4d194a..1ae90e06c06d 100644 --- a/include/uapi/linux/devlink.h +++ b/include/uapi/linux/devlink.h @@ -117,6 +117,11 @@ enum devlink_command { DEVLINK_CMD_TRAP_GROUP_NEW, DEVLINK_CMD_TRAP_GROUP_DEL, + DEVLINK_CMD_TRAP_POLICER_GET, /* can dump */ + DEVLINK_CMD_TRAP_POLICER_SET, + DEVLINK_CMD_TRAP_POLICER_NEW, + DEVLINK_CMD_TRAP_POLICER_DEL, + /* add new commands above here */ __DEVLINK_CMD_MAX, DEVLINK_CMD_MAX = __DEVLINK_CMD_MAX - 1 @@ -187,6 +192,7 @@ enum devlink_port_flavour { * for the PCI VF. It is an internal * port that faces the PCI VF. */ + DEVLINK_PORT_FLAVOUR_VIRTUAL, /* Any virtual port facing the user. 
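The new BTRFS_IOC_SNAP_DESTROY_V2, combined with BTRFS_SUBVOL_SPEC_BY_ID, lets a subvolume be deleted by numeric id rather than by name. A minimal sketch, assuming a placeholder mount point and subvolume id and sufficient privileges:

/* Hedged sketch: delete a btrfs subvolume by id. */
#include <fcntl.h>
#include <linux/btrfs.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
	struct btrfs_ioctl_vol_args_v2 args;
	int fd = open("/mnt/btrfs", O_RDONLY | O_DIRECTORY);	/* placeholder */

	if (fd < 0)
		return 1;

	memset(&args, 0, sizeof(args));
	args.flags = BTRFS_SUBVOL_SPEC_BY_ID;
	args.subvolid = 256;			/* placeholder subvolume id */

	if (ioctl(fd, BTRFS_IOC_SNAP_DESTROY_V2, &args))
		perror("BTRFS_IOC_SNAP_DESTROY_V2");
	close(fd);
	return 0;
}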
*/ }; enum devlink_param_cmode { @@ -216,6 +222,7 @@ enum devlink_param_reset_dev_on_drv_probe_value { enum { DEVLINK_ATTR_STATS_RX_PACKETS, /* u64 */ DEVLINK_ATTR_STATS_RX_BYTES, /* u64 */ + DEVLINK_ATTR_STATS_RX_DROPPED, /* u64 */ __DEVLINK_ATTR_STATS_MAX, DEVLINK_ATTR_STATS_MAX = __DEVLINK_ATTR_STATS_MAX - 1 @@ -252,6 +259,8 @@ enum devlink_trap_type { enum { /* Trap can report input port as metadata */ DEVLINK_ATTR_TRAP_METADATA_TYPE_IN_PORT, + /* Trap can report flow action cookie as metadata */ + DEVLINK_ATTR_TRAP_METADATA_TYPE_FA_COOKIE, }; enum devlink_attr { @@ -426,6 +435,13 @@ enum devlink_attr { DEVLINK_ATTR_NETNS_FD, /* u32 */ DEVLINK_ATTR_NETNS_PID, /* u32 */ DEVLINK_ATTR_NETNS_ID, /* u32 */ + + DEVLINK_ATTR_HEALTH_REPORTER_AUTO_DUMP, /* u8 */ + + DEVLINK_ATTR_TRAP_POLICER_ID, /* u32 */ + DEVLINK_ATTR_TRAP_POLICER_RATE, /* u64 */ + DEVLINK_ATTR_TRAP_POLICER_BURST, /* u64 */ + /* add new attributes above here, update the policy in devlink.c */ __DEVLINK_ATTR_MAX, diff --git a/include/uapi/linux/dm-ioctl.h b/include/uapi/linux/dm-ioctl.h index 2df8ceca1f9b..6622912c2342 100644 --- a/include/uapi/linux/dm-ioctl.h +++ b/include/uapi/linux/dm-ioctl.h @@ -272,9 +272,9 @@ enum { #define DM_DEV_SET_GEOMETRY _IOWR(DM_IOCTL, DM_DEV_SET_GEOMETRY_CMD, struct dm_ioctl) #define DM_VERSION_MAJOR 4 -#define DM_VERSION_MINOR 41 +#define DM_VERSION_MINOR 42 #define DM_VERSION_PATCHLEVEL 0 -#define DM_VERSION_EXTRA "-ioctl (2019-09-16)" +#define DM_VERSION_EXTRA "-ioctl (2020-02-27)" /* Status bits */ #define DM_READONLY_FLAG (1 << 0) /* In/Out */ diff --git a/include/uapi/linux/ethtool.h b/include/uapi/linux/ethtool.h index 4295ebfa2f91..92f737f10117 100644 --- a/include/uapi/linux/ethtool.h +++ b/include/uapi/linux/ethtool.h @@ -596,6 +596,9 @@ struct ethtool_pauseparam { * @ETH_SS_LINK_MODES: link mode names * @ETH_SS_MSG_CLASSES: debug message class names * @ETH_SS_WOL_MODES: wake-on-lan modes + * @ETH_SS_SOF_TIMESTAMPING: SOF_TIMESTAMPING_* flags + * @ETH_SS_TS_TX_TYPES: timestamping Tx types + * @ETH_SS_TS_RX_FILTERS: timestamping Rx filters */ enum ethtool_stringset { ETH_SS_TEST = 0, @@ -610,6 +613,9 @@ enum ethtool_stringset { ETH_SS_LINK_MODES, ETH_SS_MSG_CLASSES, ETH_SS_WOL_MODES, + ETH_SS_SOF_TIMESTAMPING, + ETH_SS_TS_TX_TYPES, + ETH_SS_TS_RX_FILTERS, /* add new constants above here */ ETH_SS_COUNT @@ -1330,6 +1336,7 @@ enum ethtool_fec_config_bits { ETHTOOL_FEC_OFF_BIT, ETHTOOL_FEC_RS_BIT, ETHTOOL_FEC_BASER_BIT, + ETHTOOL_FEC_LLRS_BIT, }; #define ETHTOOL_FEC_NONE (1 << ETHTOOL_FEC_NONE_BIT) @@ -1337,6 +1344,7 @@ enum ethtool_fec_config_bits { #define ETHTOOL_FEC_OFF (1 << ETHTOOL_FEC_OFF_BIT) #define ETHTOOL_FEC_RS (1 << ETHTOOL_FEC_RS_BIT) #define ETHTOOL_FEC_BASER (1 << ETHTOOL_FEC_BASER_BIT) +#define ETHTOOL_FEC_LLRS (1 << ETHTOOL_FEC_LLRS_BIT) /* CMDs currently supported */ #define ETHTOOL_GSET 0x00000001 /* DEPRECATED, Get settings. 
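The new ETHTOOL_FEC_LLRS bit can be read back through the existing ETHTOOL_GFECPARAM ioctl even before the netlink interface is involved. A minimal sketch, assuming a placeholder interface name and that the installed ethtool.h already carries the LLRS define:

/* Hedged sketch: check whether the driver reports LLRS among its FEC modes. */
#include <linux/ethtool.h>
#include <linux/sockios.h>
#include <net/if.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	struct ethtool_fecparam fec = { .cmd = ETHTOOL_GFECPARAM };
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);	/* placeholder ifname */
	ifr.ifr_data = (void *)&fec;

	if (ioctl(fd, SIOCETHTOOL, &ifr) == 0)
		printf("LLRS FEC %sreported\n",
		       (fec.fec & ETHTOOL_FEC_LLRS) ? "" : "not ");
	close(fd);
	return 0;
}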
@@ -1521,7 +1529,7 @@ enum ethtool_link_mode_bit_indices { ETHTOOL_LINK_MODE_400000baseLR8_ER8_FR8_Full_BIT = 71, ETHTOOL_LINK_MODE_400000baseDR8_Full_BIT = 72, ETHTOOL_LINK_MODE_400000baseCR8_Full_BIT = 73, - + ETHTOOL_LINK_MODE_FEC_LLRS_BIT = 74, /* must be last entry */ __ETHTOOL_LINK_MODE_MASK_NBITS }; diff --git a/include/uapi/linux/ethtool_netlink.h b/include/uapi/linux/ethtool_netlink.h index 7e0b460f872c..7fde76366ba4 100644 --- a/include/uapi/linux/ethtool_netlink.h +++ b/include/uapi/linux/ethtool_netlink.h @@ -24,6 +24,21 @@ enum { ETHTOOL_MSG_DEBUG_SET, ETHTOOL_MSG_WOL_GET, ETHTOOL_MSG_WOL_SET, + ETHTOOL_MSG_FEATURES_GET, + ETHTOOL_MSG_FEATURES_SET, + ETHTOOL_MSG_PRIVFLAGS_GET, + ETHTOOL_MSG_PRIVFLAGS_SET, + ETHTOOL_MSG_RINGS_GET, + ETHTOOL_MSG_RINGS_SET, + ETHTOOL_MSG_CHANNELS_GET, + ETHTOOL_MSG_CHANNELS_SET, + ETHTOOL_MSG_COALESCE_GET, + ETHTOOL_MSG_COALESCE_SET, + ETHTOOL_MSG_PAUSE_GET, + ETHTOOL_MSG_PAUSE_SET, + ETHTOOL_MSG_EEE_GET, + ETHTOOL_MSG_EEE_SET, + ETHTOOL_MSG_TSINFO_GET, /* add new constants above here */ __ETHTOOL_MSG_USER_CNT, @@ -43,6 +58,22 @@ enum { ETHTOOL_MSG_DEBUG_NTF, ETHTOOL_MSG_WOL_GET_REPLY, ETHTOOL_MSG_WOL_NTF, + ETHTOOL_MSG_FEATURES_GET_REPLY, + ETHTOOL_MSG_FEATURES_SET_REPLY, + ETHTOOL_MSG_FEATURES_NTF, + ETHTOOL_MSG_PRIVFLAGS_GET_REPLY, + ETHTOOL_MSG_PRIVFLAGS_NTF, + ETHTOOL_MSG_RINGS_GET_REPLY, + ETHTOOL_MSG_RINGS_NTF, + ETHTOOL_MSG_CHANNELS_GET_REPLY, + ETHTOOL_MSG_CHANNELS_NTF, + ETHTOOL_MSG_COALESCE_GET_REPLY, + ETHTOOL_MSG_COALESCE_NTF, + ETHTOOL_MSG_PAUSE_GET_REPLY, + ETHTOOL_MSG_PAUSE_NTF, + ETHTOOL_MSG_EEE_GET_REPLY, + ETHTOOL_MSG_EEE_NTF, + ETHTOOL_MSG_TSINFO_GET_REPLY, /* add new constants above here */ __ETHTOOL_MSG_KERNEL_CNT, @@ -228,6 +259,150 @@ enum { ETHTOOL_A_WOL_MAX = __ETHTOOL_A_WOL_CNT - 1 }; +/* FEATURES */ + +enum { + ETHTOOL_A_FEATURES_UNSPEC, + ETHTOOL_A_FEATURES_HEADER, /* nest - _A_HEADER_* */ + ETHTOOL_A_FEATURES_HW, /* bitset */ + ETHTOOL_A_FEATURES_WANTED, /* bitset */ + ETHTOOL_A_FEATURES_ACTIVE, /* bitset */ + ETHTOOL_A_FEATURES_NOCHANGE, /* bitset */ + + /* add new constants above here */ + __ETHTOOL_A_FEATURES_CNT, + ETHTOOL_A_FEATURES_MAX = __ETHTOOL_A_FEATURES_CNT - 1 +}; + +/* PRIVFLAGS */ + +enum { + ETHTOOL_A_PRIVFLAGS_UNSPEC, + ETHTOOL_A_PRIVFLAGS_HEADER, /* nest - _A_HEADER_* */ + ETHTOOL_A_PRIVFLAGS_FLAGS, /* bitset */ + + /* add new constants above here */ + __ETHTOOL_A_PRIVFLAGS_CNT, + ETHTOOL_A_PRIVFLAGS_MAX = __ETHTOOL_A_PRIVFLAGS_CNT - 1 +}; + +/* RINGS */ + +enum { + ETHTOOL_A_RINGS_UNSPEC, + ETHTOOL_A_RINGS_HEADER, /* nest - _A_HEADER_* */ + ETHTOOL_A_RINGS_RX_MAX, /* u32 */ + ETHTOOL_A_RINGS_RX_MINI_MAX, /* u32 */ + ETHTOOL_A_RINGS_RX_JUMBO_MAX, /* u32 */ + ETHTOOL_A_RINGS_TX_MAX, /* u32 */ + ETHTOOL_A_RINGS_RX, /* u32 */ + ETHTOOL_A_RINGS_RX_MINI, /* u32 */ + ETHTOOL_A_RINGS_RX_JUMBO, /* u32 */ + ETHTOOL_A_RINGS_TX, /* u32 */ + + /* add new constants above here */ + __ETHTOOL_A_RINGS_CNT, + ETHTOOL_A_RINGS_MAX = (__ETHTOOL_A_RINGS_CNT - 1) +}; + +/* CHANNELS */ + +enum { + ETHTOOL_A_CHANNELS_UNSPEC, + ETHTOOL_A_CHANNELS_HEADER, /* nest - _A_HEADER_* */ + ETHTOOL_A_CHANNELS_RX_MAX, /* u32 */ + ETHTOOL_A_CHANNELS_TX_MAX, /* u32 */ + ETHTOOL_A_CHANNELS_OTHER_MAX, /* u32 */ + ETHTOOL_A_CHANNELS_COMBINED_MAX, /* u32 */ + ETHTOOL_A_CHANNELS_RX_COUNT, /* u32 */ + ETHTOOL_A_CHANNELS_TX_COUNT, /* u32 */ + ETHTOOL_A_CHANNELS_OTHER_COUNT, /* u32 */ + ETHTOOL_A_CHANNELS_COMBINED_COUNT, /* u32 */ + + /* add new constants above here */ + __ETHTOOL_A_CHANNELS_CNT, + ETHTOOL_A_CHANNELS_MAX = 
(__ETHTOOL_A_CHANNELS_CNT - 1) +}; + +/* COALESCE */ + +enum { + ETHTOOL_A_COALESCE_UNSPEC, + ETHTOOL_A_COALESCE_HEADER, /* nest - _A_HEADER_* */ + ETHTOOL_A_COALESCE_RX_USECS, /* u32 */ + ETHTOOL_A_COALESCE_RX_MAX_FRAMES, /* u32 */ + ETHTOOL_A_COALESCE_RX_USECS_IRQ, /* u32 */ + ETHTOOL_A_COALESCE_RX_MAX_FRAMES_IRQ, /* u32 */ + ETHTOOL_A_COALESCE_TX_USECS, /* u32 */ + ETHTOOL_A_COALESCE_TX_MAX_FRAMES, /* u32 */ + ETHTOOL_A_COALESCE_TX_USECS_IRQ, /* u32 */ + ETHTOOL_A_COALESCE_TX_MAX_FRAMES_IRQ, /* u32 */ + ETHTOOL_A_COALESCE_STATS_BLOCK_USECS, /* u32 */ + ETHTOOL_A_COALESCE_USE_ADAPTIVE_RX, /* u8 */ + ETHTOOL_A_COALESCE_USE_ADAPTIVE_TX, /* u8 */ + ETHTOOL_A_COALESCE_PKT_RATE_LOW, /* u32 */ + ETHTOOL_A_COALESCE_RX_USECS_LOW, /* u32 */ + ETHTOOL_A_COALESCE_RX_MAX_FRAMES_LOW, /* u32 */ + ETHTOOL_A_COALESCE_TX_USECS_LOW, /* u32 */ + ETHTOOL_A_COALESCE_TX_MAX_FRAMES_LOW, /* u32 */ + ETHTOOL_A_COALESCE_PKT_RATE_HIGH, /* u32 */ + ETHTOOL_A_COALESCE_RX_USECS_HIGH, /* u32 */ + ETHTOOL_A_COALESCE_RX_MAX_FRAMES_HIGH, /* u32 */ + ETHTOOL_A_COALESCE_TX_USECS_HIGH, /* u32 */ + ETHTOOL_A_COALESCE_TX_MAX_FRAMES_HIGH, /* u32 */ + ETHTOOL_A_COALESCE_RATE_SAMPLE_INTERVAL, /* u32 */ + + /* add new constants above here */ + __ETHTOOL_A_COALESCE_CNT, + ETHTOOL_A_COALESCE_MAX = (__ETHTOOL_A_COALESCE_CNT - 1) +}; + +/* PAUSE */ + +enum { + ETHTOOL_A_PAUSE_UNSPEC, + ETHTOOL_A_PAUSE_HEADER, /* nest - _A_HEADER_* */ + ETHTOOL_A_PAUSE_AUTONEG, /* u8 */ + ETHTOOL_A_PAUSE_RX, /* u8 */ + ETHTOOL_A_PAUSE_TX, /* u8 */ + + /* add new constants above here */ + __ETHTOOL_A_PAUSE_CNT, + ETHTOOL_A_PAUSE_MAX = (__ETHTOOL_A_PAUSE_CNT - 1) +}; + +/* EEE */ + +enum { + ETHTOOL_A_EEE_UNSPEC, + ETHTOOL_A_EEE_HEADER, /* nest - _A_HEADER_* */ + ETHTOOL_A_EEE_MODES_OURS, /* bitset */ + ETHTOOL_A_EEE_MODES_PEER, /* bitset */ + ETHTOOL_A_EEE_ACTIVE, /* u8 */ + ETHTOOL_A_EEE_ENABLED, /* u8 */ + ETHTOOL_A_EEE_TX_LPI_ENABLED, /* u8 */ + ETHTOOL_A_EEE_TX_LPI_TIMER, /* u32 */ + + /* add new constants above here */ + __ETHTOOL_A_EEE_CNT, + ETHTOOL_A_EEE_MAX = (__ETHTOOL_A_EEE_CNT - 1) +}; + +/* TSINFO */ + +enum { + ETHTOOL_A_TSINFO_UNSPEC, + ETHTOOL_A_TSINFO_HEADER, /* nest - _A_HEADER_* */ + ETHTOOL_A_TSINFO_TIMESTAMPING, /* bitset */ + ETHTOOL_A_TSINFO_TX_TYPES, /* bitset */ + ETHTOOL_A_TSINFO_RX_FILTERS, /* bitset */ + ETHTOOL_A_TSINFO_PHC_INDEX, /* u32 */ + + /* add new constants above here */ + __ETHTOOL_A_TSINFO_CNT, + ETHTOOL_A_TSINFO_MAX = (__ETHTOOL_A_TSINFO_CNT - 1) +}; + /* generic netlink info */ #define ETHTOOL_GENL_NAME "ethtool" #define ETHTOOL_GENL_VERSION 1 diff --git a/include/uapi/linux/fanotify.h b/include/uapi/linux/fanotify.h index b9effa6f8503..a88c7c6d0692 100644 --- a/include/uapi/linux/fanotify.h +++ b/include/uapi/linux/fanotify.h @@ -24,10 +24,11 @@ #define FAN_OPEN_PERM 0x00010000 /* File open in perm check */ #define FAN_ACCESS_PERM 0x00020000 /* File accessed in perm check */ #define FAN_OPEN_EXEC_PERM 0x00040000 /* File open/exec in perm check */ +#define FAN_DIR_MODIFY 0x00080000 /* Directory entry was modified */ -#define FAN_ONDIR 0x40000000 /* event occurred against dir */ +#define FAN_EVENT_ON_CHILD 0x08000000 /* Interested in child events */ -#define FAN_EVENT_ON_CHILD 0x08000000 /* interested in child events */ +#define FAN_ONDIR 0x40000000 /* Event occurred against dir */ /* helper events */ #define FAN_CLOSE (FAN_CLOSE_WRITE | FAN_CLOSE_NOWRITE) /* close */ @@ -116,6 +117,7 @@ struct fanotify_event_metadata { }; #define FAN_EVENT_INFO_TYPE_FID 1 +#define FAN_EVENT_INFO_TYPE_DFID_NAME 2 /* Variable 
length info record following event metadata */ struct fanotify_event_info_header { @@ -124,7 +126,12 @@ struct fanotify_event_info_header { __u16 len; }; -/* Unique file identifier info record */ +/* + * Unique file identifier info record. This is used both for + * FAN_EVENT_INFO_TYPE_FID records and for FAN_EVENT_INFO_TYPE_DFID_NAME + * records. For FAN_EVENT_INFO_TYPE_DFID_NAME there is additionally a null + * terminated name immediately after the file handle. + */ struct fanotify_event_info_fid { struct fanotify_event_info_header hdr; __kernel_fsid_t fsid; diff --git a/include/uapi/linux/fdreg.h b/include/uapi/linux/fdreg.h index 5e2981d5c523..1318881954e1 100644 --- a/include/uapi/linux/fdreg.h +++ b/include/uapi/linux/fdreg.h @@ -7,26 +7,18 @@ * Handbook", Sanches and Canton. */ -#ifdef FDPATCHES -#define FD_IOPORT fdc_state[fdc].address -#else -/* It would be a lot saner just to force fdc_state[fdc].address to always - be set ! FIXME */ -#define FD_IOPORT 0x3f0 -#endif - /* Fd controller regs. S&C, about page 340 */ -#define FD_STATUS (4 + FD_IOPORT ) -#define FD_DATA (5 + FD_IOPORT ) +#define FD_STATUS 4 +#define FD_DATA 5 /* Digital Output Register */ -#define FD_DOR (2 + FD_IOPORT ) +#define FD_DOR 2 /* Digital Input Register (read) */ -#define FD_DIR (7 + FD_IOPORT ) +#define FD_DIR 7 /* Diskette Control Register (write)*/ -#define FD_DCR (7 + FD_IOPORT ) +#define FD_DCR 7 /* Bits of main status register */ #define STATUS_BUSYMASK 0x0F /* drive busy mask */ diff --git a/include/uapi/linux/fscrypt.h b/include/uapi/linux/fscrypt.h index 0d8a6f47711c..a10e3cdc2839 100644 --- a/include/uapi/linux/fscrypt.h +++ b/include/uapi/linux/fscrypt.h @@ -163,6 +163,7 @@ struct fscrypt_get_key_status_arg { #define FS_IOC_REMOVE_ENCRYPTION_KEY _IOWR('f', 24, struct fscrypt_remove_key_arg) #define FS_IOC_REMOVE_ENCRYPTION_KEY_ALL_USERS _IOWR('f', 25, struct fscrypt_remove_key_arg) #define FS_IOC_GET_ENCRYPTION_KEY_STATUS _IOWR('f', 26, struct fscrypt_get_key_status_arg) +#define FS_IOC_GET_ENCRYPTION_NONCE _IOR('f', 27, __u8[16]) /**********************************************************************/ diff --git a/include/uapi/linux/gpio.h b/include/uapi/linux/gpio.h index 799cf823d493..0206383c0383 100644 --- a/include/uapi/linux/gpio.h +++ b/include/uapi/linux/gpio.h @@ -18,7 +18,7 @@ * struct gpiochip_info - Information about a certain GPIO chip * @name: the Linux kernel name of this GPIO chip * @label: a functional name for this GPIO chip, such as a product - * number, may be NULL + * number, may be empty * @lines: number of GPIO lines on this chip */ struct gpiochip_info { @@ -44,10 +44,10 @@ struct gpiochip_info { * @flags: various flags for this line * @name: the name of this GPIO line, such as the output pin of the line on the * chip, a rail or a pin header name on a board, as specified by the gpio - * chip, may be NULL + * chip, may be empty * @consumer: a functional name for the consumer of this GPIO line as set by - * whatever is using it, will be NULL if there is no current user but may - * also be NULL if the consumer doesn't set this up + * whatever is using it, will be empty if there is no current user but may + * also be empty if the consumer doesn't set this up */ struct gpioline_info { __u32 line_offset; @@ -59,6 +59,34 @@ struct gpioline_info { /* Maximum number of requested handles */ #define GPIOHANDLES_MAX 64 +/* Possible line status change events */ +enum { + GPIOLINE_CHANGED_REQUESTED = 1, + GPIOLINE_CHANGED_RELEASED, + GPIOLINE_CHANGED_CONFIG, +}; + +/** + * struct 
gpioline_info_changed - Information about a change in status + * of a GPIO line + * @info: updated line information + * @timestamp: estimate of time of status change occurrence, in nanoseconds + * and GPIOLINE_CHANGED_CONFIG + * @event_type: one of GPIOLINE_CHANGED_REQUESTED, GPIOLINE_CHANGED_RELEASED + * + * Note: struct gpioline_info embedded here has 32-bit alignment on its own, + * but it works fine with 64-bit alignment too. With its 72 byte size, we can + * guarantee there are no implicit holes between it and subsequent members. + * The 20-byte padding at the end makes sure we don't add any implicit padding + * at the end of the structure on 64-bit architectures. + */ +struct gpioline_info_changed { + struct gpioline_info info; + __u64 timestamp; + __u32 event_type; + __u32 padding[5]; /* for future use */ +}; + /* Linerequest flags */ #define GPIOHANDLE_REQUEST_INPUT (1UL << 0) #define GPIOHANDLE_REQUEST_OUTPUT (1UL << 1) @@ -176,6 +204,8 @@ struct gpioevent_data { #define GPIO_GET_CHIPINFO_IOCTL _IOR(0xB4, 0x01, struct gpiochip_info) #define GPIO_GET_LINEINFO_IOCTL _IOWR(0xB4, 0x02, struct gpioline_info) +#define GPIO_GET_LINEINFO_WATCH_IOCTL _IOWR(0xB4, 0x0b, struct gpioline_info) +#define GPIO_GET_LINEINFO_UNWATCH_IOCTL _IOWR(0xB4, 0x0c, __u32) #define GPIO_GET_LINEHANDLE_IOCTL _IOWR(0xB4, 0x03, struct gpiohandle_request) #define GPIO_GET_LINEEVENT_IOCTL _IOWR(0xB4, 0x04, struct gpioevent_request) diff --git a/include/uapi/linux/idxd.h b/include/uapi/linux/idxd.h index 849ef1515d04..1f412fbf561b 100644 --- a/include/uapi/linux/idxd.h +++ b/include/uapi/linux/idxd.h @@ -83,21 +83,6 @@ enum dsa_completion_status { #define DSA_COMP_STATUS_MASK 0x7f #define DSA_COMP_STATUS_WRITE 0x80 -struct dsa_batch_desc { - uint32_t pasid:20; - uint32_t rsvd:11; - uint32_t priv:1; - uint32_t flags:24; - uint32_t opcode:8; - uint64_t completion_addr; - uint64_t desc_list_addr; - uint64_t rsvd1; - uint32_t desc_count; - uint16_t interrupt_handle; - uint16_t rsvd2; - uint8_t rsvd3[24]; -} __attribute__((packed)); - struct dsa_hw_desc { uint32_t pasid:20; uint32_t rsvd:11; @@ -109,6 +94,7 @@ struct dsa_hw_desc { uint64_t src_addr; uint64_t rdback_addr; uint64_t pattern; + uint64_t desc_list_addr; }; union { uint64_t dst_addr; @@ -116,7 +102,10 @@ struct dsa_hw_desc { uint64_t src2_addr; uint64_t comp_pattern; }; - uint32_t xfer_size; + union { + uint32_t xfer_size; + uint32_t desc_count; + }; uint16_t int_handle; uint16_t rsvd1; union { diff --git a/include/uapi/linux/if_arcnet.h b/include/uapi/linux/if_arcnet.h index 683878036d76..b122cfac7128 100644 --- a/include/uapi/linux/if_arcnet.h +++ b/include/uapi/linux/if_arcnet.h @@ -60,7 +60,7 @@ struct arc_rfc1201 { __u8 proto; /* protocol ID field - varies */ __u8 split_flag; /* for use with split packets */ __be16 sequence; /* sequence number */ - __u8 payload[0]; /* space remaining in packet (504 bytes)*/ + __u8 payload[]; /* space remaining in packet (504 bytes)*/ }; #define RFC1201_HDR_SIZE 4 @@ -69,7 +69,7 @@ struct arc_rfc1201 { */ struct arc_rfc1051 { __u8 proto; /* ARC_P_RFC1051_ARP/RFC1051_IP */ - __u8 payload[0]; /* 507 bytes */ + __u8 payload[]; /* 507 bytes */ }; #define RFC1051_HDR_SIZE 1 @@ -80,7 +80,7 @@ struct arc_rfc1051 { struct arc_eth_encap { __u8 proto; /* Always ARC_P_ETHER */ struct ethhdr eth; /* standard ethernet header (yuck!) 
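The new watch ioctl and struct gpioline_info_changed above give user space asynchronous line-status notifications: arm the watch with GPIO_GET_LINEINFO_WATCH_IOCTL, then read() change records from the chip file descriptor. A minimal sketch with placeholder chip path and line offset:

/* Hedged sketch: watch one GPIO line for requested/released/config changes. */
#include <fcntl.h>
#include <linux/gpio.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
	struct gpioline_info_changed ev;
	struct gpioline_info info;
	int fd = open("/dev/gpiochip0", O_RDONLY);	/* placeholder chip */

	if (fd < 0)
		return 1;

	memset(&info, 0, sizeof(info));
	info.line_offset = 17;				/* placeholder line */

	if (ioctl(fd, GPIO_GET_LINEINFO_WATCH_IOCTL, &info))
		return 1;

	while (read(fd, &ev, sizeof(ev)) == sizeof(ev))
		printf("line %u event %u at %llu ns\n",
		       ev.info.line_offset, ev.event_type,
		       (unsigned long long)ev.timestamp);
	close(fd);
	return 0;
}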
*/ - __u8 payload[0]; /* 493 bytes */ + __u8 payload[]; /* 493 bytes */ }; #define ETH_ENCAP_HDR_SIZE 14 diff --git a/include/uapi/linux/if_bridge.h b/include/uapi/linux/if_bridge.h index 42f7ca38ad80..bfe621ea51b3 100644 --- a/include/uapi/linux/if_bridge.h +++ b/include/uapi/linux/if_bridge.h @@ -174,6 +174,16 @@ struct br_vlan_msg { __u32 ifindex; }; +enum { + BRIDGE_VLANDB_DUMP_UNSPEC, + BRIDGE_VLANDB_DUMP_FLAGS, + __BRIDGE_VLANDB_DUMP_MAX, +}; +#define BRIDGE_VLANDB_DUMP_MAX (__BRIDGE_VLANDB_DUMP_MAX - 1) + +/* flags used in BRIDGE_VLANDB_DUMP_FLAGS attribute to affect dumps */ +#define BRIDGE_VLANDB_DUMPF_STATS (1 << 0) /* Include stats in the dump */ + /* Bridge vlan RTM attributes * [BRIDGE_VLANDB_ENTRY] = { * [BRIDGE_VLANDB_ENTRY_INFO] @@ -192,10 +202,46 @@ enum { BRIDGE_VLANDB_ENTRY_INFO, BRIDGE_VLANDB_ENTRY_RANGE, BRIDGE_VLANDB_ENTRY_STATE, + BRIDGE_VLANDB_ENTRY_TUNNEL_INFO, + BRIDGE_VLANDB_ENTRY_STATS, __BRIDGE_VLANDB_ENTRY_MAX, }; #define BRIDGE_VLANDB_ENTRY_MAX (__BRIDGE_VLANDB_ENTRY_MAX - 1) +/* [BRIDGE_VLANDB_ENTRY] = { + * [BRIDGE_VLANDB_ENTRY_TUNNEL_INFO] = { + * [BRIDGE_VLANDB_TINFO_ID] + * ... + * } + * } + */ +enum { + BRIDGE_VLANDB_TINFO_UNSPEC, + BRIDGE_VLANDB_TINFO_ID, + BRIDGE_VLANDB_TINFO_CMD, + __BRIDGE_VLANDB_TINFO_MAX, +}; +#define BRIDGE_VLANDB_TINFO_MAX (__BRIDGE_VLANDB_TINFO_MAX - 1) + +/* [BRIDGE_VLANDB_ENTRY] = { + * [BRIDGE_VLANDB_ENTRY_STATS] = { + * [BRIDGE_VLANDB_STATS_RX_BYTES] + * ... + * } + * ... + * } + */ +enum { + BRIDGE_VLANDB_STATS_UNSPEC, + BRIDGE_VLANDB_STATS_RX_BYTES, + BRIDGE_VLANDB_STATS_RX_PACKETS, + BRIDGE_VLANDB_STATS_TX_BYTES, + BRIDGE_VLANDB_STATS_TX_PACKETS, + BRIDGE_VLANDB_STATS_PAD, + __BRIDGE_VLANDB_STATS_MAX, +}; +#define BRIDGE_VLANDB_STATS_MAX (__BRIDGE_VLANDB_STATS_MAX - 1) + /* Bridge multicast database attributes * [MDBA_MDB] = { * [MDBA_MDB_ENTRY] = { diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h index 024af2d1d0af..127c704eeba9 100644 --- a/include/uapi/linux/if_link.h +++ b/include/uapi/linux/if_link.h @@ -463,6 +463,7 @@ enum { IFLA_MACSEC_REPLAY_PROTECT, IFLA_MACSEC_VALIDATION, IFLA_MACSEC_PAD, + IFLA_MACSEC_OFFLOAD, __IFLA_MACSEC_MAX, }; @@ -489,6 +490,7 @@ enum macsec_validation_type { enum macsec_offload { MACSEC_OFFLOAD_OFF = 0, MACSEC_OFFLOAD_PHY = 1, + MACSEC_OFFLOAD_MAC = 2, __MACSEC_OFFLOAD_END, MACSEC_OFFLOAD_MAX = __MACSEC_OFFLOAD_END - 1, }; @@ -590,6 +592,18 @@ enum ifla_geneve_df { GENEVE_DF_MAX = __GENEVE_DF_END - 1, }; +/* Bareudp section */ +enum { + IFLA_BAREUDP_UNSPEC, + IFLA_BAREUDP_PORT, + IFLA_BAREUDP_ETHERTYPE, + IFLA_BAREUDP_SRCPORT_MIN, + IFLA_BAREUDP_MULTIPROTO_MODE, + __IFLA_BAREUDP_MAX +}; + +#define IFLA_BAREUDP_MAX (__IFLA_BAREUDP_MAX - 1) + /* PPP section */ enum { IFLA_PPP_UNSPEC, @@ -960,11 +974,12 @@ enum { #define XDP_FLAGS_SKB_MODE (1U << 1) #define XDP_FLAGS_DRV_MODE (1U << 2) #define XDP_FLAGS_HW_MODE (1U << 3) +#define XDP_FLAGS_REPLACE (1U << 4) #define XDP_FLAGS_MODES (XDP_FLAGS_SKB_MODE | \ XDP_FLAGS_DRV_MODE | \ XDP_FLAGS_HW_MODE) #define XDP_FLAGS_MASK (XDP_FLAGS_UPDATE_IF_NOEXIST | \ - XDP_FLAGS_MODES) + XDP_FLAGS_MODES | XDP_FLAGS_REPLACE) /* These are stored into IFLA_XDP_ATTACHED on dump. 
*/ enum { @@ -984,6 +999,7 @@ enum { IFLA_XDP_DRV_PROG_ID, IFLA_XDP_SKB_PROG_ID, IFLA_XDP_HW_PROG_ID, + IFLA_XDP_EXPECTED_FD, __IFLA_XDP_MAX, }; diff --git a/include/uapi/linux/if_macsec.h b/include/uapi/linux/if_macsec.h index 1d63c43c38cc..3af2aa069a36 100644 --- a/include/uapi/linux/if_macsec.h +++ b/include/uapi/linux/if_macsec.h @@ -22,9 +22,11 @@ #define MACSEC_KEYID_LEN 16 -/* cipher IDs as per IEEE802.1AEbn-2011 */ +/* cipher IDs as per IEEE802.1AE-2018 (Table 14-1) */ #define MACSEC_CIPHER_ID_GCM_AES_128 0x0080C20001000001ULL #define MACSEC_CIPHER_ID_GCM_AES_256 0x0080C20001000002ULL +#define MACSEC_CIPHER_ID_GCM_AES_XPN_128 0x0080C20001000003ULL +#define MACSEC_CIPHER_ID_GCM_AES_XPN_256 0x0080C20001000004ULL /* deprecated cipher ID for GCM-AES-128 */ #define MACSEC_DEFAULT_CIPHER_ID 0x0080020001000001ULL @@ -88,11 +90,13 @@ enum macsec_sa_attrs { MACSEC_SA_ATTR_UNSPEC, MACSEC_SA_ATTR_AN, /* config/dump, u8 0..3 */ MACSEC_SA_ATTR_ACTIVE, /* config/dump, u8 0..1 */ - MACSEC_SA_ATTR_PN, /* config/dump, u32 */ + MACSEC_SA_ATTR_PN, /* config/dump, u32/u64 (u64 if XPN) */ MACSEC_SA_ATTR_KEY, /* config, data */ MACSEC_SA_ATTR_KEYID, /* config/dump, 128-bit */ MACSEC_SA_ATTR_STATS, /* dump, nested, macsec_sa_stats_attr */ MACSEC_SA_ATTR_PAD, + MACSEC_SA_ATTR_SSCI, /* config/dump, u32 - XPN only */ + MACSEC_SA_ATTR_SALT, /* config, 96-bit - XPN only */ __MACSEC_SA_ATTR_END, NUM_MACSEC_SA_ATTR = __MACSEC_SA_ATTR_END, MACSEC_SA_ATTR_MAX = __MACSEC_SA_ATTR_END - 1, diff --git a/include/uapi/linux/in.h b/include/uapi/linux/in.h index 1521073b6348..8533bf07450f 100644 --- a/include/uapi/linux/in.h +++ b/include/uapi/linux/in.h @@ -74,6 +74,8 @@ enum { #define IPPROTO_UDPLITE IPPROTO_UDPLITE IPPROTO_MPLS = 137, /* MPLS in IP (RFC 4023) */ #define IPPROTO_MPLS IPPROTO_MPLS + IPPROTO_ETHERNET = 143, /* Ethernet-within-IPv6 Encapsulation */ +#define IPPROTO_ETHERNET IPPROTO_ETHERNET IPPROTO_RAW = 255, /* Raw IP packets */ #define IPPROTO_RAW IPPROTO_RAW IPPROTO_MPTCP = 262, /* Multipath TCP connection */ diff --git a/include/uapi/linux/inet_diag.h b/include/uapi/linux/inet_diag.h index a1ff345b3f33..57cc429a9177 100644 --- a/include/uapi/linux/inet_diag.h +++ b/include/uapi/linux/inet_diag.h @@ -64,9 +64,11 @@ struct inet_diag_req_raw { enum { INET_DIAG_REQ_NONE, INET_DIAG_REQ_BYTECODE, + INET_DIAG_REQ_SK_BPF_STORAGES, + __INET_DIAG_REQ_MAX, }; -#define INET_DIAG_REQ_MAX INET_DIAG_REQ_BYTECODE +#define INET_DIAG_REQ_MAX (__INET_DIAG_REQ_MAX - 1) /* Bytecode is sequence of 4 byte commands followed by variable arguments. * All the commands identified by "code" are conditional jumps forward: @@ -154,6 +156,7 @@ enum { INET_DIAG_CLASS_ID, /* request as INET_DIAG_TCLASS */ INET_DIAG_MD5SIG, INET_DIAG_ULP_INFO, + INET_DIAG_SK_BPF_STORAGES, __INET_DIAG_MAX, }; @@ -163,6 +166,7 @@ enum { INET_ULP_INFO_UNSPEC, INET_ULP_INFO_NAME, INET_ULP_INFO_TLS, + INET_ULP_INFO_MPTCP, __INET_ULP_INFO_MAX, }; #define INET_ULP_INFO_MAX (__INET_ULP_INFO_MAX - 1) diff --git a/include/uapi/linux/input-event-codes.h b/include/uapi/linux/input-event-codes.h index 0f1db1cccc3f..6923dc7e0298 100644 --- a/include/uapi/linux/input-event-codes.h +++ b/include/uapi/linux/input-event-codes.h @@ -652,6 +652,9 @@ /* Electronic privacy screen control */ #define KEY_PRIVACY_SCREEN_TOGGLE 0x279 +/* Select an area of screen to be copied */ +#define KEY_SELECTIVE_SCREENSHOT 0x27a + /* * Some keyboards have keys which do not have a defined meaning, these keys * are intended to be programmed / bound to macros by the user. 
For most diff --git a/include/uapi/linux/io_uring.h b/include/uapi/linux/io_uring.h index 3f7961c1c243..e48d746b8e2a 100644 --- a/include/uapi/linux/io_uring.h +++ b/include/uapi/linux/io_uring.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +/* SPDX-License-Identifier: (GPL-2.0 WITH Linux-syscall-note) OR MIT */ /* * Header file for the io_uring interface. * @@ -23,7 +23,10 @@ struct io_uring_sqe { __u64 off; /* offset into file */ __u64 addr2; }; - __u64 addr; /* pointer to buffer or iovecs */ + union { + __u64 addr; /* pointer to buffer or iovecs */ + __u64 splice_off_in; + }; __u32 len; /* buffer size or number of iovecs */ union { __kernel_rwf_t rw_flags; @@ -37,14 +40,21 @@ struct io_uring_sqe { __u32 open_flags; __u32 statx_flags; __u32 fadvise_advice; + __u32 splice_flags; }; __u64 user_data; /* data to be passed back at completion time */ union { struct { - /* index into fixed buffers, if used */ - __u16 buf_index; + /* pack this to avoid bogus arm OABI complaints */ + union { + /* index into fixed buffers, if used */ + __u16 buf_index; + /* for grouped buffer selection */ + __u16 buf_group; + } __attribute__((packed)); /* personality to use, if used */ __u16 personality; + __s32 splice_fd_in; }; __u64 __pad2[3]; }; @@ -56,6 +66,7 @@ enum { IOSQE_IO_LINK_BIT, IOSQE_IO_HARDLINK_BIT, IOSQE_ASYNC_BIT, + IOSQE_BUFFER_SELECT_BIT, }; /* @@ -71,6 +82,8 @@ enum { #define IOSQE_IO_HARDLINK (1U << IOSQE_IO_HARDLINK_BIT) /* always go async */ #define IOSQE_ASYNC (1U << IOSQE_ASYNC_BIT) +/* select buffer from sqe->buf_group */ +#define IOSQE_BUFFER_SELECT (1U << IOSQE_BUFFER_SELECT_BIT) /* * io_uring_setup() flags @@ -113,6 +126,9 @@ enum { IORING_OP_RECV, IORING_OP_OPENAT2, IORING_OP_EPOLL_CTL, + IORING_OP_SPLICE, + IORING_OP_PROVIDE_BUFFERS, + IORING_OP_REMOVE_BUFFERS, /* this goes last, obviously */ IORING_OP_LAST, @@ -129,6 +145,12 @@ enum { #define IORING_TIMEOUT_ABS (1U << 0) /* + * sqe->splice_flags + * extends splice(2) flags + */ +#define SPLICE_F_FD_IN_FIXED (1U << 31) /* the last bit of __u32 */ + +/* * IO completion data structure (Completion Queue Entry) */ struct io_uring_cqe { @@ -138,6 +160,17 @@ struct io_uring_cqe { }; /* + * cqe->flags + * + * IORING_CQE_F_BUFFER If set, the upper 16 bits are the buffer ID + */ +#define IORING_CQE_F_BUFFER (1U << 0) + +enum { + IORING_CQE_BUFFER_SHIFT = 16, +}; + +/* * Magic offsets for the application to mmap the data it needs */ #define IORING_OFF_SQ_RING 0ULL @@ -204,6 +237,7 @@ struct io_uring_params { #define IORING_FEAT_SUBMIT_STABLE (1U << 2) #define IORING_FEAT_RW_CUR_POS (1U << 3) #define IORING_FEAT_CUR_PERSONALITY (1U << 4) +#define IORING_FEAT_FAST_POLL (1U << 5) /* * io_uring_register(2) opcodes and arguments diff --git a/include/uapi/linux/ipv6.h b/include/uapi/linux/ipv6.h index 9c0f4a92bcff..13e8751bf24a 100644 --- a/include/uapi/linux/ipv6.h +++ b/include/uapi/linux/ipv6.h @@ -40,6 +40,7 @@ struct in6_ifreq { #define IPV6_SRCRT_STRICT 0x01 /* Deprecated; will be removed */ #define IPV6_SRCRT_TYPE_0 0 /* Deprecated; will be removed */ #define IPV6_SRCRT_TYPE_2 2 /* IPv6 type 2 Routing Header */ +#define IPV6_SRCRT_TYPE_3 3 /* RPL Segment Routing with IPv6 */ #define IPV6_SRCRT_TYPE_4 4 /* Segment Routing with IPv6 */ /* @@ -187,6 +188,7 @@ enum { DEVCONF_DISABLE_POLICY, DEVCONF_ACCEPT_RA_RT_INFO_MIN_PLEN, DEVCONF_NDISC_TCLASS, + DEVCONF_RPL_SEG_ENABLED, DEVCONF_MAX }; diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index 4b95f9a31a2f..428c7dde6b4b 100644 --- 
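The buf_group field, IORING_OP_PROVIDE_BUFFERS and IOSQE_BUFFER_SELECT above work together: buffers are first registered into a group, then a request marked with IOSQE_BUFFER_SELECT lets the kernel pick one from that group, reporting the chosen buffer id in the upper bits of cqe->flags when IORING_CQE_F_BUFFER is set. A minimal sketch of how the SQE fields are filled; the helper names are illustrative, and liburing's io_uring_prep_provide_buffers() covers the same ground:

#include <linux/io_uring.h>
#include <string.h>

/* Register 'nr' buffers of 'len' bytes starting at 'addr' into buffer
 * group 'bgid', with buffer ids starting at 'bid'. */
static void prep_provide_buffers(struct io_uring_sqe *sqe, void *addr,
				 unsigned int len, int nr,
				 unsigned short bgid, unsigned int bid)
{
	memset(sqe, 0, sizeof(*sqe));
	sqe->opcode = IORING_OP_PROVIDE_BUFFERS;
	sqe->fd = nr;				/* number of buffers */
	sqe->addr = (unsigned long)addr;	/* start of the buffer range */
	sqe->len = len;				/* size of each buffer */
	sqe->off = bid;				/* first buffer id */
	sqe->buf_group = bgid;
}

/* Queue a read that lets the kernel pick a buffer from group 'bgid';
 * the chosen id comes back as cqe->flags >> IORING_CQE_BUFFER_SHIFT. */
static void prep_read_buffer_select(struct io_uring_sqe *sqe, int fd,
				    unsigned int max_len, unsigned short bgid)
{
	memset(sqe, 0, sizeof(*sqe));
	sqe->opcode = IORING_OP_READ;
	sqe->fd = fd;
	sqe->len = max_len;
	sqe->flags = IOSQE_BUFFER_SELECT;
	sqe->buf_group = bgid;
}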
a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h @@ -474,12 +474,17 @@ struct kvm_s390_mem_op { __u32 size; /* amount of bytes */ __u32 op; /* type of operation */ __u64 buf; /* buffer in userspace */ - __u8 ar; /* the access register number */ - __u8 reserved[31]; /* should be set to 0 */ + union { + __u8 ar; /* the access register number */ + __u32 sida_offset; /* offset into the sida */ + __u8 reserved[32]; /* should be set to 0 */ + }; }; /* types for kvm_s390_mem_op->op */ #define KVM_S390_MEMOP_LOGICAL_READ 0 #define KVM_S390_MEMOP_LOGICAL_WRITE 1 +#define KVM_S390_MEMOP_SIDA_READ 2 +#define KVM_S390_MEMOP_SIDA_WRITE 3 /* flags for kvm_s390_mem_op->flags */ #define KVM_S390_MEMOP_F_CHECK_ONLY (1ULL << 0) #define KVM_S390_MEMOP_F_INJECT_EXCEPTION (1ULL << 1) @@ -1010,6 +1015,8 @@ struct kvm_ppc_resize_hpt { #define KVM_CAP_ARM_NISV_TO_USER 177 #define KVM_CAP_ARM_INJECT_EXT_DABT 178 #define KVM_CAP_S390_VCPU_RESETS 179 +#define KVM_CAP_S390_PROTECTED 180 +#define KVM_CAP_PPC_SECURE_GUEST 181 #ifdef KVM_CAP_IRQ_ROUTING @@ -1478,6 +1485,39 @@ struct kvm_enc_region { #define KVM_S390_NORMAL_RESET _IO(KVMIO, 0xc3) #define KVM_S390_CLEAR_RESET _IO(KVMIO, 0xc4) +struct kvm_s390_pv_sec_parm { + __u64 origin; + __u64 length; +}; + +struct kvm_s390_pv_unp { + __u64 addr; + __u64 size; + __u64 tweak; +}; + +enum pv_cmd_id { + KVM_PV_ENABLE, + KVM_PV_DISABLE, + KVM_PV_SET_SEC_PARMS, + KVM_PV_UNPACK, + KVM_PV_VERIFY, + KVM_PV_PREP_RESET, + KVM_PV_UNSHARE_ALL, +}; + +struct kvm_pv_cmd { + __u32 cmd; /* Command to be executed */ + __u16 rc; /* Ultravisor return code */ + __u16 rrc; /* Ultravisor return reason code */ + __u64 data; /* Data or address */ + __u32 flags; /* flags for future extensions. Must be 0 for now */ + __u32 reserved[3]; +}; + +/* Available with KVM_CAP_S390_PROTECTED */ +#define KVM_S390_PV_COMMAND _IOWR(KVMIO, 0xc5, struct kvm_pv_cmd) + /* Secure Encrypted Virtualization command */ enum sev_cmd_id { /* Guest initialization commands */ @@ -1628,4 +1668,7 @@ struct kvm_hyperv_eventfd { #define KVM_HYPERV_CONN_ID_MASK 0x00ffffff #define KVM_HYPERV_EVENTFD_DEASSIGN (1 << 0) +#define KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE (1 << 0) +#define KVM_DIRTY_LOG_INITIALLY_SET (1 << 1) + #endif /* __LINUX_KVM_H */ diff --git a/include/uapi/linux/lwtunnel.h b/include/uapi/linux/lwtunnel.h index f6035f737193..568a4303ccce 100644 --- a/include/uapi/linux/lwtunnel.h +++ b/include/uapi/linux/lwtunnel.h @@ -13,6 +13,7 @@ enum lwtunnel_encap_types { LWTUNNEL_ENCAP_SEG6, LWTUNNEL_ENCAP_BPF, LWTUNNEL_ENCAP_SEG6_LOCAL, + LWTUNNEL_ENCAP_RPL, __LWTUNNEL_ENCAP_MAX, }; diff --git a/include/uapi/linux/media-bus-format.h b/include/uapi/linux/media-bus-format.h index 16c1fa2d89a4..84fa53ffb13f 100644 --- a/include/uapi/linux/media-bus-format.h +++ b/include/uapi/linux/media-bus-format.h @@ -64,7 +64,7 @@ #define MEDIA_BUS_FMT_RGB121212_1X36 0x1019 #define MEDIA_BUS_FMT_RGB161616_1X48 0x101a -/* YUV (including grey) - next is 0x202d */ +/* YUV (including grey) - next is 0x202e */ #define MEDIA_BUS_FMT_Y8_1X8 0x2001 #define MEDIA_BUS_FMT_UV8_1X8 0x2015 #define MEDIA_BUS_FMT_UYVY8_1_5X8 0x2002 @@ -86,6 +86,7 @@ #define MEDIA_BUS_FMT_VYUY12_2X12 0x201d #define MEDIA_BUS_FMT_YUYV12_2X12 0x201e #define MEDIA_BUS_FMT_YVYU12_2X12 0x201f +#define MEDIA_BUS_FMT_Y14_1X14 0x202d #define MEDIA_BUS_FMT_UYVY8_1X16 0x200f #define MEDIA_BUS_FMT_VYUY8_1X16 0x2010 #define MEDIA_BUS_FMT_YUYV8_1X16 0x2011 diff --git a/include/uapi/linux/mii.h b/include/uapi/linux/mii.h index 0b9c3beda345..90f9b4e1ba27 100644 --- 
a/include/uapi/linux/mii.h +++ b/include/uapi/linux/mii.h @@ -134,11 +134,16 @@ /* MAC and PHY tx_config_Reg[15:0] for SGMII in-band auto-negotiation.*/ #define ADVERTISE_SGMII 0x0001 /* MAC can do SGMII */ #define LPA_SGMII 0x0001 /* PHY can do SGMII */ +#define LPA_SGMII_SPD_MASK 0x0c00 /* SGMII speed mask */ +#define LPA_SGMII_FULL_DUPLEX 0x1000 /* SGMII full duplex */ #define LPA_SGMII_DPX_SPD_MASK 0x1C00 /* SGMII duplex and speed bits */ +#define LPA_SGMII_10 0x0000 /* 10Mbps */ #define LPA_SGMII_10HALF 0x0000 /* Can do 10mbps half-duplex */ #define LPA_SGMII_10FULL 0x1000 /* Can do 10mbps full-duplex */ +#define LPA_SGMII_100 0x0400 /* 100Mbps */ #define LPA_SGMII_100HALF 0x0400 /* Can do 100mbps half-duplex */ #define LPA_SGMII_100FULL 0x1400 /* Can do 100mbps full-duplex */ +#define LPA_SGMII_1000 0x0800 /* 1000Mbps */ #define LPA_SGMII_1000HALF 0x0800 /* Can do 1000mbps half-duplex */ #define LPA_SGMII_1000FULL 0x1800 /* Can do 1000mbps full-duplex */ #define LPA_SGMII_LINK 0x8000 /* PHY link with copper-side partner */ diff --git a/include/uapi/linux/mman.h b/include/uapi/linux/mman.h index fc1a64c3447b..923cc162609c 100644 --- a/include/uapi/linux/mman.h +++ b/include/uapi/linux/mman.h @@ -5,8 +5,9 @@ #include <asm/mman.h> #include <asm-generic/hugetlb_encode.h> -#define MREMAP_MAYMOVE 1 -#define MREMAP_FIXED 2 +#define MREMAP_MAYMOVE 1 +#define MREMAP_FIXED 2 +#define MREMAP_DONTUNMAP 4 #define OVERCOMMIT_GUESS 0 #define OVERCOMMIT_ALWAYS 1 diff --git a/include/uapi/linux/mmc/ioctl.h b/include/uapi/linux/mmc/ioctl.h index 00c08120f3ba..98e29e7f54ac 100644 --- a/include/uapi/linux/mmc/ioctl.h +++ b/include/uapi/linux/mmc/ioctl.h @@ -57,7 +57,7 @@ struct mmc_ioc_cmd { */ struct mmc_ioc_multi_cmd { __u64 num_of_cmds; - struct mmc_ioc_cmd cmds[0]; + struct mmc_ioc_cmd cmds[]; }; #define MMC_IOC_CMD _IOWR(MMC_BLOCK_MAJOR, 0, struct mmc_ioc_cmd) diff --git a/include/uapi/linux/mptcp.h b/include/uapi/linux/mptcp.h new file mode 100644 index 000000000000..5f2c77082d9e --- /dev/null +++ b/include/uapi/linux/mptcp.h @@ -0,0 +1,89 @@ +/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */ +#ifndef _UAPI_MPTCP_H +#define _UAPI_MPTCP_H + +#include <linux/const.h> +#include <linux/types.h> + +#define MPTCP_SUBFLOW_FLAG_MCAP_REM _BITUL(0) +#define MPTCP_SUBFLOW_FLAG_MCAP_LOC _BITUL(1) +#define MPTCP_SUBFLOW_FLAG_JOIN_REM _BITUL(2) +#define MPTCP_SUBFLOW_FLAG_JOIN_LOC _BITUL(3) +#define MPTCP_SUBFLOW_FLAG_BKUP_REM _BITUL(4) +#define MPTCP_SUBFLOW_FLAG_BKUP_LOC _BITUL(5) +#define MPTCP_SUBFLOW_FLAG_FULLY_ESTABLISHED _BITUL(6) +#define MPTCP_SUBFLOW_FLAG_CONNECTED _BITUL(7) +#define MPTCP_SUBFLOW_FLAG_MAPVALID _BITUL(8) + +enum { + MPTCP_SUBFLOW_ATTR_UNSPEC, + MPTCP_SUBFLOW_ATTR_TOKEN_REM, + MPTCP_SUBFLOW_ATTR_TOKEN_LOC, + MPTCP_SUBFLOW_ATTR_RELWRITE_SEQ, + MPTCP_SUBFLOW_ATTR_MAP_SEQ, + MPTCP_SUBFLOW_ATTR_MAP_SFSEQ, + MPTCP_SUBFLOW_ATTR_SSN_OFFSET, + MPTCP_SUBFLOW_ATTR_MAP_DATALEN, + MPTCP_SUBFLOW_ATTR_FLAGS, + MPTCP_SUBFLOW_ATTR_ID_REM, + MPTCP_SUBFLOW_ATTR_ID_LOC, + MPTCP_SUBFLOW_ATTR_PAD, + __MPTCP_SUBFLOW_ATTR_MAX +}; + +#define MPTCP_SUBFLOW_ATTR_MAX (__MPTCP_SUBFLOW_ATTR_MAX - 1) + +/* netlink interface */ +#define MPTCP_PM_NAME "mptcp_pm" +#define MPTCP_PM_CMD_GRP_NAME "mptcp_pm_cmds" +#define MPTCP_PM_VER 0x1 + +/* + * ATTR types defined for MPTCP + */ +enum { + MPTCP_PM_ATTR_UNSPEC, + + MPTCP_PM_ATTR_ADDR, /* nested address */ + MPTCP_PM_ATTR_RCV_ADD_ADDRS, /* u32 */ + MPTCP_PM_ATTR_SUBFLOWS, /* u32 */ + + __MPTCP_PM_ATTR_MAX +}; + +#define MPTCP_PM_ATTR_MAX 
(__MPTCP_PM_ATTR_MAX - 1) + +enum { + MPTCP_PM_ADDR_ATTR_UNSPEC, + + MPTCP_PM_ADDR_ATTR_FAMILY, /* u16 */ + MPTCP_PM_ADDR_ATTR_ID, /* u8 */ + MPTCP_PM_ADDR_ATTR_ADDR4, /* struct in_addr */ + MPTCP_PM_ADDR_ATTR_ADDR6, /* struct in6_addr */ + MPTCP_PM_ADDR_ATTR_PORT, /* u16 */ + MPTCP_PM_ADDR_ATTR_FLAGS, /* u32 */ + MPTCP_PM_ADDR_ATTR_IF_IDX, /* s32 */ + + __MPTCP_PM_ADDR_ATTR_MAX +}; + +#define MPTCP_PM_ADDR_ATTR_MAX (__MPTCP_PM_ADDR_ATTR_MAX - 1) + +#define MPTCP_PM_ADDR_FLAG_SIGNAL (1 << 0) +#define MPTCP_PM_ADDR_FLAG_SUBFLOW (1 << 1) +#define MPTCP_PM_ADDR_FLAG_BACKUP (1 << 2) + +enum { + MPTCP_PM_CMD_UNSPEC, + + MPTCP_PM_CMD_ADD_ADDR, + MPTCP_PM_CMD_DEL_ADDR, + MPTCP_PM_CMD_GET_ADDR, + MPTCP_PM_CMD_FLUSH_ADDRS, + MPTCP_PM_CMD_SET_LIMITS, + MPTCP_PM_CMD_GET_LIMITS, + + __MPTCP_PM_CMD_AFTER_LAST +}; + +#endif /* _UAPI_MPTCP_H */ diff --git a/include/uapi/linux/net_dropmon.h b/include/uapi/linux/net_dropmon.h index 8bf79a9eb234..67e31f329190 100644 --- a/include/uapi/linux/net_dropmon.h +++ b/include/uapi/linux/net_dropmon.h @@ -29,12 +29,12 @@ struct net_dm_config_entry { struct net_dm_config_msg { __u32 entries; - struct net_dm_config_entry options[0]; + struct net_dm_config_entry options[]; }; struct net_dm_alert_msg { __u32 entries; - struct net_dm_drop_point points[0]; + struct net_dm_drop_point points[]; }; struct net_dm_user_msg { @@ -92,6 +92,7 @@ enum net_dm_attr { NET_DM_ATTR_HW_TRAP_COUNT, /* u32 */ NET_DM_ATTR_SW_DROPS, /* flag */ NET_DM_ATTR_HW_DROPS, /* flag */ + NET_DM_ATTR_FLOW_ACTION_COOKIE, /* binary */ __NET_DM_ATTR_MAX, NET_DM_ATTR_MAX = __NET_DM_ATTR_MAX - 1 diff --git a/include/uapi/linux/net_tstamp.h b/include/uapi/linux/net_tstamp.h index f96e650d0af9..7ed0b3d1c00a 100644 --- a/include/uapi/linux/net_tstamp.h +++ b/include/uapi/linux/net_tstamp.h @@ -98,6 +98,9 @@ enum hwtstamp_tx_types { * receive a time stamp via the socket error queue. 
*/ HWTSTAMP_TX_ONESTEP_P2P, + + /* add new constants above here */ + __HWTSTAMP_TX_CNT }; /* possible values for hwtstamp_config->rx_filter */ @@ -140,6 +143,9 @@ enum hwtstamp_rx_filters { /* NTP, UDP, all versions and packet modes */ HWTSTAMP_FILTER_NTP_ALL, + + /* add new constants above here */ + __HWTSTAMP_FILTER_CNT }; /* SCM_TIMESTAMPING_PKTINFO control message */ diff --git a/include/uapi/linux/netfilter/nf_tables.h b/include/uapi/linux/netfilter/nf_tables.h index 065218a20bb7..30f2a87270dc 100644 --- a/include/uapi/linux/netfilter/nf_tables.h +++ b/include/uapi/linux/netfilter/nf_tables.h @@ -342,6 +342,7 @@ enum nft_set_field_attributes { * @NFTA_SET_USERDATA: user data (NLA_BINARY) * @NFTA_SET_OBJ_TYPE: stateful object type (NLA_U32: NFT_OBJECT_*) * @NFTA_SET_HANDLE: set handle (NLA_U64) + * @NFTA_SET_EXPR: set expression (NLA_NESTED: nft_expr_attributes) */ enum nft_set_attributes { NFTA_SET_UNSPEC, @@ -361,6 +362,7 @@ enum nft_set_attributes { NFTA_SET_PAD, NFTA_SET_OBJ_TYPE, NFTA_SET_HANDLE, + NFTA_SET_EXPR, __NFTA_SET_MAX }; #define NFTA_SET_MAX (__NFTA_SET_MAX - 1) @@ -1552,6 +1554,19 @@ enum nft_object_attributes { #define NFTA_OBJ_MAX (__NFTA_OBJ_MAX - 1) /** + * enum nft_flowtable_flags - nf_tables flowtable flags + * + * @NFT_FLOWTABLE_HW_OFFLOAD: flowtable hardware offload is enabled + * @NFT_FLOWTABLE_COUNTER: enable flow counters + */ +enum nft_flowtable_flags { + NFT_FLOWTABLE_HW_OFFLOAD = 0x1, + NFT_FLOWTABLE_COUNTER = 0x2, + NFT_FLOWTABLE_MASK = (NFT_FLOWTABLE_HW_OFFLOAD | + NFT_FLOWTABLE_COUNTER) +}; + +/** * enum nft_flowtable_attributes - nf_tables flow table netlink attributes * * @NFTA_FLOWTABLE_TABLE: name of the table containing the expression (NLA_STRING) @@ -1770,6 +1785,7 @@ enum nft_tunnel_opts_attributes { NFTA_TUNNEL_KEY_OPTS_UNSPEC, NFTA_TUNNEL_KEY_OPTS_VXLAN, NFTA_TUNNEL_KEY_OPTS_ERSPAN, + NFTA_TUNNEL_KEY_OPTS_GENEVE, __NFTA_TUNNEL_KEY_OPTS_MAX }; #define NFTA_TUNNEL_KEY_OPTS_MAX (__NFTA_TUNNEL_KEY_OPTS_MAX - 1) @@ -1791,6 +1807,15 @@ enum nft_tunnel_opts_erspan_attributes { }; #define NFTA_TUNNEL_KEY_ERSPAN_MAX (__NFTA_TUNNEL_KEY_ERSPAN_MAX - 1) +enum nft_tunnel_opts_geneve_attributes { + NFTA_TUNNEL_KEY_GENEVE_UNSPEC, + NFTA_TUNNEL_KEY_GENEVE_CLASS, + NFTA_TUNNEL_KEY_GENEVE_TYPE, + NFTA_TUNNEL_KEY_GENEVE_DATA, + __NFTA_TUNNEL_KEY_GENEVE_MAX +}; +#define NFTA_TUNNEL_KEY_GENEVE_MAX (__NFTA_TUNNEL_KEY_GENEVE_MAX - 1) + enum nft_tunnel_flags { NFT_TUNNEL_F_ZERO_CSUM_TX = (1 << 0), NFT_TUNNEL_F_DONT_FRAGMENT = (1 << 1), diff --git a/include/uapi/linux/netfilter/xt_IDLETIMER.h b/include/uapi/linux/netfilter/xt_IDLETIMER.h index 3c586a19baea..434e6506abaa 100644 --- a/include/uapi/linux/netfilter/xt_IDLETIMER.h +++ b/include/uapi/linux/netfilter/xt_IDLETIMER.h @@ -1,4 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ /* * linux/include/linux/netfilter/xt_IDLETIMER.h * @@ -33,6 +32,7 @@ #include <linux/types.h> #define MAX_IDLETIMER_LABEL_SIZE 28 +#define XT_IDLETIMER_ALARM 0x01 struct idletimer_tg_info { __u32 timeout; @@ -43,4 +43,14 @@ struct idletimer_tg_info { struct idletimer_tg *timer __attribute__((aligned(8))); }; +struct idletimer_tg_info_v1 { + __u32 timeout; + + char label[MAX_IDLETIMER_LABEL_SIZE]; + + __u8 timer_type; + + /* for kernel module internal use only */ + struct idletimer_tg *timer __attribute__((aligned(8))); +}; #endif diff --git a/include/uapi/linux/netfilter_bridge/ebt_among.h b/include/uapi/linux/netfilter_bridge/ebt_among.h index 9acf757bc1f7..73b26a280c4f 100644 --- 
a/include/uapi/linux/netfilter_bridge/ebt_among.h +++ b/include/uapi/linux/netfilter_bridge/ebt_among.h @@ -40,7 +40,7 @@ struct ebt_mac_wormhash_tuple { struct ebt_mac_wormhash { int table[257]; int poolsize; - struct ebt_mac_wormhash_tuple pool[0]; + struct ebt_mac_wormhash_tuple pool[]; }; #define ebt_mac_wormhash_size(x) ((x) ? sizeof(struct ebt_mac_wormhash) \ diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h index 5eab191607f8..2b691161830f 100644 --- a/include/uapi/linux/nl80211.h +++ b/include/uapi/linux/nl80211.h @@ -11,7 +11,7 @@ * Copyright 2008 Jouni Malinen <jouni.malinen@atheros.com> * Copyright 2008 Colin McCabe <colin@cozybit.com> * Copyright 2015-2017 Intel Deutschland GmbH - * Copyright (C) 2018-2019 Intel Corporation + * Copyright (C) 2018-2020 Intel Corporation * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -265,6 +265,29 @@ */ /** + * DOC: TID configuration + * + * TID config support can be checked in the %NL80211_ATTR_TID_CONFIG + * attribute given in wiphy capabilities. + * + * The necessary configuration parameters are mentioned in + * &enum nl80211_tid_config_attr and it will be passed to the + * %NL80211_CMD_SET_TID_CONFIG command in %NL80211_ATTR_TID_CONFIG. + * + * If the configuration needs to be applied for specific peer then the MAC + * address of the peer needs to be passed in %NL80211_ATTR_MAC, otherwise the + * configuration will be applied for all the connected peers in the vif except + * any peers that have peer specific configuration for the TID by default; if + * the %NL80211_TID_CONFIG_ATTR_OVERRIDE flag is set, peer specific values + * will be overwritten. + * + * All this configuration is valid only for STA's current connection + * i.e. the configuration will be reset to default when the STA connects back + * after disconnection/roaming, and this configuration will be cleared when + * the interface goes down. + */ + +/** * enum nl80211_commands - supported nl80211 commands * * @NL80211_CMD_UNSPEC: unspecified command to catch errors @@ -1125,6 +1148,9 @@ * peer MAC address and %NL80211_ATTR_FRAME is used to specify the frame * content. The frame is ethernet data. * + * @NL80211_CMD_SET_TID_CONFIG: Data frame TID specific configuration + * is passed using %NL80211_ATTR_TID_CONFIG attribute. + * * @NL80211_CMD_MAX: highest used command number * @__NL80211_CMD_AFTER_LAST: internal use */ @@ -1349,6 +1375,8 @@ enum nl80211_commands { NL80211_CMD_PROBE_MESH_LINK, + NL80211_CMD_SET_TID_CONFIG, + /* add new commands above here */ /* used to define NL80211_CMD_MAX below */ @@ -1604,7 +1632,8 @@ enum nl80211_commands { * flag is included, then control port frames are sent over NL80211 instead * using %CMD_CONTROL_PORT_FRAME. If control port routing over NL80211 is * to be used then userspace must also use the %NL80211_ATTR_SOCKET_OWNER - * flag. + * flag. When used with %NL80211_ATTR_CONTROL_PORT_NO_PREAUTH, pre-auth + * frames are not forwared over the control port. * * @NL80211_ATTR_TESTDATA: Testmode data blob, passed through to the driver. * We recommend using nested, driver-specific attributes within this. @@ -2400,6 +2429,47 @@ enum nl80211_commands { * @NL80211_ATTR_VLAN_ID: VLAN ID (1..4094) for the station and VLAN group key * (u16). * + * @NL80211_ATTR_HE_BSS_COLOR: nested attribute for BSS Color Settings. 
+ * + * @NL80211_ATTR_IFTYPE_AKM_SUITES: nested array attribute, with each entry + * using attributes from &enum nl80211_iftype_akm_attributes. This + * attribute is sent in a response to %NL80211_CMD_GET_WIPHY indicating + * supported AKM suites capability per interface. AKMs advertised in + * %NL80211_ATTR_AKM_SUITES are default capabilities if AKM suites not + * advertised for a specific interface type. + * + * @NL80211_ATTR_TID_CONFIG: TID specific configuration in a + * nested attribute with &enum nl80211_tid_config_attr sub-attributes; + * on output (in wiphy attributes) it contains only the feature sub- + * attributes. + * + * @NL80211_ATTR_CONTROL_PORT_NO_PREAUTH: disable preauth frame rx on control + * port in order to forward/receive them as ordinary data frames. + * + * @NL80211_ATTR_PMK_LIFETIME: Maximum lifetime for PMKSA in seconds (u32, + * dot11RSNAConfigPMKReauthThreshold; 0 is not a valid value). + * An optional parameter configured through %NL80211_CMD_SET_PMKSA. + * Drivers that trigger roaming need to know the lifetime of the + * configured PMKSA for triggering the full vs. PMKSA caching based + * authentication. This timeout helps authentication methods like SAE, + * where PMK gets updated only by going through a full (new SAE) + * authentication instead of getting updated during an association for EAP + * authentication. No new full authentication within the PMK expiry shall + * result in a disassociation at the end of the lifetime. + * + * @NL80211_ATTR_PMK_REAUTH_THRESHOLD: Reauthentication threshold time, in + * terms of percentage of %NL80211_ATTR_PMK_LIFETIME + * (u8, dot11RSNAConfigPMKReauthThreshold, 1..100). This is an optional + * parameter configured through %NL80211_CMD_SET_PMKSA. Requests the + * driver to trigger a full authentication roam (without PMKSA caching) + * after the reauthentication threshold time, but before the PMK lifetime + * has expired. + * + * Authentication methods like SAE need to be able to generate a new PMKSA + * entry without having to force a disconnection after the PMK timeout. If + * no roaming occurs between the reauth threshold and PMK expiration, + * disassociation is still forced. + * * @NUM_NL80211_ATTR: total number of nl80211_attrs available * @NL80211_ATTR_MAX: highest attribute number currently defined * @__NL80211_ATTR_AFTER_LAST: internal use @@ -2864,6 +2934,17 @@ enum nl80211_attrs { NL80211_ATTR_VLAN_ID, + NL80211_ATTR_HE_BSS_COLOR, + + NL80211_ATTR_IFTYPE_AKM_SUITES, + + NL80211_ATTR_TID_CONFIG, + + NL80211_ATTR_CONTROL_PORT_NO_PREAUTH, + + NL80211_ATTR_PMK_LIFETIME, + NL80211_ATTR_PMK_REAUTH_THRESHOLD, + /* add attributes here, update the policy in nl80211.c */ __NL80211_ATTR_AFTER_LAST, @@ -3583,6 +3664,8 @@ enum nl80211_wmm_rule { * @NL80211_FREQUENCY_ATTR_WMM: this channel has wmm limitations. * This is a nested attribute that contains the wmm limitation per AC. * (see &enum nl80211_wmm_rule) + * @NL80211_FREQUENCY_ATTR_NO_HE: HE operation is not allowed on this channel + * in current regulatory domain. 
* @NL80211_FREQUENCY_ATTR_MAX: highest frequency attribute number * currently defined * @__NL80211_FREQUENCY_ATTR_AFTER_LAST: internal use @@ -3612,6 +3695,7 @@ enum nl80211_frequency_attr { NL80211_FREQUENCY_ATTR_NO_20MHZ, NL80211_FREQUENCY_ATTR_NO_10MHZ, NL80211_FREQUENCY_ATTR_WMM, + NL80211_FREQUENCY_ATTR_NO_HE, /* keep last */ __NL80211_FREQUENCY_ATTR_AFTER_LAST, @@ -3809,6 +3893,7 @@ enum nl80211_sched_scan_match_attr { * @NL80211_RRF_NO_HT40PLUS: channels can't be used in HT40+ operation * @NL80211_RRF_NO_80MHZ: 80MHz operation not allowed * @NL80211_RRF_NO_160MHZ: 160MHz operation not allowed + * @NL80211_RRF_NO_HE: HE operation not allowed */ enum nl80211_reg_rule_flags { NL80211_RRF_NO_OFDM = 1<<0, @@ -3826,6 +3911,7 @@ enum nl80211_reg_rule_flags { NL80211_RRF_NO_HT40PLUS = 1<<14, NL80211_RRF_NO_80MHZ = 1<<15, NL80211_RRF_NO_160MHZ = 1<<16, + NL80211_RRF_NO_HE = 1<<17, }; #define NL80211_RRF_PASSIVE_SCAN NL80211_RRF_NO_IR @@ -4532,6 +4618,7 @@ enum nl80211_key_default_types { * See &enum nl80211_key_default_types. * @NL80211_KEY_MODE: the mode from enum nl80211_key_mode. * Defaults to @NL80211_KEY_RX_TX. + * @NL80211_KEY_DEFAULT_BEACON: flag indicating default Beacon frame key * * @__NL80211_KEY_AFTER_LAST: internal * @NL80211_KEY_MAX: highest key attribute @@ -4547,6 +4634,7 @@ enum nl80211_key_attributes { NL80211_KEY_TYPE, NL80211_KEY_DEFAULT_TYPES, NL80211_KEY_MODE, + NL80211_KEY_DEFAULT_BEACON, /* keep last */ __NL80211_KEY_AFTER_LAST, @@ -4703,6 +4791,69 @@ enum nl80211_tx_power_setting { }; /** + * enum nl80211_tid_config - TID config state + * @NL80211_TID_CONFIG_ENABLE: Enable config for the TID + * @NL80211_TID_CONFIG_DISABLE: Disable config for the TID + */ +enum nl80211_tid_config { + NL80211_TID_CONFIG_ENABLE, + NL80211_TID_CONFIG_DISABLE, +}; + +/* enum nl80211_tid_config_attr - TID specific configuration. + * @NL80211_TID_CONFIG_ATTR_PAD: pad attribute for 64-bit values + * @NL80211_TID_CONFIG_ATTR_VIF_SUPP: a bitmap (u64) of attributes supported + * for per-vif configuration; doesn't list the ones that are generic + * (%NL80211_TID_CONFIG_ATTR_TIDS, %NL80211_TID_CONFIG_ATTR_OVERRIDE). + * @NL80211_TID_CONFIG_ATTR_PEER_SUPP: same as the previous per-vif one, but + * per peer instead. + * @NL80211_TID_CONFIG_ATTR_OVERRIDE: flag attribue, if no peer + * is selected, if set indicates that the new configuration overrides + * all previous peer configurations, otherwise previous peer specific + * configurations should be left untouched. If peer is selected then + * it will reset particular TID configuration of that peer and it will + * not accept other TID config attributes along with peer. + * @NL80211_TID_CONFIG_ATTR_TIDS: a bitmask value of TIDs (bit 0 to 7) + * Its type is u16. + * @NL80211_TID_CONFIG_ATTR_NOACK: Configure ack policy for the TID. + * specified in %NL80211_TID_CONFIG_ATTR_TID. see %enum nl80211_tid_config. + * Its type is u8. + * @NL80211_TID_CONFIG_ATTR_RETRY_SHORT: Number of retries used with data frame + * transmission, user-space sets this configuration in + * &NL80211_CMD_SET_TID_CONFIG. It is u8 type, min value is 1 and + * the max value is advertised by the driver in this attribute on + * output in wiphy capabilities. + * @NL80211_TID_CONFIG_ATTR_RETRY_LONG: Number of retries used with data frame + * transmission, user-space sets this configuration in + * &NL80211_CMD_SET_TID_CONFIG. Its type is u8, min value is 1 and + * the max value is advertised by the driver in this attribute on + * output in wiphy capabilities. 
+ * @NL80211_TID_CONFIG_ATTR_AMPDU_CTRL: Enable/Disable aggregation for the TIDs + * specified in %NL80211_TID_CONFIG_ATTR_TIDS. Its type is u8, using + * the values from &nl80211_tid_config. + * @NL80211_TID_CONFIG_ATTR_RTSCTS_CTRL: Enable/Disable RTS_CTS for the TIDs + * specified in %NL80211_TID_CONFIG_ATTR_TIDS. It is u8 type, using + * the values from &nl80211_tid_config. + */ +enum nl80211_tid_config_attr { + __NL80211_TID_CONFIG_ATTR_INVALID, + NL80211_TID_CONFIG_ATTR_PAD, + NL80211_TID_CONFIG_ATTR_VIF_SUPP, + NL80211_TID_CONFIG_ATTR_PEER_SUPP, + NL80211_TID_CONFIG_ATTR_OVERRIDE, + NL80211_TID_CONFIG_ATTR_TIDS, + NL80211_TID_CONFIG_ATTR_NOACK, + NL80211_TID_CONFIG_ATTR_RETRY_SHORT, + NL80211_TID_CONFIG_ATTR_RETRY_LONG, + NL80211_TID_CONFIG_ATTR_AMPDU_CTRL, + NL80211_TID_CONFIG_ATTR_RTSCTS_CTRL, + + /* keep last */ + __NL80211_TID_CONFIG_ATTR_AFTER_LAST, + NL80211_TID_CONFIG_ATTR_MAX = __NL80211_TID_CONFIG_ATTR_AFTER_LAST - 1 +}; + +/** * enum nl80211_packet_pattern_attr - packet pattern attribute * @__NL80211_PKTPAT_INVALID: invalid number for nested attribute * @NL80211_PKTPAT_PATTERN: the pattern, values where the mask has @@ -5521,6 +5672,18 @@ enum nl80211_feature_flags { * feature, which prevents bufferbloat by using the expected transmission * time to limit the amount of data buffered in the hardware. * + * @NL80211_EXT_FEATURE_BEACON_PROTECTION: The driver supports Beacon protection + * and can receive key configuration for BIGTK using key indexes 6 and 7. + * + * @NL80211_EXT_FEATURE_CONTROL_PORT_NO_PREAUTH: The driver can disable the + * forwarding of preauth frames over the control port. They are then + * handled as ordinary data frames. + * + * @NL80211_EXT_FEATURE_PROTECTED_TWT: Driver supports protected TWT frames + * + * @NL80211_EXT_FEATURE_DEL_IBSS_STA: The driver supports removing stations + * in IBSS mode, essentially by dropping their state. + * * @NUM_NL80211_EXT_FEATURES: number of extended features. * @MAX_NL80211_EXT_FEATURES: highest extended feature index. 
*/ @@ -5568,6 +5731,10 @@ enum nl80211_ext_feature_index { NL80211_EXT_FEATURE_SAE_OFFLOAD, NL80211_EXT_FEATURE_VLAN_OFFLOAD, NL80211_EXT_FEATURE_AQL, + NL80211_EXT_FEATURE_BEACON_PROTECTION, + NL80211_EXT_FEATURE_CONTROL_PORT_NO_PREAUTH, + NL80211_EXT_FEATURE_PROTECTED_TWT, + NL80211_EXT_FEATURE_DEL_IBSS_STA, /* add new features before the definition below */ NUM_NL80211_EXT_FEATURES, @@ -6190,12 +6357,14 @@ enum nl80211_ftm_responder_stats { * @NL80211_PREAMBLE_HT: HT preamble * @NL80211_PREAMBLE_VHT: VHT preamble * @NL80211_PREAMBLE_DMG: DMG preamble + * @NL80211_PREAMBLE_HE: HE preamble */ enum nl80211_preamble { NL80211_PREAMBLE_LEGACY, NL80211_PREAMBLE_HT, NL80211_PREAMBLE_VHT, NL80211_PREAMBLE_DMG, + NL80211_PREAMBLE_HE, }; /** @@ -6388,6 +6557,10 @@ enum nl80211_peer_measurement_attrs { * is valid) * @NL80211_PMSR_FTM_CAPA_ATTR_MAX_FTMS_PER_BURST: u32 attribute indicating * the maximum FTMs per burst (if not present anything is valid) + * @NL80211_PMSR_FTM_CAPA_ATTR_TRIGGER_BASED: flag attribute indicating if + * trigger based ranging measurement is supported + * @NL80211_PMSR_FTM_CAPA_ATTR_NON_TRIGGER_BASED: flag attribute indicating + * if non trigger based ranging measurement is supported * * @NUM_NL80211_PMSR_FTM_CAPA_ATTR: internal * @NL80211_PMSR_FTM_CAPA_ATTR_MAX: highest attribute number @@ -6403,6 +6576,8 @@ enum nl80211_peer_measurement_ftm_capa { NL80211_PMSR_FTM_CAPA_ATTR_BANDWIDTHS, NL80211_PMSR_FTM_CAPA_ATTR_MAX_BURSTS_EXPONENT, NL80211_PMSR_FTM_CAPA_ATTR_MAX_FTMS_PER_BURST, + NL80211_PMSR_FTM_CAPA_ATTR_TRIGGER_BASED, + NL80211_PMSR_FTM_CAPA_ATTR_NON_TRIGGER_BASED, /* keep last */ NUM_NL80211_PMSR_FTM_CAPA_ATTR, @@ -6432,6 +6607,20 @@ enum nl80211_peer_measurement_ftm_capa { * @NL80211_PMSR_FTM_REQ_ATTR_REQUEST_LCI: request LCI data (flag) * @NL80211_PMSR_FTM_REQ_ATTR_REQUEST_CIVICLOC: request civic location data * (flag) + * @NL80211_PMSR_FTM_REQ_ATTR_TRIGGER_BASED: request trigger based ranging + * measurement (flag). + * This attribute and %NL80211_PMSR_FTM_REQ_ATTR_NON_TRIGGER_BASED are + * mutually exclusive. + * if neither %NL80211_PMSR_FTM_REQ_ATTR_TRIGGER_BASED nor + * %NL80211_PMSR_FTM_REQ_ATTR_NON_TRIGGER_BASED is set, EDCA based + * ranging will be used. + * @NL80211_PMSR_FTM_REQ_ATTR_NON_TRIGGER_BASED: request non trigger based + * ranging measurement (flag) + * This attribute and %NL80211_PMSR_FTM_REQ_ATTR_TRIGGER_BASED are + * mutually exclusive. + * if neither %NL80211_PMSR_FTM_REQ_ATTR_TRIGGER_BASED nor + * %NL80211_PMSR_FTM_REQ_ATTR_NON_TRIGGER_BASED is set, EDCA based + * ranging will be used. * * @NUM_NL80211_PMSR_FTM_REQ_ATTR: internal * @NL80211_PMSR_FTM_REQ_ATTR_MAX: highest attribute number @@ -6448,6 +6637,8 @@ enum nl80211_peer_measurement_ftm_req { NL80211_PMSR_FTM_REQ_ATTR_NUM_FTMR_RETRIES, NL80211_PMSR_FTM_REQ_ATTR_REQUEST_LCI, NL80211_PMSR_FTM_REQ_ATTR_REQUEST_CIVICLOC, + NL80211_PMSR_FTM_REQ_ATTR_TRIGGER_BASED, + NL80211_PMSR_FTM_REQ_ATTR_NON_TRIGGER_BASED, /* keep last */ NUM_NL80211_PMSR_FTM_REQ_ATTR, @@ -6587,5 +6778,51 @@ enum nl80211_obss_pd_attributes { NL80211_HE_OBSS_PD_ATTR_MAX = __NL80211_HE_OBSS_PD_ATTR_LAST - 1, }; +/** + * enum nl80211_bss_color_attributes - BSS Color attributes + * @__NL80211_HE_BSS_COLOR_ATTR_INVALID: Invalid + * + * @NL80211_HE_BSS_COLOR_ATTR_COLOR: the current BSS Color. + * @NL80211_HE_BSS_COLOR_ATTR_DISABLED: is BSS coloring disabled. + * @NL80211_HE_BSS_COLOR_ATTR_PARTIAL: the AID equation to be used.. 
+ * + * @__NL80211_HE_BSS_COLOR_ATTR_LAST: Internal + * @NL80211_HE_BSS_COLOR_ATTR_MAX: highest BSS Color attribute. + */ +enum nl80211_bss_color_attributes { + __NL80211_HE_BSS_COLOR_ATTR_INVALID, + + NL80211_HE_BSS_COLOR_ATTR_COLOR, + NL80211_HE_BSS_COLOR_ATTR_DISABLED, + NL80211_HE_BSS_COLOR_ATTR_PARTIAL, + + /* keep last */ + __NL80211_HE_BSS_COLOR_ATTR_LAST, + NL80211_HE_BSS_COLOR_ATTR_MAX = __NL80211_HE_BSS_COLOR_ATTR_LAST - 1, +}; + +/** + * enum nl80211_iftype_akm_attributes - interface type AKM attributes + * @__NL80211_IFTYPE_AKM_ATTR_INVALID: Invalid + * + * @NL80211_IFTYPE_AKM_ATTR_IFTYPES: nested attribute containing a flag + * attribute for each interface type that supports AKM suites specified in + * %NL80211_IFTYPE_AKM_ATTR_SUITES + * @NL80211_IFTYPE_AKM_ATTR_SUITES: an array of u32. Used to indicate supported + * AKM suites for the specified interface types. + * + * @__NL80211_IFTYPE_AKM_ATTR_LAST: Internal + * @NL80211_IFTYPE_AKM_ATTR_MAX: highest interface type AKM attribute. + */ +enum nl80211_iftype_akm_attributes { + __NL80211_IFTYPE_AKM_ATTR_INVALID, + + NL80211_IFTYPE_AKM_ATTR_IFTYPES, + NL80211_IFTYPE_AKM_ATTR_SUITES, + + /* keep last */ + __NL80211_IFTYPE_AKM_ATTR_LAST, + NL80211_IFTYPE_AKM_ATTR_MAX = __NL80211_IFTYPE_AKM_ATTR_LAST - 1, +}; #endif /* __LINUX_NL80211_H */ diff --git a/include/uapi/linux/openvswitch.h b/include/uapi/linux/openvswitch.h index ae2bff14e7e1..9b14519e74d9 100644 --- a/include/uapi/linux/openvswitch.h +++ b/include/uapi/linux/openvswitch.h @@ -958,6 +958,7 @@ enum ovs_action_attr { OVS_ACTION_ATTR_CLONE, /* Nested OVS_CLONE_ATTR_*. */ OVS_ACTION_ATTR_CHECK_PKT_LEN, /* Nested OVS_CHECK_PKT_LEN_ATTR_*. */ OVS_ACTION_ATTR_ADD_MPLS, /* struct ovs_action_add_mpls. */ + OVS_ACTION_ATTR_DEC_TTL, /* Nested OVS_DEC_TTL_ATTR_*. */ __OVS_ACTION_ATTR_MAX, /* Nothing past this will be accepted * from userspace. 
*/ @@ -1050,4 +1051,10 @@ struct ovs_zone_limit { __u32 count; }; +enum ovs_dec_ttl_attr { + OVS_DEC_TTL_ATTR_UNSPEC, + OVS_DEC_TTL_ATTR_ACTION, /* Nested struct nlattr */ + __OVS_DEC_TTL_ATTR_MAX +}; + #endif /* _LINUX_OPENVSWITCH_H */ diff --git a/include/uapi/linux/pci_regs.h b/include/uapi/linux/pci_regs.h index 5437690483cd..f9701410d3b5 100644 --- a/include/uapi/linux/pci_regs.h +++ b/include/uapi/linux/pci_regs.h @@ -605,6 +605,7 @@ #define PCI_EXP_SLTCTL_PWR_OFF 0x0400 /* Power Off */ #define PCI_EXP_SLTCTL_EIC 0x0800 /* Electromechanical Interlock Control */ #define PCI_EXP_SLTCTL_DLLSCE 0x1000 /* Data Link Layer State Changed Enable */ +#define PCI_EXP_SLTCTL_IBPD_DISABLE 0x4000 /* In-band PD disable */ #define PCI_EXP_SLTSTA 26 /* Slot Status */ #define PCI_EXP_SLTSTA_ABP 0x0001 /* Attention Button Pressed */ #define PCI_EXP_SLTSTA_PFD 0x0002 /* Power Fault Detected */ @@ -680,6 +681,7 @@ #define PCI_EXP_LNKSTA2 50 /* Link Status 2 */ #define PCI_CAP_EXP_ENDPOINT_SIZEOF_V2 52 /* v2 endpoints with link end here */ #define PCI_EXP_SLTCAP2 52 /* Slot Capabilities 2 */ +#define PCI_EXP_SLTCAP2_IBPD 0x00000001 /* In-band PD Disable Supported */ #define PCI_EXP_SLTCTL2 56 /* Slot Control 2 */ #define PCI_EXP_SLTSTA2 58 /* Slot Status 2 */ diff --git a/include/uapi/linux/pcitest.h b/include/uapi/linux/pcitest.h index cbf422e56696..c3ab4c826297 100644 --- a/include/uapi/linux/pcitest.h +++ b/include/uapi/linux/pcitest.h @@ -19,5 +19,13 @@ #define PCITEST_MSIX _IOW('P', 0x7, int) #define PCITEST_SET_IRQTYPE _IOW('P', 0x8, int) #define PCITEST_GET_IRQTYPE _IO('P', 0x9) +#define PCITEST_CLEAR_IRQ _IO('P', 0x10) + +#define PCITEST_FLAGS_USE_DMA 0x00000001 + +struct pci_endpoint_test_xfer_param { + unsigned long size; + unsigned char flags; +}; #endif /* __UAPI_LINUX_PCITEST_H */ diff --git a/include/uapi/linux/perf_event.h b/include/uapi/linux/perf_event.h index 377d794d3105..7b2d6fc9e6ed 100644 --- a/include/uapi/linux/perf_event.h +++ b/include/uapi/linux/perf_event.h @@ -142,8 +142,9 @@ enum perf_event_sample_format { PERF_SAMPLE_REGS_INTR = 1U << 18, PERF_SAMPLE_PHYS_ADDR = 1U << 19, PERF_SAMPLE_AUX = 1U << 20, + PERF_SAMPLE_CGROUP = 1U << 21, - PERF_SAMPLE_MAX = 1U << 21, /* non-ABI */ + PERF_SAMPLE_MAX = 1U << 22, /* non-ABI */ __PERF_SAMPLE_CALLCHAIN_EARLY = 1ULL << 63, /* non-ABI; internal use */ }; @@ -181,6 +182,8 @@ enum perf_branch_sample_type_shift { PERF_SAMPLE_BRANCH_TYPE_SAVE_SHIFT = 16, /* save branch type */ + PERF_SAMPLE_BRANCH_HW_INDEX_SHIFT = 17, /* save low level index of raw branch records */ + PERF_SAMPLE_BRANCH_MAX_SHIFT /* non-ABI */ }; @@ -208,6 +211,8 @@ enum perf_branch_sample_type { PERF_SAMPLE_BRANCH_TYPE_SAVE = 1U << PERF_SAMPLE_BRANCH_TYPE_SAVE_SHIFT, + PERF_SAMPLE_BRANCH_HW_INDEX = 1U << PERF_SAMPLE_BRANCH_HW_INDEX_SHIFT, + PERF_SAMPLE_BRANCH_MAX = 1U << PERF_SAMPLE_BRANCH_MAX_SHIFT, }; @@ -377,7 +382,8 @@ struct perf_event_attr { ksymbol : 1, /* include ksymbol events */ bpf_event : 1, /* include bpf events */ aux_output : 1, /* generate AUX records instead of events */ - __reserved_1 : 32; + cgroup : 1, /* include cgroup events */ + __reserved_1 : 31; union { __u32 wakeup_events; /* wakeup every n events */ @@ -853,7 +859,9 @@ enum perf_event_type { * char data[size];}&& PERF_SAMPLE_RAW * * { u64 nr; - * { u64 from, to, flags } lbr[nr];} && PERF_SAMPLE_BRANCH_STACK + * { u64 hw_idx; } && PERF_SAMPLE_BRANCH_HW_INDEX + * { u64 from, to, flags } lbr[nr]; + * } && PERF_SAMPLE_BRANCH_STACK * * { u64 abi; # enum perf_sample_regs_abi * u64 regs[weight(mask)]; } 
&& PERF_SAMPLE_REGS_USER @@ -1006,6 +1014,16 @@ enum perf_event_type { */ PERF_RECORD_BPF_EVENT = 18, + /* + * struct { + * struct perf_event_header header; + * u64 id; + * char path[]; + * struct sample_id sample_id; + * }; + */ + PERF_RECORD_CGROUP = 19, + PERF_RECORD_MAX, /* non-ABI */ }; diff --git a/include/uapi/linux/pkt_cls.h b/include/uapi/linux/pkt_cls.h index 449a63971451..9f06d29cab70 100644 --- a/include/uapi/linux/pkt_cls.h +++ b/include/uapi/linux/pkt_cls.h @@ -17,6 +17,8 @@ enum { TCA_ACT_PAD, TCA_ACT_COOKIE, TCA_ACT_FLAGS, + TCA_ACT_HW_STATS, + TCA_ACT_USED_HW_STATS, __TCA_ACT_MAX }; @@ -24,6 +26,26 @@ enum { * actions stats. */ +/* tca HW stats type + * When user does not pass the attribute, he does not care. + * It is the same as if he would pass the attribute with + * all supported bits set. + * In case no bits are set, user is not interested in getting any HW statistics. + */ +#define TCA_ACT_HW_STATS_IMMEDIATE (1 << 0) /* Means that in dump, user + * gets the current HW stats + * state from the device + * queried at the dump time. + */ +#define TCA_ACT_HW_STATS_DELAYED (1 << 1) /* Means that in dump, user gets + * HW stats that might be out of date + * for some time, maybe couple of + * seconds. This is the case when + * driver polls stats updates + * periodically or when it gets async + * stats update from the device. + */ + #define TCA_ACT_MAX __TCA_ACT_MAX #define TCA_OLD_COMPAT (TCA_ACT_MAX+1) #define TCA_ACT_MAX_PRIO 32 diff --git a/include/uapi/linux/pkt_sched.h b/include/uapi/linux/pkt_sched.h index bbe791b24168..0c02737c8f47 100644 --- a/include/uapi/linux/pkt_sched.h +++ b/include/uapi/linux/pkt_sched.h @@ -256,6 +256,7 @@ enum { TCA_RED_PARMS, TCA_RED_STAB, TCA_RED_MAX_P, + TCA_RED_FLAGS, /* bitfield32 */ __TCA_RED_MAX, }; @@ -268,12 +269,28 @@ struct tc_red_qopt { unsigned char Wlog; /* log(W) */ unsigned char Plog; /* log(P_max/(qth_max-qth_min)) */ unsigned char Scell_log; /* cell size for idle damping */ + + /* This field can be used for flags that a RED-like qdisc has + * historically supported. E.g. when configuring RED, it can be used for + * ECN, HARDDROP and ADAPTATIVE. For SFQ it can be used for ECN, + * HARDDROP. Etc. Because this field has not been validated, and is + * copied back on dump, any bits besides those to which a given qdisc + * has assigned a historical meaning need to be considered for free use + * by userspace tools. + * + * Any further flags need to be passed differently, e.g. through an + * attribute (such as TCA_RED_FLAGS above). Such attribute should allow + * passing both recent and historic flags in one value. 
+ */ unsigned char flags; #define TC_RED_ECN 1 #define TC_RED_HARDDROP 2 #define TC_RED_ADAPTATIVE 4 +#define TC_RED_NODROP 8 }; +#define TC_RED_HISTORIC_FLAGS (TC_RED_ECN | TC_RED_HARDDROP | TC_RED_ADAPTATIVE) + struct tc_red_xstats { __u32 early; /* Early drops */ __u32 pdrop; /* Drops due to queue limits */ @@ -894,6 +911,8 @@ enum { TCA_FQ_CE_THRESHOLD, /* DCTCP-like CE-marking threshold */ + TCA_FQ_TIMER_SLACK, /* timer slack */ + __TCA_FQ_MAX }; @@ -1197,8 +1216,8 @@ enum { * [TCA_TAPRIO_ATTR_SCHED_ENTRY_INTERVAL] */ -#define TCA_TAPRIO_ATTR_FLAG_TXTIME_ASSIST BIT(0) -#define TCA_TAPRIO_ATTR_FLAG_FULL_OFFLOAD BIT(1) +#define TCA_TAPRIO_ATTR_FLAG_TXTIME_ASSIST _BITUL(0) +#define TCA_TAPRIO_ATTR_FLAG_FULL_OFFLOAD _BITUL(1) enum { TCA_TAPRIO_ATTR_UNSPEC, diff --git a/include/uapi/linux/rpl.h b/include/uapi/linux/rpl.h new file mode 100644 index 000000000000..1dccb55cf8c6 --- /dev/null +++ b/include/uapi/linux/rpl.h @@ -0,0 +1,48 @@ +/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */ +/* + * IPv6 RPL-SR implementation + * + * Author: + * (C) 2020 Alexander Aring <alex.aring@gmail.com> + */ + +#ifndef _UAPI_LINUX_RPL_H +#define _UAPI_LINUX_RPL_H + +#include <asm/byteorder.h> +#include <linux/types.h> +#include <linux/in6.h> + +/* + * RPL SR Header + */ +struct ipv6_rpl_sr_hdr { + __u8 nexthdr; + __u8 hdrlen; + __u8 type; + __u8 segments_left; +#if defined(__LITTLE_ENDIAN_BITFIELD) + __u32 cmpre:4, + cmpri:4, + reserved:4, + pad:4, + reserved1:16; +#elif defined(__BIG_ENDIAN_BITFIELD) + __u32 reserved:20, + pad:4, + cmpri:4, + cmpre:4; +#else +#error "Please fix <asm/byteorder.h>" +#endif + + union { + struct in6_addr addr[0]; + __u8 data[0]; + } segments; +} __attribute__((packed)); + +#define rpl_segaddr segments.addr +#define rpl_segdata segments.data + +#endif diff --git a/include/uapi/linux/rpl_iptunnel.h b/include/uapi/linux/rpl_iptunnel.h new file mode 100644 index 000000000000..f4eed1f92baa --- /dev/null +++ b/include/uapi/linux/rpl_iptunnel.h @@ -0,0 +1,21 @@ +/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */ +/* + * IPv6 RPL-SR implementation + * + * Author: + * (C) 2020 Alexander Aring <alex.aring@gmail.com> + */ + +#ifndef _UAPI_LINUX_RPL_IPTUNNEL_H +#define _UAPI_LINUX_RPL_IPTUNNEL_H + +enum { + RPL_IPTUNNEL_UNSPEC, + RPL_IPTUNNEL_SRH, + __RPL_IPTUNNEL_MAX, +}; +#define RPL_IPTUNNEL_MAX (__RPL_IPTUNNEL_MAX - 1) + +#define RPL_IPTUNNEL_SRH_SIZE(srh) (((srh)->hdrlen + 1) << 3) + +#endif diff --git a/include/uapi/linux/rtc.h b/include/uapi/linux/rtc.h index 095af360326a..83bba58d47f4 100644 --- a/include/uapi/linux/rtc.h +++ b/include/uapi/linux/rtc.h @@ -12,6 +12,9 @@ #ifndef _UAPI_LINUX_RTC_H_ #define _UAPI_LINUX_RTC_H_ +#include <linux/const.h> +#include <linux/ioctl.h> + /* * The struct used to pass data via the following ioctl. 
Similar to the * struct tm in <time.h>, but it needs to be here so that the kernel @@ -92,10 +95,10 @@ struct rtc_pll_info { #define RTC_PLL_GET _IOR('p', 0x11, struct rtc_pll_info) /* Get PLL correction */ #define RTC_PLL_SET _IOW('p', 0x12, struct rtc_pll_info) /* Set PLL correction */ -#define RTC_VL_DATA_INVALID BIT(0) /* Voltage too low, RTC data is invalid */ -#define RTC_VL_BACKUP_LOW BIT(1) /* Backup voltage is low */ -#define RTC_VL_BACKUP_EMPTY BIT(2) /* Backup empty or not present */ -#define RTC_VL_ACCURACY_LOW BIT(3) /* Voltage is low, RTC accuracy is reduced */ +#define RTC_VL_DATA_INVALID _BITUL(0) /* Voltage too low, RTC data is invalid */ +#define RTC_VL_BACKUP_LOW _BITUL(1) /* Backup voltage is low */ +#define RTC_VL_BACKUP_EMPTY _BITUL(2) /* Backup empty or not present */ +#define RTC_VL_ACCURACY_LOW _BITUL(3) /* Voltage is low, RTC accuracy is reduced */ #define RTC_VL_READ _IOR('p', 0x13, unsigned int) /* Voltage low detection */ #define RTC_VL_CLR _IO('p', 0x14) /* Clear voltage low information */ diff --git a/include/uapi/linux/sched.h b/include/uapi/linux/sched.h index 2e3bc22c6f20..3bac0a8ceab2 100644 --- a/include/uapi/linux/sched.h +++ b/include/uapi/linux/sched.h @@ -35,6 +35,7 @@ /* Flags for the clone3() syscall. */ #define CLONE_CLEAR_SIGHAND 0x100000000ULL /* Clear any signal handler and reset to SIG_DFL. */ +#define CLONE_INTO_CGROUP 0x200000000ULL /* Clone into a specific cgroup given the right permissions. */ /* * cloning flags intersect with CSIGNAL so can be used with unshare and clone3 @@ -81,6 +82,8 @@ * @set_tid_size: This defines the size of the array referenced * in @set_tid. This cannot be larger than the * kernel's limit of nested PID namespaces. + * @cgroup: If CLONE_INTO_CGROUP is specified set this to + * a file descriptor for the cgroup. * * The structure is versioned by size and thus extensible. * New struct members must go at the end of the struct and @@ -97,11 +100,13 @@ struct clone_args { __aligned_u64 tls; __aligned_u64 set_tid; __aligned_u64 set_tid_size; + __aligned_u64 cgroup; }; #endif #define CLONE_ARGS_SIZE_VER0 64 /* sizeof first published struct */ #define CLONE_ARGS_SIZE_VER1 80 /* sizeof second published struct */ +#define CLONE_ARGS_SIZE_VER2 88 /* sizeof third published struct */ /* * Scheduling policies diff --git a/include/uapi/linux/seccomp.h b/include/uapi/linux/seccomp.h index be84d87f1f46..c1735455bc53 100644 --- a/include/uapi/linux/seccomp.h +++ b/include/uapi/linux/seccomp.h @@ -22,6 +22,7 @@ #define SECCOMP_FILTER_FLAG_LOG (1UL << 1) #define SECCOMP_FILTER_FLAG_SPEC_ALLOW (1UL << 2) #define SECCOMP_FILTER_FLAG_NEW_LISTENER (1UL << 3) +#define SECCOMP_FILTER_FLAG_TSYNC_ESRCH (1UL << 4) /* * All BPF programs must return a 32-bit value. 
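As an editorial aside, the CLONE_INTO_CGROUP / clone_args.cgroup extension in the sched.h hunk above is easiest to read next to a short userspace sketch. The fragment below is illustrative only and is not part of the patch; the raw clone3() invocation via syscall(2), the O_RDONLY|O_DIRECTORY open of the cgroup directory, and the helper name are assumptions made for the example.

/*
 * Hypothetical sketch: create a child directly inside a cgroup v2
 * directory using the extended clone_args layout (CLONE_ARGS_SIZE_VER2).
 * Not part of the diff above; clone3() is called via syscall(2) since no
 * libc wrapper is assumed.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <signal.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <linux/sched.h>	/* struct clone_args, CLONE_INTO_CGROUP */
#include <linux/types.h>

static pid_t clone_into_cgroup(const char *cgroup_dir)
{
	/* File descriptor for the target cgroup directory. */
	int cgroup_fd = open(cgroup_dir, O_RDONLY | O_DIRECTORY | O_CLOEXEC);
	if (cgroup_fd < 0)
		return -1;

	struct clone_args args = {
		.flags		= CLONE_INTO_CGROUP,
		.exit_signal	= SIGCHLD,
		.cgroup		= (__u64)cgroup_fd,	/* new field, VER2 layout */
	};

	/* Pass the VER2 size so the kernel knows the cgroup field is present. */
	pid_t pid = syscall(__NR_clone3, &args, CLONE_ARGS_SIZE_VER2);

	close(cgroup_fd);
	return pid;
}

On a kernel that predates the cgroup member, the oversized struct with a non-zero trailing field should make clone3() fail (typically with E2BIG) rather than silently ignoring the request, which is the point of versioning the structure by size with the CLONE_ARGS_SIZE_VER* constants.
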
diff --git a/include/uapi/linux/serio.h b/include/uapi/linux/serio.h index 50e991952c97..ed2a96f43ce4 100644 --- a/include/uapi/linux/serio.h +++ b/include/uapi/linux/serio.h @@ -9,7 +9,7 @@ #ifndef _UAPI_SERIO_H #define _UAPI_SERIO_H - +#include <linux/const.h> #include <linux/ioctl.h> #define SPIOCSTYPE _IOW('q', 0x01, unsigned long) @@ -18,10 +18,10 @@ /* * bit masks for use in "interrupt" flags (3rd argument) */ -#define SERIO_TIMEOUT BIT(0) -#define SERIO_PARITY BIT(1) -#define SERIO_FRAME BIT(2) -#define SERIO_OOB_DATA BIT(3) +#define SERIO_TIMEOUT _BITUL(0) +#define SERIO_PARITY _BITUL(1) +#define SERIO_FRAME _BITUL(2) +#define SERIO_OOB_DATA _BITUL(3) /* * Serio types diff --git a/include/uapi/linux/sock_diag.h b/include/uapi/linux/sock_diag.h index e5925009a652..5f74a5f6091d 100644 --- a/include/uapi/linux/sock_diag.h +++ b/include/uapi/linux/sock_diag.h @@ -36,4 +36,30 @@ enum sknetlink_groups { }; #define SKNLGRP_MAX (__SKNLGRP_MAX - 1) +enum { + SK_DIAG_BPF_STORAGE_REQ_NONE, + SK_DIAG_BPF_STORAGE_REQ_MAP_FD, + __SK_DIAG_BPF_STORAGE_REQ_MAX, +}; + +#define SK_DIAG_BPF_STORAGE_REQ_MAX (__SK_DIAG_BPF_STORAGE_REQ_MAX - 1) + +enum { + SK_DIAG_BPF_STORAGE_REP_NONE, + SK_DIAG_BPF_STORAGE, + __SK_DIAG_BPF_STORAGE_REP_MAX, +}; + +#define SK_DIAB_BPF_STORAGE_REP_MAX (__SK_DIAG_BPF_STORAGE_REP_MAX - 1) + +enum { + SK_DIAG_BPF_STORAGE_NONE, + SK_DIAG_BPF_STORAGE_PAD, + SK_DIAG_BPF_STORAGE_MAP_ID, + SK_DIAG_BPF_STORAGE_MAP_VALUE, + __SK_DIAG_BPF_STORAGE_MAX, +}; + +#define SK_DIAG_BPF_STORAGE_MAX (__SK_DIAG_BPF_STORAGE_MAX - 1) + #endif /* _UAPI__SOCK_DIAG_H__ */ diff --git a/include/uapi/linux/tcp.h b/include/uapi/linux/tcp.h index fd9eb8f6bcae..f2acb2566333 100644 --- a/include/uapi/linux/tcp.h +++ b/include/uapi/linux/tcp.h @@ -312,6 +312,7 @@ enum { TCP_NLA_REORD_SEEN, /* reordering events seen */ TCP_NLA_SRTT, /* smoothed RTT in usecs */ TCP_NLA_TIMEOUT_REHASH, /* Timeout-triggered rehash attempts */ + TCP_NLA_BYTES_NOTSENT, /* Bytes in write queue not yet sent */ }; /* for TCP_MD5SIG socket option */ @@ -345,5 +346,7 @@ struct tcp_zerocopy_receive { __u64 address; /* in: address of mapping */ __u32 length; /* in/out: number of bytes to map/mapped */ __u32 recv_skip_hint; /* out: amount of bytes to skip */ + __u32 inq; /* out: amount of bytes in read queue */ + __s32 err; /* out: socket error */ }; #endif /* _UAPI_LINUX_TCP_H */ diff --git a/include/uapi/linux/um_timetravel.h b/include/uapi/linux/um_timetravel.h new file mode 100644 index 000000000000..ca3238222b6d --- /dev/null +++ b/include/uapi/linux/um_timetravel.h @@ -0,0 +1,128 @@ +/* + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ * + * Copyright (C) 2019 Intel Corporation + */ +#ifndef _UAPI_LINUX_UM_TIMETRAVEL_H +#define _UAPI_LINUX_UM_TIMETRAVEL_H +#include <linux/types.h> + +/** + * struct um_timetravel_msg - UM time travel message + * + * This is the basic message type, going in both directions. + * + * This is the message passed between the host (user-mode Linux instance) + * and the calendar (the application on the other side of the socket) in + * order to implement common scheduling. + * + * Whenever UML has an event it will request runtime for it from the + * calendar, and then wait for its turn until it can run, etc. Note + * that it will only ever request the single next runtime, i.e. multiple + * REQUEST messages override each other. + */ +struct um_timetravel_msg { + /** + * @op: operation value from &enum um_timetravel_ops + */ + __u32 op; + + /** + * @seq: sequence number for the message - shall be reflected in + * the ACK response, and should be checked while processing + * the response to see if it matches + */ + __u32 seq; + + /** + * @time: time in nanoseconds + */ + __u64 time; +}; + +/** + * enum um_timetravel_ops - Operation codes + */ +enum um_timetravel_ops { + /** + * @UM_TIMETRAVEL_ACK: response (ACK) to any previous message, + * this usually doesn't carry any data in the 'time' field + * unless otherwise specified below + */ + UM_TIMETRAVEL_ACK = 0, + + /** + * @UM_TIMETRAVEL_START: initialize the connection, the time + * field contains an (arbitrary) ID to possibly be able + * to distinguish the connections. + */ + UM_TIMETRAVEL_START = 1, + + /** + * @UM_TIMETRAVEL_REQUEST: request to run at the given time + * (host -> calendar) + */ + UM_TIMETRAVEL_REQUEST = 2, + + /** + * @UM_TIMETRAVEL_WAIT: Indicate waiting for the previously requested + * runtime, new requests may be made while waiting (e.g. due to + * interrupts); the time field is ignored. The calendar must process + * this message and later send a %UM_TIMETRAVEL_RUN message when + * the host can run again. + * (host -> calendar) + */ + UM_TIMETRAVEL_WAIT = 3, + + /** + * @UM_TIMETRAVEL_GET: return the current time from the calendar in the + * ACK message, the time in the request message is ignored + * (host -> calendar) + */ + UM_TIMETRAVEL_GET = 4, + + /** + * @UM_TIMETRAVEL_UPDATE: time update to the calendar, must be sent e.g. + * before kicking an interrupt to another calendar + * (host -> calendar) + */ + UM_TIMETRAVEL_UPDATE = 5, + + /** + * @UM_TIMETRAVEL_RUN: run time request granted, current time is in + * the time field + * (calendar -> host) + */ + UM_TIMETRAVEL_RUN = 6, + + /** + * @UM_TIMETRAVEL_FREE_UNTIL: Enable free-running until the given time, + * this is a message from the calendar telling the host that it can + * freely do its own scheduling for anything before the indicated + * time. + * Note that if a calendar sends this message once, the host may + * assume that it will also do so in the future, if it implements + * wraparound semantics for the time field. + * (calendar -> host) + */ + UM_TIMETRAVEL_FREE_UNTIL = 7, + + /** + * @UM_TIMETRAVEL_GET_TOD: Return time of day, typically used once at + * boot by the virtual machines to get a synchronized time from + * the simulation. 
+ */ + UM_TIMETRAVEL_GET_TOD = 8, +}; + +#endif /* _UAPI_LINUX_UM_TIMETRAVEL_H */ diff --git a/include/uapi/linux/usb/raw_gadget.h b/include/uapi/linux/usb/raw_gadget.h new file mode 100644 index 000000000000..ea375082b3ac --- /dev/null +++ b/include/uapi/linux/usb/raw_gadget.h @@ -0,0 +1,167 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +/* + * USB Raw Gadget driver. + * + * See Documentation/usb/raw-gadget.rst for more details. + */ + +#ifndef _UAPI__LINUX_USB_RAW_GADGET_H +#define _UAPI__LINUX_USB_RAW_GADGET_H + +#include <asm/ioctl.h> +#include <linux/types.h> +#include <linux/usb/ch9.h> + +/* Maximum length of driver_name/device_name in the usb_raw_init struct. */ +#define UDC_NAME_LENGTH_MAX 128 + +/* + * struct usb_raw_init - argument for USB_RAW_IOCTL_INIT ioctl. + * @speed: The speed of the emulated USB device, takes the same values as + * the usb_device_speed enum: USB_SPEED_FULL, USB_SPEED_HIGH, etc. + * @driver_name: The name of the UDC driver. + * @device_name: The name of a UDC instance. + * + * The last two fields identify a UDC the gadget driver should bind to. + * For example, Dummy UDC has "dummy_udc" as its driver_name and "dummy_udc.N" + * as its device_name, where N in the index of the Dummy UDC instance. + * At the same time the dwc2 driver that is used on Raspberry Pi Zero, has + * "20980000.usb" as both driver_name and device_name. + */ +struct usb_raw_init { + __u8 driver_name[UDC_NAME_LENGTH_MAX]; + __u8 device_name[UDC_NAME_LENGTH_MAX]; + __u8 speed; +}; + +/* The type of event fetched with the USB_RAW_IOCTL_EVENT_FETCH ioctl. */ +enum usb_raw_event_type { + USB_RAW_EVENT_INVALID = 0, + + /* This event is queued when the driver has bound to a UDC. */ + USB_RAW_EVENT_CONNECT = 1, + + /* This event is queued when a new control request arrived to ep0. */ + USB_RAW_EVENT_CONTROL = 2, + + /* The list might grow in the future. */ +}; + +/* + * struct usb_raw_event - argument for USB_RAW_IOCTL_EVENT_FETCH ioctl. + * @type: The type of the fetched event. + * @length: Length of the data buffer. Updated by the driver and set to the + * actual length of the fetched event data. + * @data: A buffer to store the fetched event data. + * + * Currently the fetched data buffer is empty for USB_RAW_EVENT_CONNECT, + * and contains struct usb_ctrlrequest for USB_RAW_EVENT_CONTROL. + */ +struct usb_raw_event { + __u32 type; + __u32 length; + __u8 data[0]; +}; + +#define USB_RAW_IO_FLAGS_ZERO 0x0001 +#define USB_RAW_IO_FLAGS_MASK 0x0001 + +static inline int usb_raw_io_flags_valid(__u16 flags) +{ + return (flags & ~USB_RAW_IO_FLAGS_MASK) == 0; +} + +static inline int usb_raw_io_flags_zero(__u16 flags) +{ + return (flags & USB_RAW_IO_FLAGS_ZERO); +} + +/* + * struct usb_raw_ep_io - argument for USB_RAW_IOCTL_EP0/EP_WRITE/READ ioctls. + * @ep: Endpoint handle as returned by USB_RAW_IOCTL_EP_ENABLE for + * USB_RAW_IOCTL_EP_WRITE/READ. Ignored for USB_RAW_IOCTL_EP0_WRITE/READ. + * @flags: When USB_RAW_IO_FLAGS_ZERO is specified, the zero flag is set on + * the submitted USB request, see include/linux/usb/gadget.h for details. + * @length: Length of data. + * @data: Data to send for USB_RAW_IOCTL_EP0/EP_WRITE. Buffer to store received + * data for USB_RAW_IOCTL_EP0/EP_READ. + */ +struct usb_raw_ep_io { + __u16 ep; + __u16 flags; + __u32 length; + __u8 data[0]; +}; + +/* + * Initializes a Raw Gadget instance. + * Accepts a pointer to the usb_raw_init struct as an argument. + * Returns 0 on success or negative error code on failure. 
+ */ +#define USB_RAW_IOCTL_INIT _IOW('U', 0, struct usb_raw_init) + +/* + * Instructs Raw Gadget to bind to a UDC and start emulating a USB device. + * Returns 0 on success or negative error code on failure. + */ +#define USB_RAW_IOCTL_RUN _IO('U', 1) + +/* + * A blocking ioctl that waits for an event and returns fetched event data to + * the user. + * Accepts a pointer to the usb_raw_event struct. + * Returns 0 on success or negative error code on failure. + */ +#define USB_RAW_IOCTL_EVENT_FETCH _IOR('U', 2, struct usb_raw_event) + +/* + * Queues an IN (OUT for READ) urb as a response to the last control request + * received on endpoint 0, provided that was an IN (OUT for READ) request and + * waits until the urb is completed. Copies received data to user for READ. + * Accepts a pointer to the usb_raw_ep_io struct as an argument. + * Returns length of trasferred data on success or negative error code on + * failure. + */ +#define USB_RAW_IOCTL_EP0_WRITE _IOW('U', 3, struct usb_raw_ep_io) +#define USB_RAW_IOCTL_EP0_READ _IOWR('U', 4, struct usb_raw_ep_io) + +/* + * Finds an endpoint that supports the transfer type specified in the + * descriptor and enables it. + * Accepts a pointer to the usb_endpoint_descriptor struct as an argument. + * Returns enabled endpoint handle on success or negative error code on failure. + */ +#define USB_RAW_IOCTL_EP_ENABLE _IOW('U', 5, struct usb_endpoint_descriptor) + +/* Disables specified endpoint. + * Accepts endpoint handle as an argument. + * Returns 0 on success or negative error code on failure. + */ +#define USB_RAW_IOCTL_EP_DISABLE _IOW('U', 6, __u32) + +/* + * Queues an IN (OUT for READ) urb as a response to the last control request + * received on endpoint usb_raw_ep_io.ep, provided that was an IN (OUT for READ) + * request and waits until the urb is completed. Copies received data to user + * for READ. + * Accepts a pointer to the usb_raw_ep_io struct as an argument. + * Returns length of trasferred data on success or negative error code on + * failure. + */ +#define USB_RAW_IOCTL_EP_WRITE _IOW('U', 7, struct usb_raw_ep_io) +#define USB_RAW_IOCTL_EP_READ _IOWR('U', 8, struct usb_raw_ep_io) + +/* + * Switches the gadget into the configured state. + * Returns 0 on success or negative error code on failure. + */ +#define USB_RAW_IOCTL_CONFIGURE _IO('U', 9) + +/* + * Constrains UDC VBUS power usage. + * Accepts current limit in 2 mA units as an argument. + * Returns 0 on success or negative error code on failure. + */ +#define USB_RAW_IOCTL_VBUS_DRAW _IOW('U', 10, __u32) + +#endif /* _UAPI__LINUX_USB_RAW_GADGET_H */ diff --git a/include/uapi/linux/userfaultfd.h b/include/uapi/linux/userfaultfd.h index 48f1a7c2f1f0..e7e98bde221f 100644 --- a/include/uapi/linux/userfaultfd.h +++ b/include/uapi/linux/userfaultfd.h @@ -19,7 +19,8 @@ * means the userland is reading). 
*/ #define UFFD_API ((__u64)0xAA) -#define UFFD_API_FEATURES (UFFD_FEATURE_EVENT_FORK | \ +#define UFFD_API_FEATURES (UFFD_FEATURE_PAGEFAULT_FLAG_WP | \ + UFFD_FEATURE_EVENT_FORK | \ UFFD_FEATURE_EVENT_REMAP | \ UFFD_FEATURE_EVENT_REMOVE | \ UFFD_FEATURE_EVENT_UNMAP | \ @@ -34,7 +35,8 @@ #define UFFD_API_RANGE_IOCTLS \ ((__u64)1 << _UFFDIO_WAKE | \ (__u64)1 << _UFFDIO_COPY | \ - (__u64)1 << _UFFDIO_ZEROPAGE) + (__u64)1 << _UFFDIO_ZEROPAGE | \ + (__u64)1 << _UFFDIO_WRITEPROTECT) #define UFFD_API_RANGE_IOCTLS_BASIC \ ((__u64)1 << _UFFDIO_WAKE | \ (__u64)1 << _UFFDIO_COPY) @@ -52,6 +54,7 @@ #define _UFFDIO_WAKE (0x02) #define _UFFDIO_COPY (0x03) #define _UFFDIO_ZEROPAGE (0x04) +#define _UFFDIO_WRITEPROTECT (0x06) #define _UFFDIO_API (0x3F) /* userfaultfd ioctl ids */ @@ -68,6 +71,8 @@ struct uffdio_copy) #define UFFDIO_ZEROPAGE _IOWR(UFFDIO, _UFFDIO_ZEROPAGE, \ struct uffdio_zeropage) +#define UFFDIO_WRITEPROTECT _IOWR(UFFDIO, _UFFDIO_WRITEPROTECT, \ + struct uffdio_writeprotect) /* read() structure */ struct uffd_msg { @@ -203,13 +208,14 @@ struct uffdio_copy { __u64 dst; __u64 src; __u64 len; +#define UFFDIO_COPY_MODE_DONTWAKE ((__u64)1<<0) /* - * There will be a wrprotection flag later that allows to map - * pages wrprotected on the fly. And such a flag will be - * available if the wrprotection ioctl are implemented for the - * range according to the uffdio_register.ioctls. + * UFFDIO_COPY_MODE_WP will map the page write protected on + * the fly. UFFDIO_COPY_MODE_WP is available only if the + * write protected ioctl is implemented for the range + * according to the uffdio_register.ioctls. */ -#define UFFDIO_COPY_MODE_DONTWAKE ((__u64)1<<0) +#define UFFDIO_COPY_MODE_WP ((__u64)1<<1) __u64 mode; /* @@ -231,4 +237,24 @@ struct uffdio_zeropage { __s64 zeropage; }; +struct uffdio_writeprotect { + struct uffdio_range range; +/* + * UFFDIO_WRITEPROTECT_MODE_WP: set the flag to write protect a range, + * unset the flag to undo protection of a range which was previously + * write protected. + * + * UFFDIO_WRITEPROTECT_MODE_DONTWAKE: set the flag to avoid waking up + * any wait thread after the operation succeeds. + * + * NOTE: Write protecting a region (WP=1) is unrelated to page faults, + * therefore DONTWAKE flag is meaningless with WP=1. Removing write + * protection (WP=0) in response to a page fault wakes the faulting + * task unless DONTWAKE is set. + */ +#define UFFDIO_WRITEPROTECT_MODE_WP ((__u64)1<<0) +#define UFFDIO_WRITEPROTECT_MODE_DONTWAKE ((__u64)1<<1) + __u64 mode; +}; + #endif /* _LINUX_USERFAULTFD_H */ diff --git a/include/uapi/linux/v4l2-controls.h b/include/uapi/linux/v4l2-controls.h index 5a7bedee2b0e..1a58d7cc4ccc 100644 --- a/include/uapi/linux/v4l2-controls.h +++ b/include/uapi/linux/v4l2-controls.h @@ -192,6 +192,12 @@ enum v4l2_colorfx { * We reserve 16 controls for this driver. */ #define V4L2_CID_USER_IMX_BASE (V4L2_CID_USER_BASE + 0x10b0) +/* + * The base for the atmel isc driver controls. + * We reserve 32 controls for this driver. 
+ */ +#define V4L2_CID_USER_ATMEL_ISC_BASE (V4L2_CID_USER_BASE + 0x10c0) + /* MPEG-class control IDs */ /* The MPEG controls are applicable to all codec controls * and the 'MPEG' part of the define is historical */ diff --git a/include/uapi/linux/vfio.h b/include/uapi/linux/vfio.h index 9e843a147ead..015516bcfaa3 100644 --- a/include/uapi/linux/vfio.h +++ b/include/uapi/linux/vfio.h @@ -707,6 +707,43 @@ struct vfio_device_ioeventfd { #define VFIO_DEVICE_IOEVENTFD _IO(VFIO_TYPE, VFIO_BASE + 16) +/** + * VFIO_DEVICE_FEATURE - _IORW(VFIO_TYPE, VFIO_BASE + 17, + * struct vfio_device_feature) + * + * Get, set, or probe feature data of the device. The feature is selected + * using the FEATURE_MASK portion of the flags field. Support for a feature + * can be probed by setting both the FEATURE_MASK and PROBE bits. A probe + * may optionally include the GET and/or SET bits to determine read vs write + * access of the feature respectively. Probing a feature will return success + * if the feature is supported and all of the optionally indicated GET/SET + * methods are supported. The format of the data portion of the structure is + * specific to the given feature. The data portion is not required for + * probing. GET and SET are mutually exclusive, except for use with PROBE. + * + * Return 0 on success, -errno on failure. + */ +struct vfio_device_feature { + __u32 argsz; + __u32 flags; +#define VFIO_DEVICE_FEATURE_MASK (0xffff) /* 16-bit feature index */ +#define VFIO_DEVICE_FEATURE_GET (1 << 16) /* Get feature into data[] */ +#define VFIO_DEVICE_FEATURE_SET (1 << 17) /* Set feature from data[] */ +#define VFIO_DEVICE_FEATURE_PROBE (1 << 18) /* Probe feature support */ + __u8 data[]; +}; + +#define VFIO_DEVICE_FEATURE _IO(VFIO_TYPE, VFIO_BASE + 17) + +/* + * Provide support for setting a PCI VF Token, which is used as a shared + * secret between PF and VF drivers. This feature may only be set on a + * PCI SR-IOV PF when SR-IOV is enabled on the PF and there are no existing + * open VFs. Data provided when setting this feature is a 16-byte array + * (__u8 b[16]), representing a UUID. + */ +#define VFIO_DEVICE_FEATURE_PCI_VF_TOKEN (0) + /* -------- API for Type1 VFIO IOMMU -------- */ /** diff --git a/include/uapi/linux/videodev2.h b/include/uapi/linux/videodev2.h index 5f9357dcb060..9817b7e2c968 100644 --- a/include/uapi/linux/videodev2.h +++ b/include/uapi/linux/videodev2.h @@ -565,6 +565,7 @@ struct v4l2_pix_format { #define V4L2_PIX_FMT_Y6 v4l2_fourcc('Y', '0', '6', ' ') /* 6 Greyscale */ #define V4L2_PIX_FMT_Y10 v4l2_fourcc('Y', '1', '0', ' ') /* 10 Greyscale */ #define V4L2_PIX_FMT_Y12 v4l2_fourcc('Y', '1', '2', ' ') /* 12 Greyscale */ +#define V4L2_PIX_FMT_Y14 v4l2_fourcc('Y', '1', '4', ' ') /* 14 Greyscale */ #define V4L2_PIX_FMT_Y16 v4l2_fourcc('Y', '1', '6', ' ') /* 16 Greyscale */ #define V4L2_PIX_FMT_Y16_BE v4l2_fourcc_be('Y', '1', '6', ' ') /* 16 Greyscale BE */ @@ -662,6 +663,10 @@ struct v4l2_pix_format { #define V4L2_PIX_FMT_SGBRG12P v4l2_fourcc('p', 'G', 'C', 'C') #define V4L2_PIX_FMT_SGRBG12P v4l2_fourcc('p', 'g', 'C', 'C') #define V4L2_PIX_FMT_SRGGB12P v4l2_fourcc('p', 'R', 'C', 'C') +#define V4L2_PIX_FMT_SBGGR14 v4l2_fourcc('B', 'G', '1', '4') /* 14 BGBG.. GRGR.. */ +#define V4L2_PIX_FMT_SGBRG14 v4l2_fourcc('G', 'B', '1', '4') /* 14 GBGB.. RGRG.. */ +#define V4L2_PIX_FMT_SGRBG14 v4l2_fourcc('G', 'R', '1', '4') /* 14 GRGR.. BGBG.. */ +#define V4L2_PIX_FMT_SRGGB14 v4l2_fourcc('R', 'G', '1', '4') /* 14 RGRG.. GBGB.. 
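One concrete user of the new VFIO feature ioctl above is the VF token: user space sets a shared-secret UUID on the PF device so that VF drivers can later prove they are cooperating with it. A hedged sketch follows; the device fd is assumed to have been obtained through the usual VFIO group API, and the helper name is invented for illustration.

#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/vfio.h>

/* Set the PCI VF token on an already-open vfio-pci PF device fd. */
static int vfio_set_vf_token(int device_fd, const unsigned char uuid[16])
{
	size_t argsz = sizeof(struct vfio_device_feature) + 16;
	struct vfio_device_feature *feature = calloc(1, argsz);
	int ret;

	if (!feature)
		return -1;
	feature->argsz = argsz;
	feature->flags = VFIO_DEVICE_FEATURE_SET |
			 VFIO_DEVICE_FEATURE_PCI_VF_TOKEN;
	memcpy(feature->data, uuid, 16);	/* raw 16-byte UUID */
	ret = ioctl(device_fd, VFIO_DEVICE_FEATURE, feature);
	free(feature);
	return ret;
}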
*/ /* 14bit raw bayer packed, 7 bytes for every 4 pixels */ #define V4L2_PIX_FMT_SBGGR14P v4l2_fourcc('p', 'B', 'E', 'E') #define V4L2_PIX_FMT_SGBRG14P v4l2_fourcc('p', 'G', 'E', 'E') @@ -1242,6 +1247,10 @@ struct v4l2_selection { typedef __u64 v4l2_std_id; +/* + * Attention: Keep the V4L2_STD_* bit definitions in sync with + * include/dt-bindings/display/sdtv-standards.h SDTV_STD_* bit definitions. + */ /* one bit for each */ #define V4L2_STD_PAL_B ((v4l2_std_id)0x00000001) #define V4L2_STD_PAL_B1 ((v4l2_std_id)0x00000002) diff --git a/include/uapi/linux/virtio_balloon.h b/include/uapi/linux/virtio_balloon.h index a1966cd7b677..19974392d324 100644 --- a/include/uapi/linux/virtio_balloon.h +++ b/include/uapi/linux/virtio_balloon.h @@ -36,6 +36,7 @@ #define VIRTIO_BALLOON_F_DEFLATE_ON_OOM 2 /* Deflate balloon on OOM */ #define VIRTIO_BALLOON_F_FREE_PAGE_HINT 3 /* VQ to report free pages */ #define VIRTIO_BALLOON_F_PAGE_POISON 4 /* Guest is using page poisoning */ +#define VIRTIO_BALLOON_F_REPORTING 5 /* Page reporting virtqueue */ /* Size of a PFN in the balloon interface. */ #define VIRTIO_BALLOON_PFN_SHIFT 12 diff --git a/include/uapi/linux/virtio_ids.h b/include/uapi/linux/virtio_ids.h index 585e07b27333..ecc27a17401a 100644 --- a/include/uapi/linux/virtio_ids.h +++ b/include/uapi/linux/virtio_ids.h @@ -46,5 +46,6 @@ #define VIRTIO_ID_IOMMU 23 /* virtio IOMMU */ #define VIRTIO_ID_FS 26 /* virtio filesystem */ #define VIRTIO_ID_PMEM 27 /* virtio pmem */ +#define VIRTIO_ID_MAC80211_HWSIM 29 /* virtio mac80211-hwsim */ #endif /* _LINUX_VIRTIO_IDS_H */ diff --git a/include/uapi/linux/wireless.h b/include/uapi/linux/wireless.h index 86eca3208b6b..a2c006a364e0 100644 --- a/include/uapi/linux/wireless.h +++ b/include/uapi/linux/wireless.h @@ -74,6 +74,8 @@ #include <linux/socket.h> /* for "struct sockaddr" et al */ #include <linux/if.h> /* for IFNAMSIZ and co... */ +#include <stddef.h> /* for offsetof */ + /***************************** VERSION *****************************/ /* * This constant is used to know the availability of the wireless @@ -1090,8 +1092,7 @@ struct iw_event { /* iw_point events are special. First, the payload (extra data) come at * the end of the event, so they are bigger than IW_EV_POINT_LEN. Second, * we omit the pointer, so start at an offset. */ -#define IW_EV_POINT_OFF (((char *) &(((struct iw_point *) NULL)->length)) - \ - (char *) NULL) +#define IW_EV_POINT_OFF offsetof(struct iw_point, length) #define IW_EV_POINT_LEN (IW_EV_LCP_LEN + sizeof(struct iw_point) - \ IW_EV_POINT_OFF) diff --git a/include/uapi/misc/uacce/hisi_qm.h b/include/uapi/misc/uacce/hisi_qm.h new file mode 100644 index 000000000000..6435f0bcb556 --- /dev/null +++ b/include/uapi/misc/uacce/hisi_qm.h @@ -0,0 +1,23 @@ +/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */ +#ifndef _UAPI_HISI_QM_H +#define _UAPI_HISI_QM_H + +#include <linux/types.h> + +/** + * struct hisi_qp_ctx - User data for hisi qp. 
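The wireless.h hunk above is purely a cleanup: the old IW_EV_POINT_OFF computed the offset of iw_point.length by arithmetic on a casted null pointer, which offsetof() expresses without the undefined-behaviour pattern (hence the new <stddef.h> include). A throwaway check, not part of the header, shows the two forms agree:

#include <stddef.h>
#include <stdio.h>
#include <linux/wireless.h>

int main(void)
{
	size_t legacy = (size_t)(((char *)&(((struct iw_point *)NULL)->length)) -
				 (char *)NULL);
	size_t modern = offsetof(struct iw_point, length);

	printf("IW_EV_POINT_OFF: old=%zu new=%zu\n", legacy, modern);
	return modern == legacy ? 0 : 1;
}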
+ * @id: qp_index return to user space + * @qc_type: Accelerator algorithm type + */ +struct hisi_qp_ctx { + __u16 id; + __u16 qc_type; +}; + +#define HISI_QM_API_VER_BASE "hisi_qm_v1" +#define HISI_QM_API_VER2_BASE "hisi_qm_v2" + +/* UACCE_CMD_QM_SET_QP_CTX: Set qp algorithm type */ +#define UACCE_CMD_QM_SET_QP_CTX _IOWR('H', 10, struct hisi_qp_ctx) + +#endif diff --git a/include/uapi/misc/uacce/uacce.h b/include/uapi/misc/uacce/uacce.h new file mode 100644 index 000000000000..cc7185678f47 --- /dev/null +++ b/include/uapi/misc/uacce/uacce.h @@ -0,0 +1,38 @@ +/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */ +#ifndef _UAPIUUACCE_H +#define _UAPIUUACCE_H + +#include <linux/types.h> +#include <linux/ioctl.h> + +/* + * UACCE_CMD_START_Q: Start queue + */ +#define UACCE_CMD_START_Q _IO('W', 0) + +/* + * UACCE_CMD_PUT_Q: + * User actively stop queue and free queue resource immediately + * Optimization method since close fd may delay + */ +#define UACCE_CMD_PUT_Q _IO('W', 1) + +/* + * UACCE Device flags: + * UACCE_DEV_SVA: Shared Virtual Addresses + * Support PASID + * Support device page faults (PCI PRI or SMMU Stall) + */ +#define UACCE_DEV_SVA BIT(0) + +/** + * enum uacce_qfrt: queue file region type + * @UACCE_QFRT_MMIO: device mmio region + * @UACCE_QFRT_DUS: device user share region + */ +enum uacce_qfrt { + UACCE_QFRT_MMIO = 0, + UACCE_QFRT_DUS = 1, +}; + +#endif diff --git a/include/uapi/rdma/mlx5-abi.h b/include/uapi/rdma/mlx5-abi.h index 624f5b53eb1f..df1cc3641bda 100644 --- a/include/uapi/rdma/mlx5-abi.h +++ b/include/uapi/rdma/mlx5-abi.h @@ -49,6 +49,7 @@ enum { MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC = 1 << 7, MLX5_QP_FLAG_ALLOW_SCATTER_CQE = 1 << 8, MLX5_QP_FLAG_PACKET_BASED_CREDIT_MODE = 1 << 9, + MLX5_QP_FLAG_UAR_PAGE_INDEX = 1 << 10, }; enum { @@ -78,6 +79,7 @@ struct mlx5_ib_alloc_ucontext_req { enum mlx5_lib_caps { MLX5_LIB_CAP_4K_UAR = (__u64)1 << 0, + MLX5_LIB_CAP_DYN_UAR = (__u64)1 << 1, }; enum mlx5_ib_alloc_uctx_v2_flags { @@ -266,6 +268,7 @@ struct mlx5_ib_query_device_resp { enum mlx5_ib_create_cq_flags { MLX5_IB_CREATE_CQ_FLAGS_CQE_128B_PAD = 1 << 0, + MLX5_IB_CREATE_CQ_FLAGS_UAR_PAGE_INDEX = 1 << 1, }; struct mlx5_ib_create_cq { @@ -275,6 +278,9 @@ struct mlx5_ib_create_cq { __u8 cqe_comp_en; __u8 cqe_comp_res_format; __u16 flags; + __u16 uar_page_index; + __u16 reserved0; + __u32 reserved1; }; struct mlx5_ib_create_cq_resp { diff --git a/include/uapi/rdma/mlx5_user_ioctl_cmds.h b/include/uapi/rdma/mlx5_user_ioctl_cmds.h index afe7da6f2b8e..24f3388c3182 100644 --- a/include/uapi/rdma/mlx5_user_ioctl_cmds.h +++ b/include/uapi/rdma/mlx5_user_ioctl_cmds.h @@ -131,6 +131,23 @@ enum mlx5_ib_var_obj_methods { MLX5_IB_METHOD_VAR_OBJ_DESTROY, }; +enum mlx5_ib_uar_alloc_attrs { + MLX5_IB_ATTR_UAR_OBJ_ALLOC_HANDLE = (1U << UVERBS_ID_NS_SHIFT), + MLX5_IB_ATTR_UAR_OBJ_ALLOC_TYPE, + MLX5_IB_ATTR_UAR_OBJ_ALLOC_MMAP_OFFSET, + MLX5_IB_ATTR_UAR_OBJ_ALLOC_MMAP_LENGTH, + MLX5_IB_ATTR_UAR_OBJ_ALLOC_PAGE_ID, +}; + +enum mlx5_ib_uar_obj_destroy_attrs { + MLX5_IB_ATTR_UAR_OBJ_DESTROY_HANDLE = (1U << UVERBS_ID_NS_SHIFT), +}; + +enum mlx5_ib_uar_obj_methods { + MLX5_IB_METHOD_UAR_OBJ_ALLOC = (1U << UVERBS_ID_NS_SHIFT), + MLX5_IB_METHOD_UAR_OBJ_DESTROY, +}; + enum mlx5_ib_devx_umem_reg_attrs { MLX5_IB_ATTR_DEVX_UMEM_REG_HANDLE = (1U << UVERBS_ID_NS_SHIFT), MLX5_IB_ATTR_DEVX_UMEM_REG_ADDR, @@ -143,6 +160,22 @@ enum mlx5_ib_devx_umem_dereg_attrs { MLX5_IB_ATTR_DEVX_UMEM_DEREG_HANDLE = (1U << UVERBS_ID_NS_SHIFT), }; +enum mlx5_ib_pp_obj_methods { + MLX5_IB_METHOD_PP_OBJ_ALLOC = (1U << 
UVERBS_ID_NS_SHIFT), + MLX5_IB_METHOD_PP_OBJ_DESTROY, +}; + +enum mlx5_ib_pp_alloc_attrs { + MLX5_IB_ATTR_PP_OBJ_ALLOC_HANDLE = (1U << UVERBS_ID_NS_SHIFT), + MLX5_IB_ATTR_PP_OBJ_ALLOC_CTX, + MLX5_IB_ATTR_PP_OBJ_ALLOC_FLAGS, + MLX5_IB_ATTR_PP_OBJ_ALLOC_INDEX, +}; + +enum mlx5_ib_pp_obj_destroy_attrs { + MLX5_IB_ATTR_PP_OBJ_DESTROY_HANDLE = (1U << UVERBS_ID_NS_SHIFT), +}; + enum mlx5_ib_devx_umem_methods { MLX5_IB_METHOD_DEVX_UMEM_REG = (1U << UVERBS_ID_NS_SHIFT), MLX5_IB_METHOD_DEVX_UMEM_DEREG, @@ -173,6 +206,8 @@ enum mlx5_ib_objects { MLX5_IB_OBJECT_DEVX_ASYNC_CMD_FD, MLX5_IB_OBJECT_DEVX_ASYNC_EVENT_FD, MLX5_IB_OBJECT_VAR, + MLX5_IB_OBJECT_PP, + MLX5_IB_OBJECT_UAR, }; enum mlx5_ib_flow_matcher_create_attrs { diff --git a/include/uapi/rdma/mlx5_user_ioctl_verbs.h b/include/uapi/rdma/mlx5_user_ioctl_verbs.h index 88b6ca70c2fe..56b26eaea083 100644 --- a/include/uapi/rdma/mlx5_user_ioctl_verbs.h +++ b/include/uapi/rdma/mlx5_user_ioctl_verbs.h @@ -44,6 +44,7 @@ enum mlx5_ib_uapi_flow_table_type { MLX5_IB_UAPI_FLOW_TABLE_TYPE_NIC_TX = 0x1, MLX5_IB_UAPI_FLOW_TABLE_TYPE_FDB = 0x2, MLX5_IB_UAPI_FLOW_TABLE_TYPE_RDMA_RX = 0x3, + MLX5_IB_UAPI_FLOW_TABLE_TYPE_RDMA_TX = 0x4, }; enum mlx5_ib_uapi_flow_action_packet_reformat_type { @@ -73,5 +74,14 @@ struct mlx5_ib_uapi_devx_async_event_hdr { __u8 out_data[]; }; +enum mlx5_ib_uapi_pp_alloc_flags { + MLX5_IB_UAPI_PP_ALLOC_FLAGS_DEDICATED_INDEX = 1 << 0, +}; + +enum mlx5_ib_uapi_uar_alloc_type { + MLX5_IB_UAPI_UAR_ALLOC_TYPE_BF = 0x0, + MLX5_IB_UAPI_UAR_ALLOC_TYPE_NC = 0x1, +}; + #endif diff --git a/include/uapi/scsi/fc/fc_els.h b/include/uapi/scsi/fc/fc_els.h index 76f627f0d13b..66318c44acd7 100644 --- a/include/uapi/scsi/fc/fc_els.h +++ b/include/uapi/scsi/fc/fc_els.h @@ -9,6 +9,7 @@ #define _FC_ELS_H_ #include <linux/types.h> +#include <asm/byteorder.h> /* * Fibre Channel Switch - Enhanced Link Services definitions. @@ -40,6 +41,7 @@ enum fc_els_cmd { ELS_REC = 0x13, /* read exchange concise */ ELS_SRR = 0x14, /* sequence retransmission request */ ELS_FPIN = 0x16, /* Fabric Performance Impact Notification */ + ELS_RDF = 0x19, /* Register Diagnostic Functions */ ELS_PRLI = 0x20, /* process login */ ELS_PRLO = 0x21, /* process logout */ ELS_SCN = 0x22, /* state change notification */ @@ -108,6 +110,7 @@ enum fc_els_cmd { [ELS_REC] = "REC", \ [ELS_SRR] = "SRR", \ [ELS_FPIN] = "FPIN", \ + [ELS_RDF] = "RDF", \ [ELS_PRLI] = "PRLI", \ [ELS_PRLO] = "PRLO", \ [ELS_SCN] = "SCN", \ @@ -208,6 +211,99 @@ enum fc_els_rjt_explan { }; /* + * Link Service TLV Descriptor Tag Values + */ +enum fc_ls_tlv_dtag { + ELS_DTAG_LS_REQ_INFO = 0x00000001, + /* Link Service Request Information Descriptor */ + ELS_DTAG_LNK_INTEGRITY = 0x00020001, + /* Link Integrity Notification Descriptor */ + ELS_DTAG_DELIVERY = 0x00020002, + /* Delivery Notification Descriptor */ + ELS_DTAG_PEER_CONGEST = 0x00020003, + /* Peer Congestion Notification Descriptor */ + ELS_DTAG_CONGESTION = 0x00020004, + /* Congestion Notification Descriptor */ + ELS_DTAG_FPIN_REGISTER = 0x00030001, + /* FPIN Registration Descriptor */ +}; + +/* + * Initializer useful for decoding table. + * Please keep this in sync with the above definitions. 
+ */ +#define FC_LS_TLV_DTAG_INIT { \ + { ELS_DTAG_LS_REQ_INFO, "Link Service Request Information" }, \ + { ELS_DTAG_LNK_INTEGRITY, "Link Integrity Notification" }, \ + { ELS_DTAG_DELIVERY, "Delivery Notification Present" }, \ + { ELS_DTAG_PEER_CONGEST, "Peer Congestion Notification" }, \ + { ELS_DTAG_CONGESTION, "Congestion Notification" }, \ + { ELS_DTAG_FPIN_REGISTER, "FPIN Registration" }, \ +} + + +/* + * Generic Link Service TLV Descriptor format + * + * This structure, as it defines no payload, will also be referred to + * as the "tlv header" - which contains the tag and len fields. + */ +struct fc_tlv_desc { + __be32 desc_tag; /* Notification Descriptor Tag */ + __be32 desc_len; /* Length of Descriptor (in bytes). + * Size of descriptor excluding + * desc_tag and desc_len fields. + */ + __u8 desc_value[0]; /* Descriptor Value */ +}; + +/* Descriptor tag and len fields are considered the mandatory header + * for a descriptor + */ +#define FC_TLV_DESC_HDR_SZ sizeof(struct fc_tlv_desc) + +/* + * Macro, used when initializing payloads, to return the descriptor length. + * Length is size of descriptor minus the tag and len fields. + */ +#define FC_TLV_DESC_LENGTH_FROM_SZ(desc) \ + (sizeof(desc) - FC_TLV_DESC_HDR_SZ) + +/* Macro, used on received payloads, to return the descriptor length */ +#define FC_TLV_DESC_SZ_FROM_LENGTH(tlv) \ + (__be32_to_cpu((tlv)->desc_len) + FC_TLV_DESC_HDR_SZ) + +/* + * This helper is used to walk descriptors in a descriptor list. + * Given the address of the current descriptor, which minimally contains a + * tag and len field, calculate the address of the next descriptor based + * on the len field. + */ +static inline void *fc_tlv_next_desc(void *desc) +{ + struct fc_tlv_desc *tlv = desc; + + return (desc + FC_TLV_DESC_SZ_FROM_LENGTH(tlv)); +} + + +/* + * Link Service Request Information Descriptor + */ +struct fc_els_lsri_desc { + __be32 desc_tag; /* descriptor tag (0x0000 0001) */ + __be32 desc_len; /* Length of Descriptor (in bytes) (4). + * Size of descriptor excluding + * desc_tag and desc_len fields. + */ + struct { + __u8 cmd; /* ELS cmd byte */ + __u8 bytes[3]; /* bytes 1..3 */ + } rqst_w0; /* Request word 0 */ +}; + + +/* * Common service parameters (N ports). */ struct fc_els_csp { @@ -819,24 +915,61 @@ enum fc_els_clid_ic { }; +enum fc_fpin_li_event_types { + FPIN_LI_UNKNOWN = 0x0, + FPIN_LI_LINK_FAILURE = 0x1, + FPIN_LI_LOSS_OF_SYNC = 0x2, + FPIN_LI_LOSS_OF_SIG = 0x3, + FPIN_LI_PRIM_SEQ_ERR = 0x4, + FPIN_LI_INVALID_TX_WD = 0x5, + FPIN_LI_INVALID_CRC = 0x6, + FPIN_LI_DEVICE_SPEC = 0xF, +}; + /* - * Fabric Notification Descriptor Tag values + * Initializer useful for decoding table. + * Please keep this in sync with the above definitions. 
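Because every descriptor starts with the same tag/len header, a receiver can walk an entire list generically and only decode the tags it understands. A driver-side fragment (illustrative only; it assumes this header is already included and uses the usual kernel byte-order helpers) might iterate like so:

/* Walk 'list_len' bytes of TLV descriptors starting at 'desc'. */
static void fc_walk_tlv_list(struct fc_tlv_desc *desc, u32 list_len)
{
	void *end = (void *)desc + list_len;

	while ((void *)desc + FC_TLV_DESC_HDR_SZ <= end &&
	       (void *)desc + FC_TLV_DESC_SZ_FROM_LENGTH(desc) <= end) {
		switch (be32_to_cpu(desc->desc_tag)) {
		case ELS_DTAG_LNK_INTEGRITY:
			/* decode as a Link Integrity notification */
			break;
		default:
			/* unknown tag: desc_len still tells us how far to skip */
			break;
		}
		desc = fc_tlv_next_desc(desc);
	}
}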
*/ -enum fc_fn_dtag { - ELS_FN_DTAG_LNK_INTEGRITY = 0x00020001, /* Link Integrity */ - ELS_FN_DTAG_PEER_CONGEST = 0x00020003, /* Peer Congestion */ - ELS_FN_DTAG_CONGESTION = 0x00020004, /* Congestion */ -}; +#define FC_FPIN_LI_EVT_TYPES_INIT { \ + { FPIN_LI_UNKNOWN, "Unknown" }, \ + { FPIN_LI_LINK_FAILURE, "Link Failure" }, \ + { FPIN_LI_LOSS_OF_SYNC, "Loss of Synchronization" }, \ + { FPIN_LI_LOSS_OF_SIG, "Loss of Signal" }, \ + { FPIN_LI_PRIM_SEQ_ERR, "Primitive Sequence Protocol Error" }, \ + { FPIN_LI_INVALID_TX_WD, "Invalid Transmission Word" }, \ + { FPIN_LI_INVALID_CRC, "Invalid CRC" }, \ + { FPIN_LI_DEVICE_SPEC, "Device Specific" }, \ +} + /* - * Fabric Notification Descriptor + * Link Integrity Notification Descriptor */ -struct fc_fn_desc { - __be32 fn_desc_tag; /* Notification Descriptor Tag */ - __be32 fn_desc_value_len; /* Length of Descriptor Value field - * (in bytes) - */ - __u8 fn_desc_value[0]; /* Descriptor Value */ +struct fc_fn_li_desc { + __be32 desc_tag; /* Descriptor Tag (0x00020001) */ + __be32 desc_len; /* Length of Descriptor (in bytes). + * Size of descriptor excluding + * desc_tag and desc_len fields. + */ + __be64 detecting_wwpn; /* Port Name that detected event */ + __be64 attached_wwpn; /* Port Name of device attached to + * detecting Port Name + */ + __be16 event_type; /* see enum fc_fpin_li_event_types */ + __be16 event_modifier; /* Implementation specific value + * describing the event type + */ + __be32 event_threshold;/* duration in ms of the link + * integrity detection cycle + */ + __be32 event_count; /* minimum number of event + * occurrences during the event + * threshold to caause the LI event + */ + __be32 pname_count; /* number of portname_list elements */ + __be64 pname_list[0]; /* list of N_Port_Names accessible + * through the attached port + */ }; /* @@ -845,8 +978,56 @@ struct fc_fn_desc { struct fc_els_fpin { __u8 fpin_cmd; /* command (0x16) */ __u8 fpin_zero[3]; /* specified as zero - part of cmd */ - __be32 fpin_desc_cnt; /* count of descriptors */ - struct fc_fn_desc fpin_desc[0]; /* Descriptor list */ + __be32 desc_len; /* Length of Descriptor List (in bytes). + * Size of ELS excluding fpin_cmd, + * fpin_zero and desc_len fields. + */ + struct fc_tlv_desc fpin_desc[0]; /* Descriptor list */ +}; + +/* Diagnostic Function Descriptor - FPIN Registration */ +struct fc_df_desc_fpin_reg { + __be32 desc_tag; /* FPIN Registration (0x00030001) */ + __be32 desc_len; /* Length of Descriptor (in bytes). + * Size of descriptor excluding + * desc_tag and desc_len fields. + */ + __be32 count; /* Number of desc_tags elements */ + __be32 desc_tags[0]; /* Array of Descriptor Tags. + * Each tag indicates a function + * supported by the N_Port (request) + * or by the N_Port and Fabric + * Controller (reply; may be a subset + * of the request). + * See ELS_FN_DTAG_xxx for tag values. + */ }; +/* + * ELS_RDF - Register Diagnostic Functions + */ +struct fc_els_rdf { + __u8 fpin_cmd; /* command (0x19) */ + __u8 fpin_zero[3]; /* specified as zero - part of cmd */ + __be32 desc_len; /* Length of Descriptor List (in bytes). + * Size of ELS excluding fpin_cmd, + * fpin_zero and desc_len fields. + */ + struct fc_tlv_desc desc[0]; /* Descriptor list */ +}; + +/* + * ELS RDF LS_ACC Response. + */ +struct fc_els_rdf_resp { + struct fc_els_ls_acc acc_hdr; + __be32 desc_list_len; /* Length of response (in + * bytes). Excludes acc_hdr + * and desc_list_len fields. 
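The length conventions matter when building these ELS payloads: each desc_len excludes its own tag and len fields, while the outer desc_len excludes the command word. A hypothetical kernel-side helper (the wrapper struct and both names are invented for illustration) that prepares an RDF registering for Link Integrity FPINs could look like this:

struct fc_rdf_li_req {
	struct fc_els_rdf rdf;
	struct fc_df_desc_fpin_reg reg;
	__be32 tags[1];
} __attribute__((packed));

static void fc_prep_rdf_li(struct fc_rdf_li_req *req)
{
	memset(req, 0, sizeof(*req));
	req->rdf.fpin_cmd = ELS_RDF;
	/* everything after fpin_cmd, fpin_zero and desc_len */
	req->rdf.desc_len = cpu_to_be32(sizeof(*req) - sizeof(req->rdf));
	req->reg.desc_tag = cpu_to_be32(ELS_DTAG_FPIN_REGISTER);
	/* descriptor payload: the count field plus one tag */
	req->reg.desc_len = cpu_to_be32(sizeof(req->reg) + sizeof(req->tags) -
					FC_TLV_DESC_HDR_SZ);
	req->reg.count = cpu_to_be32(1);
	req->tags[0] = cpu_to_be32(ELS_DTAG_LNK_INTEGRITY);
}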
+ */ + struct fc_els_lsri_desc lsri; + struct fc_tlv_desc desc[0]; /* Supported Descriptor list */ +}; + + #endif /* _FC_ELS_H_ */ diff --git a/include/uapi/scsi/scsi_bsg_fc.h b/include/uapi/scsi/scsi_bsg_fc.h index 3ae65e93235c..7f5930801f72 100644 --- a/include/uapi/scsi/scsi_bsg_fc.h +++ b/include/uapi/scsi/scsi_bsg_fc.h @@ -209,7 +209,7 @@ struct fc_bsg_host_vendor { __u64 vendor_id; /* start of vendor command area */ - __u32 vendor_cmd[0]; + __u32 vendor_cmd[]; }; /* Response: diff --git a/include/uapi/sound/asoc.h b/include/uapi/sound/asoc.h index 6048553c119d..a74ca232f1fc 100644 --- a/include/uapi/sound/asoc.h +++ b/include/uapi/sound/asoc.h @@ -17,6 +17,7 @@ #define __LINUX_UAPI_SND_ASOC_H #include <linux/types.h> +#include <sound/asound.h> /* * Maximum number of channels topology kcontrol can represent. diff --git a/include/uapi/sound/compress_offload.h b/include/uapi/sound/compress_offload.h index 56d95673ce0f..7184265c0b0d 100644 --- a/include/uapi/sound/compress_offload.h +++ b/include/uapi/sound/compress_offload.h @@ -31,7 +31,7 @@ #include <sound/compress_params.h> -#define SNDRV_COMPRESS_VERSION SNDRV_PROTOCOL_VERSION(0, 1, 2) +#define SNDRV_COMPRESS_VERSION SNDRV_PROTOCOL_VERSION(0, 2, 0) /** * struct snd_compressed_buffer - compressed buffer * @fragment_size: size of buffer fragment in bytes diff --git a/include/uapi/sound/compress_params.h b/include/uapi/sound/compress_params.h index 9c96fb0e4d90..79b14389ae41 100644 --- a/include/uapi/sound/compress_params.h +++ b/include/uapi/sound/compress_params.h @@ -75,7 +75,9 @@ #define SND_AUDIOCODEC_G723_1 ((__u32) 0x0000000C) #define SND_AUDIOCODEC_G729 ((__u32) 0x0000000D) #define SND_AUDIOCODEC_BESPOKE ((__u32) 0x0000000E) -#define SND_AUDIOCODEC_MAX SND_AUDIOCODEC_BESPOKE +#define SND_AUDIOCODEC_ALAC ((__u32) 0x0000000F) +#define SND_AUDIOCODEC_APE ((__u32) 0x00000010) +#define SND_AUDIOCODEC_MAX SND_AUDIOCODEC_APE /* * Profile and modes are listed with bit masks. 
This allows for a @@ -142,6 +144,9 @@ #define SND_AUDIOPROFILE_WMA8 ((__u32) 0x00000002) #define SND_AUDIOPROFILE_WMA9 ((__u32) 0x00000004) #define SND_AUDIOPROFILE_WMA10 ((__u32) 0x00000008) +#define SND_AUDIOPROFILE_WMA9_PRO ((__u32) 0x00000010) +#define SND_AUDIOPROFILE_WMA9_LOSSLESS ((__u32) 0x00000020) +#define SND_AUDIOPROFILE_WMA10_LOSSLESS ((__u32) 0x00000040) #define SND_AUDIOMODE_WMA_LEVEL1 ((__u32) 0x00000001) #define SND_AUDIOMODE_WMA_LEVEL2 ((__u32) 0x00000002) @@ -326,6 +331,33 @@ struct snd_dec_flac { __u16 reserved; } __attribute__((packed, aligned(4))); +struct snd_dec_wma { + __u32 encoder_option; + __u32 adv_encoder_option; + __u32 adv_encoder_option2; + __u32 reserved; +} __attribute__((packed, aligned(4))); + +struct snd_dec_alac { + __u32 frame_length; + __u8 compatible_version; + __u8 pb; + __u8 mb; + __u8 kb; + __u32 max_run; + __u32 max_frame_bytes; +} __attribute__((packed, aligned(4))); + +struct snd_dec_ape { + __u16 compatible_version; + __u16 compression_level; + __u32 format_flags; + __u32 blocks_per_frame; + __u32 final_frame_blocks; + __u32 total_frames; + __u32 seek_table_present; +} __attribute__((packed, aligned(4))); + union snd_codec_options { struct snd_enc_wma wma; struct snd_enc_vorbis vorbis; @@ -333,6 +365,9 @@ union snd_codec_options { struct snd_enc_flac flac; struct snd_enc_generic generic; struct snd_dec_flac flac_d; + struct snd_dec_wma wma_d; + struct snd_dec_alac alac_d; + struct snd_dec_ape ape_d; } __attribute__((packed, aligned(4))); /** struct snd_codec_desc - description of codec capabilities diff --git a/include/uapi/sound/sof/abi.h b/include/uapi/sound/sof/abi.h index c0ef1643c753..5995b79d6df1 100644 --- a/include/uapi/sound/sof/abi.h +++ b/include/uapi/sound/sof/abi.h @@ -26,7 +26,7 @@ /* SOF ABI version major, minor and patch numbers */ #define SOF_ABI_MAJOR 3 -#define SOF_ABI_MINOR 12 +#define SOF_ABI_MINOR 13 #define SOF_ABI_PATCH 0 /* SOF ABI version number. 
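The new decoder structs above are reached through union snd_codec_options, so describing, say, an ALAC stream is a matter of filling the alac_d member before handing struct snd_codec to the compress-offload SET_PARAMS path. In the sketch below the snd_codec field names come from the existing compress_params.h, and the numeric values are placeholders that would normally be read from the ALAC magic cookie.

#include <string.h>
#include <sound/compress_params.h>

static void fill_alac_codec(struct snd_codec *codec)
{
	memset(codec, 0, sizeof(*codec));
	codec->id = SND_AUDIOCODEC_ALAC;
	codec->ch_in = 2;
	codec->ch_out = 2;
	codec->sample_rate = 44100;
	/* Placeholder cookie values, for illustration only. */
	codec->options.alac_d.frame_length = 4096;
	codec->options.alac_d.compatible_version = 0;
	codec->options.alac_d.pb = 40;
	codec->options.alac_d.mb = 10;
	codec->options.alac_d.kb = 14;
	codec->options.alac_d.max_run = 255;
	codec->options.alac_d.max_frame_bytes = 0;
}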
Format within 32bit word is MMmmmppp */ diff --git a/include/vdso/bits.h b/include/vdso/bits.h new file mode 100644 index 000000000000..6d005a1f5d94 --- /dev/null +++ b/include/vdso/bits.h @@ -0,0 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __VDSO_BITS_H +#define __VDSO_BITS_H + +#include <vdso/const.h> + +#define BIT(nr) (UL(1) << (nr)) + +#endif /* __VDSO_BITS_H */ diff --git a/include/vdso/clocksource.h b/include/vdso/clocksource.h new file mode 100644 index 000000000000..c682e7c60273 --- /dev/null +++ b/include/vdso/clocksource.h @@ -0,0 +1,22 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __VDSO_CLOCKSOURCE_H +#define __VDSO_CLOCKSOURCE_H + +#include <vdso/limits.h> + +#ifdef CONFIG_GENERIC_GETTIMEOFDAY +#include <asm/vdso/clocksource.h> +#endif /* CONFIG_GENERIC_GETTIMEOFDAY */ + +enum vdso_clock_mode { + VDSO_CLOCKMODE_NONE, +#ifdef CONFIG_GENERIC_GETTIMEOFDAY + VDSO_ARCH_CLOCKMODES, +#endif + VDSO_CLOCKMODE_MAX, + + /* Indicator for time namespace VDSO */ + VDSO_CLOCKMODE_TIMENS = INT_MAX +}; + +#endif /* __VDSO_CLOCKSOURCE_H */ diff --git a/include/vdso/const.h b/include/vdso/const.h new file mode 100644 index 000000000000..94b385ad438d --- /dev/null +++ b/include/vdso/const.h @@ -0,0 +1,10 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __VDSO_CONST_H +#define __VDSO_CONST_H + +#include <uapi/linux/const.h> + +#define UL(x) (_UL(x)) +#define ULL(x) (_ULL(x)) + +#endif /* __VDSO_CONST_H */ diff --git a/include/vdso/datapage.h b/include/vdso/datapage.h index c5f347cc5e55..5cbc9fcbfd45 100644 --- a/include/vdso/datapage.h +++ b/include/vdso/datapage.h @@ -4,9 +4,20 @@ #ifndef __ASSEMBLY__ -#include <linux/bits.h> -#include <linux/time.h> -#include <linux/types.h> +#include <linux/compiler.h> +#include <uapi/linux/time.h> +#include <uapi/linux/types.h> +#include <uapi/asm-generic/errno-base.h> + +#include <vdso/bits.h> +#include <vdso/clocksource.h> +#include <vdso/ktime.h> +#include <vdso/limits.h> +#include <vdso/math64.h> +#include <vdso/processor.h> +#include <vdso/time.h> +#include <vdso/time32.h> +#include <vdso/time64.h> #define VDSO_BASES (CLOCK_TAI + 1) #define VDSO_HRES (BIT(CLOCK_REALTIME) | \ @@ -21,8 +32,6 @@ #define CS_RAW 1 #define CS_BASES (CS_RAW + 1) -#define VCLOCK_TIMENS UINT_MAX - /** * struct vdso_timestamp - basetime per clock_id * @sec: seconds @@ -101,6 +110,22 @@ struct vdso_data { */ extern struct vdso_data _vdso_data[CS_BASES] __attribute__((visibility("hidden"))); +/* + * The generic vDSO implementation requires that gettimeofday.h + * provides: + * - __arch_get_vdso_data(): to get the vdso datapage. + * - __arch_get_hw_counter(): to get the hw counter based on the + * clock_mode. + * - gettimeofday_fallback(): fallback for gettimeofday. + * - clock_gettime_fallback(): fallback for clock_gettime. + * - clock_getres_fallback(): fallback for clock_getres. 
+ */ +#ifdef ENABLE_COMPAT_VDSO +#include <asm/vdso/compat_gettimeofday.h> +#else +#include <asm/vdso/gettimeofday.h> +#endif /* ENABLE_COMPAT_VDSO */ + #endif /* !__ASSEMBLY__ */ #endif /* __VDSO_DATAPAGE_H */ diff --git a/include/vdso/jiffies.h b/include/vdso/jiffies.h new file mode 100644 index 000000000000..2f9d596c8b29 --- /dev/null +++ b/include/vdso/jiffies.h @@ -0,0 +1,11 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __VDSO_JIFFIES_H +#define __VDSO_JIFFIES_H + +#include <asm/param.h> /* for HZ */ +#include <vdso/time64.h> + +/* TICK_NSEC is the time between ticks in nsec assuming SHIFTED_HZ */ +#define TICK_NSEC ((NSEC_PER_SEC+HZ/2)/HZ) + +#endif /* __VDSO_JIFFIES_H */ diff --git a/include/vdso/ktime.h b/include/vdso/ktime.h new file mode 100644 index 000000000000..a0fd07239e0e --- /dev/null +++ b/include/vdso/ktime.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __VDSO_KTIME_H +#define __VDSO_KTIME_H + +#include <vdso/jiffies.h> + +/* + * The resolution of the clocks. The resolution value is returned in + * the clock_getres() system call to give application programmers an + * idea of the (in)accuracy of timers. Timer values are rounded up to + * this resolution values. + */ +#define LOW_RES_NSEC TICK_NSEC +#define KTIME_LOW_RES (LOW_RES_NSEC) + +#endif /* __VDSO_KTIME_H */ diff --git a/include/vdso/limits.h b/include/vdso/limits.h new file mode 100644 index 000000000000..0197888ad0e0 --- /dev/null +++ b/include/vdso/limits.h @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __VDSO_LIMITS_H +#define __VDSO_LIMITS_H + +#define USHRT_MAX ((unsigned short)~0U) +#define SHRT_MAX ((short)(USHRT_MAX >> 1)) +#define SHRT_MIN ((short)(-SHRT_MAX - 1)) +#define INT_MAX ((int)(~0U >> 1)) +#define INT_MIN (-INT_MAX - 1) +#define UINT_MAX (~0U) +#define LONG_MAX ((long)(~0UL >> 1)) +#define LONG_MIN (-LONG_MAX - 1) +#define ULONG_MAX (~0UL) +#define LLONG_MAX ((long long)(~0ULL >> 1)) +#define LLONG_MIN (-LLONG_MAX - 1) +#define ULLONG_MAX (~0ULL) +#define UINTPTR_MAX ULONG_MAX + +#endif /* __VDSO_LIMITS_H */ diff --git a/include/vdso/math64.h b/include/vdso/math64.h new file mode 100644 index 000000000000..7da703ee5561 --- /dev/null +++ b/include/vdso/math64.h @@ -0,0 +1,24 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __VDSO_MATH64_H +#define __VDSO_MATH64_H + +static __always_inline u32 +__iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder) +{ + u32 ret = 0; + + while (dividend >= divisor) { + /* The following asm() prevents the compiler from + optimising this loop into a modulo operation. */ + asm("" : "+rm"(dividend)); + + dividend -= divisor; + ret++; + } + + *remainder = dividend; + + return ret; +} + +#endif /* __VDSO_MATH64_H */ diff --git a/include/vdso/processor.h b/include/vdso/processor.h new file mode 100644 index 000000000000..fbe8265ea3c4 --- /dev/null +++ b/include/vdso/processor.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2020 ARM Ltd. 
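__iter_div_u64_rem() exists so the vDSO can split a nanosecond count into seconds and remainder without pulling a 64-bit division helper (libgcc) into user-mapped code; the quotient is expected to be tiny, so repeated subtraction is cheap. An illustrative fragment in the same style as the gettime code, assuming these vdso headers are included:

/* Normalise a small nanosecond delta into seconds + nanoseconds. */
static inline u64 ns_to_sec_nsec(u64 ns, u64 *nsec)
{
	u64 rem;
	u64 sec = __iter_div_u64_rem(ns, NSEC_PER_SEC, &rem);

	*nsec = rem;
	return sec;
}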
+ */ +#ifndef __VDSO_PROCESSOR_H +#define __VDSO_PROCESSOR_H + +#ifndef __ASSEMBLY__ + +#include <asm/vdso/processor.h> + +#endif /* __ASSEMBLY__ */ + +#endif /* __VDSO_PROCESSOR_H */ diff --git a/include/vdso/time.h b/include/vdso/time.h new file mode 100644 index 000000000000..739f53cd2949 --- /dev/null +++ b/include/vdso/time.h @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __VDSO_TIME_H +#define __VDSO_TIME_H + +#include <uapi/linux/types.h> + +struct timens_offset { + s64 sec; + u64 nsec; +}; + +#endif /* __VDSO_TIME_H */ diff --git a/include/vdso/time32.h b/include/vdso/time32.h new file mode 100644 index 000000000000..fdf56f932f67 --- /dev/null +++ b/include/vdso/time32.h @@ -0,0 +1,17 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __VDSO_TIME32_H +#define __VDSO_TIME32_H + +typedef s32 old_time32_t; + +struct old_timespec32 { + old_time32_t tv_sec; + s32 tv_nsec; +}; + +struct old_timeval32 { + old_time32_t tv_sec; + s32 tv_usec; +}; + +#endif /* __VDSO_TIME32_H */ diff --git a/include/vdso/time64.h b/include/vdso/time64.h new file mode 100644 index 000000000000..9d43c3f5e89d --- /dev/null +++ b/include/vdso/time64.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __VDSO_TIME64_H +#define __VDSO_TIME64_H + +/* Parameters used to convert the timespec values: */ +#define MSEC_PER_SEC 1000L +#define USEC_PER_MSEC 1000L +#define NSEC_PER_USEC 1000L +#define NSEC_PER_MSEC 1000000L +#define USEC_PER_SEC 1000000L +#define NSEC_PER_SEC 1000000000L +#define FSEC_PER_SEC 1000000000000000LL + +#endif /* __VDSO_TIME64_H */ diff --git a/include/video/mmp_disp.h b/include/video/mmp_disp.h index 1f9bc133e230..77252cb46361 100644 --- a/include/video/mmp_disp.h +++ b/include/video/mmp_disp.h @@ -231,7 +231,7 @@ struct mmp_path { /* layers */ int overlay_num; - struct mmp_overlay overlays[0]; + struct mmp_overlay overlays[]; }; extern struct mmp_path *mmp_get_path(const char *name); diff --git a/include/video/samsung_fimd.h b/include/video/samsung_fimd.h index b6571c3cfa31..c4a93ce1de48 100644 --- a/include/video/samsung_fimd.h +++ b/include/video/samsung_fimd.h @@ -10,7 +10,7 @@ * * This is the register set for the fimd and new style framebuffer interface * found from the S3C2443 onwards into the S3C2416, S3C2450, the - * S3C64XX series such as the S3C6400 and S3C6410, and EXYNOS series. + * S3C64XX series such as the S3C6400 and S3C6410, and Exynos series. 
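Several hunks in this diff (vendor_cmd[], extra_pages[], overlays[]) replace GCC zero-length arrays with C99 flexible array members; the allocation pattern is unchanged, and the conversion mainly lets the compiler catch sizeof() misuse on the trailing member. A hypothetical kernel-side helper showing the usual sizing idiom for the mmp_path case:

#include <linux/overflow.h>
#include <linux/slab.h>
#include <video/mmp_disp.h>

static struct mmp_path *example_alloc_path(int overlay_num)
{
	struct mmp_path *path;

	/* struct_size() accounts for the flexible 'overlays' array. */
	path = kzalloc(struct_size(path, overlays, overlay_num), GFP_KERNEL);
	if (path)
		path->overlay_num = overlay_num;
	return path;
}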
*/ /* VIDCON0 */ diff --git a/include/xen/interface/io/tpmif.h b/include/xen/interface/io/tpmif.h index 28e7dcd75e82..f8aa8bac5196 100644 --- a/include/xen/interface/io/tpmif.h +++ b/include/xen/interface/io/tpmif.h @@ -46,7 +46,7 @@ struct vtpm_shared_page { uint8_t pad; uint8_t nr_extra_pages; /* extra pages for long packets; may be zero */ - uint32_t extra_pages[0]; /* grant IDs; length in nr_extra_pages */ + uint32_t extra_pages[]; /* grant IDs; length in nr_extra_pages */ }; #endif diff --git a/include/xen/xenbus.h b/include/xen/xenbus.h index 89a889585ba0..8c0d1edc121c 100644 --- a/include/xen/xenbus.h +++ b/include/xen/xenbus.h @@ -42,6 +42,7 @@ #include <linux/completion.h> #include <linux/init.h> #include <linux/slab.h> +#include <linux/semaphore.h> #include <xen/interface/xen.h> #include <xen/interface/grant_table.h> #include <xen/interface/io/xenbus.h> @@ -76,7 +77,7 @@ struct xenbus_device { enum xenbus_state state; struct completion down; struct work_struct work; - spinlock_t reclaim_lock; + struct semaphore reclaim_sem; }; static inline struct xenbus_device *to_xenbus_device(struct device *dev) @@ -208,15 +209,8 @@ int xenbus_grant_ring(struct xenbus_device *dev, void *vaddr, unsigned int nr_pages, grant_ref_t *grefs); int xenbus_map_ring_valloc(struct xenbus_device *dev, grant_ref_t *gnt_refs, unsigned int nr_grefs, void **vaddr); -int xenbus_map_ring(struct xenbus_device *dev, - grant_ref_t *gnt_refs, unsigned int nr_grefs, - grant_handle_t *handles, unsigned long *vaddrs, - bool *leaked); int xenbus_unmap_ring_vfree(struct xenbus_device *dev, void *vaddr); -int xenbus_unmap_ring(struct xenbus_device *dev, - grant_handle_t *handles, unsigned int nr_handles, - unsigned long *vaddrs); int xenbus_alloc_evtchn(struct xenbus_device *dev, int *port); int xenbus_free_evtchn(struct xenbus_device *dev, int port); |
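With xenbus_map_ring()/xenbus_unmap_ring() removed from the public interface above, backends are expected to use only the valloc/vfree pair. A hedged backend-side sketch (the single-page ring and the helper name are illustrative, not from this diff):

#include <xen/xenbus.h>

/* Map a one-page ring granted by the frontend; unmap later with
 * xenbus_unmap_ring_vfree(dev, vaddr). */
static void *backend_map_ring(struct xenbus_device *dev, grant_ref_t ring_ref)
{
	void *vaddr;
	int err;

	err = xenbus_map_ring_valloc(dev, &ring_ref, 1, &vaddr);
	if (err)
		return NULL;
	return vaddr;
}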