Diffstat (limited to 'include')
-rw-r--r--  include/asm-generic/bug.h | 6
-rw-r--r--  include/asm-generic/cacheflush.h | 74
-rw-r--r--  include/asm-generic/vmlinux.lds.h | 11
-rw-r--r--  include/drm/drm_modes.h | 2
-rw-r--r--  include/dt-bindings/clock/exynos4.h | 1
-rw-r--r--  include/dt-bindings/clock/exynos5420.h | 18
-rw-r--r--  include/dt-bindings/clock/g12a-clkc.h | 1
-rw-r--r--  include/dt-bindings/clock/imx8mm-clock.h | 11
-rw-r--r--  include/dt-bindings/clock/imx8mq-clock.h | 5
-rw-r--r--  include/dt-bindings/clock/meson8b-clkc.h | 3
-rw-r--r--  include/dt-bindings/clock/mt8516-clk.h | 17
-rw-r--r--  include/dt-bindings/clock/qcom,gcc-qcs404.h | 7
-rw-r--r--  include/dt-bindings/clock/qcom,gpucc-msm8998.h | 29
-rw-r--r--  include/dt-bindings/clock/rk3228-cru.h | 1
-rw-r--r--  include/dt-bindings/clock/rk3328-cru.h | 1
-rw-r--r--  include/dt-bindings/clock/stratix10-clock.h | 4
-rw-r--r--  include/dt-bindings/gpio/tegra186-gpio.h | 41
-rw-r--r--  include/dt-bindings/power/qcom-aoss-qmp.h | 14
-rw-r--r--  include/dt-bindings/power/qcom-rpmpd.h | 34
-rw-r--r--  include/dt-bindings/reset/bitmain,bm1880-reset.h | 51
-rw-r--r--  include/linux/acpi.h | 5
-rw-r--r--  include/linux/bits.h | 17
-rw-r--r--  include/linux/ceph/ceph_features.h | 1
-rw-r--r--  include/linux/ceph/ceph_fs.h | 2
-rw-r--r--  include/linux/ceph/cls_lock_client.h | 3
-rw-r--r--  include/linux/ceph/decode.h | 13
-rw-r--r--  include/linux/ceph/libceph.h | 10
-rw-r--r--  include/linux/ceph/mon_client.h | 1
-rw-r--r--  include/linux/ceph/osd_client.h | 12
-rw-r--r--  include/linux/ceph/striper.h | 2
-rw-r--r--  include/linux/cgroup-defs.h | 2
-rw-r--r--  include/linux/clk-provider.h | 103
-rw-r--r--  include/linux/clk.h | 47
-rw-r--r--  include/linux/coda.h | 3
-rw-r--r--  include/linux/coda_psdev.h | 72
-rw-r--r--  include/linux/compat.h | 3
-rw-r--r--  include/linux/connector.h | 63
-rw-r--r--  include/linux/cpufreq.h | 14
-rw-r--r--  include/linux/dax.h | 41
-rw-r--r--  include/linux/dcache.h | 1
-rw-r--r--  include/linux/device-mapper.h | 17
-rw-r--r--  include/linux/device.h | 2
-rw-r--r--  include/linux/dma/edma.h | 47
-rw-r--r--  include/linux/dmaengine.h | 12
-rw-r--r--  include/linux/filter.h | 2
-rw-r--r--  include/linux/fpga/adi-axi-common.h | 19
-rw-r--r--  include/linux/fs.h | 21
-rw-r--r--  include/linux/fs_context.h | 7
-rw-r--r--  include/linux/fs_pin.h | 1
-rw-r--r--  include/linux/ftrace.h | 4
-rw-r--r--  include/linux/huge_mm.h | 23
-rw-r--r--  include/linux/hw_random.h | 2
-rw-r--r--  include/linux/hwspinlock.h | 61
-rw-r--r--  include/linux/init.h | 5
-rw-r--r--  include/linux/intel_rapl.h | 155
-rw-r--r--  include/linux/io.h | 1
-rw-r--r--  include/linux/iomap.h | 17
-rw-r--r--  include/linux/iversion.h | 24
-rw-r--r--  include/linux/kernel.h | 2
-rw-r--r--  include/linux/kprobes.h | 19
-rw-r--r--  include/linux/libnvdimm.h | 10
-rw-r--r--  include/linux/lockdep.h | 2
-rw-r--r--  include/linux/lz4.h | 18
-rw-r--r--  include/linux/memory.h | 11
-rw-r--r--  include/linux/memory_hotplug.h | 27
-rw-r--r--  include/linux/migrate.h | 3
-rw-r--r--  include/linux/mm.h | 83
-rw-r--r--  include/linux/mmzone.h | 88
-rw-r--r--  include/linux/moduleloader.h | 5
-rw-r--r--  include/linux/mutex.h | 2
-rw-r--r--  include/linux/nfs4.h | 1
-rw-r--r--  include/linux/nfs_fs.h | 2
-rw-r--r--  include/linux/nfs_fs_sb.h | 1
-rw-r--r--  include/linux/node.h | 14
-rw-r--r--  include/linux/pci_ids.h | 1
-rw-r--r--  include/linux/pfn_t.h | 4
-rw-r--r--  include/linux/pid.h | 5
-rw-r--r--  include/linux/platform_data/dma-imx.h | 1
-rw-r--r--  include/linux/platform_data/ti-sysc.h | 12
-rw-r--r--  include/linux/platform_data/video-clcd-versatile.h | 28
-rw-r--r--  include/linux/platform_data/x86/asus-wmi.h | 2
-rw-r--r--  include/linux/platform_device.h | 2
-rw-r--r--  include/linux/pm_qos.h | 48
-rw-r--r--  include/linux/poison.h | 2
-rw-r--r--  include/linux/pseudo_fs.h | 16
-rw-r--r--  include/linux/ramfs.h | 1
-rw-r--r--  include/linux/rbtree.h | 70
-rw-r--r--  include/linux/rbtree_augmented.h | 27
-rw-r--r--  include/linux/remoteproc.h | 32
-rw-r--r--  include/linux/rwsem.h | 2
-rw-r--r--  include/linux/sched/signal.h | 16
-rw-r--r--  include/linux/sched/task.h | 1
-rw-r--r--  include/linux/scmi_protocol.h | 1
-rw-r--r--  include/linux/serial_core.h | 2
-rw-r--r--  include/linux/signal.h | 4
-rw-r--r--  include/linux/soc/qcom/mdt_loader.h | 2
-rw-r--r--  include/linux/soc/ti/ti_sci_protocol.h | 274
-rw-r--r--  include/linux/sudmac.h | 49
-rw-r--r--  include/linux/sunrpc/bc_xprt.h | 1
-rw-r--r--  include/linux/sunrpc/clnt.h | 4
-rw-r--r--  include/linux/sunrpc/metrics.h | 7
-rw-r--r--  include/linux/sunrpc/sched.h | 4
-rw-r--r--  include/linux/sunrpc/xprt.h | 10
-rw-r--r--  include/linux/sunrpc/xprtmultipath.h | 2
-rw-r--r--  include/linux/sunrpc/xprtsock.h | 5
-rw-r--r--  include/linux/swapops.h | 5
-rw-r--r--  include/linux/syscalls.h | 4
-rw-r--r--  include/linux/sysctl.h | 7
-rw-r--r--  include/linux/thermal.h | 4
-rw-r--r--  include/linux/trace_events.h | 9
-rw-r--r--  include/linux/tracehook.h | 9
-rw-r--r--  include/linux/uaccess.h | 20
-rw-r--r--  include/memory/jedec_ddr.h | 172
-rw-r--r--  include/net/tcp.h | 3
-rw-r--r--  include/soc/fsl/bman.h | 8
-rw-r--r--  include/soc/fsl/qman.h | 9
-rw-r--r--  include/sound/hda_codec.h | 2
-rw-r--r--  include/trace/events/btrfs.h | 40
-rw-r--r--  include/trace/events/rpcrdma.h | 90
-rw-r--r--  include/uapi/asm-generic/mman-common.h | 15
-rw-r--r--  include/uapi/asm-generic/mman.h | 10
-rw-r--r--  include/uapi/asm-generic/unistd.h | 2
-rw-r--r--  include/uapi/drm/amdgpu_drm.h | 7
-rw-r--r--  include/uapi/linux/adfs_fs.h | 6
-rw-r--r--  include/uapi/linux/bpf.h | 6
-rw-r--r--  include/uapi/linux/btrfs_tree.h | 2
-rw-r--r--  include/uapi/linux/coda.h | 56
-rw-r--r--  include/uapi/linux/coda_psdev.h | 28
-rw-r--r--  include/uapi/linux/magic.h | 1
-rw-r--r--  include/uapi/linux/pkt_sched.h | 2
-rw-r--r--  include/uapi/linux/ptrace.h | 35
-rw-r--r--  include/uapi/linux/virtio_ids.h | 2
-rw-r--r--  include/uapi/linux/virtio_iommu.h | 161
-rw-r--r--  include/uapi/linux/virtio_pmem.h | 34
-rw-r--r--  include/uapi/rdma/rdma_user_ioctl_cmds.h | 2
-rw-r--r--  include/xen/balloon.h | 10
-rw-r--r--  include/xen/events.h | 3
-rw-r--r--  include/xen/tmem.h | 18
138 files changed, 2062 insertions, 809 deletions
diff --git a/include/asm-generic/bug.h b/include/asm-generic/bug.h
index 0e9bd9c83870..aa6c093d9ce9 100644
--- a/include/asm-generic/bug.h
+++ b/include/asm-generic/bug.h
@@ -104,8 +104,10 @@ extern void warn_slowpath_null(const char *file, const int line);
warn_slowpath_fmt_taint(__FILE__, __LINE__, taint, arg)
#else
extern __printf(1, 2) void __warn_printk(const char *fmt, ...);
-#define __WARN() __WARN_TAINT(TAINT_WARN)
-#define __WARN_printf(arg...) do { __warn_printk(arg); __WARN(); } while (0)
+#define __WARN() do { \
+ printk(KERN_WARNING CUT_HERE); __WARN_TAINT(TAINT_WARN); \
+} while (0)
+#define __WARN_printf(arg...) __WARN_printf_taint(TAINT_WARN, arg)
#define __WARN_printf_taint(taint, arg...) \
do { __warn_printk(arg); __WARN_TAINT(taint); } while (0)
#endif
diff --git a/include/asm-generic/cacheflush.h b/include/asm-generic/cacheflush.h
index 0dd47a6db2cf..a950a22c4890 100644
--- a/include/asm-generic/cacheflush.h
+++ b/include/asm-generic/cacheflush.h
@@ -5,24 +5,70 @@
/* Keep includes the same across arches. */
#include <linux/mm.h>
+#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
+
/*
* The cache doesn't need to be flushed when TLB entries change when
* the cache is mapped to physical memory, not virtual memory
*/
-#define flush_cache_all() do { } while (0)
-#define flush_cache_mm(mm) do { } while (0)
-#define flush_cache_dup_mm(mm) do { } while (0)
-#define flush_cache_range(vma, start, end) do { } while (0)
-#define flush_cache_page(vma, vmaddr, pfn) do { } while (0)
-#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
-#define flush_dcache_page(page) do { } while (0)
-#define flush_dcache_mmap_lock(mapping) do { } while (0)
-#define flush_dcache_mmap_unlock(mapping) do { } while (0)
-#define flush_icache_range(start, end) do { } while (0)
-#define flush_icache_page(vma,pg) do { } while (0)
-#define flush_icache_user_range(vma,pg,adr,len) do { } while (0)
-#define flush_cache_vmap(start, end) do { } while (0)
-#define flush_cache_vunmap(start, end) do { } while (0)
+static inline void flush_cache_all(void)
+{
+}
+
+static inline void flush_cache_mm(struct mm_struct *mm)
+{
+}
+
+static inline void flush_cache_dup_mm(struct mm_struct *mm)
+{
+}
+
+static inline void flush_cache_range(struct vm_area_struct *vma,
+ unsigned long start,
+ unsigned long end)
+{
+}
+
+static inline void flush_cache_page(struct vm_area_struct *vma,
+ unsigned long vmaddr,
+ unsigned long pfn)
+{
+}
+
+static inline void flush_dcache_page(struct page *page)
+{
+}
+
+static inline void flush_dcache_mmap_lock(struct address_space *mapping)
+{
+}
+
+static inline void flush_dcache_mmap_unlock(struct address_space *mapping)
+{
+}
+
+static inline void flush_icache_range(unsigned long start, unsigned long end)
+{
+}
+
+static inline void flush_icache_page(struct vm_area_struct *vma,
+ struct page *page)
+{
+}
+
+static inline void flush_icache_user_range(struct vm_area_struct *vma,
+ struct page *page,
+ unsigned long addr, int len)
+{
+}
+
+static inline void flush_cache_vmap(unsigned long start, unsigned long end)
+{
+}
+
+static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
+{
+}
#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
do { \
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index ca42182992a5..cd28f63bfbc7 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -246,6 +246,16 @@
#define ACPI_PROBE_TABLE(name)
#endif
+#ifdef CONFIG_THERMAL
+#define THERMAL_TABLE(name) \
+ . = ALIGN(8); \
+ __##name##_thermal_table = .; \
+ KEEP(*(__##name##_thermal_table)) \
+ __##name##_thermal_table_end = .;
+#else
+#define THERMAL_TABLE(name)
+#endif
+
#define KERNEL_DTB() \
STRUCT_ALIGN(); \
__dtb_start = .; \
@@ -615,6 +625,7 @@
IRQCHIP_OF_MATCH_TABLE() \
ACPI_PROBE_TABLE(irqchip) \
ACPI_PROBE_TABLE(timer) \
+ THERMAL_TABLE(governor) \
EARLYCON_TABLE() \
LSM_TABLE()
diff --git a/include/drm/drm_modes.h b/include/drm/drm_modes.h
index 083f16747369..e946e20c61d8 100644
--- a/include/drm/drm_modes.h
+++ b/include/drm/drm_modes.h
@@ -537,7 +537,7 @@ void drm_connector_list_update(struct drm_connector *connector);
/* parsing cmdline modes */
bool
drm_mode_parse_command_line_for_connector(const char *mode_option,
- struct drm_connector *connector,
+ const struct drm_connector *connector,
struct drm_cmdline_mode *mode);
struct drm_display_mode *
drm_mode_create_from_cmdline_mode(struct drm_device *dev,
diff --git a/include/dt-bindings/clock/exynos4.h b/include/dt-bindings/clock/exynos4.h
index a0439ce8e8d3..88ec3968b90a 100644
--- a/include/dt-bindings/clock/exynos4.h
+++ b/include/dt-bindings/clock/exynos4.h
@@ -187,6 +187,7 @@
#define CLK_MIPI_HSI 349 /* Exynos4210 only */
#define CLK_PIXELASYNCM0 351
#define CLK_PIXELASYNCM1 352
+#define CLK_ASYNC_G3D 353 /* Exynos4x12 only */
#define CLK_PWM_ISP_SCLK 379 /* Exynos4x12 only */
#define CLK_SPI0_ISP_SCLK 380 /* Exynos4x12 only */
#define CLK_SPI1_ISP_SCLK 381 /* Exynos4x12 only */
diff --git a/include/dt-bindings/clock/exynos5420.h b/include/dt-bindings/clock/exynos5420.h
index 355f469943f1..02d5ac469a3d 100644
--- a/include/dt-bindings/clock/exynos5420.h
+++ b/include/dt-bindings/clock/exynos5420.h
@@ -60,6 +60,7 @@
#define CLK_MAU_EPLL 159
#define CLK_SCLK_HSIC_12M 160
#define CLK_SCLK_MPHY_IXTAL24 161
+#define CLK_SCLK_BPLL 162
/* gate clocks */
#define CLK_UART0 257
@@ -195,6 +196,16 @@
#define CLK_ACLK432_CAM 518
#define CLK_ACLK_FL1550_CAM 519
#define CLK_ACLK550_CAM 520
+#define CLK_CLKM_PHY0 521
+#define CLK_CLKM_PHY1 522
+#define CLK_ACLK_PPMU_DREX0_0 523
+#define CLK_ACLK_PPMU_DREX0_1 524
+#define CLK_ACLK_PPMU_DREX1_0 525
+#define CLK_ACLK_PPMU_DREX1_1 526
+#define CLK_PCLK_PPMU_DREX0_0 527
+#define CLK_PCLK_PPMU_DREX0_1 528
+#define CLK_PCLK_PPMU_DREX1_0 529
+#define CLK_PCLK_PPMU_DREX1_1 530
/* mux clocks */
#define CLK_MOUT_HDMI 640
@@ -217,6 +228,8 @@
#define CLK_MOUT_EPLL 657
#define CLK_MOUT_MAU_EPLL 658
#define CLK_MOUT_USER_MAU_EPLL 659
+#define CLK_MOUT_SCLK_SPLL 660
+#define CLK_MOUT_MX_MSPLL_CCORE_PHY 661
/* divider clocks */
#define CLK_DOUT_PIXEL 768
@@ -248,8 +261,11 @@
#define CLK_DOUT_CCLK_DREX0 794
#define CLK_DOUT_CLK2X_PHY0 795
#define CLK_DOUT_PCLK_CORE_MEM 796
+#define CLK_FF_DOUT_SPLL2 797
+#define CLK_DOUT_PCLK_DREX0 798
+#define CLK_DOUT_PCLK_DREX1 799
/* must be greater than maximal clock id */
-#define CLK_NR_CLKS 797
+#define CLK_NR_CLKS 800
#endif /* _DT_BINDINGS_CLOCK_EXYNOS_5420_H */
diff --git a/include/dt-bindings/clock/g12a-clkc.h b/include/dt-bindings/clock/g12a-clkc.h
index e10470ed7c4f..b6b127e45634 100644
--- a/include/dt-bindings/clock/g12a-clkc.h
+++ b/include/dt-bindings/clock/g12a-clkc.h
@@ -136,5 +136,6 @@
#define CLKID_VDEC_1 204
#define CLKID_VDEC_HEVC 207
#define CLKID_VDEC_HEVCF 210
+#define CLKID_TS 212
#endif /* __G12A_CLKC_H */
diff --git a/include/dt-bindings/clock/imx8mm-clock.h b/include/dt-bindings/clock/imx8mm-clock.h
index 1b4353e7b486..07e6c686f3ef 100644
--- a/include/dt-bindings/clock/imx8mm-clock.h
+++ b/include/dt-bindings/clock/imx8mm-clock.h
@@ -239,6 +239,15 @@
#define IMX8MM_CLK_NAND_USDHC_BUS_RAWNAND_CLK 222
-#define IMX8MM_CLK_END 223
+#define IMX8MM_CLK_GPIO1_ROOT 223
+#define IMX8MM_CLK_GPIO2_ROOT 224
+#define IMX8MM_CLK_GPIO3_ROOT 225
+#define IMX8MM_CLK_GPIO4_ROOT 226
+#define IMX8MM_CLK_GPIO5_ROOT 227
+
+#define IMX8MM_CLK_SNVS_ROOT 228
+#define IMX8MM_CLK_GIC 229
+
+#define IMX8MM_CLK_END 230
#endif
diff --git a/include/dt-bindings/clock/imx8mq-clock.h b/include/dt-bindings/clock/imx8mq-clock.h
index 6677e920dc2d..65463673d25e 100644
--- a/include/dt-bindings/clock/imx8mq-clock.h
+++ b/include/dt-bindings/clock/imx8mq-clock.h
@@ -400,5 +400,8 @@
#define IMX8MQ_CLK_GPIO4_ROOT 262
#define IMX8MQ_CLK_GPIO5_ROOT 263
-#define IMX8MQ_CLK_END 264
+#define IMX8MQ_CLK_SNVS_ROOT 264
+#define IMX8MQ_CLK_GIC 265
+
+#define IMX8MQ_CLK_END 266
#endif /* __DT_BINDINGS_CLOCK_IMX8MQ_H */
diff --git a/include/dt-bindings/clock/meson8b-clkc.h b/include/dt-bindings/clock/meson8b-clkc.h
index 47556539f0ee..68862aaf977e 100644
--- a/include/dt-bindings/clock/meson8b-clkc.h
+++ b/include/dt-bindings/clock/meson8b-clkc.h
@@ -112,5 +112,8 @@
#define CLKID_VDEC_HCODEC 199
#define CLKID_VDEC_2 202
#define CLKID_VDEC_HEVC 206
+#define CLKID_CTS_AMCLK 209
+#define CLKID_CTS_MCLK_I958 212
+#define CLKID_CTS_I958 213
#endif /* __MESON8B_CLKC_H */
diff --git a/include/dt-bindings/clock/mt8516-clk.h b/include/dt-bindings/clock/mt8516-clk.h
index 9cfca53cd78d..816447b98edd 100644
--- a/include/dt-bindings/clock/mt8516-clk.h
+++ b/include/dt-bindings/clock/mt8516-clk.h
@@ -208,4 +208,21 @@
#define CLK_TOP_MSDC2_INFRA 176
#define CLK_TOP_NR_CLK 177
+/* AUDSYS */
+
+#define CLK_AUD_AFE 0
+#define CLK_AUD_I2S 1
+#define CLK_AUD_22M 2
+#define CLK_AUD_24M 3
+#define CLK_AUD_INTDIR 4
+#define CLK_AUD_APLL2_TUNER 5
+#define CLK_AUD_APLL_TUNER 6
+#define CLK_AUD_HDMI 7
+#define CLK_AUD_SPDF 8
+#define CLK_AUD_ADC 9
+#define CLK_AUD_DAC 10
+#define CLK_AUD_DAC_PREDIS 11
+#define CLK_AUD_TML 12
+#define CLK_AUD_NR_CLK 13
+
#endif /* _DT_BINDINGS_CLK_MT8516_H */
diff --git a/include/dt-bindings/clock/qcom,gcc-qcs404.h b/include/dt-bindings/clock/qcom,gcc-qcs404.h
index 454b3f43f538..2cd62c98561f 100644
--- a/include/dt-bindings/clock/qcom,gcc-qcs404.h
+++ b/include/dt-bindings/clock/qcom,gcc-qcs404.h
@@ -166,5 +166,12 @@
#define GCC_PCIEPHY_0_PHY_BCR 12
#define GCC_EMAC_BCR 13
#define GCC_CDSP_RESTART 14
+#define GCC_PCIE_0_AXI_MASTER_STICKY_ARES 15
+#define GCC_PCIE_0_AHB_ARES 16
+#define GCC_PCIE_0_AXI_SLAVE_ARES 17
+#define GCC_PCIE_0_AXI_MASTER_ARES 18
+#define GCC_PCIE_0_CORE_STICKY_ARES 19
+#define GCC_PCIE_0_SLEEP_ARES 20
+#define GCC_PCIE_0_PIPE_ARES 21
#endif
diff --git a/include/dt-bindings/clock/qcom,gpucc-msm8998.h b/include/dt-bindings/clock/qcom,gpucc-msm8998.h
new file mode 100644
index 000000000000..2623570ee974
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,gpucc-msm8998.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2019, Jeffrey Hugo
+ */
+
+#ifndef _DT_BINDINGS_CLK_MSM_GPUCC_8998_H
+#define _DT_BINDINGS_CLK_MSM_GPUCC_8998_H
+
+#define GPUPLL0 0
+#define GPUPLL0_OUT_EVEN 1
+#define RBCPR_CLK_SRC 2
+#define GFX3D_CLK_SRC 3
+#define RBBMTIMER_CLK_SRC 4
+#define GFX3D_ISENSE_CLK_SRC 5
+#define RBCPR_CLK 6
+#define GFX3D_CLK 7
+#define RBBMTIMER_CLK 8
+#define GFX3D_ISENSE_CLK 9
+#define GPUCC_CXO_CLK 10
+
+#define GPU_CX_BCR 0
+#define RBCPR_BCR 1
+#define GPU_GX_BCR 2
+#define GPU_ISENSE_BCR 3
+
+#define GPU_CX_GDSC 1
+#define GPU_GX_GDSC 2
+
+#endif
diff --git a/include/dt-bindings/clock/rk3228-cru.h b/include/dt-bindings/clock/rk3228-cru.h
index 3b245e3df8da..de550ea56eeb 100644
--- a/include/dt-bindings/clock/rk3228-cru.h
+++ b/include/dt-bindings/clock/rk3228-cru.h
@@ -64,6 +64,7 @@
#define SCLK_WIFI 141
#define SCLK_OTGPHY0 142
#define SCLK_OTGPHY1 143
+#define SCLK_HDMI_PHY 144
/* dclk gates */
#define DCLK_VOP 190
diff --git a/include/dt-bindings/clock/rk3328-cru.h b/include/dt-bindings/clock/rk3328-cru.h
index afb811340382..555b4ff660ae 100644
--- a/include/dt-bindings/clock/rk3328-cru.h
+++ b/include/dt-bindings/clock/rk3328-cru.h
@@ -164,6 +164,7 @@
#define PCLK_DCF 233
#define PCLK_SARADC 234
#define PCLK_ACODECPHY 235
+#define PCLK_WDT 236
/* hclk gates */
#define HCLK_PERI 308
diff --git a/include/dt-bindings/clock/stratix10-clock.h b/include/dt-bindings/clock/stratix10-clock.h
index 0ac1c90a18bf..08b98e20b7cc 100644
--- a/include/dt-bindings/clock/stratix10-clock.h
+++ b/include/dt-bindings/clock/stratix10-clock.h
@@ -79,6 +79,8 @@
#define STRATIX10_USB_CLK 59
#define STRATIX10_SPI_M_CLK 60
#define STRATIX10_NAND_CLK 61
-#define STRATIX10_NUM_CLKS 62
+#define STRATIX10_NAND_X_CLK 62
+#define STRATIX10_NAND_ECC_CLK 63
+#define STRATIX10_NUM_CLKS 64
#endif /* __STRATIX10_CLOCK_H */
diff --git a/include/dt-bindings/gpio/tegra186-gpio.h b/include/dt-bindings/gpio/tegra186-gpio.h
index cabc5712e745..0782b05e2775 100644
--- a/include/dt-bindings/gpio/tegra186-gpio.h
+++ b/include/dt-bindings/gpio/tegra186-gpio.h
@@ -41,34 +41,6 @@
#define TEGRA186_MAIN_GPIO(port, offset) \
((TEGRA186_MAIN_GPIO_PORT_##port * 8) + offset)
-/* need to keep these for backwards-compatibility */
-#define TEGRA_MAIN_GPIO_PORT_A 0
-#define TEGRA_MAIN_GPIO_PORT_B 1
-#define TEGRA_MAIN_GPIO_PORT_C 2
-#define TEGRA_MAIN_GPIO_PORT_D 3
-#define TEGRA_MAIN_GPIO_PORT_E 4
-#define TEGRA_MAIN_GPIO_PORT_F 5
-#define TEGRA_MAIN_GPIO_PORT_G 6
-#define TEGRA_MAIN_GPIO_PORT_H 7
-#define TEGRA_MAIN_GPIO_PORT_I 8
-#define TEGRA_MAIN_GPIO_PORT_J 9
-#define TEGRA_MAIN_GPIO_PORT_K 10
-#define TEGRA_MAIN_GPIO_PORT_L 11
-#define TEGRA_MAIN_GPIO_PORT_M 12
-#define TEGRA_MAIN_GPIO_PORT_N 13
-#define TEGRA_MAIN_GPIO_PORT_O 14
-#define TEGRA_MAIN_GPIO_PORT_P 15
-#define TEGRA_MAIN_GPIO_PORT_Q 16
-#define TEGRA_MAIN_GPIO_PORT_R 17
-#define TEGRA_MAIN_GPIO_PORT_T 18
-#define TEGRA_MAIN_GPIO_PORT_X 19
-#define TEGRA_MAIN_GPIO_PORT_Y 20
-#define TEGRA_MAIN_GPIO_PORT_BB 21
-#define TEGRA_MAIN_GPIO_PORT_CC 22
-
-#define TEGRA_MAIN_GPIO(port, offset) \
- ((TEGRA_MAIN_GPIO_PORT_##port * 8) + offset)
-
/* GPIOs implemented by AON GPIO controller */
#define TEGRA186_AON_GPIO_PORT_S 0
#define TEGRA186_AON_GPIO_PORT_U 1
@@ -82,17 +54,4 @@
#define TEGRA186_AON_GPIO(port, offset) \
((TEGRA186_AON_GPIO_PORT_##port * 8) + offset)
-/* need to keep these for backwards-compatibility */
-#define TEGRA_AON_GPIO_PORT_S 0
-#define TEGRA_AON_GPIO_PORT_U 1
-#define TEGRA_AON_GPIO_PORT_V 2
-#define TEGRA_AON_GPIO_PORT_W 3
-#define TEGRA_AON_GPIO_PORT_Z 4
-#define TEGRA_AON_GPIO_PORT_AA 5
-#define TEGRA_AON_GPIO_PORT_EE 6
-#define TEGRA_AON_GPIO_PORT_FF 7
-
-#define TEGRA_AON_GPIO(port, offset) \
- ((TEGRA_AON_GPIO_PORT_##port * 8) + offset)
-
#endif
diff --git a/include/dt-bindings/power/qcom-aoss-qmp.h b/include/dt-bindings/power/qcom-aoss-qmp.h
new file mode 100644
index 000000000000..ec336d31dee4
--- /dev/null
+++ b/include/dt-bindings/power/qcom-aoss-qmp.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2018, Linaro Ltd. */
+
+#ifndef __DT_BINDINGS_POWER_QCOM_AOSS_QMP_H
+#define __DT_BINDINGS_POWER_QCOM_AOSS_QMP_H
+
+#define AOSS_QMP_LS_CDSP 0
+#define AOSS_QMP_LS_LPASS 1
+#define AOSS_QMP_LS_MODEM 2
+#define AOSS_QMP_LS_SLPI 3
+#define AOSS_QMP_LS_SPSS 4
+#define AOSS_QMP_LS_VENUS 5
+
+#endif
diff --git a/include/dt-bindings/power/qcom-rpmpd.h b/include/dt-bindings/power/qcom-rpmpd.h
index 87d9c6611682..93e36d011527 100644
--- a/include/dt-bindings/power/qcom-rpmpd.h
+++ b/include/dt-bindings/power/qcom-rpmpd.h
@@ -36,4 +36,38 @@
#define MSM8996_VDDSSCX 5
#define MSM8996_VDDSSCX_VFC 6
+/* MSM8998 Power Domain Indexes */
+#define MSM8998_VDDCX 0
+#define MSM8998_VDDCX_AO 1
+#define MSM8998_VDDCX_VFL 2
+#define MSM8998_VDDMX 3
+#define MSM8998_VDDMX_AO 4
+#define MSM8998_VDDMX_VFL 5
+#define MSM8998_SSCCX 6
+#define MSM8998_SSCCX_VFL 7
+#define MSM8998_SSCMX 8
+#define MSM8998_SSCMX_VFL 9
+
+/* QCS404 Power Domains */
+#define QCS404_VDDMX 0
+#define QCS404_VDDMX_AO 1
+#define QCS404_VDDMX_VFL 2
+#define QCS404_LPICX 3
+#define QCS404_LPICX_VFL 4
+#define QCS404_LPIMX 5
+#define QCS404_LPIMX_VFL 6
+
+/* RPM SMD Power Domain performance levels */
+#define RPM_SMD_LEVEL_RETENTION 16
+#define RPM_SMD_LEVEL_RETENTION_PLUS 32
+#define RPM_SMD_LEVEL_MIN_SVS 48
+#define RPM_SMD_LEVEL_LOW_SVS 64
+#define RPM_SMD_LEVEL_SVS 128
+#define RPM_SMD_LEVEL_SVS_PLUS 192
+#define RPM_SMD_LEVEL_NOM 256
+#define RPM_SMD_LEVEL_NOM_PLUS 320
+#define RPM_SMD_LEVEL_TURBO 384
+#define RPM_SMD_LEVEL_TURBO_NO_CPR 416
+#define RPM_SMD_LEVEL_BINNING 512
+
#endif
diff --git a/include/dt-bindings/reset/bitmain,bm1880-reset.h b/include/dt-bindings/reset/bitmain,bm1880-reset.h
new file mode 100644
index 000000000000..4c0de5223773
--- /dev/null
+++ b/include/dt-bindings/reset/bitmain,bm1880-reset.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (c) 2018 Bitmain Ltd.
+ * Copyright (c) 2019 Linaro Ltd.
+ */
+
+#ifndef _DT_BINDINGS_BM1880_RESET_H
+#define _DT_BINDINGS_BM1880_RESET_H
+
+#define BM1880_RST_MAIN_AP 0
+#define BM1880_RST_SECOND_AP 1
+#define BM1880_RST_DDR 2
+#define BM1880_RST_VIDEO 3
+#define BM1880_RST_JPEG 4
+#define BM1880_RST_VPP 5
+#define BM1880_RST_GDMA 6
+#define BM1880_RST_AXI_SRAM 7
+#define BM1880_RST_TPU 8
+#define BM1880_RST_USB 9
+#define BM1880_RST_ETH0 10
+#define BM1880_RST_ETH1 11
+#define BM1880_RST_NAND 12
+#define BM1880_RST_EMMC 13
+#define BM1880_RST_SD 14
+#define BM1880_RST_SDMA 15
+#define BM1880_RST_I2S0 16
+#define BM1880_RST_I2S1 17
+#define BM1880_RST_UART0_1_CLK 18
+#define BM1880_RST_UART0_1_ACLK 19
+#define BM1880_RST_UART2_3_CLK 20
+#define BM1880_RST_UART2_3_ACLK 21
+#define BM1880_RST_MINER 22
+#define BM1880_RST_I2C0 23
+#define BM1880_RST_I2C1 24
+#define BM1880_RST_I2C2 25
+#define BM1880_RST_I2C3 26
+#define BM1880_RST_I2C4 27
+#define BM1880_RST_PWM0 28
+#define BM1880_RST_PWM1 29
+#define BM1880_RST_PWM2 30
+#define BM1880_RST_PWM3 31
+#define BM1880_RST_SPI 32
+#define BM1880_RST_GPIO0 33
+#define BM1880_RST_GPIO1 34
+#define BM1880_RST_GPIO2 35
+#define BM1880_RST_EFUSE 36
+#define BM1880_RST_WDT 37
+#define BM1880_RST_AHB_ROM 38
+#define BM1880_RST_SPIC 39
+
+#endif /* _DT_BINDINGS_BM1880_RESET_H */
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index a95cce5e82e7..9426b9aaed86 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -324,7 +324,10 @@ struct irq_domain *acpi_irq_create_hierarchy(unsigned int flags,
#ifdef CONFIG_X86_IO_APIC
extern int acpi_get_override_irq(u32 gsi, int *trigger, int *polarity);
#else
-#define acpi_get_override_irq(gsi, trigger, polarity) (-1)
+static inline int acpi_get_override_irq(u32 gsi, int *trigger, int *polarity)
+{
+ return -1;
+}
#endif
/*
* This function undoes the effect of one call to acpi_register_gsi().
diff --git a/include/linux/bits.h b/include/linux/bits.h
index 2b7b532c1d51..669d69441a62 100644
--- a/include/linux/bits.h
+++ b/include/linux/bits.h
@@ -1,13 +1,15 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_BITS_H
#define __LINUX_BITS_H
+
+#include <linux/const.h>
#include <asm/bitsperlong.h>
-#define BIT(nr) (1UL << (nr))
-#define BIT_ULL(nr) (1ULL << (nr))
-#define BIT_MASK(nr) (1UL << ((nr) % BITS_PER_LONG))
+#define BIT(nr) (UL(1) << (nr))
+#define BIT_ULL(nr) (ULL(1) << (nr))
+#define BIT_MASK(nr) (UL(1) << ((nr) % BITS_PER_LONG))
#define BIT_WORD(nr) ((nr) / BITS_PER_LONG)
-#define BIT_ULL_MASK(nr) (1ULL << ((nr) % BITS_PER_LONG_LONG))
+#define BIT_ULL_MASK(nr) (ULL(1) << ((nr) % BITS_PER_LONG_LONG))
#define BIT_ULL_WORD(nr) ((nr) / BITS_PER_LONG_LONG)
#define BITS_PER_BYTE 8
@@ -17,10 +19,11 @@
* GENMASK_ULL(39, 21) gives us the 64bit vector 0x000000ffffe00000.
*/
#define GENMASK(h, l) \
- (((~0UL) - (1UL << (l)) + 1) & (~0UL >> (BITS_PER_LONG - 1 - (h))))
+ (((~UL(0)) - (UL(1) << (l)) + 1) & \
+ (~UL(0) >> (BITS_PER_LONG - 1 - (h))))
#define GENMASK_ULL(h, l) \
- (((~0ULL) - (1ULL << (l)) + 1) & \
- (~0ULL >> (BITS_PER_LONG_LONG - 1 - (h))))
+ (((~ULL(0)) - (ULL(1) << (l)) + 1) & \
+ (~ULL(0) >> (BITS_PER_LONG_LONG - 1 - (h))))
#endif /* __LINUX_BITS_H */
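
For context, a minimal sketch of how these bit macros are typically used in driver code; the register layout and names below are illustrative assumptions, not taken from this patch:

#include <linux/types.h>
#include <linux/bits.h>

/* Hypothetical control register: mode field in bits 7..4, enable in bit 0 */
#define CTRL_MODE_MASK	GENMASK(7, 4)	/* == 0x000000f0 */
#define CTRL_ENABLE	BIT(0)

static u32 ctrl_set_mode(u32 reg, u32 mode)
{
	reg &= ~CTRL_MODE_MASK;			/* clear the old mode field */
	reg |= (mode << 4) & CTRL_MODE_MASK;	/* insert the new one */
	return reg | CTRL_ENABLE;
}
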
diff --git a/include/linux/ceph/ceph_features.h b/include/linux/ceph/ceph_features.h
index 65a38c4a02a1..39e6f4c57580 100644
--- a/include/linux/ceph/ceph_features.h
+++ b/include/linux/ceph/ceph_features.h
@@ -211,6 +211,7 @@ DEFINE_CEPH_FEATURE_DEPRECATED(63, 1, RESERVED_BROKEN, LUMINOUS) // client-facin
CEPH_FEATURE_MON_STATEFUL_SUB | \
CEPH_FEATURE_CRUSH_TUNABLES5 | \
CEPH_FEATURE_NEW_OSDOPREPLY_ENCODING | \
+ CEPH_FEATURE_MSG_ADDR2 | \
CEPH_FEATURE_CEPHX_V2)
#define CEPH_FEATURES_REQUIRED_DEFAULT 0
diff --git a/include/linux/ceph/ceph_fs.h b/include/linux/ceph/ceph_fs.h
index 3ac0feaf2b5e..cb21c5cf12c3 100644
--- a/include/linux/ceph/ceph_fs.h
+++ b/include/linux/ceph/ceph_fs.h
@@ -682,7 +682,7 @@ extern const char *ceph_cap_op_name(int op);
/* flags field in client cap messages (version >= 10) */
#define CEPH_CLIENT_CAPS_SYNC (1<<0)
#define CEPH_CLIENT_CAPS_NO_CAPSNAP (1<<1)
-#define CEPH_CLIENT_CAPS_PENDING_CAPSNAP (1<<2);
+#define CEPH_CLIENT_CAPS_PENDING_CAPSNAP (1<<2)
/*
* caps message, used for capability callbacks, acks, requests, etc.
diff --git a/include/linux/ceph/cls_lock_client.h b/include/linux/ceph/cls_lock_client.h
index bea6c77d2093..17bc7584d1fe 100644
--- a/include/linux/ceph/cls_lock_client.h
+++ b/include/linux/ceph/cls_lock_client.h
@@ -52,4 +52,7 @@ int ceph_cls_lock_info(struct ceph_osd_client *osdc,
char *lock_name, u8 *type, char **tag,
struct ceph_locker **lockers, u32 *num_lockers);
+int ceph_cls_assert_locked(struct ceph_osd_request *req, int which,
+ char *lock_name, u8 type, char *cookie, char *tag);
+
#endif
diff --git a/include/linux/ceph/decode.h b/include/linux/ceph/decode.h
index a6c2a48d42e0..450384fe487c 100644
--- a/include/linux/ceph/decode.h
+++ b/include/linux/ceph/decode.h
@@ -218,18 +218,27 @@ static inline void ceph_encode_timespec64(struct ceph_timespec *tv,
/*
* sockaddr_storage <-> ceph_sockaddr
*/
-static inline void ceph_encode_addr(struct ceph_entity_addr *a)
+#define CEPH_ENTITY_ADDR_TYPE_NONE 0
+#define CEPH_ENTITY_ADDR_TYPE_LEGACY __cpu_to_le32(1)
+
+static inline void ceph_encode_banner_addr(struct ceph_entity_addr *a)
{
__be16 ss_family = htons(a->in_addr.ss_family);
a->in_addr.ss_family = *(__u16 *)&ss_family;
+
+ /* Banner addresses require TYPE_NONE */
+ a->type = CEPH_ENTITY_ADDR_TYPE_NONE;
}
-static inline void ceph_decode_addr(struct ceph_entity_addr *a)
+static inline void ceph_decode_banner_addr(struct ceph_entity_addr *a)
{
__be16 ss_family = *(__be16 *)&a->in_addr.ss_family;
a->in_addr.ss_family = ntohs(ss_family);
WARN_ON(a->in_addr.ss_family == 512);
+ a->type = CEPH_ENTITY_ADDR_TYPE_LEGACY;
}
+extern int ceph_decode_entity_addr(void **p, void *end,
+ struct ceph_entity_addr *addr);
/*
* encoders
*/
diff --git a/include/linux/ceph/libceph.h b/include/linux/ceph/libceph.h
index 337d5049ff93..82156da3c650 100644
--- a/include/linux/ceph/libceph.h
+++ b/include/linux/ceph/libceph.h
@@ -84,11 +84,13 @@ struct ceph_options {
#define CEPH_MSG_MAX_MIDDLE_LEN (16*1024*1024)
/*
- * Handle the largest possible rbd object in one message.
+ * The largest possible rbd data object is 32M.
+ * The largest possible rbd object map object is 64M.
+ *
* There is no limit on the size of cephfs objects, but it has to obey
* rsize and wsize mount options anyway.
*/
-#define CEPH_MSG_MAX_DATA_LEN (32*1024*1024)
+#define CEPH_MSG_MAX_DATA_LEN (64*1024*1024)
#define CEPH_AUTH_NAME_DEFAULT "guest"
@@ -299,10 +301,6 @@ int ceph_wait_for_latest_osdmap(struct ceph_client *client,
/* pagevec.c */
extern void ceph_release_page_vector(struct page **pages, int num_pages);
-
-extern struct page **ceph_get_direct_page_vector(const void __user *data,
- int num_pages,
- bool write_page);
extern void ceph_put_page_vector(struct page **pages, int num_pages,
bool dirty);
extern struct page **ceph_alloc_page_vector(int num_pages, gfp_t flags);
diff --git a/include/linux/ceph/mon_client.h b/include/linux/ceph/mon_client.h
index 3a4688af7455..b4d134d3312a 100644
--- a/include/linux/ceph/mon_client.h
+++ b/include/linux/ceph/mon_client.h
@@ -104,7 +104,6 @@ struct ceph_mon_client {
#endif
};
-extern struct ceph_monmap *ceph_monmap_decode(void *p, void *end);
extern int ceph_monmap_contains(struct ceph_monmap *m,
struct ceph_entity_addr *addr);
diff --git a/include/linux/ceph/osd_client.h b/include/linux/ceph/osd_client.h
index 2294f963dab7..ad7fe5d10dcd 100644
--- a/include/linux/ceph/osd_client.h
+++ b/include/linux/ceph/osd_client.h
@@ -198,9 +198,9 @@ struct ceph_osd_request {
bool r_mempool;
struct completion r_completion; /* private to osd_client.c */
ceph_osdc_callback_t r_callback;
- struct list_head r_unsafe_item;
struct inode *r_inode; /* for use by callbacks */
+ struct list_head r_private_item; /* ditto */
void *r_priv; /* ditto */
/* set by submitter */
@@ -389,6 +389,14 @@ extern void ceph_osdc_handle_map(struct ceph_osd_client *osdc,
void ceph_osdc_update_epoch_barrier(struct ceph_osd_client *osdc, u32 eb);
void ceph_osdc_abort_requests(struct ceph_osd_client *osdc, int err);
+#define osd_req_op_data(oreq, whch, typ, fld) \
+({ \
+ struct ceph_osd_request *__oreq = (oreq); \
+ unsigned int __whch = (whch); \
+ BUG_ON(__whch >= __oreq->r_num_ops); \
+ &__oreq->r_ops[__whch].typ.fld; \
+})
+
extern void osd_req_op_init(struct ceph_osd_request *osd_req,
unsigned int which, u16 opcode, u32 flags);
@@ -497,7 +505,7 @@ int ceph_osdc_call(struct ceph_osd_client *osdc,
const char *class, const char *method,
unsigned int flags,
struct page *req_page, size_t req_len,
- struct page *resp_page, size_t *resp_len);
+ struct page **resp_pages, size_t *resp_len);
extern int ceph_osdc_readpages(struct ceph_osd_client *osdc,
struct ceph_vino vino,
diff --git a/include/linux/ceph/striper.h b/include/linux/ceph/striper.h
index cbd0d24b7148..3486636c0e6e 100644
--- a/include/linux/ceph/striper.h
+++ b/include/linux/ceph/striper.h
@@ -66,4 +66,6 @@ int ceph_extent_to_file(struct ceph_file_layout *l,
struct ceph_file_extent **file_extents,
u32 *num_file_extents);
+u64 ceph_get_num_objects(struct ceph_file_layout *l, u64 size);
+
#endif
diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
index c5311935239d..430e219e3aba 100644
--- a/include/linux/cgroup-defs.h
+++ b/include/linux/cgroup-defs.h
@@ -624,7 +624,7 @@ struct cftype {
/*
* Control Group subsystem type.
- * See Documentation/cgroup-v1/cgroups.rst for details
+ * See Documentation/admin-guide/cgroup-v1/cgroups.rst for details
*/
struct cgroup_subsys {
struct cgroup_subsys_state *(*css_alloc)(struct cgroup_subsys_state *parent_css);
diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h
index bb6118f79784..2ae7604783dd 100644
--- a/include/linux/clk-provider.h
+++ b/include/linux/clk-provider.h
@@ -9,8 +9,6 @@
#include <linux/of.h>
#include <linux/of_clk.h>
-#ifdef CONFIG_COMMON_CLK
-
/*
* flags used across common struct clk. these flags should only affect the
* top-level framework. custom flags for dealing with hardware specifics
@@ -807,7 +805,14 @@ void devm_clk_hw_unregister(struct device *dev, struct clk_hw *hw);
/* helper functions */
const char *__clk_get_name(const struct clk *clk);
const char *clk_hw_get_name(const struct clk_hw *hw);
+#ifdef CONFIG_COMMON_CLK
struct clk_hw *__clk_get_hw(struct clk *clk);
+#else
+static inline struct clk_hw *__clk_get_hw(struct clk *clk)
+{
+ return (struct clk_hw *)clk;
+}
+#endif
unsigned int clk_hw_get_num_parents(const struct clk_hw *hw);
struct clk_hw *clk_hw_get_parent(const struct clk_hw *hw);
struct clk_hw *clk_hw_get_parent_by_index(const struct clk_hw *hw,
@@ -867,8 +872,6 @@ static inline long divider_ro_round_rate(struct clk_hw *hw, unsigned long rate,
*/
unsigned long clk_hw_round_rate(struct clk_hw *hw, unsigned long rate);
-struct of_device_id;
-
struct clk_onecell_data {
struct clk **clks;
unsigned int clk_num;
@@ -879,8 +882,6 @@ struct clk_hw_onecell_data {
struct clk_hw *hws[];
};
-extern struct of_device_id __clk_of_table;
-
#define CLK_OF_DECLARE(name, compat, fn) OF_DECLARE_1(clk, name, compat, fn)
/*
@@ -904,6 +905,40 @@ extern struct of_device_id __clk_of_table;
.ops = _ops, \
})
+#define CLK_HW_INIT_HW(_name, _parent, _ops, _flags) \
+ (&(struct clk_init_data) { \
+ .flags = _flags, \
+ .name = _name, \
+ .parent_hws = (const struct clk_hw*[]) { _parent }, \
+ .num_parents = 1, \
+ .ops = _ops, \
+ })
+
+/*
+ * This macro is intended for drivers to be able to share the otherwise
+ * individual struct clk_hw[] compound literals created by the compiler
+ * when using CLK_HW_INIT_HW. It does NOT support multiple parents.
+ */
+#define CLK_HW_INIT_HWS(_name, _parent, _ops, _flags) \
+ (&(struct clk_init_data) { \
+ .flags = _flags, \
+ .name = _name, \
+ .parent_hws = _parent, \
+ .num_parents = 1, \
+ .ops = _ops, \
+ })
+
+#define CLK_HW_INIT_FW_NAME(_name, _parent, _ops, _flags) \
+ (&(struct clk_init_data) { \
+ .flags = _flags, \
+ .name = _name, \
+ .parent_data = (const struct clk_parent_data[]) { \
+ { .fw_name = _parent }, \
+ }, \
+ .num_parents = 1, \
+ .ops = _ops, \
+ })
+
#define CLK_HW_INIT_PARENTS(_name, _parents, _ops, _flags) \
(&(struct clk_init_data) { \
.flags = _flags, \
@@ -913,6 +948,24 @@ extern struct of_device_id __clk_of_table;
.ops = _ops, \
})
+#define CLK_HW_INIT_PARENTS_HW(_name, _parents, _ops, _flags) \
+ (&(struct clk_init_data) { \
+ .flags = _flags, \
+ .name = _name, \
+ .parent_hws = _parents, \
+ .num_parents = ARRAY_SIZE(_parents), \
+ .ops = _ops, \
+ })
+
+#define CLK_HW_INIT_PARENTS_DATA(_name, _parents, _ops, _flags) \
+ (&(struct clk_init_data) { \
+ .flags = _flags, \
+ .name = _name, \
+ .parent_data = _parents, \
+ .num_parents = ARRAY_SIZE(_parents), \
+ .ops = _ops, \
+ })
+
#define CLK_HW_INIT_NO_PARENT(_name, _ops, _flags) \
(&(struct clk_init_data) { \
.flags = _flags, \
@@ -933,6 +986,43 @@ extern struct of_device_id __clk_of_table;
_flags), \
}
+#define CLK_FIXED_FACTOR_HW(_struct, _name, _parent, \
+ _div, _mult, _flags) \
+ struct clk_fixed_factor _struct = { \
+ .div = _div, \
+ .mult = _mult, \
+ .hw.init = CLK_HW_INIT_HW(_name, \
+ _parent, \
+ &clk_fixed_factor_ops, \
+ _flags), \
+ }
+
+/*
+ * This macro allows the driver to reuse the _parent array for multiple
+ * fixed factor clk declarations.
+ */
+#define CLK_FIXED_FACTOR_HWS(_struct, _name, _parent, \
+ _div, _mult, _flags) \
+ struct clk_fixed_factor _struct = { \
+ .div = _div, \
+ .mult = _mult, \
+ .hw.init = CLK_HW_INIT_HWS(_name, \
+ _parent, \
+ &clk_fixed_factor_ops, \
+ _flags), \
+ }
+
+#define CLK_FIXED_FACTOR_FW_NAME(_struct, _name, _parent, \
+ _div, _mult, _flags) \
+ struct clk_fixed_factor _struct = { \
+ .div = _div, \
+ .mult = _mult, \
+ .hw.init = CLK_HW_INIT_FW_NAME(_name, \
+ _parent, \
+ &clk_fixed_factor_ops, \
+ _flags), \
+ }
+
#ifdef CONFIG_OF
int of_clk_add_provider(struct device_node *np,
struct clk *(*clk_src_get)(struct of_phandle_args *args,
@@ -1019,5 +1109,4 @@ static inline int of_clk_detect_critical(struct device_node *np, int index,
void clk_gate_restore_context(struct clk_hw *hw);
-#endif /* CONFIG_COMMON_CLK */
#endif /* CLK_PROVIDER_H */
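
A minimal provider-side sketch of the new clk_hw-based macros; the parent clock and all names here are illustrative assumptions, not part of this patch:

#include <linux/clk-provider.h>

/* Stand-in for a parent clock's clk_hw, assumed registered elsewhere. */
static struct clk_hw foo_pll_hw;

/* foo_pll_div2 = foo_pll / 2, linked to its parent by clk_hw pointer
 * rather than by a global name string; registered later with
 * devm_clk_hw_register(dev, &foo_pll_div2.hw). */
static CLK_FIXED_FACTOR_HW(foo_pll_div2, "foo_pll_div2", &foo_pll_hw,
			   2, 1, 0);
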
diff --git a/include/linux/clk.h b/include/linux/clk.h
index c8e3325868bd..3c096c7a51dc 100644
--- a/include/linux/clk.h
+++ b/include/linux/clk.h
@@ -329,6 +329,19 @@ int __must_check clk_bulk_get(struct device *dev, int num_clks,
*/
int __must_check clk_bulk_get_all(struct device *dev,
struct clk_bulk_data **clks);
+
+/**
+ * clk_bulk_get_optional - lookup and obtain a number of references to clock producer
+ * @dev: device for clock "consumer"
+ * @num_clks: the number of clk_bulk_data
+ * @clks: the clk_bulk_data table of consumer
+ *
+ * Behaves the same as clk_bulk_get() except where there is no clock producer.
+ * In this case, instead of returning -ENOENT, the function returns 0 and
+ * NULL for a clk for which a clock producer could not be determined.
+ */
+int __must_check clk_bulk_get_optional(struct device *dev, int num_clks,
+ struct clk_bulk_data *clks);
/**
* devm_clk_bulk_get - managed get multiple clk consumers
* @dev: device for clock "consumer"
@@ -344,6 +357,28 @@ int __must_check clk_bulk_get_all(struct device *dev,
int __must_check devm_clk_bulk_get(struct device *dev, int num_clks,
struct clk_bulk_data *clks);
/**
+ * devm_clk_bulk_get_optional - managed get multiple optional consumer clocks
+ * @dev: device for clock "consumer"
+ * @clks: pointer to the clk_bulk_data table of consumer
+ *
+ * Behaves the same as devm_clk_bulk_get() except where there is no clock
+ * producer. In this case, instead of returning -ENOENT, the function returns
+ * NULL for given clk. It is assumed all clocks in clk_bulk_data are optional.
+ *
+ * Returns 0 if all clocks specified in clk_bulk_data table are obtained
+ * successfully or for any clk there was no clk provider available, otherwise
+ * returns valid IS_ERR() condition containing errno.
+ * The implementation uses @dev and @clk_bulk_data.id to determine the
+ * clock consumer, and thereby the clock producer.
+ * The clock returned is stored in each @clk_bulk_data.clk field.
+ *
+ * Drivers must assume that the clock source is not enabled.
+ *
+ * clk_bulk_get should not be called from within interrupt context.
+ */
+int __must_check devm_clk_bulk_get_optional(struct device *dev, int num_clks,
+ struct clk_bulk_data *clks);
+/**
* devm_clk_bulk_get_all - managed get multiple clk consumers
* @dev: device for clock "consumer"
* @clks: pointer to the clk_bulk_data table of consumer
@@ -715,6 +750,12 @@ static inline int __must_check clk_bulk_get(struct device *dev, int num_clks,
return 0;
}
+static inline int __must_check clk_bulk_get_optional(struct device *dev,
+ int num_clks, struct clk_bulk_data *clks)
+{
+ return 0;
+}
+
static inline int __must_check clk_bulk_get_all(struct device *dev,
struct clk_bulk_data **clks)
{
@@ -738,6 +779,12 @@ static inline int __must_check devm_clk_bulk_get(struct device *dev, int num_clk
return 0;
}
+static inline int __must_check devm_clk_bulk_get_optional(struct device *dev,
+ int num_clks, struct clk_bulk_data *clks)
+{
+ return 0;
+}
+
static inline int __must_check devm_clk_bulk_get_all(struct device *dev,
struct clk_bulk_data **clks)
{
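
A minimal consumer-side sketch of the new optional bulk API; the driver data layout and clock names are illustrative assumptions:

#include <linux/clk.h>
#include <linux/device.h>

struct foo_priv {
	struct clk_bulk_data clks[2];	/* must outlive probe for devm_* */
};

static int foo_get_clocks(struct device *dev, struct foo_priv *priv)
{
	int ret;

	priv->clks[0].id = "bus";	/* hypothetical clock names */
	priv->clks[1].id = "ref";

	ret = devm_clk_bulk_get_optional(dev, ARRAY_SIZE(priv->clks),
					 priv->clks);
	if (ret)
		return ret;	/* a real lookup error, not a missing clock */

	/* Missing clocks come back as NULL; NULL clks are no-ops for the
	 * prepare/enable calls. */
	return clk_bulk_prepare_enable(ARRAY_SIZE(priv->clks), priv->clks);
}
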
diff --git a/include/linux/coda.h b/include/linux/coda.h
index d30209b9cef8..0ca0c83fdb1c 100644
--- a/include/linux/coda.h
+++ b/include/linux/coda.h
@@ -58,8 +58,7 @@ Mellon the rights to redistribute these changes without encumbrance.
#ifndef _CODA_HEADER_
#define _CODA_HEADER_
-#if defined(__linux__)
typedef unsigned long long u_quad_t;
-#endif
+
#include <uapi/linux/coda.h>
#endif
diff --git a/include/linux/coda_psdev.h b/include/linux/coda_psdev.h
deleted file mode 100644
index 15170954aa2b..000000000000
--- a/include/linux/coda_psdev.h
+++ /dev/null
@@ -1,72 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __CODA_PSDEV_H
-#define __CODA_PSDEV_H
-
-#include <linux/backing-dev.h>
-#include <linux/mutex.h>
-#include <uapi/linux/coda_psdev.h>
-
-struct kstatfs;
-
-/* communication pending/processing queues */
-struct venus_comm {
- u_long vc_seq;
- wait_queue_head_t vc_waitq; /* Venus wait queue */
- struct list_head vc_pending;
- struct list_head vc_processing;
- int vc_inuse;
- struct super_block *vc_sb;
- struct mutex vc_mutex;
-};
-
-
-static inline struct venus_comm *coda_vcp(struct super_block *sb)
-{
- return (struct venus_comm *)((sb)->s_fs_info);
-}
-
-/* upcalls */
-int venus_rootfid(struct super_block *sb, struct CodaFid *fidp);
-int venus_getattr(struct super_block *sb, struct CodaFid *fid,
- struct coda_vattr *attr);
-int venus_setattr(struct super_block *, struct CodaFid *, struct coda_vattr *);
-int venus_lookup(struct super_block *sb, struct CodaFid *fid,
- const char *name, int length, int *type,
- struct CodaFid *resfid);
-int venus_close(struct super_block *sb, struct CodaFid *fid, int flags,
- kuid_t uid);
-int venus_open(struct super_block *sb, struct CodaFid *fid, int flags,
- struct file **f);
-int venus_mkdir(struct super_block *sb, struct CodaFid *dirfid,
- const char *name, int length,
- struct CodaFid *newfid, struct coda_vattr *attrs);
-int venus_create(struct super_block *sb, struct CodaFid *dirfid,
- const char *name, int length, int excl, int mode,
- struct CodaFid *newfid, struct coda_vattr *attrs) ;
-int venus_rmdir(struct super_block *sb, struct CodaFid *dirfid,
- const char *name, int length);
-int venus_remove(struct super_block *sb, struct CodaFid *dirfid,
- const char *name, int length);
-int venus_readlink(struct super_block *sb, struct CodaFid *fid,
- char *buffer, int *length);
-int venus_rename(struct super_block *, struct CodaFid *new_fid,
- struct CodaFid *old_fid, size_t old_length,
- size_t new_length, const char *old_name,
- const char *new_name);
-int venus_link(struct super_block *sb, struct CodaFid *fid,
- struct CodaFid *dirfid, const char *name, int len );
-int venus_symlink(struct super_block *sb, struct CodaFid *fid,
- const char *name, int len, const char *symname, int symlen);
-int venus_access(struct super_block *sb, struct CodaFid *fid, int mask);
-int venus_pioctl(struct super_block *sb, struct CodaFid *fid,
- unsigned int cmd, struct PioctlData *data);
-int coda_downcall(struct venus_comm *vcp, int opcode, union outputArgs *out);
-int venus_fsync(struct super_block *sb, struct CodaFid *fid);
-int venus_statfs(struct dentry *dentry, struct kstatfs *sfs);
-
-/*
- * Statistics
- */
-
-extern struct venus_comm coda_comms[];
-#endif
diff --git a/include/linux/compat.h b/include/linux/compat.h
index ebddcb6cfcf8..16dafd9f4b86 100644
--- a/include/linux/compat.h
+++ b/include/linux/compat.h
@@ -138,8 +138,7 @@ typedef struct {
compat_sigset_word sig[_COMPAT_NSIG_WORDS];
} compat_sigset_t;
-int set_compat_user_sigmask(const compat_sigset_t __user *usigmask,
- sigset_t *set, sigset_t *oldset,
+int set_compat_user_sigmask(const compat_sigset_t __user *umask,
size_t sigsetsize);
struct compat_sigaction {
diff --git a/include/linux/connector.h b/include/linux/connector.h
index 1d72ef76f24f..6b6c7396a584 100644
--- a/include/linux/connector.h
+++ b/include/linux/connector.h
@@ -55,10 +55,71 @@ struct cn_dev {
struct cn_queue_dev *cbdev;
};
+/**
+ * cn_add_callback() - Registers new callback with connector core.
+ *
+ * @id: unique connector's user identifier.
+ * It must be registered in connector.h for legal
+ * in-kernel users.
+ * @name: connector's callback symbolic name.
+ * @callback: connector's callback.
+ * parameters are %cn_msg and the sender's credentials
+ */
int cn_add_callback(struct cb_id *id, const char *name,
void (*callback)(struct cn_msg *, struct netlink_skb_parms *));
-void cn_del_callback(struct cb_id *);
+/**
+ * cn_del_callback() - Unregisters new callback with connector core.
+ *
+ * @id: unique connector's user identifier.
+ */
+void cn_del_callback(struct cb_id *id);
+
+
+/**
+ * cn_netlink_send_mult - Sends message to the specified groups.
+ *
+ * @msg: message header(with attached data).
+ * @len: Number of @msg to be sent.
+ * @portid: destination port.
+ * If non-zero the message will be sent to the given port,
+ * which should be set to the original sender.
+ * @group: destination group.
+ * If @portid and @group is zero, then appropriate group will
+ * be searched through all registered connector users, and
+ * message will be delivered to the group which was created
+ * for user with the same ID as in @msg.
+ * If @group is not zero, then message will be delivered
+ * to the specified group.
+ * @gfp_mask: GFP mask.
+ *
+ * It can be safely called from softirq context, but may silently
+ * fail under strong memory pressure.
+ *
+ * If there are no listeners for given group %-ESRCH can be returned.
+ */
int cn_netlink_send_mult(struct cn_msg *msg, u16 len, u32 portid, u32 group, gfp_t gfp_mask);
+
+/**
+ * cn_netlink_send_mult - Sends message to the specified groups.
+ *
+ * @msg: message header(with attached data).
+ * @portid: destination port.
+ * If non-zero the message will be sent to the given port,
+ * which should be set to the original sender.
+ * @group: destination group.
+ * If @portid and @group is zero, then appropriate group will
+ * be searched through all registered connector users, and
+ * message will be delivered to the group which was created
+ * for user with the same ID as in @msg.
+ * If @group is not zero, then message will be delivered
+ * to the specified group.
+ * @gfp_mask: GFP mask.
+ *
+ * It can be safely called from softirq context, but may silently
+ * fail under strong memory pressure.
+ *
+ * If there are no listeners for given group %-ESRCH can be returned.
+ */
int cn_netlink_send(struct cn_msg *msg, u32 portid, u32 group, gfp_t gfp_mask);
int cn_queue_add_callback(struct cn_queue_dev *dev, const char *name,
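
A minimal sketch of registering a callback through this interface; the idx/val pair, names and message handling are illustrative assumptions:

#include <linux/connector.h>
#include <linux/printk.h>

static struct cb_id example_cn_id = { .idx = CN_NETLINK_USERS + 3, .val = 0x1 };

static void example_cn_callback(struct cn_msg *msg, struct netlink_skb_parms *nsp)
{
	pr_info("example_cn: %u byte payload from port %u\n",
		msg->len, nsp->portid);
}

static int example_cn_register(void)
{
	/* Fails if another user already claimed the same cb_id. */
	return cn_add_callback(&example_cn_id, "example_cn", example_cn_callback);
}
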
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index 46b167fba155..536a049d7ecc 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -47,11 +47,6 @@ struct cpufreq_cpuinfo {
unsigned int transition_latency;
};
-struct cpufreq_user_policy {
- unsigned int min; /* in kHz */
- unsigned int max; /* in kHz */
-};
-
struct cpufreq_policy {
/* CPUs sharing clock, require sw coordination */
cpumask_var_t cpus; /* Online CPUs only */
@@ -81,7 +76,8 @@ struct cpufreq_policy {
struct work_struct update; /* if update_policy() needs to be
* called, but you're in IRQ context */
- struct cpufreq_user_policy user_policy;
+ struct dev_pm_qos_request *min_freq_req;
+ struct dev_pm_qos_request *max_freq_req;
struct cpufreq_frequency_table *freq_table;
enum cpufreq_table_sorting freq_table_sorted;
@@ -144,6 +140,9 @@ struct cpufreq_policy {
/* Pointer to the cooling device if used for thermal mitigation */
struct thermal_cooling_device *cdev;
+
+ struct notifier_block nb_min;
+ struct notifier_block nb_max;
};
struct cpufreq_freqs {
@@ -201,6 +200,7 @@ void cpufreq_cpu_release(struct cpufreq_policy *policy);
int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu);
int cpufreq_set_policy(struct cpufreq_policy *policy,
struct cpufreq_policy *new_policy);
+void refresh_frequency_limits(struct cpufreq_policy *policy);
void cpufreq_update_policy(unsigned int cpu);
void cpufreq_update_limits(unsigned int cpu);
bool have_governor_per_policy(void);
@@ -992,7 +992,7 @@ extern struct freq_attr *cpufreq_generic_attr[];
int cpufreq_table_validate_and_sort(struct cpufreq_policy *policy);
unsigned int cpufreq_generic_get(unsigned int cpu);
-int cpufreq_generic_init(struct cpufreq_policy *policy,
+void cpufreq_generic_init(struct cpufreq_policy *policy,
struct cpufreq_frequency_table *table,
unsigned int transition_latency);
#endif /* _LINUX_CPUFREQ_H */
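
With cpufreq_generic_init() now returning void, a driver ->init() callback built on it reduces to roughly the following sketch (the frequency table and latency value are placeholders):

#include <linux/cpufreq.h>

static struct cpufreq_frequency_table foo_freq_table[] = {
	{ .frequency = 500000 },	/* kHz, hypothetical OPPs */
	{ .frequency = 1000000 },
	{ .frequency = CPUFREQ_TABLE_END },
};

static int foo_cpufreq_init(struct cpufreq_policy *policy)
{
	/* Nothing to propagate any more: the helper just fills in the
	 * policy's frequency table and transition latency. */
	cpufreq_generic_init(policy, foo_freq_table, 300 * 1000 /* ns */);
	return 0;
}
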
diff --git a/include/linux/dax.h b/include/linux/dax.h
index becaea5f4488..9bd8528bd305 100644
--- a/include/linux/dax.h
+++ b/include/linux/dax.h
@@ -7,6 +7,9 @@
#include <linux/radix-tree.h>
#include <asm/pgtable.h>
+/* Flag for synchronous flush */
+#define DAXDEV_F_SYNC (1UL << 0)
+
typedef unsigned long dax_entry_t;
struct iomap_ops;
@@ -38,18 +41,40 @@ extern struct attribute_group dax_attribute_group;
#if IS_ENABLED(CONFIG_DAX)
struct dax_device *dax_get_by_host(const char *host);
struct dax_device *alloc_dax(void *private, const char *host,
- const struct dax_operations *ops);
+ const struct dax_operations *ops, unsigned long flags);
void put_dax(struct dax_device *dax_dev);
void kill_dax(struct dax_device *dax_dev);
void dax_write_cache(struct dax_device *dax_dev, bool wc);
bool dax_write_cache_enabled(struct dax_device *dax_dev);
+bool __dax_synchronous(struct dax_device *dax_dev);
+static inline bool dax_synchronous(struct dax_device *dax_dev)
+{
+ return __dax_synchronous(dax_dev);
+}
+void __set_dax_synchronous(struct dax_device *dax_dev);
+static inline void set_dax_synchronous(struct dax_device *dax_dev)
+{
+ __set_dax_synchronous(dax_dev);
+}
+/*
+ * Check if given mapping is supported by the file / underlying device.
+ */
+static inline bool daxdev_mapping_supported(struct vm_area_struct *vma,
+ struct dax_device *dax_dev)
+{
+ if (!(vma->vm_flags & VM_SYNC))
+ return true;
+ if (!IS_DAX(file_inode(vma->vm_file)))
+ return false;
+ return dax_synchronous(dax_dev);
+}
#else
static inline struct dax_device *dax_get_by_host(const char *host)
{
return NULL;
}
static inline struct dax_device *alloc_dax(void *private, const char *host,
- const struct dax_operations *ops)
+ const struct dax_operations *ops, unsigned long flags)
{
/*
* Callers should check IS_ENABLED(CONFIG_DAX) to know if this
@@ -70,6 +95,18 @@ static inline bool dax_write_cache_enabled(struct dax_device *dax_dev)
{
return false;
}
+static inline bool dax_synchronous(struct dax_device *dax_dev)
+{
+ return true;
+}
+static inline void set_dax_synchronous(struct dax_device *dax_dev)
+{
+}
+static inline bool daxdev_mapping_supported(struct vm_area_struct *vma,
+ struct dax_device *dax_dev)
+{
+ return !(vma->vm_flags & VM_SYNC);
+}
#endif
struct writeback_control;
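
A sketch of how a filesystem ->mmap() handler would use the new daxdev_mapping_supported() helper to refuse MAP_SYNC mappings on a non-synchronous device; the foo_* helpers are illustrative assumptions:

static int foo_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct dax_device *dax_dev = foo_sb_dax_dev(file_inode(file)->i_sb);

	/* MAP_SYNC (VM_SYNC) is only honoured on synchronous DAX devices. */
	if (!daxdev_mapping_supported(vma, dax_dev))
		return -EOPNOTSUPP;

	file_accessed(file);
	vma->vm_ops = &foo_dax_vm_ops;
	return 0;
}
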
diff --git a/include/linux/dcache.h b/include/linux/dcache.h
index 5e0eadf7de55..9451011ac014 100644
--- a/include/linux/dcache.h
+++ b/include/linux/dcache.h
@@ -291,7 +291,6 @@ static inline unsigned d_count(const struct dentry *dentry)
*/
extern __printf(4, 5)
char *dynamic_dname(struct dentry *, char *, int, const char *, ...);
-extern char *simple_dname(struct dentry *, char *, int);
extern char *__d_path(const struct path *, const struct path *, char *, int);
extern char *d_absolute_path(const struct path *, char *, int);
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
index 3b470cb03b66..399ad8632356 100644
--- a/include/linux/device-mapper.h
+++ b/include/linux/device-mapper.h
@@ -529,29 +529,20 @@ void *dm_vcalloc(unsigned long nmemb, unsigned long elem_size);
*---------------------------------------------------------------*/
#define DM_NAME "device-mapper"
-#define DM_RATELIMIT(pr_func, fmt, ...) \
-do { \
- static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL, \
- DEFAULT_RATELIMIT_BURST); \
- \
- if (__ratelimit(&rs)) \
- pr_func(DM_FMT(fmt), ##__VA_ARGS__); \
-} while (0)
-
#define DM_FMT(fmt) DM_NAME ": " DM_MSG_PREFIX ": " fmt "\n"
#define DMCRIT(fmt, ...) pr_crit(DM_FMT(fmt), ##__VA_ARGS__)
#define DMERR(fmt, ...) pr_err(DM_FMT(fmt), ##__VA_ARGS__)
-#define DMERR_LIMIT(fmt, ...) DM_RATELIMIT(pr_err, fmt, ##__VA_ARGS__)
+#define DMERR_LIMIT(fmt, ...) pr_err_ratelimited(DM_FMT(fmt), ##__VA_ARGS__)
#define DMWARN(fmt, ...) pr_warn(DM_FMT(fmt), ##__VA_ARGS__)
-#define DMWARN_LIMIT(fmt, ...) DM_RATELIMIT(pr_warn, fmt, ##__VA_ARGS__)
+#define DMWARN_LIMIT(fmt, ...) pr_warn_ratelimited(DM_FMT(fmt), ##__VA_ARGS__)
#define DMINFO(fmt, ...) pr_info(DM_FMT(fmt), ##__VA_ARGS__)
-#define DMINFO_LIMIT(fmt, ...) DM_RATELIMIT(pr_info, fmt, ##__VA_ARGS__)
+#define DMINFO_LIMIT(fmt, ...) pr_info_ratelimited(DM_FMT(fmt), ##__VA_ARGS__)
#ifdef CONFIG_DM_DEBUG
#define DMDEBUG(fmt, ...) printk(KERN_DEBUG DM_FMT(fmt), ##__VA_ARGS__)
-#define DMDEBUG_LIMIT(fmt, ...) DM_RATELIMIT(pr_debug, fmt, ##__VA_ARGS__)
+#define DMDEBUG_LIMIT(fmt, ...) pr_debug_ratelimited(DM_FMT(fmt), ##__VA_ARGS__)
#else
#define DMDEBUG(fmt, ...) no_printk(fmt, ##__VA_ARGS__)
#define DMDEBUG_LIMIT(fmt, ...) no_printk(fmt, ##__VA_ARGS__)
diff --git a/include/linux/device.h b/include/linux/device.h
index 5eabfa0c4dee..c330b75c6c57 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -6,7 +6,7 @@
* Copyright (c) 2004-2009 Greg Kroah-Hartman <gregkh@suse.de>
* Copyright (c) 2008-2009 Novell Inc.
*
- * See Documentation/driver-model/ for more information.
+ * See Documentation/driver-api/driver-model/ for more information.
*/
#ifndef _DEVICE_H_
diff --git a/include/linux/dma/edma.h b/include/linux/dma/edma.h
new file mode 100644
index 000000000000..cab6e18773da
--- /dev/null
+++ b/include/linux/dma/edma.h
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2018-2019 Synopsys, Inc. and/or its affiliates.
+ * Synopsys DesignWare eDMA core driver
+ *
+ * Author: Gustavo Pimentel <gustavo.pimentel@synopsys.com>
+ */
+
+#ifndef _DW_EDMA_H
+#define _DW_EDMA_H
+
+#include <linux/device.h>
+#include <linux/dmaengine.h>
+
+struct dw_edma;
+
+/**
+ * struct dw_edma_chip - representation of DesignWare eDMA controller hardware
+ * @dev: struct device of the eDMA controller
+ * @id: instance ID
+ * @irq: irq line
+ * @dw: struct dw_edma that is filed by dw_edma_probe()
+ */
+struct dw_edma_chip {
+ struct device *dev;
+ int id;
+ int irq;
+ struct dw_edma *dw;
+};
+
+/* Export to the platform drivers */
+#if IS_ENABLED(CONFIG_DW_EDMA)
+int dw_edma_probe(struct dw_edma_chip *chip);
+int dw_edma_remove(struct dw_edma_chip *chip);
+#else
+static inline int dw_edma_probe(struct dw_edma_chip *chip)
+{
+ return -ENODEV;
+}
+
+static inline int dw_edma_remove(struct dw_edma_chip *chip)
+{
+ return 0;
+}
+#endif /* CONFIG_DW_EDMA */
+
+#endif /* _DW_EDMA_H */
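
A minimal sketch of how a platform or PCIe glue driver would hand its resources to the eDMA core through this interface; obtaining dev/irq and the register mapping is assumed to happen elsewhere:

#include <linux/device.h>
#include <linux/dma/edma.h>

static int foo_edma_setup(struct device *dev, int irq)
{
	struct dw_edma_chip *chip;

	chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
	if (!chip)
		return -ENOMEM;

	chip->dev = dev;
	chip->id = 0;
	chip->irq = irq;

	/* On success the core fills chip->dw and registers the channels
	 * with the dmaengine framework; dw_edma_remove() undoes this. */
	return dw_edma_probe(chip);
}
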
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index c952f987ee57..8fcdee1c0cf9 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -1302,7 +1302,8 @@ enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie);
enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx);
void dma_issue_pending_all(void);
struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
- dma_filter_fn fn, void *fn_param);
+ dma_filter_fn fn, void *fn_param,
+ struct device_node *np);
struct dma_chan *dma_request_slave_channel(struct device *dev, const char *name);
struct dma_chan *dma_request_chan(struct device *dev, const char *name);
@@ -1327,7 +1328,9 @@ static inline void dma_issue_pending_all(void)
{
}
static inline struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
- dma_filter_fn fn, void *fn_param)
+ dma_filter_fn fn,
+ void *fn_param,
+ struct device_node *np)
{
return NULL;
}
@@ -1399,7 +1402,8 @@ void dma_async_device_unregister(struct dma_device *device);
void dma_run_dependencies(struct dma_async_tx_descriptor *tx);
struct dma_chan *dma_get_slave_channel(struct dma_chan *chan);
struct dma_chan *dma_get_any_slave_channel(struct dma_device *device);
-#define dma_request_channel(mask, x, y) __dma_request_channel(&(mask), x, y)
+#define dma_request_channel(mask, x, y) \
+ __dma_request_channel(&(mask), x, y, NULL)
#define dma_request_slave_channel_compat(mask, x, y, dev, name) \
__dma_request_slave_channel_compat(&(mask), x, y, dev, name)
@@ -1417,6 +1421,6 @@ static inline struct dma_chan
if (!fn || !fn_param)
return NULL;
- return __dma_request_channel(mask, fn, fn_param);
+ return __dma_request_channel(mask, fn, fn_param, NULL);
}
#endif /* DMAENGINE_H */
diff --git a/include/linux/filter.h b/include/linux/filter.h
index 6d944369ca87..ff65d22cf336 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -747,7 +747,7 @@ bpf_ctx_narrow_access_ok(u32 off, u32 size, u32 size_default)
return size <= size_default && (size & (size - 1)) == 0;
}
-#define bpf_ctx_wide_store_ok(off, size, type, field) \
+#define bpf_ctx_wide_access_ok(off, size, type, field) \
(size == sizeof(__u64) && \
off >= offsetof(type, field) && \
off + sizeof(__u64) <= offsetofend(type, field) && \
diff --git a/include/linux/fpga/adi-axi-common.h b/include/linux/fpga/adi-axi-common.h
new file mode 100644
index 000000000000..7fc95d5c95bb
--- /dev/null
+++ b/include/linux/fpga/adi-axi-common.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Analog Devices AXI common registers & definitions
+ *
+ * Copyright 2019 Analog Devices Inc.
+ *
+ * https://wiki.analog.com/resources/fpga/docs/axi_ip
+ * https://wiki.analog.com/resources/fpga/docs/hdl/regmap
+ */
+
+#ifndef ADI_AXI_COMMON_H_
+#define ADI_AXI_COMMON_H_
+
+#define ADI_AXI_REG_VERSION 0x0000
+
+#define ADI_AXI_PCORE_VER(major, minor, patch) \
+ (((major) << 16) | ((minor) << 8) | (patch))
+
+#endif /* ADI_AXI_COMMON_H_ */
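
A brief sketch of the intended use of these definitions: read the core's version register and compare it against a minimum supported PCORE version (the base pointer and version threshold are illustrative):

#include <linux/errno.h>
#include <linux/io.h>
#include <linux/fpga/adi-axi-common.h>

static int foo_check_pcore_version(void __iomem *base)
{
	u32 ver = readl(base + ADI_AXI_REG_VERSION);

	if (ver < ADI_AXI_PCORE_VER(1, 0, 0))
		return -ENODEV;	/* core too old for this driver */

	return 0;
}
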
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 75f2ed289a3f..56b8e358af5c 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -2210,9 +2210,6 @@ struct file_system_type {
#define MODULE_ALIAS_FS(NAME) MODULE_ALIAS("fs-" NAME)
-extern struct dentry *mount_ns(struct file_system_type *fs_type,
- int flags, void *data, void *ns, struct user_namespace *user_ns,
- int (*fill_super)(struct super_block *, void *, int));
#ifdef CONFIG_BLOCK
extern struct dentry *mount_bdev(struct file_system_type *fs_type,
int flags, const char *dev_name, void *data,
@@ -2252,28 +2249,10 @@ void free_anon_bdev(dev_t);
struct super_block *sget_fc(struct fs_context *fc,
int (*test)(struct super_block *, struct fs_context *),
int (*set)(struct super_block *, struct fs_context *));
-struct super_block *sget_userns(struct file_system_type *type,
- int (*test)(struct super_block *,void *),
- int (*set)(struct super_block *,void *),
- int flags, struct user_namespace *user_ns,
- void *data);
struct super_block *sget(struct file_system_type *type,
int (*test)(struct super_block *,void *),
int (*set)(struct super_block *,void *),
int flags, void *data);
-extern struct dentry *mount_pseudo_xattr(struct file_system_type *, char *,
- const struct super_operations *ops,
- const struct xattr_handler **xattr,
- const struct dentry_operations *dops,
- unsigned long);
-
-static inline struct dentry *
-mount_pseudo(struct file_system_type *fs_type, char *name,
- const struct super_operations *ops,
- const struct dentry_operations *dops, unsigned long magic)
-{
- return mount_pseudo_xattr(fs_type, name, ops, NULL, dops, magic);
-}
/* Alas, no aliases. Too much hassle with bringing module.h everywhere */
#define fops_get(fops) \
diff --git a/include/linux/fs_context.h b/include/linux/fs_context.h
index 4933187d5b9a..7c6fe3d47fa6 100644
--- a/include/linux/fs_context.h
+++ b/include/linux/fs_context.h
@@ -99,6 +99,7 @@ struct fs_context {
void *s_fs_info; /* Proposed s_fs_info */
unsigned int sb_flags; /* Proposed superblock flags (SB_*) */
unsigned int sb_flags_mask; /* Superblock flags that were changed */
+ unsigned int s_iflags; /* OR'd with sb->s_iflags */
unsigned int lsm_flags; /* Information flags from the fs to the LSM */
enum fs_context_purpose purpose:8;
enum fs_context_phase phase:8; /* The phase the context is in */
@@ -146,6 +147,12 @@ extern int vfs_get_super(struct fs_context *fc,
enum vfs_get_super_keying keying,
int (*fill_super)(struct super_block *sb,
struct fs_context *fc));
+extern int get_tree_nodev(struct fs_context *fc,
+ int (*fill_super)(struct super_block *sb,
+ struct fs_context *fc));
+extern int get_tree_single(struct fs_context *fc,
+ int (*fill_super)(struct super_block *sb,
+ struct fs_context *fc));
extern const struct file_operations fscontext_fops;
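get_tree_nodev() and get_tree_single() wrap vfs_get_super() for the two common keying modes. A rough sketch of the nodev variant in a converted filesystem; the examplefs_* names are placeholders and the fill_super body is deliberately a stub:

#include <linux/fs.h>
#include <linux/fs_context.h>

static int examplefs_fill_super(struct super_block *sb, struct fs_context *fc)
{
        /* A real filesystem would set sb->s_op and allocate a root here. */
        sb->s_magic = 0x45584d50;       /* placeholder magic */
        return 0;
}

static int examplefs_get_tree(struct fs_context *fc)
{
        /* Every mount gets its own unkeyed superblock. */
        return get_tree_nodev(fc, examplefs_fill_super);
}

static const struct fs_context_operations examplefs_context_ops = {
        .get_tree       = examplefs_get_tree,
};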
diff --git a/include/linux/fs_pin.h b/include/linux/fs_pin.h
index 7cab74d66f85..bdd09fd2520c 100644
--- a/include/linux/fs_pin.h
+++ b/include/linux/fs_pin.h
@@ -20,6 +20,5 @@ static inline void init_fs_pin(struct fs_pin *p, void (*kill)(struct fs_pin *))
}
void pin_remove(struct fs_pin *);
-void pin_insert_group(struct fs_pin *, struct vfsmount *, struct hlist_head *);
void pin_insert(struct fs_pin *, struct vfsmount *);
void pin_kill(struct fs_pin *);
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index 25e2995d4a4c..8a8cb3c401b2 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -427,8 +427,8 @@ struct dyn_ftrace *ftrace_rec_iter_record(struct ftrace_rec_iter *iter);
iter = ftrace_rec_iter_next(iter))
-int ftrace_update_record(struct dyn_ftrace *rec, int enable);
-int ftrace_test_record(struct dyn_ftrace *rec, int enable);
+int ftrace_update_record(struct dyn_ftrace *rec, bool enable);
+int ftrace_test_record(struct dyn_ftrace *rec, bool enable);
void ftrace_run_stop_machine(int command);
unsigned long ftrace_location(unsigned long ip);
unsigned long ftrace_location_range(unsigned long start, unsigned long end);
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index 7cd5c150c21d..45ede62aa85b 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -121,6 +121,23 @@ static inline bool __transparent_hugepage_enabled(struct vm_area_struct *vma)
bool transparent_hugepage_enabled(struct vm_area_struct *vma);
+#define HPAGE_CACHE_INDEX_MASK (HPAGE_PMD_NR - 1)
+
+static inline bool transhuge_vma_suitable(struct vm_area_struct *vma,
+ unsigned long haddr)
+{
+ /* Don't have to check pgoff for anonymous vma */
+ if (!vma_is_anonymous(vma)) {
+ if (((vma->vm_start >> PAGE_SHIFT) & HPAGE_CACHE_INDEX_MASK) !=
+ (vma->vm_pgoff & HPAGE_CACHE_INDEX_MASK))
+ return false;
+ }
+
+ if (haddr < vma->vm_start || haddr + HPAGE_PMD_SIZE > vma->vm_end)
+ return false;
+ return true;
+}
+
#define transparent_hugepage_use_zero_page() \
(transparent_hugepage_flags & \
(1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG))
@@ -271,6 +288,12 @@ static inline bool transparent_hugepage_enabled(struct vm_area_struct *vma)
return false;
}
+static inline bool transhuge_vma_suitable(struct vm_area_struct *vma,
+ unsigned long haddr)
+{
+ return false;
+}
+
static inline void prep_transhuge_page(struct page *page) {}
#define transparent_hugepage_flags 0UL
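transhuge_vma_suitable() centralizes the alignment and bounds check performed before a PMD-sized page is installed. A hedged illustration, assuming CONFIG_TRANSPARENT_HUGEPAGE and with example_can_use_pmd() invented for the purpose:

#include <linux/mm.h>
#include <linux/huge_mm.h>

static bool example_can_use_pmd(struct vm_area_struct *vma, unsigned long address)
{
        unsigned long haddr = address & HPAGE_PMD_MASK;

        /*
         * For file mappings the VMA start and pgoff must line up on a
         * PMD boundary, and the huge page must fit inside the VMA.
         */
        return transhuge_vma_suitable(vma, haddr);
}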
diff --git a/include/linux/hw_random.h b/include/linux/hw_random.h
index c0b93e0ff0c0..8e6dd908da21 100644
--- a/include/linux/hw_random.h
+++ b/include/linux/hw_random.h
@@ -1,7 +1,7 @@
/*
Hardware Random Number Generator
- Please read Documentation/hw_random.txt for details on use.
+ Please read Documentation/admin-guide/hw_random.rst for details on use.
----------------------------------------------------------
This software may be used and distributed according to the terms
diff --git a/include/linux/hwspinlock.h b/include/linux/hwspinlock.h
index 0afe693be5f4..bfe7c1f1ac6d 100644
--- a/include/linux/hwspinlock.h
+++ b/include/linux/hwspinlock.h
@@ -14,9 +14,10 @@
#include <linux/sched.h>
/* hwspinlock mode argument */
-#define HWLOCK_IRQSTATE 0x01 /* Disable interrupts, save state */
-#define HWLOCK_IRQ 0x02 /* Disable interrupts, don't save state */
-#define HWLOCK_RAW 0x03
+#define HWLOCK_IRQSTATE 0x01 /* Disable interrupts, save state */
+#define HWLOCK_IRQ 0x02 /* Disable interrupts, don't save state */
+#define HWLOCK_RAW 0x03
+#define HWLOCK_IN_ATOMIC 0x04 /* Called while in atomic context */
struct device;
struct device_node;
@@ -223,6 +224,23 @@ static inline int hwspin_trylock_raw(struct hwspinlock *hwlock)
}
/**
+ * hwspin_trylock_in_atomic() - attempt to lock a specific hwspinlock
+ * @hwlock: an hwspinlock which we want to trylock
+ *
+ * This function attempts to lock an hwspinlock, and will immediately fail
+ * if the hwspinlock is already taken.
+ *
+ * This function shall be called only from an atomic context.
+ *
+ * Returns 0 if we successfully locked the hwspinlock, -EBUSY if
+ * the hwspinlock was already taken, and -EINVAL if @hwlock is invalid.
+ */
+static inline int hwspin_trylock_in_atomic(struct hwspinlock *hwlock)
+{
+ return __hwspin_trylock(hwlock, HWLOCK_IN_ATOMIC, NULL);
+}
+
+/**
* hwspin_trylock() - attempt to lock a specific hwspinlock
* @hwlock: an hwspinlock which we want to trylock
*
@@ -313,6 +331,28 @@ int hwspin_lock_timeout_raw(struct hwspinlock *hwlock, unsigned int to)
}
/**
+ * hwspin_lock_timeout_in_atomic() - lock an hwspinlock with timeout limit
+ * @hwlock: the hwspinlock to be locked
+ * @to: timeout value in msecs
+ *
+ * This function locks the underlying @hwlock. If the @hwlock
+ * is already taken, the function will busy loop waiting for it to
+ * be released, but give up when @timeout msecs have elapsed.
+ *
+ * This function shall be called only from an atomic context and the timeout
+ * value shall not exceed a few msecs.
+ *
+ * Returns 0 when the @hwlock was successfully taken, and an appropriate
+ * error code otherwise (most notably an -ETIMEDOUT if the @hwlock is still
+ * busy after @timeout msecs). The function will never sleep.
+ */
+static inline
+int hwspin_lock_timeout_in_atomic(struct hwspinlock *hwlock, unsigned int to)
+{
+ return __hwspin_lock_timeout(hwlock, to, HWLOCK_IN_ATOMIC, NULL);
+}
+
+/**
* hwspin_lock_timeout() - lock an hwspinlock with timeout limit
* @hwlock: the hwspinlock to be locked
* @to: timeout value in msecs
@@ -387,6 +427,21 @@ static inline void hwspin_unlock_raw(struct hwspinlock *hwlock)
}
/**
+ * hwspin_unlock_in_atomic() - unlock hwspinlock
+ * @hwlock: a previously-acquired hwspinlock which we want to unlock
+ *
+ * This function will unlock a specific hwspinlock.
+ *
+ * @hwlock must be already locked (e.g. by hwspin_trylock()) before calling
+ * this function: it is a bug to call unlock on a @hwlock that is already
+ * unlocked.
+ */
+static inline void hwspin_unlock_in_atomic(struct hwspinlock *hwlock)
+{
+ __hwspin_unlock(hwlock, HWLOCK_IN_ATOMIC, NULL);
+}
+
+/**
* hwspin_unlock() - unlock hwspinlock
* @hwlock: a previously-acquired hwspinlock which we want to unlock
*
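Taken together, the *_in_atomic variants let callers that already run with interrupts disabled use a hwspinlock without the core touching the local IRQ state or sleeping. A minimal sketch; obtaining @hwlock, for instance via hwspin_lock_request_specific(), is assumed to happen elsewhere:

#include <linux/hwspinlock.h>

static int example_poke_shared_reg(struct hwspinlock *hwlock)
{
        int ret;

        /* Busy-wait for at most 1 ms; never sleeps, never disables IRQs. */
        ret = hwspin_lock_timeout_in_atomic(hwlock, 1);
        if (ret)
                return ret;

        /* ... access the resource shared with the remote processor ... */

        hwspin_unlock_in_atomic(hwlock);
        return 0;
}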
diff --git a/include/linux/init.h b/include/linux/init.h
index 5255069f5a9f..212fc9e2f691 100644
--- a/include/linux/init.h
+++ b/include/linux/init.h
@@ -137,6 +137,8 @@ extern initcall_entry_t __con_initcall_start[], __con_initcall_end[];
/* Used for constructor calls. */
typedef void (*ctor_fn_t)(void);
+struct file_system_type;
+
/* Defined in init/main.c */
extern int do_one_initcall(initcall_t fn);
extern char __initdata boot_command_line[];
@@ -146,7 +148,8 @@ extern unsigned int reset_devices;
/* used by init/main.c */
void setup_arch(char **);
void prepare_namespace(void);
-int __init init_rootfs(void);
+void __init init_rootfs(void);
+extern struct file_system_type rootfs_fs_type;
#if defined(CONFIG_STRICT_KERNEL_RWX) || defined(CONFIG_STRICT_MODULE_RWX)
extern bool rodata_enabled;
diff --git a/include/linux/intel_rapl.h b/include/linux/intel_rapl.h
new file mode 100644
index 000000000000..efb3ce892c20
--- /dev/null
+++ b/include/linux/intel_rapl.h
@@ -0,0 +1,155 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Data types and headers for RAPL support
+ *
+ * Copyright (C) 2019 Intel Corporation.
+ *
+ * Author: Zhang Rui <rui.zhang@intel.com>
+ */
+
+#ifndef __INTEL_RAPL_H__
+#define __INTEL_RAPL_H__
+
+#include <linux/types.h>
+#include <linux/powercap.h>
+#include <linux/cpuhotplug.h>
+
+enum rapl_domain_type {
+ RAPL_DOMAIN_PACKAGE, /* entire package/socket */
+ RAPL_DOMAIN_PP0, /* core power plane */
+ RAPL_DOMAIN_PP1, /* graphics uncore */
+ RAPL_DOMAIN_DRAM, /* DRAM control_type */
+ RAPL_DOMAIN_PLATFORM, /* PSys control_type */
+ RAPL_DOMAIN_MAX,
+};
+
+enum rapl_domain_reg_id {
+ RAPL_DOMAIN_REG_LIMIT,
+ RAPL_DOMAIN_REG_STATUS,
+ RAPL_DOMAIN_REG_PERF,
+ RAPL_DOMAIN_REG_POLICY,
+ RAPL_DOMAIN_REG_INFO,
+ RAPL_DOMAIN_REG_MAX,
+};
+
+struct rapl_package;
+
+enum rapl_primitives {
+ ENERGY_COUNTER,
+ POWER_LIMIT1,
+ POWER_LIMIT2,
+ FW_LOCK,
+
+ PL1_ENABLE, /* power limit 1, aka long term */
+ PL1_CLAMP, /* allow frequency to go below OS request */
+ PL2_ENABLE, /* power limit 2, aka short term, instantaneous */
+ PL2_CLAMP,
+
+ TIME_WINDOW1, /* long term */
+ TIME_WINDOW2, /* short term */
+ THERMAL_SPEC_POWER,
+ MAX_POWER,
+
+ MIN_POWER,
+ MAX_TIME_WINDOW,
+ THROTTLED_TIME,
+ PRIORITY_LEVEL,
+
+ /* below are not raw primitive data */
+ AVERAGE_POWER,
+ NR_RAPL_PRIMITIVES,
+};
+
+struct rapl_domain_data {
+ u64 primitives[NR_RAPL_PRIMITIVES];
+ unsigned long timestamp;
+};
+
+#define NR_POWER_LIMITS (2)
+struct rapl_power_limit {
+ struct powercap_zone_constraint *constraint;
+ int prim_id; /* primitive ID used to enable */
+ struct rapl_domain *domain;
+ const char *name;
+ u64 last_power_limit;
+};
+
+struct rapl_package;
+
+struct rapl_domain {
+ const char *name;
+ enum rapl_domain_type id;
+ u64 regs[RAPL_DOMAIN_REG_MAX];
+ struct powercap_zone power_zone;
+ struct rapl_domain_data rdd;
+ struct rapl_power_limit rpl[NR_POWER_LIMITS];
+ u64 attr_map; /* track capabilities */
+ unsigned int state;
+ unsigned int domain_energy_unit;
+ struct rapl_package *rp;
+};
+
+struct reg_action {
+ u64 reg;
+ u64 mask;
+ u64 value;
+ int err;
+};
+
+/**
+ * struct rapl_if_priv: private data for different RAPL interfaces
+ * @control_type: Each RAPL interface must have its own powercap
+ * control type.
+ * @platform_rapl_domain: Optional. Some RAPL interface may have platform
+ * level RAPL control.
+ * @pcap_rapl_online: CPU hotplug state for each RAPL interface.
+ * @reg_unit: Register for getting energy/power/time unit.
+ * @regs: Register sets for different RAPL Domains.
+ * @limits: Number of power limits supported by each domain.
+ * @read_raw: Callback for reading RAPL interface specific
+ * registers.
+ * @write_raw: Callback for writing RAPL interface specific
+ * registers.
+ */
+struct rapl_if_priv {
+ struct powercap_control_type *control_type;
+ struct rapl_domain *platform_rapl_domain;
+ enum cpuhp_state pcap_rapl_online;
+ u64 reg_unit;
+ u64 regs[RAPL_DOMAIN_MAX][RAPL_DOMAIN_REG_MAX];
+ int limits[RAPL_DOMAIN_MAX];
+ int (*read_raw)(int cpu, struct reg_action *ra);
+ int (*write_raw)(int cpu, struct reg_action *ra);
+};
+
+/* maximum rapl package domain name: package-%d-die-%d */
+#define PACKAGE_DOMAIN_NAME_LENGTH 30
+
+struct rapl_package {
+ unsigned int id; /* logical die id, equals physical 1-die systems */
+ unsigned int nr_domains;
+ unsigned long domain_map; /* bit map of active domains */
+ unsigned int power_unit;
+ unsigned int energy_unit;
+ unsigned int time_unit;
+ struct rapl_domain *domains; /* array of domains, sized at runtime */
+ struct powercap_zone *power_zone; /* keep track of parent zone */
+ unsigned long power_limit_irq; /* keep track of package power limit
+ * notify interrupt enable status.
+ */
+ struct list_head plist;
+ int lead_cpu; /* one active cpu per package for access */
+ /* Track active cpus */
+ struct cpumask cpumask;
+ char name[PACKAGE_DOMAIN_NAME_LENGTH];
+ struct rapl_if_priv *priv;
+};
+
+struct rapl_package *rapl_find_package_domain(int cpu, struct rapl_if_priv *priv);
+struct rapl_package *rapl_add_package(int cpu, struct rapl_if_priv *priv);
+void rapl_remove_package(struct rapl_package *rp);
+
+int rapl_add_platform_domain(struct rapl_if_priv *priv);
+void rapl_remove_platform_domain(struct rapl_if_priv *priv);
+
+#endif /* __INTEL_RAPL_H__ */
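A hedged sketch of how an interface driver might plug into this header: fill a struct rapl_if_priv with register accessors, then register packages from a CPU hotplug callback. The MSR-style read callback and the 0x606 unit register are illustrative assumptions modelled on an MSR backend, not requirements of the interface.

#include <linux/cpumask.h>
#include <linux/err.h>
#include <linux/intel_rapl.h>
#include <asm/msr.h>

static int example_read_raw(int cpu, struct reg_action *ra)
{
        if (rdmsrl_safe_on_cpu(cpu, ra->reg, &ra->value))
                return -EIO;

        ra->value &= ra->mask;
        return 0;
}

static struct rapl_if_priv example_rapl_priv = {
        .reg_unit       = 0x606,        /* e.g. MSR_RAPL_POWER_UNIT */
        .read_raw       = example_read_raw,
        /* .write_raw, .regs[][] and .limits[] filled in the same way */
};

static int example_cpu_online(unsigned int cpu)
{
        struct rapl_package *rp;

        rp = rapl_find_package_domain(cpu, &example_rapl_priv);
        if (!rp) {
                rp = rapl_add_package(cpu, &example_rapl_priv);
                if (IS_ERR(rp))
                        return PTR_ERR(rp);
        }
        cpumask_set_cpu(cpu, &rp->cpumask);
        return 0;
}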
diff --git a/include/linux/io.h b/include/linux/io.h
index 9876e5801a9d..accac822336a 100644
--- a/include/linux/io.h
+++ b/include/linux/io.h
@@ -33,6 +33,7 @@ static inline int ioremap_page_range(unsigned long addr, unsigned long end,
#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
void __init ioremap_huge_init(void);
+int arch_ioremap_p4d_supported(void);
int arch_ioremap_pud_supported(void);
int arch_ioremap_pmd_supported(void);
#else
diff --git a/include/linux/iomap.h b/include/linux/iomap.h
index 1df9ea187a9a..bc499ceae392 100644
--- a/include/linux/iomap.h
+++ b/include/linux/iomap.h
@@ -7,6 +7,7 @@
#include <linux/mm.h>
#include <linux/types.h>
#include <linux/mm_types.h>
+#include <linux/blkdev.h>
struct address_space;
struct fiemap_extent_info;
@@ -69,6 +70,12 @@ struct iomap {
const struct iomap_page_ops *page_ops;
};
+static inline sector_t
+iomap_sector(struct iomap *iomap, loff_t pos)
+{
+ return (iomap->addr + pos - iomap->offset) >> SECTOR_SHIFT;
+}
+
/*
* When a filesystem sets page_ops in an iomap mapping it returns, page_prepare
* and page_done will be called for each page written to. This only applies to
@@ -116,6 +123,16 @@ struct iomap_ops {
};
/*
+ * Main iomap iterator function.
+ */
+typedef loff_t (*iomap_actor_t)(struct inode *inode, loff_t pos, loff_t len,
+ void *data, struct iomap *iomap);
+
+loff_t iomap_apply(struct inode *inode, loff_t pos, loff_t length,
+ unsigned flags, const struct iomap_ops *ops, void *data,
+ iomap_actor_t actor);
+
+/*
* Structure allocate for each page when block size < PAGE_SIZE to track
* sub-page uptodate status and I/O completions.
*/
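With iomap_apply() and the actor typedef exported, callers outside fs/iomap.c can drive the extent iteration themselves. A rough sketch of the calling convention; the byte-counting actor below is invented for illustration:

#include <linux/iomap.h>

static loff_t example_actor(struct inode *inode, loff_t pos, loff_t length,
                            void *data, struct iomap *iomap)
{
        loff_t *mapped = data;

        if (iomap->type == IOMAP_MAPPED)
                *mapped += length;

        return length;  /* consumed the whole extent */
}

static loff_t example_count_mapped(struct inode *inode, loff_t pos, loff_t len,
                                   const struct iomap_ops *ops)
{
        loff_t mapped = 0, ret;

        while (len > 0) {
                ret = iomap_apply(inode, pos, len, IOMAP_REPORT, ops,
                                  &mapped, example_actor);
                if (ret <= 0)
                        break;
                pos += ret;
                len -= ret;
        }
        return mapped;
}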
diff --git a/include/linux/iversion.h b/include/linux/iversion.h
index be50ef7cedab..2917ef990d43 100644
--- a/include/linux/iversion.h
+++ b/include/linux/iversion.h
@@ -113,6 +113,30 @@ inode_peek_iversion_raw(const struct inode *inode)
}
/**
+ * inode_set_max_iversion_raw - update i_version new value is larger
+ * @inode: inode to set
+ * @val: new i_version to set
+ *
+ * Some self-managed filesystems (e.g. Ceph) will only update the i_version
+ * value if the new value is larger than the one we already have.
+ */
+static inline void
+inode_set_max_iversion_raw(struct inode *inode, u64 val)
+{
+ u64 cur, old;
+
+ cur = inode_peek_iversion_raw(inode);
+ for (;;) {
+ if (cur > val)
+ break;
+ old = atomic64_cmpxchg(&inode->i_version, cur, val);
+ if (likely(old == cur))
+ break;
+ cur = old;
+ }
+}
+
+/**
* inode_set_iversion - set i_version to a particular value
* @inode: inode to set
* @val: new i_version value to set
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 0c9bc231107f..4fa360a13c1e 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -88,6 +88,8 @@
*/
#define FIELD_SIZEOF(t, f) (sizeof(((t*)0)->f))
+#define typeof_member(T, m) typeof(((T*)0)->m)
+
#define DIV_ROUND_UP __KERNEL_DIV_ROUND_UP
#define DIV_ROUND_DOWN_ULL(ll, d) \
diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h
index 443d9800ca3f..04bdaf01112c 100644
--- a/include/linux/kprobes.h
+++ b/include/linux/kprobes.h
@@ -458,4 +458,23 @@ static inline bool is_kprobe_optinsn_slot(unsigned long addr)
}
#endif
+/* Returns true if kprobes handled the fault */
+static nokprobe_inline bool kprobe_page_fault(struct pt_regs *regs,
+ unsigned int trap)
+{
+ if (!kprobes_built_in())
+ return false;
+ if (user_mode(regs))
+ return false;
+ /*
+ * To be potentially processing a kprobe fault and to be allowed
+ * to call kprobe_running(), we have to be non-preemptible.
+ */
+ if (preemptible())
+ return false;
+ if (!kprobe_running())
+ return false;
+ return kprobe_fault_handler(regs, trap);
+}
+
#endif /* _LINUX_KPROBES_H */
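Architecture page-fault handlers can now call this shared helper instead of open-coding the same checks. A hedged sketch of a call site; the surrounding do_page_fault() shape is illustrative rather than any one architecture's code:

#include <linux/errno.h>
#include <linux/kprobes.h>
#include <linux/ptrace.h>

static int example_do_page_fault(struct pt_regs *regs, unsigned int trapnr)
{
        /* Let a registered kprobe fault handler consume the fault first. */
        if (kprobe_page_fault(regs, trapnr))
                return 0;

        /* ... normal VMA lookup and fault handling continues here ... */
        return -EFAULT;
}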
diff --git a/include/linux/libnvdimm.h b/include/linux/libnvdimm.h
index 03d5c3aece9d..7a64b3ddb408 100644
--- a/include/linux/libnvdimm.h
+++ b/include/linux/libnvdimm.h
@@ -11,6 +11,7 @@
#include <linux/types.h>
#include <linux/uuid.h>
#include <linux/spinlock.h>
+#include <linux/bio.h>
struct badrange_entry {
u64 start;
@@ -57,6 +58,9 @@ enum {
*/
ND_REGION_PERSIST_MEMCTRL = 2,
+ /* Platform provides asynchronous flush mechanism */
+ ND_REGION_ASYNC = 3,
+
/* mark newly adjusted resources as requiring a label update */
DPA_RESOURCE_ADJUSTED = 1 << 0,
};
@@ -113,6 +117,7 @@ struct nd_mapping_desc {
int position;
};
+struct nd_region;
struct nd_region_desc {
struct resource *res;
struct nd_mapping_desc *mapping;
@@ -125,6 +130,7 @@ struct nd_region_desc {
int target_node;
unsigned long flags;
struct device_node *of_node;
+ int (*flush)(struct nd_region *nd_region, struct bio *bio);
};
struct device;
@@ -252,10 +258,12 @@ unsigned long nd_blk_memremap_flags(struct nd_blk_region *ndbr);
unsigned int nd_region_acquire_lane(struct nd_region *nd_region);
void nd_region_release_lane(struct nd_region *nd_region, unsigned int lane);
u64 nd_fletcher64(void *addr, size_t len, bool le);
-void nvdimm_flush(struct nd_region *nd_region);
+int nvdimm_flush(struct nd_region *nd_region, struct bio *bio);
+int generic_nvdimm_flush(struct nd_region *nd_region);
int nvdimm_has_flush(struct nd_region *nd_region);
int nvdimm_has_cache(struct nd_region *nd_region);
int nvdimm_in_overwrite(struct nvdimm *nvdimm);
+bool is_nvdimm_sync(struct nd_region *nd_region);
static inline int nvdimm_ctl(struct nvdimm *nvdimm, unsigned int cmd, void *buf,
unsigned int buf_len, int *cmd_rc)
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index 57baa27f238c..0b0d7259276d 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -5,7 +5,7 @@
* Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
* Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
*
- * see Documentation/locking/lockdep-design.txt for more details.
+ * see Documentation/locking/lockdep-design.rst for more details.
*/
#ifndef __LINUX_LOCKDEP_H
#define __LINUX_LOCKDEP_H
diff --git a/include/linux/lz4.h b/include/linux/lz4.h
index 394e3d9213b8..b16e15b9587a 100644
--- a/include/linux/lz4.h
+++ b/include/linux/lz4.h
@@ -278,7 +278,7 @@ int LZ4_decompress_fast(const char *source, char *dest, int originalSize);
* @compressedSize: is the precise full size of the compressed block
* @maxDecompressedSize: is the size of 'dest' buffer
*
- * Decompresses data fom 'source' into 'dest'.
+ * Decompresses data from 'source' into 'dest'.
* If the source stream is detected malformed, the function will
* stop decoding and return a negative result.
* This function is protected against buffer overflow exploits,
@@ -522,7 +522,7 @@ int LZ4_setStreamDecode(LZ4_streamDecode_t *LZ4_streamDecode,
const char *dictionary, int dictSize);
/**
- * LZ4_decompress_fast_continue() - Decompress blocks in streaming mode
+ * LZ4_decompress_safe_continue() - Decompress blocks in streaming mode
* @LZ4_streamDecode: the 'LZ4_streamDecode_t' structure
* @source: source address of the compressed data
* @dest: output buffer address of the uncompressed data
@@ -530,7 +530,7 @@ int LZ4_setStreamDecode(LZ4_streamDecode_t *LZ4_streamDecode,
* @compressedSize: is the precise full size of the compressed block
* @maxDecompressedSize: is the size of 'dest' buffer
*
- * These decoding function allows decompression of multiple blocks
+ * This decoding function allows decompression of multiple blocks
* in "streaming" mode.
* Previously decoded blocks *must* remain available at the memory position
* where they were decoded (up to 64 KB)
@@ -569,7 +569,7 @@ int LZ4_decompress_safe_continue(LZ4_streamDecode_t *LZ4_streamDecode,
* which must be already allocated with 'originalSize' bytes
* @originalSize: is the original and therefore uncompressed size
*
- * These decoding function allows decompression of multiple blocks
+ * This decoding function allows decompression of multiple blocks
* in "streaming" mode.
* Previously decoded blocks *must* remain available at the memory position
* where they were decoded (up to 64 KB)
@@ -610,10 +610,10 @@ int LZ4_decompress_fast_continue(LZ4_streamDecode_t *LZ4_streamDecode,
* @dictStart: pointer to the start of the dictionary in memory
* @dictSize: size of dictionary
*
- * These decoding function works the same as
+ * This decoding function works the same as
* a combination of LZ4_setStreamDecode() followed by
* LZ4_decompress_safe_continue()
- * It is stand-alone, and don'tn eed a LZ4_streamDecode_t structure.
+ * It is stand-alone, and doesn't need an LZ4_streamDecode_t structure.
*
* Return: number of bytes decompressed into destination buffer
* (necessarily <= maxDecompressedSize)
@@ -633,10 +633,10 @@ int LZ4_decompress_safe_usingDict(const char *source, char *dest,
* @dictStart: pointer to the start of the dictionary in memory
* @dictSize: size of dictionary
*
- * These decoding function works the same as
+ * This decoding function works the same as
* a combination of LZ4_setStreamDecode() followed by
- * LZ4_decompress_safe_continue()
- * It is stand-alone, and don'tn eed a LZ4_streamDecode_t structure.
+ * LZ4_decompress_fast_continue()
+ * It is stand-alone, and doesn't need an LZ4_streamDecode_t structure.
*
* Return: number of bytes decompressed into destination buffer
* (necessarily <= maxDecompressedSize)
diff --git a/include/linux/memory.h b/include/linux/memory.h
index e1dc1bb2b787..02e633f3ede0 100644
--- a/include/linux/memory.h
+++ b/include/linux/memory.h
@@ -111,16 +111,15 @@ extern int register_memory_notifier(struct notifier_block *nb);
extern void unregister_memory_notifier(struct notifier_block *nb);
extern int register_memory_isolate_notifier(struct notifier_block *nb);
extern void unregister_memory_isolate_notifier(struct notifier_block *nb);
-int hotplug_memory_register(int nid, struct mem_section *section);
-#ifdef CONFIG_MEMORY_HOTREMOVE
-extern void unregister_memory_section(struct mem_section *);
-#endif
+int create_memory_block_devices(unsigned long start, unsigned long size);
+void remove_memory_block_devices(unsigned long start, unsigned long size);
extern int memory_dev_init(void);
extern int memory_notify(unsigned long val, void *v);
extern int memory_isolate_notify(unsigned long val, void *v);
-extern struct memory_block *find_memory_block_hinted(struct mem_section *,
- struct memory_block *);
extern struct memory_block *find_memory_block(struct mem_section *);
+typedef int (*walk_memory_blocks_func_t)(struct memory_block *, void *);
+extern int walk_memory_blocks(unsigned long start, unsigned long size,
+ void *arg, walk_memory_blocks_func_t func);
#define CONFIG_MEM_BLOCK_SIZE (PAGES_PER_SECTION<<PAGE_SHIFT)
#endif /* CONFIG_MEMORY_HOTPLUG_SPARSE */
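walk_memory_blocks() iterates the memory block devices intersecting a physical range, replacing the pfn-based walk_memory_range() removed from memory_hotplug.h below. A minimal sketch; example_online_one() is a placeholder callback:

#include <linux/memory.h>

static int example_online_one(struct memory_block *mem, void *arg)
{
        unsigned int *count = arg;

        (*count)++;
        return 0;               /* a non-zero return would stop the walk */
}

static int example_count_blocks(unsigned long start, unsigned long size,
                                unsigned int *count)
{
        *count = 0;
        return walk_memory_blocks(start, size, count, example_online_one);
}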
diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h
index ae892eef8b82..f46ea71b4ffd 100644
--- a/include/linux/memory_hotplug.h
+++ b/include/linux/memory_hotplug.h
@@ -123,20 +123,10 @@ static inline bool movable_node_is_enabled(void)
return movable_node_enabled;
}
-#ifdef CONFIG_MEMORY_HOTREMOVE
extern void arch_remove_memory(int nid, u64 start, u64 size,
struct vmem_altmap *altmap);
extern void __remove_pages(struct zone *zone, unsigned long start_pfn,
unsigned long nr_pages, struct vmem_altmap *altmap);
-#endif /* CONFIG_MEMORY_HOTREMOVE */
-
-/*
- * Do we want sysfs memblock files created. This will allow userspace to online
- * and offline memory explicitly. Lack of this bit means that the caller has to
- * call move_pfn_range_to_zone to finish the initialization.
- */
-
-#define MHP_MEMBLOCK_API (1<<0)
/* reasonably generic interface to expand the physical pages */
extern int __add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages,
@@ -324,7 +314,7 @@ static inline void pgdat_resize_init(struct pglist_data *pgdat) {}
extern bool is_mem_section_removable(unsigned long pfn, unsigned long nr_pages);
extern void try_offline_node(int nid);
extern int offline_pages(unsigned long start_pfn, unsigned long nr_pages);
-extern void remove_memory(int nid, u64 start, u64 size);
+extern int remove_memory(int nid, u64 start, u64 size);
extern void __remove_memory(int nid, u64 start, u64 size);
#else
@@ -341,22 +331,25 @@ static inline int offline_pages(unsigned long start_pfn, unsigned long nr_pages)
return -EINVAL;
}
-static inline void remove_memory(int nid, u64 start, u64 size) {}
+static inline int remove_memory(int nid, u64 start, u64 size)
+{
+ return -EBUSY;
+}
+
static inline void __remove_memory(int nid, u64 start, u64 size) {}
#endif /* CONFIG_MEMORY_HOTREMOVE */
extern void __ref free_area_init_core_hotplug(int nid);
-extern int walk_memory_range(unsigned long start_pfn, unsigned long end_pfn,
- void *arg, int (*func)(struct memory_block *, void *));
extern int __add_memory(int nid, u64 start, u64 size);
extern int add_memory(int nid, u64 start, u64 size);
extern int add_memory_resource(int nid, struct resource *resource);
extern void move_pfn_range_to_zone(struct zone *zone, unsigned long start_pfn,
unsigned long nr_pages, struct vmem_altmap *altmap);
extern bool is_memblock_offlined(struct memory_block *mem);
-extern int sparse_add_one_section(int nid, unsigned long start_pfn,
- struct vmem_altmap *altmap);
-extern void sparse_remove_one_section(struct zone *zone, struct mem_section *ms,
+extern int sparse_add_section(int nid, unsigned long pfn,
+ unsigned long nr_pages, struct vmem_altmap *altmap);
+extern void sparse_remove_section(struct mem_section *ms,
+ unsigned long pfn, unsigned long nr_pages,
unsigned long map_offset, struct vmem_altmap *altmap);
extern struct page *sparse_decode_mem_map(unsigned long coded_mem_map,
unsigned long pnum);
diff --git a/include/linux/migrate.h b/include/linux/migrate.h
index e13d9bf2f9a5..7f04754c7f2b 100644
--- a/include/linux/migrate.h
+++ b/include/linux/migrate.h
@@ -77,8 +77,7 @@ extern void migrate_page_copy(struct page *newpage, struct page *page);
extern int migrate_huge_page_move_mapping(struct address_space *mapping,
struct page *newpage, struct page *page);
extern int migrate_page_move_mapping(struct address_space *mapping,
- struct page *newpage, struct page *page, enum migrate_mode mode,
- int extra_count);
+ struct page *newpage, struct page *page, int extra_count);
#else
static inline void putback_movable_pages(struct list_head *l) {}
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 0389c34ac529..0334ca97c584 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -541,13 +541,30 @@ static inline void vma_set_anonymous(struct vm_area_struct *vma)
vma->vm_ops = NULL;
}
+static inline bool vma_is_anonymous(struct vm_area_struct *vma)
+{
+ return !vma->vm_ops;
+}
+
+#ifdef CONFIG_SHMEM
+/*
+ * The vma_is_shmem is not inline because it is used only by slow
+ * paths in userfault.
+ */
+bool vma_is_shmem(struct vm_area_struct *vma);
+#else
+static inline bool vma_is_shmem(struct vm_area_struct *vma) { return false; }
+#endif
+
+int vma_is_stack_for_current(struct vm_area_struct *vma);
+
/* flush_tlb_range() takes a vma, not a mm, and can care about flags */
#define TLB_FLUSH_VMA(mm,flags) { .vm_mm = (mm), .vm_flags = (flags) }
struct mmu_gather;
struct inode;
-#if !defined(__HAVE_ARCH_PTE_DEVMAP) || !defined(CONFIG_TRANSPARENT_HUGEPAGE)
+#if !defined(CONFIG_ARCH_HAS_PTE_DEVMAP) || !defined(CONFIG_TRANSPARENT_HUGEPAGE)
static inline int pmd_devmap(pmd_t pmd)
{
return 0;
@@ -956,41 +973,28 @@ static inline bool put_devmap_managed_page(struct page *page)
return false;
}
-static inline bool is_device_private_page(const struct page *page)
-{
- return is_zone_device_page(page) &&
- page->pgmap->type == MEMORY_DEVICE_PRIVATE;
-}
-
-#ifdef CONFIG_PCI_P2PDMA
-static inline bool is_pci_p2pdma_page(const struct page *page)
-{
- return is_zone_device_page(page) &&
- page->pgmap->type == MEMORY_DEVICE_PCI_P2PDMA;
-}
-#else /* CONFIG_PCI_P2PDMA */
-static inline bool is_pci_p2pdma_page(const struct page *page)
-{
- return false;
-}
-#endif /* CONFIG_PCI_P2PDMA */
-
#else /* CONFIG_DEV_PAGEMAP_OPS */
static inline bool put_devmap_managed_page(struct page *page)
{
return false;
}
+#endif /* CONFIG_DEV_PAGEMAP_OPS */
static inline bool is_device_private_page(const struct page *page)
{
- return false;
+ return IS_ENABLED(CONFIG_DEV_PAGEMAP_OPS) &&
+ IS_ENABLED(CONFIG_DEVICE_PRIVATE) &&
+ is_zone_device_page(page) &&
+ page->pgmap->type == MEMORY_DEVICE_PRIVATE;
}
static inline bool is_pci_p2pdma_page(const struct page *page)
{
- return false;
+ return IS_ENABLED(CONFIG_DEV_PAGEMAP_OPS) &&
+ IS_ENABLED(CONFIG_PCI_P2PDMA) &&
+ is_zone_device_page(page) &&
+ page->pgmap->type == MEMORY_DEVICE_PCI_P2PDMA;
}
-#endif /* CONFIG_DEV_PAGEMAP_OPS */
/* 127: arbitrary random number, small enough to assemble well */
#define page_ref_zero_or_close_to_overflow(page) \
@@ -1556,6 +1560,10 @@ long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
int get_user_pages_fast(unsigned long start, int nr_pages,
unsigned int gup_flags, struct page **pages);
+int account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc);
+int __account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc,
+ struct task_struct *task, bool bypass_rlim);
+
/* Container for pinned pfns / pages */
struct frame_vector {
unsigned int nr_allocated; /* Number of frames we have space for */
@@ -1629,23 +1637,6 @@ int clear_page_dirty_for_io(struct page *page);
int get_cmdline(struct task_struct *task, char *buffer, int buflen);
-static inline bool vma_is_anonymous(struct vm_area_struct *vma)
-{
- return !vma->vm_ops;
-}
-
-#ifdef CONFIG_SHMEM
-/*
- * The vma_is_shmem is not inline because it is used only by slow
- * paths in userfault.
- */
-bool vma_is_shmem(struct vm_area_struct *vma);
-#else
-static inline bool vma_is_shmem(struct vm_area_struct *vma) { return false; }
-#endif
-
-int vma_is_stack_for_current(struct vm_area_struct *vma);
-
extern unsigned long move_page_tables(struct vm_area_struct *vma,
unsigned long old_addr, struct vm_area_struct *new_vma,
unsigned long new_addr, unsigned long len,
@@ -1763,7 +1754,7 @@ static inline void sync_mm_rss(struct mm_struct *mm)
}
#endif
-#ifndef __HAVE_ARCH_PTE_DEVMAP
+#ifndef CONFIG_ARCH_HAS_PTE_DEVMAP
static inline int pte_devmap(pte_t pte)
{
return 0;
@@ -2767,11 +2758,17 @@ extern int randomize_va_space;
#endif
const char * arch_vma_name(struct vm_area_struct *vma);
+#ifdef CONFIG_MMU
void print_vma_addr(char *prefix, unsigned long rip);
+#else
+static inline void print_vma_addr(char *prefix, unsigned long rip)
+{
+}
+#endif
void *sparse_buffer_alloc(unsigned long size);
-struct page *sparse_mem_map_populate(unsigned long pnum, int nid,
- struct vmem_altmap *altmap);
+struct page * __populate_section_memmap(unsigned long pfn,
+ unsigned long nr_pages, int nid, struct vmem_altmap *altmap);
pgd_t *vmemmap_pgd_populate(unsigned long addr, int node);
p4d_t *vmemmap_p4d_populate(pgd_t *pgd, unsigned long addr, int node);
pud_t *vmemmap_pud_populate(p4d_t *p4d, unsigned long addr, int node);
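account_locked_vm() and __account_locked_vm() give drivers that pin user memory a common way to charge pages against RLIMIT_MEMLOCK. A minimal sketch of the charge/uncharge pairing; the example_* wrappers are hypothetical:

#include <linux/mm.h>

static int example_charge(struct mm_struct *mm, unsigned long npages)
{
        /* May fail with -ENOMEM if RLIMIT_MEMLOCK would be exceeded. */
        return account_locked_vm(mm, npages, true);
}

static void example_uncharge(struct mm_struct *mm, unsigned long npages)
{
        account_locked_vm(mm, npages, false);
}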
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 70394cabaf4e..d77d717c620c 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -855,18 +855,6 @@ static inline int local_memory_node(int node_id) { return node_id; };
*/
#define zone_idx(zone) ((zone) - (zone)->zone_pgdat->node_zones)
-#ifdef CONFIG_ZONE_DEVICE
-static inline bool is_dev_zone(const struct zone *zone)
-{
- return zone_idx(zone) == ZONE_DEVICE;
-}
-#else
-static inline bool is_dev_zone(const struct zone *zone)
-{
- return false;
-}
-#endif
-
/*
* Returns true if a zone has pages managed by the buddy allocator.
* All the reclaim decisions have to use this function rather than
@@ -1160,6 +1148,29 @@ static inline unsigned long section_nr_to_pfn(unsigned long sec)
#define SECTION_ALIGN_UP(pfn) (((pfn) + PAGES_PER_SECTION - 1) & PAGE_SECTION_MASK)
#define SECTION_ALIGN_DOWN(pfn) ((pfn) & PAGE_SECTION_MASK)
+#define SUBSECTION_SHIFT 21
+
+#define PFN_SUBSECTION_SHIFT (SUBSECTION_SHIFT - PAGE_SHIFT)
+#define PAGES_PER_SUBSECTION (1UL << PFN_SUBSECTION_SHIFT)
+#define PAGE_SUBSECTION_MASK (~(PAGES_PER_SUBSECTION-1))
+
+#if SUBSECTION_SHIFT > SECTION_SIZE_BITS
+#error Subsection size exceeds section size
+#else
+#define SUBSECTIONS_PER_SECTION (1UL << (SECTION_SIZE_BITS - SUBSECTION_SHIFT))
+#endif
+
+#define SUBSECTION_ALIGN_UP(pfn) ALIGN((pfn), PAGES_PER_SUBSECTION)
+#define SUBSECTION_ALIGN_DOWN(pfn) ((pfn) & PAGE_SUBSECTION_MASK)
+
+struct mem_section_usage {
+ DECLARE_BITMAP(subsection_map, SUBSECTIONS_PER_SECTION);
+ /* See declaration of similar field in struct zone */
+ unsigned long pageblock_flags[0];
+};
+
+void subsection_map_init(unsigned long pfn, unsigned long nr_pages);
+
struct page;
struct page_ext;
struct mem_section {
@@ -1177,8 +1188,7 @@ struct mem_section {
*/
unsigned long section_mem_map;
- /* See declaration of similar field in struct zone */
- unsigned long *pageblock_flags;
+ struct mem_section_usage *usage;
#ifdef CONFIG_PAGE_EXTENSION
/*
* If SPARSEMEM, pgdat doesn't have page_ext pointer. We use
@@ -1209,6 +1219,11 @@ extern struct mem_section **mem_section;
extern struct mem_section mem_section[NR_SECTION_ROOTS][SECTIONS_PER_ROOT];
#endif
+static inline unsigned long *section_to_usemap(struct mem_section *ms)
+{
+ return ms->usage->pageblock_flags;
+}
+
static inline struct mem_section *__nr_to_section(unsigned long nr)
{
#ifdef CONFIG_SPARSEMEM_EXTREME
@@ -1219,8 +1234,8 @@ static inline struct mem_section *__nr_to_section(unsigned long nr)
return NULL;
return &mem_section[SECTION_NR_TO_ROOT(nr)][nr & SECTION_ROOT_MASK];
}
-extern int __section_nr(struct mem_section* ms);
-extern unsigned long usemap_size(void);
+extern unsigned long __section_nr(struct mem_section *ms);
+extern size_t mem_section_usage_size(void);
/*
* We use the lower bits of the mem_map pointer to store
@@ -1238,7 +1253,8 @@ extern unsigned long usemap_size(void);
#define SECTION_MARKED_PRESENT (1UL<<0)
#define SECTION_HAS_MEM_MAP (1UL<<1)
#define SECTION_IS_ONLINE (1UL<<2)
-#define SECTION_MAP_LAST_BIT (1UL<<3)
+#define SECTION_IS_EARLY (1UL<<3)
+#define SECTION_MAP_LAST_BIT (1UL<<4)
#define SECTION_MAP_MASK (~(SECTION_MAP_LAST_BIT-1))
#define SECTION_NID_SHIFT 3
@@ -1264,6 +1280,11 @@ static inline int valid_section(struct mem_section *section)
return (section && (section->section_mem_map & SECTION_HAS_MEM_MAP));
}
+static inline int early_section(struct mem_section *section)
+{
+ return (section && (section->section_mem_map & SECTION_IS_EARLY));
+}
+
static inline int valid_section_nr(unsigned long nr)
{
return valid_section(__nr_to_section(nr));
@@ -1291,14 +1312,42 @@ static inline struct mem_section *__pfn_to_section(unsigned long pfn)
return __nr_to_section(pfn_to_section_nr(pfn));
}
-extern int __highest_present_section_nr;
+extern unsigned long __highest_present_section_nr;
+
+static inline int subsection_map_index(unsigned long pfn)
+{
+ return (pfn & ~(PAGE_SECTION_MASK)) / PAGES_PER_SUBSECTION;
+}
+
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
+static inline int pfn_section_valid(struct mem_section *ms, unsigned long pfn)
+{
+ int idx = subsection_map_index(pfn);
+
+ return test_bit(idx, ms->usage->subsection_map);
+}
+#else
+static inline int pfn_section_valid(struct mem_section *ms, unsigned long pfn)
+{
+ return 1;
+}
+#endif
#ifndef CONFIG_HAVE_ARCH_PFN_VALID
static inline int pfn_valid(unsigned long pfn)
{
+ struct mem_section *ms;
+
if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
return 0;
- return valid_section(__nr_to_section(pfn_to_section_nr(pfn)));
+ ms = __nr_to_section(pfn_to_section_nr(pfn));
+ if (!valid_section(ms))
+ return 0;
+ /*
+ * Traditionally early sections always returned pfn_valid() for
+ * the entire section-sized span.
+ */
+ return early_section(ms) || pfn_section_valid(ms, pfn);
}
#endif
@@ -1330,6 +1379,7 @@ void sparse_init(void);
#define sparse_init() do {} while (0)
#define sparse_index_init(_sec, _nid) do {} while (0)
#define pfn_present pfn_valid
+#define subsection_map_init(_pfn, _nr_pages) do {} while (0)
#endif /* CONFIG_SPARSEMEM */
/*
diff --git a/include/linux/moduleloader.h b/include/linux/moduleloader.h
index 31013c2effd3..5229c18025e9 100644
--- a/include/linux/moduleloader.h
+++ b/include/linux/moduleloader.h
@@ -29,6 +29,11 @@ void *module_alloc(unsigned long size);
/* Free memory returned from module_alloc. */
void module_memfree(void *module_region);
+/* Determines if the section name is an exit section (that is only used during
+ * module unloading)
+ */
+bool module_exit_section(const char *name);
+
/*
* Apply the given relocation to the (simplified) ELF. Return -error
* or 0.
diff --git a/include/linux/mutex.h b/include/linux/mutex.h
index 3093dd162424..dcd03fee6e01 100644
--- a/include/linux/mutex.h
+++ b/include/linux/mutex.h
@@ -151,7 +151,7 @@ static inline bool mutex_is_locked(struct mutex *lock)
/*
* See kernel/locking/mutex.c for detailed documentation of these APIs.
- * Also see Documentation/locking/mutex-design.txt.
+ * Also see Documentation/locking/mutex-design.rst.
*/
#ifdef CONFIG_DEBUG_LOCK_ALLOC
extern void mutex_lock_nested(struct mutex *lock, unsigned int subclass);
diff --git a/include/linux/nfs4.h b/include/linux/nfs4.h
index 22494d170619..fd59904a282c 100644
--- a/include/linux/nfs4.h
+++ b/include/linux/nfs4.h
@@ -660,6 +660,7 @@ enum pnfs_update_layout_reason {
PNFS_UPDATE_LAYOUT_BLOCKED,
PNFS_UPDATE_LAYOUT_INVALID_OPEN,
PNFS_UPDATE_LAYOUT_SEND_LAYOUTGET,
+ PNFS_UPDATE_LAYOUT_EXIT,
};
#define NFS4_OP_MAP_NUM_LONGS \
diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h
index d363d5765cdf..0a11712a80e3 100644
--- a/include/linux/nfs_fs.h
+++ b/include/linux/nfs_fs.h
@@ -223,6 +223,8 @@ struct nfs4_copy_state {
#define NFS_INO_INVALID_MTIME BIT(10) /* cached mtime is invalid */
#define NFS_INO_INVALID_SIZE BIT(11) /* cached size is invalid */
#define NFS_INO_INVALID_OTHER BIT(12) /* other attrs are invalid */
+#define NFS_INO_DATA_INVAL_DEFER \
+ BIT(13) /* Deferred cache invalidation */
#define NFS_INO_INVALID_ATTR (NFS_INO_INVALID_CHANGE \
| NFS_INO_INVALID_CTIME \
diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h
index 1e78032a174b..a87fe854f008 100644
--- a/include/linux/nfs_fs_sb.h
+++ b/include/linux/nfs_fs_sb.h
@@ -58,6 +58,7 @@ struct nfs_client {
struct nfs_subversion * cl_nfs_mod; /* pointer to nfs version module */
u32 cl_minorversion;/* NFSv4 minorversion */
+ unsigned int cl_nconnect; /* Number of connections */
const char * cl_principal; /* used for machine cred */
#if IS_ENABLED(CONFIG_NFS_V4)
diff --git a/include/linux/node.h b/include/linux/node.h
index 1a557c589ecb..4866f32a02d8 100644
--- a/include/linux/node.h
+++ b/include/linux/node.h
@@ -137,10 +137,7 @@ static inline int register_one_node(int nid)
extern void unregister_one_node(int nid);
extern int register_cpu_under_node(unsigned int cpu, unsigned int nid);
extern int unregister_cpu_under_node(unsigned int cpu, unsigned int nid);
-extern int register_mem_sect_under_node(struct memory_block *mem_blk,
- void *arg);
-extern int unregister_mem_sect_under_nodes(struct memory_block *mem_blk,
- unsigned long phys_index);
+extern void unregister_memory_block_under_nodes(struct memory_block *mem_blk);
extern int register_memory_node_under_compute_node(unsigned int mem_nid,
unsigned int cpu_nid,
@@ -171,15 +168,8 @@ static inline int unregister_cpu_under_node(unsigned int cpu, unsigned int nid)
{
return 0;
}
-static inline int register_mem_sect_under_node(struct memory_block *mem_blk,
- void *arg)
+static inline void unregister_memory_block_under_nodes(struct memory_block *mem_blk)
{
- return 0;
-}
-static inline int unregister_mem_sect_under_nodes(struct memory_block *mem_blk,
- unsigned long phys_index)
-{
- return 0;
}
static inline void register_hugetlbfs_with_node(node_registration_func_t reg,
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index 40015609c4b5..c842735a4f45 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -2367,6 +2367,7 @@
#define PCI_DEVICE_ID_SYNOPSYS_HAPSUSB3 0xabcd
#define PCI_DEVICE_ID_SYNOPSYS_HAPSUSB3_AXI 0xabce
#define PCI_DEVICE_ID_SYNOPSYS_HAPSUSB31 0xabcf
+#define PCI_DEVICE_ID_SYNOPSYS_EDDA 0xedda
#define PCI_VENDOR_ID_USR 0x16ec
diff --git a/include/linux/pfn_t.h b/include/linux/pfn_t.h
index 01e8037023f7..2d9148221e9a 100644
--- a/include/linux/pfn_t.h
+++ b/include/linux/pfn_t.h
@@ -97,7 +97,7 @@ static inline pud_t pfn_t_pud(pfn_t pfn, pgprot_t pgprot)
#endif
#endif
-#ifdef __HAVE_ARCH_PTE_DEVMAP
+#ifdef CONFIG_ARCH_HAS_PTE_DEVMAP
static inline bool pfn_t_devmap(pfn_t pfn)
{
const u64 flags = PFN_DEV|PFN_MAP;
@@ -115,7 +115,7 @@ pmd_t pmd_mkdevmap(pmd_t pmd);
defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
pud_t pud_mkdevmap(pud_t pud);
#endif
-#endif /* __HAVE_ARCH_PTE_DEVMAP */
+#endif /* CONFIG_ARCH_HAS_PTE_DEVMAP */
#ifdef CONFIG_ARCH_HAS_PTE_SPECIAL
static inline bool pfn_t_special(pfn_t pfn)
diff --git a/include/linux/pid.h b/include/linux/pid.h
index 1484db6ca8d1..2a83e434db9d 100644
--- a/include/linux/pid.h
+++ b/include/linux/pid.h
@@ -4,6 +4,7 @@
#include <linux/rculist.h>
#include <linux/wait.h>
+#include <linux/refcount.h>
enum pid_type
{
@@ -57,7 +58,7 @@ struct upid {
struct pid
{
- atomic_t count;
+ refcount_t count;
unsigned int level;
/* lists of tasks that use this pid */
struct hlist_head tasks[PIDTYPE_MAX];
@@ -74,7 +75,7 @@ extern const struct file_operations pidfd_fops;
static inline struct pid *get_pid(struct pid *pid)
{
if (pid)
- atomic_inc(&pid->count);
+ refcount_inc(&pid->count);
return pid;
}
diff --git a/include/linux/platform_data/dma-imx.h b/include/linux/platform_data/dma-imx.h
index 80f9be858bd0..281adbb26e6b 100644
--- a/include/linux/platform_data/dma-imx.h
+++ b/include/linux/platform_data/dma-imx.h
@@ -52,7 +52,6 @@ struct imx_dma_data {
int dma_request2; /* secondary DMA request line */
enum sdma_peripheral_type peripheral_type;
int priority;
- struct device_node *of_node;
};
static inline int imx_dma_is_ipu(struct dma_chan *chan)
diff --git a/include/linux/platform_data/ti-sysc.h b/include/linux/platform_data/ti-sysc.h
index 9256c0305968..0c587d4fc718 100644
--- a/include/linux/platform_data/ti-sysc.h
+++ b/include/linux/platform_data/ti-sysc.h
@@ -19,6 +19,7 @@ enum ti_sysc_module_type {
struct ti_sysc_cookie {
void *data;
+ void *clkdm;
};
/**
@@ -46,6 +47,10 @@ struct sysc_regbits {
s8 emufree_shift;
};
+#define SYSC_MODULE_QUIRK_HDQ1W BIT(17)
+#define SYSC_MODULE_QUIRK_I2C BIT(16)
+#define SYSC_MODULE_QUIRK_WDT BIT(15)
+#define SYSS_QUIRK_RESETDONE_INVERTED BIT(14)
#define SYSC_QUIRK_SWSUP_MSTANDBY BIT(13)
#define SYSC_QUIRK_SWSUP_SIDLE_ACT BIT(12)
#define SYSC_QUIRK_SWSUP_SIDLE BIT(11)
@@ -125,9 +130,16 @@ struct ti_sysc_module_data {
};
struct device;
+struct clk;
struct ti_sysc_platform_data {
struct of_dev_auxdata *auxdata;
+ int (*init_clockdomain)(struct device *dev, struct clk *fck,
+ struct clk *ick, struct ti_sysc_cookie *cookie);
+ void (*clkdm_deny_idle)(struct device *dev,
+ const struct ti_sysc_cookie *cookie);
+ void (*clkdm_allow_idle)(struct device *dev,
+ const struct ti_sysc_cookie *cookie);
int (*init_module)(struct device *dev,
const struct ti_sysc_module_data *data,
struct ti_sysc_cookie *cookie);
diff --git a/include/linux/platform_data/video-clcd-versatile.h b/include/linux/platform_data/video-clcd-versatile.h
deleted file mode 100644
index 305ebaec3afd..000000000000
--- a/include/linux/platform_data/video-clcd-versatile.h
+++ /dev/null
@@ -1,28 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef PLAT_CLCD_H
-#define PLAT_CLCD_H
-
-#ifdef CONFIG_PLAT_VERSATILE_CLCD
-struct clcd_panel *versatile_clcd_get_panel(const char *);
-int versatile_clcd_setup_dma(struct clcd_fb *, unsigned long);
-int versatile_clcd_mmap_dma(struct clcd_fb *, struct vm_area_struct *);
-void versatile_clcd_remove_dma(struct clcd_fb *);
-#else
-static inline struct clcd_panel *versatile_clcd_get_panel(const char *s)
-{
- return NULL;
-}
-static inline int versatile_clcd_setup_dma(struct clcd_fb *fb, unsigned long framesize)
-{
- return -ENODEV;
-}
-static inline int versatile_clcd_mmap_dma(struct clcd_fb *fb, struct vm_area_struct *vm)
-{
- return -ENODEV;
-}
-static inline void versatile_clcd_remove_dma(struct clcd_fb *fb)
-{
-}
-#endif
-
-#endif
diff --git a/include/linux/platform_data/x86/asus-wmi.h b/include/linux/platform_data/x86/asus-wmi.h
index 8551156b8dca..4802cd2c7309 100644
--- a/include/linux/platform_data/x86/asus-wmi.h
+++ b/include/linux/platform_data/x86/asus-wmi.h
@@ -57,7 +57,7 @@
#define ASUS_WMI_DEVID_KBD_BACKLIGHT 0x00050021
#define ASUS_WMI_DEVID_LIGHT_SENSOR 0x00050022 /* ?? */
#define ASUS_WMI_DEVID_LIGHTBAR 0x00050025
-#define ASUS_WMI_DEVID_FAN_MODE 0x00110018
+#define ASUS_WMI_DEVID_FAN_BOOST_MODE 0x00110018
/* Misc */
#define ASUS_WMI_DEVID_CAMERA 0x00060013
diff --git a/include/linux/platform_device.h b/include/linux/platform_device.h
index beb25f277889..9bc36b589827 100644
--- a/include/linux/platform_device.h
+++ b/include/linux/platform_device.h
@@ -4,7 +4,7 @@
*
* Copyright (c) 2001-2003 Patrick Mochel <mochel@osdl.org>
*
- * See Documentation/driver-model/ for more information.
+ * See Documentation/driver-api/driver-model/ for more information.
*/
#ifndef _PLATFORM_DEVICE_H_
diff --git a/include/linux/pm_qos.h b/include/linux/pm_qos.h
index 6ea1ae373d77..2aebbc5b9950 100644
--- a/include/linux/pm_qos.h
+++ b/include/linux/pm_qos.h
@@ -40,6 +40,8 @@ enum pm_qos_flags_status {
#define PM_QOS_RESUME_LATENCY_NO_CONSTRAINT PM_QOS_LATENCY_ANY
#define PM_QOS_RESUME_LATENCY_NO_CONSTRAINT_NS PM_QOS_LATENCY_ANY_NS
#define PM_QOS_LATENCY_TOLERANCE_DEFAULT_VALUE 0
+#define PM_QOS_MIN_FREQUENCY_DEFAULT_VALUE 0
+#define PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE (-1)
#define PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT (-1)
#define PM_QOS_FLAG_NO_POWER_OFF (1 << 0)
@@ -58,6 +60,8 @@ struct pm_qos_flags_request {
enum dev_pm_qos_req_type {
DEV_PM_QOS_RESUME_LATENCY = 1,
DEV_PM_QOS_LATENCY_TOLERANCE,
+ DEV_PM_QOS_MIN_FREQUENCY,
+ DEV_PM_QOS_MAX_FREQUENCY,
DEV_PM_QOS_FLAGS,
};
@@ -99,10 +103,14 @@ struct pm_qos_flags {
struct dev_pm_qos {
struct pm_qos_constraints resume_latency;
struct pm_qos_constraints latency_tolerance;
+ struct pm_qos_constraints min_frequency;
+ struct pm_qos_constraints max_frequency;
struct pm_qos_flags flags;
struct dev_pm_qos_request *resume_latency_req;
struct dev_pm_qos_request *latency_tolerance_req;
struct dev_pm_qos_request *flags_req;
+ struct dev_pm_qos_request *min_frequency_req;
+ struct dev_pm_qos_request *max_frequency_req;
};
/* Action requested to pm_qos_update_target */
@@ -139,16 +147,18 @@ s32 pm_qos_read_value(struct pm_qos_constraints *c);
#ifdef CONFIG_PM
enum pm_qos_flags_status __dev_pm_qos_flags(struct device *dev, s32 mask);
enum pm_qos_flags_status dev_pm_qos_flags(struct device *dev, s32 mask);
-s32 __dev_pm_qos_read_value(struct device *dev);
-s32 dev_pm_qos_read_value(struct device *dev);
+s32 __dev_pm_qos_resume_latency(struct device *dev);
+s32 dev_pm_qos_read_value(struct device *dev, enum dev_pm_qos_req_type type);
int dev_pm_qos_add_request(struct device *dev, struct dev_pm_qos_request *req,
enum dev_pm_qos_req_type type, s32 value);
int dev_pm_qos_update_request(struct dev_pm_qos_request *req, s32 new_value);
int dev_pm_qos_remove_request(struct dev_pm_qos_request *req);
int dev_pm_qos_add_notifier(struct device *dev,
- struct notifier_block *notifier);
+ struct notifier_block *notifier,
+ enum dev_pm_qos_req_type type);
int dev_pm_qos_remove_notifier(struct device *dev,
- struct notifier_block *notifier);
+ struct notifier_block *notifier,
+ enum dev_pm_qos_req_type type);
void dev_pm_qos_constraints_init(struct device *dev);
void dev_pm_qos_constraints_destroy(struct device *dev);
int dev_pm_qos_add_ancestor_request(struct device *dev,
@@ -174,7 +184,7 @@ static inline s32 dev_pm_qos_requested_flags(struct device *dev)
return dev->power.qos->flags_req->data.flr.flags;
}
-static inline s32 dev_pm_qos_raw_read_value(struct device *dev)
+static inline s32 dev_pm_qos_raw_resume_latency(struct device *dev)
{
return IS_ERR_OR_NULL(dev->power.qos) ?
PM_QOS_RESUME_LATENCY_NO_CONSTRAINT :
@@ -187,10 +197,24 @@ static inline enum pm_qos_flags_status __dev_pm_qos_flags(struct device *dev,
static inline enum pm_qos_flags_status dev_pm_qos_flags(struct device *dev,
s32 mask)
{ return PM_QOS_FLAGS_UNDEFINED; }
-static inline s32 __dev_pm_qos_read_value(struct device *dev)
- { return PM_QOS_RESUME_LATENCY_NO_CONSTRAINT; }
-static inline s32 dev_pm_qos_read_value(struct device *dev)
+static inline s32 __dev_pm_qos_resume_latency(struct device *dev)
{ return PM_QOS_RESUME_LATENCY_NO_CONSTRAINT; }
+static inline s32 dev_pm_qos_read_value(struct device *dev,
+ enum dev_pm_qos_req_type type)
+{
+ switch (type) {
+ case DEV_PM_QOS_RESUME_LATENCY:
+ return PM_QOS_RESUME_LATENCY_NO_CONSTRAINT;
+ case DEV_PM_QOS_MIN_FREQUENCY:
+ return PM_QOS_MIN_FREQUENCY_DEFAULT_VALUE;
+ case DEV_PM_QOS_MAX_FREQUENCY:
+ return PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE;
+ default:
+ WARN_ON(1);
+ return 0;
+ }
+}
+
static inline int dev_pm_qos_add_request(struct device *dev,
struct dev_pm_qos_request *req,
enum dev_pm_qos_req_type type,
@@ -202,10 +226,12 @@ static inline int dev_pm_qos_update_request(struct dev_pm_qos_request *req,
static inline int dev_pm_qos_remove_request(struct dev_pm_qos_request *req)
{ return 0; }
static inline int dev_pm_qos_add_notifier(struct device *dev,
- struct notifier_block *notifier)
+ struct notifier_block *notifier,
+ enum dev_pm_qos_req_type type)
{ return 0; }
static inline int dev_pm_qos_remove_notifier(struct device *dev,
- struct notifier_block *notifier)
+ struct notifier_block *notifier,
+ enum dev_pm_qos_req_type type)
{ return 0; }
static inline void dev_pm_qos_constraints_init(struct device *dev)
{
@@ -241,7 +267,7 @@ static inline s32 dev_pm_qos_requested_resume_latency(struct device *dev)
return PM_QOS_RESUME_LATENCY_NO_CONSTRAINT;
}
static inline s32 dev_pm_qos_requested_flags(struct device *dev) { return 0; }
-static inline s32 dev_pm_qos_raw_read_value(struct device *dev)
+static inline s32 dev_pm_qos_raw_resume_latency(struct device *dev)
{
return PM_QOS_RESUME_LATENCY_NO_CONSTRAINT;
}
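Notifier registration is now per request type, and the two frequency constraint types join resume latency and latency tolerance. A hedged sketch of a consumer watching the aggregate maximum-frequency constraint of a device; the example_* names are placeholders:

#include <linux/device.h>
#include <linux/notifier.h>
#include <linux/pm_qos.h>

static int example_freq_notify(struct notifier_block *nb,
                               unsigned long value, void *data)
{
        /* @value is the new aggregate constraint; re-evaluate limits here. */
        return NOTIFY_OK;
}

static struct notifier_block example_freq_nb = {
        .notifier_call = example_freq_notify,
};

static int example_watch_max_freq(struct device *dev)
{
        return dev_pm_qos_add_notifier(dev, &example_freq_nb,
                                       DEV_PM_QOS_MAX_FREQUENCY);
}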
diff --git a/include/linux/poison.h b/include/linux/poison.h
index d6d980a681c7..df34330b4e34 100644
--- a/include/linux/poison.h
+++ b/include/linux/poison.h
@@ -21,7 +21,7 @@
* non-initialized list entries.
*/
#define LIST_POISON1 ((void *) 0x100 + POISON_POINTER_DELTA)
-#define LIST_POISON2 ((void *) 0x200 + POISON_POINTER_DELTA)
+#define LIST_POISON2 ((void *) 0x122 + POISON_POINTER_DELTA)
/********** include/linux/timer.h **********/
/*
diff --git a/include/linux/pseudo_fs.h b/include/linux/pseudo_fs.h
new file mode 100644
index 000000000000..eceda1d1407a
--- /dev/null
+++ b/include/linux/pseudo_fs.h
@@ -0,0 +1,16 @@
+#ifndef __LINUX_PSEUDO_FS__
+#define __LINUX_PSEUDO_FS__
+
+#include <linux/fs_context.h>
+
+struct pseudo_fs_context {
+ const struct super_operations *ops;
+ const struct xattr_handler **xattr;
+ const struct dentry_operations *dops;
+ unsigned long magic;
+};
+
+struct pseudo_fs_context *init_pseudo(struct fs_context *fc,
+ unsigned long magic);
+
+#endif
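init_pseudo() is the fs_context replacement for the mount_pseudo() helpers removed from fs.h above. A rough sketch of a pseudo filesystem using it; the magic value and examplefs_* names are placeholders:

#include <linux/fs.h>
#include <linux/pseudo_fs.h>

#define EXAMPLEFS_MAGIC 0x4578616d      /* placeholder magic */

static const struct super_operations examplefs_sops = {
        .statfs = simple_statfs,
};

static int examplefs_init_fs_context(struct fs_context *fc)
{
        struct pseudo_fs_context *ctx = init_pseudo(fc, EXAMPLEFS_MAGIC);

        if (!ctx)
                return -ENOMEM;
        ctx->ops = &examplefs_sops;
        return 0;
}

static struct file_system_type examplefs_type = {
        .name                   = "examplefs",
        .init_fs_context        = examplefs_init_fs_context,
        .kill_sb                = kill_anon_super,
};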
diff --git a/include/linux/ramfs.h b/include/linux/ramfs.h
index 5ef7d54caac2..ee582bdb7fda 100644
--- a/include/linux/ramfs.h
+++ b/include/linux/ramfs.h
@@ -19,7 +19,6 @@ extern int ramfs_nommu_expand_for_mapping(struct inode *inode, size_t newsize);
extern const struct file_operations ramfs_file_operations;
extern const struct vm_operations_struct generic_file_vm_ops;
-extern int __init init_ramfs_fs(void);
int ramfs_fill_super(struct super_block *sb, void *data, int silent);
diff --git a/include/linux/rbtree.h b/include/linux/rbtree.h
index e6337fce08f2..1fd61a9af45c 100644
--- a/include/linux/rbtree.h
+++ b/include/linux/rbtree.h
@@ -32,25 +32,9 @@ struct rb_root {
struct rb_node *rb_node;
};
-/*
- * Leftmost-cached rbtrees.
- *
- * We do not cache the rightmost node based on footprint
- * size vs number of potential users that could benefit
- * from O(1) rb_last(). Just not worth it, users that want
- * this feature can always implement the logic explicitly.
- * Furthermore, users that want to cache both pointers may
- * find it a bit asymmetric, but that's ok.
- */
-struct rb_root_cached {
- struct rb_root rb_root;
- struct rb_node *rb_leftmost;
-};
-
#define rb_parent(r) ((struct rb_node *)((r)->__rb_parent_color & ~3))
#define RB_ROOT (struct rb_root) { NULL, }
-#define RB_ROOT_CACHED (struct rb_root_cached) { {NULL, }, NULL }
#define rb_entry(ptr, type, member) container_of(ptr, type, member)
#define RB_EMPTY_ROOT(root) (READ_ONCE((root)->rb_node) == NULL)
@@ -72,12 +56,6 @@ extern struct rb_node *rb_prev(const struct rb_node *);
extern struct rb_node *rb_first(const struct rb_root *);
extern struct rb_node *rb_last(const struct rb_root *);
-extern void rb_insert_color_cached(struct rb_node *,
- struct rb_root_cached *, bool);
-extern void rb_erase_cached(struct rb_node *node, struct rb_root_cached *);
-/* Same as rb_first(), but O(1) */
-#define rb_first_cached(root) (root)->rb_leftmost
-
/* Postorder iteration - always visit the parent after its children */
extern struct rb_node *rb_first_postorder(const struct rb_root *);
extern struct rb_node *rb_next_postorder(const struct rb_node *);
@@ -87,8 +65,6 @@ extern void rb_replace_node(struct rb_node *victim, struct rb_node *new,
struct rb_root *root);
extern void rb_replace_node_rcu(struct rb_node *victim, struct rb_node *new,
struct rb_root *root);
-extern void rb_replace_node_cached(struct rb_node *victim, struct rb_node *new,
- struct rb_root_cached *root);
static inline void rb_link_node(struct rb_node *node, struct rb_node *parent,
struct rb_node **rb_link)
@@ -136,4 +112,50 @@ static inline void rb_link_node_rcu(struct rb_node *node, struct rb_node *parent
typeof(*pos), field); 1; }); \
pos = n)
+/*
+ * Leftmost-cached rbtrees.
+ *
+ * We do not cache the rightmost node based on footprint
+ * size vs number of potential users that could benefit
+ * from O(1) rb_last(). Just not worth it, users that want
+ * this feature can always implement the logic explicitly.
+ * Furthermore, users that want to cache both pointers may
+ * find it a bit asymmetric, but that's ok.
+ */
+struct rb_root_cached {
+ struct rb_root rb_root;
+ struct rb_node *rb_leftmost;
+};
+
+#define RB_ROOT_CACHED (struct rb_root_cached) { {NULL, }, NULL }
+
+/* Same as rb_first(), but O(1) */
+#define rb_first_cached(root) (root)->rb_leftmost
+
+static inline void rb_insert_color_cached(struct rb_node *node,
+ struct rb_root_cached *root,
+ bool leftmost)
+{
+ if (leftmost)
+ root->rb_leftmost = node;
+ rb_insert_color(node, &root->rb_root);
+}
+
+static inline void rb_erase_cached(struct rb_node *node,
+ struct rb_root_cached *root)
+{
+ if (root->rb_leftmost == node)
+ root->rb_leftmost = rb_next(node);
+ rb_erase(node, &root->rb_root);
+}
+
+static inline void rb_replace_node_cached(struct rb_node *victim,
+ struct rb_node *new,
+ struct rb_root_cached *root)
+{
+ if (root->rb_leftmost == victim)
+ root->rb_leftmost = new;
+ rb_replace_node(victim, new, &root->rb_root);
+}
+
#endif /* _LINUX_RBTREE_H */
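
With the cached helpers now defined inline above, a minimal usage sketch may help; the struct item, its key field, and the comparison below are invented for illustration and are not part of this patch:

#include <linux/rbtree.h>

struct item {
        struct rb_node node;
        unsigned long key;
};

static struct rb_root_cached item_tree = RB_ROOT_CACHED;

static void item_insert(struct item *new)
{
        struct rb_node **link = &item_tree.rb_root.rb_node, *parent = NULL;
        bool leftmost = true;

        while (*link) {
                struct item *cur = rb_entry(*link, struct item, node);

                parent = *link;
                if (new->key < cur->key) {
                        link = &parent->rb_left;
                } else {
                        link = &parent->rb_right;
                        leftmost = false;
                }
        }

        rb_link_node(&new->node, parent, link);
        rb_insert_color_cached(&new->node, &item_tree, leftmost);
}

/* O(1) lookup of the smallest key via the cached leftmost node */
static struct item *item_first(void)
{
        struct rb_node *n = rb_first_cached(&item_tree);

        return n ? rb_entry(n, struct item, node) : NULL;
}
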
diff --git a/include/linux/rbtree_augmented.h b/include/linux/rbtree_augmented.h
index 0f902ccb48b0..179faab29f52 100644
--- a/include/linux/rbtree_augmented.h
+++ b/include/linux/rbtree_augmented.h
@@ -30,10 +30,9 @@ struct rb_augment_callbacks {
void (*rotate)(struct rb_node *old, struct rb_node *new);
};
-extern void __rb_insert_augmented(struct rb_node *node,
- struct rb_root *root,
- bool newleft, struct rb_node **leftmost,
+extern void __rb_insert_augmented(struct rb_node *node, struct rb_root *root,
void (*augment_rotate)(struct rb_node *old, struct rb_node *new));
+
/*
* Fixup the rbtree and update the augmented information when rebalancing.
*
@@ -48,7 +47,7 @@ static inline void
rb_insert_augmented(struct rb_node *node, struct rb_root *root,
const struct rb_augment_callbacks *augment)
{
- __rb_insert_augmented(node, root, false, NULL, augment->rotate);
+ __rb_insert_augmented(node, root, augment->rotate);
}
static inline void
@@ -56,8 +55,9 @@ rb_insert_augmented_cached(struct rb_node *node,
struct rb_root_cached *root, bool newleft,
const struct rb_augment_callbacks *augment)
{
- __rb_insert_augmented(node, &root->rb_root,
- newleft, &root->rb_leftmost, augment->rotate);
+ if (newleft)
+ root->rb_leftmost = node;
+ rb_insert_augmented(node, &root->rb_root, augment);
}
#define RB_DECLARE_CALLBACKS(rbstatic, rbname, rbstruct, rbfield, \
@@ -150,7 +150,6 @@ extern void __rb_erase_color(struct rb_node *parent, struct rb_root *root,
static __always_inline struct rb_node *
__rb_erase_augmented(struct rb_node *node, struct rb_root *root,
- struct rb_node **leftmost,
const struct rb_augment_callbacks *augment)
{
struct rb_node *child = node->rb_right;
@@ -158,9 +157,6 @@ __rb_erase_augmented(struct rb_node *node, struct rb_root *root,
struct rb_node *parent, *rebalance;
unsigned long pc;
- if (leftmost && node == *leftmost)
- *leftmost = rb_next(node);
-
if (!tmp) {
/*
* Case 1: node to erase has no more than 1 child (easy!)
@@ -260,8 +256,7 @@ static __always_inline void
rb_erase_augmented(struct rb_node *node, struct rb_root *root,
const struct rb_augment_callbacks *augment)
{
- struct rb_node *rebalance = __rb_erase_augmented(node, root,
- NULL, augment);
+ struct rb_node *rebalance = __rb_erase_augmented(node, root, augment);
if (rebalance)
__rb_erase_color(rebalance, root, augment->rotate);
}
@@ -270,11 +265,9 @@ static __always_inline void
rb_erase_augmented_cached(struct rb_node *node, struct rb_root_cached *root,
const struct rb_augment_callbacks *augment)
{
- struct rb_node *rebalance = __rb_erase_augmented(node, &root->rb_root,
- &root->rb_leftmost,
- augment);
- if (rebalance)
- __rb_erase_color(rebalance, &root->rb_root, augment->rotate);
+ if (root->rb_leftmost == node)
+ root->rb_leftmost = rb_next(node);
+ rb_erase_augmented(node, &root->rb_root, augment);
}
#endif /* _LINUX_RBTREE_AUGMENTED_H */
diff --git a/include/linux/remoteproc.h b/include/linux/remoteproc.h
index 04d04709f2bd..16ad66683ad0 100644
--- a/include/linux/remoteproc.h
+++ b/include/linux/remoteproc.h
@@ -100,7 +100,9 @@ struct fw_rsc_hdr {
* the remote processor will be writing logs.
* @RSC_VDEV: declare support for a virtio device, and serve as its
* virtio header.
- * @RSC_LAST: just keep this one at the end
+ * @RSC_LAST: just keep this one at the end of standard resources
+ * @RSC_VENDOR_START: start of the vendor specific resource types range
+ * @RSC_VENDOR_END: end of the vendor specific resource types range
*
* For more details regarding a specific resource type, please see its
* dedicated structure below.
@@ -111,11 +113,13 @@ struct fw_rsc_hdr {
* please update it as needed.
*/
enum fw_resource_type {
- RSC_CARVEOUT = 0,
- RSC_DEVMEM = 1,
- RSC_TRACE = 2,
- RSC_VDEV = 3,
- RSC_LAST = 4,
+ RSC_CARVEOUT = 0,
+ RSC_DEVMEM = 1,
+ RSC_TRACE = 2,
+ RSC_VDEV = 3,
+ RSC_LAST = 4,
+ RSC_VENDOR_START = 128,
+ RSC_VENDOR_END = 512,
};
#define FW_RSC_ADDR_ANY (-1)
@@ -340,12 +344,26 @@ struct rproc_mem_entry {
struct firmware;
/**
+ * enum rsc_handling_status - return status of rproc_ops handle_rsc hook
+ * @RSC_HANDLED: resource was handled
+ * @RSC_IGNORED: resource was ignored
+ */
+enum rsc_handling_status {
+ RSC_HANDLED = 0,
+ RSC_IGNORED = 1,
+};
+
+/**
* struct rproc_ops - platform-specific device handlers
* @start: power on the device and boot it
* @stop: power off the device
* @kick: kick a virtqueue (virtqueue id given as a parameter)
* @da_to_va: optional platform hook to perform address translations
* @parse_fw: parse firmware to extract information (e.g. resource table)
+ * @handle_rsc: optional platform hook to handle vendor resources. Should return
+ * RSC_HANDLED if resource was handled, RSC_IGNORED if not handled and a
+ * negative value on error
+ * @load_rsc_table: load resource table from firmware image
* @find_loaded_rsc_table: find the loaded resource table
* @load: load firmware to memory, where the remote processor
* expects to find it
@@ -358,6 +376,8 @@ struct rproc_ops {
void (*kick)(struct rproc *rproc, int vqid);
void * (*da_to_va)(struct rproc *rproc, u64 da, int len);
int (*parse_fw)(struct rproc *rproc, const struct firmware *fw);
+ int (*handle_rsc)(struct rproc *rproc, u32 rsc_type, void *rsc,
+ int offset, int avail);
struct resource_table *(*find_loaded_rsc_table)(
struct rproc *rproc, const struct firmware *fw);
int (*load)(struct rproc *rproc, const struct firmware *fw);
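
A sketch of how a platform driver might implement the new @handle_rsc hook; the vendor resource type, its layout, and the driver names are hypothetical, and only the relevant op is shown:

#include <linux/remoteproc.h>

#define RSC_VENDOR_TIMESTAMP    (RSC_VENDOR_START + 1)  /* hypothetical type */

struct fw_rsc_vendor_timestamp {                        /* hypothetical layout */
        u32 da;
        u32 len;
};

static int my_rproc_handle_rsc(struct rproc *rproc, u32 rsc_type, void *rsc,
                               int offset, int avail)
{
        struct fw_rsc_vendor_timestamp *ts = rsc;

        if (rsc_type != RSC_VENDOR_TIMESTAMP)
                return RSC_IGNORED;             /* let the core skip it */

        if (sizeof(*ts) > avail)
                return -EINVAL;                 /* truncated resource entry */

        /* ... program the device using ts->da and ts->len ... */
        return RSC_HANDLED;
}

static const struct rproc_ops my_rproc_ops = {
        /* .start, .stop, etc. omitted for brevity */
        .handle_rsc     = my_rproc_handle_rsc,
};
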
diff --git a/include/linux/rwsem.h b/include/linux/rwsem.h
index e401358c4e7e..9d9c663987d8 100644
--- a/include/linux/rwsem.h
+++ b/include/linux/rwsem.h
@@ -160,7 +160,7 @@ extern void downgrade_write(struct rw_semaphore *sem);
* static then another method for expressing nested locking is
* the explicit definition of lock class keys and the use of
* lockdep_set_class() at lock initialization time.
- * See Documentation/locking/lockdep-design.txt for more details.)
+ * See Documentation/locking/lockdep-design.rst for more details.)
*/
extern void down_read_nested(struct rw_semaphore *sem, int subclass);
extern void down_write_nested(struct rw_semaphore *sem, int subclass);
diff --git a/include/linux/sched/signal.h b/include/linux/sched/signal.h
index 532458698bde..efd8ce7675ed 100644
--- a/include/linux/sched/signal.h
+++ b/include/linux/sched/signal.h
@@ -15,10 +15,10 @@
*/
struct sighand_struct {
- refcount_t count;
- struct k_sigaction action[_NSIG];
spinlock_t siglock;
+ refcount_t count;
wait_queue_head_t signalfd_wqh;
+ struct k_sigaction action[_NSIG];
};
/*
@@ -420,7 +420,6 @@ void task_join_group_stop(struct task_struct *task);
static inline void set_restore_sigmask(void)
{
set_thread_flag(TIF_RESTORE_SIGMASK);
- WARN_ON(!test_thread_flag(TIF_SIGPENDING));
}
static inline void clear_tsk_restore_sigmask(struct task_struct *task)
@@ -451,7 +450,6 @@ static inline bool test_and_clear_restore_sigmask(void)
static inline void set_restore_sigmask(void)
{
current->restore_sigmask = true;
- WARN_ON(!test_thread_flag(TIF_SIGPENDING));
}
static inline void clear_tsk_restore_sigmask(struct task_struct *task)
{
@@ -484,6 +482,16 @@ static inline void restore_saved_sigmask(void)
__set_current_blocked(&current->saved_sigmask);
}
+extern int set_user_sigmask(const sigset_t __user *umask, size_t sigsetsize);
+
+static inline void restore_saved_sigmask_unless(bool interrupted)
+{
+ if (interrupted)
+ WARN_ON(!test_thread_flag(TIF_SIGPENDING));
+ else
+ restore_saved_sigmask();
+}
+
static inline sigset_t *sigmask_to_save(void)
{
sigset_t *res = &current->blocked;
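
The set_user_sigmask()/restore_saved_sigmask_unless() pair is intended to replace the removed restore_user_sigmask() open coding; a sketch of the expected call pattern in a ppoll/pselect-style path, where do_the_actual_wait() stands in for the real wait loop:

static long my_wait_with_sigmask(const sigset_t __user *umask, size_t sigsetsize)
{
        long ret;

        /* Install the caller's mask; the old one is kept in ->saved_sigmask */
        ret = set_user_sigmask(umask, sigsetsize);
        if (ret)
                return ret;

        ret = do_the_actual_wait();             /* placeholder for the real work */

        /*
         * If a signal interrupted the wait, leave the temporary mask in
         * place so the signal can be delivered against it; signal delivery
         * restores the saved mask via TIF_RESTORE_SIGMASK. Otherwise put
         * the original mask back right away.
         */
        restore_saved_sigmask_unless(ret == -EINTR);
        return ret;
}
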
diff --git a/include/linux/sched/task.h b/include/linux/sched/task.h
index 109a0df5af39..0497091e40c1 100644
--- a/include/linux/sched/task.h
+++ b/include/linux/sched/task.h
@@ -89,6 +89,7 @@ extern void exit_files(struct task_struct *);
extern void exit_itimers(struct signal_struct *);
extern long _do_fork(struct kernel_clone_args *kargs);
+extern bool legacy_clone_args_valid(const struct kernel_clone_args *kargs);
extern long do_fork(unsigned long, unsigned long, unsigned long, int __user *, int __user *);
struct task_struct *fork_idle(int);
struct mm_struct *copy_init_mm(void);
diff --git a/include/linux/scmi_protocol.h b/include/linux/scmi_protocol.h
index 3105055c00a7..9ff2e9357e9a 100644
--- a/include/linux/scmi_protocol.h
+++ b/include/linux/scmi_protocol.h
@@ -144,6 +144,7 @@ struct scmi_power_ops {
struct scmi_sensor_info {
u32 id;
u8 type;
+ s8 scale;
char name[SCMI_MAX_STR_SIZE];
};
diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h
index 05b179015d6c..2b78cc734719 100644
--- a/include/linux/serial_core.h
+++ b/include/linux/serial_core.h
@@ -32,7 +32,7 @@ struct device;
/*
* This structure describes all the operations that can be done on the
- * physical hardware. See Documentation/serial/driver.rst for details.
+ * physical hardware. See Documentation/driver-api/serial/driver.rst for details.
*/
struct uart_ops {
unsigned int (*tx_empty)(struct uart_port *);
diff --git a/include/linux/signal.h b/include/linux/signal.h
index 78c2bb376954..b5d99482d3fe 100644
--- a/include/linux/signal.h
+++ b/include/linux/signal.h
@@ -273,10 +273,6 @@ extern int group_send_sig_info(int sig, struct kernel_siginfo *info,
struct task_struct *p, enum pid_type type);
extern int __group_send_sig_info(int, struct kernel_siginfo *, struct task_struct *);
extern int sigprocmask(int, sigset_t *, sigset_t *);
-extern int set_user_sigmask(const sigset_t __user *usigmask, sigset_t *set,
- sigset_t *oldset, size_t sigsetsize);
-extern void restore_user_sigmask(const void __user *usigmask,
- sigset_t *sigsaved, bool interrupted);
extern void set_current_blocked(sigset_t *);
extern void __set_current_blocked(const sigset_t *);
extern int show_unhandled_signals;
diff --git a/include/linux/soc/qcom/mdt_loader.h b/include/linux/soc/qcom/mdt_loader.h
index 944b06aefb0f..e600baec6825 100644
--- a/include/linux/soc/qcom/mdt_loader.h
+++ b/include/linux/soc/qcom/mdt_loader.h
@@ -21,4 +21,6 @@ int qcom_mdt_load_no_init(struct device *dev, const struct firmware *fw,
const char *fw_name, int pas_id, void *mem_region,
phys_addr_t mem_phys, size_t mem_size,
phys_addr_t *reloc_base);
+void *qcom_mdt_read_metadata(const struct firmware *fw, size_t *data_len);
+
#endif
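
qcom_mdt_read_metadata() is expected to hand back the firmware's metadata blob for authentication; a hedged sketch of how a caller might combine it with qcom_scm_pas_init_image(), assuming the helper follows the usual ERR_PTR convention and the blob is freed by the caller:

#include <linux/err.h>
#include <linux/firmware.h>
#include <linux/qcom_scm.h>
#include <linux/slab.h>
#include <linux/soc/qcom/mdt_loader.h>

static int my_init_image(unsigned int pas_id, const struct firmware *fw)
{
        size_t len;
        void *metadata;
        int ret;

        metadata = qcom_mdt_read_metadata(fw, &len);
        if (IS_ERR(metadata))
                return PTR_ERR(metadata);

        ret = qcom_scm_pas_init_image(pas_id, metadata, len);

        kfree(metadata);
        return ret;
}
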
diff --git a/include/linux/soc/ti/ti_sci_protocol.h b/include/linux/soc/ti/ti_sci_protocol.h
index 568722a041bf..6c610e188a44 100644
--- a/include/linux/soc/ti/ti_sci_protocol.h
+++ b/include/linux/soc/ti/ti_sci_protocol.h
@@ -166,29 +166,29 @@ struct ti_sci_dev_ops {
* managed by driver for that purpose.
*/
struct ti_sci_clk_ops {
- int (*get_clock)(const struct ti_sci_handle *handle, u32 did, u8 cid,
+ int (*get_clock)(const struct ti_sci_handle *handle, u32 did, u32 cid,
bool needs_ssc, bool can_change_freq,
bool enable_input_term);
- int (*idle_clock)(const struct ti_sci_handle *handle, u32 did, u8 cid);
- int (*put_clock)(const struct ti_sci_handle *handle, u32 did, u8 cid);
- int (*is_auto)(const struct ti_sci_handle *handle, u32 did, u8 cid,
+ int (*idle_clock)(const struct ti_sci_handle *handle, u32 did, u32 cid);
+ int (*put_clock)(const struct ti_sci_handle *handle, u32 did, u32 cid);
+ int (*is_auto)(const struct ti_sci_handle *handle, u32 did, u32 cid,
bool *req_state);
- int (*is_on)(const struct ti_sci_handle *handle, u32 did, u8 cid,
+ int (*is_on)(const struct ti_sci_handle *handle, u32 did, u32 cid,
bool *req_state, bool *current_state);
- int (*is_off)(const struct ti_sci_handle *handle, u32 did, u8 cid,
+ int (*is_off)(const struct ti_sci_handle *handle, u32 did, u32 cid,
bool *req_state, bool *current_state);
- int (*set_parent)(const struct ti_sci_handle *handle, u32 did, u8 cid,
- u8 parent_id);
- int (*get_parent)(const struct ti_sci_handle *handle, u32 did, u8 cid,
- u8 *parent_id);
+ int (*set_parent)(const struct ti_sci_handle *handle, u32 did, u32 cid,
+ u32 parent_id);
+ int (*get_parent)(const struct ti_sci_handle *handle, u32 did, u32 cid,
+ u32 *parent_id);
int (*get_num_parents)(const struct ti_sci_handle *handle, u32 did,
- u8 cid, u8 *num_parents);
+ u32 cid, u32 *num_parents);
int (*get_best_match_freq)(const struct ti_sci_handle *handle, u32 did,
- u8 cid, u64 min_freq, u64 target_freq,
+ u32 cid, u64 min_freq, u64 target_freq,
u64 max_freq, u64 *match_freq);
- int (*set_freq)(const struct ti_sci_handle *handle, u32 did, u8 cid,
+ int (*set_freq)(const struct ti_sci_handle *handle, u32 did, u32 cid,
u64 min_freq, u64 target_freq, u64 max_freq);
- int (*get_freq)(const struct ti_sci_handle *handle, u32 did, u8 cid,
+ int (*get_freq)(const struct ti_sci_handle *handle, u32 did, u32 cid,
u64 *current_freq);
};
@@ -241,12 +241,254 @@ struct ti_sci_rm_irq_ops {
u16 global_event, u8 vint_status_bit);
};
+/* RA config.addr_lo parameter is valid for RM ring configure TI_SCI message */
+#define TI_SCI_MSG_VALUE_RM_RING_ADDR_LO_VALID BIT(0)
+/* RA config.addr_hi parameter is valid for RM ring configure TI_SCI message */
+#define TI_SCI_MSG_VALUE_RM_RING_ADDR_HI_VALID BIT(1)
+/* RA config.count parameter is valid for RM ring configure TI_SCI message */
+#define TI_SCI_MSG_VALUE_RM_RING_COUNT_VALID BIT(2)
+/* RA config.mode parameter is valid for RM ring configure TI_SCI message */
+#define TI_SCI_MSG_VALUE_RM_RING_MODE_VALID BIT(3)
+/* RA config.size parameter is valid for RM ring configure TI_SCI message */
+#define TI_SCI_MSG_VALUE_RM_RING_SIZE_VALID BIT(4)
+/* RA config.order_id parameter is valid for RM ring configure TISCI message */
+#define TI_SCI_MSG_VALUE_RM_RING_ORDER_ID_VALID BIT(5)
+
+#define TI_SCI_MSG_VALUE_RM_ALL_NO_ORDER \
+ (TI_SCI_MSG_VALUE_RM_RING_ADDR_LO_VALID | \
+ TI_SCI_MSG_VALUE_RM_RING_ADDR_HI_VALID | \
+ TI_SCI_MSG_VALUE_RM_RING_COUNT_VALID | \
+ TI_SCI_MSG_VALUE_RM_RING_MODE_VALID | \
+ TI_SCI_MSG_VALUE_RM_RING_SIZE_VALID)
+
+/**
+ * struct ti_sci_rm_ringacc_ops - Ring Accelerator Management operations
+ * @config: configure the SoC Navigator Subsystem Ring Accelerator ring
+ * @get_config: get the SoC Navigator Subsystem Ring Accelerator ring
+ * configuration
+ */
+struct ti_sci_rm_ringacc_ops {
+ int (*config)(const struct ti_sci_handle *handle,
+ u32 valid_params, u16 nav_id, u16 index,
+ u32 addr_lo, u32 addr_hi, u32 count, u8 mode,
+ u8 size, u8 order_id
+ );
+ int (*get_config)(const struct ti_sci_handle *handle,
+ u32 nav_id, u32 index, u8 *mode,
+ u32 *addr_lo, u32 *addr_hi, u32 *count,
+ u8 *size, u8 *order_id);
+};
+
+/**
+ * struct ti_sci_rm_psil_ops - PSI-L thread operations
+ * @pair: pair PSI-L source thread to a destination thread.
+ * If the src_thread is mapped to UDMA tchan, the corresponding channel's
+ * TCHAN_THRD_ID register is updated.
+ * If the dst_thread is mapped to UDMA rchan, the corresponding channel's
+ * RCHAN_THRD_ID register is updated.
+ * @unpair: unpair PSI-L source thread from a destination thread.
+ * If the src_thread is mapped to UDMA tchan, the corresponding channel's
+ * TCHAN_THRD_ID register is cleared.
+ * If the dst_thread is mapped to UDMA rchan, the corresponding channel's
+ * RCHAN_THRD_ID register is cleared.
+ */
+struct ti_sci_rm_psil_ops {
+ int (*pair)(const struct ti_sci_handle *handle, u32 nav_id,
+ u32 src_thread, u32 dst_thread);
+ int (*unpair)(const struct ti_sci_handle *handle, u32 nav_id,
+ u32 src_thread, u32 dst_thread);
+};
+
+/* UDMAP channel types */
+#define TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR 2
+#define TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR_SB 3 /* RX only */
+#define TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_PBRR 10
+#define TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_PBVR 11
+#define TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_BCOPY_PBRR 12
+#define TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_BCOPY_PBVR 13
+
+#define TI_SCI_RM_UDMAP_RX_FLOW_DESC_HOST 0
+#define TI_SCI_RM_UDMAP_RX_FLOW_DESC_MONO 2
+
+#define TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES 1
+#define TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_128_BYTES 2
+#define TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_256_BYTES 3
+
+/* UDMAP TX/RX channel valid_params common declarations */
+#define TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID BIT(0)
+#define TI_SCI_MSG_VALUE_RM_UDMAP_CH_ATYPE_VALID BIT(1)
+#define TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID BIT(2)
+#define TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID BIT(3)
+#define TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID BIT(4)
+#define TI_SCI_MSG_VALUE_RM_UDMAP_CH_PRIORITY_VALID BIT(5)
+#define TI_SCI_MSG_VALUE_RM_UDMAP_CH_QOS_VALID BIT(6)
+#define TI_SCI_MSG_VALUE_RM_UDMAP_CH_ORDER_ID_VALID BIT(7)
+#define TI_SCI_MSG_VALUE_RM_UDMAP_CH_SCHED_PRIORITY_VALID BIT(8)
+#define TI_SCI_MSG_VALUE_RM_UDMAP_CH_BURST_SIZE_VALID BIT(14)
+
+/**
+ * Configures a Navigator Subsystem UDMAP transmit channel
+ *
+ * Configures a Navigator Subsystem UDMAP transmit channel registers.
+ * See @ti_sci_msg_rm_udmap_tx_ch_cfg_req
+ */
+struct ti_sci_msg_rm_udmap_tx_ch_cfg {
+ u32 valid_params;
+#define TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_FILT_EINFO_VALID BIT(9)
+#define TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_FILT_PSWORDS_VALID BIT(10)
+#define TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_SUPR_TDPKT_VALID BIT(11)
+#define TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_CREDIT_COUNT_VALID BIT(12)
+#define TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_FDEPTH_VALID BIT(13)
+ u16 nav_id;
+ u16 index;
+ u8 tx_pause_on_err;
+ u8 tx_filt_einfo;
+ u8 tx_filt_pswords;
+ u8 tx_atype;
+ u8 tx_chan_type;
+ u8 tx_supr_tdpkt;
+ u16 tx_fetch_size;
+ u8 tx_credit_count;
+ u16 txcq_qnum;
+ u8 tx_priority;
+ u8 tx_qos;
+ u8 tx_orderid;
+ u16 fdepth;
+ u8 tx_sched_priority;
+ u8 tx_burst_size;
+};
+
+/**
+ * Configures a Navigator Subsystem UDMAP receive channel
+ *
+ * Configures a Navigator Subsystem UDMAP receive channel registers.
+ * See @ti_sci_msg_rm_udmap_rx_ch_cfg_req
+ */
+struct ti_sci_msg_rm_udmap_rx_ch_cfg {
+ u32 valid_params;
+#define TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_START_VALID BIT(9)
+#define TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_CNT_VALID BIT(10)
+#define TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_IGNORE_SHORT_VALID BIT(11)
+#define TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_IGNORE_LONG_VALID BIT(12)
+ u16 nav_id;
+ u16 index;
+ u16 rx_fetch_size;
+ u16 rxcq_qnum;
+ u8 rx_priority;
+ u8 rx_qos;
+ u8 rx_orderid;
+ u8 rx_sched_priority;
+ u16 flowid_start;
+ u16 flowid_cnt;
+ u8 rx_pause_on_err;
+ u8 rx_atype;
+ u8 rx_chan_type;
+ u8 rx_ignore_short;
+ u8 rx_ignore_long;
+ u8 rx_burst_size;
+};
+
+/**
+ * Configures a Navigator Subsystem UDMAP receive flow
+ *
+ * Configures a Navigator Subsystem UDMAP receive flow's registers.
+ * See @ti_sci_msg_rm_udmap_flow_cfg_req
+ */
+struct ti_sci_msg_rm_udmap_flow_cfg {
+ u32 valid_params;
+#define TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_EINFO_PRESENT_VALID BIT(0)
+#define TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_PSINFO_PRESENT_VALID BIT(1)
+#define TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_ERROR_HANDLING_VALID BIT(2)
+#define TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DESC_TYPE_VALID BIT(3)
+#define TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SOP_OFFSET_VALID BIT(4)
+#define TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_QNUM_VALID BIT(5)
+#define TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_HI_VALID BIT(6)
+#define TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_LO_VALID BIT(7)
+#define TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_HI_VALID BIT(8)
+#define TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_LO_VALID BIT(9)
+#define TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_HI_SEL_VALID BIT(10)
+#define TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_LO_SEL_VALID BIT(11)
+#define TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_HI_SEL_VALID BIT(12)
+#define TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_LO_SEL_VALID BIT(13)
+#define TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ0_SZ0_QNUM_VALID BIT(14)
+#define TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ1_QNUM_VALID BIT(15)
+#define TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ2_QNUM_VALID BIT(16)
+#define TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ3_QNUM_VALID BIT(17)
+#define TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_PS_LOCATION_VALID BIT(18)
+ u16 nav_id;
+ u16 flow_index;
+ u8 rx_einfo_present;
+ u8 rx_psinfo_present;
+ u8 rx_error_handling;
+ u8 rx_desc_type;
+ u16 rx_sop_offset;
+ u16 rx_dest_qnum;
+ u8 rx_src_tag_hi;
+ u8 rx_src_tag_lo;
+ u8 rx_dest_tag_hi;
+ u8 rx_dest_tag_lo;
+ u8 rx_src_tag_hi_sel;
+ u8 rx_src_tag_lo_sel;
+ u8 rx_dest_tag_hi_sel;
+ u8 rx_dest_tag_lo_sel;
+ u16 rx_fdq0_sz0_qnum;
+ u16 rx_fdq1_qnum;
+ u16 rx_fdq2_qnum;
+ u16 rx_fdq3_qnum;
+ u8 rx_ps_location;
+};
+
+/**
+ * struct ti_sci_rm_udmap_ops - UDMA Management operations
+ * @tx_ch_cfg: configure SoC Navigator Subsystem UDMA transmit channel.
+ * @rx_ch_cfg: configure SoC Navigator Subsystem UDMA receive channel.
+ * @rx_flow_cfg: configure SoC Navigator Subsystem UDMA receive flow.
+ */
+struct ti_sci_rm_udmap_ops {
+ int (*tx_ch_cfg)(const struct ti_sci_handle *handle,
+ const struct ti_sci_msg_rm_udmap_tx_ch_cfg *params);
+ int (*rx_ch_cfg)(const struct ti_sci_handle *handle,
+ const struct ti_sci_msg_rm_udmap_rx_ch_cfg *params);
+ int (*rx_flow_cfg)(const struct ti_sci_handle *handle,
+ const struct ti_sci_msg_rm_udmap_flow_cfg *params);
+};
+
+/**
+ * struct ti_sci_proc_ops - Processor Control operations
+ * @request: Request to control a physical processor. The requesting host
+ * should be in the processor access list
+ * @release: Relinquish a physical processor control
+ * @handover: Handover a physical processor control to another host
+ * in the permitted list
+ * @set_config: Set base configuration of a processor
+ * @set_control: Setup limited control flags in specific cases
+ * @get_status: Get the state of physical processor
+ *
+ * NOTE: The following parameters are generic in nature for all these ops,
+ * -handle: Pointer to TI SCI handle as retrieved by *ti_sci_get_handle
+ * -pid: Processor ID
+ * -hid: Host ID
+ */
+struct ti_sci_proc_ops {
+ int (*request)(const struct ti_sci_handle *handle, u8 pid);
+ int (*release)(const struct ti_sci_handle *handle, u8 pid);
+ int (*handover)(const struct ti_sci_handle *handle, u8 pid, u8 hid);
+ int (*set_config)(const struct ti_sci_handle *handle, u8 pid,
+ u64 boot_vector, u32 cfg_set, u32 cfg_clr);
+ int (*set_control)(const struct ti_sci_handle *handle, u8 pid,
+ u32 ctrl_set, u32 ctrl_clr);
+ int (*get_status)(const struct ti_sci_handle *handle, u8 pid,
+ u64 *boot_vector, u32 *cfg_flags, u32 *ctrl_flags,
+ u32 *status_flags);
+};
+
/**
* struct ti_sci_ops - Function support for TI SCI
* @dev_ops: Device specific operations
* @clk_ops: Clock specific operations
* @rm_core_ops: Resource management core operations.
* @rm_irq_ops: IRQ management specific operations
+ * @proc_ops: Processor Control specific operations
*/
struct ti_sci_ops {
struct ti_sci_core_ops core_ops;
@@ -254,6 +496,10 @@ struct ti_sci_ops {
struct ti_sci_clk_ops clk_ops;
struct ti_sci_rm_core_ops rm_core_ops;
struct ti_sci_rm_irq_ops rm_irq_ops;
+ struct ti_sci_rm_ringacc_ops rm_ring_ops;
+ struct ti_sci_rm_psil_ops rm_psil_ops;
+ struct ti_sci_rm_udmap_ops rm_udmap_ops;
+ struct ti_sci_proc_ops proc_ops;
};
/**
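
A sketch of driving the new processor-control ops through a TI SCI handle; the processor ID, boot vector, and config bits below are made-up values, and error paths are trimmed:

#include <linux/err.h>
#include <linux/soc/ti/ti_sci_protocol.h>

static int my_start_remote_core(struct device *dev)
{
        const struct ti_sci_handle *sci = ti_sci_get_handle(dev);
        const struct ti_sci_proc_ops *pops;
        int ret;

        if (IS_ERR(sci))
                return PTR_ERR(sci);
        pops = &sci->ops.proc_ops;

        ret = pops->request(sci, 0x18);                 /* hypothetical pid */
        if (ret)
                return ret;

        /* hypothetical boot vector and config/control bits */
        ret = pops->set_config(sci, 0x18, 0x9d000000, 0, 0);
        if (!ret)
                ret = pops->set_control(sci, 0x18, 0, 0);
        if (ret)
                pops->release(sci, 0x18);

        return ret;
}
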
diff --git a/include/linux/sudmac.h b/include/linux/sudmac.h
deleted file mode 100644
index cccc0a665d26..000000000000
--- a/include/linux/sudmac.h
+++ /dev/null
@@ -1,49 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Header for the SUDMAC driver
- *
- * Copyright (C) 2013 Renesas Solutions Corp.
- */
-#ifndef SUDMAC_H
-#define SUDMAC_H
-
-#include <linux/dmaengine.h>
-#include <linux/shdma-base.h>
-#include <linux/types.h>
-
-/* Used by slave DMA clients to request DMA to/from a specific peripheral */
-struct sudmac_slave {
- struct shdma_slave shdma_slave; /* Set by the platform */
-};
-
-/*
- * Supplied by platforms to specify, how a DMA channel has to be configured for
- * a certain peripheral
- */
-struct sudmac_slave_config {
- int slave_id;
-};
-
-struct sudmac_channel {
- unsigned long offset;
- unsigned long config;
- unsigned long wait; /* The configuable range is 0 to 3 */
- unsigned long dint_end_bit;
-};
-
-struct sudmac_pdata {
- const struct sudmac_slave_config *slave;
- int slave_num;
- const struct sudmac_channel *channel;
- int channel_num;
-};
-
-/* Definitions for the sudmac_channel.config */
-#define SUDMAC_TX_BUFFER_MODE BIT(0)
-#define SUDMAC_RX_END_MODE BIT(1)
-
-/* Definitions for the sudmac_channel.dint_end_bit */
-#define SUDMAC_DMA_BIT_CH0 BIT(0)
-#define SUDMAC_DMA_BIT_CH1 BIT(1)
-
-#endif
diff --git a/include/linux/sunrpc/bc_xprt.h b/include/linux/sunrpc/bc_xprt.h
index d4229a78524a..87d27e13d885 100644
--- a/include/linux/sunrpc/bc_xprt.h
+++ b/include/linux/sunrpc/bc_xprt.h
@@ -43,6 +43,7 @@ void xprt_destroy_backchannel(struct rpc_xprt *, unsigned int max_reqs);
int xprt_setup_bc(struct rpc_xprt *xprt, unsigned int min_reqs);
void xprt_destroy_bc(struct rpc_xprt *xprt, unsigned int max_reqs);
void xprt_free_bc_rqst(struct rpc_rqst *req);
+unsigned int xprt_bc_max_slots(struct rpc_xprt *xprt);
/*
* Determine if a shared backchannel is in use
diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
index 6e8073140a5d..abc63bd1be2b 100644
--- a/include/linux/sunrpc/clnt.h
+++ b/include/linux/sunrpc/clnt.h
@@ -124,6 +124,7 @@ struct rpc_create_args {
u32 prognumber; /* overrides program->number */
u32 version;
rpc_authflavor_t authflavor;
+ u32 nconnect;
unsigned long flags;
char *client_name;
struct svc_xprt *bc_xprt; /* NFSv4.1 backchannel */
@@ -163,6 +164,8 @@ void rpc_shutdown_client(struct rpc_clnt *);
void rpc_release_client(struct rpc_clnt *);
void rpc_task_release_transport(struct rpc_task *);
void rpc_task_release_client(struct rpc_task *);
+struct rpc_xprt *rpc_task_get_xprt(struct rpc_clnt *clnt,
+ struct rpc_xprt *xprt);
int rpcb_create_local(struct net *);
void rpcb_put_local(struct net *);
@@ -191,6 +194,7 @@ void rpc_setbufsize(struct rpc_clnt *, unsigned int, unsigned int);
struct net * rpc_net_ns(struct rpc_clnt *);
size_t rpc_max_payload(struct rpc_clnt *);
size_t rpc_max_bc_payload(struct rpc_clnt *);
+unsigned int rpc_num_bc_slots(struct rpc_clnt *);
void rpc_force_rebind(struct rpc_clnt *);
size_t rpc_peeraddr(struct rpc_clnt *, struct sockaddr *, size_t);
const char *rpc_peeraddr2str(struct rpc_clnt *, enum rpc_display_format_t);
diff --git a/include/linux/sunrpc/metrics.h b/include/linux/sunrpc/metrics.h
index 1b3751327575..0ee3f7052846 100644
--- a/include/linux/sunrpc/metrics.h
+++ b/include/linux/sunrpc/metrics.h
@@ -30,7 +30,7 @@
#include <linux/ktime.h>
#include <linux/spinlock.h>
-#define RPC_IOSTATS_VERS "1.0"
+#define RPC_IOSTATS_VERS "1.1"
struct rpc_iostats {
spinlock_t om_lock;
@@ -66,6 +66,11 @@ struct rpc_iostats {
ktime_t om_queue, /* queued for xmit */
om_rtt, /* RPC RTT */
om_execute; /* RPC execution */
+ /*
+ * The count of operations that complete with tk_status < 0.
+ * These statuses usually indicate error conditions.
+ */
+ unsigned long om_error_status;
} ____cacheline_aligned;
struct rpc_task;
diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h
index d0e451868f02..baa3ecdb882f 100644
--- a/include/linux/sunrpc/sched.h
+++ b/include/linux/sunrpc/sched.h
@@ -126,6 +126,7 @@ struct rpc_task_setup {
#define RPC_CALL_MAJORSEEN 0x0020 /* major timeout seen */
#define RPC_TASK_ROOTCREDS 0x0040 /* force root creds */
#define RPC_TASK_DYNAMIC 0x0080 /* task was kmalloc'ed */
+#define RPC_TASK_NO_ROUND_ROBIN 0x0100 /* send requests on "main" xprt */
#define RPC_TASK_SOFT 0x0200 /* Use soft timeouts */
#define RPC_TASK_SOFTCONN 0x0400 /* Fail if can't connect */
#define RPC_TASK_SENT 0x0800 /* message was sent */
@@ -183,8 +184,9 @@ struct rpc_task_setup {
#define RPC_NR_PRIORITY (1 + RPC_PRIORITY_PRIVILEGED - RPC_PRIORITY_LOW)
struct rpc_timer {
- struct timer_list timer;
struct list_head list;
+ unsigned long expires;
+ struct delayed_work dwork;
};
/*
diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h
index a6d9fce7f20e..13e108bcc9eb 100644
--- a/include/linux/sunrpc/xprt.h
+++ b/include/linux/sunrpc/xprt.h
@@ -158,6 +158,7 @@ struct rpc_xprt_ops {
int (*bc_setup)(struct rpc_xprt *xprt,
unsigned int min_reqs);
size_t (*bc_maxpayload)(struct rpc_xprt *xprt);
+ unsigned int (*bc_num_slots)(struct rpc_xprt *xprt);
void (*bc_free_rqst)(struct rpc_rqst *rqst);
void (*bc_destroy)(struct rpc_xprt *xprt,
unsigned int max_reqs);
@@ -238,6 +239,7 @@ struct rpc_xprt {
/*
* Send stuff
*/
+ atomic_long_t queuelen;
spinlock_t transport_lock; /* lock transport info */
spinlock_t reserve_lock; /* lock slot table */
spinlock_t queue_lock; /* send/receive queue lock */
@@ -250,8 +252,9 @@ struct rpc_xprt {
#if defined(CONFIG_SUNRPC_BACKCHANNEL)
struct svc_serv *bc_serv; /* The RPC service which will */
/* process the callback */
- int bc_alloc_count; /* Total number of preallocs */
- atomic_t bc_free_slots;
+ unsigned int bc_alloc_max;
+ unsigned int bc_alloc_count; /* Total number of preallocs */
+ atomic_t bc_slot_count; /* Number of allocated slots */
spinlock_t bc_pa_lock; /* Protects the preallocated
* items */
struct list_head bc_pa_list; /* List of preallocated
@@ -334,6 +337,9 @@ struct xprt_class {
*/
struct rpc_xprt *xprt_create_transport(struct xprt_create *args);
void xprt_connect(struct rpc_task *task);
+unsigned long xprt_reconnect_delay(const struct rpc_xprt *xprt);
+void xprt_reconnect_backoff(struct rpc_xprt *xprt,
+ unsigned long init_to);
void xprt_reserve(struct rpc_task *task);
void xprt_retry_reserve(struct rpc_task *task);
int xprt_reserve_xprt(struct rpc_xprt *xprt, struct rpc_task *task);
diff --git a/include/linux/sunrpc/xprtmultipath.h b/include/linux/sunrpc/xprtmultipath.h
index af1257c030d2..c6cce3fbf29d 100644
--- a/include/linux/sunrpc/xprtmultipath.h
+++ b/include/linux/sunrpc/xprtmultipath.h
@@ -15,6 +15,8 @@ struct rpc_xprt_switch {
struct kref xps_kref;
unsigned int xps_nxprts;
+ unsigned int xps_nactive;
+ atomic_long_t xps_queuelen;
struct list_head xps_xprt_list;
struct net * xps_net;
diff --git a/include/linux/sunrpc/xprtsock.h b/include/linux/sunrpc/xprtsock.h
index b81d0b3e0799..7638dbe7bc50 100644
--- a/include/linux/sunrpc/xprtsock.h
+++ b/include/linux/sunrpc/xprtsock.h
@@ -56,6 +56,7 @@ struct sock_xprt {
*/
unsigned long sock_state;
struct delayed_work connect_worker;
+ struct work_struct error_worker;
struct work_struct recv_worker;
struct mutex recv_mutex;
struct sockaddr_storage srcaddr;
@@ -84,6 +85,10 @@ struct sock_xprt {
#define XPRT_SOCK_CONNECTING 1U
#define XPRT_SOCK_DATA_READY (2)
#define XPRT_SOCK_UPD_TIMEOUT (3)
+#define XPRT_SOCK_WAKE_ERROR (4)
+#define XPRT_SOCK_WAKE_WRITE (5)
+#define XPRT_SOCK_WAKE_PENDING (6)
+#define XPRT_SOCK_WAKE_DISCONNECT (7)
#endif /* __KERNEL__ */
diff --git a/include/linux/swapops.h b/include/linux/swapops.h
index 15bdb6fe71e5..877fd239b6ff 100644
--- a/include/linux/swapops.h
+++ b/include/linux/swapops.h
@@ -6,6 +6,8 @@
#include <linux/bug.h>
#include <linux/mm_types.h>
+#ifdef CONFIG_MMU
+
/*
* swapcache pages are stored in the swapper_space radix tree. We want to
* get good packing density in that tree, so the index should be dense in
@@ -50,13 +52,11 @@ static inline pgoff_t swp_offset(swp_entry_t entry)
return entry.val & SWP_OFFSET_MASK;
}
-#ifdef CONFIG_MMU
/* check whether a pte points to a swap entry */
static inline int is_swap_pte(pte_t pte)
{
return !pte_none(pte) && !pte_present(pte);
}
-#endif
/*
* Convert the arch-dependent pte representation of a swp_entry_t into an
@@ -360,4 +360,5 @@ static inline int non_swap_entry(swp_entry_t entry)
}
#endif
+#endif /* CONFIG_MMU */
#endif /* _LINUX_SWAPOPS_H */
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index b01d54a5732e..88145da7d140 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -1231,8 +1231,8 @@ asmlinkage long sys_ni_syscall(void);
* the ksys_xyzyyz() functions prototyped below.
*/
-int ksys_mount(char __user *dev_name, char __user *dir_name, char __user *type,
- unsigned long flags, void __user *data);
+int ksys_mount(const char __user *dev_name, const char __user *dir_name,
+ const char __user *type, unsigned long flags, void __user *data);
int ksys_umount(char __user *name, int flags);
int ksys_dup(unsigned int fildes);
int ksys_chroot(const char __user *filename);
diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
index aadd310769d0..6df477329b76 100644
--- a/include/linux/sysctl.h
+++ b/include/linux/sysctl.h
@@ -37,6 +37,13 @@ struct ctl_table_root;
struct ctl_table_header;
struct ctl_dir;
+/* Keep the same order as in fs/proc/proc_sysctl.c */
+#define SYSCTL_ZERO ((void *)&sysctl_vals[0])
+#define SYSCTL_ONE ((void *)&sysctl_vals[1])
+#define SYSCTL_INT_MAX ((void *)&sysctl_vals[2])
+
+extern const int sysctl_vals[];
+
typedef int proc_handler (struct ctl_table *ctl, int write,
void __user *buffer, size_t *lenp, loff_t *ppos);
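
With the shared sysctl_vals[] table, min/max clamping no longer needs per-file static zero/one variables; a minimal sketch of a boolean-style entry using the new constants (table and variable names are hypothetical):

#include <linux/sysctl.h>

static int my_feature_enabled;

static struct ctl_table my_sysctl_table[] = {
        {
                .procname       = "my_feature_enabled",
                .data           = &my_feature_enabled,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec_minmax,
                .extra1         = SYSCTL_ZERO,          /* minimum */
                .extra2         = SYSCTL_ONE,           /* maximum */
        },
        { }
};
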
diff --git a/include/linux/thermal.h b/include/linux/thermal.h
index 15a4ca5d7099..681047f8cc05 100644
--- a/include/linux/thermal.h
+++ b/include/linux/thermal.h
@@ -251,7 +251,7 @@ struct thermal_bind_params {
* platform characterization. This value is relative to the
* rest of the weights so a cooling device whose weight is
* double that of another cooling device is twice as
- * effective. See Documentation/thermal/sysfs-api.txt for more
+ * effective. See Documentation/thermal/sysfs-api.rst for more
* information.
*/
int weight;
@@ -259,7 +259,7 @@ struct thermal_bind_params {
/*
* This is a bit mask that gives the binding relation between this
* thermal zone and cdev, for a particular trip point.
- * See Documentation/thermal/sysfs-api.txt for more information.
+ * See Documentation/thermal/sysfs-api.rst for more information.
*/
int trip_mask;
diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h
index 8a62731673f7..5150436783e8 100644
--- a/include/linux/trace_events.h
+++ b/include/linux/trace_events.h
@@ -142,6 +142,7 @@ enum print_line_t {
enum print_line_t trace_handle_return(struct trace_seq *s);
void tracing_generic_entry_update(struct trace_entry *entry,
+ unsigned short type,
unsigned long flags,
int pc);
struct trace_event_file;
@@ -317,6 +318,14 @@ trace_event_name(struct trace_event_call *call)
return call->name;
}
+static inline struct list_head *
+trace_get_fields(struct trace_event_call *event_call)
+{
+ if (!event_call->class->get_fields)
+ return &event_call->class->fields;
+ return event_call->class->get_fields(event_call);
+}
+
struct trace_array;
struct trace_subsystem_dir;
diff --git a/include/linux/tracehook.h b/include/linux/tracehook.h
index 8446573cc682..36fb3bbed6b2 100644
--- a/include/linux/tracehook.h
+++ b/include/linux/tracehook.h
@@ -54,13 +54,15 @@ struct linux_binprm;
/*
* ptrace report for syscall entry and exit looks identical.
*/
-static inline int ptrace_report_syscall(struct pt_regs *regs)
+static inline int ptrace_report_syscall(struct pt_regs *regs,
+ unsigned long message)
{
int ptrace = current->ptrace;
if (!(ptrace & PT_PTRACED))
return 0;
+ current->ptrace_message = message;
ptrace_notify(SIGTRAP | ((ptrace & PT_TRACESYSGOOD) ? 0x80 : 0));
/*
@@ -73,6 +75,7 @@ static inline int ptrace_report_syscall(struct pt_regs *regs)
current->exit_code = 0;
}
+ current->ptrace_message = 0;
return fatal_signal_pending(current);
}
@@ -98,7 +101,7 @@ static inline int ptrace_report_syscall(struct pt_regs *regs)
static inline __must_check int tracehook_report_syscall_entry(
struct pt_regs *regs)
{
- return ptrace_report_syscall(regs);
+ return ptrace_report_syscall(regs, PTRACE_EVENTMSG_SYSCALL_ENTRY);
}
/**
@@ -123,7 +126,7 @@ static inline void tracehook_report_syscall_exit(struct pt_regs *regs, int step)
if (step)
user_single_step_report(regs);
else
- ptrace_report_syscall(regs);
+ ptrace_report_syscall(regs, PTRACE_EVENTMSG_SYSCALL_EXIT);
}
/**
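
On the tracer side, the value stored in ->ptrace_message is read back with PTRACE_GETEVENTMSG, letting a debugger tell syscall-entry stops from syscall-exit stops; a hedged userspace sketch (the waitpid()/stop loop is elided, and the fallback defines assume the values from this series' uapi update):

#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/types.h>

#ifndef PTRACE_EVENTMSG_SYSCALL_ENTRY   /* fallback if libc headers lack these */
#define PTRACE_EVENTMSG_SYSCALL_ENTRY   1
#define PTRACE_EVENTMSG_SYSCALL_EXIT    2
#endif

/* Call after waitpid() reported a syscall stop for @pid. */
static void classify_syscall_stop(pid_t pid)
{
        unsigned long msg = 0;

        if (ptrace(PTRACE_GETEVENTMSG, pid, 0, &msg) == -1)
                return;

        printf("%s stop\n", msg == PTRACE_EVENTMSG_SYSCALL_ENTRY ?
               "syscall-entry" : "syscall-exit");
}
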
diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
index 2b70130af585..34a038563d97 100644
--- a/include/linux/uaccess.h
+++ b/include/linux/uaccess.h
@@ -203,7 +203,10 @@ static inline void pagefault_enable(void)
/*
* Is the pagefault handler disabled? If so, user access methods will not sleep.
*/
-#define pagefault_disabled() (current->pagefault_disabled != 0)
+static inline bool pagefault_disabled(void)
+{
+ return current->pagefault_disabled != 0;
+}
/*
* The pagefault handler is in general disabled by pagefault_disable() or
@@ -240,6 +243,18 @@ extern long probe_kernel_read(void *dst, const void *src, size_t size);
extern long __probe_kernel_read(void *dst, const void *src, size_t size);
/*
+ * probe_user_read(): safely attempt to read from a location in user space
+ * @dst: pointer to the buffer that shall take the data
+ * @src: address to read from
+ * @size: size of the data chunk
+ *
+ * Safely read from address @src to the buffer at @dst. If a kernel fault
+ * happens, handle that and return -EFAULT.
+ */
+extern long probe_user_read(void *dst, const void __user *src, size_t size);
+extern long __probe_user_read(void *dst, const void __user *src, size_t size);
+
+/*
* probe_kernel_write(): safely attempt to write to a location
* @dst: address to write to
* @src: pointer to the data that shall be written
@@ -252,6 +267,9 @@ extern long notrace probe_kernel_write(void *dst, const void *src, size_t size);
extern long notrace __probe_kernel_write(void *dst, const void *src, size_t size);
extern long strncpy_from_unsafe(char *dst, const void *unsafe_addr, long count);
+extern long strncpy_from_unsafe_user(char *dst, const void __user *unsafe_addr,
+ long count);
+extern long strnlen_unsafe_user(const void __user *unsafe_addr, long count);
/**
* probe_kernel_address(): safely attempt to read from a location
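
probe_user_read() mirrors probe_kernel_read() for user addresses, so callers in atomic or tracing context can take a snapshot without risking a sleeping fault; a small sketch (the address is assumed to come from the traced task):

#include <linux/kernel.h>
#include <linux/uaccess.h>

static void peek_user_word(const void __user *uaddr)
{
        unsigned long val;

        /* Returns -EFAULT instead of faulting if @uaddr is not accessible */
        if (probe_user_read(&val, uaddr, sizeof(val)))
                return;

        trace_printk("user word at %px = %#lx\n", uaddr, val);
}
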
diff --git a/include/memory/jedec_ddr.h b/include/memory/jedec_ddr.h
deleted file mode 100644
index 90a9dabbe606..000000000000
--- a/include/memory/jedec_ddr.h
+++ /dev/null
@@ -1,172 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Definitions for DDR memories based on JEDEC specs
- *
- * Copyright (C) 2012 Texas Instruments, Inc.
- *
- * Aneesh V <aneesh@ti.com>
- */
-#ifndef __LINUX_JEDEC_DDR_H
-#define __LINUX_JEDEC_DDR_H
-
-#include <linux/types.h>
-
-/* DDR Densities */
-#define DDR_DENSITY_64Mb 1
-#define DDR_DENSITY_128Mb 2
-#define DDR_DENSITY_256Mb 3
-#define DDR_DENSITY_512Mb 4
-#define DDR_DENSITY_1Gb 5
-#define DDR_DENSITY_2Gb 6
-#define DDR_DENSITY_4Gb 7
-#define DDR_DENSITY_8Gb 8
-#define DDR_DENSITY_16Gb 9
-#define DDR_DENSITY_32Gb 10
-
-/* DDR type */
-#define DDR_TYPE_DDR2 1
-#define DDR_TYPE_DDR3 2
-#define DDR_TYPE_LPDDR2_S4 3
-#define DDR_TYPE_LPDDR2_S2 4
-#define DDR_TYPE_LPDDR2_NVM 5
-
-/* DDR IO width */
-#define DDR_IO_WIDTH_4 1
-#define DDR_IO_WIDTH_8 2
-#define DDR_IO_WIDTH_16 3
-#define DDR_IO_WIDTH_32 4
-
-/* Number of Row bits */
-#define R9 9
-#define R10 10
-#define R11 11
-#define R12 12
-#define R13 13
-#define R14 14
-#define R15 15
-#define R16 16
-
-/* Number of Column bits */
-#define C7 7
-#define C8 8
-#define C9 9
-#define C10 10
-#define C11 11
-#define C12 12
-
-/* Number of Banks */
-#define B1 0
-#define B2 1
-#define B4 2
-#define B8 3
-
-/* Refresh rate in nano-seconds */
-#define T_REFI_15_6 15600
-#define T_REFI_7_8 7800
-#define T_REFI_3_9 3900
-
-/* tRFC values */
-#define T_RFC_90 90000
-#define T_RFC_110 110000
-#define T_RFC_130 130000
-#define T_RFC_160 160000
-#define T_RFC_210 210000
-#define T_RFC_300 300000
-#define T_RFC_350 350000
-
-/* Mode register numbers */
-#define DDR_MR0 0
-#define DDR_MR1 1
-#define DDR_MR2 2
-#define DDR_MR3 3
-#define DDR_MR4 4
-#define DDR_MR5 5
-#define DDR_MR6 6
-#define DDR_MR7 7
-#define DDR_MR8 8
-#define DDR_MR9 9
-#define DDR_MR10 10
-#define DDR_MR11 11
-#define DDR_MR16 16
-#define DDR_MR17 17
-#define DDR_MR18 18
-
-/*
- * LPDDR2 related defines
- */
-
-/* MR4 register fields */
-#define MR4_SDRAM_REF_RATE_SHIFT 0
-#define MR4_SDRAM_REF_RATE_MASK 7
-#define MR4_TUF_SHIFT 7
-#define MR4_TUF_MASK (1 << 7)
-
-/* MR4 SDRAM Refresh Rate field values */
-#define SDRAM_TEMP_NOMINAL 0x3
-#define SDRAM_TEMP_RESERVED_4 0x4
-#define SDRAM_TEMP_HIGH_DERATE_REFRESH 0x5
-#define SDRAM_TEMP_HIGH_DERATE_REFRESH_AND_TIMINGS 0x6
-#define SDRAM_TEMP_VERY_HIGH_SHUTDOWN 0x7
-
-#define NUM_DDR_ADDR_TABLE_ENTRIES 11
-#define NUM_DDR_TIMING_TABLE_ENTRIES 4
-
-/* Structure for DDR addressing info from the JEDEC spec */
-struct lpddr2_addressing {
- u32 num_banks;
- u32 tREFI_ns;
- u32 tRFCab_ps;
-};
-
-/*
- * Structure for timings from the LPDDR2 datasheet
- * All parameters are in pico seconds(ps) unless explicitly indicated
- * with a suffix like tRAS_max_ns below
- */
-struct lpddr2_timings {
- u32 max_freq;
- u32 min_freq;
- u32 tRPab;
- u32 tRCD;
- u32 tWR;
- u32 tRAS_min;
- u32 tRRD;
- u32 tWTR;
- u32 tXP;
- u32 tRTP;
- u32 tCKESR;
- u32 tDQSCK_max;
- u32 tDQSCK_max_derated;
- u32 tFAW;
- u32 tZQCS;
- u32 tZQCL;
- u32 tZQinit;
- u32 tRAS_max_ns;
-};
-
-/*
- * Min value for some parameters in terms of number of tCK cycles(nCK)
- * Please set to zero parameters that are not valid for a given memory
- * type
- */
-struct lpddr2_min_tck {
- u32 tRPab;
- u32 tRCD;
- u32 tWR;
- u32 tRASmin;
- u32 tRRD;
- u32 tWTR;
- u32 tXP;
- u32 tRTP;
- u32 tCKE;
- u32 tCKESR;
- u32 tFAW;
-};
-
-extern const struct lpddr2_addressing
- lpddr2_jedec_addressing_table[NUM_DDR_ADDR_TABLE_ENTRIES];
-extern const struct lpddr2_timings
- lpddr2_jedec_timings[NUM_DDR_TIMING_TABLE_ENTRIES];
-extern const struct lpddr2_min_tck lpddr2_jedec_min_tck;
-
-#endif /* __LINUX_JEDEC_DDR_H */
diff --git a/include/net/tcp.h b/include/net/tcp.h
index cca3c59b98bf..f42d300f0cfa 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -1064,7 +1064,8 @@ void tcp_get_default_congestion_control(struct net *net, char *name);
void tcp_get_available_congestion_control(char *buf, size_t len);
void tcp_get_allowed_congestion_control(char *buf, size_t len);
int tcp_set_allowed_congestion_control(char *allowed);
-int tcp_set_congestion_control(struct sock *sk, const char *name, bool load, bool reinit);
+int tcp_set_congestion_control(struct sock *sk, const char *name, bool load,
+ bool reinit, bool cap_net_admin);
u32 tcp_slow_start(struct tcp_sock *tp, u32 acked);
void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w, u32 acked);
diff --git a/include/soc/fsl/bman.h b/include/soc/fsl/bman.h
index 5b99cb2ea5ef..173e4049d963 100644
--- a/include/soc/fsl/bman.h
+++ b/include/soc/fsl/bman.h
@@ -133,5 +133,13 @@ int bman_acquire(struct bman_pool *pool, struct bm_buffer *bufs, u8 num);
* failed to probe or 0 if the bman driver has not probed yet.
*/
int bman_is_probed(void);
+/**
+ * bman_portals_probed - Check if all cpu bound bman portals are probed
+ *
+ * Returns 1 if all the required cpu bound bman portals have successfully probed,
+ * -1 if probe errors appeared, or 0 if the bman portals have not yet finished
+ * probing.
+ */
+int bman_portals_probed(void);
#endif /* __FSL_BMAN_H */
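
Like bman_is_probed(), the new portal check is meant to gate dependent drivers' probe; a sketch of the expected pattern (the qman_portals_probed() added in the next hunk is used the same way, and the driver name is hypothetical):

#include <linux/platform_device.h>
#include <soc/fsl/bman.h>

static int my_dpaa_client_probe(struct platform_device *pdev)
{
        int ret = bman_portals_probed();

        if (!ret)                       /* portals still probing: retry later */
                return -EPROBE_DEFER;
        if (ret < 0)                    /* portal probe failed: give up */
                return -ENODEV;

        /* ... safe to acquire/release buffers from here on ... */
        return 0;
}
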
diff --git a/include/soc/fsl/qman.h b/include/soc/fsl/qman.h
index 5cc7af06c1ba..aa31c05a103a 100644
--- a/include/soc/fsl/qman.h
+++ b/include/soc/fsl/qman.h
@@ -1195,6 +1195,15 @@ int qman_release_cgrid(u32 id);
int qman_is_probed(void);
/**
+ * qman_portals_probed - Check if all cpu bound qman portals are probed
+ *
+ * Returns 1 if all the required cpu bound qman portals have successfully probed,
+ * -1 if probe errors appeared, or 0 if the qman portals have not yet finished
+ * probing.
+ */
+int qman_portals_probed(void);
+
+/**
* qman_dqrr_get_ithresh - Get coalesce interrupt threshold
* @portal: portal to get the value for
* @ithresh: threshold pointer
diff --git a/include/sound/hda_codec.h b/include/sound/hda_codec.h
index 8f46ff3449d5..871993696c5f 100644
--- a/include/sound/hda_codec.h
+++ b/include/sound/hda_codec.h
@@ -252,6 +252,8 @@ struct hda_codec {
unsigned int auto_runtime_pm:1; /* enable automatic codec runtime pm */
unsigned int force_pin_prefix:1; /* Add location prefix */
unsigned int link_down_at_suspend:1; /* link down at runtime suspend */
+ unsigned int relaxed_resume:1; /* don't resume forcibly for jack */
+
#ifdef CONFIG_PM
unsigned long power_on_acct;
unsigned long power_off_acct;
diff --git a/include/trace/events/btrfs.h b/include/trace/events/btrfs.h
index f9eff010fc7e..2f6a669408bb 100644
--- a/include/trace/events/btrfs.h
+++ b/include/trace/events/btrfs.h
@@ -29,6 +29,7 @@ struct btrfs_qgroup_extent_record;
struct btrfs_qgroup;
struct extent_io_tree;
struct prelim_ref;
+struct btrfs_space_info;
TRACE_DEFINE_ENUM(FLUSH_DELAYED_ITEMS_NR);
TRACE_DEFINE_ENUM(FLUSH_DELAYED_ITEMS);
@@ -2091,6 +2092,45 @@ DEFINE_BTRFS_LOCK_EVENT(btrfs_try_tree_read_lock);
DEFINE_BTRFS_LOCK_EVENT(btrfs_try_tree_write_lock);
DEFINE_BTRFS_LOCK_EVENT(btrfs_tree_read_lock_atomic);
+DECLARE_EVENT_CLASS(btrfs__space_info_update,
+
+ TP_PROTO(struct btrfs_fs_info *fs_info,
+ struct btrfs_space_info *sinfo, u64 old, s64 diff),
+
+ TP_ARGS(fs_info, sinfo, old, diff),
+
+ TP_STRUCT__entry_btrfs(
+ __field( u64, type )
+ __field( u64, old )
+ __field( s64, diff )
+ ),
+
+ TP_fast_assign_btrfs(fs_info,
+ __entry->type = sinfo->flags;
+ __entry->old = old;
+ __entry->diff = diff;
+ ),
+ TP_printk_btrfs("type=%s old=%llu diff=%lld",
+ __print_flags(__entry->type, "|", BTRFS_GROUP_FLAGS),
+ __entry->old, __entry->diff)
+);
+
+DEFINE_EVENT(btrfs__space_info_update, update_bytes_may_use,
+
+ TP_PROTO(struct btrfs_fs_info *fs_info,
+ struct btrfs_space_info *sinfo, u64 old, s64 diff),
+
+ TP_ARGS(fs_info, sinfo, old, diff)
+);
+
+DEFINE_EVENT(btrfs__space_info_update, update_bytes_pinned,
+
+ TP_PROTO(struct btrfs_fs_info *fs_info,
+ struct btrfs_space_info *sinfo, u64 old, s64 diff),
+
+ TP_ARGS(fs_info, sinfo, old, diff)
+);
+
#endif /* _TRACE_BTRFS_H */
/* This part must be outside protection */
diff --git a/include/trace/events/rpcrdma.h b/include/trace/events/rpcrdma.h
index df9851cb82b2..f6a4eaa85a3e 100644
--- a/include/trace/events/rpcrdma.h
+++ b/include/trace/events/rpcrdma.h
@@ -181,18 +181,6 @@ DECLARE_EVENT_CLASS(xprtrdma_wrch_event,
), \
TP_ARGS(task, mr, nsegs))
-TRACE_DEFINE_ENUM(FRWR_IS_INVALID);
-TRACE_DEFINE_ENUM(FRWR_IS_VALID);
-TRACE_DEFINE_ENUM(FRWR_FLUSHED_FR);
-TRACE_DEFINE_ENUM(FRWR_FLUSHED_LI);
-
-#define xprtrdma_show_frwr_state(x) \
- __print_symbolic(x, \
- { FRWR_IS_INVALID, "INVALID" }, \
- { FRWR_IS_VALID, "VALID" }, \
- { FRWR_FLUSHED_FR, "FLUSHED_FR" }, \
- { FRWR_FLUSHED_LI, "FLUSHED_LI" })
-
DECLARE_EVENT_CLASS(xprtrdma_frwr_done,
TP_PROTO(
const struct ib_wc *wc,
@@ -203,22 +191,19 @@ DECLARE_EVENT_CLASS(xprtrdma_frwr_done,
TP_STRUCT__entry(
__field(const void *, mr)
- __field(unsigned int, state)
__field(unsigned int, status)
__field(unsigned int, vendor_err)
),
TP_fast_assign(
__entry->mr = container_of(frwr, struct rpcrdma_mr, frwr);
- __entry->state = frwr->fr_state;
__entry->status = wc->status;
__entry->vendor_err = __entry->status ? wc->vendor_err : 0;
),
TP_printk(
- "mr=%p state=%s: %s (%u/0x%x)",
- __entry->mr, xprtrdma_show_frwr_state(__entry->state),
- rdma_show_wc_status(__entry->status),
+ "mr=%p: %s (%u/0x%x)",
+ __entry->mr, rdma_show_wc_status(__entry->status),
__entry->status, __entry->vendor_err
)
);
@@ -390,6 +375,37 @@ DEFINE_RXPRT_EVENT(xprtrdma_op_inject_dsc);
DEFINE_RXPRT_EVENT(xprtrdma_op_close);
DEFINE_RXPRT_EVENT(xprtrdma_op_connect);
+TRACE_EVENT(xprtrdma_op_set_cto,
+ TP_PROTO(
+ const struct rpcrdma_xprt *r_xprt,
+ unsigned long connect,
+ unsigned long reconnect
+ ),
+
+ TP_ARGS(r_xprt, connect, reconnect),
+
+ TP_STRUCT__entry(
+ __field(const void *, r_xprt)
+ __field(unsigned long, connect)
+ __field(unsigned long, reconnect)
+ __string(addr, rpcrdma_addrstr(r_xprt))
+ __string(port, rpcrdma_portstr(r_xprt))
+ ),
+
+ TP_fast_assign(
+ __entry->r_xprt = r_xprt;
+ __entry->connect = connect;
+ __entry->reconnect = reconnect;
+ __assign_str(addr, rpcrdma_addrstr(r_xprt));
+ __assign_str(port, rpcrdma_portstr(r_xprt));
+ ),
+
+ TP_printk("peer=[%s]:%s r_xprt=%p: connect=%lu reconnect=%lu",
+ __get_str(addr), __get_str(port), __entry->r_xprt,
+ __entry->connect / HZ, __entry->reconnect / HZ
+ )
+);
+
TRACE_EVENT(xprtrdma_qp_event,
TP_PROTO(
const struct rpcrdma_xprt *r_xprt,
@@ -470,13 +486,12 @@ TRACE_DEFINE_ENUM(rpcrdma_replych);
TRACE_EVENT(xprtrdma_marshal,
TP_PROTO(
- const struct rpc_rqst *rqst,
- unsigned int hdrlen,
+ const struct rpcrdma_req *req,
unsigned int rtype,
unsigned int wtype
),
- TP_ARGS(rqst, hdrlen, rtype, wtype),
+ TP_ARGS(req, rtype, wtype),
TP_STRUCT__entry(
__field(unsigned int, task_id)
@@ -491,10 +506,12 @@ TRACE_EVENT(xprtrdma_marshal,
),
TP_fast_assign(
+ const struct rpc_rqst *rqst = &req->rl_slot;
+
__entry->task_id = rqst->rq_task->tk_pid;
__entry->client_id = rqst->rq_task->tk_client->cl_clid;
__entry->xid = be32_to_cpu(rqst->rq_xid);
- __entry->hdrlen = hdrlen;
+ __entry->hdrlen = req->rl_hdrbuf.len;
__entry->headlen = rqst->rq_snd_buf.head[0].iov_len;
__entry->pagelen = rqst->rq_snd_buf.page_len;
__entry->taillen = rqst->rq_snd_buf.tail[0].iov_len;
@@ -538,6 +555,33 @@ TRACE_EVENT(xprtrdma_marshal_failed,
)
);
+TRACE_EVENT(xprtrdma_prepsend_failed,
+ TP_PROTO(const struct rpc_rqst *rqst,
+ int ret
+ ),
+
+ TP_ARGS(rqst, ret),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, task_id)
+ __field(unsigned int, client_id)
+ __field(u32, xid)
+ __field(int, ret)
+ ),
+
+ TP_fast_assign(
+ __entry->task_id = rqst->rq_task->tk_pid;
+ __entry->client_id = rqst->rq_task->tk_client->cl_clid;
+ __entry->xid = be32_to_cpu(rqst->rq_xid);
+ __entry->ret = ret;
+ ),
+
+ TP_printk("task:%u@%u xid=0x%08x: ret=%d",
+ __entry->task_id, __entry->client_id, __entry->xid,
+ __entry->ret
+ )
+);
+
TRACE_EVENT(xprtrdma_post_send,
TP_PROTO(
const struct rpcrdma_req *req,
@@ -559,7 +603,8 @@ TRACE_EVENT(xprtrdma_post_send,
const struct rpc_rqst *rqst = &req->rl_slot;
__entry->task_id = rqst->rq_task->tk_pid;
- __entry->client_id = rqst->rq_task->tk_client->cl_clid;
+ __entry->client_id = rqst->rq_task->tk_client ?
+ rqst->rq_task->tk_client->cl_clid : -1;
__entry->req = req;
__entry->num_sge = req->rl_sendctx->sc_wr.num_sge;
__entry->signaled = req->rl_sendctx->sc_wr.send_flags &
@@ -698,6 +743,7 @@ TRACE_EVENT(xprtrdma_wc_receive,
DEFINE_FRWR_DONE_EVENT(xprtrdma_wc_fastreg);
DEFINE_FRWR_DONE_EVENT(xprtrdma_wc_li);
DEFINE_FRWR_DONE_EVENT(xprtrdma_wc_li_wake);
+DEFINE_FRWR_DONE_EVENT(xprtrdma_wc_li_done);
TRACE_EVENT(xprtrdma_frwr_alloc,
TP_PROTO(
diff --git a/include/uapi/asm-generic/mman-common.h b/include/uapi/asm-generic/mman-common.h
index abd238d0f7a4..63b1f506ea67 100644
--- a/include/uapi/asm-generic/mman-common.h
+++ b/include/uapi/asm-generic/mman-common.h
@@ -19,15 +19,18 @@
#define MAP_TYPE 0x0f /* Mask for type of mapping */
#define MAP_FIXED 0x10 /* Interpret addr exactly */
#define MAP_ANONYMOUS 0x20 /* don't use a file */
-#ifdef CONFIG_MMAP_ALLOW_UNINITIALIZED
-# define MAP_UNINITIALIZED 0x4000000 /* For anonymous mmap, memory could be uninitialized */
-#else
-# define MAP_UNINITIALIZED 0x0 /* Don't support this flag */
-#endif
-/* 0x0100 - 0x80000 flags are defined in asm-generic/mman.h */
+/* 0x0100 - 0x4000 flags are defined in asm-generic/mman.h */
+#define MAP_POPULATE 0x008000 /* populate (prefault) pagetables */
+#define MAP_NONBLOCK 0x010000 /* do not block on IO */
+#define MAP_STACK 0x020000 /* give out an address that is best suited for process/thread stacks */
+#define MAP_HUGETLB 0x040000 /* create a huge page mapping */
+#define MAP_SYNC 0x080000 /* perform synchronous page faults for the mapping */
#define MAP_FIXED_NOREPLACE 0x100000 /* MAP_FIXED which doesn't unmap underlying mapping */
+#define MAP_UNINITIALIZED 0x4000000 /* For anonymous mmap, memory could be
+ * uninitialized */
+
/*
* Flags for mlock
*/
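
Since the generic values now match what most architectures already used, userspace can rely on a single set of flag definitions; as a reminder of how MAP_SYNC is consumed, a hedged example that needs a DAX-capable filesystem, with fallback defines taken from the values above:

#include <sys/mman.h>

#ifndef MAP_SYNC                        /* fallback if libc headers lack these */
#define MAP_SHARED_VALIDATE     0x03
#define MAP_SYNC                0x080000
#endif

/* Map @fd so page faults are synchronous (MAP_SYNC needs MAP_SHARED_VALIDATE) */
static void *map_pmem(int fd, size_t len)
{
        return mmap(NULL, len, PROT_READ | PROT_WRITE,
                    MAP_SHARED_VALIDATE | MAP_SYNC, fd, 0);
}
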
diff --git a/include/uapi/asm-generic/mman.h b/include/uapi/asm-generic/mman.h
index 653687d9771b..57e8195d0b53 100644
--- a/include/uapi/asm-generic/mman.h
+++ b/include/uapi/asm-generic/mman.h
@@ -9,13 +9,11 @@
#define MAP_EXECUTABLE 0x1000 /* mark it as an executable */
#define MAP_LOCKED 0x2000 /* pages are locked */
#define MAP_NORESERVE 0x4000 /* don't check for reservations */
-#define MAP_POPULATE 0x8000 /* populate (prefault) pagetables */
-#define MAP_NONBLOCK 0x10000 /* do not block on IO */
-#define MAP_STACK 0x20000 /* give out an address that is best suited for process/thread stacks */
-#define MAP_HUGETLB 0x40000 /* create a huge page mapping */
-#define MAP_SYNC 0x80000 /* perform synchronous page faults for the mapping */
-/* Bits [26:31] are reserved, see mman-common.h for MAP_HUGETLB usage */
+/*
+ * Bits [26:31] are reserved, see asm-generic/hugetlb_encode.h
+ * for MAP_HUGETLB usage
+ */
#define MCL_CURRENT 1 /* lock all current mappings */
#define MCL_FUTURE 2 /* lock all future mappings */
diff --git a/include/uapi/asm-generic/unistd.h b/include/uapi/asm-generic/unistd.h
index 9acfff0cd153..1be0e798e362 100644
--- a/include/uapi/asm-generic/unistd.h
+++ b/include/uapi/asm-generic/unistd.h
@@ -846,8 +846,10 @@ __SYSCALL(__NR_fsmount, sys_fsmount)
__SYSCALL(__NR_fspick, sys_fspick)
#define __NR_pidfd_open 434
__SYSCALL(__NR_pidfd_open, sys_pidfd_open)
+#ifdef __ARCH_WANT_SYS_CLONE3
#define __NR_clone3 435
__SYSCALL(__NR_clone3, sys_clone3)
+#endif
#undef __NR_syscalls
#define __NR_syscalls 436
diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h
index d799858b9e53..11cc57322962 100644
--- a/include/uapi/drm/amdgpu_drm.h
+++ b/include/uapi/drm/amdgpu_drm.h
@@ -219,7 +219,10 @@ union drm_amdgpu_bo_list {
#define AMDGPU_CTX_PRIORITY_VERY_LOW -1023
#define AMDGPU_CTX_PRIORITY_LOW -512
#define AMDGPU_CTX_PRIORITY_NORMAL 0
-/* Selecting a priority above NORMAL requires CAP_SYS_NICE or DRM_MASTER */
+/*
+ * When used in struct drm_amdgpu_ctx_in, a priority above NORMAL requires
+ * CAP_SYS_NICE or DRM_MASTER
+ */
#define AMDGPU_CTX_PRIORITY_HIGH 512
#define AMDGPU_CTX_PRIORITY_VERY_HIGH 1023
@@ -229,6 +232,7 @@ struct drm_amdgpu_ctx_in {
/** For future use, no flags defined so far */
__u32 flags;
__u32 ctx_id;
+ /** AMDGPU_CTX_PRIORITY_* */
__s32 priority;
};
@@ -281,6 +285,7 @@ struct drm_amdgpu_sched_in {
/* AMDGPU_SCHED_OP_* */
__u32 op;
__u32 fd;
+ /** AMDGPU_CTX_PRIORITY_* */
__s32 priority;
__u32 ctx_id;
};
diff --git a/include/uapi/linux/adfs_fs.h b/include/uapi/linux/adfs_fs.h
index 151d93e27ed4..f1a7d67a7323 100644
--- a/include/uapi/linux/adfs_fs.h
+++ b/include/uapi/linux/adfs_fs.h
@@ -29,17 +29,17 @@ struct adfs_discrecord {
__u8 log2sharesize:4;
__u8 unused40:4;
__u8 big_flag:1;
- __u8 unused41:1;
+ __u8 unused41:7;
__u8 nzones_high;
+ __u8 reserved43;
__le32 format_version;
__le32 root_size;
__u8 unused52[60 - 52];
-};
+} __attribute__((packed, aligned(4)));
#define ADFS_DISCRECORD (0xc00)
#define ADFS_DR_OFFSET (0x1c0)
#define ADFS_DR_SIZE 60
#define ADFS_DR_SIZE_BITS (ADFS_DR_SIZE << 3)
-
#endif /* _UAPI_ADFS_FS_H */
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index 6f68438aa4ed..fa1c753dcdbc 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -806,7 +806,7 @@ union bpf_attr {
* based on a user-provided identifier for all traffic coming from
* the tasks belonging to the related cgroup. See also the related
* kernel documentation, available from the Linux sources in file
- * *Documentation/cgroup-v1/net_cls.rst*.
+ * *Documentation/admin-guide/cgroup-v1/net_cls.rst*.
*
* The Linux kernel has two versions for cgroups: there are
* cgroups v1 and cgroups v2. Both are available to users, who can
@@ -3248,7 +3248,7 @@ struct bpf_sock_addr {
__u32 user_ip4; /* Allows 1,2,4-byte read and 4-byte write.
* Stored in network byte order.
*/
- __u32 user_ip6[4]; /* Allows 1,2,4-byte read and 4,8-byte write.
+ __u32 user_ip6[4]; /* Allows 1,2,4,8-byte read and 4,8-byte write.
* Stored in network byte order.
*/
__u32 user_port; /* Allows 4-byte read and write.
@@ -3260,7 +3260,7 @@ struct bpf_sock_addr {
__u32 msg_src_ip4; /* Allows 1,2,4-byte read and 4-byte write.
* Stored in network byte order.
*/
- __u32 msg_src_ip6[4]; /* Allows 1,2,4-byte read and 4,8-byte write.
+ __u32 msg_src_ip6[4]; /* Allows 1,2,4,8-byte read and 4,8-byte write.
* Stored in network byte order.
*/
__bpf_md_ptr(struct bpf_sock *, sk);
diff --git a/include/uapi/linux/btrfs_tree.h b/include/uapi/linux/btrfs_tree.h
index 421239b98db2..34d5b34286fa 100644
--- a/include/uapi/linux/btrfs_tree.h
+++ b/include/uapi/linux/btrfs_tree.h
@@ -866,6 +866,8 @@ enum btrfs_raid_types {
#define BTRFS_BLOCK_GROUP_RAID56_MASK (BTRFS_BLOCK_GROUP_RAID5 | \
BTRFS_BLOCK_GROUP_RAID6)
+#define BTRFS_BLOCK_GROUP_RAID1_MASK (BTRFS_BLOCK_GROUP_RAID1)
+
/*
* We need a bit for restriper to be able to tell when chunks of type
* SINGLE are available. This "extended" profile format is used in
diff --git a/include/uapi/linux/coda.h b/include/uapi/linux/coda.h
index 695fade33c64..aa34c2dcae8d 100644
--- a/include/uapi/linux/coda.h
+++ b/include/uapi/linux/coda.h
@@ -86,10 +86,6 @@ typedef unsigned long long u_quad_t;
#define inline
-struct timespec {
- long ts_sec;
- long ts_nsec;
-};
#else /* DJGPP but not KERNEL */
#include <sys/time.h>
typedef unsigned long long u_quad_t;
@@ -110,13 +106,6 @@ typedef unsigned long long u_quad_t;
#define cdev_t dev_t
#endif
-#ifdef __CYGWIN32__
-struct timespec {
- time_t tv_sec; /* seconds */
- long tv_nsec; /* nanoseconds */
-};
-#endif
-
#ifndef __BIT_TYPES_DEFINED__
#define __BIT_TYPES_DEFINED__
typedef signed char int8_t;
@@ -211,6 +200,11 @@ struct CodaFid {
*/
enum coda_vtype { C_VNON, C_VREG, C_VDIR, C_VBLK, C_VCHR, C_VLNK, C_VSOCK, C_VFIFO, C_VBAD };
+struct coda_timespec {
+ int64_t tv_sec; /* seconds */
+ long tv_nsec; /* nanoseconds */
+};
+
struct coda_vattr {
long va_type; /* vnode type (for create) */
u_short va_mode; /* files access mode and type */
@@ -220,9 +214,9 @@ struct coda_vattr {
long va_fileid; /* file id */
u_quad_t va_size; /* file size in bytes */
long va_blocksize; /* blocksize preferred for i/o */
- struct timespec va_atime; /* time of last access */
- struct timespec va_mtime; /* time of last modification */
- struct timespec va_ctime; /* time file changed */
+ struct coda_timespec va_atime; /* time of last access */
+ struct coda_timespec va_mtime; /* time of last modification */
+ struct coda_timespec va_ctime; /* time file changed */
u_long va_gen; /* generation number of file */
u_long va_flags; /* flags defined for file */
cdev_t va_rdev; /* device special file represents */
@@ -277,7 +271,8 @@ struct coda_statfs {
#define CODA_STATFS 34
#define CODA_STORE 35
#define CODA_RELEASE 36
-#define CODA_NCALLS 37
+#define CODA_ACCESS_INTENT 37
+#define CODA_NCALLS 38
#define DOWNCALL(opcode) (opcode >= CODA_REPLACE && opcode <= CODA_PURGEFID)
@@ -287,7 +282,12 @@ struct coda_statfs {
#define CIOC_KERNEL_VERSION _IOWR('c', 10, size_t)
-#define CODA_KERNEL_VERSION 3 /* 128-bit file identifiers */
+// CODA_KERNEL_VERSION 0 /* don't care about kernel version number */
+// CODA_KERNEL_VERSION 1 /* The old venus 4.6 compatible interface */
+// CODA_KERNEL_VERSION 2 /* venus_lookup gets an extra parameter */
+// CODA_KERNEL_VERSION 3 /* 128-bit file identifiers */
+// CODA_KERNEL_VERSION 4 /* 64-bit timespec */
+#define CODA_KERNEL_VERSION 5 /* access intent support */
/*
* Venus <-> Coda RPC arguments
@@ -295,8 +295,8 @@ struct coda_statfs {
struct coda_in_hdr {
u_int32_t opcode;
u_int32_t unique; /* Keep multiple outstanding msgs distinct */
- pid_t pid;
- pid_t pgid;
+ __kernel_pid_t pid;
+ __kernel_pid_t pgid;
vuid_t uid;
};
@@ -642,6 +642,25 @@ struct coda_statfs_out {
struct coda_statfs stat;
};
+#define CODA_ACCESS_TYPE_READ 1
+#define CODA_ACCESS_TYPE_WRITE 2
+#define CODA_ACCESS_TYPE_MMAP 3
+#define CODA_ACCESS_TYPE_READ_FINISH 4
+#define CODA_ACCESS_TYPE_WRITE_FINISH 5
+
+/* coda_access_intent: NO_OUT */
+struct coda_access_intent_in {
+ struct coda_in_hdr ih;
+ struct CodaFid VFid;
+ int count;
+ int pos;
+ int type;
+};
+
+struct coda_access_intent_out {
+ struct coda_out_hdr out;
+};
+
/*
* Occasionally, we don't cache the fid returned by CODA_LOOKUP.
* For instance, if the fid is inconsistent.
@@ -673,6 +692,7 @@ union inputArgs {
struct coda_open_by_fd_in coda_open_by_fd;
struct coda_open_by_path_in coda_open_by_path;
struct coda_statfs_in coda_statfs;
+ struct coda_access_intent_in coda_access_intent;
};
union outputArgs {
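For reference, a hedged sketch of how the kernel side might populate the new access-intent upcall before handing it to Venus; the helper and its caller are hypothetical, the types come from the header above:

/* Fill the new CODA_ACCESS_INTENT upcall. 'inp' points at an upcall
 * buffer already set up as union inputArgs; 'type' is one of the
 * CODA_ACCESS_TYPE_* values defined above. */
static void fill_access_intent(union inputArgs *inp, struct CodaFid *fid,
			       int pos, int count, int type)
{
	inp->coda_access_intent.ih.opcode = CODA_ACCESS_INTENT;
	inp->coda_access_intent.VFid = *fid;
	inp->coda_access_intent.count = count;
	inp->coda_access_intent.pos = pos;
	inp->coda_access_intent.type = type;
}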
diff --git a/include/uapi/linux/coda_psdev.h b/include/uapi/linux/coda_psdev.h
deleted file mode 100644
index aa6623efd2dd..000000000000
--- a/include/uapi/linux/coda_psdev.h
+++ /dev/null
@@ -1,28 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-#ifndef _UAPI__CODA_PSDEV_H
-#define _UAPI__CODA_PSDEV_H
-
-#include <linux/magic.h>
-
-#define CODA_PSDEV_MAJOR 67
-#define MAX_CODADEVS 5 /* how many do we allow */
-
-
-/* messages between coda filesystem in kernel and Venus */
-struct upc_req {
- struct list_head uc_chain;
- caddr_t uc_data;
- u_short uc_flags;
- u_short uc_inSize; /* Size is at most 5000 bytes */
- u_short uc_outSize;
- u_short uc_opcode; /* copied from data to save lookup */
- int uc_unique;
- wait_queue_head_t uc_sleep; /* process' wait queue */
-};
-
-#define CODA_REQ_ASYNC 0x1
-#define CODA_REQ_READ 0x2
-#define CODA_REQ_WRITE 0x4
-#define CODA_REQ_ABORT 0x8
-
-#endif /* _UAPI__CODA_PSDEV_H */
diff --git a/include/uapi/linux/magic.h b/include/uapi/linux/magic.h
index 665e18627f78..1274c692e59c 100644
--- a/include/uapi/linux/magic.h
+++ b/include/uapi/linux/magic.h
@@ -92,5 +92,6 @@
#define BALLOON_KVM_MAGIC 0x13661366
#define ZSMALLOC_MAGIC 0x58295829
#define DMA_BUF_MAGIC 0x444d4142 /* "DMAB" */
+#define Z3FOLD_MAGIC 0x33
#endif /* __LINUX_MAGIC_H__ */
diff --git a/include/uapi/linux/pkt_sched.h b/include/uapi/linux/pkt_sched.h
index 1f623252abe8..18f185299f47 100644
--- a/include/uapi/linux/pkt_sched.h
+++ b/include/uapi/linux/pkt_sched.h
@@ -1174,7 +1174,7 @@ enum {
TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME, /* s64 */
TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME_EXTENSION, /* s64 */
TCA_TAPRIO_ATTR_FLAGS, /* u32 */
- TCA_TAPRIO_ATTR_TXTIME_DELAY, /* s32 */
+ TCA_TAPRIO_ATTR_TXTIME_DELAY, /* u32 */
__TCA_TAPRIO_ATTR_MAX,
};
diff --git a/include/uapi/linux/ptrace.h b/include/uapi/linux/ptrace.h
index d5a1b8a492b9..a71b6e3b03eb 100644
--- a/include/uapi/linux/ptrace.h
+++ b/include/uapi/linux/ptrace.h
@@ -73,6 +73,41 @@ struct seccomp_metadata {
__u64 flags; /* Output: filter's flags */
};
+#define PTRACE_GET_SYSCALL_INFO 0x420e
+#define PTRACE_SYSCALL_INFO_NONE 0
+#define PTRACE_SYSCALL_INFO_ENTRY 1
+#define PTRACE_SYSCALL_INFO_EXIT 2
+#define PTRACE_SYSCALL_INFO_SECCOMP 3
+
+struct ptrace_syscall_info {
+ __u8 op; /* PTRACE_SYSCALL_INFO_* */
+ __u32 arch __attribute__((__aligned__(sizeof(__u32))));
+ __u64 instruction_pointer;
+ __u64 stack_pointer;
+ union {
+ struct {
+ __u64 nr;
+ __u64 args[6];
+ } entry;
+ struct {
+ __s64 rval;
+ __u8 is_error;
+ } exit;
+ struct {
+ __u64 nr;
+ __u64 args[6];
+ __u32 ret_data;
+ } seccomp;
+ };
+};
+
+/*
+ * These values are stored in task->ptrace_message
+ * by tracehook_report_syscall_* to describe the current syscall-stop.
+ */
+#define PTRACE_EVENTMSG_SYSCALL_ENTRY 1
+#define PTRACE_EVENTMSG_SYSCALL_EXIT 2
+
/* Read signals from a shared (process wide) queue */
#define PTRACE_PEEKSIGINFO_SHARED (1 << 0)
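From the tracer's side, the new request takes the buffer size as the third ptrace() argument and returns the number of bytes the kernel would have written. A hedged userspace sketch; the tracee is assumed to be stopped in a syscall-stop, and the include mix may need adjusting for your libc:

#include <stdio.h>
#include <sys/types.h>
#include <sys/ptrace.h>
#include <linux/ptrace.h>

static void show_syscall_stop(pid_t pid)
{
	struct ptrace_syscall_info info;
	long ret;

	ret = ptrace(PTRACE_GET_SYSCALL_INFO, pid,
		     (void *)sizeof(info), &info);
	if (ret <= 0)
		return;

	if (info.op == PTRACE_SYSCALL_INFO_ENTRY)
		printf("entry: syscall %llu, arg0 %llx\n",
		       (unsigned long long)info.entry.nr,
		       (unsigned long long)info.entry.args[0]);
	else if (info.op == PTRACE_SYSCALL_INFO_EXIT)
		printf("exit: rval %lld%s\n",
		       (long long)info.exit.rval,
		       info.exit.is_error ? " (error)" : "");
}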
diff --git a/include/uapi/linux/virtio_ids.h b/include/uapi/linux/virtio_ids.h
index 6d5c3b2d4f4d..348fd0176f75 100644
--- a/include/uapi/linux/virtio_ids.h
+++ b/include/uapi/linux/virtio_ids.h
@@ -43,5 +43,7 @@
#define VIRTIO_ID_INPUT 18 /* virtio input */
#define VIRTIO_ID_VSOCK 19 /* virtio vsock transport */
#define VIRTIO_ID_CRYPTO 20 /* virtio crypto */
+#define VIRTIO_ID_IOMMU 23 /* virtio IOMMU */
+#define VIRTIO_ID_PMEM 27 /* virtio pmem */
#endif /* _LINUX_VIRTIO_IDS_H */
diff --git a/include/uapi/linux/virtio_iommu.h b/include/uapi/linux/virtio_iommu.h
new file mode 100644
index 000000000000..ba1b460c9944
--- /dev/null
+++ b/include/uapi/linux/virtio_iommu.h
@@ -0,0 +1,161 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
+/*
+ * Virtio-iommu definition v0.9
+ *
+ * Copyright (C) 2018 Arm Ltd.
+ */
+#ifndef _UAPI_LINUX_VIRTIO_IOMMU_H
+#define _UAPI_LINUX_VIRTIO_IOMMU_H
+
+#include <linux/types.h>
+
+/* Feature bits */
+#define VIRTIO_IOMMU_F_INPUT_RANGE 0
+#define VIRTIO_IOMMU_F_DOMAIN_BITS 1
+#define VIRTIO_IOMMU_F_MAP_UNMAP 2
+#define VIRTIO_IOMMU_F_BYPASS 3
+#define VIRTIO_IOMMU_F_PROBE 4
+
+struct virtio_iommu_range {
+ __u64 start;
+ __u64 end;
+};
+
+struct virtio_iommu_config {
+ /* Supported page sizes */
+ __u64 page_size_mask;
+ /* Supported IOVA range */
+ struct virtio_iommu_range input_range;
+ /* Max domain ID size */
+ __u8 domain_bits;
+ __u8 padding[3];
+ /* Probe buffer size */
+ __u32 probe_size;
+};
+
+/* Request types */
+#define VIRTIO_IOMMU_T_ATTACH 0x01
+#define VIRTIO_IOMMU_T_DETACH 0x02
+#define VIRTIO_IOMMU_T_MAP 0x03
+#define VIRTIO_IOMMU_T_UNMAP 0x04
+#define VIRTIO_IOMMU_T_PROBE 0x05
+
+/* Status types */
+#define VIRTIO_IOMMU_S_OK 0x00
+#define VIRTIO_IOMMU_S_IOERR 0x01
+#define VIRTIO_IOMMU_S_UNSUPP 0x02
+#define VIRTIO_IOMMU_S_DEVERR 0x03
+#define VIRTIO_IOMMU_S_INVAL 0x04
+#define VIRTIO_IOMMU_S_RANGE 0x05
+#define VIRTIO_IOMMU_S_NOENT 0x06
+#define VIRTIO_IOMMU_S_FAULT 0x07
+
+struct virtio_iommu_req_head {
+ __u8 type;
+ __u8 reserved[3];
+};
+
+struct virtio_iommu_req_tail {
+ __u8 status;
+ __u8 reserved[3];
+};
+
+struct virtio_iommu_req_attach {
+ struct virtio_iommu_req_head head;
+ __le32 domain;
+ __le32 endpoint;
+ __u8 reserved[8];
+ struct virtio_iommu_req_tail tail;
+};
+
+struct virtio_iommu_req_detach {
+ struct virtio_iommu_req_head head;
+ __le32 domain;
+ __le32 endpoint;
+ __u8 reserved[8];
+ struct virtio_iommu_req_tail tail;
+};
+
+#define VIRTIO_IOMMU_MAP_F_READ (1 << 0)
+#define VIRTIO_IOMMU_MAP_F_WRITE (1 << 1)
+#define VIRTIO_IOMMU_MAP_F_EXEC (1 << 2)
+#define VIRTIO_IOMMU_MAP_F_MMIO (1 << 3)
+
+#define VIRTIO_IOMMU_MAP_F_MASK (VIRTIO_IOMMU_MAP_F_READ | \
+ VIRTIO_IOMMU_MAP_F_WRITE | \
+ VIRTIO_IOMMU_MAP_F_EXEC | \
+ VIRTIO_IOMMU_MAP_F_MMIO)
+
+struct virtio_iommu_req_map {
+ struct virtio_iommu_req_head head;
+ __le32 domain;
+ __le64 virt_start;
+ __le64 virt_end;
+ __le64 phys_start;
+ __le32 flags;
+ struct virtio_iommu_req_tail tail;
+};
+
+struct virtio_iommu_req_unmap {
+ struct virtio_iommu_req_head head;
+ __le32 domain;
+ __le64 virt_start;
+ __le64 virt_end;
+ __u8 reserved[4];
+ struct virtio_iommu_req_tail tail;
+};
+
+#define VIRTIO_IOMMU_PROBE_T_NONE 0
+#define VIRTIO_IOMMU_PROBE_T_RESV_MEM 1
+
+#define VIRTIO_IOMMU_PROBE_T_MASK 0xfff
+
+struct virtio_iommu_probe_property {
+ __le16 type;
+ __le16 length;
+};
+
+#define VIRTIO_IOMMU_RESV_MEM_T_RESERVED 0
+#define VIRTIO_IOMMU_RESV_MEM_T_MSI 1
+
+struct virtio_iommu_probe_resv_mem {
+ struct virtio_iommu_probe_property head;
+ __u8 subtype;
+ __u8 reserved[3];
+ __le64 start;
+ __le64 end;
+};
+
+struct virtio_iommu_req_probe {
+ struct virtio_iommu_req_head head;
+ __le32 endpoint;
+ __u8 reserved[64];
+
+ __u8 properties[];
+
+ /*
+ * Tail follows the variable-length properties array. No padding,
+ * property lengths are all aligned on 8 bytes.
+ */
+};
+
+/* Fault types */
+#define VIRTIO_IOMMU_FAULT_R_UNKNOWN 0
+#define VIRTIO_IOMMU_FAULT_R_DOMAIN 1
+#define VIRTIO_IOMMU_FAULT_R_MAPPING 2
+
+#define VIRTIO_IOMMU_FAULT_F_READ (1 << 0)
+#define VIRTIO_IOMMU_FAULT_F_WRITE (1 << 1)
+#define VIRTIO_IOMMU_FAULT_F_EXEC (1 << 2)
+#define VIRTIO_IOMMU_FAULT_F_ADDRESS (1 << 8)
+
+struct virtio_iommu_fault {
+ __u8 reason;
+ __u8 reserved[3];
+ __le32 flags;
+ __le32 endpoint;
+ __u8 reserved2[4];
+ __le64 address;
+};
+
+#endif
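A hedged driver-side sketch of encoding a MAP request with this layout (kernel context assumed; the helper name is ours). Note that virt_end is an inclusive last address and all multi-byte fields are little-endian on the wire; the caller is expected to have zeroed the buffer, including reserved bytes and the tail:

#include <linux/types.h>
#include <asm/byteorder.h>
#include <uapi/linux/virtio_iommu.h>

static void viommu_encode_map(struct virtio_iommu_req_map *map, __u32 domain,
			      __u64 iova, __u64 size, __u64 paddr, __u32 flags)
{
	map->head.type  = VIRTIO_IOMMU_T_MAP;
	map->domain     = __cpu_to_le32(domain);
	map->virt_start = __cpu_to_le64(iova);
	map->virt_end   = __cpu_to_le64(iova + size - 1);	/* inclusive */
	map->phys_start = __cpu_to_le64(paddr);
	map->flags      = __cpu_to_le32(flags & VIRTIO_IOMMU_MAP_F_MASK);
}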
diff --git a/include/uapi/linux/virtio_pmem.h b/include/uapi/linux/virtio_pmem.h
new file mode 100644
index 000000000000..9a63ed6d062f
--- /dev/null
+++ b/include/uapi/linux/virtio_pmem.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Definitions for virtio-pmem devices.
+ *
+ * Copyright (C) 2019 Red Hat, Inc.
+ *
+ * Author(s): Pankaj Gupta <pagupta@redhat.com>
+ */
+
+#ifndef _UAPI_LINUX_VIRTIO_PMEM_H
+#define _UAPI_LINUX_VIRTIO_PMEM_H
+
+#include <linux/types.h>
+#include <linux/virtio_ids.h>
+#include <linux/virtio_config.h>
+
+struct virtio_pmem_config {
+ __u64 start;
+ __u64 size;
+};
+
+#define VIRTIO_PMEM_REQ_TYPE_FLUSH 0
+
+struct virtio_pmem_resp {
+ /* Host return status corresponding to flush request */
+ __le32 ret;
+};
+
+struct virtio_pmem_req {
+ /* command type */
+ __le32 type;
+};
+
+#endif
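A hedged guest-side sketch of the flush handshake this header describes; the transport call (vpmem_send below) is hypothetical and only the wire-format handling is taken from the header:

#include <linux/errno.h>
#include <asm/byteorder.h>
#include <uapi/linux/virtio_pmem.h>

/* Hypothetical transport helper: queue req/resp on the device's
 * virtqueue and wait for the host to complete the request. */
int vpmem_send(void *dev, struct virtio_pmem_req *req,
	       struct virtio_pmem_resp *resp);

static int vpmem_flush(void *dev)
{
	struct virtio_pmem_req req = {
		.type = __cpu_to_le32(VIRTIO_PMEM_REQ_TYPE_FLUSH),
	};
	struct virtio_pmem_resp resp = { 0 };

	if (vpmem_send(dev, &req, &resp))
		return -EIO;

	/* Non-zero 'ret' from the host means the flush failed. */
	return __le32_to_cpu(resp.ret) ? -EIO : 0;
}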
diff --git a/include/uapi/rdma/rdma_user_ioctl_cmds.h b/include/uapi/rdma/rdma_user_ioctl_cmds.h
index 64c14cb0022f..b8bb285f6b2a 100644
--- a/include/uapi/rdma/rdma_user_ioctl_cmds.h
+++ b/include/uapi/rdma/rdma_user_ioctl_cmds.h
@@ -36,7 +36,7 @@
#include <linux/types.h>
#include <linux/ioctl.h>
-/* Documentation/ioctl/ioctl-number.txt */
+/* Documentation/ioctl/ioctl-number.rst */
#define RDMA_IOCTL_MAGIC 0x1b
#define RDMA_VERBS_IOCTL \
_IOWR(RDMA_IOCTL_MAGIC, 1, struct ib_uverbs_ioctl_hdr)
diff --git a/include/xen/balloon.h b/include/xen/balloon.h
index 4914b93a23f2..6fb95aa19405 100644
--- a/include/xen/balloon.h
+++ b/include/xen/balloon.h
@@ -27,16 +27,6 @@ void balloon_set_new_target(unsigned long target);
int alloc_xenballooned_pages(int nr_pages, struct page **pages);
void free_xenballooned_pages(int nr_pages, struct page **pages);
-struct device;
-#ifdef CONFIG_XEN_SELFBALLOONING
-extern int register_xen_selfballooning(struct device *dev);
-#else
-static inline int register_xen_selfballooning(struct device *dev)
-{
- return -ENOSYS;
-}
-#endif
-
#ifdef CONFIG_XEN_BALLOON
void xen_balloon_init(void);
#else
diff --git a/include/xen/events.h b/include/xen/events.h
index a48897199975..c0e6a0598397 100644
--- a/include/xen/events.h
+++ b/include/xen/events.h
@@ -3,6 +3,7 @@
#define _XEN_EVENTS_H
#include <linux/interrupt.h>
+#include <linux/irq.h>
#ifdef CONFIG_PCI_MSI
#include <linux/msi.h>
#endif
@@ -59,7 +60,7 @@ void evtchn_put(unsigned int evtchn);
void xen_send_IPI_one(unsigned int cpu, enum ipi_vector vector);
void rebind_evtchn_irq(int evtchn, int irq);
-int xen_rebind_evtchn_to_cpu(int evtchn, unsigned tcpu);
+int xen_set_affinity_evtchn(struct irq_desc *desc, unsigned int tcpu);
static inline void notify_remote_via_evtchn(int port)
{
diff --git a/include/xen/tmem.h b/include/xen/tmem.h
deleted file mode 100644
index c80bafe31f14..000000000000
--- a/include/xen/tmem.h
+++ /dev/null
@@ -1,18 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _XEN_TMEM_H
-#define _XEN_TMEM_H
-
-#include <linux/types.h>
-
-#ifdef CONFIG_XEN_TMEM_MODULE
-#define tmem_enabled true
-#else
-/* defined in drivers/xen/tmem.c */
-extern bool tmem_enabled;
-#endif
-
-#ifdef CONFIG_XEN_SELFBALLOONING
-extern int xen_selfballoon_init(bool, bool);
-#endif
-
-#endif /* _XEN_TMEM_H */