-rw-r--r--  .mailmap                            |  2
-rw-r--r--  MAINTAINERS                         |  7
-rw-r--r--  arch/x86/kernel/machine_kexec_64.c  | 12
-rw-r--r--  mm/hugetlb_vmemmap.c                |  2
-rw-r--r--  mm/memremap.c                       |  2
-rw-r--r--  mm/oom_kill.c                       | 58
-rw-r--r--  mm/page_isolation.c                 |  2
7 files changed, 50 insertions, 35 deletions
diff --git a/.mailmap b/.mailmap
index 9ba38a82aba4..825fae8e6b7b 100644
--- a/.mailmap
+++ b/.mailmap
@@ -201,6 +201,8 @@ Jordan Crouse <jordan@cosmicpenguin.net> <jcrouse@codeaurora.org>
<josh@joshtriplett.org> <josht@linux.vnet.ibm.com>
<josh@joshtriplett.org> <josht@us.ibm.com>
<josh@joshtriplett.org> <josht@vnet.ibm.com>
+Josh Poimboeuf <jpoimboe@kernel.org> <jpoimboe@redhat.com>
+Josh Poimboeuf <jpoimboe@kernel.org> <jpoimboe@us.ibm.com>
Juha Yrjola <at solidboot.com>
Juha Yrjola <juha.yrjola@nokia.com>
Juha Yrjola <juha.yrjola@solidboot.com>
diff --git a/MAINTAINERS b/MAINTAINERS
index b54288db77aa..a6d3bd9d2a8d 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -22002,6 +22002,13 @@ L: linux-mm@kvack.org
S: Maintained
F: mm/zbud.c

+Z3FOLD COMPRESSED PAGE ALLOCATOR
+M: Vitaly Wool <vitaly.wool@konsulko.com>
+R: Miaohe Lin <linmiaohe@huawei.com>
+L: linux-mm@kvack.org
+S: Maintained
+F: mm/z3fold.c
+
ZD1211RW WIRELESS DRIVER
M: Ulrich Kunitz <kune@deine-taler.de>
L: linux-wireless@vger.kernel.org
diff --git a/arch/x86/kernel/machine_kexec_64.c b/arch/x86/kernel/machine_kexec_64.c
index 566bb8e17149..0611fd83858e 100644
--- a/arch/x86/kernel/machine_kexec_64.c
+++ b/arch/x86/kernel/machine_kexec_64.c
@@ -376,9 +376,6 @@ void machine_kexec(struct kimage *image)
#ifdef CONFIG_KEXEC_FILE
void *arch_kexec_kernel_image_load(struct kimage *image)
{
- vfree(image->elf_headers);
- image->elf_headers = NULL;
-
if (!image->fops || !image->fops->load)
return ERR_PTR(-ENOEXEC);

@@ -514,6 +511,15 @@ overflow:
(int)ELF64_R_TYPE(rel[i].r_info), value);
return -ENOEXEC;
}
+
+int arch_kimage_file_post_load_cleanup(struct kimage *image)
+{
+ vfree(image->elf_headers);
+ image->elf_headers = NULL;
+ image->elf_headers_sz = 0;
+
+ return kexec_image_post_load_cleanup_default(image);
+}
#endif /* CONFIG_KEXEC_FILE */

static int
diff --git a/mm/hugetlb_vmemmap.c b/mm/hugetlb_vmemmap.c
index fcd9f7872064..1089ea8a9c98 100644
--- a/mm/hugetlb_vmemmap.c
+++ b/mm/hugetlb_vmemmap.c
@@ -33,7 +33,7 @@ DEFINE_STATIC_KEY_MAYBE(CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP_DEFAULT_ON,
EXPORT_SYMBOL(hugetlb_optimize_vmemmap_key);

static enum vmemmap_optimize_mode vmemmap_optimize_mode =
- IS_ENABLED(CONFIG_HUGETLB_PAGE_FREE_VMEMMAP_DEFAULT_ON);
+ IS_ENABLED(CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP_DEFAULT_ON);

static void vmemmap_optimize_mode_switch(enum vmemmap_optimize_mode to)
{
diff --git a/mm/memremap.c b/mm/memremap.c
index 2b92e97cb25b..b870a659eee6 100644
--- a/mm/memremap.c
+++ b/mm/memremap.c
@@ -214,7 +214,7 @@ static int pagemap_range(struct dev_pagemap *pgmap, struct mhp_params *params,

if (!mhp_range_allowed(range->start, range_len(range), !is_private)) {
error = -EINVAL;
- goto err_pfn_remap;
+ goto err_kasan;
}

mem_hotplug_begin();
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index 8a70bca67c94..3c6cf9e3cd66 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -56,35 +56,6 @@ static int sysctl_panic_on_oom;
static int sysctl_oom_kill_allocating_task;
static int sysctl_oom_dump_tasks = 1;

-#ifdef CONFIG_SYSCTL
-static struct ctl_table vm_oom_kill_table[] = {
- {
- .procname = "panic_on_oom",
- .data = &sysctl_panic_on_oom,
- .maxlen = sizeof(sysctl_panic_on_oom),
- .mode = 0644,
- .proc_handler = proc_dointvec_minmax,
- .extra1 = SYSCTL_ZERO,
- .extra2 = SYSCTL_TWO,
- },
- {
- .procname = "oom_kill_allocating_task",
- .data = &sysctl_oom_kill_allocating_task,
- .maxlen = sizeof(sysctl_oom_kill_allocating_task),
- .mode = 0644,
- .proc_handler = proc_dointvec,
- },
- {
- .procname = "oom_dump_tasks",
- .data = &sysctl_oom_dump_tasks,
- .maxlen = sizeof(sysctl_oom_dump_tasks),
- .mode = 0644,
- .proc_handler = proc_dointvec,
- },
- {}
-};
-#endif
-
/*
* Serializes oom killer invocations (out_of_memory()) from all contexts to
* prevent from over eager oom killing (e.g. when the oom killer is invoked
@@ -729,6 +700,35 @@ static void queue_oom_reaper(struct task_struct *tsk)
add_timer(&tsk->oom_reaper_timer);
}

+#ifdef CONFIG_SYSCTL
+static struct ctl_table vm_oom_kill_table[] = {
+ {
+ .procname = "panic_on_oom",
+ .data = &sysctl_panic_on_oom,
+ .maxlen = sizeof(sysctl_panic_on_oom),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = SYSCTL_ZERO,
+ .extra2 = SYSCTL_TWO,
+ },
+ {
+ .procname = "oom_kill_allocating_task",
+ .data = &sysctl_oom_kill_allocating_task,
+ .maxlen = sizeof(sysctl_oom_kill_allocating_task),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ },
+ {
+ .procname = "oom_dump_tasks",
+ .data = &sysctl_oom_dump_tasks,
+ .maxlen = sizeof(sysctl_oom_dump_tasks),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ },
+ {}
+};
+#endif
+
static int __init oom_init(void)
{
oom_reaper_th = kthread_run(oom_reaper, NULL, "oom_reaper");
diff --git a/mm/page_isolation.c b/mm/page_isolation.c
index 6021f8444b5a..d200d41ad0d3 100644
--- a/mm/page_isolation.c
+++ b/mm/page_isolation.c
@@ -385,9 +385,9 @@ static int isolate_single_pageblock(unsigned long boundary_pfn, int flags,
* above do the rest. If migration is not possible, just fail.
*/
if (PageCompound(page)) {
- unsigned long nr_pages = compound_nr(page);
struct page *head = compound_head(page);
unsigned long head_pfn = page_to_pfn(head);
+ unsigned long nr_pages = compound_nr(head);

if (head_pfn + nr_pages <= boundary_pfn) {
pfn = head_pfn + nr_pages;