Diffstat (limited to 'mm')

 -rw-r--r--  mm/memory.c          | 15
 -rw-r--r--  mm/mlock.c           | 21
 -rw-r--r--  mm/mmap.c            | 21
 -rw-r--r--  mm/nommu.c           |  7
 -rw-r--r--  mm/oom_kill.c        | 16
 -rw-r--r--  mm/page-writeback.c  |  3

6 files changed, 60 insertions(+), 23 deletions(-)
diff --git a/mm/memory.c b/mm/memory.c
index b6e5fd23cc5a..2ed2267439df 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2770,11 +2770,18 @@ static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
 {
 	address &= PAGE_MASK;
 	if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
-		address -= PAGE_SIZE;
-		if (find_vma(vma->vm_mm, address) != vma)
-			return -ENOMEM;
+		struct vm_area_struct *prev = vma->vm_prev;
+
+		/*
+		 * Is there a mapping abutting this one below?
+		 *
+		 * That's only ok if it's the same stack mapping
+		 * that has gotten split..
+		 */
+		if (prev && prev->vm_end == address)
+			return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
 
-		expand_stack(vma, address);
+		expand_stack(vma, address - PAGE_SIZE);
 	}
 	return 0;
 }
diff --git a/mm/mlock.c b/mm/mlock.c
index 49e5e4cb8232..cbae7c5b9568 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -135,6 +135,19 @@ void munlock_vma_page(struct page *page)
 	}
 }
 
+/* Is the vma a continuation of the stack vma above it? */
+static inline int vma_stack_continue(struct vm_area_struct *vma, unsigned long addr)
+{
+	return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
+}
+
+static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
+{
+	return (vma->vm_flags & VM_GROWSDOWN) &&
+		(vma->vm_start == addr) &&
+		!vma_stack_continue(vma->vm_prev, addr);
+}
+
 /**
  * __mlock_vma_pages_range() - mlock a range of pages in the vma.
  * @vma: target vma
@@ -168,11 +181,9 @@ static long __mlock_vma_pages_range(struct vm_area_struct *vma,
 		gup_flags |= FOLL_WRITE;
 
 	/* We don't try to access the guard page of a stack vma */
-	if (vma->vm_flags & VM_GROWSDOWN) {
-		if (start == vma->vm_start) {
-			start += PAGE_SIZE;
-			nr_pages--;
-		}
+	if (stack_guard_page(vma, start)) {
+		addr += PAGE_SIZE;
+		nr_pages--;
 	}
 
 	while (nr_pages > 0) {
diff --git a/mm/mmap.c b/mm/mmap.c
index 31003338b978..331e51af38c9 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -388,17 +388,23 @@ static inline void
 __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
 		struct vm_area_struct *prev, struct rb_node *rb_parent)
 {
+	struct vm_area_struct *next;
+
+	vma->vm_prev = prev;
 	if (prev) {
-		vma->vm_next = prev->vm_next;
+		next = prev->vm_next;
 		prev->vm_next = vma;
 	} else {
 		mm->mmap = vma;
 		if (rb_parent)
-			vma->vm_next = rb_entry(rb_parent,
+			next = rb_entry(rb_parent,
 					struct vm_area_struct, vm_rb);
 		else
-			vma->vm_next = NULL;
+			next = NULL;
 	}
+	vma->vm_next = next;
+	if (next)
+		next->vm_prev = vma;
 }
 
 void __vma_link_rb(struct mm_struct *mm, struct vm_area_struct *vma,
@@ -483,7 +489,11 @@ static inline void
 __vma_unlink(struct mm_struct *mm, struct vm_area_struct *vma,
 		struct vm_area_struct *prev)
 {
-	prev->vm_next = vma->vm_next;
+	struct vm_area_struct *next = vma->vm_next;
+
+	prev->vm_next = next;
+	if (next)
+		next->vm_prev = prev;
 	rb_erase(&vma->vm_rb, &mm->mm_rb);
 	if (mm->mmap_cache == vma)
 		mm->mmap_cache = prev;
@@ -1915,6 +1925,7 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
 	unsigned long addr;
 
 	insertion_point = (prev ? &prev->vm_next : &mm->mmap);
+	vma->vm_prev = NULL;
 	do {
 		rb_erase(&vma->vm_rb, &mm->mm_rb);
 		mm->map_count--;
@@ -1922,6 +1933,8 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
 		vma = vma->vm_next;
 	} while (vma && vma->vm_start < end);
 	*insertion_point = vma;
+	if (vma)
+		vma->vm_prev = prev;
 	tail_vma->vm_next = NULL;
 	if (mm->unmap_area == arch_unmap_area)
 		addr = prev ? prev->vm_end : mm->mmap_base;
diff --git a/mm/nommu.c b/mm/nommu.c
index efa9a380335e..88ff091eb07a 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -604,7 +604,7 @@ static void protect_vma(struct vm_area_struct *vma, unsigned long flags)
  */
 static void add_vma_to_mm(struct mm_struct *mm, struct vm_area_struct *vma)
 {
-	struct vm_area_struct *pvma, **pp;
+	struct vm_area_struct *pvma, **pp, *next;
 	struct address_space *mapping;
 	struct rb_node **p, *parent;
 
@@ -664,8 +664,11 @@ static void add_vma_to_mm(struct mm_struct *mm, struct vm_area_struct *vma)
 			break;
 	}
 
-	vma->vm_next = *pp;
+	next = *pp;
 	*pp = vma;
+	vma->vm_next = next;
+	if (next)
+		next->vm_prev = vma;
 }
 
 /*
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index 5014e50644d1..fc81cb22869e 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -372,7 +372,7 @@ static void dump_tasks(const struct mem_cgroup *mem)
 		}
 
 		pr_info("[%5d] %5d %5d %8lu %8lu %3u %3d %5d %s\n",
-			task->pid, __task_cred(task)->uid, task->tgid,
+			task->pid, task_uid(task), task->tgid,
 			task->mm->total_vm, get_mm_rss(task->mm),
 			task_cpu(task), task->signal->oom_adj,
 			task->signal->oom_score_adj, task->comm);
@@ -401,10 +401,9 @@ static void dump_header(struct task_struct *p, gfp_t gfp_mask, int order,
 static int oom_kill_task(struct task_struct *p, struct mem_cgroup *mem)
 {
 	p = find_lock_task_mm(p);
-	if (!p) {
-		task_unlock(p);
+	if (!p)
 		return 1;
-	}
+
 	pr_err("Killed process %d (%s) total-vm:%lukB, anon-rss:%lukB, file-rss:%lukB\n",
 		task_pid_nr(p), p->comm, K(p->mm->total_vm),
 		K(get_mm_counter(p->mm, MM_ANONPAGES)),
@@ -647,6 +646,7 @@ void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask,
 	unsigned long freed = 0;
 	unsigned int points;
 	enum oom_constraint constraint = CONSTRAINT_NONE;
+	int killed = 0;
 
 	blocking_notifier_call_chain(&oom_notify_list, 0, &freed);
 	if (freed > 0)
@@ -684,7 +684,7 @@ void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask,
 		if (!oom_kill_process(current, gfp_mask, order, 0, totalpages,
 				NULL, nodemask,
 				"Out of memory (oom_kill_allocating_task)"))
-			return;
+			goto out;
 	}
 
 retry:
@@ -692,7 +692,7 @@ retry:
 			constraint == CONSTRAINT_MEMORY_POLICY ? nodemask :
 			NULL);
 	if (PTR_ERR(p) == -1UL)
-		return;
+		goto out;
 
 	/* Found nothing?!?! Either we hang forever, or we panic. */
 	if (!p) {
@@ -704,13 +704,15 @@ retry:
 	if (oom_kill_process(p, gfp_mask, order, points, totalpages, NULL,
 				nodemask, "Out of memory"))
 		goto retry;
+	killed = 1;
+out:
 	read_unlock(&tasklist_lock);
 
 	/*
 	 * Give "p" a good chance of killing itself before we
 	 * retry to allocate memory unless "p" is current
	 */
-	if (!test_thread_flag(TIF_MEMDIE))
+	if (killed && !test_thread_flag(TIF_MEMDIE))
 		schedule_timeout_uninterruptible(1);
 }
 
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 7262aacea8a2..c09ef5219cbe 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -836,7 +836,8 @@ void tag_pages_for_writeback(struct address_space *mapping,
 		spin_unlock_irq(&mapping->tree_lock);
 		WARN_ON_ONCE(tagged > WRITEBACK_TAG_BATCH);
 		cond_resched();
-	} while (tagged >= WRITEBACK_TAG_BATCH);
+	/* We check 'start' to handle wrapping when end == ~0UL */
+	} while (tagged >= WRITEBACK_TAG_BATCH && start);
 }
 EXPORT_SYMBOL(tag_pages_for_writeback);
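Taken together, the memory.c, mlock.c and mmap.c hunks implement one idea: give every VMA a vm_prev back-pointer so that "is the mapping just below me part of my own stack?" becomes a constant-time neighbour test instead of an rbtree walk through find_vma(). The following is a minimal userspace sketch of that pattern, not kernel code; struct vma, vma_link_list() and check_stack_guard() are simplified stand-ins for struct vm_area_struct and the functions changed in the diff.

#include <assert.h>
#include <stdio.h>

#define VM_GROWSDOWN 0x00000100UL	/* same value as the kernel flag */

struct vma {
	unsigned long vm_start, vm_end, vm_flags;
	struct vma *vm_next, *vm_prev;
};

/* Userspace model of __vma_link_list(): insert @vma after @prev
 * (at the list head if @prev is NULL). */
static void vma_link_list(struct vma **head, struct vma *vma, struct vma *prev)
{
	struct vma *next;

	vma->vm_prev = prev;
	if (prev) {
		next = prev->vm_next;
		prev->vm_next = vma;
	} else {
		next = *head;
		*head = vma;
	}
	vma->vm_next = next;
	if (next)
		next->vm_prev = vma;	/* back-link the old singly-linked code lacked */
}

/* Model of the new check_stack_guard_page() logic: O(1) via vm_prev. */
static int check_stack_guard(struct vma *vma, unsigned long address)
{
	if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
		struct vma *prev = vma->vm_prev;

		/* A mapping abutting from below is only ok if it is a
		 * split-off piece of the same VM_GROWSDOWN stack. */
		if (prev && prev->vm_end == address)
			return (prev->vm_flags & VM_GROWSDOWN) ? 0 : -1;
	}
	return 0;
}

int main(void)
{
	struct vma *head = NULL;
	struct vma heap  = { .vm_start = 0x1000, .vm_end = 0x2000 };
	struct vma stack = { .vm_start = 0x2000, .vm_end = 0x3000,
			     .vm_flags = VM_GROWSDOWN };

	vma_link_list(&head, &heap, NULL);
	vma_link_list(&head, &stack, &heap);

	/* A non-stack mapping abuts the stack: growing into it must fail. */
	assert(check_stack_guard(&stack, 0x2000) == -1);
	printf("growth into the abutting non-stack mapping was rejected\n");
	return 0;
}

Note how vma_link_list() must also set next->vm_prev; a stale back-link anywhere would break the O(1) check, which is why the mmap.c hunks above and the nommu.c hunk below touch every place the list is spliced.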
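Of the remaining hunks, the oom_kill.c changes are control-flow fixes (a common out: label guarantees read_unlock(&tasklist_lock) on every exit path, and the one-jiffy sleep now happens only after a victim was actually killed), while the page-writeback.c change guards against an unsigned wrap: tag_pages_for_writeback() advances start past the last page it tagged, so for end == ~0UL that increment can wrap start back to 0 and the loop would rescan the file forever. A tiny userspace demonstration of the wrap, again plain C rather than kernel code:

#include <limits.h>
#include <stdio.h>

int main(void)
{
	unsigned long end = ULONG_MAX;	/* "tag everything" callers pass ~0UL */
	unsigned long start = end + 1;	/* unsigned overflow: wraps to 0 */

	printf("start after tagging the last possible page: %lu\n", start);
	/* hence the new loop condition in the diff:
	 *     } while (tagged >= WRITEBACK_TAG_BATCH && start);
	 * a wrapped (zero) start now terminates the loop. */
	return 0;
}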