author		Ingo Molnar <mingo@kernel.org>	2016-07-27 12:35:35 +0200
committer	Ingo Molnar <mingo@kernel.org>	2016-07-27 12:35:35 +0200
commit		df15929f8f5c69e987c31bf016eca4a38dba46f0 (patch)
tree		cfb5fe5e8ebff4cabd70557a304f355546943cc9 /mm
parent		efaad554b4ffae1840a2759e09e21325ddbc8b05 (diff)
parent		37e13a1ebe32c4fbfbdb5413f42eb6e71d8b28a4 (diff)
download	linux-df15929f8f5c69e987c31bf016eca4a38dba46f0.tar.bz2
Merge branch 'linus' into x86/microcode, to pick up merge window changes
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'mm')
-rw-r--r--	mm/compaction.c		36
-rw-r--r--	mm/huge_memory.c	38
-rw-r--r--	mm/hugetlb.c		 2
-rw-r--r--	mm/kasan/quarantine.c	29
-rw-r--r--	mm/memcontrol.c		82
-rw-r--r--	mm/memory.c		 3
-rw-r--r--	mm/mmap.c		10
-rw-r--r--	mm/page_alloc.c		 6
-rw-r--r--	mm/rmap.c		12
-rw-r--r--	mm/shmem.c		 8
-rw-r--r--	mm/slab_common.c	 4
-rw-r--r--	mm/workingset.c		 2
12 files changed, 142 insertions, 90 deletions
diff --git a/mm/compaction.c b/mm/compaction.c
index 79bfe0e06907..7bc04778f84d 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -1009,8 +1009,6 @@ static void isolate_freepages(struct compact_control *cc)
block_end_pfn = block_start_pfn,
block_start_pfn -= pageblock_nr_pages,
isolate_start_pfn = block_start_pfn) {
- unsigned long isolated;
-
/*
* This can iterate a massively long zone without finding any
* suitable migration targets, so periodically check if we need
@@ -1034,36 +1032,30 @@ static void isolate_freepages(struct compact_control *cc)
continue;
/* Found a block suitable for isolating free pages from. */
- isolated = isolate_freepages_block(cc, &isolate_start_pfn,
- block_end_pfn, freelist, false);
- /* If isolation failed early, do not continue needlessly */
- if (!isolated && isolate_start_pfn < block_end_pfn &&
- cc->nr_migratepages > cc->nr_freepages)
- break;
+ isolate_freepages_block(cc, &isolate_start_pfn, block_end_pfn,
+ freelist, false);
/*
- * If we isolated enough freepages, or aborted due to async
- * compaction being contended, terminate the loop.
- * Remember where the free scanner should restart next time,
- * which is where isolate_freepages_block() left off.
- * But if it scanned the whole pageblock, isolate_start_pfn
- * now points at block_end_pfn, which is the start of the next
- * pageblock.
- * In that case we will however want to restart at the start
- * of the previous pageblock.
+ * If we isolated enough freepages, or aborted due to lock
+ * contention, terminate.
*/
if ((cc->nr_freepages >= cc->nr_migratepages)
|| cc->contended) {
- if (isolate_start_pfn >= block_end_pfn)
+ if (isolate_start_pfn >= block_end_pfn) {
+ /*
+ * Restart at previous pageblock if more
+ * freepages can be isolated next time.
+ */
isolate_start_pfn =
block_start_pfn - pageblock_nr_pages;
+ }
break;
- } else {
+ } else if (isolate_start_pfn < block_end_pfn) {
/*
- * isolate_freepages_block() should not terminate
- * prematurely unless contended, or isolated enough
+ * If isolation failed early, do not continue
+ * needlessly.
*/
- VM_BUG_ON(isolate_start_pfn < block_end_pfn);
+ break;
}
}
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 9ed58530f695..343a2b7e57aa 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1624,14 +1624,9 @@ int madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
if (next - addr != HPAGE_PMD_SIZE) {
get_page(page);
spin_unlock(ptl);
- if (split_huge_page(page)) {
- put_page(page);
- unlock_page(page);
- goto out_unlocked;
- }
+ split_huge_page(page);
put_page(page);
unlock_page(page);
- ret = 1;
goto out_unlocked;
}
@@ -2989,7 +2984,7 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
}
void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
- unsigned long address, bool freeze)
+ unsigned long address, bool freeze, struct page *page)
{
spinlock_t *ptl;
struct mm_struct *mm = vma->vm_mm;
@@ -2997,8 +2992,17 @@ void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
mmu_notifier_invalidate_range_start(mm, haddr, haddr + HPAGE_PMD_SIZE);
ptl = pmd_lock(mm, pmd);
+
+ /*
+ * If the caller asks to set up migration entries, we need a page to check
+ * the pmd against. Otherwise we can end up replacing the wrong page.
+ */
+ VM_BUG_ON(freeze && !page);
+ if (page && page != pmd_page(*pmd))
+ goto out;
+
if (pmd_trans_huge(*pmd)) {
- struct page *page = pmd_page(*pmd);
+ page = pmd_page(*pmd);
if (PageMlocked(page))
clear_page_mlock(page);
} else if (!pmd_devmap(*pmd))
@@ -3025,24 +3029,8 @@ void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address,
return;
pmd = pmd_offset(pud, address);
- if (!pmd_present(*pmd) || (!pmd_trans_huge(*pmd) && !pmd_devmap(*pmd)))
- return;
- /*
- * If caller asks to setup a migration entries, we need a page to check
- * pmd against. Otherwise we can end up replacing wrong page.
- */
- VM_BUG_ON(freeze && !page);
- if (page && page != pmd_page(*pmd))
- return;
-
- /*
- * Caller holds the mmap_sem write mode or the anon_vma lock,
- * so a huge pmd cannot materialize from under us (khugepaged
- * holds both the mmap_sem write mode and the anon_vma lock
- * write mode).
- */
- __split_huge_pmd(vma, pmd, address, freeze);
+ __split_huge_pmd(vma, pmd, address, freeze, page);
}
void vma_adjust_trans_huge(struct vm_area_struct *vma,
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index c1f3c0be150a..addfe4accc07 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -3383,7 +3383,7 @@ retry_avoidcopy:
/* If no-one else is actually using this page, avoid the copy
* and just make the page writable */
if (page_mapcount(old_page) == 1 && PageAnon(old_page)) {
- page_move_anon_rmap(old_page, vma, address);
+ page_move_anon_rmap(old_page, vma);
set_huge_ptep_writable(vma, address, ptep);
return 0;
}
diff --git a/mm/kasan/quarantine.c b/mm/kasan/quarantine.c
index 4973505a9bdd..65793f150d1f 100644
--- a/mm/kasan/quarantine.c
+++ b/mm/kasan/quarantine.c
@@ -238,30 +238,23 @@ static void qlist_move_cache(struct qlist_head *from,
struct qlist_head *to,
struct kmem_cache *cache)
{
- struct qlist_node *prev = NULL, *curr;
+ struct qlist_node *curr;
if (unlikely(qlist_empty(from)))
return;
curr = from->head;
+ qlist_init(from);
while (curr) {
- struct qlist_node *qlink = curr;
- struct kmem_cache *obj_cache = qlink_to_cache(qlink);
-
- if (obj_cache == cache) {
- if (unlikely(from->head == qlink)) {
- from->head = curr->next;
- prev = curr;
- } else
- prev->next = curr->next;
- if (unlikely(from->tail == qlink))
- from->tail = curr->next;
- from->bytes -= cache->size;
- qlist_put(to, qlink, cache->size);
- } else {
- prev = curr;
- }
- curr = curr->next;
+ struct qlist_node *next = curr->next;
+ struct kmem_cache *obj_cache = qlink_to_cache(curr);
+
+ if (obj_cache == cache)
+ qlist_put(to, curr, obj_cache->size);
+ else
+ qlist_put(from, curr, obj_cache->size);
+
+ curr = next;
}
}
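
The rewritten qlist_move_cache() above drops the prev/head/tail bookkeeping of the old in-place unlinking: it detaches the whole source list up front and then re-appends every node either to the destination or back to the source. A minimal userspace sketch of that drain-and-repartition pattern, with a hypothetical node/list pair and an append helper standing in for qlist_node/qlist_head and qlist_put(), could look like this:

#include <stdio.h>

/*
 * Hypothetical stand-ins for qlist_node/qlist_head and qlist_put();
 * the 'tag' field plays the role of the object's kmem_cache.
 */
struct node {
	int tag;
	struct node *next;
};

struct list {
	struct node *head, *tail;
};

static void list_init(struct list *l)  { l->head = l->tail = NULL; }
static int  list_empty(struct list *l) { return l->head == NULL; }

static void list_put(struct list *l, struct node *n)
{
	n->next = NULL;
	if (l->tail)
		l->tail->next = n;
	else
		l->head = n;
	l->tail = n;
}

/* Move every node whose tag matches 'tag' from 'from' to 'to'. */
static void list_move_tag(struct list *from, struct list *to, int tag)
{
	struct node *curr;

	if (list_empty(from))
		return;

	curr = from->head;
	list_init(from);                  /* drain the source list ... */
	while (curr) {
		struct node *next = curr->next;

		if (curr->tag == tag)     /* ... then re-append each node */
			list_put(to, curr);
		else
			list_put(from, curr);
		curr = next;
	}
}

int main(void)
{
	struct list from, to;
	struct node nodes[5], *n;
	int i;

	list_init(&from);
	list_init(&to);
	for (i = 0; i < 5; i++) {
		nodes[i].tag = i % 2;
		list_put(&from, &nodes[i]);
	}

	list_move_tag(&from, &to, 1);

	for (n = to.head; n; n = n->next)
		printf("moved a node with tag %d\n", n->tag);
	return 0;
}

Because each node is visited exactly once and only ever appended, there are no special cases for removing the head or tail of the source list, which is what made the previous version error-prone.
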
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index ac8664db3823..5339c89dff63 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -4057,6 +4057,60 @@ static struct cftype mem_cgroup_legacy_files[] = {
{ }, /* terminate */
};
+/*
+ * Private memory cgroup IDR
+ *
+ * Swap-out records and page cache shadow entries need to store memcg
+ * references in constrained space, so we maintain an ID space that is
+ * limited to 16 bit (MEM_CGROUP_ID_MAX), limiting the total number of
+ * memory-controlled cgroups to 64k.
+ *
+ * However, there usually are many references to the offline CSS after
+ * the cgroup has been destroyed, such as page cache or reclaimable
+ * slab objects, that don't need to hang on to the ID. We want to keep
+ * those dead CSS from occupying IDs, or we might quickly exhaust the
+ * relatively small ID space and prevent the creation of new cgroups
+ * even when there are much fewer than 64k cgroups - possibly none.
+ *
+ * Maintain a private 16-bit ID space for memcg, and allow the ID to
+ * be freed and recycled when it's no longer needed, which is usually
+ * when the CSS is offlined.
+ *
+ * The only exception to that are records of swapped out tmpfs/shmem
+ * pages that need to be attributed to live ancestors on swapin. But
+ * those references are manageable from userspace.
+ */
+
+static DEFINE_IDR(mem_cgroup_idr);
+
+static void mem_cgroup_id_get(struct mem_cgroup *memcg)
+{
+ atomic_inc(&memcg->id.ref);
+}
+
+static void mem_cgroup_id_put(struct mem_cgroup *memcg)
+{
+ if (atomic_dec_and_test(&memcg->id.ref)) {
+ idr_remove(&mem_cgroup_idr, memcg->id.id);
+ memcg->id.id = 0;
+
+ /* Memcg ID pins CSS */
+ css_put(&memcg->css);
+ }
+}
+
+/**
+ * mem_cgroup_from_id - look up a memcg from a memcg id
+ * @id: the memcg id to look up
+ *
+ * Caller must hold rcu_read_lock().
+ */
+struct mem_cgroup *mem_cgroup_from_id(unsigned short id)
+{
+ WARN_ON_ONCE(!rcu_read_lock_held());
+ return idr_find(&mem_cgroup_idr, id);
+}
+
static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *memcg, int node)
{
struct mem_cgroup_per_node *pn;
@@ -4116,6 +4170,12 @@ static struct mem_cgroup *mem_cgroup_alloc(void)
if (!memcg)
return NULL;
+ memcg->id.id = idr_alloc(&mem_cgroup_idr, NULL,
+ 1, MEM_CGROUP_ID_MAX,
+ GFP_KERNEL);
+ if (memcg->id.id < 0)
+ goto fail;
+
memcg->stat = alloc_percpu(struct mem_cgroup_stat_cpu);
if (!memcg->stat)
goto fail;
@@ -4142,8 +4202,11 @@ static struct mem_cgroup *mem_cgroup_alloc(void)
#ifdef CONFIG_CGROUP_WRITEBACK
INIT_LIST_HEAD(&memcg->cgwb_list);
#endif
+ idr_replace(&mem_cgroup_idr, memcg, memcg->id.id);
return memcg;
fail:
+ if (memcg->id.id > 0)
+ idr_remove(&mem_cgroup_idr, memcg->id.id);
mem_cgroup_free(memcg);
return NULL;
}
@@ -4206,12 +4269,11 @@ fail:
return ERR_PTR(-ENOMEM);
}
-static int
-mem_cgroup_css_online(struct cgroup_subsys_state *css)
+static int mem_cgroup_css_online(struct cgroup_subsys_state *css)
{
- if (css->id > MEM_CGROUP_ID_MAX)
- return -ENOSPC;
-
+ /* Online state pins memcg ID, memcg ID pins CSS */
+ mem_cgroup_id_get(mem_cgroup_from_css(css));
+ css_get(css);
return 0;
}
@@ -4234,6 +4296,8 @@ static void mem_cgroup_css_offline(struct cgroup_subsys_state *css)
memcg_offline_kmem(memcg);
wb_memcg_offline(memcg);
+
+ mem_cgroup_id_put(memcg);
}
static void mem_cgroup_css_released(struct cgroup_subsys_state *css)
@@ -5756,6 +5820,7 @@ void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
if (!memcg)
return;
+ mem_cgroup_id_get(memcg);
oldid = swap_cgroup_record(entry, mem_cgroup_id(memcg));
VM_BUG_ON_PAGE(oldid, page);
mem_cgroup_swap_statistics(memcg, true);
@@ -5774,6 +5839,9 @@ void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
VM_BUG_ON(!irqs_disabled());
mem_cgroup_charge_statistics(memcg, page, false, -1);
memcg_check_events(memcg, page);
+
+ if (!mem_cgroup_is_root(memcg))
+ css_put(&memcg->css);
}
/*
@@ -5804,11 +5872,11 @@ int mem_cgroup_try_charge_swap(struct page *page, swp_entry_t entry)
!page_counter_try_charge(&memcg->swap, 1, &counter))
return -ENOMEM;
+ mem_cgroup_id_get(memcg);
oldid = swap_cgroup_record(entry, mem_cgroup_id(memcg));
VM_BUG_ON_PAGE(oldid, page);
mem_cgroup_swap_statistics(memcg, true);
- css_get(&memcg->css);
return 0;
}
@@ -5837,7 +5905,7 @@ void mem_cgroup_uncharge_swap(swp_entry_t entry)
page_counter_uncharge(&memcg->memsw, 1);
}
mem_cgroup_swap_statistics(memcg, false);
- css_put(&memcg->css);
+ mem_cgroup_id_put(memcg);
}
rcu_read_unlock();
}
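
The memcontrol.c hunks above give each memcg a private, recyclable 16-bit ID: a slot is reserved with idr_alloc() while the memcg is still being set up, published with idr_replace() once it is usable, pinned by a reference count while it is online (and while swap records point at it), and returned to the pool with idr_remove() on the final mem_cgroup_id_put(). A rough userspace sketch of that reserve/publish/refcount/recycle lifecycle, using a hypothetical fixed-size table in place of the kernel IDR, might look like this:

#include <assert.h>
#include <stdio.h>

#define ID_MAX 8                        /* stands in for MEM_CGROUP_ID_MAX */

static void *id_table[ID_MAX];          /* hypothetical stand-in for mem_cgroup_idr */
static int placeholder;                 /* marks a reserved but unpublished slot */

/* Reserve the lowest free slot >= 1; return -1 if the ID space is exhausted. */
static int id_alloc(void)
{
	int id;

	for (id = 1; id < ID_MAX; id++) {
		if (!id_table[id]) {
			id_table[id] = &placeholder;
			return id;
		}
	}
	return -1;
}

static void id_replace(int id, void *ptr) { id_table[id] = ptr; }
static void id_remove(int id)             { id_table[id] = NULL; }

struct group {                          /* hypothetical stand-in for struct mem_cgroup */
	int id;
	int refs;
};

static int group_online(struct group *g)
{
	g->id = id_alloc();             /* reserve the slot while setting up ... */
	if (g->id < 0)
		return -1;
	id_replace(g->id, g);           /* ... publish once fully initialised */
	g->refs = 1;                    /* online state pins the ID */
	return 0;
}

static void group_id_put(struct group *g)
{
	if (--g->refs == 0) {
		id_remove(g->id);       /* last reference: recycle the ID */
		g->id = 0;
	}
}

int main(void)
{
	struct group a, b;

	assert(group_online(&a) == 0);
	printf("group a got id %d\n", a.id);

	group_id_put(&a);               /* offline: the ID goes back to the pool */

	assert(group_online(&b) == 0);
	printf("group b reuses id %d\n", b.id);
	return 0;
}

The placeholder step loosely mirrors passing NULL to idr_alloc() and calling idr_replace() later: the slot is claimed early so allocation failures can be handled before the object is visible, and the ID only starts resolving to a real object once initialisation has finished.
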
diff --git a/mm/memory.c b/mm/memory.c
index cd1f29e4897e..9e046819e619 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2399,8 +2399,7 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
* Protected against the rmap code by
* the page lock.
*/
- page_move_anon_rmap(compound_head(old_page),
- vma, address);
+ page_move_anon_rmap(old_page, vma);
}
unlock_page(old_page);
return wp_page_reuse(mm, vma, address, page_table, ptl,
diff --git a/mm/mmap.c b/mm/mmap.c
index de2c1769cc68..234edffec1d0 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -2943,9 +2943,19 @@ static const char *special_mapping_name(struct vm_area_struct *vma)
return ((struct vm_special_mapping *)vma->vm_private_data)->name;
}
+static int special_mapping_mremap(struct vm_area_struct *new_vma)
+{
+ struct vm_special_mapping *sm = new_vma->vm_private_data;
+
+ if (sm->mremap)
+ return sm->mremap(sm, new_vma);
+ return 0;
+}
+
static const struct vm_operations_struct special_mapping_vmops = {
.close = special_mapping_close,
.fault = special_mapping_fault,
+ .mremap = special_mapping_mremap,
.name = special_mapping_name,
};
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 6903b695ebae..8b3e1341b754 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -286,7 +286,9 @@ static inline void reset_deferred_meminit(pg_data_t *pgdat)
/* Returns true if the struct page for the pfn is uninitialised */
static inline bool __meminit early_page_uninitialised(unsigned long pfn)
{
- if (pfn >= NODE_DATA(early_pfn_to_nid(pfn))->first_deferred_pfn)
+ int nid = early_pfn_to_nid(pfn);
+
+ if (node_online(nid) && pfn >= NODE_DATA(nid)->first_deferred_pfn)
return true;
return false;
@@ -1273,7 +1275,7 @@ int __meminit early_pfn_to_nid(unsigned long pfn)
spin_lock(&early_pfn_lock);
nid = __early_pfn_to_nid(pfn, &early_pfnnid_cache);
if (nid < 0)
- nid = 0;
+ nid = first_online_node;
spin_unlock(&early_pfn_lock);
return nid;
diff --git a/mm/rmap.c b/mm/rmap.c
index 0ea5d9071b32..701b93fea2a0 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1084,23 +1084,20 @@ EXPORT_SYMBOL_GPL(page_mkclean);
* page_move_anon_rmap - move a page to our anon_vma
* @page: the page to move to our anon_vma
* @vma: the vma the page belongs to
- * @address: the user virtual address mapped
*
* When a page belongs exclusively to one process after a COW event,
* that page can be moved into the anon_vma that belongs to just that
* process, so the rmap code will not search the parent or sibling
* processes.
*/
-void page_move_anon_rmap(struct page *page,
- struct vm_area_struct *vma, unsigned long address)
+void page_move_anon_rmap(struct page *page, struct vm_area_struct *vma)
{
struct anon_vma *anon_vma = vma->anon_vma;
+ page = compound_head(page);
+
VM_BUG_ON_PAGE(!PageLocked(page), page);
VM_BUG_ON_VMA(!anon_vma, vma);
- if (IS_ENABLED(CONFIG_DEBUG_VM) && PageTransHuge(page))
- address &= HPAGE_PMD_MASK;
- VM_BUG_ON_PAGE(page->index != linear_page_index(vma, address), page);
anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
/*
@@ -1427,7 +1424,8 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
goto out;
}
- pte = page_check_address(page, mm, address, &ptl, 0);
+ pte = page_check_address(page, mm, address, &ptl,
+ PageTransCompound(page));
if (!pte)
goto out;
diff --git a/mm/shmem.c b/mm/shmem.c
index 24463b67b6ef..171dee7a131f 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -2225,9 +2225,11 @@ static long shmem_fallocate(struct file *file, int mode, loff_t offset,
error = shmem_getpage(inode, index, &page, SGP_FALLOC);
if (error) {
/* Remove the !PageUptodate pages we added */
- shmem_undo_range(inode,
- (loff_t)start << PAGE_SHIFT,
- ((loff_t)index << PAGE_SHIFT) - 1, true);
+ if (index > start) {
+ shmem_undo_range(inode,
+ (loff_t)start << PAGE_SHIFT,
+ ((loff_t)index << PAGE_SHIFT) - 1, true);
+ }
goto undone;
}
diff --git a/mm/slab_common.c b/mm/slab_common.c
index a65dad7fdcd1..82317abb03ed 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -526,8 +526,8 @@ void memcg_create_kmem_cache(struct mem_cgroup *memcg,
goto out_unlock;
cgroup_name(css->cgroup, memcg_name_buf, sizeof(memcg_name_buf));
- cache_name = kasprintf(GFP_KERNEL, "%s(%d:%s)", root_cache->name,
- css->id, memcg_name_buf);
+ cache_name = kasprintf(GFP_KERNEL, "%s(%llu:%s)", root_cache->name,
+ css->serial_nr, memcg_name_buf);
if (!cache_name)
goto out_unlock;
diff --git a/mm/workingset.c b/mm/workingset.c
index 8a75f8d2916a..577277546d98 100644
--- a/mm/workingset.c
+++ b/mm/workingset.c
@@ -491,7 +491,7 @@ static int __init workingset_init(void)
max_order = fls_long(totalram_pages - 1);
if (max_order > timestamp_bits)
bucket_order = max_order - timestamp_bits;
- printk("workingset: timestamp_bits=%d max_order=%d bucket_order=%u\n",
+ pr_info("workingset: timestamp_bits=%d max_order=%d bucket_order=%u\n",
timestamp_bits, max_order, bucket_order);
ret = list_lru_init_key(&workingset_shadow_nodes, &shadow_nodes_key);
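
For reference on the values printed by the pr_info() line above: max_order is the 1-based position of the highest set bit in totalram_pages - 1, and bucket_order is simply how many of those bits do not fit into the shadow entry's timestamp field. A small userspace sketch with assumed example values (the real timestamp_bits depends on the kernel's radix tree and NODES_SHIFT configuration), using the GCC builtin __builtin_clzl() in place of the kernel's fls_long(), works the arithmetic through:

#include <stdio.h>

/*
 * Userspace stand-in for the kernel's fls_long(): 1-based index of the
 * highest set bit, or 0 when the argument is 0.
 */
static unsigned int fls_long(unsigned long x)
{
	return x ? (unsigned int)(8 * sizeof(long) - __builtin_clzl(x)) : 0;
}

int main(void)
{
	/* Assumed example values, not taken from any particular config. */
	unsigned long totalram_pages = 4UL << 20;   /* 4M pages ~ 16 GiB with 4 KiB pages */
	unsigned int timestamp_bits = 46;
	unsigned int max_order, bucket_order = 0;

	max_order = fls_long(totalram_pages - 1);
	if (max_order > timestamp_bits)
		bucket_order = max_order - timestamp_bits;

	printf("workingset: timestamp_bits=%u max_order=%u bucket_order=%u\n",
	       timestamp_bits, max_order, bucket_order);
	return 0;
}

With these assumed values max_order is 22, well below timestamp_bits, so bucket_order stays 0; bucketing only kicks in when the machine has enough memory that max_order exceeds the timestamp width.
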