| author | Naoya Horiguchi <n-horiguchi@ah.jp.nec.com> | 2012-03-21 16:33:57 -0700 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2012-03-21 17:54:57 -0700 |
| commit | 025c5b2451e42c9e8dfdecd6dc84956ce8f321b5 (patch) | |
| tree | 423b4ef1a0ce021360304a80f6e0ba902581a3ad /mm/huge_memory.c | |
| parent | 5aaabe831eb527e0d9284f0745d830a755f70393 (diff) | |
| download | linux-025c5b2451e42c9e8dfdecd6dc84956ce8f321b5.tar.bz2 | |
thp: optimize away unnecessary page table locking
Currently, when we check whether we can handle a thp as it is or need to split
it into regular sized pages, we take the page table lock before checking
whether the given pmd maps a thp. Because of this, when it is not a huge pmd
we suffer unnecessary lock/unlock overhead. To remove it, this patch
introduces an optimized check function, __pmd_trans_huge_lock(), and replaces
several similar pieces of locking logic with it (a stand-alone sketch of the
consolidated pattern follows the tags below).
[akpm@linux-foundation.org: checkpatch fixes]
Signed-off-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Andi Kleen <andi@firstfloor.org>
Cc: Wu Fengguang <fengguang.wu@intel.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Reviewed-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Jiri Slaby <jslaby@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
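
For illustration only, here is a minimal stand-alone model of the pattern being consolidated. This is not kernel code: pthread_mutex_t stands in for mm->page_table_lock, the enum stands in for the pmd_trans_huge()/pmd_trans_splitting() checks, and all names below are invented for the sketch. It demonstrates the helper's contract as described in the diff: the lock stays held only when the pmd maps a stable thp, so the caller must drop it in that one case.

```c
#include <pthread.h>
#include <stdio.h>

/* Stand-ins for the three states a pmd can be in with respect to thp. */
enum pmd_state { PMD_REGULAR, PMD_THP_STABLE, PMD_THP_SPLITTING };

struct mm_model {
        pthread_mutex_t page_table_lock;   /* stand-in for mm->page_table_lock */
        enum pmd_state pmd;
};

/*
 * Model of __pmd_trans_huge_lock(): returns 1 for a stable thp (lock kept
 * held, caller must unlock), -1 for a thp under splitting, 0 otherwise.
 */
static int pmd_trans_huge_lock_model(struct mm_model *mm)
{
        pthread_mutex_lock(&mm->page_table_lock);
        if (mm->pmd == PMD_THP_STABLE)
                return 1;        /* keep the lock held */
        pthread_mutex_unlock(&mm->page_table_lock);
        if (mm->pmd == PMD_THP_SPLITTING)
                return -1;       /* the real helper waits for the split here */
        return 0;                /* not a huge pmd: fall back to the pte path */
}

int main(void)
{
        struct mm_model mm = {
                .page_table_lock = PTHREAD_MUTEX_INITIALIZER,
                .pmd = PMD_THP_STABLE,
        };

        /* Caller-side pattern shared by zap/mincore/move/change_huge_pmd. */
        if (pmd_trans_huge_lock_model(&mm) == 1) {
                printf("handle the huge pmd while the lock is held\n");
                pthread_mutex_unlock(&mm.page_table_lock);
        }
        return 0;
}
```

(Build with `cc -pthread`.)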
Diffstat (limited to 'mm/huge_memory.c')
-rw-r--r-- | mm/huge_memory.c | 125 |
1 file changed, 59 insertions, 66 deletions
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 8f7fc394f636..f0e5306eeb55 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1031,32 +1031,23 @@ int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
 {
         int ret = 0;
 
-        spin_lock(&tlb->mm->page_table_lock);
-        if (likely(pmd_trans_huge(*pmd))) {
-                if (unlikely(pmd_trans_splitting(*pmd))) {
-                        spin_unlock(&tlb->mm->page_table_lock);
-                        wait_split_huge_page(vma->anon_vma,
-                                             pmd);
-                } else {
-                        struct page *page;
-                        pgtable_t pgtable;
-                        pgtable = get_pmd_huge_pte(tlb->mm);
-                        page = pmd_page(*pmd);
-                        pmd_clear(pmd);
-                        tlb_remove_pmd_tlb_entry(tlb, pmd, addr);
-                        page_remove_rmap(page);
-                        VM_BUG_ON(page_mapcount(page) < 0);
-                        add_mm_counter(tlb->mm, MM_ANONPAGES, -HPAGE_PMD_NR);
-                        VM_BUG_ON(!PageHead(page));
-                        tlb->mm->nr_ptes--;
-                        spin_unlock(&tlb->mm->page_table_lock);
-                        tlb_remove_page(tlb, page);
-                        pte_free(tlb->mm, pgtable);
-                        ret = 1;
-                }
-        } else
+        if (__pmd_trans_huge_lock(pmd, vma) == 1) {
+                struct page *page;
+                pgtable_t pgtable;
+                pgtable = get_pmd_huge_pte(tlb->mm);
+                page = pmd_page(*pmd);
+                pmd_clear(pmd);
+                tlb_remove_pmd_tlb_entry(tlb, pmd, addr);
+                page_remove_rmap(page);
+                VM_BUG_ON(page_mapcount(page) < 0);
+                add_mm_counter(tlb->mm, MM_ANONPAGES, -HPAGE_PMD_NR);
+                VM_BUG_ON(!PageHead(page));
+                tlb->mm->nr_ptes--;
                 spin_unlock(&tlb->mm->page_table_lock);
-
+                tlb_remove_page(tlb, page);
+                pte_free(tlb->mm, pgtable);
+                ret = 1;
+        }
         return ret;
 }
 
@@ -1066,21 +1057,15 @@ int mincore_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
 {
         int ret = 0;
 
-        spin_lock(&vma->vm_mm->page_table_lock);
-        if (likely(pmd_trans_huge(*pmd))) {
-                ret = !pmd_trans_splitting(*pmd);
-                spin_unlock(&vma->vm_mm->page_table_lock);
-                if (unlikely(!ret))
-                        wait_split_huge_page(vma->anon_vma, pmd);
-                else {
-                        /*
-                         * All logical pages in the range are present
-                         * if backed by a huge page.
-                         */
-                        memset(vec, 1, (end - addr) >> PAGE_SHIFT);
-                }
-        } else
+        if (__pmd_trans_huge_lock(pmd, vma) == 1) {
+                /*
+                 * All logical pages in the range are present
+                 * if backed by a huge page.
+                 */
                 spin_unlock(&vma->vm_mm->page_table_lock);
+                memset(vec, 1, (end - addr) >> PAGE_SHIFT);
+                ret = 1;
+        }
 
         return ret;
 }
@@ -1110,20 +1095,11 @@ int move_huge_pmd(struct vm_area_struct *vma, struct vm_area_struct *new_vma,
                 goto out;
         }
 
-        spin_lock(&mm->page_table_lock);
-        if (likely(pmd_trans_huge(*old_pmd))) {
-                if (pmd_trans_splitting(*old_pmd)) {
-                        spin_unlock(&mm->page_table_lock);
-                        wait_split_huge_page(vma->anon_vma, old_pmd);
-                        ret = -1;
-                } else {
-                        pmd = pmdp_get_and_clear(mm, old_addr, old_pmd);
-                        VM_BUG_ON(!pmd_none(*new_pmd));
-                        set_pmd_at(mm, new_addr, new_pmd, pmd);
-                        spin_unlock(&mm->page_table_lock);
-                        ret = 1;
-                }
-        } else {
+        ret = __pmd_trans_huge_lock(old_pmd, vma);
+        if (ret == 1) {
+                pmd = pmdp_get_and_clear(mm, old_addr, old_pmd);
+                VM_BUG_ON(!pmd_none(*new_pmd));
+                set_pmd_at(mm, new_addr, new_pmd, pmd);
                 spin_unlock(&mm->page_table_lock);
         }
 out:
@@ -1136,24 +1112,41 @@ int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
         struct mm_struct *mm = vma->vm_mm;
         int ret = 0;
 
-        spin_lock(&mm->page_table_lock);
+        if (__pmd_trans_huge_lock(pmd, vma) == 1) {
+                pmd_t entry;
+                entry = pmdp_get_and_clear(mm, addr, pmd);
+                entry = pmd_modify(entry, newprot);
+                set_pmd_at(mm, addr, pmd, entry);
+                spin_unlock(&vma->vm_mm->page_table_lock);
+                ret = 1;
+        }
+
+        return ret;
+}
+
+/*
+ * Returns 1 if a given pmd maps a stable (not under splitting) thp.
+ * Returns -1 if it maps a thp under splitting. Returns 0 otherwise.
+ *
+ * Note that if it returns 1, this routine returns without unlocking page
+ * table locks. So callers must unlock them.
+ */
+int __pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma)
+{
+        spin_lock(&vma->vm_mm->page_table_lock);
         if (likely(pmd_trans_huge(*pmd))) {
                 if (unlikely(pmd_trans_splitting(*pmd))) {
-                        spin_unlock(&mm->page_table_lock);
+                        spin_unlock(&vma->vm_mm->page_table_lock);
                         wait_split_huge_page(vma->anon_vma, pmd);
+                        return -1;
                 } else {
-                        pmd_t entry;
-
-                        entry = pmdp_get_and_clear(mm, addr, pmd);
-                        entry = pmd_modify(entry, newprot);
-                        set_pmd_at(mm, addr, pmd, entry);
-                        spin_unlock(&vma->vm_mm->page_table_lock);
-                        ret = 1;
+                        /* Thp mapped by 'pmd' is stable, so we can
+                         * handle it as it is. */
+                        return 1;
                 }
-        } else
-                spin_unlock(&vma->vm_mm->page_table_lock);
-
-        return ret;
+        }
+        spin_unlock(&vma->vm_mm->page_table_lock);
+        return 0;
 }
 
 pmd_t *page_check_address_pmd(struct page *page,
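
Read alongside the diff, the caller-side contract of the new helper can be restated briefly. The snippet below is a simplified paraphrase of the move_huge_pmd() hunk above (kernel-style C, not a stand-alone program), annotated with how each return value is meant to be handled:

```c
/*
 * __pmd_trans_huge_lock() return values, as used by the callers above:
 *    1 -> stable thp: page_table_lock is still held, the caller must unlock
 *   -1 -> thp under splitting: the helper already waited, lock not held
 *    0 -> not a huge pmd: lock not held, fall back to the regular pte path
 */
ret = __pmd_trans_huge_lock(old_pmd, vma);
if (ret == 1) {
        pmd = pmdp_get_and_clear(mm, old_addr, old_pmd);
        VM_BUG_ON(!pmd_none(*new_pmd));
        set_pmd_at(mm, new_addr, new_pmd, pmd);
        spin_unlock(&mm->page_table_lock);      /* the caller drops the lock */
}
```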