author     Hugh Dickins <hugh@veritas.com>            2005-10-29 18:16:23 -0700
committer  Linus Torvalds <torvalds@g5.osdl.org>      2005-10-29 21:40:40 -0700
commit     c74df32c724a1652ad8399b4891bb02c9d43743a (patch)
tree       5a79d56fdcf7dc2053a277dbf6db7c3b339e9659 /mm/fremap.c
parent     1bb3630e89cb8a7b3d3807629c20c5bad88290ff (diff)
download   linux-c74df32c724a1652ad8399b4891bb02c9d43743a.tar.bz2
[PATCH] mm: ptd_alloc take ptlock
Second step in pushing down the page_table_lock. Remove the temporary
bridging hack from __pud_alloc, __pmd_alloc, __pte_alloc: expect callers not
to hold page_table_lock, whether it's on init_mm or a user mm; take
page_table_lock internally to check if a racing task already allocated.
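As a rough sketch of that allocate-then-recheck idiom (assuming the 2.6-era helpers pte_alloc_one, pte_free and pmd_populate, and omitting accounting; not the verbatim kernel source):

    /*
     * Allocate the new page table outside the lock, then take
     * page_table_lock only to arbitrate against a racing task.
     */
    int __pte_alloc(struct mm_struct *mm, pmd_t *pmd, unsigned long address)
    {
            struct page *new = pte_alloc_one(mm, address);
            if (!new)
                    return -ENOMEM;

            spin_lock(&mm->page_table_lock);
            if (pmd_present(*pmd))          /* a racing task already allocated */
                    pte_free(new);
            else
                    pmd_populate(mm, pmd, new);
            spin_unlock(&mm->page_table_lock);
            return 0;
    }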
Convert their callers from common code. But avoid coming back to change them
again later: instead of moving the spin_lock(&mm->page_table_lock) down,
switch over to new macros pte_alloc_map_lock and pte_unmap_unlock, which
encapsulate the mapping+locking and unlocking+unmapping together, and in the
end may use alternatives to the mm page_table_lock itself.
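A minimal sketch of the new caller-side idiom, as used in the diff below (ptl currently resolves to &mm->page_table_lock, but the macros hide that choice):

    spinlock_t *ptl;
    pte_t *pte;

    pte = pte_alloc_map_lock(mm, pmd, addr, &ptl);  /* map the pte, take its lock */
    if (!pte)
            return -ENOMEM;
    /* ... inspect and modify *pte while holding ptl ... */
    pte_unmap_unlock(pte, ptl);                     /* unlock and unmap together */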
These callers all hold mmap_sem (some exclusively, some not), so at no level
can a page table be whipped away from beneath them; and pte_alloc uses the
"atomic" pmd_present to test whether it needs to allocate. It appears that on
all arches we can safely descend without page_table_lock.
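Concretely, the converted callers in the diff below take no spin_lock on the way down, for example:

    pgd = pgd_offset(mm, addr);
    pud = pud_alloc(mm, pgd, addr);   /* locks internally only if it must allocate */
    if (!pud)
            goto out;
    pmd = pmd_alloc(mm, pud, addr);
    if (!pmd)
            goto out;
    pte = pte_alloc_map_lock(mm, pmd, addr, &ptl);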
Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'mm/fremap.c')
-rw-r--r--  mm/fremap.c  48
1 file changed, 18 insertions(+), 30 deletions(-)
diff --git a/mm/fremap.c b/mm/fremap.c
index 49719a35769a..d862be3bc3e3 100644
--- a/mm/fremap.c
+++ b/mm/fremap.c
@@ -63,23 +63,20 @@ int install_page(struct mm_struct *mm, struct vm_area_struct *vma,
         pud_t *pud;
         pgd_t *pgd;
         pte_t pte_val;
+        spinlock_t *ptl;
 
         BUG_ON(vma->vm_flags & VM_RESERVED);
 
         pgd = pgd_offset(mm, addr);
-        spin_lock(&mm->page_table_lock);
-
         pud = pud_alloc(mm, pgd, addr);
         if (!pud)
-                goto err_unlock;
-
+                goto out;
         pmd = pmd_alloc(mm, pud, addr);
         if (!pmd)
-                goto err_unlock;
-
-        pte = pte_alloc_map(mm, pmd, addr);
+                goto out;
+        pte = pte_alloc_map_lock(mm, pmd, addr, &ptl);
         if (!pte)
-                goto err_unlock;
+                goto out;
 
         /*
          * This page may have been truncated. Tell the
@@ -89,10 +86,10 @@ int install_page(struct mm_struct *mm, struct vm_area_struct *vma,
         inode = vma->vm_file->f_mapping->host;
         size = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
         if (!page->mapping || page->index >= size)
-                goto err_unlock;
+                goto unlock;
         err = -ENOMEM;
         if (page_mapcount(page) > INT_MAX/2)
-                goto err_unlock;
+                goto unlock;
 
         if (pte_none(*pte) || !zap_pte(mm, vma, addr, pte))
                 inc_mm_counter(mm, file_rss);
@@ -101,17 +98,15 @@ int install_page(struct mm_struct *mm, struct vm_area_struct *vma,
         set_pte_at(mm, addr, pte, mk_pte(page, prot));
         page_add_file_rmap(page);
         pte_val = *pte;
-        pte_unmap(pte);
         update_mmu_cache(vma, addr, pte_val);
-
         err = 0;
-err_unlock:
-        spin_unlock(&mm->page_table_lock);
+unlock:
+        pte_unmap_unlock(pte, ptl);
+out:
         return err;
 }
 EXPORT_SYMBOL(install_page);
 
-
 /*
  * Install a file pte to a given virtual memory address, release any
  * previously existing mapping.
@@ -125,23 +120,20 @@ int install_file_pte(struct mm_struct *mm, struct vm_area_struct *vma,
         pud_t *pud;
         pgd_t *pgd;
         pte_t pte_val;
+        spinlock_t *ptl;
 
         BUG_ON(vma->vm_flags & VM_RESERVED);
 
         pgd = pgd_offset(mm, addr);
-        spin_lock(&mm->page_table_lock);
-
         pud = pud_alloc(mm, pgd, addr);
         if (!pud)
-                goto err_unlock;
-
+                goto out;
         pmd = pmd_alloc(mm, pud, addr);
         if (!pmd)
-                goto err_unlock;
-
-        pte = pte_alloc_map(mm, pmd, addr);
+                goto out;
+        pte = pte_alloc_map_lock(mm, pmd, addr, &ptl);
         if (!pte)
-                goto err_unlock;
+                goto out;
 
         if (!pte_none(*pte) && zap_pte(mm, vma, addr, pte)) {
                 update_hiwater_rss(mm);
@@ -150,17 +142,13 @@ int install_file_pte(struct mm_struct *mm, struct vm_area_struct *vma,
 
         set_pte_at(mm, addr, pte, pgoff_to_pte(pgoff));
         pte_val = *pte;
-        pte_unmap(pte);
         update_mmu_cache(vma, addr, pte_val);
-        spin_unlock(&mm->page_table_lock);
-        return 0;
-
-err_unlock:
-        spin_unlock(&mm->page_table_lock);
+        pte_unmap_unlock(pte, ptl);
+        err = 0;
+out:
         return err;
 }
 
-
 /***
  * sys_remap_file_pages - remap arbitrary pages of a shared backing store
  *                        file within an existing vma.