commit    b4d1d99fdd8b98fb03dfd6ef9b0ece220de38640
author    David Gibson <david@gibson.dropbear.id.au>  2008-10-15 22:01:11 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>  2008-10-16 11:21:29 -0700
tree      e2bdf12aa53b71430edd312f441c79fdd6114535 /mm
parent    db99100d2ed40dd9736fcb1adb3657a98f9bcfd9
hugetlb: handle updating of ACCESSED and DIRTY in hugetlb_fault()
For normal pages, when a fault is neither a no-page fault nor a write-protect
fault, the page fault path updates the DIRTY and ACCESSED bits in the page
table appropriately.
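For reference, the normal-page path ends with logic along these lines; this is
a simplified sketch of how handle_pte_fault() in mm/memory.c finished at the
time, not the verbatim source:

	/* Sketch: mark the pte dirty on a write fault, young on any
	 * fault, and write the updated pte back if anything changed. */
	if (write_access) {
		if (!pte_write(entry))
			return do_wp_page(mm, vma, address, pte, pmd,
					  ptl, entry);
		entry = pte_mkdirty(entry);
	}
	entry = pte_mkyoung(entry);
	if (ptep_set_access_flags(vma, address, pte, entry, write_access))
		update_mmu_cache(vma, address, entry);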
The hugepage fault path, however, does not do this, handling only no-page
or write-protect type faults. It assumes either that the ACCESSED and DIRTY
bits are irrelevant for hugepages (usually true, since they are never swapped)
or that they are handled by the arch code.
This is inconvenient for some software-loaded TLB architectures, where the
_PAGE_ACCESSED (_PAGE_DIRTY) bit must be set to enable read (write) access to
the page at TLB-miss time. This could be worked around in the arch TLB miss
code, but the TLB miss fast path is easier to keep simple if the
hugetlb_fault() path handles this, as the normal page fault path does.
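To make the constraint concrete, the fast path of a software TLB miss handler
on such an architecture might be shaped roughly like this (hypothetical
pseudo-C; real handlers are hand-written assembly, and tlb_load_entry() is an
illustrative stand-in, not a real kernel helper):

	/* Hypothetical software TLB-miss fast path.  It may only install
	 * a translation the pte already permits: _PAGE_ACCESSED gates
	 * read access and _PAGE_DIRTY gates write access.  Anything else
	 * falls back to the full page fault path -- which is why
	 * hugetlb_fault() must be able to set these bits itself. */
	pte = *ptep;
	if (write_miss) {
		if (!(pte_val(pte) & _PAGE_DIRTY))
			goto slow_path;		/* take a real page fault */
	} else {
		if (!(pte_val(pte) & _PAGE_ACCESSED))
			goto slow_path;
	}
	tlb_load_entry(address, pte);		/* illustrative only */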
Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
Cc: William Lee Irwin III <wli@holomorphy.com>
Cc: Hugh Dickins <hugh@veritas.com>
Cc: Adam Litke <agl@us.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r--  mm/hugetlb.c | 23
1 file changed, 18 insertions(+), 5 deletions(-)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 67a71191136e..38633864a93e 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -2008,7 +2008,7 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 	entry = huge_ptep_get(ptep);
 	if (huge_pte_none(entry)) {
 		ret = hugetlb_no_page(mm, vma, address, ptep, write_access);
-		goto out_unlock;
+		goto out_mutex;
 	}
 
 	ret = 0;
@@ -2024,7 +2024,7 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 	if (write_access && !pte_write(entry)) {
 		if (vma_needs_reservation(h, vma, address) < 0) {
 			ret = VM_FAULT_OOM;
-			goto out_unlock;
+			goto out_mutex;
 		}
 
 		if (!(vma->vm_flags & VM_SHARED))
@@ -2034,10 +2034,23 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 
 	spin_lock(&mm->page_table_lock);
 	/* Check for a racing update before calling hugetlb_cow */
-	if (likely(pte_same(entry, huge_ptep_get(ptep))))
-		if (write_access && !pte_write(entry))
+	if (unlikely(!pte_same(entry, huge_ptep_get(ptep))))
+		goto out_page_table_lock;
+
+
+	if (write_access) {
+		if (!pte_write(entry)) {
 			ret = hugetlb_cow(mm, vma, address, ptep, entry,
 							pagecache_page);
+			goto out_page_table_lock;
+		}
+		entry = pte_mkdirty(entry);
+	}
+	entry = pte_mkyoung(entry);
+	if (huge_ptep_set_access_flags(vma, address, ptep, entry, write_access))
+		update_mmu_cache(vma, address, entry);
+
+out_page_table_lock:
 	spin_unlock(&mm->page_table_lock);
 
 	if (pagecache_page) {
@@ -2045,7 +2058,7 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 		put_page(pagecache_page);
 	}
 
-out_unlock:
+out_mutex:
 	mutex_unlock(&hugetlb_instantiation_mutex);
 
 	return ret;