author		Trond Myklebust <trond.myklebust@primarydata.com>	2015-04-23 15:16:37 -0400
committer	Trond Myklebust <trond.myklebust@primarydata.com>	2015-04-23 15:16:37 -0400
commit		f139b6c676c7e49b66016b28bf3f8ec5c54be891 (patch)
tree		742f00e431dded1daf642b44f4c199b318f255dc /mm/memory.c
parent		21330b667070fd64b2340d8d31c1b0800df78ec8 (diff)
parent		d654788e98f74f2df8dfc6079fa314938f739486 (diff)
download	linux-f139b6c676c7e49b66016b28bf3f8ec5c54be891.tar.bz2
Merge tag 'nfs-rdma-for-4.1-1' of git://git.linux-nfs.org/projects/anna/nfs-rdma
NFS: NFSoRDMA Client Changes
This patch series creates an operation vector for each of the different
memory registration modes. This should make it easier to one day increase
the credit limit, rsize, and wsize.
Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
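
For context, the "operation vector" mentioned above is a table of function
pointers, one per memory registration mode, so the transport selects a vector
once at setup and calls through it rather than branching on the mode at every
call site. Below is a minimal, self-contained sketch of that pattern; the
names (memreg_ops, frwr_ops, the mr_* callbacks) are illustrative only and do
not claim to match the actual xprtrdma symbols introduced by this series.

/*
 * Minimal sketch of the operation-vector pattern (not actual kernel code):
 * every registration mode fills in the same table of callbacks, and the
 * transport only ever calls through the table.
 */
#include <stddef.h>
#include <stdio.h>

struct memreg_ops {				/* hypothetical names */
	int	(*mr_map)(void *req);		/* register memory for a request */
	void	(*mr_unmap)(void *req);		/* invalidate the registration */
	size_t	(*mr_maxpages)(void);		/* pages covered per registration */
	const char *mr_displayname;
};

/* One implementation per mode; FRWR here, FMR or physical would be others. */
static int frwr_map(void *req)    { (void)req; return 0; }
static void frwr_unmap(void *req) { (void)req; }
static size_t frwr_maxpages(void) { return 256; }

static const struct memreg_ops frwr_ops = {
	.mr_map		= frwr_map,
	.mr_unmap	= frwr_unmap,
	.mr_maxpages	= frwr_maxpages,
	.mr_displayname	= "frwr",
};

int main(void)
{
	/* The transport picks one vector at setup time and never
	 * branches on the registration mode again. */
	const struct memreg_ops *ops = &frwr_ops;

	printf("%s: up to %zu pages per registration\n",
	       ops->mr_displayname, ops->mr_maxpages());
	return ops->mr_map(NULL);
}

The point of the indirection is that later tuning work, such as raising the
credit limit, rsize, or wsize, can be done per mode behind a stable interface.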
Diffstat (limited to 'mm/memory.c')
-rw-r--r--	mm/memory.c	17
1 file changed, 12 insertions(+), 5 deletions(-)
diff --git a/mm/memory.c b/mm/memory.c
index 8068893697bb..97839f5c8c30 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3035,6 +3035,7 @@ static int do_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	int last_cpupid;
 	int target_nid;
 	bool migrated = false;
+	bool was_writable = pte_write(pte);
 	int flags = 0;
 
 	/* A PROT_NONE fault should not end up here */
@@ -3059,6 +3060,8 @@ static int do_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	/* Make it present again */
 	pte = pte_modify(pte, vma->vm_page_prot);
 	pte = pte_mkyoung(pte);
+	if (was_writable)
+		pte = pte_mkwrite(pte);
 	set_pte_at(mm, addr, ptep, pte);
 	update_mmu_cache(vma, addr, ptep);
 
@@ -3069,11 +3072,14 @@ static int do_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	}
 
 	/*
-	 * Avoid grouping on DSO/COW pages in specific and RO pages
-	 * in general, RO pages shouldn't hurt as much anyway since
-	 * they can be in shared cache state.
+	 * Avoid grouping on RO pages in general. RO pages shouldn't hurt as
+	 * much anyway since they can be in shared cache state. This misses
+	 * the case where a mapping is writable but the process never writes
+	 * to it but pte_write gets cleared during protection updates and
+	 * pte_dirty has unpredictable behaviour between PTE scan updates,
+	 * background writeback, dirty balancing and application behaviour.
 	 */
-	if (!pte_write(pte))
+	if (!(vma->vm_flags & VM_WRITE))
 		flags |= TNF_NO_GROUP;
 
 	/*
@@ -3097,7 +3103,8 @@ static int do_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	if (migrated) {
 		page_nid = target_nid;
 		flags |= TNF_MIGRATED;
-	}
+	} else
+		flags |= TNF_MIGRATE_FAIL;
 
 out:
 	if (page_nid != -1)