author | Juergen Gross <jgross@suse.com> | 2014-11-03 14:01:55 +0100
committer | Thomas Gleixner <tglx@linutronix.de> | 2014-11-16 11:04:25 +0100
commit | 2a3746984c98b17b565e6a2c2bbaaaef757db1b4 (patch)
tree | 935af5f26372b40cf5d0f026b3880ad3cd53f827 /arch/x86/mm/pat.c
parent | 49a3b3cbdf1621678a39bd95a3e67c0f858539c7 (diff)
download | linux-2a3746984c98b17b565e6a2c2bbaaaef757db1b4.tar.bz2
x86: Use new cache mode type in track_pfn_remap() and track_pfn_insert()
Instead of directly using the cache mode bits in the pte, switch to
using the cache mode type. As track_pfn_remap() and track_pfn_insert()
are the main callers of lookup_memtype(), change that function as well.
Based-on-patch-by: Stefan Bader <stefan.bader@canonical.com>
Signed-off-by: Juergen Gross <jgross@suse.com>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
Cc: stefan.bader@canonical.com
Cc: xen-devel@lists.xensource.com
Cc: konrad.wilk@oracle.com
Cc: ville.syrjala@linux.intel.com
Cc: david.vrabel@citrix.com
Cc: jbeulich@suse.com
Cc: toshi.kani@hp.com
Cc: plagnioj@jcrosoft.com
Cc: tomi.valkeinen@ti.com
Cc: bhelgaas@google.com
Link: http://lkml.kernel.org/r/1415019724-4317-10-git-send-email-jgross@suse.com
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
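
For context, here is a minimal, self-contained sketch of the translation this series relies on: cachemode2protval() turns an enum page_cache_mode value into the PWT/PCD cache bits carried in a pte, and pgprot2cachemode() goes the other way. The table contents, bit positions, and the reverse-lookup loop below are illustrative assumptions for a userspace demo, not the kernel's actual implementation (which uses a pair of lookup tables in arch/x86).

```c
/*
 * Simplified sketch only -- table contents and bit positions are
 * illustrative assumptions, not the kernel's real tables.
 */
#include <stdio.h>

#define _PAGE_BIT_PWT	3			/* page write-through bit */
#define _PAGE_BIT_PCD	4			/* page cache disable bit */
#define _PAGE_PWT	(1UL << _PAGE_BIT_PWT)
#define _PAGE_PCD	(1UL << _PAGE_BIT_PCD)
#define _PAGE_CACHE_MASK (_PAGE_PWT | _PAGE_PCD)

enum page_cache_mode {
	_PAGE_CACHE_MODE_WB,			/* write-back */
	_PAGE_CACHE_MODE_WC,			/* write-combining */
	_PAGE_CACHE_MODE_UC_MINUS,		/* uncached minus (weaker form of UC) */
	_PAGE_CACHE_MODE_UC,			/* uncached */
	_PAGE_CACHE_MODE_NUM
};

/* cache mode -> pte cache bits (assumed encoding for this sketch) */
static const unsigned long cachemode2pte_tbl[_PAGE_CACHE_MODE_NUM] = {
	[_PAGE_CACHE_MODE_WB]		= 0,
	[_PAGE_CACHE_MODE_WC]		= _PAGE_PWT,
	[_PAGE_CACHE_MODE_UC_MINUS]	= _PAGE_PCD,
	[_PAGE_CACHE_MODE_UC]		= _PAGE_PCD | _PAGE_PWT,
};

/* enum -> pte protection bits */
static unsigned long cachemode2protval(enum page_cache_mode pcm)
{
	return cachemode2pte_tbl[pcm];
}

/* pte protection bits -> enum, via reverse lookup over the same table */
static enum page_cache_mode pgprot2cachemode(unsigned long protval)
{
	int i;

	for (i = 0; i < _PAGE_CACHE_MODE_NUM; i++)
		if (cachemode2pte_tbl[i] == (protval & _PAGE_CACHE_MASK))
			return (enum page_cache_mode)i;
	return _PAGE_CACHE_MODE_UC_MINUS;	/* conservative fallback */
}

int main(void)
{
	unsigned long prot = cachemode2protval(_PAGE_CACHE_MODE_WC);

	printf("WC -> pte bits 0x%lx -> mode %d\n",
	       prot, pgprot2cachemode(prot));
	return 0;
}
```

With helpers along these lines, callers such as track_pfn_remap() and track_pfn_insert() can carry a page_cache_mode internally and only convert to pte bits at the point where the pgprot is assembled, which is what the hunks below do via cachemode2protval(pcm).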
Diffstat (limited to 'arch/x86/mm/pat.c')
-rw-r--r-- | arch/x86/mm/pat.c | 32
1 file changed, 16 insertions, 16 deletions
diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
index 6d5a8e3ef63d..2f3744fdc741 100644
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
@@ -394,12 +394,12 @@ int free_memtype(u64 start, u64 end)
  *
  * Only to be called when PAT is enabled
  *
- * Returns _PAGE_CACHE_WB, _PAGE_CACHE_WC, _PAGE_CACHE_UC_MINUS or
- * _PAGE_CACHE_UC
+ * Returns _PAGE_CACHE_MODE_WB, _PAGE_CACHE_MODE_WC, _PAGE_CACHE_MODE_UC_MINUS
+ * or _PAGE_CACHE_MODE_UC
  */
-static unsigned long lookup_memtype(u64 paddr)
+static enum page_cache_mode lookup_memtype(u64 paddr)
 {
-	int rettype = _PAGE_CACHE_WB;
+	enum page_cache_mode rettype = _PAGE_CACHE_MODE_WB;
 	struct memtype *entry;
 
 	if (x86_platform.is_untracked_pat_range(paddr, paddr + PAGE_SIZE))
@@ -408,13 +408,13 @@ static unsigned long lookup_memtype(u64 paddr)
 	if (pat_pagerange_is_ram(paddr, paddr + PAGE_SIZE)) {
 		struct page *page;
 		page = pfn_to_page(paddr >> PAGE_SHIFT);
-		rettype = get_page_memtype(page);
+		rettype = pgprot2cachemode(__pgprot(get_page_memtype(page)));
 
 		/*
 		 * -1 from get_page_memtype() implies RAM page is in its
 		 * default state and not reserved, and hence of type WB
 		 */
 		if (rettype == -1)
-			rettype = _PAGE_CACHE_WB;
+			rettype = _PAGE_CACHE_MODE_WB;
 
 		return rettype;
 	}
@@ -423,9 +423,9 @@ static unsigned long lookup_memtype(u64 paddr)
 	entry = rbt_memtype_lookup(paddr);
 	if (entry != NULL)
-		rettype = entry->type;
+		rettype = pgprot2cachemode(__pgprot(entry->type));
 	else
-		rettype = _PAGE_CACHE_UC_MINUS;
+		rettype = _PAGE_CACHE_MODE_UC_MINUS;
 
 	spin_unlock(&memtype_lock);
 	return rettype;
@@ -613,7 +613,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
 	if (!pat_enabled)
 		return 0;
 
-	flags = lookup_memtype(paddr);
+	flags = cachemode2protval(lookup_memtype(paddr));
 	if (want_flags != flags) {
 		printk(KERN_WARNING "%s:%d map pfn RAM range req %s for [mem %#010Lx-%#010Lx], got %s\n",
 			current->comm, current->pid,
@@ -715,7 +715,7 @@ int track_pfn_remap(struct vm_area_struct *vma, pgprot_t *prot,
 		    unsigned long pfn, unsigned long addr, unsigned long size)
 {
 	resource_size_t paddr = (resource_size_t)pfn << PAGE_SHIFT;
-	unsigned long flags;
+	enum page_cache_mode pcm;
 
 	/* reserve the whole chunk starting from paddr */
 	if (addr == vma->vm_start && size == (vma->vm_end - vma->vm_start)) {
@@ -734,18 +734,18 @@ int track_pfn_remap(struct vm_area_struct *vma, pgprot_t *prot,
 	 * For anything smaller than the vma size we set prot based on the
 	 * lookup.
 	 */
-	flags = lookup_memtype(paddr);
+	pcm = lookup_memtype(paddr);
 
 	/* Check memtype for the remaining pages */
 	while (size > PAGE_SIZE) {
 		size -= PAGE_SIZE;
 		paddr += PAGE_SIZE;
-		if (flags != lookup_memtype(paddr))
+		if (pcm != lookup_memtype(paddr))
 			return -EINVAL;
 	}
 
 	*prot = __pgprot((pgprot_val(vma->vm_page_prot) & (~_PAGE_CACHE_MASK)) |
-			 flags);
+			 cachemode2protval(pcm));
 
 	return 0;
 }
@@ -753,15 +753,15 @@ int track_pfn_remap(struct vm_area_struct *vma, pgprot_t *prot,
 int track_pfn_insert(struct vm_area_struct *vma, pgprot_t *prot,
 		     unsigned long pfn)
 {
-	unsigned long flags;
+	enum page_cache_mode pcm;
 
 	if (!pat_enabled)
 		return 0;
 
 	/* Set prot based on lookup */
-	flags = lookup_memtype((resource_size_t)pfn << PAGE_SHIFT);
+	pcm = lookup_memtype((resource_size_t)pfn << PAGE_SHIFT);
 	*prot = __pgprot((pgprot_val(vma->vm_page_prot) & (~_PAGE_CACHE_MASK)) |
-			 flags);
+			 cachemode2protval(pcm));
 	return 0;
 }