author | Linus Torvalds <torvalds@ppc970.osdl.org> | 2005-04-16 15:20:36 -0700 |
---|---|---|
committer | Linus Torvalds <torvalds@ppc970.osdl.org> | 2005-04-16 15:20:36 -0700 |
commit | 1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (patch) | |
tree | 0bba044c4ce775e45a88a51686b5d9f90697ea9d /include/asm-mips/pgtable.h | |
download | linux-1da177e4c3f41524e886b7f1b8a0c1fc7321cac2.tar.bz2 | |

Linux-2.6.12-rc2 (tag: v2.6.12-rc2)
Initial git repository build. I'm not bothering with the full history,
even though we have it. We can create a separate "historical" git
archive of that later if we want to, and in the meantime it's about
3.2GB when imported into git - space that would just make the early
git days unnecessarily complicated, when we don't have a lot of good
infrastructure for it.
Let it rip!
Diffstat (limited to 'include/asm-mips/pgtable.h')
-rw-r--r-- | include/asm-mips/pgtable.h | 404 |
1 files changed, 404 insertions, 0 deletions
diff --git a/include/asm-mips/pgtable.h b/include/asm-mips/pgtable.h
new file mode 100644
index 000000000000..878843203d67
--- /dev/null
+++ b/include/asm-mips/pgtable.h
@@ -0,0 +1,404 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2003 Ralf Baechle
+ */
+#ifndef _ASM_PGTABLE_H
+#define _ASM_PGTABLE_H
+
+#include <asm-generic/4level-fixup.h>
+
+#include <linux/config.h>
+#ifdef CONFIG_MIPS32
+#include <asm/pgtable-32.h>
+#endif
+#ifdef CONFIG_MIPS64
+#include <asm/pgtable-64.h>
+#endif
+
+#include <asm/pgtable-bits.h>
+
+#define PAGE_NONE __pgprot(_PAGE_PRESENT | _CACHE_CACHABLE_NONCOHERENT)
+#define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
+        PAGE_CACHABLE_DEFAULT)
+#define PAGE_COPY __pgprot(_PAGE_PRESENT | _PAGE_READ | \
+        PAGE_CACHABLE_DEFAULT)
+#define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_READ | \
+        PAGE_CACHABLE_DEFAULT)
+#define PAGE_KERNEL __pgprot(_PAGE_PRESENT | __READABLE | __WRITEABLE | \
+        _PAGE_GLOBAL | PAGE_CACHABLE_DEFAULT)
+#define PAGE_USERIO __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
+        PAGE_CACHABLE_DEFAULT)
+#define PAGE_KERNEL_UNCACHED __pgprot(_PAGE_PRESENT | __READABLE | \
+        __WRITEABLE | _PAGE_GLOBAL | _CACHE_UNCACHED)
+
+/*
+ * MIPS can't do page protection for execute, and considers that the same like
+ * read. Also, write permissions imply read permissions. This is the closest
+ * we can get by reasonable means..
+ */
+#define __P000 PAGE_NONE
+#define __P001 PAGE_READONLY
+#define __P010 PAGE_COPY
+#define __P011 PAGE_COPY
+#define __P100 PAGE_READONLY
+#define __P101 PAGE_READONLY
+#define __P110 PAGE_COPY
+#define __P111 PAGE_COPY
+
+#define __S000 PAGE_NONE
+#define __S001 PAGE_READONLY
+#define __S010 PAGE_SHARED
+#define __S011 PAGE_SHARED
+#define __S100 PAGE_READONLY
+#define __S101 PAGE_READONLY
+#define __S110 PAGE_SHARED
+#define __S111 PAGE_SHARED
+
+/*
+ * ZERO_PAGE is a global shared page that is always zero; used
+ * for zero-mapped memory areas etc..
+ */
+
+extern unsigned long empty_zero_page;
+extern unsigned long zero_page_mask;
+
+#define ZERO_PAGE(vaddr) \
+        (virt_to_page(empty_zero_page + (((unsigned long)(vaddr)) & zero_page_mask)))
+
+extern void paging_init(void);
+
+/*
+ * Conversion functions: convert a page and protection to a page entry,
+ * and a page entry and page directory to the page they refer to.
+ */
+#define page_pte(page) page_pte_prot(page, __pgprot(0))
+#define pmd_phys(pmd) (pmd_val(pmd) - PAGE_OFFSET)
+#define pmd_page(pmd) (pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT))
+#define pmd_page_kernel(pmd) pmd_val(pmd)
+
+#define pte_none(pte) (!(pte_val(pte) & ~_PAGE_GLOBAL))
+#define pte_present(pte) (pte_val(pte) & _PAGE_PRESENT)
+
+#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32)
+static inline void set_pte(pte_t *ptep, pte_t pte)
+{
+        ptep->pte_high = pte.pte_high;
+        smp_wmb();
+        ptep->pte_low = pte.pte_low;
+        //printk("pte_high %x pte_low %x\n", ptep->pte_high, ptep->pte_low);
+
+        if (pte_val(pte) & _PAGE_GLOBAL) {
+                pte_t *buddy = ptep_buddy(ptep);
+                /*
+                 * Make sure the buddy is global too (if it's !none,
+                 * it better already be global)
+                 */
+                if (pte_none(*buddy))
+                        buddy->pte_low |= _PAGE_GLOBAL;
+        }
+}
+#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)
+
+static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
+{
+        /* Preserve global status for the pair */
+        if (pte_val(*ptep_buddy(ptep)) & _PAGE_GLOBAL)
+                set_pte_at(mm, addr, ptep, __pte(_PAGE_GLOBAL));
+        else
+                set_pte_at(mm, addr, ptep, __pte(0));
+}
+#else
+/*
+ * Certain architectures need to do special things when pte's
+ * within a page table are directly modified. Thus, the following
+ * hook is made available.
+ */
+static inline void set_pte(pte_t *ptep, pte_t pteval)
+{
+        *ptep = pteval;
+#if !defined(CONFIG_CPU_R3000) && !defined(CONFIG_CPU_TX39XX)
+        if (pte_val(pteval) & _PAGE_GLOBAL) {
+                pte_t *buddy = ptep_buddy(ptep);
+                /*
+                 * Make sure the buddy is global too (if it's !none,
+                 * it better already be global)
+                 */
+                if (pte_none(*buddy))
+                        pte_val(*buddy) = pte_val(*buddy) | _PAGE_GLOBAL;
+        }
+#endif
+}
+#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)
+
+static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
+{
+#if !defined(CONFIG_CPU_R3000) && !defined(CONFIG_CPU_TX39XX)
+        /* Preserve global status for the pair */
+        if (pte_val(*ptep_buddy(ptep)) & _PAGE_GLOBAL)
+                set_pte_at(mm, addr, ptep, __pte(_PAGE_GLOBAL));
+        else
+#endif
+                set_pte_at(mm, addr, ptep, __pte(0));
+}
+#endif
+
+/*
+ * (pmds are folded into pgds so this doesn't get actually called,
+ * but the define is needed for a generic inline function.)
+ */
+#define set_pmd(pmdptr, pmdval) do { *(pmdptr) = (pmdval); } while(0)
+#define set_pgd(pgdptr, pgdval) do { *(pgdptr) = (pgdval); } while(0)
+
+#define PGD_T_LOG2 ffz(~sizeof(pgd_t))
+#define PMD_T_LOG2 ffz(~sizeof(pmd_t))
+#define PTE_T_LOG2 ffz(~sizeof(pte_t))
+
+extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
+
+/*
+ * The following only work if pte_present() is true.
+ * Undefined behaviour if not..
+ */
+static inline int pte_user(pte_t pte) { BUG(); return 0; }
+#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32)
+static inline int pte_read(pte_t pte) { return (pte).pte_low & _PAGE_READ; }
+static inline int pte_write(pte_t pte) { return (pte).pte_low & _PAGE_WRITE; }
+static inline int pte_dirty(pte_t pte) { return (pte).pte_low & _PAGE_MODIFIED; }
+static inline int pte_young(pte_t pte) { return (pte).pte_low & _PAGE_ACCESSED; }
+static inline int pte_file(pte_t pte) { return (pte).pte_low & _PAGE_FILE; }
+static inline pte_t pte_wrprotect(pte_t pte)
+{
+        (pte).pte_low &= ~(_PAGE_WRITE | _PAGE_SILENT_WRITE);
+        (pte).pte_high &= ~_PAGE_SILENT_WRITE;
+        return pte;
+}
+
+static inline pte_t pte_rdprotect(pte_t pte)
+{
+        (pte).pte_low &= ~(_PAGE_READ | _PAGE_SILENT_READ);
+        (pte).pte_high &= ~_PAGE_SILENT_READ;
+        return pte;
+}
+
+static inline pte_t pte_mkclean(pte_t pte)
+{
+        (pte).pte_low &= ~(_PAGE_MODIFIED|_PAGE_SILENT_WRITE);
+        (pte).pte_high &= ~_PAGE_SILENT_WRITE;
+        return pte;
+}
+
+static inline pte_t pte_mkold(pte_t pte)
+{
+        (pte).pte_low &= ~(_PAGE_ACCESSED|_PAGE_SILENT_READ);
+        (pte).pte_high &= ~_PAGE_SILENT_READ;
+        return pte;
+}
+
+static inline pte_t pte_mkwrite(pte_t pte)
+{
+        (pte).pte_low |= _PAGE_WRITE;
+        if ((pte).pte_low & _PAGE_MODIFIED) {
+                (pte).pte_low |= _PAGE_SILENT_WRITE;
+                (pte).pte_high |= _PAGE_SILENT_WRITE;
+        }
+        return pte;
+}
+
+static inline pte_t pte_mkread(pte_t pte)
+{
+        (pte).pte_low |= _PAGE_READ;
+        if ((pte).pte_low & _PAGE_ACCESSED) {
+                (pte).pte_low |= _PAGE_SILENT_READ;
+                (pte).pte_high |= _PAGE_SILENT_READ;
+        }
+        return pte;
+}
+
+static inline pte_t pte_mkdirty(pte_t pte)
+{
+        (pte).pte_low |= _PAGE_MODIFIED;
+        if ((pte).pte_low & _PAGE_WRITE) {
+                (pte).pte_low |= _PAGE_SILENT_WRITE;
+                (pte).pte_high |= _PAGE_SILENT_WRITE;
+        }
+        return pte;
+}
+
+static inline pte_t pte_mkyoung(pte_t pte)
+{
+        (pte).pte_low |= _PAGE_ACCESSED;
+        if ((pte).pte_low & _PAGE_READ)
+                (pte).pte_low |= _PAGE_SILENT_READ;
+                (pte).pte_high |= _PAGE_SILENT_READ;
+        return pte;
+}
+#else
+static inline int pte_read(pte_t pte) { return pte_val(pte) & _PAGE_READ; }
+static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_WRITE; }
+static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_MODIFIED; }
+static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; }
+static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE; }
+
+static inline pte_t pte_wrprotect(pte_t pte)
+{
+        pte_val(pte) &= ~(_PAGE_WRITE | _PAGE_SILENT_WRITE);
+        return pte;
+}
+
+static inline pte_t pte_rdprotect(pte_t pte)
+{
+        pte_val(pte) &= ~(_PAGE_READ | _PAGE_SILENT_READ);
+        return pte;
+}
+
+static inline pte_t pte_mkclean(pte_t pte)
+{
+        pte_val(pte) &= ~(_PAGE_MODIFIED|_PAGE_SILENT_WRITE);
+        return pte;
+}
+
+static inline pte_t pte_mkold(pte_t pte)
+{
+        pte_val(pte) &= ~(_PAGE_ACCESSED|_PAGE_SILENT_READ);
+        return pte;
+}
+
+static inline pte_t pte_mkwrite(pte_t pte)
+{
+        pte_val(pte) |= _PAGE_WRITE;
+        if (pte_val(pte) & _PAGE_MODIFIED)
+                pte_val(pte) |= _PAGE_SILENT_WRITE;
+        return pte;
+}
+
+static inline pte_t pte_mkread(pte_t pte)
+{
+        pte_val(pte) |= _PAGE_READ;
+        if (pte_val(pte) & _PAGE_ACCESSED)
+                pte_val(pte) |= _PAGE_SILENT_READ;
+        return pte;
+}
+
+static inline pte_t pte_mkdirty(pte_t pte)
+{
+        pte_val(pte) |= _PAGE_MODIFIED;
+        if (pte_val(pte) & _PAGE_WRITE)
+                pte_val(pte) |= _PAGE_SILENT_WRITE;
+        return pte;
+}
+
+static inline pte_t pte_mkyoung(pte_t pte)
+{
+        pte_val(pte) |= _PAGE_ACCESSED;
+        if (pte_val(pte) & _PAGE_READ)
+                pte_val(pte) |= _PAGE_SILENT_READ;
+        return pte;
+}
+#endif
+
+/*
+ * Macro to make mark a page protection value as "uncacheable". Note
+ * that "protection" is really a misnomer here as the protection value
+ * contains the memory attribute bits, dirty bits, and various other
+ * bits as well.
+ */
+#define pgprot_noncached pgprot_noncached
+
+static inline pgprot_t pgprot_noncached(pgprot_t _prot)
+{
+        unsigned long prot = pgprot_val(_prot);
+
+        prot = (prot & ~_CACHE_MASK) | _CACHE_UNCACHED;
+
+        return __pgprot(prot);
+}
+
+/*
+ * Conversion functions: convert a page and protection to a page entry,
+ * and a page entry and page directory to the page they refer to.
+ */
+#define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot))
+
+#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32)
+static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
+{
+        pte.pte_low &= _PAGE_CHG_MASK;
+        pte.pte_low |= pgprot_val(newprot);
+        pte.pte_high |= pgprot_val(newprot) & 0x3f;
+        return pte;
+}
+#else
+static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
+{
+        return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot));
+}
+#endif
+
+
+extern void __update_tlb(struct vm_area_struct *vma, unsigned long address,
+        pte_t pte);
+extern void __update_cache(struct vm_area_struct *vma, unsigned long address,
+        pte_t pte);
+
+static inline void update_mmu_cache(struct vm_area_struct *vma,
+        unsigned long address, pte_t pte)
+{
+        __update_tlb(vma, address, pte);
+        __update_cache(vma, address, pte);
+}
+
+#ifndef CONFIG_DISCONTIGMEM
+#define kern_addr_valid(addr) (1)
+#endif
+
+#ifdef CONFIG_64BIT_PHYS_ADDR
+extern phys_t fixup_bigphys_addr(phys_t phys_addr, phys_t size);
+extern int remap_pfn_range(struct vm_area_struct *vma, unsigned long from, unsigned long pfn, unsigned long size, pgprot_t prot);
+
+static inline int io_remap_page_range(struct vm_area_struct *vma,
+                unsigned long vaddr,
+                unsigned long paddr,
+                unsigned long size,
+                pgprot_t prot)
+{
+        phys_t phys_addr_high = fixup_bigphys_addr(paddr, size);
+        return remap_pfn_range(vma, vaddr, phys_addr_high >> PAGE_SHIFT, size, prot);
+}
+
+static inline int io_remap_pfn_range(struct vm_area_struct *vma,
+                unsigned long vaddr,
+                unsigned long pfn,
+                unsigned long size,
+                pgprot_t prot)
+{
+        phys_t phys_addr_high = fixup_bigphys_addr(pfn << PAGE_SHIFT, size);
+        return remap_pfn_range(vma, vaddr, pfn, size, prot);
+}
+#else
+#define io_remap_page_range(vma, vaddr, paddr, size, prot) \
+        remap_pfn_range(vma, vaddr, (paddr) >> PAGE_SHIFT, size, prot)
+#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
+        remap_pfn_range(vma, vaddr, pfn, size, prot)
+#endif
+
+#define MK_IOSPACE_PFN(space, pfn) (pfn)
+#define GET_IOSPACE(pfn) 0
+#define GET_PFN(pfn) (pfn)
+
+#include <asm-generic/pgtable.h>
+
+/*
+ * We provide our own get_unmapped area to cope with the virtual aliasing
+ * constraints placed on us by the cache architecture.
+ */
+#define HAVE_ARCH_UNMAPPED_AREA
+
+/*
+ * No page table caches to initialise
+ */
+#define pgtable_cache_init() do { } while (0)
+
+#endif /* _ASM_PGTABLE_H */