Diffstat (limited to 'include/asm-ia64/pgtable.h')
-rw-r--r--  include/asm-ia64/pgtable.h | 80
1 file changed, 61 insertions(+), 19 deletions(-)
diff --git a/include/asm-ia64/pgtable.h b/include/asm-ia64/pgtable.h
index 3339c7b55a6f..e2560c58384b 100644
--- a/include/asm-ia64/pgtable.h
+++ b/include/asm-ia64/pgtable.h
@@ -84,32 +84,55 @@
#define __DIRTY_BITS _PAGE_ED | __DIRTY_BITS_NO_ED
/*
- * Definitions for first level:
- *
- * PGDIR_SHIFT determines what a first-level page table entry can map.
+ * How many pointers a page-table level holds, expressed as a shift
*/
-#define PGDIR_SHIFT (PAGE_SHIFT + 2*(PAGE_SHIFT-3))
-#define PGDIR_SIZE (__IA64_UL(1) << PGDIR_SHIFT)
-#define PGDIR_MASK (~(PGDIR_SIZE-1))
-#define PTRS_PER_PGD (1UL << (PAGE_SHIFT-3))
-#define USER_PTRS_PER_PGD (5*PTRS_PER_PGD/8) /* regions 0-4 are user regions */
-#define FIRST_USER_ADDRESS 0
+#define PTRS_PER_PTD_SHIFT (PAGE_SHIFT-3)
/*
- * Definitions for second level:
+ * Definitions for fourth level:
+ */
+#define PTRS_PER_PTE (__IA64_UL(1) << (PTRS_PER_PTD_SHIFT))
+
+/*
+ * Definitions for third level:
*
- * PMD_SHIFT determines the size of the area a second-level page table
+ * PMD_SHIFT determines the size of the area a third-level page table
* can map.
*/
-#define PMD_SHIFT (PAGE_SHIFT + (PAGE_SHIFT-3))
+#define PMD_SHIFT (PAGE_SHIFT + (PTRS_PER_PTD_SHIFT))
#define PMD_SIZE (1UL << PMD_SHIFT)
#define PMD_MASK (~(PMD_SIZE-1))
-#define PTRS_PER_PMD (1UL << (PAGE_SHIFT-3))
+#define PTRS_PER_PMD (1UL << (PTRS_PER_PTD_SHIFT))
+#ifdef CONFIG_PGTABLE_4
/*
- * Definitions for third level:
+ * Definitions for second level:
+ *
+ * PUD_SHIFT determines the size of the area a second-level page table
+ * can map.
+ */
+#define PUD_SHIFT (PMD_SHIFT + (PTRS_PER_PTD_SHIFT))
+#define PUD_SIZE (1UL << PUD_SHIFT)
+#define PUD_MASK (~(PUD_SIZE-1))
+#define PTRS_PER_PUD (1UL << (PTRS_PER_PTD_SHIFT))
+#endif
+
+/*
+ * Definitions for first level:
+ *
+ * PGDIR_SHIFT determines what a first-level page table entry can map.
*/
-#define PTRS_PER_PTE (__IA64_UL(1) << (PAGE_SHIFT-3))
+#ifdef CONFIG_PGTABLE_4
+#define PGDIR_SHIFT (PUD_SHIFT + (PTRS_PER_PTD_SHIFT))
+#else
+#define PGDIR_SHIFT (PMD_SHIFT + (PTRS_PER_PTD_SHIFT))
+#endif
+#define PGDIR_SIZE (__IA64_UL(1) << PGDIR_SHIFT)
+#define PGDIR_MASK (~(PGDIR_SIZE-1))
+#define PTRS_PER_PGD_SHIFT PTRS_PER_PTD_SHIFT
+#define PTRS_PER_PGD (1UL << PTRS_PER_PGD_SHIFT)
+#define USER_PTRS_PER_PGD (5*PTRS_PER_PGD/8) /* regions 0-4 are user regions */
+#define FIRST_USER_ADDRESS 0
/*
* All the normal masks have the "page accessed" bits on, as any time
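
To make the layering above concrete, the following standalone sketch mirrors the shift arithmetic and prints how much address space one entry at each level maps. It assumes PAGE_SHIFT = 14 (16KB pages) and, for the PUD/4-level figures, CONFIG_PGTABLE_4; both the page size and the number of levels are build-time choices, so the numbers are illustrative only.

#include <stdio.h>

/* Assumed configuration: 16KB pages (PAGE_SHIFT is a kernel build option). */
#define PAGE_SHIFT          14
#define PTRS_PER_PTD_SHIFT  (PAGE_SHIFT - 3)    /* 8-byte entries per table page */

#define PMD_SHIFT           (PAGE_SHIFT + PTRS_PER_PTD_SHIFT)
#define PUD_SHIFT           (PMD_SHIFT + PTRS_PER_PTD_SHIFT)   /* CONFIG_PGTABLE_4 */
#define PGDIR_SHIFT_3LVL    (PMD_SHIFT + PTRS_PER_PTD_SHIFT)
#define PGDIR_SHIFT_4LVL    (PUD_SHIFT + PTRS_PER_PTD_SHIFT)

int main(void)
{
	printf("entries per level        : %llu\n", 1ULL << PTRS_PER_PTD_SHIFT);
	printf("one pmd entry maps       : %llu MB\n", (1ULL << PMD_SHIFT) >> 20);
	printf("one pud entry maps       : %llu GB\n", (1ULL << PUD_SHIFT) >> 30);
	printf("one pgd entry, 3 levels  : %llu GB\n", (1ULL << PGDIR_SHIFT_3LVL) >> 30);
	printf("one pgd entry, 4 levels  : %llu TB\n", (1ULL << PGDIR_SHIFT_4LVL) >> 40);
	return 0;
}

With 16KB pages each table page holds 2048 entries, so a pmd entry covers 32MB, a pud entry 64GB, and a pgd entry 64GB with three levels or 128TB with four.
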
@@ -127,6 +150,7 @@
# ifndef __ASSEMBLY__
+#include <linux/sched.h> /* for mm_struct */
#include <asm/bitops.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
@@ -160,6 +184,9 @@
#define __S111 __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RWX)
#define pgd_ERROR(e) printk("%s:%d: bad pgd %016lx.\n", __FILE__, __LINE__, pgd_val(e))
+#ifdef CONFIG_PGTABLE_4
+#define pud_ERROR(e) printk("%s:%d: bad pud %016lx.\n", __FILE__, __LINE__, pud_val(e))
+#endif
#define pmd_ERROR(e) printk("%s:%d: bad pmd %016lx.\n", __FILE__, __LINE__, pmd_val(e))
#define pte_ERROR(e) printk("%s:%d: bad pte %016lx.\n", __FILE__, __LINE__, pte_val(e))
@@ -217,6 +244,9 @@ ia64_phys_addr_valid (unsigned long addr)
#define kc_vaddr_to_offset(v) ((v) - RGN_BASE(RGN_GATE))
#define kc_offset_to_vaddr(o) ((o) + RGN_BASE(RGN_GATE))
+#define RGN_MAP_SHIFT (PGDIR_SHIFT + PTRS_PER_PGD_SHIFT - 3)
+#define RGN_MAP_LIMIT ((1UL << RGN_MAP_SHIFT) - PAGE_SIZE) /* per region addr limit */
+
/*
* Conversion functions: convert page frame number (pfn) and a protection value to a page
* table entry (pte).
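
The "- 3" in RGN_MAP_SHIFT comes from the ia64 region scheme: the top three bits of a virtual address select one of eight regions, so each region owns only PTRS_PER_PGD/8 first-level entries. Under the same assumed PAGE_SHIFT = 14, the per-region limit works out roughly as:

RGN_MAP_SHIFT = PGDIR_SHIFT + PTRS_PER_PGD_SHIFT - 3
              = 36 + 11 - 3 = 44   (3 levels: each region maps up to ~16 TB)
              = 47 + 11 - 3 = 55   (CONFIG_PGTABLE_4: up to ~32 PB)
RGN_MAP_LIMIT = (1 << RGN_MAP_SHIFT) - PAGE_SIZE
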
@@ -236,9 +266,6 @@ ia64_phys_addr_valid (unsigned long addr)
#define pte_modify(_pte, newprot) \
(__pte((pte_val(_pte) & ~_PAGE_CHG_MASK) | (pgprot_val(newprot) & _PAGE_CHG_MASK)))
-#define page_pte_prot(page,prot) mk_pte(page, prot)
-#define page_pte(page) page_pte_prot(page, __pgprot(0))
-
#define pte_none(pte) (!pte_val(pte))
#define pte_present(pte) (pte_val(pte) & (_PAGE_P | _PAGE_PROTNONE))
#define pte_clear(mm,addr,pte) (pte_val(*(pte)) = 0UL)
@@ -256,9 +283,16 @@ ia64_phys_addr_valid (unsigned long addr)
#define pud_bad(pud) (!ia64_phys_addr_valid(pud_val(pud)))
#define pud_present(pud) (pud_val(pud) != 0UL)
#define pud_clear(pudp) (pud_val(*(pudp)) = 0UL)
-
#define pud_page(pud) ((unsigned long) __va(pud_val(pud) & _PFN_MASK))
+#ifdef CONFIG_PGTABLE_4
+#define pgd_none(pgd) (!pgd_val(pgd))
+#define pgd_bad(pgd) (!ia64_phys_addr_valid(pgd_val(pgd)))
+#define pgd_present(pgd) (pgd_val(pgd) != 0UL)
+#define pgd_clear(pgdp) (pgd_val(*(pgdp)) = 0UL)
+#define pgd_page(pgd) ((unsigned long) __va(pgd_val(pgd) & _PFN_MASK))
+#endif
+
/*
 * The following have defined behavior only if pte_present() is true.
*/
@@ -326,7 +360,13 @@ pgd_offset (struct mm_struct *mm, unsigned long address)
here. */
#define pgd_offset_gate(mm, addr) pgd_offset_k(addr)
+#ifdef CONFIG_PGTABLE_4
/* Find an entry in the second-level page table.. */
+#define pud_offset(dir,addr) \
+ ((pud_t *) pgd_page(*(dir)) + (((addr) >> PUD_SHIFT) & (PTRS_PER_PUD - 1)))
+#endif
+
+/* Find an entry in the third-level page table.. */
#define pmd_offset(dir,addr) \
((pmd_t *) pud_page(*(dir)) + (((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1)))
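
A rough sketch of the index extraction these offset macros perform, again assuming PAGE_SHIFT = 14 and the 4-level layout; decompose() is a hypothetical illustration, not a helper from this header, and the real first-level lookup is region-aware via pgd_offset()/pgd_index(), which only appears in context above.

#include <stdio.h>

#define PAGE_SHIFT          14                  /* assumed: 16KB pages */
#define PTRS_PER_PTD_SHIFT  (PAGE_SHIFT - 3)
#define PTRS_PER_PTE        (1ULL << PTRS_PER_PTD_SHIFT)
#define PMD_SHIFT           (PAGE_SHIFT + PTRS_PER_PTD_SHIFT)
#define PUD_SHIFT           (PMD_SHIFT + PTRS_PER_PTD_SHIFT)

/* Hypothetical helper: show the per-level indices the offset macros compute. */
static void decompose(unsigned long long addr)
{
	unsigned long long mask = PTRS_PER_PTE - 1;

	printf("addr 0x%016llx: region %llu, pud idx %llu, pmd idx %llu, pte idx %llu\n",
	       addr,
	       addr >> 61,                     /* region number (top 3 bits) */
	       (addr >> PUD_SHIFT)  & mask,    /* pud_offset() index         */
	       (addr >> PMD_SHIFT)  & mask,    /* pmd_offset() index         */
	       (addr >> PAGE_SHIFT) & mask);   /* pte index within the pmd   */
}

int main(void)
{
	decompose(0x2000000000104000ULL);       /* an arbitrary region-1 address */
	return 0;
}
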
@@ -559,7 +599,9 @@ do { \
#define __HAVE_ARCH_PGD_OFFSET_GATE
#define __HAVE_ARCH_LAZY_MMU_PROT_UPDATE
+#ifndef CONFIG_PGTABLE_4
#include <asm-generic/pgtable-nopud.h>
+#endif
#include <asm-generic/pgtable.h>
#endif /* _ASM_IA64_PGTABLE_H */
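
When CONFIG_PGTABLE_4 is not set, the asm-generic/pgtable-nopud.h include folds the pud level into the pgd, which is why the pud accessors keep working with only three real levels and the extra pgd_* macros above are needed only in the 4-level case. The folding behaves roughly like the sketch below (paraphrased from the generic header; the pgd_t stand-in exists only to keep the snippet self-contained):

/* Stand-in for the kernel's pgd_t, for illustration only. */
typedef struct { unsigned long pgd; } pgd_t;
typedef struct { pgd_t pgd; } pud_t;

/* With the pud level folded, walking the "pud" just reuses the pgd entry. */
static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
{
	return (pud_t *)pgd;
}

/* A folded pgd entry can never be none/bad on its own. */
static inline int pgd_none(pgd_t pgd)    { return 0; }
static inline int pgd_bad(pgd_t pgd)     { return 0; }
static inline int pgd_present(pgd_t pgd) { return 1; }
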