Diffstat (limited to 'arch/powerpc/include/asm')
-rw-r--r-- | arch/powerpc/include/asm/book3s/32/pgtable.h    | 20
-rw-r--r-- | arch/powerpc/include/asm/book3s/64/pgtable.h    | 43
-rw-r--r-- | arch/powerpc/include/asm/cacheflush.h           | 42
-rw-r--r-- | arch/powerpc/include/asm/fixmap.h               |  2
-rw-r--r-- | arch/powerpc/include/asm/io.h                   |  1
-rw-r--r-- | arch/powerpc/include/asm/kup.h                  |  2
-rw-r--r-- | arch/powerpc/include/asm/kvm_book3s.h           | 16
-rw-r--r-- | arch/powerpc/include/asm/kvm_host.h             |  1
-rw-r--r-- | arch/powerpc/include/asm/kvm_ppc.h              | 27
-rw-r--r-- | arch/powerpc/include/asm/nohash/32/pgtable.h    | 17
-rw-r--r-- | arch/powerpc/include/asm/nohash/64/pgtable-4k.h |  4
-rw-r--r-- | arch/powerpc/include/asm/nohash/64/pgtable.h    | 22
-rw-r--r-- | arch/powerpc/include/asm/nohash/pgtable.h       |  2
-rw-r--r-- | arch/powerpc/include/asm/pgtable.h              | 28
-rw-r--r-- | arch/powerpc/include/asm/pkeys.h                |  2
-rw-r--r-- | arch/powerpc/include/asm/tlb.h                  |  2
16 files changed, 47 insertions, 184 deletions
diff --git a/arch/powerpc/include/asm/book3s/32/pgtable.h b/arch/powerpc/include/asm/book3s/32/pgtable.h
index d7978a5a79c3..224912432821 100644
--- a/arch/powerpc/include/asm/book3s/32/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/32/pgtable.h
@@ -112,6 +112,9 @@ static inline bool pte_user(pte_t pte)
 #define PMD_TABLE_SIZE	0
 #define PUD_TABLE_SIZE	0
 #define PGD_TABLE_SIZE	(sizeof(pgd_t) << PGD_INDEX_SIZE)
+
+/* Bits to mask out from a PMD to get to the PTE page */
+#define PMD_MASKED_BITS		(PTE_TABLE_SIZE - 1)
 #endif	/* __ASSEMBLY__ */
 
 #define PTRS_PER_PTE	(1 << PTE_INDEX_SIZE)
@@ -332,26 +335,9 @@ static inline void __ptep_set_access_flags(struct vm_area_struct *vma,
 #define __HAVE_ARCH_PTE_SAME
 #define pte_same(A,B)	(((pte_val(A) ^ pte_val(B)) & ~_PAGE_HASHPTE) == 0)
 
-#define pmd_page_vaddr(pmd)	\
-	((unsigned long)__va(pmd_val(pmd) & ~(PTE_TABLE_SIZE - 1)))
 #define pmd_page(pmd)		\
 	pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT)
 
-/* to find an entry in a kernel page-table-directory */
-#define pgd_offset_k(address)	pgd_offset(&init_mm, address)
-
-/* to find an entry in a page-table-directory */
-#define pgd_index(address)	((address) >> PGDIR_SHIFT)
-#define pgd_offset(mm, address)	((mm)->pgd + pgd_index(address))
-
-/* Find an entry in the third-level page table.. */
-#define pte_index(address)		\
-	(((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
-#define pte_offset_kernel(dir, addr)	\
-	((pte_t *) pmd_page_vaddr(*(dir)) + pte_index(addr))
-#define pte_offset_map(dir, addr)	pte_offset_kernel((dir), (addr))
-static inline void pte_unmap(pte_t *pte) { }
-
 /*
  * Encode and decode a swap entry.
  * Note that the bits we use in a PTE for representing a swap entry
diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h
index f17442c3a092..25c3cb8272c0 100644
--- a/arch/powerpc/include/asm/book3s/64/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
@@ -1005,52 +1005,9 @@ extern struct page *p4d_page(p4d_t p4d);
 /* Pointers in the page table tree are physical addresses */
 #define __pgtable_ptr_val(ptr)	__pa(ptr)
 
-#define pmd_page_vaddr(pmd)	__va(pmd_val(pmd) & ~PMD_MASKED_BITS)
 #define pud_page_vaddr(pud)	__va(pud_val(pud) & ~PUD_MASKED_BITS)
 #define p4d_page_vaddr(p4d)	__va(p4d_val(p4d) & ~P4D_MASKED_BITS)
 
-static inline unsigned long pgd_index(unsigned long address)
-{
-	return (address >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1);
-}
-
-static inline unsigned long pud_index(unsigned long address)
-{
-	return (address >> PUD_SHIFT) & (PTRS_PER_PUD - 1);
-}
-
-static inline unsigned long pmd_index(unsigned long address)
-{
-	return (address >> PMD_SHIFT) & (PTRS_PER_PMD - 1);
-}
-
-static inline unsigned long pte_index(unsigned long address)
-{
-	return (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
-}
-
-/*
- * Find an entry in a page-table-directory.  We combine the address region
- * (the high order N bits) and the pgd portion of the address.
- */
-
-#define pgd_offset(mm, address)	((mm)->pgd + pgd_index(address))
-
-#define pud_offset(p4dp, addr)	\
-	(((pud_t *) p4d_page_vaddr(*(p4dp))) + pud_index(addr))
-#define pmd_offset(pudp,addr) \
-	(((pmd_t *) pud_page_vaddr(*(pudp))) + pmd_index(addr))
-#define pte_offset_kernel(dir,addr) \
-	(((pte_t *) pmd_page_vaddr(*(dir))) + pte_index(addr))
-
-#define pte_offset_map(dir,addr)	pte_offset_kernel((dir), (addr))
-
-static inline void pte_unmap(pte_t *pte) { }
-
-/* to find an entry in a kernel page-table-directory */
-/* This now only contains the vmalloc pages */
-#define pgd_offset_k(address)	pgd_offset(&init_mm, address)
-
 #define pte_ERROR(e) \
 	pr_err("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
 #define pmd_ERROR(e) \
diff --git a/arch/powerpc/include/asm/cacheflush.h b/arch/powerpc/include/asm/cacheflush.h
index e92191b390f3..de600b915a3c 100644
--- a/arch/powerpc/include/asm/cacheflush.h
+++ b/arch/powerpc/include/asm/cacheflush.h
@@ -4,23 +4,9 @@
 #ifndef _ASM_POWERPC_CACHEFLUSH_H
 #define _ASM_POWERPC_CACHEFLUSH_H
 
-#ifdef __KERNEL__
-
 #include <linux/mm.h>
 #include <asm/cputable.h>
 
-/*
- * No cache flushing is required when address mappings are changed,
- * because the caches on PowerPCs are physically addressed.
- */
-#define flush_cache_all()			do { } while (0)
-#define flush_cache_mm(mm)			do { } while (0)
-#define flush_cache_dup_mm(mm)			do { } while (0)
-#define flush_cache_range(vma, start, end)	do { } while (0)
-#define flush_cache_page(vma, vmaddr, pfn)	do { } while (0)
-#define flush_icache_page(vma, page)		do { } while (0)
-#define flush_cache_vunmap(start, end)		do { } while (0)
-
 #ifdef CONFIG_PPC_BOOK3S_64
 /*
  * Book3s has no ptesync after setting a pte, so without this ptesync it's
@@ -33,20 +19,20 @@ static inline void flush_cache_vmap(unsigned long start, unsigned long end)
 {
 	asm volatile("ptesync" ::: "memory");
 }
-#else
-static inline void flush_cache_vmap(unsigned long start, unsigned long end) { }
-#endif
+#define flush_cache_vmap flush_cache_vmap
+#endif /* CONFIG_PPC_BOOK3S_64 */
 
 #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
 extern void flush_dcache_page(struct page *page);
-#define flush_dcache_mmap_lock(mapping)		do { } while (0)
-#define flush_dcache_mmap_unlock(mapping)	do { } while (0)
 
 void flush_icache_range(unsigned long start, unsigned long stop);
-extern void flush_icache_user_range(struct vm_area_struct *vma,
-				    struct page *page, unsigned long addr,
-				    int len);
-extern void flush_dcache_icache_page(struct page *page);
+#define flush_icache_range flush_icache_range
+
+void flush_icache_user_page(struct vm_area_struct *vma, struct page *page,
+		unsigned long addr, int len);
+#define flush_icache_user_page flush_icache_user_page
+
+void flush_dcache_icache_page(struct page *page);
 void __flush_dcache_icache(void *page);
 
 /**
@@ -111,14 +97,6 @@ static inline void invalidate_dcache_range(unsigned long start,
 	mb();	/* sync */
 }
 
-#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
-	do { \
-		memcpy(dst, src, len); \
-		flush_icache_user_range(vma, page, vaddr, len); \
-	} while (0)
-#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
-	memcpy(dst, src, len)
-
-#endif /* __KERNEL__ */
+#include <asm-generic/cacheflush.h>
 
 #endif /* _ASM_POWERPC_CACHEFLUSH_H */
diff --git a/arch/powerpc/include/asm/fixmap.h b/arch/powerpc/include/asm/fixmap.h
index ccbe2e83c950..29188810ba30 100644
--- a/arch/powerpc/include/asm/fixmap.h
+++ b/arch/powerpc/include/asm/fixmap.h
@@ -16,8 +16,8 @@
 
 #ifndef __ASSEMBLY__
 #include <linux/sizes.h>
+#include <linux/pgtable.h>
 #include <asm/page.h>
-#include <asm/pgtable.h>
 #ifdef CONFIG_HIGHMEM
 #include <linux/threads.h>
 #include <asm/kmap_types.h>
diff --git a/arch/powerpc/include/asm/io.h b/arch/powerpc/include/asm/io.h
index 13f90dd03450..58635960403c 100644
--- a/arch/powerpc/include/asm/io.h
+++ b/arch/powerpc/include/asm/io.h
@@ -34,7 +34,6 @@ extern struct pci_dev *isa_bridge_pcidev;
 #include <asm/mmiowb.h>
 #include <asm/mmu.h>
 #include <asm/ppc_asm.h>
-#include <asm/pgtable.h>
 
 #define SIO_CONFIG_RA	0x398
 #define SIO_CONFIG_RD	0x399
diff --git a/arch/powerpc/include/asm/kup.h b/arch/powerpc/include/asm/kup.h
index c745ee41ad66..1d0f7d838b2e 100644
--- a/arch/powerpc/include/asm/kup.h
+++ b/arch/powerpc/include/asm/kup.h
@@ -39,7 +39,7 @@
 
 #else /* !__ASSEMBLY__ */
 
-#include <asm/pgtable.h>
+#include <linux/pgtable.h>
 
 void setup_kup(void);
 
diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h
index 8dd24c7692a0..d32ec9ae73bd 100644
--- a/arch/powerpc/include/asm/kvm_book3s.h
+++ b/arch/powerpc/include/asm/kvm_book3s.h
@@ -155,12 +155,11 @@ extern void kvmppc_mmu_unmap_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte)
 extern int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr);
 extern void kvmppc_mmu_flush_segment(struct kvm_vcpu *vcpu, ulong eaddr, ulong seg_size);
 extern void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu);
-extern int kvmppc_book3s_hv_page_fault(struct kvm_run *run,
-			struct kvm_vcpu *vcpu, unsigned long addr,
-			unsigned long status);
+extern int kvmppc_book3s_hv_page_fault(struct kvm_vcpu *vcpu,
+			unsigned long addr, unsigned long status);
 extern long kvmppc_hv_find_lock_hpte(struct kvm *kvm, gva_t eaddr,
 			unsigned long slb_v, unsigned long valid);
-extern int kvmppc_hv_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu,
+extern int kvmppc_hv_emulate_mmio(struct kvm_vcpu *vcpu,
 			unsigned long gpa, gva_t ea, int is_store);
 
 extern void kvmppc_mmu_hpte_cache_map(struct kvm_vcpu *vcpu, struct hpte_cache *pte);
@@ -174,8 +173,7 @@ extern void kvmppc_mmu_hpte_sysexit(void);
 extern int kvmppc_mmu_hv_init(void);
 extern int kvmppc_book3s_hcall_implemented(struct kvm *kvm, unsigned long hc);
 
-extern int kvmppc_book3s_radix_page_fault(struct kvm_run *run,
-			struct kvm_vcpu *vcpu,
+extern int kvmppc_book3s_radix_page_fault(struct kvm_vcpu *vcpu,
 			unsigned long ea, unsigned long dsisr);
 extern unsigned long __kvmhv_copy_tofrom_guest_radix(int lpid, int pid,
 			gva_t eaddr, void *to, void *from,
@@ -234,7 +232,7 @@ extern void kvmppc_trigger_fac_interrupt(struct kvm_vcpu *vcpu, ulong fac);
 extern void kvmppc_set_bat(struct kvm_vcpu *vcpu, struct kvmppc_bat *bat,
 			   bool upper, u32 val);
 extern void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr);
-extern int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu);
+extern int kvmppc_emulate_paired_single(struct kvm_vcpu *vcpu);
 extern kvm_pfn_t kvmppc_gpa_to_pfn(struct kvm_vcpu *vcpu, gpa_t gpa,
 			bool writing, bool *writable);
 extern void kvmppc_add_revmap_chain(struct kvm *kvm, struct revmap_entry *rev,
@@ -300,12 +298,12 @@ void kvmhv_set_ptbl_entry(unsigned int lpid, u64 dw0, u64 dw1);
 void kvmhv_release_all_nested(struct kvm *kvm);
 long kvmhv_enter_nested_guest(struct kvm_vcpu *vcpu);
 long kvmhv_do_nested_tlbie(struct kvm_vcpu *vcpu);
-int kvmhv_run_single_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu,
+int kvmhv_run_single_vcpu(struct kvm_vcpu *vcpu,
 			  u64 time_limit, unsigned long lpcr);
 void kvmhv_save_hv_regs(struct kvm_vcpu *vcpu, struct hv_guest_state *hr);
 void kvmhv_restore_hv_return_state(struct kvm_vcpu *vcpu,
 				   struct hv_guest_state *hr);
-long int kvmhv_nested_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu);
+long int kvmhv_nested_page_fault(struct kvm_vcpu *vcpu);
 
 void kvmppc_giveup_fac(struct kvm_vcpu *vcpu, ulong fac);
 
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index 337047ba4a56..7e2d061d0445 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -795,7 +795,6 @@ struct kvm_vcpu_arch {
 	struct mmio_hpte_cache_entry *pgfault_cache;
 
 	struct task_struct *run_task;
-	struct kvm_run *kvm_run;
 
 	spinlock_t vpa_update_lock;
 	struct kvmppc_vpa vpa;
diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h
index 94f5a32acaf1..ccf66b3a4c1d 100644
--- a/arch/powerpc/include/asm/kvm_ppc.h
+++ b/arch/powerpc/include/asm/kvm_ppc.h
@@ -58,28 +58,28 @@ enum xlate_readwrite {
 	XLATE_WRITE		/* check for write permissions */
 };
 
-extern int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu);
-extern int __kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu);
+extern int kvmppc_vcpu_run(struct kvm_vcpu *vcpu);
+extern int __kvmppc_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu);
 extern void kvmppc_handler_highmem(void);
 
 extern void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu);
-extern int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
+extern int kvmppc_handle_load(struct kvm_vcpu *vcpu,
                               unsigned int rt, unsigned int bytes,
 			      int is_default_endian);
-extern int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu,
+extern int kvmppc_handle_loads(struct kvm_vcpu *vcpu,
                                unsigned int rt, unsigned int bytes,
 			       int is_default_endian);
-extern int kvmppc_handle_vsx_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
+extern int kvmppc_handle_vsx_load(struct kvm_vcpu *vcpu,
 			unsigned int rt, unsigned int bytes,
 			int is_default_endian, int mmio_sign_extend);
-extern int kvmppc_handle_vmx_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
+extern int kvmppc_handle_vmx_load(struct kvm_vcpu *vcpu,
 		unsigned int rt, unsigned int bytes, int is_default_endian);
-extern int kvmppc_handle_vmx_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
+extern int kvmppc_handle_vmx_store(struct kvm_vcpu *vcpu,
 		unsigned int rs, unsigned int bytes, int is_default_endian);
-extern int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
+extern int kvmppc_handle_store(struct kvm_vcpu *vcpu,
 			       u64 val, unsigned int bytes,
 			       int is_default_endian);
-extern int kvmppc_handle_vsx_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
+extern int kvmppc_handle_vsx_store(struct kvm_vcpu *vcpu,
 			int rs, unsigned int bytes,
 			int is_default_endian);
 
@@ -90,10 +90,9 @@ extern int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
 		     bool data);
 extern int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
 		     bool data);
-extern int kvmppc_emulate_instruction(struct kvm_run *run,
-                                      struct kvm_vcpu *vcpu);
+extern int kvmppc_emulate_instruction(struct kvm_vcpu *vcpu);
 extern int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu);
-extern int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu);
+extern int kvmppc_emulate_mmio(struct kvm_vcpu *vcpu);
 extern void kvmppc_emulate_dec(struct kvm_vcpu *vcpu);
 extern u32 kvmppc_get_dec(struct kvm_vcpu *vcpu, u64 tb);
 extern void kvmppc_decrementer_func(struct kvm_vcpu *vcpu);
@@ -267,7 +266,7 @@ struct kvmppc_ops {
 	void (*vcpu_put)(struct kvm_vcpu *vcpu);
 	void (*inject_interrupt)(struct kvm_vcpu *vcpu, int vec, u64 srr1_flags);
 	void (*set_msr)(struct kvm_vcpu *vcpu, u64 msr);
-	int (*vcpu_run)(struct kvm_run *run, struct kvm_vcpu *vcpu);
+	int (*vcpu_run)(struct kvm_vcpu *vcpu);
 	int (*vcpu_create)(struct kvm_vcpu *vcpu);
 	void (*vcpu_free)(struct kvm_vcpu *vcpu);
 	int (*check_requests)(struct kvm_vcpu *vcpu);
@@ -291,7 +290,7 @@ struct kvmppc_ops {
 	int (*init_vm)(struct kvm *kvm);
 	void (*destroy_vm)(struct kvm *kvm);
 	int (*get_smmu_info)(struct kvm *kvm, struct kvm_ppc_smmu_info *info);
-	int (*emulate_op)(struct kvm_run *run, struct kvm_vcpu *vcpu,
+	int (*emulate_op)(struct kvm_vcpu *vcpu,
 			  unsigned int inst, int *advance);
 	int (*emulate_mtspr)(struct kvm_vcpu *vcpu, int sprn, ulong spr_val);
 	int (*emulate_mfspr)(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val);
diff --git a/arch/powerpc/include/asm/nohash/32/pgtable.h b/arch/powerpc/include/asm/nohash/32/pgtable.h
index af7f13cf90cf..b56f14160ae5 100644
--- a/arch/powerpc/include/asm/nohash/32/pgtable.h
+++ b/arch/powerpc/include/asm/nohash/32/pgtable.h
@@ -28,6 +28,8 @@ extern int icache_44x_need_flush;
 #define PMD_TABLE_SIZE	0
 #define PUD_TABLE_SIZE	0
 #define PGD_TABLE_SIZE	(sizeof(pgd_t) << PGD_INDEX_SIZE)
+
+#define PMD_MASKED_BITS	(PTE_TABLE_SIZE - 1)
 #endif	/* __ASSEMBLY__ */
 
 #define PTRS_PER_PTE	(1 << PTE_INDEX_SIZE)
@@ -203,10 +205,6 @@ static inline void pmd_clear(pmd_t *pmdp)
 	*pmdp = __pmd(0);
 }
 
-
-/* to find an entry in a kernel page-table-directory */
-#define pgd_offset_k(address)	pgd_offset(&init_mm, address)
-
 /* to find an entry in a page-table-directory */
 #define pgd_index(address)	((address) >> PGDIR_SHIFT)
 #define pgd_offset(mm, address)	((mm)->pgd + pgd_index(address))
@@ -330,8 +328,6 @@ static inline int pte_young(pte_t pte)
  * of the pte page.  -- paulus
  */
 #ifndef CONFIG_BOOKE
-#define pmd_page_vaddr(pmd)	\
-	((unsigned long)__va(pmd_val(pmd) & ~(PTE_TABLE_SIZE - 1)))
 #define pmd_page(pmd)		\
 	pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT)
 #else
@@ -341,15 +337,6 @@ static inline int pte_young(pte_t pte)
 	pfn_to_page((__pa(pmd_val(pmd)) >> PAGE_SHIFT))
 #endif
 
-/* Find an entry in the third-level page table.. */
-#define pte_index(address)		\
-	(((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
-#define pte_offset_kernel(dir, addr)	\
-	(pmd_bad(*(dir)) ? NULL : (pte_t *)pmd_page_vaddr(*(dir)) + \
-				  pte_index(addr))
-#define pte_offset_map(dir, addr)	pte_offset_kernel((dir), (addr))
-static inline void pte_unmap(pte_t *pte) { }
-
 /*
  * Encode and decode a swap entry.
  * Note that the bits we use in a PTE for representing a swap entry
diff --git a/arch/powerpc/include/asm/nohash/64/pgtable-4k.h b/arch/powerpc/include/asm/nohash/64/pgtable-4k.h
index 81b1c54e3cf1..fe2f4c9acd9e 100644
--- a/arch/powerpc/include/asm/nohash/64/pgtable-4k.h
+++ b/arch/powerpc/include/asm/nohash/64/pgtable-4k.h
@@ -78,10 +78,6 @@ extern struct page *p4d_page(p4d_t p4d);
 
 #endif /* !__ASSEMBLY__ */
 
-#define pud_offset(p4dp, addr)	\
-	(((pud_t *) p4d_page_vaddr(*(p4dp))) + \
-	 (((addr) >> PUD_SHIFT) & (PTRS_PER_PUD - 1)))
-
 #define pud_ERROR(e) \
 	pr_err("%s:%d: bad pud %08lx.\n", __FILE__, __LINE__, pud_val(e))
 
diff --git a/arch/powerpc/include/asm/nohash/64/pgtable.h b/arch/powerpc/include/asm/nohash/64/pgtable.h
index 3424381b81da..6cb8aa357191 100644
--- a/arch/powerpc/include/asm/nohash/64/pgtable.h
+++ b/arch/powerpc/include/asm/nohash/64/pgtable.h
@@ -182,28 +182,6 @@ static inline void p4d_set(p4d_t *p4dp, unsigned long val)
 	*p4dp = __p4d(val);
 }
 
-/*
- * Find an entry in a page-table-directory.  We combine the address region
- * (the high order N bits) and the pgd portion of the address.
- */
-#define pgd_index(address) (((address) >> (PGDIR_SHIFT)) & (PTRS_PER_PGD - 1))
-
-#define pgd_offset(mm, address)	((mm)->pgd + pgd_index(address))
-
-#define pmd_offset(pudp,addr) \
-	(((pmd_t *) pud_page_vaddr(*(pudp))) + (((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1)))
-
-#define pte_offset_kernel(dir,addr) \
-	(((pte_t *) pmd_page_vaddr(*(dir))) + (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)))
-
-#define pte_offset_map(dir,addr)	pte_offset_kernel((dir), (addr))
-
-static inline void pte_unmap(pte_t *pte) { }
-
-/* to find an entry in a kernel page-table-directory */
-/* This now only contains the vmalloc pages */
-#define pgd_offset_k(address)	pgd_offset(&init_mm, address)
-
 /* Atomic PTE updates */
 static inline unsigned long pte_update(struct mm_struct *mm,
 				       unsigned long addr,
diff --git a/arch/powerpc/include/asm/nohash/pgtable.h b/arch/powerpc/include/asm/nohash/pgtable.h
index 50a4b0bb8d16..4b7c3472eab1 100644
--- a/arch/powerpc/include/asm/nohash/pgtable.h
+++ b/arch/powerpc/include/asm/nohash/pgtable.h
@@ -56,7 +56,7 @@ static inline bool pte_exec(pte_t pte) { return pte_val(pte) & _PAGE_EXEC; }
 #ifdef CONFIG_NUMA_BALANCING
 /*
  * These work without NUMA balancing but the kernel does not care. See the
- * comment in include/asm-generic/pgtable.h . On powerpc, this will only
+ * comment in include/linux/pgtable.h . On powerpc, this will only
  * work for user pages and always return true for kernel pages.
  */
 static inline int pte_protnone(pte_t pte)
diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
index ae58b524a924..f7613f43c9cf 100644
--- a/arch/powerpc/include/asm/pgtable.h
+++ b/arch/powerpc/include/asm/pgtable.h
@@ -41,25 +41,6 @@ struct mm_struct;
 
 #ifndef __ASSEMBLY__
 
-#ifdef CONFIG_PPC32
-static inline pmd_t *pmd_ptr(struct mm_struct *mm, unsigned long va)
-{
-	return pmd_offset(pud_offset(p4d_offset(pgd_offset(mm, va), va), va), va);
-}
-
-static inline pmd_t *pmd_ptr_k(unsigned long va)
-{
-	return pmd_offset(pud_offset(p4d_offset(pgd_offset_k(va), va), va), va);
-}
-
-static inline pte_t *virt_to_kpte(unsigned long vaddr)
-{
-	pmd_t *pmd = pmd_ptr_k(vaddr);
-
-	return pmd_none(*pmd) ? NULL : pte_offset_kernel(pmd, vaddr);
-}
-#endif
-
 #include <asm/tlbflush.h>
 
 /* Keep these as a macros to avoid include dependency mess */
@@ -76,6 +57,13 @@ static inline pgprot_t pte_pgprot(pte_t pte)
 	return __pgprot(pte_flags);
 }
 
+#ifndef pmd_page_vaddr
+static inline unsigned long pmd_page_vaddr(pmd_t pmd)
+{
+	return ((unsigned long)__va(pmd_val(pmd) & ~PMD_MASKED_BITS));
+}
+#define pmd_page_vaddr pmd_page_vaddr
+#endif
 /*
  * ZERO_PAGE is a global shared page that is always zero: used
  * for zero-mapped memory areas etc..
@@ -96,8 +84,6 @@ extern unsigned long ioremap_bot;
  */
 #define kern_addr_valid(addr)	(1)
 
-#include <asm-generic/pgtable.h>
-
 #ifndef CONFIG_TRANSPARENT_HUGEPAGE
 #define pmd_large(pmd)		0
 #endif
diff --git a/arch/powerpc/include/asm/pkeys.h b/arch/powerpc/include/asm/pkeys.h
index 20ebf153c871..2fe6cae14d10 100644
--- a/arch/powerpc/include/asm/pkeys.h
+++ b/arch/powerpc/include/asm/pkeys.h
@@ -101,7 +101,7 @@ static inline bool mm_pkey_is_allocated(struct mm_struct *mm, int pkey)
 
 /*
  * Returns a positive, 5-bit key on success, or -1 on failure.
- * Relies on the mmap_sem to protect against concurrency in mm_pkey_alloc() and
+ * Relies on the mmap_lock to protect against concurrency in mm_pkey_alloc() and
  * mm_pkey_free().
  */
 static inline int mm_pkey_alloc(struct mm_struct *mm)
diff --git a/arch/powerpc/include/asm/tlb.h b/arch/powerpc/include/asm/tlb.h
index 7f3a8b902325..862985cf5180 100644
--- a/arch/powerpc/include/asm/tlb.h
+++ b/arch/powerpc/include/asm/tlb.h
@@ -10,7 +10,7 @@
 #ifdef __KERNEL__
 
 #ifndef __powerpc64__
-#include <asm/pgtable.h>
+#include <linux/pgtable.h>
 #endif
 #include <asm/pgalloc.h>
 #ifndef __powerpc64__