path: root/arch/powerpc/include/asm/book3s/64/radix.h
#ifndef _ASM_POWERPC_PGTABLE_RADIX_H
#define _ASM_POWERPC_PGTABLE_RADIX_H

#ifndef __ASSEMBLY__
#include <asm/cmpxchg.h>
#endif

#ifdef CONFIG_PPC_64K_PAGES
#include <asm/book3s/64/radix-64k.h>
#else
#include <asm/book3s/64/radix-4k.h>
#endif

/* An empty PTE can still have the R or C bits set by hardware writeback */
#define RADIX_PTE_NONE_MASK		(_PAGE_DIRTY | _PAGE_ACCESSED)

/* Bits to set in a radix PMD/PUD/PGD entry */
#define RADIX_PMD_VAL_BITS		(0x8000000000000000UL | RADIX_PTE_INDEX_SIZE)
#define RADIX_PUD_VAL_BITS		(0x8000000000000000UL | RADIX_PMD_INDEX_SIZE)
#define RADIX_PGD_VAL_BITS		(0x8000000000000000UL | RADIX_PUD_INDEX_SIZE)
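
/*
 * Illustrative sketch: in each of these values bit 63 marks the entry
 * valid and the low bits carry the index size of the next level table.
 * With the 64K config (RADIX_PTE_INDEX_SIZE == 5), populating a PMD
 * with a PTE table at physical address pa would therefore store
 *
 *	pa | RADIX_PMD_VAL_BITS	== pa | 0x8000000000000005UL
 */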

/* An entry is bad if it has anything set in the reserved or leaf bits */
#define RADIX_PMD_BAD_BITS		0x60000000000000e0UL
#define RADIX_PUD_BAD_BITS		0x60000000000000e0UL
#define RADIX_PGD_BAD_BITS		0x60000000000000e0UL

/*
 * Size of EA range mapped by our pagetables.
 */
#define RADIX_PGTABLE_EADDR_SIZE (RADIX_PTE_INDEX_SIZE + RADIX_PMD_INDEX_SIZE +	\
			      RADIX_PUD_INDEX_SIZE + RADIX_PGD_INDEX_SIZE + PAGE_SHIFT)
#define RADIX_PGTABLE_RANGE (ASM_CONST(1) << RADIX_PGTABLE_EADDR_SIZE)
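
/*
 * Worked example: the 4K config has index sizes 9/9/9/13 with
 * PAGE_SHIFT == 12, i.e. 9 + 9 + 9 + 13 + 12 = 52 bits; the 64K config
 * (5/9/9/13, PAGE_SHIFT == 16) gives 5 + 9 + 9 + 13 + 16 = 52 as well,
 * so RADIX_PGTABLE_RANGE is 1UL << 52 either way.
 */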

/*
 * We support a 52-bit address space.  The top two address bits select
 * the quadrant; the kernel virtual mapping lives in the top (0b11)
 * quadrant and must fit within it.
 *
 *           +------------------+
 *           +------------------+  Kernel virtual map (0xc008000000000000)
 *           |                  |
 *           |                  |
 *           |                  |
 * 0b11......+------------------+  Kernel linear map (0xc....)
 *           |                  |
 *           |    quadrant 2    |
 *           |                  |
 * 0b10......+------------------+
 *           |                  |
 *           |    quadrant 1    |
 *           |                  |
 * 0b01......+------------------+
 *           |                  |
 *           |    quadrant 0    |
 *           |                  |
 * 0b00......+------------------+
 *
 *
 * Quadrant 3 (kernel) expanded:
 * +------------------------------+
 * |                              |
 * |                              |
 * |                              |
 * +------------------------------+  Kernel IO map end (0xc010000000000000)
 * |                              |
 * |                              |
 * |      1/2 of virtual map      |
 * |                              |
 * |                              |
 * +------------------------------+  Kernel IO map start
 * |                              |
 * |      1/4 of virtual map      |
 * |                              |
 * +------------------------------+  Kernel vmemmap start
 * |                              |
 * |     1/4 of virtual map       |
 * |                              |
 * +------------------------------+  Kernel virt start (0xc008000000000000)
 * |                              |
 * |                              |
 * |                              |
 * +------------------------------+  Kernel linear (0xc.....)
 */

#define RADIX_KERN_VIRT_START ASM_CONST(0xc008000000000000)
#define RADIX_KERN_VIRT_SIZE  ASM_CONST(0x0008000000000000)

/*
 * The vmalloc space starts at the beginning of that region and
 * occupies a quarter of it with the radix config
 * (another quarter is kept for the virtual memmap).
 */
#define RADIX_VMALLOC_START	RADIX_KERN_VIRT_START
#define RADIX_VMALLOC_SIZE	(RADIX_KERN_VIRT_SIZE >> 2)
#define RADIX_VMALLOC_END	(RADIX_VMALLOC_START + RADIX_VMALLOC_SIZE)
/*
 * Defines the base address of the vmemmap area, which on radix sits
 * directly after the vmalloc space in the kernel virtual map region.
 */
#define RADIX_VMEMMAP_BASE		(RADIX_VMALLOC_END)
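
/*
 * Plugging in the constants above:
 *
 *	RADIX_VMALLOC_START = 0xc008000000000000
 *	RADIX_VMALLOC_SIZE  = 0x0008000000000000 >> 2 = 0x0002000000000000
 *	RADIX_VMALLOC_END   = 0xc00a000000000000
 *	RADIX_VMEMMAP_BASE  = 0xc00a000000000000
 */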

#ifndef __ASSEMBLY__
#define RADIX_PTE_TABLE_SIZE	(sizeof(pte_t) << RADIX_PTE_INDEX_SIZE)
#define RADIX_PMD_TABLE_SIZE	(sizeof(pmd_t) << RADIX_PMD_INDEX_SIZE)
#define RADIX_PUD_TABLE_SIZE	(sizeof(pud_t) << RADIX_PUD_INDEX_SIZE)
#define RADIX_PGD_TABLE_SIZE	(sizeof(pgd_t) << RADIX_PGD_INDEX_SIZE)
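
/*
 * For example, with the 4K config the PGD index size of 13 makes the
 * top-level table sizeof(pgd_t) << 13 = 8 << 13 = 64KB, while the
 * 9-bit PTE/PMD/PUD levels are 8 << 9 = 4KB each.
 */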

static inline unsigned long radix__pte_update(struct mm_struct *mm,
					unsigned long addr,
					pte_t *ptep, unsigned long clr,
					unsigned long set,
					int huge)
{
	pte_t pte;
	unsigned long old_pte, new_pte;

	do {
		pte = READ_ONCE(*ptep);
		old_pte = pte_val(pte);
		new_pte = (old_pte | set) & ~clr;

	} while (!pte_xchg(ptep, __pte(old_pte), __pte(new_pte)));

	/* We already do a sync in cmpxchg, is ptesync needed? */
	asm volatile("ptesync" : : : "memory");
	/* huge pages use the old page table lock */
	if (!huge)
		assert_pte_locked(mm, addr);

	return old_pte;
}
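
/*
 * Illustrative use (a sketch, not part of this header): a
 * get-and-clear operation can be built on top of this by clearing
 * every bit in one atomic step:
 *
 *	old_val = radix__pte_update(mm, addr, ptep, ~0UL, 0, 0);
 */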

/*
 * Set the dirty and/or accessed bits atomically in a Linux PTE.  Since
 * this only ever sets bits (access is never reduced), no TLB
 * invalidation is required.
 */
static inline void radix__ptep_set_access_flags(pte_t *ptep, pte_t entry)
{
	pte_t pte;
	unsigned long old_pte, new_pte;
	unsigned long set = pte_val(entry) & (_PAGE_DIRTY | _PAGE_ACCESSED |
					      _PAGE_RW | _PAGE_EXEC);
	do {
		pte = READ_ONCE(*ptep);
		old_pte = pte_val(pte);
		new_pte = old_pte | set;

	} while (!pte_xchg(ptep, __pte(old_pte), __pte(new_pte)));

	/* We already do a sync in cmpxchg, is ptesync needed? */
	asm volatile("ptesync" : : : "memory");
}

static inline int radix__pte_same(pte_t pte_a, pte_t pte_b)
{
	return ((pte_raw(pte_a) ^ pte_raw(pte_b)) == 0);
}

static inline int radix__pte_none(pte_t pte)
{
	return (pte_val(pte) & ~RADIX_PTE_NONE_MASK) == 0;
}
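
/*
 * For example, a PTE that was cleared but then had _PAGE_ACCESSED
 * written back by hardware still counts as none, because the R/C bits
 * are masked off before the comparison:
 *
 *	radix__pte_none(__pte(_PAGE_ACCESSED))	returns 1
 *	radix__pte_none(__pte(_PAGE_PRESENT))	returns 0
 */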

static inline void radix__set_pte_at(struct mm_struct *mm, unsigned long addr,
				 pte_t *ptep, pte_t pte, int percpu)
{
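	/*
	 * The percpu argument is unused on radix; the ptesync makes the
	 * PTE store visible to the hardware table walker before we
	 * return.
	 */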
	*ptep = pte;
	asm volatile("ptesync" : : : "memory");
}

static inline int radix__pmd_bad(pmd_t pmd)
{
	return !!(pmd_val(pmd) & RADIX_PMD_BAD_BITS);
}

static inline int radix__pmd_same(pmd_t pmd_a, pmd_t pmd_b)
{
	return ((pmd_raw(pmd_a) ^ pmd_raw(pmd_b)) == 0);
}

static inline int radix__pud_bad(pud_t pud)
{
	return !!(pud_val(pud) & RADIX_PUD_BAD_BITS);
}

static inline int radix__pgd_bad(pgd_t pgd)
{
	return !!(pgd_val(pgd) & RADIX_PGD_BAD_BITS);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

static inline int radix__pmd_trans_huge(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_PTE);
}

static inline pmd_t radix__pmd_mkhuge(pmd_t pmd)
{
	return __pmd(pmd_val(pmd) | _PAGE_PTE);
}
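
/*
 * Illustrative sketch (not part of this header): a transparent huge
 * page is a leaf entry at the PMD level, marked by _PAGE_PTE.  A
 * caller building such an entry might do, assuming the usual
 * mk_pmd()/set_pmd_at() helpers:
 *
 *	pmd_t pmd = radix__pmd_mkhuge(mk_pmd(page, vma->vm_page_prot));
 *	set_pmd_at(mm, addr, pmdp, pmd);
 */
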
static inline void radix__pmdp_huge_split_prepare(struct vm_area_struct *vma,
					    unsigned long address, pmd_t *pmdp)
{
	/* Nothing to do for radix. */
}

extern unsigned long radix__pmd_hugepage_update(struct mm_struct *mm, unsigned long addr,
					  pmd_t *pmdp, unsigned long clr,
					  unsigned long set);
extern pmd_t radix__pmdp_collapse_flush(struct vm_area_struct *vma,
				  unsigned long address, pmd_t *pmdp);
extern void radix__pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
					pgtable_t pgtable);
extern pgtable_t radix__pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);
extern pmd_t radix__pmdp_huge_get_and_clear(struct mm_struct *mm,
				      unsigned long addr, pmd_t *pmdp);
extern int radix__has_transparent_hugepage(void);
#endif

extern int __meminit radix__vmemmap_create_mapping(unsigned long start,
					     unsigned long page_size,
					     unsigned long phys);
extern void radix__vmemmap_remove_mapping(unsigned long start,
				    unsigned long page_size);

extern int radix__map_kernel_page(unsigned long ea, unsigned long pa,
				 pgprot_t flags, unsigned int psz);
#endif /* __ASSEMBLY__ */
#endif /* _ASM_POWERPC_PGTABLE_RADIX_H */