/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* (C) 2001 - 2013 Tensilica Inc.
 */

#ifndef _XTENSA_CACHEFLUSH_H
#define _XTENSA_CACHEFLUSH_H

#include <linux/mm.h>
#include <asm/processor.h>
#include <asm/page.h>

/*
 * Low-level routines for cache flushing.
*
* invalidate data or instruction cache:
*
* __invalidate_icache_all()
* __invalidate_icache_page(adr)
* __invalidate_dcache_page(adr)
* __invalidate_icache_range(from,size)
* __invalidate_dcache_range(from,size)
*
* flush data cache:
*
* __flush_dcache_page(adr)
*
* flush and invalidate data cache:
*
* __flush_invalidate_dcache_all()
* __flush_invalidate_dcache_page(adr)
* __flush_invalidate_dcache_range(from,size)
*
* specials for cache aliasing:
*
* __flush_invalidate_dcache_page_alias(vaddr,paddr)
* __invalidate_dcache_page_alias(vaddr,paddr)
* __invalidate_icache_page_alias(vaddr,paddr)
*/
extern void __invalidate_dcache_all(void);
extern void __invalidate_icache_all(void);
extern void __invalidate_dcache_page(unsigned long);
extern void __invalidate_icache_page(unsigned long);
extern void __invalidate_icache_range(unsigned long, unsigned long);
extern void __invalidate_dcache_range(unsigned long, unsigned long);
#if XCHAL_DCACHE_IS_WRITEBACK
extern void __flush_invalidate_dcache_all(void);
extern void __flush_dcache_page(unsigned long);
extern void __flush_dcache_range(unsigned long, unsigned long);
extern void __flush_invalidate_dcache_page(unsigned long);
extern void __flush_invalidate_dcache_range(unsigned long, unsigned long);
#else
# define __flush_dcache_range(p, s)		do { } while (0)
# define __flush_dcache_page(p)			do { } while (0)
# define __flush_invalidate_dcache_page(p)	__invalidate_dcache_page(p)
# define __flush_invalidate_dcache_range(p, s)	__invalidate_dcache_range(p, s)
#endif
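
/*
 * A minimal sketch of the intended call pattern for the range routines
 * above (the helper names and the DMA framing are illustrative, not
 * part of this interface): write dirty lines back before a device reads
 * a buffer, and drop stale lines before the CPU reads what a device
 * wrote.
 */
static inline void __example_dma_to_device(unsigned long vaddr,
					   unsigned long size)
{
	/* push CPU-written data to memory so the device sees it */
	__flush_dcache_range(vaddr, size);
}

static inline void __example_dma_from_device(unsigned long vaddr,
					     unsigned long size)
{
	/* discard possibly stale cached copies of the DMA'd region */
	__invalidate_dcache_range(vaddr, size);
}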
#if defined(CONFIG_MMU) && (DCACHE_WAY_SIZE > PAGE_SIZE)
extern void __flush_invalidate_dcache_page_alias(unsigned long, unsigned long);
extern void __invalidate_dcache_page_alias(unsigned long, unsigned long);
#else
static inline void __flush_invalidate_dcache_page_alias(unsigned long virt,
unsigned long phys) { }
#endif
#if defined(CONFIG_MMU) && (ICACHE_WAY_SIZE > PAGE_SIZE)
extern void __invalidate_icache_page_alias(unsigned long, unsigned long);
#else
static inline void __invalidate_icache_page_alias(unsigned long virt,
unsigned long phys) { }
#endif
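
/*
 * A sketch of how the alias entry points are used (the helper name and
 * the 'window' scratch mapping are illustrative assumptions, in the
 * spirit of the kernel's temporary-mapping area): the vaddr argument
 * selects the cache index ('color'), while the paddr argument supplies
 * the physical tag to match.
 */
static inline void __example_flush_user_alias(unsigned long uvaddr,
					      unsigned long paddr,
					      unsigned long window)
{
	unsigned long color = uvaddr & (DCACHE_WAY_SIZE - 1) & PAGE_MASK;

	/* clean+invalidate the lines the mapping at 'uvaddr' occupied */
	__flush_invalidate_dcache_page_alias(window + color, paddr);
}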
/*
 * The caches are physically tagged, so there is nothing to do here
 * unless the configuration suffers from cache aliasing.
 *
 * Pages can get remapped.  Because remapping may change the 'color' of
 * a page, we have to flush the cache before the PTE is changed (see
 * also Documentation/cachetlb.txt).
 */
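
/*
 * Worked example of 'color' (illustrative numbers): with 4 KiB pages
 * and an 8 KiB dcache way there are two page colors, distinguished by
 * bit 12 of the virtual address.  Remapping a page to a virtual address
 * of the other color would leave its old cache lines behind under the
 * old index, hence the flush before the PTE changes.
 */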
#if (DCACHE_WAY_SIZE > PAGE_SIZE) || defined(CONFIG_SMP)
#ifdef CONFIG_SMP
void flush_cache_all(void);
void flush_cache_range(struct vm_area_struct*, ulong, ulong);
void flush_icache_range(unsigned long start, unsigned long end);
void flush_cache_page(struct vm_area_struct*,
unsigned long, unsigned long);
#else
#define flush_cache_all local_flush_cache_all
#define flush_cache_range local_flush_cache_range
#define flush_icache_range local_flush_icache_range
#define flush_cache_page local_flush_cache_page
#endif
#define local_flush_cache_all() \
do { \
__flush_invalidate_dcache_all(); \
__invalidate_icache_all(); \
} while (0)
#define flush_cache_mm(mm) flush_cache_all()
#define flush_cache_dup_mm(mm) flush_cache_mm(mm)
#define flush_cache_vmap(start,end) flush_cache_all()
#define flush_cache_vunmap(start,end) flush_cache_all()
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
extern void flush_dcache_page(struct page*);
void local_flush_cache_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end);
void local_flush_cache_page(struct vm_area_struct *vma,
unsigned long address, unsigned long pfn);
#else
#define flush_cache_all() do { } while (0)
#define flush_cache_mm(mm) do { } while (0)
#define flush_cache_dup_mm(mm) do { } while (0)
#define flush_cache_vmap(start,end) do { } while (0)
#define flush_cache_vunmap(start,end) do { } while (0)
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
#define flush_dcache_page(page) do { } while (0)
#define flush_icache_range local_flush_icache_range
#define flush_cache_page(vma, addr, pfn) do { } while (0)
#define flush_cache_range(vma, start, end) do { } while (0)
#endif
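
/*
 * In short: with cache aliasing or SMP the flush_cache_* hooks above do
 * real work; on a non-aliasing uniprocessor the physically tagged
 * caches make them no-ops, and only the D/I coherency maintenance
 * below remains.
 */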
/* Ensure consistency between data and instruction cache. */
#define local_flush_icache_range(start, end)				\
	do {								\
		__flush_dcache_range((start), (end) - (start));		\
		__invalidate_icache_range((start), (end) - (start));	\
	} while (0)
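
/*
 * A sketch of the classic use (the helper name is illustrative, and a
 * word-sized patch is assumed, ignoring xtensa's variable-length
 * instruction encodings): after storing code through the data cache,
 * make it visible to instruction fetch.
 */
static inline void __example_patch_insn(u32 *dst, u32 insn)
{
	*dst = insn;
	flush_icache_range((unsigned long)dst,
			   (unsigned long)dst + sizeof(*dst));
}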
/* This is not required; see Documentation/cachetlb.txt */
#define flush_icache_page(vma,page) do { } while (0)
#define flush_dcache_mmap_lock(mapping) do { } while (0)
#define flush_dcache_mmap_unlock(mapping) do { } while (0)
#if (DCACHE_WAY_SIZE > PAGE_SIZE)
extern void copy_to_user_page(struct vm_area_struct*, struct page*,
unsigned long, void*, const void*, unsigned long);
extern void copy_from_user_page(struct vm_area_struct*, struct page*,
unsigned long, void*, const void*, unsigned long);
#else
#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
do { \
memcpy(dst, src, len); \
__flush_dcache_range((unsigned long) dst, len); \
__invalidate_icache_range((unsigned long) dst, len); \
} while (0)
#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
memcpy(dst, src, len)
#endif
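
/*
 * A sketch of the intended caller (the helper name is illustrative and
 * a lowmem page is assumed, so page_address() is valid): this is the
 * path ptrace-style pokes take, and it must leave the icache coherent
 * for the traced process.
 */
static inline void __example_poke_user_page(struct vm_area_struct *vma,
					    struct page *page,
					    unsigned long uaddr,
					    const void *src,
					    unsigned long len)
{
	void *dst = (char *)page_address(page) + (uaddr & ~PAGE_MASK);

	copy_to_user_page(vma, page, uaddr, dst, src, len);
}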
#define XTENSA_CACHEBLK_LOG2 29
#define XTENSA_CACHEBLK_SIZE (1 << XTENSA_CACHEBLK_LOG2)
#define XTENSA_CACHEBLK_MASK (7 << XTENSA_CACHEBLK_LOG2)
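
/*
 * That is: 1 << 29 = 0x20000000, so each block covers 512 MB; eight
 * blocks span the 4 GB address space, and the mask 7 << 29 = 0xe0000000
 * extracts the block index from an address's top three bits.
 */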
#if XCHAL_HAVE_CACHEATTR
static inline u32 xtensa_get_cacheattr(void)
{
u32 r;
asm volatile(" rsr %0, cacheattr" : "=a"(r));
return r;
}
static inline u32 xtensa_get_dtlb1(u32 addr)
{
u32 r = addr & XTENSA_CACHEBLK_MASK;
return r | ((xtensa_get_cacheattr() >> (r >> (XTENSA_CACHEBLK_LOG2-2)))
& 0xF);
}
#else
static inline u32 xtensa_get_dtlb1(u32 addr)
{
u32 r;
asm volatile(" rdtlb1 %0, %1" : "=a"(r) : "a"(addr));
asm volatile(" dsync");
return r;
}
static inline u32 xtensa_get_cacheattr(void)
{
u32 r = 0;
u32 a = 0;
do {
a -= XTENSA_CACHEBLK_SIZE;
r = (r << 4) | (xtensa_get_dtlb1(a) & 0xF);
} while (a);
return r;
}
#endif
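
/*
 * Either way the result has the same layout: eight 4-bit attribute
 * fields, one per 512 MB block, with block 0 (addresses 0-0x1fffffff)
 * in the least significant nibble.  The second variant reconstructs it
 * by probing the data TLB once per block, highest block first.
 */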
static inline int xtensa_need_flush_dma_source(u32 addr)
{
	/* cache attribute values of 4 and above are treated as cacheable */
	return (xtensa_get_dtlb1(addr) & ((1 << XCHAL_CA_BITS) - 1)) >= 4;
}

static inline int xtensa_need_invalidate_dma_destination(u32 addr)
{
	/* attribute 2 is the only mode treated as cache-bypassing */
	return (xtensa_get_dtlb1(addr) & ((1 << XCHAL_CA_BITS) - 1)) != 2;
}
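
/* Write back, without invalidating, every dcache line in [addr, addr+size). */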
static inline void flush_dcache_unaligned(u32 addr, u32 size)
{
u32 cnt;
if (size) {
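		/* lines touched, counting any partial first line */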
cnt = (size + ((XCHAL_DCACHE_LINESIZE - 1) & addr)
+ XCHAL_DCACHE_LINESIZE - 1) / XCHAL_DCACHE_LINESIZE;
while (cnt--) {
asm volatile(" dhwb %0, 0" : : "a"(addr));
addr += XCHAL_DCACHE_LINESIZE;
}
asm volatile(" dsync");
}
}
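
/*
 * Invalidate every dcache line in [addr, addr+size).  The partial first
 * and last lines are written back and invalidated (dhwbi) so unrelated
 * data sharing those lines is not lost; interior lines are simply
 * discarded (dhi).
 */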
static inline void invalidate_dcache_unaligned(u32 addr, u32 size)
{
int cnt;
if (size) {
asm volatile(" dhwbi %0, 0 ;" : : "a"(addr));
cnt = (size + ((XCHAL_DCACHE_LINESIZE - 1) & addr)
- XCHAL_DCACHE_LINESIZE - 1) / XCHAL_DCACHE_LINESIZE;
while (cnt-- > 0) {
asm volatile(" dhi %0, %1" : : "a"(addr),
"n"(XCHAL_DCACHE_LINESIZE));
addr += XCHAL_DCACHE_LINESIZE;
}
asm volatile(" dhwbi %0, %1" : : "a"(addr),
"n"(XCHAL_DCACHE_LINESIZE));
asm volatile(" dsync");
}
}
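
/* Write back and invalidate every dcache line in [addr, addr+size). */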
static inline void flush_invalidate_dcache_unaligned(u32 addr, u32 size)
{
u32 cnt;
if (size) {
cnt = (size + ((XCHAL_DCACHE_LINESIZE - 1) & addr)
+ XCHAL_DCACHE_LINESIZE - 1) / XCHAL_DCACHE_LINESIZE;
while (cnt--) {
asm volatile(" dhwbi %0, 0" : : "a"(addr));
addr += XCHAL_DCACHE_LINESIZE;
}
asm volatile(" dsync");
}
}
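
/*
 * A sketch tying the helpers above together (the function name and the
 * direction flag are illustrative, not an existing interface): a
 * driver-style cache sync for a buffer handed to, or just filled by,
 * a device.
 */
static inline void __example_dma_cache_sync(u32 addr, u32 size,
					    int to_device)
{
	if (to_device) {
		/* device will read the buffer: push dirty lines out */
		if (xtensa_need_flush_dma_source(addr))
			flush_dcache_unaligned(addr, size);
	} else {
		/* device wrote the buffer: drop stale cached copies */
		if (xtensa_need_invalidate_dma_destination(addr))
			invalidate_dcache_unaligned(addr, size);
	}
}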
#endif /* _XTENSA_CACHEFLUSH_H */