/*
* Copyright (C) 2006 Atmark Techno, Inc.
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/

#ifndef _ASM_MICROBLAZE_SYSTEM_H
#define _ASM_MICROBLAZE_SYSTEM_H

#include <asm/registers.h>
#include <asm/setup.h>
#include <asm/irqflags.h>
#include <asm/cache.h>

#include <asm-generic/cmpxchg.h>
#include <asm-generic/cmpxchg-local.h>

/* The context switch path runs with interrupts enabled on this port. */
#define __ARCH_WANT_INTERRUPTS_ON_CTXSW

struct task_struct;
struct thread_info;

extern struct task_struct *_switch_to(struct thread_info *prev,
				struct thread_info *next);
#define switch_to(prev, next, last)					\
	do {								\
		(last) = _switch_to(task_thread_info(prev),		\
					task_thread_info(next));	\
	} while (0)
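
/*
 * Illustrative sketch (an assumption, not part of this header): the core
 * scheduler's context_switch() is the usual caller of this macro, roughly
 *
 *	switch_to(prev, next, prev);
 *
 * where, on return, the last argument has been updated to the task that
 * was actually switched away from.
 */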
#define smp_read_barrier_depends()	do {} while (0)
#define read_barrier_depends()		do {} while (0)

#define nop()			asm volatile ("nop")

/* All memory barriers on this port reduce to a compiler barrier. */
#define mb()			barrier()
#define rmb()			mb()
#define wmb()			mb()
#define set_mb(var, value)	do { var = value; mb(); } while (0)
#define set_wmb(var, value)	do { var = value; wmb(); } while (0)

#define smp_mb()		mb()
#define smp_rmb()		rmb()
#define smp_wmb()		wmb()
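
/*
 * Illustrative sketch (not from this file): the usual producer/consumer
 * pairing for these barriers. "shared_data", "flag", "compute()" and
 * "consume()" are hypothetical names.
 *
 *	producer:
 *		shared_data = compute();
 *		smp_wmb();		order the data store before the flag store
 *		flag = 1;
 *
 *	consumer:
 *		while (!flag)
 *			cpu_relax();
 *		smp_rmb();		order the flag load before the data load
 *		consume(shared_data);
 */
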
void show_trace(struct task_struct *task, unsigned long *stack);
void __bad_xchg(volatile void *ptr, int size);
/*
 * Exchange *ptr with x. Interrupts are disabled around the read and the
 * write, which is enough to make the swap atomic on this port (no SMP).
 * Unsupported sizes fall through to __bad_xchg().
 */
static inline unsigned long __xchg(unsigned long x, volatile void *ptr,
								int size)
{
	unsigned long ret;
	unsigned long flags;

	switch (size) {
	case 1:
		local_irq_save(flags);
		ret = *(volatile unsigned char *)ptr;
		*(volatile unsigned char *)ptr = x;
		local_irq_restore(flags);
		break;
	case 4:
		local_irq_save(flags);
		ret = *(volatile unsigned long *)ptr;
		*(volatile unsigned long *)ptr = x;
		local_irq_restore(flags);
		break;
	default:
		__bad_xchg(ptr, size);
		ret = 0;
		break;
	}

	return ret;
}
void disable_hlt(void);
void enable_hlt(void);
void default_idle(void);

#define xchg(ptr, x) \
	((__typeof__(*(ptr))) __xchg((unsigned long)(x), (ptr), sizeof(*(ptr))))
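
/*
 * Illustrative sketch (not from this file): xchg() used as a simple
 * test-and-set style flag. "busy" and the -EBUSY handling are hypothetical.
 *
 *	static unsigned long busy;
 *
 *	if (xchg(&busy, 1) != 0)
 *		return -EBUSY;		already claimed by someone else
 *	...
 *	xchg(&busy, 0);			release
 */
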
void free_init_pages(char *what, unsigned long begin, unsigned long end);
void free_initmem(void);
extern char *klimit;
extern void ret_from_fork(void);

extern void *alloc_maybe_bootmem(size_t size, gfp_t mask);
extern void *zalloc_maybe_bootmem(size_t size, gfp_t mask);

#ifdef CONFIG_DEBUG_FS
extern struct dentry *of_debugfs_root;
#endif

#define arch_align_stack(x) (x)

/*
 * MicroBlaze doesn't handle unaligned accesses in hardware.
 *
 * Because of this we force IP header alignment in network drivers, and we
 * make NET_SKB_PAD a full cacheline so that buffers stay cacheline aligned.
 */
#define NET_IP_ALIGN	2
#define NET_SKB_PAD	L1_CACHE_BYTES
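
/*
 * Illustrative sketch (an assumption, not from this file): a driver receive
 * path honouring NET_IP_ALIGN. "dev" and "len" are hypothetical names.
 *
 *	struct sk_buff *skb = netdev_alloc_skb(dev, len + NET_IP_ALIGN);
 *	if (skb)
 *		skb_reserve(skb, NET_IP_ALIGN);
 *
 * Reserving the two extra bytes puts the IP header on a 32-bit boundary
 * behind the 14-byte Ethernet header.
 */
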
#endif /* _ASM_MICROBLAZE_SYSTEM_H */