path: root/arch/c6x/include/asm/processor.h

/*
 *  Port on Texas Instruments TMS320C6x architecture
 *
 *  Copyright (C) 2004, 2009, 2010, 2011 Texas Instruments Incorporated
 *  Author: Aurelien Jacquiot (aurelien.jacquiot@jaluna.com)
 *
 *  Updated for 2.6.34: Mark Salter <msalter@redhat.com>
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License version 2 as
 *  published by the Free Software Foundation.
 */
#ifndef _ASM_C6X_PROCESSOR_H
#define _ASM_C6X_PROCESSOR_H

#include <asm/ptrace.h>
#include <asm/page.h>
#include <asm/current.h>

/*
 * Default implementation of macro that returns current
 * instruction pointer ("program counter").
 */
#define current_text_addr()			\
({						\
	void *__pc;				\
	asm("mvc .S2 pce1,%0\n" : "=b"(__pc));	\
	__pc;					\
})
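
/*
 * Illustrative sketch, not part of the original header: current_text_addr()
 * just reads the PCE1 control register with mvc, so a (hypothetical) caller
 * can grab an address near the currently executing code like this.
 */
#if 0
static inline void *example_current_pc(void)
{
	return current_text_addr();	/* address close to the caller's code */
}
#endif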

/*
 * User space process size. This is mostly meaningless for NOMMU
 * but some C6X processors may have RAM addresses up to 0xFFFFFFFF.
 * Since calls like mmap() can return an address or an error, we
 * have to allow room for error returns when code does something
 * like:
 *
 *       addr = do_mmap(...)
 *       if ((unsigned long)addr >= TASK_SIZE)
 *            ... it's an error code, not an address ...
 *
 * Here, we allow for 4096 error codes, which means we really can't
 * use the last 4K page on systems with RAM extending all the way
 * to the end of the 32-bit address space.
 */
#define TASK_SIZE	0xFFFFF000
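
/*
 * Illustrative sketch, not part of the original header: the check described
 * above written out.  Any value at or above TASK_SIZE is treated as one of
 * the reserved error encodings rather than a mapped address.  The helper
 * name is hypothetical.
 */
#if 0
static inline int example_mmap_ret_is_error(unsigned long addr)
{
	return addr >= TASK_SIZE;
}
#endif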

/*
 * This decides where the kernel will search for a free chunk of vm
 * space during mmap's. We won't be using it.
 */
#define TASK_UNMAPPED_BASE	0

struct thread_struct {
	/* callee-saved A/B register pairs preserved across a context switch */
	unsigned long long b15_14;
	unsigned long long a15_14;
	unsigned long long b13_12;
	unsigned long long a13_12;
	unsigned long long b11_10;
	unsigned long long a11_10;
	unsigned long long ricl_icl;	/* saved loop control registers (RILC, ILC) */
	unsigned long  usp;		/* user stack pointer */
	unsigned long  pc;		/* kernel pc */
	unsigned long  wchan;
};

#define INIT_THREAD					\
{							\
	.usp = 0,					\
	.wchan = 0,					\
}

#define INIT_MMAP { \
	&init_mm, 0, 0, NULL, PAGE_SHARED, VM_READ | VM_WRITE | VM_EXEC, 1, \
	NULL, NULL }

/* The saved user register frame lives at the top of the kernel stack. */
#define task_pt_regs(task) \
	((struct pt_regs *)(THREAD_START_SP + task_stack_page(task)) - 1)

#define alloc_kernel_stack()	__get_free_page(GFP_KERNEL)
#define free_kernel_stack(page) free_page((page))

/* Forward declaration, a strange C thing */
struct task_struct;

extern void start_thread(struct pt_regs *regs, unsigned int pc,
			 unsigned long usp);

/* Free all resources held by a thread. */
static inline void release_thread(struct task_struct *dead_task)
{
}

/*
 * saved kernel SP and DP of a blocked thread.
 */
#ifdef _BIG_ENDIAN
#define thread_saved_ksp(tsk) \
	(*(unsigned long *)&(tsk)->thread.b15_14)
#define thread_saved_dp(tsk) \
	(*(((unsigned long *)&(tsk)->thread.b15_14) + 1))
#else
#define thread_saved_ksp(tsk) \
	(*(((unsigned long *)&(tsk)->thread.b15_14) + 1))
#define thread_saved_dp(tsk) \
	(*(unsigned long *)&(tsk)->thread.b15_14)
#endif
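
/*
 * Illustrative sketch, not part of the original header: b15_14 above holds
 * the blocked task's saved B15 (stack pointer) and B14 (data page pointer)
 * as a single 64-bit pair, and the accessors pick the right 32-bit half
 * for the build's endianness.  The helper below is hypothetical.
 */
#if 0
static inline unsigned long example_blocked_task_sp(struct task_struct *tsk)
{
	return thread_saved_ksp(tsk);	/* saved kernel B15 of @tsk */
}
#endif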

extern unsigned long get_wchan(struct task_struct *p);

#define KSTK_EIP(task)	(task_pt_regs(task)->pc)
#define KSTK_ESP(task)	(task_pt_regs(task)->sp)

#define cpu_relax()		do { } while (0)

extern const struct seq_operations cpuinfo_op;

/* Reset the board */
#define HARD_RESET_NOW()

extern unsigned int c6x_core_freq;

extern void (*c6x_restart)(void);
extern void (*c6x_halt)(void);

#endif /* _ASM_C6X_PROCESSOR_H */