/*
* SMP support for Emma Mobile EV2
*
* Copyright (C) 2012 Renesas Solutions Corp.
* Copyright (C) 2012 Magnus Damm
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <mach/common.h>
#include <mach/emev2.h>
#include <asm/smp_plat.h>
#include <asm/smp_scu.h>
#include <asm/cacheflush.h>

#define EMEV2_SCU_BASE 0x1e000000

static DEFINE_SPINLOCK(scu_lock);
static void __iomem *scu_base;
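
/*
 * Set/clear bits in the SCU CPU Power Status register (offset 0x08).
 * Each CPU owns a two-bit status field at bit position (cpu * 8);
 * clearing it leaves the core in normal, coherent mode.
 */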
static void modify_scu_cpu_psr(unsigned long set, unsigned long clr)
{
        unsigned long tmp;

        /* we assume this code is running on a different cpu
         * than the one that is changing coherency setting */
        spin_lock(&scu_lock);
        tmp = readl(scu_base + 8);
        tmp &= ~clr;
        tmp |= set;
        writel(tmp, scu_base + 8);
        spin_unlock(&scu_lock);
}
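
/*
 * Read the core count from the SCU configuration register, mapping
 * the SCU on first use.  Fall back to a single core if the mapping
 * fails.
 */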
static unsigned int __init emev2_get_core_count(void)
{
        if (!scu_base) {
                scu_base = ioremap(EMEV2_SCU_BASE, PAGE_SIZE);
                emev2_clock_init(); /* need ioremapped SMU */
        }

        WARN_ON_ONCE(!scu_base);

        return scu_base ? scu_get_core_count(scu_base) : 1;
}

static int emev2_platform_cpu_kill(unsigned int cpu)
{
        return 0; /* not supported yet */
}

static int __maybe_unused emev2_cpu_kill(unsigned int cpu)
{
        int k;

        /* this function is running on another CPU than the offline target,
         * here we need to wait for the shutdown code in platform_cpu_die()
         * to finish before asking SoC-specific code to power off the CPU core.
         */
        for (k = 0; k < 1000; k++) {
                if (shmobile_cpu_is_dead(cpu))
                        return emev2_platform_cpu_kill(cpu);
                mdelay(1);
        }

        return 0;
}
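
/*
 * Bring up a secondary core: enable SCU coherency for it, point the
 * ROM loader at shmobile_secondary_vector (in headsmp.S) and wake the
 * core with a wakeup IPI.
 */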
static int __cpuinit emev2_boot_secondary(unsigned int cpu, struct task_struct *idle)
{
        cpu = cpu_logical_map(cpu);

        /* enable cache coherency */
        modify_scu_cpu_psr(0, 3 << (cpu * 8));

        /* Tell ROM loader about our vector (in headsmp.S) */
        emev2_set_boot_vector(__pa(shmobile_secondary_vector));

        arch_send_wakeup_ipi_mask(cpumask_of(cpu));
        return 0;
}

static void __init emev2_smp_prepare_cpus(unsigned int max_cpus)
{
        int cpu = cpu_logical_map(0);

        scu_enable(scu_base);

        /* enable cache coherency on CPU0 */
        modify_scu_cpu_psr(0, 3 << (cpu * 8));
}

static void __init emev2_smp_init_cpus(void)
{
        unsigned int ncores = emev2_get_core_count();

        shmobile_smp_init_cpus(ncores);
}
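
/*
 * SMP operations for Emma Mobile EV2.  The hotplug hooks are only
 * compiled in when CONFIG_HOTPLUG_CPU is enabled; emev2_cpu_kill waits
 * for the dying CPU but cannot yet power the core off.
 */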
struct smp_operations emev2_smp_ops __initdata = {
        .smp_init_cpus          = emev2_smp_init_cpus,
        .smp_prepare_cpus       = emev2_smp_prepare_cpus,
        .smp_boot_secondary     = emev2_boot_secondary,
#ifdef CONFIG_HOTPLUG_CPU
        .cpu_kill               = emev2_cpu_kill,
        .cpu_die                = shmobile_cpu_die,
        .cpu_disable            = shmobile_cpu_disable,
#endif
};