author     Linus Torvalds <torvalds@linux-foundation.org>  2015-05-12 16:02:06 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2015-05-12 16:02:06 -0700
commit     6c9d370c16aad97cfc6de68666634eaabac2b048 (patch)
tree       7af08f77133d5e6329f851514f5758651432d812
parent     03906ca389211e167e0d8e9e5ec330e9be467936 (diff)
parent     620b155034570f577470cf5309f741bac6a6e32b (diff)
Merge branch 'upstream' of git://git.linux-mips.org/pub/scm/ralf/upstream-linus
Pull MIPS fixes from Ralf Baechle:
 "One build fix for build breakage of all MIPS SMP kernels caused by
  Rusty's fix of obsolete use of cpu mask helpers, another to fix the
  FP ABI selection when loading an ELF binary"

* 'upstream' of git://git.linux-mips.org/pub/scm/ralf/upstream-linus:
  MIPS: fix FP mode selection in lieu of .MIPS.abiflags data
  MIPS: SMP: Fix build error.
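For context on the SMP build fix below: the change drops the volatile qualifier from cpu_callin_map. Rusty's cleanup switched the open-coded cpu mask operations over to the generic cpumask helpers, which take a plain "struct cpumask *", and passing the address of a volatile-qualified mask to them discards the qualifier, which the kernel build promotes to a hard error. The following stand-alone sketch reproduces the same type mismatch; names and the helper are purely illustrative, not kernel code.

/*
 * Illustration only: the cpumask helpers take a plain "struct cpumask *",
 * so handing them the address of a volatile-qualified mask discards the
 * qualifier and trips the compiler.
 */
struct cpumask {
	unsigned long bits[4];
};

static void cpumask_set_cpu_demo(unsigned int cpu, struct cpumask *mask)
{
	const unsigned int bpl = 8 * sizeof(unsigned long);

	mask->bits[cpu / bpl] |= 1UL << (cpu % bpl);
}

volatile struct cpumask callin_old;	/* pre-fix style declaration  */
struct cpumask callin_new;		/* post-fix style declaration */

int main(void)
{
	/*
	 * cpumask_set_cpu_demo(0, &callin_old);
	 * ^ "discards 'volatile' qualifier from pointer target type";
	 *   with -Werror that warning becomes the reported build failure.
	 */
	cpumask_set_cpu_demo(0, &callin_new);	/* compiles cleanly */
	return 0;
}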
-rw-r--r--  arch/mips/include/asm/smp.h |  2
-rw-r--r--  arch/mips/kernel/elf.c      | 32
-rw-r--r--  arch/mips/kernel/smp.c      |  6
3 files changed, 22 insertions, 18 deletions
diff --git a/arch/mips/include/asm/smp.h b/arch/mips/include/asm/smp.h
index bb02fac9b4fa..2b25d1ba1ea0 100644
--- a/arch/mips/include/asm/smp.h
+++ b/arch/mips/include/asm/smp.h
@@ -45,7 +45,7 @@ extern int __cpu_logical_map[NR_CPUS];
#define SMP_DUMP 0x8
#define SMP_ASK_C0COUNT 0x10
-extern volatile cpumask_t cpu_callin_map;
+extern cpumask_t cpu_callin_map;
/* Mask of CPUs which are currently definitely operating coherently */
extern cpumask_t cpu_coherent_mask;
diff --git a/arch/mips/kernel/elf.c b/arch/mips/kernel/elf.c
index be4899f3c393..4a4d9e067c89 100644
--- a/arch/mips/kernel/elf.c
+++ b/arch/mips/kernel/elf.c
@@ -76,14 +76,6 @@ int arch_elf_pt_proc(void *_ehdr, void *_phdr, struct file *elf,
/* Lets see if this is an O32 ELF */
if (ehdr32->e_ident[EI_CLASS] == ELFCLASS32) {
- /* FR = 1 for N32 */
- if (ehdr32->e_flags & EF_MIPS_ABI2)
- state->overall_fp_mode = FP_FR1;
- else
- /* Set a good default FPU mode for O32 */
- state->overall_fp_mode = cpu_has_mips_r6 ?
- FP_FRE : FP_FR0;
-
if (ehdr32->e_flags & EF_MIPS_FP64) {
/*
* Set MIPS_ABI_FP_OLD_64 for EF_MIPS_FP64. We will override it
@@ -104,9 +96,6 @@ int arch_elf_pt_proc(void *_ehdr, void *_phdr, struct file *elf,
(char *)&abiflags,
sizeof(abiflags));
} else {
- /* FR=1 is really the only option for 64-bit */
- state->overall_fp_mode = FP_FR1;
-
if (phdr64->p_type != PT_MIPS_ABIFLAGS)
return 0;
if (phdr64->p_filesz < sizeof(abiflags))
@@ -137,6 +126,7 @@ int arch_check_elf(void *_ehdr, bool has_interpreter,
struct elf32_hdr *ehdr = _ehdr;
struct mode_req prog_req, interp_req;
int fp_abi, interp_fp_abi, abi0, abi1, max_abi;
+ bool is_mips64;
if (!config_enabled(CONFIG_MIPS_O32_FP64_SUPPORT))
return 0;
@@ -152,10 +142,22 @@ int arch_check_elf(void *_ehdr, bool has_interpreter,
abi0 = abi1 = fp_abi;
}
- /* ABI limits. O32 = FP_64A, N32/N64 = FP_SOFT */
- max_abi = ((ehdr->e_ident[EI_CLASS] == ELFCLASS32) &&
- (!(ehdr->e_flags & EF_MIPS_ABI2))) ?
- MIPS_ABI_FP_64A : MIPS_ABI_FP_SOFT;
+ is_mips64 = (ehdr->e_ident[EI_CLASS] == ELFCLASS64) ||
+ (ehdr->e_flags & EF_MIPS_ABI2);
+
+ if (is_mips64) {
+ /* MIPS64 code always uses FR=1, thus the default is easy */
+ state->overall_fp_mode = FP_FR1;
+
+ /* Disallow access to the various FPXX & FP64 ABIs */
+ max_abi = MIPS_ABI_FP_SOFT;
+ } else {
+ /* Default to a mode capable of running code expecting FR=0 */
+ state->overall_fp_mode = cpu_has_mips_r6 ? FP_FRE : FP_FR0;
+
+ /* Allow all ABIs we know about */
+ max_abi = MIPS_ABI_FP_64A;
+ }
if ((abi0 > max_abi && abi0 != MIPS_ABI_FP_UNKNOWN) ||
(abi1 > max_abi && abi1 != MIPS_ABI_FP_UNKNOWN))
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
index 193ace7955fb..faa46ebd9dda 100644
--- a/arch/mips/kernel/smp.c
+++ b/arch/mips/kernel/smp.c
@@ -43,7 +43,7 @@
#include <asm/time.h>
#include <asm/setup.h>
-volatile cpumask_t cpu_callin_map; /* Bitmask of started secondaries */
+cpumask_t cpu_callin_map; /* Bitmask of started secondaries */
int __cpu_number_map[NR_CPUS]; /* Map physical to logical */
EXPORT_SYMBOL(__cpu_number_map);
@@ -218,8 +218,10 @@ int __cpu_up(unsigned int cpu, struct task_struct *tidle)
/*
* Trust is futile. We should really have timeouts ...
*/
- while (!cpumask_test_cpu(cpu, &cpu_callin_map))
+ while (!cpumask_test_cpu(cpu, &cpu_callin_map)) {
udelay(100);
+ schedule();
+ }
synchronise_count_master(cpu);
return 0;
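As a plain-C restatement of the elf.c change above: the default FP mode is now chosen in arch_check_elf() from the ELF header alone rather than in arch_elf_pt_proc(), so a sane default is picked even when no .MIPS.abiflags data is present, which is what the summary line refers to. The N32/N64 test it relies on can be sketched as below; the helper name is illustrative and the EF_MIPS_ABI2 value is the one from elf.h, not kernel code.

#include <elf.h>
#include <stdbool.h>

#ifndef EF_MIPS_ABI2
#define EF_MIPS_ABI2	0x00000020	/* N32 flag, as in glibc's elf.h */
#endif

/*
 * Illustrative restatement of the patch's is_mips64 test: 64-bit and N32
 * binaries only ever run with FR=1, everything else is classic O32 and
 * defaults to FR=0 (or FRE on MIPS R6).  For a 64-bit image the e_flags
 * field is never read thanks to short-circuiting, so the 32-bit header
 * view is safe here, just as in the kernel code.
 */
static bool is_mips64_abi(const Elf32_Ehdr *ehdr)
{
	return ehdr->e_ident[EI_CLASS] == ELFCLASS64 ||
	       (ehdr->e_flags & EF_MIPS_ABI2);
}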