Diffstat (limited to 'arch/powerpc/kernel/align.c')
-rw-r--r--  arch/powerpc/kernel/align.c | 173
1 file changed, 112 insertions(+), 61 deletions(-)
diff --git a/arch/powerpc/kernel/align.c b/arch/powerpc/kernel/align.c
index a27ccd5dc6b9..de91f3ae631e 100644
--- a/arch/powerpc/kernel/align.c
+++ b/arch/powerpc/kernel/align.c
@@ -54,8 +54,6 @@ struct aligninfo {
/* DSISR bits reported for a DCBZ instruction: */
#define DCBZ 0x5f /* 8xx/82xx dcbz faults when cache not enabled */
-#define SWAP(a, b) (t = (a), (a) = (b), (b) = t)
-
/*
* The PowerPC stores certain bits of the instruction that caused the
* alignment exception in the DSISR register. This array maps those
@@ -256,11 +254,17 @@ static int emulate_dcbz(struct pt_regs *regs, unsigned char __user *addr)
* bottom 4 bytes of each register, and the loads clear the
* top 4 bytes of the affected register.
*/
+#ifdef __BIG_ENDIAN__
#ifdef CONFIG_PPC64
#define REG_BYTE(rp, i) *((u8 *)((rp) + ((i) >> 2)) + ((i) & 3) + 4)
#else
#define REG_BYTE(rp, i) *((u8 *)(rp) + (i))
#endif
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define REG_BYTE(rp, i) (*(((u8 *)((rp) + ((i)>>2)) + ((i)&3))))
+#endif
#define SWIZ_PTR(p) ((unsigned char __user *)((p) ^ swiz))
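
The REG_BYTE() selection above is easier to see in a standalone sketch. The
following hypothetical userspace program (not part of the patch) mimics the
ppc64 big-endian variant, where each 32-bit register image sits in the low
four bytes of a 64-bit slot, next to the flat little-endian view:

    #include <stdio.h>
    #include <stdint.h>

    /* ppc64 big-endian: byte i of a 32-bit register image is found at
     * offset ((i & 3) + 4) within its 64-bit slot. */
    #define REG_BYTE_BE64(rp, i) (*((uint8_t *)((rp) + ((i) >> 2)) + ((i) & 3) + 4))
    /* little-endian: the byte index maps straight onto the slot. */
    #define REG_BYTE_LE(rp, i)   (*((uint8_t *)((rp) + ((i) >> 2)) + ((i) & 3)))

    int main(void)
    {
        uint64_t regs[2] = { 0x0011223344556677ULL, 0 };

        /* On a big-endian host REG_BYTE_BE64 prints 44 (offset 4 of the
         * slot); on a little-endian host REG_BYTE_LE yields 77 (offset 0). */
        printf("BE64 view: %02x, LE view: %02x\n",
               REG_BYTE_BE64(regs, 0), REG_BYTE_LE(regs, 0));
        return 0;
    }
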
@@ -305,6 +309,15 @@ static int emulate_multiple(struct pt_regs *regs, unsigned char __user *addr,
nb0 = nb + reg * 4 - 128;
nb = 128 - reg * 4;
}
+#ifdef __LITTLE_ENDIAN__
+ /*
+ * String instructions are endian neutral but the code
+ * below is not. Force byte swapping on so that the
+ * effects of swizzling are undone in the load/store
+ * loops below.
+ */
+ flags ^= SW;
+#endif
} else {
/* lmw, stmw */
nb = (32 - reg) * 4;
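
The SW flip above relies on byte reversal being its own inverse: the forced
swap in the load/store loops cancels the swap introduced by address
swizzling. A minimal sketch (hypothetical, plain C) of that involution:

    #include <stdio.h>
    #include <stdint.h>

    static uint32_t rev32(uint32_t x)
    {
        return (x >> 24) | ((x >> 8) & 0x0000ff00) |
               ((x << 8) & 0x00ff0000) | (x << 24);
    }

    int main(void)
    {
        uint32_t x = 0x11223344;
        /* a swap applied twice is the identity, so swizzle + forced SW
         * leaves the endian-neutral string data unchanged */
        printf("%08x\n", rev32(rev32(x)));   /* prints 11223344 */
        return 0;
    }
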
@@ -458,7 +471,7 @@ static struct aligninfo spe_aligninfo[32] = {
static int emulate_spe(struct pt_regs *regs, unsigned int reg,
unsigned int instr)
{
- int t, ret;
+ int ret;
union {
u64 ll;
u32 w[2];
@@ -581,24 +594,18 @@ static int emulate_spe(struct pt_regs *regs, unsigned int reg,
if (flags & SW) {
switch (flags & 0xf0) {
case E8:
- SWAP(data.v[0], data.v[7]);
- SWAP(data.v[1], data.v[6]);
- SWAP(data.v[2], data.v[5]);
- SWAP(data.v[3], data.v[4]);
+ data.ll = swab64(data.ll);
break;
case E4:
-
- SWAP(data.v[0], data.v[3]);
- SWAP(data.v[1], data.v[2]);
- SWAP(data.v[4], data.v[7]);
- SWAP(data.v[5], data.v[6]);
+ data.w[0] = swab32(data.w[0]);
+ data.w[1] = swab32(data.w[1]);
break;
/* It's half-word endian */
default:
- SWAP(data.v[0], data.v[1]);
- SWAP(data.v[2], data.v[3]);
- SWAP(data.v[4], data.v[5]);
- SWAP(data.v[6], data.v[7]);
+ data.h[0] = swab16(data.h[0]);
+ data.h[1] = swab16(data.h[1]);
+ data.h[2] = swab16(data.h[2]);
+ data.h[3] = swab16(data.h[3]);
break;
}
}
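
The swab64()/swab32()/swab16() calls replace the pairwise SWAP()s with the
kernel's standard byte-swap helpers from <linux/swab.h>. A hypothetical
userspace check that reversing the eight bytes in place matches a 64-bit
byte swap on either host endianness:

    #include <stdio.h>
    #include <stdint.h>

    static uint64_t my_swab64(uint64_t x)
    {
        uint64_t r = 0;
        for (int i = 0; i < 8; i++)
            r |= ((x >> (8 * i)) & 0xff) << (8 * (7 - i));
        return r;
    }

    int main(void)
    {
        union { uint64_t ll; uint8_t v[8]; } d = { .ll = 0x0011223344556677ULL };
        uint8_t t;
    #define SWAP(a, b) (t = (a), (a) = (b), (b) = t)
        SWAP(d.v[0], d.v[7]);
        SWAP(d.v[1], d.v[6]);
        SWAP(d.v[2], d.v[5]);
        SWAP(d.v[3], d.v[4]);
        /* prints 1: the old SWAP() sequence and the byte swap agree */
        printf("%d\n", d.ll == my_swab64(0x0011223344556677ULL));
        return 0;
    }
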
@@ -658,14 +665,31 @@ static int emulate_vsx(unsigned char __user *addr, unsigned int reg,
flush_vsx_to_thread(current);
if (reg < 32)
- ptr = (char *) &current->thread.TS_FPR(reg);
+ ptr = (char *) &current->thread.fp_state.fpr[reg][0];
else
- ptr = (char *) &current->thread.vr[reg - 32];
+ ptr = (char *) &current->thread.vr_state.vr[reg - 32];
lptr = (unsigned long *) ptr;
+#ifdef __LITTLE_ENDIAN__
+ if (flags & SW) {
+ elsize = length;
+ sw = length-1;
+ } else {
+ /*
+ * The elements are BE ordered, even in LE mode, so process
+ * them in reverse order.
+ */
+ addr += length - elsize;
+
+ /* 8 byte memory accesses go in the top 8 bytes of the VR */
+ if (length == 8)
+ ptr += 8;
+ }
+#else
if (flags & SW)
sw = elsize-1;
+#endif
for (j = 0; j < length; j += elsize) {
for (i = 0; i < elsize; ++i) {
@@ -675,19 +699,31 @@ static int emulate_vsx(unsigned char __user *addr, unsigned int reg,
ret |= __get_user(ptr[i^sw], addr + i);
}
ptr += elsize;
+#ifdef __LITTLE_ENDIAN__
+ addr -= elsize;
+#else
addr += elsize;
+#endif
}
+#ifdef __BIG_ENDIAN__
+#define VSX_HI 0
+#define VSX_LO 1
+#else
+#define VSX_HI 1
+#define VSX_LO 0
+#endif
+
if (!ret) {
if (flags & U)
regs->gpr[areg] = regs->dar;
/* Splat load copies the same data to top and bottom 8 bytes */
if (flags & SPLT)
- lptr[1] = lptr[0];
- /* For 8 byte loads, zero the top 8 bytes */
+ lptr[VSX_LO] = lptr[VSX_HI];
+ /* For 8 byte loads, zero the low 8 bytes */
else if (!(flags & ST) && (8 == length))
- lptr[1] = 0;
+ lptr[VSX_LO] = 0;
} else
return -EFAULT;
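
VSX_HI/VSX_LO exist because lptr views the 128-bit vector register as two
unsigned longs, and the architecturally "high" doubleword is element 0 only
on a big-endian kernel. A sketch (hypothetical; __BIG_ENDIAN__ is the
GCC-on-powerpc macro the patch tests, so the little-endian branch is taken
on most other hosts):

    #include <stdio.h>
    #include <stdint.h>

    #ifdef __BIG_ENDIAN__
    #define VSX_HI 0
    #define VSX_LO 1
    #else
    #define VSX_HI 1
    #define VSX_LO 0
    #endif

    int main(void)
    {
        uint64_t vsr[2] = { 0, 0 };  /* stand-in for one 128-bit VSR */

        vsr[VSX_HI] = 0xdeadbeef;    /* an 8-byte load fills the high doubleword */
        vsr[VSX_LO] = 0;             /* ...and the low doubleword is zeroed      */

        printf("element0=%llx element1=%llx\n",
               (unsigned long long)vsr[0], (unsigned long long)vsr[1]);
        return 0;
    }
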
@@ -710,18 +746,28 @@ int fix_alignment(struct pt_regs *regs)
unsigned int dsisr;
unsigned char __user *addr;
unsigned long p, swiz;
- int ret, t;
- union {
+ int ret, i;
+ union data {
u64 ll;
double dd;
unsigned char v[8];
struct {
+#ifdef __LITTLE_ENDIAN__
+ int low32;
+ unsigned hi32;
+#else
unsigned hi32;
int low32;
+#endif
} x32;
struct {
+#ifdef __LITTLE_ENDIAN__
+ short low16;
+ unsigned char hi48[6];
+#else
unsigned char hi48[6];
short low16;
+#endif
} x16;
} data;
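
The reordered struct members keep low32/low16 aliased over the least
significant end of data.ll on both endiannesses, which is what lets the
offsetof()-based load/store loops further down work unchanged. A
hypothetical userspace check (using __BYTE_ORDER__, since __LITTLE_ENDIAN__
is a powerpc-gcc macro):

    #include <stdio.h>
    #include <stdint.h>
    #include <stddef.h>

    union data {
        uint64_t ll;
        uint8_t v[8];
        struct {
    #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
            int32_t low32;
            uint32_t hi32;
    #else
            uint32_t hi32;
            int32_t low32;
    #endif
        } x32;
    };

    int main(void)
    {
        union data d = { .ll = 0x1122334455667788ULL };
        /* offset is 0 on LE, 4 on BE; the value is the low half either way */
        printf("offset=%zu low32=%08x\n",
               offsetof(union data, x32.low32), (unsigned)d.x32.low32);
        return 0;   /* prints low32=55667788 on either endianness */
    }
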
@@ -780,8 +826,9 @@ int fix_alignment(struct pt_regs *regs)
/* Byteswap little endian loads and stores */
swiz = 0;
- if (regs->msr & MSR_LE) {
+ if ((regs->msr & MSR_LE) != (MSR_KERNEL & MSR_LE)) {
flags ^= SW;
+#ifdef __BIG_ENDIAN__
/*
* So-called "PowerPC little endian" mode works by
* swizzling addresses rather than by actually doing
@@ -794,6 +841,7 @@ int fix_alignment(struct pt_regs *regs)
*/
if (cpu_has_feature(CPU_FTR_PPC_LE))
swiz = 7;
+#endif
}
/* DAR has the operand effective address */
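
For reference, the swizzle is just an XOR of the low three address bits,
which reverses byte order within each aligned doubleword. A minimal
standalone illustration (hypothetical; the kernel's SWIZ_PTR folds the same
XOR into a user pointer):

    #include <stdio.h>
    #include <stdint.h>

    #define SWIZ_PTR(p, swiz) ((uint8_t *)((uintptr_t)(p) ^ (swiz)))

    int main(void)
    {
        /* doubleword aligned so p ^ 7 stays inside the buffer */
        _Alignas(8) uint8_t buf[8] = { 0, 1, 2, 3, 4, 5, 6, 7 };
        unsigned long swiz = 7;

        for (int i = 0; i < 8; i++)
            printf("%d ", *SWIZ_PTR(buf + i, swiz));  /* prints 7 6 5 4 3 2 1 0 */
        printf("\n");
        return 0;
    }
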
@@ -818,7 +866,7 @@ int fix_alignment(struct pt_regs *regs)
elsize = 8;
flags = 0;
- if (regs->msr & MSR_LE)
+ if ((regs->msr & MSR_LE) != (MSR_KERNEL & MSR_LE))
flags |= SW;
if (instruction & 0x100)
flags |= ST;
@@ -878,32 +926,36 @@ int fix_alignment(struct pt_regs *regs)
* get it from register values
*/
if (!(flags & ST)) {
- data.ll = 0;
- ret = 0;
- p = (unsigned long) addr;
+ unsigned int start = 0;
+
switch (nb) {
- case 8:
- ret |= __get_user_inatomic(data.v[0], SWIZ_PTR(p++));
- ret |= __get_user_inatomic(data.v[1], SWIZ_PTR(p++));
- ret |= __get_user_inatomic(data.v[2], SWIZ_PTR(p++));
- ret |= __get_user_inatomic(data.v[3], SWIZ_PTR(p++));
case 4:
- ret |= __get_user_inatomic(data.v[4], SWIZ_PTR(p++));
- ret |= __get_user_inatomic(data.v[5], SWIZ_PTR(p++));
+ start = offsetof(union data, x32.low32);
+ break;
case 2:
- ret |= __get_user_inatomic(data.v[6], SWIZ_PTR(p++));
- ret |= __get_user_inatomic(data.v[7], SWIZ_PTR(p++));
- if (unlikely(ret))
- return -EFAULT;
+ start = offsetof(union data, x16.low16);
+ break;
}
+
+ data.ll = 0;
+ ret = 0;
+ p = (unsigned long)addr;
+
+ for (i = 0; i < nb; i++)
+ ret |= __get_user_inatomic(data.v[start + i],
+ SWIZ_PTR(p++));
+
+ if (unlikely(ret))
+ return -EFAULT;
+
} else if (flags & F) {
- data.dd = current->thread.TS_FPR(reg);
+ data.ll = current->thread.TS_FPR(reg);
if (flags & S) {
/* Single-precision FP store requires conversion... */
#ifdef CONFIG_PPC_FPU
preempt_disable();
enable_kernel_fp();
- cvt_df(&data.dd, (float *)&data.v[4]);
+ cvt_df(&data.dd, (float *)&data.x32.low32);
preempt_enable();
#else
return 0;
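
cvt_df() narrows the 64-bit FP register image to single precision before the
4-byte store; the patch points it at x32.low32 instead of the endian-specific
v[4]. A hypothetical userspace analog of that step:

    #include <stdio.h>
    #include <string.h>
    #include <stdint.h>

    union data {
        uint64_t ll;
        double dd;
        struct { int32_t low32; uint32_t hi32; } x32;  /* LE layout for brevity */
    };

    int main(void)
    {
        union data d = { .dd = 1.5 };
        float f = (float)d.dd;                 /* the double -> single conversion */
        memcpy(&d.x32.low32, &f, sizeof(f));   /* land it over the low32 bytes    */
        printf("%08x\n", (unsigned)d.x32.low32);   /* prints 3fc00000 (1.5f) */
        return 0;
    }
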
@@ -915,17 +967,13 @@ int fix_alignment(struct pt_regs *regs)
if (flags & SW) {
switch (nb) {
case 8:
- SWAP(data.v[0], data.v[7]);
- SWAP(data.v[1], data.v[6]);
- SWAP(data.v[2], data.v[5]);
- SWAP(data.v[3], data.v[4]);
+ data.ll = swab64(data.ll);
break;
case 4:
- SWAP(data.v[4], data.v[7]);
- SWAP(data.v[5], data.v[6]);
+ data.x32.low32 = swab32(data.x32.low32);
break;
case 2:
- SWAP(data.v[6], data.v[7]);
+ data.x16.low16 = swab16(data.x16.low16);
break;
}
}
@@ -947,7 +995,7 @@ int fix_alignment(struct pt_regs *regs)
#ifdef CONFIG_PPC_FPU
preempt_disable();
enable_kernel_fp();
- cvt_fd((float *)&data.v[4], &data.dd);
+ cvt_fd((float *)&data.x32.low32, &data.dd);
preempt_enable();
#else
return 0;
@@ -957,25 +1005,28 @@ int fix_alignment(struct pt_regs *regs)
/* Store result to memory or update registers */
if (flags & ST) {
- ret = 0;
- p = (unsigned long) addr;
+ unsigned int start = 0;
+
switch (nb) {
- case 8:
- ret |= __put_user_inatomic(data.v[0], SWIZ_PTR(p++));
- ret |= __put_user_inatomic(data.v[1], SWIZ_PTR(p++));
- ret |= __put_user_inatomic(data.v[2], SWIZ_PTR(p++));
- ret |= __put_user_inatomic(data.v[3], SWIZ_PTR(p++));
case 4:
- ret |= __put_user_inatomic(data.v[4], SWIZ_PTR(p++));
- ret |= __put_user_inatomic(data.v[5], SWIZ_PTR(p++));
+ start = offsetof(union data, x32.low32);
+ break;
case 2:
- ret |= __put_user_inatomic(data.v[6], SWIZ_PTR(p++));
- ret |= __put_user_inatomic(data.v[7], SWIZ_PTR(p++));
+ start = offsetof(union data, x16.low16);
+ break;
}
+
+ ret = 0;
+ p = (unsigned long)addr;
+
+ for (i = 0; i < nb; i++)
+ ret |= __put_user_inatomic(data.v[start + i],
+ SWIZ_PTR(p++));
+
if (unlikely(ret))
return -EFAULT;
} else if (flags & F)
- current->thread.TS_FPR(reg) = data.dd;
+ current->thread.TS_FPR(reg) = data.ll;
else
regs->gpr[reg] = data.ll;